How to use the zmq.POLLIN event flag in pyzmq

To help you get started, we’ve selected a few zmq examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github Mononofu / P2P-DNS / dns-server.py View on Github external
return defer.succeed([
                    (dns.RRHeader(name,
                                  dns.A,
                                  dns.IN,
                                  self.ttl,
                                  dns.Record_A(ip, self.ttl)),),
                    (),()
                    ])
        else:
            try:
                self.socket.send(name)
                # our p2p server should answer within 5 ms
                socks = dict(self.poller.poll(timeout=5))

                if self.socket in socks and socks[self.socket] == zmq.POLLIN:
                    # format is "IP TTL"
                    msg = self.socket.recv().split(' ')
                    self.cache[name] = CacheEntry(msg[0], msg[1])
                    if msg[0] == "0.0.0.0":
                        # entry doesn't exist
                        return self._lookup(name, dns.IN, dns.A, timeout)
                    return self.lookupAddress(name)
            except zmq._zmq.ZMQError:
                log.msg("please start p2p-dns server")
            return self._lookup(name, dns.IN, dns.A, timeout)
github EDSM-NET / EDDN / examples / Python 2.7 / Client_Simple.py View on Github external
subscriber.setsockopt(zmq.SUBSCRIBE, "")

    while True:
        try:
            subscriber.connect(__relayEDDN)
            print 'Connect to EDDN'
            sys.stdout.flush()
            
            poller = zmq.Poller()
            poller.register(subscriber, zmq.POLLIN)
 
            while True:
                socks = dict(poller.poll(__timeoutEDDN))
                if socks:
                    if socks.get(subscriber) == zmq.POLLIN:
                        __message   = subscriber.recv(zmq.NOBLOCK)
                        __message   = zlib.decompress(__message)
                        __json      = simplejson.loads(__message)

                        # call dumps() to ensure double quotes in output
                        print simplejson.dumps(__json)
                        sys.stdout.flush()
                else:
                    print 'Disconnect from EDDN (After timeout)'
                    sys.stdout.flush()
                    
                    subscriber.disconnect(__relayEDDN)
                    break
                
        except zmq.ZMQError, e:
            print 'Disconnect from EDDN (After receiving ZMQError)'
github samueltardieu / zmq-broker / broker.py View on Github external
worker and worker answer addresses. If context is None, a new
        0MQ context with one I/O thread will be created."""
        self.context = context or zmq.Context(1)

        self.poller = zmq.Poller()

        self.requesters = self.context.socket(zmq.XREP)
        self.requesters.bind(requesters_addr)
        self.poller.register(self.requesters, zmq.POLLIN)

        self.workers_query = self.context.socket(zmq.REP)
        self.workers_query.bind(workers_query_addr)

        self.workers_answer = self.context.socket(zmq.PULL)
        self.workers_answer.bind(workers_answer_addr)
        self.poller.register(self.workers_answer, zmq.POLLIN)

        self.requests = Requests(timeout, tries)
github justinfx / pyRpc / pyRpc / client.py View on Github external
def _worker_routine(self, context, remote_address, work_address):
        """
        Worker loop for processing rpc calls.

        Pulls queued call messages from the local work socket and relays
        each one to the remote RPC server over a REQ socket, then invokes
        the callback registered for that message id (if any) with the
        server's reply.  Loops until self.exit_request becomes true.
        """
        logger.debug("Starting local worker thread: %s" % current_thread().name)

        work_pull = context.socket(zmq.PULL)
        work_pull.connect(work_address)

        logger.debug("Making connection to RPC server at: %s" % remote_address)
        server_req = context.socket(zmq.REQ)
        server_req.connect(remote_address)

        work_poller = zmq.Poller()
        work_poller.register(work_pull, zmq.POLLIN)

        while not self.exit_request:

            # Short poll timeout so exit_request is re-checked twice a second.
            ready = dict(work_poller.poll(500))
            if ready.get(work_pull, None) != zmq.POLLIN:
                continue

            call = work_pull.recv_pyobj()

            # Claim the callback before the round-trip so each message id's
            # callback is consumed exactly once.
            done_cbk = self._callbacks.pop(call.id, None)

            logger.debug("(%s) Forwarding a RPC call to method: %s" % (current_thread().name, call.method))
            server_req.send_pyobj(call)
            reply = server_req.recv_pyobj()

            if done_cbk:
                logger.debug("Response received from server. Running callback.")
                done_cbk(reply)
github cloudify-cosmo / cloudify-plugins-common / cloudify / celery / logging_server.py View on Github external
def __init__(self, logdir, socket_url, cache_size):
        self.closed = False
        self.zmq_context = zmq.Context(io_threads=1)
        self.socket = self.zmq_context.socket(zmq.PULL)
        self.socket.bind(socket_url)
        self.poller = zmq.Poller()
        self.poller.register(self.socket, zmq.POLLIN)
        self.logdir = logdir

        # on the management server, log files are handled by logrotate
        # with copytruncate so we use the simple FileHandler.
        # on agent hosts, we want to rotate the logs using python's
        # RotatingFileHandler.
        if os.environ.get('MGMTWORKER_HOME'):
            self.handler_func = logging.FileHandler
        else:
            self.handler_func = functools.partial(
                logging.handlers.RotatingFileHandler,
                maxBytes=LOGFILE_SIZE_BYTES,
                backupCount=LOGFILE_BACKUP_COUNT)

        # wrap the _get_handler method with an lru cache decorator
        # so we only keep the last 'cache_size' used handlers in in turn
github srinikom / leveldb-server / leveldb-server.py View on Github external
poll = zmq.Poller()
    poll.register(frontend, zmq.POLLIN)
    poll.register(backend,  zmq.POLLIN)

    db = leveldb.LevelDB(options.dbfile)

    workers = []
    for i in xrange(3):
        worker = workerThread(context, db)
        worker.start()
        workers.append(worker)
            
    while True:
        sockets = dict(poll.poll())
        if frontend in sockets:
            if sockets[frontend] == zmq.POLLIN:
                msg = frontend.recv_multipart()
                backend.send_multipart(msg)
                
        if backend in sockets:
            if sockets[backend] == zmq.POLLIN:
                msg = backend.recv_multipart()
                frontend.send_multipart(msg)
                
    #never here
    frontend.close()
    backend.close()
    context.term()
github ESGF / esgf-compute-wps / compute / compute_provisioner / compute_provisioner / provisioner.py View on Github external
BRCVTIMEO = os.environ.get('BACKEND_RECV_TIMEOUT', 15)

        self.backend.setsockopt(zmq.SNDTIMEO, BSNDTIMEO * 1000)
        self.backend.setsockopt(zmq.RCVTIMEO, BRCVTIMEO * 1000)

        backend_addr = 'tcp://*:{!s}'.format(backend_port)

        self.backend.bind(backend_addr)

        logger.info('Binding backend to %r', backend_addr)

        self.poll_both = zmq.Poller()

        self.poll_both.register(self.frontend, zmq.POLLIN)

        self.poll_both.register(self.backend, zmq.POLLIN)

        self.redis = redis.Redis(host=redis_host, db=1)

        logger.info('Connected to redis db %r', self.redis)
github westpa / westpa / lib / wwmgr / work_managers / zeromq / work_manager.py View on Github external
rr_socket = self.context.socket(zmq.REP)
        ann_socket = self.context.socket(zmq.PUB)
        
        for endpoint in (self.local_rr_endpoint, self.downstream_rr_endpoint):
            if endpoint: rr_socket.bind(endpoint)
            
        for endpoint in (self.local_ann_endpoint, self.downstream_ann_endpoint):
            if endpoint: ann_socket.bind(endpoint)

        inproc_socket = self.context.socket(zmq.SUB)
        inproc_socket.setsockopt(zmq.SUBSCRIBE,b'')
        inproc_socket.bind(self.inproc_endpoint)
        
        poller = zmq.Poller()
        poller.register(inproc_socket, zmq.POLLIN)
        poller.register(rr_socket, zmq.POLLIN)
        
        timers = PassiveMultiTimer()
        timers.add_timer('tasks_avail', self.master_beacon_period)
        timers.add_timer('master_beacon', self.master_beacon_period)
        timers.add_timer('worker_timeout_check', self.worker_beacon_period*self.timeout_factor)
        timers.add_timer('startup_timeout', self.startup_timeout)
        timers.reset()
        
        self.log.debug('master beacon period: {!r}'.format(self.master_beacon_period))
        self.log.debug('startup timeout: {!r}'.format(self.startup_timeout))
                
        peer_found = False
        
        try:
            # Send a master alive message immediately; it will get discarded if necessary
github tranquilit / WAPT-archives / lib / site-packages / zmq / devices / monitoredqueue.py View on Github external
def monitored_queue(in_socket, out_socket, mon_socket,
                    in_prefix=b'in', out_prefix=b'out'):
    """Forward messages between in_socket and out_socket forever,
    mirroring each relayed message onto mon_socket tagged with the
    prefix for its direction of travel.

    _relay is told to swap envelope identities only when both endpoints
    are ROUTER sockets (a router-to-router device needs the flip).
    """
    swap_ids = (in_socket.type == zmq.ROUTER
                and out_socket.type == zmq.ROUTER)

    queue_poller = zmq.Poller()
    queue_poller.register(in_socket, zmq.POLLIN)
    queue_poller.register(out_socket, zmq.POLLIN)

    while True:
        # Block until at least one side has traffic, then service
        # whichever direction(s) reported an event.
        ready = dict(queue_poller.poll())
        if in_socket in ready:
            _relay(in_socket, out_socket, mon_socket, in_prefix, swap_ids)
        if out_socket in ready:
            _relay(out_socket, in_socket, mon_socket, out_prefix, swap_ids)
github dieseldev / diesel / diesel / transports / zeromq.py View on Github external
def handle_events(self):
        if self.zsock.closed:
            self.shutdown()
            return
        events = self.zsock.getsockopt(zmq.EVENTS)
        while events:
            if events & zmq.POLLIN:
                self._handle_read()
            if events & zmq.POLLOUT:
                self._handle_write()
            if self.zsock.closed:
                self.shutdown()
                return
            events = self.zsock.getsockopt(zmq.EVENTS)
    handle_read = handle_events