How to use the eventlet.spawn_n function in eventlet

To help you get started, we’ve selected a few examples of eventlet.spawn_n, based on popular ways it is used in public projects.
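
Before the project examples, a minimal, self-contained sketch shows the basics: eventlet.spawn_n schedules a function to run in its own green thread and returns to the caller immediately. Unlike eventlet.spawn, it does not return a GreenThread, so there is no handle to wait on and no way to get the function's return value or exceptions; it is purely fire-and-forget. The function and argument below are made up for illustration.

import eventlet

def greet(name):
    # runs cooperatively in its own green thread
    print("hello, %s" % name)

# fire-and-forget: control returns to the caller immediately
eventlet.spawn_n(greet, "world")

# yield to the hub so the spawned green thread actually gets to run
eventlet.sleep(0)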


github openstack / magnum / magnum / openstack / common / service.py
    def _child_process(self, service):
        self._child_process_handle_signal()

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher()
        launcher.launch_service(service)
        return launcher
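
The _pipe_watcher trick above is how a forked child notices that its parent has died: the parent keeps the write end of a pipe, the child closes its own copy of the write end and parks a green thread on the read end, and the read returns end-of-file as soon as the parent goes away. The sketch below shows the same mechanism in a single process, simulating the parent's death by closing the write end; it is a rough illustration of the pattern, not the OpenStack implementation.

import os
import eventlet
from eventlet import greenio

# the parent creates the pipe before forking; only the parent keeps the write end
rfd, writepipe = os.pipe()
readpipe = greenio.GreenPipe(rfd, 'r')

def _pipe_watcher():
    # blocks cooperatively until every copy of the write end has been closed
    readpipe.read(1)
    print('write end closed: the parent has gone away, time to shut down')

eventlet.spawn_n(_pipe_watcher)

# simulate the parent dying, then yield so the watcher green thread can run
os.close(writepipe)
eventlet.sleep(0.1)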

github openstack / manila / manila / openstack / common / service.py
    def _child_process(self, service):
        self._child_process_handle_signal()

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher()
        launcher.launch_service(service)
        return launcher

github openstack / blazar / climate / openstack / common / rpc / impl_zmq.py
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
    if not queues:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope,
                             _msg_id)
            return
        return method(_addr, context, _topic, msg, timeout,
                      envelope)
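
The shape of this code is the interesting part: a cast is one-way, so there is nothing to wait for, and handing each send to eventlet.spawn_n lets the caller move on immediately, which is what makes a brokerless fanout to several addresses cheap. The stripped-down sketch below shows that shape; cast_to and the address list are hypothetical stand-ins, not the real ZeroMQ driver.

import eventlet

def cast_to(addr, msg):
    # stand-in for the per-address send; the real driver opens a ZeroMQ
    # socket to addr and pushes the serialized message down it
    print("casting %r to %s" % (msg, addr))

addresses = ["tcp://10.0.0.1:9501", "tcp://10.0.0.2:9501"]

for addr in addresses:
    # fire-and-forget: no reply is read, so there is no result to collect
    eventlet.spawn_n(cast_to, addr, {"method": "ping"})

# give the spawned green threads a chance to run before the script exits
eventlet.sleep(0)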

github openstack / heat / heat / openstack / common / rpc / impl_zmq.py
    LOG.debug("Sending message(s) to: %s", queues)

    # Don't stack if we have no matchmaker results
    if not queues:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope,
                             _msg_id)
            return
        return method(_addr, context, _topic, msg, timeout,
                      envelope)

github openstack / barbican / barbican / openstack / common / service.py
    def _child_process(self, service):
        self._child_process_handle_signal()

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher()
        launcher.launch_service(service)
        return launcher

github openstack / tricircle / neutronproxy / l3proxy / neutron / agent / l3_proxy.py
    def _send_gratuitous_arp_packet(self, ns_name, interface_name, ip_address,
                                    distributed=False):
        if self.conf.send_arp_for_ha > 0:
            eventlet.spawn_n(self._arping, ns_name, interface_name, ip_address,
                             distributed)
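
Spawning self._arping here is the same fire-and-forget trade-off: the caller needs no return value and does not want to wait for the gratuitous ARPs to go out (send_arp_for_ha controls how many are sent), so spawn_n keeps that work off the router-processing path.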

github cloudera / hue / desktop / core / ext-py / eventlet-0.24.1 / examples / forwarder.py
import eventlet

def closed_callback():
    # called when the client side of a forwarded connection goes away
    print("called back")

def forward(source, dest, cb=lambda: None):
    """Forwards bytes unidirectionally from source to dest"""
    while True:
        d = source.recv(32384)
        if not d:  # an empty read means the peer closed the connection
            cb()
            break
        dest.sendall(d)

listener = eventlet.listen(('localhost', 7000))
while True:
    client, addr = listener.accept()
    server = eventlet.connect(('localhost', 22))
    # two unidirectional forwarders make a bidirectional one
    eventlet.spawn_n(forward, client, server, closed_callback)
    eventlet.spawn_n(forward, server, client)
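
Because this example forwards localhost:7000 to the local SSH port, ssh -p 7000 localhost is a quick way to exercise it end to end. Note what spawn_n costs here: the caller gets nothing back, so there is no return value and no place to catch an exception raised inside forward (eventlet just reports it). When a handle is needed, eventlet.spawn returns a GreenThread that can be waited on:

import eventlet

def add(a, b):
    return a + b

gt = eventlet.spawn(add, 2, 3)   # returns a GreenThread, unlike spawn_n
print(gt.wait())                 # blocks cooperatively until the result (5) is ready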

github openstack / storlets / storlets / gateway / gateways / docker / runtime.py
    def communicate(self):
        try:
            self._invoke()

            if not self.srequest.has_fd:
                self._wait_for_write_with_timeout(self._input_data_write_fd)

                # We do the writing in a different thread.
                # Otherwise, we can run into the following deadlock
                # 1. middleware writes to Storlet
                # 2. Storlet reads and starts to write metadata and then data
                # 3. middleware continues writing
                # 4. Storlet continues writing and gets stuck as middleware
                #    is busy writing, but still not consuming the reader end
                #    of the Storlet writer.
                eventlet.spawn_n(self._write_input_data,
                                 self._input_data_write_fd,
                                 self.srequest.data_iter)

            for source in self.extra_data_sources:
                # NOTE(kota_): not sure right now if using eventlet.spawn_n is
                #              right way. GreenPool is better? I don't get
                #              whole for the dead lock described in above.
                self._wait_for_write_with_timeout(source['write_fd'])
                eventlet.spawn_n(self._write_input_data,
                                 source['write_fd'],
                                 source['data_iter'])

            out_md = self._read_metadata()
            self._wait_for_read_with_timeout(self.data_read_fd)

            return StorletResponse(out_md, data_fd=self.data_read_fd,
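
The long comment above describes a classic pipe deadlock: if the middleware wrote all of its input before reading anything back, both sides could end up blocked on full pipe buffers. Handing the write to a green thread with spawn_n lets the caller start reading straight away. The sketch below shows that shape with a single pipe and a made-up data iterator; it illustrates the pattern and is not the Storlets gateway code.

import os
import eventlet
from eventlet import greenio

read_fd, write_fd = os.pipe()
reader = greenio.GreenPipe(read_fd, 'rb')
writer = greenio.GreenPipe(write_fd, 'wb')

def write_input_data(pipe, data_iter):
    # the producer runs in its own green thread, so the consumer below can
    # drain the read end instead of both sides blocking on a full pipe buffer
    for chunk in data_iter:
        pipe.write(chunk)
    pipe.close()

data_iter = (b'x' * 1024 for _ in range(1024))   # about 1 MiB, more than a pipe buffer holds
eventlet.spawn_n(write_input_data, writer, data_iter)

data = reader.read()   # cooperative read; the writer green thread runs in the meantime
reader.close()
print('read %d bytes' % len(data))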

github openstack / trove / trove / openstack / common / eventlet_backdoor.py
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)
    sys.displayhook = displayhook

    sock = _listen('localhost', start_port, end_port, eventlet.listen)

    # In the case of backdoor port being zero, a port number is assigned by
    # listen().  In any case, pull the port number out here.
    port = sock.getsockname()[1]
    LOG.info(
        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
        {'port': port, 'pid': os.getpid()}
    )
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port
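
The backdoor server itself is just another green thread: eventlet.backdoor.backdoor_server accepts connections on a listening socket and gives each one an interactive Python prompt with whatever dictionary you pass as locals. A minimal standalone version looks roughly like this (the port and the contents of backdoor_locals are arbitrary choices for illustration):

import eventlet
import eventlet.backdoor

sock = eventlet.listen(('localhost', 3000))

# names in this dict are available inside the backdoor prompt
backdoor_locals = {'answer': 42}

eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                 locals=backdoor_locals)

# keep the process alive; connect with: telnet localhost 3000
while True:
    eventlet.sleep(60)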