How to use the eventlet.GreenPool class in eventlet

To help you get started, we’ve selected a few eventlet.GreenPool examples, based on popular ways it is used in public projects.

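Before diving into the project code, here is a minimal sketch of the basic GreenPool workflow: create a pool with a size limit, spawn green threads into it, collect results, and wait for everything to finish. The fetch function and URLs below are placeholders, not taken from any of the projects listed on this page.

import eventlet

def fetch(url):
    # Simulate blocking I/O; eventlet.sleep yields to the hub so other
    # green threads in the pool can run in the meantime.
    eventlet.sleep(0.1)
    return url, 'ok'

pool = eventlet.GreenPool(size=4)  # at most 4 green threads run at once

# spawn() returns a GreenThread; wait() blocks until it finishes and
# returns the function's result. If the pool is full, spawn() itself
# blocks until a slot frees up.
threads = [pool.spawn(fetch, 'http://example.com/%d' % i) for i in range(10)]
results = [t.wait() for t in threads]

# imap() maps a function over an iterable inside the pool and yields
# results in order, like itertools.imap / the built-in map.
for url, status in pool.imap(fetch, ['http://example.com/a', 'http://example.com/b']):
    print(url, status)

pool.waitall()  # block until every green thread in the pool has finished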

github openstack / swift / test / unit / common / test_memcached.py
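In this Swift unit test excerpt, a GreenPool fires ten memcache_client.set calls "at the same time" so the test can verify that the MemcacheRing connection pool never opens more than its two allowed connections.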
                val = connections.get()
                if val is not None:
                    errors.append(val)

            mock_sock.connect = wait_connect

            memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                                     connect_timeout=10,
                                                     logger=self.logger)
            # sanity
            self.assertEqual(1, len(memcache_client._client_cache))
            for server, pool in memcache_client._client_cache.items():
                self.assertEqual(2, pool.max_size)

            # make 10 requests "at the same time"
            p = GreenPool()
            for i in range(10):
                p.spawn(memcache_client.set, 'key', 'value')
            for i in range(3):
                sleep(0.1)
                self.assertEqual(2, len(connected))

            # give out a connection
            connections.put(None)

            # at this point, only one connection should have actually been
            # created, the other is in the creation step, and the rest of the
            # clients are not attempting to connect. we let this play out a
            # bit to verify.
            for i in range(3):
                sleep(0.1)
                self.assertEqual(2, len(connected))

github pandemicsyn / stalker / stalker / stalker_runner.py
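The stalker runner creates a GreenPool in its constructor alongside its Redis and MongoDB clients, presumably so that checks can be dispatched to green threads elsewhere in the class.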
    def __init__(self, conf):
        self.conf = conf
        self.name = 'stalker-runner-%d' % os.getpid()
        log_type = conf.get('log_type', 'syslog')
        log_file = conf.get('log_file', '/var/log/stalker/stalker-runner.log')
        if log_type == 'syslog':
            self.logger = get_syslogger(conf, self.name)
        else:
            self.logger = get_logger(self.name, log_path=log_file)
        self.pool = eventlet.GreenPool()
        self.check_key = conf.get('check_key', 'canhazstatus')
        redis_host = conf.get('redis_host', '127.0.0.1')
        redis_port = int(conf.get('redis_port', '6379'))
        redis_pass = conf.get('redis_password', '')
        redis_usock = conf.get('redis_socket', None)
        self.wq = conf.get('worker_id', 'worker1')
        self.rc = redis.Redis(redis_host, redis_port, password=redis_pass,
                              unix_socket_path=redis_usock)
        mongo_host = conf.get('mongo_host', '127.0.0.1')
        mongo_port = int(conf.get('mongo_port', '27017'))
        db_name = conf.get('db_name', 'stalkerweb')
        self.c = MongoClient(host=mongo_host, port=mongo_port)
        self.debug = False
        self.db = self.c[db_name]
        self.checks = self.db['checks']
        self.state_log = self.db['state_log']

github openstack / neutron / quantum / wsgi.py
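Quantum's WSGI Server simply sizes a GreenPool from its threads argument (1000 by default) when it is constructed.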
    def __init__(self, name, threads=1000):
        self.pool = eventlet.GreenPool(threads)
        self.name = name

github eventlet / eventlet / benchmarks / spawn_plot.py
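This eventlet benchmark creates a module-level GreenPool sized by the global iters value.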
def setup():
    global pool
    pool = eventlet.GreenPool(iters)

github eventlet / eventlet / benchmarks / localhost_socket.py
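Here a single GreenPool holds one accepter green thread plus CONCURRENCY writer green threads, and waitall() blocks until the socket benchmark finishes.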
def launch_green_threads():
    pool = eventlet.GreenPool(CONCURRENCY * 2 + 1)
    server_sock = eventlet.green.socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_sock.bind(('localhost', 0))
    server_sock.listen(50)
    addr = ('localhost', server_sock.getsockname()[1])
    pool.spawn_n(green_accepter, server_sock, pool)
    for i in six.moves.range(CONCURRENCY):
        pool.spawn_n(writer, addr, eventlet.green.socket.socket)
    pool.waitall()

github StackStorm / st2 / st2stream / st2stream / cmd / api.py
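The st2 stream API builds a GreenPool with eventlet's default maximum number of simultaneous requests and hands it to the eventlet WSGI server as custom_pool; the custom SIGINT handler then passes the same pool to the shutdown routine so long-running requests can be killed.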
def _run_server():
    host = cfg.CONF.stream.host
    port = cfg.CONF.stream.port

    LOG.info('(PID=%s) ST2 Stream API is serving on http://%s:%s.', os.getpid(), host, port)

    max_pool_size = eventlet.wsgi.DEFAULT_MAX_SIMULTANEOUS_REQUESTS
    worker_pool = eventlet.GreenPool(max_pool_size)
    sock = eventlet.listen((host, port))

    def queue_shutdown(signal_number, stack_frame):
        eventlet.spawn_n(
            shutdown_server_kill_pending_requests,
            sock=sock,
            worker_pool=worker_pool,
            wait_time=WSGI_SERVER_REQUEST_SHUTDOWN_TIME,
        )

    # We register a custom SIGINT handler which allows us to kill long running active requests.
    # Note: Eventually we will support draining (waiting for short-running requests), but we
    # will still want to kill long running stream requests.
    register_stream_signal_handlers(handler_func=queue_shutdown)

    wsgi.server(sock, app.setup_app(), custom_pool=worker_pool)

github apg / musniper / sniper.py
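sniper.py creates a module-level GreenPool, rsvp_pool, at import time, presumably for issuing RSVP requests in the background (only the module setup is shown here).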
from collections import defaultdict

from eventlet import GreenPool
from eventlet.green import urllib2, httplib, socket

from itty import get, run_itty, handle_request

import re
import gdbm
import json
import os
import urllib
import urlparse

STREAM_URL = 'http://stream.meetup.com/2/open_events'

DBM_FILE = os.path.join(os.path.dirname(__file__), 'sniperdata.gdbm')

rsvp_pool = GreenPool()

def rsvp_url(**data):
    return 'http://api.meetup.com/rsvp?' + \
        urllib.urlencode(data)


class MupMap(object):
    """Class to store tokens
    """

    def __init__(self, db):
        self._muptokens = defaultdict(set)
        self._db = db
        self._init_from_db()

    def add_token(self, mup, token):

github openstack / neutron / neutron / plugins / ml2 / drivers / mech_bigswitch / driver.py
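The Big Switch ML2 mechanism driver sizes its GreenPool from the RESTPROXY.thread_pool_size configuration option during initialize().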
    def initialize(self):
        LOG.debug('Initializing driver')

        # register plugin config opts
        pl_config.register_config()
        self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size)

        # init network ctrl connections
        self.servers = servermanager.ServerPool()
        self.servers.get_topo_function = self._get_all_data
        self.servers.get_topo_function_args = {'get_ports': True,
                                               'get_floating_ips': False,
                                               'get_routers': False}
        self.segmentation_types = ', '.join(cfg.CONF.ml2.type_drivers)
        # Track hosts running IVS to avoid excessive calls to the backend
        self.ivs_host_cache = {}

        LOG.debug("Initialization done")

github adblockplus / sitescripts / sitescripts / subscriptions / bin / updateStatusPage.py
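checkSubscriptions() uses GreenPool.imap to run checkURL over every collected URL and checkSite over every affected site concurrently, consuming the results as the iterators are drained.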
def checkSubscriptions():
    subscriptions = subscriptionParser.readSubscriptions().values()
    subscriptions.sort(key=lambda s: s.name.lower())

    urls = {}
    sites = {}
    for subscription in subscriptions:
        for key in ('homepage', 'forum', 'blog', 'faq', 'contact', 'changelog', 'policy'):
            url = getattr(subscription, key)
            if url != None:
                urls[url] = True
        for title, url, complete in subscription.variants:
            urls[url] = True

    pool = eventlet.GreenPool()
    for url, result in pool.imap(checkURL, urls.iterkeys()):
        urls[url] = result
        if result is False:
            sites[urlparse(url).netloc] = True
    for site, result in pool.imap(checkSite, sites.iterkeys()):
        sites[site] = result

    result = []
    for subscription in subscriptions:
        s = {'name': subscription.name, 'links': []}
        result.append(s)
        for key in ('homepage', 'forum', 'blog', 'faq', 'contact', 'changelog', 'policy'):
            url = getattr(subscription, key)
            if url != None:
                site = urlparse(url).netloc
                s['links'].append({

github Juniper / contrail-neutron-plugin / neutron_plugin_contrail / plugins / opencontrail / vnc_client / vmi_res_handler.py
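Here GreenPool.spawn is used to kick off several VNC list calls in parallel; spawn returns GreenThread objects (the *_t variables), whose results can be collected later with wait().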
    def _get_vmis_nets_ips(self, context, project_ids=None,
                           device_ids=None, vmi_uuids=None, vn_ids=None):
        vn_list_handler = vn_handler.VNetworkGetHandler(self._vnc_lib)
        pool = eventlet.GreenPool()
        vn_objs_t = pool.spawn(vn_list_handler.get_vn_obj_list,
                               parent_id=project_ids, detail=True)

        vmi_objs_t = None
        vmi_obj_uuids_t = None
        back_ref_id = []
        if device_ids:
            back_ref_id = device_ids

        if vn_ids:
            back_ref_id.extend(vn_ids)

        if back_ref_id:
            vmi_objs_t = pool.spawn(self._resource_list,
                                    back_ref_id=back_ref_id, back_refs=True)