How to use the pycurl.CurlMulti class in pycurl

To help you get started, we’ve selected a few pycurl.CurlMulti examples, based on popular ways it is used in public projects.

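All of the excerpts below build on the same core pattern: create a CurlMulti, attach easy handles, then alternate perform() with select() until every transfer completes. A minimal, self-contained sketch of that pattern (the URLs are placeholders):

import pycurl
from io import BytesIO

# Placeholder URLs for illustration.
urls = ["https://example.com/a", "https://example.com/b"]

multi = pycurl.CurlMulti()
handles = []
for url in urls:
    c = pycurl.Curl()
    buf = BytesIO()
    c.setopt(pycurl.URL, url)
    c.setopt(pycurl.WRITEFUNCTION, buf.write)
    c.buf = buf  # keep the buffer reachable from its handle
    handles.append(c)
    multi.add_handle(c)

# Drive all transfers: perform() does non-blocking work, select()
# waits for socket activity so the loop does not spin.
num_active = len(handles)
while num_active:
    ret, num_active = multi.perform()
    if ret != pycurl.E_CALL_MULTI_PERFORM:
        multi.select(1.0)

# Detach and close the handles once everything is done.
for c in handles:
    multi.remove_handle(c)
    c.close()
multi.close()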

github Lispython / pycurl / old_tests / test_internals.py
from pycurl import Curl, CurlMulti
try:
    import gc
except ImportError:
    gc = None

# basic check of reference counting (use a memory checker like valgrind)
if 1:
    c = Curl()
    m = CurlMulti()
    m.add_handle(c)
    del m
    m = CurlMulti()
    c.close()
    del m, c

# basic check of cyclic garbage collection
if 1 and gc:
    gc.collect()
    c = Curl()
    c.m = CurlMulti()
    c.m.add_handle(c)
    # create some nasty cyclic references
    c.c = c
    c.c.c1 = c
    c.c.c2 = c
    c.c.c3 = c.c
    c.c.c4 = c.m
    c.m.c = c
    c.m.m = c.m
    c.m.c = c
    # delete
    gc.collect()
    flags = gc.DEBUG_COLLECTABLE | gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_OBJECTS  # DEBUG_OBJECTS is Python 2 only
    if opts.verbose >= 1:  # opts is defined by the surrounding test script
        flags = flags | gc.DEBUG_STATS
    gc.set_debug(flags)
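
The test above deliberately leans on Python's cycle collector; in application code the usual pattern is deterministic cleanup, detaching each easy handle from its CurlMulti before closing anything. A minimal sketch:

import pycurl

c = pycurl.Curl()
m = pycurl.CurlMulti()
m.add_handle(c)
# ... run transfers ...
m.remove_handle(c)  # detach first, then close both handles
c.close()
m.close()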
github CouchPotato / CouchPotatoServer / libs / tornado / curl_httpclient.py
    def initialize(self, io_loop, max_clients=10, defaults=None):
        super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults)
        self._multi = pycurl.CurlMulti()
        self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
        self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
        self._curls = [_curl_create() for i in range(max_clients)]
        self._free_list = self._curls[:]
        self._requests = collections.deque()
        self._fds = {}
        self._timeout = None

        # libcurl has bugs that sometimes cause it to not report all
        # relevant file descriptors and timeouts to TIMERFUNCTION/
        # SOCKETFUNCTION.  Mitigate the effects of such bugs by
        # forcing a periodic scan of all active requests.
        self._force_timeout_callback = ioloop.PeriodicCallback(
            self._handle_force_timeout, 1000, io_loop=io_loop)
        self._force_timeout_callback.start()
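
The handler for that periodic callback is not shown here; a hedged sketch of what such a forced scan can look like (Tornado's real _handle_force_timeout also finishes any completed requests afterwards):

def handle_force_timeout(multi):
    # Run libcurl even though no timer or socket event fired, so a
    # missed callback cannot stall active transfers indefinitely.
    while True:
        ret, num_handles = multi.socket_all()
        if ret != pycurl.E_CALL_MULTI_PERFORM:
            break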
github janeczku / calibre-web / vendor / tornado / curl_httpclient.py
    def initialize(self, io_loop, max_clients=10, defaults=None):
        super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults)
        self._multi = pycurl.CurlMulti()
        self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
        self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
        self._curls = [self._curl_create() for i in range(max_clients)]
        self._free_list = self._curls[:]
        self._requests = collections.deque()
        self._fds = {}
        self._timeout = None

        # libcurl has bugs that sometimes cause it to not report all
        # relevant file descriptors and timeouts to TIMERFUNCTION/
        # SOCKETFUNCTION.  Mitigate the effects of such bugs by
        # forcing a periodic scan of all active requests.
        self._force_timeout_callback = ioloop.PeriodicCallback(
            self._handle_force_timeout, 1000, io_loop=io_loop)
        self._force_timeout_callback.start()
github emlid / ntripbrowser / ntripbrowser / ntripbrowser.py
    def setup(self):
        self.urls_processed = []
        self.results = None
        self._multicurl = pycurl.CurlMulti()
        self._buffers = {}
        self._curls_failed = []
        self._initialize()
        logger.info('DataFetcher: setting up curl handles')
        for curl in self.curls:
            self._multicurl.add_handle(curl)
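
Once every handle is attached, the multi handle still has to be driven and its results separated into successes and failures (compare the _curls_failed list above). A standalone sketch of that harvest step, not ntripbrowser's actual loop:

import pycurl

def fetch_all(multicurl, num_active):
    # Pump transfers until none remain active.
    while num_active:
        ret, num_active = multicurl.perform()
        if ret != pycurl.E_CALL_MULTI_PERFORM:
            multicurl.select(1.0)
    # info_read() returns (queued, succeeded, failed); failed entries
    # are (handle, errno, errmsg) tuples.
    _, succeeded, failed = multicurl.info_read()
    for curl in succeeded:
        print('ok:', curl.getinfo(pycurl.EFFECTIVE_URL))
    for curl, errno, errmsg in failed:
        print('failed:', errno, errmsg)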
github Lispython / human_curl / human_curl / async_client.py
    def __init__(self, size=None):
        self._requests = {}
        self._responses = {}

        self._urls_count = None
        self._urls_mapping = {}
        self._handlers = []
        self._multi_curl = pycurl.CurlMulti()
github pingf / falsy / falsy / netboy / curl_loop.py
import asyncio as aio
import atexit
import pycurl

from falsy.loader.func import load
from falsy.netboy.curl_result import curl_result


class CurlLoop:
    class CurlException(Exception):
        def __init__(self, code, desc, data):
            self.code = code
            self.desc = desc
            self.data = data

    _multi = pycurl.CurlMulti()
    _multi.setopt(pycurl.M_PIPELINING, 1)  # reuse connections via HTTP pipelining
    atexit.register(_multi.close)
    _futures = {}

    @classmethod
    async def handler_ready(cls, c):
        cls._futures[c] = aio.Future()
        cls._multi.add_handle(c)
        try:
            try:
                curl_ret = await cls._futures[c]
            except CurlLoop.CurlException as e:
                return {
                    'url': c._raw_url,
                    'id': c._raw_id,
                    'payload': c._raw_payload,
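
The excerpt ends before the loop that resolves these futures; a hedged sketch of how a shared CurlMulti can be pumped from asyncio (the names are illustrative, not CurlLoop's actual internals):

import asyncio
import pycurl

async def pump(multi, futures):
    # Poll perform() between awaits; when info_read() reports a handle
    # finished, resolve (or fail) the future created in handler_ready().
    while futures:
        multi.perform()
        _, ok, err = multi.info_read()
        for c in ok:
            multi.remove_handle(c)
            futures.pop(c).set_result(c)
        for c, errno, errmsg in err:
            multi.remove_handle(c)
            futures.pop(c).set_exception(RuntimeError(errmsg))
        await asyncio.sleep(0.01)  # yield so other coroutines can run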
github lorien / grab / grab / spider / network_service / multicurl.py
    def __init__(self, spider, socket_number):
        """
        Args:
            spider: argument is not used in multicurl transport
        """

        self.spider = spider
        self.socket_number = socket_number
        self.multi = pycurl.CurlMulti()
        self.multi.handles = []
        self.freelist = []
        self.registry = {}
        self.connection_count = {}
        self.sigint_handler = PycurlSigintHandler()
        self.network_op_lock = Lock()

        # Create curl instances
        for _ in six.moves.range(self.socket_number):
            curl = pycurl.Curl()
            self.connection_count[id(curl)] = 0
            self.freelist.append(curl)
            # self.multi.handles.append(curl)

        self.spawner = self.create_worker(self.spawner_callback)
        self.async_loop = self.create_worker(self.async_loop_callback)
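
The freelist above is a handle pool: rather than constructing a Curl object per request, handles are checked out, used, reset, and returned. A hedged sketch of that checkout/checkin cycle (function names are illustrative):

import pycurl

multi = pycurl.CurlMulti()
freelist = [pycurl.Curl() for _ in range(10)]

def checkout(url):
    curl = freelist.pop()        # take a pooled handle
    curl.setopt(pycurl.URL, url)
    multi.add_handle(curl)
    return curl

def checkin(curl):
    multi.remove_handle(curl)
    curl.reset()                 # wipe per-request options
    freelist.append(curl)        # back into the pool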
github jtackaberry / stagehand / src / curl.py
    def _reinit_curl(self):
        # So, we use a separate CurlMulti() for every Curl object.  This is
        # obviously silly in terms of how curl is designed, except that
        # there is some braindamaged bug (probably in pycurl) that makes it
        # impossible to properly abort a transfer.  You could return -1
        # from WRITEFUNC or PROGRESSFUNC, or maybe curl.close(), but pycurl
        # just dumps some error to the console and then proceeds to block the
        # whole thread, reading all data from the server and pegging a core at
        # 100% until it's finished. *grmbl*
        #
        # multi.close() is the only problem-free approach I've found, but of
        # course it would stop any Curl objects associated with it, and so we're
        # forced to have 1:1.
        self._multi = pycurl.CurlMulti()
        self._curl = pycurl.Curl()
        self._curl._obj = self # XXX: weakref instead?
        self._multi.add_handle(self._curl)

        # Reinitialize curl options.
        for prop, value in self._curl_opts.items():
            prop.setter(self, value)
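
Given that 1:1 design, aborting a transfer amounts to tearing the pair down and rebuilding it. A sketch of how such an abort could look on this class (a hypothetical method, following the comment above):

    def _abort(self):
        # multi.close() is the one reliable way to stop the transfer;
        # recreate both handles so this object remains usable.
        self._multi.remove_handle(self._curl)
        self._multi.close()
        self._curl.close()
        self._reinit_curl()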
github pyload / pyload-webui / module / network / HTTPDownload.py
self._name = ""# will be parsed from content disposition

        self.chunks = []

        self.log = getLogger("log")

        try:
            self.info = ChunkInfo.load(filename)
            self.info.resume = True  # resume is only possible with a valid info file
            self.size = self.info.size
            self.infoSaved = True
        except IOError:
            self.info = ChunkInfo(filename)

        self.chunkSupport = None
        self.m = pycurl.CurlMulti()

        #needed for speed calculation
        self.lastArrived = []
        self.speeds = []
        self.lastSpeeds = [0, 0]
github gfidente / openshift-hellotornado / misc / virtenv / lib / python2.6 / site-packages / tornado / curl_httpclient.py
    def initialize(self, io_loop, max_clients=10, defaults=None):
        super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults)
        self._multi = pycurl.CurlMulti()
        self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
        self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
        self._curls = [_curl_create() for i in range(max_clients)]
        self._free_list = self._curls[:]
        self._requests = collections.deque()
        self._fds = {}
        self._timeout = None

        try:
            self._socket_action = self._multi.socket_action
        except AttributeError:
            # socket_action is found in pycurl since 7.18.2 (it's been
            # in libcurl longer than that but wasn't accessible to
            # python).
            gen_log.warning("socket_action method missing from pycurl; "
                            "falling back to socket_all. Upgrading "