How to use the pycurl.FOLLOWLOCATION function in pycurl

To help you get started, we’ve selected a few pycurl examples based on popular ways it is used in public projects.

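FOLLOWLOCATION tells libcurl to follow HTTP 3xx redirects automatically, and it is usually paired with MAXREDIRS to bound the length of the redirect chain. Before the project examples, here is a minimal, self-contained sketch (the URL is a placeholder):

import pycurl
from io import BytesIO

buffer = BytesIO()
c = pycurl.Curl()
c.setopt(pycurl.URL, "https://example.com/resource")  # placeholder URL
c.setopt(pycurl.FOLLOWLOCATION, True)  # follow 3xx redirects automatically
c.setopt(pycurl.MAXREDIRS, 5)          # stop after 5 hops to avoid redirect loops
c.setopt(pycurl.WRITEDATA, buffer)     # collect the body of the final response
c.perform()
print(c.getinfo(pycurl.EFFECTIVE_URL))  # URL that actually served the response
print(c.getinfo(pycurl.RESPONSE_CODE))  # status code of the final response
c.close()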

github phillipberndt / scripts / paxel / paxel.py (View on GitHub)
import functools
import pycurl

def gen_curl(state, request_range=None):
    """Return a cURL easy instance for the download in the given state dict.

    The download is automatically added to the manager.

    If given, request_range must be a string "<lower>-<upper>", where
    the upper end is inclusive and either end can be omitted."""
    download = pycurl.Curl()
    download.setopt(pycurl.URL, state["url"])
    if request_range:
        download.setopt(pycurl.RANGE, request_range)
    download.setopt(pycurl.FOLLOWLOCATION, True)
    download.setopt(pycurl.HEADERFUNCTION, functools.partial(header_callback, state, request_range, download))
    download.setopt(pycurl.WRITEFUNCTION, functools.partial(write_callback, state, request_range, download))
    download.setopt(pycurl.XFERINFOFUNCTION, functools.partial(progress_callback, state, request_range, download))
    download.setopt(pycurl.NOPROGRESS, False)
    download.setopt(pycurl.LOW_SPEED_LIMIT, 1024)
    download.setopt(pycurl.LOW_SPEED_TIME, 30)
    download.setopt(pycurl.MAXREDIRS, 50)

    file_pos = 0
    range_upper = 1<<50  # "A lot"; no download should ever be a PiB
    if request_range:
        decoded_range_lower, decoded_range_upper = request_range.split("-")
        if decoded_range_lower:
            file_pos = int(decoded_range_lower)
        if decoded_range_upper:
            range_upper = int(decoded_range_upper)
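Here FOLLOWLOCATION works together with RANGE: each segment of a parallel download follows redirects on its own, while MAXREDIRS (50) bounds the chain and the LOW_SPEED_LIMIT/LOW_SPEED_TIME pair aborts any segment that stays below 1024 bytes/s for 30 seconds.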
github pyload / pyload-webui / pyload / plugins / hoster / ShragleCom.py (View on GitHub)
        self.req.http.c.setopt(FOLLOWLOCATION, 0)
        self.html = self.load(action, post=inputs)

        found = re.search(r"Location\s*:\s*(\S*)", self.req.http.header, re.I)
        if found:
            self.correctCaptcha()
            download_url = found.group(1)
        else:
            if "Sicherheitscode falsch" in self.html:
                self.invalidCaptcha()
                self.retry(max_tries=5, reason="Invalid captcha")
            else:
                self.fail("Invalid session")

        # download
        self.req.http.c.setopt(FOLLOWLOCATION, 1)
        self.download(download_url)

        check = self.checkDownload({
            "ip_blocked": re.compile(r'
github openstack / nova / vendor / tornado / tornado / httpclient.py (View on GitHub)
    curl.setopt(pycurl.HTTPHEADER,
                ["%s: %s" % i for i in request.headers.iteritems()])
    try:
        if request.header_callback:
            curl.setopt(pycurl.HEADERFUNCTION, request.header_callback)
        else:
            curl.setopt(pycurl.HEADERFUNCTION,
                        functools.partial(_curl_header_callback, headers))
    except:
        # Old version of curl; response will not include headers
        pass
    if request.streaming_callback:
        curl.setopt(pycurl.WRITEFUNCTION, request.streaming_callback)
    else:
        curl.setopt(pycurl.WRITEFUNCTION, buffer.write)
    curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects)
    curl.setopt(pycurl.MAXREDIRS, request.max_redirects)
    curl.setopt(pycurl.CONNECTTIMEOUT, int(request.connect_timeout))
    curl.setopt(pycurl.TIMEOUT, int(request.request_timeout))
    if request.user_agent:
        curl.setopt(pycurl.USERAGENT, request.user_agent)
    else:
        curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
    if request.network_interface:
        curl.setopt(pycurl.INTERFACE, request.network_interface)
    if request.use_gzip:
        curl.setopt(pycurl.ENCODING, "gzip,deflate")
    else:
        curl.setopt(pycurl.ENCODING, "none")

    # Set the request method through curl's retarded interface which makes
    # up names for almost every single method
github tornadoweb / tornado / tornado / curl_httpclient.py (View on GitHub)
        curl.setopt(
            pycurl.HEADERFUNCTION,
            functools.partial(
                self._curl_header_callback, headers, request.header_callback
            ),
        )
        if request.streaming_callback:

            def write_function(b: Union[bytes, bytearray]) -> int:
                assert request.streaming_callback is not None
                self.io_loop.add_callback(request.streaming_callback, b)
                return len(b)

        else:
            write_function = buffer.write
        curl.setopt(pycurl.WRITEFUNCTION, write_function)
        curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects)
        curl.setopt(pycurl.MAXREDIRS, request.max_redirects)
        assert request.connect_timeout is not None
        curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout))
        assert request.request_timeout is not None
        curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout))
        if request.user_agent:
            curl.setopt(pycurl.USERAGENT, native_str(request.user_agent))
        else:
            curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
        if request.network_interface:
            curl.setopt(pycurl.INTERFACE, request.network_interface)
        if request.decompress_response:
            curl.setopt(pycurl.ENCODING, "gzip,deflate")
        else:
            curl.setopt(pycurl.ENCODING, None)
        if request.proxy_host and request.proxy_port:
github orleven / Tentacle / special / waf_bypass_ssl.py (View on GitHub)
def _curl(url,ciphers,poc):
    try:
        import pycurl, tempfile
    #     out_temp = tempfile.TemporaryFile(mode='w+')
        # fileno = out_temp.fileno()
        c = pycurl.Curl()
        c.setopt(c.URL, url + poc)
        c.setopt(pycurl.FOLLOWLOCATION, 1)
        c.setopt(pycurl.SSL_CIPHER_LIST, ciphers)
        c.setopt(pycurl.SSL_VERIFYPEER, 0)
        c.setopt(pycurl.CONNECTTIMEOUT, 5)
        c.setopt(pycurl.TIMEOUT, 5)
        c.setopt(pycurl.SSL_VERIFYHOST, 0)
        c.setopt(pycurl.PROXY, "127.0.0.1")
        c.setopt(pycurl.PROXYPORT, 7999)
        c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5)
        with tempfile.NamedTemporaryFile() as fp:
            c.setopt(pycurl.WRITEHEADER, fp)
            c.setopt(pycurl.WRITEDATA, fp)
            c.perform()
            # out_temp.seek(0)
            # rt = out_temp.read()
        return c.getinfo(pycurl.HTTP_CODE)
    except Exception as e:
github sfyn / django-etherpad-lite / etherpadlite / simplecurl.py (View on GitHub)
import pycurl
import StringIO
import simplejson

def json(request):
  """Perform a curl request and return the JSON result, formatted as an array.

  Keyword arguments:
  request -- a string representing a url and querystring

  """
  curlReq = pycurl.Curl()
  curlReq.setopt(pycurl.URL, request.__str__())
  curlReq.setopt(pycurl.FOLLOWLOCATION, 1)
  curlReq.setopt(pycurl.MAXREDIRS, 5)
  result = StringIO.StringIO()
  curlReq.setopt(pycurl.WRITEFUNCTION, result.write)
  curlReq.perform()
  result = StringIO.StringIO(result.getvalue())
  result = simplejson.load(result)
  return result
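This snippet targets Python 2 (StringIO, simplejson). A rough Python 3 equivalent, assuming the same calling convention, buffers bytes and decodes them with the standard json module (json_py3 is a hypothetical name):

import json as jsonlib
from io import BytesIO

import pycurl

def json_py3(request):
    """Perform a curl request and return the parsed JSON result."""
    curl_req = pycurl.Curl()
    curl_req.setopt(pycurl.URL, str(request))
    curl_req.setopt(pycurl.FOLLOWLOCATION, 1)  # follow redirects, as above
    curl_req.setopt(pycurl.MAXREDIRS, 5)
    result = BytesIO()
    curl_req.setopt(pycurl.WRITEFUNCTION, result.write)
    curl_req.perform()
    curl_req.close()
    return jsonlib.loads(result.getvalue())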
github nicolassmith / xapers / lib / xapers / sources / dcc.py (View on GitHub)
import sys
import tempfile
import cStringIO

import pycurl

def dccRetrieveXML(docid):
    url = 'https://dcc.ligo.org/Shibboleth.sso/Login?target=https%3A%2F%2Fdcc.ligo.org%2Fcgi-bin%2Fprivate%2FDocDB%2FShowDocument?docid=' + docid + '%26outformat=xml&entityID=https%3A%2F%2Flogin.ligo.org%2Fidp%2Fshibboleth'

    curl = pycurl.Curl()
    cookies = tempfile.NamedTemporaryFile()

    curl.setopt(pycurl.URL, url)
    curl.setopt(pycurl.UNRESTRICTED_AUTH, 1)
    curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_GSSNEGOTIATE)
    curl.setopt(pycurl.COOKIEJAR, cookies.name)
    curl.setopt(pycurl.USERPWD, ':')
    curl.setopt(pycurl.FOLLOWLOCATION, 1)

    doc = cStringIO.StringIO()
    curl.setopt(pycurl.WRITEFUNCTION, doc.write)
    try:
        curl.perform()
    except:
        import traceback
        traceback.print_exc(file=sys.stderr)
        sys.stderr.flush()

    xml = doc.getvalue()

    curl.close()
    cookies.close()
    doc.close()
github pulp / pulp / platform / src / pulp / common / download / downloaders / curl.py (View on GitHub)
def _add_connection_configuration(self, easy_handle):
        # TODO (jconnor 2013-01-22) make these configurable
        easy_handle.setopt(pycurl.FOLLOWLOCATION, DEFAULT_FOLLOW_LOCATION)
        easy_handle.setopt(pycurl.MAXREDIRS, DEFAULT_MAX_REDIRECTS)
        easy_handle.setopt(pycurl.CONNECTTIMEOUT, DEFAULT_CONNECT_TIMEOUT)
        easy_handle.setopt(pycurl.TIMEOUT, DEFAULT_REQUEST_TIMEOUT)
        easy_handle.setopt(pycurl.NOPROGRESS, DEFAULT_NO_PROGRESS)
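With redirects followed automatically like this, it can be worth checking after perform() how many hops were taken and where the transfer ended up; a short sketch reusing easy_handle from the snippet above:

# after easy_handle.perform() completes:
redirect_count = easy_handle.getinfo(pycurl.REDIRECT_COUNT)  # redirects actually followed
final_url = easy_handle.getinfo(pycurl.EFFECTIVE_URL)        # URL of the final response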
github pycurl / pycurl / examples / retriever-multi.py (View on GitHub)
# Check args
assert queue, "no URLs given"
num_urls = len(queue)
num_conn = min(num_conn, num_urls)
assert 1 <= num_conn <= 10000, "invalid number of concurrent connections"
print("PycURL %s (compiled against 0x%x)" % (pycurl.version, pycurl.COMPILE_LIBCURL_VERSION_NUM))
print("----- Getting", num_urls, "URLs using", num_conn, "connections -----")


# Pre-allocate a list of curl objects
m = pycurl.CurlMulti()
m.handles = []
for i in range(num_conn):
    c = pycurl.Curl()
    c.fp = None
    c.setopt(pycurl.FOLLOWLOCATION, 1)
    c.setopt(pycurl.MAXREDIRS, 5)
    c.setopt(pycurl.CONNECTTIMEOUT, 30)
    c.setopt(pycurl.TIMEOUT, 300)
    c.setopt(pycurl.NOSIGNAL, 1)
    m.handles.append(c)


# Main loop
freelist = m.handles[:]
num_processed = 0
while num_processed < num_urls:
    # If there is a URL to process and a free curl object, add to multi stack
    while queue and freelist:
        url, filename = queue.pop(0)
        c = freelist.pop()
        c.fp = open(filename, "wb")
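Note the NOSIGNAL option set alongside FOLLOWLOCATION: it keeps libcurl from using signals such as SIGALRM for timeout handling, the recommended setting for multi-handle and multi-threaded use.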
github pymedusa / Medusa / lib / tornado / curl_httpclient.py (View on GitHub)
        if request.streaming_callback:

            def write_function(chunk):
                self.io_loop.add_callback(request.streaming_callback, chunk)
        else:
            write_function = buffer.write
        if bytes is str:  # py2
            curl.setopt(pycurl.WRITEFUNCTION, write_function)
        else:  # py3
            # Upstream pycurl doesn't support py3, but ubuntu 12.10 includes
            # a fork/port.  That version has a bug in which it passes unicode
            # strings instead of bytes to the WRITEFUNCTION.  This means that
            # if you use a WRITEFUNCTION (which tornado always does), you cannot
            # download arbitrary binary data.  This needs to be fixed in the
            # ported pycurl package, but in the meantime this lambda will
            # make it work for downloading (utf8) text.
            curl.setopt(pycurl.WRITEFUNCTION, lambda s: write_function(utf8(s)))
        curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects)
        curl.setopt(pycurl.MAXREDIRS, request.max_redirects)
        curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout))
        curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout))
        if request.user_agent:
            curl.setopt(pycurl.USERAGENT, native_str(request.user_agent))
        else:
            curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
        if request.network_interface:
            curl.setopt(pycurl.INTERFACE, request.network_interface)
        if request.decompress_response:
            curl.setopt(pycurl.ENCODING, "gzip,deflate")
        else:
            curl.setopt(pycurl.ENCODING, "none")
        if request.proxy_host and request.proxy_port:
            curl.setopt(pycurl.PROXY, request.proxy_host)
            curl.setopt(pycurl.PROXYPORT, request.proxy_port)