How to use the future.moves.urllib.parse.urlparse function in future

To help you get started, we’ve selected a few examples from public projects that show popular ways future.moves.urllib.parse.urlparse is used.

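As a minimal, self-contained sketch (the URL below is illustrative and not taken from any of the projects), the function is imported from future.moves.urllib.parse and returns the same ParseResult named tuple on Python 2 and Python 3:

from future.moves.urllib.parse import urlparse

# Split an absolute URL into its components.
parsed = urlparse('https://example.com:8080/path/page?q=python#top')
print(parsed.scheme)    # 'https'
print(parsed.netloc)    # 'example.com:8080'
print(parsed.path)      # '/path/page'
print(parsed.query)     # 'q=python'
print(parsed.fragment)  # 'top'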

github CenterForOpenScience / osf.io / api_tests / comments / views / test_comment_detail.py
def test_file_comment_has_target_link_with_correct_type(
            self, app, public_url, public_file):
        res = app.get(public_url)
        url = res.json['data']['relationships']['target']['links']['related']['href']
        expected_url = '/{}files/{}/'.format(API_BASE, public_file._id)
        target_type = res.json['data']['relationships']['target']['links']['related']['meta']['type']
        expected_type = 'files'
        assert res.status_code == 200
        assert urlparse(url).path == expected_url
        assert target_type == expected_type

github CenterForOpenScience / osf.io / osf / migrations / 0107_add_dependent_styles.py
with open(style_file, 'r') as f:
            try:
                root = etree.parse(f).getroot()
            except etree.XMLSyntaxError:
                continue

            namespace = root.nsmap.get(None)
            selector = '{{{ns}}}info/{{{ns}}}'.format(ns=namespace)
            title = root.find(selector + 'title').text
            has_bibliography = root.find('{{{ns}}}{tag}'.format(ns=namespace, tag='bibliography')) is not None or 'Bluebook' in title

            style_id = os.path.splitext(os.path.basename(style_file))[0]
            links = root.findall(selector + 'link')
            for link in links:
                if link.get('rel') == 'independent-parent':
                    parent_style_id = urlparse(link.get('href')).path.split('/')[-1]
                    parent_style = CitationStyle.objects.get(_id=parent_style_id)

                    if parent_style is not None:
                        parent_has_bibliography = parent_style.has_bibliography
                        fields = {
                            '_id': style_id,
                            'title': title,
                            'has_bibliography': parent_has_bibliography,
                            'parent_style': parent_style_id
                        }

                        # Optional
                        try:
                            fields['short_title'] = root.find(selector + 'title-short').text
                        except AttributeError:
                            pass

github Flexget / Flexget / flexget / components / managed_lists / lists / couchpotato_list.py
def movie_list_request(base_url, port, api_key):
        parsedurl = urlparse(base_url)
        log.debug('Received movie list request')
        return '%s://%s:%s%s/api/%s/movie.list?status=active' % (
            parsedurl.scheme,
            parsedurl.netloc,
            port,
            parsedurl.path,
            api_key,
        )

github eduvpn / python-eduvpn-client / eduvpn / oauth2.py
            logo, name = get_brand(lets_connect)
            logo = stringify_image(logo)
            content = landing_page.format(logo=logo, brand=name).encode('utf-8')
            self.wfile.write(content)
            self.server.path = self.path

    httpd = HTTPServer(('', port), RequestHandler)
    if timeout:
        httpd.socket.settimeout(timeout)
    httpd.handle_request()
    httpd.server_close()

    if not hasattr(httpd, "path"):
        raise Exception("Invalid response received")

    parsed = urlparse(httpd.path)  # type: ignore
    logger.info(u"received a request {}".format(httpd.path))  # type: ignore
    return parse_qs(parsed.query)

github Flexget / Flexget / flexget / components / sites / sites / piratebay.py
def parse_download_page(self, url, requests):
        page = requests.get(url).content
        try:
            soup = get_soup(page)
            tag_div = soup.find('div', attrs={'class': 'download'})
            if not tag_div:
                raise UrlRewritingError('Unable to locate download link from url %s' % url)
            tag_a = tag_div.find('a')
            torrent_url = tag_a.get('href')
            # URL is sometimes missing the scheme
            if torrent_url.startswith('//'):
                torrent_url = urlparse(url).scheme + ':' + torrent_url
            return torrent_url
        except Exception as e:
            raise UrlRewritingError(e)

github google / grr / grr / core / grr_response_core / lib / rdfvalues / standard.py
def FromHumanReadable(cls, value):
    precondition.AssertType(value, Text)
    return cls(urlparse.urlparse(value))
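
Note that the two grr snippets (this one and the next) call urlparse.urlparse(...): there, urlparse is bound to the parse module itself rather than to the function. The excerpts omit the import, but it presumably looks like the following sketch (illustrative URL):

from future.moves.urllib import parse as urlparse

parsed = urlparse.urlparse('https://example.com/resources/file.txt')
print(parsed.path)  # '/resources/file.txt'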

github google / grr / grr / server / grr_response_server / gui / api_plugins / client.py
def _GetAddrFromFleetspeak(client_id):
  res = fleetspeak_connector.CONN.outgoing.ListClients(
      admin_pb2.ListClientsRequest(
          client_ids=[fleetspeak_utils.GRRIDToFleetspeakID(client_id)]))
  if not res.clients or not res.clients[0].last_contact_address:
    return "", None
  # last_contact_address typically includes a port; the "//" prefix below makes
  # urlparse treat the value as a netloc, so .hostname is populated.
  parsed = urlparse.urlparse("//{}".format(res.clients[0].last_contact_address))
  ip_str = parsed.hostname
  return ip_str, ipaddress.ip_address(ip_str)

github zhanghe06 / news_spider / tools / url.py
def get_url_query_param(url, param):
    """
    获取url参数值
    :param url:
    :param param:
    :return:
    """
    result = urlparse(url)
    return dict(parse_qsl(result.query)).get(param)
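
A quick usage sketch for the helper above, assuming its urlparse and parse_qsl come from future.moves.urllib.parse (the URL and values are illustrative only):

print(get_url_query_param('https://example.com/search?q=python&page=2', 'q'))     # 'python'
print(get_url_query_param('https://example.com/search?q=python&page=2', 'sort'))  # None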

github google / grr / api_client / python / grr_api_client / connectors / http_connector.py
    self.api_methods = {}
    for method in proto.items:
      if not method.http_route.startswith("/api/v2/"):
        method.http_route = method.http_route.replace("/api/", "/api/v2/", 1)

      self.api_methods[method.name] = method
      routing_rules.append(
          routing.Rule(
              method.http_route,
              methods=method.http_methods,
              endpoint=method.name))

    self.handlers_map = routing.Map(routing_rules)

    parsed_endpoint_url = urlparse.urlparse(self.api_endpoint)
    self.urls = self.handlers_map.bind(
        parsed_endpoint_url.netloc, url_scheme=parsed_endpoint_url.scheme)

github zatosource / zato / code / zato-server / src / zato / server / base / worker / __init__.py
def _update_aws_config(self, msg):
        """ Parses the address to AWS we store into discrete components S3Connection objects expect.
        Also turns metadata string into a dictionary
        """
        url_info = urlparse(msg.address)

        msg.is_secure = True if url_info.scheme == 'https' else False
        msg.port = url_info.port if url_info.port else (443 if msg.is_secure else 80)
        msg.host = url_info.netloc

        msg.metadata = parse_extra_into_dict(msg.metadata_)