subname = test_url.replace(join_url(location, name), '', 1).strip('/')
channel_name = join_url(name, subname)
channel = _get_channel_for_name(channel_name)
return channel.location, channel_name, channel.scheme, channel.auth, channel.token
# Step 3. migrated_channel_aliases matches
for migrated_alias in context.migrated_channel_aliases:
if test_url.startswith(migrated_alias.location):
name = test_url.replace(migrated_alias.location, '', 1).strip('/')
ca = context.channel_alias
return ca.location, name, ca.scheme, ca.auth, ca.token
# Step 4. custom_channels matches
for name, channel in sorted(context.custom_channels.items(), reverse=True,
key=lambda x: len(x[0])):
that_test_url = join_url(channel.location, channel.name)
if tokenized_startswith(test_url.split('/'), that_test_url.split('/')):
subname = test_url.replace(that_test_url, '', 1).strip('/')
return (channel.location, join_url(channel.name, subname), scheme,
channel.auth, channel.token)
# Step 5. channel_alias match
ca = context.channel_alias
if ca.location and tokenized_startswith(test_url.split('/'), ca.location.split('/')):
name = test_url.replace(ca.location, '', 1).strip('/') or None
return ca.location, name, scheme, ca.auth, ca.token
# Step 6. not-otherwise-specified file://-type urls
if host is None:
# this should probably only happen with a file:// type url
assert port is None
location, name = test_url.rsplit('/', 1)
if not location:
location = '/'
_scheme, _auth, _token = 'file', None, None
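Steps 4 and 5 above match the request URL against candidate channel locations token by token rather than with a plain string startswith, so that a channel named chan cannot accidentally claim URLs under channel2. A minimal sketch of that idea, using a hypothetical helper name rather than conda's own tokenized_startswith implementation:

def tokens_startswith(test_tokens, prefix_tokens):
    # True when the candidate prefix matches the leading tokens exactly
    return len(test_tokens) >= len(prefix_tokens) and all(
        a == b for a, b in zip(test_tokens, prefix_tokens))

assert tokens_startswith("example.com/chan/sub".split('/'), "example.com/chan".split('/'))
assert not tokens_startswith("example.com/channel2".split('/'), "example.com/chan".split('/'))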
def test_bare_channel_file(self):
url = "file:///conda-01"
channel = Channel(url)
assert channel.scheme == "file"
assert channel.location == "/"
assert channel.platform is None
assert channel.canonical_name == url
assert channel.name == "conda-01"
assert channel.base_url == url
assert channel.url() == join_url(url, context.subdir)
assert channel.urls() == [
join_url(url, context.subdir),
join_url(url, 'noarch'),
]
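The test above pins down how a bare file:// URL is decomposed: the location collapses to '/', the trailing path segment becomes the channel name, and urls() appends the current subdir plus noarch. Assuming join_url simply joins its pieces with '/' and the platform's subdir is linux-64, the expected values would look roughly like this (illustrative only):

url = "file:///conda-01"
subdir = "linux-64"  # stand-in for context.subdir on a hypothetical Linux x86_64 machine
expected_urls = [
    f"{url}/{subdir}",   # file:///conda-01/linux-64
    f"{url}/noarch",     # file:///conda-01/noarch
]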
path = path and path.rstrip('/')
test_url = Url(host=host, port=port, path=path).url
# Step 1. No path given; channel name is None
if not path:
return Url(host=host, port=port).url.rstrip('/'), None, scheme or None, None, None
# Step 2. migrated_custom_channels matches
for name, location in sorted(context.migrated_custom_channels.items(), reverse=True,
key=lambda x: len(x[0])):
location, _scheme, _auth, _token = split_scheme_auth_token(location)
if tokenized_conda_url_startswith(test_url, join_url(location, name)):
# translate location to new location, with new credentials
subname = test_url.replace(join_url(location, name), '', 1).strip('/')
channel_name = join_url(name, subname)
channel = _get_channel_for_name(channel_name)
return channel.location, channel_name, channel.scheme, channel.auth, channel.token
}
channel_url = self.url_w_credentials
legacy_packages = json_obj.get("packages", {})
conda_packages = json_obj.get("packages.conda", {})
_tar_bz2 = CONDA_PACKAGE_EXTENSION_V1
use_these_legacy_keys = set(iterkeys(legacy_packages)) - set(
k[:-6] + _tar_bz2 for k in iterkeys(conda_packages)
)
for group, copy_legacy_md5 in (
(iteritems(conda_packages), True),
(((k, legacy_packages[k]) for k in use_these_legacy_keys), False)):
for fn, info in group:
info['fn'] = fn
info['url'] = join_url(channel_url, fn)
if copy_legacy_md5:
counterpart = fn.replace('.conda', '.tar.bz2')
if counterpart in legacy_packages:
info['legacy_bz2_md5'] = legacy_packages[counterpart].get('md5')
info['legacy_bz2_size'] = legacy_packages[counterpart].get('size')
if (add_pip and info['name'] == 'python' and
info['version'].startswith(('2.', '3.'))):
info['depends'].append('pip')
info.update(meta_in_common)
if info.get('record_version', 0) > 1:
log.debug("Ignoring record_version %d from %s",
info["record_version"], info['url'])
continue
package_record = PackageRecord(**info)
_package_records.append(package_record)
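The loop above prefers the newer .conda packages and only keeps a legacy .tar.bz2 entry when it has no .conda counterpart; the filename arithmetic (strip the six-character '.conda' suffix, append '.tar.bz2') is easiest to see on a toy repodata dict. The data below is made up for illustration, and CONDA_PACKAGE_EXTENSION_V1 is assumed to be '.tar.bz2':

conda_packages = {"foo-1.0-0.conda": {"md5": "aaa"}}
legacy_packages = {"foo-1.0-0.tar.bz2": {"md5": "bbb"}, "bar-2.0-0.tar.bz2": {"md5": "ccc"}}

shadowed = {k[:-6] + ".tar.bz2" for k in conda_packages}   # legacy twins of the .conda files
use_these_legacy_keys = set(legacy_packages) - shadowed
print(sorted(use_these_legacy_keys))   # ['bar-2.0-0.tar.bz2'] -- only the un-shadowed legacy package survives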
The remote server could not find the noarch directory for the
requested channel with url: %s
As of conda 4.3, a valid channel must contain a `noarch/repodata.json` and
associated `noarch/repodata.json.bz2` file, even if `noarch/repodata.json` is
empty. Please request that the channel administrator create
`noarch/repodata.json` and associated `noarch/repodata.json.bz2` files.
$ mkdir noarch
$ echo '{}' > noarch/repodata.json
$ bzip2 -k noarch/repodata.json
You will need to adjust your conda configuration to proceed.
Use `conda config --show` to view your configuration's current state.
Further configuration help can be found at <%s>.
""") % (maybe_unquote(dirname(url)),
join_url(CONDA_HOMEPAGE_URL, 'docs/config.html'))
elif status_code == 401:
channel = Channel(url)
if channel.token:
help_message = dals("""
The token '%s' given for the URL is invalid.
If this token was pulled from anaconda-client, you will need to use
anaconda-client to reauthenticate.
If you supplied this token to conda directly, you will need to adjust your
conda configuration to proceed.
Use `conda config --show` to view your configuration's current state.
Further configuration help can be found at <%s>.
""") % (channel.token, join_url(CONDA_HOMEPAGE_URL, 'docs/config.html'))
if etag:
headers["If-None-Match"] = etag
if mod_stamp:
headers["If-Modified-Since"] = mod_stamp
if 'repo.continuum.io' in url or url.startswith("file://"):
filename = 'repodata.json.bz2'
headers['Accept-Encoding'] = 'identity'
else:
headers['Accept-Encoding'] = 'gzip, deflate, compress, identity'
headers['Content-Type'] = 'application/json'
filename = 'repodata.json'
try:
timeout = context.remote_connect_timeout_secs, context.remote_read_timeout_secs
resp = session.get(join_url(url, filename), headers=headers, proxies=session.proxies,
timeout=timeout)
if log.isEnabledFor(DEBUG):
log.debug(stringify(resp))
resp.raise_for_status()
if resp.status_code == 304:
raise Response304ContentUnchanged()
def maybe_decompress(filename, resp_content):
return ensure_text_type(bz2.decompress(resp_content)
if filename.endswith('.bz2')
else resp_content).strip()
json_str = maybe_decompress(filename, resp.content)
fetched_repodata = json.loads(json_str) if json_str else {}
fetched_repodata['_url'] = url
add_http_value_to_dict(resp, 'Etag', fetched_repodata, '_etag')
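The If-None-Match and If-Modified-Since headers set above implement ordinary HTTP conditional requests: when the cached repodata is still current, the server answers 304 with no body and the locally cached copy is reused. A self-contained sketch of the same pattern with plain requests (the URL, ETag value, and timeouts are illustrative only):

import requests

cached_etag = '"abc123"'                        # saved from a previous response's Etag header
headers = {"If-None-Match": cached_etag, "Accept-Encoding": "gzip, deflate"}
resp = requests.get("https://repo.example.com/noarch/repodata.json",
                    headers=headers, timeout=(9.15, 60.0))
if resp.status_code == 304:
    pass        # cache hit: reuse the previously stored repodata.json (not shown here)
else:
    resp.raise_for_status()
    repodata = resp.json()                      # fresh payload; also persist resp.headers.get("Etag")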
elif status_code == 403:
if not url.endswith('/noarch'):
return None
else:
if context.allow_non_channel_urls:
help_message = dedent("""
WARNING: The remote server could not find the noarch directory for the
requested channel with url: %s
It is possible you have given conda an invalid channel. Please double-check
your conda configuration using `conda config --show`.
If the requested url is in fact a valid conda channel, please request that the
channel administrator create `noarch/repodata.json` and associated
`noarch/repodata.json.bz2` files, even if `noarch/repodata.json` is empty.
def urls(self, with_credentials=False, subdirs=None):
if subdirs is None:
subdirs = context.subdirs
assert isiterable(subdirs), subdirs # subdirs must be a non-string iterable
if self.canonical_name == UNKNOWN_CHANNEL:
return Channel(DEFAULTS_CHANNEL_NAME).urls(with_credentials, subdirs)
base = [self.location]
if with_credentials and self.token:
base.extend(['t', self.token])
base.append(self.name)
base = join_url(*base)
def _platforms():
if self.platform:
yield self.platform
if self.platform != 'noarch':
yield 'noarch'
else:
for subdir in subdirs:
yield subdir
bases = (join_url(base, p) for p in _platforms())
if with_credentials and self.auth:
return ["%s://%s@%s" % (self.scheme, self.auth, b) for b in bases]
else:
return ["%s://%s" % (self.scheme, b) for b in bases]
session = CondaSession()
headers = {}
if etag:
headers["If-None-Match"] = etag
if mod_stamp:
headers["If-Modified-Since"] = mod_stamp
headers['Accept-Encoding'] = 'gzip, deflate, compress, identity'
headers['Content-Type'] = 'application/json'
filename = repodata_fn
try:
timeout = context.remote_connect_timeout_secs, context.remote_read_timeout_secs
resp = session.get(join_url(url, filename), headers=headers, proxies=session.proxies,
timeout=timeout)
if log.isEnabledFor(DEBUG):
log.debug(stringify(resp, content_max_len=256))
resp.raise_for_status()
except RequestsProxyError:
raise ProxyError() # see #3962
except InvalidSchema as e:
if 'SOCKS' in text_type(e):
message = dals("""
Requests has identified that your current working environment is configured
to use a SOCKS proxy, but pysocks is not installed. To proceed, remove your
proxy configuration, run `conda install pysocks`, and then you can re-enable
your proxy configuration.
""")