def _load_server_info(self):
    def just_json(_, serialized):
        return serialized

    if not self._cache.get('version'):
        try:
            self._cache['version'] = {
                'kubernetes': self.client.request('get', '/version', serializer=just_json)
            }
        except (ValueError, MaxRetryError) as e:
            if isinstance(e, MaxRetryError) and not isinstance(e.reason, ProtocolError):
                raise
            if not self.client.configuration.host.startswith("https://"):
                raise ValueError("Host value %s should start with https:// when talking to HTTPS endpoint" %
                                 self.client.configuration.host)
            else:
                raise
        try:
            self._cache['version']['openshift'] = self.client.request(
                'get',
                '/version/openshift',
                serializer=just_json,
            )
        except ApiException:
            pass

    self.__version = self._cache['version']
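When the version call above fails, MaxRetryError carries the underlying cause in its `reason` attribute, which is what the snippet checks for ProtocolError. A minimal standalone sketch of that inspection (the pool and the unreachable URL are illustrative, not from the snippet):

import urllib3
from urllib3.exceptions import MaxRetryError, ProtocolError
from urllib3.util.retry import Retry

http = urllib3.PoolManager()
try:
    # Hypothetical unreachable endpoint, used only to trigger the error path.
    http.request('GET', 'https://localhost:9/version', retries=Retry(total=1))
except MaxRetryError as e:
    if isinstance(e.reason, ProtocolError):
        print('connection dropped mid-request:', e.reason)
    else:
        print('gave up after retries, root cause:', type(e.reason).__name__)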
def test_raise_on_status(self):
    with PoolManager() as http:
        with pytest.raises(MaxRetryError):
            # the default is to raise
            r = http.request(
                "GET",
                "%s/status" % self.base_url,
                fields={"status": "500 Internal Server Error"},
                retries=Retry(total=1, status_forcelist=range(500, 600)),
            )

        with pytest.raises(MaxRetryError):
            # raise explicitly
            r = http.request(
                "GET",
                "%s/status" % self.base_url,
                fields={"status": "500 Internal Server Error"},
                retries=Retry(
                    total=1, status_forcelist=range(500, 600), raise_on_status=True
                ),
            )
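For contrast, setting raise_on_status=False makes the pool stop retrying but hand back the final error response instead of raising MaxRetryError. A small sketch in the same style (base_url is assumed to point at a test endpoint that echoes the requested status, as in the test above):

from urllib3 import PoolManager
from urllib3.util.retry import Retry

with PoolManager() as http:
    r = http.request(
        "GET",
        "%s/status" % base_url,
        fields={"status": "500 Internal Server Error"},
        retries=Retry(total=1, status_forcelist=range(500, 600), raise_on_status=False),
    )
    assert r.status == 500   # last response is returned rather than raised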
# Get file list and download
try:
    files = get_data_tree_remote(
        pool_manager=pool_manager, api_origin=api_origin)
    extfiles = [f for f in files if f.lower().startswith(
        ext[1:] + "/") and f.lower().endswith(ext)]
    extfiles.sort()
    dl_files = []
    for f in extfiles:
        dest = join(dldir, f)
        if not exists(dest):
            dl_file(join(raw_origin, f), dest)
        dl_files.append(dest)
except (urllib3.exceptions.MaxRetryError, KeyError):
    # e.g. no internet connection
    warnings.warn(
        "No connection, using previously downloaded files only.")
    files = get_data_tree_local(dldir=dldir)
    dl_files = [f for f in files if f.lower().endswith(ext)]
return dl_files
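The same offline-fallback idea can be reduced to a small connectivity probe; the pool, probe URL, and helper name below are illustrative assumptions, not part of the snippet:

import urllib3
from urllib3.exceptions import MaxRetryError
from urllib3.util.retry import Retry

def have_connection(pool_manager, probe_url="https://example.com"):
    """Return True if the remote origin is reachable, False otherwise."""
    try:
        pool_manager.request("HEAD", probe_url, retries=Retry(total=1, connect=1))
        return True
    except MaxRetryError:
        return False

pm = urllib3.PoolManager()
use_remote = have_connection(pm)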
    you read the entire contents of the response such as when
    `preload_content=True`). This is useful if you're not preloading
    the response's content immediately. You will need to call
    ``r.release_conn()`` on the response ``r`` to return the connection
    back into the pool. If None, it takes the value of
    ``response_kw.get('preload_content', True)``.

    :param \**response_kw:
        Additional parameters are passed to
        :meth:`urllib3.response.HTTPResponse.from_httplib`
    """
    if headers is None:
        headers = self.headers

    if retries < 0 and retries is not False:
        raise MaxRetryError(self, url)

    if release_conn is None:
        release_conn = response_kw.get('preload_content', True)

    # Check host
    if assert_same_host and not self.is_same_host(url):
        raise HostChangedError(self, url, retries - 1)

    conn = None

    # Merge the proxy headers. Only do this in HTTP. We have to copy the
    # headers dict so we can safely change it without those changes being
    # reflected in anyone else's copy.
    if self.scheme == 'http':
        headers = headers.copy()
        headers.update(self.proxy_headers)
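The docstring above describes the release_conn / preload_content contract from the caller's side: if you ask for an un-preloaded body, you are responsible for returning the connection to the pool. A minimal usage sketch (the URL and the consumer are placeholders):

import urllib3

http = urllib3.PoolManager()
r = http.request('GET', 'https://example.com/big-file', preload_content=False)
try:
    for chunk in r.stream(1024):   # read the body incrementally
        process(chunk)             # hypothetical consumer
finally:
    r.release_conn()               # hand the connection back to the pool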
        return response

    # Support relative URLs for redirecting.
    redirect_location = urljoin(url, redirect_location)

    # RFC 7231, Section 6.4.4
    if response.status == 303:
        method = 'GET'

    retries = kw.get('retries')
    if not isinstance(retries, Retry):
        retries = Retry.from_int(retries, redirect=redirect)

    try:
        retries = retries.increment(method, url, response=response, _pool=conn)
    except MaxRetryError:
        if retries.raise_on_redirect:
            raise
        return response

    kw['retries'] = retries
    kw['redirect'] = redirect

    log.info("Redirecting %s -> %s" % (url, redirect_location))
    return self.urlopen(method, redirect_location, **kw)
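Here raise_on_redirect decides whether exhausting the redirect budget surfaces as MaxRetryError or simply returns the last redirect response. A caller-side sketch against a hypothetical endpoint that always redirects:

from urllib3 import PoolManager
from urllib3.util.retry import Retry

http = PoolManager()
# With raise_on_redirect=False, running out of redirects returns the 3xx
# response instead of raising MaxRetryError.
r = http.request(
    'GET',
    'https://example.com/always-redirects',   # placeholder URL
    retries=Retry(total=3, redirect=2, raise_on_redirect=False),
)
print(r.status, r.headers.get('Location'))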
            if not retries:
                if isinstance(e, TimeoutError):
                    # TimeoutError is exempt from MaxRetryError-wrapping.
                    # FIXME: ... Not sure why. Add a reason here.
                    raise

                # Wrap unexpected exceptions with the most appropriate
                # module-level exception and re-raise.
                if isinstance(e, SocketError) and self.proxy:
                    raise ProxyError('Cannot connect to proxy.', e)

                if retries is False:
                    raise ConnectionError('Connection failed.', e)

                raise MaxRetryError(self, url, e)

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning("Retrying (%d attempts remain) after connection "
                        "broken by '%r': %s" % (retries, err, url))
            return self.urlopen(method, url, body, headers, retries - 1,
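This older urllib3 code shows the wrapping policy: once retries run out, transport errors are re-raised as MaxRetryError, while retries=False opts out of wrapping and surfaces the original exception. Recent urllib3 versions document the same split. A quick sketch against an unreachable port (the port is a placeholder chosen to refuse connections):

import urllib3
from urllib3.exceptions import MaxRetryError

http = urllib3.PoolManager()

try:
    http.request('GET', 'http://localhost:9/', retries=0)      # wrapped
except MaxRetryError as e:
    print('wrapped, root cause:', type(e.reason).__name__)

try:
    http.request('GET', 'http://localhost:9/', retries=False)  # not wrapped
except Exception as e:
    print('raw exception:', type(e).__name__)   # e.g. NewConnectionError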
            raise error.ConnectError(str(ex), ex)
        except OpenSSL.SSL.Error as ex:
            raise error.ConnectError(str(ex), ex)
        except ssl.SSLError as ex:
            raise error.ConnectError(str(ex), ex)
        except exceptions.LocationParseError as ex:
            raise error.MalformedResponseError(str(ex), ex)
        except exceptions.LocationValueError as ex:
            raise error.InvalidUrlError(str(ex), ex)
        except exceptions.DecodeError as ex:
            raise error.MalformedResponseError(str(ex), ex)
        except exceptions.InvalidHeader as ex:
            raise error.MalformedResponseError(str(ex), ex)
        except exceptions.ProxyError as ex:
            raise error.ProxyError(str(ex), ex)
        except exceptions.MaxRetryError as ex:
            # Might be raised for multiple reasons, so re-raise the original
            # error and let `self.handle_network_error()` process it again.
            with self.handle_network_error(req):
                raise ex.reason
        except exceptions.ResponseError as ex:
            if 'too many redirects' in str(ex):
                raise error.TooManyRedirectsError(str(ex), ex)
            else:
                raise
        except AttributeError:
            # See https://github.com/urllib3/urllib3/issues/1556
            etype, evalue, tb = sys.exc_info()
            frames = traceback.extract_tb(tb)
            found = False
            for frame in frames:
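The interesting branch is the MaxRetryError one: because it can wrap many different root causes, the handler re-raises ex.reason inside the same translation context so the concrete cause gets classified instead of the wrapper. A stripped-down sketch of that pattern (the context manager and error classes here are hypothetical stand-ins, not the project's own):

import ssl
from contextlib import contextmanager
from urllib3 import exceptions

class ConnectError(Exception): ...
class ProtocolFailure(Exception): ...

@contextmanager
def translate_network_errors():
    try:
        yield
    except ssl.SSLError as ex:
        raise ConnectError(str(ex)) from ex
    except exceptions.ProtocolError as ex:
        raise ProtocolFailure(str(ex)) from ex
    except exceptions.MaxRetryError as ex:
        # Unwrap and classify the real cause on a second pass.
        with translate_network_errors():
            raise ex.reason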
        bot = Bot(multi_logs=True, selenium_local_session=False,
                  proxy_address_port=get_proxy(os.environ.get('INSTA_USER')), disable_image_load=True)
        selenium_url = "http://%s:%d/wd/hub" % (os.environ.get('SELENIUM', 'selenium'), 4444)
        bot.set_selenium_remote_session(logger=logging.getLogger(), selenium_url=selenium_url, selenium_driver=selenium_driver(selenium_url))
        bot.login()
        bot.set_settings()
        bot.act()
    except (NewConnectionError, WebDriverException) as exc:
        bot.logger.warning("Exception in run: %s; try again: count=%s" % (exc, count))
        if count > 3:
            print("Exception in run(): %s \n %s" % (exc, traceback.format_exc()))
            report_exception(exc)
        else:
            run(count=count + 1)
    except (ProtocolError, MaxRetryError) as exc:
        bot.logger.error("Abort because of %s; \n%s" % (exc, traceback.format_exc()))
        return
    except Exception as exc:
        print("Exception in run(): %s \n %s" % (exc, traceback.format_exc()))
        report_exception(exc)
    finally:
        print("END")
        bot.end()
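The snippet retries transient Selenium/connection failures recursively and aborts outright on ProtocolError or MaxRetryError. The same policy can be written as a flat loop; run_once and report_exception below are hypothetical stand-ins for the snippet's body:

from urllib3.exceptions import MaxRetryError, NewConnectionError, ProtocolError
from selenium.common.exceptions import WebDriverException

def run_with_retries(max_attempts=4):
    for attempt in range(1, max_attempts + 1):
        try:
            run_once()                     # hypothetical: login, set_settings, act
            return
        except (ProtocolError, MaxRetryError) as exc:
            print("Abort because of %s" % exc)   # unrecoverable transport failure
            return
        except (NewConnectionError, WebDriverException) as exc:
            if attempt == max_attempts:
                report_exception(exc)      # hypothetical error reporter
                raise
            print("Retrying after %s (attempt %d)" % (exc, attempt))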
def minio_config(tag, endpoint, accesskey, secretkey):
    """ Set up remote minio host """
    home_dir = os.path.expanduser('~')
    lab_dir = os.path.join(home_dir, '.lab')

    if not os.path.exists(lab_dir):
        os.makedirs(lab_dir)

    # Test connection
    try:
        minioClient = Minio(endpoint,
                            access_key=accesskey,
                            secret_key=secretkey,
                            secure=False)
        minioClient.list_buckets()
    except MaxRetryError:
        click.secho('Cannot connect to minio instance. Check your credentials '
                    'and hostname. Ensure that endpoint is not prefixed with '
                    'http or https.', fg='red')
        raise click.Abort()

    # Create configuration
    config = {'minio_endpoint': endpoint,
              'minio_accesskey': accesskey,
              'minio_secretkey': secretkey}

    if os.path.exists(os.path.join(lab_dir, 'config.yaml')):
        with open(os.path.join(lab_dir, 'config.yaml'), 'r') as file:
            minio_config = yaml.safe_load(file)

        if tag in minio_config.keys():
            click.secho('Host tag ' + tag + ' already exists in your '
                        'configuration. Try a different name.', fg='red')
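Because the minio client talks HTTP through urllib3 under the hood, an unreachable endpoint surfaces here as urllib3's MaxRetryError rather than a minio-specific error, which is what the connection test above relies on. The same check, extracted into a tiny helper (names are illustrative):

from minio import Minio
from urllib3.exceptions import MaxRetryError

def can_reach_minio(endpoint, access_key, secret_key, secure=False):
    """Return True if list_buckets() succeeds, False if the host is unreachable."""
    client = Minio(endpoint, access_key=access_key, secret_key=secret_key, secure=secure)
    try:
        client.list_buckets()
        return True
    except MaxRetryError:
        return False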