@retrying.retry(wait_fixed=5000, stop_max_delay=timeout * 1000,
                retry_on_result=lambda ret: ret is None,
                retry_on_exception=lambda x: False)
def _poll_marathon_for_app_deployment(app_id):
    # Nested inside a client method: `self` and `timeout` come from the
    # enclosing scope.
    Endpoint = collections.namedtuple("Endpoint", ["host", "port", "ip"])
    # Some of the counters need to be explicitly enabled now and/or in
    # future versions of Marathon:
    req_params = (('embed', 'apps.lastTaskFailure'),
                  ('embed', 'apps.counts'))
    log.info('Waiting for application to be deployed...')
    r = self.get(path_join('v2/apps', app_id), params=req_params)
    r.raise_for_status()
    data = r.json()
    log.debug('Current application state data: {}'.format(repr(data)))
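# A self-contained sketch of the same retry_on_result polling pattern
# (poll_once and its random "deployment" are hypothetical, not from the
# snippet above): retrying re-invokes the function while it returns None,
# waits 100 ms between attempts, gives up after 10 s, and never retries
# on exceptions.
import random
import retrying

@retrying.retry(wait_fixed=100, stop_max_delay=10 * 1000,
                retry_on_result=lambda ret: ret is None,
                retry_on_exception=lambda x: False)
def poll_once():
    # None means "not ready yet" and triggers another attempt;
    # any other return value is passed back to the caller.
    return "deployed" if random.random() < 0.3 else None

print(poll_once())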
@retrying.retry(
    wait_fixed=STD_INTERVAL,
    stop_max_delay=METRICS_WAITTIME,
    retry_on_exception=lambda e: isinstance(e, AssertionError)
)
def check_adminrouter_metrics():
    measurements = set()
    expect_dropped = set([
        'nginx_vts_filter',
        'nginx_vts_upstream',
        'nginx_vts_server',
    ])
    unexpected_samples = []
    response = get_metrics_prom(dcos_api_session, node)
    for family in text_string_to_metric_families(response.text):
        for sample in family.samples:
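# Sketch of the assert-until-true pattern used above (hypothetical helper;
# the STD_INTERVAL/METRICS_WAITTIME constants are replaced with literals):
# raising AssertionError triggers a retry, so a plain `assert` doubles as
# the polling condition.
import retrying

@retrying.retry(wait_fixed=1000, stop_max_delay=30 * 1000,
                retry_on_exception=lambda e: isinstance(e, AssertionError))
def wait_for_samples(samples):
    assert len(samples) >= 3, 'not enough samples yet'
    return samples

wait_for_samples(['a', 'b', 'c'])  # passes on the first attempt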
@retrying.retry(wait_fixed=1000,
                retry_on_result=lambda ret: ret is False,
                retry_on_exception=lambda x: False)
def _wait_for_slaves_to_join(self):
    r = self.get('/mesos/master/slaves')
    if r.status_code != 200:
        msg = "Mesos master returned status code {} != 200 "
        msg += "continuing to wait..."
        log.info(msg.format(r.status_code))
        return False
    data = r.json()
    # Check that all the slaves the test knows about have joined;
    # every one of them is needed for the test to pass.
    num_slaves = len(data['slaves'])
    if num_slaves >= len(self.all_slaves):
        msg = "Sufficient ({} >= {}) number of slaves have joined the cluster"
        log.info(msg.format(num_slaves, len(self.all_slaves)))
        return True
    return False
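# Same polling idiom as _poll_marathon_for_app_deployment above, but keyed
# on `ret is False` instead of `ret is None`, so the function could return
# None (or any truthy value) without triggering another retry.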
@retry(stop_max_attempt_number=3, wait_fixed=3000)
def urlopen_with_retry(url):
    return urllib2.urlopen(url)
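# A Python 3 rendering of the same idea (the original snippet targets
# Python 2's urllib2); behavior is otherwise identical: any exception
# triggers a retry, up to 3 attempts spaced 3 s apart.
from urllib.request import urlopen
from retrying import retry

@retry(stop_max_attempt_number=3, wait_fixed=3000)
def urlopen_with_retry_py3(url):
    return urlopen(url)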
@retry(stop_max_attempt_number=3, wait_exponential_multiplier=1000,
       wait_exponential_max=10000, wait_jitter_max=2000,
       retry_on_exception=retry_utils.retry_on_timeouts_connection_internal_server_and_throttles)
def _make_retrying_upload_file_call(upload_manager, namespace, bucket_name, name, file_name, **kwargs):
    return upload_manager.upload_file(namespace, bucket_name, name, file_name, **kwargs)
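# How the backoff above plays out: retrying sleeps roughly
# wait_exponential_multiplier * 2**attempt_number milliseconds after each
# failed attempt, capped at wait_exponential_max, plus up to
# wait_jitter_max of random jitter. Here that means 2 s after attempt 1
# and 4 s after attempt 2 (each +0-2 s jitter); the 10 s cap would only
# kick in from attempt 4 onward, but stop_max_attempt_number=3 ends the
# retries before that.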
@retrying.retry(
    retry_on_exception=should_retry,
    stop_max_attempt_number=CONF.ibmc.connection_attempts,
    wait_fixed=CONF.ibmc.connection_retry_interval * 1000)
@functools.wraps(f)
def wrapper(*args, **kwargs):
    # NOTE(dtantsur): this code could be written simpler, but then unit
    # testing decorated functions is pretty hard, as we usually pass a
    # Mock object instead of TaskManager there.
    if len(args) > 1:
        is_task_mgr = isinstance(args[1], task_manager.TaskManager)
        task = args[1] if is_task_mgr else args[0]
    else:
        task = args[0]
    node = task.node
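# Sketch of the surrounding decorator-factory pattern (hypothetical names;
# the real code lives in ironic's ibmc driver): the retry policy is applied
# to a wrapper so every decorated function retries on connection errors.
import functools
import retrying

def retry_on_connection_error(f):
    @retrying.retry(
        retry_on_exception=lambda e: isinstance(e, ConnectionError),
        stop_max_attempt_number=5,
        wait_fixed=1000)
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    return wrapper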
@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500,
       stop_max_attempt_number=50)
@utils.no_4byte_params
def _image_update(context, values, image_id, purge_props=False,
                  from_state=None):
    """Used internally by image_create and image_update.

    :param context: Request context
    :param values: A dict of attributes to set
    :param image_id: If None, create the image, otherwise, find and update it
    """
    # NOTE(jbresnah) values is altered in this function, so a copy is needed
    values = values.copy()
    session = get_session()
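# A minimal sketch of what the _retry_on_deadlock predicate looks like in
# this style of code (assuming oslo.db exception types; the exact class is
# not shown in the snippet above):
from oslo_db import exception as db_exception

def _retry_on_deadlock(exc):
    # Retry the transaction only for database deadlocks;
    # any other exception propagates immediately.
    return isinstance(exc, db_exception.DBDeadlock)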
@retry(stop_max_attempt_number=5, wait_fixed=60000)
def remote_read_disk_apply_async(self, remote_server, IOQueue_obj_id, ArchiveObject_obj_ObjectUUID, queue='smdisk'):
    """Remote REST call to apply_async

    :param remote_server: example: [https://servername:port/api/write_storage_method_tape_apply, user, password]
    :param IOQueue_obj_id: Primary key of the IOQueue database row to be processed, ex 'id1'
    :param ArchiveObject_obj_ObjectUUID: ObjectUUID key of the ArchiveObject database row to be processed, ex 'id1'
    :param queue: celery queue name, ex 'smdisk'
    """
    logger = logging.getLogger('Storage')
    base_url, ruser, rpass = remote_server
    read_disk_rest_endpoint = urljoin(base_url, '/api/read_storage_method_disk_apply/')
    requests_session = requests.Session()
    requests_session.verify = False  # TLS certificate verification is disabled here
    requests_session.auth = (ruser, rpass)
    data = {'queue': queue,
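# With stop_max_attempt_number=5 and wait_fixed=60000, the remote call is
# attempted up to 5 times with 60 s between attempts, i.e. up to roughly
# 4 minutes of waiting before the last try.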
@retrying.retry(stop_max_attempt_number=3,
                wait_fixed=2000,
                retry_on_exception=lambda exc: True)
def attach_volume():
    # Nested helper: `self`, `instance_id` and `volume` come from the
    # enclosing scope.
    self.compute_api.instance_volume_attach(
        self.admin_context, instance_id, volume['id'])
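# retry_on_exception=lambda exc: True retries on *any* exception, which
# can mask programming errors. A sketch of a narrower predicate (the
# exception types here are an assumption, not from the snippet above):
import retrying

@retrying.retry(stop_max_attempt_number=3,
                wait_fixed=2000,
                retry_on_exception=lambda exc: isinstance(
                    exc, (TimeoutError, ConnectionError)))
def attach_volume_selective():
    ...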
@retry(wait_fixed=500, stop_max_attempt_number=3)
def clear_expired(obj):
    if obj.location != 'transfer_targets':
        obj.go_to('transfer_targets')
    expired_bids = obj.__get_items__(
        p_element='../..', p_type='xpath',
        gp_element="//*[contains(text(), 'Expired Items')]",
        gp_type='xpath', get_price=False)
    expired = {}
    for expired_bid in expired_bids:
        if expired_bid['asset_id'] not in expired:
            expired[expired_bid['asset_id']] = {
                'bid_amounts': 0,
                'num_results': 0
            }
        expired[expired_bid['asset_id']]['bid_amounts'] += expired_bid['current_bid']
        expired[expired_bid['asset_id']]['num_results'] += 1
    # Average the bids per asset to pick a pricing tier.
    for asset, data in expired.items():
        tier = info.get_tier(data['bid_amounts'] / data['num_results'])
        rounding = Global.rounding_tiers[tier]
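# The per-asset aggregation above can also be expressed with
# collections.defaultdict (a sketch with sample data, not from the
# original source):
import collections

expired_bids = [{'asset_id': 1001, 'current_bid': 300},
                {'asset_id': 1001, 'current_bid': 500}]  # sample data
totals = collections.defaultdict(lambda: {'bid_amounts': 0, 'num_results': 0})
for bid in expired_bids:
    totals[bid['asset_id']]['bid_amounts'] += bid['current_bid']
    totals[bid['asset_id']]['num_results'] += 1
# average bid per asset: totals[a]['bid_amounts'] / totals[a]['num_results']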