How to use the toil.lib.retry.retry function in toil

To help you get started, we've collected a few examples showing how toil.lib.retry.retry is used in the toil codebase itself (DataBiosphere/toil).
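
Every example below follows the same pattern: retry() yields a series of context-manager attempts, and the block under "with attempt:" is re-run on failures the predicate accepts until the call succeeds or the retry policy (delays, timeout) gives up. A minimal sketch of that pattern, using placeholder names (is_transient, flaky_call) that are not part of toil:

from toil.lib.retry import retry

def is_transient(e):
    # Placeholder predicate: only retry on connection problems.
    return isinstance(e, ConnectionError)

# Pause 0, 1, 1, 4, 16 seconds between attempts, for at most 60 seconds overall.
for attempt in retry(delays=(0, 1, 1, 4, 16), timeout=60, predicate=is_transient):
    with attempt:
        flaky_call()  # placeholder for the operation that may fail transiently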


github DataBiosphere / toil / src / toil / common.py
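Starting toil's Grafana/Prometheus dashboard: once the Docker container is launched, the POST that registers Prometheus as a Grafana data source is retried on connection errors with increasing delays, since Grafana may not be accepting requests yet.
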
        try:
            subprocess.check_call(["docker", "run",
                                   "--name", "toil_grafana",
                                   "-d", "-p=3000:3000",
                                   self.grafanaImage])
        except subprocess.CalledProcessError:
            logger.warning("Could not start prometheus/grafana dashboard.")
            return

        # Add prometheus data source
        def requestPredicate(e):
            if isinstance(e, requests.exceptions.ConnectionError):
                return True
            return False

        try:
            for attempt in retry(delays=(0, 1, 1, 4, 16), predicate=requestPredicate):
                with attempt:
                    requests.post('http://localhost:3000/api/datasources', auth=('admin', 'admin'),
                                  data='{"name":"DS_PROMETHEUS","type":"prometheus", \
                                  "url":"http://localhost:9090", "access":"direct"}',
                                  headers={'content-type': 'application/json', "access": "direct"})
        except requests.exceptions.ConnectionError:
            logger.debug(
                "Could not add data source to Grafana dashboard - no metrics will be displayed.")
github DataBiosphere / toil / src / toil / jobStores / googleJobStore.py
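The inner function of a retry decorator for the Google job store: each call of the wrapped function is retried with truncated exponential backoff for up to 300 seconds, as long as googleRetryPredicate considers the error retryable.
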
def wrapper(*args, **kwargs):
        for attempt in retry(delays=truncExpBackoff(),
                             timeout=300,
                             predicate=googleRetryPredicate):
            with attempt:
                return f(*args, **kwargs)
    return wrapper
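
The snippet above is the inner function of a retry decorator (the enclosing definition is not shown). A minimal sketch of the surrounding pattern, using a placeholder decorator name (retry_transient) and predicate rather than toil's own:

import functools

from toil.lib.retry import retry

def retry_transient(f):
    # Hypothetical decorator: re-run f whenever the predicate accepts the error.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        for attempt in retry(delays=(0, 1, 2, 4), timeout=300,
                             predicate=lambda e: isinstance(e, OSError)):
            with attempt:
                return f(*args, **kwargs)
    return wrapper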
github DataBiosphere / toil / src / toil / jobStores / aws / utils.py
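A small factory that wraps retry() with the module's default delays and timeout and a predicate for retryable SimpleDB errors.
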
def retry_sdb(delays=default_delays, timeout=default_timeout, predicate=retryable_sdb_errors):
    return retry(delays=delays, timeout=timeout, predicate=predicate)
github DataBiosphere / toil / src / toil / lib / ec2.py
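The EC2 variant: a handful of delays that double from a short base interval, an overall deadline, and a caller-supplied predicate (not_found by default).
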
def retry_ec2(t=a_short_time, retry_for=10 * a_short_time, retry_while=not_found):
    return retry(delays=(t, t, t * 2, t * 4),
                 timeout=retry_for,
                 predicate=retry_while)
github DataBiosphere / toil / src / toil / jobStores / aws / utils.py
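The S3 counterpart of retry_sdb above, differing only in its error predicate.
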
def retry_s3(delays=default_delays, timeout=default_timeout, predicate=retryable_s3_errors):
    return retry(delays=delays, timeout=timeout, predicate=predicate)
github DataBiosphere / toil / src / toil / __init__.py
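Here predicate=lambda _: True treats every exception as retryable, so the loop keeps asking Boto 3 for credentials for up to 10 seconds before giving up.
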
def _obtain_credentials_from_boto3(self):
        """
        We know the current cached credentials are not good, and that we
        need to get them from Boto 3. Fill in our credential fields
        (_access_key, _secret_key, _security_token,
        _credential_expiry_time) from Boto 3.
        """
        
        # We get a Credentials object, a RefreshableCredentials, or None on failure.
        creds = None
        for attempt in retry(timeout=10, predicate=lambda _: True):
            with attempt:
                creds = self._boto3_resolver.load_credentials()
                
                if creds is None:
                    try:
                        resolvers = str(self._boto3_resolver.providers)
                    except Exception:
                        resolvers = "(Resolvers unavailable)"
                    raise RuntimeError("Could not obtain AWS credentials from Boto3. Resolvers tried: " + resolvers)
        
        # Make sure the credentials object actually holds credentials if it is lazy
        creds.get_frozen_credentials()
        
        # Get when the credentials will expire, if ever
        if isinstance(creds, RefreshableCredentials):
            # Credentials may expire.
github DataBiosphere / toil / src / toil / provisioners / aws / awsProvisioner.py
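The same decorator-wrapper pattern as the Google job store, with an AWS-specific retry predicate.
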
def wrapper(*args, **kwargs):
        for attempt in retry(delays=truncExpBackoff(),
                             timeout=300,
                             predicate=awsRetryPredicate):
            with attempt:
                return f(*args, **kwargs)
    return wrapper