How to use the boto.exception module in boto

To help you get started, we’ve selected a few boto examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github saltstack / salt / tests / unit / utils / test_botomod.py View on Github external
def test_get_conn_with_no_auth_params_raises_invocation_error(self):
        """get_connection must raise SaltInvocationError when boto cannot find auth credentials."""
        # Make the regional connect call blow up with NoAuthHandlerFound and
        # verify botomod translates that into a SaltInvocationError.
        patch_target = 'boto.{0}.connect_to_region'.format(service)
        auth_failure = boto.exception.NoAuthHandlerFound()
        with patch(patch_target, side_effect=auth_failure):
            with self.assertRaises(SaltInvocationError):
                botomod.get_connection(service)
github mnielsen / mini_qa / mini_qa.py View on Github external
wolfram_server = 'http://api.wolframalpha.com/v1/query.jsp'

#### Parameters used to score results returned from the Google-based
#### system
CAPITALIZATION_FACTOR = 2.2
QUOTED_QUERY_SCORE = 5
UNQUOTED_QUERY_SCORE = 2


def _cache_bucket_key(bucket_name, conflict_message):
    """Return a boto Key into `bucket_name`, creating the bucket if needed.

    :param bucket_name: name of the S3 bucket to create or retrieve.
    :param conflict_message: message printed before exiting when the bucket
        name is already taken (``boto.exception.S3CreateError``).
    """
    try:
        return Key(s3conn.create_bucket(bucket_name))
    except boto.exception.S3CreateError:
        print (conflict_message)
        sys.exit()


#### Create or retrieve an S3 bucket for the cache of Google search
#### results
s3conn = S3Connection(config.AWS_ACCESS_KEY_ID, config.AWS_SECRET_ACCESS_KEY)
google_cache_bucket_name = (config.AWS_ACCESS_KEY_ID).lower()+"-google-cache"
GOOGLE_CACHE = _cache_bucket_key(
    google_cache_bucket_name,
    "When creating an S3 bucket for Google cache results, a conflict\n"
    "occurred, and a bucket with the desired name already exists.")

#### Create or retrieve an S3 bucket for the cache of Wolfram Alpha
#### results
wolfram_cache_bucket_name = (config.AWS_ACCESS_KEY_ID).lower()+"-wolfram-cache"
WOLFRAM_CACHE = _cache_bucket_key(
    wolfram_cache_bucket_name,
    "When creating an S3 bucket for Wolfram Alpha cache results, a\n"
    "conflict occurred, and a bucket with the desired name already\n"
    "exists.")
github saltstack / salt / salt / modules / boto_secgroup.py View on Github external
# NOTE(review): this excerpt begins mid-statement — the enclosing `try:` and
# the boto call these keyword arguments belong to are above the visible code.
group_ids=group_ids,
                                         filters=filters)
        # Convert each returned security-group object into a plain dict
        # holding only the attributes named in `interesting`.
        for g in r:
            n = {}
            for a in interesting:
                # Missing attributes default to None rather than raising.
                v = getattr(g, a, None)
                if a == 'region':
                    # region is an object; keep only its name string.
                    v = v.name
                elif a in ('rules', 'rules_egress'):
                    v = _parse_rules(g, v)
                elif a == 'instances':
                    # instances is a callable returning instance objects;
                    # keep only their ids.
                    v = [i.id for i in v()]
                n[a] = v
            ret += [n]
        return ret
    except boto.exception.BotoServerError as e:
        # On any AWS-side error: log at debug level and return an empty
        # list instead of propagating.
        log.debug(e)
        return []
github edx / configuration / playbooks / library / ec2_ami_2_0_0_1.py View on Github external
# NOTE(review): excerpt begins mid-function — the wait loop this first
# statement belongs to starts above the visible code.
img = ec2.get_image(image_id)
        time.sleep(3)
    if wait and wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg = "timed out waiting for image to be created")

    # Apply any requested tags to the freshly created AMI.
    if tags:
        try:
            ec2.create_tags(image_id, tags)
        # Python 2 except syntax ("except X, e") — this module predates py3.
        except boto.exception.EC2ResponseError, e:
            module.fail_json(msg = "Image tagging failed => %s: %s" % (e.error_code, e.error_message))
    # Grant launch permissions (e.g. sharing the AMI with other accounts).
    if launch_permissions:
        try:
            img = ec2.get_image(image_id)
            img.set_launch_permissions(**launch_permissions)
        except boto.exception.BotoServerError, e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message), image_id=image_id)

    module.exit_json(msg="AMI creation operation complete", image_id=image_id, state=img.state, changed=True)
github crits / crits / crits / core / s3_tools.py View on Github external
:param bucket: The bucket to connect to.
    :type bucket: str
    :returns: :class:`boto.s3.connection.S3Connection`, S3Error
    """

    # Allow the S3 endpoint to be overridden in settings; default to AWS.
    S3_hostname = getattr(settings, 'S3_HOSTNAME', S3Connection.DefaultHost)
    try:
        conn = S3Connection(aws_access_key_id = settings.AWS_ACCESS_KEY_ID,
                            aws_secret_access_key = settings.AWS_SECRET_ACCESS_KEY,
                            is_secure = True,
                            host = S3_hostname)

        mybucket = conn.get_bucket(bucket)
        return mybucket
    # Wrap boto's S3 error in the project's S3Error so callers catch one type.
    except boto.exception.S3ResponseError as e:
        raise S3Error("Error connecting to S3: %s" % e)
    # NOTE(review): this bare `except: raise` clause is a no-op — any other
    # exception would propagate unchanged anyway.
    except:
        raise
github Nextdoor / kingpin / kingpin / actors / aws / base.py View on Github external
def thread(self, function, *args, **kwargs):
        """Execute `function` in a concurrent thread.

        Example:
            >>> zones = yield thread(ec2_conn.get_all_zones)

        This allows execution of any function in a thread without having
        to write a wrapper method that is decorated with run_on_executor()
        """
        try:
            return function(*args, **kwargs)
        except boto_exception.BotoServerError as e:
            # If we're using temporary IAM credentials, when those expire we
            # can get back a blank 400 from Amazon. This is confusing, but it
            # happens because of https://github.com/boto/boto/issues/898. In
            # most cases, these temporary IAM creds can be re-loaded by
            # reaching out to the AWS API (for example, if we're using an IAM
            # Instance Profile role), so thats what Boto tries to do. However,
            # if you're using short-term creds (say from SAML auth'd logins),
            # then this fails and Boto returns a blank 400.
            # A blank 400 (status 400, no error_code) is the expired-credential
            # signature described above; surface it as InvalidCredentials.
            if (e.status == 400 and
                e.reason == 'Bad Request' and
                    e.error_code is None):
                msg = 'Access credentials have expired'
                raise exceptions.InvalidCredentials(msg)

            msg = '%s: %s' % (e.error_code, e.message)
            if e.status == 403:
                # NOTE(review): excerpt truncated here — the 403 (permission
                # denied) handling body is not visible in this chunk.
github cloudlinux / kuberdock-platform / node_storage_manage / aws.py View on Github external
# NOTE(review): excerpt begins mid-expression — this dict is the payload of an
# ERROR return started above the visible code (missing-iops validation).
{
                        'message': err_msg.format(
                            'iops should be specified for volume type "{}"'\
                            .format(volume_type)
                        )
                    }
                )
        # Fall back to the default EBS volume type when none was requested.
        elif not volume_type:
            volume_type = DEFAULT_EBS_VOLUME_TYPE

        try:
            volume = create_ebs_volume(
                connection, av_zone, name, call_args.size,
                volume_type=volume_type, iops=iops
            )
        # boto client/server failures and our own TimeoutError are all
        # reported uniformly as an ERROR tuple rather than propagated.
        except (boto.exception.BotoClientError,
                boto.exception.BotoServerError,
                TimeoutError) as err:
            return ERROR, {'message': err_msg.format(err)}
        if not volume:
            return ERROR, {'message': err_msg.format('Unknown error')}

    # Attach the (found or freshly created) volume to this instance.
    try:
        return attach_ebs_volume(connection, instance_id, volume)
    except (boto.exception.BotoClientError,
            boto.exception.BotoServerError) as err:
        return ERROR, {'message': 'Failed to attach volume: {}'.format(err)}
github ansible / awx / awx / lib / site-packages / django_extensions / management / commands / sync_media_s3.py View on Github external
def open_s3(self):
        """Connect to S3 and return ``(bucket, key)`` for the configured bucket.

        The bucket is fetched if it already exists; otherwise it is created.
        """
        connection = boto.connect_s3(self.AWS_ACCESS_KEY_ID,
                                     self.AWS_SECRET_ACCESS_KEY)
        try:
            target_bucket = connection.get_bucket(self.AWS_BUCKET_NAME)
        except boto.exception.S3ResponseError:
            # Lookup failed — assume the bucket does not exist yet.
            target_bucket = connection.create_bucket(self.AWS_BUCKET_NAME)
        return target_bucket, boto.s3.key.Key(target_bucket)
github ansible / ansible-modules-core / cloud / amazon / ec2.py View on Github external
# NOTE(review): excerpt begins mid-function, inside a `try:` block that opens
# above the visible code (spot-instance request creation).
while spot_wait_timeout > time.time():
                        reqs = ec2.get_all_spot_instance_requests()
                        # Map each of our spot requests (res) to the instance
                        # id AWS eventually assigns to it.
                        for sirb in res:
                            if sirb.id in spot_req_inst_ids:
                                continue
                            for sir in reqs:
                                if sir.id == sirb.id and sir.instance_id is not None:
                                    spot_req_inst_ids[sirb.id] = sir.instance_id
                        # Poll every 5s until every requested instance exists.
                        if len(spot_req_inst_ids) < count:
                            time.sleep(5)
                        else:
                            break
                    if spot_wait_timeout <= time.time():
                        module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime())
                    instids = spot_req_inst_ids.values()
        # Python 2 except syntax ("except X, e") — this module predates py3.
        except boto.exception.BotoServerError, e:
            module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message))

        # wait here until the instances are up
        num_running = 0
        wait_timeout = time.time() + wait_timeout
        while wait_timeout > time.time() and num_running < len(instids):
            try: 
                res_list = ec2.get_all_instances(instids)
            except boto.exception.BotoServerError, e:
                # New instance ids may not be queryable immediately after
                # creation; retry on NotFound, re-raise anything else.
                if e.error_code == 'InvalidInstanceID.NotFound':
                    time.sleep(1)
                    continue
                else:
                    raise
github conda / conda / conda / common / connection.py View on Github external
# NOTE(review): excerpt begins mid-statement — these string fragments finish
# an error message started above the visible code.
'Make sure to run `source deactivate` if you '
                           'are in a conda environment.\n')
            resp.status_code = 404
            return resp

        # Credentials are resolved by boto itself (env vars, config files, …).
        conn = boto.connect_s3()

        bucket_name, key_string = url_to_S3_info(request.url)

        # Get the bucket without validation that it exists and that we have
        # permissions to list its contents.
        bucket = conn.get_bucket(bucket_name, validate=False)

        try:
            key = bucket.get_key(key_string)
        except boto.exception.S3ResponseError as exc:
            # This exception will occur if the bucket does not exist or if the
            # user does not have permission to list its contents.
            resp.status_code = 404
            resp.raw = exc
            return resp

        # Mirror the S3 object's metadata onto the synthetic HTTP response.
        if key and key.exists:
            modified = key.last_modified
            content_type = key.content_type or "text/plain"
            resp.headers = requests.structures.CaseInsensitiveDict({
                "Content-Type": content_type,
                "Content-Length": key.size,
                "Last-Modified": modified,
                })

            # NOTE(review): excerpt truncated — the key is presumably
            # downloaded into this temp file below the visible code.
            _, self._temp_file = tempfile.mkstemp()