How to use the boto3.Session function in boto3

To help you get started, we've selected a few boto3.Session examples, based on popular ways it is used in public projects.

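Before the project examples, here is a minimal sketch of the default pattern: a Session built with no arguments resolves credentials and region from your environment (environment variables, the shared ~/.aws files, or an instance role), and every client or resource created from it shares them.

import boto3

# Resolve credentials from env vars, ~/.aws/credentials, or an instance role.
session = boto3.Session()

# Clients created from the session reuse its credentials and default region.
s3 = session.client('s3')
for bucket in s3.list_buckets()['Buckets']:
    print(bucket['Name'])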

Example from aws/sagemaker-chainer-container (test/utils/local_mode.py):
def create_docker_services(command, tmpdir, hosts, image, additional_volumes, additional_env_vars,
                           customer_script, source_dir, entrypoint, use_gpu=False):

    environment = []
    # A default Session resolves credentials and region from the
    # surrounding environment (env vars, shared config, instance role)
    session = boto3.Session()

    optml_dirs = set()
    if command == 'train':
        optml_dirs = {'output', 'input'}

    elif command == 'serve':
        environment.extend(DEFAULT_HOSTING_ENV)

        if customer_script:
            timestamp = utils.sagemaker_timestamp()
            s3_script_path = fw_utils.tar_and_upload_dir(session=session,
                                                         bucket=default_bucket(session),
                                                         s3_key_prefix='test-{}'.format(timestamp),
                                                         script=customer_script,
                                                         directory=source_dir)[0]
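Here fw_utils.tar_and_upload_dir and default_bucket are helpers from the repo's own test utilities; the point is that one Session is created up front and handed to every helper so they share credentials. A rough, hypothetical stand-in for the upload step (the bucket name and key are placeholders):

import tarfile
import tempfile

import boto3

def tar_and_upload(session, bucket, key, directory):
    # Hypothetical stand-in for fw_utils.tar_and_upload_dir: tar the
    # directory and upload it with the caller's session.
    with tempfile.NamedTemporaryFile(suffix='.tar.gz') as tmp:
        with tarfile.open(tmp.name, 'w:gz') as tar:
            tar.add(directory, arcname='.')
        session.client('s3').upload_file(tmp.name, bucket, key)
    return 's3://{}/{}'.format(bucket, key)

session = boto3.Session()
print(tar_and_upload(session, 'my-test-bucket', 'test-scripts/source.tar.gz', './src'))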
Example from jonhadfield/ansible-lookups (aws_account_id.py):
def run(self, terms, variables=None, **kwargs):
        session = boto3.Session()
        # Try getting the account id from the current IAM user's ARN
        try:
            iam_client = session.client('iam')
            # get_user() returns a dict; the account id is the fifth
            # colon-separated field of the user's ARN
            return [iam_client.get_user()['User']['Arn'].split(':')[4]]
        except Exception:
            # Credentials may not belong to an IAM user
            pass
        # Try getting it from STS, which works for any valid credentials
        try:
            sts_client = session.client('sts')
            return [sts_client.get_caller_identity()['Account']]
        except Exception:
            pass
        # Try getting it from the instance metadata service (instance role)
        try:
            response = urllib.request.urlopen('http://169.254.169.254/latest/meta-data/iam/info/')
            # Parse the account id out of the instance profile ARN
            info = json.loads(response.read())
            return [info['InstanceProfileArn'].split(':')[4]]
        except Exception:
            pass
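Of the three fallbacks above, the STS call is the most general: get_caller_identity() works for IAM users, assumed roles, and instance profiles alike and requires no permissions, so on current boto3 the whole lookup usually reduces to:

import boto3

# Valid for any kind of credentials; no IAM permissions required.
print(boto3.Session().client('sts').get_caller_identity()['Account'])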
Example from AICoE/prometheus-anomaly-detector-legacy (ceph.py):
def store_data(self, name, values, object_path=None):
        '''
        Store predictions in Ceph (or any S3-compatible blob storage)
        '''
        if not values:
            return "No values for {}".format(name)
        # Create a session against the blob storage endpoint with the
        # stored credentials
        session = boto3.Session(
            aws_access_key_id=self.boto_settings['access_key'],
            aws_secret_access_key=self.boto_settings['secret_key']
        )

        # verify=False disables TLS certificate verification for the
        # custom endpoint
        s3 = session.resource('s3',
                              endpoint_url=self.boto_settings['object_store_endpoint'],
                              verify=False)
        # e.g. prometheus-openshift-devops-monitor.a3c1.starter-us-west-1.openshiftapps.com/container_cpu_usage_percent_by_host/201807040259.json.bz2
        if not object_path:
            object_path = str(name)
        object_path = object_path + ".bz2"
        try:
            payload = bz2.compress(values.encode('utf-8'))
        except AttributeError:
            # values may not be a str; coerce it before compressing
            payload = bz2.compress(str(values).encode('utf-8'))
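The snippet ends before the actual write. Assuming the usual resource API, the upload against an S3-compatible endpoint could look like the following sketch (the endpoint URL, bucket, and key are placeholders):

import bz2

import boto3

session = boto3.Session(
    aws_access_key_id='ACCESS_KEY',          # placeholder credentials
    aws_secret_access_key='SECRET_KEY',
)
# endpoint_url points boto3 at any S3-compatible store (Ceph, MinIO, ...)
s3 = session.resource('s3', endpoint_url='https://ceph.example.com')

payload = bz2.compress('{"metric": 1}'.encode('utf-8'))
s3.Object('predictions', 'metric_name.json.bz2').put(Body=payload)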
Example from cloud-custodian/cloud-custodian (c7n/credentials.py):
def __call__(self, assume=True, region=None):
        if self.assume_role and assume:
            # Start from the profile's credentials, then assume the target role
            session = Session(profile_name=self.profile)
            session = assumed_session(
                self.assume_role, self.session_name, session,
                region or self.region, self.external_id)
        else:
            session = Session(
                region_name=region or self.region, profile_name=self.profile)

        return self.update(session)
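assumed_session is cloud-custodian's own helper, which also arranges for the temporary credentials to be refreshed before they expire. A simplified one-shot sketch of what it does, without the refresh machinery:

import boto3

def one_shot_assumed_session(role_arn, session_name, session, region=None, external_id=None):
    # Assume the role once and build a plain Session from the result.
    params = {'RoleArn': role_arn, 'RoleSessionName': session_name}
    if external_id:
        params['ExternalId'] = external_id
    creds = session.client('sts').assume_role(**params)['Credentials']
    return boto3.Session(
        aws_access_key_id=creds['AccessKeyId'],
        aws_secret_access_key=creds['SecretAccessKey'],
        aws_session_token=creds['SessionToken'],
        region_name=region,
    )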
Example from aws/lumberyard (dev/Gems/CloudGemComputeFarm/v1/Harness/main.py):
print("Couldn't load EC2 instance data from environment, using computer hostname {}".format(identity))

    if not args.region:
        args.region = ec2_region

    # You can supply a profile to use if you are testing locally.
    session = boto3.Session(profile_name=args.profile)

    # You can supply a role arn to use if you are testing locally.
    if args.role_arn:
        sts_result = session.client('sts').assume_role(
            DurationSeconds=3600,
            RoleSessionName="Harness-" + str(uuid.uuid4()),
            RoleArn=args.role_arn
        )['Credentials']
        session = boto3.Session(
            aws_access_key_id=sts_result['AccessKeyId'],
            aws_secret_access_key=sts_result['SecretAccessKey'],
            aws_session_token=sts_result['SessionToken']
        )

    if args.stdout:
        if args.stdout == 'cloudwatch':
            writeHandler = cloudwatch.OutputHandler('HARNESS-DEBUG', session, args.region, identity, 'decider' if args.run_decider else 'worker')
        else:
            fp = open(args.stdout, "w")
            sys.stdout = fp
            sys.stderr = fp

    divide_task = config.TaskConfig(args.div_task, args.div_task_version, divide.handler)
    merge_task = config.TaskConfig(args.merge_task, args.merge_task_version, merge.handler)
    build_task = config.TaskConfig(args.build_task, args.build_task_version, build.handler) if args.build_task else merge_task
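One caveat with sessions built this way: the assumed-role credentials are static and stop working after DurationSeconds, and boto3 does not refresh them automatically. A sketch that surfaces the expiry (the profile name and role ARN are placeholders):

import boto3

session = boto3.Session(profile_name='testing')
creds = session.client('sts').assume_role(
    DurationSeconds=3600,
    RoleSessionName='Harness-local-test',
    RoleArn='arn:aws:iam::123456789012:role/HarnessRole',
)['Credentials']

# The response carries its own expiry timestamp; plan to re-assume
# the role before this point in long-running processes.
print('credentials valid until', creds['Expiration'])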
Example from snowflakedb/SnowAlert (src/connectors/utils.py):
    # Build an STS client from the source role's temporary credentials
    sts_client = boto3.Session(
        aws_access_key_id=src_role['Credentials']['AccessKeyId'],
        aws_secret_access_key=src_role['Credentials']['SecretAccessKey'],
        aws_session_token=src_role['Credentials']['SessionToken'],
    ).client('sts')

    sts_role = (
        sts_client.assume_role(
            RoleArn=dest_role_arn,
            RoleSessionName=session_name,
            ExternalId=dest_external_id,
        )
        if dest_external_id
        else sts_client.assume_role(RoleArn=dest_role_arn, RoleSessionName=session_name)
    )

    return boto3.Session(
        aws_access_key_id=sts_role['Credentials']['AccessKeyId'],
        aws_secret_access_key=sts_role['Credentials']['SecretAccessKey'],
        aws_session_token=sts_role['Credentials']['SessionToken'],
    )
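The function above chains two hops: the caller's credentials assume a source role, whose temporary credentials then assume the destination role (optionally gated by an external id). A simplified single-hop sketch of the same session-from-assumed-role pattern:

import boto3

def session_for_role(role_arn, session_name, external_id=None):
    # Single-hop variant: current credentials -> destination role session.
    sts = boto3.Session().client('sts')
    params = {'RoleArn': role_arn, 'RoleSessionName': session_name}
    if external_id:
        params['ExternalId'] = external_id
    creds = sts.assume_role(**params)['Credentials']
    return boto3.Session(
        aws_access_key_id=creds['AccessKeyId'],
        aws_secret_access_key=creds['SecretAccessKey'],
        aws_session_token=creds['SessionToken'],
    )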
Example from turnerlabs/antiope (search-cluster/lambda/ingest_s3.py):
def lambda_handler(event, context):
    logger.debug("Received event: " + json.dumps(event, sort_keys=True))

    region = os.environ['AWS_REGION']
    service = 'es'
    # Sign requests with whatever credentials the execution role provides
    credentials = boto3.Session().get_credentials()
    awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)

    host = "https://{}".format(os.environ['ES_DOMAIN_ENDPOINT'])
    es_type = "_doc"  # This is what es is moving to after deprecating types in 6.0
    headers = {"Content-Type": "application/json"}

    bulk_ingest_body = ""
    count = 0

    for record in event['Records']:
        message = json.loads(record['body'])
        if 'Records' not in message:
            logger.error(f"Got Message with no records: {json.dumps(message, indent=2, sort_keys=True)}")
            continue
        logger.debug("records: {} message: {}".format(len(message['Records']), json.dumps(message, sort_keys=True)))
Example from carnal0wnage/weirdAAL (libs/aws/ecr.py):
import boto3
import botocore
import os
import pprint
import sys

pp = pprint.PrettyPrinter(indent=5, width=80)

# from http://docs.aws.amazon.com/general/latest/gr/rande.html
regions = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'ca-central-1', 'eu-central-1', 'eu-west-1', 'eu-west-2', 'ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2']

'''
Code to get the AWS_ACCESS_KEY_ID from boto3
'''
session = boto3.Session()
credentials = session.get_credentials()
AWS_ACCESS_KEY_ID = credentials.access_key


def ecr_describe_repositories():
    '''
    Use ecr describe_repositories function to list available repositories
    '''
    print("### Printing ECR Repositories ###")
    try:
        for region in regions:
            client = boto3.client('ecr', region_name=region)
            response = client.describe_repositories()

            if response.get('repositories') is None:
                print("{} likely does not have ECR permissions\n".format(AWS_ACCESS_KEY_ID))
            else:
                pp.pprint(response['repositories'])
    except botocore.exceptions.ClientError as e:
        # Region- or permission-specific failures surface here
        print(e)
Example from devopsloft/devopsloft (project/spin-docker.py):
    if environment == 'dev':
        command = "docker-compose down -v --rmi all --remove-orphans"
        completed_response = subprocess.run(
            command,
            env=envVars,
            shell=True,
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True
        )
        print(completed_response.stdout)
    elif environment in ['stage', 'prod']:
        dotenv.load_dotenv()
        logging.info("AWS Profile - {0}".format(os.getenv('AWS_PROFILE')))
        session = boto3.Session(profile_name=os.getenv('AWS_PROFILE'))
        client = session.client('ecs')
        clusterList = client.list_clusters()
        if clusterList['clusterArns']:
            containerInstancesList = client.list_container_instances(
                cluster=clusterList['clusterArns'][0],
                status='ACTIVE'
            )
            if containerInstancesList['containerInstanceArns']:
                response = client.deregister_container_instance(
                    cluster=clusterList['clusterArns'][0],
                    containerInstance=containerInstancesList['containerInstanceArns'][0], # noqa
                    force=True
                )

            response = client.delete_cluster(
                cluster=clusterList['clusterArns'][0]
            )
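list_clusters() returns at most one page of ARNs per call, so code that can outgrow a page should use a paginator from the same session-derived client. A sketch (the profile name is a placeholder):

import boto3

session = boto3.Session(profile_name='default')
client = session.client('ecs')

# The paginator transparently follows nextToken across pages.
paginator = client.get_paginator('list_clusters')
for page in paginator.paginate():
    for arn in page['clusterArns']:
        print(arn)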