How to use the boto3.s3 module in boto3

To help you get started, we’ve selected a few boto3.s3 examples based on popular ways it is used in public projects.
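
Most of these examples follow the same pattern: create an S3 client, wrap it in boto3.s3.transfer.S3Transfer (optionally with a boto3.s3.transfer.TransferConfig), and call upload_file or download_file. The minimal sketch below shows that pattern on its own; the bucket name, key, and local paths are placeholders, and credentials are assumed to come from the environment.

import boto3
import boto3.s3.transfer

# The client picks up credentials and region from the environment or AWS config.
s3_client = boto3.client('s3')

# Use multipart transfers above 10 MB, with up to 10 concurrent threads.
config = boto3.s3.transfer.TransferConfig(
    multipart_threshold=10 * 1024 * 1024,
    max_concurrency=10)
transfer = boto3.s3.transfer.S3Transfer(client=s3_client, config=config)

# Placeholder bucket, key, and file paths for illustration only.
transfer.upload_file('model.tar.gz', 'example-bucket', 'models/model.tar.gz')
transfer.download_file('example-bucket', 'models/model.tar.gz', '/tmp/model.tar.gz')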


github sony/nnabla/python/src/nnabla/utils/cli/uploader.py (View on GitHub)
            return False

        upload_url = info['upload_path']
        if endpoint == 'https://console-api.dl.sony.com':
            self._log('Got upload_url')
        else:
            self._log('upload_url is [{}]'.format(
                upload_url))

        bucketname, key = upload_url.split('://', 1)[1].split('/', 1)
        upload_key = '{}/{}.tar'.format(key, name)

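        # Build an S3 client from the temporary credentials in the info dict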
        s3 = boto3.session.Session(aws_access_key_id=info['access_key_id'],
                                   aws_secret_access_key=info['secret_access_key'],
                                   aws_session_token=info['session_token']).client('s3')
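        # Use multipart uploads above 10 MB, with up to 10 concurrent threads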
        tc = boto3.s3.transfer.TransferConfig(
            multipart_threshold=10 * 1024 * 1024,
            max_concurrency=10)
        t = boto3.s3.transfer.S3Transfer(client=s3, config=tc)

        self._progress.init(os.path.getsize(filename), 'Upload')
        t.upload_file(filename, bucketname, upload_key,
                      callback=self._progress)
        self._progress.finish()

        return True
github IntelAI/OpenVINO-model-server/ie_serving/models/s3_model.py (View on GitHub)
    def s3_download_file(cls, path):
        if path is None:
            return None
        parsed_path = urlparse(path)
        bucket_name = parsed_path.netloc
        file_path = parsed_path.path[1:]
        tmp_path = os.path.join('/tmp', file_path.split(os.sep)[-1])
        try:
            s3_client = boto3.client(
                's3', endpoint_url=S3_CONFIG['endpoint'],
                aws_access_key_id=S3_CONFIG['access_key_id'],
                aws_secret_access_key=S3_CONFIG['secret_access_key'],
                config=Config(signature_version=S3_CONFIG['signature']),
                region_name=S3_CONFIG['region'])
            s3_transfer = boto3.s3.transfer.S3Transfer(s3_client)
            s3_transfer.download_file(bucket_name, file_path, tmp_path)
        except exceptions.ClientError:
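            # Fall back to unsigned (anonymous) requests if the signed request is rejected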
            s3_client = boto3.client(
                's3', endpoint_url=S3_CONFIG['endpoint'],
                aws_access_key_id=S3_CONFIG['access_key_id'],
                aws_secret_access_key=S3_CONFIG['secret_access_key'],
                config=Config(signature_version=UNSIGNED),
                region_name=S3_CONFIG['region'])
            s3_transfer = boto3.s3.transfer.S3Transfer(s3_client)
            s3_transfer.download_file(bucket_name, file_path, tmp_path)
        return tmp_path
github onnxbot/onnx-fb-universe/scripts/update-models-from-caffe2.py (View on GitHub)
    if only_local:
        print('No uploading in local only mode.')
        return
    model_dir = os.path.join(zoo_dir, model_name)
    suffix = '-backup' if backup else ''
    if backup:
        print('Backing up the previous version of ONNX model {}...'.format(model_name))
    rel_file_name = '{}{}.tar.gz'.format(model_name, suffix)
    abs_file_name = os.path.join(zoo_dir, rel_file_name)
    print('Compressing {} model to {}'.format(model_name, abs_file_name))
    with tarfile.open(abs_file_name, 'w:gz') as f:
        f.add(model_dir, arcname=model_name)
    file_size = os.stat(abs_file_name).st_size
    print('Uploading {} ({} MB) to s3 cloud...'.format(abs_file_name, float(file_size) / 1024 / 1024))
    client = boto3.client('s3', 'us-east-1')
    transfer = boto3.s3.transfer.S3Transfer(client)
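    # Upload the tarball to the download.onnx bucket and mark it publicly readable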
    transfer.upload_file(abs_file_name, 'download.onnx', 'models/latest/{}'.format(rel_file_name),
                         extra_args={'ACL': 'public-read'})

    print('Successfully uploaded {} to s3!'.format(rel_file_name))
github aws-quickstart/quickstart-illumina-dragen/app/source/dragen/src/scheduler/aws_utils.py (View on GitHub)
def _s3_initialize_client(s3_bucket):
    client = boto3.client('s3', region_name=_s3_get_bucket_location(s3_bucket))
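    # Tune the transfer for large files: 256 MB multipart chunks and a deep I/O queue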
    config = boto3.s3.transfer.TransferConfig(
        multipart_chunksize=256 * 1024 * 1024,
        max_concurrency=10,
        max_io_queue=1000,
        io_chunksize=2 * 1024 * 1024)
    transfer_client = boto3.s3.transfer.S3Transfer(client, config, boto3.s3.transfer.OSUtils())
    return client, transfer_client
github awslabs/autogluon/autogluon/utils/tabular/utils/savers/save_pkl.py (View on GitHub)
def save_s3(path: str, obj, pickle_fn, verbose=True):
    if verbose:
        logger.info(f'save object to {path}')
    with tempfile.TemporaryFile() as f:
        pickle_fn(obj, f)
        f.flush()
        f.seek(0)

        bucket, key = s3_utils.s3_path_to_bucket_prefix(path)
        s3_client = boto3.client('s3')
        try:
            config = boto3.s3.transfer.TransferConfig()   # enable multipart uploading for files larger than 8MB
            response = s3_client.upload_fileobj(f, bucket, key, Config=config)
        except:
            logger.exception('Failed to save object to s3')
            raise
github FredHutch/maxquant-pipeline/code/mqsubmit.py (View on GitHub)
def uploadS3(mqBucket, jobFolder, mqparams, mqconfig):
    """
    Upload the datafiles, fastafiles, configuration file, etc... needed by the job to
    the job folder in the maxquant-jobs S3 bucket
    """
    client = boto3.client('s3', 'us-west-2')
    transfer = boto3.s3.transfer.S3Transfer(client)
    print("\nUploading data file(s)...")
    for f in mqparams['mzxmlFiles']:
        sys.stdout.write("\tUploading: {0}...".format(f))
        transfer.upload_file(f, mqBucket, "{0}/{1}".format(jobFolder, f))
        print(" Done!")
    print("\nUploading FASTA file(s)...")
    for f in mqparams['fastaFiles']:
        sys.stdout.write("\tUploading: {0}...".format(f))
        transfer.upload_file(f, mqBucket, "{0}/{1}".format(jobFolder, f))
        print(" Done!")
    sys.stdout.write("\nUploading configuration file...")
    transfer.upload_file(mqconfig, mqBucket, "{0}/{1}".format(jobFolder, "mqpar.xml"))
    print(" Done!")

    # If a custom database was provided, upload it to the job folder in S3
    if 'database' in mqparams:
github dask/distributed/distributed/s3fs.py (View on GitHub)
def _fetch_range(client, bucket, key, start, end, max_attempts=10):
    logger.debug("Fetch: %s/%s, %s-%s", bucket, key, start, end)
    for i in range(max_attempts):
        try:
            resp = client.get_object(Bucket=bucket, Key=key,
                                     Range='bytes=%i-%i' % (start, end - 1))
            return resp['Body'].read()
        except boto3.s3.transfer.S3_RETRYABLE_ERRORS as e:
            logger.debug('Exception %s on S3 download, retrying', e,
                         exc_info=True)
            continue
        except ClientError as e:
            if e.response['Error'].get('Code', 'Unknown') in ['416', 'InvalidRange']:
                return b''
            else:
                raise
    raise RuntimeError("Max number of S3 retries exceeded")
github rackerlabs/lambda-uploader/lambda_uploader/uploader.py (View on GitHub)
    def _upload_s3(self, zip_file):
        '''
        Uploads the lambda package to s3
        '''
        s3_client = self._aws_session.client('s3')
        transfer = boto3.s3.transfer.S3Transfer(s3_client)
        transfer.upload_file(zip_file, self._config.s3_bucket,
                             self._config.s3_package_name())