def test_download_progress(self):
    self.contents = b'A' * 55
    self.stub_multipart_download(
        contents=self.contents, part_size=5, num_parts=11)
    transfer_config = TransferConfig(
        multipart_chunksize=5, multipart_threshold=1,
        max_concurrency=1)

    def progress_callback(amount):
        self.progress += amount
        self.progress_times_called += 1

    with self.stubber:
        self.s3.meta.client.download_fileobj(
            Bucket=self.bucket, Key=self.key, Fileobj=self.fileobj,
            Config=transfer_config, Callback=progress_callback)

    # Assert that the progress callback was called the correct number of
    # times with the correct amounts.
    self.assertEqual(self.progress_times_called, 11)
    self.assertEqual(self.progress, 55)
# Read the access key ID and secret from a simple "key = value" credentials file.
with open(filePath, 'r') as f:
    data = f.readlines()
accessID = data[1].split('=')[1].strip()
secret = data[2].split('=')[1].strip()

session = boto3.session.Session()
client = session.client(
    's3',
    region_name=plan.region,
    endpoint_url='https://' + plan.region + '.digitaloceanspaces.com',
    aws_access_key_id=accessID,
    aws_secret_access_key=secret
)
config = TransferConfig(multipart_threshold=1024 * 25, max_concurrency=10,
                        multipart_chunksize=1024 * 25, use_threads=True)
## Set Expiration for objects
try:
    client.put_bucket_lifecycle_configuration(
        Bucket='string',
        LifecycleConfiguration={
            'Rules': [
                {
                    'Expiration': {
                        # Note: S3 does not allow 'Days' and 'ExpiredObjectDeleteMarker'
                        # in the same Expiration element.
                        'Days': plan.retention,
                        'ExpiredObjectDeleteMarker': True
                    },
                    'ID': plan.name,
                    'Prefix': '',
                    # Each lifecycle rule requires a Status ('Enabled' or 'Disabled').
                    'Status': 'Enabled'
                }
            ]
        }
    )
except Exception as e:
    print(e)
try:
    # fetch the source object's current ACL so it can be re-applied after the copy
    current_acl = s3_account.get_object_acl(Bucket=bucketname, Key=s3objectkey)
except Exception as e:
    print(bucketname + ' error getting object ACL on ' + s3objectkey)
    print(e)
else:
    new_acl = {'Grants': current_acl['Grants'], 'Owner': current_acl['Owner']}
# get tags because S3.copy TaggingDirective doesn't exist
try:
    tags = s3_account.get_object_tagging(
        Bucket=bucketname,
        Key=s3objectkey,
    )
except Exception as e:
    print(e)
config = boto3.s3.transfer.TransferConfig(
    multipart_threshold=8388608,
    max_concurrency=10,
    multipart_chunksize=8388608,
    num_download_attempts=5,
    max_io_queue=100,
    io_chunksize=262144,
    use_threads=True
)
large_copy_extra_args = {
    'MetadataDirective': 'COPY',
    'StorageClass': storageclass
}
if target_encryption == 'SSE-S3':
    large_copy_extra_args['ServerSideEncryption'] = 'AES256'
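# Because S3.copy has no TaggingDirective, the tags fetched above have to be
# re-applied to the destination object once the managed copy finishes. A rough
# sketch of that follow-up step (dst_bucket and dst_key are placeholder names,
# not variables from the snippet above):
copy_source = {'Bucket': bucketname, 'Key': s3objectkey}
s3_account.copy(copy_source, dst_bucket, dst_key,
                ExtraArgs=large_copy_extra_args, Config=config)

# put_object_tagging accepts the same TagSet shape that get_object_tagging returned
s3_account.put_object_tagging(
    Bucket=dst_bucket, Key=dst_key,
    Tagging={'TagSet': tags['TagSet']})

# restore the grants/owner captured in new_acl
s3_account.put_object_acl(
    Bucket=dst_bucket, Key=dst_key,
    AccessControlPolicy=new_acl)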
def download_fileobj(self, Bucket, Key, Fileobj, ExtraArgs=None,
                     Callback=None, Config=None):
    """
    :param Callback: A method which takes a number of bytes transferred to
        be periodically called during the download.
    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing the
        download.
    """
    if not hasattr(Fileobj, 'write'):
        raise ValueError('Fileobj must implement write')

    subscribers = None
    if Callback is not None:
        subscribers = [ProgressCallbackInvoker(Callback)]

    config = Config
    if config is None:
        config = TransferConfig()

    with create_transfer_manager(self, config) as manager:
        future = manager.download(
            bucket=Bucket, key=Key, fileobj=Fileobj,
            extra_args=ExtraArgs, subscribers=subscribers)
        return future.result()
def _copy_dir(self, source_path, destination_path, threads=DEFAULT_THREADS,
              start_time=None, end_time=None, part_size=DEFAULT_PART_SIZE, **kwargs):
    start = datetime.datetime.now()
    copy_jobs = []
    management_pool = ThreadPool(processes=threads)
    transfer_config = TransferConfig(max_concurrency=threads, multipart_chunksize=part_size)

    src_bucket, src_key = self._path_to_bucket_and_key(source_path)
    dst_bucket, dst_key = self._path_to_bucket_and_key(destination_path)
    src_prefix = self._add_path_delimiter(src_key)
    dst_prefix = self._add_path_delimiter(dst_key)
    key_path_len = len(src_prefix)
    total_size_bytes = 0
    total_keys = 0
    for item in self.list(source_path, start_time=start_time, end_time=end_time, return_key=True):
        path = item.key[key_path_len:]
        # prevents copy attempt of empty key in folder
        if path != '' and path != '/':
            total_keys += 1
            total_size_bytes += item.size
            copy_source = {
                'Bucket': src_bucket,
                'Key': src_prefix + path
            }
def upload_object_via_stream(self, iterator, container, object_name, extra=None, **kwargs):
    import boto3.s3.transfer
    stream = _Stream(iterator)
    try:
        container.bucket.upload_fileobj(stream, object_name, Config=boto3.s3.transfer.TransferConfig(
            use_threads=container.config.multipart,
            max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1,
            num_download_attempts=container.config.retries))
    except Exception as ex:
        log.error('Failed uploading: %s' % ex)
        return False
    return True
def upload_file(s3_client, file_name, bucket_name, args):
    if args['key']:
        key = args['key']
    else:
        key = file_name
    print('Uploading files...')
    try:
        tc = boto3.s3.transfer.TransferConfig()
        t = boto3.s3.transfer.S3Transfer(client=s3_client, config=tc)
        t.upload_file(file_name, bucket_name, key, extra_args={'ACL': 'public-read'})
        file_url = 'https://{}.s3.amazonaws.com/{}'.format(bucket_name, key)
        print('The uploaded file is public and accessible with the following url: {}'.format(file_url))
    except S3UploadFailedError:
        print('File upload is not successful: PutObject permission missing.')
def _do_file_upload(context, this_file_path, bucketName, thisKey, hashValue):
    s3 = context.aws.client('s3')
    config = TransferConfig(
        max_concurrency=10,
        num_download_attempts=10,
    )
    transfer = S3Transfer(s3, config)
    transfer.upload_file(this_file_path, bucketName, thisKey,
                         callback=ProgressPercentage(context, this_file_path),
                         extra_args={'Metadata': {_get_meta_hash_name(): hashValue}})
    show_manifest.done_uploading(this_file_path)
    context.view.done_uploading(this_file_path)
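# Several snippets above hand the transfer layer a callable (progress_callback,
# ProgressPercentage, meta['get_callback']) that is invoked with the number of
# bytes moved since the previous call. A self-contained example of such a
# callback, assuming only that contract; the class name and print format here
# are illustrative and not taken from any of the projects above:
import os
import threading

class SimpleProgressPercentage(object):
    def __init__(self, filename):
        self._filename = filename
        self._size = float(os.path.getsize(filename))
        self._seen_so_far = 0
        # transfers may run in multiple threads, so guard the running total
        self._lock = threading.Lock()

    def __call__(self, bytes_amount):
        with self._lock:
            self._seen_so_far += bytes_amount
            percentage = (self._seen_so_far / self._size) * 100 if self._size else 100.0
            print('%s  %s / %s bytes  (%.2f%%)' % (
                self._filename, self._seen_so_far, int(self._size), percentage))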
def _native_download_file(meta, full_dst_file_name, max_concurrency):
    logger = getLogger(__name__)
    try:
        akey = SnowflakeS3Util._get_s3_object(meta, meta['src_file_name'])
        akey.download_file(
            full_dst_file_name,
            Callback=meta['get_callback'](
                meta['src_file_name'],
                meta['src_file_size'],
                output_stream=meta['get_callback_output_stream'],
                show_progress_bar=meta['show_progress_bar']) if
            meta['get_callback'] else None,
            Config=TransferConfig(
                multipart_threshold=SnowflakeS3Util.DATA_SIZE_THRESHOLD,
                max_concurrency=max_concurrency,
                num_download_attempts=10,
            )
        )
        meta['result_status'] = ResultStatus.DOWNLOADED
    except botocore.exceptions.ClientError as err:
        if err.response['Error']['Code'] == EXPIRED_TOKEN:
            meta['result_status'] = ResultStatus.RENEW_TOKEN
        else:
            logger.debug(
                "Failed to download a file: %s, err: %s",
                full_dst_file_name, err, exc_info=True)
            raise err
    except RetriesExceededError as err:
        meta['result_status'] = ResultStatus.NEED_RETRY