How to use the aiobotocore.get_session function in aiobotocore

To help you get started, we’ve selected a few aiobotocore examples based on popular ways it is used in public projects.

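Before the project examples, here is a minimal, self-contained sketch of the basic pattern (the S3 call and region are placeholders; note that in current aiobotocore releases the import is from aiobotocore.session, while most of the examples below use the older module-level aiobotocore.get_session()):

import asyncio
from aiobotocore.session import get_session  # older releases exposed aiobotocore.get_session()

async def main():
    session = get_session()
    # create_client() returns an async context manager yielding a botocore-style client
    async with session.create_client("s3", region_name="us-east-1") as client:
        resp = await client.list_buckets()
        print([bucket["Name"] for bucket in resp["Buckets"]])

asyncio.run(main())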

GitHub: HDFGroup/hsds/tools/root_scan.py
    if not isValidUuid(rootid):
        print("Invalid root id!")
        sys.exit(1)

    if not isSchema2Id(rootid):
        print("This tool can only be used with Schema v2 ids")
        sys.exit(1)


    # we need to set up an asyncio loop to query S3
    loop = asyncio.get_event_loop()

    app = {}
    app["bucket_name"] = config.get("bucket_name")
    app["loop"] = loop
    session = get_session()
    app["session"] = session

    # need the metadata cache since we will be calling into some SN methods
    metadata_mem_cache_size = int(config.get("metadata_mem_cache_size"))
    app['meta_cache'] = LruCache(mem_target=metadata_mem_cache_size, chunk_cache=False)
   
    loop.run_until_complete(run_scan(app, rootid=rootid, update=do_update))

    loop.close()

    results = app["scanRoot_results"]
    datasets = results["datasets"]
    lastModified = datetime.fromtimestamp(results["lastModified"])
    print(f"lastModified: {lastModified}")
    if "md5_sum" in results:
        checksum = results["md5_sum"]
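These HSDS tools follow the older asyncio idiom of creating a loop explicitly, driving a single coroutine with loop.run_until_complete(), and closing the loop by hand; on Python 3.7+ the same flow is usually written as asyncio.run(run_scan(app, rootid=rootid, update=do_update)).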
GitHub: HDFGroup/hsds/tools/get_chunk_values.py
def main():
    if len(sys.argv) == 1 or sys.argv[1] == "-h" or sys.argv[1] == "--help":
        printUsage()
        sys.exit(1)

    chunk_id = sys.argv[-1]
    if not isValidChunkId(chunk_id):
        print("Invalid chunk id")
        sys.exit(1)

    # we need to set up an asyncio loop to query S3
    loop = asyncio.get_event_loop()
    session = get_session(loop=loop)

    app = {}
    app["session"] = session
    app['bucket_name'] = config.get("bucket_name")
    app['node_count'] = 1
    app['node_number'] = 0  
    app['deleted_ids'] = set()
    app['pending_s3_read'] = {}
    app['meta_cache'] = LruCache(mem_target=1024*1024, chunk_cache=False)
    app['chunk_cache'] = LruCache(mem_target=64*1024*1024, chunk_cache=True)
    domain = config.get("domain")
    if not domain:
        printUsage()
        sys.exit(-1)
    print("got domain:", domain)
GitHub: HDFGroup/hsds/tools/delete_bucket.py
def main():

    if len(sys.argv) > 1 and (sys.argv[1] == "-h" or sys.argv[1] == "--help"):
        printUsage()
        sys.exit(1)

    # we need to set up an asyncio loop to query S3
    loop = asyncio.get_event_loop()
    #loop.run_until_complete(init(loop))
    session = get_session()

    app = {}
    app['bucket_name'] = config.get("bucket_name")
    app["session"] = session
    app["loop"] = loop

    loop.run_until_complete(deleteAll(app))
    #releaseClient(app)

    loop.close()

    print("done!")
GitHub: HDFGroup/hsds/tools/bucket_scan.py
    printUsage()


    if len(sys.argv) > 1 and sys.argv[1] == "--update":
        do_update = True
    else:
        do_update = False


    # we need to set up an asyncio loop to query S3
    loop = asyncio.get_event_loop()

    app = {}
    app["bucket_name"] = config.get("bucket_name")
    app["loop"] = loop
    session = get_session()
    app["session"] = session
    loop.run_until_complete(run_scan(app, update=do_update))

    loop.close()

    results = app["bucket_scan"]
    print("root_count:", results["root_count"])
    print("info_count:", results["info_count"])
    print("group_count", results["group_count"])
    print("dataset_count:", results["dataset_count"])
    print("datatype_count", results["datatype_count"])
    print("chunk_count:", results["chunk_count"])
    print('allocated_bytes:', results["allocated_bytes"])
    print("metadata_bytes:", results["metadata_bytes"])
    print("updated_count:", results["updated_count"])
GitHub: home-assistant/home-assistant/homeassistant/components/aws/notify.py
async def get_available_regions(hass, service):
    """Get available regions for a service."""
    import aiobotocore

    session = aiobotocore.get_session()
    # get_available_regions is not a coroutine since it does not perform
    # network I/O, but it still does heavy file I/O, so run it in an
    # executor thread to avoid blocking the event loop
    return await hass.async_add_executor_job(session.get_available_regions, service)
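Outside Home Assistant, the same offloading works with the event loop's default executor. A minimal sketch (the wrapper function is an assumption for illustration; get_available_regions itself comes from the underlying botocore session API):

import asyncio
from aiobotocore.session import get_session

async def available_regions(service: str):
    session = get_session()
    loop = asyncio.get_running_loop()
    # get_available_regions() is synchronous and reads botocore's bundled
    # endpoint data from disk, so run it in the default thread-pool executor
    return await loop.run_in_executor(None, session.get_available_regions, service)

print(asyncio.run(available_regions("sns")))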
GitHub: guyingbo/cowfish/cowfish/kinesis.py
def __init__(
        self,
        stream_name: str,
        region_name: Optional[str] = None,
        encode_func: Optional[Callable] = None,
        key_func: Optional[Callable] = None,
        *,
        worker_params: Optional[dict] = None,
        client_params: Optional[dict] = None,
    ):
        self.session = aiobotocore.get_session()
        self.stream_name = stream_name
        self.encode_func = encode_func or (lambda o: json.dumps(o).encode())
        self.key_func = key_func
        self.client_params = client_params or {}
        self.client_params["region_name"] = region_name
        worker_params = worker_params or {}
        worker_params.setdefault("name", "KinesisWorker")
        self.worker = BatchWorker(self.write_batch, **worker_params)
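The constructor only stores the session; a client is opened later from async code. A hedged sketch of what the batch writer might look like (the method name write_batch comes from the constructor above, but the body here is an illustration, not cowfish's actual implementation; the fallback PartitionKey is a placeholder):

    async def write_batch(self, batch):
        # open a client from the session captured in __init__
        async with self.session.create_client("kinesis", **self.client_params) as client:
            records = [
                {
                    "Data": self.encode_func(obj),
                    "PartitionKey": self.key_func(obj) if self.key_func else "0",
                }
                for obj in batch
            ]
            await client.put_records(StreamName=self.stream_name, Records=records)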
GitHub: mozilla-services/buildhub/jobs/buildhub/s3_inventory_to_kinto.py
    # Convert the simple env var integer to a datetime.datetime instance.
    if MIN_AGE_LAST_MODIFIED_HOURS:
        assert MIN_AGE_LAST_MODIFIED_HOURS > 0, MIN_AGE_LAST_MODIFIED_HOURS
        min_last_modified = datetime.datetime.utcnow() - datetime.timedelta(
            hours=MIN_AGE_LAST_MODIFIED_HOURS
        )
        # Make it timezone aware (to UTC)
        min_last_modified = min_last_modified.replace(
            tzinfo=datetime.timezone.utc
        )

    # Fetch all existing records from Kinto as a big dict
    existing = fetch_existing(kinto_client)

    # Download CSVs, deduce records and push to Kinto.
    session = aiobotocore.get_session(loop=loop)
    boto_config = botocore.config.Config(signature_version=botocore.UNSIGNED)
    async with session.create_client(
        's3', region_name=REGION_NAME, config=boto_config
    ) as client:
        for inventory in inventories:
            files_stream = list_manifest_entries(loop, client, inventory)
            csv_stream = download_csv(loop, client, files_stream)
            records_stream = csv_to_records(
                loop,
                csv_stream,
                skip_incomplete=True,
                min_last_modified=min_last_modified,
            )
            await to_kinto_main(
                loop,
                records_stream,
GitHub: kalaspuff/tomodachi/tomodachi/transport/aws_sns_sqs.py
def create_client(cls: Any, name: str, context: Dict) -> None:
        logging.getLogger('botocore.vendored.requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)

        if not cls.clients:
            cls.clients = {}
            cls.clients_creation_time = {}
        loop = asyncio.get_event_loop()
        session = aiobotocore.get_session()

        config_base = context.get('options', {}).get('aws_sns_sqs', context.get('options', {}).get('aws', {}))
        aws_config_base = context.get('options', {}).get('aws', {})

        region_name = config_base.get('aws_region_name', config_base.get('region_name')) or \
            aws_config_base.get('aws_region_name', aws_config_base.get('region_name'))
        aws_secret_access_key = config_base.get('aws_secret_access_key', config_base.get('secret_access_key')) or \
            aws_config_base.get('aws_secret_access_key', aws_config_base.get('secret_access_key'))
        aws_access_key_id = config_base.get('aws_access_key_id', config_base.get('access_key_id')) or \
            aws_config_base.get('aws_access_key_id', aws_config_base.get('access_key_id'))
        endpoint_url = config_base.get('aws_endpoint_urls', config_base.get('endpoint_urls', {})).get(name) or \
            config_base.get('aws_{}_endpoint_url'.format(name), config_base.get('{}_endpoint_url'.format(name))) or \
            aws_config_base.get('aws_endpoint_urls', aws_config_base.get('endpoint_urls', {})).get(name) or \
            config_base.get('aws_endpoint_url', config_base.get('endpoint_url')) or \
            aws_config_base.get('aws_endpoint_url', aws_config_base.get('endpoint_url')) or \
            context.get('options', {}).get('aws_endpoint_urls', {}).get(name)
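The fallback chain resolves an endpoint URL for the named client from increasingly general configuration keys: a per-service endpoint mapping, a service-specific key, and finally a generic endpoint URL. This makes it straightforward to point individual clients at a non-AWS endpoint (for example, a local emulator) during testing.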
GitHub: nteract/bookstore/bookstore/archive.py
def __init__(self, *args, **kwargs):
        super(FileContentsManager, self).__init__(*args, **kwargs)

        # opt ourselves into being part of the Jupyter App that should have Bookstore Settings applied
        self.settings = BookstoreSettings(parent=self)

        self.log.info(
            "Archiving notebooks to {}".format(
                s3_display_path(self.settings.s3_bucket, self.settings.workspace_prefix)
            )
        )

        try:
            # create a session object from the current event loop
            self.session = aiobotocore.get_session()
        except Exception:
            self.log.warning("Unable to create a session")
            raise

        # a collection of locks per path to suppress writing while the path may be in use
        self.path_locks: Dict[str, Lock] = {}
        self.path_lock_ready = Lock()