        # Keep only streams whose events overlap the requested time window.
        stream_name = stream["arn"].split(":")[-1]
        first_event_ts = datetime.utcfromtimestamp(stream.get("firstEventTimestamp", 0) // 1000)
        last_event_ts = datetime.utcfromtimestamp(stream.get("lastEventTimestamp", 0) // 1000)
        if args.end_time and first_event_ts > args.end_time:
            continue
        if args.start_time and last_event_ts < args.start_time:
            break
        streams.append(stream_name)
    for stream in streams:
        get_log_events_args = dict(logGroupName=args.log_group, startFromHead=True, limit=100)
        if args.start_time:
            get_log_events_args.update(startTime=int(timestamp(args.start_time) * 1000))
        if args.end_time:
            get_log_events_args.update(endTime=int(timestamp(args.end_time) * 1000))
        while True:
            # Page forward through each stream until a page comes back empty.
            page = clients.logs.get_log_events(logStreamName=stream, **get_log_events_args)
            for event in page["events"]:
                if "timestamp" not in event or "message" not in event:
                    continue
                print_log_event(event)
            if len(page["events"]) == 0 or "nextForwardToken" not in page:
                break
            get_log_events_args.update(nextToken=page["nextForwardToken"], limit=10000)
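
# The loop above calls a print_log_event helper that is not shown in this excerpt.
# A minimal sketch of what such a helper might look like (the actual formatting used
# by the surrounding code may differ):
from datetime import datetime

def print_log_event(event):
    # CloudWatch Logs timestamps are milliseconds since the Unix epoch.
    ts = datetime.utcfromtimestamp(event["timestamp"] / 1000)
    print(ts.isoformat(), event["message"].rstrip())
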
                raise
    else:
        raise Exception("cloud-init encountered errors")
    sys.stderr.write(GREEN("OK") + "\n")

description = "Built by {} for {}".format(__name__, ARN.get_iam_username())
for existing_ami in resources.ec2.images.filter(Owners=["self"], Filters=[{"Name": "name", "Values": [args.name]}]):
logger.info("Deleting existing image {}".format(existing_ami))
existing_ami.deregister()
image = instance.create_image(Name=args.name, Description=description, BlockDeviceMappings=get_bdm())
tags = dict(tag.split("=", 1) for tag in args.tags)
base_ami = resources.ec2.Image(args.ami)
tags.update(Owner=ARN.get_iam_username(), AegeaVersion=__version__,
Base=base_ami.id, BaseName=base_ami.name, BaseDescription=base_ami.description or "")
add_tags(image, **tags)
logger.info("Waiting for %s to become available...", image.id)
clients.ec2.get_waiter("image_available").wait(ImageIds=[image.id])
while resources.ec2.Image(image.id).state != "available":
sys.stderr.write(".")
sys.stderr.flush()
time.sleep(1)
instance.terminate()
return dict(ImageID=image.id, **tags)
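
# The image-building code above relies on an add_tags helper that is not shown here.
# A plausible minimal implementation, assuming it simply applies key/value pairs as
# EC2 tags to a taggable boto3 resource (the real helper may add retries or other
# handling):
def add_tags(resource, **tags):
    resource.create_tags(Tags=[dict(Key=k, Value=v) for k, v in tags.items()])
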
def credential_report(self):
    if "credential_report" not in self.cache:
        iam = clients.iam
        iam.generate_credential_report()
        while True:
            try:
                self.cache["credential_report"] = iam.get_credential_report()
                break
            except ClientError as e:
                expect_error_codes(e, "ReportInProgress")
    return csv.DictReader(self.cache["credential_report"]["Content"].decode("utf-8").splitlines())
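
# Usage sketch, assuming `account` is an instance of the class that defines
# credential_report() above. Column names follow the documented IAM credential
# report CSV format (user, password_enabled, mfa_active, ...):
for row in account.credential_report():
    if row.get("password_enabled") == "true" and row.get("mfa_active") == "false":
        print("User {} has a console password but no MFA".format(row["user"]))
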
def ls(args):
    table = []
    describe_repositories_args = dict(repositoryNames=args.repositories) if args.repositories else {}
    for repo in paginate(clients.ecr.get_paginator("describe_repositories"), **describe_repositories_args):
        try:
            res = clients.ecr.get_repository_policy(repositoryName=repo["repositoryName"])
            repo["policy"] = json.loads(res["policyText"])
        except clients.ecr.exceptions.RepositoryPolicyNotFoundException:
            pass
        orig_len = len(table)
        for image in paginate(clients.ecr.get_paginator("describe_images"), repositoryName=repo["repositoryName"]):
            table.append(dict(image, **repo))
        if len(table) == orig_len:
            table.append(repo)
    page_output(tabulate(table, args))
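
# The paginate() helper used above is assumed to flatten a boto3 paginator into
# individual result items. For describe_images, the equivalent direct pagination
# looks like this (imageDetails is the result key documented for ECR DescribeImages):
def iter_ecr_images(ecr_client, repository_name):
    paginator = ecr_client.get_paginator("describe_images")
    for page in paginator.paginate(repositoryName=repository_name):
        for image in page["imageDetails"]:
            yield image
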
def configure(args):
    bucket_name = args.billing_reports_bucket.format(account_id=ARN.get_account_id())
    bucket_policy = IAMPolicyBuilder(principal="arn:aws:iam::386209384616:root",
                                     action=["s3:GetBucketAcl", "s3:GetBucketPolicy"],
                                     resource="arn:aws:s3:::{}".format(bucket_name))
    bucket_policy.add_statement(principal="arn:aws:iam::386209384616:root",
                                action=["s3:PutObject"],
                                resource="arn:aws:s3:::{}/*".format(bucket_name))
    bucket = ensure_s3_bucket(bucket_name, policy=bucket_policy)
    try:
        clients.cur.put_report_definition(ReportDefinition=dict(ReportName=__name__,
                                                                TimeUnit="HOURLY",
                                                                Format="textORcsv",
                                                                Compression="GZIP",
                                                                S3Bucket=bucket.name,
                                                                S3Prefix="aegea",
                                                                S3Region=clients.cur.meta.region_name,
                                                                AdditionalSchemaElements=["RESOURCES"]))
    except clients.cur.exceptions.DuplicateReportNameException:
        pass
    print("Configured cost and usage reports. Enable cost allocation tags: http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/activate-built-in-tags.html.")  # noqa
    if args.subnet:
        subnet = resources.ec2.Subnet(args.subnet)
        vpc = resources.ec2.Vpc(subnet.vpc_id)
    else:
        vpc = ensure_vpc()
        subnet = ensure_subnet(vpc, availability_zone=args.availability_zone)
    if not subnet.map_public_ip_on_launch:
        raise AegeaException("Subnets without public IP mapping are not supported")
    if args.security_groups:
        security_groups = [resolve_security_group(sg, vpc) for sg in args.security_groups]
    else:
        security_groups = [ensure_security_group(__name__, vpc)]
    if args.efs_home:
        for filesystem in clients.efs.describe_file_systems()["FileSystems"]:
            if {"Key": "mountpoint", "Value": "/home"} in filesystem["Tags"]:
                break
        else:
            create_efs_args = ["aegea_home", "--tags", "mountpoint=/home", "managedBy=aegea"]
            create_efs(parser_create_efs.parse_args(create_efs_args))
        security_groups.append(resolve_security_group(efs_security_group_name, vpc))
    ssh_host_key = new_ssh_key()
    user_data_args = dict(host_key=ssh_host_key,
                          commands=get_startup_commands(args, ARN.get_iam_username()),
                          packages=args.packages,
                          storage=args.storage)
    if args.bless_config:
        with open(args.bless_config) as fh:
            bless_config = yaml.safe_load(fh)
        user_data_args["ssh_ca_keys"] = get_ssh_ca_keys(bless_config)
def put_alarm(args):
    sns = resources.sns
    logs = clients.logs
    cloudwatch = clients.cloudwatch
    topic = sns.create_topic(Name=args.alarm_name)
    topic.subscribe(Protocol="email", Endpoint=args.email)
    logs.put_metric_filter(logGroupName=args.log_group_name,
                           filterName=args.alarm_name,
                           filterPattern=args.pattern,
                           metricTransformations=[dict(metricName=args.alarm_name,
                                                       metricNamespace=__name__,
                                                       metricValue="1")])
    cloudwatch.put_metric_alarm(AlarmName=args.alarm_name,
                                MetricName=args.alarm_name,
                                Namespace=__name__,
                                Statistic="Sum",
                                Period=300,
                                Threshold=1,
                                ComparisonOperator="GreaterThanOrEqualToThreshold",

def resolve_instance_ids(input_names):
    ids = [n for n in input_names if n.startswith("i-")]
    names = [n for n in input_names if not n.startswith("i-")]
    if names:
        descriptions = clients.ec2.describe_instances(Filters=[dict(Name="tag:Name", Values=names)])
        for reservation in descriptions["Reservations"]:
            for instance in reservation["Instances"]:
                ids.append(instance["InstanceId"])
    if len(ids) != len(input_names):
        raise Exception("Unable to resolve one or more of the instance names")
    return ids, names
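
# Usage sketch (the instance ID and Name tag below are placeholders). The length check
# above raises whenever the number of resolved IDs differs from the number of inputs,
# which also covers the case where a Name tag matches more than one instance:
ids, names = resolve_instance_ids(["i-0123456789abcdef0", "web-server-1"])
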
    if not args.end_time:
        args.end_time = Timestamp.match_precision(Timestamp("-0s"), args.start_time)
    export_task_args = dict(logGroupName=args.log_group,
                            fromTime=int(timestamp(args.start_time) * 1000),
                            to=int(timestamp(args.end_time) * 1000),
                            destination=bucket.name)
    if args.log_stream:
        export_task_args.update(logStreamNamePrefix=args.log_stream)
    cache_key = hashlib.sha256(json.dumps(export_task_args, sort_keys=True).encode()).hexdigest()[:32]
    export_task_args.update(destinationPrefix=cache_key)
    for log_object in bucket.objects.filter(Prefix=cache_key):
        logger.debug("Reusing completed export task %s", log_object.key)
        break
    else:
        logger.debug("Starting new log export task %s", export_task_args)
        task_desc = clients.logs.create_export_task(**export_task_args)
        try:
            while task_desc.get("status", {}).get("code") != "COMPLETED":
                res = clients.logs.describe_export_tasks(taskId=task_desc["taskId"])
                assert len(res["exportTasks"]) == 1
                task_desc = res["exportTasks"][0]
                if task_desc["status"]["code"] in {"CANCELLED", "FAILED"}:
                    raise Exception("Log export task failed: " + task_desc["status"]["message"])
                msg = "log export task: {logGroupName} {from}..{to} -> s3://{destination}/{destinationPrefix} %s"
                logger.info(msg.format(**task_desc), task_desc["status"]["code"])
                time.sleep(1)
        finally:
            try:
                clients.logs.cancel_export_task(taskId=task_desc["taskId"])
                # TODO: if cancel successful, clean up s3 prefix
            except Exception:
                pass
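
# Once an export task completes, the exported log data lands under the cache_key prefix
# as gzip-compressed objects. A sketch of reading it back, assuming the same boto3 bucket
# resource as above (the object layout under the prefix is determined by the CloudWatch
# Logs export service):
import gzip

def read_exported_logs(bucket, prefix):
    for obj in bucket.objects.filter(Prefix=prefix):
        body = obj.get()["Body"].read()
        if obj.key.endswith(".gz"):
            body = gzip.decompress(body)
        yield obj.key, body
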
def upload_bootstrap_asset(cloud_config_data, rootfs_skel_dirs):
    key_name = "".join(random.choice(string.ascii_letters) for _ in range(32))
    enc_key = "".join(random.choice(string.ascii_letters) for _ in range(32))
    logger.info("Uploading bootstrap asset %s to S3", key_name)
    bucket = ensure_s3_bucket()
    cipher = subprocess.Popen(["openssl", "aes-256-cbc", "-e", "-k", enc_key],
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    encrypted_tarfile = cipher.communicate(get_bootstrap_files(rootfs_skel_dirs, dest="tarfile"))[0]
    bucket.upload_fileobj(io.BytesIO(encrypted_tarfile), key_name)
    url = clients.s3.generate_presigned_url(ClientMethod="get_object", Params=dict(Bucket=bucket.name, Key=key_name))
    cmd = "curl -s '{url}' | openssl aes-256-cbc -d -k {key} | tar -xz --no-same-owner -C /"
    cloud_config_data["runcmd"].insert(0, cmd.format(url=url, key=enc_key))
    del cloud_config_data["write_files"]
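
# Usage sketch: the function mutates the cloud-config dict in place, prepending a runcmd
# entry that downloads and unpacks the encrypted bootstrap tarball at boot, and dropping
# the write_files section (presumably because those files are delivered via the tarball
# instead). The values below are placeholders; note that the presigned URL uses boto3's
# default expiry of 3600 seconds unless ExpiresIn is passed.
cloud_config = {"runcmd": ["touch /var/tmp/provisioned"],
                "write_files": [{"path": "/etc/example.conf", "content": "key=value\n"}]}
upload_bootstrap_asset(cloud_config, rootfs_skel_dirs=["rootfs.skel"])
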