        stream_name = stream["arn"].split(":")[-1]
        first_event_ts = datetime.utcfromtimestamp(stream.get("firstEventTimestamp", 0) // 1000)
        last_event_ts = datetime.utcfromtimestamp(stream.get("lastEventTimestamp", 0) // 1000)
        # Assuming streams are listed newest-first by last event time: skip streams that start
        # after the requested window, and stop once we pass streams that end before it.
        if args.end_time and first_event_ts > args.end_time:
            continue
        if args.start_time and last_event_ts < args.start_time:
            break
        streams.append(stream_name)
    for stream in streams:
        get_log_events_args = dict(logGroupName=args.log_group, startFromHead=True, limit=100)
        if args.start_time:
            get_log_events_args.update(startTime=int(timestamp(args.start_time) * 1000))
        if args.end_time:
            get_log_events_args.update(endTime=int(timestamp(args.end_time) * 1000))
        while True:
            page = clients.logs.get_log_events(logStreamName=stream, **get_log_events_args)
            for event in page["events"]:
                if "timestamp" not in event or "message" not in event:
                    continue
                print_log_event(event)
            if len(page["events"]) == 0 or "nextForwardToken" not in page:
                break
            # Resume from the forward token returned by the previous page.
            get_log_events_args.update(nextToken=page["nextForwardToken"], limit=10000)
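
# A minimal, self-contained sketch of the get_log_events pagination pattern used above,
# written against plain boto3 (the clients/paginate helpers here are this repo's own).
# The log group and stream names are illustrative. get_log_events always returns a
# nextForwardToken; the stream is exhausted when the token stops changing or a page is empty.
import boto3

def iter_log_events(log_group="/example/group", log_stream="example-stream"):
    logs_client = boto3.client("logs")
    kwargs = dict(logGroupName=log_group, logStreamName=log_stream, startFromHead=True)
    while True:
        page = logs_client.get_log_events(**kwargs)
        yield from page["events"]
        token = page.get("nextForwardToken")
        if not page["events"] or token is None or token == kwargs.get("nextToken"):
            break
        kwargs["nextToken"] = token
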
def logs(args):
    if args.log_group and (args.log_stream or args.start_time or args.end_time):
        if args.export:
            return export_and_print_log_events(args)
        else:
            return print_log_events(args)
    table = []
    group_cols = ["logGroupName"]
    stream_cols = ["logStreamName", "lastIngestionTime", "storedBytes"]
    args.columns = group_cols + stream_cols
    for group in paginate(clients.logs.get_paginator("describe_log_groups")):
        if args.log_group and group["logGroupName"] != args.log_group:
            continue
        n = 0
        for stream in paginate(clients.logs.get_paginator("describe_log_streams"),
                               logGroupName=group["logGroupName"], orderBy="LastEventTime", descending=True):
            now = datetime.utcnow().replace(microsecond=0)
            # Replace the epoch timestamp with the age of the last ingestion for display.
            stream["lastIngestionTime"] = now - datetime.utcfromtimestamp(stream.get("lastIngestionTime", 0) // 1000)
            table.append(dict(group, **stream))
            n += 1
            if n >= args.max_streams_per_group:
                break
    page_output(tabulate(table, args))
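
# For orientation only: a hypothetical, simplified stand-in for the paginate() helper used
# above (the real helper in this repo may differ). It flattens the pages produced by a boto3
# paginator into a single iterator of items under a given result key.
def paginate_sketch(paginator, result_key, **kwargs):
    for page in paginator.paginate(**kwargs):
        yield from page.get(result_key, [])

# e.g. paginate_sketch(clients.logs.get_paginator("describe_log_groups"), "logGroups")
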
" service sshd reload)")
    if provision_users:
        # TODO: UIDs should be deterministic
        # uid_bytes = hashlib.sha256(username.encode()).digest()[-2:]
        # uid = 2000 + (int.from_bytes(uid_bytes, byteorder=sys.byteorder) // 2)
        cloud_config_data["users"] = [dict(name=u, gecos="", sudo="ALL=(ALL) NOPASSWD:ALL") for u in provision_users]
    for key in sorted(kwargs):
        cloud_config_data[key] = kwargs[key]
    if host_key is not None:
        buf = StringIO()
        host_key.write_private_key(buf)
        cloud_config_data["ssh_keys"] = dict(rsa_private=buf.getvalue(),
                                             rsa_public=get_public_key_from_pair(host_key))
    payload = encode_cloud_config_payload(cloud_config_data, mime_multipart_archive=mime_multipart_archive)
    if len(payload) >= 16384:
        # EC2 user data is limited to 16 KB; fall back to uploading the bootstrap assets separately.
        logger.warn("Cloud-init payload is too large to be passed in user data, extracting rootfs.skel")
        upload_bootstrap_asset(cloud_config_data, rootfs_skel_dirs)
        payload = encode_cloud_config_payload(cloud_config_data, mime_multipart_archive=mime_multipart_archive)
    return payload
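
# A minimal sketch of the deterministic-UID idea noted in the TODO above: derive a stable UID
# in the 2000-34767 range from a hash of the username, so the same user gets the same UID on
# every provisioned instance (illustrative only, not wired into the code above).
import hashlib
import sys

def deterministic_uid(username):
    uid_bytes = hashlib.sha256(username.encode()).digest()[-2:]
    return 2000 + int.from_bytes(uid_bytes, byteorder=sys.byteorder) // 2
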
def ls(args):
    bucket = resources.s3.Bucket(args.billing_reports_bucket.format(account_id=ARN.get_account_id()))
    now = datetime.utcnow()
    year = args.year or now.year
    month = str(args.month or now.month).zfill(2)
    # The manifest key covers a one-month range; compute the following month, wrapping December into January.
    next_year = year + (args.month or now.month) // 12
    next_month = str((args.month or now.month) % 12 + 1).zfill(2)
    manifest_name = "aegea/{report}/{yr}{mo}01-{next_yr}{next_mo}01/{report}-Manifest.json"
    manifest_name = manifest_name.format(report=__name__, yr=year, mo=month, next_yr=next_year, next_mo=next_month)
    try:
        manifest = json.loads(bucket.Object(manifest_name).get().get("Body").read())
        for report_key in manifest["reportKeys"]:
            report = BytesIO(bucket.Object(report_key).get().get("Body").read())
            # Open the gzipped report in text mode so csv.DictReader receives str rows.
            with gzip.open(report, mode="rt") as fh:
                reader = csv.DictReader(fh)
                page_output(tabulate(filter_line_items(reader, args), args))
            break
        except ClientError as e:
            if i + 1 < len(devices) and re.search("InvalidParameterValue.+Attachment point.+is already in use", str(e)):
                # This block device mapping node is taken; try the next candidate device name.
                logger.warn("BDM node %s is already in use, looking for next available node", devices[i])
                continue
            raise
    # get_waiter(...).wait() blocks until the volume is attached and returns None;
    # res (the attach response captured above) is what gets returned at the end.
    clients.ec2.get_waiter("volume_in_use").wait(VolumeIds=[args.volume_id])
    if args.format or args.mount:
        # Wait for the kernel to surface the device node before formatting or mounting.
        for i in range(30):
            try:
                find_devnode(args.volume_id)
                break
            except Exception:
                logger.debug("Waiting for device node to appear for %s", args.volume_id)
                time.sleep(1)
    if args.format:
        logger.info("Formatting %s (%s)", args.volume_id, find_devnode(args.volume_id))
        label = get_fs_label(args.volume_id)
        command = get_mkfs_command(fs_type=args.format, label=label) + find_devnode(args.volume_id)
        subprocess.check_call(command, shell=True, stdout=sys.stderr.buffer)
    if args.mount:
        logger.info("Mounting %s at %s", args.volume_id, args.mount)
        subprocess.check_call(["mount", find_devnode(args.volume_id), args.mount], stdout=sys.stderr.buffer)
    return res

parser_attach = register_parser(attach, parent=ebs_parser, help="Attach an EBS volume to an EC2 instance")
def ensure_subnet(vpc, availability_zone=None):
    if availability_zone is not None and availability_zone not in availability_zones():
        msg = "Unknown availability zone {} (choose from {})"
        raise AegeaException(msg.format(availability_zone, list(availability_zones())))
    # for...else: the else clause runs only if no existing subnet was selected (no break),
    # in which case subnets are created for every availability zone.
    for subnet in vpc.subnets.all():
        if availability_zone is not None and subnet.availability_zone != availability_zone:
            continue
        break
    else:
        from ipaddress import ip_network
        from ... import config
        subnet_cidrs = ip_network(str(config.vpc.cidr[ARN.get_region()])).subnets(new_prefix=config.vpc.subnet_prefix)
        subnets = {}
        for az, subnet_cidr in zip(availability_zones(), subnet_cidrs):
            logger.info("Creating subnet with CIDR %s in %s, %s", subnet_cidr, vpc, az)
            subnets[az] = resources.ec2.create_subnet(VpcId=vpc.id, CidrBlock=str(subnet_cidr), AvailabilityZone=az)
            clients.ec2.get_waiter("subnet_available").wait(SubnetIds=[subnets[az].id])
            add_tags(subnets[az], Name=__name__)
            clients.ec2.modify_subnet_attribute(SubnetId=subnets[az].id,
                                                MapPublicIpOnLaunch=dict(Value=config.vpc.map_public_ip_on_launch))
        subnet = subnets[availability_zone] if availability_zone is not None else list(subnets.values())[0]
    return subnet
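
# A short illustration of the standard-library call used above to carve per-AZ subnets out of
# the VPC CIDR; the CIDR block, prefix length, and AZ names are examples, not this repo's defaults.
from ipaddress import ip_network

for az, subnet_cidr in zip(["us-west-2a", "us-west-2b", "us-west-2c"],
                           ip_network("10.0.0.0/16").subnets(new_prefix=20)):
    print(az, subnet_cidr)  # 10.0.0.0/20, 10.0.16.0/20, 10.0.32.0/20
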
def watch(args):
    _, cluster, task_id = ARN(args.task_arn).resource.split("/")
    logger.info("Watching task %s (%s)", task_id, cluster)
    last_status, events_received = None, 0
    while last_status != "STOPPED":
        res = clients.ecs.describe_tasks(cluster=cluster, tasks=[args.task_arn])
        if len(res["tasks"]) == 1:
            task_desc = res["tasks"][0]
            if task_desc["lastStatus"] != last_status:
                logger.info("Task %s %s", args.task_arn, format_task_status(task_desc["lastStatus"]))
                last_status = task_desc["lastStatus"]
        try:
            # awslogs log streams for ECS tasks are named <prefix>/<container name>/<task ID>.
            for event in CloudwatchLogReader("/".join([args.task_name, args.task_name, task_id]),
                                             log_group_name=args.task_name):
                print(str(Timestamp(event["timestamp"])), event["message"])
                events_received += 1
        except ClientError as e:
            expect_error_codes(e, "ResourceNotFoundException")
        if last_status is None and events_received > 0:
            break  # Logs retrieved successfully but task record is no longer in ECS
        time.sleep(1)
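
# For context: a hypothetical, simplified version of the expect_error_codes() pattern used
# above (the real helper lives elsewhere in this repo). `exception` is a botocore ClientError;
# the named error codes are swallowed and everything else is re-raised.
def expect_error_codes_sketch(exception, *codes):
    if exception.response.get("Error", {}).get("Code") not in codes:
        raise exception
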
def watch(args):
    job_desc = get_job_desc(args.job_id)
    args.job_name = job_desc["jobName"]
    logger.info("Watching job %s (%s)", args.job_id, args.job_name)
    last_status = None
    while last_status not in {"SUCCEEDED", "FAILED"}:
        job_desc = get_job_desc(args.job_id)
        if job_desc["status"] != last_status:
            logger.info("Job %s %s", args.job_id, format_job_status(job_desc["status"]))
            last_status = job_desc["status"]
            if job_desc["status"] in {"RUNNING", "SUCCEEDED", "FAILED"}:
                logger.info("Job %s log stream: %s", args.job_id, job_desc.get("container", {}).get("logStreamName"))
        save_job_desc(job_desc)
        if job_desc["status"] in {"RUNNING", "SUCCEEDED", "FAILED"} and "logStreamName" in job_desc["container"]:
            args.log_stream_name = job_desc["container"]["logStreamName"]
            get_logs(args)
        if "statusReason" in job_desc:
            logger.info("Job %s: %s", args.job_id, job_desc["statusReason"])
        if job_desc.get("container", {}).get("exitCode"):
            return SystemExit(job_desc["container"]["exitCode"])
            raise
    else:
        raise Exception("cloud-init encountered errors")
    sys.stderr.write(GREEN("OK") + "\n")
    description = "Built by {} for {}".format(__name__, ARN.get_iam_username())
    # AMI names must be unique per account and Region, so deregister any previous image with this name.
    for existing_ami in resources.ec2.images.filter(Owners=["self"], Filters=[{"Name": "name", "Values": [args.name]}]):
        logger.info("Deleting existing image {}".format(existing_ami))
        existing_ami.deregister()
    image = instance.create_image(Name=args.name, Description=description, BlockDeviceMappings=get_bdm())
    tags = dict(tag.split("=", 1) for tag in args.tags)
    base_ami = resources.ec2.Image(args.ami)
    tags.update(Owner=ARN.get_iam_username(), AegeaVersion=__version__,
                Base=base_ami.id, BaseName=base_ami.name, BaseDescription=base_ami.description or "")
    add_tags(image, **tags)
    logger.info("Waiting for %s to become available...", image.id)
    clients.ec2.get_waiter("image_available").wait(ImageIds=[image.id])
    while resources.ec2.Image(image.id).state != "available":
        sys.stderr.write(".")
        sys.stderr.flush()
        time.sleep(1)
    instance.terminate()
    return dict(ImageID=image.id, **tags)
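
# A minimal sketch of tuning a boto3 waiter: image creation for large volumes can exceed the
# default polling budget, so the interval and attempt count can be raised via WaiterConfig.
# The client construction and the AMI ID below are illustrative.
import boto3

ec2 = boto3.client("ec2")
ec2.get_waiter("image_available").wait(ImageIds=["ami-0123456789abcdef0"],
                                       WaiterConfig={"Delay": 15, "MaxAttempts": 120})
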
def find_acm_cert(dns_name):
    for cert in paginate(clients.acm.get_paginator("list_certificates")):
        cert.update(clients.acm.describe_certificate(CertificateArn=cert["CertificateArn"])["Certificate"])
        for name in cert["SubjectAlternativeNames"]:
            # Accept an exact SAN match or a wildcard covering the same parent domain.
            if name in [dns_name, ".".join(["*"] + dns_name.split(".")[1:])]:
                return cert
    raise AegeaException("Unable to find ACM certificate for {}".format(dns_name))
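
# Illustration of the wildcard form built above: a request for "api.example.com" also matches
# a certificate whose subject alternative names include "*.example.com".
dns_name = "api.example.com"
wildcard = ".".join(["*"] + dns_name.split(".")[1:])
assert wildcard == "*.example.com"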