How to use the aegea.ls.register_parser function in aegea

To help you get started, we’ve selected a few aegea examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.

github kislyuk / aegea / aegea / aegea_config.py View on Github external
else:
                collector.append([path.lstrip(".") + "." + k, repr(v)])
    collector = []
    collect_kv(config, "", collector)
    page_output(format_table(collector))

# Expose `ls` as a listing subcommand (standard table/column options) under
# the `configure` command group.
ls_parser = register_listing_parser(ls, parent=configure_parser)

def get(args):
    """Print one Aegea configuration parameter as JSON.

    The parameter is addressed by a dot-separated path (e.g. "a.b.c"),
    resolved attribute by attribute starting from the config root.
    """
    from . import config
    node = config
    for part in args.key.split("."):
        node = getattr(node, part)
    print(json.dumps(node))

# Register `get` under the `configure` group; it takes a single
# dot-separated key path as its positional argument.
get_parser = register_parser(get, parent=configure_parser)
get_parser.add_argument("key")

def set(args):
    """Set an Aegea configuration parameter to a given value"""
    from . import config, tweak

    class ConfigSaver(tweak.Config):
        @property
        def config_files(self):
            return [config.config_files[2]]

    config_saver = ConfigSaver(use_yaml=True, save_on_exit=False)
    c = config_saver
    for key in args.key.split(".")[:-1]:
        try:
            c = c[key]
github kislyuk / aegea / aegea / batch.py View on Github external
if job_desc["status"] != last_status:
            logger.info("Job %s %s", args.job_id, format_job_status(job_desc["status"]))
            last_status = job_desc["status"]
            if job_desc["status"] in {"RUNNING", "SUCCEEDED", "FAILED"}:
                logger.info("Job %s log stream: %s", args.job_id, job_desc.get("container", {}).get("logStreamName"))
                save_job_desc(job_desc)
        if job_desc["status"] in {"RUNNING", "SUCCEEDED", "FAILED"} and "logStreamName" in job_desc["container"]:
            args.log_stream_name = job_desc["container"]["logStreamName"]
            get_logs(args)
        if "statusReason" in job_desc:
            logger.info("Job %s: %s", args.job_id, job_desc["statusReason"])
        if job_desc.get("container", {}).get("exitCode"):
            return SystemExit(job_desc["container"]["exitCode"])
        time.sleep(1)

# CLI wiring: `get_logs` and `watch` are both subcommands of the Batch group.
get_logs_parser = register_parser(get_logs, parent=batch_parser, help="Retrieve logs for a Batch job")
get_logs_parser.add_argument("log_stream_name")
watch_parser = register_parser(watch, parent=batch_parser, help="Monitor a running Batch job and stream its logs")
watch_parser.add_argument("job_id")
# Both commands share --head/--tail: mutually exclusive, and each defaults
# to 10 lines when given without a value (nargs="?" with const=10).
for parser in get_logs_parser, watch_parser:
    lines_group = parser.add_mutually_exclusive_group()
    lines_group.add_argument("--head", type=int, nargs="?", const=10,
                             help="Retrieve this number of lines from the beginning of the log (default 10)")
    lines_group.add_argument("--tail", type=int, nargs="?", const=10,
                             help="Retrieve this number of lines from the end of the log (default 10)")

def ssh(args):
    job_desc = clients.batch.describe_jobs(jobs=[args.job_id])["jobs"][0]
    job_queue_desc = clients.batch.describe_job_queues(jobQueues=[job_desc["jobQueue"]])["jobQueues"][0]
    ce = job_queue_desc["computeEnvironmentOrder"][0]["computeEnvironment"]
    ce_desc = clients.batch.describe_compute_environments(computeEnvironments=[ce])["computeEnvironments"][0]
    ecs_ci_arn = job_desc["container"]["containerInstanceArn"]
github kislyuk / aegea / aegea / batch.py View on Github external
logger.info("Creating queue %s in %s", args.name, ces)
    queue = clients.batch.create_job_queue(jobQueueName=args.name, priority=args.priority, computeEnvironmentOrder=ces)
    make_waiter(clients.batch.describe_job_queues, "jobQueues[].status", "VALID", "pathAny").wait(jobQueues=[args.name])
    return queue

# `create_queue` subcommand: queue name, scheduling priority, and the
# compute environment(s) the queue dispatches to (at least one required).
parser = register_parser(create_queue, parent=batch_parser, help="Create a Batch queue")
parser.add_argument("name")
parser.add_argument("--priority", type=int, default=5)
parser.add_argument("--compute-environments", nargs="+", required=True)

def delete_queue(args):
    """Disable a Batch job queue, wait for it to become VALID, then delete it."""
    batch = clients.batch
    batch.update_job_queue(jobQueue=args.name, state="DISABLED")
    waiter = make_waiter(batch.describe_job_queues, "jobQueues[].status", "VALID", "pathAny")
    waiter.wait(jobQueues=[args.name])
    batch.delete_job_queue(jobQueue=args.name)

# `delete_queue` subcommand; tab-completes existing queue names.
parser = register_parser(delete_queue, parent=batch_parser, help="Delete a Batch queue")
parser.add_argument("name").completer = complete_queue_name

def compute_environments(args):
    """Render all Batch compute environments as a paged table."""
    paginator = clients.batch.get_paginator("describe_compute_environments")
    rows = paginate(paginator)
    page_output(tabulate(rows, args))

# Listing subcommand for Batch compute environments (standard table options).
parser = register_listing_parser(compute_environments, parent=batch_parser, help="List Batch compute environments")

def ensure_launch_template(prefix=__name__.replace(".", "_"), **kwargs):
    """Create an EC2 launch template for the given template data, or reuse it.

    The template name embeds a SHA-256 digest of the (sorted) template data,
    so identical data always maps to the same name. Returns the name.
    """
    digest = hashlib.sha256(json.dumps(kwargs, sort_keys=True).encode()).hexdigest()
    name = "{}_{}".format(prefix, digest[:32])
    try:
        clients.ec2.create_launch_template(LaunchTemplateName=name, LaunchTemplateData=kwargs)
    except ClientError as e:
        # Idempotent: a template that already exists under this name has the
        # same content hash, so "already exists" is not an error.
        expect_error_codes(e, "InvalidLaunchTemplateName.AlreadyExistsException")
    return name

def get_ssm_parameter(name):
github kislyuk / aegea / aegea / ecr.py View on Github external
Use ``aws ecr create-repository`` and ``aws ecr delete-repository`` to manage ECR repositories.
"""

from __future__ import absolute_import, division, print_function, unicode_literals

import json

from .ls import register_parser, register_listing_parser
from .util import paginate
from .util.printing import page_output, tabulate
from .util.aws import clients

def ecr(args):
    """Entry point for the `ecr` command group: print subcommand help."""
    ecr_parser.print_help()

# Top-level `ecr` command; the module docstring doubles as its description.
ecr_parser = register_parser(ecr, help="Manage Elastic Container Registry resources", description=__doc__)

def ls(args):
    """Tabulate ECR repositories (optionally filtered by name) with their
    images and, where present, their repository policies.

    Repositories with no images still get one row of their own.
    """
    rows = []
    repo_filter = {"repositoryNames": args.repositories} if args.repositories else {}
    for repo in paginate(clients.ecr.get_paginator("describe_repositories"), **repo_filter):
        try:
            policy_res = clients.ecr.get_repository_policy(repositoryName=repo["repositoryName"])
            repo["policy"] = json.loads(policy_res["policyText"])
        except clients.ecr.exceptions.RepositoryPolicyNotFoundException:
            # Not every repository has a policy attached; that's fine.
            pass
        had_images = False
        for image in paginate(clients.ecr.get_paginator("describe_images"), repositoryName=repo["repositoryName"]):
            rows.append(dict(image, **repo))
            had_images = True
        if not had_images:
            rows.append(repo)
    page_output(tabulate(rows, args))
github kislyuk / aegea / aegea / elb.py View on Github external
# Health-check tuning options for the `create` subcommand
# (parser_create is defined earlier in this file).
parser_create.add_argument("--path-pattern")
parser_create.add_argument("--health-check-protocol", default="HTTP", choices={"HTTP", "HTTPS"})
parser_create.add_argument("--health-check-port", default="traffic-port", help="Port to be queried by ELB health check")
parser_create.add_argument("--health-check-path", default="/", help="Path to be queried by ELB health check")
parser_create.add_argument("--ok-http-codes", default="200-399",
                           help="Comma or dash-separated HTTP response codes considered healthy by ELB health check")

def delete(args):
    """Delete the named load balancer: classic ELB or ALB, per --type."""
    if args.type == "ALB":
        matches = clients.elbv2.describe_load_balancers(Names=[args.elb_name])["LoadBalancers"]
        # The name filter should match exactly one ALB.
        assert len(matches) == 1
        clients.elbv2.delete_load_balancer(LoadBalancerArn=matches[0]["LoadBalancerArn"])
    elif args.type == "ELB":
        clients.elb.delete_load_balancer(LoadBalancerName=args.elb_name)

# `delete` subcommand for removing an ELB/ALB.
parser_delete = register_parser(delete, parent=elb_parser, help="Delete an ELB")

def list_load_balancers():
    """Return all classic ELBs followed by all ALBs as a single list."""
    classic = list(paginate(clients.elb.get_paginator("describe_load_balancers")))
    v2 = list(paginate(clients.elbv2.get_paginator("describe_load_balancers")))
    return classic + v2

# Arguments shared by every ELB subcommand (the other parsers are defined
# earlier in this file); instance-targeting options apply to all but `delete`.
for parser in parser_register, parser_deregister, parser_replace, parser_create, parser_delete:
    parser.add_argument("elb_name").completer = lambda **kw: [i["LoadBalancerName"] for i in list_load_balancers()]
    parser.add_argument("--type", choices={"ELB", "ALB"}, default="ALB")
    if parser != parser_delete:
        parser.add_argument("instances", nargs="+", type=resolve_instance_id)
        parser.add_argument("--target-group", default="{elb_name}-default-tg")
        parser.add_argument("--instance-port", type=int, default=80)
github kislyuk / aegea / aegea / ecs.py View on Github external
task_desc = res["tasks"][0]
            if task_desc["lastStatus"] != last_status:
                logger.info("Task %s %s", args.task_arn, format_task_status(task_desc["lastStatus"]))
                last_status = task_desc["lastStatus"]
        try:
            for event in CloudwatchLogReader("/".join([args.task_name, args.task_name, task_id]),
                                             log_group_name=args.task_name):
                print(str(Timestamp(event["timestamp"])), event["message"])
                events_received += 1
        except ClientError as e:
            expect_error_codes(e, "ResourceNotFoundException")
        if last_status is None and events_received > 0:
            break  # Logs retrieved successfully but task record is no longer in ECS
        time.sleep(1)

# `watch` subcommand for ECS: positional task ARN plus the log group/stream
# prefix (--task-name) used to locate the task's CloudWatch logs.
watch_parser = register_parser(watch, parent=ecs_parser, help="Monitor a running ECS Fargate task and stream its logs")
watch_parser.add_argument("task_arn")
watch_parser.add_argument("--task-name", default=__name__.replace(".", "_"))
# --head/--tail are mutually exclusive and default to 10 lines when given
# without a value (nargs="?" with const=10).
lines_group = watch_parser.add_mutually_exclusive_group()
lines_group.add_argument("--head", type=int, nargs="?", const=10,
                         help="Retrieve this number of lines from the beginning of the log (default 10)")
lines_group.add_argument("--tail", type=int, nargs="?", const=10,
                         help="Retrieve this number of lines from the end of the log (default 10)")
github kislyuk / aegea / aegea / secrets.py View on Github external
except clients.secretsmanager.exceptions.ResourceExistsException:
        res = clients.secretsmanager.put_secret_value(SecretId=args.secret_name, SecretString=secret_value)
    if parse_principal(args):
        ensure_policy(parse_principal(args), res["ARN"])
    if args.generate_ssh_key:
        return dict(ssh_public_key=hostkey_line(hostnames=[], key=ssh_key).strip(),
                    ssh_key_fingerprint=key_fingerprint(ssh_key))

# `put` subcommand for the secrets group; --generate-ssh-key makes the
# secret value a newly generated SSH private key.
put_parser = register_parser(put, parent=secrets_parser)
put_parser.add_argument("--generate-ssh-key", action="store_true",
                        help="Generate a new SSH key pair and write the private key as the secret value; write the public key to stdout")  # noqa

def get(args):
    """Write the secret's current string value to stdout (no trailing newline)."""
    res = clients.secretsmanager.get_secret_value(SecretId=args.secret_name)
    sys.stdout.write(res["SecretString"])

# `get` subcommand for the secrets group.
get_parser = register_parser(get, parent=secrets_parser)

def delete(args):
    """Delete the named secret and return the Secrets Manager API response."""
    response = clients.secretsmanager.delete_secret(SecretId=args.secret_name)
    return response

# `delete` subcommand for the secrets group.
delete_parser = register_parser(delete, parent=secrets_parser)

# Arguments shared by put/get/delete: the secret name plus the IAM principal
# options that control who is granted access to the secret.
for parser in put_parser, get_parser, delete_parser:
    parser.add_argument("secret_name",
                        help="List the secret name. For put, pass the secret value on stdin, or via an environment variable with the same name as the secret.")  # noqa
    parser.add_argument("--instance-profile")
    parser.add_argument("--iam-role")
    parser.add_argument("--iam-group")
    parser.add_argument("--iam-user",
                        help="Name of IAM instance profile, role, group, or user who will be granted access to secret")
github kislyuk / aegea / aegea / elb.py View on Github external
if res["Listeners"]:
            res = clients.elbv2.modify_listener(ListenerArn=res["Listeners"][0]["ListenerArn"], **listener_params)
        else:
            res = clients.elbv2.create_listener(LoadBalancerArn=elb["LoadBalancerArn"], **listener_params)
        listener = res["Listeners"][0]
        if args.path_pattern:
            rules = clients.elbv2.describe_rules(ListenerArn=listener["ListenerArn"])["Rules"]
            clients.elbv2.create_rule(ListenerArn=listener["ListenerArn"],
                                      Conditions=[dict(Field="path-pattern", Values=[args.path_pattern])],
                                      Actions=[dict(Type="forward", TargetGroupArn=target_group["TargetGroupArn"])],
                                      Priority=len(rules))
    replace(args)
    DNSZone(zone["Name"]).update(args.dns_alias.replace("." + zone["Name"].rstrip("."), ""), elb["DNSName"])
    return dict(elb_name=args.elb_name, dns_name=elb["DNSName"], dns_alias=args.dns_alias)

# `create` subcommand: requires security groups and a DNS alias; the
# remaining options tune the listener rule and the ELB health check.
parser_create = register_parser(create, parent=elb_parser, help="Create a new ELB")
parser_create.add_argument("--security-groups", nargs="+", type=resolve_security_group, required=True, help="""
Security groups to assign the ELB. You must allow TCP traffic to flow between clients and the ELB on ports 80/443
and allow TCP traffic to flow between the ELB and the instances on INSTANCE_PORT.""")
parser_create.add_argument("--dns-alias", required=True, help="Fully qualified DNS name that will point to the ELB")
parser_create.add_argument("--path-pattern")
parser_create.add_argument("--health-check-protocol", default="HTTP", choices={"HTTP", "HTTPS"})
parser_create.add_argument("--health-check-port", default="traffic-port", help="Port to be queried by ELB health check")
parser_create.add_argument("--health-check-path", default="/", help="Path to be queried by ELB health check")
parser_create.add_argument("--ok-http-codes", default="200-399",
                           help="Comma or dash-separated HTTP response codes considered healthy by ELB health check")

def delete(args):
    if args.type == "ELB":
        clients.elb.delete_load_balancer(LoadBalancerName=args.elb_name)
    elif args.type == "ALB":
        elbs = clients.elbv2.describe_load_balancers(Names=[args.elb_name])["LoadBalancers"]
github kislyuk / aegea / aegea / flow_logs.py View on Github external
# NOTE(review): --traffic_type uses an underscore while sibling options use
# dashes; renaming it would break existing command lines, so left as-is.
parser.add_argument("--resource")
parser.add_argument("--traffic_type", choices=["ACCEPT", "REJECT", "ALL"], default="ALL")

def ls(args):
    """List VPC flow logs, optionally filtered to a single resource ID."""
    if args.resource:
        kwargs = {"Filters": [{"Name": "resource-id", "Values": [args.resource]}]}
    else:
        kwargs = {}
    flow_logs = clients.ec2.describe_flow_logs(**kwargs)["FlowLogs"]
    page_output(tabulate(flow_logs, args))

# Listing subcommand for flow logs; --resource narrows to one VPC resource.
parser = register_listing_parser(ls, parent=flow_logs_parser, help="List VPC flow logs")
parser.add_argument("--resource")

def get(args):
    """Fetch VPC flow log records by delegating to the CloudWatch Logs grep.

    The log group is this module's name; when a network interface is given,
    the stream name is "<eni>-<traffic_type>", otherwise all streams.
    """
    args.log_group = __name__
    args.pattern = None
    if args.network_interface:
        args.log_stream = args.network_interface + "-" + args.traffic_type
    else:
        args.log_stream = None
    grep(args)

# `get` subcommand for flow logs; add_time_bound_args supplies the
# start/end time options used by the underlying log grep.
parser = register_parser(get, parent=flow_logs_parser, help="Get VPC flow logs")
parser.add_argument("--network-interface")
parser.add_argument("--traffic_type", choices=["ACCEPT", "REJECT", "ALL"], default="ALL")
add_time_bound_args(parser)
github kislyuk / aegea / aegea / ecs.py View on Github external
from .ls import register_parser, register_listing_parser
from .util import Timestamp, paginate
from .util.compat import USING_PYTHON2
from .util.printing import page_output, tabulate, YELLOW, RED, GREEN, BOLD, ENDC
from .util.aws import (ARN, clients, ensure_security_group, ensure_vpc, ensure_iam_role, ensure_log_group,
                       ensure_ecs_cluster, expect_error_codes)
from .util.aws.logs import CloudwatchLogReader
from .util.aws.batch import get_command_and_env, set_ulimits, set_volumes, get_ecr_image_uri

def complete_cluster_name(**kwargs):
    """argcomplete completer: the short name of every ECS cluster (the part
    of the ARN resource after the first "/")."""
    names = []
    for cluster_arn in paginate(clients.ecs.get_paginator("list_clusters")):
        names.append(ARN(cluster_arn).resource.partition("/")[2])
    return names

def ecs(args):
    """Entry point for the `ecs` command group: print subcommand help."""
    ecs_parser.print_help()

# Top-level `ecs` command; the module docstring doubles as its description.
ecs_parser = register_parser(ecs, help="Manage Elastic Container Service resources", description=__doc__)

def clusters(args):
    """Describe the requested ECS clusters (default: every cluster) as a table."""
    if not args.clusters:
        # No clusters named on the command line: enumerate all cluster ARNs.
        args.clusters = list(paginate(clients.ecs.get_paginator("list_clusters")))
    descriptions = clients.ecs.describe_clusters(clusters=args.clusters)["clusters"]
    page_output(tabulate(descriptions, args))

# Listing subcommand for ECS clusters; cluster names tab-complete.
parser = register_listing_parser(clusters, parent=ecs_parser, help="List ECS clusters")
parser.add_argument("clusters", nargs="*").completer = complete_cluster_name

def tasks(args):
    list_clusters = clients.ecs.get_paginator("list_clusters")
    list_tasks = clients.ecs.get_paginator("list_tasks")

    def list_tasks_worker(worker_args):
        cluster, status = worker_args