def ecs_taggable(model, r):
# Tag support requires new arn format
# https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html
#
# New arn format details
# https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-resource-ids.html
#
path_parts = r[model.id].rsplit(':', 1)[-1].split('/')
if path_parts[0] not in NEW_ARN_STYLE:
return True
return len(path_parts) > 2
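# Worked example for the ARN check above (a sketch; the NEW_ARN_STYLE value
# here is an assumption matching the set of ECS resource types that adopted
# the long ARN format -- cluster ARNs never carry the extra path segment, so
# they are always taggable).
NEW_ARN_STYLE = ('container-instance', 'service', 'task')

class FakeModel:  # hypothetical stand-in for the resource model
    id = 'taskArn'

old_arn = {'taskArn': 'arn:aws:ecs:us-east-1:123456789012:task/0123abcd'}
new_arn = {'taskArn': 'arn:aws:ecs:us-east-1:123456789012:task/mycluster/0123abcd'}
assert ecs_taggable(FakeModel, old_arn) is False  # old format: 2 path parts
assert ecs_taggable(FakeModel, new_arn) is True   # new format: cluster name in path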
@resources.register('ecs')
class ECSCluster(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ecs'
enum_spec = ('list_clusters', 'clusterArns', None)
batch_detail_spec = (
'describe_clusters', 'clusters', None, 'clusters', {'include': ['TAGS']})
name = "clusterName"
arn = id = "clusterArn"
def augment(self, resources):
resources = super(ECSCluster, self).augment(resources)
ecs_tag_normalize(resources)
return resources
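# ecs_tag_normalize is defined elsewhere in this module; a minimal sketch of
# what it is assumed to do: ECS APIs return lowercase {'key', 'value'} tag
# pairs, while the rest of the framework expects capitalised Tags entries.
def ecs_tag_normalize_sketch(resources):
    for r in resources:
        if 'tags' in r:
            r['Tags'] = [{'Key': t['key'], 'Value': t['value']}
                         for t in r.pop('tags')]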
@ECSCluster.filter_registry.register('metrics')
class ECSMetrics(MetricsFilter):
    # Body reconstructed (the snippet truncated here): ECS cluster metrics
    # are dimensioned on the cluster name. MetricsFilter is assumed to be
    # the framework's standard CloudWatch metrics filter base class.

    def get_dimensions(self, resource):
        return [{'Name': 'ClusterName', 'Value': resource['clusterName']}]
def process_asg(self, client, asg):
force_delete = self.data.get('force', False)
try:
self.manager.retry(
client.delete_auto_scaling_group,
AutoScalingGroupName=asg['AutoScalingGroupName'],
ForceDelete=force_delete)
except ClientError as e:
if e.response['Error']['Code'] == 'ValidationError':
return
raise
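# A bare boto3 sketch of the same call (the group name is illustrative):
# without ForceDelete, delete_auto_scaling_group raises a ValidationError
# while the group still has instances or scaling activities in flight, which
# is why that error code is treated as "nothing to do" above.
import boto3

def force_delete_asg(name):
    client = boto3.client('autoscaling')
    client.delete_auto_scaling_group(
        AutoScalingGroupName=name, ForceDelete=True)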
@resources.register('launch-config')
class LaunchConfig(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'autoscaling'
arn_type = 'launchConfiguration'
id = name = 'LaunchConfigurationName'
date = 'CreatedTime'
enum_spec = (
'describe_launch_configurations', 'LaunchConfigurations', None)
filter_name = 'LaunchConfigurationNames'
filter_type = 'list'
config_type = 'AWS::AutoScaling::LaunchConfiguration'
@LaunchConfig.filter_registry.register('age')
class LaunchConfigAge(AgeFilter):
    """Filter ASG launch configurations by age (in days)."""

    # Attributes below are a minimal reconstruction; the snippet truncated
    # mid-docstring.
    date_attribute = "CreatedTime"
    schema = type_schema('age', days={'type': 'number'})
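# A minimal sketch of the comparison an AgeFilter performs, assuming
# date_attribute names a timezone-aware creation timestamp on the resource.
from datetime import datetime, timedelta, timezone

def older_than(resource, days, date_attribute='CreatedTime'):
    created = resource[date_attribute]
    if isinstance(created, str):
        created = datetime.fromisoformat(created)
    return datetime.now(timezone.utc) - created > timedelta(days=days)

# older_than({'CreatedTime': '2018-01-01T00:00:00+00:00'}, days=90)  -> True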
class DescribeS3(query.DescribeSource):
def augment(self, buckets):
with self.manager.executor_factory(
max_workers=min((10, len(buckets)))) as w:
results = w.map(
assemble_bucket,
zip(itertools.repeat(self.manager.session_factory), buckets))
results = filter(None, results)
return results
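# The augment above fans bucket assembly out over a thread pool; an
# equivalent standard-library sketch (assemble_bucket is assumed to take a
# (session_factory, bucket) tuple and return the enriched bucket, or None
# when the bucket should be dropped):
from concurrent.futures import ThreadPoolExecutor
import itertools

def augment_sketch(session_factory, buckets):
    with ThreadPoolExecutor(max_workers=min(10, len(buckets))) as pool:
        results = pool.map(
            assemble_bucket,
            zip(itertools.repeat(session_factory), buckets))
        return [r for r in results if r is not None]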
class ConfigS3(query.ConfigSource):
def load_resource(self, item):
resource = super(ConfigS3, self).load_resource(item)
cfg = item['supplementaryConfiguration']
if item['awsRegion'] != 'us-east-1': # aka standard
resource['Location'] = {'LocationConstraint': item['awsRegion']}
# owner is under acl per describe
resource.pop('Owner', None)
resource['CreationDate'] = parse_date(resource['CreationDate'])
for k, null_value in S3_CONFIG_SUPPLEMENT_NULL_MAP.items():
if cfg.get(k) == null_value:
continue
            method = getattr(self, "handle_%s" % k, None)
            if method is None:
                # Completion reconstructed from context: unknown
                # supplementary keys are an error, not silently dropped.
                raise ValueError("unhandled supplementary config %s" % k)
            value = cfg[k]
            if isinstance(value, str):
                value = json.loads(value)
            method(resource, value)
        return resource
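# load_resource above routes each supplementary-configuration key to a
# handle_<Key> method discovered via getattr; a toy illustration of that
# dispatch pattern (the handler and key names here are hypothetical):
class SupplementDispatcher:
    def handle_BucketVersioningConfiguration(self, resource, value):
        resource['Versioning'] = value

    def load(self, resource, cfg):
        for key, value in cfg.items():
            handler = getattr(self, 'handle_%s' % key, None)
            if handler is not None:
                handler(resource, value)
        return resource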
from c7n.utils import (
chunks, local_session, set_annotation, type_schema, dumps)
log = logging.getLogger('custodian.s3')
filters = FilterRegistry('s3.filters')
actions = ActionRegistry('s3.actions')
filters.register('marked-for-op', TagActionFilter)
actions.register('put-metric', PutMetric)
MAX_COPY_SIZE = 1024 * 1024 * 1024 * 2  # 2 GB
@resources.register('s3')
class S3(query.QueryResourceManager):
class resource_type(object):
service = 's3'
type = 'bucket'
enum_spec = ('list_buckets', 'Buckets[]', None)
detail_spec = ('list_objects', 'Bucket', 'Contents[]')
name = id = 'Name'
filter_name = None
date = 'CreationDate'
dimension = 'BucketName'
config_type = 'AWS::S3::Bucket'
filter_registry = filters
action_registry = actions
    def __init__(self, ctx, data):
        super(S3, self).__init__(ctx, data)
        # Remainder truncated in the snippet; the super() call is the
        # minimal valid completion.
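# How an enum_spec like ('list_buckets', 'Buckets[]', None) is assumed to be
# consumed by the query framework: invoke the list operation on the service
# client, then extract resources from the response with the jmespath
# expression.
import jmespath

def enumerate_resources(client, enum_spec):
    op, path, extra_args = enum_spec
    response = getattr(client, op)(**(extra_args or {}))
    return jmespath.search(path, response) or []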
class DeleteDatabase(BaseAction):
schema = type_schema('delete')
permissions = ('glue:DeleteDatabase',)
def process(self, resources):
client = local_session(self.manager.session_factory).client('glue')
for r in resources:
try:
client.delete_database(Name=r['Name'])
except client.exceptions.EntityNotFoundException:
continue
@resources.register('glue-table')
class GlueTable(query.ChildResourceManager):
child_source = 'describe-table'
class resource_type(TypeInfo):
service = 'glue'
parent_spec = ('glue-database', 'DatabaseName', None)
enum_spec = ('get_tables', 'TableList', None)
name = 'Name'
date = 'CreatedOn'
arn_type = 'table'
@query.sources.register('describe-table')
class DescribeTable(query.ChildDescribeSource):
    def get_query(self):
        # Minimal completion; the snippet truncates here. Defer to the
        # parent child-describe implementation.
        return super(DescribeTable, self).get_query()
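# An illustrative sketch (not the framework's actual implementation) of the
# parent/child enumeration a ChildResourceManager performs: the parent key
# from parent_spec is injected into each child enum call.
def enumerate_children(client, parent_ids, parent_key, enum_op, result_key):
    results = []
    for pid in parent_ids:
        response = getattr(client, enum_op)(**{parent_key: pid})
        results.extend(response.get(result_key, []))
    return results

# e.g. for glue-table: enumerate_children(
#     glue_client, ['sales_db'], 'DatabaseName', 'get_tables', 'TableList')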
def validate(self):
super(CloudTrailMode, self).validate()
from c7n import query
events = self.policy.data['mode'].get('events')
        assert events, "cloud trail mode requires specifying events to subscribe"
for e in events:
if isinstance(e, six.string_types):
assert e in CloudWatchEvents.trail_events, "event shortcut not defined: %s" % e
if isinstance(e, dict):
jmespath.compile(e['ids'])
if isinstance(self.policy.resource_manager, query.ChildResourceManager):
if not getattr(self.policy.resource_manager.resource_type,
'supports_trailevents', False):
raise ValueError(
"resource:%s does not support cloudtrail mode policies" % (
self.policy.resource_type))
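# The two event forms the validation above accepts: a registered shortcut
# name from CloudWatchEvents.trail_events, or a mapping whose 'ids' is a
# jmespath expression into the CloudTrail event payload (values below are
# illustrative).
events = [
    'CreateBucket',                     # shortcut form
    {'source': 's3.amazonaws.com',      # explicit form
     'event': 'DeleteBucket',
     'ids': 'requestParameters.bucketName'},
]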
"""
permissions = ('apigateway:Delete',)
schema = type_schema('delete')
def process(self, resources):
client = utils.local_session(
self.manager.session_factory).client('apigateway')
for r in resources:
try:
client.delete_rest_api(restApiId=r['id'])
except client.exceptions.NotFoundException:
continue
@resources.register('rest-stage')
class RestStage(query.ChildResourceManager):
child_source = 'describe-rest-stage'
class resource_type(query.TypeInfo):
service = 'apigateway'
parent_spec = ('rest-api', 'restApiId', None)
enum_spec = ('get_stages', 'item', None)
name = id = 'stageName'
date = 'createdDate'
universal_taggable = True
config_type = "AWS::ApiGateway::Stage"
arn_type = 'stages'
def get_source(self, source_type):
if source_type == 'describe-rest-stage':
return DescribeRestStage(self)
        # Fall back to the default source resolution.
        return super(RestStage, self).get_source(source_type)
@resources.register('transit-gateway')
class TransitGateway(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
enum_spec = ('describe_transit_gateways', 'TransitGateways', None)
name = id = 'TransitGatewayId'
arn = "TransitGatewayArn"
filter_name = 'TransitGatewayIds'
filter_type = 'list'
class TransitGatewayAttachmentQuery(query.ChildResourceQuery):
def get_parent_parameters(self, params, parent_id, parent_key):
merged_params = dict(params)
merged_params.setdefault('Filters', []).append(
{'Name': parent_key, 'Values': [parent_id]})
return merged_params
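# What get_parent_parameters yields for an EC2-style child query, assuming a
# parent key of 'transit-gateway-id'. Note that dict(params) is a shallow
# copy: an existing 'Filters' list is appended to in place.
params = {'Filters': [{'Name': 'state', 'Values': ['available']}]}
# get_parent_parameters(params, 'tgw-0123', 'transit-gateway-id') ->
# {'Filters': [{'Name': 'state', 'Values': ['available']},
#              {'Name': 'transit-gateway-id', 'Values': ['tgw-0123']}]}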
@query.sources.register('transit-attachment')
class TransitAttachmentSource(query.ChildDescribeSource):
resource_query_factory = TransitGatewayAttachmentQuery
@resources.register('transit-attachment')
class TransitGatewayAttachment(query.ChildResourceManager):

    child_source = 'transit-attachment'

    class resource_type(query.TypeInfo):
        # Body reconstructed (the snippet truncated here): a child resource
        # needs a parent_spec tying it to transit-gateway, plus its own
        # enum_spec and identifiers.
        service = 'ec2'
        parent_spec = ('transit-gateway', 'transit-gateway-id', None)
        enum_spec = ('describe_transit_gateway_attachments',
                     'TransitGatewayAttachments', None)
        name = id = 'TransitGatewayAttachmentId'
def process_dynamodb_backups(self, table_set, c):
for t in table_set:
try:
c.delete_backup(
BackupArn=t['BackupArn'])
except ClientError as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
self.log.warning("Could not complete DynamoDB backup deletion for table:%s", t)
continue
raise
@resources.register('dynamodb-stream')
class Stream(query.QueryResourceManager):
# Note stream management takes place on the table resource
class resource_type(query.TypeInfo):
service = 'dynamodbstreams'
# Note max rate of 5 calls per second
enum_spec = ('list_streams', 'Streams', None)
# Note max rate of 10 calls per second.
detail_spec = (
"describe_stream", "StreamArn", "StreamArn", "StreamDescription")
arn = id = 'StreamArn'
arn_type = 'stream'
name = 'TableName'
date = 'CreationDateTime'
dimension = 'TableName'