def load_policy_set(self, data, config=None):
    filename = self.write_policy_file(data, format="json")
    if config:
        e = Config.empty(**config)
    else:
        e = Config.empty()
    return policy.load(e, filename)

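For context, here is a minimal standalone sketch of the same pattern, assuming the standard c7n package layout: Config.empty() builds a default configuration object, keyword arguments override individual fields, and policy.load() reads the policy file. The region, output directory, and file path below are placeholder values.

from c7n import policy
from c7n.config import Config

# Config.empty() returns a default config; keyword args override single fields
conf = Config.empty(region="us-east-1", output_dir="/tmp/custodian-run")

# hypothetical policy file path, purely for illustration
collection = policy.load(conf, "policies.json")
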
def get_azure_output(self, custom_pyformat=None):
    output_dir = "azure://mystorage.blob.core.windows.net/logs"
    if custom_pyformat:
        output_dir = AzureStorageOutput.join(output_dir, custom_pyformat)

    output = AzureStorageOutput(
        ExecutionContext(
            None,
            Bag(name="xyz", provider_name='azure'),
            Config.empty(output_dir=output_dir)
        ),
        {'url': output_dir},
    )
    self.addCleanup(shutil.rmtree, output.root_dir)
    return output

def test_doc_examples(provider_name, provider):
    policies, duplicate_names = get_doc_policies(provider.resources)

    with tempfile.NamedTemporaryFile(suffix='.json', delete=False) as fh:
        atexit.register(os.unlink, fh.name)
        fh.write(json.dumps({'policies': list(policies.values())}).encode('utf8'))
        fh.flush()
        collection = load(Config.empty(), fh.name)
        assert isinstance(collection, PolicyCollection)

    assert not duplicate_names

    for p in policies.values():
        # Note: the max name size here is 54 if it is a lambda policy, given
        # our default "custodian-" prefix, to stay under the 64 char limit on
        # lambda function names. This applies to AWS and GCP, and
        # afaict Azure.
        if len(p['name']) >= 54 and 'mode' in p:
            raise ValueError(
                "doc policy exceeds name limit policy:%s" % (p['name']))

if not options.policy_regex:
    options.policy_regex = f"^{options.prefix}.*"

if not options.regions:
    options.regions = [os.environ.get('AWS_DEFAULT_REGION', 'us-east-1')]

files = []
files.extend(itertools.chain(*options.config_files))
files.extend(options.configs)
options.config_files = files

if not files:
    parser.print_help()
    sys.exit(1)

policy_config = Config.empty(
    regions=options.regions,
    profile=options.profile,
    assume_role=options.assume_role)

# use cloud provider to initialize policies to get region expansion
policies = AWS().initialize_policies(
    PolicyCollection([
        p for p in load_policies(options, policy_config)
        if p.provider_name == 'aws'],
        policy_config),
    policy_config)

resources_gc_prefix(options, policy_config, policies)

def __init__(self, policies=None, options=None):
    self.policies = policies or []
    self.options = options or Config.empty()
    self.pmap = {p.name: p for p in self.policies}

def initialize_tree(self, tree):
    assert not self.policy_files
    for tree_ent in tree:
        fpath = tree_ent.name
        if not self.matcher(fpath):
            continue
        self.policy_files[fpath] = PolicyCollection.from_data(
            yaml.safe_load(self.repo.get(tree[fpath].id).data),
            Config.empty(), fpath)

def _policy_file_rev(self, f, commit):
    try:
        return self._validate_policies(
            PolicyCollection.from_data(
                yaml.safe_load(self.repo.get(commit.tree[f].id).data),
                Config.empty(), f))
    except Exception as e:
        log.warning(
            "invalid policy file %s @ %s %s %s \n error:%s",
            f, str(commit.id)[:6], commit_date(commit).isoformat(),
            commit.author.name, e)
        return PolicyCollection()

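As a hedged illustration of the from_data path used in the two snippets above, the sketch below builds a collection from an inline dictionary instead of a parsed file. It assumes the same three-argument from_data signature shown above (the PolicyCollection here appears to be a project-local subclass); the policy body and file name are placeholders.

# hypothetical inline policy data, mirroring the yaml.safe_load result above
data = {
    'policies': [
        {'name': 'ec2-inventory', 'resource': 'aws.ec2'},
    ]
}
collection = PolicyCollection.from_data(data, Config.empty(), 'policies.yml')
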
def build_options(output_dir=None, log_group=None, metrics=None):
    """
    Initialize the Azure provider to apply global config across all policy executions.
    """
    if not output_dir:
        output_dir = tempfile.mkdtemp()
        log.warning('Output directory not specified. Using directory: %s' % output_dir)

    config = Config.empty(
        **{
            'log_group': log_group,
            'metrics': metrics,
            'output_dir': output_dir
        }
    )

    return Azure().initialize(config)

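A brief usage sketch for the helper above; the call itself is hypothetical and every argument value is a placeholder.

# when output_dir is None the helper falls back to a temp directory
# and logs a warning, as shown in the function above
options = build_options(
    output_dir=None,
    log_group=None,
    metrics=None)
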
def run_account(account, region, policies_config, output_path,
                cache_period, cache_path, metrics, dryrun, debug):
    """Execute a set of policies on an account.
    """
    logging.getLogger('custodian.output').setLevel(logging.ERROR + 1)
    CONN_CACHE.session = None
    CONN_CACHE.time = None

    # allow users to specify interpolated output paths
    if '{' not in output_path:
        output_path = os.path.join(output_path, account['name'], region)

    cache_path = os.path.join(cache_path, "%s-%s.cache" % (account['account_id'], region))

    config = Config.empty(
        region=region, cache=cache_path,
        cache_period=cache_period, dryrun=dryrun, output_dir=output_path,
        account_id=account['account_id'], metrics_enabled=metrics,
        log_group=None, profile=None, external_id=None)
    env_vars = account_tags(account)

    if account.get('role'):
        if isinstance(account['role'], six.string_types):
            config['assume_role'] = account['role']
            config['external_id'] = account.get('external_id')
        else:
            env_vars.update(
                _get_env_creds(get_session(account, 'custodian', region), region))

    elif account.get('profile'):