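# From flintrock's `login` command: validate the EC2-specific options, check
# that `ssh` is available locally, then SSH into the cluster master.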
option_requires(
    option='--provider',
    conditional_value='ec2',
    requires_all=[
        '--ec2-region',
        '--ec2-identity-file',
        '--ec2-user'],
    scope=locals())
check_external_dependency('ssh')
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
# TODO: Check that master up first and error out cleanly if not
# via ClusterInvalidState.
cluster.login(user=user, identity_file=identity_file)
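
# The `option_requires` calls in these snippets guard provider-specific CLI
# options: when --provider is 'ec2', the listed --ec2-* options must be set.
# A minimal sketch of that idea, assuming Click-style option naming (this is
# an illustration, not flintrock's exact helper):
import click

def _option_requires_sketch(*, option, conditional_value, requires_all, scope):
    def to_var(name):
        # '--ec2-region' -> 'ec2_region', mirroring Click's parameter naming.
        return name.lstrip('-').replace('-', '_')
    if scope.get(to_var(option)) == conditional_value:
        missing = [o for o in requires_all if scope.get(to_var(o)) is None]
        if missing:
            raise click.UsageError(
                "{o}={v} requires: {m}".format(
                    o=option, v=conditional_value, m=', '.join(missing)))

# From flintrock's `add-slaves` command: look up the cluster, check that it
# still has a master, then add the requested number of slaves.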
option_requires(
    option='--provider',
    conditional_value='ec2',
    requires_all=[
        '--ec2-region',
        '--ec2-identity-file',
        '--ec2-user'],
    scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
provider_options = {
'min_root_ebs_size_gb': ec2_min_root_ebs_size_gb,
'spot_price': ec2_spot_price,
'tags': ec2_tags
}
else:
raise UnsupportedProviderError(provider)
if cluster.num_masters == 0:
raise Error(
"Cannot add slaves to cluster '{c}' since it does not "
"appear to have a master."
.format(
c=cluster_name))
cluster.load_manifest(
user=user,
identity_file=identity_file)
cluster.add_slaves_check()
if provider == 'ec2':
    # The remaining arguments follow from the variables prepared above
    # (the original call was truncated at this point in the snippet).
    cluster.add_slaves(
        user=user,
        identity_file=identity_file,
        num_slaves=num_slaves,
        **provider_options)
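
# From flintrock's `remove-slaves` command: never remove more slaves than the
# cluster has, and confirm with the user before doing anything destructive.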
option_requires(
    option='--provider',
    conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-user',
'--ec2-identity-file'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
if num_slaves > cluster.num_slaves:
logger.warning(
"Warning: Cluster has {c} slave{cs}. "
"You asked to remove {n} slave{ns}."
.format(
c=cluster.num_slaves,
cs='' if cluster.num_slaves == 1 else 's',
n=num_slaves,
ns='' if num_slaves == 1 else 's'))
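# Clamp the request to the number of slaves the cluster actually has.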
num_slaves = cluster.num_slaves
if not assume_yes:
cluster.print()
click.confirm(
text=("Are you sure you want to remove {n} slave{s} from this cluster?"
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=['--ec2-region'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
else:
raise UnsupportedProviderError(provider)
if not assume_yes:
cluster.print()
click.confirm(
text="Are you sure you want to destroy this cluster?",
abort=True)
logger.info("Destroying {c}...".format(c=cluster.name))
cluster.destroy()
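
# From flintrock's `copy-file` command: normalize the remote path, then warn
# before fanning a large upload out to every node in the cluster.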
option_requires(
    option='--provider',
    conditional_value='ec2',
    requires_all=[
        '--ec2-region',
        '--ec2-identity-file',
        '--ec2-user'],
    scope=locals())
# We assume POSIX for the remote path since Flintrock
# only supports clusters running CentOS / Amazon Linux.
if not posixpath.basename(remote_path):
remote_path = posixpath.join(remote_path, os.path.basename(local_path))
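
# A quick illustration of the normalization above: when `remote_path` ends in
# '/', posixpath.basename() returns '', so the local file's name is appended
# (paths here are hypothetical):
#
#   >>> posixpath.basename('/srv/uploads/')
#   ''
#   >>> posixpath.join('/srv/uploads/', os.path.basename('/home/me/data.csv'))
#   '/srv/uploads/data.csv'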
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
cluster.copy_file_check()
if not assume_yes and not master_only:
file_size_bytes = os.path.getsize(local_path)
num_nodes = len(cluster.slave_ips) + 1 # TODO: cluster.num_nodes
total_size_bytes = file_size_bytes * num_nodes
if total_size_bytes > 10 ** 6:
logger.warning("WARNING:")
logger.warning(
format_message(
message="""\
You are trying to upload {total_size} bytes ({size} bytes x {count}
nodes in {cluster}). Depending on your upload bandwidth, this may take
a long time.
""".format(
    total_size=total_size_bytes,
    size=file_size_bytes,
    count=num_nodes,
    cluster=cluster_name)))
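
# From flintrock's `run-command` command: run a shell command on the master
# only, or on every node in the cluster.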
option_requires(
    option='--provider',
    conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-identity-file',
'--ec2-user'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
cluster.run_command_check()
logger.info("Running command on {target}...".format(
target="master only" if master_only else "cluster"))
cluster.run_command(
command=command,
master_only=master_only,
user=user,
identity_file=identity_file)
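
# Example invocation (cluster name and command are hypothetical):
#   flintrock run-command my-cluster 'echo hello'

# From flintrock's `start` command: bring a stopped cluster back up.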
option_requires(
    option='--provider',
    conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-identity-file',
'--ec2-user'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
cluster.start_check()
logger.info("Starting {c}...".format(c=cluster_name))
cluster.start(user=user, identity_file=identity_file)
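
# From flintrock's `stop` command: confirm with the user, then stop the
# cluster.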
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=['--ec2-region'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
else:
raise UnsupportedProviderError(provider)
cluster.stop_check()
if not assume_yes:
cluster.print()
click.confirm(
text="Are you sure you want to stop this cluster?",
abort=True)
logger.info("Stopping {c}...".format(c=cluster_name))
cluster.stop()
logger.info("{c} is now stopped.".format(c=cluster_name))
option_requires(
    option='--provider',
    conditional_value='ec2',
    requires_all=['--ec2-region'],
scope=locals())
if cluster_name:
cluster_names = [cluster_name]
else:
cluster_names = []
if provider == 'ec2':
search_area = "in region {r}".format(r=ec2_region)
clusters = ec2.get_clusters(
cluster_names=cluster_names,
region=ec2_region,
vpc_id=ec2_vpc_id)
else:
raise UnsupportedProviderError(provider)
if cluster_name:
cluster = clusters[0]
if master_hostname_only:
logger.info(cluster.master_host)
else:
cluster.print()
else:
if master_hostname_only:
for cluster in sorted(clusters, key=lambda x: x.name):
logger.info("{}: {}".format(cluster.name, cluster.master_host))
else:
logger.info("Found {n} cluster{s}{space}{search_area}.".format(
n=len(clusters),
s='' if len(clusters) == 1 else 's',
space=' ' if search_area else '',
search_area=search_area))
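
# From flintrock's `launch` command: the tail end of the ec2.launch(...) call
# (its earlier arguments are cut off in this snippet), followed by the
# post-launch summary printed for the user.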
if provider == 'ec2':
    cluster = ec2.launch(
        ami=ec2_ami,
user=ec2_user,
security_groups=ec2_security_groups,
spot_price=ec2_spot_price,
min_root_ebs_size_gb=ec2_min_root_ebs_size_gb,
vpc_id=ec2_vpc_id,
subnet_id=ec2_subnet_id,
instance_profile_name=ec2_instance_profile_name,
placement_group=ec2_placement_group,
tenancy=ec2_tenancy,
ebs_optimized=ec2_ebs_optimized,
instance_initiated_shutdown_behavior=ec2_instance_initiated_shutdown_behavior,
user_data=ec2_user_data,
tags=ec2_tags)
else:
raise UnsupportedProviderError(provider)
print("Cluster master: {}".format(cluster.master_host))
print("Login with: flintrock login {}".format(cluster.name))