How to use the boto.ec2 module in boto

To help you get started, we’ve selected a few boto examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github platform9 / openstack-omni / cinder / volume / drivers / aws / ebs.py View on Github external
def __init__(self, *args, **kwargs):
        """Initialize the EBS driver: validate config and open an EC2 connection.

        Reads region, credentials and availability zone from ``CONF.AWS``,
        connects to the regional EC2 endpoint and caches the matching
        availability-zone object for later volume operations.

        :raises IndexError: if ``CONF.AWS.az`` matches no zone in the region.
        """
        super(EBSDriver, self).__init__(*args, **kwargs)
        self.VERSION = '1.0.0'
        # Config value is expressed in minutes; internal waits use seconds.
        self._wait_time_sec = 60 * (CONF.AWS.wait_time_min)

        self._check_config()
        region_name = CONF.AWS.region_name
        endpoint = '.'.join(['ec2', region_name, 'amazonaws.com'])
        region = RegionInfo(name=region_name, endpoint=endpoint)
        self._conn = ec2.EC2Connection(aws_access_key_id=CONF.AWS.access_key,
                                       aws_secret_access_key=CONF.AWS.secret_key,
                                       region=region)
        # resort to first AZ for now. TODO: expose this through API
        az = CONF.AWS.az
        # Use a list comprehension instead of filter(): on Python 3,
        # filter() returns a lazy iterator that does not support [0].
        matching_zones = [z for z in self._conn.get_all_zones()
                          if z.name == az]
        self._zone = matching_zones[0]
        self.set_initialized()
github zalando / spilo / etcd-cluster-appliance / etcd.py View on Github external
def get_autoscaling_members(self):
        """Return the non-terminated EC2 instances that belong to this
        node's autoscaling group (matched by the AG tag)."""
        # NOTE: 'get_my_instace' (sic) is the helper's actual name upstream.
        me = self.get_my_instace()
        group = me.autoscaling_group

        conn = boto.ec2.connect_to_region(self.region)
        tag_filter = {'tag:{}'.format(EtcdMember.AG_TAG): group}
        reservations = conn.get_all_reservations(filters=tag_filter)

        members = []
        for reservation in reservations:
            for instance in reservation.instances:
                if instance.state == 'terminated':
                    continue
                if instance.tags.get(EtcdMember.AG_TAG, '') == group:
                    members.append(instance)
        return members
github eucalyptus / eucalyptus-console / eucaconsole / cache.py View on Github external
#logging.info("CACHE SUMMARY: instance running :"+str(numRunning))
        # Fragment of a larger cache-summary method (the enclosing 'def' is not
        # visible here; 'summary', 'session' and 'zone' are defined upstream).
        # Each entry is a resource count, with -1 as a sentinel meaning
        # "cache is stale, count unknown".
        summary['keypair'] = -1 if session.clc.caches['keypairs'].isCacheStale() else len(
            session.clc.caches['keypairs'].values)
        summary['sgroup'] = -1 if session.clc.caches['sgroups'].isCacheStale() else len(
            session.clc.caches['sgroups'].values)
        summary['volume'] = -1 if session.clc.caches['volumes'].isCacheStale() else len(
            session.clc.caches['volumes'].values)
        summary['snapshot'] = -1 if session.clc.caches['snapshots'].isCacheStale() else len(
            session.clc.caches['snapshots'].values)
        summary['addresses'] = -1 if session.clc.caches['addresses'].isCacheStale() else len(
            session.clc.caches['addresses'].values)
        # Scaling-instance count is only reported when a scaling session exists.
        if session.scaling != None:
            scaling = 0;
            # NOTE(review): staleness is checked against the 'instances' cache
            # while the values iterated come from 'scalinginsts' — looks
            # intentional upstream, but worth confirming.
            if not (session.clc.caches['instances'].isCacheStale()):
                for inst in session.scaling.caches['scalinginsts'].values:
                    # Cached entries may be boto Instance objects or plain
                    # dicts; both carry an availability zone to filter on.
                    if isinstance(inst, boto.ec2.autoscale.instance.Instance):
                        if zone == 'all' or inst.availability_zone == zone:
                            scaling += 1
                    else:
                        if zone == 'all' or inst['availability_zone'] == zone:
                            scaling += 1
            else:
                scaling = -1
            summary['scalinginsts'] = scaling
        return summary
github CloudScale-Project / CloudStore / deployment-scripts / cloudscale / deployment_scripts / scripts / infrastructure / aws / aws_create_keypair.py View on Github external
def create(self):
        """Create the EC2 key pair and save its private key to ``self.user_path``.

        If a key pair named ``self.key_name`` already exists, it is deleted
        and recreated so that fresh private-key material is available to
        save (AWS only returns the private key at creation time).

        :raises EC2ResponseError: for any AWS error other than a duplicate
            key pair.
        """
        conn = ec2.connect_to_region(
            self.region,
            aws_access_key_id=self.access_key,
            aws_secret_access_key=self.secret_key
        )

        try:
            keypair = conn.create_key_pair(self.key_name)
        except EC2ResponseError as e:
            if e.error_code == 'InvalidKeyPair.Duplicate':
                # A stale key pair blocks creation; replace it.
                conn.delete_key_pair(key_name=self.key_name)
                keypair = conn.create_key_pair(self.key_name)
            else:
                # Bare 'raise' preserves the original traceback
                # ('raise e' resets it on Python 2).
                raise

        keypair.save(self.user_path)
github mozilla / telemetry-server / provisioning / aws / aws_util.py View on Github external
def connect(region, aws_key, aws_secret_key):
    """Open a boto EC2 connection to *region* with the supplied credential pair."""
    return boto.ec2.connect_to_region(
        region,
        aws_access_key_id=aws_key,
        aws_secret_access_key=aws_secret_key)
github wikimedia / wikidata-query-blazegraph / src / resources / deployment / vagrant / bigdata-aws-ha3-launcher / bin / createSecurityGroup.py View on Github external
#! /usr/bin/python

import os
from boto import ec2
from boto.manage.cmdshell import sshclient_from_instance
import paramiko
from datetime import datetime


if __name__ == '__main__':

	# Create a security group for this cluster only.  Create it now so that
	# it can be associated with the new instance at create time; ingress
	# rules are added later, once the instance IP addresses are known.

	ec2conn = ec2.connection.EC2Connection( os.environ["AWS_ACCESS_KEY_ID"], os.environ["AWS_SECRET_ACCESS_KEY"] )

	# Timestamped name keeps concurrently launched clusters from colliding.
	group_name = "BDHA " + str( datetime.utcnow() )

	group = ec2conn.create_security_group( group_name, "BigdataHA Security Group" )

	# 'with' guarantees the env file is flushed and closed — the original
	# never closed the handle, risking a truncated .aws_security_group.
	with open( ".aws_security_group", "w" ) as envFile:
		envFile.write( 'export AWS_SECURITY_GROUP_PRIVATE="' + group_name + '"')
github nowait-tools / ansible-rabbitmq / library / ec2_search.py View on Github external
def todict(obj, classkey=None):
    """Recursively convert an object graph into plain dicts, lists and scalars.

    Dicts are converted value-by-value, iterables element-by-element, and
    arbitrary objects via their ``__dict__`` (skipping private and callable
    attributes).  When *classkey* is given, each converted object also gets
    a ``classkey`` entry holding its class name.  boto ``BlockDeviceType``
    objects are skipped (the branch returns None for them — see comment).
    """
    if isinstance(obj, dict):
        data = {}
        for (k, v) in obj.items():
            data[k] = todict(v, classkey)
        return data
    elif isinstance(obj, (str, bytes)):
        # Strings are iterable on Python 3; without this guard the
        # __iter__ branch below recurses forever, one character at a
        # time.  On Python 2 str has no __iter__, so this preserves the
        # original behavior there too.
        return obj
    elif hasattr(obj, "_ast"):
        return todict(obj._ast())
    elif hasattr(obj, "__iter__"):
        return [todict(v, classkey) for v in obj]
    elif hasattr(obj, "__dict__"):
        # This Class causes a recursive loop and at this time is not worth
        # debugging. If it's useful later I'll look into it.
        if not isinstance(obj, boto.ec2.blockdevicemapping.BlockDeviceType):
            # .items() (not the Python-2-only .iteritems()) keeps this
            # working on both major versions.
            data = dict([(key, todict(value, classkey))
                for key, value in obj.__dict__.items()
                if not callable(value) and not key.startswith('_')])
            if classkey is not None and hasattr(obj, "__class__"):
                data[classkey] = obj.__class__.__name__
            return data
    else:
        return obj
github galaxyproject / bioblend / bioblend / cloudman / launch.py View on Github external
# Fragment of a larger launch method (the enclosing 'def' and the start of
# this 'try' block's surrounding logic are outside this view; indentation
# reflects the original paste).
try:
                    # Resolve the VPC id from the requested subnet.
                    sn = self.vpc_conn.get_all_subnets(subnet_id)[0]
                    vpc_id = sn.vpc_id
                except (EC2ResponseError, IndexError):
                    # Best-effort: log and continue without a VPC id.
                    bioblend.log.exception("Trouble fetching subnet %s", subnet_id)
            cmsg = self.create_cm_security_group(sg, vpc_id=vpc_id)
            ret['error'] = cmsg['error']
            if ret['error']:
                return ret
            if cmsg['name']:
                ret['sg_names'].append(cmsg['name'])
                ret['sg_ids'].append(cmsg['sg_id'])
                if subnet_id:
                    # Must setup a network interface if launching into VPC
                    security_groups = None
                    interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                        subnet_id=subnet_id, groups=[cmsg['sg_id']],
                        associate_public_ip_address=True)
                    network_interfaces = (boto.ec2.networkinterface.
                                          NetworkInterfaceCollection(interface))
                else:
                    network_interfaces = None
        # Key pair creation: abort the launch on any error.
        kp_info = self.create_key_pair(key_name)
        ret['kp_name'] = kp_info['name']
        ret['kp_material'] = kp_info['material']
        ret['error'] = kp_info['error']
        if ret['error']:
            return ret
        # If not provided, try to find a placement
        # TODO: Should placement always be checked? To make sure it's correct
        # for existing clusters.
        if not placement:
github Voronenko / ansible-developer_recipes / ansible_extras / lookup_plugins / aws_secgroup_ids_from_names.py View on Github external
def run(self, terms, variables=None, **kwargs):
        """Look up EC2 security-group ids for the given (region, group_names) terms."""
        # A single string term is wrapped so terms[0]/terms[1] indexing works.
        if isinstance(terms, basestring):
            terms = [terms]
        region, group_names = terms[0], terms[1]

        connection = boto.ec2.connect_to_region(region)
        groups = connection.get_all_security_groups(
            filters={'group_name': group_names})
        return [group.id.encode('utf-8') for group in groups]
github ansible / ansible-modules-core / cloud / amazon / ec2_asg.py View on Github external
vpc_zone_identifier = module.params.get('vpc_zone_identifier')
    # Fragment of the ec2_asg module's group-management function (the
    # enclosing 'def' is outside this view); gathers module parameters,
    # then builds an autoscaling connection context and tag list.
    set_tags = module.params.get('tags')
    health_check_period = module.params.get('health_check_period')
    health_check_type = module.params.get('health_check_type')
    default_cooldown = module.params.get('default_cooldown')
    wait_for_instances = module.params.get('wait_for_instances')
    as_groups = connection.get_all_groups(names=[group_name])
    wait_timeout = module.params.get('wait_timeout')
    termination_policies = module.params.get('termination_policies')
    notification_topic = module.params.get('notification_topic')
    notification_types = module.params.get('notification_types')

    # With neither subnets nor AZs given, an EC2 connection is needed later
    # to discover the region's availability zones.
    if not vpc_zone_identifier and not availability_zones:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module)
        try:
            ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    elif vpc_zone_identifier:
        # The AWS API expects a single comma-separated subnet string.
        vpc_zone_identifier = ','.join(vpc_zone_identifier)

    asg_tags = []
    for tag in set_tags:
        for k,v in tag.iteritems():
            # 'propagate_at_launch' is a per-tag option, not a tag itself.
            if k !='propagate_at_launch':
                asg_tags.append(Tag(key=k,
                     value=v,
                     propagate_at_launch=bool(tag.get('propagate_at_launch', True)),
                     resource_id=group_name))

    if not as_groups:
        if not vpc_zone_identifier and not availability_zones: