How to use the nova.openstack.common.gettextutils._ function in nova

To help you get started, we’ve selected a few nova examples based on popular ways the nova.openstack.common.gettextutils._ function is used in public projects.

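Every example below relies on the same basic pattern: _ is nova's gettext wrapper, so each user-facing message literal is wrapped in _() to mark it for translation, and substitution values are passed separately where possible so translation happens before interpolation. The sketch below shows that pattern in isolation; the detach_volume function and its messages are illustrative rather than taken from nova, and the log import assumes the nova.openstack.common.log module that nova code of this era typically uses alongside gettextutils.

from nova.openstack.common import log as logging
from nova.openstack.common.gettextutils import _

LOG = logging.getLogger(__name__)


def detach_volume(volume_id):
    """Illustrative helper showing the translation pattern (not nova code)."""
    # Wrap the literal in _() and pass substitutions as a mapping so the
    # message can be translated before the values are interpolated.
    LOG.info(_('Detaching volume %(volume_id)s'), {'volume_id': volume_id})

    if volume_id is None:
        # Exception messages are wrapped the same way; here % formatting is
        # applied to the already-translated string.
        raise ValueError(_('A volume id is required, got %r') % volume_id)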

github openstack/nova: nova/api/ec2/cloud.py
    def attach_volume(self, context,
                      volume_id,
                      instance_id,
                      device, **kwargs):
        validate_ec2_id(instance_id)
        validate_ec2_id(volume_id)
        volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
        instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
        instance = self.compute_api.get(context, instance_uuid,
                                        want_objects=True)
        LOG.audit(_('Attach volume %(volume_id)s to instance %(instance_id)s '
                    'at %(device)s'),
                  {'volume_id': volume_id,
                   'instance_id': instance_id,
                   'device': device},
                  context=context)

        self.compute_api.attach_volume(context, instance, volume_id, device)
        volume = self.volume_api.get(context, volume_id)
        ec2_attach_status = ec2utils.status_to_ec2_attach_status(volume)

        return {'attachTime': volume['attach_time'],
                'device': volume['mountpoint'],
                'instanceId': ec2utils.id_to_ec2_inst_id(instance_uuid),
                'requestId': context.request_id,
                'status': ec2_attach_status,
                'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}

github openstack/nova: nova/virt/baremetal/virtual_power_driver.py
    def _get_conn(self):
        if not CONF.baremetal.virtual_power_ssh_host:
            raise exception.NovaException(
                _('virtual_power_ssh_host not defined. Can not Start'))

        if not CONF.baremetal.virtual_power_host_user:
            raise exception.NovaException(
                _('virtual_power_host_user not defined. Can not Start'))

        if not CONF.baremetal.virtual_power_host_pass:
            # it is ok to not have a password if you have a keyfile
            if CONF.baremetal.virtual_power_host_key is None:
                raise exception.NovaException(
                    _('virtual_power_host_pass/key not set. Can not Start'))

        _conn = connection.Connection(
            CONF.baremetal.virtual_power_ssh_host,
            CONF.baremetal.virtual_power_host_user,
            CONF.baremetal.virtual_power_host_pass,
            CONF.baremetal.virtual_power_ssh_port,

github openstack/nova: nova/openstack/common/rpc/impl_zmq.py
    with Timeout(timeout, exception=rpc_common.Timeout):
        try:
            msg_waiter = ZmqSocket(
                "ipc://%s/zmq_topic_zmq_replies.%s" %
                (CONF.rpc_zmq_ipc_dir,
                 CONF.rpc_zmq_host),
                zmq.SUB, subscribe=msg_id, bind=False
            )

            LOG.debug(_("Sending cast"))
            _cast(addr, context, topic, payload, envelope)

            LOG.debug(_("Cast sent; Waiting reply"))
            # Blocks until receives reply
            msg = msg_waiter.recv()
            LOG.debug(_("Received message: %s"), msg)
            LOG.debug(_("Unpacking response"))

            if msg[2] == 'cast':  # Legacy version
                raw_msg = _deserialize(msg[-1])[-1]
            elif msg[2] == 'impl_zmq_v2':
                rpc_envelope = unflatten_envelope(msg[4:])
                raw_msg = rpc_common.deserialize_msg(rpc_envelope)
            else:
                raise rpc_common.UnsupportedRpcEnvelopeVersion(
                    _("Unsupported or unknown ZMQ envelope returned."))

            responses = raw_msg['args']['response']
        # ZMQError trumps the Timeout error.
        except zmq.ZMQError:
            raise RPCException("ZMQ Socket Error")
        except (IndexError, KeyError):

github openstack/nova: nova/virt/baremetal/pxe.py
                                % instance['uuid'])
                    locals['started'] = True
                elif status in (baremetal_states.DEPLOYDONE,
                                baremetal_states.ACTIVE):
                    LOG.info(_("PXE deploy completed for instance %s")
                                % instance['uuid'])
                    raise loopingcall.LoopingCallDone()
                elif status == baremetal_states.DEPLOYFAIL:
                    locals['error'] = _("PXE deploy failed for instance %s")
            except exception.NodeNotFound:
                locals['error'] = _("Baremetal node deleted while waiting "
                                    "for deployment of instance %s")

            if (CONF.baremetal.pxe_deploy_timeout and
                    timeutils.utcnow() > expiration):
                locals['error'] = _("Timeout reached while waiting for "
                                     "PXE deploy of instance %s")
            if locals['error']:
                raise loopingcall.LoopingCallDone()

github gridcentric/cobalt: cobalt/nova/osapi/cobalt_extension.py
    def _handle_quota_error(self, error):
        """
        Reraise quota errors as api-specific http exceptions
        """

        code_mappings = {
            "OnsetFileLimitExceeded":
                    _("Personality file limit exceeded"),
            "OnsetFilePathLimitExceeded":
                    _("Personality file path too long"),
            "OnsetFileContentLimitExceeded":
                    _("Personality file content too long"),

            # NOTE(bcwaldon): expose the message generated below in order
            # to better explain how the quota was exceeded
            "InstanceLimitExceeded": error.message,
        }

        code = error.kwargs['code']
        expl = code_mappings.get(code, error.message) % error.kwargs
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=expl,
                                            headers={'Retry-After': 0})

github gridcentric/cobalt: cobalt/nova/extension/manager.py
        context = context.elevated()
        # FIXME: This live migration code does not currently support volumes,
        # nor floating IPs. Both of these would be fairly straight-forward to
        # add but probably cry out for a better factoring of this class as much
        # as this code can be inherited directly from the ComputeManager. The
        # only real difference is that the migration must not go through
        # libvirt, instead we drive it via our bless, launch routines.

        src = instance['host']

        if src != self.host:
            # This can happen if two migration requests come in at the same time. We lock the
            # instance so that the migrations will happen serially. However, after the first
            # migration, we cannot proceed with the second one. For that case we just throw an
            # exception and leave the instance intact.
            raise exception.NovaException(_("Cannot migrate an instance that is on another host."))

        # Figure out the migration address.
        migration_address = self._get_migration_address(dest)

        # Grab the network info.
        network_info = self.network_api.get_instance_nw_info(context, instance)

        # Update the system_metadata for migration.
        system_metadata = self._system_metadata_get(instance)
        system_metadata['gc_src_host'] = self.host
        system_metadata['gc_dst_host'] = dest
        self._instance_update(context, instance,
                              system_metadata=system_metadata)

        # Prepare the destination for live migration.
        # NOTE(dscannell): The instance's host needs to change for the pre_live_migration

github openstack/nova: nova/virt/powervm/operator.py
        # Memory
        mem = instance['memory_mb']
        if host_stats and mem > host_stats['host_memory_free']:
            LOG.error(_('Not enough free memory in the host'))
            raise exception.PowerVMInsufficientFreeMemory(
                                           instance_name=instance['name'])
        mem_min = min(mem, constants.POWERVM_MIN_MEM)
        mem_max = mem + constants.POWERVM_MAX_MEM

        # CPU
        cpus = instance['vcpus']
        if host_stats:
            avail_cpus = host_stats['vcpus'] - host_stats['vcpus_used']
            if cpus > avail_cpus:
                LOG.error(_('Insufficient available CPU on PowerVM'))
                raise exception.PowerVMInsufficientCPU(
                                           instance_name=instance['name'])
        cpus_min = min(cpus, constants.POWERVM_MIN_CPUS)
        cpus_max = cpus + constants.POWERVM_MAX_CPUS
        cpus_units_min = decimal.Decimal(cpus_min) / decimal.Decimal(10)
        cpus_units = decimal.Decimal(cpus) / decimal.Decimal(10)

        # Network
        # To ensure the MAC address on the guest matches the
        # generated value, pull the first 10 characters off the
        # MAC address for the mac_base_value parameter and then
        # get the integer value of the final 2 characters as the
        # slot_id parameter
        mac = network_info[0]['address']
        mac_base_value = (mac[:-2]).replace(':', '')
        eth_id = self._operator.get_virtual_eth_adapter_id()

github openstack/nova: nova/db/sqlalchemy/migrate_repo/versions/145_add_volume_usage_cache.py
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    volume_usage_cache = Table('volume_usage_cache', meta, autoload=True)
    try:
        volume_usage_cache.drop()
    except Exception:
        LOG.error(_("volume_usage_cache table not dropped"))
        raise

github openstack/nova: nova/virt/baremetal/virtual_power_driver.py
        If {_NodeName_} is in the command it will get replaced by
        the _matched_name value.

        base_cmd will also get prepended to the command.
        """
        self._set_connection()

        cmd = cmd.replace('{_NodeName_}', self._matched_name)

        cmd = '%s %s' % (self._vp_cmd.base_cmd, cmd)

        try:
            stdout, stderr = processutils.ssh_execute(
                self._connection, cmd, check_exit_code=check_exit_code)
            result = stdout.strip().splitlines()
            LOG.debug(_('Result for run_command: %s'), result)
        except processutils.ProcessExecutionError:
            result = []
            LOG.exception(_("Error running command: %s"), cmd)
        return result

github openstack/nova: nova/virt/baremetal/db/sqlalchemy/api.py
def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
    session = db_session.get_session()
    with session.begin():
        bm_interface = model_query(context, models.BareMetalInterface,
                                read_deleted="no", session=session).\
                         filter_by(id=if_id).\
                         with_lockmode('update').\
                         first()
        if not bm_interface:
            raise exception.NovaException(_("Baremetal interface %s "
                        "not found") % if_id)

        bm_interface.vif_uuid = vif_uuid
        try:
            session.add(bm_interface)
            session.flush()
        except db_exc.DBError as e:
            # TODO(deva): clean up when db layer raises DuplicateKeyError
            if str(e).find('IntegrityError') != -1:
                raise exception.NovaException(_("Baremetal interface %s "
                        "already in use") % vif_uuid)
            raise