How to use the nova.objects module in nova

To help you get started, we’ve selected a few nova examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github openstack / nova / nova / api / openstack / compute / quota_sets.py View on Github external
if not force_update:
                minimum = settable_quotas[key]['minimum']
                maximum = settable_quotas[key]['maximum']
                self._validate_quota_limit(key, value, minimum, maximum)
            valid_quotas[key] = value

        # NOTE(dims): Pass #2 - At this point we know that all the
        # values are correct and we can iterate and update them all in one
        # shot without having to worry about rolling back etc as we have done
        # the validation up front in the loop above.
        for key, value in valid_quotas.items():
            try:
                objects.Quotas.create_limit(context, project_id,
                                            key, value, user_id=user_id)
            except exception.QuotaExists:
                objects.Quotas.update_limit(context, project_id,
                                            key, value, user_id=user_id)
        # Note(gmann): Removed 'id' from update's response to make it same
        # as V2. If needed it can be added with microversion.
        return self._format_quota_set(
            None,
            self._get_quotas(context, id, user_id=user_id),
            filtered_quotas=filtered_quotas)
github openstack / nova / nova / api / openstack / compute / extended_volumes.py View on Github external
def _get_instance_bdms_in_multiple_cells(ctxt, servers):
        """Collect block device mappings for servers spread across cells.

        Resolves each server to its cell mapping, then queries every
        involved cell in parallel (60s budget) for the servers' BDMs.
        Cells that raise or time out are logged and skipped, so the
        result may be partial.
        """
        uuids = [srv['id'] for srv in servers]
        mappings = objects.InstanceMappingList.get_by_instance_uuids(
                        ctxt, uuids)

        # De-duplicate cell mappings by cell UUID so each cell is
        # queried exactly once.
        cells = {}
        for mapping in mappings:
            cell = mapping.cell_mapping
            if cell is not None and cell.uuid not in cells:
                cells[cell.uuid] = cell

        gathered = context.scatter_gather_cells(
                        ctxt, cells.values(), 60,
                        objects.BlockDeviceMappingList.bdms_by_instance_uuid,
                        uuids)
        bdms = {}
        for cell_uuid, outcome in gathered.items():
            # Sentinel values mark cells that failed or never answered.
            if outcome is context.raised_exception_sentinel:
                LOG.warning('Failed to get block device mappings for cell %s',
                            cell_uuid)
            elif outcome is context.did_not_respond_sentinel:
                LOG.warning('Timeout getting block device mappings for cell '
                            '%s', cell_uuid)
            else:
                bdms.update(outcome)
        return bdms
github openstack / nova / nova / conductor / tasks / migrate.py View on Github external
def _support_resource_request(self, selection):
        """Return True if migration with resource request is supported.

        The selected host's nova-compute service must be new enough
        (service version >= 39) and the compute RPC API must not be
        pinned below the version supporting resize with QoS ports.
        """
        service = objects.Service.get_by_host_and_binary(
            self.context, selection.service_host, 'nova-compute')
        # Too-old compute service: no point checking the RPC pin.
        if service.version < 39:
            return False
        return self.compute_rpcapi.supports_resize_with_qos_port(self.context)
github openstack / nova / nova / network / neutronv2 / api.py View on Github external
def _make_floating_ip_obj(self, context, fip, pool_dict, port_dict):
        """Convert a neutron floating IP dict into a NeutronFloatingIP object.

        Looks up the pool in pool_dict and, when the floating IP is
        associated with a port, resolves the owning instance uuid from
        port_dict.
        """
        pool = pool_dict[fip['floating_network_id']]
        # NOTE(danms): Don't give these objects a context, since they're
        # not lazy-loadable anyway
        floating = objects.floating_ip.NeutronFloatingIP(
            id=fip['id'], address=fip['floating_ip_address'],
            pool=(pool['name'] or pool['id']), project_id=fip['tenant_id'],
            fixed_ip_id=fip['port_id'])
        # In Neutron v2 API fixed_ip_address and instance uuid
        # (= device_id) are known here, so pass it as a result.
        fixed_address = fip['fixed_ip_address']
        floating.fixed_ip = (
            objects.FixedIP(address=fixed_address) if fixed_address else None)
        port_id = fip['port_id']
        if not port_id:
            floating.instance = None
        else:
            instance_uuid = port_dict[port_id]['device_id']
            # NOTE(danms): This could be .refresh()d, so give it context
            floating.instance = objects.Instance(context=context,
                                                 uuid=instance_uuid)
            if floating.fixed_ip:
                floating.fixed_ip.instance_uuid = instance_uuid
        return floating
github openstack / nova / nova / api / openstack / compute / plugins / v3 / servers.py View on Github external
search_opts['user_id'] = context.user_id

        limit, marker = common.get_limit_and_marker(req)
        sort_keys, sort_dirs = common.get_sort_params(req.params)
        try:
            instance_list = self.compute_api.get_all(elevated or context,
                    search_opts=search_opts, limit=limit, marker=marker,
                    want_objects=True, expected_attrs=['pci_devices'],
                    sort_keys=sort_keys, sort_dirs=sort_dirs)
        except exception.MarkerNotFound:
            msg = _('marker [%s] not found') % marker
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.FlavorNotFound:
            LOG.debug("Flavor '%s' could not be found ",
                      search_opts['flavor'])
            instance_list = objects.InstanceList()

        if is_detail:
            instance_list.fill_faults()
            response = self._view_builder.detail(req, instance_list)
        else:
            response = self._view_builder.index(req, instance_list)
        req.cache_db_instances(instance_list)
        return response
github openstack / nova / nova / api / openstack / compute / server_tags.py View on Github external
def _get_instance_mapping(context, server_id):
    """Look up the instance mapping for a server.

    A missing mapping is translated into an HTTP 404 response so API
    callers see "server not found" rather than an internal error.
    """
    try:
        mapping = objects.InstanceMapping.get_by_instance_uuid(context,
                                                               server_id)
    except exception.InstanceMappingNotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.format_message())
    return mapping
github openstack / nova / nova / objects / fixed_ip.py View on Github external
def _from_db_object(context, fixedip, db_fixedip, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        for field in fixedip.fields:
            if field == 'default_route':
                # NOTE(danms): This field is only set when doing a
                # FixedIPList.get_by_network() because it's a relatively
                # special-case thing, so skip it here
                continue
            if field not in FIXED_IP_OPTIONAL_ATTRS:
                fixedip[field] = db_fixedip[field]
        # NOTE(danms): Instance could be deleted, and thus None
        if 'instance' in expected_attrs:
            fixedip.instance = objects.Instance._from_db_object(
                context,
                objects.Instance(context),
                db_fixedip['instance']) if db_fixedip['instance'] else None
        if 'network' in expected_attrs:
            fixedip.network = objects.Network._from_db_object(
                context,
                objects.Network(context),
                db_fixedip['network']) if db_fixedip['network'] else None
        if 'virtual_interface' in expected_attrs:
            db_vif = db_fixedip['virtual_interface']
            vif = objects.VirtualInterface._from_db_object(
                context,
                objects.VirtualInterface(context),
                db_fixedip['virtual_interface']) if db_vif else None
            fixedip.virtual_interface = vif
        if 'floating_ips' in expected_attrs:
github openstack / nova / nova / network / manager.py View on Github external
def deallocate_fixed_ip(self, context, address, host=None, teardown=True,
            instance=None):
        """Call the superclass deallocate_fixed_ip if i'm the correct host
        otherwise call to the correct host
        """
        fixed_ip = objects.FixedIP.get_by_address(
            context, address, expected_attrs=['network'])
        network = fixed_ip.network

        # NOTE(vish): if we are not multi_host pass to the network host
        # NOTE(tr3buchet): but if we are, host came from instance.host
        if not network.multi_host:
            host = network.host
        if host == self.host:
            # NOTE(vish): deallocate the fixed ip locally
            return super(RPCAllocateFixedIP, self).deallocate_fixed_ip(context,
                    address, instance=instance)

        if network.multi_host:
            service = objects.Service.get_by_host_and_binary(
                context, host, 'nova-network')
            if not service or not self.servicegroup_api.service_is_up(service):
github openstack / compute-hyperv / compute_hyperv / nova / block_device_manager.py View on Github external
elif block_device.new_format_is_ephemeral(bdm):
            attachment_info = self._get_eph_bdm_attachment_info(
                instance, bdm)

        if not attachment_info:
            LOG.debug("No attachment info retrieved for bdm %s.", bdm)
            return

        tags = [bdm.tag] if bdm.tag else []
        bus = self._get_device_bus(
            attachment_info['controller_type'],
            attachment_info['controller_addr'],
            attachment_info['controller_slot'])
        serial = attachment_info.get('serial')

        return objects.DiskMetadata(bus=bus,
                                    tags=tags,
                                    serial=serial)
github openstack / nova / nova / api / openstack / compute / legacy_v2 / contrib / simple_tenant_usage.py View on Github external
def _tenant_usages_for_period(self, context, period_start,
                                  period_stop, tenant_id=None, detailed=True):

        instances = objects.InstanceList.get_active_by_window_joined(
                        context, period_start, period_stop, tenant_id,
                        expected_attrs=['flavor'])
        rval = {}
        flavors = {}

        for instance in instances:
            info = {}
            info['hours'] = self._hours_for(instance,
                                            period_start,
                                            period_stop)
            flavor = self._get_flavor(context, instance, flavors)
            if not flavor:
                info['flavor'] = ''
            else:
                info['flavor'] = flavor.name