# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Contrib contains extensions that are shipped with nova.
It can't be called 'extensions' because that causes namespacing problems.
"""
from oslo_log import log as logging
from nova.api.openstack import extensions
import nova.conf
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
def standard_extensions(ext_mgr):
    """Register every standard (in-tree) API extension with *ext_mgr*.

    Discovery is delegated to the shared loader, which walks this
    package (``__path__`` / ``__package__``) for extension modules.
    """
    extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__)
def select_extensions(ext_mgr):
    """Register only the extensions selected via configuration.

    Same discovery as :func:`standard_extensions`, but restricted to the
    names listed in ``CONF.osapi_compute_ext_list``.
    """
    selected = CONF.osapi_compute_ext_list
    extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__,
                                        selected)
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova.conf
from nova import utils
from nova.virt.libvirt.volume import volume as libvirt_volume
from os_brick import initiator
from os_brick.initiator import connector
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class LibvirtNVMEVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
    """Driver to attach NVMe volumes to libvirt."""

    def __init__(self, connection):
        super(LibvirtNVMEVolumeDriver,
              self).__init__(connection)
        # Host-side attach/detach is delegated to the os-brick NVMe
        # connector; the number of device discovery retries is bounded
        # by CONF.libvirt.num_nvme_discover_tries.
        self.connector = connector.InitiatorConnector.factory(
            initiator.NVME, utils.get_root_helper(),
            device_scan_attempts=CONF.libvirt.num_nvme_discover_tries)
def connect_volume(self, connection_info, instance):
device_info = self.connector.connect_volume(
# License for the specific language governing permissions and limitations
# under the License.
from os_win import constants
from os_win import exceptions as os_win_exc
from os_win import utilsfactory
from oslo_log import log as logging
import nova.conf
from nova import utils
from nova.virt import event as virtevent
from nova.virt.hyperv import serialconsoleops
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class InstanceEventHandler(object):
    """Maps Hyper-V VM power-state changes to nova lifecycle events.

    NOTE(review): only the class constants are documented here; the
    listener wiring lives in methods defined below this chunk.
    """

    # The event listener timeout is set to 0 in order to return immediately
    # and avoid blocking the thread.
    _WAIT_TIMEOUT = 0

    # Translation table from os-win Hyper-V VM states to the generic
    # virt-layer lifecycle events consumed by the compute manager.
    _TRANSITION_MAP = {
        constants.HYPERV_VM_STATE_ENABLED: virtevent.EVENT_LIFECYCLE_STARTED,
        constants.HYPERV_VM_STATE_DISABLED: virtevent.EVENT_LIFECYCLE_STOPPED,
        constants.HYPERV_VM_STATE_PAUSED: virtevent.EVENT_LIFECYCLE_PAUSED,
        constants.HYPERV_VM_STATE_SUSPENDED:
            virtevent.EVENT_LIFECYCLE_SUSPENDED
    }
def __init__(self, state_change_callback=None):
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova.conf
from nova.notifications.objects import base
from nova.notifications.objects import flavor as flavor_payload
from nova.notifications.objects import keypair as keypair_payload
from nova.objects import base as nova_base
from nova.objects import fields
CONF = nova.conf.CONF
@nova_base.NovaObjectRegistry.register_notification
class InstancePayload(base.NotificationPayloadBase):
SCHEMA = {
'uuid': ('instance', 'uuid'),
'user_id': ('instance', 'user_id'),
'tenant_id': ('instance', 'project_id'),
'reservation_id': ('instance', 'reservation_id'),
'display_name': ('instance', 'display_name'),
'display_description': ('instance', 'display_description'),
'host_name': ('instance', 'hostname'),
'host': ('instance', 'host'),
'node': ('instance', 'node'),
'os_type': ('instance', 'os_type'),
'architecture': ('instance', 'architecture'),
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields as obj_fields
from nova.objects import instance as obj_instance
from nova import rpc
from nova.scheduler.filters import utils as filters_utils
from nova.virt import hardware
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
GroupDetails = collections.namedtuple('GroupDetails', ['hosts', 'policy',
'members'])
class ResourceRequest(object):
    """Presents a granular resource request via RequestGroup instances."""

    # extra_specs-specific consts
    XS_RES_PREFIX = 'resources'
    XS_TRAIT_PREFIX = 'trait'

    # Regex patterns for numbered or un-numbered resources/trait keys,
    # e.g. "resources:VCPU", "resources1:DISK_GB" or "trait3:SOME_TRAIT".
    # Groups: (1) prefix, (2) optional positive suffix number, (3) the rest.
    XS_KEYPAT = re.compile(r"^(%s)([1-9][0-9]*)?:(.*)$" %
                           '|'.join((XS_RES_PREFIX, XS_TRAIT_PREFIX)))
def __init__(self, request_spec, enable_pinning_translate=True):
"""Create a new instance of ResourceRequest from a RequestSpec.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
from oslo_serialization import jsonutils
from nova import conf as cfg
from nova.objects import fields
CONF = cfg.CONF
# Power VM hypervisor info
# Normally, the hypervisor version is a string in the form of '8.0.0' and
# converted to an int with nova.virt.utils.convert_version_to_int() however
# there isn't currently a mechanism to retrieve the exact version.
# Complicating this is the fact that nova conductor only allows live migration
# from the source host to the destination if the source is equal to or less
# than the destination version. PowerVM live migration limitations are
# checked by the PowerVM capabilities flags and not specific version levels.
# For that reason, we'll just publish the major level.
IBM_POWERVM_HYPERVISOR_VERSION = 8

# The types of LPARS that are supported.
# Each entry is a (guest architecture, hypervisor type, VM mode) tuple
# built from the nova.objects.fields enums.
POWERVM_SUPPORTED_INSTANCES = [
    (fields.Architecture.PPC64, fields.HVType.PHYP, fields.VMMode.HVM),
    (fields.Architecture.PPC64LE, fields.HVType.PHYP, fields.VMMode.HVM)]
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
BuildFailure Weigher. Weigh hosts by the number of recent failed boot attempts.
"""
import nova.conf
from nova.scheduler import utils
from nova.scheduler import weights
CONF = nova.conf.CONF
class BuildFailureWeigher(weights.BaseHostWeigher):
    """Penalize hosts in proportion to their recent failed boot attempts."""

    def weight_multiplier(self, host_state):
        """Return the configured multiplier, negated.

        Negation makes a larger failure count lower the host's final
        weight rather than raise it.
        """
        configured = utils.get_weight_multiplier(
            host_state, 'build_failure_weight_multiplier',
            CONF.filter_scheduler.build_failure_weight_multiplier)
        return -configured

    def _weigh_object(self, host_state, weight_properties):
        """Raw weight is the host's failed-build count.

        Higher weights win; combined with the negative multiplier above,
        every recorded failure pushes the host down the ranking.
        """
        return host_state.failed_builds
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import nova.conf
from nova import exception
from nova.i18n import _, _LI
import nova.image.download.base as xfer_base
import nova.virt.libvirt.utils as lv_utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
# This module extends the configuration options for nova.conf. If the user
# wishes to use the specific configuration settings the following needs to
# be added to nova.conf:
# [image_file_url]
# filesystem = <a list="">
#
# For each entry in the filesystem list a new configuration section must be
# added with the following format:
# [image_file_url:]
# id =
# mountpoint =
#
# id:</a>
"""
Ironic host manager.
This host manager will consume all cpu's, disk space, and
ram from a host / node as it is supporting Baremetal hosts, which can not be
subdivided into multiple instances.
"""
from oslo_log import log as logging
import nova.conf
from nova import context as context_module
from nova import objects
from nova.objects import fields as obj_fields
from nova.scheduler import host_manager
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class IronicNodeState(host_manager.HostState):
"""Mutable and immutable information tracked for a host.
This is an attempt to remove the ad-hoc data structures
previously used and lock down access.
"""
def _update_from_compute_node(self, compute):
"""Update information about a host from a ComputeNode object."""
# NOTE(jichenjc): if the compute record is just created but not updated
# some field such as free_disk_gb can be None
if 'free_disk_gb' not in compute or compute.free_disk_gb is None:
"""VIF drivers for XenAPI."""
from oslo_log import log as logging
from nova.compute import power_state
import nova.conf
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova.network import model as network_model
from nova.virt.xenapi import network_utils
from nova.virt.xenapi import vm_utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class XenVIFDriver(object):
    """Base VIF driver for XenAPI; holds the session used for all calls."""

    def __init__(self, xenapi_session):
        # XenAPI session through which every VIF operation is issued.
        self._session = xenapi_session
def _get_vif_ref(self, vif, vm_ref):
vif_refs = self._session.call_xenapi("VM.get_VIFs", vm_ref)
for vif_ref in vif_refs:
try:
vif_rec = self._session.call_xenapi('VIF.get_record', vif_ref)
if vif_rec['MAC'] == vif['address']:
return vif_ref
except Exception: