Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_stdout_capture(self):
    """StdOutCapture gathers printed lines and restores sys.stdout on exit."""
    original_stdout = sys.stdout
    expected = ["abcdef", "defgaga"]
    with utils.StdOutCapture() as captured:
        for line in expected:
            print(line)
    # Everything printed inside the context manager must be captured,
    # one line per message.
    captured_lines = captured.getvalue().rstrip("\n").split("\n")
    self.assertEqual(expected, captured_lines)
    # Leaving the context manager must put the real stream back.
    self.assertEqual(sys.stdout, original_stdout)
# NOTE(review): this fragment is the tail of a test method -- ``One`` and
# ``self`` are bound earlier, outside the visible chunk.
# Each class overrides a different subset of the random-name settings and
# stubs out ``name_matches_object`` so the calls can be asserted on below.
class Two(utils.RandomNameGeneratorMixin):
RESOURCE_NAME_FORMAT = "foo_XXX_XXX"
name_matches_object = mock.Mock(return_value=False)
class Three(utils.RandomNameGeneratorMixin):
RESOURCE_NAME_ALLOWED_CHARACTERS = "12345"
name_matches_object = mock.Mock(return_value=False)
class Four(utils.RandomNameGeneratorMixin):
RESOURCE_NAME_FORMAT = "bar_XXX_XXX"
RESOURCE_NAME_ALLOWED_CHARACTERS = "abcdef"
name_matches_object = mock.Mock(return_value=False)
# The helper must report False only when every class rejects the name,
# and it must consult each class exactly once.
classes = (One, Two, Three, Four)
name = "foo"
self.assertFalse(utils.name_matches_object(name, *classes))
for cls in classes:
cls.name_matches_object.assert_called_once_with(name)
def init_sahara_context(context_instance):
    """Ensure "sahara" sub-dicts exist on the context and on every tenant.

    Existing "sahara" entries are left untouched; missing ones are
    initialized to empty dicts.
    """
    ctx = context_instance.context
    ctx.setdefault("sahara", {})
    for user, tenant_id in rutils.iterate_per_tenants(ctx["users"]):
        ctx["tenants"][tenant_id].setdefault("sahara", {})
# NOTE(review): fragment of an image-context ``setup`` -- ``image_args``,
# ``images_per_tenant``, ``container_format``, ``image_url`` and
# ``disk_format`` are bound earlier, outside the visible chunk.
visibility = ("public" if image_args["is_public"]
else "private")
# NOTE(review): ``min_ram``/``min_disk`` are assigned only when the key is
# present in ``image_args`` and absent from ``self.config``; if neither
# branch runs they must be bound elsewhere, otherwise the create_image()
# call below raises NameError -- confirm against the full method.
if "min_ram" in image_args:
if "min_ram" not in self.config:
min_ram = image_args["min_ram"]
if "min_disk" in image_args:
if "min_disk" not in self.config:
min_disk = image_args["min_disk"]
# None image_name means that image.Image will generate a random name
image_name = None
# A fixed name is only honored for a single image per tenant; several
# images would otherwise all receive the same name.
if "image_name" in self.config and images_per_tenant == 1:
image_name = self.config["image_name"]
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
current_images = []
# Per-tenant client set authenticated with this tenant's user.
clients = osclients.Clients(user["credential"])
image_service = image.Image(
clients, name_generator=self.generate_random_name)
for i in range(images_per_tenant):
image_obj = image_service.create_image(
image_name=image_name,
container_format=container_format,
image_location=image_url,
disk_format=disk_format,
visibility=visibility,
min_disk=min_disk,
min_ram=min_ram)
current_images.append(image_obj.id)
def setup(self):
    """Create the configured number of networks for every tenant."""
    # NOTE(rkiran): Some clients are not thread-safe. Thus during
    #               multithreading/multiprocessing, it is likely the
    #               sockets are left open. This problem is eliminated by
    #               creating a connection in setup and cleanup separately.
    admin_clients = osclients.Clients(self.context["admin"]["credential"])
    wrapper = network_wrapper.wrap(admin_clients, self, config=self.config)

    # Pass DNS name servers through only when explicitly configured.
    dns_servers = self.config["dns_nameservers"]
    extra_args = {} if dns_servers is None else {
        "dns_nameservers": dns_servers}

    for user, tenant_id in utils.iterate_per_tenants(
            self.context.get("users", [])):
        tenant_networks = []
        for _ in range(self.config["networks_per_tenant"]):
            # NOTE(amaretskiy): router_create_args and subnets_num take
            #                   effect for Neutron only.
            create_args = self.config["network_create_args"].copy()
            tenant_networks.append(wrapper.create_network(
                tenant_id,
                dualstack=self.config["dualstack"],
                subnets_num=self.config["subnets_per_network"],
                network_create_args=create_args,
                router_create_args=self.config["router"],
                **extra_args))
        self.context["tenants"][tenant_id]["networks"] = tenant_networks
:param volume_id: volume uuid for creating snapshot
:param force: flag to indicate whether to snapshot a volume even if
it's attached to an instance
:param name: Name of the snapshot
:param description: Description of the snapshot
:returns: Created snapshot object
"""
kwargs = {"force": force,
"name": name or self.generate_random_name(),
"description": description,
"metadata": metadata}
snapshot = self._get_client().volume_snapshots.create(volume_id,
**kwargs)
# Give cinder a moment to register the snapshot before polling.
rutils.interruptable_sleep(
CONF.openstack.cinder_volume_create_prepoll_delay)
# Block until the snapshot reaches the "available" state (or times out).
snapshot = self._wait_available_volume(snapshot)
return snapshot
# Reuse the user's existing EC2 credentials if any exist; otherwise have
# keystone create a fresh access/secret pair.
creds = keystone().ec2.list(user['id'])
if not creds:
creds = keystone().ec2.create(user['id'],
user['tenant_id'])
else:
creds = creds[0]
# NOTE(review): ``keystone`` is called as a function above but used as an
# object here -- one of the two is probably wrong; confirm against the
# enclosing scope.
url = keystone.service_catalog.url_for(service_type='ec2')
# Stash everything a boto-style EC2 client needs on the user record.
# NOTE(review): region is hard-coded to 'RegionOne' -- presumably the
# default devstack region; verify for multi-region deployments.
user['ec2args'] = {
'region': 'RegionOne',
'url': url,
'access': creds.access,
'secret': creds.secret
}
# Per-tenant router/floating-IP quota handling applies to Neutron only.
if self.net_wrapper.SERVICE_IMPL == consts.Service.NEUTRON:
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
# -1 removes the quota limit for routers and floating IPs.
body = {"quota": {"router": -1, "floatingip": -1}}
self.net_wrapper.client.update_quota(tenant_id, body)
network = self.net_wrapper.create_network(
tenant_id, add_router=True, subnets_num=1)
self.context["tenants"][tenant_id]["network"] = network
except Exception as e:
msg = "Can't prepare ec2 client: %s" % e.message
if logging.is_debug():
LOG.exception(msg)
else:
LOG.warning(msg)
task = api.task.get(task_id=task_id, detailed=True)
failed_criteria = 0
data = []
STATUS_PASS = "PASS"
STATUS_FAIL = "FAIL"
for workload in itertools.chain(
*[s["workloads"] for s in task["subtasks"]]):
for sla in sorted(workload["sla_results"].get("sla", []),
key=lambda x: x["criterion"]):
success = sla.pop("success")
sla["status"] = success and STATUS_PASS or STATUS_FAIL
sla["benchmark"] = workload["name"]
sla["pos"] = workload["position"]
failed_criteria += int(not success)
data.append(sla if tojson else rutils.Struct(**sla))
if tojson:
print(json.dumps(data, sort_keys=False))
else:
cliutils.print_list(data, ("benchmark", "pos", "criterion",
"status", "detail"))
return failed_criteria
def setup(self):
"""Launch a sahara cluster on behalf of every tenant in the context."""
utils.init_sahara_context(self)
self.context["sahara"]["clusters"] = {}
wait_dict = {}
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
# The image was registered earlier by the sahara image context.
image_id = self.context["tenants"][tenant_id]["sahara"]["image"]
floating_ip_pool = self.config.get("floating_ip_pool")
# Throwaway per-tenant context so the scenario helper acts on
# behalf of this tenant's user.
temporary_context = {
"user": user,
"tenant": self.context["tenants"][tenant_id],
"task": self.context["task"],
"owner_id": self.context["owner_id"]
}
scenario = utils.SaharaScenario(context=temporary_context)
# NOTE(review): ``port``, ``protocol``, ``labels`` and ``status_wait``
# are never bound in this chunk -- the call below looks truncated or
# spliced from another file; confirm against the full source.
cluster = scenario._launch_cluster(
plugin_name=self.config["plugin_name"],
port=port,
protocol=protocol,
labels=labels,
status_wait=status_wait
)
# Expose the workload through a ClusterIP service. When a custom endpoint
# is requested, labels are omitted -- presumably so kubernetes does not
# auto-populate the endpoints and the hand-made one below takes effect;
# confirm against the full scenario.
self.client.create_service(
name,
namespace=namespace,
port=port,
protocol=protocol,
type="ClusterIP",
labels=(None if custom_endpoint else labels)
)
# Give the service/pod a moment to settle before querying the pod IP.
commonutils.interruptable_sleep(CONF.kubernetes.start_prepoll_delay)
if custom_endpoint:
# Attach a manually built Endpoints object pointing at the pod's IP,
# then curl the pod directly.
ip = self.client.get_pod(name, namespace=namespace).status.pod_ip
self.client.create_endpoints(
name,
namespace=namespace,
ip=ip,
port=port
)
command = ["curl", "%s:%s" % (ip, port)]
else:
# Otherwise collect the addresses/ports of the label-selected
# endpoints kubernetes created for the service.
endpoints = self.client.get_endpoints(name, namespace=namespace)
ips = []
for subset in endpoints.subsets:
addrs = [addr.ip for addr in subset.addresses]
ports = [p.port for p in subset.ports]