# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def after_lap(self):
    """Finish bookkeeping for the lap that just completed.

    Logs lap completion, and — for multi-lap races — records this lap's
    results, prints a per-lap summary, and reports the lap duration together
    with an ETA for the remaining laps.
    """
    logger.info("Finished lap [%d/%d]" % (self.current_lap, self.race.total_laps))
    if self.race.total_laps > 1:
        # split_time() is cumulative across laps; subtract what we have
        # already accounted for to isolate this lap's duration.
        current_lap_time = self.lap_timer.split_time() - self.lap_times
        self.lap_times += current_lap_time
        lap_h, lap_m, lap_s = convert.seconds_to_hour_minute_seconds(current_lap_time)
        self.race.add_lap_results(reporter.calculate_results(self.metrics_store, self.race, self.current_lap))
        reporter.summarize(self.race, self.cfg, lap=self.current_lap)
        console.println("")
        laps_left = self.race.total_laps - self.current_lap
        if laps_left > 0:
            # ETA assumes remaining laps take the mean duration of the laps so far.
            eta = laps_left * self.lap_times / self.current_lap
            eta_h, eta_m, eta_s = convert.seconds_to_hour_minute_seconds(eta)
            console.info("Lap time %02d:%02d:%02d (ETA: %02d:%02d:%02d)" % (lap_h, lap_m, lap_s, eta_h, eta_m, eta_s), logger=logger)
        else:
            console.info("Lap time %02d:%02d:%02d" % (lap_h, lap_m, lap_s), logger=logger)
        console.println("")
# NOTE(review): fragment — `c` (presumably a challenge whose schedule is being
# listed) and format_task() are defined outside this excerpt; indentation
# reconstructed since the scrape stripped it.
console.println("")
# Print the schedule as a numbered task list.
for num, task in enumerate(c.schedule, start=1):
    if task.nested:
        # A nested task: print the parent line with a trailing colon, then each
        # leaf task indented beneath it with a "num.leaf_num" label.
        console.println(format_task(task, suffix=":", num="{}. ".format(num)))
        for leaf_num, leaf_task in enumerate(task, start=1):
            console.println(format_task(leaf_task, indent="\t", num="{}.{} ".format(num, leaf_num)))
    else:
        console.println(format_task(task, num="{}. ".format(num)))
# NOTE(review): fragment of a track-info command; `cfg`, `load_track` and
# `challenge_info` are defined outside this excerpt — indentation reconstructed.
t = load_track(cfg)
console.println("Showing details for track [{}]:\n".format(t.name))
console.println("* Description: {}".format(t.description))
# Only tracks that ship a data corpus report document counts and sizes.
if t.number_of_documents:
    console.println("* Documents: {}".format(convert.number_to_human_string(t.number_of_documents)))
    console.println("* Compressed Size: {}".format(convert.bytes_to_human_string(t.compressed_size_in_bytes)))
    console.println("* Uncompressed Size: {}".format(convert.bytes_to_human_string(t.uncompressed_size_in_bytes)))
console.println("")
# Show either the single challenge the user asked for, or all of them.
challenge_name = cfg.opts("track", "challenge.name", mandatory=False)
if challenge_name:
    challenge = t.find_challenge(challenge_name)
    challenge_info(challenge)
else:
    for challenge in t.challenges:
        challenge_info(challenge)
        # blank line between challenges — placement assumed inside the loop; TODO confirm
        console.println("")
# NOTE(review): fragment of a load-generator's per-request loop. The enclosing
# `try:`, the schedule `for` loop that binds expected_scheduled_time /
# sample_type / percent_completed / runner / params, and `total_start` are all
# outside this excerpt — indentation reconstructed; the span is not
# syntactically complete on its own (`break`, `except`, `finally` need them).
absolute_expected_schedule_time = total_start + expected_scheduled_time
# A positive scheduled offset means the schedule throttles throughput.
throughput_throttled = expected_scheduled_time > 0
if throughput_throttled:
    # Sleep until the wall-clock point this request is scheduled to start.
    rest = absolute_expected_schedule_time - time.perf_counter()
    if rest > 0:
        time.sleep(rest)
start = time.perf_counter()
total_ops, total_ops_unit, request_meta_data = execute_single(runner, self.es, params, self.abort_on_error)
stop = time.perf_counter()
service_time = stop - start
# Do not calculate latency separately when we don't throttle throughput. This metric is just confusing then.
latency = stop - absolute_expected_schedule_time if throughput_throttled else service_time
# last sample should bump progress to 100% if externally completed.
completed = percent_completed if not self.complete.is_set() else 1.0
self.sampler.add(sample_type, request_meta_data, convert.seconds_to_ms(latency), convert.seconds_to_ms(service_time),
                 total_ops, total_ops_unit, (stop - total_start), completed)
if self.complete.is_set():
    logger.info("Task is considered completed due to external event.")
    break
except BaseException:
    # Log with stack trace, then propagate — the caller decides how to abort.
    logger.exception("Could not execute schedule")
    raise
finally:
    # Actively set it if this task completes its parent
    if self.task.completes_parent:
        self.complete.set()
# NOTE(review): fragment of a CLI-args-to-config mapping. The `if` owning the
# first three lines (presumably `if args.team_path:`) and the body of the
# trailing `if args.track_revision:` are outside this excerpt — indentation
# reconstructed.
    # A local team path overrides repository settings, so reset them explicitly.
    cfg.add(config.Scope.applicationOverride, "mechanic", "team.path", os.path.abspath(io.normalize_path(args.team_path)))
    cfg.add(config.Scope.applicationOverride, "mechanic", "repository.name", None)
    cfg.add(config.Scope.applicationOverride, "mechanic", "repository.revision", None)
else:
    cfg.add(config.Scope.applicationOverride, "mechanic", "repository.name", args.team_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "repository.revision", args.team_revision)
cfg.add(config.Scope.applicationOverride, "mechanic", "car.plugins", opts.csv_to_list(args.elasticsearch_plugins))
cfg.add(config.Scope.applicationOverride, "mechanic", "car.params", opts.to_dict(args.car_params))
cfg.add(config.Scope.applicationOverride, "mechanic", "plugin.params", opts.to_dict(args.plugin_params))
if args.keep_cluster_running:
    cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running", True)
    # force-preserve the cluster nodes.
    cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install", True)
else:
    cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running", False)
    cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install", convert.to_bool(args.preserve_install))
cfg.add(config.Scope.applicationOverride, "mechanic", "skip.rest.api.check", convert.to_bool(args.skip_rest_api_check))
cfg.add(config.Scope.applicationOverride, "mechanic", "runtime.jdk", args.runtime_jdk)
cfg.add(config.Scope.applicationOverride, "telemetry", "devices", opts.csv_to_list(args.telemetry))
cfg.add(config.Scope.applicationOverride, "telemetry", "params", opts.to_dict(args.telemetry_params))
cfg.add(config.Scope.applicationOverride, "race", "pipeline", args.pipeline)
cfg.add(config.Scope.applicationOverride, "race", "user.tag", args.user_tag)
cfg.add(config.Scope.applicationOverride, "track", "repository.revision", args.track_revision)
# We can assume here that if a track-path is given, the user did not specify a repository either (although argparse sets it to
# its default value)
if args.track_path:
    cfg.add(config.Scope.applicationOverride, "track", "track.path", os.path.abspath(io.normalize_path(args.track_path)))
    cfg.add(config.Scope.applicationOverride, "track", "repository.name", None)
    if args.track_revision:
        # NOTE(review): truncated here — the original presumably warns/raises
        # because a revision is meaningless for a local track path; confirm.
def _ask_data_store(self):
    """Interactively prompt for the Elasticsearch metrics store connection settings.

    Returns a tuple (host, port, secure-as-"True"/"False" string, user, password).
    The prompt order is part of the interactive dialogue and must not change.
    """
    host = self._ask_property("Enter the host name of the ES metrics store", default_value="localhost")
    port = self._ask_property("Enter the port of the ES metrics store", check_pattern=ConfigFactory.PORT_RANGE_PATTERN)
    secure = self._ask_property("Use secure connection (True, False)", default_value=False,
                                check_pattern=ConfigFactory.BOOLEAN_PATTERN)
    user = self._ask_property("Username for basic authentication (empty if not needed)", mandatory=False, default_value="")
    password = self._ask_property("Password for basic authentication (empty if not needed)", mandatory=False,
                                  default_value="", sensitive=True)
    # do an intermediate conversion to bool in order to normalize the user's free-form input
    normalized_secure = str(convert.to_bool(secure))
    return host, port, normalized_secure, user, password
def __init__(self, cfg):
    """Read ES metrics store connection settings from the "reporting" config section.

    NOTE(review): this excerpt appears truncated — `client_options` is assembled
    but the code consuming it (presumably building a client via `client`) is not
    visible here.
    """
    self._config = cfg
    host = self._config.opts("reporting", "datastore.host")
    port = self._config.opts("reporting", "datastore.port")
    secure = convert.to_bool(self._config.opts("reporting", "datastore.secure"))
    user = self._config.opts("reporting", "datastore.user")
    password = self._config.opts("reporting", "datastore.password")
    # any verification mode other than "none" enables certificate verification
    verify = self._config.opts("reporting", "datastore.ssl.verification_mode", default_value="full", mandatory=False) != "none"
    ca_path = self._config.opts("reporting", "datastore.ssl.certificate_authorities", default_value=None, mandatory=False)
    # whether to probe the metrics store's cluster version (defaults to True)
    self.probe_version = self._config.opts("reporting", "datastore.probe.cluster_version", default_value=True, mandatory=False)
    from esrally import client
    # Instead of duplicating code, we're just adapting the metrics store specific properties to match the regular client options.
    client_options = {
        "use_ssl": secure,
        "verify_certs": verify,
        "timeout": 120
    }
    if ca_path:
        client_options["ca_certs"] = ca_path
def report_total_time_per_shard(self, name, baseline_per_shard, contender_per_shard):
    """Render min/median/max comparison lines for a cumulative per-primary-shard time metric.

    Values are stored in ms and formatted as minutes; an increase is treated as
    a regression.
    """
    unit = "min"
    rows = []
    # Same line layout for each statistic — drive it from a table.
    for label, stat in (("Min", "min"), ("Median", "median"), ("Max", "max")):
        rows.append(self.line("{} cumulative {} across primary shard".format(label, name),
                              baseline_per_shard.get(stat), contender_per_shard.get(stat), "", unit,
                              treat_increase_as_improvement=False, formatter=convert.ms_to_minutes))
    return self.join(*rows)
def download(self, base_url, target_path, size_in_bytes, detail_on_missing_root_url):
    """Download a track data file from ``base_url`` to ``target_path``.

    Raises DataError when no base URL is available, SystemSetupError in offline
    mode, and translates a 404 in test mode into a "track does not support test
    mode" DataError.

    NOTE(review): this excerpt is truncated — the body of the final
    `if e.reason:` (presumably extending `msg` and raising) is not visible.
    """
    file_name = os.path.basename(target_path)
    if not base_url:
        raise exceptions.DataError("%s and it cannot be downloaded because no base URL is provided."
                                   % detail_on_missing_root_url)
    if self.offline:
        raise exceptions.SystemSetupError("Cannot find %s. Please disable offline mode and retry again." % target_path)
    data_url = "%s/%s" % (base_url, file_name)
    try:
        io.ensure_dir(os.path.dirname(target_path))
        if size_in_bytes:
            size_in_mb = round(convert.bytes_to_mb(size_in_bytes))
            self.logger.info("Downloading data from [%s] (%s MB) to [%s].", data_url, size_in_mb, target_path)
        else:
            self.logger.info("Downloading data from [%s] to [%s].", data_url, target_path)
        # we want to have a bit more accurate download progress as these files are typically very large
        progress = net.Progress("[INFO] Downloading data for track %s" % self.track_name, accuracy=1)
        net.download(data_url, target_path, size_in_bytes, progress_indicator=progress)
        progress.finish()
        self.logger.info("Downloaded data from [%s] to [%s].", data_url, target_path)
    except urllib.error.HTTPError as e:
        if e.code == 404 and self.test_mode:
            raise exceptions.DataError("Track [%s] does not support test mode. Please ask the track author to add it or "
                                       "disable test mode and retry." % self.track_name)
        else:
            msg = "Could not download [%s] to [%s]" % (data_url, target_path)
            if e.reason:
                # NOTE(review): truncated here in the source excerpt.
def report_disk_usage(self, baseline_stats, contender_stats):
    """Render comparison lines for disk-related metrics (store and translog size).

    Sizes are stored in bytes and formatted as GB; an increase is treated as a
    regression.
    """
    store_line = self.line("Store size", baseline_stats.store_size, contender_stats.store_size, "", "GB",
                           treat_increase_as_improvement=False, formatter=convert.bytes_to_gb)
    translog_line = self.line("Translog size", baseline_stats.translog_size, contender_stats.translog_size, "", "GB",
                              treat_increase_as_improvement=False, formatter=convert.bytes_to_gb)
    return self.join(store_line, translog_line)
def _download(self, url, local_path, size_in_bytes=None, force_download=False, raise_url_error=False):
    """Download a benchmark data file to ``local_path`` unless it already exists.

    Supports http(s) and s3 URLs; a URLError is logged and, when
    ``raise_url_error`` is set, re-raised.

    NOTE(review): this excerpt is truncated — the verification step promised by
    the final comment is not visible here.
    """
    offline = self._config.opts("system", "offline.mode")
    file_exists = os.path.isfile(local_path)
    if file_exists and not force_download:
        logger.info("[%s] already exists locally. Skipping download." % local_path)
        return
    if not offline:
        logger.info("Downloading from [%s] to [%s]." % (url, local_path))
        try:
            io.ensure_dir(os.path.dirname(local_path))
            if size_in_bytes:
                size_in_mb = round(convert.bytes_to_mb(size_in_bytes))
                # ensure output appears immediately
                print("Downloading data from %s (%s MB) ... " % (url, size_in_mb), end='', flush=True)
            if url.startswith("http"):
                net.download(url, local_path, size_in_bytes)
            elif url.startswith("s3"):
                self._do_download_via_s3(url, local_path, size_in_bytes)
            else:
                raise exceptions.SystemSetupError("Cannot download benchmark data from [%s]. Only http(s) and s3 are supported." % url)
            if size_in_bytes:
                # terminate the progress line started above
                print("Done")
        except urllib.error.URLError:
            logger.exception("Could not download [%s] to [%s]." % (url, local_path))
            if raise_url_error:
                raise
    # file must exist at this point -> verify