# NOTE(review): vendor banner accidentally pasted into the source, kept as a comment so the file is not broken by it:
# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): this region is garbled — indentation has been stripped and two unrelated
# fragments are interleaved: the top of `with_actor_system` (joins an already-running
# Thespian actor system or bootstraps a local one, falling back to an offline/degraded
# actor system when no external socket address can be determined) and the middle of a
# driver actor's `receiveMessage` (load-generator termination/failure handling, which
# references `self`/`msg` that do not exist in this function). Restore from the original
# source before making functional changes.
def with_actor_system(runnable, cfg):
already_running = actor.actor_system_already_running()
logger.info("Actor system already running locally? [%s]" % str(already_running))
try:
# join the running system if there is one; otherwise start one bound to localhost only
actors = actor.bootstrap_actor_system(try_join=already_running, prefer_local_only=not already_running)
# We can only support remote benchmarks if we have a dedicated daemon that is not only bound to 127.0.0.1
cfg.add(config.Scope.application, "system", "remote.benchmarking.supported", already_running)
except RuntimeError as e:
logger.exception("Could not bootstrap actor system.")
# fall back to a degraded (offline) actor system only for this specific, known error message
if str(e) == "Unable to determine valid external socket address.":
console.warn("Could not determine a socket address. Are you running without any network? Switching to degraded mode.",
logger=logger)
actor.use_offline_actor_system()
actors = actor.bootstrap_actor_system(try_join=True)
else:
raise
try:
runnable(cfg)
# NOTE(review): the paste is corrupted from here on — the `self.send(...)` call and the
# `elif` chain below belong to an actor's receiveMessage method, not to with_actor_system.
self.send(self.start_sender, msg)
elif isinstance(msg, thespian.actors.ActorExitRequest):
logger.info("Main driver received ActorExitRequest and will terminate all load generators.")
self.status = "exiting"
for driver in self.coordinator.drivers:
self.send(driver, thespian.actors.ActorExitRequest())
logger.info("Main driver has notified all load generators of termination.")
elif isinstance(msg, thespian.actors.ChildActorExited):
# is it a driver?
if msg.childAddress in self.coordinator.drivers:
driver_index = self.coordinator.drivers.index(msg.childAddress)
if self.status == "exiting":
# expected exit while we are shutting down
logger.info("Load generator [%d] has exited." % driver_index)
else:
# unexpected exit: abort the whole benchmark and tell the original requester
logger.error("Load generator [%d] has exited prematurely. Aborting benchmark." % driver_index)
self.send(self.start_sender, actor.BenchmarkFailure("Load generator [%d] has exited prematurely." % driver_index))
else:
logger.info("A track preparator has exited.")
else:
logger.info("Main driver received unknown message [%s] (ignoring)." % (str(msg)))
except BaseException as e:
# catch BaseException (not just Exception) so even KeyboardInterrupt/SystemExit trigger cleanup
logger.exception("Main driver encountered a fatal exception. Shutting down.")
if self.coordinator:
self.coordinator.close()
self.status = "exiting"
for driver in self.coordinator.drivers:
self.send(driver, thespian.actors.ActorExitRequest())
self.send(self.start_sender, actor.BenchmarkFailure("Could not execute benchmark", e))
# NOTE(review): orphan line from yet another message branch (sample collection)
self.coordinator.update_samples(msg.samples)
def load_local_config(coordinator_config):
    """
    Derive a node-local configuration from the coordinator's configuration.

    Only the sections relevant for load generation are copied over; the root path is
    set explicitly because this code does not run through the main entry point.
    """
    # only copy the relevant bits; "mechanic" is needed due to distribution version...
    relevant_sections = ["track", "driver", "client", "mechanic"]
    local_cfg = config.auto_load_local_config(coordinator_config, additional_sections=relevant_sections)
    # set root path (normally done by the main entry point)
    local_cfg.add(config.Scope.application, "node", "rally.root", paths.rally_root())
    return local_cfg
# NOTE(review): indentation lost in this paste, and the class is truncated mid-method at
# `if msg.track.has_plugins:` — the remainder of receiveMessage is missing here.
class TrackPreparationActor(actor.RallyActor):
def __init__(self):
super().__init__()
actor.RallyActor.configure_logging(logger)
# address of the actor that sent PrepareTrack; replies go back to it
self.start_sender = None
def receiveMessage(self, msg, sender):
try:
if isinstance(msg, PrepareTrack):
self.start_sender = sender
# load node-specific config to have correct paths available
cfg = load_local_config(msg.config)
logger.info("Preparing track [%s]" % msg.track.name)
# for "proper" track repositories this will ensure that all state is identical to the coordinator node. For simple tracks
# the track is usually self-contained but in some cases (plugins are defined) we still need to ensure that the track
# is present on all machines.
if msg.track.has_plugins:
# NOTE(review): first of three pasted copies of `race`; this one is truncated — the
# blocking BenchmarkCancelled notification and the finally-block that stops the actor
# are cut off after the trailing comment. See the complete copies below.
def race(cfg, sources=False, build=False, distribution=False, external=False, docker=False):
logger = logging.getLogger(__name__)
# at this point an actor system has to run and we should only join
actor_system = actor.bootstrap_actor_system(try_join=True)
benchmark_actor = actor_system.createActor(BenchmarkActor, targetActorRequirements={"coordinator": True})
try:
# ask() blocks until the benchmark actor answers with its final status
result = actor_system.ask(benchmark_actor, Setup(cfg, sources, build, distribution, external, docker))
if isinstance(result, Success):
logger.info("Benchmark has finished successfully.")
# may happen if one of the load generators has detected that the user has cancelled the benchmark.
elif isinstance(result, actor.BenchmarkCancelled):
logger.info("User has cancelled the benchmark (detected by actor).")
elif isinstance(result, actor.BenchmarkFailure):
logger.error("A benchmark failure has occurred")
raise exceptions.RallyError(result.message, result.cause)
else:
raise exceptions.RallyError("Got an unexpected result during benchmarking: [%s]." % str(result))
except KeyboardInterrupt:
logger.info("User has cancelled the benchmark (detected by race control).")
# notify the coordinator so it can properly handle this state. Do it blocking so we don't have a race between this message
def race(cfg, sources=False, build=False, distribution=False, external=False, docker=False):
    """
    Run a benchmark by delegating to a ``BenchmarkActor`` on the already running actor system
    and translating its reply into log output or a ``RallyError``.
    """
    # at this point an actor system has to run and we should only join
    system = actor.bootstrap_actor_system(try_join=True)
    coordinator = system.createActor(BenchmarkActor, targetActorRequirements={"coordinator": True})
    try:
        # blocks until the benchmark actor replies with its final status
        outcome = system.ask(coordinator, Setup(cfg, sources, build, distribution, external, docker))
        if isinstance(outcome, Success):
            logger.info("Benchmark has finished successfully.")
        elif isinstance(outcome, actor.BenchmarkCancelled):
            # may happen if one of the load generators has detected that the user has cancelled the benchmark.
            logger.info("User has cancelled the benchmark (detected by actor).")
        elif isinstance(outcome, actor.BenchmarkFailure):
            logger.error("A benchmark failure has occurred")
            raise exceptions.RallyError(outcome.message, outcome.cause)
        else:
            raise exceptions.RallyError("Got an unexpected result during benchmarking: [%s]." % str(outcome))
    except KeyboardInterrupt:
        logger.info("User has cancelled the benchmark (detected by race control).")
        # notify the coordinator so it can properly handle this state. Do it blocking so we don't have a race between this message
        # and the actor exit request.
        system.ask(coordinator, actor.BenchmarkCancelled())
    finally:
        logger.info("Telling benchmark actor to exit.")
        system.tell(coordinator, thespian.actors.ActorExitRequest())
class BenchmarkComplete:
    """
    Indicates that the benchmark is complete.
    """
    def __init__(self, metrics):
        # the metrics gathered for the benchmark (opaque to this message class)
        self.metrics = metrics

    def __repr__(self):
        # aid debugging when this message shows up in logs
        return "BenchmarkComplete(metrics=%r)" % (self.metrics,)
class TaskFinished:
    """
    Indicates that a benchmark task has finished.
    """
    def __init__(self, metrics, next_task_scheduled_in):
        # the metrics gathered for the finished task (opaque to this message class)
        self.metrics = metrics
        # presumably a delay (in seconds) until the next task starts — confirm at call site
        self.next_task_scheduled_in = next_task_scheduled_in

    def __repr__(self):
        # aid debugging when this message shows up in logs
        return "TaskFinished(metrics=%r, next_task_scheduled_in=%r)" % (self.metrics, self.next_task_scheduled_in)
# NOTE(review): truncated fragment — only the class constants and the start of __init__
# survived the paste; the two trailing lines (`all_node_ips.add(ip)` / `return ...`)
# are the tail of an unrelated helper (apparently one collecting node IPs).
class DriverActor(actor.RallyActor):
RESET_RELATIVE_TIME_MARKER = "reset_relative_time"
WAKEUP_INTERVAL_SECONDS = 1
# post-process request metrics every N seconds and send it to the metrics store
POST_PROCESS_INTERVAL_SECONDS = 30
"""
Coordinates all worker drivers. This is actually only a thin actor wrapper layer around ``Driver`` which does the actual work.
"""
def __init__(self):
super().__init__()
actor.RallyActor.configure_logging(logger)
# sender of the start message; results/failures are reported back to it
self.start_sender = None
# the Driver doing the actual coordination work; created later
self.coordinator = None
all_node_ips.add(ip)
return all_node_ips
def nodes_by_host(ip_port_pairs):
    """
    Group sequential node ids by their (ip, port) pair.

    Node ids are assigned in input order starting at 0; each pair maps to the
    list of ids that landed on it.
    """
    nodes = {}
    for node_id, ip_port in enumerate(ip_port_pairs):
        nodes.setdefault(ip_port, []).append(node_id)
    return nodes
# NOTE(review): truncated fragment — __init__ is cut off after `self.car = None`;
# the rest of the class did not survive the paste. A second (fuller) copy of this
# class appears later in the file.
class MechanicActor(actor.RallyActor):
WAKEUP_RESET_RELATIVE_TIME = "relative_time"
WAKEUP_FLUSH_METRICS = "flush_metrics"
"""
This actor coordinates all associated mechanics on remote hosts (which do the actual work).
"""
def __init__(self):
super().__init__()
# all of this state is populated later; presumably during Setup handling — confirm upstream
self.cfg = None
self.metrics_store = None
self.race_control = None
self.cluster_launcher = None
self.cluster = None
self.car = None
# NOTE(review): orphan fragment from the middle of a load-generator actor method (the
# `else:` at the top has no matching `if` here, and `self` is unbound). It appears to
# handle a periodic wakeup: start driving, detect cancellation, surface executor
# failures, or report progress — restore the enclosing method from upstream before editing.
self.logger.info("LoadGenerator[%s] starts driving now.", str(self.client_id))
self.start_driving = False
self.drive()
else:
current_samples = self.send_samples()
if self.cancel.is_set():
self.logger.info("LoadGenerator[%s] has detected that benchmark has been cancelled. Notifying master...",
str(self.client_id))
self.send(self.master, actor.BenchmarkCancelled())
elif self.executor_future is not None and self.executor_future.done():
# timeout=0: future is already done, so this cannot block
e = self.executor_future.exception(timeout=0)
if e:
self.logger.info("LoadGenerator[%s] has detected a benchmark failure. Notifying master...", str(self.client_id))
# the exception might be user-defined and not be on the load path of the master driver. Hence, it cannot be
# deserialized on the receiver so we convert it here to a plain string.
self.send(self.master, actor.BenchmarkFailure("Error in load generator [{}]".format(self.client_id), str(e)))
else:
self.logger.info("LoadGenerator[%s] is ready for the next task.", str(self.client_id))
self.executor_future = None
self.drive()
else:
# still running: log progress based on the most recent sample, if any
if current_samples and len(current_samples) > 0:
most_recent_sample = current_samples[-1]
if most_recent_sample.percent_completed is not None:
self.logger.debug("LoadGenerator[%s] is executing [%s] (%.2f%% complete).",
str(self.client_id), most_recent_sample.task, most_recent_sample.percent_completed * 100.0)
else:
self.logger.debug("LoadGenerator[%s] is executing [%s] (dependent eternal task).",
str(self.client_id), most_recent_sample.task)
else:
self.logger.debug("LoadGenerator[%s] is executing (no samples).", str(self.client_id))
# schedule the next periodic wakeup
self.wakeupAfter(datetime.timedelta(seconds=self.wakeup_interval))
# NOTE(review): the two lines below are the tail of yet another unrelated helper.
all_node_ids.update(node_ids_per_host)
return all_node_ids
def nodes_by_host(ip_port_pairs):
    """
    Assign consecutive node ids (starting at 0) and bucket them by (ip, port) pair.

    Returns a dict mapping each pair to the list of node ids placed on that host.
    """
    assignment = {}
    next_id = 0
    for pair in ip_port_pairs:
        bucket = assignment.get(pair)
        if bucket is None:
            bucket = []
            assignment[pair] = bucket
        bucket.append(next_id)
        next_id += 1
    return assignment
class MechanicActor(actor.RallyActor):
    WAKEUP_RESET_RELATIVE_TIME = "relative_time"
    """
    This actor coordinates all associated mechanics on remote hosts (which do the actual work).
    """
    def __init__(self):
        super().__init__()
        # state that is filled in later; everything starts out unset
        for attribute in ("cfg", "race_control", "cluster_launcher", "cluster", "car", "team_revision"):
            setattr(self, attribute, None)
        self.externally_provisioned = False
def race(cfg, sources=False, build=False, distribution=False, external=False, docker=False):
    """
    Run a benchmark via a ``BenchmarkActor`` on the already running actor system and
    translate its reply into log output or a ``RallyError``.
    """
    log = logging.getLogger(__name__)
    # at this point an actor system has to run and we should only join
    actors = actor.bootstrap_actor_system(try_join=True)
    benchmark = actors.createActor(BenchmarkActor, targetActorRequirements={"coordinator": True})

    def handle(result):
        # success / cancellation are informational; failures and unknown replies abort
        if isinstance(result, Success):
            log.info("Benchmark has finished successfully.")
        elif isinstance(result, actor.BenchmarkCancelled):
            # may happen if one of the load generators has detected that the user has cancelled the benchmark.
            log.info("User has cancelled the benchmark (detected by actor).")
        elif isinstance(result, actor.BenchmarkFailure):
            log.error("A benchmark failure has occurred")
            raise exceptions.RallyError(result.message, result.cause)
        else:
            raise exceptions.RallyError("Got an unexpected result during benchmarking: [%s]." % str(result))

    try:
        handle(actors.ask(benchmark, Setup(cfg, sources, build, distribution, external, docker)))
    except KeyboardInterrupt:
        log.info("User has cancelled the benchmark (detected by race control).")
        # notify the coordinator so it can properly handle this state. Do it blocking so we don't have a race between this message
        # and the actor exit request.
        actors.ask(benchmark, actor.BenchmarkCancelled())
    finally:
        log.info("Telling benchmark actor to exit.")
        actors.tell(benchmark, thespian.actors.ActorExitRequest())