# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# The locust logging format is not necessarily stable, so we use the event
# hooks API to implement our own "stable" logging for later programmatic
# reference.
# The events are:
# * locust_start_hatching
# * master_start_hatching
# * quitting
# * hatch_complete
# install simple event markers
# NOTE(review): `locust` and `EventMarker` are presumably imported/defined
# earlier in this file (not visible in this chunk) -- confirm before reuse.
# EventHook `+=` registers the marker as a listener for each lifecycle event.
locust.events.locust_start_hatching += EventMarker('locust_start_hatching')
locust.events.master_start_hatching += EventMarker('master_start_hatching')
locust.events.quitting += EventMarker('quitting')
locust.events.hatch_complete += EventMarker('hatch_complete')
# Upload every CSV result file found in `rdir` to `bucket` under a "stress/"
# prefix, appending a second-resolution timestamp to each blob name so repeated
# runs do not overwrite each other, and print the public URL of each upload.
# NOTE(review): the bare `return` below means this is the tail of a function
# whose `def` is outside this chunk; `rdir` and `bucket` come from that scope.
# NOTE(review): str(datetime) contains spaces/colons -- presumably acceptable
# in blob names here; verify against the storage backend's naming rules.
rfiles = glob.glob(rdir + '/*.csv')
for rfile in rfiles:
time_postfix = str(datetime.now().replace(microsecond=0))
blobname = Path(rfile).stem + '_' + time_postfix + '.csv'
blob = bucket.blob('stress/' + blobname)
blob.upload_from_filename(rfile)
print("Test result is available via {}".format(blob.public_url))
return
"""
Register locust.events.quitting hook if executing under --no-web mode in a kubernetes container
"""
if "NO_WEB" in os.environ:
events.quitting += on_quitting
# Spawn the master runner's background greenlets: one sends heartbeats to the
# slaves, the other listens for client messages. link_exception routes any
# crash in either greenlet to the `noop` callback.
# NOTE(review): `self` implies this is the interior of a method whose `def`
# is outside this chunk.
self.greenlet.spawn(self.heartbeat_worker).link_exception(callback=self.noop)
self.greenlet.spawn(self.client_listener).link_exception(callback=self.noop)
# listener that gathers info on how many locust users the slaves have spawned
def on_slave_report(client_id, data):
    """Record the user count reported by a known slave; ignore unknown senders.

    NOTE(review): `self` and `logger` are free variables here -- in the original
    file this def is nested inside a runner method and closes over them.
    """
    if client_id in self.clients:
        # Known slave: remember how many locust users it reports having spawned.
        self.clients[client_id].user_count = data["user_count"]
    else:
        logger.info("Discarded report from unrecognized slave %s", client_id)
# Wire the slave-report listener defined above into locust's slave_report event.
events.slave_report += on_slave_report
# register listener that sends quit message to slave nodes
def on_quitting():
# Propagate shutdown to the connected slaves via the runner's quit().
self.quit()
events.quitting += on_quitting
def start_slave(locust_classes):
    """Run this process as a Locust slave (worker) and block until it exits.

    Creates a SlaveLocustRunner for `locust_classes` (using the module-level
    `slave_options`), stores it in `runners.locust_runner`, and joins its
    greenlet. Exits the process with -1 if the master is unreachable, or 0
    after firing the quitting event on Ctrl-C.
    """
    runners.locust_runner = runners.SlaveLocustRunner(locust_classes, slave_options)
    worker = runners.locust_runner.greenlet
    try:
        worker.join()
    except socket.error as ex:
        # Could not reach the master node -- nothing useful to do but bail out.
        logger.error("Failed to connect to the Locust master: %s", ex)
        sys.exit(-1)
    except KeyboardInterrupt:
        # Graceful stop: let quitting listeners run before exiting cleanly.
        events.quitting.fire()
        sys.exit(0)
def shutdown(code=0):
    """
    Shut down locust by firing quitting event, printing/writing stats and exiting.

    :param code: process exit code handed to sys.exit.
    """
    # Lazy %-style args: let the logging framework do the interpolation.
    logger.info("Shutting down (exit code %s), bye.", code)
    if stats_printer_greenlet is not None:
        stats_printer_greenlet.kill(block=False)
    logger.info("Cleaning up runner...")
    if runners.locust_runner is not None:
        runners.locust_runner.quit()
    logger.info("Running teardowns...")
    events.quitting.fire(reverse=True)
    # Guard the stats printing: locust_runner can be None (the quit() call above
    # is already guarded), and dereferencing .stats on None would raise
    # AttributeError before the process gets to exit with the intended code.
    if runners.locust_runner is not None:
        print_stats(runners.locust_runner.stats, current=False)
        print_percentile_stats(runners.locust_runner.stats)
    if options.csvfilebase:
        write_stat_csvs(options.csvfilebase, options.stats_history_enabled)
    print_error_report()
    sys.exit(code)
# if this class doesn't have a min_wait, max_wait or wait_function defined, copy it from Locust
# NOTE(review): `self.locust` suggests TaskSet initialization -- the enclosing
# method's `def` is outside this chunk. Truthiness is used as "not defined",
# so an explicit value of 0 would also be overwritten by the Locust default.
if not self.min_wait:
self.min_wait = self.locust.min_wait
if not self.max_wait:
self.max_wait = self.locust.max_wait
if not self.wait_function:
self.wait_function = self.locust.wait_function
# Run the class-level setup() at most once and register teardown() at most
# once, guarded by a lock so concurrent instantiations do not race on the flags.
# NOTE(review): acquire()/release() are not wrapped in try/finally, so an
# exception raised by setup() would leave the lock held; `with self._lock:`
# would be safer -- left unchanged here as this is a method-interior fragment.
self._lock.acquire()
if hasattr(self, "setup") and self._setup_has_run is False:
self._set_setup_flag()
self.setup()
if hasattr(self, "teardown") and self._teardown_is_set is False:
self._set_teardown_flag()
# teardown is deferred: it runs when the quitting event fires, not now.
events.quitting += self.teardown
self._lock.release()
# Subscribe one listener per locust lifecycle event so each occurrence is
# recorded (presumably as an InfluxDB point) tagged with this node's id.
events.quitting += __listen_for_locust_events(node_id, event='quitting')
events.master_start_hatching += __listen_for_locust_events(node_id, event='master_start_hatching')
events.master_stop_hatching += __listen_for_locust_events(node_id, event='master_stop_hatching')
events.locust_start_hatching += __listen_for_locust_events(node_id, event='locust_start_hatching')
events.locust_stop_hatching += __listen_for_locust_events(node_id, event='locust_stop_hatching')
# Locust exceptions
events.locust_error += __listen_for_locust_errors(node_id)
def last_flush_on_quitting():
# Signal the background flush worker to stop, wait for it to finish, then
# push any points still buffered so nothing is lost at shutdown.
global stop_flag
stop_flag = True
flush_worker.join()
__flush_points(influxdb_client)
# Flush last points when quitting
events.quitting += last_flush_on_quitting
# Connect to InfluxDB and ensure the target database exists (create_database
# is idempotent in influxdb-python).
influxdb_client = InfluxDBClient(influx_host, influx_port, user, pwd, database)
influxdb_client.create_database(database)
# Derive a coarse node id from the CLI flags; individual slaves are not
# distinguished yet (see TODO below).
node_id = 'local'
if '--master' in sys.argv:
node_id = 'master'
if '--slave' in sys.argv:
# TODO: Get real ID of slaves from locust somehow
node_id = 'slave'
# Start a greenlet that will save the data to influx according to the interval informed
flush_worker = gevent.spawn(__flush_cached_points_worker, influxdb_client, interval_ms)
# Request events
events.request_success += __listen_for_requests_events(node_id, success=True)
events.request_failure += __listen_for_requests_events(node_id, success=False)
# Locust events
events.hatch_complete += __listen_for_locust_events(node_id, event='hatch_complete')
events.quitting += __listen_for_locust_events(node_id, event='quitting')
events.master_start_hatching += __listen_for_locust_events(node_id, event='master_start_hatching')
events.master_stop_hatching += __listen_for_locust_events(node_id, event='master_stop_hatching')
events.locust_start_hatching += __listen_for_locust_events(node_id, event='locust_start_hatching')
events.locust_stop_hatching += __listen_for_locust_events(node_id, event='locust_stop_hatching')
# Locust exceptions
events.locust_error += __listen_for_locust_errors(node_id)
def last_flush_on_quitting():
# Stop the flush worker and push any buffered points before the process exits.
global stop_flag
stop_flag = True
flush_worker.join()
__flush_points(influxdb_client)
# Flush last points when quitting
events.quitting += last_flush_on_quitting
@events.quitting.add_listener
def do_checks(environment, **_kw):
    """On shutdown, validate the final run statistics against CLI thresholds.

    Reads --check-fail-ratio, --check-rps and --check-avg-response-time from
    the parsed options and sets environment.process_exit_code = 2 when any
    threshold is violated, logging which check failed.
    """
    stats = environment.runner.stats.total
    fail_ratio = stats.fail_ratio
    total_rps = stats.total_rps
    avg_response_time = stats.avg_response_time

    opts = environment.parsed_options
    check_rps = opts.check_rps
    check_fail_ratio = opts.check_fail_ratio
    check_avg_response_time = opts.check_avg_response_time

    if fail_ratio > check_fail_ratio:
        logging.info(f"Check failed: fail ratio was {fail_ratio:.1f} (threshold {check_fail_ratio:.1f})")
        environment.process_exit_code = 2
    if total_rps < check_rps:
        logging.info(f"Check failed: total rps was {total_rps:.1f} (threshold {check_rps:.1f})")
        # Bug fix: the rps check previously logged a failure without marking
        # the run as failed, unlike the fail-ratio check.
        environment.process_exit_code = 2
    # Bug fix: check_avg_response_time was read but never checked.
    if avg_response_time > check_avg_response_time:
        logging.info(f"Check failed: avg response time was {avg_response_time:.1f} (threshold {check_avg_response_time:.1f})")
        environment.process_exit_code = 2
# Open the results file for text writing and, for CSV output, build a
# DictWriter whose header row is derived from the record layout returned by
# __getrec(); otherwise leave writer unset.
# NOTE(review): `with open(...) as self.fhd` stores the handle on the instance;
# `self` means this is the interior of a method whose `def` is outside this
# chunk, and the statements below appear to run inside that with-block.
with open(fname, 'wt') as self.fhd:
if is_csv:
fieldnames = list(self.__getrec(None, None, None, None).keys())
dialect = guess_csv_dialect(",".join(fieldnames))
self.writer = csv.DictWriter(self.fhd, fieldnames=fieldnames, dialect=dialect)
self.writer.writeheader()
self.fhd.flush()
else:
self.writer = None  # FIXME: bad code design, have zero object for it
# Forward locust events into this reporter's bound handler methods.
events.request_success += self.__on_request_success
events.request_failure += self.__on_request_failure
events.locust_error += self.__on_exception
events.slave_report += self.__on_slave_report
events.quitting += self.__on_quit
# Run locust's CLI entry point in-process, then flush any buffered output.
main.main()
self.fhd.flush()