How to use locust.events in locust

To help you get started, we’ve selected a few locust examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github hyperledger-archives / iroha / test / load / locustfile-performance.py View on Github external
def send_tx_await(self, transaction):
        """
        Send a transaction to Iroha and block until a terminal status
        (COMMITTED or REJECTED) is reported on the status stream, then
        fire the matching locust request event with the round-trip time
        in milliseconds.

        :param transaction: protobuf Transaction
        :return: None
        """
        start_time = time.time()
        try:
            # Submit asynchronously; the returned future was previously bound
            # to an unused local — completion is tracked via the status
            # stream below, so the future itself is not needed.
            self._command_service_stub.Torii.future(transaction)
            tx_status = 'NOT_RECEIVED'
            # NOTE(review): if the status stream never yields a terminal
            # status this loop re-opens the stream indefinitely — presumably
            # the server guarantees termination; confirm against the Iroha API.
            while tx_status not in ['COMMITTED', 'REJECTED']:
                for status in self.tx_status_stream(transaction):
                    tx_status = status[0]
        except grpc.RpcError as e:
            total_time = int((time.time() - start_time) * 1000)
            events.request_failure.fire(request_type="grpc", name='send_tx_await', response_time=total_time, exception=e)
        else:
            total_time = int((time.time() - start_time) * 1000)
            # response_length is hardcoded to 0; compute the serialized
            # response size here if byte counts should be reported.
            events.request_success.fire(request_type="grpc", name='send_tx_await', response_time=total_time, response_length=0)
github edx-unsupported / edx-load-tests / helpers / raw_logs.py View on Github external
def __init__(self):
        """Prepare the logger and subscribe it to Locust's lifecycle events."""
        # The log file and CSV writer are created later (on reconfigure),
        # not at construction time.
        self.logfile = None
        self.csvwriter = None
        # Register callbacks for request success/failure and reconfiguration.
        events.request_success += self.on_request_success
        events.request_failure += self.on_request_failure
        events.reconfigure += self.on_reconfigure
github edx-unsupported / edx-load-tests / loadtests / csm / locustfile.py View on Github external
def wrapper(*args, **kwargs):
            start_time = time.time()
            try:
                result = func(*args, **kwargs)
                if isinstance(result, types.GeneratorType):
                    # To make a generator actually be called, iterate over all the results.
                    result = list(result)
            except Exception as e:
                end_time = time.time()
                total_time = (end_time - start_time) * 1000
                LOG.warning("Request Failed", exc_info=True)
                events.request_failure.fire(
                    request_type="DjangoXBlockUserStateClient",
                    name=name,
                    response_time=total_time,
                    start_time=start_time,
                    end_time=end_time,
                    exception=e
                )
            else:
                end_time = time.time()
                total_time = (end_time - start_time) * 1000
                events.request_success.fire(
                    request_type="DjangoXBlockUserStateClient",
                    name=name,
                    response_time=total_time,
                    start_time=start_time,
                    end_time=time.time(),
github SvenskaSpel / locust-plugins / examples / timescale_listener_ex.py View on Github external
@events.init.add_listener
def on_locust_init(environment, **_kwargs):
    # On locust startup, attach a TimescaleListener so run metrics are
    # recorded, tagged with this test plan name and target environment.
    TimescaleListener(env=environment, testplan="timescale_listener_ex", target_env="myTestEnv")
github karol-brejna-i / locust-experiments / kafka-confluent-client / locust-scripts / locustfile.py View on Github external
from confluent_client import KafkaConfluentClient
from kafka_client import KafkaClient

# Directory containing this locustfile.
WORK_DIR = os.path.dirname(__file__)

# read kafka brokers from config
# Comma-separated broker list, e.g. "host1:9092,host2:9092".
KAFKA_BROKERS = os.getenv("KAFKA_BROKERS", "kafka:9092").split(sep=",")

# read other environment variables
# QUIET_MODE: any of "1"/"true"/"yes" (case-insensitive) enables it;
# defaults to quiet (extra handlers suppressed).
QUIET_MODE = True if os.getenv("QUIET_MODE", "true").lower() in ['1', 'true', 'yes'] else False
# TASK_DELAY: integer delay between tasks — units not visible here
# (presumably ms or s; confirm against the task code). Defaults to 0.
TASK_DELAY = int(os.getenv("TASK_DELAY", "0"))

# register additional logging handlers
if not QUIET_MODE:
    events.request_success += additional_success_handler
    events.request_failure += additional_failure_handler


class KafkaLocust(Locust):
    # Class-level singleton: one Kafka client shared by every user
    # instance in this worker process.
    client = None

    def __init__(self, *args, **kwargs):
        super(KafkaLocust, self).__init__(*args, **kwargs)
        # Lazily create the shared client the first time a user is spawned.
        if not KafkaLocust.client:
            KafkaLocust.client = KafkaConfluentClient(KAFKA_BROKERS)


class KafkaBehaviour(TaskSet):

    def random_message(self, min_length=32, max_length=128):
        """Return a random uppercase ASCII string.

        The length is drawn uniformly from [min_length, max_length]
        inclusive. The original used random.randrange, whose stop bound
        is exclusive, so the documented maximum (default 128) was never
        reachable — randint fixes that off-by-one.

        :param min_length: minimum message length (inclusive)
        :param max_length: maximum message length (inclusive)
        """
        length = random.randint(min_length, max_length)
        return ''.join(random.choice(string.ascii_uppercase) for _ in range(length))
github SvenskaSpel / locust-plugins / locust_plugins / checks.py View on Github external
@events.init_command_line_parser.add_listener
def add_checks_to_arguments(parser):
    checks = parser.add_argument_group(
        "Checks", "Sets locust's exit code to 2 if any of these thresholds were not met (added by locust-plugins)"
    )
    checks.add_argument("--check-rps", type=float, help="Requests per second", env_var="LOCUST_CHECK_RPS", default=0.0)
    checks.add_argument(
        "--check-fail-ratio",
        type=float,
        help="Ratio of failed requests (0.0-1.0)",
        env_var="LOCUST_CHECK_FAIL_RATIO",
        default=1.0,
    )
    checks.add_argument(
        "--check-avg-response-time",
        type=float,
        help="Average response time",
github locustio / locust / locust / runners.py View on Github external
def _send_stats(self):
        """Gather stats via the report_to_master event and send them to the master."""
        # Listeners populate this dict in place when the event fires.
        payload = {}
        events.report_to_master.fire(client_id=self.client_id, data=payload)
        self.client.send(Message("stats", payload, self.client_id))
github concurrencylabs / mqtt-locust / mqtt_locust.py View on Github external
def fire_locust_failure(**kwargs):
    """Relay failure details (request_type, name, response_time, exception, ...)
    straight to Locust's global request_failure event."""
    events.request_failure.fire(**kwargs)
github TargetProcess / signalr-client-py / tp_login_and_cells.py View on Github external
statsd.timer("requests/response/time/{0}".format(request_name), response_time)
    statsd.timer("requests/response/length/{0}".format(request_name), response_length)


def on_request_failure(request_type, name, response_time, response_length):
    """
    Event handler that gets triggered on every failed request.
    (Original docstring said "successful" — copy-paste from the success handler.)

    Records in statsd, keyed by request_type/name: a total request counter,
    a failure-status counter, and timers for response time and length.
    """
    request_name = "{0}/{1}".format(request_type, name)
    statsd.incr("requests/{0}".format(request_name))
    statsd.incr("requests/status/failure/{0}".format(request_name))
    statsd.timer("requests/response/time/{0}".format(request_name), response_time)
    statsd.timer("requests/response/length/{0}".format(request_name), response_length)


# Hook the statsd handlers into Locust's global event hooks so every
# request outcome is forwarded to statsd.
events.request_success += on_request_success
events.request_failure += on_request_failure