How to use the locust.events.request_failure.fire function in locust

To help you get started, we’ve selected a few locust examples that show how locust.events.request_failure.fire is used in public projects.

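All of the examples below follow the same basic pattern: time the operation yourself, then report the result to locust's statistics by firing events.request_success when the call succeeds and events.request_failure when it raises. A minimal sketch of that pattern is shown here; the class and function names are illustrative, and it assumes the pre-1.0 locust event API used throughout these examples (newer locust releases have since replaced these two hooks with a single request event).

import random
import time

from locust import Locust, TaskSet, task, events


def do_some_request():
    # Stand-in for the protocol call being measured (illustrative only)
    if random.random() < 0.1:
        raise RuntimeError("simulated backend error")


class CustomTasks(TaskSet):
    @task
    def call_backend(self):
        start_time = time.time()
        try:
            do_some_request()
        except Exception as e:
            total_time = int((time.time() - start_time) * 1000)
            # Report the failure so it shows up in locust's statistics
            events.request_failure.fire(request_type="custom", name="do_some_request",
                                        response_time=total_time, exception=e)
        else:
            total_time = int((time.time() - start_time) * 1000)
            events.request_success.fire(request_type="custom", name="do_some_request",
                                        response_time=total_time, response_length=0)


class CustomUser(Locust):
    task_set = CustomTasks
    min_wait = 1000
    max_wait = 2000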

SeldonIO/seldon-core: util/loadtester/scripts/predict_grpc_locust.py (view on GitHub)
                    shape = [3,2],
                    values = [1.0,1.0,2.0,3.0,4.0,5.0]
                )
            )
            request = prediction_pb2.SeldonMessage(data = datadef)
            start_time = time.time()
            try:
                if self.oauth_enabled:
                    metadata = [('oauth_token', self.access_token)]
                    response = self.stub.Predict(request=request,metadata=metadata)
                else:
                    response = self.stub.Predict(request=request)
            except Exception as e:
                total_time = int((time.time() - start_time) * 1000)
                print(e)
                events.request_failure.fire(request_type="grpc", name=HOST, response_time=total_time, exception=e)
            else:
                total_time = int((time.time() - start_time) * 1000)
                events.request_success.fire(request_type="grpc", name=HOST, response_time=total_time, response_length=0)

hyperledger-archives/iroha: test/load/locustfile.py (view on GitHub)
            def wrapper(*args, **kwargs):
                start_time = time.time()
                try:
                    result = func(*args, **kwargs)
                except grpc.RpcError as e:
                    total_time = int((time.time() - start_time) * 1000)
                    events.request_failure.fire(request_type="grpc", name=name, response_time=total_time, exception=e)
                else:
                    total_time = int((time.time() - start_time) * 1000)
                    events.request_success.fire(request_type="grpc", name=name, response_time=total_time, response_length=0)
                    # In this example, response_length is hardcoded to 0. If we wanted the response length to be
                    # reported correctly in the statistics, we would probably need to hook in at a lower level
                    return result
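The iroha snippet above is only the inner wrapper of a timing decorator; the enclosing function that supplies func and name lies outside the excerpt. A sketch of the full decorator shape might look like the following (the decorator name timed_grpc_call is an assumption for illustration, not taken from the iroha source).

import functools
import time

import grpc
from locust import events


def timed_grpc_call(name):
    # Report the wrapped gRPC call to locust as a success or a failure
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start_time = time.time()
            try:
                result = func(*args, **kwargs)
            except grpc.RpcError as e:
                total_time = int((time.time() - start_time) * 1000)
                events.request_failure.fire(request_type="grpc", name=name,
                                            response_time=total_time, exception=e)
            else:
                total_time = int((time.time() - start_time) * 1000)
                events.request_success.fire(request_type="grpc", name=name,
                                            response_time=total_time, response_length=0)
                return result
        return wrapper
    return decorator

A task method could then be decorated with, for example, @timed_grpc_call("send_transaction") so every call is reported without repeating the try/except block.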

edx-unsupported/edx-load-tests: loadtests/course_import/locustfile.py (view on GitHub)
            if resp.status_code != 200:
                raise Exception('Course import failed.')
            for _ in xrange(100):
                resp = self.client.get("/import_status/{0}/{1}".format(cid,
                                                                       ifname),
                                       name="/import_status/")
                if resp.text.find("4") >= 0 or resp.text.find("0") >= 0:
                    break
                time.sleep(0.1)
            if resp.text.find("4") >= 0:
                events.request_success.fire(request_type="http",
                                            name="course_import",
                                            response_time=(time.time() - start_time) * 1000,
                                            response_length=0)
            else:
                events.request_failure.fire(request_type="http",
                                            name="course_import",
                                            response_time=(time.time() - start_time) * 1000)

GoogleCloudPlatform/community: tutorials/load-testing-iot-using-gcp-and-locust/locustfile.py (view on GitHub)
    def on_connect(self, client, userdata, flags, rc):
        sys.stdout.write('*** ON_CONNECT {} CONNACK received with code {}'.format(self.get_loggedId(), rc))
        if rc == 0:
            # Fire locust event for initial connect only
            if self.connectStartTime:
                # want connectLatency to be in milliseconds
                connectLatency = int((time.time() - self.connectStartTime) * 1000)
                events.request_success.fire(request_type='MQTT connect', name='', response_time=connectLatency, response_length=0)
                self.connectStartTime = None
                self.subscribeStartTime = time.time()
            self.connected = True
            self.mqtt_client.subscribe('/devices/{}/commands/#'.format(self.deviceId))
        else:
            msg = 'deviceId {} CONNACK error code {}'.format(self.deviceId, rc)
            events.request_failure.fire(request_type='MQTT connect', name='', response_time=0, exception=msg)

locustio/locust: locust/contrib/fasthttp.py (view on GitHub)
        else:
            request_meta["content_size"] = len(response.content or "")
        
        # Record the consumed time
        # Note: This is intentionally placed after we record the content_size above, since 
        # we'll then trigger fetching of the body (unless stream=True)
        request_meta["response_time"] = int((default_timer() - request_meta["start_time"]) * 1000)
        
        if catch_response:
            response.locust_request_meta = request_meta
            return ResponseContextManager(response)
        else:
            try:
                response.raise_for_status()
            except FAILURE_EXCEPTIONS as e:
                events.request_failure.fire(
                    request_type=request_meta["method"], 
                    name=request_meta["name"], 
                    response_time=request_meta["response_time"], 
                    response_length=request_meta["content_size"],
                    exception=e, 
                )
            else:
                events.request_success.fire(
                    request_type=request_meta["method"],
                    name=request_meta["name"],
                    response_time=request_meta["response_time"],
                    response_length=request_meta["content_size"],
                )
            return response
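This snippet is locust's own fasthttp client: it measures the response time, then either fires request_failure/request_success itself or, when catch_response=True, defers that decision to the caller through a ResponseContextManager. In user code, that deferred path looks roughly like the sketch below; the URL, the check, and the class names are illustrative, the sketch uses the default HttpLocust client rather than the fasthttp one for brevity, and it again assumes the pre-1.0 API, in which response.failure() fires request_failure internally.

from locust import HttpLocust, TaskSet, task


class CheckedTasks(TaskSet):
    @task
    def get_profile(self):
        # catch_response=True returns a ResponseContextManager instead of
        # firing request_success/request_failure immediately
        with self.client.get("/profile", catch_response=True) as response:
            if response.status_code == 200 and "username" in response.text:
                response.success()
            else:
                # Marks the request as failed, which fires request_failure under the hood
                response.failure("unexpected profile response")


class WebsiteUser(HttpLocust):
    task_set = CheckedTasks
    host = "http://localhost:8080"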

httprunner/httprunner: httprunner/task.py (view on GitHub)
    def run(self):
        for test_suite in self.test_suite_list:
            for test in test_suite:
                try:
                    test.runTest()
                except exceptions.MyBaseError as ex:
                    from locust.events import request_failure
                    request_failure.fire(
                        request_type=test.testcase_dict.get("request", {}).get("method"),
                        name=test.testcase_dict.get("request", {}).get("url"),
                        response_time=0,
                        exception=ex
                    )

redhat-developer/rh-che: .ci/workspace-startup/osioperf.py (view on GitHub)
  def waitForWorkspaceToStart(self):
    timeout_in_seconds = 300 if os.getenv("START_HARD_FAILURE_TIMEOUT") == None else int(os.getenv("START_HARD_FAILURE_TIMEOUT"))
    soft_timeout_seconds = 60 if os.getenv("START_SOFT_FAILURE_TIMEOUT") == None else int(os.getenv("START_SOFT_FAILURE_TIMEOUT"))
    isSoftFailure = False
    workspace_status = self.getWorkspaceStatusSelf()
    while workspace_status != "RUNNING":
      now = time.time()
      elapsed_time = int(now - self.start)
      if (workspace_status == "STOPPED"):
        events.request_failure.fire(request_type="REPEATED_GET",
                                    name="startWorkspace_"+self.clusterName,
                                    response_time=self._tick_timer(),
                                    exception="Workspace became STOPPED after " 
                                              + str(elapsed_time)
                                              + " seconds.")
        self.log("Workspace " + self.id + " became STOPPED after " 
                 + str(elapsed_time) + " seconds.")
        os.system(self.hard_start_failure_cmd+" -o 1 >/dev/null 2>&1")
        return
      if elapsed_time > soft_timeout_seconds and isSoftFailure == False:
        self.log("Workspace startup on "+self.clusterName+" failed with soft failure.")
        os.system(self.soft_start_failure_cmd+" -o 1 >/dev/null 2>&1")
        isSoftFailure = True
      if elapsed_time > timeout_in_seconds:
        events.request_failure.fire(request_type="REPEATED_GET",
                                    name="startWorkspace_"+self.clusterName,