def check_format(file_path, content):
    """ check whether the testcase file content is in valid format
    """
    if not content:
        # testcase file content is empty
        err_msg = u"Testcase file content is empty: {}".format(file_path)
        logger.log_error(err_msg)
        raise exception.FileFormatError(err_msg)
    elif not isinstance(content, (list, dict)):
        # testcase file content does not match testcase format
        err_msg = u"Testcase file content format invalid: {}".format(file_path)
        logger.log_error(err_msg)
        raise exception.FileFormatError(err_msg)
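
# Illustration only (not part of the source above): exercising check_format
# with the two kinds of invalid input it rejects. The file paths are made up;
# the httprunner `exception` module is assumed to be imported as in the snippet.
for sample_path, sample_content in [
    ("demo/testcase_empty.yml", None),           # empty content
    ("demo/testcase_plain.yml", "plain string")  # neither list nor dict
]:
    try:
        check_format(sample_path, sample_content)
    except exception.FileFormatError as ex:
        print("rejected {}: {}".format(sample_path, ex))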

# locust is not installed: log a warning and exit
logger.log_warning(msg)
exit(1)

sys.argv[0] = 'locust'
if len(sys.argv) == 1:
    sys.argv.extend(["-h"])

if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
    locusts.main()
    sys.exit(0)

try:
    testcase_index = sys.argv.index('-f') + 1
    assert testcase_index < len(sys.argv)
except (ValueError, AssertionError):
    logger.log_error("Testcase file is not specified, exit.")
    sys.exit(1)

testcase_file_path = sys.argv[testcase_index]
sys.argv[testcase_index] = locusts.parse_locustfile(testcase_file_path)

if "--processes" in sys.argv:
    # e.g. locusts -f locustfile.py --processes 4
    if "--no-web" in sys.argv:
        logger.log_error("conflict parameter args: --processes & --no-web. \nexit.")
        sys.exit(1)

    processes_index = sys.argv.index('--processes')
    processes_count_index = processes_index + 1
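
# Illustration only: the index arithmetic above applied to a sample command
# line. The argv values are made up for this sketch and parse_locustfile is
# not called here.
sample_argv = ["locust", "-f", "demo/api_test.yml", "--processes", "4"]
testcase_index_demo = sample_argv.index("-f") + 1                  # -> 2
processes_count_index_demo = sample_argv.index("--processes") + 1  # -> 4
assert sample_argv[testcase_index_demo] == "demo/api_test.yml"
assert sample_argv[processes_count_index_demo] == "4"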
# e.g. "content.person.name" => ["content", "person.name"]
try:
top_query, sub_query = field.split('.', 1)
except ValueError:
top_query = field
sub_query = None
if top_query == "cookies":
cookies = self.cookies
try:
return cookies[sub_query]
except KeyError:
err_msg = u"Failed to extract attribute from cookies!\n"
err_msg += u"cookies: {}\n".format(cookies)
err_msg += u"attribute: {}".format(sub_query)
logger.log_error(err_msg)
raise exception.ParamsError(err_msg)
try:
top_query_content = getattr(self, top_query)
except AttributeError:
err_msg = u"Failed to extract attribute from response object: resp_obj.{}".format(top_query)
logger.log_error(err_msg)
raise exception.ParamsError(err_msg)

if sub_query:
    if not isinstance(top_query_content, (dict, CaseInsensitiveDict, list)):
        try:
            # TODO: remove compatibility for content, text
            if isinstance(top_query_content, bytes):
                top_query_content = top_query_content.decode("utf-8")

            top_query_content = json.loads(top_query_content)
        except json.decoder.JSONDecodeError:
            err_msg = u"Failed to extract data with delimiter!\n"
            err_msg += u"response content: {}\n".format(self.content)
            err_msg += u"extract field: {}\n".format(field)
            logger.log_error(err_msg)
            raise exception.ParamsError(err_msg)
    # e.g. key: resp_headers_content_type, sub_query = "content-type"
    return utils.query_json(top_query_content, sub_query)
else:
    # e.g. key: resp_status_code, resp_content
    return top_query_content

# error handling when extraction from the response fails
except AttributeError:
    err_msg = u"Failed to extract value from response!\n"
    err_msg += u"response content: {}\n".format(self.content)
    err_msg += u"extract field: {}\n".format(field)
    logger.log_error(err_msg)
    raise exception.ParamsError(err_msg)
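
# Illustration only: how a dotted field such as "content.person.name" walks a
# parsed JSON body. query_json_demo is a simplified stand-in written for this
# sketch; it is not httprunner's utils.query_json.
def query_json_demo(json_content, query, delimiter="."):
    for key in query.split(delimiter):
        if isinstance(json_content, (list, tuple)):
            json_content = json_content[int(key)]   # numeric key indexes a list
        else:
            json_content = json_content[key]        # string key indexes a dict
    return json_content

sample_body = {"person": {"name": {"first_name": "Leo"}, "cities": ["Guangzhou", "Shenzhen"]}}
assert query_json_demo(sample_body, "person.name.first_name") == "Leo"
assert query_json_demo(sample_body, "person.cities.0") == "Guangzhou"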

def __getattr__(self, key):
    try:
        if key == "json":
            value = self.resp_obj.json()
        else:
            value = getattr(self.resp_obj, key)

        self.__dict__[key] = value
        return value
    except AttributeError:
        err_msg = "ResponseObject does not have attribute: {}".format(key)
        logger.log_error(err_msg)
        raise exception.ParamsError(err_msg)
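
# Illustration only: the lazy attribute caching used by __getattr__ above,
# shown on a minimal stand-in class rather than httprunner's ResponseObject.
import collections

class CachedProxyDemo(object):
    def __init__(self, target):
        self.target = target

    def __getattr__(self, key):
        value = getattr(self.target, key)   # only reached when key is not cached yet
        self.__dict__[key] = value          # cache it, so later lookups bypass __getattr__
        return value

FakeResp = collections.namedtuple("FakeResp", ["status_code", "text"])
proxy = CachedProxyDemo(FakeResp(200, "OK"))
assert proxy.status_code == 200             # resolved via __getattr__ on first access
assert "status_code" in proxy.__dict__      # cached afterwards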

def _extract_field_with_regex(self, field):
    """ extract field from response content with a regex.
        requests.Response body could be json or html text.
    @param (str) field: regex string that matches r".*\(.*\).*", i.e. it must contain one capture group
    e.g.
        self.text: "LB123abcRB789"
        field: "LB[\d]*(.*)RB[\d]*"
        return: abc
    """
    matched = re.search(field, self.text)
    if not matched:
        err_msg = u"Failed to extract data with regex!\n"
        err_msg += u"response content: {}\n".format(self.content)
        err_msg += u"regex: {}\n".format(field)
        logger.log_error(err_msg)
        raise exception.ParamsError(err_msg)

    return matched.group(1)
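
# Illustration only: the docstring example above, reproduced with the `re`
# module directly (standalone, no response object involved).
import re
sample_text = "LB123abcRB789"
sample_match = re.search(r"LB[\d]*(.*)RB[\d]*", sample_text)
assert sample_match is not None and sample_match.group(1) == "abc"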

    self.context.validate(validators, resp_obj)
except (exception.ParamsError, exception.ResponseError,
        exception.ValidationError, exception.ParseResponseError):
    # log request
    err_req_msg = "request: \n"
    err_req_msg += "headers: {}\n".format(parsed_request.pop("headers", {}))
    for k, v in parsed_request.items():
        err_req_msg += "{}: {}\n".format(k, v)
    logger.log_error(err_req_msg)

    # log response
    err_resp_msg = "response: \n"
    err_resp_msg += "status_code: {}\n".format(resp_obj.status_code)
    err_resp_msg += "headers: {}\n".format(resp_obj.headers)
    err_resp_msg += "content: {}\n".format(resp_obj.content)
    logger.log_error(err_resp_msg)

    raise
self.meta_data["stat"] = {
"response_time_ms": response_time_ms,
"elapsed_ms": response.elapsed.microseconds / 1000.0,
"content_size": content_size
}
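
# Aside (illustration only): requests exposes response.elapsed as a
# datetime.timedelta, whose .microseconds attribute holds only the sub-second
# component; the full duration in milliseconds would be total_seconds() * 1000.
import datetime
sample_elapsed = datetime.timedelta(seconds=2, microseconds=345000)
assert sample_elapsed.microseconds / 1000.0 == 345.0       # sub-second part only
assert sample_elapsed.total_seconds() * 1000.0 == 2345.0   # full duration in ms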

# record request and response histories, including 30X redirections
response_list = response.history + [response]
self.meta_data["data"] = [
    self.get_req_resp_record(resp_obj)
    for resp_obj in response_list
]
self.meta_data["data"][0]["request"].update(kwargs)

try:
    response.raise_for_status()
except RequestException as e:
    logger.log_error(u"{exception}".format(exception=str(e)))
else:
    logger.log_info(
        """status_code: {}, response_time(ms): {} ms, response_length: {} bytes\n""".format(
            response.status_code,
            response_time_ms,
            content_size
        )
    )

return response
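
# Illustration only: why the RequestException handler above catches failures
# from raise_for_status(). requests raises HTTPError (a RequestException
# subclass) for 4XX/5XX responses; 2XX responses fall through to the else
# branch. The URL is an httpbin example and requires network access.
import requests
demo_resp = requests.get("https://httpbin.org/status/404")
try:
    demo_resp.raise_for_status()
except requests.exceptions.RequestException as ex:
    print("would be logged as error:", ex)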