How to use the junitparser.Error function in junitparser

To help you get started, we’ve selected a few junitparser.Error examples based on popular ways it is used in public projects.

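Before looking at the project snippets, here is a minimal sketch of the usual pattern: build a JUnitXml report, attach a TestSuite and a TestCase, and record an unexpected problem by assigning an Error result. The suite, case, and file names below are placeholders rather than anything taken from the projects shown on this page.

from junitparser import Error, JUnitXml, TestCase, TestSuite

xml = JUnitXml()
suite = TestSuite('example-suite')
xml.add_testsuite(suite)

case = TestCase('example-case')
suite.add_testcase(case)

# Error(message, type) marks the case as errored (an unexpected problem)
# rather than failed (an assertion that did not hold).
# The project snippets below assign a single result, the junitparser 1.x style;
# junitparser 2.x treats result as a list, e.g. case.result = [Error(...)].
case.result = Error('Cannot open returned output payload', 'IOError')

xml.write('example-report.xml')

# When reading a report back, isinstance() against Error classifies a case,
# as the xfstests summary snippets below do.
for reported_case in suite:
    if isinstance(reported_case.result, Error):
        print(reported_case.name, reported_case.result.message)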

github IBM / watson-assistant-workbench / scripts / functions_test_evaluate.py
    # run evaluation
    xml = JUnitXml()
    suite = TestSuite(os.path.splitext(os.path.basename(args.inputFileName))[0]) # once we support multiple test files, a test suite should be created for each one
    xml.add_testsuite(suite)
    suite.timestamp = str(datetime.datetime.now()) # time of the evaluation, not of the testing itself (evaluations could differ)
    #suite.hostname = ''
    testCounter = 0
    for test in inputJson:
        case = TestCase()
        suite.add_testcase(case)

        if not isinstance(test, dict):
            errorMessage = "Input test array element {:d} is not dictionary. Each test has to be dictionary, please see doc!".format(testCounter)
            logger.error(errorMessage)
            case.result = Error(errorMessage, 'ValueError')
            continue

        logger.info("Test number %d, name '%s'", testCounter, test.get('name', '-'))
        case.name = test.get('name', None)

        if 'time' in test:
            time = test.get('time')
            if isinstance(time, int):
                case.time = time
            else:
                logger.warning("Time is not type of integer, type '%s'", str(type(time).__name__))

        # load test expected output payload json
        testOutputExpectedJson = test['outputExpected']
        testOutputExpectedPath = None
        try:
github IBM / watson-assistant-workbench / scripts / functions_test_evaluate.py
        if not testOutputExpectedPath:
            logger.debug("Expected output payload provided inside the test")

        # load test returned output payload json
        testOutputReturnedJson = test['outputReturned']
        testOutputReturnedPath = None
        try:
            if testOutputReturnedJson.startswith('@'):
                testOutputReturnedPath = os.path.join(os.path.dirname(args.inputFileName), testOutputReturnedJson[1:])
                logger.debug("Loading returned output payload from file '%s'", testOutputReturnedPath)
                try:
                    outputReturnedFile = open(testOutputReturnedPath, 'r')
                except IOError:
                    errorMessage = "Cannot open returned output payload from file '{}'".format(testOutputReturnedPath)
                    logger.error(errorMessage)
                    case.result = Error(errorMessage, 'IOError')
                    continue
                try:
                    testOutputReturnedJson = json.load(outputReturnedFile)
                except ValueError as e:
                    errorMessage = "Cannot decode json from returned output payload from file '{}', error '{}'".format(testOutputReturnedPath, str(e))
                    logger.error(errorMessage)
                    case.result = Error(errorMessage, 'ValueError')
                    continue
        except AttributeError:
            pass

        if not testOutputReturnedPath:
            logger.debug("Returned output payload provided inside the test")

        # evaluate test
        if 'type' not in test or test['type'] == 'EXACT_MATCH':
github tytso / xfstests-bld / kvm-xfstests / test-appliance / files / usr / lib / python2.7 / gen_results_summary.py
    if verbose:
        for test_case in testsuite:
            status = 'Pass'
            if isinstance(test_case.result, Failure):
                status = 'Failed'
            if isinstance(test_case.result, Skipped):
                status = 'Skipped'
            if isinstance(test_case.result, Error):
                status = 'Error'
            out_f.write("  %-12s %-8s %ds\n" %
                        (test_case.name, status, test_case.time))
    else:
        if failures > 0:
            print_tests(out_f, testsuite, Failure, 'Failures')
        if errors > 0:
            print_tests(out_f, testsuite, Error, 'Errors')
github tytso / xfstests-bld / kvm-xfstests / test-appliance / files / usr / lib / python2.7 / gen_results_summary.py
    out_f.write('%s: %d tests, ' % (cfg, tests))
    if failures > 0:
        out_f.write('%d failures, ' % failures)
    if errors > 0:
        out_f.write('%d errors, ' % errors)
    if skipped > 0:
        out_f.write('%d skipped, ' % skipped)
    out_f.write('%d seconds\n' % runtime)
    if verbose:
        for test_case in testsuite:
            status = 'Pass'
            if isinstance(test_case.result, Failure):
                status = 'Failed'
            if isinstance(test_case.result, Skipped):
                status = 'Skipped'
            if isinstance(test_case.result, Error):
                status = 'Error'
            out_f.write("  %-12s %-8s %ds\n" %
                        (test_case.name, status, test_case.time))
    else:
        if failures > 0:
            print_tests(out_f, testsuite, Failure, 'Failures')
        if errors > 0:
            print_tests(out_f, testsuite, Error, 'Errors')
github zephyrproject-rtos / zephyr / scripts / ci / check-compliance.py
    def run(self):
        self.prepare()

        if os.path.exists(DOCS_WARNING_FILE) and os.path.getsize(DOCS_WARNING_FILE) > 0:
            with open(DOCS_WARNING_FILE, "rb") as f:
                log = f.read()

                self.case.result = Error("Documentation Issues", "failure")
                self.case.result._elem.text = log.decode('utf8')