How to use the benchexec.result module in BenchExec

To help you get started, we’ve selected a few examples of benchexec.result, based on popular ways it is used in public projects.
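Before the real-world examples below, here is a minimal sketch of the most common pattern: a tool-info module whose determine_result() maps the tool's output to constants from benchexec.result. The Tool class, the executable path, and the verdict strings matched here are placeholder assumptions for illustration only, not part of any real tool.

from benchexec import result
from benchexec.tools.template import BaseTool


class Tool(BaseTool):
    """Hypothetical tool-info module, only to illustrate the pattern."""

    def executable(self):
        return "/usr/bin/true"  # placeholder path, an assumption

    def name(self):
        return "example-tool"  # placeholder name, an assumption

    def determine_result(self, returncode, returnsignal, output, isTimeout):
        if isTimeout:
            return "TIMEOUT"
        for line in output:
            line = line.strip()
            # The verdict strings below are assumed; real tools print their own.
            if line == "VERIFICATION SUCCESSFUL":
                return result.RESULT_TRUE_PROP
            if line == "VERIFICATION FAILED":
                return result.RESULT_FALSE_REACH
        if returncode != 0:
            return result.RESULT_ERROR
        return result.RESULT_UNKNOWN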

Example from sosy-lab/benchexec (benchexec/tools/testcov.py): the TestCov tool-info module scans the tool output and maps result lines to constants such as RESULT_FALSE_REACH and RESULT_DONE.
        # scan the tool output for a result or error line
        for line in output:
            if line.startswith("ERROR:"):
                if "timeout" in line.lower():
                    return "TIMEOUT"
                else:
                    return "ERROR ({0})".format(returncode)
            elif line.startswith("Result: FALSE"):
                return result.RESULT_FALSE_REACH
            elif line.startswith("Result: TRUE"):
                return result.RESULT_TRUE_PROP
            elif line.startswith("Result: DONE"):
                return result.RESULT_DONE
            elif line.startswith("Result: ERROR"):
                # matches ERROR and ERROR followed by some reason in parentheses,
                # e.g., "ERROR (TRUE)" or "ERROR(TRUE)"
                return re.search(r"ERROR(\s*\(.*\))?", line).group(0)
        return result.RESULT_UNKNOWN
Example from sosy-lab/benchexec (benchexec/tablegenerator/__init__.py): table-generator looks up the property and expected verdict of a task via result.Property and result.ExpectedResult.
def get_property_of_task(task_id):
    task_name = task_id[0]
    property_names = task_id[1].split() if task_id[1] else []
    if task_name.endswith(".yml"):
        # try to find property file of task and create Property object
        try:
            task_template = model.load_task_definition_file(task_name)
            for prop_dict in task_template.get("properties", []):
                if "property_file" in prop_dict:
                    expanded = benchexec.util.expand_filename_pattern(
                        prop_dict["property_file"], os.path.dirname(task_name)
                    )
                    if len(expanded) == 1:
                        prop = result.Property.create(expanded[0], allow_unknown=True)
                        if set(prop.names) == set(property_names):
                            expected_result = prop_dict.get("expected_verdict")
                            if isinstance(expected_result, bool):
                                expected_result = result.ExpectedResult(
                                    expected_result, prop_dict.get("subproperty")
                                )
                            else:
                                expected_result = None
                            return (prop, expected_result)
        except BenchExecException as e:
            logging.debug("Could not load task-template file %s: %s", task_name, e)
    elif property_names:
        prop = result.Property.create_from_names(property_names)
        expected_result = result.expected_results_of_file(task_name).get(prop.name)
        return (prop, expected_result)
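As a quick usage note for the result.Property API shown above, here is a small sketch; the property-file path and the task file name are assumptions chosen for illustration.

from benchexec import result

# "unreach-call.prp" is an assumed path to an SV-COMP-style property file.
prop = result.Property.create("unreach-call.prp", allow_unknown=True)
print(prop.name, prop.names)

# For old-style tasks, expected verdicts are encoded in the file name
# ("test_false-unreach-call.c" is a hypothetical example).
expected = result.expected_results_of_file("test_false-unreach-call.c")
print(expected.get(prop.name))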
Example from sosy-lab/benchexec (benchexec/tools/symbiotic4.py): the Symbiotic 4 tool-info module translates TRUE/FALSE/UNKNOWN output lines into the corresponding result constants.
    def determine_result(self, returncode, returnsignal, output, isTimeout):
        if isTimeout:
            return "timeout"

        if output is None:
            return "error (no output)"

        for line in output:
            line = line.strip()
            if line == "TRUE":
                return result.RESULT_TRUE_PROP
            elif line == "UNKNOWN":
                return result.RESULT_UNKNOWN
            elif line.startswith("FALSE (valid-deref)"):
                return result.RESULT_FALSE_DEREF
            elif line.startswith("FALSE (valid-free)"):
                return result.RESULT_FALSE_FREE
            elif line.startswith("FALSE (valid-memtrack)"):
                return result.RESULT_FALSE_MEMTRACK
            elif line.startswith("FALSE (overflow)"):
                return result.RESULT_FALSE_OVERFLOW
            elif line.startswith("FALSE"):
                return result.RESULT_FALSE_REACH

        return result.RESULT_ERROR
Example from sosy-lab/cpachecker (BenchExec/benchexec/tablegenerator/__init__.py): an older bundled table-generator computes summary rows and a score from the result.SCORE_* constants.
    # collect some statistics
    sumRow = []
    correctRow = []
    wrongTrueRow = []
    wrongFalseRow = []
    wrongPropertyRow = []
    scoreRow = []

    for column, values in zip(columns, listsOfValues):
        if column.title == 'status':
            (countCorrectTrue, countCorrectFalse, countCorrectProperty,
             countWrongTrue, countWrongFalse, countWrongProperty,
             countMissing) = get_category_count(statusList)

            sum     = StatValue(len([status for (category, status) in statusList if status]))
            correct = StatValue(countCorrectTrue + countCorrectFalse + countCorrectProperty)
            score   = StatValue(result.SCORE_CORRECT_TRUE   * countCorrectTrue + \
                                result.SCORE_CORRECT_FALSE * countCorrectFalse + \
                                result.SCORE_CORRECT_FALSE * countCorrectProperty + \
                                result.SCORE_WRONG_TRUE     * countWrongTrue + \
                                result.SCORE_WRONG_FALSE   * countWrongFalse + \
                                result.SCORE_WRONG_FALSE   * countWrongProperty,
                                )
            wrongTrue   = StatValue(countWrongTrue)
            wrongFalse = StatValue(countWrongFalse)
            wrongProperty = StatValue(countWrongProperty)

        else:
            sum, correct, wrongTrue, wrongFalse, wrongProperty = get_stats_of_number_column(values, statusList, column.title)
            score = ''

        if (sum.sum, correct.sum, wrongTrue.sum, wrongFalse.sum) == (0,0,0,0):
            (sum, correct, wrongTrue, wrongFalse) = (None, None, None, None)
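To make the weighted score above concrete, here is a short sketch with made-up counts, using the SCORE_* constants from benchexec.result that this older table-generator relies on.

from benchexec import result

# Made-up counts, only to illustrate the weighted score formula above.
count_correct_true, count_correct_false = 10, 5
count_wrong_true, count_wrong_false = 1, 2

score = (result.SCORE_CORRECT_TRUE * count_correct_true
         + result.SCORE_CORRECT_FALSE * count_correct_false
         + result.SCORE_WRONG_TRUE * count_wrong_true
         + result.SCORE_WRONG_FALSE * count_wrong_false)
print(score)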
Example from sosy-lab/benchexec (benchexec/model.py): benchexec.model creates result.Property objects for the property files referenced by a task-definition file.
        run = Run(
            task_def_file,
            input_files,
            options,
            self,
            local_propertytag,
            required_files_pattern,
            required_files,
        )

        # run.propertyfile of Run is fully determined only after Run is created,
        # thus we handle it and the expected results here.
        if not run.propertyfile:
            return run

        # TODO: support "property_name" attribute in yaml
        prop = result.Property.create(run.propertyfile)
        run.properties = [prop]

        for prop_dict in task_def.get("properties", []):
            if not isinstance(prop_dict, dict) or "property_file" not in prop_dict:
                raise BenchExecException(
                    "Missing property file for property in task-definition file {}.".format(
                        task_def_file
                    )
                )
            expanded = util.expand_filename_pattern(
                prop_dict["property_file"], os.path.dirname(task_def_file)
            )
            if len(expanded) != 1:
                raise BenchExecException(
                    "Property pattern '{}' in task-definition file {} does not refer to exactly one file.".format(
                        prop_dict["property_file"], task_def_file
                    )
                )
Example from smackers/smack (svcomp/bench/toInstallDir/benchexec/benchexec/tablegenerator/__init__.py): a bundled copy of table-generator counts results per category using the result.CATEGORY_* and result.RESULT_* constants.
def get_category_count(categoryList):
    # count the different elements in categoryList
    counts = collections.defaultdict(int)
    for category in categoryList:
        counts[category] += 1

    # warning: read next lines carefully, there are some brackets and commas!
    return (
        # correctTrue, correctFalse
            counts[result.CATEGORY_CORRECT, result.RESULT_TRUE_PROP],
            counts[result.CATEGORY_CORRECT, result.RESULT_FALSE_REACH] \
          + counts[result.CATEGORY_CORRECT, result.RESULT_FALSE_DEREF] \
          + counts[result.CATEGORY_CORRECT, result.RESULT_FALSE_FREE] \
          + counts[result.CATEGORY_CORRECT, result.RESULT_FALSE_MEMTRACK] \
          + counts[result.CATEGORY_CORRECT, result.RESULT_FALSE_TERMINATION],

        # wrongTrue, wrongFalse
            counts[result.CATEGORY_WRONG, result.RESULT_TRUE_PROP],
            counts[result.CATEGORY_WRONG, result.RESULT_FALSE_REACH] \
          + counts[result.CATEGORY_WRONG, result.RESULT_FALSE_DEREF] \
          + counts[result.CATEGORY_WRONG, result.RESULT_FALSE_FREE] \
          + counts[result.CATEGORY_WRONG, result.RESULT_FALSE_MEMTRACK] \
          + counts[result.CATEGORY_WRONG, result.RESULT_FALSE_TERMINATION],

        # missing
            counts[result.CATEGORY_MISSING, result.RESULT_TRUE_PROP] \
          + counts[result.CATEGORY_MISSING, result.RESULT_FALSE_REACH] \
          + counts[result.CATEGORY_MISSING, result.RESULT_FALSE_DEREF] \
          + counts[result.CATEGORY_MISSING, result.RESULT_FALSE_FREE] \
          + counts[result.CATEGORY_MISSING, result.RESULT_FALSE_MEMTRACK] \
          + counts[result.CATEGORY_MISSING, result.RESULT_FALSE_TERMINATION] \
Example from sosy-lab/benchexec (benchexec/tools/divine.py): the DIVINE tool-info module inspects the last output line for "result: true" or "result: false".
        if not output:
            return "ERROR - no output"

        last = output[-1]

        if isTimeout:
            return "TIMEOUT"

        if returncode != 0:
            return "ERROR - Pre-run"

        if last is None:
            return "ERROR - no output"
        elif "result: true" in last:
            return result.RESULT_TRUE_PROP
        elif "result: false" in last:
            return result.RESULT_FALSE_REACH
        else:
            return result.RESULT_UNKNOWN
Example from sosy-lab/benchexec (benchexec/tools/ultimate.py): the Ultimate tool-info module searches the output for marker strings and returns the matching result constant.
        for line in output:
            if line.find(unsupported_syntax_errorstring) != -1:
                return "ERROR: UNSUPPORTED SYNTAX"
            if line.find(incorrect_syntax_errorstring) != -1:
                return "ERROR: INCORRECT SYNTAX"
            if line.find(type_errorstring) != -1:
                return "ERROR: TYPE ERROR"
            if line.find(witness_errorstring) != -1:
                return "ERROR: INVALID WITNESS FILE"
            if line.find(exception_errorstring) != -1:
                return "ERROR: EXCEPTION"
            if self._contains_overapproximation_result(line):
                return "UNKNOWN: OverapproxCex"
            if line.find(termination_false_string) != -1:
                return result.RESULT_FALSE_TERMINATION
            if line.find(termination_true_string) != -1:
                return result.RESULT_TRUE_PROP
            if line.find(ltl_false_string) != -1:
                return "FALSE(valid-ltl)"
            if line.find(ltl_true_string) != -1:
                return result.RESULT_TRUE_PROP
            if line.find(unsafety_string) != -1:
                return result.RESULT_FALSE_REACH
            if line.find(mem_deref_false_string) != -1:
                return result.RESULT_FALSE_DEREF
            if line.find(mem_deref_false_string_2) != -1:
                return result.RESULT_FALSE_DEREF
            if line.find(mem_free_false_string) != -1:
                return result.RESULT_FALSE_FREE
            if line.find(mem_memtrack_false_string) != -1:
                return result.RESULT_FALSE_MEMTRACK
Example from sosy-lab/cpachecker (BenchExec/benchexec/tools/cpachecker.py): an older CPAchecker tool-info module parses the "Verification result:" line, using the STATUS_* constant names of that bundled BenchExec version.
                status = 'JAVA HEAP ERROR'
            elif line.startswith('Error: ') and not status:
                status = 'ERROR'
                if 'Unsupported C feature (recursion)' in line:
                    status = 'ERROR (recursion)'
                elif 'Unsupported C feature (threads)' in line:
                    status = 'ERROR (threads)'
                elif 'Parsing failed' in line:
                    status = 'ERROR (parsing failed)'
            elif line.startswith('For your information: CPAchecker is currently hanging at') and status == 'ERROR (1)' and isTimeout:
                status = 'TIMEOUT'

            elif line.startswith('Verification result: '):
                line = line[21:].strip()
                if line.startswith('TRUE'):
                    newStatus = result.STATUS_TRUE_PROP
                elif line.startswith('FALSE'):
                    newStatus = result.STATUS_FALSE_REACH
                    match = re.match(r'.* Property violation \(([^:]*)(:.*)?\) found by chosen configuration.*', line)
                    if match and match.group(1) in ['valid-deref', 'valid-free', 'valid-memtrack']:
                        newStatus = result.STR_FALSE + '(' + match.group(1) + ')'
                else:
                    newStatus = result.STATUS_UNKNOWN

                if not status:
                    status = newStatus
                elif newStatus != result.STATUS_UNKNOWN:
                    status = "{0} ({1})".format(status, newStatus)

        if not status:
            status = result.STATUS_UNKNOWN
        return status