How to use the benchexec.tools.template.BaseTool class in BenchExec

To help you get started, we've selected a few BenchExec examples showing popular ways this class is used in public projects.

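A tool-info module subclasses benchexec.tools.template.BaseTool and overrides a handful of methods: executable() to locate the binary, name() for display, cmdline() to build the command line, and determine_result() to map the tool's output to a BenchExec result constant. The sketch below shows this common shape; the tool name "mytool", the REQUIRED_PATHS entries, and the "VERIFICATION SUCCESSFUL" output marker are illustrative assumptions rather than parts of any real tool.

import benchexec.result as result
import benchexec.util as util
import benchexec.tools.template


class Tool(benchexec.tools.template.BaseTool):
    """Tool info for a hypothetical verifier called MyTool (sketch only)."""

    # Files and directories that must be deployed together with the executable.
    REQUIRED_PATHS = ["bin", "lib"]

    def executable(self):
        # Locate the tool's binary on the PATH.
        return util.find_executable("mytool")

    def name(self):
        # Human-readable name shown in result tables.
        return "MyTool"

    def cmdline(self, executable, options, tasks, propertyfile, rlimits):
        # Command line: binary, user-supplied options, then the input files.
        return [executable] + options + tasks

    def determine_result(self, returncode, returnsignal, output, isTimeout):
        # `output` is the tool's output as a list of lines.
        if any("VERIFICATION SUCCESSFUL" in line for line in output):
            return result.RESULT_TRUE_PROP
        return result.RESULT_UNKNOWN

In a typical setup, the benchmark definition's tool attribute names such a module so that BenchExec can find and load it. The examples below show how real projects fill in these methods.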

Example from sosy-lab/benchexec: benchexec/tools/tbf_testsuite_validator.py (view on GitHub)
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0

import re
import benchexec.result as result
import benchexec.util as util
import benchexec.tools.template


class Tool(benchexec.tools.template.BaseTool):
    """
    Tool info for tbf test-suite validator (https://gitlab.com/sosy-lab/software/test-format).
    """

    REQUIRED_PATHS = ["python_modules", "lib", "bin"]

    def program_files(self, executable):
        return self._program_files_from_executable(
            executable, self.REQUIRED_PATHS, parent_dir=True
        )

    def executable(self):
        return util.find_executable(
            "tbf-testsuite-validator", "bin/tbf-testsuite-validator"
        )

Example from sosy-lab/benchexec: benchexec/test_analyze_run_result.py (view on GitHub)
def create_run(self, info_result=RESULT_UNKNOWN):
        runSet = types.SimpleNamespace()
        runSet.log_folder = "."
        runSet.result_files_folder = "."
        runSet.options = []
        runSet.real_name = None
        runSet.propertytag = None
        runSet.benchmark = lambda: None
        runSet.benchmark.base_dir = "."
        runSet.benchmark.benchmark_file = "Test.xml"
        runSet.benchmark.columns = []
        runSet.benchmark.name = "Test"
        runSet.benchmark.instance = "Test"
        runSet.benchmark.rlimits = {}
        runSet.benchmark.tool = BaseTool()

        def determine_result(self, returncode, returnsignal, output, isTimeout=False):
            return info_result

        runSet.benchmark.tool.determine_result = determine_result

        return Run(
            identifier="test.c", sourcefiles=["test.c"], fileOptions=[], runSet=runSet
        )
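
In this test, BaseTool is instantiated directly and its determine_result method is replaced with a stub, so the unit test can control the reported result without running a real tool.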

Example from sosy-lab/benchexec: benchexec/tools/llbmc.py (view on GitHub)
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0

import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result


class Tool(benchexec.tools.template.BaseTool):
    """
    This class serves as a tool adaptor for LLBMC.
    """

    def executable(self):
        return util.find_executable("llbmc")

    def version(self, executable):
        return self._version_from_tool(executable).splitlines()[2][8:18]

    def name(self):
        return "LLBMC"

    def cmdline(self, executable, options, tasks, propertyfile, rlimits):
        assert len(tasks) == 1, "only one inputfile supported"
        return [executable] + options + tasks

Example from sosy-lab/benchexec: benchexec/tools/coveriteam.py (view on GitHub)
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0

import benchexec.tools.template
import benchexec.util as util
import benchexec.result as result


class Tool(benchexec.tools.template.BaseTool):
    """
    Tool info for CoVeriTeam: On-Demand Composition of Cooperative Verification Systems.
    URL: https://gitlab.com/sosy-lab/software/coveriteam.

    This class has 2 purposes:
        1. to serve as an abstract class for specific coveriteam programs like verifiers, validators, etc.
        2. to serve as the tool info module for any generic coveriteam program.
    """

    # TODO: I am not sure about the following folders:
    # 1. examples and config: should they be included or not? This can also be handled with the required files tag in the benchdef.
    # 2. tools and toolinfocache: these are cache folders. Isn't it better just to wrap them in one folder called cache?
    # To be resolved before the final merge.
    REQUIRED_PATHS = [
        "coveriteam",
        "bin",

Example from sosy-lab/benchexec: benchexec/tools/symbiotic3.py (view on GitHub)
http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from os.path import dirname
from os.path import join as joinpath

import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result

class Tool(benchexec.tools.template.BaseTool):
    """
    Symbiotic tool info object
    """

    REQUIRED_PATHS = [
                  "bin",
                  "build-fix.sh",
                  "include",
                  "lib",
                  "lib32",
                  "lib.c",
                  "path_to_ml.pl",
                  "symbiotic"
                  ]

    def executable(self):

Example from sosy-lab/benchexec: benchexec/tools/satabs.py (view on GitHub)
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0

import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result


class Tool(benchexec.tools.template.BaseTool):
    def executable(self):
        return util.find_executable("satabs")

    def version(self, executable):
        return self._version_from_tool(executable)

    def name(self):
        return "SatAbs"

    def determine_result(self, returncode, returnsignal, output, isTimeout):
        output = "\n".join(output)
        if "VERIFICATION SUCCESSFUL" in output:
            assert returncode == 0
            status = result.RESULT_TRUE_PROP
        elif "VERIFICATION FAILED" in output:
            assert returncode == 10

Example from sosy-lab/benchexec: benchexec/tools/ultimate.py (view on GitHub)
import benchexec.result as result
import benchexec.tools.template
import benchexec.util as util
from benchexec import BenchExecException
from benchexec.model import MEMLIMIT
from benchexec.tools.template import UnsupportedFeatureException

_OPTION_NO_WRAPPER = "--force-no-wrapper"
_SVCOMP17_VERSIONS = {"f7c3ed31"}
_SVCOMP17_FORBIDDEN_FLAGS = {"--full-output", "--architecture"}
_ULTIMATE_VERSION_REGEX = re.compile(r"^Version is (.*)$", re.MULTILINE)
# .jar files that are used as launcher arguments with most recent .jar first
_LAUNCHER_JARS = ["plugins/org.eclipse.equinox.launcher_1.3.100.v20150511-1540.jar"]


class UltimateTool(benchexec.tools.template.BaseTool):
    """
    Abstract tool info for Ultimate-based tools.
    """

    REQUIRED_PATHS = [
        "artifacts.xml",
        "config",
        "configuration",
        "cvc4",
        "cvc4nyu",
        "cvc4-LICENSE",
        "features",
        "LICENSE",
        "LICENSE.GPL",
        "LICENSE.GPL.LESSER",
        "mathsat",

Example from sosy-lab/cpachecker: BenchExec/benchexec/tools/rand.py (view on GitHub)
from random import random
import benchexec.tools.template
import benchexec.result as result

class Tool(benchexec.tools.template.BaseTool):
    """
    This tool is an imaginary tool that randomly returns SAFE and UNSAFE.
    To use it you need a normal benchmark-xml-file
    with the tool and sourcefiles; options, however, are ignored.
    """

    def executable(self):
        return '/bin/true'

    def name(self):
        return 'Random'

    def cmdline(self, executable, options, sourcefiles, propertyfile, rlimits):
        return [executable] + sourcefiles

    def determine_result(self, returncode, returnsignal, output, isTimeout):

Example from sosy-lab/cpachecker: BenchExec/benchexec/tools/true.py (view on GitHub)
import benchexec.tools.template
import benchexec.result as result

class Tool(benchexec.tools.template.BaseTool):
    """
    This tool is an imaginary tool that always returns SAFE.
    To use it you need a normal benchmark-xml-file
    with the tool and sourcefiles; options, however, are ignored.
    """
    def executable(self):
        return '/bin/true'

    def name(self):
        return 'AlwaysTrue'

    def cmdline(self, executable, options, sourcefiles, propertyfile, rlimits):
        return [executable] + sourcefiles

    def determine_result(self, returncode, returnsignal, output, isTimeout):
        return result.STATUS_TRUE_PROP