How to use the pyperf.BenchmarkSuite class in pyperf

To help you get started, we’ve selected a few pyperf.BenchmarkSuite examples, based on popular ways the class is used in public projects.

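Before diving into the examples below, here is a minimal sketch of the typical BenchmarkSuite workflow: loading a suite from a pyperf JSON file, iterating over its benchmarks, tagging them with metadata, and writing the suite back out. The file name and the metadata key are placeholders.

import pyperf

# Load a benchmark suite from a pyperf JSON file (placeholder path).
suite = pyperf.BenchmarkSuite.load("results.json")

# A suite is iterable; each item is a pyperf.Benchmark.
for bench in suite:
    print(bench.get_name(), bench.mean())

# Attach extra metadata to every benchmark, then overwrite the file.
for bench in suite:
    bench.update_metadata({"run_label": "example"})  # placeholder metadata key
suite.dump("results.json", replace=True)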

From the python/pyperformance project, pyperformance/compare.py:
def compare_results(options):
    base_label, changed_label = get_labels(options.baseline_filename,
                                           options.changed_filename)

    base_suite = pyperf.BenchmarkSuite.load(options.baseline_filename)
    changed_suite = pyperf.BenchmarkSuite.load(options.changed_filename)

    results = []
    # Compare only the benchmarks that appear in both suites.
    common = set(base_suite.get_benchmark_names()) & set(
        changed_suite.get_benchmark_names())
    for name in sorted(common):
        base_bench = base_suite.get_benchmark(name)
        changed_bench = changed_suite.get_benchmark(name)
        result = BenchmarkResult(base_bench, changed_bench)
        results.append(result)

    hidden = []
    shown = []
    for result in results:
        name = result.base.get_name()
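The snippet above is truncated before the display logic, but the comparison pattern is complete: load both suites, intersect their benchmark names, and compare each common benchmark. A minimal sketch of that pattern using only public Benchmark methods (file names are placeholders):

import pyperf

base_suite = pyperf.BenchmarkSuite.load("baseline.json")
changed_suite = pyperf.BenchmarkSuite.load("changed.json")

common = set(base_suite.get_benchmark_names()) & set(changed_suite.get_benchmark_names())
for name in sorted(common):
    old = base_suite.get_benchmark(name).mean()
    new = changed_suite.get_benchmark(name).mean()
    # Ratio > 1.0 means the changed run is slower than the baseline.
    print("%s: %.2fx" % (name, new / old))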

From the python/pyperformance project, pyperformance/compile.py:
def update_metadata(self):
        metadata = {
            'commit_id': self.revision,
            'commit_branch': self.branch,
            'commit_date': self.commit_date.isoformat(),
        }
        if self.patch:
            metadata['patch_file'] = self.patch

        # Tag every benchmark with the commit metadata and rewrite the file in place.
        suite = pyperf.BenchmarkSuite.load(self.filename)
        for bench in suite:
            bench.update_metadata(metadata)
        suite.dump(self.filename, replace=True)

From the vstinner/pyperf project, doc/examples/export_csv.py:
def main():
    args = parse_args()
    if args.benchmark:
        suite = pyperf.BenchmarkSuite.load(args.json_filename)
        bench = suite.get_benchmark(args.benchmark)
    else:
        bench = pyperf.Benchmark.load(args.json_filename)

    export_csv(args, bench)
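main() above handles two cases: a JSON file holding a whole suite, from which one benchmark is selected by name, and a file holding a single benchmark. A short sketch of pulling one benchmark's raw values out of a suite (file and benchmark names are placeholders):

import pyperf

suite = pyperf.BenchmarkSuite.load("suite.json")
bench = suite.get_benchmark("telco")   # look up one benchmark by name
for value in bench.get_values():
    print(value)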

From the vstinner/pyperf project, pyperf/__main__.py:
def load_benchmark_suite(self, filename):
        suite = pyperf.BenchmarkSuite.load(filename)
        self.suites.append(suite)

From the python/pyperformance project, pyperformance/run.py:
def add_bench(dest_suite, obj):
            if isinstance(obj, pyperf.BenchmarkSuite):
                benchmarks = obj
            else:
                benchmarks = (obj,)

            version = pyperformance.__version__
            for bench in benchmarks:
                bench.update_metadata({'performance_version': version})

                if dest_suite is not None:
                    dest_suite.add_benchmark(bench)
                else:
                    dest_suite = pyperf.BenchmarkSuite([bench])

            return dest_suite
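add_bench() accepts either a whole BenchmarkSuite or a single Benchmark, stamps each benchmark with the pyperformance version, and merges it into a destination suite that is created on first use. The same merge pattern can be sketched with public API only (input files are placeholders and are assumed to contain distinct benchmark names):

import pyperf

dest_suite = None
for filename in ("results_a.json", "results_b.json"):
    for bench in pyperf.BenchmarkSuite.load(filename):
        if dest_suite is None:
            # Create the destination suite from the first benchmark seen.
            dest_suite = pyperf.BenchmarkSuite([bench])
        else:
            dest_suite.add_benchmark(bench)
dest_suite.dump("merged.json")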

From the cropsinsilico/yggdrasil project, yggdrasil/timing.py:
            data (pyperf.BenchmarkSuite or dict): Data to be saved.
            overwrite (bool, optional): If True, any existing file will be
                overwritten. Defaults to False.

        Raises:
            RuntimeError: If the file already exists and overwrite is False.

        """
        if os.path.isfile(self.filename) and (not overwrite):
            raise RuntimeError("'%s' exists" % self.filename)
        if data is not None:
            if self.dont_use_pyperf:
                with open(self.filename, 'wb') as fd:
                    backwards.pickle.dump(data, fd)
            else:
                if isinstance(data, pyperf.BenchmarkSuite):
                    data.dump(self.filename, replace=overwrite)
                else:
                    with open(self.filename, 'w') as fd:
                        json.dump(data, fd, sort_keys=True,
                                  separators=(',', ':'))
                        fd.write("\n")

From the vstinner/pyperf project, pyperf/__main__.py:
def cmd_convert(args):
    suite = pyperf.BenchmarkSuite.load(args.input_filename)

    if args.add:
        # Merge the runs from a second results file into the loaded suite.
        suite2 = pyperf.BenchmarkSuite.load(args.add)
        for bench in suite2.get_benchmarks():
            suite._add_benchmark_runs(bench)

    if args.include_benchmarks:
        names = args.include_benchmarks
        try:
            suite._convert_include_benchmark(names)
        except KeyError:
            fatal_missing_benchmarks(suite, names)

    elif args.exclude_benchmarks:
        names = args.exclude_benchmarks
        try:
            suite._convert_exclude_benchmark(names)
        except ValueError:
            fatal_no_more_benchmark(suite)
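cmd_convert() leans on private BenchmarkSuite helpers (_add_benchmark_runs, _convert_include_benchmark, _convert_exclude_benchmark), so it is not a pattern to copy directly. A comparable include-filter can be sketched with public API only (file and benchmark names are placeholders):

import pyperf

suite = pyperf.BenchmarkSuite.load("input.json")
keep = {"nbody", "telco"}
kept = [bench for bench in suite if bench.get_name() in keep]
if not kept:
    raise SystemExit("none of the requested benchmarks were found")
# Build a new suite from the kept benchmarks and write it out.
pyperf.BenchmarkSuite(kept).dump("filtered.json")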