How to use the pyperf.Benchmark class in pyperf

To help you get started, we’ve selected a few examples that show how pyperf.Benchmark is commonly used in public projects.
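
Most of these examples read a pyperf.Benchmark back from a JSON file that was written earlier by a pyperf.Runner script. As a quick orientation, here is a minimal sketch of that round trip; the benchmark name "bench_sum" and the file name bench.json are placeholders:

# write_bench.py -- run as: python write_bench.py -o bench.json
import pyperf

runner = pyperf.Runner()
runner.bench_func("bench_sum", sum, list(range(1000)))

# read_bench.py -- load the JSON written by the command above
import pyperf

bench = pyperf.Benchmark.load("bench.json")
print(bench.get_name(), bench.mean(), bench.stdev())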

From vstinner/pyperf, doc/examples/plot.py:
def main():
    args = parse_args()
    if args.benchmark:
        suite = pyperf.BenchmarkSuite.load(args.filename)
        bench = suite.get_benchmark(args.benchmark)
    else:
        bench = pyperf.Benchmark.load(args.filename)
    plot_bench(args, bench)
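
parse_args() and plot_bench() are helper functions defined elsewhere in plot.py and are not shown here. A plausible sketch of parse_args(), modelled on the argparse setup used in the hist_scipy.py example below (the exact options in the real script may differ):

import argparse

def parse_args():
    # Hypothetical reconstruction of the command-line interface used above.
    parser = argparse.ArgumentParser()
    parser.add_argument('-b', '--benchmark',
                        help="name of one benchmark to pick out of a suite")
    parser.add_argument('filename',
                        help="pyperf JSON file to plot")
    return parser.parse_args()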

From vstinner/pyperf, doc/examples/hist_scipy.py:
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--bins', type=int, default=25,
                        help="Number of histogram bars (default: 25)")
    parser.add_argument('--mean', action="store_true",
                        help="Use mean-stdev, instead of median-mad")
    parser.add_argument('-b', '--benchmark')
    parser.add_argument('filename')
    args = parser.parse_args()

    if args.benchmark:
        suite = pyperf.BenchmarkSuite.load(args.filename)
        bench = suite.get_benchmark(args.benchmark)
    else:
        bench = pyperf.Benchmark.load(args.filename)

    display_histogram_scipy(bench, args.mean, args.bins)

From vstinner/pyperf, doc/examples/export_csv.py:
def main():
    args = parse_args()
    if args.benchmark:
        suite = pyperf.BenchmarkSuite.load(args.json_filename)
        bench = suite.get_benchmark(args.benchmark)
    else:
        bench = pyperf.Benchmark.load(args.json_filename)

    export_csv(args, bench)
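
export_csv() is another helper that is not part of the snippet. A minimal sketch, assuming it simply writes the values returned by Benchmark.get_values() to a CSV file (the csv_filename attribute is a guess at how the real script names its output argument):

import csv

def export_csv(args, bench):
    # Hypothetical helper: one benchmark value (in seconds) per row.
    with open(args.csv_filename, "w", newline="") as fp:
        writer = csv.writer(fp)
        writer.writerow(["value_seconds"])
        for value in bench.get_values():
            writer.writerow([value])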

From yt-project/unyt, benchmarks/bench.py:
def make_plot(extension):
    ratios = OrderedDict()
    stddevs = OrderedDict()
    benchmarks = OrderedDict()
    np_bench = pyperf.Benchmark.load(open("{}_{}".format("numpy", extension), "r"))
    np_mean = np_bench.mean()
    np_stddev = np_bench.stdev()
    for package in setup:
        if package == "numpy":
            continue
        benchmarks[package] = pyperf.Benchmark.load(
            open("{}_{}".format(package, extension), "r")
        )
        mean = benchmarks[package].mean()
        stddev = benchmarks[package].stdev()
        ratios[package] = mean / np_mean
        stddevs[package] = ratios[package] * np.sqrt(
            (np_stddev / np_mean) ** 2 + (stddev / mean) ** 2
        )
    fig, ax = plt.subplots()
    packages = list(ratios.keys())

From vstinner/pyperf, pyperf/_collect_metadata.py:
    else:
        cpus = get_isolated_cpus()
        if cpus:
            set_cpu_affinity(cpus)
            # ignore if set_cpu_affinity() failed

    run = pyperf.Run([1.0])
    metadata = run.get_metadata()
    if metadata:
        print("Metadata:")
        for line in format_metadata(metadata):
            print(line)

    if filename:
        run = run._update_metadata({'name': 'metadata'})
        bench = pyperf.Benchmark([run])
        bench.dump(filename)
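
Once the benchmark has been dumped, the same metadata can be read back from the JSON file through the Benchmark API; a small sketch (the file name metadata.json is a placeholder):

import pyperf

bench = pyperf.Benchmark.load("metadata.json")
for name, value in sorted(bench.get_metadata().items()):
    print("%s: %s" % (name, value))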

From vstinner/pyperf, pyperf/_runner.py:
def _worker(self, task):
    self._cpu_affinity()
    self._process_priority()
    run = task.create_run()
    bench = pyperf.Benchmark((run,))
    self._display_result(bench, checks=False)
    return bench
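
In pyperf's own architecture, each worker process produces a single-run Benchmark like the one above, and the parent process then merges them into one result. A hedged sketch of that merging step using the public API (the values and the benchmark name are made up):

import pyperf

run_a = pyperf.Run([1.0, 1.1], metadata={'name': 'bench_example'})
run_b = pyperf.Run([0.9, 1.2], metadata={'name': 'bench_example'})

bench = pyperf.Benchmark([run_a])
bench.add_runs(pyperf.Benchmark([run_b]))  # merge runs from another worker
print(bench.get_nrun(), bench.mean())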