How to use petab - 10 common examples

To help you get started, we've selected a few petab examples based on popular ways it is used in public projects, here the ICB-DCM packages pyABC and pyPESTO.

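Most snippets below follow the same pattern: build a petab.Problem from the problem's YAML file, optionally validate it, and hand it to a downstream tool (AMICI, pyABC, or pyPESTO). As a minimal self-contained sketch (the file path is a placeholder, not taken from the projects below):

import petab

# load a PEtab problem from its YAML file (placeholder path)
petab_problem = petab.Problem.from_yaml("path/to/problem.yaml")

# lint_problem returns True if errors were found
if petab.lint_problem(petab_problem):
    raise ValueError("PEtab problem contains errors")

# inspect the parameter table and the parameters to be estimated
print(petab_problem.parameter_df.head())
print(petab_problem.x_free_ids)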

Example 1: ICB-DCM/pyABC - test/petab/test_petab.py
    benchmark_dir = "doc/examples/tmp/benchmark-models-petab"
    if not os.path.exists(benchmark_dir):
        git.Repo.clone_from(
            "https://github.com/benchmarking-initiative"
            "/benchmark-models-petab.git",
            benchmark_dir, depth=1)
    g = git.Git(benchmark_dir)

    # update repo if online
    try:
        g.pull()
    except git.exc.GitCommandError:
        pass

    # create problem
    petab_problem = petab.Problem.from_yaml(os.path.join(
        benchmark_dir, "Benchmark-Models",
        "Boehm_JProteomeRes2014", "Boehm_JProteomeRes2014.yaml"))

    # compile amici
    model = amici.petab_import.import_petab_problem(petab_problem)
    solver = model.getSolver()

    # import to pyabc
    importer = pyabc.petab.AmiciPetabImporter(petab_problem, model, solver)

    # extract required objects
    prior = importer.create_prior()
    model = importer.create_model()
    kernel = importer.create_kernel()

    # call model
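The snippet stops just before the model call. Mirroring the pyABC test-suite example further down this page, and treating the call signature and the 'llh' key of the returned dictionary as assumptions, evaluating the model at the nominal scaled parameters would look roughly like this:

# sketch, not the original test code: evaluate the pyABC model at the
# PEtab nominal values of the free parameters (on parameter scale)
problem_parameters = petab_problem.x_nominal_free_scaled
ret = model(problem_parameters)
print(ret['llh'])  # log-likelihood reported by the AMICI-backed model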
Example 2: ICB-DCM/pyPESTO - test/test_amici_objective.py
def test_preeq_guesses():
    """
    Test whether optimization with preequilibration guesses works, asserts
    that steadystate guesses are written and checks that gradient is still
    correct with guesses set.
    """
    petab_problem = petab.Problem.from_yaml(
        folder_base + "Zheng_PNAS2012/Zheng_PNAS2012.yaml")
    petab_problem.model_name = "Zheng_PNAS2012"
    importer = pypesto.PetabImporter(petab_problem)
    obj = importer.create_objective()
    problem = importer.create_problem(obj)

    # assert that initial guess is uninformative
    assert problem.objective.steadystate_guesses['fval'] == np.inf

    optimizer = pypesto.ScipyOptimizer('L-BFGS-B', options={'maxiter': 50})
    result = pypesto.minimize(
        problem=problem, optimizer=optimizer, n_starts=1,
    )

    assert problem.objective.steadystate_guesses['fval'] < np.inf
    assert len(obj.steadystate_guesses['data']) == 1
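The docstring above also mentions checking the gradient. With a pyPESTO objective, value and gradient can be requested via sensi_orders, roughly as in the pyPESTO test-suite example further down this page (a sketch, not part of the original test):

# sketch: evaluate objective value and gradient at the nominal (scaled) parameters
x = petab_problem.x_nominal_scaled
fval = obj(x, sensi_orders=(0,))   # objective value
grad = obj(x, sensi_orders=(1,))   # gradient
print(fval, grad)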
Example 3: ICB-DCM/pyPESTO - test/test_amici_objective.py
def test_error_leastsquares_with_ssigma():
    petab_problem = petab.Problem.from_yaml(
        folder_base + "Zheng_PNAS2012/Zheng_PNAS2012.yaml")
    petab_problem.model_name = "Zheng_PNAS2012"
    importer = pypesto.PetabImporter(petab_problem)
    obj = importer.create_objective()
    problem = importer.create_problem(obj)

    optimizer = pypesto.ScipyOptimizer('ls_trf', options={'max_nfev': 50})
    with pytest.raises(RuntimeError):
        pypesto.minimize(
            problem=problem, optimizer=optimizer, n_starts=1,
            options=pypesto.OptimizeOptions(allow_failed_starts=False)
        )
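This test asserts that a RuntimeError is raised when the residual-based least-squares optimizer 'ls_trf' is combined with this problem setup (estimated noise parameters, as the test name suggests). A gradient-based optimizer, as in the previous example, is the usual alternative (a sketch reusing the objects created above):

# sketch: fall back to a gradient-based optimizer for such problems
optimizer = pypesto.ScipyOptimizer('L-BFGS-B', options={'maxiter': 50})
result = pypesto.minimize(problem=problem, optimizer=optimizer, n_starts=1)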
Example 4: ICB-DCM/pyPESTO - test/test_model_selection.py
def test_pipeline_forward():
    petab_problem = petab.Problem.from_yaml(EXAMPLE_YAML)

    selector = ModelSelector(petab_problem, EXAMPLE_MODELS)
    model_list = [model for model in selector.model_generator()]
    
    selected_models, _, selection_history = selector.select('forward', 'AIC')
    assert models_compared_with(INITIAL_VIRTUAL_MODEL, selection_history) == \
        {'M5_0', 'M6_0', 'M7_0'}
    assert models_compared_with('M6_0', selection_history) == \
        {'M3_0', 'M4_0'}

    selected_models, local_selection_history, selection_history = \
        selector.select('forward', 'AIC')
    # includes models compared to `INITIAL_VIRTUAL_MODEL` in first run, as
    # `selection_history` includes them (they were not retested)
    assert models_compared_with(INITIAL_VIRTUAL_MODEL, selection_history) == \
        {'M5_0', 'M6_0', 'M7_0', 'M2_0'}
Example 5: ICB-DCM/pyPESTO - test/test_modelselection.py
def test_row2problem_yaml_string(yaml_file_example):
    petab_problem = petab.Problem.from_yaml(yaml_file_example)

    importer = PetabImporter(petab_problem)
    obj = importer.create_objective()
    pypesto_problem = importer.create_problem(obj)

    row = pd.Series()
    assert row2problem(row, yaml_file_example) == pypesto_problem
Example 6: ICB-DCM/pyABC - test/petab/test_petab_suite.py
    solution = petabtests.load_solution(case)
    gt_chi2 = solution[petabtests.CHI2]
    gt_llh = solution[petabtests.LLH]
    gt_simulation_dfs = solution[petabtests.SIMULATION_DFS]
    tol_chi2 = solution[petabtests.TOL_CHI2]
    tol_llh = solution[petabtests.TOL_LLH]
    tol_simulations = solution[petabtests.TOL_SIMULATIONS]

    # unique folder for compiled amici model
    output_folder = f'amici_models/model_{case}'

    # import petab problem
    yaml_file = os.path.join(case_dir, petabtests.problem_yaml_name(case))

    # create problem
    petab_problem = petab.Problem.from_yaml(yaml_file)

    # compile amici
    amici_model = amici.petab_import.import_petab_problem(
        petab_problem=petab_problem,
        model_output_dir=output_folder)
    solver = amici_model.getSolver()

    # import to pyabc
    importer = pyabc.petab.AmiciPetabImporter(
        petab_problem, amici_model, solver)
    model = importer.create_model(return_rdatas=True)

    # simulate
    problem_parameters = petab_problem.x_nominal_free_scaled
    ret = model(problem_parameters)
Example 7: ICB-DCM/pyPESTO - test/test_petab_import.py
    def test_0_import(self):
        for model_name in ["Zheng_PNAS2012", "Boehm_JProteomeRes2014"]:
            petab_problem = petab.Problem.from_folder(
                folder_base + model_name)
            self.petab_problems.append(petab_problem)
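petab.Problem.from_folder reads the PEtab tables directly from a model directory; the other examples on this page use the YAML-based entry point instead. Assuming the usual <model_name>/<model_name>.yaml layout of the benchmark collection, an equivalent call would be roughly:

# sketch: YAML-based equivalent of the folder import above
for model_name in ["Zheng_PNAS2012", "Boehm_JProteomeRes2014"]:
    petab_problem = petab.Problem.from_yaml(
        folder_base + model_name + "/" + model_name + ".yaml")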
Example 8: ICB-DCM/pyABC - test/petab/test_petab_suite.py
    importer = pyabc.petab.AmiciPetabImporter(
        petab_problem, amici_model, solver)
    model = importer.create_model(return_rdatas=True)

    # simulate
    problem_parameters = petab_problem.x_nominal_free_scaled
    ret = model(problem_parameters)

    llh = ret['llh']

    # extract results
    rdatas = ret['rdatas']
    chi2 = sum(rdata['chi2'] for rdata in rdatas)
    simulation_df = amici.petab_objective.rdatas_to_measurement_df(
        rdatas, amici_model, importer.petab_problem.measurement_df)
    petab.check_measurement_df(
        simulation_df, importer.petab_problem.observable_df)
    simulation_df = simulation_df.rename(
        columns={petab.MEASUREMENT: petab.SIMULATION})
    simulation_df[petab.TIME] = simulation_df[petab.TIME].astype(int)

    # check if matches
    chi2s_match = petabtests.evaluate_chi2(chi2, gt_chi2, tol_chi2)
    llhs_match = petabtests.evaluate_llh(llh, gt_llh, tol_llh)
    simulations_match = petabtests.evaluate_simulations(
        [simulation_df], gt_simulation_dfs, tol_simulations)

    # log matches
    logger.log(logging.INFO if chi2s_match else logging.ERROR,
               f"CHI2: simulated: {chi2}, expected: {gt_chi2},"
               f" match = {chi2s_match}")
    logger.log(logging.INFO if simulations_match else logging.ERROR,
Example 9: ICB-DCM/pyPESTO - test/test_petab_suite.py
    model = importer.create_model()
    obj = importer.create_objective(model=model)

    # the scaled parameters
    problem_parameters = importer.petab_problem.x_nominal_scaled

    # simulate
    ret = obj(problem_parameters, sensi_orders=(0,), return_dict=True)

    # extract results
    rdatas = ret['rdatas']
    chi2 = sum(rdata['chi2'] for rdata in rdatas)
    llh = - ret['fval']
    simulation_df = amici.petab_objective.rdatas_to_measurement_df(
        rdatas, model, importer.petab_problem.measurement_df)
    petab.check_measurement_df(
        simulation_df, importer.petab_problem.observable_df)
    simulation_df = simulation_df.rename(
        columns={petab.MEASUREMENT: petab.SIMULATION})
    simulation_df[petab.TIME] = simulation_df[petab.TIME].astype(int)

    # check if matches
    chi2s_match = petabtests.evaluate_chi2(chi2, gt_chi2, tol_chi2)
    llhs_match = petabtests.evaluate_llh(llh, gt_llh, tol_llh)
    simulations_match = petabtests.evaluate_simulations(
        [simulation_df], gt_simulation_dfs, tol_simulations)

    # log matches
    logger.log(logging.INFO if chi2s_match else logging.ERROR,
               f"CHI2: simulated: {chi2}, expected: {gt_chi2},"
               f" match = {chi2s_match}")
    logger.log(logging.INFO if simulations_match else logging.ERROR,
Example 10: ICB-DCM/pyABC - pyabc/petab/base.py
        Returns
        -------
        prior:
            A valid pyabc.Distribution for the parameters to estimate.
        """
        # add default values
        parameter_df = petab.normalize_parameter_df(
            self.petab_problem.parameter_df)

        prior_dct = {}

        # iterate over parameters
        for _, row in parameter_df.reset_index().iterrows():
            # check whether we can ignore
            if not self.fixed_parameters and row[petab.C.ESTIMATE] == 0:
                # ignore fixed parameters
                continue
            if not self.free_parameters and row[petab.C.ESTIMATE] == 1:
                # ignore free parameters
                continue

            # pyabc currently only knows objective priors, no
            #  initialization priors
            prior_type = row[petab.C.OBJECTIVE_PRIOR_TYPE]
            pars_str = row[petab.C.OBJECTIVE_PRIOR_PARAMETERS]
            prior_pars = tuple(float(val) for val in pars_str.split(';'))

            # create random variable from table entry
            if prior_type in [petab.C.PARAMETER_SCALE_UNIFORM,
                              petab.C.UNIFORM]:
                lb, ub = prior_pars
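The snippet is cut off at this point. For the uniform case, the random variable that the loop stores in the prior dictionary would roughly be constructed as follows (a sketch, not the original pyABC code; it assumes scipy.stats-style parametrization, where 'uniform' takes loc and scale):

            # sketch continuing the loop above: store a uniform RV for this parameter
            # (scipy.stats 'uniform' is parametrized by loc = lb and scale = ub - lb)
            prior_dct[row[petab.C.PARAMETER_ID]] = pyabc.RV("uniform", lb, ub - lb)

        # after the loop, the entries are assembled into the prior
        prior = pyabc.Distribution(**prior_dct)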