How to use the pycalphad.Database class in pycalphad

To help you get started, we’ve selected a few pycalphad.Database examples, based on popular ways it is used in public projects. All of the snippets below come from the PhasesResearchLab/ESPEI project.


PhasesResearchLab / ESPEI / tests / test_parameter_generation.py (view on GitHub)

def test_initial_database_can_be_supplied(datasets_db):
    """Initial Databases can be passed to parameter generation"""
    initial_dbf = Database(CR_FE_INITIAL_TDB_CONTRIBUTIONS)
    assert len(initial_dbf._parameters.all()) == 11
    dbf = generate_parameters(CR_FE_PHASE_MODELS, datasets_db, 'SGTE91', 'linear', dbf=initial_dbf)
    assert len(dbf._parameters.all()) == 13  # 11 initial parameters + 2 generated endmember parameters
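As a minimal standalone sketch of the same Database usage (the file name my_database.tdb is a placeholder, and _parameters is the private TinyDB table that the test above queries), loading and inspecting a database might look like:

from pycalphad import Database

# Construct a Database from a TDB file path; multi-line TDB strings are also accepted
dbf = Database('my_database.tdb')

# Public attributes: the set of element names and the dict of Phase objects
print(sorted(dbf.elements))
print(list(dbf.phases.keys()))

# Count stored parameters the same way the ESPEI test does (private TinyDB table)
print(len(dbf._parameters.all()))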

PhasesResearchLab / ESPEI / tests / test_parameter_generation.py (view on GitHub)

def test_model_contributions_can_be_excluded_mixed_datasets(datasets_db):
    """Model contributions excluded in the datasets should not be fit and should still work when different types of datasets are mixed"""
    datasets_db.insert(CR_FE_HM_MIX_EXCLUDED_MAG)
    datasets_db.insert(CR_FE_HM_MIX_WITH_MAG)
    dbf = generate_parameters(CR_FE_PHASE_MODELS, datasets_db, 'SGTE91', 'linear', dbf=Database(CR_FE_INITIAL_TDB_CONTRIBUTIONS))
    assert dbf.symbols['VV0000'] == 40000  # 4 mol-atom/mol-form * 10000 J/mol-atom, verified with no initial Database

PhasesResearchLab / ESPEI / tests / test_error_functions.py (view on GitHub)

def test_subsystem_activity_probability(datasets_db):
    """Test binary Cr-Ni data produces the same probability regardless of whether the main system is a binary or ternary."""

    datasets_db.insert(CR_NI_ACTIVITY)

    dbf_bin = Database(CR_NI_TDB)
    dbf_tern = Database(CR_FE_NI_TDB)
    phases = list(dbf_tern.phases.keys())

    # Truth
    bin_prob = calculate_activity_error(dbf_bin, ['CR','NI','VA'], phases, datasets_db, {}, {}, {})

    # Getting binary subsystem data explicitly (from binary input)
    prob = calculate_activity_error(dbf_tern, ['CR','NI','VA'], phases, datasets_db, {}, {}, {})
    assert np.isclose(prob, bin_prob)

    # Getting binary subsystem from ternary input
    prob = calculate_activity_error(dbf_tern, ['CR', 'FE', 'NI', 'VA'], phases, datasets_db, {}, {}, {})
    assert np.isclose(prob, bin_prob)
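For reference, the phase list handed to calculate_activity_error above comes straight from the Database. A rough sketch of narrowing that list to a subsystem is shown below; the helper name filter_subsystem_phases and the file name Cr-Fe-Ni.tdb are illustrative only (not ESPEI or pycalphad API), and recent pycalphad versions store phase constituents as Species objects:

from pycalphad import Database

def filter_subsystem_phases(dbf, comps):
    """Keep phases whose every sublattice contains at least one of `comps`."""
    comps = set(comps)
    active = []
    for name, phase in dbf.phases.items():
        # phase.constituents is a tuple of frozensets of Species
        if all(any(sp.name in comps for sp in subl) for subl in phase.constituents):
            active.append(name)
    return active

dbf_tern = Database('Cr-Fe-Ni.tdb')  # placeholder file name
phases = filter_subsystem_phases(dbf_tern, ['CR', 'NI', 'VA'])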

PhasesResearchLab / ESPEI / tests / test_parameter_generation_utils.py (view on GitHub)

    One possible issue is that the new GM in the fixed model does not have any
    individual contributions, so it cannot be used to test excluded model
    contributions. The only excluded model contributions in this data are
    idmix, but the property we are testing is HM_FORM, so the feature transform
    of the idmix property should be zero.

    """
    # Hack the namespace to make the copy-pasted Gibbs energy function work
    from sympy import log, Piecewise
    T = v.T

    data = [{'components': ['AL', 'NI', 'VA'], 'phases': ['BCC_B2'], 'solver': {'mode': 'manual', 'sublattice_occupancies': [[1.0, [0.5, 0.5], 1.0], [1.0, [0.75, 0.25], 1.0]], 'sublattice_site_ratios': [0.5, 0.5, 1.0], 'sublattice_configurations': (('AL', ('NI', 'VA'), 'VA'), ('AL', ('NI', 'VA'), 'VA')), 'comment': 'BCC_B2 sublattice configuration (2SL)'}, 'conditions': {'P': 101325.0, 'T': np.array([300.])}, 'reference_state': 'SGTE91', 'output': 'HM_FORM', 'values': np.array([[[-40316.61077, -56361.58554]]]), 'reference': 'C. Jiang 2009 (constrained SQS)', 'excluded_model_contributions': ['idmix']}, {'components': ['AL', 'NI', 'VA'], 'phases': ['BCC_B2'], 'solver': {'mode': 'manual', 'sublattice_occupancies': [[1.0, [0.5, 0.5], 1.0], [1.0, [0.75, 0.25], 1.0]], 'sublattice_site_ratios': [0.5, 0.5, 1.0], 'sublattice_configurations': (('AL', ('NI', 'VA'), 'VA'), ('AL', ('NI', 'VA'), 'VA')), 'comment': 'BCC_B2 sublattice configuration (2SL)'}, 'conditions': {'P': 101325.0, 'T': np.array([300.])}, 'reference_state': 'SGTE91', 'output': 'HM_FORM', 'values': np.array([[[-41921.43363, -57769.49473]]]), 'reference': 'C. Jiang 2009 (relaxed SQS)', 'excluded_model_contributions': ['idmix']}]
    NEW_GM = 8.3145*T*(0.5*Piecewise((v.SiteFraction("BCC_B2", 0, "AL")*log(v.SiteFraction("BCC_B2", 0, "AL")), v.SiteFraction("BCC_B2", 0, "AL") > 1.0e-16), (0, True))/(0.5*v.SiteFraction("BCC_B2", 0, "AL") + 0.5*v.SiteFraction("BCC_B2", 0, "NI") + 0.5*v.SiteFraction("BCC_B2", 1, "AL") + 0.5*v.SiteFraction("BCC_B2", 1, "NI")) + 0.5*Piecewise((v.SiteFraction("BCC_B2", 0, "NI")*log(v.SiteFraction("BCC_B2", 0, "NI")), v.SiteFraction("BCC_B2", 0, "NI") > 1.0e-16), (0, True))/(0.5*v.SiteFraction("BCC_B2", 0, "AL") + 0.5*v.SiteFraction("BCC_B2", 0, "NI") + 0.5*v.SiteFraction("BCC_B2", 1, "AL") + 0.5*v.SiteFraction("BCC_B2", 1, "NI")) + 0.5*Piecewise((v.SiteFraction("BCC_B2", 0, "VA")*log(v.SiteFraction("BCC_B2", 0, "VA")), v.SiteFraction("BCC_B2", 0, "VA") > 1.0e-16), (0, True))/(0.5*v.SiteFraction("BCC_B2", 0, "AL") + 0.5*v.SiteFraction("BCC_B2", 0, "NI") + 0.5*v.SiteFraction("BCC_B2", 1, "AL") + 0.5*v.SiteFraction("BCC_B2", 1, "NI")) + 0.5*Piecewise((v.SiteFraction("BCC_B2", 1, "AL")*log(v.SiteFraction("BCC_B2", 1, "AL")), v.SiteFraction("BCC_B2", 1, "AL") > 1.0e-16), (0, True))/(0.5*v.SiteFraction("BCC_B2", 0, "AL") + 0.5*v.SiteFraction("BCC_B2", 0, "NI") + 0.5*v.SiteFraction("BCC_B2", 1, "AL") + 0.5*v.SiteFraction("BCC_B2", 1, "NI")) + 0.5*Piecewise((v.SiteFraction("BCC_B2", 1, "NI")*log(v.SiteFraction("BCC_B2", 1, "NI")), v.SiteFraction("BCC_B2", 1, "NI") > 1.0e-16), (0, True))/(0.5*v.SiteFraction("BCC_B2", 0, "AL") + 0.5*v.SiteFraction("BCC_B2", 0, "NI") + 0.5*v.SiteFraction("BCC_B2", 1, "AL") + 0.5*v.SiteFraction("BCC_B2", 1, "NI")) + 0.5*Piecewise((v.SiteFraction("BCC_B2", 1, "VA")*log(v.SiteFraction("BCC_B2", 1, "VA")), v.SiteFraction("BCC_B2", 1, "VA") > 1.0e-16), (0, True))/(0.5*v.SiteFraction("BCC_B2", 0, "AL") + 0.5*v.SiteFraction("BCC_B2", 0, "NI") + 0.5*v.SiteFraction("BCC_B2", 1, "AL") + 0.5*v.SiteFraction("BCC_B2", 1, "NI")) + Piecewise((v.SiteFraction("BCC_B2", 2, "VA")*log(v.SiteFraction("BCC_B2", 2, "VA")), v.SiteFraction("BCC_B2", 2, "VA") > 1.0e-16), (0, True))/(0.5*v.SiteFraction("BCC_B2", 0, "AL") + 0.5*v.SiteFraction("BCC_B2", 0, "NI") + 0.5*v.SiteFraction("BCC_B2", 1, "AL") + 0.5*v.SiteFraction("BCC_B2", 1, "NI"))) + (45262.9*v.SiteFraction("BCC_B2", 0, "AL")*v.SiteFraction("BCC_B2", 0, "NI")*v.SiteFraction("BCC_B2", 1, "AL")*v.SiteFraction("BCC_B2", 2, "VA") + 45262.9*v.SiteFraction("BCC_B2", 0, "AL")*v.SiteFraction("BCC_B2", 1, "AL")*v.SiteFraction("BCC_B2", 1, "NI")*v.SiteFraction("BCC_B2", 2, "VA"))/(0.5*v.SiteFraction("BCC_B2", 0, "AL") + 0.5*v.SiteFraction("BCC_B2", 0, "NI") + 0.5*v.SiteFraction("BCC_B2", 1, "AL") + 0.5*v.SiteFraction("BCC_B2", 1, "NI")) + (1.0*v.SiteFraction("BCC_B2", 0, "AL")*v.SiteFraction("BCC_B2", 1, "AL")*v.SiteFraction("BCC_B2", 2, "VA")*Piecewise((10083 - 4.813*T, (T >= 298.15) & (T < 2900.0)), (0, True)) + v.SiteFraction("BCC_B2", 0, "AL")*v.SiteFraction("BCC_B2", 1, "NI")*v.SiteFraction("BCC_B2", 2, "VA")*(9.52839e-8*T**3 + 0.00123463*T**2 + 0.000871898*T*log(T) + 1.31471*T - 64435.3 + 23095.2/T) + v.SiteFraction("BCC_B2", 0, "AL")*v.SiteFraction("BCC_B2", 1, "VA")*v.SiteFraction("BCC_B2", 2, "VA")*(10.0*T + 16432.5) + v.SiteFraction("BCC_B2", 0, "NI")*v.SiteFraction("BCC_B2", 1, "AL")*v.SiteFraction("BCC_B2", 2, "VA")*(9.52839e-8*T**3 + 0.00123463*T**2 + 0.000871898*T*log(T) + 1.31471*T - 64435.3 + 23095.2/T) + 1.0*v.SiteFraction("BCC_B2", 0, "NI")*v.SiteFraction("BCC_B2", 1, "NI")*v.SiteFraction("BCC_B2", 2, "VA")*Piecewise((8715.084 - 3.556*T, (T >= 298.15) & (T < 3000.0)), (0, True)) + 32790.6*v.SiteFraction("BCC_B2", 0, "NI")*v.SiteFraction("BCC_B2", 1, "VA")*v.SiteFraction("BCC_B2", 2, "VA") + v.SiteFraction("BCC_B2", 0, "VA")*v.SiteFraction("BCC_B2", 1, "AL")*v.SiteFraction("BCC_B2", 2, "VA")*(10.0*T + 16432.5) + 32790.6*v.SiteFraction("BCC_B2", 0, "VA")*v.SiteFraction("BCC_B2", 1, "NI")*v.SiteFraction("BCC_B2", 2, "VA"))/(0.5*v.SiteFraction("BCC_B2", 0, "AL") + 0.5*v.SiteFraction("BCC_B2", 0, "NI") + 0.5*v.SiteFraction("BCC_B2", 1, "AL") + 0.5*v.SiteFraction("BCC_B2", 1, "NI"))

    dbf = Database("""$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    $ Date: 2019-12-08 18:05
    $ Components: AL, NI, VA
    $ Phases: BCC_B2
    $ Generated by brandon (pycalphad 0.8.1.post1)
    $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$

    ELEMENT AL FCC_A1 26.982 4577.3 28.322 !
    ELEMENT NI FCC_A1 58.69 4787.0 29.796 !
    ELEMENT VA VACUUM 0.0 0.0 0.0 !

    TYPE_DEFINITION % SEQ * !
    DEFINE_SYSTEM_DEFAULT ELEMENT 2 !
    DEFAULT_COMMAND DEFINE_SYSTEM_ELEMENT VA !

    PHASE BCC_B2 %  3 0.5 0.5 1 !
    CONSTITUENT BCC_B2 :AL,NI,VA:AL,NI,VA:VA: !
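The TDB text above is cut off by the snippet length, but the pattern is the key point: Database parses a multi-line TDB string directly. A self-contained illustration with a minimal, hypothetical Al database (not the Al-Ni database from the test) might look like:

from pycalphad import Database

MINIMAL_TDB = """
ELEMENT AL FCC_A1 26.982 4577.3 28.322 !
ELEMENT VA VACUUM 0.0 0.0 0.0 !

TYPE_DEFINITION % SEQ * !

PHASE LIQUID % 1 1 !
CONSTITUENT LIQUID :AL: !

PARAMETER G(LIQUID,AL;0) 298.15 +10000-10*T; 6000 N !
"""

dbf = Database(MINIMAL_TDB)  # strings containing newlines are treated as TDB content, not file names
print(dbf.phases['LIQUID'].constituents)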

PhasesResearchLab / ESPEI / espei / espei_script.py (view on GitHub)

                bokeh_server_info = client.scheduler_info()['services']['bokeh']
                logging.info("bokeh server for dask scheduler at localhost:{}".format(bokeh_server_info))
            except KeyError:
                logging.info("Install bokeh to use the dask bokeh server.")
        elif mcmc_settings['scheduler'] == 'None' or mcmc_settings['scheduler'] is None:
            client = None
            logging.info("Not using a parallel scheduler. ESPEI is running MCMC on a single core.")
        else: # we were passed a scheduler file name
            _raise_dask_work_stealing()  # check for work-stealing
            client = ImmediateClient(scheduler_file=mcmc_settings['scheduler'])
            client.run(logging.basicConfig, level=verbosity[output_settings['verbosity']], filename=output_settings['logfile'])
            logging.info("Running with dask scheduler: %s [%s cores]" % (client.scheduler, sum(client.ncores().values())))

        # get a Database
        if mcmc_settings.get('input_db'):
            dbf = Database(mcmc_settings.get('input_db'))

        # load the restart trace if needed
        if mcmc_settings.get('restart_trace'):
            restart_trace = np.load(mcmc_settings.get('restart_trace'))
        else:
            restart_trace = None

        # load the remaining mcmc fitting parameters
        iterations = mcmc_settings.get('iterations')
        save_interval = mcmc_settings.get('save_interval')
        chains_per_parameter = mcmc_settings.get('chains_per_parameter')
        chain_std_deviation = mcmc_settings.get('chain_std_deviation')
        deterministic = mcmc_settings.get('deterministic')
        prior = mcmc_settings.get('prior')
        data_weights = mcmc_settings.get('data_weights')
        syms = mcmc_settings.get('symbols')

PhasesResearchLab / ESPEI / espei / espei_script.py (view on GitHub)

        logging.warning('No datasets were found in the path {}. This should be a directory containing dataset files ending in `.json`.'.format(dataset_path))
    apply_tags(datasets, system_settings.get('tags', dict()))
    add_ideal_exclusions(datasets)
    logging.log(TRACE, 'Finished checking datasets')

    with open(system_settings['phase_models']) as fp:
        phase_models = json.load(fp)

    if generate_parameters_settings is not None:
        refdata = generate_parameters_settings['ref_state']
        excess_model = generate_parameters_settings['excess_model']
        ridge_alpha = generate_parameters_settings['ridge_alpha']
        aicc_penalty = generate_parameters_settings['aicc_penalty_factor']
        input_dbf = generate_parameters_settings.get('input_db', None)
        if input_dbf is not None:
            input_dbf = Database(input_dbf)
        dbf = generate_parameters(phase_models, datasets, refdata, excess_model,
                                  ridge_alpha=ridge_alpha, dbf=input_dbf,
                                  aicc_penalty_factor=aicc_penalty,)
        dbf.to_file(output_settings['output_db'], if_exists='overwrite')

    if mcmc_settings is not None:
        tracefile = output_settings['tracefile']
        probfile = output_settings['probfile']
        # Set the trace and probability files to None if the user passed the string 'None'
        if tracefile == 'None':
            tracefile = None
        if probfile == 'None':
            probfile = None
        # check that the MCMC output files do not already exist
        # only matters if we are actually running MCMC
        if tracefile is not None and os.path.exists(tracefile):
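The to_file call earlier in this script, dbf.to_file(output_settings['output_db'], if_exists='overwrite'), is the usual way to persist a Database. A quick round-trip sketch, with placeholder file names:

from pycalphad import Database

dbf = Database('generated.tdb')                      # placeholder input database
dbf.to_file('espei-out.tdb', if_exists='overwrite')  # same call pattern as in espei_script.py

# Reloading the written file gives back the same set of phases
reloaded = Database('espei-out.tdb')
assert set(reloaded.phases.keys()) == set(dbf.phases.keys())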

PhasesResearchLab / ESPEI / espei / unary_fitting / thermal_vacancies.py (view on GitHub)

L0_A, L0_B = np.matmul(np.linalg.pinv(A), b)

# L1
b = L1_grid.reshape((num_T, 1))
L1_A, L1_B = np.matmul(np.linalg.pinv(A), b)

# TODO (Usman): add code to put these parameters in the database,
# dbf.add_parameter()

#### END thermal vacancies function

# generate_thermal_vacancies_tdb

# this code needs phase_models to run
from pycalphad import Database
dbf = Database()
dbf.elements = set(phase_models['components'])
for phase_name, phase_obj in sorted(phase_models['phases'].items(), key=operator.itemgetter(0)):
    # Perform parameter selection and single-phase fitting based on input
    # TODO: More advanced phase data searching
    site_ratios = phase_obj['sublattice_site_ratios']
    subl_model = phase_obj['sublattice_model']
    dbf.add_phase(phase_name, dict(), site_ratios)
    dbf.add_phase_constituents(phase_name, subl_model)
    dbf.add_structure_entry(phase_name, phase_name)
    # modifies the database in place
    phase_fit_thermal_vacancies(dbf, phase_name, subl_model, site_ratios, datasets)
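Building on the loop above, a Database can be assembled entirely in code. The sketch below mirrors the add_phase / add_phase_constituents / add_structure_entry calls for a single hypothetical phase; the add_parameter call hinted at by the TODO is left commented out because its exact signature depends on the pycalphad version:

from pycalphad import Database

dbf = Database()
dbf.elements = {'AL', 'NI', 'VA'}

phase_name = 'BCC_B2'                                 # illustrative phase
site_ratios = [0.5, 0.5, 1.0]
subl_model = [['AL', 'NI', 'VA'], ['AL', 'NI', 'VA'], ['VA']]

dbf.add_phase(phase_name, dict(), site_ratios)        # model hints dict, sublattice site ratios
dbf.add_phase_constituents(phase_name, subl_model)
dbf.add_structure_entry(phase_name, phase_name)

# Assumed usage only; check the pycalphad docs for the exact add_parameter signature:
# dbf.add_parameter('G', phase_name, [['AL'], ['NI'], ['VA']], 0, 10000.0)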

PhasesResearchLab / ESPEI / run.py (view on GitHub)

            lower_hpd, upper_hpd = utils.hpd(trace, 0.05)
            row = {
                'Parameter': parname,
                'Mean': trace.mean(0),
                'SD': trace.std(0),
                'Lower 95% HPD': lower_hpd,
                'Upper 95% HPD': upper_hpd,
                'MC error': batchsd(trace, min(len(trace), 100)),
                'q2.5': q2d5, 'q25': q25, 'q50': q50, 'q75': q75, 'q97.5': q975
            }
            writer.writerow(row)
    output_files.append(str(os.path.join(parameters['sumatra_label'], 'parameters.csv')))
    # Generate comparison figures
    os.makedirs(os.path.join(image_path, 'results'))
    input_database = Database(parameters['input_database'])
    compare_databases = {key: Database(value) for key, value in parameters['compare_databases'].items()}
    idx = 1
    for fig in plot_results(input_database, datasets, data_dict, databases=compare_databases):
        fig.savefig(str(os.path.join(image_path, 'results', 'Figure{}.png'.format(idx))))
        output_files.append(str(os.path.join(parameters['sumatra_label'], 'results', 'Figure{}.png'.format(idx))))
        plt.close(fig)
        idx += 1
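As with the compare_databases dict above, several fitted Databases can be loaded side by side and compared; dbf.symbols maps FUNCTION and fitted parameter symbol names (such as VV0000 in the earlier test) to SymPy expressions. A short sketch with placeholder file names:

from pycalphad import Database

reference = Database('input.tdb')                     # placeholder file names
compare_databases = {
    'candidate A': Database('fit_a.tdb'),
    'candidate B': Database('fit_b.tdb'),
}

for label, dbf in compare_databases.items():
    extra = set(dbf.symbols) - set(reference.symbols)
    print('{} defines {} symbols not present in the input database'.format(label, len(extra)))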