How to use the pycalphad.Model class in pycalphad

To help you get started, we’ve selected a few pycalphad.Model examples based on popular ways it is used in public projects.

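Before the project excerpts below, here is a minimal sketch of the pattern they all share: load a Database, then construct a Model from the database, a list of component names, and a phase name. The database path, components, and phase name here are placeholders, not taken from any of the projects.

from pycalphad import Database, Model

# Hypothetical database file; substitute your own TDB.
dbf = Database('my_database.tdb')

# Model(database, components, phase_name) builds the symbolic Gibbs energy
# description of one phase for the listed components.
mod = Model(dbf, ['AL', 'CR'], 'AL11CR2')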

GitHub: PhasesResearchLab / ESPEI / tests / test_gdq.py
# Excerpt from the test module; it relies on `import numpy as np`,
# `from pycalphad import Database, Model`, and ESPEI's get_samples and
# get_data_quantities helpers, which are imported at the top of the full file.
def test_get_data_quantities_mixing_entropy():
    """Test that mixing entropy produces correct data quantities."""
    data = [{
        'components': ['AL', 'CR'],
        'phases': ['AL11CR2'],
        'solver': {
            'mode': 'manual',
            'sublattice_site_ratios': [10.0, 2.0],
            'sublattice_configurations': (('AL', ('AL', 'CR')),),
            'sublattice_occupancies': [[1.0, [0.5, 0.5]]],
        },
        'conditions': {'P': 101325.0, 'T': np.array([300.])},
        'output': 'SM_MIX',
        'values': np.array([[[0.60605556]]]),
        'reference': 'text 1 to write down reference for this work',
        'comment': 'test 2 to write down comment for this work',
        'excluded_model_contributions': ['idmix'],
    }]

    dbf = Database("""
    ELEMENT AL FCC_A1 26.982 4577.3 28.322 !
    ELEMENT CR BCC_A2 51.996 4050.0 23.56 !

    PHASE AL11CR2 %  2 10.0 2.0 !
    CONSTITUENT AL11CR2 :AL:AL,CR: !

    """)
    mod = Model(dbf, ['AL', 'CR'], 'AL11CR2')
    print(get_samples(data))
    # desired_property, fixed_model, fixed_portions, data, samples
    qty = get_data_quantities('SM_MIX', mod, [0], data)
    print(qty)
    assert np.all(np.isclose([7.27266667], qty))
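Once constructed as in the test above, the Model instance can be inspected directly. A short sketch continuing from the same mod, using only attributes that appear elsewhere on this page (the models dict of contributions and the assembled symbolic expression ast):

# Inspect the model's Gibbs energy contributions and its summed expression.
print(list(mod.models.keys()))  # individual contributions, e.g. 'ref', 'idmix', ...
print(mod.ast)                  # full symbolic Gibbs energy expression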
GitHub: PhasesResearchLab / ESPEI / tests / test_parameter_generation_utils.py
# Excerpt; relies on the same imports as the test above (numpy as np,
# pycalphad's Database and Model, and ESPEI's get_samples / get_data_quantities).
def test_get_data_quantities_mixing_entropy():
    """Test that mixing entropy produces correct data quantities with the
    excluded idmix model contribution.
    """
    data = [{
        'components': ['AL', 'CR'],
        'phases': ['AL11CR2'],
        'solver': {
            'mode': 'manual',
            'sublattice_site_ratios': [10.0, 2.0],
            'sublattice_configurations': (('AL', ('AL', 'CR')),),
            'sublattice_occupancies': [[1.0, [0.5, 0.5]]],
        },
        'conditions': {'P': 101325.0, 'T': np.array([300.])},
        'output': 'SM_MIX',
        'values': np.array([[[0.60605556]]]),
        'reference': 'text 1 to write down reference for this work',
        'comment': 'test 2 to write down comment for this work',
        'excluded_model_contributions': ['idmix'],
    }]

    dbf = Database("""
    ELEMENT AL FCC_A1 26.982 4577.3 28.322 !
    ELEMENT CR BCC_A2 51.996 4050.0 23.56 !

    PHASE AL11CR2 %  2 10.0 2.0 !
    CONSTITUENT AL11CR2 :AL:AL,CR: !

    """)
    mod = Model(dbf, ['AL', 'CR'], 'AL11CR2')
    print(get_samples(data))
    # desired_property, fixed_model, fixed_portions, data, samples
    qty = get_data_quantities('SM_MIX', mod, [0], data)
    print(qty)
    assert np.all(np.isclose([7.27266667], qty))
GitHub: PhasesResearchLab / ESPEI / espei / plot.py
    Examples
    --------
    >>> # plot the LAVES_C15 (Cu)(Mg) endmember
    >>> plot_parameters(dbf, ['CU', 'MG'], 'LAVES_C15', ('CU', 'MG'), symmetry=None, datasets=datasets)  # doctest: +SKIP
    >>> # plot the mixing interaction in the first sublattice
    >>> plot_parameters(dbf, ['CU', 'MG'], 'LAVES_C15', (('CU', 'MG'), 'MG'), symmetry=None, datasets=datasets)  # doctest: +SKIP

    """
    em_plots = [('T', 'CPM'), ('T', 'CPM_FORM'), ('T', 'SM'), ('T', 'SM_FORM'),
                ('T', 'HM'), ('T', 'HM_FORM')]
    mix_plots = [ ('Z', 'HM_MIX'), ('Z', 'SM_MIX')]
    comps = sorted(comps)
    mod = Model(dbf, comps, phase_name)
    mod.models['idmix'] = 0
    # This is for computing properties of formation
    mod_norefstate = Model(dbf, comps, phase_name, parameters={'GHSER'+(c.upper()*2)[:2]: 0 for c in comps})
    # Is this an interaction parameter or endmember?
    if any(isinstance(conf, (list, tuple)) for conf in configuration):
        plots = mix_plots
    else:
        plots = em_plots

    # filter which parameters to plot by the data that exists
    if require_data and datasets is not None:
        filtered_plots = []
        for x_val, y_val in plots:
            desired_props = [y_val.split('_')[0]+'_FORM', y_val] if y_val.endswith('_MIX') else [y_val]
            data = get_data(comps, phase_name, configuration, symmetry, datasets, desired_props)
            if len(data) > 0:
                filtered_plots.append((x_val, y_val, data))
    elif require_data:
        raise ValueError('Plots require datasets, but no datasets were passed.')
GitHub: PhasesResearchLab / ESPEI / espei / paramselect.py
    # dict of {feature: [candidate_models]}
    candidate_models_features = build_candidate_models(configuration, features)

    # All possible parameter values that could be taken on. This is some legacy
    # code from before there were many candidate models built. For very large
    # sets of candidate models, this could be quite slow.
    # TODO: we might be able to remove this initialization for clarity, depends on fixed portions
    parameters = {}
    for candidate_models in candidate_models_features.values():
        for model in candidate_models:
            for coef in model:
                parameters[coef] = 0

    # This is our previously fitted partial model from previous steps
    # Subtract out all of these contributions (zero out reference state because these are formation properties)
    fixed_model = Model(dbf, comps, phase_name, parameters={'GHSER'+(c.upper()*2)[:2]: 0 for c in comps})
    fixed_portions = [0]

    for desired_props in fitting_steps:
        feature_type = desired_props[0].split('_')[0]  # HM_FORM -> HM
        aicc_factor = aicc_feature_factors.get(feature_type, 1.0)
        desired_data = get_data(comps, phase_name, configuration, symmetry, datasets, desired_props)
        logging.log(TRACE, '{}: datasets found: {}'.format(desired_props, len(desired_data)))
        if len(desired_data) > 0:
            # Ravelled weights for all data
            weights = get_weights(desired_data)

            # We assume all properties in the same fitting step have the same
            # features (all CPM, all HM, etc., but different ref states).
            # data quantities are the same for each candidate model and can be computed up front
            data_qtys = get_data_quantities(feature_type, fixed_model, fixed_portions, desired_data)
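The comments above describe zeroing each element's GHSER reference symbol so the fixed model yields formation rather than absolute quantities. A hedged sketch of that pattern in isolation (the database path, component list, and phase name are placeholders):

from pycalphad import Database, Model

dbf = Database('my_database.tdb')   # hypothetical path
comps = ['AL', 'CR']
phase_name = 'AL11CR2'

# Map every GHSER symbol to zero; (c.upper()*2)[:2] doubles one-letter element
# symbols (e.g. 'C' -> 'CC') to match the GHSER naming used in the excerpt above.
ghser_zero = {'GHSER' + (c.upper()*2)[:2]: 0 for c in comps}

# Substituting these parameters removes the lattice-stability reference state,
# so the model's output behaves like a formation property.
fixed_model = Model(dbf, comps, phase_name, parameters=ghser_zero)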
GitHub: PhasesResearchLab / ESPEI / espei / error_functions / equilibrium_thermochemical_error.py
from pycalphad.plot.eqplot import _map_coord_to_variable
from pycalphad import Database, Model, ReferenceState, variables as v
from pycalphad.core.equilibrium import _eqcalculate
from pycalphad.codegen.callables import build_phase_records
from pycalphad.core.utils import instantiate_models, filter_phases, extract_parameters, unpack_components, unpack_condition
from pycalphad.core.phase_rec import PhaseRecord

from espei.utils import PickleableTinyDB
from espei.shadow_functions import equilibrium_, calculate_, no_op_equilibrium_, update_phase_record_parameters

EqPropData = NamedTuple('EqPropData', (('dbf', Database),
                                       ('species', Sequence[v.Species]),
                                       ('phases', Sequence[str]),
                                       ('potential_conds', Dict[v.StateVariable, float]),
                                       ('composition_conds', Sequence[Dict[v.X, float]]),
                                       ('models', Dict[str, Model]),
                                       ('params_keys', Dict[str, float]),
                                       ('phase_records', Sequence[Dict[str, PhaseRecord]]),
                                       ('output', str),
                                       ('samples', np.ndarray),
                                       ('weight', np.ndarray),
                                       ('reference', str),
                                       ))


def build_eqpropdata(data: tinydb.database.Document,
                     dbf: Database,
                     parameters: Optional[Dict[str, float]] = None,
                     data_weight_dict: Optional[Dict[str, float]] = None
                     ) -> EqPropData:
    """
    Build EqPropData for the calculations corresponding to a single dataset.
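This excerpt imports pycalphad's instantiate_models helper, which is the usual way to fill a Dict[str, Model] like EqPropData.models for many phases at once. A minimal sketch, assuming instantiate_models accepts the database, component list, and phase list in that order (the TDB path and names are placeholders):

from pycalphad import Database
from pycalphad.core.utils import instantiate_models

dbf = Database('my_database.tdb')                 # hypothetical path
comps = ['AL', 'CR', 'VA']
phases = list(dbf.phases.keys())                  # or a filtered subset (see filter_phases above)
models = instantiate_models(dbf, comps, phases)   # dict mapping phase name -> Model instance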
GitHub: pycalphad / pycalphad / pycalphad / residuals.py
    Returns
    -------
    (Dictionary of fit key:values), (lmfit minimize result)

    Examples
    --------
    None yet.
    """
    if 'maxfev' not in kwargs:
        kwargs['maxfev'] = 100
    fit_params = lmfit.Parameters()
    for guess_name, guess_value in guess.items():
        fit_params.add(guess_name, value=guess_value)
    param_names = fit_params.valuesdict().keys()
    fit_models = {name: Model(dbf, comps, name) for name in phases}
    fit_variables = dict()
    for name, mod in fit_models.items():
        fit_variables[name] = sorted(mod.energy.atoms(v.StateVariable).union({v.T, v.P}), key=str)
        # Extra factor '1e-100...' is to work around an annoying broadcasting bug for zero gradient entries
        fit_models[name].models['_broadcaster'] = 1e-100 * sympy.Mul(*fit_variables[name]) ** 3

    callables = {name: make_callable(mod.ast,
                                     itertools.chain(param_names, fit_variables[name]))
                 for name, mod in fit_models.items()}
    grad_callables = {name: make_callable(sympy.Matrix([mod.ast]).jacobian(fit_variables[name]),
                                          itertools.chain(param_names, fit_variables[name]))
                      for name, mod in fit_models.items()}

    #out = leastsq(residual_equilibrium, param_values,
    #              args=(data, dbf, comps, fit_models, callables),
    #              full_output=True, **kwargs)
GitHub: pycalphad / pycalphad / pycalphad / core / compiled_model.py
        There are other solutions that involve retaining the asymmetry for
        physical reasons, but this solution works well for components that
        are physically similar.

        This procedure is based on an analysis by Hillert, 1980,
        published in the Calphad journal.
        """
        arity = len(comps)
        return_dict = {}
        correction_term = (S.One - Add(*comps)) / arity
        for comp in comps:
            return_dict[comp] = comp + correction_term
        return return_dict


class CompiledModel(Model):
    def __init__(self, dbe, comps, phase_name, parameters=None):
        super(CompiledModel, self).__init__(dbe, comps, phase_name, parameters=parameters)
    def eval_energy(self, pressure, temperature, dof, out):
        pass
    def eval_energy_ideal(self, pressure, temperature, dof, out):
        pass
    def eval_gradient_energy(self, pressure, temperature, dof, out):
        pass
    def eval_gradient_energy_ideal(self, pressure, temperature, dof, out):
        pass
    def eval_hessian_energy(self, pressure, temperature, dof, out):
        pass
    def eval_hessian_energy_ideal(self, pressure, temperature, dof, out):
        pass
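CompiledModel customizes Model by subclassing it and overriding evaluation hooks. A simpler, hedged sketch of the same subclassing idea, reusing the mod.models['idmix'] = 0 trick from the plot.py excerpt earlier on this page; the class name is hypothetical:

from pycalphad import Model

class NoIdealMixingModel(Model):
    """Hypothetical Model subclass with the ideal-mixing contribution removed."""
    def __init__(self, dbe, comps, phase_name, parameters=None):
        super().__init__(dbe, comps, phase_name, parameters=parameters)
        # Zero out 'idmix' so it drops out of the summed Gibbs energy expression
        self.models['idmix'] = 0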
GitHub: PhasesResearchLab / ESPEI / espei / error_functions / zpf_error.py
        else:
            raise ValueError("Wrong number of data in tie-line point")
        region_phases.append(phase_name)
        region_comp_conds.append(dict(zip(map(v.X, map(lambda x: x.upper(), components)), compositions)))
        phase_flags.append(flag)
    return region_phases, region_comp_conds, phase_flags


PhaseRegion = NamedTuple('PhaseRegion', (('region_phases', Sequence[str]),
                                         ('potential_conds', Dict[v.StateVariable, float]),
                                         ('comp_conds', Sequence[Dict[v.X, float]]),
                                         ('phase_flags', Sequence[str]),
                                         ('dbf', Database),
                                         ('species', Sequence[v.Species]),
                                         ('phases', Sequence[str]),
                                         ('models', Dict[str, Model]),
                                         ('phase_records', Sequence[Dict[str, PhaseRecord]]),
                                         ))


def get_zpf_data(dbf: Database, comps: Sequence[str], phases: Sequence[str], datasets: PickleableTinyDB, parameters: Dict[str, float]):
    """
    Return the ZPF data used in the calculation of ZPF error

    Parameters
    ----------
    comps : list
        List of active component names
    phases : list
        List of phases to consider
    datasets : espei.utils.PickleableTinyDB
        Datasets that contain ZPF (phase boundary) data
GitHub: PhasesResearchLab / ESPEI / espei / plot.py
                # Because it's disordered, all sublattices should be equivalent
                # TODO: Fix this to filter because we need to guarantee the plot points are disordered
                occ = data['solver']['sublattice_occupancies']
                subl_idx = np.nonzero([isinstance(c, (list, tuple)) for c in occ[0]])[0]
                if len(subl_idx) > 1:
                    subl_idx = int(subl_idx[0])
                else:
                    subl_idx = int(subl_idx)
                indep_var_data = [c[subl_idx][1] for c in occ]
            else:
                interactions = np.array([i[1][1] for i in get_samples([data])], dtype=float)
                indep_var_data = 1 - (interactions+1)/2
            if y.endswith('_MIX') and data['output'].endswith('_FORM'):
                # All the _FORM data we have still has the lattice stability contribution
                # Need to zero it out to shift formation data to mixing
                mod_latticeonly = Model(dbf, comps, phase_name, parameters={'GHSER'+c.upper(): 0 for c in comps})
                mod_latticeonly.models = {key: value for key, value in mod_latticeonly.models.items()
                                          if key == 'ref'}
                temps = data['conditions'].get('T', 300)
                pressures = data['conditions'].get('P', 101325)
                points = build_sitefractions(phase_name, data['solver']['sublattice_configurations'],
                                             data['solver']['sublattice_occupancies'])
                for point_idx in range(len(points)):
                    missing_variables = mod_latticeonly.ast.atoms(v.SiteFraction) - set(points[point_idx].keys())
                    # Set unoccupied values to zero
                    points[point_idx].update({key: 0 for key in missing_variables})
                    # Change entry to a sorted array of site fractions
                    points[point_idx] = list(OrderedDict(sorted(points[point_idx].items(), key=str)).values())
                points = np.array(points, dtype=float)
                # TODO: Real temperature support
                points = points[None, None]
                stability = calculate(dbf, comps, [phase_name], output=data['output'][:-5],