How to use the lmfit.Minimizer function in lmfit

To help you get started, we’ve selected a few lmfit examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github lmfit / lmfit-py / tests / test_algebraic_constraint2.py View on Github external
# Fragment (function body, def line not shown): build a Parameters set for a
# two-peak fit where the second peak is tied to the first through algebraic
# constraint expressions, run leastsq, and verify the constraints held.
pfit = Parameters()
    # Freely varying Gaussian parameters.
    pfit.add(name='amp_g',  value=10)
    pfit.add(name='cen_g',  value=9)
    pfit.add(name='wid_g',  value=1)

    # Total amplitude varies freely; the second peak's parameters are derived
    # from the Gaussian ones via constraint expressions, not fitted directly.
    pfit.add(name='amp_tot',  value=20)
    pfit.add(name='amp_l',  expr='amp_tot - amp_g')
    pfit.add(name='cen_l',  expr='1.5+cen_g')
    pfit.add(name='wid_l',  expr='2*wid_g')

    pfit.add(name='line_slope', value=0.0)
    pfit.add(name='line_off', value=0.0)

    sigma = 0.021  # estimate of data error (for all data points)

    myfit = Minimizer(residual, pfit,
                      fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
                      scale_covar=True)

    # Evaluate the model at the initial parameter values before fitting.
    myfit.prepare_fit()
    init = residual(myfit.params, x)

    result = myfit.leastsq()

    print(' Nfev = ', result.nfev)
    print( result.chisqr, result.redchi, result.nfree)

    report_fit(result.params, min_correl=0.3)

    # The constraint expressions must be satisfied exactly in the result.
    fit = residual(result.params, x)
    assert(result.params['cen_l'].value == 1.5 + result.params['cen_g'].value)
    assert(result.params['amp_l'].value == result.params['amp_tot'].value - result.params['amp_g'].value)
github lmfit / lmfit-py / tests / test_brute.py View on Github external
def test_brute_lower_bounds_and_brute_step(params_lmfit):
    """TEST 6: using finite lower bounds, Ns=15, and brute_step specified."""
    n_points = 15
    # Drop the finite upper bound on 'y' and give it an explicit grid step.
    params_lmfit['y'].set(max=np.inf)
    params_lmfit['y'].set(brute_step=0.1)

    out = lmfit.Minimizer(func_lmfit, params_lmfit).minimize(method='brute',
                                                             Ns=n_points)

    # 'x' has finite bounds: its grid spans [min, max] with Ns points.
    x_par = params_lmfit['x']
    expected_x = np.linspace(x_par.min, x_par.max, n_points)
    actual_x = np.unique([axis.ravel() for axis in out.brute_grid][0])
    assert_allclose(actual_x, expected_x)

    # 'y' is unbounded above: its grid starts at min and advances by
    # brute_step for Ns points, endpoint excluded.
    y_par = params_lmfit['y']
    expected_y = np.linspace(y_par.min,
                             y_par.min + n_points*y_par.brute_step,
                             n_points, False)
    actual_y = np.unique([axis.ravel() for axis in out.brute_grid][1])
    assert_allclose(actual_y, expected_y)
github lmfit / lmfit-py / tests / test_brute.py View on Github external
def test_brute_one_parameter(params_lmfit):
    """TEST 9: only one varying parameter."""
    # Fix 'x' so the brute-force grid is built over 'y' alone.
    params_lmfit['x'].set(vary=False)
    result = lmfit.Minimizer(func_lmfit, params_lmfit).minimize(method='brute')

    best, runner_up = result.candidates[0], result.candidates[1]
    # Candidates are ordered from lowest (best) score upward.
    assert best.score <= runner_up.score
    assert isinstance(best, lmfit.minimizer.Candidate)
    assert isinstance(best.params, lmfit.Parameters)
    assert isinstance(best.score, np.float64)
github lmfit / lmfit-py / tests / test_confidence.py View on Github external
def test_confidence_2d_limits(data, pars):
    """Test the 2D confidence interval calculation using limits."""
    fitter = lmfit.Minimizer(residual, pars, fcn_args=data)
    result = fitter.minimize(method='leastsq')

    # Explicit (lower, upper) ranges for parameters 'a' and 'b'; note the
    # 'b' range is supplied in reversed order.
    bounds = ((1.0e-6, 0.02), (1.0e-6, -4.0))
    cx, cy, grid = lmfit.conf_interval2d(fitter, result, 'a', 'b',
                                         limits=bounds)

    # The default grid is 10x10 and spans exactly the requested limits.
    assert grid.shape == (10, 10)
    assert_allclose(min(cx.ravel()), 1.0e-6)
    assert_allclose(max(cx.ravel()), 0.02)
    assert_allclose(min(cy.ravel()), -4.0)
    assert_allclose(max(cy.ravel()), 1.0e-6)
github lmfit / lmfit-py / tests / test_brute_method.py View on Github external
# Fragment (function body, def line not shown): compare lmfit's brute-force
# minimizer against scipy.optimize.brute directly — grids, per-point function
# values, and best-fit values must agree exactly.
assert_equal(resbrute[3], resbrute_lmfit.brute_Jout, verbose=True) # function values on grid identical
    assert_equal(resbrute[0][0], resbrute_lmfit.brute_x0[0], verbose=True) # best fit x value identical
    assert_equal(resbrute[0][0], resbrute_lmfit.params['x'].value, verbose=True) # best fit x value stored correctly
    assert_equal(resbrute[0][1], resbrute_lmfit.brute_x0[1], verbose=True) # best fit y value identical
    assert_equal(resbrute[0][1], resbrute_lmfit.params['y'].value, verbose=True) # best fit y value stored correctly
    assert_equal(resbrute[1], resbrute_lmfit.brute_fval, verbose=True) # best fit function value identical
    assert_equal(resbrute[1], resbrute_lmfit.residual, verbose=True) # best fit function value stored correctly

    # TEST 2: using bounds, setting Ns=40 and no stepsize specified
    assert(not params_lmfit['x'].brute_step)  # brute_step for x == None
    assert(not params_lmfit['y'].brute_step)  # brute_step for y == None

    # Run scipy's brute and lmfit's brute with identical bounds and Ns.
    rranges = ((-4, 4), (-2, 2))
    resbrute = optimize.brute(f, rranges, args=params, full_output=True, Ns=40,
                              finish=None)
    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
    resbrute_lmfit = fitter.minimize(method='brute', Ns=40)

    assert_equal(resbrute[2], resbrute_lmfit.brute_grid, verbose=True) # grid identical
    assert_equal(resbrute[3], resbrute_lmfit.brute_Jout, verbose=True) # function values on grid identical
    assert_equal(resbrute[0][0], resbrute_lmfit.params['x'].value, verbose=True) # best fit x value identical
    assert_equal(resbrute[0][1], resbrute_lmfit.params['y'].value, verbose=True) # best fit y value identical
    assert_equal(resbrute[1], resbrute_lmfit.residual, verbose=True) # best fit function value identical

    # TEST 3: using bounds and specifying stepsize for both parameters
    params_lmfit['x'].set(brute_step=0.25)
    params_lmfit['y'].set(brute_step=0.25)
    assert_equal(params_lmfit['x'].brute_step, 0.25 ,verbose=True)
    assert_equal(params_lmfit['y'].brute_step, 0.25 ,verbose=True)

    # Explicit slices carry the step, so scipy ignores Ns here.
    rranges = (slice(-4, 4, 0.25), slice(-2, 2, 0.25))
    resbrute = optimize.brute(f, rranges, args=params, full_output=True, Ns=20,
github lmfit / lmfit-py / tests / test_least_squares.py View on Github external
# Fragment (function body, def line not shown): tail of a residual function,
# then a full least_squares fit of a decaying-sine model to noisy data.
return model - data

    # generate synthetic data
    np.random.seed(0)  # fixed seed keeps the noise (and the asserts) reproducible
    x = np.linspace(0.0, 250.0, 1500)
    noise = np.random.normal(scale=2.80, size=x.size)
    data = residual(p_true, x) + noise

    # create Parameters and set initial values and bounds
    fit_params = lmfit.Parameters()
    fit_params.add('amp', value=13.0, min=0.0, max=20)
    fit_params.add('period', value=2, max=10)
    fit_params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2.)
    fit_params.add('decay', value=0.02, min=0.0, max=0.10)

    mini = lmfit.Minimizer(residual, fit_params, fcn_args=(x, data))
    out = mini.minimize(method='least_squares')

    # Sanity checks on the result object, then recovery of the true values
    # to within 1% relative tolerance.
    assert out.method == 'least_squares'
    assert out.nfev > 10
    assert out.nfree > 50
    assert out.chisqr > 1.0
    assert out.errorbars
    assert out.success
    assert_allclose(out.params['decay'], p_true['decay'], rtol=1e-2)
    assert_allclose(out.params['shift'], p_true['shift'], rtol=1e-2)
github lmfit / lmfit-py / tests / test_basinhopping.py View on Github external
# Fragment (function body, def line not shown): tail of a scipy objective,
# then the same problem solved through lmfit's basinhopping wrapper; the two
# must agree on both the minimum value and its location.
return np.cos(14.5*x - 0.3) + (x+0.2) * x

    minimizer_kwargs = {'method': 'L-BFGS-B'}
    x0 = [1.]

    # Reference result straight from scipy, with a fixed seed.
    ret = basinhopping(func, x0, minimizer_kwargs=minimizer_kwargs, seed=7)

    # lmfit
    def residual(params):
        # Same objective as above, expressed over an lmfit Parameters object.
        x = params['x'].value
        return np.cos(14.5*x - 0.3) + (x+0.2) * x

    pars = lmfit.Parameters()
    pars.add_many(('x', 1.))
    # Identical local-minimizer settings and seed so the runs are comparable.
    kws = {'minimizer_kwargs': {'method': 'L-BFGS-B'}, 'seed': 7}
    mini = lmfit.Minimizer(residual, pars)
    out = mini.minimize(method='basinhopping', **kws)

    assert_allclose(out.residual, ret.fun)
    assert_allclose(out.params['x'].value, ret.x, rtol=1e-5)
github lmfit / lmfit-py / examples / peakfit_1.py View on Github external
# Script fragment: set up a two-peak fit with a constrained second width,
# run leastsq, and report the result.
data  = residual(fit_params, x) + noise

if HASPYLAB:
    pylab.plot(x, data, 'r+')

# Fresh Parameters for the fit; add_many tuples are
# (name, value, vary, min, max, expr).
fit_params = Parameters()
fit_params.add_many(('a1',  8.0, True, None, 14., None),
                    ('c1',  5.0, True, None, None, None),
                    ('w1',  0.7, True, None, None, None),
                    ('a2',  3.1, True, None, None, None),
                    ('c2',  8.8, True, None, None, None))

# Width of the second peak is constrained to 2.5x the first.
fit_params.add('w2', expr='2.5*w1')

myfit = Minimizer(residual, fit_params,
                  fcn_args=(x,), fcn_kws={'data':data})

myfit.prepare_fit()

# Evaluate the model at the initial parameter values before fitting.
init = residual(fit_params, x)

if HASPYLAB:
    pylab.plot(x, init, 'b--')

# leastsq() returns a MinimizerResult carrying the fit statistics; read them
# from the result rather than from the Minimizer (old, removed API), and use
# print() so the script runs under Python 3 (the old `print x` statements
# were a SyntaxError there).
result = myfit.leastsq()

print(' N fev = ', result.nfev)
print(result.chisqr, result.redchi, result.nfree)

report_fit(fit_params)
github bgamari / photon-tools / global_fit.py View on Github external
import lmfit

class Curve(object):
    """
    A single curve/model pair participating in a GlobalFit.
    """
    def __init__(self, name, data, model, weights=None):
        # Keep references to everything needed to evaluate the residual.
        self.name = name
        self.data = data
        self.model = model
        self.weights = weights

    def _residual(self, params, **kwargs):
        # Delegate to the model's residual against this curve's own data.
        return self.model._residual(params, self.data, self.weights, **kwargs)

class GlobalFit(lmfit.Minimizer):
    """
    This represents a fit over a number of curves, each against its
    own model. These models can potentially share parameters (known as
    "tying" of the parameters).

    The parameters object accepted by global fits contain two types of
    parameters: per-curve parameters are parameters which correspond
    to a parameter of a curve, and global parameters which tie
    together several per-curve parameters. Per-curve parameters have
    the name of their curve prepended to their name to ensure
    uniqueness.
    """

    # NOTE(review): `curves={}` is a mutable default argument — the same dict
    # object is shared by every call that omits `curves`; prefer
    # `curves=None` plus an in-body guard. (Class body continues beyond this
    # fragment.)
    def __init__(self, curves={}, method='leastsq'):
        self.method = method
        self.curves = curves
github lmfit / lmfit-py / examples / lmfit_and_emcee.py View on Github external
# Synthetic double-exponential data: y = a*exp(-x/t1) + b*exp(-x/t2) + noise.
x = np.linspace(0, 20, 350)
a, b, t1, t2 = 2, 3, 2, 10  # Real (true) values the fit should recover
y_true = a * np.exp(-x/t1) + b * np.exp(-x/t2)
sigma = 0.02  # noise level added to the true curve
y = y_true + np.random.randn(x.size)*sigma
def residuals(paras):
    """Residual of the double-exponential model against the observed y."""
    # Pull the current parameter values out of the lmfit Parameters object.
    a, b = paras['a'].value, paras['b'].value
    t1, t2 = paras['t1'].value, paras['t2'].value
    model = a * np.exp(-x/t1) + b * np.exp(-x/t2)
    return model - y

# Fit the data with lmfit.
mini = lmfit.Minimizer(residuals, params)
result = mini.leastsq()
# report_errors() was deprecated and later removed from lmfit;
# report_fit() is the supported equivalent.
lmfit.report_fit(result.params)

# Create lnfunc and a starting distribution around the best-fit values.
lnfunc, guess = create_all(result)
nwalkers, ndim = 30, len(guess)
p0 = emcee.utils.sample_ball(guess, 0.1*np.array(guess), nwalkers)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnfunc)
steps = 500
sampler.run_mcmc(p0, steps)

if HASPYLAB:
    fig, axes = plt.subplots(5, 1, sharex=True, figsize=(8, 9))
    # One panel per fitted parameter plus the noise level 'sigma'; plot every
    # walker's trace, faint, over the sampling steps.
    for (i, name, rv) in zip(range(5), list(params.keys()) + ['sigma'], [a, b, t1, t2, sigma]):
        axes[i].plot(sampler.chain[:, :, i].T, color="k", alpha=0.05)
        axes[i].yaxis.set_major_locator(plt.MaxNLocator(5))