How to use the numdifftools.nd_cstep.Derivative function in numdifftools

To help you get started, we’ve selected a few numdifftools examples based on popular ways the library is used in public projects.
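The snippets below come straight from the library's own nd_cstep.py module. As a point of reference first, here is a minimal sketch of the basic call pattern, using the public numdifftools API that the snippets themselves import as nd (the test function and evaluation point are arbitrary):

import numpy as np
import numdifftools as nd

# First derivative of cosh at 0; the exact value is sinh(0) = 0.
df = nd.Derivative(np.cosh)
print(df(0))   # approximately 0.0

# Second derivative via n=2; the exact value is cosh(0) = 1.
d2f = nd.Derivative(np.cosh, n=2)
print(d2f(0))  # approximately 1.0

Construct the differentiator once, then call it like a function at the points of interest.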


github pbrod / numdifftools / numdifftools / nd_cstep.py
    np.put(err, k, terr.flat[k])
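    # difference between the estimated Hessian and the analytic one, 2 * x.T @ x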
    print(val - 2 * np.dot(x.T, x))
    print(err)
    erri = [v.max() for v in errt]

    plt.loglog(epsi[1:-1], erri)
    plt.show()
    hnd = nd.Hessian(lambda a: fun2(a, y, x))
    hessnd = hnd(xk)
    print('numdiff')
    print(hessnd - 2 * np.dot(x.T, x))
    # assert_almost_equal(hessnd, he[0])
    gnd = nd.Gradient(lambda a: fun2(a, y, x))
    _gradnd = gnd(xk)

    print(Derivative(np.cosh)(0))
    print(nd.Derivative(np.cosh)(0))
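This snippet checks nd.Hessian and nd.Gradient against the analytic Hessian of a least-squares objective, 2 * x.T @ x. A self-contained sketch of the same check (fun2, x, y, and xk here are stand-ins for the snippet's test fixtures):

import numpy as np
import numdifftools as nd

np.random.seed(0)
x = np.random.randn(20, 3)            # stand-in design matrix
y = x.dot(np.array([1.0, 2.0, 3.0]))  # stand-in response

def fun2(beta, y, x):
    # sum-of-squares objective; its Hessian is exactly 2 * x.T @ x
    r = y - x.dot(beta)
    return r.dot(r)

xk = np.zeros(3)
hess = nd.Hessian(lambda b: fun2(b, y, x))(xk)
print(np.allclose(hess, 2 * np.dot(x.T, x)))  # True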
github pbrod / numdifftools / numdifftools / nd_cstep.py
        ih = h * _SQRT_J
        return 12.0 * (f(x + ih, *args, **kwargs) +
                       f(x - ih, *args, **kwargs) - 2 * fx).real

    @staticmethod
    def _multicomplex(f, fx, x, h, *args, **kwds):
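        # multicomplex step: the first derivative is read off the imaginary part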
        z = bicomplex(x + 1j * h, 0)
        return f(z, *args, **kwds).imag

    @staticmethod
    def _multicomplex2(f, fx, x, h, *args, **kwds):
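        # bicomplex step in both components: the second derivative is read off imag12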
        z = bicomplex(x + 1j * h, h)
        return f(z, *args, **kwds).imag12


class Gradient(Derivative):
    def __init__(self, f, step=None, method='central', order=2,
                 full_output=False):
        super(Gradient, self).__init__(f, step=step, method=method, n=1,
                                       order=order, full_output=full_output)
    __doc__ = _cmn_doc % dict(
        derivative='Gradient',
        extra_parameter="""order : int, optional
        defines the order of the error term in the Taylor approximation used.
        For 'central' and 'complex' methods, it must be an even number.""",
        returns="""
    Returns
    -------
    grad : array
        gradient
    """, extra_note="""
    Higher order approximation methods will generally be more accurate, but may
    also suffer more from numerical problems.""")
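These static methods are the internal building blocks; in normal use you pick one through the method argument, and Gradient follows the same construct-then-call pattern as Derivative. A minimal sketch (assuming the standard numdifftools method names; the Rosenbrock test function is arbitrary):

import numpy as np
import numdifftools as nd

# The same derivative computed with three different step methods.
for method in ('central', 'complex', 'multicomplex'):
    df = nd.Derivative(np.exp, method=method)
    print(method, df(1.0))  # each approximately e = 2.71828...

# Gradient of the Rosenbrock function, which vanishes at the minimum (1, 1).
rosen = lambda x: (1 - x[0])**2 + 105. * (x[1] - x[0]**2)**2
print(nd.Gradient(rosen)([1.0, 1.0]))  # approximately [0., 0.]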
github pbrod / numdifftools / numdifftools / nd_cstep.py
def _example3(x=0.0001, fun_name='cos', epsilon=None, method='central',
              scale=None, n=1, order=2):
    fun0, dfun = get_test_function(fun_name, n)
    if dfun is None:
        return dict(n=n, order=order, method=method, fun=fun_name,
                    error=np.nan, scale=np.nan)
    fd = Derivative(fun0, step=epsilon, method=method, n=n, order=order)
    t = []
    scales = np.arange(1.0, 45, 0.25)
    for scale in scales:
        fd.step.scale = scale
        try:
            val = fd(x)
        except Exception:
            val = np.nan
        t.append(val)
    t = np.array(t)
    tt = dfun(x)
    relative_error = np.abs(t - tt) / (np.maximum(np.abs(tt), 1)) + 1e-16

    weights = np.ones((3,))/3
    relative_error = convolve1d(relative_error, weights)  # smooth curve
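_example3 sweeps the step generator's scale to map how the error depends on the step size. The same kind of experiment can be run with fixed steps through the step argument (the function and step grid here are arbitrary):

import numpy as np
import numdifftools as nd

true_val = -np.sin(0.5)  # exact derivative of cos at 0.5
for h in np.logspace(-10, -1, 10):
    est = nd.Derivative(np.cos, step=h)(0.5)
    print('step=%8.1e  error=%8.1e' % (h, abs(est - true_val)))

Too small a step amplifies round-off noise, too large a step inflates the truncation error; that trade-off is exactly what the scale sweep above maps out.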
github pbrod / numdifftools / numdifftools / nd_cstep.py
    >>> val = Jfun([1,2,0.75])
    >>> np.allclose(val, np.zeros((10,3)))
    True

    >>> fun2 = lambda x : x[0]*x[1]*x[2] + np.exp(x[0])*x[1]
    >>> Jfun3 = ndc.Jacobian(fun2)
    >>> Jfun3([3.,5.,7.])
    array([ 135.42768462,   41.08553692,   15.        ])
    ''', see_also="""
    See also
    --------
    Derivative, Hessian, Gradient
    """)


class Hessdiag(Derivative):
    def __init__(self, f, step=None, method='central', order=2,
                 full_output=False):
        super(Hessdiag, self).__init__(f, step=step, method=method, n=2,
                                       order=order, full_output=full_output)
    __doc__ = _cmn_doc % dict(
        derivative='Hessian diagonal',
        extra_parameter="""order : int, optional
        defines the order of the error term in the Taylor approximation used.
        For 'central' and 'complex' methods, it must be an even number.""",
        returns="""
    Returns
    -------
    hessdiag : array
        hessian diagonal
    """, extra_note="""
    Higher order approximation methods will generally be more accurate, but may
    also suffer more from numerical problems.""")
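Jacobian and Hessdiag follow the same pattern. Below is a sketch that verifies the doctest values above against the analytic partial derivatives, plus a Hessian-diagonal example (both test points are arbitrary):

import numpy as np
import numdifftools as nd

# Jacobian of the doctest's fun2; the analytic partials are
# [x1*x2 + exp(x0)*x1, x0*x2 + exp(x0), x0*x1].
fun2 = lambda x: x[0] * x[1] * x[2] + np.exp(x[0]) * x[1]
jac = nd.Jacobian(fun2)(np.array([3., 5., 7.]))
expected = np.array([5 * 7 + np.exp(3) * 5, 3 * 7 + np.exp(3), 3 * 5])
print(np.allclose(jac, expected))  # True

# Hessdiag returns only the diagonal second derivatives.
fun = lambda x: x[0] + x[1]**2 + x[2]**3
print(nd.Hessdiag(fun)(np.array([1., 2., 3.])))  # approximately [0., 2., 18.]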
github pbrod / numdifftools / numdifftools / nd_cstep.py
def _example(x=0.0001, fun_name='inv', epsilon=None, method='central',
             scale=None):
    '''Print a comparison of a derivative estimate for the named test function
    with its true derivative.'''
    fun0, dfun = get_test_function(fun_name)

    h = _default_base_step(x, scale=2, epsilon=None)  # 1e-4

    fd = Derivative(fun0, step=epsilon, method=method, full_output=True)

    t, res = fd(x)

    txt = (' (f(x+h)-f(x))/h = %g\n' %
           ((fun0(x + h) - fun0(x)) / h))
    deltas = np.array([h for h in epsilon(x, fd.scale)])

    print((txt +
           '      true df(x) = %20.15g\n' +
           ' estimated df(x) = %20.15g\n' +
           ' true err = %g\n err estimate = %g\n relative err = %g\n'
           ' delta = %g\n') % (dfun(x), t, dfun(x) - t,
                               res.error_estimate,
                               res.error_estimate / t,
                               deltas.flat[res.index]))
    # plt.show()
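The full_output=True flag used here returns the estimate together with diagnostic information; the snippet reads res.error_estimate and res.index from it. A minimal sketch of the same pattern:

import numpy as np
import numdifftools as nd

fd = nd.Derivative(np.exp, full_output=True)
val, info = fd(0.0)
print(val)                  # approximately 1.0, the exact derivative of exp at 0
print(info.error_estimate)  # the method's estimate of its own absolute error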
github pbrod / numdifftools / numdifftools / nd_cstep.py
def _example2(x=0.0001, fun_name='inv', epsilon=None, method='central',
              scale=None, n=1):
    fun0, dfun = get_test_function(fun_name, n)

    fd = Derivative(fun0, step=epsilon, method=method, n=n)
    t = []
    orders = n + (n % 2) + np.arange(0, 12, 2)

    for order in orders:
        fd.order = order
        fd.step.num_steps = n + order - 1
        t.append(fd(x))
    t = np.array(t)
    tt = dfun(x)
    plt.semilogy(orders, np.abs(t - tt) / (np.abs(tt) + 1e-17) + 1e-17)

    plt.show()
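_example2 raises the order parameter to watch the truncation error fall with higher-order rules. The same comparison can be run through the public API (the orders and test function are arbitrary):

import numpy as np
import numdifftools as nd

x, true_val = 0.5, np.cos(0.5)  # exact derivative of sin at 0.5
for order in (2, 4, 6, 8):
    est = nd.Derivative(np.sin, order=order)(x)
    print('order=%d  error=%g' % (order, abs(est - true_val)))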