How to use numdifftools - 10 common examples

To help you get started, we’ve selected a few numdifftools examples, based on popular ways it is used in public projects.
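
If you are new to the library, the core entry points are nd.Derivative, nd.Gradient, nd.Hessian and nd.Jacobian. The sketch below is not taken from any of the projects listed here; the functions and evaluation points are placeholders chosen only to illustrate the calls.

import numpy as np
import numdifftools as nd

# First derivative of exp at x = 1.0; the exact answer is e ~ 2.71828.
d_exp = nd.Derivative(np.exp)
print(d_exp(1.0))

# Gradient and Hessian of a scalar function of two variables,
# evaluated at its minimum [1, 1], where the gradient is ~0.
rosen = lambda x: (1.0 - x[0])**2 + 105.0 * (x[1] - x[0]**2)**2
print(nd.Gradient(rosen)([1.0, 1.0]))
print(nd.Hessian(rosen)([1.0, 1.0]))

# Jacobian of a vector-valued function; expected result [[4, 0], [3, 2]].
fun = lambda x: np.array([x[0]**2, x[0] * x[1]])
print(nd.Jacobian(fun)([2.0, 3.0]))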


From pbrod/numdifftools, numdifftools/nd_cstep.py (view on GitHub):
    method = 'complex'
    data = []
    for name in ['exp', 'expm1', 'sin', 'cos', 'square']:
        # function_names[:-3]:
        for order in range(4, 5, 1):
            #  order = 1
            for n in range(1, 16, 1):
                num_steps = n + order - 1 + num_extrap
                if method in ['central', 'complex']:
                    step = 2
                    if (n > 1 or order >= 4) and method == 'complex':
                        step = 4
                    num_steps = (n + order-1) // step + num_extrap

                step_ratio = 1.6  # 4**(1./n)
                epsilon = MinStepGenerator(num_steps=num_steps,
                                           step_ratio=step_ratio,
                                           offset=0, use_exact_steps=True)
                data.append(pd.DataFrame(_example3(x=0.7, fun_name=name,
                                                   epsilon=epsilon,
                                                   method=method,
                                                   scale=None, n=n,
                                                   order=order),
                                         index=np.arange(1)))
    df = pd.concat(data)
    # sprint(df)
    print(df.groupby(['n']).mean())
    print(np.diff(df.groupby(['n']).mean(), axis=0))
    plt.show('hold')
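
The loop above drives an internal benchmarking helper (_example3) in the numdifftools repository itself. In ordinary user code the same ingredients, a MinStepGenerator and the complex-step method, are usually passed straight to nd.Derivative. A minimal sketch with illustrative settings (not from the file above):

import numpy as np
import numdifftools as nd

# Complex-step first derivative of sin at x = 0.7, with an explicit step generator.
step = nd.MinStepGenerator(num_steps=10, step_ratio=1.6, offset=0)
d_sin = nd.Derivative(np.sin, step=step, method='complex', n=1)
print(d_sin(0.7), np.cos(0.7))  # the two printed values should agree closely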
From locuslab/mpc.pytorch, tests/test_mpc.py (view on GitHub):
    def f_f(f_flat):
        f_ = f_flat.reshape(T-1, n_batch, n_state)
        return forward_numpy(C, c, x_init, u_lower, u_upper, F, f_)

    def f_x_init(x_init):
        x_init = x_init.reshape(1, -1)
        return forward_numpy(C, c, x_init, u_lower, u_upper, F, f)

    u = forward_numpy(C, c, x_init, u_lower, u_upper, F, f)

    # Make sure the solution is strictly partially on the boundary.
    assert np.any(u == u_lower.reshape(-1)) or np.any(u == u_upper.reshape(-1))
    assert np.any((u != u_lower.reshape(-1)) & (u != u_upper.reshape(-1)))

    du_dc_fd = nd.Jacobian(f_c)(c.reshape(-1))
    du_dF_fd = nd.Jacobian(f_F)(F.reshape(-1))
    du_df_fd = nd.Jacobian(f_f)(f.reshape(-1))
    du_dxinit_fd = nd.Jacobian(f_x_init)(x_init[0])

    _C, _c, _x_init, _u_lower, _u_upper, F, f = [
        Variable(torch.Tensor(x).double(), requires_grad=True)
        if x is not None else None
        for x in [C, c, x_init, u_lower, u_upper, F, f]
    ]

    u_init = None
    x_lqr, u_lqr, objs_lqr = mpc.MPC(
        n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
        lqr_iter=20,
        verbose=1,
    )(_x_init, QuadCost(_C, _c), LinDx(F, f))
From popgenmethods/momi2, test/test_autograd.py (view on GitHub):
def check_gradient(f, x):
    print(x, "\n", f(x))

    print("# grad2")
    grad2 = Gradient(f)(x)
    print("# building grad1")
    g = grad(f)
    print("# computing grad1")
    grad1 = g(x)

    print("gradient1\n", grad1, "\ngradient2\n", grad2)
    assert np.allclose(grad1, grad2)

    # check Hessian vector product
    y = np.random.normal(size=x.shape)
    gdot = lambda u: np.dot(g(u), y)
    hess1, hess2 = grad(gdot)(x), Gradient(gdot)(x)
    print("hess1\n", hess1, "\nhess2\n", hess2)
    assert np.allclose(hess1, hess2)
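
The same pattern is useful for sanity-checking a hand-written gradient against numdifftools, without autograd in the picture. A minimal sketch with a made-up objective and its exact gradient:

import numpy as np
import numdifftools as nd

f = lambda x: np.sum(np.sin(x) ** 2)
grad_analytic = lambda x: 2.0 * np.sin(x) * np.cos(x)   # elementwise d/dx of sin(x)**2

x = np.array([0.3, -1.2, 2.5, 0.0, 1.1])
assert np.allclose(grad_analytic(x), nd.Gradient(f)(x))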
From oxfordcontrol/Bayesian-Optimization, tests/test_derivatives.py (view on GitHub):
def derivatives_numerical(x, model):
    '''
    Returns the gradient and hessian of the optimal value of
    the SDP with respect to x.
    Beware, the hessian is based on the analytical derivative,
    for accuracy and performance reasons.
    '''
    def opt_val(y):
        return model.acquisition(y)[0]

    def gradient(y):
        return model.acquisition(y)[1]

    gradient_numerical = nd.Gradient(opt_val)(x)
    hessian_numerical = nd.Jacobian(gradient)(x)

    return gradient_numerical, hessian_numerical
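
When an analytical gradient is available, as with gradient above, the numerical Hessian can be formed either directly with nd.Hessian on the objective or as nd.Jacobian of that gradient; the latter only differentiates numerically once and is typically more accurate. A small self-contained sketch with a made-up function (not the model from the test):

import numpy as np
import numdifftools as nd

f = lambda x: x[0]**2 * x[1] + np.exp(x[1])
grad_f = lambda x: np.array([2.0 * x[0] * x[1], x[0]**2 + np.exp(x[1])])

x0 = np.array([1.5, -0.5])
hess_direct = nd.Hessian(f)(x0)            # second differences of f
hess_from_grad = nd.Jacobian(grad_f)(x0)   # first differences of the exact gradient
assert np.allclose(hess_direct, hess_from_grad, atol=1e-5)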
From pymc-devs/pymc3, tests/test_distributions.py (view on GitHub):
def check_dlogp(model, value, domains):

    domains = [d[1:-1] for d in domains]

    if not model.cont_vars:
        return

    bij = DictToArrayBijection(ArrayOrdering(model.cont_vars), model.test_point)

    dlp = model.dlogpc()
    dlogp = bij.mapf(dlp)

    lp = model.logpc
    logp = bij.mapf(lp)
    ndlogp = Gradient(logp)

    for a in its.product(*domains):
        pt = Point(dict((str(var), val) for var, val in zip(model.vars, a)), model=model)

        pt = bij.map(pt)
From locuslab/mpc.pytorch, tests/test_mpc.py (view on GitHub):
    def f_F(F_flat):
        F_ = F_flat.reshape(T-1, n_batch, n_state, n_sc)
        return forward_numpy(C, c, x_init, u_lower, u_upper, F_, f)

    def f_f(f_flat):
        f_ = f_flat.reshape(T-1, n_batch, n_state)
        return forward_numpy(C, c, x_init, u_lower, u_upper, F, f_)

    u = forward_numpy(C, c, x_init, u_lower, u_upper, F, f)

    # Make sure the solution is not on the boundary.
    assert np.all(u != u_lower.reshape(-1)) and np.all(u != u_upper.reshape(-1))

    du_dc_fd = nd.Jacobian(f_c)(c.reshape(-1))
    du_dF_fd = nd.Jacobian(f_F)(F.reshape(-1))
    du_df_fd = nd.Jacobian(f_f)(f.reshape(-1))

    _C, _c, _x_init, _u_lower, _u_upper, F, f = [
        Variable(torch.Tensor(x).double(), requires_grad=True)
        if x is not None else None
        for x in [C, c, x_init, u_lower, u_upper, F, f]
    ]

    u_init = None
    x_lqr, u_lqr, objs_lqr = mpc.MPC(
        n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
        lqr_iter=20,
        verbose=1,
        exit_unconverged=False,
    )(_x_init, QuadCost(_C, _c), LinDx(F, f))
    u_lqr = u_lqr.view(-1)
From locuslab/mpc.pytorch, tests/test_mpc.py (view on GitHub):
    def f_c(c_flat):
        c_ = c_flat.reshape(T, n_batch, n_sc)
        return forward_numpy(C, c_, x_init, u_lower, u_upper, F, f)

    def f_F(F_flat):
        F_ = F_flat.reshape(T-1, n_batch, n_state, n_sc)
        return forward_numpy(C, c, x_init, u_lower, u_upper, F_, f)

    def f_f(f_flat):
        f_ = f_flat.reshape(T-1, n_batch, n_state)
        return forward_numpy(C, c, x_init, u_lower, u_upper, F, f_)

    u = forward_numpy(C, c, x_init, u_lower, u_upper, F, f)

    # Make sure the solution is not on the boundary.
    assert np.all(u != u_lower.reshape(-1)) and np.all(u != u_upper.reshape(-1))

    du_dc_fd = nd.Jacobian(f_c)(c.reshape(-1))
    du_dF_fd = nd.Jacobian(f_F)(F.reshape(-1))
    du_df_fd = nd.Jacobian(f_f)(f.reshape(-1))

    _C, _c, _x_init, _u_lower, _u_upper, F, f = [
        Variable(torch.Tensor(x).double(), requires_grad=True)
        if x is not None else None
        for x in [C, c, x_init, u_lower, u_upper, F, f]
    ]

    u_init = None
    x_lqr, u_lqr, objs_lqr = mpc.MPC(
        n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
        lqr_iter=20,
        verbose=1,
        exit_unconverged=False,
    )(_x_init, QuadCost(_C, _c), LinDx(F, f))
From joschu/rapprentice, test/tps_unit_tests.py (view on GitHub):
def nonrigidity_gradient():
    import numdifftools as ndt
    D = 3
    pts0 = np.random.randn(10, D)
    pts_eval = np.random.randn(3, D)
    def err_partial(params):
        lin_ag = params[0:9].reshape(3, 3)
        trans_g = params[9:12]
        w_ng = params[12:].reshape(-1, 3)
        return tps.tps_nr_err(pts_eval, lin_ag, trans_g, w_ng, pts0)
    lin_ag, trans_g, w_ng = np.random.randn(D, D), np.random.randn(D), np.random.randn(len(pts0), D)
    J1 = tps.tps_nr_grad(pts_eval, lin_ag, trans_g, w_ng, pts0)
    J = ndt.Jacobian(err_partial)(np.r_[lin_ag.flatten(), trans_g.flatten(), w_ng.flatten()])
    assert np.allclose(J1, J)
From locuslab/mpc.pytorch, tests/test_mpc.py (view on GitHub):
    def f_c(c_flat):
        c_ = c_flat.reshape(T, n_batch, n_sc)
        return forward_numpy(C, c_, x_init, u_lower, u_upper, fc0b)

    def f_fc0b(fc0b):
        return forward_numpy(C, c, x_init, u_lower, u_upper, fc0b)

    u = forward_numpy(C, c, x_init, u_lower, u_upper, fc0b)

    # Make sure the solution is strictly partially on the boundary.
    assert np.any(u == u_lower.reshape(-1)) or np.any(u == u_upper.reshape(-1))
    assert np.any((u != u_lower.reshape(-1)) & (u != u_upper.reshape(-1)))

    du_dc_fd = nd.Jacobian(f_c)(c.reshape(-1))
    du_dfc0b_fd = nd.Jacobian(f_fc0b)(fc0b.reshape(-1))

    dynamics.fcs[0].bias.data = torch.DoubleTensor(fc0b).clone()

    _C, _c, _x_init, _u_lower, _u_upper, fc0b = [
        Variable(torch.Tensor(x).double(), requires_grad=True)
        if x is not None else None
        for x in [C, c, x_init, u_lower, u_upper, fc0b]
    ]

    u_init = None
    x_lqr, u_lqr, objs_lqr = mpc.MPC(
        n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
        lqr_iter=20,
        verbose=-1,
        max_linesearch_iter=1,
        grad_method=GradMethods.ANALYTIC,