How to use the numdifftools.Jacobian function in numdifftools

To help you get started, we’ve selected a few numdifftools.Jacobian examples based on popular ways it is used in public projects.
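
Before the project excerpts, here is a minimal, self-contained sketch of the basic pattern (it is not taken from any of the projects below): nd.Jacobian wraps a vector-valued function and returns a callable that evaluates the numerical Jacobian at a point.

import numpy as np
import numdifftools as nd

def fun(x):
    # Simple R^2 -> R^2 map: [x0 * x1, sin(x0)]
    return np.array([x[0] * x[1], np.sin(x[0])])

x = np.array([1.0, 2.0])
J = nd.Jacobian(fun)(x)          # finite-difference Jacobian, shape (2, 2)
J_analytic = np.array([[x[1], x[0]],
                       [np.cos(x[0]), 0.0]])
assert np.allclose(J, J_analytic)

The project snippets below follow the same pattern: wrap the quantity of interest in a function of a flat parameter vector, then call nd.Jacobian on it.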

github locuslab / mpc.pytorch / tests / test_mpc.py
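
This excerpt from the mpc.pytorch test suite wraps the MPC solver in small NumPy functions of flattened inputs so that nd.Jacobian can compute finite-difference gradients of the control solution with respect to the cost and dynamics terms; the asserts check that the solution lies partly, but not entirely, on the control bounds.
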
    def f_f(f_flat):
        f_ = f_flat.reshape(T-1, n_batch, n_state)
        return forward_numpy(C, c, x_init, u_lower, u_upper, F, f_)

    def f_x_init(x_init):
        x_init = x_init.reshape(1, -1)
        return forward_numpy(C, c, x_init, u_lower, u_upper, F, f)

    u = forward_numpy(C, c, x_init, u_lower, u_upper, F, f)

    # Make sure the solution is strictly partially on the boundary.
    assert np.any(u == u_lower.reshape(-1)) or np.any(u == u_upper.reshape(-1))
    assert np.any((u != u_lower.reshape(-1)) & (u != u_upper.reshape(-1)))

    du_dc_fd = nd.Jacobian(f_c)(c.reshape(-1))
    du_dF_fd = nd.Jacobian(f_F)(F.reshape(-1))
    du_df_fd = nd.Jacobian(f_f)(f.reshape(-1))
    du_dxinit_fd = nd.Jacobian(f_x_init)(x_init[0])

    _C, _c, _x_init, _u_lower, _u_upper, F, f = [
        Variable(torch.Tensor(x).double(), requires_grad=True)
        if x is not None else None
        for x in [C, c, x_init, u_lower, u_upper, F, f]
    ]

    u_init = None
    x_lqr, u_lqr, objs_lqr = mpc.MPC(
        n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
        lqr_iter=20,
        verbose=1,
    )(_x_init, QuadCost(_C, _c), LinDx(F, f))

github locuslab / mpc.pytorch / tests / test_mpc.py
    def f_F(F_flat):
        F_ = F_flat.reshape(T-1, n_batch, n_state, n_sc)
        return forward_numpy(C, c, x_init, u_lower, u_upper, F_, f)

    def f_f(f_flat):
        f_ = f_flat.reshape(T-1, n_batch, n_state)
        return forward_numpy(C, c, x_init, u_lower, u_upper, F, f_)

    u = forward_numpy(C, c, x_init, u_lower, u_upper, F, f)

    # Make sure the solution is not on the boundary.
    assert np.all(u != u_lower.reshape(-1)) and np.all(u != u_upper.reshape(-1))

    du_dc_fd = nd.Jacobian(f_c)(c.reshape(-1))
    du_dF_fd = nd.Jacobian(f_F)(F.reshape(-1))
    du_df_fd = nd.Jacobian(f_f)(f.reshape(-1))

    _C, _c, _x_init, _u_lower, _u_upper, F, f = [
        Variable(torch.Tensor(x).double(), requires_grad=True)
        if x is not None else None
        for x in [C, c, x_init, u_lower, u_upper, F, f]
    ]

    u_init = None
    x_lqr, u_lqr, objs_lqr = mpc.MPC(
        n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
        lqr_iter=20,
        verbose=1,
        exit_unconverged=False,
    )(_x_init, QuadCost(_C, _c), LinDx(F, f))
    u_lqr = u_lqr.view(-1)

github locuslab / mpc.pytorch / tests / test_mpc.py
    def f_c(c_flat):
        c_ = c_flat.reshape(T, n_batch, n_sc)
        return forward_numpy(C, c_, x_init, u_lower, u_upper, F, f)

    def f_F(F_flat):
        F_ = F_flat.reshape(T-1, n_batch, n_state, n_sc)
        return forward_numpy(C, c, x_init, u_lower, u_upper, F_, f)

    def f_f(f_flat):
        f_ = f_flat.reshape(T-1, n_batch, n_state)
        return forward_numpy(C, c, x_init, u_lower, u_upper, F, f_)

    u = forward_numpy(C, c, x_init, u_lower, u_upper, F, f)

    # Make sure the solution is not on the boundary.
    assert np.all(u != u_lower.reshape(-1)) and np.all(u != u_upper.reshape(-1))

    du_dc_fd = nd.Jacobian(f_c)(c.reshape(-1))
    du_dF_fd = nd.Jacobian(f_F)(F.reshape(-1))
    du_df_fd = nd.Jacobian(f_f)(f.reshape(-1))

    _C, _c, _x_init, _u_lower, _u_upper, F, f = [
        Variable(torch.Tensor(x).double(), requires_grad=True)
        if x is not None else None
        for x in [C, c, x_init, u_lower, u_upper, F, f]
    ]

    u_init = None
    x_lqr, u_lqr, objs_lqr = mpc.MPC(
        n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
        lqr_iter=20,
        verbose=1,
        exit_unconverged=False,
    )(_x_init, QuadCost(_C, _c), LinDx(F, f))

github joschu / rapprentice / test / tps_unit_tests.py
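
Here rapprentice's TPS unit test uses ndt.Jacobian of the non-rigidity error as a numerical reference and checks it against the analytic gradient returned by tps.tps_nr_grad.
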
def nonrigidity_gradient():
    import numdifftools as ndt
    D = 3
    pts0 = np.random.randn(10,D)
    pts_eval = np.random.randn(3,D)
    def err_partial(params):
        lin_ag = params[0:9].reshape(3,3)
        trans_g = params[9:12]
        w_ng = params[12:].reshape(-1,3)
        return tps.tps_nr_err(pts_eval, lin_ag, trans_g, w_ng, pts0)    
    lin_ag, trans_g, w_ng = np.random.randn(D,D), np.random.randn(D), np.random.randn(len(pts0), D)
    J1 = tps.tps_nr_grad(pts_eval, lin_ag, trans_g, w_ng, pts0)
    J = ndt.Jacobian(err_partial)(np.r_[lin_ag.flatten(), trans_g.flatten(), w_ng.flatten()])
    assert np.allclose(J1, J)

github locuslab / mpc.pytorch / tests / test_mpc.py
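
This variant of the mpc.pytorch test differentiates the solution with respect to c and to fc0b, the bias of the first layer of a learned dynamics network, again using nd.Jacobian as the finite-difference reference.
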
    def f_c(c_flat):
        c_ = c_flat.reshape(T, n_batch, n_sc)
        return forward_numpy(C, c_, x_init, u_lower, u_upper, fc0b)

    def f_fc0b(fc0b):
        return forward_numpy(C, c, x_init, u_lower, u_upper, fc0b)

    u = forward_numpy(C, c, x_init, u_lower, u_upper, fc0b)

    # Make sure the solution is strictly partially on the boundary.
    assert np.any(u == u_lower.reshape(-1)) or np.any(u == u_upper.reshape(-1))
    assert np.any((u != u_lower.reshape(-1)) & (u != u_upper.reshape(-1)))

    du_dc_fd = nd.Jacobian(f_c)(c.reshape(-1))
    du_dfc0b_fd = nd.Jacobian(f_fc0b)(fc0b.reshape(-1))

    dynamics.fcs[0].bias.data = torch.DoubleTensor(fc0b).clone()

    _C, _c, _x_init, _u_lower, _u_upper, fc0b = [
        Variable(torch.Tensor(x).double(), requires_grad=True)
        if x is not None else None
        for x in [C, c, x_init, u_lower, u_upper, fc0b]
    ]

    u_init = None
    x_lqr, u_lqr, objs_lqr = mpc.MPC(
        n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
        lqr_iter=20,
        verbose=-1,
        max_linesearch_iter=1,
        grad_method=GradMethods.ANALYTIC,

github ethz-asl / kalibr / aslam_nonparametric_estimation / bsplines / interp_rotation / jacobians.py
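
In kalibr's rotation-interpolation script, nd.Jacobian is applied to small lambda wrappers around quaternion exponential and logarithm maps to obtain their numerical Jacobians.
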
    p = math.sqrt(dot(phi.T,phi))
    a = phi/p
    sp2 = math.sin(0.5 * p)
    sp = math.sin(p)
    ax = asrl.crossMx(a)
    px = asrl.crossMx(phi)
    return eye(3) - (2/p) * sp2 * sp2 * ax + (1/p)*(p - sp)*np.dot(ax,ax)

Jfun4 = nd.Jacobian( lambda d: qexp( d ) )


def dqinv(dq,q1):
    dq1 = qdot(qexp(dq),q1)
    return qinv(dq1)

Jfun5 = nd.Jacobian( lambda d: dqinv(d, q1) )

# These equations show what happens when you substitute
# quat --> Euler Rodriguez for qlog
# and
# Euler Rodriquez --> quat for qexp.
#
# The equations are *MUCH NICER*
#
# Wow, are they ever nice.
def qlog2(q):
    return qeps(q)/qeta(q)
    

def qexp2(aa):
    phi = 2*math.atan(np.linalg.norm(aa))
    eta = math.cos(0.5 * phi)

github pbrod / numdifftools / src / numdifftools / run_benchmark.py
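
numdifftools' own benchmark script fills dictionaries of gradient and Hessian estimators, instantiating nd.Jacobian with forward, central, and complex-step methods alongside the algopy-, statsmodels-, and scipy-backed variants.
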
    hessian_fun = 'Hessian'

    if nda is not None:
        nda_method = 'forward'
        nda_txt = 'algopy_' + nda_method
        gradient_funs[nda_txt] = nda.Jacobian(1, method=nda_method)

        hessian_funs[nda_txt] = getattr(nda, hessian_fun)(1, method=nda_method)
    ndc_hessian = getattr(nd, hessian_fun)

    order = 2
    for method in ['forward', 'central', 'complex']:
        method2 = method + adaptiv_txt
        options = dict(method=method, order=order)
        gradient_funs[method] = nd.Jacobian(1, step=fixed_step, **options)
        gradient_funs[method2] = nd.Jacobian(1, step=epsilon, **options)
        hessian_funs[method] = ndc_hessian(1, step=fixed_step, **options)
        hessian_funs[method2] = ndc_hessian(1, step=epsilon, **options)

    hessian_funs['forward_statsmodels'] = nds.Hessian(1, method='forward')
    hessian_funs['central_statsmodels'] = nds.Hessian(1, method='central')
    hessian_funs['complex_statsmodels'] = nds.Hessian(1, method='complex')

    gradient_funs['forward_statsmodels'] = nds.Jacobian(1, method='forward')
    gradient_funs['central_statsmodels'] = nds.Jacobian(1, method='central')
    gradient_funs['complex_statsmodels'] = nds.Jacobian(1, method='complex')
    gradient_funs['forward_scipy'] = nsc.Jacobian(1, method='forward')
    gradient_funs['central_scipy'] = nsc.Jacobian(1, method='central')
    gradient_funs['complex_scipy'] = nsc.Jacobian(1, method='complex')

    run_gradient_and_hessian_benchmarks(gradient_funs, hessian_funs, problem_sizes)

github dit / dit / dit / algorithms / optimization.py
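
dit's optimization routine builds the keyword arguments for a call to minimize and, when no callable analytic Jacobian is set, falls back to ndt.Jacobian for the objective and for each constraint function.
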
        x0 = self._optima.copy()
        count = (x0 < cutoff).sum()
        x0[x0 < cutoff] = 0

        minimizer_kwargs = {'bounds': [(0, 0) if np.isclose(x, 0) else (0, 1) for x in x0],
                            'tol': None,
                            'callback': None,
                            'constraints': self.constraints,
                            }

        try:  # pragma: no cover
            if callable(self._jacobian):
                minimizer_kwargs['jac'] = self._jacobian
            else:  # compute jacobians for objective, constraints using numdifftools
                import numdifftools as ndt
                minimizer_kwargs['jac'] = ndt.Jacobian(self.objective)
                for const in minimizer_kwargs['constraints']:
                    const['jac'] = ndt.Jacobian(const['fun'])
        except AttributeError:
            pass

        res = minimize(fun=self.objective,
                       x0=x0,
                       **minimizer_kwargs
                       )

        if res.success:
            self._optima = res.x.copy()

            if count < (res.x < cutoff).sum():
                self._polish(cutoff=cutoff)

github ethz-asl / kalibr / aslam_nonparametric_estimation / bsplines / interp_rotation / jacobians.py
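
A second excerpt from the same kalibr script defines the simplified qlog2/qexp2 maps and takes the Jacobian of their composition with qdot via Jfun3.
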
#
# Wow, are they ever nice.
def qlog2(q):
    return qeps(q)/qeta(q)
    

def qexp2(aa):
    phi = 2*math.atan(np.linalg.norm(aa))
    eta = math.cos(0.5 * phi)
    q = np.hstack([aa*eta,eta])
    return q

def invS2(p):
    return  ( dot(p,p.T) + eye(3) + asrl.crossMx(p) )

Jfun3 = nd.Jacobian( lambda d: qlog2( qdot(qexp2(d),q1) ) )