How to use the casadi.nlpsol function in casadi

To help you get started, we’ve selected a few casadi examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github meco-group / omg-tools / omgtools / __init__.py View on Github external
def assert_ma57():
  """Verify that IPOPT can use the HSL ma57 linear solver.

  Builds a trivial one-variable NLP (minimize x**2) and solves it with
  IPOPT configured to use ma57.  If IPOPT reports "Invalid_Option", the
  ma57 solver (from the HSL library) is not installed, so a warning is
  printed and the process exits with a failure status.
  """
  import casadi
  import sys

  # Trivial probe problem: one symbolic variable, objective x**2.
  x = casadi.MX.sym("x")
  nlp = casadi.nlpsol('nlp','ipopt',{"x":x,"f":x**2},{"ipopt": {"linear_solver": "ma57", "print_level":0}, "print_time": False})

  nlp()

  # "Invalid_Option" means IPOPT rejected linear_solver=ma57, i.e. the
  # HSL/ma57 library is not available in this installation.
  if nlp.stats()["return_status"]=="Invalid_Option":
    print("Could not find ma57 (from hsl library), which is needed for the code to run.")
    # Fix: exit non-zero so shells/CI see the missing dependency as a
    # failure; the original exited with status 0 (success), masking it.
    sys.exit(1)
github TUMFTM / global_racetrajectory_optimization / opt_mintime_traj / src / opt_mintime.py View on Github external
# NOTE(review): fragment of a larger function — `pars`, `export_path`,
# `lbw`/`ubw`, `lbg`/`ubg`, `nlp`, `opts` are defined outside this view,
# and the first line's indentation was lost in extraction.
if pars["optim_opts"]["warm_start"]:
        # Load a previous solution (primal w0 and dual multipliers
        # lam_x0/lam_g0) from CSV files to warm-start IPOPT.
        try:
            w0 = np.loadtxt(os.path.join(export_path, 'w0.csv'))
            lam_x0 = np.loadtxt(os.path.join(export_path, 'lam_x0.csv'))
            lam_g0 = np.loadtxt(os.path.join(export_path, 'lam_g0.csv'))
        except IOError:
            # Warm start was requested but the files are missing/unreadable:
            # abort rather than silently solving cold.
            print('\033[91m' + 'WARNING: Failed to load warm start files!' + '\033[0m')
            sys.exit(1)

    # check warm start files
    # A size mismatch means the stored solution belongs to a differently
    # discretized NLP and cannot be reused.
    if pars["optim_opts"]["warm_start"] and not len(w0) == len(lbw):
        print('\033[91m' + 'WARNING: Warm start files do not fit to the dimension of the NLP!' + '\033[0m')
        sys.exit(1)

    # create solver instance
    solver = ca.nlpsol("solver", "ipopt", nlp, opts)

    # ------------------------------------------------------------------------------------------------------------------
    # SOLVE NLP --------------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    # start time measure
    t0 = time.perf_counter()

    # solve NLP
    # With warm start, pass the previous primal/dual point so IPOPT can
    # converge in fewer iterations.
    if pars["optim_opts"]["warm_start"]:
        sol = solver(x0=w0, lbx=lbw, ubx=ubw, lbg=lbg, ubg=ubg, lam_x0=lam_x0, lam_g0=lam_g0)
    else:
        sol = solver(x0=w0, lbx=lbw, ubx=ubw, lbg=lbg, ubg=ubg)

    # end time measure
    tend = time.perf_counter()
github meco-group / omg-tools / omgtools / export / export_p2p.py View on Github external
def create_nlp_src(self, destination):
        """Generate C source code for the point-to-point NLP solver.

        Assembles the NLP from the stored problem description, creates an
        IPOPT ``nlpsol`` instance with expression expansion enabled, dumps
        its C dependencies to 'nlp.c', and moves that file into the export
        directory's src/ folder.
        """
        description = self.father.problem_description
        # Problem data: decision variables, parameters, objective, constraints.
        problem = {
            'x': description['var'],
            'p': description['par'],
            'f': description['obj'],
            'g': description['con'],
        }
        # Copy the user-supplied IPOPT options and force expression
        # expansion, which the C code generation relies on.
        solver_options = dict(description['opt']['solver_options']['ipopt'])
        solver_options['expand'] = True
        solver = nlpsol('solver', 'ipopt', problem, solver_options)
        # Emit the generated C code and relocate it into the export tree.
        solver.generate_dependencies('nlp.c')
        shutil.move(os.getcwd()+'/nlp.c', destination+'src/nlp.c')
github meco-group / omg-tools / omgtools / basics / optilayer.py View on Github external
def create_nlp(var, par, obj, con, options, name=''):
    """Build a CasADi NLP solver and, depending on options, code-generate it.

    NOTE(review): this definition continues past the visible excerpt — the
    'shared' build branch below is truncated.
    """
    codegen = options['codegen']
    if options['verbose'] >= 1:
        print('Building nlp ... ', end=' ')
    t0 = time.time()
    # Standard CasADi NLP dict: variables, parameters, objective, constraints.
    nlp = {'x': var, 'p': par, 'f': obj, 'g': con}
    slv_opt = options['solver_options'][options['solver']]
    # Copy the solver-specific options so the original dict is not mutated
    # when 'expand' is forced on below.
    opt = {}
    for key, value in slv_opt.items():
        opt[key] = value
    opt.update({'expand': True})
    solver = nlpsol('solver', options['solver'], nlp, opt)
    name = 'nlp' if name == '' else 'nlp_' + name
    if codegen['build'] == 'jit':
        # Just-in-time: dump C source, compile it with clang, and rebuild the
        # solver from the compiled artifact; the temporary .c file is removed.
        if options['verbose'] >= 1:
            print(('[jit compilation with flags %s]' % (codegen['flags'])), end=' ')
        solver.generate_dependencies(name+'.c')
        compiler = Compiler(
            name+'.c', 'clang', {'flags': codegen['flags']})
        problem = nlpsol('solver', options['solver'], compiler, slv_opt)
        os.remove(name+'.c')
    elif codegen['build'] == 'shared':
        # Shared-library build writes artifacts into ./build; not supported
        # on Windows.  (Branch truncated in this excerpt.)
        if os.name == 'nt':
            raise ValueError('Build option is not supported for Windows!')
        directory = os.path.join(os.getcwd(), 'build')
        if not os.path.isdir(directory):
            os.makedirs(directory)
        path = os.path.join(directory, name)
github helgeanl / GP-MPC / gp_mpc / mpc_class.py View on Github external
# NOTE(review): fragment of a method; the options dict below is opened
# before this view, and the first line's indentation was lost in extraction.
'ipopt.mu_init' : 0.01,
            'ipopt.tol' : 1e-8,
            # Tiny warm-start pushes/fractions make IPOPT trust the supplied
            # initial point instead of perturbing it.
            'ipopt.warm_start_init_point' : 'yes',
            'ipopt.warm_start_bound_push' : 1e-9,
            'ipopt.warm_start_bound_frac' : 1e-9,
            'ipopt.warm_start_slack_bound_frac' : 1e-9,
            'ipopt.warm_start_slack_bound_push' : 1e-9,
            'ipopt.warm_start_mult_bound_push' : 1e-9,
            'ipopt.mu_strategy' : 'adaptive',
            'print_time' : False,
            'verbose' : False,
            'expand' : True
        }
        # Caller-supplied options override the defaults above.
        if solver_opts is not None:
            options.update(solver_opts)
        self.__solver = ca.nlpsol('mpc_solver', 'ipopt', nlp, options)



        # First prediction used in the NLP, used in plot later
        self.__var_prediction = np.zeros((Nt + 1, Ny))
        self.__mean_prediction = np.zeros((Nt + 1, Ny))
        self.__mean = None

        # Report solver construction statistics.  NOTE(review): the
        # `build_solver_time += time.time()` idiom presumably pairs with an
        # earlier `build_solver_time = -time.time()` outside this view —
        # confirm against the full method.
        build_solver_time += time.time()
        print('\n________________________________________')
        print('# Time to build mpc solver: %f sec' % build_solver_time)
        print('# Number of variables: %d' % self.__num_var)
        print('# Number of equality constraints: %d' % num_eq_con)
        print('# Number of inequality constraints: %d' % num_ineq_con)
        print('----------------------------------------')
github meco-group / omg-tools / omgtools / problems / gcodeschedulerproblem.py View on Github external
# NOTE(review): fragment of a method — `s_x`, `s_y`, `X`, `con` are defined
# before this view, and the first line's indentation was lost in extraction.
# obj = ((definite_integral(circ,0,1.) - ((segment['shape'].radius_out+segment['shape'].radius_in)*0.5)**2)**2)
        # limit the jerk of the trajectory, to avoid nervous solutions
        obj = definite_integral(s_x.derivative(3)**2,0,1.) + definite_integral(s_y.derivative(3)**2,0,1.)

        # make nlp
        nlp = {'x':X, 'f':obj, 'g':con}
        # set options
        options = {}
        # options['ipopt.linear_solver'] = 'ma57'  # must be installed separately
        options['ipopt.tol'] = 1e-8
        options['ipopt.print_level'] = 0
        options['print_time'] = 0
        options['ipopt.warm_start_init_point'] = 'yes'
        options['ipopt.max_iter'] = 3000
        # create solver
        solver = nlpsol('solver','ipopt', nlp, options)

        # set bounds for constraints
        # The last 12 constraints are equalities (bounded to 0 on both
        # sides); the rest are one-sided inequalities g <= 0.
        lbg = np.r_[-np.inf * np.ones(con.size1()-12), np.zeros(12)]
        ubg = np.r_[np.zeros(con.size1()-12), np.zeros(12)]
        # set bounds for variables
        # Decision variables are unbounded.
        lbx = -np.inf * np.ones(X.size1())
        ubx = np.inf * np.ones(X.size1())

        # create solver input
        solver_input = {}
        solver_input['lbx'] = lbx
        solver_input['ubx'] = ubx
        solver_input['lbg'] = lbg
        solver_input['ubg'] = ubg

        # make initial guess
github helgeanl / GP-MPC / gp_mpc / optimize.py View on Github external
# NOTE(review): fragment of a hyperparameter-optimization function —
# `nlp`, `optimizer_opts`, `hyper_init`, `Ny`, `Nx`, `N`, `num_hyp`, `Y`
# are defined outside this view, and the excerpt is truncated below.
# NLP solver options
    opts = {}
    opts['expand']              = True
    opts['print_time']          = False
    opts['verbose']             = False
    opts['ipopt.print_level']   = 1
    opts['ipopt.tol']          = 1e-8
    opts['ipopt.mu_strategy'] = 'adaptive'
    # Caller-supplied options override the defaults above.
    if optimizer_opts is not None:
        opts.update(optimizer_opts)

    # Warm-start IPOPT only when an initial hyperparameter guess is given.
    warm_start = False
    if hyper_init is not None:
        opts['ipopt.warm_start_init_point'] = 'yes'
        warm_start = True
    Solver = ca.nlpsol('Solver', 'ipopt', nlp, opts)

    # Result buffers, one row/slice per output dimension.
    hyp_opt = np.zeros((Ny, num_hyp))
    lam_x_opt = np.zeros((Ny, num_hyp))
    invK = np.zeros((Ny, N, N))
    alpha = np.zeros((Ny, N))
    chol = np.zeros((Ny, N, N))

    print('\n________________________________________')
    print('# Optimizing hyperparameters (N=%d)' % N )
    print('----------------------------------------')
    # Optimize the hyperparameters of each GP output independently.
    for output in range(Ny):
        meanF     = np.mean(Y)
        lb        = -np.inf * np.ones(num_hyp)
        ub        = np.inf * np.ones(num_hyp)
#
        # Length-scale-type entries get a small positive lower bound;
        # NOTE(review): loop body continues past this excerpt.
        lb[:Nx]    = 1e-2