How to use the nlp.model.linemodel.C1LineModel class in nlp

To help you get started, we’ve selected a few nlp examples based on common ways C1LineModel is used in public projects.

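Before diving into the excerpts, here is a minimal sketch of the typical call pattern: wrap an existing model, a point x and a direction d in a C1LineModel to obtain the one-dimensional restriction ϕ(t) = f(x + td). The QuadModel class below is a made-up toy for illustration only; C1LineModel itself and the NLPModel-style obj/grad overrides come from the project, but the exact base-class constructor arguments are an assumption.

import numpy as np
from nlp.model.nlpmodel import NLPModel
from nlp.model.linemodel import C1LineModel

class QuadModel(NLPModel):
    """Hypothetical toy model f(x) = 1/2 ||x||^2 (illustration only)."""
    def obj(self, x):
        return 0.5 * np.dot(x, x)
    def grad(self, x):
        return x

model = QuadModel(n=3, name="quad")     # assumed NLPModel constructor signature
x = np.ones(3)
d = -model.grad(x)                      # steepest-descent direction
line_model = C1LineModel(model, x, d)   # ϕ(t) := f(x + t d)

phi0 = line_model.obj(0.0)              # ϕ(0)  = f(x)
slope0 = line_model.grad(0.0)           # ϕ'(0) = ∇f(x)ᵀ d, negative along a descent direction

In practice the line model is usually handed to one of the line searches shown in the excerpts below (for example ArmijoLineSearch or StrongWolfeLineSearch) rather than evaluated by hand.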

github PythonOptimizers / NLP.py / tests / model / test_linemodel.py View on Github
def c1boundedrosenbrock_restriction_feas(request):
    nvar = request.param
    # The original model has bounds 0 ≤ x ≤ 1.
    # We choose an x inside the bounds and a random d.
    return C1LineModel(BoundedRosenbrock(nvar, np.zeros(nvar), np.ones(nvar)),
                       np.random.random(nvar),
                       np.random.random(nvar) - 0.5)
github PythonOptimizers / NLP.py / tests / ls / test_wolfe.py View on Github
def rosenbrock_wolfe_ascent(request):
    model = Rosenbrock(request.param)
    x = np.zeros(request.param)
    g = model.grad(x)
    c1model = C1LineModel(model, x, g)  # ascent direction!
    return c1model
github PythonOptimizers / NLP.py / tests / model / test_linemodel.py View on Github
def c1boundedrosenbrock_restriction_infeas(request):
    nvar = request.param
    # The original model has bounds 0 ≤ x ≤ 1.
    # We choose an x outside the bounds and a direction d.
    x = np.zeros(nvar)
    x[0] = 2
    return C1LineModel(BoundedRosenbrock(nvar, np.zeros(nvar), np.ones(nvar)),
                       x,
                       np.ones(nvar))
github PythonOptimizers / NLP.py / tests / ls / test_wolfe.py View on Github
def rosenbrock_wolfe(request):
    model = Rosenbrock(request.param)
    x = np.zeros(request.param)
    g = model.grad(x)
    c1model = C1LineModel(model, x, -g)  # steepest descent direction
    return StrongWolfeLineSearch(c1model)
github PythonOptimizers / NLP.py / nlp / optimize / regsqp / new_regsqp.py View on Github
def emergency_backtrack(self, x, y, dx, dy, Fnorm0, Fnorm_ext):
        u"""Backtrack on ‖F(w)‖."""
        model = self.model
        goal = self.theta * Fnorm0 + self.epsilon

        self.log.warning("Starting emergency backtrack with goal: %6.2e",
                         goal)

        fnorm_model = FnormModel(model, prox=0, penalty=0)
        w = np.concatenate((x, y))
        d = np.concatenate((dx, dy))

        line_model = C1LineModel(fnorm_model, w, d)
        ls = SimpleBacktrackingLineSearch(line_model, decr=1.75,
                                          value=Fnorm0, trial_value=Fnorm_ext,
                                          goal=goal)

        try:
            for step in ls:
                self.log.debug(ls_fmt, step, ls.trial_value)

            self.log.debug('step norm: %6.2e',
                           norm2(w - ls.iterate))
            w = ls.iterate
            x = w[:model.n]
            y = w[model.n:]
            f = model.obj(x)
            g = model.grad(x)
            J = model.jop(x)
github PythonOptimizers / NLP.py / nlp / model / linemodel.py View on Github
        :keywords:
            :x: full-space x+td if that vector has already been formed.
        """
        return self.jac(t, x=x) * v

    def jtprod(self, t, u, x=None):
        u"""Transposed-Jacobian-vector product γ'(t)ᵀ u with u ∈ ℝᵐ.

        :keywords:
            :x: full-space x+td if that vector has already been formed.
        """
        return np.dot(self.jac(t, x=x), u)


class C2LineModel(C1LineModel):
    u"""Restriction of a C² objective function to a line.

    If f: ℝⁿ → ℝ, x ∈ ℝⁿ and d ∈ ℝⁿ (d≠0) is a fixed direction, an instance
    of this class is a model representing the function f restricted to
    the line x + td, i.e., the function ϕ: ℝ → ℝ defined by

        ϕ(t) := f(x + td).

    The function f is assumed to be C², i.e., values and first and second
    derivatives of ϕ are defined.
    """

    def hess(self, t, z, x=None):
        u"""Evaluate ϕ"(t) = dᵀ ∇²L(x + td, z) d.

        :keywords:
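The hess docstring above states ϕ″(t) = dᵀ ∇²L(x + td, z) d, which is the chain rule applied twice to ϕ(t) = f(x + td) (with the Lagrangian Hessian playing the role of ∇²f in the constrained case). The snippet below is a quick plain-NumPy sanity check of that formula, independent of the library: for a quadratic f(x) = ½ xᵀAx the curvature of the restriction is dᵀAd for every t, and a central finite difference reproduces it.

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4))
A = A.T @ A                          # symmetric Hessian of f(x) = 1/2 xᵀ A x
x = rng.standard_normal(4)
d = rng.standard_normal(4)

def phi(t):
    """ϕ(t) = f(x + t d) for the quadratic f above."""
    xt = x + t * d
    return 0.5 * xt @ A @ xt

h = 1e-4
fd_curv = (phi(h) - 2.0 * phi(0.0) + phi(-h)) / h ** 2   # finite-difference ϕ″(0)
exact_curv = d @ A @ d                                   # dᵀ ∇²f d, constant in t

print(abs(fd_curv - exact_curv))     # agrees up to truncation/round-off error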
github PythonOptimizers / NLP.py / nlp / optimize / regsqp / new_regsqp.py View on Github
            self.merit.xk = x.copy()

            # compute step
            self.assemble_linear_system(x, y)
            rhs = self.assemble_rhs(g, J, y, c)

            status, short_status, solved, dx, _ = \
                self.solve_linear_system(rhs, J=J)
            assert solved

            if not solved:
                failure = True
                continue

            # Step 4: Armijo backtracking linesearch
            line_model = C1LineModel(self.merit, x, dx)
            slope = np.dot(gphi, dx)

            self.log.debug(u"ϕ(x) = %9.2e, ∇ϕᵀΔx = %9.2e", phi, slope)
            ls = ArmijoLineSearch(line_model, bkmax=50,
                                  decr=1.75, value=phi, slope=slope)
            # ls = ArmijoWolfeLineSearch(line_model, step=1.0, bkmax=10,
            #                            decr=1.75, value=phi, slope=slope)
            # ls = StrongWolfeLineSearch(
            #     line_model, value=phi, slope=slope, gtol=0.1)
            # ls = QuadraticCubicLineSearch(line_model, bkmax=50,
            #                               value=phi, slope=slope)

            try:
                for step in ls:
                    self.log.debug(ls_fmt, step, ls.trial_value)
github PythonOptimizers / NLP.py / nlp / model / linemodel.py View on Github
            :x: Numpy array
            :d: Numpy array assumed to be nonzero (no check is performed).
        """
        name = "line-" + model.name
        kwargs.pop("name", None)
        kwargs.pop("Lvar", None)
        kwargs.pop("Uvar", None)
        kwargs.pop("Lcon", None)
        kwargs.pop("Ucon", None)
        pos = where(d > 0)
        neg = where(d < 0)
        tmax = Min((model.Uvar[pos] - x[pos]) / d[pos])
        tmax = min(tmax, Min((model.Lvar[neg] - x[neg]) / d[neg]))
        tmin = Max((model.Lvar[pos] - x[pos]) / d[pos])
        tmin = max(tmin, Max((model.Uvar[neg] - x[neg]) / d[neg]))
        super(C1LineModel, self).__init__(1,
                                          m=model.ncon,
                                          name=name,
                                          x0=kwargs.get("x0", 0.0),
                                          Lvar=np.array([tmin]),
                                          Uvar=np.array([tmax]),
                                          Lcon=model.Lcon,
                                          Ucon=model.Ucon,
                                          **kwargs)
        if tmin > tmax:
            # no intersection between the original bounds and the line
            self.logger.warn("restricted model is infeasible")
        self.__x = x
        self.__d = d
        self.__f = None  # most recent objective value of `model`
        self.__g = None  # most recent objective gradient of `model`
        self.__c = None  # most recent constraint values of `model`
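
The constructor above also intersects the line with the original bound constraints: it computes the largest interval [tmin, tmax] on which x + td satisfies Lvar ≤ x + td ≤ Uvar, and warns if that interval is empty. The snippet below is a plain-NumPy illustration of that breakpoint computation with the 0 ≤ x ≤ 1 bounds used in the tests; it is not library code and uses np.min/np.max directly instead of the library's Min/Max helpers (which presumably also cover empty index sets).

import numpy as np

Lvar, Uvar = np.zeros(2), np.ones(2)      # bounds 0 ≤ x ≤ 1, as in BoundedRosenbrock above
x = np.array([0.5, 0.5])                  # a point strictly inside the bounds
d = np.array([1.0, -1.0])                 # one increasing and one decreasing component

pos, neg = np.where(d > 0), np.where(d < 0)
tmax = min(np.min((Uvar[pos] - x[pos]) / d[pos]),
           np.min((Lvar[neg] - x[neg]) / d[neg]))
tmin = max(np.max((Lvar[pos] - x[pos]) / d[pos]),
           np.max((Uvar[neg] - x[neg]) / d[neg]))

print(tmin, tmax)                         # -> -0.5 0.5: x + t d stays feasible for t in [-0.5, 0.5]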