How to use the nlp.tools.norms.norm2 function in nlp

To help you get started, we’ve selected a few norm2 examples, based on popular ways it is used in public projects.
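
norm2 computes the Euclidean (l2) norm of a vector. As a rough mental model, it behaves like the NumPy sketch below; this is an illustration, not NLP.py's actual implementation.

import numpy as np

def norm2(x):
    """Return the Euclidean norm ||x||_2 = sqrt(sum(x_i ** 2))."""
    return np.sqrt(np.dot(x, x))

print(norm2(np.array([3.0, 4.0])))  # 5.0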


github PythonOptimizers / NLP.py / nlp / optimize / cqp.py
        self.log.debug('Solving linear system')
        self.LBL.solve(rhs)
        self.LBL.refine(rhs, tol=itref_threshold, nitref=nitrefmax)

        # Collect statistics on the linear system solve.
        self.cond_history.append((self.LBL.cond, self.LBL.cond2))
        self.berr_history.append((self.LBL.berr, self.LBL.berr2))
        self.derr_history.append(self.LBL.dirError)
        self.nrms_history.append((self.LBL.matNorm, self.LBL.xNorm))
        self.lres_history.append(self.LBL.relRes)

        # Estimate matrix l2-norm condition number.
        if self.condest:
            rhsNorm = norm2(rhs)
            solnNorm = norm2(self.LBL.x)
            Hop = PysparseLinearOperator(self.H, symmetric=True)
            normH, _ = normest(Hop, tol=1.0e-3)
            if rhsNorm > 0 and solnNorm > 0:
                self.condest_history.append(solnNorm * normH / rhsNorm)
            else:
                self.condest_history.append(1.)
            self.normest_history.append(normH)

        nr = norm2(self.LBL.residual)
        return (self.LBL.x, nr, self.LBL.neig)
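
In this snippet, norm2 feeds a cheap estimate of the l2 condition number of the system matrix: for H x = rhs, ||x|| / ||rhs|| <= ||H^-1||, so ||H|| * ||x|| / ||rhs|| is a lower bound on cond(H). A dense NumPy illustration of the same estimate, with np.linalg.norm(H, 2) standing in for the snippet's normest:

import numpy as np

# Toy symmetric system; in the snippet H is sparse and normH comes
# from a norm estimator rather than an exact computation.
H = np.array([[4.0, 1.0],
              [1.0, 3.0]])
b = np.array([1.0, 2.0])
x = np.linalg.solve(H, b)

normH = np.linalg.norm(H, 2)
condest = np.linalg.norm(x) * normH / np.linalg.norm(b)
print(condest, np.linalg.cond(H))  # the estimate is a lower bound
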
github PythonOptimizers / NLP.py / nlp / optimize / regsqp / new_regsqp.py
        if not full_rank:
            self.log.info("unable to regularize sufficiently")
            status = '    Unable to regularize sufficiently.'
            short_status = 'degn'
            solved = False
            dx = None
            dy = None
            return status, short_status, solved, dx, dy

        self.prox_last = self.merit.prox
        self.LBL.solve(rhs)
        self.LBL.refine(rhs, nitref=nitref)
        (dx, dy) = self.get_dx_dy(self.LBL.x)
        self.log.debug("residual norm = %8.2e, ‖Δx‖ = %8.2e, ‖Δy‖ = %8.2e",
                       norm2(self.LBL.residual), norm2(dx), norm2(dy))

        # only for debugging, should be removed later
        J = self.K[nvar:, :nvar]
        HprIpdjj = np.dot(self.K[:nvar, :nvar] * dx, dx)
        HprIpdjj += self.merit.penalty * np.dot(dx, (J * dx) * J)
        if HprIpdjj <= 1e-8 * np.dot(dx, dx):
            self.log.error("not sufficiently positive definite: %6.2e <= %6.2e",
                           HprIpdjj,
                           1e-8 * np.dot(dx, dx))
            assert HprIpdjj > 1e-8 * np.dot(dx, dx)

        status = None
        short_status = None
        solved = True
        return status, short_status, solved, dx, dy
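
The debugging block above checks curvature along the step: dx'(H + penalty * J'J) dx must be safely positive relative to ||dx||^2. A dense sketch of the same test; H, J, dx, and rho below are toy values, not NLP.py data:

import numpy as np

rho = 10.0
H = np.array([[2.0, 0.0],
              [0.0, 1.0]])
J = np.array([[1.0, 1.0]])
dx = np.array([0.5, -0.25])

# dx' H dx + rho * ||J dx||^2, the quantity called HprIpdjj above.
curvature = dx @ (H @ dx) + rho * np.dot(J @ dx, J @ dx)
assert curvature > 1e-8 * np.dot(dx, dx), "not sufficiently positive definite"
print(curvature)
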
github PythonOptimizers / NLP.py / nlp / optimize / regsqp / new_regsqp.py
            self.log.debug(u"ϕ(x) = %9.2e, ∇ϕᵀΔx = %9.2e", phi, slope)
            ls = ArmijoLineSearch(line_model, bkmax=50,
                                  decr=1.75, value=phi, slope=slope)
            # ls = ArmijoWolfeLineSearch(line_model, step=1.0, bkmax=10,
            #                            decr=1.75, value=phi, slope=slope)
            # ls = StrongWolfeLineSearch(
            #     line_model, value=phi, slope=slope, gtol=0.1)
            # ls = QuadraticCubicLineSearch(line_model, bkmax=50,
            #                               value=phi, slope=slope)

            try:
                for step in ls:
                    self.log.debug(ls_fmt, step, ls.trial_value)

                self.log.debug('step norm: %8.2e', norm2(x - ls.iterate))
                x = ls.iterate
                f = model.obj(x)
                g = model.grad(x)
                J = model.jop(x)
                c = model.cons(x) - model.Lcon
                cnorm = norm2(c)
                gL = g - J.T * y
                y_al = y - c * self.merit.penalty
                phi = f - np.dot(y, c) + 0.5 * self.merit.penalty * cnorm**2
                gphi = g - J.T * y_al
                gphi_norm = norm2(gphi)

                if gphi_norm <= self.theta * gLnorm0 + 0.5 * self.epsilon:
                    self.log.debug('optimality improved sufficiently')
                    if cnorm <= self.theta * cnorm0 + 0.5 * self.epsilon:
                        self.log.debug('feasibility improved sufficiently')
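
After the line search, the snippet recomputes the augmented-Lagrangian merit phi(x) = f(x) - y'c(x) + (penalty/2) * ||c(x)||^2 and its gradient g - J'(y - penalty * c). A toy NumPy check of those two formulas (all values illustrative):

import numpy as np

penalty = 10.0
f = 1.5
g = np.array([1.0, -2.0])
c = np.array([0.1])            # constraint residual, cons(x) - Lcon
J = np.array([[1.0, 1.0]])     # constraint Jacobian
y = np.array([0.5])

cnorm = np.linalg.norm(c)
phi = f - np.dot(y, c) + 0.5 * penalty * cnorm**2
y_al = y - penalty * c         # first-order multiplier estimate
gphi = g - J.T @ y_al
print(phi, gphi)               # 1.5 and [1.5, -1.5]
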
github PythonOptimizers / NLP.py / nlp / optimize / auglag.py
            self.log.info(self.format0, self.iter, self.f, self.pg0,
                          self.cons0, al_model.penalty, "", "", self.omega,
                          self.eta)

        while not (exitOptimal or exitIter):
            self.iter += 1

            # Perform bound-constrained minimization
            bc_solver = self.setup_bc_solver()
            bc_solver.solve()
            self.x = bc_solver.x.copy()  # may not be useful.
            self.niter_total += bc_solver.iter + 1

            dL = al_model.dual_feasibility(self.x)
            PdL = self.project_gradient(self.x, dL)
            PdL_norm_new = norm(PdL)
            convals_new = slack_model.cons(self.x)

            # Specific handling for the case where the original NLP is
            # unconstrained
            if slack_model.m == 0:
                cons_norm_new = 0.
            else:
                cons_norm_new = norm(convals_new)

            # Update penalty parameter or multipliers based on result
            if cons_norm_new <= np.maximum(self.eta, self.eta_opt):

                bc_solver_status = self.check_bc_solver_status(bc_solver.status)
                self.update_multipliers(convals_new, bc_solver_status)

                dL = al_model.dual_feasibility(self.x)
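
This branch follows the classical augmented-Lagrangian outer iteration: when the constraint norm is small enough, update the multipliers and tighten the tolerances; otherwise increase the penalty. A hedged sketch of that update rule; the exponents and the factor 10 are the textbook LANCELOT-style choices, not necessarily NLP.py's constants:

import numpy as np

def outer_update(y, cons, penalty, eta, omega):
    # cons: constraint values; eta/omega: feasibility/optimality tolerances.
    if np.linalg.norm(cons) <= eta:
        y = y - penalty * cons       # first-order multiplier update
        eta /= penalty**0.9          # tighten feasibility tolerance
        omega /= penalty             # tighten optimality tolerance
    else:
        penalty *= 10.0              # not feasible enough: raise the penalty
        eta = 1.0 / penalty**0.1
        omega = 1.0 / penalty
    return y, penalty, eta, omega
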
github PythonOptimizers / NLP.py / nlp / optimize / cqp.py
        if self.verbose:
            log.info('Max column scaling factor = %8.2e' % np.max(col_scale))

        # Apply column scaling to A and c.
        values /= col_scale[jcol]
        self.c[:self.qp.original_n] /= col_scale[:self.qp.original_n]

        if self.verbose:
            log.info('Smallest and largest elements of A after scaling: ')
            log.info('%8.2e %8.2e' % (np.min(np.abs(values)),
                                      np.max(np.abs(values))))

        # Overwrite A with scaled values.
        self.A.put(values, irow, jcol)
        self.normA = norm2(values)   # Frobenius norm of A.

        # Apply scaling to Hessian matrix Q.
        (values, irow, jcol) = self.Q.find()
        values /= col_scale[irow]
        values /= col_scale[jcol]
        self.Q.put(values, irow, jcol)
        self.normQ = norm2(values)  # Frobenius norm of Q

        # Save row and column scaling.
        self.row_scale = row_scale
        self.col_scale = col_scale

        self.prob_scaled = True

        return
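
Here norm2 is applied to the array of nonzero entries of a sparse matrix: the l2 norm of the flattened values is exactly the Frobenius norm, which is what the normA and normQ comments claim. A quick SciPy check of that identity:

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

A = sp.random(5, 4, density=0.4, format="coo", random_state=0)
print(np.linalg.norm(A.data))   # l2 norm of the nonzero values
print(spla.norm(A, "fro"))      # Frobenius norm: the same number
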
github PythonOptimizers / NLP.py / nlp / optimize / regsqp / new_regsqp_BFGS.py
        self.log.debug('Solving linear system')
        solver = self.iterative_solver(J.T)
        N = 1e-8 * self.Im
        dy_tilde, istop, itn, normr, normar, normA, condA, normx = \
            solver.solve(-shifted_rhs, M=self.HinvOp, N=N, damp=1.0,
                         itnlim=1000000, show=True)

        dx, dy = self.get_dx_dy(dy_tilde, J, shifted_rhs, rhs)

        xs = x + dx
        ys = y + dy
        gs = model.grad(xs)
        Js = model.jop(xs)
        cs = model.cons(xs) - model.Lcon
        gLs = gs - Js.T * ys
        Fnorms = norm2(gLs) + norm2(cs)
        if Fnorms < Fnorm:
            self.log.debug("improved initial point accepted")
            x += dx
            y += dy
            Fnorm = Fnorms
            g = gs.copy()
            J = model.jop(x)
            c = cs.copy()
            self.f = f = model.obj(x)
            self.cnorm = cnorm = norm2(c)
            self.gLnorm = gLnorm = norm2(gLs)

            try:
                self.post_iteration(x, gLs)
            except UserExitRequest:
                self.status = "User exit"
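
The eight values unpacked from solver.solve match the LSMR interface. A stand-in using SciPy's lsmr on a toy regularized least-squares problem; the snippet's preconditioners (M, N) have no direct SciPy counterpart and are omitted here:

import numpy as np
from scipy.sparse.linalg import lsmr

A = np.array([[1.0, 0.0],
              [1.0, 1.0],
              [0.0, 2.0]])
b = np.array([1.0, 2.0, 3.0])

# damp=1.0 solves min ||Ax - b||^2 + ||x||^2, as in the snippet's call.
x, istop, itn, normr, normar, normA, condA, normx = lsmr(A, b, damp=1.0)
print(x, istop, normr)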