How to use nlp - 10 common examples

To help you get started, we’ve selected a few nlp examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

Example from the PythonOptimizers/NLP.py project, file tests/tools/test_dercheck.py (view on GitHub).
model = Rosenbrock(request.param)
    x = np.ones(model.nvar)
    x[1::2] = -1
    dcheck = DerivativeChecker(model, x, tol=1.0e-4)
    dcheck.check()
    dcheck.check(cheap_check=True, hess=False)
    return dcheck


def test_rosenbrock(rosenbrock_checker):
    assert (len(rosenbrock_checker.grad_errs) == 0)
    assert (len(rosenbrock_checker.hess_errs) == 0)
    assert (len(rosenbrock_checker.cheap_grad_errs) == 0)


class Erroneous(NLPModel):

    def __init__(self, nvar, **kwargs):
        """Build the deliberately faulty model; the constraint count is fixed at 1."""
        # Discard any caller-supplied ``m``; this model always has m=1.
        if "m" in kwargs:
            del kwargs["m"]
        super(Erroneous, self).__init__(nvar, m=1, **kwargs)

    def obj(self, x):
        """Objective value: half the squared Euclidean norm of x."""
        sq_norm = np.dot(x, x)
        return sq_norm / 2.0

    def grad(self, x):
        """Gradient of obj — with a deliberate mistake in component 0.

        The correct gradient of 0.5 * x'x is x itself; the extra +1 is the
        intentional error this class exists to exercise.
        """
        wrong = np.array(x, copy=True)
        wrong[0] = wrong[0] + 1  # intentional bug: should be left as x[0].
        return wrong

    def hess(self, x, *args, **kwargs):
        obj_weight = kwargs.get('obj_weight', 1.0)
        H = np.eye(self.nvar)
Example from the PythonOptimizers/NLP.py project, file tests/model/test_amplmodel.py (view on GitHub).
def test_cons_scaling(self):
        """Exercise constraint scaling and its reset on an AMPL model."""
        model = AmplModel(self.model_name)
        config_logger("nlp.der",
                      "%(name)-10s %(levelname)-8s %(message)s",
                      level=logging.DEBUG)
        checker = DerivativeChecker(model, model.x0, tol=1e-5)
        checker.check(hess=True, chess=True)

        # No derivative errors before scaling is applied.
        assert len(checker.jac_errs) == 0
        assert len(checker.hess_errs) == 0
        assert all(len(checker.chess_errs[j]) == 0
                   for j in xrange(model.ncon))

        # With g_max=40 the single constraint gets a unit scale factor.
        model.compute_scaling_cons(g_max=40.)
        assert np.allclose(model.scale_con, np.array([1.]))
        assert np.allclose(model.cons(model.x0), np.array([29.0]))
        assert np.allclose(model.jop(model.x0).to_array(),
                           np.array([[40., 4.]]))

        # Resetting removes the scaling entirely.
        model.compute_scaling_cons(reset=True)
        assert model.scale_con is None
Example from the PythonOptimizers/NLP.py project, file tests/model/test_amplmodel.py (view on GitHub).
def test_obj_scaling(self):
        """Exercise objective scaling and its reset on an AMPL model."""
        model = AmplModel(self.model_name)
        config_logger("nlp.der",
                      "%(name)-10s %(levelname)-8s %(message)s",
                      level=logging.DEBUG)
        checker = DerivativeChecker(model, model.x0, tol=1e-5)
        checker.check(hess=True, chess=True)

        # No derivative errors before scaling is applied.
        assert len(checker.grad_errs) == 0
        assert len(checker.hess_errs) == 0
        assert all(len(checker.chess_errs[j]) == 0
                   for j in xrange(model.ncon))

        # With g_max=1 the objective gets a unit scale factor.
        model.compute_scaling_obj(g_max=1.)
        assert model.obj(model.x0) == -0.39056208756589972
        assert np.allclose(model.grad(model.x0), np.array([0.8, -1.]))
        assert model.scale_obj == 1.

        # Resetting removes the scaling entirely.
        model.compute_scaling_obj(reset=True)
        assert model.scale_obj is None
Example from the PythonOptimizers/NLP.py project, file tests/tools/test_nullvector.py (view on GitHub).
def test_add(self):
        """Addition between NullVectors, and with a plain scalar."""
        other_val = np.random.random()
        other = NullVector(self.n, other_val, dtype=np.float)

        total = other + self.v
        assert isinstance(total, NullVector)
        assert len(total) == self.n
        assert total.value == np.float(self.val) + np.float(other_val)
        assert total.dtype == np.result_type(self.v.dtype, other.dtype)

        # Adding vectors of mismatched lengths must raise.
        other = NullVector(self.n + 1, other_val, dtype=np.float)
        assert_raises(ValueError, lambda z: z + self.v, other)

        total = other + 2
        assert isinstance(total, NullVector)
        assert total.value == other_val + 2

        other += 2
        assert other.value == np.float(other_val) + 2
Example from the PythonOptimizers/NLP.py project, file tests/tools/test_nullvector.py (view on GitHub).
def test_div(self):
        """Division between NullVectors, and by a plain scalar."""
        denom_val = np.random.random() + 1  # shift away from zero
        denom = NullVector(self.n, denom_val, dtype=np.float)

        quot = self.v / denom
        assert isinstance(quot, NullVector)
        assert len(quot) == self.n
        assert quot.value == np.float(self.val) / np.float(denom_val)
        assert quot.dtype == np.result_type(self.v.dtype, denom.dtype)

        # Dividing vectors of mismatched lengths must raise.
        denom = NullVector(self.n + 1, denom_val, dtype=np.float)
        assert_raises(ValueError, lambda z: self.v / z, denom)

        quot = denom / 2
        assert isinstance(quot, NullVector)
        assert quot.value == denom_val / 2

        denom /= 2
        assert denom.value == np.float(denom_val) / 2
Example from the PythonOptimizers/NLP.py project, file tests/tools/test_nullvector.py (view on GitHub).
def test_sub(self):
        """Subtraction between NullVectors, scalars, and ndarrays."""
        other_val = np.random.random()
        other = NullVector(self.n, other_val, dtype=np.float)

        diff = other - self.v
        assert isinstance(diff, NullVector)
        assert len(diff) == self.n
        assert diff.value == np.float(other_val) - np.float(self.val)
        assert diff.dtype == np.result_type(self.v.dtype, other.dtype)

        # Subtracting vectors of mismatched lengths must raise.
        other = NullVector(self.n + 1, other_val, dtype=np.float)
        assert_raises(ValueError, lambda z: z - self.v, other)

        diff = other - 2
        assert isinstance(diff, NullVector)
        assert diff.value == other_val - 2

        other -= 2
        assert other.value == np.float(other_val) - 2

        # ndarray minus NullVector yields a plain ndarray, elementwise.
        arr = np.random.random(self.n)
        diff = arr - self.v
        assert isinstance(diff, np.ndarray)
        assert len(diff) == self.n
        assert all(diff[i] == arr[i] - self.val for i in range(self.n))
Example from the PythonOptimizers/NLP.py project, file tests/tools/test_nullvector.py (view on GitHub).
def test_order(self):
        """Equality comparison yields the indices where values match."""
        same = NullVector(self.n, self.val, dtype=np.float)
        assert_equal(same == self.v, range(self.n))

        # A different value matches nowhere.
        shifted = NullVector(self.n, self.val + 1, dtype=np.float)
        assert_equal(shifted == self.v, [])

        # A different length matches nowhere either.
        longer = NullVector(self.n + 1, self.val, dtype=np.float)
        assert_equal(longer == self.v, [])
Example from the PythonOptimizers/NLP.py project, file tests/tools/test_nullvector.py (view on GitHub).
def test_sub(self):
        """Subtraction between NullVectors and with a plain scalar."""
        lhs_val = np.random.random()
        lhs = NullVector(self.n, lhs_val, dtype=np.float)

        result = lhs - self.v
        assert isinstance(result, NullVector)
        assert len(result) == self.n
        assert result.value == np.float(lhs_val) - np.float(self.val)
        assert result.dtype == np.result_type(self.v.dtype, lhs.dtype)

        # Subtracting vectors of mismatched lengths must raise.
        lhs = NullVector(self.n + 1, lhs_val, dtype=np.float)
        assert_raises(ValueError, lambda z: z - self.v, lhs)

        result = lhs - 2
        assert isinstance(result, NullVector)
        assert result.value == lhs_val - 2

        lhs -= 2
        assert lhs.value == np.float(lhs_val) - 2
Example from the PythonOptimizers/NLP.py project, file tests/tools/test_nullvector.py (view on GitHub).
def test_order(self):
        """Equality comparison yields the indices where values match."""
        twin = NullVector(self.n, self.val, dtype=np.float)
        assert_equal(twin == self.v, range(self.n))

        # Different value: no matching positions.
        bumped = NullVector(self.n, self.val + 1, dtype=np.float)
        assert_equal(bumped == self.v, [])

        # Different length: no matching positions.
        stretched = NullVector(self.n + 1, self.val, dtype=np.float)
        assert_equal(stretched == self.v, [])
Example from the PythonOptimizers/NLP.py project, file tests/tools/test_nullvector.py (view on GitHub).
def setUp(self):
        """Create a fresh random NullVector fixture for each test."""
        dim = 10
        value = np.random.random()
        self.n = dim
        self.val = value
        self.v = NullVector(dim, value, dtype=np.float)