How to use the adaptive.utils.cache_latest function in adaptive

To help you get started, we’ve selected a few adaptive examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github python-adaptive / adaptive / adaptive / learner / learnerND.py View on Github external
    @cache_latest
    def loss(self, real=True):
        """Return the largest loss over all simplices, or ``inf`` if none exist.

        XXX: the pending (``real=False``) loss is not implemented yet.
        """
        losses = dict() if self.tri is None else self._losses
        if not losses:
            return float("inf")
        return max(losses.values())
github python-adaptive / adaptive / adaptive / learner / average_learner.py View on Github external
    @cache_latest
    def loss(self, real=True, *, n=None):
        """Return the current loss estimate for the averaged value.

        Parameters
        ----------
        real : bool
            If True, use the number of points already evaluated
            (``self.npoints``); otherwise use ``self.n_requested``.
        n : int, optional (keyword-only)
            Explicit sample count to evaluate the loss at, overriding
            the count derived from ``real``.

        Returns
        -------
        float
            ``np.inf`` when fewer than 2 samples are available, otherwise
            the standard error scaled against the absolute and relative
            tolerances (the larger of the two ratios).
        """
        if n is None:
            n = self.npoints if real else self.n_requested
        # The original had a redundant `else: n = n` branch here; removed.
        if n < 2:
            # Too few samples to compute a meaningful standard error.
            return np.inf
        standard_error = self.std / sqrt(n)
        # NOTE(review): divides by abs(self.mean) — assumes mean != 0 here;
        # confirm callers guarantee this.
        return max(
            standard_error / self.atol, standard_error / abs(self.mean) / self.rtol
        )
github python-adaptive / adaptive / adaptive / learner / balancing_learner.py View on Github external
    @cache_latest
    def loss(self, real=True):
        """Return the worst (maximum) loss among all child learners."""
        return max(self._losses(real))
github python-adaptive / adaptive / adaptive / learner / integrator_learner.py View on Github external
    @cache_latest
    def loss(self, real=True):
        """Distance between the tolerance-scaled integral and the error estimate."""
        scaled_tolerance = abs(self.igral) * self.tol
        return abs(scaled_tolerance - self.err)
github python-adaptive / adaptive / adaptive / learner / learner1D.py View on Github external
    @cache_latest
    def loss(self, real=True):
        """Return the largest interval loss, or ``inf`` when none are known yet."""
        losses = self.losses if real else self.losses_combined
        if not losses:
            return np.inf
        # losses is kept sorted; the item at index 0 holds the worst interval.
        _interval, worst_loss = losses.peekitem(0)
        return worst_loss
github python-adaptive / adaptive / adaptive / learner / skopt_learner.py View on Github external
    @cache_latest
    def loss(self, real=True):
        """Return ``1 - score`` of the newest model, or ``inf`` with no models."""
        if not self.models:
            return np.inf
        latest_model = self.models[-1]
        # Score the model on its own training data (in-sample error).
        # Not the best estimator of loss, but it is the cheapest.
        return 1 - latest_model.score(self.Xi, self.yi)
github python-adaptive / adaptive / adaptive / learner / learner2D.py View on Github external
    @cache_latest
    def loss(self, real=True):
        """Return the maximum per-triangle loss; ``inf`` until bounds are done."""
        if not self.bounds_are_done:
            return np.inf
        if real:
            ip = self.interpolator(scaled=True)
        else:
            ip = self._interpolator_combined()
        return self.loss_per_triangle(ip).max()