How to use the sciunit.scores.base.Score class in sciunit

To help you get started, we’ve selected a few sciunit examples, based on popular ways it is used in public projects.

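Before diving into the snippets, here is a minimal usage sketch, assuming sciunit is installed; the observation and prediction dictionaries are made-up example data, and the import path follows the files shown below.

from sciunit.scores.complete import BooleanScore, RatioScore

# Wrap a raw value in a Score subclass; norm_score maps it onto [0, 1].
ratio = RatioScore(2.0)
print(ratio)                 # Ratio = 2.00
print(ratio.norm_score)      # between 0 and 1; 1.0 would mean a ratio of exactly 1

# Some score types also provide a compute() classmethod that builds the
# score directly from an observation and a prediction.
boolean = BooleanScore.compute({'value': 3}, {'value': 3})
print(boolean, boolean.norm_score)   # Pass 1.0

# Scores of different types compare through their norm_score.
print(ratio < boolean)       # True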

From scidash/sciunit, sciunit/scores/complete.py (view on GitHub):
    @property
    def norm_score(self) -> float:
        """Return 1.0 for a ratio of 1, falling to 0.0 for extremely small or large values.

        Returns:
            float: The value of the norm score.
        """
        score = math.log10(self.score)
        cdf = (1.0 + math.erf(score / math.sqrt(2.0))) / 2.0
        return 1 - 2*math.fabs(0.5 - cdf)

    def __str__(self):
        return 'Ratio = %.2f' % self.score


class PercentScore(Score):
    """A percent score.

    A float in the range [0, 100.0] where higher is better.
    """

    _description = ('100.0 is considered perfect agreement between the '
                    'observation and the prediction. 0.0 is the worst possible'
                    ' agreement')

    _best = 100.0

    _worst = 0.0

    def _check_score(self, score):
        if not (0.0 <= score <= 100.0):
            raise errors.InvalidScoreError(("Score of %f must be in "
                                            "range 0.0-100.0" % score))

    @property
    def norm_score(self) -> float:
        """Return 1.0 for a percent score of 100, and 0.0 for 0.

        Returns:
            float: The percent score divided by 100.
        """
        return float(self.score)/100

    def __str__(self) -> str:
        return '%.1f%%' % self.score


class FloatScore(Score):
    """A float score.

    A float with any value.
    """

    _allowed_types = (float, pq.Quantity,)

    # The best value is indeterminate without more context.
    # But some float value must be supplied to use methods like Test.ace().
    _best = 0.0

    # The worst value is indeterminate without more context.
    _worst = 0.0

    def _check_score(self, score):
        if isinstance(score, pq.Quantity) and score.size != 1:
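As a quick check of the norm_score implementations above, a small sketch (the values are illustrative):

import quantities as pq
from sciunit.scores.complete import FloatScore, PercentScore, RatioScore

print(PercentScore(75.0).norm_score)   # 0.75: the percent score divided by 100
print(RatioScore(1.0).norm_score)      # 1.0: a ratio of exactly 1 is the best case
print(RatioScore(100.0).norm_score)    # close to 0.0: the ratio is far from 1

# FloatScore also accepts a quantities.Quantity of size 1.
print(FloatScore(3.2 * pq.mV).score)   # the stored quantity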
From scidash/sciunit, sciunit/scores/complete.py (view on GitHub):
    @classmethod
    def compute(cls, observation: dict, prediction: dict) -> 'CohenDScore':
        """Compute a Cohen's d from an observation and a prediction."""
        p_mean = prediction['mean']
        p_std = prediction['std']
        o_mean = observation['mean']
        o_std = observation['std']
        try:  # Try to pool taking samples sizes into account.
            p_n = prediction['n']
            o_n = observation['n']
            s = (((p_n-1)*(p_std**2) + (o_n-1)*(o_std**2))/(p_n+o_n-2))**0.5
        except KeyError:  # If sample sizes are not available.
            s = (p_std**2 + o_std**2)**0.5
        value = (p_mean - o_mean)/s
        value = utils.assert_dimensionless(value)
        return CohenDScore(value)

    def __str__(self) -> str:
        return 'D = %.2f' % self.score


class RatioScore(Score):
    """A ratio of two numbers.

    Usually the prediction divided by
    the observation.
    """

    _allowed_types = (float,)

    _description = ('The ratio between the prediction and the observation')

    _best = 1.0  # A RatioScore of 1.0 is best

    _worst = np.inf

    def _check_score(self, score):
        if score < 0.0:
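A hedged sketch of the Cohen's d computation above; the (observation, prediction) argument order is assumed to match BooleanScore.compute further down, and the numbers are made up:

from sciunit.scores.complete import CohenDScore

observation = {'mean': 10.0, 'std': 2.0, 'n': 25}
prediction = {'mean': 12.0, 'std': 2.0, 'n': 25}

# With equal standard deviations the pooled s is 2.0, so D = (12 - 10) / 2 = 1.0.
d = CohenDScore.compute(observation, prediction)
print(d)   # D = 1.00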
From scidash/sciunit, sciunit/scores/base.py (view on GitHub):
    def color(self, value=None):
        """Turn the score into an RGB color tuple of three 8-bit integers."""
        if value is None:
            value = self.norm_score
        rgb = Score.value_color(value)
        return rgb
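A tiny sketch of the color helper above; the exact RGB values come from Score.value_color, so only the shape of the result is assumed here:

from sciunit.scores.complete import BooleanScore

rgb = BooleanScore(True).color()   # defaults to coloring by norm_score (1.0 here)
print(rgb)                         # a tuple of three 8-bit integers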
From scidash/sciunit, sciunit/scores/base.py (view on GitHub):
    def __lt__(self, other):
        """Compare to another Score (via norm_score) or to a plain value."""
        if isinstance(other, Score):
            result = self.norm_score < other.norm_score
        else:
            result = self.score < other
        return result
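Because comparisons between scores go through norm_score, scores of different types can be sorted together; a small sketch:

from sciunit.scores.complete import BooleanScore, RatioScore

scores = [RatioScore(5.0), BooleanScore(False), RatioScore(1.0)]
ranked = sorted(scores)              # sorted() only needs __lt__, i.e. the norm_score ordering
print([str(s) for s in ranked])      # ['Fail', 'Ratio = 5.00', 'Ratio = 1.00'], worst to best

# Comparing against a plain number falls back to the raw score.
print(RatioScore(5.0) < 10)          # True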
From scidash/sciunit, sciunit/scores/base.py (view on GitHub):
    @classmethod
    def extract_mean_or_value(cls, obs_or_pred, key=None):
        """Extract the mean, value, or given key from a dict, passing non-dicts through."""
        result = None
        if not isinstance(obs_or_pred, dict):
            result = obs_or_pred
        else:
            keys = ([key] if key is not None else []) + ['mean', 'value']
            for k in keys:
                if k in obs_or_pred:
                    result = obs_or_pred[k]
                    break
            if result is None:
                raise KeyError(("%s has neither a mean nor a single "
                                "value" % obs_or_pred))
        return result


class ErrorScore(Score):
    """A score returned when an error occurs during testing."""

    @property
    def norm_score(self):
        return 0.0

    @property
    def summary(self):
        """Summarize the performance of a model on a test."""
        return "== Model %s did not complete test %s due to error '%s'. ==" %\
               (str(self.model), str(self.test), str(self.score))

    def _describe(self):
        return self.summary

    def __str__(self):
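A hedged sketch of the mean-or-value helper shown at the top of this snippet; the @classmethod decorator and the name extract_mean_or_value are inferred from the fragment's parameters and the sciunit source, so treat them as assumptions:

from sciunit.scores.base import Score

print(Score.extract_mean_or_value({'mean': 4.0, 'std': 1.0}))   # 4.0
print(Score.extract_mean_or_value({'value': 7.0}))              # 7.0
print(Score.extract_mean_or_value(3.5))                         # 3.5, non-dicts pass through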
From scidash/sciunit, sciunit/scores/base.py (view on GitHub):
    def __init__(self, score, related_data=None):
        """Abstract base class for scores.

        Args:
            score (int, float, bool): A raw value to wrap in a Score class.
            related_data (dict, optional): Artifacts to store with the score.
        """
        self.check_score(score)
        if related_data is None:
            related_data = {}
        self.score, self.related_data = score, related_data
        if isinstance(score, Exception):
            # Set to error score to use its summarize().
            self.__class__ = ErrorScore
        super(Score, self).__init__()
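A minimal sketch of the constructor above; the related_data contents are made up:

from sciunit.scores.complete import BooleanScore

score = BooleanScore(True, related_data={'notes': 'prediction within tolerance'})
print(score.score)          # True
print(score.related_data)   # {'notes': 'prediction within tolerance'}

Passing an Exception instead of a value re-classes the instance as an ErrorScore, which is how errors raised during testing end up being reported.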
From scidash/sciunit, sciunit/scores/complete.py (view on GitHub):
    @classmethod
    def compute(cls, observation: dict, prediction: dict) -> 'BooleanScore':
        """Compute whether the observation equals the prediction."""
        return BooleanScore(observation == prediction)

    @property
    def norm_score(self) -> float:
        """Return 1.0 for a True score and 0.0 for False score.

        Returns:
            float: 1.0 for a True score and 0.0 for False score.
        """
        return 1.0 if self.score else 0.0

    def __str__(self) -> str:
        return 'Pass' if self.score else 'Fail'


class ZScore(Score):
    """A Z score.

    A float indicating standardized difference
    from a reference mean.
    """

    _allowed_types = (float,)

    _description = ('The difference between the means of the observation and '
                    'prediction divided by the standard deviation of the '
                    'observation')

    _best = 0.0  # A Z-Score of 0.0 is best

    _worst = np.inf  # A Z-score of infinity (or negative infinity) is worst
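The _description above spells out the formula, so a Z score can be built by hand as a quick sketch (sciunit's own ZScore.compute may do this for you; the dictionaries are made-up data):

from sciunit.scores.complete import ZScore

observation = {'mean': 10.0, 'std': 2.0}
prediction = {'mean': 12.0}

# (prediction mean - observation mean) / observation standard deviation
z = ZScore((prediction['mean'] - observation['mean']) / observation['std'])
print(z.score)   # 1.0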
From scidash/sciunit, sciunit/scores/incomplete.py (view on GitHub):
"""Score types for tests that did not complete successfully.

These include details about the various possible reasons
that a particular combination of model and test could not be completed.
"""

from .base import Score
from sciunit.errors import InvalidScoreError


class NoneScore(Score):
    """A `None` score.

    Usually indicates that the model has not been
    checked to see if it has the capabilities required by the test."""

    def __init__(self, score, related_data=None):
        if isinstance(score, str) or score is None:
            super(NoneScore, self).__init__(score, related_data=related_data)
        else:
            raise InvalidScoreError("Score must be a string or None")
           
    @property
    def norm_score(self):
        return None

    def __str__(self):
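A minimal sketch of NoneScore; per the constructor above it accepts only a string or None:

from sciunit.scores.incomplete import NoneScore

skipped = NoneScore(None)
print(skipped.norm_score)   # None

try:
    NoneScore(3.14)
except Exception as e:
    print(e)                # Score must be a string or None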
From scidash/sciunit, sciunit/scores/complete.py (view on GitHub):
"""

from __future__ import division

import math

import numpy as np
import quantities as pq

from sciunit import utils
from sciunit import errors
from .base import Score
from .incomplete import InsufficientDataScore


class BooleanScore(Score):
    """A boolean score, which must be True or False."""

    _allowed_types = (bool,)

    _description = ('True if the observation and prediction were '
                    'sufficiently similar; False otherwise')

    _best = True

    _worst = False

    @classmethod
    def compute(cls, observation: dict, prediction: dict) -> 'BooleanScore':
        """Compute whether the observation equals the prediction.

        Returns: