How to use the sciunit.models.ConstModel class in sciunit

To help you get started, we've selected a few sciunit examples based on popular ways ConstModel is used in public projects.
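
ConstModel is a trivial model that wraps a single constant and returns it from produce_number(). As a quick orientation before the excerpts below, here is a minimal sketch of that basic pattern (the value and name are arbitrary, chosen only for illustration):

from sciunit.models import ConstModel

# ConstModel wraps one constant; the optional second argument is a model name.
model = ConstModel(2.5, "const-2.5")
print(model.produce_number())  # prints 2.5 every time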


From scidash/sciunit, file sciunit/unit_test/core_tests.py:
def test_rangetest(self):
        import sciunit
        from sciunit.tests import RangeTest

        # RangeTest passes when the model's produced number lies within [2, 3].
        range_2_3_test = RangeTest(observation=[2, 3])
        one_model = sciunit.models.ConstModel(2.5)
        self.assertTrue(range_2_3_test.check_capabilities(one_model))

        # judge() runs the test on the model and returns a BooleanScore.
        score = range_2_3_test.judge(one_model)
        self.assertTrue(isinstance(score, sciunit.scores.BooleanScore))
        self.assertEqual(score.score, True)
        self.assertTrue(score.test is range_2_3_test)
        self.assertTrue(score.model is one_model)
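
The test above exercises the passing case. For contrast, here is a hedged sketch of the failing case, assuming RangeTest scores False whenever the produced number falls outside the observed range:

from sciunit.models import ConstModel
from sciunit.tests import RangeTest

range_2_3_test = RangeTest(observation=[2, 3])
too_big_model = ConstModel(5.0, "too-big")

score = range_2_3_test.judge(too_big_model)
print(type(score).__name__)  # BooleanScore, as in the test above
print(score.score)           # presumably False, since 5.0 lies outside [2, 3]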
From scidash/sciunit, file sciunit/unit_test/core_tests.py:
        class NumberTest_M2M(sciunit.TestM2M):
            def __init__(self, observation=None, name="ValueTest-M2M"):
                sciunit.TestM2M.__init__(self, observation, name)
                self.required_capabilities += (ProducesNumber,)

            def generate_prediction(self, model, verbose=False):
                """Implementation of sciunit.Test.generate_prediction."""
                prediction = model.produce_number()
                return prediction

            def compute_score(self, prediction1, prediction2):
                """Implementation of sciunit.Test.score_prediction."""
                score = sciunit.scores.FloatScore(prediction1 - prediction2)
                score.description = "Difference between model predictions"
                return score

        myModel1 = ConstModel(100.0, "Model1")
        myModel2 = ConstModel(110.0, "Model2")
        myTest = NumberTest_M2M(observation=95.0)
        myScore = myTest.judge([myModel1, myModel2])

        # Test model vs observation; different ways of specifying individual scores
        self.assertEqual(myScore[myTest][myModel1], -5.0)
        self.assertEqual(myScore[myModel1][myTest], 5.0)
        self.assertEqual(myScore["observation"][myModel2], -15.0)
        self.assertEqual(myScore[myModel2]["observation"], 15.0)
        self.assertEqual(myScore[myTest][myTest], 0.0)
        self.assertEqual(myScore["observation"]["observation"], 0.0)

        # Test model vs model; different ways of specifying individual scores
        self.assertEqual(myScore[myModel1][myModel2], -10.0)
        self.assertEqual(myScore[myModel2][myModel1], 10.0)
        self.assertEqual(myScore["Model1"][myModel2], -10.0)
        self.assertEqual(myScore["Model2"][myModel1], 10.0)
From scidash/sciunit, file sciunit/unit_test/core_tests.py:
def test_regular_models(self):
        from sciunit.models import ConstModel, UniformModel, SharedModel

        # ConstModel always produces the constant it was constructed with.
        m = ConstModel(3)
        self.assertEqual(m.produce_number(), 3)

        # UniformModel produces a random number drawn from the given range.
        m = UniformModel(3, 4)
        self.assertTrue(3 < m.produce_number() < 4)
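
Both built-in models above satisfy the ProducesNumber capability that these tests require. If you want to judge a model of your own in the same way, a minimal sketch looks like the following (MeanModel is a hypothetical class invented here; it assumes ProducesNumber can be imported from sciunit.capabilities and that sciunit.Model accepts a name keyword argument):

import sciunit
from sciunit.capabilities import ProducesNumber

class MeanModel(sciunit.Model, ProducesNumber):
    """Hypothetical model that produces the mean of a list of numbers."""

    def __init__(self, data, name=None):
        self.data = list(data)
        super(MeanModel, self).__init__(name=name)

    def produce_number(self):
        # Satisfies the ProducesNumber capability used by RangeTest and TestM2M.
        return sum(self.data) / float(len(self.data))

m = MeanModel([2.0, 3.0], name="mean-model")
print(m.produce_number())  # 2.5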
From scidash/sciunit, file sciunit/unit_test/core_tests.py:
def test_versioned(self):
        from sciunit.utils import Versioned
        from sciunit.models import ConstModel

        # The Versioned mixin exposes the git commit hash (.version) and the
        # repository's remote URL (.remote_url) for the model's source code.
        class VersionedModel(ConstModel, Versioned):
            pass

        m = VersionedModel(37)
        print("Commit hash is %s" % m.version)
        print("Remote URL is %s" % m.remote_url)
        self.assertTrue('sciunit' in m.remote_url)