How to use the ScoreConfig class from catboost.eval.evaluation_result in catboost

To help you get started, we’ve selected a few catboost examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github catboost / catboost / catboost / python-package / catboost / eval / evaluation_result.py View on Github external
def abs_score(level=0.01):
    """Build a ScoreConfig for absolute-score comparison.

    Args:
        level: significance level used as the score threshold (default 0.01).

    Returns:
        A ScoreConfig with ScoreType.Abs and multiplier 1.
    """
    config = ScoreConfig(
        score_type=ScoreType.Abs,
        multiplier=1,
        score_level=level,
    )
    return config
github catboost / catboost / catboost / python-package / catboost / eval / evaluation_result.py View on Github external
def _change_score_config(self, config):
    """Set the active score configuration, invalidating cached comparisons on change.

    Accepts either a ScoreConfig instance or a bare ScoreType, which is
    expanded to the corresponding default ScoreConfig. A value of None
    leaves the current configuration untouched.

    Raises:
        CatBoostError: if a ScoreType other than Abs or Rel is given.
    """
    if config is None:
        return
    if isinstance(config, ScoreType):
        if config == ScoreType.Abs:
            config = ScoreConfig.abs_score()
        elif config == ScoreType.Rel:
            config = ScoreConfig.rel_score()
        else:
            raise CatBoostError("Unknown scoreType {}".format(config))
    if self._score_config != config:
        self._score_config = config
        # Stored comparisons were computed under the old config; drop them.
        self.__clear_comparisons()
github catboost / catboost / catboost / python-package / catboost / eval / evaluation_result.py View on Github external
def __init__(self, case_results):
    """Validate and store a set of per-case evaluation results.

    Args:
        case_results: sequence of case-result objects; at least two are
            required, and all must share the same metric description,
            fold ids, and eval step. The first entry becomes the baseline.

    Raises:
        CatBoostError: on fewer than two results or on inconsistent
            metric/folds/eval-step across the results.
    """
    if len(case_results) <= 1:
        raise CatBoostError("Need at least 2 case results, got {} ".format(len(case_results)))

    self._case_results = dict()
    self._case_comparisons = dict()
    self._cases = [case_result.get_case() for case_result in case_results]

    for case_result in case_results:
        self._case_results[case_result.get_case()] = case_result

    # The first result supplies the reference metric and baseline case.
    self._metric_description = case_results[0].get_metric_description()
    self._baseline_case = case_results[0].get_case()

    self._score_config = ScoreConfig()

    # Iterate values only — the key is not used by the validation (PERF102).
    for case_result in self._case_results.values():
        if case_result.get_metric_description() != self._metric_description:
            raise CatBoostError("Metric names should be equal for all case results")

        if case_result.get_fold_ids() != self.get_fold_ids():
            raise CatBoostError("Case results should be computed on the same folds")

        if case_result.get_eval_step() != self.get_eval_step():
            raise CatBoostError("Eval steps should be equal for different cases")
github catboost / catboost / catboost / python-package / catboost / eval / evaluation_result.py View on Github external
def rel_score(level=0.01):
    """Build a ScoreConfig for relative-score comparison.

    Args:
        level: significance level used as the score threshold (default 0.01).

    Returns:
        A ScoreConfig with ScoreType.Rel and multiplier 100 (percent scale).
    """
    config = ScoreConfig(
        score_type=ScoreType.Rel,
        multiplier=100,
        score_level=level,
    )
    return config
github catboost / catboost / catboost / python-package / catboost / eval / evaluation_result.py View on Github external
def _change_score_config(self, config):
    """Install a new score config; clear cached comparisons if it differs.

    A ScoreType argument is resolved into its default ScoreConfig; passing
    None is a no-op.

    Raises:
        CatBoostError: for an unrecognized ScoreType value.
    """
    if config is None:
        return
    if isinstance(config, ScoreType):
        if config == ScoreType.Abs:
            resolved = ScoreConfig.abs_score()
        elif config == ScoreType.Rel:
            resolved = ScoreConfig.rel_score()
        else:
            raise CatBoostError("Unknown scoreType {}".format(config))
        config = resolved
    if self._score_config != config:
        # Comparisons computed under the previous config are now stale.
        self._score_config = config
        self.__clear_comparisons()