How to use the catboost.eval.log_config.get_eval_logger function in catboost

To help you get started, we’ve selected a few catboost examples, based on popular ways this function is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github catboost / catboost / catboost / python-package / catboost / eval / _fold_models_handler.py View on Github external
# NOTE(review): fragment of FoldModelsHandler._fit_model — the `def` line is not
# shown in this snippet, and the first call below lost its indentation during
# extraction from the original file.
make_dirs_if_not_exists(FoldModelsHandler.__MODEL_DIR)

        # Validate "ignored_features" (if present in the case params) against the
        # pool's column count before training starts, so a bad index fails fast.
        feature_count = pool.num_col()
        if "ignored_features" in case.get_params():
            ignored_features = case.get_params()["ignored_features"]
            if len(ignored_features) and max(ignored_features) >= feature_count:
                # NOTE(review): "wich" is a typo for "which" in this error message.
                raise CatBoostError("Error: input parameter contains feature indices wich are not available in pool: "
                                    "{}\n "
                                    "Check eval_feature set and ignored features options".format(ignored_features))
        get_eval_logger().debug('Learn model {} on fold #{}'.format(str(case), fold_id))
        # Train a CatBoost model with this case's params on the fold's pool,
        # persist it to model_path, and log the wall-clock time of fit + save.
        cur_time = time.time()
        instance = CatBoost(params=case.get_params())
        instance.fit(pool)
        instance.save_model(fname=model_path)

        get_eval_logger().debug('Operation was done in {} seconds'.format(time.time() - cur_time))
        # Wrap the saved model file in a FoldModel handle tied to its case/fold.
        return FoldModel(case, model_path, fold_id)
github catboost / catboost / catboost / python-package / catboost / eval / _fold_models_handler.py View on Github external
:return: Dictionary of models where the key is case and the value is models on learn folds
        """
        # NOTE(review): fragment — the `def` line and docstring opening are not
        # shown in this snippet.
        make_dirs_if_not_exists(FoldModelsHandler.__MODEL_DIR)

        # One model list per case, filled fold by fold below.
        models = {}
        for case in self._cases:
            models[case] = list()

        # Each learn file corresponds to one fold; fold ids continue from
        # fold_id_bias so that folds across batches do not collide.
        for file_num, learn_file in enumerate(learn_files):
            pool = FoldModelsHandler._create_pool(learn_file, self._thread_count)
            fold_id = fold_id_bias + file_num

            # Train every case on this fold's pool and record the fitted model.
            for case in self._cases:
                model_path = os.path.join(FoldModelsHandler.__MODEL_DIR,
                                          FoldModelsHandler._create_model_name(case, fold_id))
                get_eval_logger().debug("For model {} on fold #{} path is {}".format(str(case), fold_id, model_path))
                fold_model = self._fit_model(pool, case, fold_id, model_path)
                get_eval_logger().info("Model {} on fold #{} was fitted".format(str(case), fold_id))
                models[case].append(fold_model)

        return models
github catboost / catboost / catboost / python-package / catboost / eval / _fold_models_handler.py View on Github external
# NOTE(review): this snippet is a duplicate of the previous one on this page
# (same method body, shown without its `def` line; the first call below lost
# its indentation during extraction).
make_dirs_if_not_exists(FoldModelsHandler.__MODEL_DIR)

        # One model list per case, filled fold by fold below.
        models = {}
        for case in self._cases:
            models[case] = list()

        # Each learn file corresponds to one fold; fold ids continue from
        # fold_id_bias so that folds across batches do not collide.
        for file_num, learn_file in enumerate(learn_files):
            pool = FoldModelsHandler._create_pool(learn_file, self._thread_count)
            fold_id = fold_id_bias + file_num

            # Train every case on this fold's pool and record the fitted model.
            for case in self._cases:
                model_path = os.path.join(FoldModelsHandler.__MODEL_DIR,
                                          FoldModelsHandler._create_model_name(case, fold_id))
                get_eval_logger().debug("For model {} on fold #{} path is {}".format(str(case), fold_id, model_path))
                fold_model = self._fit_model(pool, case, fold_id, model_path)
                get_eval_logger().info("Model {} on fold #{} was fitted".format(str(case), fold_id))
                models[case].append(fold_model)

        return models
github catboost / catboost / catboost / python-package / catboost / eval / _fold_models_handler.py View on Github external
def _remove_model_dir():
    """Best-effort removal of the fold-model directory.

    The directory is only removed when it exists; ``os.rmdir`` succeeds only
    on an empty directory. Any OSError (e.g. directory not empty) is logged
    as a warning rather than propagated.
    """
    try:
        model_dir_exists = os.path.exists(FoldModelsHandler.__MODEL_DIR)
        if model_dir_exists:
            os.rmdir(FoldModelsHandler.__MODEL_DIR)
    except OSError as err:
        get_eval_logger().warning(str(err))
github catboost / catboost / catboost / python-package / catboost / eval / _fold_models_handler.py View on Github external
def _remove_models(list_models):
    """Delete every fold model in *list_models* from disk.

    Logs the full list at debug level before asking each model to remove
    its own saved file via ``delete()``.
    """
    logger = get_eval_logger()
    logger.debug('Remove models {}'.format(str(list_models)))
    for fold_model in list_models:
        fold_model.delete()