How to use the catboost.benchmarks.training_speed.learners.Learner class in catboost

To help you get started, we’ve selected a few catboost examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github catboost / catboost / catboost / benchmarks / training_speed / learners.py View on Github external
num_iterations = params_copy['iterations']
        del params_copy['iterations']

        params = Learner._fit(self, params_copy)
        self.learner = lgb.train(
            params,
            self.train,
            num_boost_round=num_iterations,
            valid_sets=self.test
        )

    def predict(self, n_tree):
        """Score the held-out test set using only the first `n_tree` boosting iterations."""
        model = self.learner
        return model.predict(self.test, num_iteration=n_tree)


class CatBoostLearner(Learner):
    def __init__(self, data, task, metric, use_gpu):
        Learner.__init__(self)

        params = {
            'devices': [0],
            'logging_level': 'Info',
            'use_best_model': False,
            'bootstrap_type': 'Bernoulli',
            'random_seed': RANDOM_SEED
        }

        if use_gpu:
            params['task_type'] = 'GPU'

        if task == 'regression':
            params['loss_function'] = 'RMSE'
github catboost / catboost / catboost / benchmarks / training_speed / learners.py View on Github external
if not os.path.exists(log_dir_name):
            os.makedirs(log_dir_name)

        self.set_train_dir(params, log_filename + 'dir')

        with Logger(log_filename):
            start = time.time()
            self._fit(params)
            elapsed = time.time() - start
            print('Elapsed: ' + str(elapsed))

        return elapsed


class XGBoostLearner(Learner):
    def __init__(self, data, task, metric, use_gpu):
        Learner.__init__(self)
        params = {
            'n_gpus': 1,
            'silent': 0,
            'seed': RANDOM_SEED
        }

        if use_gpu:
            params['tree_method'] = 'gpu_hist'
        else:
            params['tree_method'] = 'hist'

        if task == "regression":
            params["objective"] = "reg:linear"
            if use_gpu:
github catboost / catboost / catboost / benchmarks / training_speed / learners.py View on Github external
def _fit(self, tunable_params):
        params = Learner._fit(self, tunable_params)
        self.model = cat.CatBoost(params)
        self.model.fit(self.train, eval_set=self.test, verbose_eval=True)
github catboost / catboost / catboost / benchmarks / training_speed / learners.py View on Github external
self.default_params = params

    @staticmethod
    def name():
        return 'xgboost'

    def _fit(self, tunable_params):
        """Train an XGBoost booster for `tunable_params['iterations']` rounds.

        The base-class `_fit` merges `tunable_params` into the full parameter
        dict; the trained booster is stored on `self.learner`.
        """
        resolved_params = Learner._fit(self, tunable_params)
        num_rounds = tunable_params['iterations']
        watchlist = [(self.test, 'eval')]
        self.learner = xgb.train(resolved_params, self.train, num_rounds, evals=watchlist)

    def predict(self, n_tree):
        """Score the held-out test set using at most the first `n_tree` trees."""
        booster = self.learner
        return booster.predict(self.test, ntree_limit=n_tree)


class LightGBMLearner(Learner):
    def __init__(self, data, task, metric, use_gpu):
        Learner.__init__(self)

        params = {
            'task': 'train',
            'boosting_type': 'gbdt',
            'verbose': 0,
            'random_state': RANDOM_SEED,
            'bagging_freq': 1
        }

        if use_gpu:
            params["device"] = "gpu"

        if task == "regression":
            params["objective"] = "regression"
github catboost / catboost / catboost / benchmarks / training_speed / learners.py View on Github external
def __init__(self, data, task, metric, use_gpu):
        Learner.__init__(self)
        params = {
            'n_gpus': 1,
            'silent': 0,
            'seed': RANDOM_SEED
        }

        if use_gpu:
            params['tree_method'] = 'gpu_hist'
        else:
            params['tree_method'] = 'hist'

        if task == "regression":
            params["objective"] = "reg:linear"
            if use_gpu:
                params["objective"] = "gpu:" + params["objective"]
        elif task == "multiclass":