How to use the sklearn.linear_model.base.make_dataset function in scikit-learn

To help you get started, we’ve selected a few scikit-learn examples based on popular ways make_dataset is used in public projects.

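Both excerpts below call make_dataset with three positional arguments: the training matrix, the target vector, and per-sample weights. As a quick orientation, here is a minimal, hedged sketch of calling the helper directly. It assumes an older scikit-learn release where the function still lives in sklearn.linear_model.base (newer releases moved it to the private module sklearn.linear_model._base); the variable names are ours, not taken from the examples.

import numpy as np
from sklearn.linear_model.base import make_dataset  # older scikit-learn; newer: sklearn.linear_model._base

rng = np.random.RandomState(0)
X = rng.rand(20, 3)                  # dense float64 design matrix
y = rng.rand(20)                     # float64 targets
sample_weight = np.ones(y.shape[0])  # uniform per-sample weights

# Returns a Cython SequentialDataset wrapper (ArrayDataset for dense input,
# CSRDataset for scipy.sparse CSR input) plus the intercept decay applied
# by the SGD solvers.
dataset, intercept_decay = make_dataset(X, y, sample_weight)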

github angadgill / Parallel-SGD / scikit-learn / sklearn / linear_model / stochastic_gradient.py
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
               pos_weight, neg_weight, sample_weight):
    """Fit a single binary classifier.

    The i'th class is considered the "positive" class.
    """
    # if average is not true, average_coef, and average_intercept will be
    # unused
    y_i, coef, intercept, average_coef, average_intercept = \
        _prepare_fit_binary(est, y, i)
    assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
    dataset, intercept_decay = make_dataset(X, y_i, sample_weight)

    penalty_type = est._get_penalty_type(est.penalty)
    learning_rate_type = est._get_learning_rate_type(learning_rate)

    # XXX should have random_state_!
    random_state = check_random_state(est.random_state)
    # numpy mtrand expects a C long which is a signed 32 bit integer under
    # Windows
    seed = random_state.randint(0, np.iinfo(np.int32).max)

    if not est.average:
        return plain_sgd(coef, intercept, est.loss_function,
                         penalty_type, alpha, C, est.l1_ratio,
                         dataset, n_iter, int(est.fit_intercept),
                         int(est.verbose), int(est.shuffle), seed,
                         pos_weight, neg_weight,
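
In fit_binary above, make_dataset wraps (X, y_i, sample_weight) in the dataset object that plain_sgd iterates over, and the second return value, intercept_decay, scales the intercept updates. The sketch below illustrates that dense/sparse distinction; it is hedged on the same import assumption as before and on the sparse decay constant being the library's SPARSE_INTERCEPT_DECAY (0.01 in the releases these excerpts come from).

import numpy as np
import scipy.sparse as sp
from sklearn.linear_model.base import make_dataset  # older scikit-learn

rng = np.random.RandomState(0)
X_dense = rng.rand(10, 4)
y = rng.rand(10)
w = np.ones(10)

# Dense input -> ArrayDataset and an intercept decay of 1.0;
# sparse CSR input -> CSRDataset and a much smaller decay, so the
# intercept moves more slowly than the sparse feature weights.
_, decay_dense = make_dataset(X_dense, y, w)
_, decay_sparse = make_dataset(sp.csr_matrix(X_dense), y, w)
print(decay_dense, decay_sparse)   # expected: 1.0 and 0.01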

github angadgill / Parallel-SGD / scikit-learn / sklearn / linear_model / stochastic_gradient.py
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
                       sample_weight, n_iter):
        dataset, intercept_decay = make_dataset(X, y, sample_weight)

        loss_function = self._get_loss_function(loss)
        penalty_type = self._get_penalty_type(self.penalty)
        learning_rate_type = self._get_learning_rate_type(learning_rate)

        if self.t_ is None:
            self.t_ = 1.0

        random_state = check_random_state(self.random_state)
        # numpy mtrand expects a C long which is a signed 32 bit integer under
        # Windows
        seed = random_state.randint(0, np.iinfo(np.int32).max)

        if self.average > 0:
            self.standard_coef_, self.standard_intercept_, \
                self.average_coef_, self.average_intercept_ =\
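
Both excerpts sit inside scikit-learn's SGD estimators, so in everyday use you reach make_dataset indirectly by fitting SGDRegressor or SGDClassifier rather than importing it yourself. A small end-user sketch follows; note that current releases use max_iter and tol in place of the n_iter argument seen in these older excerpts.

import numpy as np
from sklearn.linear_model import SGDRegressor

rng = np.random.RandomState(42)
X = rng.rand(100, 5)
y = X @ rng.rand(5) + 0.1 * rng.randn(100)

# Fitting drives _fit_regressor (shown above), which wraps X, y and the
# sample weights with make_dataset before the Cython SGD loop runs.
reg = SGDRegressor(max_iter=1000, tol=1e-3, random_state=0)
reg.fit(X, y)
print(reg.coef_, reg.intercept_)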