How to use the sklearn.metrics.roc_auc_score function in sklearn

To help you get started, we’ve selected a few roc_auc_score examples based on popular ways it is used in public projects.

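roc_auc_score computes the area under the ROC curve from true binary labels and predicted scores. Pass probability estimates or decision-function values as the scores, not hard class labels. A minimal sketch with made-up data:

import numpy as np
from sklearn.metrics import roc_auc_score

y_true = np.array([0, 0, 1, 1])             # ground-truth binary labels
y_scores = np.array([0.1, 0.4, 0.35, 0.8])  # predicted probability of the positive class
print(roc_auc_score(y_true, y_scores))      # 0.75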

github slinderman / pyhawkes / experiments / synthetic_comparison.py
    if bfgs_model is not None:
        aucs['bfgs'] = roc_auc_score(A_true,
                                     bfgs_model.W.ravel())

    if sgd_model is not None:
        assert isinstance(sgd_model, DiscreteTimeStandardHawkesModel)
        aucs['sgd'] = roc_auc_score(A_true,
                                     sgd_model.W.ravel())

    if gibbs_samples is not None:
        # Compute ROC based on mean value of W_effective in second half of samples
        Weff_samples = np.array([s.weight_model.W_effective for s in gibbs_samples])
        N_samples    = Weff_samples.shape[0]
        offset       = N_samples // 2
        Weff_mean    = Weff_samples[offset:,:,:].mean(axis=0)

        aucs['gibbs'] = roc_auc_score(A_true, Weff_mean.ravel())

    if gibbs_ss_samples is not None:
        # Compute ROC based on mean value of W_effective in second half of samples
        Weff_samples = np.array([s.weight_model.W_effective for s in gibbs_ss_samples])
        N_samples    = Weff_samples.shape[0]
        offset       = N_samples // 2
        Weff_mean    = Weff_samples[offset:,:,:].mean(axis=0)

        aucs['gibbs_ss'] = roc_auc_score(A_true, Weff_mean.ravel())

    if vb_models is not None:
        # Compute ROC based on E[A] under variational posterior
        aucs['vb'] = roc_auc_score(A_true,
                                   vb_models[-1].weight_model.expected_A().ravel())

    if svi_models is not None:
github lyst / lightfm / tests / test_evaluation.py
    for user_id, row in enumerate(ground_truth):
        uid_array = np.empty(no_items, dtype=np.int32)
        uid_array.fill(user_id)
        predictions = model.predict(uid_array, pid_array,
                                    user_features=user_features,
                                    item_features=item_features,
                                    num_threads=4)

        true_pids = row.indices[row.data == 1]

        grnd = np.zeros(no_items, dtype=np.int32)
        grnd[true_pids] = 1

        if len(true_pids):
            scores.append(roc_auc_score(grnd, predictions))

    return scores
github Lapis-Hong / FM / faFM.py
print('auc:', roc_auc_score(test_y, y_pred_proba))

# Factorization machine classifier trained with SGD (fastFM)
fm2 = sgd.FMClassification(n_iter=100, init_stdev=0.1, rank=8, random_state=123,
                           l2_reg_w=0, l2_reg_V=0, l2_reg=None, step_size=0.1)
fm2.fit(train_X, train_y)
y_pred = fm2.predict(test_X)
y_pred_proba = fm2.predict_proba(test_X)

print('acc:', accuracy_score(test_y, y_pred))
print('auc:', roc_auc_score(test_y, y_pred_proba))


# Factorization machine classifier trained with MCMC (fastFM); fit and predict in one call
fm3 = mcmc.FMClassification(n_iter=100, init_stdev=0.1, rank=8, random_state=123, copy_X=True)
y_pred = fm3.fit_predict(train_X, train_y, test_X)
y_pred_proba = fm3.fit_predict_proba(train_X, train_y, test_X)
print('acc:', accuracy_score(test_y, y_pred))
print('auc:', roc_auc_score(test_y, y_pred_proba))
github lytforgood / MachineLearningTrick / Xgboost_Feature.py
                 n_estimators=self.n_estimators,
                 max_depth=self.max_depth,
                 min_child_weight=self.min_child_weight,
                 gamma=self.gamma,
                 subsample=self.subsample,
                 colsample_bytree=self.colsample_bytree,
                 objective= self.objective,
                 nthread=self.nthread,
                 scale_pos_weight=self.scale_pos_weight,
                 reg_alpha=self.reg_alpha,
                 reg_lambda=self.reg_lambda,
                 seed=self.seed)
          clf.fit(X_train, y_train)
          y_pre = clf.predict(X_test)
          y_pro = clf.predict_proba(X_test)[:, 1]
          print("pred_leaf=T  AUC Score : %f" % metrics.roc_auc_score(y_test, y_pro))
          print("pred_leaf=T  Accuracy : %.4g" % metrics.accuracy_score(y_test, y_pre))
          # Use the trained trees' leaf indices as additional features
          new_feature = clf.apply(X_train)
          X_train_new = self.mergeToOne(X_train, new_feature)
          new_feature_test = clf.apply(X_test)
          X_test_new = self.mergeToOne(X_test, new_feature_test)
          print("Training set sample number remains the same")
          return X_train_new, y_train, X_test_new, y_test
github nirdizati-research / predict-python / core / binary_classification.py
def calculate_auc(actual, scores, auc: int):
    if scores.shape[1] == 1:
        auc += 0
    else:
        try:
            auc += metrics.roc_auc_score(actual, scores[:, 1])
        except Exception:
            pass
    return auc
github ben519 / MLPB / Problems / Rank Sales Leads / rank_leads_logreg.py
#--------------------------------------------------
# Rank the predictions from most likely to least likely

test.sort_values('ProbSale', inplace=True, ascending=False)
test['ProbSaleRk'] = np.arange(test.shape[0])

#--------------------------------------------------
# Take a look

test[['ProbSaleRk', 'CompanyName', 'ProbSale', 'Sale']]  # Looks pretty good!

#--------------------------------------------------
# Evaluate the results using area under the ROC curve

roc_auc_score(y_true=test.Sale, y_score=test.ProbSale)  # 1
github ZhaoJ9014 / High-Performance-Face-Recognition / src / ResNet / CASIA_WEB_FACE.PyTorch / train.py
        # Eval metrics
        scores = -feat_dist
        gt = np.asarray(issame_list)

        # 10 fold
        fold_size = 600  # 600 pairs in each fold
        roc_auc = np.zeros(10)
        roc_eer = np.zeros(10)

        for i in tqdm.tqdm(range(10)):
            start = i * fold_size
            end = (i + 1) * fold_size
            scores_fold = scores[start:end]
            gt_fold = gt[start:end]
            roc_auc[i] = sklearn.metrics.roc_auc_score(gt_fold, scores_fold)
            fpr, tpr, _ = sklearn.metrics.roc_curve(gt_fold, scores_fold)

            # EER calc: https://yangcha.github.io/EER-ROC/
            roc_eer[i] = brentq(
                lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)

        print('LFW VAL AUC: %0.4f +/- %0.4f, LFW VAL EER: %0.4f +/- %0.4f' % (np.mean(roc_auc), np.std(roc_auc), np.mean(roc_eer), np.std(roc_eer)))
        epoch_val_roc_auc = np.mean(roc_auc)
        epoch_val_roc_eer = np.mean(roc_eer)

        if epoch_val_roc_auc > best_roc_auc:
            best_roc_auc = epoch_val_roc_auc
            best_model_wts = copy.deepcopy(model.state_dict())

            if FLAG_CENTER:
                # Save checkpoint
github Lapis-Hong / FM / train.py
def metrics(model_name):
    y_pred = model_name.predictions
    y_true = get_target(os.path.join(DATA_DIR, TEST))
    auc = roc_auc_score(y_true, y_pred)
    logloss = log_loss(y_true, y_pred)
    print("auc:{0:.6f} logloss:{1:.6f}\n".format(auc, logloss))
    return auc
github NVIDIA / gbm-bench / new_metrics.py
def classification_metrics(y_true, y_pred):
    metrics = {
        "Accuracy":  accuracy_score,
        "Precision": precision_score,
        "Recall":    recall_score,
        "AUC":       roc_auc_score,
        "F1":        f1_score,
    }
    return evaluate_metrics(y_true, y_pred, metrics)
github Ashton-Sidhu / aethos / aethos / modelling / model_analysis.py
        Returns
        -------
        float
            ROC AUC Score

        Examples
        --------
        >>> m = model.LogisticRegression()
        >>> m.roc_auc()
        """

        multi_class = kwargs.pop("multi_class", "ovr")

        if self.multiclass:
            roc_auc = sklearn.metrics.roc_auc_score(
                self.y_test, self.probabilities, multi_class=multi_class, **kwargs
            )
        else:
            roc_auc = sklearn.metrics.roc_auc_score(self.y_test, self.y_pred, **kwargs)

        return roc_auc
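
For multiclass targets, as in the last example, roc_auc_score expects a matrix of per-class probability estimates and a multi_class strategy ("ovr" or "ovo"). A minimal sketch, assuming a fitted classifier clf that exposes predict_proba (the variable names here are illustrative):

from sklearn.metrics import roc_auc_score

proba = clf.predict_proba(X_test)                      # shape (n_samples, n_classes)
auc = roc_auc_score(y_test, proba, multi_class="ovr")  # one-vs-rest averaging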