How to use the sklearn.metrics.accuracy_score function in sklearn

To help you get started, we’ve selected a few accuracy_score examples, based on popular ways the function is used in public projects.

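accuracy_score(y_true, y_pred) returns the fraction of predictions that exactly match the true labels, or the raw count of matches when normalize=False. A minimal sketch with invented labels:

from sklearn.metrics import accuracy_score

y_true = [0, 1, 1, 0, 1]
y_pred = [0, 1, 0, 0, 1]

# 4 of the 5 labels match, so the score is 0.8
print(accuracy_score(y_true, y_pred))                   # 0.8
# normalize=False returns the number of correct predictions instead
print(accuracy_score(y_true, y_pred, normalize=False))  # 4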

github GauravBh1010tt / DeepLearn / corrnet / DeepLearn_cornet.py
import numpy as np
from sklearn import svm
from sklearn.metrics import accuracy_score

def svm_classifier(train_x, train_y, valid_x, valid_y, test_x, test_y):
    # train a linear SVM and report validation and test accuracy
    clf = svm.LinearSVC()
    clf.fit(train_x, train_y)
    pred = clf.predict(valid_x)
    va = accuracy_score(np.ravel(valid_y), np.ravel(pred))
    pred = clf.predict(test_x)
    ta = accuracy_score(np.ravel(test_y), np.ravel(pred))
    return va, ta
github zhouyanasd / DL-NC / Brian2_scripts / sim_brian_paper / sim_brian_paper_SNAS / src / core.py
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

def readout_sk(self, X_train, X_validation, X_test, y_train, y_validation, y_test, **kwargs):
    # fit a logistic-regression readout and score it on all three splits
    lr = LogisticRegression(**kwargs)
    lr.fit(X_train.T, y_train.T)
    y_train_predictions = lr.predict(X_train.T)
    y_validation_predictions = lr.predict(X_validation.T)
    y_test_predictions = lr.predict(X_test.T)
    # accuracy_score expects (y_true, y_pred); accuracy is symmetric, so the
    # original reversed argument order gives the same value
    return accuracy_score(y_train.T, y_train_predictions), \
           accuracy_score(y_validation.T, y_validation_predictions), \
           accuracy_score(y_test.T, y_test_predictions)
github google / makerfaire-booth / 2018 / burger / model / verygoodmodel.py
# Excerpt: assumes pandas (pd), numpy, a fitted-on-demand category encoder
# `enc`, a classifier `clf`, and `column_names` are defined earlier in the file.
all_train = X_train.join(y_train)
pos_train = all_train[all_train.output == True]
neg_train = all_train[all_train.output == False]
# DataFrame.append was removed in pandas 2.0; pd.concat is the replacement
train = pd.concat([pos_train, neg_train.sample(len(pos_train) * 1000)])
train_X = train.drop(['output'], axis=1)
train_y = train['output']
train_X_categoricals = train_X[column_names]
ttrain_X_categoricals = enc.fit_transform(train_X_categoricals)
clf.fit(ttrain_X_categoricals, train.output)
classes = numpy.unique(y_train)
X_test_categoricals = X_test[column_names]
# use transform, not fit_transform, so the test split is encoded with the
# categories learned from the training split
tX_test_categoricals = enc.transform(X_test_categoricals)
prediction = clf.predict(tX_test_categoricals)
accuracy = accuracy_score(y_test, prediction)
cf = confusion_matrix(y_test, prediction)
tp, fp, tn, fn = cf[1][1], cf[0][1], cf[0][0], cf[1][0]
p, r, f1, s = precision_recall_fscore_support(y_test, prediction)
print(tp, fp, tn, fn)
pickle.dump(clf, open("../data/trained.pkl", "wb"))
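A common pitfall in snippets like the one above is refitting the encoder on the test split. A minimal sketch of the fit-once/transform pattern (OneHotEncoder and the variable names here are illustrative assumptions, not the project's actual setup):

from sklearn.preprocessing import OneHotEncoder

enc = OneHotEncoder(handle_unknown='ignore')
# learn the category vocabulary from the training split only...
X_train_enc = enc.fit_transform(X_train_categoricals)
# ...then apply the same mapping to the test split
X_test_enc = enc.transform(X_test_categoricals)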
github ysig / GraKeL / examples / plot_pipeline_odd_sth.py
import numpy as np

from sklearn import svm
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV, cross_val_predict
from sklearn.pipeline import make_pipeline

from grakel import GraphKernel, datasets

# Loads the MUTAG dataset from:
# https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets
# the biggest collection of benchmark datasets for graph kernels.
mutag = datasets.fetch_dataset("MUTAG", verbose=False)
G, y = mutag.data, mutag.target
C_grid = (10. ** np.arange(1, 10, 1) / len(G)).tolist()
n_folds = 10

# Pipeline: compute the "odd_sth" graph kernel, then grid-search an SVM
# with a precomputed kernel over C.
estimator = make_pipeline(
    GraphKernel(kernel=dict(name="odd_sth"), normalize=True),
    GridSearchCV(svm.SVC(kernel='precomputed'), dict(C=C_grid),
                 scoring='accuracy'))

acc = accuracy_score(y, cross_val_predict(estimator, G, y, cv=n_folds))
print("Accuracy:", str(round(acc * 100, 2)) + "%")
github OanaIgnat / vlog_action_recognition / amt / read_csv_results.py
# Excerpt from a longer routine: the enclosing loop that sets hit, hit_nb,
# value1-value4, potential_spammers and per_hit_val_rater is omitted here.
                        spammers_low_GT_acc[hit].add(worker)
                        values_GT[hit] = per_hit_val_rater['GT']

            per_hit_val_rater[0] = [value1]
            per_hit_val_rater[1] = [value2]
            per_hit_val_rater[2] = [value3]
            per_hit_val_rater['GT'] = [value4]

            hit = hit_nb

    # compute for the last HIT
    for worker in range(0, 3):
        if worker in potential_spammers[hit_nb]:
            continue
        # flag the worker as a potential spammer when agreement with the
        # ground-truth ('GT') answers falls below 20% accuracy
        accuracy_with_GT = accuracy_score(per_hit_val_rater[worker], per_hit_val_rater['GT'])
        if accuracy_with_GT < 0.2:
            values_GT[hit_nb] = per_hit_val_rater['GT']
            potential_spammers[hit_nb].add(worker)
            spammers_low_GT_acc[hit_nb].add(worker)

    list_keys = dict()
    # the with statement closes the file automatically, so no explicit
    # close() call is needed afterwards
    with open(csv_file_name, 'r') as csvinput:
        for row in csv.reader(csvinput):
            key = row[0]
            if key == 'HITId':
                continue
            if key not in list_keys:
                list_keys[key] = 0
            list_keys[key] += 1
github delira-dev / delira / delira / training / metrics.py
def __init__(self, gt_logits=False, pred_logits=True, **kwargs):
    # wrap sklearn's accuracy_score as the metric function; the flags say
    # whether ground truth and predictions arrive as logits
    super().__init__(accuracy_score, gt_logits, pred_logits, **kwargs)
github pylablanche / gcForest / GCForest.py
def _cascade_evaluation(self, X_test, y_test):
    """ Evaluate the accuracy of the cascade using X and y.

    :param X_test: np.array
        Array containing the test input samples.
        Must be of the same shape as training data.

    :param y_test: np.array
        Test target values.

    :return: float
        The cascade accuracy.
    """
    # average the per-forest class probabilities, take the argmax as the
    # predicted class, and score it against the true labels
    casc_pred_prob = np.mean(self.cascade_forest(X_test), axis=0)
    casc_pred = np.argmax(casc_pred_prob, axis=1)
    casc_accuracy = accuracy_score(y_true=y_test, y_pred=casc_pred)
    print('Layer validation accuracy = {}'.format(casc_accuracy))

    return casc_accuracy
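The argmax-then-score pattern above is common when a model outputs class probabilities. A tiny standalone illustration with invented numbers:

import numpy as np
from sklearn.metrics import accuracy_score

# toy probability matrix: 3 samples, 2 classes
proba = np.array([[0.9, 0.1],
                  [0.2, 0.8],
                  [0.6, 0.4]])
y_true = [0, 1, 1]
y_pred = np.argmax(proba, axis=1)  # -> array([0, 1, 0])
print(accuracy_score(y_true=y_true, y_pred=y_pred))  # 2 of 3 correct: 0.666...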
github neptune-ml / open-solution-mapping-challenge / src / steps / pytorch / validation.py
from sklearn.metrics import accuracy_score

def torch_acc_score(output, target):
    # move tensors off the GPU and convert to NumPy before scoring
    output = output.data.cpu().numpy()
    y_true = target.numpy()
    y_pred = output.argmax(axis=1)
    return accuracy_score(y_true, y_pred)
github fukatani / stacked_generalization / stacked_generalization / lib / joblibed.py
def score(self, X, y, index=None, sample_weight=None):
    from sklearn.metrics import accuracy_score
    # per-sample weights are forwarded straight to accuracy_score
    return accuracy_score(y,
                          self.predict(X, index),
                          sample_weight=sample_weight)
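The last example forwards sample_weight to accuracy_score. A short sketch of how weighting changes the score (labels and weights invented for illustration):

from sklearn.metrics import accuracy_score

y_true = [0, 0, 1, 1]
y_pred = [0, 0, 1, 0]

# unweighted: 3 of 4 predictions are correct
print(accuracy_score(y_true, y_pred))  # 0.75
# weighted: the one misclassified sample carries 7 of the 10 total weight
# units, so only 3/10 of the weight falls on correct predictions
print(accuracy_score(y_true, y_pred, sample_weight=[1, 1, 1, 7]))  # 0.3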