How to use the mlxtend.classifier.StackingClassifier function in mlxtend

To help you get started, we’ve selected a few mlxtend examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github ziweipolaris / atec2018-nlp / utils / test_cv_stacking.py View on Github external
# NOTE(review): scraped excerpt — the `if stack == 1:` header that should
# precede this assignment is missing from this view, and the indentation is
# inconsistent; verify against the original test_cv_stacking.py.
sclf = StackingClassifier(classifiers=[clf1, clf2, clf3], 
                            meta_classifier=lr)
    for clf, label in zip([clf1, clf2, clf3, sclf], 
                        ['KNN', 
                        'Random Forest', 
                        'Naive Bayes',
                        'StackingClassifier']):

        # 3-fold cross-validated accuracy for each base model and the stack.
        scores = model_selection.cross_val_score(clf, X, y, 
                                                cv=3, scoring='accuracy')

    # NOTE(review): this print sits outside the for loop, so only the last
    # model's scores would be reported — presumably an indentation artifact
    # of the scrape; confirm against the original file.
    print("Accuracy: %0.2f (+/- %0.2f) [%s]" 
        % (scores.mean(), scores.std(), label))

elif stack == 2:
    # use_probas=True feeds predicted class probabilities (rather than hard
    # class labels) to the meta-classifier; average_probas=False keeps each
    # base model's probabilities as separate meta-features instead of
    # averaging them.
    sclf = StackingClassifier(classifiers=[clf1, clf2, clf3],
                            use_probas=True,
                            average_probas=False,
                            meta_classifier=lr)
    for clf, label in zip([clf1, clf2, clf3, sclf], 
                        ['KNN', 
                        'Random Forest', 
                        'Naive Bayes',
                        'StackingClassifier']):

        scores = model_selection.cross_val_score(clf, X, y, 
                                                cv=3, scoring='accuracy')
    
    print("Accuracy: %0.2f (+/- %0.2f) [%s]" 
        % (scores.mean(), scores.std(), label))
elif stack == 3:
    # NOTE(review): excerpt is truncated here mid-call.
    sclf = StackingClassifier(classifiers=[clf1, clf2, clf3],
github ziweipolaris / atec2018-nlp / utils / test_cv_stacking.py View on Github external
from sklearn.naive_bayes import GaussianNB 
from sklearn.ensemble import RandomForestClassifier
from mlxtend.classifier import StackingClassifier
from sklearn.model_selection import GridSearchCV
import numpy as np

# Base learners: 1-NN, random forest, Gaussian naive Bayes, with a
# logistic-regression meta-classifier for the stack.
# NOTE(review): KNeighborsClassifier, LogisticRegression, model_selection,
# X and y are not imported/defined in this excerpt — presumably they come
# from code above it; verify against the original file.
clf1 = KNeighborsClassifier(n_neighbors=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
lr = LogisticRegression()

print('3-fold cross validation:\n')

# Selector for which stacking variant to run below
# (1 = stack on class labels, 2 = stack on class probabilities).
stack = 2
if stack == 1:
    sclf = StackingClassifier(classifiers=[clf1, clf2, clf3], 
                            meta_classifier=lr)
    for clf, label in zip([clf1, clf2, clf3, sclf], 
                        ['KNN', 
                        'Random Forest', 
                        'Naive Bayes',
                        'StackingClassifier']):

        # 3-fold cross-validated accuracy for each model.
        scores = model_selection.cross_val_score(clf, X, y, 
                                                cv=3, scoring='accuracy')

    # NOTE(review): this print sits outside the for loop, so only the last
    # model's scores would be reported — presumably an indentation artifact
    # of the scrape; confirm against the original file.
    print("Accuracy: %0.2f (+/- %0.2f) [%s]" 
        % (scores.mean(), scores.std(), label))

elif stack == 2:
    # use_probas=True feeds predicted class probabilities (rather than hard
    # class labels) to the meta-classifier.
    # NOTE(review): excerpt is truncated here mid-call.
    sclf = StackingClassifier(classifiers=[clf1, clf2, clf3],
                            use_probas=True,
github ShaoQiBNU / stacking / iris_stack.py View on Github external
################## load data #####################
# Iris dataset; keep feature columns 1 and 2 only, with the class labels
# as targets.
iris = datasets.load_iris()
x, y = iris.data[:, 1:3], iris.target


################## define classifier #####################
# Three heterogeneous base learners...
clf1 = KNeighborsClassifier(n_neighbors = 1)

clf2 = RandomForestClassifier(random_state = 1)

clf3 = GaussianNB()

# ...stacked under a logistic-regression meta-classifier.
lr = LogisticRegression()

sclf = StackingClassifier(classifiers = [clf1, clf2, clf3], meta_classifier = lr)


################## class result #####################
# Report mean +/- std of 3-fold cross-validated accuracy for each base
# model and for the stacked ensemble.
for clf, label in zip([clf1, clf2, clf3, sclf],
                      ['KNN',
                       'Random Forest',
                       'Naive Bayes',
                       'StackingClassifier']):
    
    scores = model_selection.cross_val_score(clf, x, y, cv = 3, scoring='accuracy')
    
    print("Accuracy: %0.2f (+/- %0.2f) [%s]" 
          % (scores.mean(), scores.std(), label))
    
    
import matplotlib.pyplot as plt
github SimonBlanke / Hyperactive / examples / examples_v1.x.x / use_cases / stacking_example.py View on Github external
def stacking(para, X, y):
    """Build a two-level stacking ensemble from the hyperparameter dict
    *para* and return its mean 3-fold cross-validation score on (X, y).

    para["lvl_0"] / para["lvl_1"] are lists of base classifiers for the
    inner and outer stack; para["top"] is the inner meta-classifier.
    """
    # Inner stack: level-0 base classifiers under the configured
    # meta-classifier.
    inner_stack = StackingClassifier(
        classifiers=para["lvl_0"], meta_classifier=para["top"]
    )
    # Outer stack: level-1 classifiers whose meta-classifier is the whole
    # inner stack.
    outer_stack = StackingClassifier(
        classifiers=para["lvl_1"], meta_classifier=inner_stack
    )
    return cross_val_score(outer_stack, X, y, cv=3).mean()
github shibing624 / text-classifier / models / classic_model.py View on Github external
# NOTE(review): this excerpt starts inside an enclosing function — the
# `def` line and the earlier `model_type` branches are outside this view,
# and the first import line's indentation looks scrape-mangled; verify
# against the original classic_model.py.
from mlxtend.classifier import EnsembleVoteClassifier
        from xgboost import XGBClassifier
        # Soft-voting ensemble over LR, XGBoost, linear SVC and MLP,
        # weighting XGBoost and SVC (2) above LR and MLP (1);
        # probability=True is required on SVC for soft voting.
        clf1 = LogisticRegression(random_state=0)
        clf2 = XGBClassifier(random_state=0)
        clf3 = SVC(random_state=0, kernel='linear', probability=True)
        clf4 = MLPClassifier(random_state=0)
        model = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3, clf4],
                                       weights=[1, 2, 2, 1], voting='soft', verbose=2)
    elif model_type == 'stack':
        from mlxtend.classifier import StackingClassifier
        from xgboost import XGBClassifier
        # Stacking: XGBoost, linear SVC and MLP feed class probabilities
        # (use_probas=True) to a logistic-regression meta-classifier;
        # average_probas=False keeps each model's probabilities as separate
        # meta-features.
        clf1 = XGBClassifier(random_state=0)
        clf2 = SVC(random_state=0, kernel='linear', probability=True)
        clf3 = MLPClassifier(random_state=0)
        lr = LogisticRegression()
        model = StackingClassifier(classifiers=[clf1, clf2, clf3],
                                   use_probas=True,
                                   average_probas=False,
                                   meta_classifier=lr)
    else:
        # Unknown model_type: fail loudly rather than return a default.
        raise ValueError('model type set error.')
    return model
github SimonBlanke / Hyperactive / examples / examples_v1.x.x / use_cases / stacking_example.py View on Github external
def stacking(para, X, y):
    """Score a nested (two-level) stacking ensemble with 3-fold CV.

    The level-0 classifiers in para["lvl_0"] are stacked under the
    para["top"] meta-classifier; that whole stack then serves as the
    meta-classifier for the level-1 classifiers in para["lvl_1"].
    """
    lvl0 = StackingClassifier(classifiers=para["lvl_0"],
                              meta_classifier=para["top"])
    lvl1 = StackingClassifier(classifiers=para["lvl_1"],
                              meta_classifier=lvl0)
    cv_scores = cross_val_score(lvl1, X, y, cv=3)
    return cv_scores.mean()
github PSNAppz / Machine-Learning-and-Data-Mining-Algorithms / Stacking / stackedClassifier.py View on Github external
from sklearn.linear_model import RidgeClassifier
from sklearn.naive_bayes import GaussianNB

import warnings
warnings.filterwarnings("ignore")

# Load the combined feature matrix: every column but the last is a feature,
# the final column holds the class label.
dataset = np.loadtxt('../Dataset/comb.csv', delimiter=",")
# split data into X and y (dataset is already an ndarray, so use its shape
# directly instead of re-wrapping it with np.array on every access)
n_features = dataset.shape[1] - 1
X = dataset[:, 0:n_features]
y = dataset[:, n_features]

# Base learners: LDA, Gaussian naive Bayes and a random forest, stacked
# under a ridge meta-classifier.
clf1 = LinearDiscriminantAnalysis()
clf2 = RidgeClassifier()
clf4 = RandomForestClassifier()
clf3 = GaussianNB()
sclf = StackingClassifier(classifiers=[clf1, clf3, clf4], 
                          meta_classifier=clf2)

print('10-fold cross validation:\n')

# BUG FIX: the original loop zipped clf2 (the Ridge meta-classifier)
# against the label 'Gaussian Naive Bayes', so Ridge was scored under the
# wrong name while clf3 (GaussianNB, an actual stack member) was never
# evaluated. Score the three base learners plus the stacked ensemble so
# each model matches its label.
for clf, label in zip([clf1, clf3, clf4, sclf], 
                      ['LDA', 
                       'Gaussian Naive Bayes', 
                       'Random Forest',
                       'Meta - Ridge Classifier']):
    # Mean +/- std of 10-fold cross-validated accuracy per model.
    scores = model_selection.cross_val_score(clf, X, y, cv=10, scoring='accuracy')
    print("Accuracy: %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label))