How to use the mlxtend.evaluate.permutation_test function in mlxtend

To help you get started, we’ve selected a few mlxtend examples based on popular ways the library is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github EricSchles / drifter_ml / drifter_ml / columnar_tests / columnar_tests.py View on Github external
def spearman_similar_correlation(self, column,
                                 correlation_lower_bound,
                                 pvalue_threshold=0.05,
                                 num_rounds=3):
    """Check whether *column* is similarly correlated in new vs. historical data.

    Computes the observed Spearman correlation between the new and
    historical values of ``column``, and an approximate permutation-test
    p-value for that correlation statistic.

    Returns ``True`` only when the permutation p-value is at most
    ``pvalue_threshold`` (the correlation is significant) AND the observed
    correlation is at least ``correlation_lower_bound``.

    NOTE(review): unlike the sibling distribution tests in this class,
    a *large* p-value fails here — presumably because an insignificant
    correlation counts as drift; confirm against the class's docs.
    """
    new_values = self.new_data[column]
    old_values = self.historical_data[column]
    observed = stats.spearmanr(new_values, old_values)
    perm_pvalue = permutation_test(
        new_values,
        old_values,
        method="approximate",
        num_rounds=num_rounds,
        func=lambda a, b: stats.spearmanr(a, b).correlation,
        seed=0)
    # Single boolean expression replacing the original guard-return chain;
    # truth table is identical.
    return (perm_pvalue <= pvalue_threshold
            and observed.correlation >= correlation_lower_bound)
github EricSchles / drifter_ml / drifter_ml / columnar_tests / columnar_tests.py View on Github external
def mann_whitney_u_similar_distribution(self, column,
                                        pvalue_threshold=0.05,
                                        num_rounds=3):
    """Check whether *column* has a similar distribution in new vs. historical data.

    Runs an approximate permutation test with the Mann-Whitney U statistic
    comparing ``self.new_data[column]`` against
    ``self.historical_data[column]``.

    Returns ``False`` when the permutation p-value falls below
    ``pvalue_threshold`` (i.e. the distributions differ significantly),
    ``True`` otherwise.
    """
    new_values = self.new_data[column]
    old_values = self.historical_data[column]
    p_value = permutation_test(
        new_values,
        old_values,
        method="approximate",
        num_rounds=num_rounds,
        func=lambda a, b: stats.mannwhitneyu(a, b).statistic,
        seed=0)
    # Equivalent to the original "if p < threshold: return False; return True".
    return p_value >= pvalue_threshold
github EricSchles / drifter_ml / drifter_ml / columnar_tests / columnar_tests.py View on Github external
def kruskal_similar_distribution(self, column,
                                 pvalue_threshold=0.05,
                                 num_rounds=3):
    """Check whether *column* has a similar distribution in new vs. historical data.

    Runs an approximate permutation test with the Kruskal-Wallis H statistic
    comparing ``self.new_data[column]`` against
    ``self.historical_data[column]``.

    Returns ``False`` when the permutation p-value falls below
    ``pvalue_threshold`` (i.e. the distributions differ significantly),
    ``True`` otherwise.
    """
    new_values = self.new_data[column]
    old_values = self.historical_data[column]
    p_value = permutation_test(
        new_values,
        old_values,
        method="approximate",
        num_rounds=num_rounds,
        func=lambda a, b: stats.kruskal(a, b).statistic,
        seed=0)
    # Equivalent to the original "if p < threshold: return False; return True".
    return p_value >= pvalue_threshold
github ResponsiblyAI / responsibly / ethically / we / weat.py View on Github external
def _calc_weat_pvalue(first_associations, second_associations,
                      method='approximate'):
    """Compute the WEAT p-value with an mlxtend permutation test.

    ``func='x_mean > y_mean'`` is mlxtend's string shorthand for the
    one-sided mean-difference test statistic.

    Raises ``ValueError`` when *method* is not one of the module-level
    ``PVALUE_METHODS``.
    """
    if method not in PVALUE_METHODS:
        message = 'method should be one of {}, {} was given'.format(
            PVALUE_METHODS, method)
        raise ValueError(message)

    return permutation_test(
        first_associations,
        second_associations,
        func='x_mean > y_mean',
        method=method,
        # Seeding only matters for the approximate method; an exact test
        # enumerates all permutations deterministically ("if exact - no meaning").
        seed=RANDOM_STATE)
github ResponsiblyAI / responsibly / responsibly / we / weat.py View on Github external
def _calc_weat_pvalue(first_associations, second_associations,
                      method=PVALUE_DEFUALT_METHOD):
    """Compute the WEAT p-value with an mlxtend permutation test.

    The test statistic is the difference of association sums,
    ``sum(x) - sum(y)``.

    Raises ``ValueError`` when *method* is not one of the module-level
    ``PVALUE_METHODS``.

    NOTE(review): ``PVALUE_DEFUALT_METHOD`` is misspelled, but the constant
    is defined elsewhere in the module — renaming it here would break the file.
    """
    if method not in PVALUE_METHODS:
        message = 'method should be one of {}, {} was given'.format(
            PVALUE_METHODS, method)
        raise ValueError(message)

    return permutation_test(
        first_associations,
        second_associations,
        func=lambda a, b: sum(a) - sum(b),
        method=method,
        # Seeding only matters for the approximate method; an exact test
        # enumerates all permutations deterministically ("if exact - no meaning").
        seed=RANDOM_STATE)