How to use the `suod.models.parallel_processes._parallel_decision_function` function in SUOD

To help you get started, we’ve selected a few SUOD examples that show how `_parallel_decision_function` is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github yzhao062 / SUOD / examples / temp_do_not_use_work_w_minist.py View on Github external
# print('Orig Predict time p:', time.time() - start)
    # print()

    # # unfold and generate the label matrix
    # predicted_labels_orig = np.zeros([X.shape[0], n_estimators])
    # for i in range(n_jobs):
    #     predicted_labels_orig[:, starts[i]:starts[i + 1]] = np.asarray(
    #         all_results_pred_p[i]).T

    ##########################################################################

    # Time the parallel scoring pass so it can be compared against other runs.
    start = time.time()
    # model prediction
    # Fan the trained estimators out across n_jobs workers; worker i scores
    # the contiguous slice [starts[i], starts[i + 1]) of the estimator list.
    # max_nbytes=None disables joblib's memmapping of large arguments
    # (workaround for a joblib bug on Windows; see joblib issue #806,
    # referenced elsewhere in SUOD).
    all_results_scores = Parallel(n_jobs=n_jobs, max_nbytes=None,
                                  verbose=True)(
        delayed(_parallel_decision_function)(
            n_estimators_list[i],  # number of estimators in this chunk
            trained_estimators[starts[i]:starts[i + 1]],  # chunk of models
            None,  # presumably the approximators slot — unused here; verify
            X,
            n_estimators,  # total estimator count across all chunks
            rp_flags[starts[i]:starts[i + 1]],  # per-chunk projection flags
            None,  # presumably the transformers slot — unused here; verify
            approx_flags[starts[i]:starts[i + 1]],  # per-chunk approx flags
            verbose=True)
        for i in range(n_jobs))

    print('Orig decision_function time:', time.time() - start)
    print()

    # unfold and generate the label matrix
    # Pre-allocate one score column per base estimator; the per-worker fill
    # loop is outside this excerpt.
    predicted_scores_orig = np.zeros([X.shape[0], n_estimators])
github yzhao062 / SUOD / examples / temp_do_not_use.py View on Github external
print()

    # unfold and generate the label matrix
    # Stitch per-worker prediction chunks back into one
    # (n_samples, n_estimators) matrix: worker i produced the columns
    # starts[i]:starts[i + 1], returned transposed relative to this layout.
    predicted_labels_orig = np.zeros([X_test.shape[0], n_estimators])
    for i in range(n_jobs):
        predicted_labels_orig[:, starts[i]:starts[i + 1]] = np.asarray(
            all_results_pred[i]).T

    # Time the parallel decision_function pass.
    start = time.time()
    # Re-partition the estimators evenly across jobs (may also rebalance
    # n_jobs to the actual number of non-empty chunks).
    n_estimators = len(base_estimators)
    n_estimators_list, starts, n_jobs = _partition_estimators(n_estimators,
                                                              n_jobs)
    # model prediction
    # max_nbytes=None disables joblib's argument memmapping (Windows
    # workaround; see joblib issue #806, referenced elsewhere in SUOD).
    all_results_scores = Parallel(n_jobs=n_jobs, max_nbytes=None,
                                  verbose=True)(
        delayed(_parallel_decision_function)(
            n_estimators_list[i],  # number of estimators in this chunk
            trained_estimators[starts[i]:starts[i + 1]],  # chunk of models
            None,  # presumably the approximators slot — unused here; verify
            X_test,
            n_estimators,  # total estimator count across all chunks
            # rp_flags[starts[i]:starts[i + 1]],
            jl_transformers,  # NOTE(review): passed whole, not sliced per
                              # chunk as in base.py — verify this is intended
            approx_flags[starts[i]:starts[i + 1]],  # per-chunk approx flags
            verbose=True)
        for i in range(n_jobs))

    print('Orig decision_function time:', time.time() - start)
    print()

    # unfold and generate the label matrix
    # Pre-allocate the score matrix; the fill loop is outside this excerpt.
    predicted_scores_orig = np.zeros([X_test.shape[0], n_estimators])
github yzhao062 / SUOD / examples / demo_full.py View on Github external
print()

    # unfold and generate the label matrix
    # Reassemble per-worker prediction chunks into one
    # (n_samples, n_estimators) label matrix; worker i owns the columns
    # starts[i]:starts[i + 1].
    predicted_labels_orig = np.zeros([X_test.shape[0], n_estimators])
    for i in range(n_jobs):
        predicted_labels_orig[:, starts[i]:starts[i + 1]] = np.asarray(
            all_results_pred[i]).T

    # Time the parallel decision_function pass.
    start = time.time()
    # Re-partition the estimators evenly across workers.
    n_estimators = len(base_estimators)
    n_estimators_list, starts, n_jobs = _partition_estimators(n_estimators,
                                                              n_jobs)
    # model prediction
    # max_nbytes=None disables joblib's argument memmapping (Windows
    # workaround; see joblib issue #806, referenced elsewhere in SUOD).
    all_results_scores = Parallel(n_jobs=n_jobs, max_nbytes=None,
                                  verbose=True)(
        delayed(_parallel_decision_function)(
            n_estimators_list[i],  # number of estimators in this chunk
            trained_estimators[starts[i]:starts[i + 1]],  # chunk of models
            None,  # presumably the approximators slot — unused here; verify
            X_test,
            n_estimators,  # total estimator count across all chunks
            jl_transformers,  # NOTE(review): passed whole, not sliced per
                              # chunk as in base.py — verify this is intended
            approx_flags[starts[i]:starts[i + 1]],  # per-chunk approx flags
            verbose=True)
        for i in range(n_jobs))

    print('Orig decision_function time:', time.time() - start)
    print()

    # unfold and generate the label matrix
    # Pre-allocate the score matrix; the fill loop below is truncated in
    # this excerpt.
    predicted_scores_orig = np.zeros([X_test.shape[0], n_estimators])
    for i in range(n_jobs):
github yzhao062 / SUOD / suod / models / base.py View on Github external
else:
            # use simple equal split by sklearn
            # Fallback partitioning: no cost-aware scheduling, just an
            # equal-sized split of the estimators across self.n_jobs workers.
            n_estimators_list, starts, n_jobs = _partition_estimators(
                self.n_estimators, self.n_jobs)

        # announce and start timing the scoring pass (verbose mode only);
        # note `start` is only bound when self.verbose is true, and is read
        # below under the same condition.
        if self.verbose:
            print('Parallel score prediction...')
            start = time.time()

        # TODO: code cleanup. There is an existing bug for joblib on Windows:
        # https://github.com/joblib/joblib/issues/806
        # max_nbytes can be dropped on other OS
        # Score X with each chunk of base estimators in parallel: worker i
        # handles the slice [starts[i], starts[i + 1]) of the models, their
        # approximators, JL transformers, and approximation flags.
        all_results_scores = Parallel(n_jobs=n_jobs, max_nbytes=None,
                                      verbose=True)(
            delayed(_parallel_decision_function)(
                n_estimators_list[i],  # number of estimators in this chunk
                self.base_estimators[starts[i]:starts[i + 1]],
                self.approximators[starts[i]:starts[i + 1]],
                X,
                self.n_estimators,  # total estimator count
                # self.rp_flags[starts[i]:starts[i + 1]],
                self.jl_transformers_[starts[i]:starts[i + 1]],
                self.approx_flags[starts[i]:starts[i + 1]],
                verbose=True)
            for i in range(n_jobs))

        # report elapsed scoring time (verbose mode only)
        if self.verbose:
            print('Parallel Score Prediction without Approximators '
                  'Total Time:', time.time() - start)