How to use the suod.models.jl_projection.jl_transform function in suod

To help you get started, we’ve selected a few suod examples based on popular ways jl_transform is used in public projects.
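
jl_transform applies an already-fitted Johnson-Lindenstrauss (JL) random projection to a feature matrix, which lets the same projection be reused on training and test data. The sketch below is a minimal, hypothetical example of that workflow: it assumes the companion jl_fit_transform(X, objective_dim, method) helper from the same suod.models.jl_projection module and uses made-up data shapes, so check the API of your installed SUOD version.

import numpy as np
from suod.models.jl_projection import jl_fit_transform, jl_transform

# made-up data: 1000 training and 200 test samples with 50 features each
X_train = np.random.rand(1000, 50)
X_test = np.random.rand(200, 50)

# fit a JL random projection down to 10 dimensions; the returned transformer
# is what jl_transform re-applies later
X_train_proj, transformer = jl_fit_transform(X_train, 10, "basic")

# re-apply the stored projection to new data
X_test_proj = jl_transform(X_test, transformer)

print(X_train_proj.shape, X_test_proj.shape)  # expected: (1000, 10) (200, 10)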


github yzhao062 / SUOD / suod / models / parallel_processes.py

# (the opening of this snippet is truncated on the source page; the signature
# below is reconstructed from the names used in the loop body)
def _parallel_approx_estimators(n_estimators, clfs, X, total_n_estimators,
                                rp_transformers, approx_flags, approximator,
                                verbose):
    """Fit one supervised approximator per flagged base estimator on the
    randomly projected (reduced) feature space.

    Parameters
    ----------
    n_estimators
    clfs
    X
    total_n_estimators
    rp_transformers
    approx_flags
    approximator
    verbose

    Returns
    -------
    approximators : list
    """
    X = check_array(X)
    # Build estimators
    approximators = []

    # TODO: approximators can be different
    for i in range(n_estimators):
        # project matrix
        X_scaled = jl_transform(X, rp_transformers[i])

        estimator = clfs[i]

        check_is_fitted(estimator, ['decision_scores_'])
        if verbose > 1:
            print("Building estimator %d of %d for this parallel run "
                  "(total %d)..." % (i + 1, n_estimators, total_n_estimators))

        if approx_flags[i] == 1:
            # operate on the reduced (projected) space
            pseudo_scores = estimator.decision_scores_
            # pseudo_scores = estimator.decision_function(X)
            # use the same type of approximator for all models
            base_approximator = clone(approximator)
            base_approximator.fit(X_scaled, pseudo_scores)
            approximators.append(base_approximator)
        else:
            # keep the list aligned with the estimators
            approximators.append(None)

    return approximators
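
The loop above is the heart of SUOD's model approximation: for every base detector flagged in approx_flags, a fresh clone of the supervised approximator is trained on the JL-projected features to reproduce that detector's decision_scores_. The standalone sketch below shows the same idea outside the parallel helper; the LOF detector, the RandomForestRegressor approximator, and the data shapes are illustrative choices (and jl_fit_transform is assumed, as in the earlier sketch), not part of the snippet above.

import numpy as np
from sklearn.base import clone
from sklearn.ensemble import RandomForestRegressor
from pyod.models.lof import LOF
from suod.models.jl_projection import jl_fit_transform, jl_transform

X = np.random.rand(500, 40)                    # made-up training data
X_proj, transformer = jl_fit_transform(X, 8, "basic")

detector = LOF().fit(X_proj)                   # base detector in the reduced space
approximator = clone(RandomForestRegressor(n_estimators=50))

# learn to reproduce the detector's outlier scores from the projected features
approximator.fit(X_proj, detector.decision_scores_)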

github yzhao062 / SUOD / suod / models / parallel_processes.py

def _parallel_predict_proba(n_estimators, clfs, approximators, X,
                            total_n_estimators, rp_transformers,
                            approx_flags, verbose):
    X = check_array(X)

    pred = []
    for i in range(n_estimators):
        estimator = clfs[i]
        if verbose > 1:
            print("predicting with estimator %d of %d for this parallel run "
                  "(total %d)..." % (i + 1, n_estimators, total_n_estimators))

        # project matrix
        X_scaled = jl_transform(X, rp_transformers[i])

        # convert the approximator's raw scores into outlier probabilities
        if approx_flags[i] == 1:
            raw_scores = approximators[i].predict(X_scaled)
            predicted_scores = raw_score_to_proba(estimator.decision_scores_,
                                                  raw_scores)

        else:
            predicted_scores = estimator.predict_proba(X_scaled)

        pred.append(predicted_scores[:, 1])
        # pred.append(predicted_scores)

    return pred
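
Note that prediction reuses rp_transformers[i], the exact projection fitted for estimator i, so jl_transform maps the new samples into the same reduced space the detector and its approximator were trained on; passing a different transformer would silently change the feature space and invalidate the resulting probabilities.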

github yzhao062 / SUOD / suod / models / parallel_processes.py

def _parallel_predict(n_estimators, clfs, approximators, X, total_n_estimators,
                      rp_transformers, approx_flags, contamination, verbose):
    X = check_array(X)

    pred = []
    for i in range(n_estimators):
        estimator = clfs[i]
        if verbose > 1:
            print("predicting with estimator %d of %d for this parallel run "
                  "(total %d)..." % (i + 1, n_estimators, total_n_estimators))

        # project matrix
        X_scaled = jl_transform(X, rp_transformers[i])

        # turn the approximator's scores into outlier labels via the contamination fraction
        if approx_flags[i] == 1:
            predicted_labels = score_to_label(
                approximators[i].predict(X_scaled),
                outliers_fraction=contamination)

        else:
            predicted_labels = estimator.predict(X_scaled)

        pred.append(predicted_labels)

    return pred
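
In the approximated branch, score_to_label converts the approximator's continuous scores into binary outlier labels, keeping roughly the top contamination fraction as outliers. The fragment below only illustrates that thresholding idea in plain NumPy; it is not SUOD's actual score_to_label implementation.

import numpy as np

scores = np.array([0.1, 0.4, 0.35, 0.8, 0.05, 0.9])  # made-up outlier scores
contamination = 0.2                                   # expected outlier fraction

# samples whose score falls above the (1 - contamination) percentile are outliers
threshold = np.percentile(scores, 100 * (1 - contamination))
labels = (scores > threshold).astype(int)
print(labels)  # [0 0 0 0 0 1] -> only the highest-scoring sample is flagged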

github yzhao062 / SUOD / suod / models / parallel_processes.py

def _parallel_decision_function(n_estimators, clfs, approximators, X,
                                total_n_estimators, rp_transformers,
                                approx_flags, verbose):
    X = check_array(X)

    pred = []
    for i in range(n_estimators):
        estimator = clfs[i]
        if verbose > 1:
            print("predicting with estimator %d of %d for this parallel run "
                  "(total %d)..." % (i + 1, n_estimators, total_n_estimators))

        # project matrix
        X_scaled = jl_transform(X, rp_transformers[i])

        # get raw outlier scores, from the approximator if one was fitted
        if approx_flags[i] == 1:
            predicted_scores = approximators[i].predict(X_scaled)
        else:
            predicted_scores = estimator.decision_function(X_scaled)

        pred.append(predicted_scores)

    return pred
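
Putting the pieces together, the decision-function path boils down to: project the training data once, fit the detector (and optionally an approximator of its scores) in the reduced space, then project new samples with the same transformer before scoring them. The compact end-to-end sketch below is hypothetical: the LOF detector, the RandomForestRegressor approximator, the data shapes, and the assumed jl_fit_transform helper are illustrative stand-ins for whatever your SUOD setup actually uses.

import numpy as np
from pyod.models.lof import LOF
from sklearn.ensemble import RandomForestRegressor
from suod.models.jl_projection import jl_fit_transform, jl_transform

rng = np.random.RandomState(0)
X_train, X_new = rng.rand(1000, 60), rng.rand(100, 60)

# fit: one projection, one detector, one approximator of its scores
X_train_proj, transformer = jl_fit_transform(X_train, 12, "basic")
detector = LOF().fit(X_train_proj)
approximator = RandomForestRegressor(n_estimators=50).fit(
    X_train_proj, detector.decision_scores_)

# predict: always re-project new data with the same transformer first
X_new_proj = jl_transform(X_new, transformer)
approx_scores = approximator.predict(X_new_proj)       # approximated path
exact_scores = detector.decision_function(X_new_proj)  # exact path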