# print('Orig Predict time p:', time.time() - start)
# print()
# # unfold and generate the label matrix
# predicted_labels_orig = np.zeros([X.shape[0], n_estimators])
# for i in range(n_jobs):
#     predicted_labels_orig[:, starts[i]:starts[i + 1]] = np.asarray(
#         all_results_pred_p[i]).T
##########################################################################
start = time.time()

# model prediction: score X in parallel, one chunk of estimators per job
all_results_scores = Parallel(n_jobs=n_jobs, max_nbytes=None,
                              verbose=True)(
    delayed(_parallel_decision_function)(
        n_estimators_list[i],
        trained_estimators[starts[i]:starts[i + 1]],
        None,
        X,
        n_estimators,
        rp_flags[starts[i]:starts[i + 1]],
        None,
        approx_flags[starts[i]:starts[i + 1]],
        verbose=True)
    for i in range(n_jobs))

print('Orig decision_function time:', time.time() - start)
print()

# unfold and generate the score matrix
predicted_scores_orig = np.zeros([X.shape[0], n_estimators])
print()

# unfold and generate the label matrix
predicted_labels_orig = np.zeros([X_test.shape[0], n_estimators])
for i in range(n_jobs):
    predicted_labels_orig[:, starts[i]:starts[i + 1]] = np.asarray(
        all_results_pred[i]).T
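# decision_function on X_test: note that the rp_flags slice is commented out
# in the call below and jl_transformers is passed in its place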
start = time.time()

n_estimators = len(base_estimators)
n_estimators_list, starts, n_jobs = _partition_estimators(n_estimators,
                                                          n_jobs)

# model prediction
all_results_scores = Parallel(n_jobs=n_jobs, max_nbytes=None,
                              verbose=True)(
    delayed(_parallel_decision_function)(
        n_estimators_list[i],
        trained_estimators[starts[i]:starts[i + 1]],
        None,
        X_test,
        n_estimators,
        # rp_flags[starts[i]:starts[i + 1]],
        jl_transformers,
        approx_flags[starts[i]:starts[i + 1]],
        verbose=True)
    for i in range(n_jobs))

print('Orig decision_function time:', time.time() - start)
print()

# unfold and generate the score matrix
predicted_scores_orig = np.zeros([X_test.shape[0], n_estimators])
for i in range(n_jobs):
    predicted_scores_orig[:, starts[i]:starts[i + 1]] = np.asarray(
        all_results_scores[i]).T
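##########################################################################
# What follows appears to be the self.* (class-method) variant of the same
# scoring routine; the if-branch that pairs with the else below is not part
# of this excerpt.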
        else:
            # use simple equal split by sklearn
            n_estimators_list, starts, n_jobs = _partition_estimators(
                self.n_estimators, self.n_jobs)
        # score prediction with the base models
        if self.verbose:
            print('Parallel score prediction...')
            start = time.time()

        # TODO: code cleanup. There is an existing bug for joblib on Windows:
        # https://github.com/joblib/joblib/issues/806
        # max_nbytes can be dropped on other OS
        all_results_scores = Parallel(n_jobs=n_jobs, max_nbytes=None,
                                      verbose=True)(
            delayed(_parallel_decision_function)(
                n_estimators_list[i],
                self.base_estimators[starts[i]:starts[i + 1]],
                self.approximators[starts[i]:starts[i + 1]],
                X,
                self.n_estimators,
                # self.rp_flags[starts[i]:starts[i + 1]],
                self.jl_transformers_[starts[i]:starts[i + 1]],
                self.approx_flags[starts[i]:starts[i + 1]],
                verbose=True)
            for i in range(n_jobs))
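        # at this point all_results_scores holds one list of score vectors per
        # job; downstream code (not shown in this excerpt) would stitch them
        # column-wise into an (n_samples, n_estimators) matrix, as in the
        # unfold loops above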
        # report the total scoring time
        if self.verbose:
            print('Parallel Score Prediction without Approximators '
                  'Total Time:', time.time() - start)
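
##########################################################################
# For orientation only: a minimal sketch of what one worker in the pattern
# above could look like. This is an illustrative assumption, not the actual
# _parallel_decision_function implementation; the parameter names, the
# scikit-learn-style transform()/predict()/decision_function() calls, and
# the flag convention (1 = use the approximator) are all hypothetical.
def _parallel_decision_function_sketch(n_estimators_chunk, estimators,
                                       approximators, X, n_estimators_total,
                                       jl_transformers, approx_flags,
                                       verbose=False):
    # n_estimators_total is accepted only to mirror the call sites above
    scores = []
    for i in range(n_estimators_chunk):
        # optionally project the data with this estimator's JL transformer
        X_i = X
        if jl_transformers is not None and jl_transformers[i] is not None:
            X_i = jl_transformers[i].transform(X)
        # score with either the cheap approximator or the base detector
        if approximators is not None and approx_flags[i] == 1:
            scores.append(approximators[i].predict(X_i))
        else:
            scores.append(estimators[i].decision_function(X_i))
        if verbose:
            print('scored estimator', i + 1, 'of', n_estimators_chunk)
    # one score vector per estimator; the caller stitches them column-wise
    return scores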