How to use the joblib.Parallel function in joblib

To help you get started, we’ve selected a few joblib.Parallel examples based on popular ways it is used in public projects.
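At its core, Parallel takes an iterable of delayed(...) calls and returns the results as a list, in the order the tasks were submitted. A minimal, self-contained sketch (the function and inputs here are illustrative, not taken from the projects below):

from math import sqrt
from joblib import Parallel, delayed

# Evaluate sqrt on ten inputs using two worker processes; results keep input order.
results = Parallel(n_jobs=2)(delayed(sqrt)(i) for i in range(10))
print(results)  # ten square roots, in input order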


github Neuroinflab / kCSD-python / tests / test_kCSD3D.py View on Github
def calculate_potential_3D_parallel(true_csd, ele_xx, ele_yy, ele_zz, 
                                    csd_x, csd_y, csd_z):
    """
    For Mihav's implementation to compute the LFP generated
    """

    xlin = csd_x[:,0,0]
    ylin = csd_y[0,:,0]
    zlin = csd_z[0,0,:]
    xlims = [xlin[0], xlin[-1]]
    ylims = [ylin[0], ylin[-1]]
    zlims = [zlin[0], zlin[-1]]
    sigma = 1.0
    #tic = time.time()
    pots = Parallel(n_jobs=num_cores)(delayed(integrate_3D)(ele_xx[ii],ele_yy[ii],ele_zz[ii],
                                                            xlims, ylims, zlims, true_csd,
                                                            xlin, ylin, zlin,
                                                            csd_x, csd_y, csd_z) for ii in range(len(ele_xx)))
    pots = np.array(pots)
    pots /= 4*np.pi*sigma
    #toc = time.time() - tic
    #print toc, 'Total time taken - parallel, sims '
    return pots
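This excerpt relies on num_cores, Parallel, delayed and np being defined at module level in the test file. A hedged sketch of the kind of setup it assumes (not taken from the original file):

import multiprocessing
import numpy as np
from joblib import Parallel, delayed

num_cores = multiprocessing.cpu_count()  # assumption; the test module picks its own value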
github arthurmensch / cogspaces / exps / analyse / extract_dl.py View on Github
def compute_all_decomposition(output_dir, n_jobs=1):
    seeds = pd.read_pickle(join(output_dir, 'seeds.pkl'))
    seeds = seeds['seed'].unique()

    decompositions = ['dl_rest_positive']
    alphas = [1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7]

    for decomposition in decompositions:
        if decomposition == 'pca':
            components_list = Parallel(n_jobs=n_jobs, verbose=10)(
                delayed(compute_pca)(output_dir, seed)
                for seed in seeds)
        elif decomposition == 'dl_rest':
            components_list = Parallel(n_jobs=n_jobs, verbose=10)(
                delayed(compute_sparse_components)
                (output_dir, seed,
                 symmetric_init=False,
                 alpha=alpha,
                 init='rest')
                for seed in seeds
                for alpha in alphas)
        elif decomposition == 'dl_rest_positive':
            components_list = Parallel(n_jobs=n_jobs, verbose=10)(
                delayed(compute_sparse_components)
                (output_dir, seed,
                 symmetric_init=False,
                 positive=True,
                 alpha=alpha,
                 init='rest')
                for seed in seeds
                for alpha in alphas)
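Note the nested generator expression: Parallel receives one delayed call per (seed, alpha) combination, so the whole grid of seeds and regularization strengths becomes a single flat task list. A small self-contained sketch of the same pattern (fit stands in for compute_sparse_components):

from joblib import Parallel, delayed

def fit(seed, alpha):
    # stand-in for compute_sparse_components
    return seed, alpha

seeds = [0, 1, 2]
alphas = [1e-2, 1e-3]

# 3 seeds x 2 alphas = 6 independent tasks.
results = Parallel(n_jobs=2)(
    delayed(fit)(seed, alpha) for seed in seeds for alpha in alphas)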
github sergeyk / vislab / vislab / datasets / pascal.py View on Github
def load_annotation_files(filenames, num_workers=1):
    t = time.time()
    if num_workers > 1:
        results = joblib.Parallel(n_jobs=num_workers)(
            joblib.delayed(_load_pascal_annotation)(fname)
            for fname in filenames
        )
    else:
        results = [_load_pascal_annotation(fname) for fname in filenames]
    images, objects_dfs = zip(*results)
    images_df = pd.DataFrame(list(images))
    objects_df = pd.concat(objects_dfs)
    print('load_annotation_files: finished in {:.3f} s'.format(
        time.time() - t))
    return images_df, objects_df
github scikit-learn / scikit-learn / sklearn / compose / _column_transformer.py View on Github
def _fit_transform(self, X, y, func, fitted=False):
        """
        Private function to fit and/or transform on demand.

        Return value (transformers and/or transformed X data) depends
        on the passed function.
        ``fitted=True`` ensures the fitted transformers are used.
        """
        transformers = list(
            self._iter(fitted=fitted, replace_strings=True))
        try:
            return Parallel(n_jobs=self.n_jobs)(
                delayed(func)(
                    transformer=clone(trans) if not fitted else trans,
                    X=_safe_indexing(X, column, axis=1),
                    y=y,
                    weight=weight,
                    message_clsname='ColumnTransformer',
                    message=self._log_message(name, idx, len(transformers)))
                for idx, (name, trans, column, weight) in enumerate(
                        self._iter(fitted=fitted, replace_strings=True), 1))
        except ValueError as e:
            if "Expected 2D array, got 1D array instead" in str(e):
                raise ValueError(_ERR_MSG_1DCOLUMN)
            else:
                raise
github AppleHolic / pytorch_sound / pytorch_sound / scripts / preprocess.py View on Github
def preprocess_audio(in_dir: str, out_dir: str, sample_rate: int = 22050):
        """
        Preprocess audio files from the base directory into the target directory, running the jobs in parallel.
        :param in_dir: base directory of data files
        :param out_dir: target directory
        :param sample_rate: target audio sample rate
        """
        in_wav_list, out_wav_list = __class__.__get_wave_file_list(in_dir, out_dir)

        # do multi process
        Parallel(n_jobs=__class__.num_workers)(
            delayed(process_all)
            (*args, sample_rate) for args in tqdm(zip(in_wav_list, out_wav_list))
        )
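Parallel defaults to a process-based backend; if thread-based workers are wanted instead (for example because the heavy lifting happens in a C library that releases the GIL), the backend can be requested explicitly. A sketch under that assumption, not taken from the project:

from joblib import Parallel, delayed

def resample(in_path, out_path, sample_rate):
    ...  # placeholder for the real audio processing

pairs = [("a.wav", "out/a.wav"), ("b.wav", "out/b.wav")]
Parallel(n_jobs=4, backend="threading")(
    delayed(resample)(*args, 22050) for args in pairs)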
github KrishnaswamyLab / scprep / scprep / stats.py View on Github
    gene_names = np.arange(X.shape[1])
    X = utils.to_array_or_spmatrix(X)
    Y = utils.to_array_or_spmatrix(Y)
    # inconsistent behaviour from csr and csc
    if sparse.issparse(X):
        X = X.tocsr()
    if sparse.issparse(Y):
        Y = Y.tocsr()
    if measure == "difference":
        difference = mean_difference(X, Y)
    if measure == "ttest":
        difference = t_statistic(X, Y)
    if measure == "ranksum":
        difference = rank_sum_statistic(X, Y)
    elif measure == "emd":
        difference = joblib.Parallel(n_jobs)(
            joblib.delayed(EMD)(
                select.select_cols(X, idx=i), select.select_cols(Y, idx=i)
            )
            for i in range(X.shape[1])
        )
        difference = np.array(difference) * np.sign(mean_difference(X, Y))
    result = pd.DataFrame({measure: difference}, index=gene_names)
    if direction == "up":
        if measure == "ranksum":
            result = result.sort_index().sort_values([measure], ascending=True)
        else:
            result = result.sort_index().sort_values([measure], ascending=False)
    elif direction == "down":
        if measure == "ranksum":
            result = result.sort_index().sort_values([measure], ascending=False)
        else:
            result = result.sort_index().sort_values([measure], ascending=True)
github AlpacaDB / backlight / src / backlight / portfolio / portfolio.py View on Github
    assert len(principal) == len(trades)
    assert len(lot_size) == len(trades)

    symbols2mkt = {m.symbol: m for m in mkt}
    symbols = [t.symbol for t in trades]
    assert set(symbols) == set(symbols2mkt.keys())

    # Transform trades following the lot_size
    mult_trades = []
    for (trade, lot) in zip(trades, lot_size):
        mult_trade = trade.copy()
        mult_trade["amount"] *= lot
        mult_trades.append(mult_trade)

    # Construct positions and return Portfolio
    positions = Parallel(n_jobs=-1, max_nbytes=None)(
        [
            delayed(calc_positions)(
                trade, symbols2mkt[trade.symbol], principal=principal_per_asset
            )
            for (trade, principal_per_asset) in zip(mult_trades, principal)
        ]
    )

    symbols = [p.symbol for p in positions]
    if len(set(symbols)) != len(symbols):
        positions = _fusion_positions(positions)

    return Portfolio(positions)
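Two Parallel arguments do the work here: n_jobs=-1 uses all available cores, and max_nbytes=None switches off joblib's automatic memmapping of large numpy arrays, so each worker receives an in-memory copy of its market data rather than a memory-mapped file. A small sketch of the same knobs (data and function are illustrative):

import numpy as np
from joblib import Parallel, delayed

big = np.random.rand(10000, 100)

# With max_nbytes=None, rows are pickled to the workers instead of being memmapped to disk.
row_sums = Parallel(n_jobs=-1, max_nbytes=None)(
    delayed(np.sum)(big[i]) for i in range(10))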
github david-cortes / contextualbandits / contextualbandits / utils.py View on Github
def predict_proba_raw(self,X):
        preds = np.zeros((X.shape[0], self.n))
    Parallel(n_jobs=self.njobs, verbose=0, require="sharedmem")(
        delayed(self._decision_function_single)(choice, X, preds, 0)
        for choice in range(self.n))
        _apply_smoothing(preds, self.smooth, self.counters)
        return preds
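require="sharedmem" forces a thread-based backend so that every worker writes into the same preds array; with a process-based backend each worker would mutate its own copy and the results would be lost. A self-contained sketch of the idiom (names are illustrative):

import numpy as np
from joblib import Parallel, delayed

out = np.zeros(4)

def fill(i, buf):
    buf[i] = i ** 2  # mutate the shared buffer in place

# sharedmem guarantees all workers see, and mutate, the same array object.
Parallel(n_jobs=2, require="sharedmem")(delayed(fill)(i, out) for i in range(4))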
github chrodan / tdlearn / mdp.py View on Github
    if n_jobs == 1:
        with ProgressBar(enabled=(verbose >= 1)) as p:
            for k in xrange(n):
                p.update(k, n, "Sampling acc. reward")
                np.random.seed(seed)
                r = mymdp.sample_accum_reward(states[k], gamma, policy, n_eps=n_eps, l_eps=l_eps)
                rewards[k] = np.mean(r)
    else:
        jobs = []
        b = int(n / n_jobs)+1
        k = 0
        while k < n:
            kp = min(k+b, n)
            jobs.append((run1, [mymdp, policy, states[k:kp], gamma, n_eps, l_eps, seed], {"verbose": verbose-1, "n_jobs": 1}))
            k = kp
        res = Parallel(n_jobs=n_jobs, verbose=verbose)(jobs)
        rewards = np.concatenate(res, axis=0)
    return rewards
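Here the task list is built by hand as (function, args, kwargs) tuples, which is the same triple that delayed(run1)(...) would produce, so Parallel consumes either form. A self-contained sketch of the equivalence (work is an illustrative function, not from the excerpt):

from joblib import Parallel, delayed

def work(x, scale=1):
    return x * scale

# delayed(work)(2, scale=3) evaluates to (work, (2,), {'scale': 3}),
# the same shape of tuple the jobs list above assembles manually.
results = Parallel(n_jobs=2)(delayed(work)(x, scale=3) for x in range(4))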
github thanhdtran / RME / rec_eval.py View on Github
def parallel_recall_at_k(train_data, heldout_data, U, V, batch_users=2000, k=20,
                mu=None, vad_data=None, agg=np.nanmean, clear_invalid=False, n_jobs = 16, cache=False):
    n_users = train_data.shape[0]
    res = Parallel(n_jobs=n_jobs)(delayed(recall_at_k_batch)(train_data, heldout_data,
                                                             U, V.T, user_idx, k=k, mu=mu,
                                                             vad_data=vad_data, clear_invalid=clear_invalid, cache=cache)
                                  for user_idx in user_idx_generator(n_users, batch_users))
    mn_recall = np.hstack(res)
    # print mn_recall
    if callable(agg):
        return agg(mn_recall)
    return mn_recall