How to use the multiprocess.pool.Pool function in multiprocess

To help you get started, we've selected a few examples of multiprocess.pool.Pool drawn from popular ways it is used in public projects.

github uqfoundation/multiprocess: py2.6/multiprocess/__init__.py
def Pool(processes=None, initializer=None, initargs=()):
    '''
    Returns a process pool object
    '''
    from multiprocess.pool import Pool
    return Pool(processes, initializer, initargs)
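
For reference, here is a minimal sketch of how this factory is typically used; the worker function square and the pool size are our own illustrations, not part of the project:

from multiprocess import Pool

def square(x):
    # Trivial worker function, used only for illustration.
    return x * x

if __name__ == '__main__':
    # Create a pool of four workers and map work across them.
    pool = Pool(processes=4)
    try:
        print(pool.map(square, range(10)))  # [0, 1, 4, 9, ..., 81]
    finally:
        pool.close()  # stop accepting new tasks
        pool.join()   # wait for the workers to exit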
github IMLHF/Real-Time-Voice-Cloning: encoder/preprocess.py
            # Create the mel spectrogram, discard those that are too short
            frames = audio.wav_to_mel_spectrogram(wav)
            if len(frames) < partials_n_frames:
                continue

            out_fpath = speaker_out_dir.joinpath(out_fname)
            np.save(out_fpath, frames)
            logger.add_sample(duration=len(wav) / sampling_rate)
            sources_file.write("%s,%s\n" % (out_fname, in_fpath))

        sources_file.close()

    # Process the utterances for each speaker
    # for speaker_dir in speaker_dirs: # DEBUG
    #     preprocess_speaker(speaker_dir)
    # 56 workers assumes a large machine; the tqdm arguments are (iterable, desc, total).
    with Pool(56) as pool:
        list(tqdm(pool.imap(preprocess_speaker, speaker_dirs), dataset_name, len(speaker_dirs),
                  unit="speakers"))
    logger.finalize()
    print("Done preprocessing %s.\n" % dataset_name)
github uqfoundation/multiprocess: py3.1/multiprocess/__init__.py
def Pool(processes=None, initializer=None, initargs=()):
    '''
    Returns a process pool object
    '''
    from multiprocess.pool import Pool
    return Pool(processes, initializer, initargs)
github titu1994/Advanced_Machine_Learning: proj2/print_word_accuracies_all_models.py
    data = [(l, i, X_test, y_test, False) for i, l in enumerate(lines)]
    accuracies = pool.imap(process_line, data)  # imap maintains order when processing in parallel

    return accuracies, optimizer, lambd


if __name__ == '__main__':
    X_train, y_train = prepare_dataset("train_sgd.txt")
    X_test, y_test = prepare_dataset("test_sgd.txt")

    OPTIMIZERS = ['BFGS', 'ADAM', 'SGD']
    LAMBDAS = [1e-2, 1e-4, 1e-6]

    # 9 combinations (3 optimizers x 3 lambdas), processed all at once.
    # This requires a lot of memory and CPU (it can hang laptops with < 4 cores),
    # so reduce the pool size if needed.
    pool = Pool(9)
    result_list = []

    t1 = time.time()
    for optm in OPTIMIZERS:
        for lambd in LAMBDAS:
            param_path = 'results/%s_%s.txt' % (optm, lambd)
            print("Getting word accuracy data from optimizer %s with lambda %s" % (optm, lambd))

            # parallel processing, takes a lot of memory
            result = print_word_error_parallel(param_path, X_test, y_test, pool, optm, lambd)
            result_list.append(result)

    pool.close()

    print('\n', '*' * 80, '\n')
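
Note that pool.imap returns a lazy iterator: results come back in submission order but are only computed as they are consumed, which is why the script above can queue all nine jobs before closing the pool. A sketch of that lifecycle with a hypothetical worker standing in for process_line:

from multiprocess import Pool

def work(args):
    # Hypothetical worker standing in for process_line.
    index, value = args
    return index, value ** 2

if __name__ == '__main__':
    pool = Pool(4)
    lazy = pool.imap(work, list(enumerate([3, 1, 4, 1, 5])))  # ordered, lazy
    pool.close()          # no new tasks; already-submitted tasks still run
    results = list(lazy)  # consume the iterator before joining
    pool.join()
    print(results)        # [(0, 9), (1, 1), (2, 16), (3, 1), (4, 25)]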
github IMLHF/Real-Time-Voice-Cloning: synthesizer/inference.py
        :param return_alignments: if True, a matrix representing the alignments between
        the characters and each decoder output step will be returned for each spectrogram
        :return: a list of N melspectrograms as numpy arrays of shape (80, Mi), where Mi
        is the sequence length of spectrogram i, and possibly the alignments.
        """
        if not self._low_mem:
            # Usual inference mode: load the model on the first request and keep it loaded.
            if not self._is_loaded():
                self._load()
            specs, alignments = self._model.my_synthesize(embeddings, texts)
        else:
            # Low memory inference mode: load the model upon every request. The model has to be
            # loaded in a separate process to be able to release GPU memory (a simple workaround
            # for TensorFlow's intricacies).
            specs, alignments = Pool(1).starmap(Synthesizer._one_shot_synthesize_spectrograms, 
                                                [(self.checkpoint_fpath, embeddings, texts)])[0]
    
        return (specs, alignments) if return_alignments else specs
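
The trick above is to run the heavy call in a throwaway one-worker pool, so the model lives in a child process whose memory (including GPU memory) is reclaimed when the pool is destroyed. A sketch of the same pattern with explicit cleanup; heavy_call and its arguments are placeholders:

from multiprocess import Pool

def heavy_call(checkpoint, data):
    # Placeholder for Synthesizer._one_shot_synthesize_spectrograms;
    # anything loaded here exists only in the child process.
    return len(checkpoint), len(data)

if __name__ == '__main__':
    # Leaving the with-block terminates the single worker, releasing
    # whatever it allocated (the point of low-memory mode).
    with Pool(1) as pool:
        result = pool.starmap(heavy_call, [("model.ckpt", [1, 2, 3])])[0]
    print(result)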
github sharpstill/AU_R-CNN: lstm_end_to_end/script/write_memcached_landmark_info.py
        from collections_toolkit.memcached_manager import PyLibmcManager
        mc_manager = PyLibmcManager(args.memcached_host)
        if mc_manager is None:
            raise IOError("no memcached server listening on {}".format(args.memcached_host))


    train_data = AUDataset(database=args.database,
                           fold=args.fold, split_name=args.trainval,
                           split_index=args.split_idx, mc_manager=mc_manager, train_all_data=False,
                           )
    result_data = [img_path for img_path, AU_set, current_database_name in train_data.result_data
                   if args.database + "|" + img_path not in mc_manager]
    sub_list = split_list(result_data, len(result_data)//100)

    for img_path_lst in sub_list:
        with Pool(processes=50) as pool:
            input_list = [(img_path, None, None) for img_path in img_path_lst]
            result = pool.starmap(parallel_landmark_and_conn_component, input_list)
            pool.close()
            pool.join()
            for img_path, AU_box_dict, landmark_dict, box_is_whole_image in result:
                key_prefix = args.database + "|"
                key = key_prefix + img_path
                orig_img = cv2.imread(img_path, cv2.IMREAD_COLOR)
                new_face, rect = FaceMaskCropper.dlib_face_crop(orig_img, landmark_dict)

                print("write {}".format(key))
                if mc_manager is not None and key not in mc_manager:
                    save_dict = {"landmark_dict": landmark_dict, "AU_box_dict": AU_box_dict, "crop_rect": rect}
                    mc_manager.set(key, save_dict)
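
Creating a fresh 50-worker pool for every chunk is expensive, since each iteration forks and tears down 50 processes; a single pool can be reused across chunks. A sketch with stand-ins for the worker and for the assumed split_list helper:

from multiprocess import Pool

def handle(path, a, b):
    # Stand-in for parallel_landmark_and_conn_component.
    return path, a, b

def split_list(items, n_chunks):
    # Assumed helper: split items into roughly n_chunks equal parts.
    k = max(1, len(items) // n_chunks)
    return [items[i:i + k] for i in range(0, len(items), k)]

if __name__ == '__main__':
    paths = ["img_%d.png" % i for i in range(10)]
    with Pool(processes=4) as pool:  # one pool, reused for every chunk
        for chunk in split_list(paths, 3):
            args = [(p, None, None) for p in chunk]
            for result in pool.starmap(handle, args):
                print(result)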
github uqfoundation/multiprocess: py3.2/multiprocess/__init__.py
def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
    '''
    Returns a process pool object
    '''
    from multiprocess.pool import Pool
    return Pool(processes, initializer, initargs, maxtasksperchild)
github uqfoundation/multiprocess: py3.3/multiprocess/__init__.py
def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
    '''
    Returns a process pool object
    '''
    from multiprocess.pool import Pool
    return Pool(processes, initializer, initargs, maxtasksperchild)
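
The maxtasksperchild parameter, which these later wrappers expose, recycles each worker process after it has completed that many tasks; this bounds the damage from memory leaks in long-lived pools. A minimal sketch showing the recycling:

import os
from multiprocess import Pool

def task(x):
    # Report which process ran the task; with maxtasksperchild=2,
    # the worker PIDs change as processes are recycled.
    return x, os.getpid()

if __name__ == '__main__':
    with Pool(processes=2, maxtasksperchild=2) as pool:
        for x, pid in pool.map(task, range(8)):
            print("task %d ran in pid %d" % (x, pid))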