How to use the pathos.multiprocessing.ProcessingPool class in pathos

To help you get started, we’ve selected a few pathos examples based on popular ways it is used in public projects.

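All of the examples below share one basic pattern: construct a pool, then map a function over an iterable so the calls run across worker processes. As a reference point, here is a minimal, self-contained sketch of that pattern (the pool size and worker function are illustrative, not taken from any of the projects below):

from pathos.multiprocessing import ProcessingPool

def square(x):
    return x * x

if __name__ == '__main__':
    pool = ProcessingPool(4)           # 4 worker processes
    print(pool.map(square, range(8)))  # [0, 1, 4, 9, 16, 25, 36, 49]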

github vsimonis / MicroExpressionDetector / MicroExpressionDetector / legacy / other / SimpleASM.py
def rotate( self, rotation ):
        # gather each shape's point list in parallel, then scale every point
        # in parallel and wrap each scaled line as a SimpleShape
        point_lists = PPool().map( lambda y: y.pointList, self.shapeList )
        return [ SimpleShape( PPool().map( lambda x: x.scale( rotation ), line ) ) for line in point_lists ]
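
The snippet above maps lambdas across processes, something the standard multiprocessing.Pool cannot do because pickle rejects lambdas; pathos serialises callables with dill, which handles them. A minimal sketch of the same idea (the scaling factor and points are illustrative):

from pathos.multiprocessing import ProcessingPool as PPool

if __name__ == '__main__':
    scale = 2.0                    # illustrative factor
    points = [1.0, 2.0, 3.0]
    print(PPool().map(lambda p: p * scale, points))  # [2.0, 4.0, 6.0]
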
github antoinecarme / pyaf / TS / SignalDecomposition.py
def train_multiprocessed(self , iInputDS, iTime, iSignal, iHorizon):
        pool = Pool(self.mOptions.mNbCores)
        self.defineTransformations(iInputDS, iTime, iSignal);
        # print([transform1.mFormula for transform1 in self.mTransformList]);
        args = [];
        for transform1 in self.mTransformList:
            arg = cTraining_Arg(transform1.get_name(""));
            arg.mSigDec = cSignalDecompositionOneTransform();
            arg.mSigDec.mOptions = self.mOptions;
            arg.mSigDec.mExogenousData = self.mExogenousData;
            arg.mInputDS = iInputDS;
            arg.mTime = iTime;
            arg.mSignal = iSignal;
            arg.mHorizon = iHorizon;
            arg.mTransformation = transform1;
            arg.mOptions = self.mOptions;
            arg.mExogenousData = self.mExogenousData;
            arg.mResult = None;
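
The excerpt is cut off above, but the pattern it sets up, one small argument object per task, is a common way to feed ProcessingPool.map. A hedged sketch of that dispatch pattern (Task and run_task are hypothetical names, not part of pyaf):

from pathos.multiprocessing import ProcessingPool

class Task(object):                  # hypothetical stand-in for cTraining_Arg
    def __init__(self, name, data):
        self.mName = name
        self.mData = data
        self.mResult = None

def run_task(task):                  # hypothetical worker
    task.mResult = sum(task.mData)
    return task

if __name__ == '__main__':
    pool = ProcessingPool(2)
    tasks = [Task('t%d' % i, range(i + 1)) for i in range(4)]
    for done in pool.map(run_task, tasks):
        print(done.mName, done.mResult)
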
github vsimonis / MicroExpressionDetector / MicroExpressionDetector / legacy / models / ParallelASM.py
def alignAllShapes( self ):
        start = time.time()
        self.calcWs()
        # align every shape in parallel; the bound method self.alignOneShape
        # is mappable because pathos serialises it with dill
        self.allShapes = PPool().map( self.alignOneShape, self.allShapes )
        print('alignAllShapes: %f' % (time.time() - start))
        return
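
Mapping a bound method like self.alignOneShape is another case where plain pickle has historically struggled; with pathos it works out of the box. A minimal sketch of the same idea (Shifter is an illustrative class):

from pathos.multiprocessing import ProcessingPool as PPool

class Shifter(object):
    def __init__(self, offset):
        self.offset = offset

    def shift(self, x):
        return x + self.offset

if __name__ == '__main__':
    s = Shifter(10)
    print(PPool().map(s.shift, [1, 2, 3]))  # [11, 12, 13]
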
github peterhurford / vowpal_platypus / vowpal_platypus / vw.py
    # one model per core when a list of models is passed
    # (note: Sequence lives in collections.abc on Python 3.3+)
    num_cores = len(model) if isinstance(model, collections.Sequence) else 1
    if num_cores > 1:
        os.system("spanning_tree")
        if header:
            num_lines = sum(1 for line in open(train_filename))
            os.system('tail -n {} {} > {}'.format(num_lines - 1, train_filename, train_filename + '_'))
            if predict_filename != train_filename:
                num_lines = sum(1 for line in open(predict_filename))
                os.system('tail -n {} {} > {}'.format(num_lines - 1, predict_filename, predict_filename + '_'))
            train_filename = train_filename + '_'
            predict_filename = predict_filename + '_'
            header = False
        split_file(train_filename, num_cores)
        if predict_filename != train_filename:
            split_file(predict_filename, num_cores)
        pool = Pool(num_cores)
        train_filenames = [train_filename + (str(n) if n >= 10 else '0' + str(n)) for n in range(num_cores)]
        predict_filenames = [predict_filename + (str(n) if n >= 10 else '0' + str(n)) for n in range(num_cores)]
        args = []
        for i in range(num_cores):
            args.append({'model': model[i],
                         'train_filename': train_filenames[i],
                         'predict_filename': predict_filenames[i],
                         'train_line_function': train_line_function,
                         'predict_line_function': predict_line_function,
                         'evaluate_function': evaluate_function,
                         'split': split,
                         'quiet': model[i].params.get('quiet'),
                         'multicore': True,
                         'header': header})
        results = sum(pool.map(run_model, args), [])
        if evaluate_function:
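
The excerpt is cut off above, but note the idiom on its last complete line: sum(pool.map(run_model, args), []) flattens the per-worker result lists into one flat list. A tiny sketch of that idiom (chunk_results is an illustrative worker):

from pathos.multiprocessing import ProcessingPool

def chunk_results(n):                # illustrative worker returning a list
    return [n, n * 10]

if __name__ == '__main__':
    pool = ProcessingPool(2)
    nested = pool.map(chunk_results, [1, 2, 3])  # [[1, 10], [2, 20], [3, 30]]
    print(sum(nested, []))                       # [1, 10, 2, 20, 3, 30]
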
github opendatacube / datacube-core / datacube / drivers / s3 / storage / s3aio / s3lio.py
def __init__(self, enable_compression=True, enable_s3=True, file_path=None, num_workers=30):
        """Initialise the S3 Labeled IO interface.

        :param bool enable_s3: Flag to store objects in s3 or disk.
            True: store in S3
            False: store on disk (for testing purposes)
        :param str file_path: The root directory for the emulated s3 buckets when enable_s3 is set to False.
        :param int num_workers: The number of workers for parallel IO.
        """
        self.s3aio = S3AIO(enable_compression, enable_s3, file_path, num_workers)

        self.pool = ProcessingPool(num_workers)
        self.enable_compression = enable_compression
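
Keeping a ProcessingPool as an instance attribute, as above, lets one long-lived object reuse the same workers across many calls. A minimal sketch of that pattern (ParallelIO and read_all are illustrative names, not datacube API):

from pathos.multiprocessing import ProcessingPool

class ParallelIO(object):            # illustrative stand-in for the S3 IO class
    def __init__(self, num_workers=4):
        self.pool = ProcessingPool(num_workers)

    def read_all(self, keys):
        # every call reuses the same worker processes
        return self.pool.map(lambda k: 'data for %s' % k, keys)

if __name__ == '__main__':
    io = ParallelIO(num_workers=2)
    print(io.read_all(['a', 'b', 'c']))
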
github cogeorg / BlackRhino / examples / casp_many_assets / abm_template / src / basemarket.py
def tatonnement_parallel(self, sellers, buyers, starting_price):
        import random
        # For parallelisation
        import pathos.multiprocessing as mp
        pool = mp.ProcessingPool()
        supply_functions = []
        demand_functions = []
        for seller in sellers:
            supply_functions.append(seller[1])
        for buyer in buyers:
            demand_functions.append(buyer[1])
        # Initialise the variable that will track the candidate equilibrium price
        price_dummy = 0.0
        # Set price dummy to the starting value
        # TODO: rethink this, maybe if it's run multiple times start with
        #       previous equilibrium price, the below is tentative
        if starting_price == 0.0:
            price_dummy = random.uniform(0, 10) + 0.01
        else:
            price_dummy = starting_price
        # Initialise dummy variables for exponential search
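
The excerpt stops before the search loop, but the setup suggests the parallel step: evaluate every supply and demand function at a candidate price via pool.map. A hedged sketch of one such evaluation (excess_demand and the function lists are hypothetical, not abm_template code):

import pathos.multiprocessing as mp

def excess_demand(pool, supply_functions, demand_functions, price):
    supply = sum(pool.map(lambda f: f(price), supply_functions))
    demand = sum(pool.map(lambda f: f(price), demand_functions))
    return demand - supply

if __name__ == '__main__':
    pool = mp.ProcessingPool()
    sellers = [lambda p, a=a: a * p for a in (1.0, 2.0)]   # illustrative supply
    buyers = [lambda p, b=b: b / p for b in (3.0, 4.0)]    # illustrative demand
    print(excess_demand(pool, sellers, buyers, price=1.5))
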
github L0SG / seqgan-music / music_wseqgan.py
        # cast batch and prediction to 2d list of strings
        # (np.str was removed in NumPy 1.24; builtin str behaves the same here)
        batch_list = batch.astype(str).tolist()
        pred_list = prediction.astype(str).tolist()
        references.extend(batch_list)
        hypotheses.extend(pred_list)

    bleu = 0.

    # calculate bleu for each predicted seq
    # compare each predicted seq with the entire references
    # this is slow, use multiprocess
    def calc_sentence_bleu(hypothesis):
        return sentence_bleu(references, hypothesis, smoothing_function=smoother.method4)

    # note: an inner __name__ == '__main__' guard here would leave `result`
    # undefined when this module is imported, so map unconditionally
    p = Pool()
    result = p.map(calc_sentence_bleu, hypotheses)
    bleu = np.mean(result)

    return bleu
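
calc_sentence_bleu above is a nested function that closes over references; the standard-library pool cannot ship such closures to workers, but pathos serialises them with dill. A self-contained sketch of the same shape (mean_overlap and score are illustrative stand-ins, not the project's code):

from pathos.multiprocessing import ProcessingPool as Pool

def mean_overlap(references, hypotheses):
    ref_words = set(w for r in references for w in r)

    def score(hypothesis):           # illustrative stand-in for sentence_bleu
        return len(ref_words.intersection(hypothesis)) / float(len(hypothesis))

    return sum(Pool().map(score, hypotheses)) / len(hypotheses)

if __name__ == '__main__':
    refs = [['a', 'b', 'c'], ['b', 'd']]
    hyps = [['a', 'b'], ['e', 'f']]
    print(mean_overlap(refs, hyps))  # 0.5
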
github AkandaAshraf / VirtualSoc / Socialiser.py
        np.random.shuffle(nodesCombination)
        np.random.shuffle(nodesCombination)
        np.random.shuffle(nodesCombination)
        np.random.shuffle(nodesCombination)
        nodesCombination = nodesCombination[0:int(np.floor(len(nodesCombination) * self.p))]

        nodesCombinationDict = collections.defaultdict(lambda: None)
        for nodes in nodesCombination:
            if nodes[0] is not None:
                nodesCombinationDict[nodes[0].ID,nodes[1].ID] = [nodes[0],nodes[1]]



        self._bar = pyprind.ProgBar(len(nodesCombination), stream=sys.stdout)
        if numberofProcesses is not None:
            pool = Pool(numberofProcesses)
            print('Calculating node scores!')
            # cp.cuda.MemoryPool(allocator=_malloc)

            # imap returns a lazy iterator; scores are computed as it is consumed
            NodesScoreListOfObjects = pool.imap(self.getScoresSingleProcess, nodesCombination)
        else:
            NodesScoreListOfObjects = []
            for nodes in nodesCombination:
                NodesScoreListOfObjects.append(self.getScoresSingleProcess(nodes))
        # print("Number of cpu : ", multiprocessing.cpu_count())

        # for node1 in nodes:
        #     for node2 in nodes:
        #
        #         if 1.0 - self.p <= np.random.uniform(low=0.0, high=1.0, size=None):
        #             if node1 is not node2:
        #                 NodesScoreListOfObjects.append(self._NodesScore(node1=node1, node2=node2,
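
Unlike map, pool.imap above returns a lazy iterator, so each score is computed as the caller consumes it, which pairs naturally with a progress bar. A small sketch (score_pair is an illustrative worker):

from pathos.multiprocessing import ProcessingPool as Pool

def score_pair(pair):                # illustrative worker
    a, b = pair
    return a * b

if __name__ == '__main__':
    pool = Pool(2)
    pairs = [(1, 2), (3, 4), (5, 6)]
    for score in pool.imap(score_pair, pairs):  # results arrive one at a time
        print(score)                            # 2, 12, 30
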
github mantidproject / mantid / scripts / AbinsModules / CalculateS.py
def _calculate_s_powder_core(self, q_indx=None):
        """
        Helper function for _calculate_s_powder_1d.
        :return: Python dictionary with S data
        """
        atoms_items = dict()
        num_atoms, atoms = self._prepare_data()

        q_multiplied = [q_indx] * num_atoms

        if PATHOS_FOUND:
            p_local = ProcessingPool(nodes=AbinsModules.AbinsParameters.atoms_threads)
            # pathos map takes multiple iterables like the builtin map: each
            # call receives one atom paired with the matching q_indx
            result = p_local.map(self._calculate_s_powder_one_atom, atoms, q_multiplied)
        else:
            result = []
            for atom in atoms:
                result.append(self._calculate_s_powder_one_atom(atom=atom, q_indx=q_indx))

        for atom in range(num_atoms):
            atoms_items["atom_%s" % atom] = {"s": result[atoms.index(atom)]}
            self._report_progress(msg="S for atom %s" % atom + " has been calculated.")
        return atoms_items
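
This last snippet relies on ProcessingPool.map accepting several iterables at once and zipping them like the builtin map, so each call receives one atom and its matching q_indx. A minimal sketch of that signature (scale_value is an illustrative worker):

from pathos.multiprocessing import ProcessingPool

def scale_value(value, factor):      # illustrative two-argument worker
    return value * factor

if __name__ == '__main__':
    pool = ProcessingPool(nodes=2)
    values = [1, 2, 3]
    factors = [10, 10, 10]
    print(pool.map(scale_value, values, factors))  # [10, 20, 30]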