How to use the mpi4py.MPI.ANY_TAG constant in mpi4py

To help you get started, we’ve selected a few mpi4py examples based on popular ways MPI.ANY_TAG is used in public projects.

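Before the project snippets, here is a minimal, self-contained sketch of the basic pattern (our own example, not taken from any of the projects below): post comm.recv with MPI.ANY_SOURCE and MPI.ANY_TAG, pass in an MPI.Status object, and inspect the status afterwards to learn which rank sent the message and which tag it carried. The tag values TAG_WORK and TAG_STOP are arbitrary names chosen for this sketch.

# Minimal sketch: receive from any source with any tag, then dispatch on the tag.
# Run with e.g.: mpiexec -n 4 python any_tag_sketch.py
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

TAG_WORK, TAG_STOP = 1, 2  # arbitrary tag values for this sketch

if rank == 0:
    # Rank 0 hands every other rank one work item, then a stop message.
    for dest in range(1, comm.Get_size()):
        comm.send({"task": dest}, dest=dest, tag=TAG_WORK)
        comm.send(None, dest=dest, tag=TAG_STOP)
else:
    status = MPI.Status()
    while True:
        data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        if status.Get_tag() == TAG_STOP:
            break
        print("rank %d got %r from rank %d" % (rank, data, status.Get_source()))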

github hvasbath / beat / test / pt_toy_example.py
print("Master starting with %d workers" % num_workers)
    for i in range(num_workers):
        comm.recv(source=MPI.ANY_SOURCE, tag=tags.READY, status=status)
        source = status.Get_source()
        comm.send(tasks[i], dest=source, tag=tags.START)
        print("Sent task to worker %i" % source)
        active_workers += 1

    print("Parallel tempering ...")
    print("----------------------")

    while True:
        m1 = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        source1 = status.Get_source()
        print("Got sample 1 from worker %i" % source1)
        m2 = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        source2 = status.Get_source()
        print("Got sample 2 from worker %i" % source1)

        m1, m2 = metrop_select(m1, m2)
        print('samples 1, 2 %i %i' % (m1, m2))
        chain.extend([m1, m2])
        if len(chain) < nsamples:
            print("Sending states back to workers ...")
            comm.send(m1, dest=source1, tag=tags.START)
            comm.send(m2, dest=source2, tag=tags.START)
        else:
            print('Requested number of samples reached!')
            break

    print("Master finishing, recorded chain:")
    print(chain)
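The snippet above is the master side of the exchange. A matching worker loop (a hypothetical sketch, not the actual worker code in pt_toy_example.py; it assumes the master runs on rank 0, that the tags enum also defines DONE, and that sample_step stands in for the real sampler) shows where MPI.ANY_TAG fits on the other end; in a full program the master would also send an exit-tagged message once it breaks out of its loop:

# Hypothetical worker loop for the master shown above.
status = MPI.Status()
comm.send(None, dest=0, tag=tags.READY)          # announce readiness to the master
while True:
    m = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
    if status.Get_tag() != tags.START:           # anything other than START means stop
        break
    m_new = sample_step(m)                       # placeholder for the actual sampling step
    comm.send(m_new, dest=0, tag=tags.DONE)      # master receives this with MPI.ANY_TAG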
github jaredwo / topowx / twx / infill / mpi_infill_optim_po_normals.py
    prcp[:,x] = prcp_stn
    
    #Send stn ids and masks to all processes
    MPI.COMM_WORLD.bcast((fnl_stn_ids,xval_masks_prcp,last_idxs_prcp), root=RANK_COORD)
    
    print "Coord: Done initialization. Starting to send work."
    
    cnt = 0
    nrec = 0
    
    for stn_id in fnl_stn_ids:
                
        if cnt < nwrkers:
            dest = cnt+N_NON_WRKRS
        else:
            dest = MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)
            nrec+=1

        MPI.COMM_WORLD.send(stn_id, dest=dest, tag=TAG_DOWORK)
        cnt+=1
    
    for w in np.arange(nwrkers):
        MPI.COMM_WORLD.send(None, dest=w+N_NON_WRKRS, tag=TAG_STOPWORK)
        
    print "coord_proc: done"
github uchicago-cs / deepdish / deepdish / parallel / mpi.py
    from mpi4py import MPI
    N = MPI.COMM_WORLD.Get_size() - 1
    if N == 0 or not _g_initialized:
        mapf = [map, itr.starmap][star]
        for res in mapf(f, workloads):
            yield res
        return

    for job_index, workload in enumerate(itr.chain(workloads, itr.repeat(None))):
        if workload is None and len(_g_available_workers) == N:
            break

        while not _g_available_workers or workload is None:
            # Wait to receive results
            status = MPI.Status()
            ret = MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
            if status.tag == 2:
                yield ret['output_data']
                _g_available_workers.add(status.source)
                if len(_g_available_workers) == N:
                    break

        if _g_available_workers and workload is not None:
            dest_rank = _g_available_workers.pop()

            # Send off job
            task = dict(func=f, input_data=workload, job_index=job_index, unpack=star)
            MPI.COMM_WORLD.send(task, dest=dest_rank, tag=10)
github cctbx / cctbx_project / xfel / amo / pnccd_ana / pnccd_hit.py
    def recv(self):


        status     = MPI.Status()
        self.myobj = comm.recv(source=MPI.ANY_SOURCE,tag=MPI.ANY_TAG,status=status)
        recvRank   = status.Get_source()


        if not hasattr(self, 'total_ave'):
           adim  = None
        else:
           adim  = self.total_ave[0].shape

        if not hasattr(self, 'total_c2'):
           cdim  = None
        else:
           cdim  = self.total_c2[0].shape

        if not hasattr(self, 'total_saxs'):
           sdim  = None
        else:
           sdim  = self.total_saxs[0].shape
github jaredwo / topowx / twx / interp / mpi_metric.py
    nchks = tile_grid_info.nchks
    chks_per_tile = tile_grid_info.chks_per_tile
    
    tile_status = {}
    for key in tile_ids.keys():
        tile_status[key] = 0
    
    tile_queues = {}
    for key in tile_ids.keys():
        tile_queues[key] = deque()
    
    stat_chk = StatusCheck(nchks,1)
    
    while 1:
        
        MPI.COMM_WORLD.Recv([tile_num_msg,MPI.INT],source=MPI.ANY_SOURCE,tag=MPI.ANY_TAG,status=status)
        
        tile_num = tile_num_msg[0]
        
        if status.tag == TAG_REQUEST_WRITE:
            
            if len(tile_queues[tile_num]) > 0:
                
                tile_queues[tile_num].append(status.source)
            
            else:
            
                MPI.COMM_WORLD.Send([tile_num_msg,MPI.INT],dest=status.source,tag=TAG_WRITE_PERMIT)
                tile_queues[tile_num].append(status.source)
        
        elif status.tag == TAG_DONE_WRITE:
github jaredwo / topowx / twx / infill / mpi_infillfix_tair.py
    else:
        
        ds_rst = None
        
    print "Coord: Done initialization. Starting to send work."
    
    cnt = 0
    nrec = 0
    stn_sent = True
    for stn_id in stn_ids_all:
        
        if stn_sent:
            if cnt < nwrkers:
                dest = cnt+N_NON_WRKRS
            else:
                dest = MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)
                nrec+=1
        
        fix_tmin = stn_id in stn_ids_rerun_tmin
        fix_tmax = stn_id in stn_ids_rerun_tmax
        tmin_stn = stn_id in stn_ids_tmin
        tmax_stn = stn_id in stn_ids_tmax
        
        send_stn = False
        
        if ds_rst is not None:
            
            stn_idx = np.nonzero(stn_ids_all == stn_id)[0][0]
            
            tmin_flg = np.bool(ds_rst.variables['tmin'][stn_idx])
            tmax_flg = np.bool(ds_rst.variables['tmax'][stn_idx])
github RadioAstronomySoftwareGroup / pyuvsim / pyuvsim / mpi.py
    def _counter_thread(self):
        incr = array('i', [0])
        ival = array('i', [0])
        status = MPI.Status()
        while True:  # server loop
            self.comm.Recv([incr, MPI.INT],
                           MPI.ANY_SOURCE, MPI.ANY_TAG,
                           status)
            if status.Get_tag() == 1:
                return
            self.comm.Send([ival, MPI.INT],
                           status.Get_source(), self.count_rank)
            ival[0] += incr[0]
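_counter_thread above is the server half of a shared counter: it accepts an increment from any rank with any tag, replies with the current value, and returns when it sees tag 1. A client for such a counter might look like the hypothetical helper below (comm, counter_rank, and count_rank are assumed to be supplied by the caller; this is not the actual pyuvsim API):

from array import array
from mpi4py import MPI

def next_count(comm, counter_rank, count_rank, increment=1):
    # Hypothetical client: send an increment, read back the value held before it was applied.
    incr = array('i', [increment])
    ival = array('i', [0])
    comm.Send([incr, MPI.INT], dest=counter_rank, tag=0)   # tag 1 is reserved for shutdown
    comm.Recv([ival, MPI.INT], source=counter_rank, tag=count_rank)
    return ival[0]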
github jaredwo / topowx / twx / interp / mpi_xval_tair_overall.py
def proc_work(params,rank):
    
    status = MPI.Status()
    
    #xval = XvalTairOverall(params[P_PATH_DB], params[P_VARNAME])
    xval = XvalGwrNormOverall(params[P_PATH_DB], params[P_VARNAME])
        
    while 1:
    
        stn_id = MPI.COMM_WORLD.recv(source=RANK_COORD,tag=MPI.ANY_TAG,status=status)
        
        if status.tag == TAG_STOPWORK:
            MPI.COMM_WORLD.send([None]*7, dest=RANK_WRITE, tag=TAG_STOPWORK)
            print "".join(["Worker ",str(rank),": Finished"]) 
            return 0
        else:
            
            try:
                
                biasNorm,maeNorm,maeDly,biasDly,r2Dly,seNorm = xval.run_xval(stn_id)
                                            
            except Exception as e:
            
                print "".join(["ERROR: Worker ",str(rank),": could not xval ",stn_id,str(e)])
                
                ndata = np.ones(13)*netCDF4.default_fillvals['f8']
github Rapid-Design-of-Systems-Laboratory / beluga / beluga / parallelprocessing / Worker.py
    def listenMPI(self):
        # Listen over MPI
        if self.mode == 'HOST':
            status = MPI.Status()  # HOST mode needs its own Status object for the recv below
            data = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
            return data

        elif self.mode == 'MPI':
            signal.signal(signal.SIGCHLD, self.reaper)
            while self.started:
                # Receive some data from another node
                status = MPI.Status()  # get MPI status object. Does this need to be inside the loop?
                data = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)

                # Fork the thread to handle calculations and communication concurrently
                # TODO: Fix this garbled mess
                if self.tagtocommand[status.Get_tag()] != 'STOP_WORKER':
                    PID = os.fork()
                else:
                    PID = 0
github jaredwo / topowx / twx / infill / mpi_infill_lst.py
def proc_work(params,rank):

    status = MPI.Status()
    
    stnda = StationSerialDataDb(params[P_PATH_STNDB], params[P_TAIR_VAR],stn_dtype=DTYPE_STN_BASIC)
    
    timeVarsSet = False
    
    imputeNorms = {}
    mthMasks = []
            
    while 1:
        
        r,c,tileNum = MPI.COMM_WORLD.recv(source=RANK_COORD, tag=MPI.ANY_TAG, status=status)
        
        if status.tag == TAG_STOPWORK:
            #MPI.COMM_WORLD.Send([np.zeros((params[P_CHCKSIZE_Y],params[P_CHCKSIZE_X]),dtype=np.int16),MPI.INT],dest=RANK_WRITE,tag=TAG_STOPWORK) 
            MPI.COMM_WORLD.send([None]*4,dest=RANK_WRITE,tag=TAG_STOPWORK)
            print "".join(["Worker ",str(rank),": Finished"]) 
            return 0
        else:
            
            tileName = params[P_MODIS_TILES][tileNum]
            
            if imputeNorms.has_key(tileName):
                imp = imputeNorms[tileName]
            else:
                impData = LstData(params[P_PATH_NC_STACK], tileName, params[P_LST_VAR],params[P_TAIR_VAR],stnda)
                imp = ImputeLstNorm(impData)
                imputeNorms[tileName] = imp