How to use the mpi4py.MPI.Status class in mpi4py

To help you get started, we’ve selected a few mpi4py examples based on popular ways it is used in public projects.

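Before the project examples, here is a minimal, self-contained sketch of the basic pattern: pass a Status object to a receive call, and mpi4py fills it in with metadata (source rank, tag, error code) about the message that was matched. This sketch assumes a standard mpi4py installation and a launch with at least two ranks, e.g. mpirun -n 2 python example.py.

from mpi4py import MPI

comm = MPI.COMM_WORLD

if comm.rank == 0:
    # Send a pickled Python object to rank 1 with an explicit tag.
    comm.send({"payload": 42}, dest=1, tag=7)
elif comm.rank == 1:
    # recv fills the Status object with metadata about the message it
    # matched, which matters when using wildcard source/tag values.
    status = MPI.Status()
    data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
    print(data, status.Get_source(), status.Get_tag())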

github chainer / chainer / tests / chainermn_tests / communicator_tests / test_communicator.py View on GitHub
def check_send_recv_obj(self, x, tag=0,
                            use_any_recv=True, use_status=False):
        if self.communicator.rank == 0:
            self.communicator.send_obj(x, dest=1, tag=tag)
            y = x

        elif self.communicator.rank == 1:
            status = None
            if use_status:
                status = mpi4py.MPI.Status()

            if use_any_recv:
                y = self.communicator.recv_obj(source=0,
                                               status=status)
            else:
                y = self.communicator.recv_obj(source=0,
                                               tag=tag,
                                               status=status)

            if use_status:
                status_src = status.Get_source()
                self.assertEqual(0, status_src)
                status_tag = status.Get_tag()
                self.assertEqual(tag, status_tag)

        self.assertEqual(x, y)
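In this test, a Status instance is created only when use_status is set and is passed through recv_obj; the receiving rank then calls Get_source() and Get_tag() to verify that the message metadata matches the sending rank and tag.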
github jaredwo / topowx / twx / interp / mpi_topo_disect.py View on GitHub
def work_proc(params,rank):
    
    rDEM = TopoDisectDEM(params[P_PATH_DEM])
        
    pt = np.zeros(2,dtype=np.int32)
    result = np.zeros(3)
    status = MPI.Status()
        
    print "".join(["work_proc ",str(rank),": ready to receive work"])
    while 1:
        
        MPI.COMM_WORLD.Recv([pt,MPI.INT],source=RANK_COORD, tag=MPI.ANY_TAG,status=status)
            
        if status.tag == TAG_STOPWORK:
            MPI.COMM_WORLD.Send(result, dest=RANK_WRITE, tag=TAG_STOPWORK) 
            return 0
        else:
            
            r,c = pt
            
            #lon,lat = rDEM.getGeoLocation(c, r)
            #elev = rDEM.a[r,c]
            tdi = rDEM.get_tdi(r,c, params[P_WIN_SIZES])
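This worker loop receives with tag=MPI.ANY_TAG and then inspects the status object's tag to decide whether to shut down. Below is a minimal, self-contained sketch of the same stop-tag pattern; the TAG_WORK and TAG_STOP constants are hypothetical values chosen for illustration.

from mpi4py import MPI

TAG_WORK, TAG_STOP = 1, 2  # hypothetical tag values for illustration
comm = MPI.COMM_WORLD
status = MPI.Status()

if comm.rank == 0:
    # Coordinator: hand out work items, then signal shutdown via the tag.
    for item in range(3):
        comm.send(item, dest=1, tag=TAG_WORK)
    comm.send(None, dest=1, tag=TAG_STOP)
elif comm.rank == 1:
    # Worker: loop until a received message carries the stop tag.
    while True:
        item = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
        if status.Get_tag() == TAG_STOP:
            break
        print("processing", item)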
github jaredwo / topowx / scripts / step14_mpi_infill_stn_normals.py View on GitHub
def proc_write(twx_cfg, start_ymd, end_ymd, nwrkers):

    status = MPI.Status()
    nwrkrs_done = 0
    stn_da = StationDataDb(twx_cfg.fpath_stndata_nc_tair_homog,
                           (start_ymd, end_ymd), mode="r+")
    
    mths = np.arange(1, 13)
    
    for mth in mths:
        
        for varname in ['tmin', 'tmax']:
                    
            varname_mean = get_mean_varname(varname, mth)
            varname_vari = get_variance_varname(varname, mth)
        
            stn_da.add_stn_variable(varname_mean, varname_mean, "C", 'f8')
            stn_da.add_stn_variable(varname_vari, varname_vari, "C**2", 'f8')
github hvasbath / beat / beat / sampler / pt.py View on GitHub
def _sample():
    # Define MPI message tags
    tags = distributed.enum('READY', 'INIT', 'DONE', 'EXIT', 'SAMPLE', 'BETA')

    # Initializations and preliminaries
    comm = MPI.COMM_WORLD
    status = MPI.Status()

    logger.debug('communicator size %i' % comm.size)
    model = load_objects(distributed.pymc_model_name)
    with model:
        for i in range(comm.size):
            if i == comm.rank:
                logger.info('Working %i' % i)

        comm.Barrier()

        if comm.rank == 0:
            logger.info('Loading passed arguments ...')

            arguments = load_objects(distributed.mpiargs_name)
            args = [model] + arguments
github jaredwo / topowx / twx / infill / mpi_infill_prcp_normals.py View on GitHub
def proc_work(params,rank):
    
    status = MPI.Status()
    
    stn_da = StationDataDb(params[P_PATH_DB],(params[P_START_YMD],params[P_END_YMD]))
    days = stn_da.days
    
    mth_masks = build_mth_masks(days)
    mthbuf_masks = build_mth_masks(days,MTH_BUFFER)
    
    bcast_msg = None
    bcast_msg = MPI.COMM_WORLD.bcast(bcast_msg, root=RANK_COORD)
    print "".join(["Worker ",str(rank),": Received broadcast msg"])
    
    while 1:
    
        stn_id = MPI.COMM_WORLD.recv(source=RANK_COORD,tag=MPI.ANY_TAG,status=status)
        
        if status.tag == TAG_STOPWORK:
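Note the API difference from the mpi_topo_disect.py example above: the lowercase comm.recv receives a pickled Python object, while the uppercase Recv fills a preallocated buffer such as [pt, MPI.INT]. Both variants accept a status keyword and populate the same Status object.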
github jaredwo / topowx / twx / interp / mpi_xval_tair_dyncparams.py View on GitHub
def proc_work(params,rank):
    
    status = MPI.Status()

    stn_da = StationSerialDataDb(params[P_PATH_DB], params[P_VARNAME])
    stn_da_xval = StationSerialDataDb(params[P_PATH_DB_XVAL], params[P_VARNAME_XVAL])
    mask_stns = it.build_stn_mask(stn_da.stn_ids, params[P_PATH_RMSTNS])    
    stn_slct = station_select(stn_da, stn_mask=mask_stns, rm_zero_dist_stns=True)
    
    p_mean = it.load_neon_params_mean(params[P_PATH_PARAMS_MEAN])
    p_anom = it.load_neon_params_anom(params[P_PATH_PARAMS_ANOM])
       
    it.init_krig_R_env(params[P_PATH_RLIB])
    
#    krig_params = it.KrigParamsDynamic(stn_slct, p_mean, NEON)
#    krig = it.KrigTair(stn_slct, krig_params)
    
    krig_params = it.KrigParamsDynamic2(stn_slct, p_mean, NEON)
    krig = it.KrigTair2(stn_slct, krig_params)
github jaredwo / topowx / scripts / step16_mpi_infill_stn_daily.py View on GitHub
def proc_work(twx_cfg, start_ymd, end_ymd, params_ppca, rank):

    status = MPI.Status()

    stn_da = StationDataDb(twx_cfg.fpath_stndata_nc_tair_homog,
                           (start_ymd, end_ymd))
    days = stn_da.days
    ndays = days.size

    empty_fill = np.ones(ndays, dtype=np.float32) * netCDF4.default_fillvals['f4']
    empty_flags = np.ones(ndays, dtype=np.int8) * netCDF4.default_fillvals['i1']
    empty_bias = netCDF4.default_fillvals['f4']
    empty_mae = netCDF4.default_fillvals['f4']

    ds_nnr = NNRNghData(twx_cfg.path_reanalysis_namerica,
                        (start_ymd, end_ymd))
    
    mths = np.arange(1, 13)
    mth_masks = [stn_da.days[MONTH] == mth for mth in mths]
github ilastik / lazyflow / lazyflow / distributed / TaskOrchestrator.py View on GitHub
def get_finished_worker(self) -> _Worker[TASK_DATUM]:
        status = MPI.Status()
        self.comm.recv(source=MPI.ANY_SOURCE, tag=Tags.TASK_DONE, status=status)
        return self.workers[status.Get_source()]
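Here Status is paired with MPI.ANY_SOURCE: the call blocks until any worker sends a TASK_DONE message, and status.Get_source() then identifies which worker it was so the orchestrator can look it up in self.workers.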
github funcx-faas / funcX / funcx / executor / parsl / executors / extreme_scale / mpi_worker_pool.py View on GitHub
def recv_task_request_from_workers(self):
        """ Receives 1 task request from MPI comm

        Returns:
        --------
            worker_rank (int): rank of the worker that sent the request
        """
        info = MPI.Status()
        comm.recv(source=MPI.ANY_SOURCE, tag=TASK_REQUEST_TAG, status=info)
        worker_rank = info.Get_source()
        logger.info("Received task request from worker:{}".format(worker_rank))
        return worker_rank
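This is the same wildcard-source idiom as in the previous example: when receiving from MPI.ANY_SOURCE, the Status object (named info here) is what lets the code recover the rank of the worker that actually sent the request.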