How to use the mpi4py.MPI.COMM_WORLD.Get_size function in mpi4py

To help you get started, we’ve selected a few mpi4py examples that show popular ways MPI.COMM_WORLD.Get_size is used in public projects.

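For orientation, here is a minimal, self-contained sketch of the call itself: Get_size() returns the total number of processes in the MPI.COMM_WORLD communicator, and Get_rank() returns this process's index within it. The script name and launch command in the comment are only illustrative.

from mpi4py import MPI

comm = MPI.COMM_WORLD
size = comm.Get_size()   # total number of MPI processes in the communicator
rank = comm.Get_rank()   # index of this process, from 0 to size - 1

print("rank {} of {}".format(rank, size))

# Typically launched with an MPI launcher, e.g.:
#   mpiexec -n 4 python get_size_demo.py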

github tbenthompson / tectosaur / tests / test_table_lookup.py View on Github
def benchmark_co_lookup_pts():
    n_cores = mpi4py.MPI.COMM_WORLD.Get_size()

    async def submit(w):
        n_chunks = n_cores

        # async with tsk.Profiler(w, range(n_chunks)):
        # n = 4000
        # corners = [[-1, -1, 0], [-1, 1, 0], [1, 1, 0], [1, -1, 0]]
        # m = make_rect(n, n, corners)
        # tri_pts = m[0][m[1]]
        # np.save('tripts.npy', tri_pts)
        tri_pts = np.load('tripts.npy')
        # time.sleep(2.0)
        chunk_bounds = np.linspace(0, tri_pts.shape[0], n_chunks + 1)

        t2 = Timer(just_print = True)
        with taskloaf.shmem.alloc_shmem(tri_pts) as sm_filepath:
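
The snippet above is cut off by the source page, but the pattern it demonstrates is sizing the number of work chunks from the process count and splitting an array by index boundaries. A standalone sketch of that idea (the array size and variable names are illustrative):

import numpy as np
import mpi4py.MPI

n_cores = mpi4py.MPI.COMM_WORLD.Get_size()
n_rows = 10000                                    # illustrative data size
chunk_bounds = np.linspace(0, n_rows, n_cores + 1).astype(int)
# chunk i covers rows chunk_bounds[i]:chunk_bounds[i + 1]
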
github Gjacquenot / Puma-EM / code / assemble_Z_near.py View on Github
def Z_nearCRS_Assembling(processNumber_to_ChunksNumbers, chunkNumber_to_cubesNumbers, MAX_BLOCK_SIZE, C, ELEM_TYPE, Z_TMP_ELEM_TYPE, pathToReadFrom, pathToSaveTo):
    """this function computes Z_CFIE_near by slices and stores them on the disk.
    The maximum size of a block is given by the variable MAX_BLOCK_SIZE in MegaBytes"""
    # test on MAX_BLOCK_SIZE
    if ( (MAX_BLOCK_SIZE<0.1) | (MAX_BLOCK_SIZE>10000.) ):
        print("Error: MAX_BLOCK_SIZE too big or too small")
        sys.exit(1)
    num_procs = MPI.COMM_WORLD.Get_size()
    my_id = MPI.COMM_WORLD.Get_rank()
    NAME = "Z_CFIE_near"
    if (my_id==0):
        print("Number of leaf cubes = " + str(C))
        print("assembling Z_CFIE_near chunks...")
    chunkNumbers = processNumber_to_ChunksNumbers[my_id]
    for chunkNumber in chunkNumbers:
        cubesNumbers = chunkNumber_to_cubesNumbers[chunkNumber]
        pathToReadFromChunk = os.path.join(pathToReadFrom, "chunk" + str(chunkNumber))
        Z_CFIE_near, src_RWG_numbers, rowIndexToColumnIndexes, test_RWG_numbers = chunk_of_Z_nearCRS_Assembling(cubesNumbers, ELEM_TYPE, Z_TMP_ELEM_TYPE, pathToReadFromChunk)
        writeToDisk_chunk_of_Z_sparse(pathToSaveTo, NAME, Z_CFIE_near, src_RWG_numbers, rowIndexToColumnIndexes, test_RWG_numbers, chunkNumber)
        del Z_CFIE_near, src_RWG_numbers, rowIndexToColumnIndexes, test_RWG_numbers
        commands.getoutput("rm -rf " + os.path.join(pathToReadFromChunk))
    # we write the chunks numbers of the process
    writeASCIIBlitzArrayToDisk(array(chunkNumbers).astype('i'), os.path.join(pathToSaveTo, 'chunkNumbers.txt'))
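
Here Get_size() and Get_rank() identify the process so it can pull its own chunk list from a precomputed mapping. When no such mapping exists, a common lightweight alternative is a round-robin split derived directly from the communicator size; the sketch below assumes chunks are interchangeable and the chunk count is illustrative:

from mpi4py import MPI

num_procs = MPI.COMM_WORLD.Get_size()
my_id = MPI.COMM_WORLD.Get_rank()

n_chunks = 240                                        # illustrative total chunk count
my_chunks = list(range(my_id, n_chunks, num_procs))   # chunks handled by this rank
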
github natj / runko / projects / shocks / pic.py View on Github
                    yee.ex[l, m, n] = 0.0
                    yee.ey[l, m, n] = -beta * yee.bz[l, m, n]
                    yee.ez[l, m, n] = beta * yee.by[l, m, n]
    return


if __name__ == "__main__":

    # --------------------------------------------------
    # initial setup
    do_print = False
    if MPI.COMM_WORLD.Get_rank() == 0:
        do_print = True

    if do_print:
        print("Running pic.py with {} MPI processes.".format(MPI.COMM_WORLD.Get_size()))

    # --------------------------------------------------
    # Timer for profiling
    timer = pytools.Timer()

    timer.start("total")
    timer.start("init")
    timer.do_print = do_print

    # --------------------------------------------------
    # parse command line arguments
    args = pytools.parse_args()

    # create conf object with simulation parameters based on them
    conf = Configuration(args.conf_filename, do_print=do_print)
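
The do_print flag above restricts console output to rank 0 so messages are not repeated once per process. The same idiom can be wrapped in a small helper (hypothetical, not part of runko):

from mpi4py import MPI

def print0(*args, **kwargs):
    # Only rank 0 prints; all other ranks stay silent.
    if MPI.COMM_WORLD.Get_rank() == 0:
        print(*args, **kwargs)

print0("Running with {} MPI processes.".format(MPI.COMM_WORLD.Get_size()))
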
github Libensemble / libensemble / examples / tutorials / tutorial_calling_mpi.py View on Github
import numpy as np
import matplotlib.pyplot as plt
from libensemble.libE import libE
from libensemble.utils import add_unique_random_streams
from tutorial_gen import gen_random_sample
from tutorial_sim import sim_find_sine
from mpi4py import MPI

libE_specs = {'comms': 'mpi'}                   # 'nworkers' removed, 'comms' now 'mpi'

nworkers = MPI.COMM_WORLD.Get_size() - 1        # one process belongs to manager
is_master = (MPI.COMM_WORLD.Get_rank() == 0)    # master process has MPI rank 0

gen_specs = {'gen_f': gen_random_sample,        # Our generator function
             'out': [('x', float, (1,))],       # gen_f output (name, type, size).
             'user': {'lower': np.array([-3]),  # random sampling lower bound
                      'upper': np.array([3]),   # random sampling upper bound
                      'gen_batch_size': 5       # number of values gen_f will generate per call
                      }
             }

sim_specs = {'sim_f': sim_find_sine,            # Our simulator function
             'in': ['x'],                       # Input field names. 'x' from gen_f output
             'out': [('y', float)]}             # sim_f output. 'y' = sine('x')

persis_info = add_unique_random_streams({}, nworkers+1)  # Initialize manager/worker random streams
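
With comms set to 'mpi', rank 0 acts as the libEnsemble manager, so the usable worker count is the world size minus one. A compact restatement of that accounting (the launch command is illustrative):

from mpi4py import MPI

size = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()

nworkers = size - 1          # rank 0 is the manager; the rest are workers
is_master = rank == 0

# e.g. `mpiexec -n 5 python tutorial_calling_mpi.py` gives 1 manager + 4 workers
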
github threeML / threeML / threeML / bayesian / bayesian_analysis.py View on Github
try:

    import chainconsumer

except:

    has_chainconsumer = False

else:

    has_chainconsumer = True

try:

    # see if we have mpi and/or are using parallel

    from mpi4py import MPI
    if MPI.COMM_WORLD.Get_size() > 1:    # need parallel capabilities
        using_mpi = True

        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()

    else:

        using_mpi = False
except:

    using_mpi = False

import numpy as np
import collections
import math
import os
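
The guarded import above only flips using_mpi on when mpi4py is importable and the job was actually started on more than one process. A standalone sketch of that check:

try:
    from mpi4py import MPI
    using_mpi = MPI.COMM_WORLD.Get_size() > 1   # parallel only if more than one process
    rank = MPI.COMM_WORLD.Get_rank() if using_mpi else 0
except ImportError:
    using_mpi = False
    rank = 0
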
github Pyomo / pyomo / pyomo / contrib / benders / benders_cuts.py View on Github
def __init__(self, component):
        if not mpi4py_available:
            raise ImportError('BendersCutGenerator requires mpi4py.')
        if not numpy_available:
            raise ImportError('BendersCutGenerator requires numpy.')
        _BlockData.__init__(self, component)
        self.num_subproblems_by_rank = np.zeros(MPI.COMM_WORLD.Get_size())
        self.subproblems = list()
        self.complicating_vars_maps = list()
        self.master_vars = list()
        self.master_vars_indices = pe.ComponentMap()
        self.master_etas = list()
        self.cuts = None
        self.subproblem_solvers = list()
        self.tol = None
        self.all_master_etas = list()
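
num_subproblems_by_rank is sized with one slot per MPI rank. A typical way such an array gets used (illustrative, not Pyomo's actual logic) is to record local work and then sum the slots across ranks:

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
counts = np.zeros(comm.Get_size())
counts[comm.Get_rank()] += 1                 # e.g. one subproblem on this rank

totals = np.zeros_like(counts)
comm.Allreduce(counts, totals, op=MPI.SUM)   # every rank now sees the full tally
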
github jaredwo / topowx / twx / infill / mpi_infill_optim_prcp_norm.py View on Github
    perr.missing_value = netCDF4.default_fillvals['f4']
    
    perr = ds.createVariable('fit_mean','f4',('stn_id',))
    perr.missing_value = netCDF4.default_fillvals['f4']
    
    ds.sync()
    
    return ds

if __name__ == '__main__':
        
    np.seterr(all='raise')
    np.seterr(under='ignore')
    
    rank = MPI.COMM_WORLD.Get_rank()
    nsize = MPI.COMM_WORLD.Get_size()

    params = {}
    params[P_PATH_DB] = '/projects/daymet2/station_data/all/all.nc'
    params[P_PATH_OUT] = '/projects/daymet2/station_data/infill/xval_infill_prcp_norm4.nc'
    params[P_PATH_POR] = '/projects/daymet2/station_data/all/all_por.csv'
    params[P_PATH_NEON] = '/projects/daymet2/dem/NEON_DOMAINS/neon_mask3.nc'
    params[P_PATH_GHCN_STNS] = '/projects/daymet2/station_data/ghcn/ghcnd-stations.txt'
    params[P_MIN_POR_PCT] = 0.90
    params[P_STNS_PER_RGN] = 10
    params[P_NYRS_MOD] = 5
    params[P_START_YMD] = None #19480101
    params[P_END_YMD] = None #20111231
    
    if rank == RANK_COORD:
        
        params[P_EXCLUDE_STNIDS] = np.array([])
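
Here the rank decides which role a process plays: one coordinator rank hands out work while the other ranks process it. A stripped-down sketch of that split (the constant value is illustrative):

from mpi4py import MPI

RANK_COORD = 0                      # illustrative coordinator rank
rank = MPI.COMM_WORLD.Get_rank()
nsize = MPI.COMM_WORLD.Get_size()

if rank == RANK_COORD:
    print("coordinating %d other processes" % (nsize - 1))
else:
    pass  # worker logic would go here
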
github deephyper / deephyper / deephyper / search / nas / baselines / ddpg / ddpg_learner.py View on Github
        if self.param_noise is None:
            return 0.

        # Perturb a separate copy of the policy to adjust the scale for the next "real" perturbation.
        batch = self.memory.sample(batch_size=self.batch_size)
        self.sess.run(self.perturb_adaptive_policy_ops, feed_dict={
            self.param_noise_stddev: self.param_noise.current_stddev,
        })
        distance = self.sess.run(self.adaptive_policy_distance, feed_dict={
            self.obs0: batch['obs0'],
            self.param_noise_stddev: self.param_noise.current_stddev,
        })

        if MPI is not None:
            mean_distance = MPI.COMM_WORLD.allreduce(distance, op=MPI.SUM) / MPI.COMM_WORLD.Get_size()
        else:
            mean_distance = distance

        self.param_noise.adapt(mean_distance)
        return mean_distance
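
Dividing the allreduce'd sum by Get_size() gives the mean of a per-rank scalar across all processes; that is how the adapted distance above becomes a global average. In isolation:

from mpi4py import MPI

comm = MPI.COMM_WORLD
local_distance = float(comm.Get_rank() + 1)      # illustrative per-rank value

mean_distance = comm.allreduce(local_distance, op=MPI.SUM) / comm.Get_size()
print("rank {}: mean = {}".format(comm.Get_rank(), mean_distance))
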
github ucbdrive / spc / legacy / ddpg / baselines / trpo_mpi / trpo_mpi.py View on Github
def learn(env, policy_fn, *,
        timesteps_per_batch, # what to train on
        max_kl, cg_iters,
        gamma, lam, # advantage estimation
        entcoeff=0.0,
        cg_damping=1e-2,
        vf_stepsize=3e-4,
        vf_iters =3,
        max_timesteps=0, max_episodes=0, max_iters=0,  # time constraint
        callback=None
        ):
    nworkers = MPI.COMM_WORLD.Get_size()
    rank = MPI.COMM_WORLD.Get_rank()
    np.set_printoptions(precision=3)
    # Setup losses and stuff
    # ----------------------------------------
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_fn("pi", ob_space, ac_space)
    oldpi = policy_fn("oldpi", ob_space, ac_space)
    atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
    ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return

    ob = U.get_placeholder_cached(name="ob")
    ac = pi.pdtype.sample_placeholder([None])

    kloldnew = oldpi.pd.kl(pi.pd)
    ent = pi.pd.entropy()
github jaredwo / topowx / twx / interp / mpi_interp_tair.py View on Github
            MPI.COMM_WORLD.Send([wrk_chk,MPI.DOUBLE], dest=dest, tag=tile_num)
    
    except StopIteration:
        pass
        
    for w in np.arange(nwrkers):
        MPI.COMM_WORLD.Send([wrk_chk,MPI.DOUBLE], dest=w+N_NON_WRKRS, tag=TAG_STOPWORK)
    print "coord_proc: done"

if __name__ == '__main__':
    
    np.seterr(all='raise')
    np.seterr(under='ignore')
    
    rank = MPI.COMM_WORLD.Get_rank()
    nsize = MPI.COMM_WORLD.Get_size()

    params = {}
    
    #CONUS SCALE RUN
    ############################################################################################
    params[P_PATH_DB_TMIN] = '/projects/daymet2/station_data/infill/serial_fnl/serial_tmin.nc'
    params[P_PATH_DB_TMAX] = '/projects/daymet2/station_data/infill/serial_fnl/serial_tmax.nc'

    gridPath = '/projects/daymet2/dem/interp_grids/conus/ncdf/'
    params[P_PATH_MASK] = "".join([gridPath,'fnl_mask.nc'])
    params[P_PATH_ELEV] = "".join([gridPath,'fnl_elev.nc'])
    params[P_PATH_TDI] = "".join([gridPath,'fnl_tdi.nc'])
    params[P_PATH_LST_TMIN] = ["".join([gridPath,'fnl_lst_tmin%02d.nc'%mth]) for mth in np.arange(1,13)]
    params[P_PATH_LST_TMAX] = ["".join([gridPath,'fnl_lst_tmax%02d.nc'%mth]) for mth in np.arange(1,13)]
    params[P_PATH_NEON] = "".join([gridPath,'fnl_climdiv.nc'])