How to use the mpi4py.MPI.COMM_WORLD.size attribute in mpi4py

To help you get started, we've selected a few mpi4py examples based on popular ways MPI.COMM_WORLD.size is used in public projects.

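Before diving into the project snippets, here is a minimal, self-contained sketch (not taken from any of the projects below) of what MPI.COMM_WORLD.size reports: the total number of ranks the program was launched with, while MPI.COMM_WORLD.rank identifies the calling process.

from mpi4py import MPI

comm = MPI.COMM_WORLD
# size: total number of MPI processes; rank: 0-based index of this process.
print("I am rank %d of %d" % (comm.rank, comm.size))

Launched with, say, mpiexec -n 4 python hello_mpi.py (the script name is arbitrary), each of the four processes prints its own rank and the shared size of 4.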

github mathLab / RBniCS / tests / unit / test_separated_parametrized_form_mixed.py
_solution_to_problem_map[expr10] = Problem("problem10")
_solution_to_problem_map[expr11] = Problem("problem11")
_solution_to_problem_map[expr12] = Problem("problem12")
_solution_to_problem_map[expr13] = Problem("problem13")

u, p = split(TrialFunction(V))
v, q = split(TestFunction(V))

scalar_trial = TrialFunction(scalar_V)
scalar_test = TestFunction(scalar_V)
vector_trial = TrialFunction(vector_V)
vector_test = TestFunction(vector_V)


# Fixtures
skip_in_parallel = pytest.mark.skipif(MPI.COMM_WORLD.size > 1, reason="Numbering of functions changes in parallel.")


# Tests
@skip_in_parallel
@enable_separated_parametrized_form_logging
@pytest.mark.dependency(name="1")
def test_separated_parametrized_forms_mixed_1():
    a1 = (inner(expr3 * grad(u), grad(v)) * dx + inner(grad(u) * expr2, v) * dx + expr1 * inner(u, v) * dx
          - p * tr(expr4 * grad(v)) * dx - expr1 * q * div(u) * dx - expr2[0] * p * q * dx)
    a1_sep = SeparatedParametrizedForm(a1)
    test_logger.log(DEBUG, "*** ###              FORM 1             ### ***")
    test_logger.log(DEBUG, "This is a basic mixed parametrized form, with all parametrized coefficients")
    a1_sep.separate()
    test_logger.log(DEBUG, "\tLen coefficients:")
    test_logger.log(DEBUG, "\t\t" + str(len(a1_sep.coefficients)))
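The RBniCS test above uses MPI.COMM_WORLD.size inside a pytest skipif marker so that tests which assume a serial numbering are skipped when run under MPI. A standalone sketch of the same pattern (the test name and reason are illustrative):

import pytest
from mpi4py import MPI

# Skip this test whenever more than one MPI process is running.
skip_in_parallel = pytest.mark.skipif(
    MPI.COMM_WORLD.size > 1, reason="Test assumes a serial run.")

@skip_in_parallel
def test_serial_only_behaviour():
    assert MPI.COMM_WORLD.size == 1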
github krischer / LASIF / lasif / components / actions.py
"""
            return [container[_i::count] for _i in range(count)]

        # Only rank 0 needs to know what has to be processed.
        if MPI.COMM_WORLD.rank == 0:
            # All stations for the given iteration and event.
            stations = \
                set(iteration.events[event["event_name"]]["stations"].keys())

            # Get all stations that currently do not have windows.
            windows = self.comm.windows.get(event, iteration).list()
            stations_without_windows = \
                stations - set(map(channel2station, windows))
            total_size = len(stations_without_windows)
            stations_without_windows = split(list(stations_without_windows),
                                             MPI.COMM_WORLD.size)

            # Initialize station cache on rank 0.
            self.comm.stations.file_count
            # Also initialize the processed and synthetic data caches. They
            # have to exist before the other ranks can access them.
            try:
                self.comm.waveforms.get_waveform_cache(
                    event["event_name"], "processed", iteration.processing_tag)
            except LASIFNotFoundError:
                pass
            try:
                self.comm.waveforms.get_waveform_cache(
                    event["event_name"], "synthetic", iteration)
            except LASIFNotFoundError:
                pass
        else:
github krischer / LASIF / lasif / tools / parallel_helpers.py
        Simple and elegant function splitting a container into count
        equal chunks.

        Order is not preserved, but for the use case at hand this is
        potentially an advantage: data sitting in the same folder has a
        higher chance of being processed at the same time, so the disk
        head does not have to jump around as much. Of course this is
        very architecture dependent.
        """
        return [container[_i::count] for _i in range(count)]

    # Rank zero collects what needs to be done and distributes it across
    # all cores.
    if MPI.COMM_WORLD.rank == 0:
        total_length = len(items)
        items = split(items, MPI.COMM_WORLD.size)
    else:
        items = None

    # Now each rank knows what it has to process. This still works
    # nicely with only one core, the overhead is negligible.
    items = MPI.COMM_WORLD.scatter(items, root=0)

    results = []

    for _i, item in enumerate(items):
        results.append(_execute_wrapped_function(function, item))

        if MPI.COMM_WORLD.rank == 0:
            print("Approximately %i of %i items have been processed." % (
                min((_i + 1) * MPI.COMM_WORLD.size, total_length),
                total_length))
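Both LASIF snippets above follow the same idiom: rank 0 splits the work into MPI.COMM_WORLD.size round-robin chunks, every other rank passes None, and scatter() hands one chunk to each rank. A minimal, self-contained sketch of that split-and-scatter pattern (the work items here are placeholders):

from mpi4py import MPI

comm = MPI.COMM_WORLD

def split(container, count):
    # Round-robin split into `count` chunks, as in the helper above.
    return [container[_i::count] for _i in range(count)]

if comm.rank == 0:
    items = split(list(range(20)), comm.size)  # placeholder work items
else:
    items = None

# scatter() expects a list of length comm.size on the root rank only.
my_items = comm.scatter(items, root=0)
print("rank %d received %s" % (comm.rank, my_items))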
github mfem / PyMFEM / examples / ex7p.py
'''
   MFEM example 7

   See the C++ version in the MFEM library for more detail.
'''
from mfem import path
import mfem.par as mfem
from mfem.par import intArray
from mpi4py import MPI
from os.path import expanduser, join
import numpy as np
from numpy import sin, cos, exp


num_proc = MPI.COMM_WORLD.size
myid     = MPI.COMM_WORLD.rank

elem_type = 1
ref_levels = 2
par_ref_levels = 2
amr = 0
order = 2
always_snap = False

if elem_type == 1:
    Nvert = 8; Nelem = 6
else:
    Nvert = 6; Nelem = 8

    
mesh = mfem.Mesh(2, Nvert, Nelem, 0, 3)
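Here num_proc and myid are simply the communicator size and the rank of the calling process, which the rest of the example uses to coordinate the parallel run. A generic sketch of one common use of such a pair, block-decomposing an index range across ranks (the range itself is made up):

import numpy as np
from mpi4py import MPI

num_proc = MPI.COMM_WORLD.size
myid = MPI.COMM_WORLD.rank

# Give each rank a contiguous block of a global index range.
my_indices = np.array_split(np.arange(1000), num_proc)[myid]
print("rank %d of %d handles %d entries" % (myid, num_proc, len(my_indices)))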
github choderalab / yank / yank / driver.py
    if not (bool(options.receptor_pdb_filename) ^ bool(options.receptor_crd_filename) ^ bool(options.complex_pdb_filename) ^ bool(options.complex_crd_filename)):
        parser.error("Receptor coordinates must be specified through only one of --receptor_crd, --receptor_pdb, --complex_crd, or --complex_pdb.")    
    if not (options.complex_prmtop_filename):
        parser.error("Please specify --complex_prmtop [complex_prmtop_filename] argument.")

    # Initialize MPI if requested.
    if options.mpi:
        # Initialize MPI. 
        try:
            from mpi4py import MPI # MPI wrapper
            hostname = os.uname()[1]
            options.mpi = MPI.COMM_WORLD
            if not MPI.COMM_WORLD.rank == 0: 
                options.verbose = False
            MPI.COMM_WORLD.barrier()
            if MPI.COMM_WORLD.rank == 0: print("Initialized MPI on %d processes." % MPI.COMM_WORLD.size)
        except Exception as e:
            print(e)
            parser.error("Could not initialize MPI.")

    # Select simulation parameters.
    # TODO: Allow user selection or intelligent automated selection of simulation parameters.
    # NOTE: Simulation parameters are hard-coded for now.
    # NOTE: Simulation parameters will be different for explicit solvent.
    import simtk.openmm.app as app
    nonbondedMethod = app.NoCutoff
    implicitSolvent = app.OBC2
    constraints = app.HBonds
    removeCMMotion = False


    ligand_system = app.AmberPrmtopFile(options.ligand_prmtop_filename).createSystem(nonbondedMethod=nonbondedMethod, implicitSolvent=implicitSolvent, constraints=constraints, removeCMMotion=removeCMMotion)
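The YANK driver only reads MPI.COMM_WORLD.size for a startup report, after silencing verbose output on every rank except 0 and synchronizing with a barrier. A minimal sketch of that "root rank reports, everyone synchronizes" pattern (verbose stands in for whatever per-rank flag is being gated):

from mpi4py import MPI

verbose = MPI.COMM_WORLD.rank == 0   # only rank 0 is allowed to talk
MPI.COMM_WORLD.barrier()             # make sure every rank reached this point
if verbose:
    print("Initialized MPI on %d processes." % MPI.COMM_WORLD.size)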
github GeoscienceAustralia / COG-Conversion / dea_cogger / cog_conv_app.py
def _nth_by_mpi(iterator):
    """
    Use to split an iterator based on MPI pool size and rank of this process
    """
    from mpi4py import MPI
    job_size = MPI.COMM_WORLD.size  # Total number of processes
    job_rank = MPI.COMM_WORLD.rank  # Rank of this process
    for i, element in enumerate(iterator):
        if i % job_size == job_rank:
            yield element
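This helper round-robins an iterator by rank: each process keeps only the elements whose index is congruent to its own rank modulo MPI.COMM_WORLD.size, so the ranks together cover every element exactly once. A hypothetical usage sketch, re-declaring the generator above so it runs on its own (the file names and the "work" are placeholders):

from mpi4py import MPI

def _nth_by_mpi(iterator):
    # Keep every job_size-th element, offset by this process' rank.
    job_size = MPI.COMM_WORLD.size
    job_rank = MPI.COMM_WORLD.rank
    for i, element in enumerate(iterator):
        if i % job_size == job_rank:
            yield element

all_files = ["tile_%03d.tif" % n for n in range(12)]   # placeholder inputs
for path in _nth_by_mpi(all_files):
    print("rank %d would convert %s" % (MPI.COMM_WORLD.rank, path))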
github AcutronicRobotics / ros2learn / experiments / examples / deployment / restore_network_mlsh.py
            num_timesteps = 1e9
            seed = 1401
            rank = MPI.COMM_WORLD.Get_rank()
            sess = U.single_threaded_session()
            sess.__enter__()
            workerseed = seed + 1000 * MPI.COMM_WORLD.Get_rank()
            rank = MPI.COMM_WORLD.Get_rank()
            set_global_seeds(workerseed)

            # if rank != 0:
            #     logger.set_level(logger.DISABLED)
            # logger.log("rank %i" % MPI.COMM_WORLD.Get_rank())

            world_group = MPI.COMM_WORLD.Get_group()
            mygroup = rank % 10
            theta_group = world_group.Incl([x for x in range(MPI.COMM_WORLD.size) if (x % 10 == mygroup)])
            comm = MPI.COMM_WORLD.Create(theta_group)
            comm.Barrier()
            # comm = MPI.COMM_WORLD

            #master_robotics.start(callback, args=args, workerseed=workerseed, rank=rank, comm=comm)
            self.start(self.callback, workerseed=workerseed, rank=rank, comm=comm)
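This snippet and the two ros2learn snippets that follow use MPI.COMM_WORLD.size to decide which ranks belong together in a sub-communicator: a group is carved out of the world group with Incl() and turned into a new communicator with Create(). A standalone sketch of that pattern, here simply grouping ranks by parity so that every rank ends up in exactly one sub-communicator:

from mpi4py import MPI

world = MPI.COMM_WORLD
world_group = world.Get_group()

# Ranks with the same parity share a sub-communicator.
members = [r for r in range(world.size) if r % 2 == world.rank % 2]
sub_comm = world.Create(world_group.Incl(members))

sub_comm.Barrier()
print("world rank %d -> sub rank %d of %d" % (world.rank, sub_comm.rank, sub_comm.size))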
github AcutronicRobotics / ros2learn / experiments / examples / modular_scara_3dof_obstacles_v0 / train_mlsh_multiple_obstacles.py
    num_timesteps = 1e9
    seed = 1401
    rank = MPI.COMM_WORLD.Get_rank()
    sess = U.single_threaded_session()
    sess.__enter__()
    workerseed = seed + 1000 * MPI.COMM_WORLD.Get_rank()
    rank = MPI.COMM_WORLD.Get_rank()
    set_global_seeds(workerseed)

    # if rank != 0:
    #     logger.set_level(logger.DISABLED)
    # logger.log("rank %i" % MPI.COMM_WORLD.Get_rank())

    world_group = MPI.COMM_WORLD.Get_group()
    mygroup = rank % 10
    theta_group = world_group.Incl([x for x in range(MPI.COMM_WORLD.size) if (x % 10 == mygroup)])
    comm = MPI.COMM_WORLD.Create(theta_group)
    comm.Barrier()
    # comm = MPI.COMM_WORLD
    master_robotics.start(callback, env, savename, replay, macro_duration, num_subs,  num_rollouts, warmup_time, train_time, force_subpolicy, store, workerseed=workerseed, rank=rank, comm=comm)
github AcutronicRobotics / ros2learn / experiments / examples / modular_scara_3dof_v3 / run_mlsh.py
    num_timesteps = 1e9
    seed = 1401
    rank = MPI.COMM_WORLD.Get_rank()
    sess = U.single_threaded_session()
    sess.__enter__()
    workerseed = seed + 1000 * MPI.COMM_WORLD.Get_rank()
    rank = MPI.COMM_WORLD.Get_rank()
    set_global_seeds(workerseed)

    # if rank != 0:
    #     logger.set_level(logger.DISABLED)
    # logger.log("rank %i" % MPI.COMM_WORLD.Get_rank())

    world_group = MPI.COMM_WORLD.Get_group()
    mygroup = rank % 10
    theta_group = world_group.Incl([x for x in range(MPI.COMM_WORLD.size) if (x % 10 == mygroup)])
    comm = MPI.COMM_WORLD.Create(theta_group)
    comm.Barrier()
    # comm = MPI.COMM_WORLD

    #master_robotics.start(callback, args=args, workerseed=workerseed, rank=rank, comm=comm)
    start(callback,sess, workerseed=workerseed, rank=rank, comm=comm)
github SeismicData / pyasdf / pyasdf / utils.py
def is_mpi_env():
    """
    Returns True if the current environment is an MPI environment.
    """
    try:
        import mpi4py
    except ImportError:
        return False

    try:
        import mpi4py.MPI
    except ImportError:
        return False

    if mpi4py.MPI.COMM_WORLD.size == 1 and mpi4py.MPI.COMM_WORLD.rank == 0:
        return False
    return True
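The check treats a communicator of size 1 whose only rank is 0 as "not really MPI", so the same code base can fall back to a serial path. A hypothetical caller might branch on it like this (run_parallel and run_serial are placeholders, and the helper is repeated so the sketch runs on its own):

from mpi4py import MPI

def is_mpi_env():
    # Same test as the pyasdf helper above, assuming mpi4py imports cleanly.
    return not (MPI.COMM_WORLD.size == 1 and MPI.COMM_WORLD.rank == 0)

if is_mpi_env():
    print("taking the parallel code path")   # stand-in for run_parallel()
else:
    print("taking the serial fallback")      # stand-in for run_serial()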