How to use mpi4py - 10 common examples

To help you get started, we've selected a few mpi4py examples based on popular ways the library is used in public projects.

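All of the snippets below share the same basic shape: import mpi4py, grab a communicator (almost always MPI.COMM_WORLD), and branch on the process rank. As a point of reference, here is a minimal, runnable sketch of that pattern:

from mpi4py import MPI

comm = MPI.COMM_WORLD          # communicator spanning every launched process
rank = comm.Get_rank()         # this process's id: 0 .. size-1
size = comm.Get_size()         # total number of processes

print("Hello from rank %d of %d" % (rank, size))

Saved as, say, hello_mpi.py, it runs with mpiexec -n 4 python hello_mpi.py; each of the four processes executes the whole script and prints its own rank.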

github tbenthompson / tectosaur / tests / test_table_lookup.py
import mpi4py.MPI
import numpy as np
import taskloaf.shmem
# Timer is a small timing utility imported from tectosaur in the original file.


def benchmark_co_lookup_pts():
    n_cores = mpi4py.MPI.COMM_WORLD.Get_size()

    async def submit(w):
        n_chunks = n_cores

        # The triangle mesh was generated once (with make_rect) and cached;
        # here it is simply reloaded from disk.
        tri_pts = np.load('tripts.npy')
        chunk_bounds = np.linspace(0, tri_pts.shape[0], n_chunks + 1)

        t2 = Timer(just_print=True)
        # Expose the triangle points to all workers via shared memory.
        with taskloaf.shmem.alloc_shmem(tri_pts) as sm_filepath:
            ...  # rest of the benchmark continues from here
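One detail worth flagging in this snippet: np.linspace returns floats, so chunk_bounds has to be cast to integers before it can slice tri_pts. A small self-contained sketch of that chunking step (chunk_ranges is a hypothetical helper, not part of tectosaur):

import numpy as np

def chunk_ranges(n_items, n_chunks):
    # Integer-valued, evenly spaced chunk boundaries over n_items elements.
    bounds = np.linspace(0, n_items, n_chunks + 1).astype(int)
    return [(int(a), int(b)) for a, b in zip(bounds[:-1], bounds[1:])]

print(chunk_ranges(10, 3))   # [(0, 3), (3, 6), (6, 10)]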
github espressopp / espressopp / contrib / mpi4py / mpi4py-2.0.0 / demo / sequential / test_seq.py
from mpi4py import MPI
# Seq is a helper class that ships with this demo, not part of mpi4py itself.

def test():
    size = MPI.COMM_WORLD.Get_size()
    rank = MPI.COMM_WORLD.Get_rank()
    name = MPI.Get_processor_name()
    # Processes enter the block one at a time (group size 1, message tag 10).
    with Seq(MPI.COMM_WORLD, 1, 10):
        print(
            "Hello, World! I am process %d of %d on %s."
            % (rank, size, name))
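Seq lets ranks enter the block one at a time so that output comes out in rank order. The same effect can be approximated with nothing but barriers, as in this sketch (the exact interleaving of printed lines still depends on how the MPI runtime forwards stdout):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Each rank takes its turn; everyone else waits at the barrier.
for turn in range(comm.Get_size()):
    if rank == turn:
        print("rank %d reporting in order" % rank)
    comm.Barrier()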
github firedrakeproject / firedrake / tests / firedrake_parallel_nonlinear_helmholtz / test_firedrake_parallel_nonlinear_helmholtz.py
    # Tail of run_test(): returns the L2 error of the computed solution.
    return sqrt(assemble(dot(u - f, u - f) * dx))


def run_convergence_test():
    diff = [run_test(i) for i in range(3, 8)]

    from math import log
    import numpy as np
    conv = [log(diff[i] / diff[i + 1], 2) for i in range(len(diff) - 1)]
    return np.array(conv)

if __name__ == "__main__":
    import pickle
    from mpi4py import MPI
    l2_conv = run_convergence_test()
    if MPI.COMM_WORLD.rank == 0:
        with open("test-output.dat", "w") as f:
            pickle.dump(l2_conv, f)
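The convergence rates computed above are just base-2 logarithms of the ratio of successive errors: if each refinement halves the mesh spacing, a k-th order scheme should divide the error by roughly 2**k. A standalone version of that calculation with made-up error values:

from math import log

# Hypothetical errors from four successively refined meshes; a second-order
# method divides the error by about 4 at each refinement.
diff = [1.0e-2, 2.6e-3, 6.4e-4, 1.6e-4]
conv = [log(diff[i] / diff[i + 1], 2) for i in range(len(diff) - 1)]
print(conv)   # approximately [1.94, 2.02, 2.0]

Note also the rank guard around the pickle call: in a parallel run, writing the file from every rank would race on the same path, so only rank 0 performs the I/O.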
github firedrakeproject / firedrake / tests / regression / test_nonlinear_helmholtz.py
def test_l2_conv_parallel():
    from mpi4py import MPI
    l2_conv = run_convergence_test()
    print('[%d]' % MPI.COMM_WORLD.rank, 'convergence rate:', l2_conv)
    assert (l2_conv > 2.8).all()
github mathLab / RBniCS / tests / unit / test_separated_parametrized_form_mixed.py
_solution_to_problem_map[expr10] = Problem("problem10")
_solution_to_problem_map[expr11] = Problem("problem11")
_solution_to_problem_map[expr12] = Problem("problem12")
_solution_to_problem_map[expr13] = Problem("problem13")

u, p = split(TrialFunction(V))
v, q = split(TestFunction(V))

scalar_trial = TrialFunction(scalar_V)
scalar_test = TestFunction(scalar_V)
vector_trial = TrialFunction(vector_V)
vector_test = TestFunction(vector_V)


# Fixtures
skip_in_parallel = pytest.mark.skipif(MPI.COMM_WORLD.size > 1, reason="Numbering of functions changes in parallel.")


# Tests
@skip_in_parallel
@enable_separated_parametrized_form_logging
@pytest.mark.dependency(name="1")
def test_separated_parametrized_forms_mixed_1():
    a1 = (inner(expr3 * grad(u), grad(v)) * dx + inner(grad(u) * expr2, v) * dx + expr1 * inner(u, v) * dx
          - p * tr(expr4 * grad(v)) * dx - expr1 * q * div(u) * dx - expr2[0] * p * q * dx)
    a1_sep = SeparatedParametrizedForm(a1)
    test_logger.log(DEBUG, "*** ###              FORM 1             ### ***")
    test_logger.log(DEBUG, "This is a basic mixed parametrized form, with all parametrized coefficients")
    a1_sep.separate()
    test_logger.log(DEBUG, "\tLen coefficients:")
    test_logger.log(DEBUG, "\t\t" + str(len(a1_sep.coefficients)))
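The only mpi4py-specific ingredient above is the skip_in_parallel marker: querying MPI.COMM_WORLD.size lets a pytest suite skip tests that only make sense on a single process. The pattern is reusable outside RBniCS, as in this sketch:

import pytest
from mpi4py import MPI

# Skip whenever the test suite is launched on more than one MPI process.
skip_in_parallel = pytest.mark.skipif(
    MPI.COMM_WORLD.size > 1, reason="test assumes a serial run")

@skip_in_parallel
def test_serial_only():
    assert MPI.COMM_WORLD.size == 1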
github mfatihaktas / deep-scheduler / learning_shortestq_wmpi.py
  # comm, rank, size, N, T, and flag (a one-element float32 array) are all
  # set up earlier in the script.
  if rank == 0:
    # Master: signal every worker, then collect an N x T array from each.
    for d in range(1, size):
      print("rank= {}, sending to d= {}".format(rank, d))
      flag[0] = 1
      comm.Send([flag, MPI.FLOAT], dest=d)
    for d in range(1, size):
      n_t_r_l = np.empty(N*T, dtype='f')
      comm.Recv([n_t_r_l, MPI.FLOAT], source=d)
      n_t_r_l = n_t_r_l.reshape((N, T))
      print("rank= {}, recved from d= {} n_t_r_l= \n{}".format(rank, d, n_t_r_l))
  else:
    # Worker: wait for the master's flag, then send back a flat N*T array
    # (reshaped afterwards only for printing).
    comm.Recv([flag, MPI.FLOAT], source=0)
    print("rank= {}, recved flag= {}".format(rank, flag))
    n_t_r_l = np.arange(N*T, dtype='f') * rank
    comm.Send([n_t_r_l, MPI.FLOAT], dest=0)
    n_t_r_l = n_t_r_l.reshape((N, T))
    print("rank= {}, returned to master n_t_r_l= \n{}".format(rank, n_t_r_l))
github chainer / chainer / tests / chainermn_tests / communicator_tests / test_communicator.py
    def check_send_recv_obj(self, x, tag=0,
                            use_any_recv=True, use_status=False):
        # Exercises a two-process setup: rank 0 sends, rank 1 receives.
        if self.communicator.rank == 0:
            self.communicator.send_obj(x, dest=1, tag=tag)
            y = x  # the sender compares the object against its own copy

        elif self.communicator.rank == 1:
            status = None
            if use_status:
                status = mpi4py.MPI.Status()

            if use_any_recv:
                y = self.communicator.recv_obj(source=0,
                                               status=status)
            else:
                y = self.communicator.recv_obj(source=0,
                                               tag=tag,
                                               status=status)

            if use_status:
                status_src = status.Get_source()
                self.assertEqual(0, status_src)
                status_tag = status.Get_tag()
                self.assertEqual(tag, status_tag)

        self.assertEqual(x, y)
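send_obj and recv_obj are ChainerMN wrappers; underneath they behave like mpi4py's lowercase send/recv, which pickle arbitrary Python objects, and an MPI.Status instance passed to the receive reports who sent the message and with what tag. A sketch of the same check in plain mpi4py:

from mpi4py import MPI

comm = MPI.COMM_WORLD

if comm.rank == 0:
    comm.send({"step": 1, "loss": 0.25}, dest=1, tag=7)
elif comm.rank == 1:
    status = MPI.Status()
    obj = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
    # Status reveals the actual sender and tag of the matched message.
    print(obj, status.Get_source(), status.Get_tag())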
github firedrakeproject / firedrake / tests / output / test_hdf5file_checkpoint.py
def test_checkpoint_fails_for_non_function(dumpfile):
    dumpfile = MPI.COMM_WORLD.bcast(dumpfile, root=0)
    with HDF5File(dumpfile, "w", comm=MPI.COMM_WORLD) as h5:
        with pytest.raises(ValueError):
            h5.write(np.arange(10), "/solution")
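The bcast on the first line is the standard way to make every rank agree on a value that only rank 0 knows, here a temporary filename picked by the test fixture. A minimal sketch of that idiom:

import os
from mpi4py import MPI

comm = MPI.COMM_WORLD

# Rank 0 chooses the name; the other ranks pass None and receive rank 0's
# value from the broadcast.
fname = ("run-%d.h5" % os.getpid()) if comm.rank == 0 else None
fname = comm.bcast(fname, root=0)
print("rank %d will use %s" % (comm.rank, fname))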
github hvasbath / beat / test / pt_toy_example.py
print("Master starting with %d workers" % num_workers)
    for i in range(num_workers):
        comm.recv(source=MPI.ANY_SOURCE, tag=tags.READY, status=status)
        source = status.Get_source()
        comm.send(tasks[i], dest=source, tag=tags.START)
        print("Sent task to worker %i" % source)
        active_workers += 1

    print("Parallel tempering ...")
    print("----------------------")

    while True:
        m1 = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        source1 = status.Get_source()
        print("Got sample 1 from worker %i" % source1)
        m2 = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        source2 = status.Get_source()
        print("Got sample 2 from worker %i" % source1)

        m1, m2 = metrop_select(m1, m2)
        print('samples 1, 2 %i %i' % (m1, m2))
        chain.extend([m1, m2])
        if len(chain) < nsamples:
            print("Sending states back to workers ...")
            comm.send(m1, dest=source1, tag=tags.START)
            comm.send(m2, dest=source2, tag=tags.START)
        else:
            print('Requested number of samples reached!')
            break

    print("Master finishing, recorded chain:")
    print(chain)
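The loop above is the classic master/worker pattern: the master posts receives with MPI.ANY_SOURCE and reads the MPI.Status object to learn which worker actually answered, so the reply can be routed back to the right rank. A stripped-down skeleton of that routing (the payloads are placeholders):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()

if rank == 0:
    status = MPI.Status()
    # Answer each worker in whatever order they happen to report in.
    for _ in range(size - 1):
        sample = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        comm.send(sample + 1, dest=status.Get_source())
else:
    comm.send(rank * 10, dest=0)    # worker submits a value
    reply = comm.recv(source=0)     # and waits for the master's answer
    print("worker %d got %d" % (rank, reply))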
github ahmadia / collfs / test_enchilada.py
import sys
import time

from mpi4py import MPI
# mpi4py_finder and Importer are import hooks defined elsewhere in collfs;
# full_start is a timestamp taken at the very top of the original script.

enchilada_start = time.time()

# try the cached importer first
sys.meta_path.insert(0, mpi4py_finder())

# fall back to our importer
sys.meta_path.insert(1, Importer())

from clawpack import pyclaw

MPI.COMM_WORLD.Barrier()
enchilada_elapsed = time.time() - enchilada_start
full_elapsed = time.time() - full_start


if MPI.COMM_WORLD.rank == 0:
    print("elapsed time (enchilada): %f" % enchilada_elapsed)
    print("elapsed time (full): %f" % full_elapsed)