How to use the mpi4py.MPI.SUM reduction operation in mpi4py

To help you get started, we’ve selected a few mpi4py examples that show popular ways MPI.SUM is used in public projects.

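MPI.SUM is one of mpi4py’s predefined reduction operations (an MPI.Op object rather than a callable). It is passed as the op argument of reduce- and allreduce-style calls to sum the contributions of all ranks. Before the project excerpts below, here is a minimal, self-contained sketch of the basic pattern; the script name in the launch command is only illustrative (run with e.g. mpiexec -n 4 python sum_example.py):

from mpi4py import MPI

comm = MPI.COMM_WORLD

# Each rank contributes its own rank number; allreduce sums the
# contributions and returns the total on every rank.
local_value = comm.rank
total = comm.allreduce(local_value, op=MPI.SUM)

print("rank", comm.rank, "sees total", total)  # 0 + 1 + ... + (n-1) on every rank

MPI.SUM is also the default op for reduce and allreduce in mpi4py, so op=MPI.SUM could be omitted; the examples below spell it out for clarity.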

github mathLab / RBniCS / rbnics / backends / dolfin / wrapping / create_submesh.py
    # Create a mapping from mesh local vertex index to submesh local vertex index.
    mesh_to_submesh_vertex_local_indices = dict(zip(mesh_vertex_indices, list(range(len(mesh_vertex_indices)))))
    # Also create a mapping from mesh local cell index to submesh local cell index.
    mesh_to_submesh_cell_local_indices = dict(zip(mesh_cell_indices, list(range(len(mesh_cell_indices)))))
    # Now, define submesh cells
    submesh_cells = list()
    for i, c in enumerate(mesh_cells):
        submesh_cells.append([mesh_to_submesh_vertex_local_indices[j] for j in c])
    # Store vertices as submesh_vertices[local_index] = (global_index, coordinates)
    submesh_vertices = dict()
    for mesh_vertex_local_index, submesh_vertex_local_index in mesh_to_submesh_vertex_local_indices.items():
        submesh_vertices[submesh_vertex_local_index] = (
            allgathered_mesh_to_submesh_vertex_global_indices[
                mesh.topology().global_indices(0)[mesh_vertex_local_index]],
            mesh.coordinates()[mesh_vertex_local_index])
    # Collect the global number of vertices and cells
    global_num_cells = mpi_comm.allreduce(len(submesh_cells), op=SUM)
    global_num_vertices = len(allgathered_mesh_to_submesh_vertex_global_indices)
    # Fill in mesh_editor
    mesh_editor.init_vertices_global(len(submesh_vertices), global_num_vertices)
    mesh_editor.init_cells_global(len(submesh_cells), global_num_cells)
    for local_index, cell_vertices in enumerate(submesh_cells):
        mesh_editor.add_cell(local_index, cell_vertices)
    for local_index, (global_index, coordinates) in submesh_vertices.items():
        mesh_editor.add_vertex_global(local_index, global_index, coordinates)
    mesh_editor.close()
    # Initialize topology
    submesh.topology().init(0, len(submesh_vertices), global_num_vertices)
    submesh.topology().init(mesh.ufl_cell().topological_dimension(), len(submesh_cells), global_num_cells)
    # Correct the global index of cells
    for local_index in range(len(submesh_cells)):
        submesh.topology().set_global_index(
            submesh.topology().dim(),
github bccp / nbodykit / legacy / measurepower.py
def paint_darkmatter(pm, filename, fileformat):
    pm.real[:] = 0
    Ntot = 0
    for round, P in enumerate(read(pm.comm, ns.filename, TPMSnapshotFile, 
                columns=['Position'], bunchsize=ns.bunchsize)):
        P['Position'] *= ns.BoxSize
        layout = pm.decompose(P['Position'])
        tpos = layout.exchange(P['Position'])
        #print tpos.shape
        pm.paint(tpos)
        npaint = pm.comm.allreduce(len(tpos), op=MPI.SUM) 
        nread = pm.comm.allreduce(len(P['Position']), op=MPI.SUM) 
        if pm.comm.rank == 0:
            logging.info('round %d, npaint %d, nread %d' % (round, npaint, nread))
        Ntot = Ntot + nread
    return Ntot
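
The pattern above, allreducing a local count with MPI.SUM and logging the global total only on rank 0, is a common way to report progress without duplicating log lines. A stripped-down sketch of the same idea, with made-up chunk sizes standing in for the snapshot reader:

from mpi4py import MPI
import logging

comm = MPI.COMM_WORLD
logging.basicConfig(level=logging.INFO)

total_read = 0
for step, local_chunk in enumerate([100, 250, 80]):  # hypothetical per-rank chunk sizes
    # Every rank contributes the size of the chunk it just processed.
    global_chunk = comm.allreduce(local_chunk, op=MPI.SUM)
    if comm.rank == 0:
        logging.info('step %d, items read %d', step, global_chunk)
    total_read += global_chunk
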
github jeremiedecock / snippets / python / mpi4py / dynamic_process_reduce / worker.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright (c) 2013 Jérémie DECOCK (http://www.jdhp.org)

from mpi4py import MPI

comm = MPI.Comm.Get_parent()

size = comm.size
rank = comm.rank

data = rank

result = comm.reduce(data, op=MPI.SUM, root=0)

print("process", rank, ":", result)

comm.Disconnect()
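
This worker only does something useful together with a parent process that spawns it. A plausible parent-side sketch is shown below; the worker file name and the number of spawned processes are assumptions. On the intercommunicator returned by Spawn, the parent passes root=MPI.ROOT to receive the MPI.SUM reduction performed by the workers:

#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import sys
from mpi4py import MPI

# Spawn worker processes running the script above (file name and count assumed).
comm = MPI.COMM_SELF.Spawn(sys.executable, args=['worker.py'], maxprocs=4)

# On the parent side of the intercommunicator, root=MPI.ROOT marks this
# process as the receiver of the workers' MPI.SUM reduction.
total = comm.reduce(None, op=MPI.SUM, root=MPI.ROOT)
print("sum of worker ranks:", total)  # 0 + 1 + 2 + 3 = 6 with 4 workers

comm.Disconnect()
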
github xunzhang / parasol / parasol / alg / mf_v2.py
def calc_rmse(self):
        import math
        esum = self.__calc_esum()
        paralg.sync(self)
        esum = self.comm.allreduce(esum, op=MPI.SUM)
        cnt = self.comm.allreduce(len(self.graph), op=MPI.SUM)
        return math.sqrt(esum / cnt)
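
Two MPI.SUM allreduces, one for the accumulated squared error and one for the sample count, are all that is needed to turn per-rank statistics into a global RMSE. A standalone sketch of the same idea, with fabricated local residuals:

import math
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD

local_errors = np.random.rand(10)            # hypothetical per-rank residuals

local_sq_sum = float(np.sum(local_errors ** 2))
local_count = len(local_errors)

# Sum both quantities over all ranks, then combine them into one RMSE.
global_sq_sum = comm.allreduce(local_sq_sum, op=MPI.SUM)
global_count = comm.allreduce(local_count, op=MPI.SUM)

rmse = math.sqrt(global_sq_sum / global_count)
print("rank", comm.rank, "global RMSE", rmse)
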
github deephyper / deephyper / deephyper / searches / nas / utils / common / mpi_adam.py
def update(self, localg, stepsize):
        if self.t % 100 == 0:
            self.check_synced()
        localg = localg.astype('float32')
        globalg = np.zeros_like(localg)
        # Sum the local gradients from every rank into globalg.
        self.comm.Allreduce(localg, globalg, op=MPI.SUM)
        if self.scale_grad_by_procs:
            # Turn the global sum into an average over ranks.
            globalg /= self.comm.Get_size()

        self.t += 1
        a = stepsize * np.sqrt(1 - self.beta2**self.t)/(1 - self.beta1**self.t)
        self.m = self.beta1 * self.m + (1 - self.beta1) * globalg
        self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)
        step = (- a) * self.m / (np.sqrt(self.v) + self.epsilon)
        self.setfromflat(self.getflat() + step)
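
Note the capitalized Allreduce here: the uppercase methods operate on buffer-like objects such as NumPy arrays and avoid pickling, which matters for large gradient vectors. A reduced sketch of just the averaging step, with an arbitrary array size and contents:

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD

# Pretend each rank computed its own gradient vector.
local_grad = np.full(5, comm.rank, dtype='float32')
global_grad = np.zeros_like(local_grad)

# Element-wise sum into global_grad on every rank, then turn it into an average.
comm.Allreduce(local_grad, global_grad, op=MPI.SUM)
global_grad /= comm.Get_size()
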
github quantumiracle / Reinforcement_Learning_for_Traffic_Light_Control / 6.ddpg_for_single / common / mpi_running_mean_std.py
def update(self, x):
        x = x.astype('float64')
        n = int(np.prod(self.shape))
        totalvec = np.zeros(n*2+1, 'float64')
        # Pack the local per-feature sum, sum of squares and sample count into
        # one vector so a single Allreduce updates all running statistics at once.
        addvec = np.concatenate([x.sum(axis=0).ravel(),
                                 np.square(x).sum(axis=0).ravel(),
                                 np.array([len(x)], dtype='float64')])
        MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM)
        self.incfiltparams(totalvec[0:n].reshape(self.shape), totalvec[n:2*n].reshape(self.shape), totalvec[2*n])
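
Concatenating the per-batch sum, sum of squares and element count into a single vector lets the running-mean/std update pay for one Allreduce instead of three. A small sketch of just that packing step, with made-up data:

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD

x = np.random.rand(8, 3)                     # hypothetical local batch: (samples, features)
n = x.shape[1]

# Pack [sum, sum of squares, count] into one float64 buffer and reduce it once.
addvec = np.concatenate([x.sum(axis=0), np.square(x).sum(axis=0),
                         np.array([len(x)], dtype='float64')])
totalvec = np.zeros_like(addvec)
comm.Allreduce(addvec, totalvec, op=MPI.SUM)

global_sum, global_sumsq, global_count = totalvec[:n], totalvec[n:2 * n], totalvec[2 * n]
mean = global_sum / global_count
std = np.sqrt(np.maximum(global_sumsq / global_count - np.square(mean), 0.0))
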
github Geodels / eSCAPE / eSCAPE / flow / surfprocplex.py
        hBase = self.hLocal.getArray().copy()
        hTop = self.fillLocal.getArray().copy()
        hDiff = hTop-hBase

        # Find inland depressions
        inlandIDs = self.pHeight>self.sealevel
        excess = np.zeros(1)
        for k in range(len(self.pVol)):
            if inlandIDs[k]:
                # pts = wshed==k
                pts = np.where(wshed==k)[0]
                zsea[pts] = -1.e6
                locVol = np.zeros(1)
                locVol[0] = np.sum(depV[pts])
                MPI.COMM_WORLD.Allreduce(MPI.IN_PLACE, locVol, op=MPI.SUM)
                # Case 1: incoming sediment volume lower than pit volume
                if locVol[0] < self.pVol[k]:
                    frac = locVol[0]/self.pVol[k]
                    depo[pts] += frac*hDiff[pts]
                    Qs[pts] = 0.
                    depV[pts] = 0.
                    self.pVol[k] -= locVol[0]
                # Case 2: incoming sediment volume greater than pit volume
                elif locVol[0] > self.pVol[k] and wshed[self.pitNode[k]]>=0:
                    depo[pts] = hDiff[pts]
                    Qs[pts] = 0.
                    excess[0] = 1
                    self.pVol[k] = 0.
                    depV[pts] = 0.
                    if MPIrank==self.pitProc[k]:
                        Qs[self.pitNode[k]] = (locVol[0] - self.pVol[k])/self.dt
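
Passing MPI.IN_PLACE as the send buffer, as in the excerpt above, sums directly into the existing array without allocating a separate receive buffer. A minimal sketch of the same in-place pattern:

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD

# Each rank starts with its own contribution; after the call the array holds
# the element-wise sum over all ranks.
values = np.full(4, comm.rank, dtype='float64')
comm.Allreduce(MPI.IN_PLACE, values, op=MPI.SUM)
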
github Parallel-in-Time / pySDC / pySDC / implementations / problem_classes / AllenCahn_MPIFFT.py
        if self.params.spectral:

            f.impl = -self.K2 * u

            tmp = newDistArray(self.fft, False)
            tmp[:] = self.fft.backward(u, tmp)

            if self.params.eps > 0:
                tmpf = -2.0 / self.params.eps ** 2 * tmp * (1.0 - tmp) * (1.0 - 2.0 * tmp)
            else:
                tmpf = self.dtype_f(self.init, val=0.0)

            # build sum over RHS without driving force
            Rt_local = float(np.sum(self.fft.backward(f.impl) + tmpf))
            if self.params.comm is not None:
                Rt_global = self.params.comm.allreduce(sendobj=Rt_local, op=MPI.SUM)
            else:
                Rt_global = Rt_local

            # build sum over driving force term
            Ht_local = float(np.sum(6.0 * tmp * (1.0 - tmp)))
            if self.params.comm is not None:
                Ht_global = self.params.comm.allreduce(sendobj=Ht_local, op=MPI.SUM)
            else:
                Ht_global = Ht_local

            # add/subtract time-dependent driving force
            if Ht_global != 0.0:
                dw = Rt_global / Ht_global
            else:
                dw = 0.0
github choderalab / yank / Yank / sampling.py
        ReplicaExchange._propagate_replicas(self)

        # Print summary statistics.
        # TODO: Streamline this idiom.
        if self.mpicomm:
            # MPI
            from mpi4py import MPI
            if self.mc_displacement and (self.mc_atoms is not None):
                self.displacement_trials_accepted = self.mpicomm.reduce(self.displacement_trials_accepted, op=MPI.SUM)
                self.displacement_trial_time = self.mpicomm.reduce(self.displacement_trial_time, op=MPI.SUM)
                if self.mpicomm.rank == 0:
                    logger.debug("Displacement MC trial times consumed %.3f s aggregate (%d accepted)" % (self.displacement_trial_time, self.displacement_trials_accepted))

            if self.mc_rotation and (self.mc_atoms is not None):
                self.rotation_trials_accepted = self.mpicomm.reduce(self.rotation_trials_accepted, op=MPI.SUM)
                self.rotation_trial_time = self.mpicomm.reduce(self.rotation_trial_time, op=MPI.SUM)
                if self.mpicomm.rank == 0:
                    logger.debug("Rotation MC trial times consumed %.3f s aggregate (%d accepted)" % (self.rotation_trial_time, self.rotation_trials_accepted))
        else:
            # SERIAL
            if self.mc_displacement and (self.mc_atoms is not None):
                logger.debug("Displacement MC trial times consumed %.3f s aggregate (%d accepted)" % (self.displacement_trial_time, self.displacement_trials_accepted))
            if self.mc_rotation and (self.mc_atoms is not None):
                logger.debug("Rotation MC trial times consumed %.3f s aggregate (%d accepted)" % (self.rotation_trial_time, self.rotation_trials_accepted))

        return
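
Unlike allreduce, reduce with op=MPI.SUM delivers the combined value only on the root rank; the lowercase call returns None everywhere else, which is why the logging above is guarded by a rank check. A minimal sketch of that behaviour:

from mpi4py import MPI

comm = MPI.COMM_WORLD

local_trials = comm.rank + 1                 # stand-in for a per-rank counter
total_trials = comm.reduce(local_trials, op=MPI.SUM, root=0)

if comm.rank == 0:
    print("total trials:", total_trials)
else:
    assert total_trials is None              # non-root ranks receive None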