How to use the mpi4py.MPI.Comm class in mpi4py

To help you get started, we’ve selected a few mpi4py.MPI.Comm examples, based on popular ways it is used in public projects.
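
Before working through the project excerpts below, here is a minimal, self-contained sketch of the core MPI.Comm interface: grabbing the default communicator, querying rank and size, and exchanging a pickled Python object point-to-point. The script name and message payload are made up for illustration; run it with something like mpiexec -n 2 python comm_basics.py.

# comm_basics.py -- a minimal MPI.Comm sketch (illustrative, not from the projects below)
from mpi4py import MPI

comm = MPI.COMM_WORLD              # the default communicator, an MPI.Comm instance
rank = comm.Get_rank()             # this process' index within the communicator
size = comm.Get_size()             # total number of processes

if rank == 0:
    # lowercase send/recv pickle arbitrary Python objects
    comm.send({'greeting': 'hello', 'world_size': size}, dest=1, tag=11)
elif rank == 1:
    msg = comm.recv(source=0, tag=11)
    print('rank 1 received:', msg)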


github uoguelph-mlrg / Theano-MPI / lib / base / proc_load_mpi.py
return np.ascontiguousarray(data, dtype='float32')


if __name__ == '__main__':
    
    from mpi4py import MPI
    
    verbose = False #rank==0
    
    import sys
    
    gpuid = sys.argv[1]

    if verbose: print(gpuid)

    icomm = MPI.Comm.Get_parent()

    # 0. Receive config
    config = icomm.recv(source=MPI.ANY_SOURCE, tag=99)
    config['icomm'] = icomm
    size = config['size']
    rank = config['rank']

    file_batch_size = config['file_batch_size']
    batch_size = config['batch_size']
    subb = file_batch_size//batch_size

    import pygpu
    ctx = pygpu.init(gpuid)

    import socket
    addr = socket.gethostbyname(socket.gethostname())
    if verbose: print('[load] ', addr, rank)
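
The worker above is started via dynamic process management and calls MPI.Comm.Get_parent() to obtain the intercommunicator back to the process that spawned it. For context, here is a hedged sketch of what the parent side of such a setup can look like; the worker count and config payload are invented, and only the script name and the tag=99 convention come from the excerpt.

# Parent-side sketch (not taken from Theano-MPI): spawn loader workers and send each a config.
import sys
from mpi4py import MPI

n_workers = 2
icomm = MPI.COMM_SELF.Spawn(sys.executable,
                            args=['proc_load_mpi.py', '0'],   # gpuid is passed as argv[1]
                            maxprocs=n_workers)

for rank in range(n_workers):
    # hypothetical config; the worker excerpt expects at least these keys
    config = {'size': n_workers, 'rank': rank,
              'file_batch_size': 256, 'batch_size': 64}
    icomm.send(config, dest=rank, tag=99)     # the worker receives this with tag=99
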
github cctbx / cctbx_project / xfel / merging / command_line / worker2.py
from __future__ import division
# LIBTBX_SET_DISPATCHER_NAME mpi.worker2

print "Deprecated on 12/01/17. Code will be removed at a later date"
exit()

from mpi4py import MPI
from xfel.command_line.mpi_merge import scaling_manager_mpi

comm = MPI.Comm.Get_parent()
size = comm.Get_size()
rank = comm.Get_rank()

received_info = {}
received_info = comm.bcast(received_info, root=0)
file_names = received_info["file_names"]
sm = scaling_manager_mpi(received_info["miller_set"],
                         received_info["model"],
                         received_info["params"])

assert sm.params.backend == 'MySQL' # only option that makes sense
from xfel.merging.database.merging_database import manager
db_mgr = manager(sm.params)

for ix in range(len(file_names)):
  if ix % size == rank:
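
Two MPI.Comm idioms appear in this worker: the pickle-based comm.bcast, which distributes a Python dict from the root of the intercommunicator, and the ix % size == rank test, which splits the file list round-robin across ranks. A standalone sketch of the same pattern, with invented file names:

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

# the root builds the work description; the other ranks pass a placeholder
work = {'file_names': ['a.dat', 'b.dat', 'c.dat', 'd.dat']} if rank == 0 else None
work = comm.bcast(work, root=0)      # pickle-based broadcast of a Python object

for ix, name in enumerate(work['file_names']):
    if ix % size == rank:            # round-robin assignment of files to ranks
        print('rank', rank, 'processes', name)
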
github helmholtz-analytics / heat / heat / core / communication.py
        # unpack the buffer if it is a HeAT tensor
        if isinstance(buf, dndarray.DNDarray):
            buf = buf._DNDarray__array
        # convert torch tensors to MPI memory buffers
        if not isinstance(buf, torch.Tensor):
            return func(buf, root), None, None, None

        srbuf = buf if CUDA_AWARE_MPI else buf.cpu()

        return func(self.as_buffer(srbuf), root), srbuf, srbuf, buf

    def Bcast(self, buf, root=0):
        ret, sbuf, rbuf, buf = self.__broadcast_like(self.handle.Bcast, buf, root)
        if buf is not None and isinstance(buf, torch.Tensor) and buf.is_cuda and not CUDA_AWARE_MPI:
            buf.copy_(rbuf)
        return ret

    Bcast.__doc__ = MPI.Comm.Bcast.__doc__

    def Ibcast(self, buf, root=0):
        return MPIRequest(*self.__broadcast_like(self.handle.Ibcast, buf, root))

    Ibcast.__doc__ = MPI.Comm.Ibcast.__doc__

    def __reduce_like(self, func, sendbuf, recvbuf, *args, **kwargs):
        sbuf = None
        rbuf = None
        buf = None
        # unpack the send buffer if it is a HeAT tensor
        if isinstance(sendbuf, dndarray.DNDarray):
            sendbuf = sendbuf._DNDarray__array
        # unpack the receive buffer if it is a HeAT tensor
        if isinstance(recvbuf, dndarray.DNDarray):
            recvbuf = recvbuf._DNDarray__array
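
The HeAT wrapper above ultimately forwards to the buffer-based MPI.Comm.Bcast, which operates in place on objects exposing the buffer protocol (such as NumPy arrays) rather than on pickled Python objects. A minimal sketch of that underlying call, independent of HeAT:

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD

# every rank must supply a buffer of matching shape and dtype
if comm.Get_rank() == 0:
    data = np.arange(4, dtype='float64')
else:
    data = np.empty(4, dtype='float64')

comm.Bcast(data, root=0)             # in-place, buffer-based broadcast
print(comm.Get_rank(), data)
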
github tomMoral / dicodile / dicodile / utils / debugs.py
def worker_check_beta(rank, workers_segments, beta, D_shape):
    """Helper function for main_check_warm_beta, to be run in the workers."""
    global_test_points = get_global_test_points(workers_segments)
    for i_probe, pt_global in enumerate(global_test_points):
        pt = workers_segments.get_local_coordinate(rank, pt_global)
        if workers_segments.is_contained_coordinate(rank, pt):
            beta_slice = (Ellipsis,) + pt
            sum_beta = np.array(beta[beta_slice].sum(), dtype='d')

            comm = MPI.Comm.Get_parent()
            comm.Send([sum_beta, MPI.DOUBLE], dest=0,
                      tag=constants.TAG_ROOT + i_probe)
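
The worker above reports one scalar per probe point to the root over the parent intercommunicator, using the buffer interface ([sum_beta, MPI.DOUBLE]) and a per-probe tag. A hedged sketch of the matching receive on the parent side; the function name, probe count and the TAG_ROOT default are illustrative, and only the tagging scheme mirrors the excerpt.

import numpy as np
from mpi4py import MPI

def root_collect_beta_sums(comm, n_probes, TAG_ROOT=4242):
    # `comm` is assumed to be the intercommunicator returned by MPI.COMM_SELF.Spawn(...)
    sums = np.empty(n_probes, dtype='d')
    for i_probe in range(n_probes):
        buf = np.empty(1, dtype='d')
        # matches the worker's comm.Send([sum_beta, MPI.DOUBLE], dest=0, tag=TAG_ROOT + i_probe)
        comm.Recv([buf, MPI.DOUBLE], source=MPI.ANY_SOURCE, tag=TAG_ROOT + i_probe)
        sums[i_probe] = buf[0]
    return sums
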
github sfepy / sfepy / sfepy / solvers / ls_mumps_parallel.py
flags[0])
        x = vals_rhs.copy()
        mumps_ls.set_rhs(x)

    mumps_ls(6)  # analyse, factorize, solve

    if comm.rank == 0:
        vals_x = nm.memmap(tmpfile('vals_x.array'), dtype=dtype, mode='w+',
                           shape=x.shape)
        vals_x[:] = x

    del mumps_ls


if __name__ == '__main__':
    comm = MPI.Comm.Get_parent()
    mumps_parallel_solve()
    comm.Disconnect()
github helmholtz-analytics / heat / heat / core / communication.py
    Allgatherv.__doc__ = MPI.Comm.Allgatherv.__doc__

    def Iallgather(self, sendbuf, recvbuf, recv_axis=0):
        """
        Parameters
        ----------
        sendbuf: the input send buffer
        recvbuf: the receive buffer
        recv_axis: concatenation axis, i.e. the axis along which sendbuf is distributed before the allgather is performed
        """
        return MPIRequest(
            *self.__allgather_like(self.handle.Iallgather, sendbuf, recvbuf, recv_axis)
        )

    Iallgather.__doc__ = MPI.Comm.Iallgather.__doc__

    def Iallgatherv(self, sendbuf, recvbuf, recv_axis=0):
        """
        Parameters
        ----------
        sendbuf: the input send buffer
        recvbuf: the receive buffer
        recv_axis: concatenation axis, i.e. the axis along which sendbuf is distributed before the allgather is performed
        """
        return MPIRequest(
            *self.__allgather_like(self.handle.Iallgatherv, sendbuf, recvbuf, recv_axis)
        )

    Iallgatherv.__doc__ = MPI.Comm.Iallgatherv.__doc__

    def __alltoall_like(self, func, sendbuf, recvbuf, send_axis, recv_axis, **kwargs):
github helmholtz-analytics / heat / heat / core / communication.py
    def __recv_like(self, func, buf, source, tag, status):
        if isinstance(buf, dndarray.DNDarray):
            buf = buf._DNDarray__array
        if not isinstance(buf, torch.Tensor):
            return func(buf, source, tag, status)

        return func(self.as_buffer(buf), source, tag, status)

    def Irecv(self, buf, source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=None):
        return self.__recv_like(self.handle.Irecv, buf, source, tag, status)
    Irecv.__doc__ = MPI.Comm.Irecv.__doc__

    def Recv(self, buf, source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=None):
        return self.__recv_like(self.handle.Recv, buf, source, tag, status)
    Recv.__doc__ = MPI.Comm.Recv.__doc__

    def __send_like(self, func, buf, dest, tag):
        if isinstance(buf, dndarray.DNDarray):
            buf = buf._DNDarray__array
        if not isinstance(buf, torch.Tensor):
            return func(buf, dest, tag)

        return func(self.as_buffer(buf), dest, tag)

    def Bsend(self, buf, dest, tag=0):
        return self.__send_like(self.handle.Bsend, buf, dest, tag)
    Bsend.__doc__ = MPI.Comm.Bsend.__doc__

    def Ibsend(self, buf, dest, tag=0):
        return self.__send_like(self.handle.Ibsend, buf, dest, tag)
    Ibsend.__doc__ = MPI.Comm.Ibsend.__doc__
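
These wrappers forward to mpi4py's buffer-based point-to-point calls; the I-prefixed variants (Irecv, Ibsend, ...) are nonblocking and return a request object that must be completed with Wait() or Test(). A minimal sketch of the nonblocking/blocking pairing with plain NumPy buffers, independent of HeAT:

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

if rank == 0:
    payload = np.full(8, 3.14)                                # float64 by default
    req = comm.Isend([payload, MPI.DOUBLE], dest=1, tag=7)    # nonblocking send
    req.Wait()                                                # complete it before reusing payload
elif rank == 1:
    out = np.empty(8, dtype='float64')
    comm.Recv([out, MPI.DOUBLE], source=0, tag=7)             # blocking receive
    print('rank 1 got', out)
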
github helmholtz-analytics / heat / heat / core / communication.py
        exit_code = func(mpi_sendbuf, mpi_recvbuf, **kwargs)

        # undo the recvbuf permutation and assign the temporary buffer to the original recvbuf
        if recv_axis != 0:
            print("recvbuf", recvbuf)
            recvbuf = recvbuf.permute(*recv_axis_permutation)
            print("recvbuf after permuting", recvbuf)
            print("recvbuf.storage()", recvbuf.storage(), "recvbuf.storage_offset()", recvbuf.storage_offset(), "recvbuf.shape", recvbuf.shape, "recvbuf.stride()", recvbuf.stride())
            original_recvbuf.set_(recvbuf.storage(), recvbuf.storage_offset(), recvbuf.shape, recvbuf.stride())
            print("original_recvbuf", original_recvbuf)

        return exit_code

    def Allgather(self, sendbuf, recvbuf, axis=0, recv_axis=None):
        return self.__scatter_like(self.handle.Allgather, sendbuf, recvbuf, axis, recv_axis, recv_factor=self.size)
    Allgather.__doc__ = MPI.Comm.Allgather.__doc__

    def Allgatherv(self, sendbuf, recvbuf, axis=0, recv_axis=None):
        return self.__scatter_like(self.handle.Allgatherv, sendbuf, recvbuf, axis, recv_axis, recv_factor=self.size)
    Allgatherv.__doc__ = MPI.Comm.Allgatherv.__doc__

    def Alltoall(self, sendbuf, recvbuf, axis=0, recv_axis=None):
        return self.__scatter_like(
            self.handle.Alltoall, sendbuf, recvbuf, axis, recv_axis, send_factor=self.size, recv_factor=self.size
        )
    Alltoall.__doc__ = MPI.Comm.Alltoall.__doc__

    def Alltoallv(self, sendbuf, recvbuf, axis=0, recv_axis=None):
        return self.__scatter_like(self.handle.Alltoallv, sendbuf, recvbuf, axis, recv_axis)
    Alltoallv.__doc__ = MPI.Comm.Alltoallv.__doc__

    def Gather(self, sendbuf, recvbuf, root=0, axis=0, recv_axis=None):
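
The wrappers above add axis permutation on top of mpi4py's collectives; stripped of that, MPI.Comm.Allgather simply concatenates a same-sized contribution from every rank into every rank's receive buffer. A minimal sketch with plain NumPy buffers:

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

send = np.full(3, rank, dtype='float64')      # each rank contributes 3 values
recv = np.empty(3 * size, dtype='float64')    # room for every rank's contribution
comm.Allgather([send, MPI.DOUBLE], [recv, MPI.DOUBLE])
print('rank', rank, 'sees', recv)
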
github helmholtz-analytics / heat / heat / core / communication.py
    def __alltoall_like(self, func, sendbuf, recvbuf, send_axis, recv_axis, **kwargs):
        """
        Parameters
        ----------
        sendbuf: the input send buffer; can be a DNDarray, a torch.Tensor, a tuple (torch.Tensor, send_counts, send_displ), or any other NumPy-supported type (the latter only if send_axis == 0)
        recvbuf: the input receive buffer; can be a DNDarray, a torch.Tensor, a tuple (torch.Tensor, send_counts, send_displ), or any other NumPy-supported type (the latter only if send_axis == 0)
        send_axis: future split axis, along which data blocks are created and sent to the individual ranks;
                   an error is raised if send_axis == recv_axis or if either of them is None
        recv_axis: prior split axis, along which blocks are received from the individual ranks

        Returns
        -------
        the exit code returned by func
        """