How to use the mpi4py.MPI.Wtime function in mpi4py

To help you get started, we've selected a few mpi4py examples that show how MPI.Wtime is used in popular public projects.

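MPI.Wtime() returns a wall-clock timestamp in seconds as a Python float; the elapsed time for a region of code is simply the difference between two calls. A minimal sketch of that pattern (the Barrier here just stands in for whatever work you want to time):

from mpi4py import MPI

comm = MPI.COMM_WORLD

t0 = MPI.Wtime()            # timestamp in seconds, relative to an arbitrary origin
comm.Barrier()              # stand-in for the work being timed
elapsed = MPI.Wtime() - t0  # elapsed wall-clock seconds on this process

if comm.rank == 0:
    print('elapsed: %.6f s' % elapsed)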

github espressopp/espressopp/contrib/mpi4py/mpi4py-1.3/demo/compute-pi/cpi-dpm.py (view on GitHub)

def master(icomm):
    n = get_n()
    wt = MPI.Wtime()                        # start the wall clock before the exchange
    n = N.array(n, 'i')
    icomm.Send([n, MPI.INT], dest=0)        # send the interval count to the workers
    pi = N.array(0, 'd')
    icomm.Recv([pi, MPI.DOUBLE], source=0)  # receive the computed value of pi
    wt = MPI.Wtime() - wt                   # elapsed round-trip time in seconds
    if n == 0: return
    np = icomm.Get_remote_size()
    view(pi, np, wt)
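In the demo above the master starts the clock, sends the interval count through the intercommunicator icomm, and stops the clock once the result comes back. The same round-trip measurement can be sketched with plain COMM_WORLD point-to-point calls (a simplification of the demo, meant to run with two ranks; names and the interval count are illustrative):

from mpi4py import MPI
import numpy as N

comm = MPI.COMM_WORLD
if comm.rank == 0:
    n = N.array(1000, dtype='i')              # number of quadrature intervals
    wt = MPI.Wtime()                          # start the clock before the exchange
    comm.Send([n, MPI.INT], dest=1, tag=0)
    pi = N.array(0.0, dtype='d')
    comm.Recv([pi, MPI.DOUBLE], source=1, tag=0)
    wt = MPI.Wtime() - wt                     # round-trip time, including the computation
    print('pi = %.10f, computed in %.6f s' % (pi, wt))
elif comm.rank == 1:
    n = N.array(0, dtype='i')
    comm.Recv([n, MPI.INT], source=0, tag=0)
    h = 1.0 / int(n)                          # midpoint rule for pi on one rank
    s = sum(4.0 / (1.0 + ((i + 0.5) * h) ** 2) for i in range(int(n)))
    comm.Send([N.array(s * h, dtype='d'), MPI.DOUBLE], dest=0, tag=0)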

github usgs/geobipy/geobipy/src/inversion/DataSetResults.py (view on GitHub)

            d, a = hm.fit_mixture(intervals, track=False, **kwargs)

            for j in range(nIntervals):
                dm = np.squeeze(d[j].means_[a[j]])
                dv = np.squeeze(d[j].covariances_[a[j]])

                nD = np.size(dm)

                line.hdfFile['/mixture_fits/means/data'][ind, j, :nD] = dm
                line.hdfFile['/mixture_fits/variances/data'][ind, j, :nD] = dv

            counter += 1
            if counter == nUpdate:
                print('rank {}, line/fiducial {}/{}, iteration {}/{},  time/dp {} h:m:s'.format(world.rank, self.lineNumbers[iL], line.fiducials[ind], i+1, chunk, str(timedelta(seconds=MPI.Wtime()-t0)/nUpdate)), flush=True)
                t0 = MPI.Wtime()
                counter = 0

        print('rank {} finished in {} h:m:s'.format(world.rank, str(timedelta(seconds=MPI.Wtime()-tBase))))
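The geobipy loop above resets its Wtime checkpoint every nUpdate iterations and reports the average time per datapoint as h:m:s via datetime.timedelta. Stripped of the inversion details, the pattern looks roughly like this (the workload here is a dummy placeholder):

from datetime import timedelta
from mpi4py import MPI

comm = MPI.COMM_WORLD
nItems, nUpdate = 1000, 100
tBase = t0 = MPI.Wtime()
counter = 0
for i in range(nItems):
    _ = sum(k * k for k in range(2000))   # placeholder for the real per-item work
    counter += 1
    if counter == nUpdate:
        perItem = timedelta(seconds=MPI.Wtime() - t0) / nUpdate
        print('rank {}, item {}/{}, time/item {}'.format(comm.rank, i + 1, nItems, perItem), flush=True)
        t0 = MPI.Wtime()
        counter = 0
print('rank {} finished in {}'.format(comm.rank, timedelta(seconds=MPI.Wtime() - tBase)), flush=True)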

github espressopp/espressopp/contrib/mpi4py/mpi4py-2.0.0/demo/osu_alltoall.py (view on GitHub)

    print ('# %-8s%20s' % ("Size [B]", "Latency [us]"))

    for size in message_sizes(MAX_MSG_SIZE):
        if size > large_message_size:
            skip = skip_large
            loop = loop_large
        iterations = list(range(loop+skip))
        s_msg = [s_buf, size, MPI.BYTE]
        r_msg = [r_buf, size, MPI.BYTE]
        #
        comm.Barrier()
        for i in iterations:
            if i == skip:
                t_start = MPI.Wtime()
            comm.Alltoall(s_msg, r_msg)
        t_end = MPI.Wtime()
        comm.Barrier()
        #
        if myid == 0:
            latency = (t_end - t_start) * 1e6 / loop
            print ('%-10d%20.2f' % (size, latency))
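The key detail in the alltoall benchmark is that the clock only starts once i reaches skip, so the warm-up iterations are excluded, and the average is taken over the loop timed iterations. A self-contained reduction of that pattern with a fixed message size (buffer sizes and iteration counts here are arbitrary choices, not the OSU defaults):

from mpi4py import MPI

comm = MPI.COMM_WORLD
size, skip, loop = 1024, 10, 100
s_buf = bytearray(size * comm.size)     # one 'size'-byte block per destination rank
r_buf = bytearray(size * comm.size)

comm.Barrier()
for i in range(loop + skip):
    if i == skip:                       # start timing only after the warm-up
        t_start = MPI.Wtime()
    comm.Alltoall([s_buf, size, MPI.BYTE], [r_buf, size, MPI.BYTE])
t_end = MPI.Wtime()
comm.Barrier()

if comm.rank == 0:
    print('avg alltoall latency: %.2f us' % ((t_end - t_start) * 1e6 / loop))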

github espressopp/espressopp/contrib/mpi4py/mpi4py-2.0.0/demo/osu_latency.py (view on GitHub)

    message_sizes = [0] + [2**i for i in range(30)]
    for size in message_sizes:
        if size > MAX_MSG_SIZE:
            break
        if size > large_message_size:
            skip = skip_large
            loop = loop_large
        iterations = list(range(loop+skip))
        s_msg = [s_buf, size, MPI.BYTE]
        r_msg = [r_buf, size, MPI.BYTE]
        #
        comm.Barrier()
        if myid == 0:
            for i in iterations:
                if i == skip:
                    t_start = MPI.Wtime()
                comm.Send(s_msg, 1, 1)
                comm.Recv(r_msg, 1, 1)
            t_end = MPI.Wtime()
        elif myid == 1:
            for i in iterations:
                comm.Recv(r_msg, 0, 1)
                comm.Send(s_msg, 0, 1)
        #
        if myid == 0:
            latency = (t_end - t_start) * 1e6 / (2 * loop)
            print ('%-10d%20.2f' % (size, latency))
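The ping-pong version is the same idea for point-to-point traffic: only rank 0 reads the clock, each iteration covers a full send/receive round trip, and the result is divided by 2 * loop to get a one-way latency. A minimal two-rank sketch:

from mpi4py import MPI

comm = MPI.COMM_WORLD
size, skip, loop = 1024, 10, 1000
buf = bytearray(size)

comm.Barrier()
if comm.rank == 0:
    for i in range(loop + skip):
        if i == skip:
            t_start = MPI.Wtime()
        comm.Send([buf, size, MPI.BYTE], dest=1, tag=1)
        comm.Recv([buf, size, MPI.BYTE], source=1, tag=1)
    t_end = MPI.Wtime()
    print('one-way latency: %.2f us' % ((t_end - t_start) * 1e6 / (2 * loop)))
elif comm.rank == 1:
    for i in range(loop + skip):
        comm.Recv([buf, size, MPI.BYTE], source=0, tag=1)
        comm.Send([buf, size, MPI.BYTE], dest=0, tag=1)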

github ome/openmicroscopy/components/tools/OmeroPy/src/flim-omero.py (view on GitHub)

            md = argmax(k1h[0][1:])
            # calc cumulative sum for lower half and find the 50th centile for this
            # range
            csl = 100 * k1h[0][1:md].cumsum() / k1h[0][1:md].sum()
            csmn = 100 - (csl - 50) * (csl - 50)
            csmin = argmax(csmn)
            # calc cumulative sum for upper half and find the 50th centile for this
            # range
            csu = 100 * k1h[0][md:].cumsum() / k1h[0][md:].sum()
            csmx = 100 - (csu - 50) * (csu - 50)
            csmax = argmax(csmx)
            # calculate the mean of this new range
            nmean = (k1h[0][csmin:csmax + md] * mp[csmin:csmax + md]
                     ).sum() / k1h[0][csmin:csmax + md].sum()
            tex.write('Mean %10.5f\\\\ \n' % nmean)
            end = mpi.Wtime()
            tex.write('Time %9.5f\\\\ \n' % ((end - start)))
            cummean += nmean
            # THIS BIT IS WHERE PROCESS RANK=0 generates output for use in the
            # tex document output pictures
            fig = plt.figure()
            n, bins, patches = plt.hist(
                gk1[npy.where(gk1 > 0)], 1000, normed=1, histtype='bar')
            fig.savefig(s_dir + 'out/gk1h_' + str(s_id) + '.pdf')
            fig.clf()
            if i_mode > 1:
                fig = plt.figure()
                n, bins, patches = plt.hist(
                    gk2[npy.where(gk2 > 0)], 1000, normed=1, histtype='bar')
                fig.savefig(s_dir + 'out/gk2h_' + str(s_id) + '.pdf')
                fig.clf()
            fig = plt.figure()

github olcf/pcircle/legacy/pcircle/fsum.py (view on GitHub)

    def __init__(self, circle, treewalk, chunksize, totalsize=0, totalfiles=0):
        BaseTask.__init__(self, circle)
        self.circle = circle
        self.treewalk = treewalk
        self.totalsize = totalsize
        self.totalfiles = totalfiles
        self.total_chunks = 0
        self.workcnt = 0
        #self.chunkq = []
        self.chunksize = chunksize

        # debug
        self.d = {"rank": "rank %s" % circle.rank}
        self.wtime_started = MPI.Wtime()
        self.wtime_ended = None

        # reduce
        self.vsize = 0
        self.vsize_prior = 0

        self.logger = utils.getLogger(__name__)

        if self.circle.rank == 0:
            print("Start parallel checksumming ...")

github usgs/geobipy/geobipy/src/inversion/DataSetResults.py (view on GitHub)

            for j in range(nIntervals):
                dm = np.asarray([d.mean for d in distributions[j]])
                dv = np.asarray([d.variance for d in distributions[j]])
                da = amplitudes[j]

                nD = np.size(dm)

                line.hdfFile['/mixture_fits/means/data'][ind, j, :nD] = dm
                line.hdfFile['/mixture_fits/variances/data'][ind, j, :nD] = dv
                line.hdfFile['/mixture_fits/amplitudes/data'][ind, j, :nD] = da

            counter += 1
            if counter == nUpdate:
                print('rank {}, line/fiducial {}/{}, iteration {}/{},  time/dp {} h:m:s'.format(world.rank, self.lineNumbers[iL], line.fiducials[ind], i+1, chunk, str(timedelta(seconds=MPI.Wtime()-t0)/nUpdate)), flush=True)
                t0 = MPI.Wtime()
                counter = 0

        print('rank {} finished in {} h:m:s'.format(world.rank, str(timedelta(seconds=MPI.Wtime()-tBase))), flush=True)

github keurfonluu/StochOPy/stochopy/evolutionary_algorithm.py (view on GitHub)

            if not converge and np.any( sigma * np.sqrt(diagC) > 1e3 * insigma ):
                converge = True
                self._flag = 6
                
            # TolFun: stop if fun-changes smaller than 1e-12
            if not converge and it > 2 and np.max(np.append(arfitness, arbestfitness)) - np.min(np.append(arfitness, arbestfitness)) < 1e-12:
                converge = True
                self._flag = 7
                
            # TolX: stop if x-changes smaller than 1e-11 times initial sigma
            if not converge and np.all( sigma * np.max(np.append(np.abs(pc), np.sqrt(diagC))) < 1e-11 * insigma ):
                converge = True
                self._flag = 8
                
            if self._mpi:
                self._time_serial[it-1] = MPI.Wtime() - starttime_serial
        
        arindex = np.argsort(arfitness)
        xopt = self._unstandardize(arxvalid[arindex[0]])
        gfit = arfitness[arindex[0]]
        self._xopt = np.array(xopt)
        self._gfit = gfit
        self._n_iter = it
        if self._mpi:
            self._time_serial = self._time_serial[:it] - self._time_parallel[:it]
            self._time_parallel = self._time_parallel[:it]
        if self._snap:
            self._models = self._models[:,:,:it]
            self._energy = self._energy[:,:it]
            self._means = self._means[:it,:]
        return xopt, gfit

github ClementiGroup/LSDMap-DM-d-MD/lsdmap/lsdm.py (view on GitHub)

        self.config = config
        self.args = args

        filename = args.struct_file[0] 
        self.struct_filename = filename
        self.npoints,self.natoms = coord_reader.get_nframes_natoms(filename)

        if coord_reader.supports_parallel_reading(filename): 
            # read coordinates in parallel
            self.idxs_thread, self.npoints_per_thread, self.offsets_per_thread = p_index.get_idxs_thread(comm, self.npoints)
            coords_thread = coord_reader.get_coordinates(filename, idxs=self.idxs_thread)
            coords_ravel = coords_thread.ravel()
            ravel_lengths, ravel_offsets = p_index.get_ravel_offsets(self.npoints_per_thread,self.natoms)
            coordstemp = np.zeros(self.npoints*3*self.natoms, dtype='float')
            start = MPI.Wtime()
            comm.Allgatherv(coords_ravel, (coordstemp, ravel_lengths, ravel_offsets, MPI.DOUBLE))
            self.coords = coordstemp.reshape((self.npoints,3,self.natoms))
        else: 
            # serial reading
            if rank == 0:
                self.coords = coord_reader.get_coordinates(filename)
            else:
                self.coords = np.zeros((self.npoints,3,self.natoms),dtype=np.double)
            comm.Bcast(self.coords, root=0) 

        logging.info('input coordinates loaded')

        self.initialize_local_scale()
        self.initialize_weights()
        self.initialize_metric()
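The excerpt takes a timestamp right before the Allgatherv, but the matching elapsed-time report falls outside the quoted lines. Closing out such a measurement usually looks like the sketch below (array sizes and the logging call are assumptions, not LSDMap's actual code):

import logging
import numpy as np
from mpi4py import MPI

logging.basicConfig(level=logging.INFO)
comm = MPI.COMM_WORLD

nlocal = 1000
sendbuf = np.random.rand(nlocal)
counts = comm.allgather(nlocal)                    # element count contributed by each rank
offsets = np.insert(np.cumsum(counts), 0, 0)[:-1]  # displacement of each rank's block
recvbuf = np.empty(sum(counts), dtype='d')

start = MPI.Wtime()
comm.Allgatherv(sendbuf, (recvbuf, counts, offsets, MPI.DOUBLE))
logging.info('Allgatherv of %d doubles took %.3f s', recvbuf.size, MPI.Wtime() - start)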

github sfepy/sfepy/sfepy/base/multiproc_mpi.py (view on GitHub)

def wait_for_tag(wtag, num=1):
    ndone = num
    start = MPI.Wtime()
    while ndone > 0:
        mpi_comm.recv(source=MPI.ANY_SOURCE, tag=wtag, status=mpi_status)
        tag = mpi_status.Get_tag()
        source = mpi_status.Get_source()
        logger.debug('received %s from %d (%.03fs)' % (tags.name[tag],
                                                       source,
                                                       MPI.Wtime() - start))
        if tag == wtag:
            ndone -= 1
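One caveat when interpreting differences like the one logged above: Wtime is only as fine-grained as the clock behind it, and MPI.Wtick() reports that resolution in seconds. A quick check:

from mpi4py import MPI

print('Wtime resolution on this process: %.3e s' % MPI.Wtick())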