if rank == 0:
    for d in range(1, size):
        print("rank= {}, sending to d= {}".format(rank, d))
        flag[0] = 1
        comm.Send([flag, MPI.FLOAT], dest=d)
    for d in range(1, size):
        n_t_r_l = np.empty(N*T, dtype='f')
        comm.Recv([n_t_r_l, MPI.FLOAT], source=d)
        n_t_r_l = n_t_r_l.reshape((N, T))
        print("rank= {}, recved from d= {} n_t_r_l= \n{}".format(rank, d, n_t_r_l))
else:
    comm.Recv([flag, MPI.FLOAT], source=0)
    print("rank= {}, recved flag= {}".format(rank, flag))
    # do sth
    # n_t_r_l = np.random.rand(N, T)
    n_t_r_l = np.arange(N*T, dtype='f') * rank
    comm.Send([n_t_r_l, MPI.FLOAT], dest=0)
    n_t_r_l = n_t_r_l.reshape((N, T))
    print("rank= {}, returned to master n_t_r_l= \n{}".format(rank, n_t_r_l))
            newcomm.send(obj=work, dest=status.Get_source())

    # Shutdown communicators
    newcomm.Disconnect()

    tsimend = timer()
    simcompletestr = '\n=== MPI master ({}, rank: {}) on {} completed simulation in [HH:MM:SS]: {}'.format(comm.name, comm.Get_rank(), hostname, datetime.timedelta(seconds=tsimend - tsimstart))
    print('{} {}\n'.format(simcompletestr, '=' * (get_terminal_width() - 1 - len(simcompletestr))))
##################
# Worker process #
##################
elif workerflag in sys.argv:
    # Connect to parent to get communicator
    try:
        comm = MPI.Comm.Get_parent()
        rank = comm.Get_rank()
    except ValueError:
        raise ValueError('MPI worker could not connect to parent')

    # Select GPU and get info
    gpuinfo = ''
    if args.gpu is not None:
        # Set device ID based on rank from list of GPUs
        args.gpu = args.gpu[rank]
        gpuinfo = ' using {} - {}, {} RAM '.format(args.gpu.deviceID, args.gpu.name, human_size(args.gpu.totalmem, a_kilobyte_is_1024_bytes=True))

    # Ask for work until stop sentinel
    for work in iter(lambda: comm.sendrecv(0, dest=0), StopIteration):
        currentmodelrun = work['currentmodelrun']
        # If Taguchi optimisation, add specific value for each parameter to
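# --- A hedged, runnable sketch of the request/sentinel protocol the worker
# loop above relies on, using COMM_WORLD rather than a spawned
# intercommunicator; the job contents are illustrative only:
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

if rank == 0:
    jobs = [{'currentmodelrun': i} for i in range(1, 6)]
    stopped = 0
    while stopped < size - 1:
        status = MPI.Status()
        comm.recv(source=MPI.ANY_SOURCE, status=status)   # "ready" token
        work = jobs.pop() if jobs else StopIteration      # sentinel stops worker
        comm.send(work, dest=status.Get_source())
        if work is StopIteration:
            stopped += 1
else:
    # sendrecv posts the ready token and blocks for the reply in one call;
    # iter() ends the loop when the StopIteration sentinel arrives
    for work in iter(lambda: comm.sendrecv(0, dest=0), StopIteration):
        print("rank {} processing model run {}".format(rank, work['currentmodelrun']))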
print("\nThis is node %d (%d of %d)" % (sim.rank(), sim.rank()+1, sim.num_processes()))
assert comm.rank == sim.rank()
assert comm.size == sim.num_processes()
data1 = numpy.empty(100, dtype=float)
if comm.rank == 0:
    data1 = numpy.arange(100, dtype=float)
comm.Bcast([data1, MPI.DOUBLE], root=0)
print(comm.rank, data1)
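# --- Note the capitalised Bcast above: it ships a raw buffer and needs the
# [data, MPI.DOUBLE] specification, whereas the lowercase bcast pickles any
# Python object. A small sketch combining the two, reusing comm and numpy
# from the snippet above:
meta = comm.bcast({'n': 100} if comm.rank == 0 else None, root=0)  # pickled dict
payload = numpy.empty(meta['n'], dtype=float)
if comm.rank == 0:
    payload[:] = numpy.arange(meta['n'], dtype=float)
comm.Bcast([payload, MPI.DOUBLE], root=0)                          # raw buffer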
data2 = numpy.arange(comm.rank, 10+comm.rank, dtype=float)
print(comm.rank, data2)
data2g = numpy.empty(10*comm.size)
comm.Gather([data2, MPI.DOUBLE], [data2g, MPI.DOUBLE], root=0)
if comm.rank == 0:
    print("gathered (2):", data2g)
data3 = numpy.arange(0, 5*(comm.rank+1), dtype=float)
print(comm.rank, data3)
if comm.rank == 0:
    sizes = range(5, 5*comm.size + 1, 5)
    disp = [size - 5 for size in sizes]
    data3g = numpy.empty(sum(sizes))
else:
    sizes = disp = []
    data3g = numpy.empty([])
comm.Gatherv([data3, data3.size, MPI.DOUBLE], [data3g, (sizes, disp), MPI.DOUBLE], root=0)
if comm.rank == 0:
    print("gathered (3):", data3g)
def bcast_tf_vars_from_root(sess, vars):
    """
    Send the root node's parameters to every worker.

    Arguments:
        sess: the TensorFlow session.
        vars: all parameter variables, including the optimizer's.
    """
    rank = MPI.COMM_WORLD.Get_rank()
    for var in vars:
        if rank == 0:
            MPI.COMM_WORLD.bcast(sess.run(var))
        else:
            sess.run(tf.assign(var, MPI.COMM_WORLD.bcast(None)))
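# --- bcast_tf_vars_from_root pickles one variable value at a time with the
# lowercase bcast. A TensorFlow-free sketch of the same root-to-workers
# parameter sync over plain numpy arrays (the shapes here are invented):
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
params = [np.random.rand(3), np.random.rand(2, 2)]   # per-rank parameters
# after this, every rank holds the root's values
params = [comm.bcast(p if comm.rank == 0 else None, root=0) for p in params]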
def _send_result_mpi(self):
    comm = MPI.Comm.Get_parent()
    self.info("Reducing the distributed results", global_msg=True)
    self.return_z_hat()
    if self.return_ztz:
        self.return_sufficient_statistics()
    self.return_cost()
    if self.timing:
        comm.send(self._log_updates, dest=0)
    comm.Barrier()
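# --- _send_result_mpi assumes it runs in a child process created with Spawn.
# A minimal sketch of that parent/child plumbing (file names hypothetical):
# parent.py
from mpi4py import MPI
import sys

children = MPI.COMM_SELF.Spawn(sys.executable, args=['worker.py'], maxprocs=2)
results = [children.recv(source=i) for i in range(2)]
children.Barrier()
children.Disconnect()

# worker.py
from mpi4py import MPI

parent = MPI.Comm.Get_parent()
parent.send({'rank': parent.Get_rank()}, dest=0)
parent.Barrier()
parent.Disconnect()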
def runMaster(self, comm):
    '''Master will distribute steps to run and write to the db.'''
    # Build a dictionary with step dependencies
    stepsDict = self.createStepsDict()
    #f = open('nodo%d.log' % self.rank, 'w')
    workingNodes = self.size - 1
    remainingJobs = len(self.steps)
    while workingNodes:
        #print >> f, "workingNodes %d, remainingJobs %d" % (workingNodes, remainingJobs)
        # Wait for a step request
        status = MPI.Status()
        jobId = NO_JOBS
        ##print >> f, "Waiting for request"
        jobId = comm.recv(None, source=MPI.ANY_SOURCE, tag=JOB_REQUEST, status=status)
        #print >> f, "Request received from node %d" % status.source
        if jobId != NO_JOBS:
            # Update job completion
            si = stepsDict[jobId]
            si.finish = True
            try:
                self.db.endSingleStep(si.step, si.info)
            except Exception as e:
                #print >> f, "ERROR: ", e
                pass
            #break
        # Try to find the next step to execute
        try:
            if remainingJobs:
                jobId = NO_AVAIL_JOB
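# --- The excerpt cuts off mid-scheduler. A minimal, self-contained sketch of
# the same request/dispatch protocol; JOB_REQUEST, NO_JOBS and the job
# payloads below are illustrative stand-ins for the original's constants:
from mpi4py import MPI

JOB_REQUEST = 1
NO_JOBS = -1

comm = MPI.COMM_WORLD
if comm.rank == 0:
    jobs = list(range(5))
    workingNodes = comm.size - 1
    while workingNodes:
        status = MPI.Status()
        finished = comm.recv(source=MPI.ANY_SOURCE, tag=JOB_REQUEST, status=status)
        nextJob = jobs.pop() if jobs else None   # None tells the worker to stop
        comm.send(nextJob, dest=status.Get_source())
        if nextJob is None:
            workingNodes -= 1
else:
    finished = NO_JOBS                           # nothing completed yet
    while True:
        comm.send(finished, dest=0, tag=JOB_REQUEST)
        job = comm.recv(source=0)
        if job is None:
            break
        finished = job                           # "run" the step, report its id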
def libE_mpi_defaults(libE_specs):
    "Fill in default values for MPI-based communicators."
    from mpi4py import MPI

    if 'comm' not in libE_specs:
        libE_specs['comm'] = MPI.COMM_WORLD.Dup()
    return libE_specs, MPI.COMM_NULL
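# --- Hypothetical usage: an empty dict stands in for user-supplied specs.
specs, null_comm = libE_mpi_defaults({})
print(specs['comm'].Get_size(), null_comm == MPI.COMM_NULL)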
def close(self):
    MPI.Unpublish_name(self.service, self.info, self.port)
    print('[Server] Service unpublished')
    MPI.Close_port(self.port)
    print('[Server] Service port closed')
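# --- A hedged sketch of the matching client side; 'compute' stands in for
# whatever service name the server published:
from mpi4py import MPI

port = MPI.Lookup_name('compute')       # resolve the service name to a port
server = MPI.COMM_WORLD.Connect(port)   # connect, yielding an intercommunicator
server.send({'ping': True}, dest=0)
server.Disconnect()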
    data = pd.DataFrame(population, columns=['x', 'y', 'quadkey'])
else:
    data = pd.read_csv("Vermont_pop.csv", header=0, usecols=[1, 2, 3])

t2 = time.time()
print("{} people took {:.1f}s".format(data.shape[0], t2 - t0))

#%% Phase 2: Generate Tile
# create a range of descending zoom levels
zoomlevels = range(upperzoom, lowerzoom, -1)
# track number of tiles
N = 0
# create a communicator
comm = MPI.COMM_WORLD
# loop through zoom levels
for j in range(len(zoomlevels)):
    level = zoomlevels[j]
    # grab the correct quadkey string based on zoom level
    data.loc[:, 'quadkey'] = data['quadkey'].map(lambda x: x[0:level])
    # group dataframe by quadkey
    groups = data.groupby('quadkey')
    # get the list of unique quadkeys and its length
    quadtree = data['quadkey'].unique()
    n = len(quadtree)
    # loop through quadkeys, distributed cyclically across ranks
    for i in range(comm.rank, n, comm.size):
        quadkey = quadtree[i]
        # generate tile for this group
        tile.generate_tile(groups.get_group(quadkey), quadkey, level)
        # keep count of tiles
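# --- range(comm.rank, n, comm.size) above is a cyclic (round-robin)
# decomposition: rank r handles items r, r+size, r+2*size, ... A
# self-contained sketch, with per-rank counts reduced onto rank 0:
from mpi4py import MPI

comm = MPI.COMM_WORLD
items = list(range(23))                  # made-up work items

count = 0
for i in range(comm.rank, len(items), comm.size):
    # process items[i] here
    count += 1

total = comm.reduce(count, op=MPI.SUM, root=0)
if comm.rank == 0:
    print("processed {} items across {} ranks".format(total, comm.size))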