print "SNOTEL/RAWS Coord: Done initialization. Starting to send work."
cnt = 0
nrec = 0
for stn_id in fnl_stn_ids:
for min_ngh in params[P_NGH_RNG]:
for tair_var in ['tmin','tmax']:
if cnt < nwrkers:
dest = cnt+N_NON_WRKRS
else:
dest = MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)
nrec+=1
MPI.COMM_WORLD.send((stn_id,min_ngh,tair_var), dest=dest, tag=TAG_DOWORK)
cnt+=1
for w in np.arange(nwrkers):
MPI.COMM_WORLD.send((None,None,None), dest=w+N_NON_WRKRS, tag=TAG_STOPWORK)
print "coord_proc: done"
def chunk_recv(source, unpickle_it=True):

    chunks = []
    status = MPI.Status()

    # Keep receiving messages until [[MSG_OVER]] is received
    while True:

        msg = MPI.COMM_WORLD.recv(source=source, status=status)

        # If we are listening to ANY_SOURCE, receive the remainder of messages
        # from the SAME source as the first message (prevent interleaving)
        if source == MPI.ANY_SOURCE:
            source = status.Get_source()

        # print ("----- %d received msg of size %d" % (MPI.COMM_WORLD.Get_rank(), len(msg)))

        # If the special [[MSG_OVER]] string is received, we are done
        if msg == "[[MSG_OVER]]":
            break

        # Otherwise, add the string to the list of received strings
        chunks.append(msg)

    # Concatenate the strings, then unpickle if requested
    # (pickle is assumed here; the original may use cPickle)
    pickled_obj = "".join(chunks)

    if unpickle_it:
        return pickle.loads(pickled_obj)
    else:
        return pickled_obj
def proc_work(params, rank):

    status = MPI.Status()
    optim = XvalTairAnom(params[P_PATH_DB], params[P_VARNAME])

    bcast_msg = None
    bcast_msg = MPI.COMM_WORLD.bcast(bcast_msg, root=RANK_COORD)
    print "".join(["Worker ", str(rank), ": Received broadcast msg"])

    while 1:

        stn_id = MPI.COMM_WORLD.recv(source=RANK_COORD, tag=MPI.ANY_TAG, status=status)

        if status.tag == TAG_STOPWORK:
            MPI.COMM_WORLD.send([None] * 4, dest=RANK_WRITE, tag=TAG_STOPWORK)
            print "".join(["Worker ", str(rank), ": Finished"])
            return 0
        else:
            try:
                bias, mae, r2 = optim.run_xval(stn_id, params[P_NGH_RNG])
            except Exception as e:
                print "".join(["ERROR: Worker ", str(rank), ": could not xval ", stn_id, "...", str(e)])
                # On failure, fall back to fill values so the results stay
                # consistent; filling bias and r2 the same way as mae is an
                # assumption based on the unpacking above
                mae = np.ones((params[P_NGH_RNG].size, 12)) * netCDF4.default_fillvals['f8']
                bias = np.ones((params[P_NGH_RNG].size, 12)) * netCDF4.default_fillvals['f8']
                r2 = np.ones((params[P_NGH_RNG].size, 12)) * netCDF4.default_fillvals['f8']
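            # Assumed continuation (not shown in this fragment): forward the
            # results to the writer and hand this rank back to the coordinator
            # so it can be reused as the next 'dest'. The tuple order and the
            # TAG_DONE tag name are assumptions.
            MPI.COMM_WORLD.send((stn_id, bias, mae, r2), dest=RANK_WRITE, tag=TAG_DOWORK)
            MPI.COMM_WORLD.send(rank, dest=RANK_COORD, tag=TAG_DONE)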
ttl_xval_stns += stnids_climdiv.size

print "WRITER: Output NCDF files created"

stn_idxs = {}
for x in np.arange(stns.size):
    stn_idxs[stns[STN_ID][x]] = x

ttl_xvals = ttl_xval_stns
stat_chk = StatusCheck(ttl_xvals, 10)

while 1:

    stn_id, err = MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE,
                                      tag=MPI.ANY_TAG, status=status)

    if status.tag == TAG_STOPWORK:

        nwrkrs_done += 1

        if nwrkrs_done == nwrkers:

            ######################################################
            print "WRITER: Setting the optim # of nghs..."
            set_optim_nstns_tair_norm(stn_da, path_out_optim)
            ######################################################

            print "WRITER: Finished"
stn_idxs = {}
for x in np.arange(stn_ids.size):
    stn_idxs[stn_ids[x]] = x

ngh_idxs = {}
for x in np.arange(params[P_NGH_RNG].size):
    ngh_idxs[params[P_NGH_RNG][x]] = x

ttl_xvals = params[P_NGH_RNG].size * stn_ids.size
stat_chk = StatusCheck(ttl_xvals, 1000)

while 1:

    stn_id, min_ngh, hss = MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)

    if status.tag == TAG_STOPWORK:

        nwrkrs_done += 1

        if nwrkrs_done == nwrkers:
            print "Writer: Finished"
            return 0

    else:

        dim1 = ngh_idxs[min_ngh]
        dim2 = stn_idxs[stn_id]

        ds.variables['hss'][dim1, dim2] = hss
        ds.sync()

        # print "|".join(["WRITER", stn_id, str(min_ngh), "%.4f" % (hss,)])
print "SNOTEL/RAWS Coord: Done initialization. Starting to send work."
cnt = 0
nrec = 0
for stn_id in fnl_stn_ids:
for min_ngh in params[P_NGH_RNG]:
for tair_var in ['tmin','tmax']:
if cnt < nwrkers:
dest = cnt+N_NON_WRKRS
else:
dest = MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)
nrec+=1
MPI.COMM_WORLD.send((stn_id,min_ngh,tair_var), dest=dest, tag=TAG_DOWORK)
cnt+=1
for w in np.arange(nwrkers):
MPI.COMM_WORLD.send((None,None,None), dest=w+N_NON_WRKRS, tag=TAG_STOPWORK)
print "coord_proc: done"
ds_prcp = Dataset("".join([params[P_PATH_OUT], 'infill_prcp.nc']), 'r+')
stnids_prcp = np.array(ds_prcp.variables['stn_id'][:], dtype="
ttl_infills = stnids_prcp.size
def proc_coord(twx_cfg, mask_stns, nwrkers):

    stndb = StationDataDb(twx_cfg.fpath_stndata_nc_all)
    stns = stndb.stns[mask_stns]

    cnt = 0
    nrec = 0

    for stn in stns:

        if cnt < nwrkers:
            dest = cnt + N_NON_WRKRS
        else:
            dest = MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)
            nrec += 1

        MPI.COMM_WORLD.send(stn[STN_ID], dest=dest, tag=TAG_DOWORK)
        cnt += 1

    # Workers key off TAG_STOPWORK, so the payload of the stop message is ignored
    for w in np.arange(nwrkers):
        MPI.COMM_WORLD.send(stn[STN_ID], dest=w + N_NON_WRKRS, tag=TAG_STOPWORK)
MPI.COMM_WORLD.bcast(atiler.build_tile_grid_info(), root=RANK_COORD)

print "COORD: Starting to send work chunks to workers..."

cnt = 0

try:

    while 1:

        tile_num, wrk_chk = atiler.next()

        if cnt < nwrkers:
            dest = cnt + N_NON_WRKRS
        else:
            dest = MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)

        cnt += 1

        MPI.COMM_WORLD.Send([wrk_chk, MPI.DOUBLE], dest=dest, tag=tile_num)

except StopIteration:
    pass

for w in np.arange(nwrkers):
    MPI.COMM_WORLD.Send([wrk_chk, MPI.DOUBLE], dest=w + N_NON_WRKRS, tag=TAG_STOPWORK)

print "coord_proc: done"
stns = stn_da.stns[stn_mask]
stn_da.ds.close()
stn_da = None

ds = Dataset(params[P_PATH_WRITEDB], 'r+')

mths = np.arange(12)
mthNames = []
for mth in mths:
    mthNames.append(get_norm_varname(mth + 1))

stat_chk = StatusCheck(stns.size, 250)

while 1:

    stn_id, tair_daily, tair_norms = MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)

    if status.tag == TAG_STOPWORK:

        nwrkrs_done += 1

        if nwrkrs_done == nwrkers:
            print "Writer: Finished"
            return 0

    else:

        x = np.nonzero(stn_ids == stn_id)[0][0]

        # Write the station's daily values and its 12 monthly normals
        ds.variables[params[P_VARNAME]][:, x] = tair_daily
        for i in mths:
            ds.variables[mthNames[i]][x] = tair_norms[i]

        ds.sync()