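# All of the snippets below open HDF5 files through PyTables' pre-3.0 API
# (tables.openFile, getNode, createCArray, ...), which PyTables 3.x renames
# to open_file, get_node, create_carray, and so on.
# A minimal sketch of the shared open/read/close pattern; the file name
# "example.h5" and the node name "/data" are illustrative assumptions:
import tables

h5file = tables.openFile("example.h5", mode="r")
data = h5file.getNode("/", "data").read()
h5file.close()
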
import tables as h5  # this snippet accesses PyTables through the "h5" alias

def compute_soil_drainage(result_fname, delta_t, cell_id):
    """Sum one cell's simulated soil outflow and convert it to a volume."""
    h5file = h5.openFile(result_fname)
    node = h5file.getNode('/Soil', 'Qs_out')
    # Drop the initial time step and keep only the requested cell.
    flows = node.read()[1:, cell_id]
    runoff_vol = flows.sum() * delta_t
    h5file.close()
    return runoff_vol
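# A hypothetical call; the file name, time step and cell index below are
# illustrative assumptions, not values from the original code:
volume = compute_soil_drainage('simulation_results.h5', delta_t=21600, cell_id=0)
print('Drained volume:', volume)
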
def load_data(inlist, seq_h5_filename, min_counts, skips):
    infiles = open_files(inlist, "rt")
    seq_h5 = tables.openFile(seq_h5_filename, "r")
    end_of_file = False
    count_table = []
    keep_list = []
    info_list = []
    for infile in infiles:
        # Two successive reads: the first line (presumably a header) is
        # discarded, the second is the first data row.
        line = infile.readline()
        line = infile.readline()
        if not line:
            end_of_file = True
        else:
            info_list.append(line.strip().split())
    while not end_of_file:
        snp_info = info_list[0]

plot(x, pd4, label="Poisson")
plot(x, d1234 / all, label="Simulation")
xlabel("Mean density")
ylabel("Probability")
title("Prob: four detectors with particles")
legend(loc='best')
savefig("plots/poisson-four.pdf")

if __name__ == '__main__':
    # Only open the data files if they are not already defined, so the
    # script can be re-run in an interactive session without reopening them.
    try:
        data, kdata
    except NameError:
        data = tables.openFile(DATAFILE, 'r')
        kdata = tables.openFile(KASCADEDATAFILE, 'r')
    main()

def load(cls, filepath):
    """
    Right now this load method isn't done in a very nice way.
    TODO: Complete refactoring.
    """
    import tables
    file = tables.openFile(filepath, mode='r')
    document = neuroml.NeuroMLDocument()
    for node in file.root:
        if hasattr(node, 'vertices'):
            loaded_morphology = cls.__extract_morphology(node)
            document.morphology.append(loaded_morphology)
        else:
            for morphology in node:
                loaded_morphology = cls.__extract_morphology(morphology)
                document.morphology.append(loaded_morphology)
    return document
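# Hypothetical usage; "MorphologyLoader" stands in for whichever class
# defines this classmethod, and "morphologies.h5" is an assumed file name:
doc = MorphologyLoader.load('morphologies.h5')
print(len(doc.morphology), 'morphologies loaded')
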
# Write one XDMF file per output step that points at the per-processor HDF5
# solution files (most of the literal XML markup is missing from this snippet).
for step in range(start, finaltime+1, stride):
    XMFfile = open(basename+"."+str(step)+".xmf", "w")
    XMFfile.write(r"""
"""+"\n")
    XMFfile.write(r' '+"\n")
    for proc in range(0, size):
        filename = "solution.p"+str(proc)+"."+str(step)+".h5"
        print(filename)
        f1 = tables.openFile(filename)
        XMFfile.write(r''+"\n")
        XMFfile.write(r' <time value="'+str(step)+'">'+"\n")
        for tmp in f1.root:
            if tmp.name == "elements":
                XMFfile.write(r''+"\n")
                XMFfile.write(r' ' + filename + ':/elements'+"\n")
                XMFfile.write(r''+"\n")
            if tmp.name == "nodes":
                XMFfile.write(r''+"\n")
                XMFfile.write(r' ' + filename + ':/nodes'+"\n")
                XMFfile.write(r''+"\n")
            if tmp.name == "u":
                XMFfile.write(r''+"\n")
                XMFfile.write(r' ' + filename + ':/u'+"\n")

def main():
    args = parse_args()
    snp_tab_h5 = tables.openFile(args.snp_tab, "r")
    snp_index_h5 = tables.openFile(args.snp_index, "r")
    if args.haplotype:
        hap_h5 = tables.openFile(args.haplotype, "r")
        ind_idx = lookup_individual_index(args.samples, args.individual)
    else:
        hap_h5 = None
        ind_idx = None
    ref_count_h5 = tables.openFile(args.ref_as_counts, "w")
    alt_count_h5 = tables.openFile(args.alt_as_counts, "w")
    other_count_h5 = tables.openFile(args.other_as_counts, "w")
    read_count_h5 = tables.openFile(args.read_counts, "w")
    output_h5 = [ref_count_h5, alt_count_h5, other_count_h5, read_count_h5]
    chrom_dict = {}
    # initialize every chromosome in output files
    chrom_list = chromosome.get_all_chromosomes(args.chrom)
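    # A sketch of what "initialize every chromosome in output files" could
    # look like with the PyTables 2.x API; the chromosome attributes (name,
    # length) and the UInt8Atom / zlib filter choices are illustrative
    # assumptions, not taken from the original code:
    zlib_filter = tables.Filters(complevel=1, complib="zlib")
    for chrom in chrom_list:
        chrom_dict[chrom.name] = chrom
        for h5f in output_h5:
            h5f.createCArray(h5f.root, chrom.name, tables.UInt8Atom(),
                             shape=(chrom.length,), filters=zlib_filter)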

def __init__(self, data_path):
    self.data = tables.openFile(data_path, 'a')
    self.station_groups = ['/s%d' % u for u in self.stations]
    self.cluster = clusters.ScienceParkCluster(self.stations)
    self.trig_threshold = .5
"""Open a HDF5 kwik file."""
if not os.path.exists(self.filename_kwik) and os.path.exists(self.filenames['fet']):
klusters_to_hdf5(self.filename, self.klusters_to_hdf5_progress_report)
self.initialize_logfile()
# Load the similarity measure chosen by the user in the preferences
# file: 'gaussian' or 'kl'.
# Refresh the preferences file when a new file is opened.
# USERPREF.refresh()
self.similarity_measure = self.userpref['similarity_measure'] or 'gaussian'
debug("Similarity measure: {0:s}.".format(self.similarity_measure))
info("Opening {0:s}.".format(self.filename))
if os.path.exists(self.filename):
self.kwik = tb.openFile(self.filename, mode='r+')
# Get the list of shanks.
# WARNING
# The commented code above detects the shank indices from introspection
# in the "shanks" group. It is not necessary anymore as soon as the
# metadata contains a "SHANKS" attribute with the list of shanks.
# self.shanks = [int(re.match("shank([0-9]+)",
# shank._v_name).group(1)[0])
# for shank in self.kwik.listNodes('/shanks')]
self.read_metadata(self.kwik)
# By default, read the first available shank.
self.set_shank(self.shanks[0])
self.read_shank()

import tables

try:
    data
except NameError:
    data = tables.openFile('data-e15.h5', 'r')

try:
    R, T
except NameError:
    events = data.root.simulations.angle_0
    r_list, t_list = [], []
    for event in events:
        if not event['id'] % 10:
            r0 = event['r']
            fi0 = event['phi']
        elif event['pid'] in [-2, 2, -3, 3]:
            r1 = event['r']
            fi1 = event['phi']

# from Jos's email

Returns
-------
sp_dict : dict
    filtered recordings
"""
data = spikes['data']
sp_dict = spikes.copy()
if filter_obj is None:
    return spikes
# Filter the recording chunk by chunk, writing the result into an on-disk
# CArray backed by a temporary HDF5 file.
filename = tempfile.mktemp(suffix='.h5')
atom = tables.Atom.from_dtype(np.dtype('float64'))
shape = data.shape
h5f = tables.openFile(filename, 'w')
carray = h5f.createCArray('/', 'test', atom, shape)
_open_files[filename] = h5f
chunksize = int(chunksize)
n_chunks = int(np.ceil(shape[1] * 1.0 / chunksize))
for i in range(shape[0]):
    for j in range(n_chunks):
        stop = int(np.min(((j + 1) * chunksize, shape[1])))
        carray[i, j * chunksize:stop] = filter_obj(
            data[i, j * chunksize:stop], sp_dict['FS'])
sp_dict['data'] = carray
return sp_dict
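
# The filtered data above lives in a temporary HDF5 file tracked in
# _open_files; a minimal cleanup sketch under that assumption (the helper
# name "close_filter_files" is hypothetical):
import os

def close_filter_files():
    for fname, h5f in list(_open_files.items()):
        h5f.close()
        os.remove(fname)
        del _open_files[fname]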