drn_spd = []
print(mf.drn.stress_period_data[0].dtype)
# Reuse the stage from the first drain cell of the parent model
drn_stage = mf.drn.stress_period_data[0]["elev"][0]
i = mfr.nrow - 1  # bottom row of the refined model
ib = mfr.bas6.ibound[0].array
for j in range(mfr.ncol):
    if ib[i, j] == 0:
        continue  # skip inactive cells
    drn_spd.append([0, i, j, drn_stage, 10.0])  # layer, row, col, stage, cond
flopy.modflow.ModflowDrn(mfr, stress_period_data={0: drn_spd})
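# A minimal, self-contained sketch of the same active-cell masking pattern,
# using a toy numpy ibound array (nrow, ncol, stage, and conductance values
# here are invented stand-ins, not from the original model):
import numpy as np

nrow, ncol = 3, 5
ibound = np.ones((nrow, ncol), dtype=int)  # 1 = active, 0 = inactive
ibound[-1, 2] = 0  # deactivate one cell in the bottom row

spd = []
i = nrow - 1  # bottom row
for j in range(ncol):
    if ibound[i, j] == 0:
        continue  # skip inactive cells
    spd.append([0, i, j, 1.5, 10.0])  # layer, row, col, stage, conductance
print(spd)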
rdata = pd.DataFrame.from_records(mf.sfr.reach_data)
sdata = pd.DataFrame.from_records(mf.sfr.segment_data[0])
print(rdata.reachID)
# Pad the reach table to one reach per model row
rdata = rdata.reindex(np.arange(mfr.nrow))
# print(rdata.strthick)
# return
rdata.loc[:, 'k'] = 0
# Map the parent-model column onto the refined grid
rdata.loc[:, 'j'] = (rdata.loc[0, "j"] * fac) + int(fac / 2.0)
rdata.loc[:, 'rchlen'] = mfr.dis.delc.array
rdata.loc[:, 'i'] = np.arange(mfr.nrow)
rdata.loc[:, "iseg"] = rdata.i + 1  # one segment per row
rdata.loc[:, "ireach"] = 1
rdata.loc[:, "reachID"] = rdata.index.values
rdata.loc[:, "outreach"] = rdata.reachID + 1  # each reach drains to the next
rdata.loc[mfr.nrow - 1, "outreach"] = 0  # last reach is the outlet
rdata.loc[:, "node"] = rdata.index.values
def _get_dataframe(self):
    if self.lookups:
        df = pandas.DataFrame.from_records(
            list(self.get_queryset().values_list(*self.lookups)),
            columns=self.lookups,
        )
    else:
        df = pandas.DataFrame()

    def clean_null_int(x):  # isnan here is math.isnan
        if x is None or isnan(x):
            return ''
        return int(x)

    # Avoids float representation of nullable integer fields
    for lookup in self.null_int_lookups:
        df[lookup] = df[lookup].apply(clean_null_int)
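# A hedged, standalone demo of the nullable-int cleanup above on a plain
# Series (toy values; the Django queryset machinery is omitted):
import pandas as pd
from math import isnan

def clean_null_int(x):
    if x is None or isnan(x):
        return ''  # keep missing values from forcing a float column
    return int(x)

s = pd.Series([1.0, float('nan'), 3.0])
print(s.apply(clean_null_int).tolist())  # [1, '', 3]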
def remove_timezone(dt):
'setup_id': int(eval_['oml:setup_id']),
'flow_id': int(eval_['oml:flow_id']),
'flow_name': eval_['oml:flow_name'],
'data_id': int(eval_['oml:data_id']),
'data_name': eval_['oml:data_name'],
'function': eval_['oml:function'],
'upload_time': eval_['oml:upload_time'],
'uploader': int(eval_['oml:uploader']),
'uploader_name': user_dict[eval_['oml:uploader']],
'value': value,
'values': values,
'array_data': array_data}
if output_format == 'dataframe':
    rows = [value for key, value in evals.items()]
    evals = pd.DataFrame.from_records(rows, columns=rows[0].keys())
return evals
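# A minimal sketch of the dict-of-records -> DataFrame step above, with toy
# evaluation rows (keys are illustrative, not the full OpenML schema):
import pandas as pd

evals = {
    1: {'flow_id': 10, 'function': 'accuracy', 'value': 0.91},
    2: {'flow_id': 11, 'function': 'accuracy', 'value': 0.88},
}
rows = [value for key, value in evals.items()]
df = pd.DataFrame.from_records(rows, columns=list(rows[0].keys()))
print(df)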
# 'Matmul Batch 1': 1, 'Matmul Batch 16': 16,
# 'Matmul Batch 64': 64, 'Matmul Batch 256': 256}
# 'Matmul Batch1': 1, 'Matmul Batch16': 16,
# 'Matmul Batch64': 64, 'Matmul Batch256': 256}
'Matmul 1': 1, 'Matmul 16': 16, 'Matmul 64': 64,
'Matmul 256': 256}
for i, nbytes in enumerate([8, 16, 32]):
    bytes_str = '{}B'.format(nbytes)
    dicts = []
    for algo in ALGOS:
        # Synthetic throughput samples centered on 256/nbytes, shifted per algo
        dps = np.random.randn(10) + 256 / nbytes
        dps += algo2offset[algo] / nbytes
        dicts += [{'algo': algo, 'nbytes': bytes_str, 'y': y} for y in dps]
    df = pd.DataFrame.from_records(dicts)
else:
    # ALGOS = ['Bolt', 'PQ', 'OPQ', 'PairQ', 'Matmul 1', 'Matmul 16',
    #          'Matmul 64', 'Matmul 256', 'Matmul 1024']
    if with_matmuls:
        ALGOS = ['Bolt', 'Binary Embedding', 'PQ', 'OPQ',
                 'Matmul 1', 'Matmul 256', 'Matmul 1024']
    else:
        ALGOS = ['Bolt', 'Binary Embedding', 'PQ', 'OPQ']
    df = results.query_speed_results()
    df['y'] = df['y'] / 1e9  # convert to billions
    print("df cols: ", df.columns)
    df.rename(columns={'algo': ' '}, inplace=True)  # hide from legend
    # ax = sb.barplot(x='x', y='y', hue=' ', ci=95, data=df, ax=axes[i])
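# A hedged sketch of the two presentation tweaks above, on toy numbers:
# scale throughput to billions and blank out the hue column name so a
# seaborn legend would show no title:
import pandas as pd

df = pd.DataFrame({'algo': ['Bolt', 'PQ'], 'y': [3.2e9, 1.1e9]})
df['y'] = df['y'] / 1e9
df.rename(columns={'algo': ' '}, inplace=True)
print(df)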
if len(records) == 0:
    return records
version = Version(records[0][0])
if flatten:
    # All records must share one version before they can be combined
    if not all(Version(v) == version for v, _ in records):
        unexpected = [Version(v) for v, _ in records if Version(v) != version][0]
        raise UnexpectedVersionError(unexpected, version)
    # combine each record list
    records = [item for _, data in records for item in data]
    return version, pd.DataFrame.from_records(records)
else:
    # list of (version, DataFrame) tuples
    return [(Version(r[0]), pd.DataFrame.from_records(r[1])) for r in records]
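# A toy demo of the flatten branch: records are assumed to be
# (version_string, list_of_row_dicts) tuples, as in the code above:
import pandas as pd

records = [("1.0", [{"a": 1}, {"a": 2}]),
           ("1.0", [{"a": 3}])]
rows = [item for _, data in records for item in data]
print(pd.DataFrame.from_records(rows))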
rows = []
for string in strings:
    full_peptide, start, stop = parse_string(string)
    row = {}
    row['SourceSequence'] = full_peptide
    row['MutationStart'] = start
    row['MutationEnd'] = stop
    row['GeneInfo'] = source
    # Placeholder values for fields this source does not provide
    row['Gene'] = '-'
    row['GeneMutationInfo'] = '-'
    row['PeptideMutationInfo'] = '-'
    row['TranscriptId'] = '-'
    rows.append(row)
return pd.DataFrame.from_records(
    rows,
    columns=(
        'SourceSequence',
        'MutationStart',
        'MutationEnd',
        'GeneInfo',
        'Gene',
        'GeneMutationInfo',
        'PeptideMutationInfo',
        'TranscriptId',
    ))
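# A small sketch of from_records with an explicit column tuple: column
# order is fixed and missing fields come out as NaN (toy data):
import pandas as pd

rows = [{'SourceSequence': 'MKT', 'MutationStart': 2}]
df = pd.DataFrame.from_records(
    rows, columns=('SourceSequence', 'MutationStart', 'MutationEnd'))
print(df)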
# Delete 0 values (not provided in evaluation)
res_unc = [x for x in res_unc if x != 0.0]
par_unc.extend(res_unc)
records = []
for i, E in enumerate(energy):
    records.append([energy[i], spin[i], gn[i], gg[i],
                    gfa[i], gfb[i]])
corr = endf.get_intg_record(file_obj)
# Scale the correlation matrix by the uncertainties to get covariance
cov = np.diag(par_unc).dot(corr).dot(np.diag(par_unc))
# Create pandas DataFrame with resonance data
columns = ['energy', 'J', 'neutronWidth', 'captureWidth',
           'fissionWidthA', 'fissionWidthB']
parameters = pd.DataFrame.from_records(records, columns=columns)
# Determine mpar (number of parameters for each resonance in
# covariance matrix)
nparams, params = parameters.shape
covsize = cov.shape[0]
mpar = int(covsize / nparams)
# Add parameters from File 2
parameters = _add_file2_contributions(parameters,
                                      resonance.parameters)
# Create instance of ReichMooreCovariance
rmc = cls(energy_min, energy_max, parameters, cov, mpar, lcomp,
          resonance)
return rmc
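# A toy numeric check of the covariance construction above:
# diag(unc) . corr . diag(unc) rescales a correlation matrix by
# per-parameter uncertainties (values invented):
import numpy as np

par_unc = np.array([0.1, 0.2, 0.3])
corr = np.array([[1.0, 0.5, 0.0],
                 [0.5, 1.0, 0.2],
                 [0.0, 0.2, 1.0]])
cov = np.diag(par_unc).dot(corr).dot(np.diag(par_unc))
print(cov)  # diagonal holds the squared uncertainties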
def drawChart(self, candlesticks, orders, movingAverages):
    # googlecharts
    output = open("./output/data.js", 'w')
    output.truncate()
    # candlesticks
    candlesticks = pd.DataFrame.from_records([c.toDict() for c in candlesticks])
    ma = pd.DataFrame(movingAverages)
    if len(ma) > 0:
        candlesticks['ma'] = ma
    else:
        candlesticks['ma'] = 0
    candlesticks['date'] = candlesticks['date'] / 1000  # ms -> s
    candlesticks.set_index('date', inplace=True)
    # orders
    orders = pd.DataFrame.from_records([o.toDict() for o in orders])
    orders['date'] = orders['date'] / 1000  # ms -> s
    if len(orders) > 1:
        orders.set_index('date', inplace=True)
    else:
        orders['orderNumber'] = 0
        orders['rate'] = 0
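# A hedged, runnable sketch of the timestamp handling above with toy
# candles (millisecond epochs divided down to seconds, then indexed):
import pandas as pd

candles = pd.DataFrame.from_records([
    {'date': 1600000000000, 'close': 101.5},
    {'date': 1600000060000, 'close': 102.0},
])
candles['date'] = candles['date'] / 1000  # ms -> s
candles.set_index('date', inplace=True)
print(candles)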
def as_dataframe(self) -> pd.DataFrame:
    if self.get_patent_details:
        return pd.DataFrame.from_records(self.as_list(), index='patent_num')
    else:
        return pd.DataFrame.from_records(self.as_list())
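# A toy demo of the index= argument used above: from_records pulls the
# named field out of each record and uses it as the index:
import pandas as pd

records = [{'patent_num': 'US123', 'title': 'Widget'},
           {'patent_num': 'US456', 'title': 'Gadget'}]
print(pd.DataFrame.from_records(records, index='patent_num'))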
    locus_list,
    flank_limit=flank_limit,
    chain=True,
    include_parent_locus=True,
)
# Extract the edges for the full set of genes
edges = self.subnetwork(
    genes_list,
    min_distance=0,
    sig_only=False,
    trans_locus_only=True,
    names_as_index=True,
)
if by_gene:
    # Keep only the trans edges and split each into one row per endpoint gene
    gene_split = pd.DataFrame.from_records(
        chain(
            *[
                ((gene_a, score), (gene_b, score))
                for gene_a, gene_b, score, *junk in edges[edges.trans == True]
                .reset_index()
                .values
            ]
        ),
        columns=["gene", "score"],
    )
    # Average the edge scores per gene
    gene_split = gene_split.groupby("gene").agg(np.mean)
    if iter_name is not None:
        gene_split["iter"] = iter_name
    gene_split.index.name = "gene"
    gene_split["num_trans_edges"] = len(edges)
    return gene_split
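# A minimal sketch of the edge-splitting trick above: each scored edge
# contributes its score to both endpoint genes, then scores are averaged
# per gene (toy edges; column names match the snippet):
import pandas as pd
from itertools import chain

edges = [("g1", "g2", 0.9), ("g1", "g3", 0.5)]
gene_split = pd.DataFrame.from_records(
    chain(*[((a, s), (b, s)) for a, b, s in edges]),
    columns=["gene", "score"],
)
print(gene_split.groupby("gene").mean())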