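"""Tests for ecl2df dataframe extraction and the ecl2csv/csv2ecl command line interfaces"""

import os
import sys
from pathlib import Path

import networkx
import pandas as pd
import pytest

from ecl2df import csv2ecl, ecl2csv, faults, nnc, pvt, rft, trans
from ecl2df.eclfiles import EclFiles

# Assumed location of the Eclipse test deck (the Reek model shipped with the
# ecl2df test data); adjust if the test data lives elsewhere:
DATAFILE = str(Path(__file__).absolute().parent / "data/reek/eclipse/model/2_R001_REEK-0.DATA")

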
def test_rft2df():
"""Test that dataframes are produced"""
eclfiles = EclFiles(DATAFILE)
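    # rft2df() merges RFT data for all wells and report dates into one dataframe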
rftdf = rft.rft2df(eclfiles)
assert "ZONE" in rftdf
assert "LEAF" not in rftdf # Topology metadata should not be exported
assert set(rftdf["WELLMODEL"]) == {"STANDARD"}
assert set(rftdf["WELL"]) == {
"OP_1",
"OP_2",
"OP_3",
"OP_4",
"OP_5",
"WI_1",
"WI_2",
"WI_3",
}
assert not rftdf.empty
    assert len(rftdf) == 115


def test_df2ecl_editnnc(tmpdir):
"""Test generation of EDITNNC keyword"""
eclfiles = EclFiles(DATAFILE)
nncdf = nnc.df(eclfiles)
tmpdir.chdir()
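    # Add a transmissibility multiplier column; one EDITNNC record is written per NNC pair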
nncdf["TRANM"] = 2
editnnc = nnc.df2ecl_editnnc(nncdf, filename="editnnc.inc")
editnnc_fromfile = "".join(open("editnnc.inc").readlines())
assert editnnc == editnnc_fromfile
assert "EDITNNC" in editnnc
assert editnnc.count("/") == len(nncdf) + 1
assert "avg multiplier" in editnnc
# Fails when columns are missing
with pytest.raises((KeyError, ValueError)):
nnc.df2ecl_editnnc(nncdf[["I1", "I2"]])
    editnnc = nnc.df2ecl_editnnc(nncdf, nocomments=True)
    assert "avg multiplier" not in editnnc  # comments should be absent with nocomments=True


def test_nnc2df():
"""Test that dataframes are produced"""
eclfiles = EclFiles(DATAFILE)
nncdf = nnc.df(eclfiles)
assert not nncdf.empty
assert "I1" in nncdf
assert "J1" in nncdf
assert "K1" in nncdf
assert "I2" in nncdf
assert "J2" in nncdf
assert "K2" in nncdf
assert "TRAN" in nncdf
prelen = len(nncdf)
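    # filter_vertical() should retain only vertical connections, i.e. pairs sharing I and J indices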
nncdf = nnc.filter_vertical(nncdf)
assert (nncdf["I1"] == nncdf["I2"]).all()
assert (nncdf["J1"] == nncdf["J2"]).all()
    assert len(nncdf) < prelen


def test_faults2df():
"""Test that dataframes are produced"""
eclfiles = EclFiles(DATAFILE)
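    # faults.df() extracts the FAULTS keyword from the parsed deck into a dataframe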
faultsdf = faults.df(eclfiles.get_ecldeck())
assert "NAME" in faultsdf
assert "I" in faultsdf
assert "J" in faultsdf
assert "K" in faultsdf
assert "FACE" in faultsdf
    assert not faultsdf.empty


def test_nx(tmpdir):
"""Test graph generation"""
eclfiles = EclFiles(DATAFILE)
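    # Build a graph of transmissibilities aggregated between FIPNUM regions (one node per region)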
network = trans.nx(eclfiles, region="FIPNUM")
assert network.number_of_nodes() == 6
networkx.write_gexf(
network, str(tmpdir.join("reek-fipnum-trans.gxf")), prettyprint=True
)
    assert os.path.exists(str(tmpdir.join("reek-fipnum-trans.gxf")))


def test_main(tmpdir):
"""Test command line interface"""
tmpcsvfile = str(tmpdir.join("pvt.csv"))
sys.argv = ["ecl2csv", "pvt", "-v", DATAFILE, "-o", tmpcsvfile]
ecl2csv.main()
assert os.path.exists(tmpcsvfile)
disk_df = pd.read_csv(tmpcsvfile)
assert "PVTNUM" in disk_df
assert "KEYWORD" in disk_df
assert not disk_df.empty
# Write back to include file:
incfile = str(tmpdir.join("pvt.inc"))
sys.argv = ["csv2ecl", "pvt", "-v", str(tmpcsvfile), "-o", incfile]
csv2ecl.main()
# Reparse the include file on disk back to dataframe
# and check dataframe equality
assert os.path.exists(incfile)
    disk_inc_df = pvt.df(open(incfile).read())
    pd.testing.assert_frame_equal(disk_df, disk_inc_df)
assert "FIPNUM" in disk_df
assert "EQLNUM" not in disk_df
assert len(disk_df) == 7675
# Group pr. FIPNUM:
sys.argv = [
"ecl2csv",
"pillars",
DATAFILE,
"--region",
"FIPNUM",
"--group",
"-o",
str(tmpcsvfile),
]
ecl2csv.main()
assert os.path.exists(str(tmpcsvfile))
disk_df = pd.read_csv(str(tmpcsvfile))
assert "PILLAR" not in disk_df # because of grouping
assert "FIPNUM" in disk_df # grouped by this.
assert len(disk_df) == 6
    # Test dates:
    sys.argv = [
        "ecl2csv",
        "pillars",
        DATAFILE,
        "--region",
        "",
        "--group",
        "--rstdates",
        "first",
        "-o",
        str(tmpcsvfile),  # output argument and the call below assumed, mirroring the runs above
    ]
    ecl2csv.main()
    assert os.path.exists(str(tmpcsvfile))


def test_prettyprint():
"""Test pretty printing via command line interface"""
sys.argv = ["ecl2csv", "gruptree", DATAFILE, "--prettyprint"]
    ecl2csv.main()


def test_main_subparser(tmpdir):
"""Test command line interface"""
tmpcsvfile = ".TMP-equil.csv"
sys.argv = ["ecl2csv", "equil", DATAFILE, "-o", tmpcsvfile]
ecl2csv.main()
assert os.path.exists(tmpcsvfile)
disk_df = pd.read_csv(tmpcsvfile)
assert not disk_df.empty
os.remove(tmpcsvfile)
    # First RFT export, via the DATA file (invocation assumed; a second export follows below):
    tmpcsvfile = tmpdir.join(".TMP-rft.csv")
    sys.argv = ["ecl2csv", "rft", "-v", DATAFILE, "-o", str(tmpcsvfile)]
    ecl2csv.main()
    assert os.path.exists(str(tmpcsvfile))
    disk_df = pd.read_csv(str(tmpcsvfile))
    assert not disk_df.empty
tmpcsvfile = tmpdir.join(".TMP-rft2.csv")
# Test with RFT file as argument:
sys.argv = [
"ecl2cvsv",
"rft",
"-v",
DATAFILE.replace(".DATA", ".RFT"),
"-o",
str(tmpcsvfile),
]
ecl2csv.main()
assert os.path.exists(str(tmpcsvfile))
disk_df = pd.read_csv(str(tmpcsvfile))
assert not disk_df.empty