def run_task(self, fw_spec):
    db_file = env_chk(self["db_file"], fw_spec)
    wf_uuid = self["wf_uuid"]
    mc_settings = self.get("mc_settings", {})

    # Get Heisenberg models from db
    mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
    mmdb.collection = mmdb.db["exchange"]

    # Get documents
    docs = list(
        mmdb.collection.find(
            {"wf_meta.wf_uuid": wf_uuid}, ["heisenberg_model", "nn_cutoff"]
        )
    )
    hmodels = [HeisenbergModel.from_dict(d["heisenberg_model"]) for d in docs]
    cutoffs = [hmodel.cutoff for hmodel in hmodels]
    # Sort on the cutoff value only, so ties never fall back to comparing model objects
    ordered_hmodels = [h for _, h in sorted(zip(cutoffs, hmodels), key=lambda t: t[0])]

    # Take the model with the smallest NN cutoff
    hmodel = ordered_hmodels[0]
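# --- Hedged aside: a minimal sketch of how an env_chk-style lookup resolves ">>db_file<<". ---
# This is an illustrative stand-in, not the atomate implementation; it assumes the FireWorks
# convention that a ">>key<<" token is looked up under fw_spec["_fw_env"].
def resolve_env_token(val, fw_spec, default=None):
    # ">>db_file<<" -> fw_spec["_fw_env"]["db_file"]; anything else passes through unchanged
    if isinstance(val, str) and val.startswith(">>") and val.endswith("<<"):
        return fw_spec.get("_fw_env", {}).get(val[2:-2], default)
    return val

# Example: resolve_env_token(">>db_file<<", {"_fw_env": {"db_file": "/path/db.json"}})
# returns "/path/db.json".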
# assimilate (i.e., parse)
task_doc = drone.assimilate(calc_dir)

# Check for additional keys to set based on the fw_spec
if self.get("fw_spec_field"):
    task_doc.update(fw_spec[self.get("fw_spec_field")])

# get the database connection
db_file = env_chk(self.get("db_file"), fw_spec)

# db insertion or taskdoc dump
if not db_file:
    with open("task.json", "w") as f:
        f.write(json.dumps(task_doc, default=DATETIME_HANDLER))
else:
    mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
    t_id = mmdb.insert_task(
        task_doc,
        use_gridfs=self.get("parse_dos", False)
        or bool(self.get("bandstructure_mode", False))
        or self.get("parse_chgcar", False)  # deprecated
        or self.get("parse_aeccar", False)  # deprecated
        or bool(self.get("store_volumetric_data", STORE_VOLUMETRIC_DATA)),
    )
    logger.info("Finished parsing with task_id: {}".format(t_id))

defuse_children = False
if task_doc["state"] != "successful":
    defuse_unsuccessful = self.get("defuse_unsuccessful", DEFUSE_UNSUCCESSFUL)
    if defuse_unsuccessful is True:
        defuse_children = True
    elif defuse_unsuccessful is False:
        pass
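# --- Hedged continuation: how the defuse flag typically feeds back to FireWorks. ---
# The helper below is an illustrative sketch, not the verbatim end of this task; the
# stored_data keys are assumptions, but FWAction's defuse_children argument is real.
from fireworks import FWAction

def _finish(t_id, defuse_children):
    # Store the parsed task id and, if the run failed, defuse downstream fireworks
    return FWAction(stored_data={"task_id": t_id}, defuse_children=defuse_children)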
def run_task(self, fw_spec):
    wfid = list(filter(lambda x: "wfid" in x, fw_spec["tags"])).pop()
    db_file = env_chk(self.get("db_file"), fw_spec)
    vaspdb = VaspCalcDb.from_db_file(db_file, admin=True)

    # ferroelectric workflow groups calculations by generated wfid tag
    polarization_tasks = vaspdb.collection.find(
        {"tags": wfid, "task_label": {"$regex": ".*polarization"}}
    )

    tasks = []
    outcars = []
    structure_dicts = []
    sort_weight = []
    energies_per_atom = []
    energies = []
    zval_dicts = []

    for p in polarization_tasks:
        # Grab data from each polarization task
        energies_per_atom.append(p["calcs_reversed"][0]["output"]["energy_per_atom"])
        energies.append(p["calcs_reversed"][0]["output"]["energy"])
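# --- Hedged aside: the task-document layout the loop above assumes. ---
# Illustrative only; real atomate task documents carry many more fields than shown here,
# and the tag/label values are placeholders.
example_polarization_task = {
    "tags": ["wfid_1234-abcd"],
    "task_label": "_polarization",
    "calcs_reversed": [
        {"output": {"energy": -42.0, "energy_per_atom": -5.25}}
    ],
}
energy_per_atom = example_polarization_task["calcs_reversed"][0]["output"]["energy_per_atom"]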
"number": sg.get_space_group_number(),
"point_group": sg.get_point_group_symbol(),
"source": "spglib",
"crystal_system": sg.get_crystal_system(),
"hall": sg.get_hall()}
d["created_at"] = datetime.utcnow()
db_file = env_chk(self.get('db_file'), fw_spec)
if not db_file:
del d["dos"]
with open(os.path.join(btrap_dir, "boltztrap.json"), "w") as f:
f.write(json.dumps(d, default=DATETIME_HANDLER))
else:
mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
# dos gets inserted into GridFS
dos = json.dumps(d["dos"], cls=MontyEncoder)
fsid, compression = mmdb.insert_gridfs(dos, collection="dos_boltztrap_fs",
compress=True)
d["dos_boltztrap_fs_id"] = fsid
del d["dos"]
mmdb.db.boltztrap.insert(d)
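# --- Hedged aside: reading the DOS back out of GridFS later. ---
# Assumes insert_gridfs stored the JSON string zlib-compressed under the "dos_boltztrap_fs"
# root, as the compress=True call above suggests; function and argument names are illustrative.
import json
import zlib

import gridfs

def load_boltztrap_dos(db, fs_id):
    # db is a pymongo Database; fs_id is the id saved as d["dos_boltztrap_fs_id"]
    fs = gridfs.GridFS(db, "dos_boltztrap_fs")
    raw = fs.get(fs_id).read()
    return json.loads(zlib.decompress(raw).decode())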
def __init__(self):
    launchpad_file = "/global/homes/s/sivonxay/.conda/envs/knl_env/config/my_launchpad.yaml"
    with open(launchpad_file, "r") as f:
        # safe_load avoids the bare yaml.load() call, which requires an explicit Loader in PyYAML >= 6
        t = yaml.safe_load(f)
    client = MongoClient(t["host"])
    db = client.fw_es_vasp
    # Database.authenticate only exists in pymongo < 4; newer clients take credentials in MongoClient
    db.authenticate(t["username"], t["password"])
    self.tasks = db.tasks

    db_file = "/global/homes/s/sivonxay/.conda/envs/knl_env/config/db.json"
    self.mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
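# --- Hedged aside: the shape of the db.json file that VaspCalcDb.from_db_file reads. ---
# Field names follow the common atomate convention; treat the exact set of keys as an
# assumption and the values as placeholders.
example_db_json = {
    "host": "localhost",
    "port": 27017,
    "database": "fw_es_vasp",
    "collection": "tasks",
    "admin_user": "admin",
    "admin_password": "changeme",
    "readonly_user": "reader",
    "readonly_password": "changeme",
}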
def run_task(self, fw_spec):
    db_file = env_chk(self["db_file"], fw_spec)
    wf_uuid = self["exchange_wf_uuid"]
    formula_pretty = self["parent_structure"].composition.reduced_formula
    mc_settings = self["mc_settings"]

    # Get Heisenberg models from db
    mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
    mmdb.collection = mmdb.db["exchange"]

    task_doc = {"wf_meta": {"wf_uuid": wf_uuid}, "formula_pretty": formula_pretty}
    if fw_spec.get("tags", None):
        task_doc["tags"] = fw_spec["tags"]

    # Vampire Monte Carlo settings
    mc_box_size = mc_settings["mc_box_size"]
    equil_timesteps = mc_settings["equil_timesteps"]
    mc_timesteps = mc_settings["mc_timesteps"]

    # Get a converged Heisenberg model if one was found
    if fw_spec["converged_heisenberg_model"]:
        hmodel = HeisenbergModel.from_dict(fw_spec["converged_heisenberg_model"])

        vc = VampireCaller(
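# --- Hedged aside: the mc_settings dictionary the task above expects. ---
# Keys are taken from the lookups above; the numeric values are placeholders, not defaults.
example_mc_settings = {
    "mc_box_size": 4.0,        # simulation box size handed to the Vampire run
    "equil_timesteps": 2000,   # equilibration steps before sampling
    "mc_timesteps": 4000,      # Monte Carlo sampling steps
}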
def run_task(self, fw_spec):
    db_file = env_chk(self["db_file"], fw_spec)
    wf_uuid = self["exchange_wf_uuid"]

    # Get magnetic orderings collection from db
    mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
    mmdb.collection = mmdb.db["magnetic_orderings"]

    formula = self["parent_structure"].formula
    formula_pretty = self["parent_structure"].composition.reduced_formula

    # Get documents
    docs = list(
        mmdb.collection.find(
            {"wf_meta.wf_uuid": wf_uuid},
            ["task_id", "structure", "energy_per_atom"],
        )
    )

    # Get structures and energies per atom
    structures = [Structure.from_dict(d["structure"]) for d in docs]
    epas = [d["energy_per_atom"] for d in docs]
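# --- Hedged continuation: pairing structures with energies and picking the lowest-energy ordering. ---
# Uses the structures/epas lists built above; the real task does more bookkeeping than this one step.
if epas:
    ground_state_epa, ground_state_structure = min(
        zip(epas, structures), key=lambda pair: pair[0]
    )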
# Check for additional keys to set based on the fw_spec
if self.get("fw_spec_field"):
    task_doc.update(fw_spec[self.get("fw_spec_field")])

task_doc["state"] = "successful"
task_doc = jsanitize(task_doc)

# get the database connection
db_file = env_chk(self.get("db_file"), fw_spec)

# db insertion or taskdoc dump
if not db_file:
    with open("task_lobster.json", "w") as f:
        f.write(json.dumps(task_doc, default=DATETIME_HANDLER))
else:
    db = VaspCalcDb.from_db_file(db_file, admin=True)
    db.collection = db.db["lobster"]
    additional_outputs = self.get("additional_outputs", None)
    if additional_outputs:
        for filename in additional_outputs:
            fs_id = None
            if os.path.isfile(filename):
                fs_id = put_file_in_gridfs(
                    filename, db, collection_name="lobster_files", compress=True
                )
            elif os.path.isfile(filename + ".gz"):
                fs_id = put_file_in_gridfs(
                    filename + ".gz",
                    db,
                    collection_name="lobster_files",
                    compress=False,
def run_task(self, fw_spec):
    db_file = env_chk(self["db_file"], fw_spec)
    wf_uuid = self["wf_uuid"]

    vampire_output = loadfn("vampire_output.json")

    mmdb = VaspCalcDb.from_db_file(db_file, admin=True)
    mmdb.collection = mmdb.db["exchange"]

    task_doc = {"wf_meta": {"wf_uuid": wf_uuid}, "task_name": "vampire caller"}
    if fw_spec.get("tags", None):
        task_doc["tags"] = fw_spec["tags"]
    task_doc["vampire_output"] = vampire_output.as_dict()

    mmdb.collection.insert_one(task_doc)
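# --- Hedged continuation: retrieving the Vampire results stored above. ---
# Field names follow the task_doc built in the snippet; this query is illustrative only.
stored = mmdb.collection.find_one(
    {"wf_meta.wf_uuid": wf_uuid, "task_name": "vampire caller"}
)
vampire_output_dict = stored["vampire_output"] if stored else None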
    # (inside the per-mode loop over index k)
    omega = nm_frequencies[k]
    if nm_eigenvals[k] > 0:
        logger.warning("Mode: {} is UNSTABLE. Freq(cm^-1) = {}".format(k, -omega))
    raman_tensor = scale * raman_tensor * np.sum(nm_norms[k]) / np.sqrt(omega)
    raman_tensor_dict[str(k)] = raman_tensor.tolist()

d["raman_tensor"] = raman_tensor_dict
d["state"] = "successful"

# store the results
db_file = env_chk(self.get("db_file"), fw_spec)
if not db_file:
    with open("raman.json", "w") as f:
        f.write(json.dumps(d, default=DATETIME_HANDLER))
else:
    db = VaspCalcDb.from_db_file(db_file, admin=True)
    db.collection = db.db["raman"]
    db.collection.insert_one(d)

logger.info("Raman tensor calculation complete.")
return FWAction()
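# --- Hedged aside: loading the raman.json fallback dump written above when no db_file is configured. ---
# The nesting of "raman_tensor" follows the snippet; everything else about the document is illustrative.
import json

with open("raman.json") as f:
    raman_doc = json.load(f)

# Each entry maps a mode index (stored as a string key) to a 3x3 nested list.
mode_3_tensor = raman_doc["raman_tensor"].get("3")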