    # Not sure of the validity of this, but it seems to be the only way to invoke a
    # terminal, so Python environments may be set up from there.
    # Commands to execute before starting the Dask worker
    env_extra=TASK_STARTUP_COMMANDS
)

# Set up adaptive scaling: workers are distributed down to the cores through the
# subdivided processes. Some tuning may be needed.
cluster.adapt(minimum=0, maximum=MAX_NODES)
dask_client = Client(cluster)

# Build an interface to the server
client = portal.FractalClient(FRACTAL_ADDRESS, verify=False)

# Build a manager
manager = qcfractal.queue.QueueManager(client, dask_client, update_frequency=0.5,
                                       cores_per_task=CORES_PER_NODE // MAX_TASKS_PER_NODE,
                                       memory_per_task=MEMORY_PER_NODE // MAX_TASKS_PER_NODE)

# Important for a calm shutdown
from qcfractal.cli.cli_utils import install_signal_handlers
install_signal_handlers(manager.loop, manager.stop)

# Start the loop
manager.start()
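# --- Illustrative sketch (not from the source): the snippet above begins inside a
# cluster constructor call. Assuming a SLURM machine and the dask_jobqueue package,
# the missing construction might look roughly like the following. All ALL-CAPS names
# are placeholder values to adapt to your site, and `env_extra` is the (older)
# dask_jobqueue keyword for shell commands run before each worker starts.
import qcfractal
import qcfractal.interface as portal
from dask.distributed import Client
from dask_jobqueue import SLURMCluster

FRACTAL_ADDRESS = "localhost:7777"                       # placeholder server address
MAX_NODES = 10                                           # placeholder scaling ceiling
CORES_PER_NODE = 16                                      # placeholder hardware size
MEMORY_PER_NODE = 64                                     # GiB, placeholder
MAX_TASKS_PER_NODE = 4                                   # placeholder task packing
TASK_STARTUP_COMMANDS = ["source activate qcfractal"]    # placeholder environment setup

cluster = SLURMCluster(
    queue="normal",                                      # placeholder queue name
    cores=CORES_PER_NODE,
    memory=f"{MEMORY_PER_NODE}GB",
    walltime="01:00:00",
    env_extra=TASK_STARTUP_COMMANDS,
)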
adapter_logger = logging.getLogger(logger_map[settings.common.adapter])
adapter_logger.setLevel("DEBUG")
logger.setLevel("DEBUG")

if settings.manager.log_file_prefix is not None:
    tornado.options.options["log_file_prefix"] = settings.manager.log_file_prefix
    # Clone the log to stderr as well
    tornado.options.options["log_to_stderr"] = True
tornado.log.enable_pretty_logging()

if settings.manager.test:
    # Test this manager; no client needed
    client = None
else:
    # Connect to the specified Fractal server
    client = qcfractal.interface.FractalClient(
        address=settings.server.fractal_uri, **settings.server.dict(skip_defaults=True, exclude={"fractal_uri"})
    )
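# --- Minimal sketch of the settings-expansion pattern used above (assumptions:
# pydantic v1 and hypothetical field names; the real server-settings model may
# differ). `.dict(skip_defaults=True, exclude={"fractal_uri"})` yields only the
# explicitly set fields, minus the address, which is passed separately.
from typing import Optional
from pydantic import BaseModel

class ServerSettings(BaseModel):              # hypothetical stand-in for settings.server
    fractal_uri: str = "localhost:7777"
    username: Optional[str] = None
    password: Optional[str] = None
    verify: bool = True

s = ServerSettings(fractal_uri="api.qcarchive.molssi.org:443", verify=False)
print(s.dict(skip_defaults=True, exclude={"fractal_uri"}))   # -> {'verify': False}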
# Figure out per-task data
node_parallel_tasks = settings.common.nodes_per_task > 1  # Whether tasks are node-parallel
if node_parallel_tasks:
    supported_adapters = ["parsl"]
    if settings.common.adapter not in supported_adapters:
        raise ValueError("Node-parallel jobs are only supported with {} adapters".format(supported_adapters))
    # Node-parallel tasks use all cores on a worker
    cores_per_task = settings.common.cores_per_worker
    memory_per_task = settings.common.memory_per_worker
    if settings.common.tasks_per_worker > 1:
        raise ValueError(">1 task per node and >1 node per task are mutually exclusive")
else:
    cores_per_task = settings.common.cores_per_worker // settings.common.tasks_per_worker
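# --- Worked example of the partitioning above (hypothetical numbers, not from the
# source): a 16-core, 64 GiB worker running 4 tasks gives each task 4 cores and 16 GiB.
cores_per_worker, memory_per_worker, tasks_per_worker = 16, 64, 4
assert cores_per_worker // tasks_per_worker == 4
assert memory_per_worker // tasks_per_worker == 16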
def live_fractal_or_skip():
    """
    Ensure that a live connection to a Fractal server can be made.
    First looks for a local staging server, then tries the MolSSI QCArchive.
    """
    try:
        return FractalClient("localhost:7777", verify=False)
    except (requests.exceptions.ConnectionError, ConnectionRefusedError):
        print("Failed to connect to localhost, trying MolSSI QCArchive.")
        try:
            requests.get("https://api.qcarchive.molssi.org:443", json={}, timeout=5)
            return FractalClient()
        except (requests.exceptions.ConnectionError, ConnectionRefusedError):
            return pytest.skip("Could not make a connection to central Fractal server")
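# --- Example use of the helper above in a test (a sketch; the test name and the
# assertion are illustrative only).
def test_live_server_reachable():
    client = live_fractal_or_skip()    # skips the test when no server is reachable
    assert client is not None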
def postgres_server():
    if shutil.which("psql") is None:
        pytest.skip("Postgres is not installed on this server and no active postgres could be found.")

    storage = None
    psql = PostgresHarness({"database": {"port": 5432}})
    # psql = PostgresHarness({"database": {"port": 5432, "username": "qcarchive", "password": "mypass"}})
    if not psql.is_alive():
        print()
        print(
            f"Could not connect to a Postgres server at {psql.config.database_uri()}; "
            "this will increase the time per test session by ~3 seconds."
        )
        print()
        storage = TemporaryPostgres()
        psql = storage.psql
        print("Using Database: ", psql.config.database_uri())

    yield psql

    if storage:
        storage.stop()
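# --- Sketch of how the generator above is typically wired into pytest (the
# registration and the consumer test are assumptions, not part of the snippet).
import pytest

postgres_server = pytest.fixture(scope="session")(postgres_server)  # register as a session fixture

def test_postgres_harness_alive(postgres_server):
    # The fixture yields a PostgresHarness, so it should report itself alive here.
    assert postgres_server.is_alive()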
def build_socket_fixture(stype, server=None):
    print("")

    # Name of the test database for this backend
    storage_name = "test_qcfractal_storage" + stype

    # IP/port/drop-table handling is specific to each backend
    if stype == "sqlalchemy":
        server.create_database(storage_name)
        storage = storage_socket_factory(server.database_uri(), storage_name, db_type=stype, sql_echo=False)

        # Clean and re-init the database
        storage._clear_db(storage_name)
    else:
        raise KeyError("Storage type {} not understood".format(stype))

    yield storage

    if stype == "sqlalchemy":
        # TODO: drop the database
        # storage._clear_db(storage_name)
        pass
    else:
        raise KeyError("Storage type {} not understood".format(stype))
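# --- Sketch of turning the builder above into a concrete fixture (the fixture name
# and scope are assumptions; only the "sqlalchemy" backend is handled by the builder).
import pytest

@pytest.fixture(scope="module")
def sqlalchemy_storage_socket(postgres_server):
    yield from build_socket_fixture("sqlalchemy", postgres_server)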
        - spec: dynamic field (dict-like), can have any structure
        - tag: str
        - base_result: tuple (required); the first value is the class type of the
          result ('results' or 'procedure'), the second value is the ID of the
          result in the DB. Example: "base_result": ('results', result_id)

    Returns
    -------
    dict (data and meta)
        'data' is a list of the IDs of the tasks IN ORDER, including duplicates.
        An errored task has None as its ID.
        meta['duplicates'] holds the duplicate tasks.
    """

    meta = add_metadata_template()
    results = []

    with self.session_scope() as session:
        for task_num, record in enumerate(data):
            try:
                task_dict = record.dict(exclude={"id"})

                # # for compatibility with mongoengine
                # if isinstance(task_dict['base_result'], dict):
                #     task_dict['base_result'] = task_dict['base_result']['id']

                task = TaskQueueORM(**task_dict)
                task.priority = task.priority.value  # Must be an integer for sorting
                session.add(task)
                session.commit()
                results.append(str(task.id))
                meta["n_inserted"] += 1
            except IntegrityError as err:  # rare case
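                # (The original handler body is not shown. A sketch of the usual
                # recovery, stated as an assumption rather than the project's exact
                # code: roll back the failed insert, look up the pre-existing task's
                # id so it still appears in `results`, and record the entry in
                # meta["duplicates"], as promised in the docstring above.)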
def _copy_users(self, record_list: Dict):
    """
    Copy the given users as-is to the DB. Used for data migrations.

    Parameters
    ----------
    record_list : list of dict of user data

    Returns
    -------
    Dict with keys: data, meta
        Data is the ids of the inserted/updated/existing docs
    """

    meta = add_metadata_template()
    user_names = []

    with self.session_scope() as session:
        for user in record_list:
            doc = session.query(UserORM).filter_by(username=user["username"])

            if get_count_fast(doc) == 0:
                doc = UserORM(**user)
                if isinstance(doc.password, str):  # TODO, for mongo
                    doc.password = doc.password.encode("ascii")
                session.add(doc)
                session.commit()
                user_names.append(doc.username)
                meta["n_inserted"] += 1
            else:
                name = doc.first().username
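                # (Snippet truncated here. A sketch of the likely remainder, stated
                # as an assumption: the existing username is recorded as a duplicate
                # in `meta`, and the method ultimately returns
                # {"data": user_names, "meta": meta}, matching the docstring.)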
def add_wavefunction_store(self, blobs_list: List[Dict[str, Any]]):
    """
    Adds to the wavefunction key/value store table.

    Parameters
    ----------
    blobs_list : List[Dict[str, Any]]
        A list of wavefunction data blobs to add.

    Returns
    -------
    Dict[str, Any]
        Dict with keys 'data' (the IDs of the inserted blobs, with None for
        skipped entries) and 'meta'.
    """

    meta = add_metadata_template()
    blob_ids = []
    with self.session_scope() as session:
        for blob in blobs_list:
            if blob is None:
                blob_ids.append(None)
                continue

            doc = WavefunctionStoreORM(**blob)
            session.add(doc)
            session.commit()
            blob_ids.append(str(doc.id))
            meta["n_inserted"] += 1

    meta["success"] = True

    return {"data": blob_ids, "meta": meta}
"""
Adds to the key/value store table.
Parameters
----------
blobs_list : List[Any]
A list of data blobs to add.
Returns
-------
TYPE
Description
"""
meta = add_metadata_template()
blob_ids = []
with self.session_scope() as session:
for blob in blobs_list:
if blob is None:
blob_ids.append(None)
continue
doc = KVStoreORM(value=blob)
session.add(doc)
session.commit()
blob_ids.append(str(doc.id))
meta["n_inserted"] += 1
meta["success"] = True
return {"data": blob_ids, "meta": meta}
    Parameters
    ----------
    collection : str, optional
    name : str, optional
    return_json : bool
    with_ids : bool
    limit : int
    skip : int

    Returns
    -------
    A dict with keys: 'data' and 'meta'
        The data is a list of the collections found
    """

    meta = get_metadata_template()

    if name:
        name = name.lower()
    if collection:
        collection = collection.lower()

    query, errors = format_query(lname=name, collection=collection)

    data = []
    try:
        if projection:
            data = CollectionORM.objects(**query).only(*projection).limit(self.get_limit(limit)).skip(skip)
        else:
            data = CollectionORM.objects(**query).exclude("lname").limit(self.get_limit(limit)).skip(skip)

        meta["n_found"] = data.count()
        meta["success"] = True
    except Exception as err:
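        # (Handler body not shown. A sketch of the conventional handling, stated as
        # an assumption: record the failure on the metadata, e.g.
        # meta["error_description"] = str(err), leaving meta["success"] as False.)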