How to use the snakemake.utils.read_job_properties function in snakemake

To help you get started, we’ve selected a few snakemake examples based on popular ways snakemake.utils.read_job_properties is used in public projects.
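Before the examples, here is a minimal sketch of the common pattern they all follow: a cluster submission wrapper receives the snakemake-generated jobscript as its last argument, reads the job's properties from it, and maps them onto scheduler options. The sbatch flags and default values below are illustrative assumptions, not code from any of the projects listed.

#!/usr/bin/env python3
# Minimal sketch of the common pattern (illustrative only; the flag names and
# defaults are assumptions, not taken from the projects below).
import sys
import subprocess
from snakemake.utils import read_job_properties

# snakemake passes the generated jobscript as the last command-line argument
jobscript = sys.argv[-1]
job_properties = read_job_properties(jobscript)

# pick out commonly used fields with sensible fallbacks
threads = job_properties.get("threads", 1)
resources = job_properties.get("resources", {})
mem_mb = resources.get("mem_mb", 1000)

# build and run a submission command (SLURM shown purely as an example)
cmd = ["sbatch", "--cpus-per-task", str(threads), "--mem", str(mem_mb), jobscript]
subprocess.run(cmd, check=True)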


github metagenome-atlas / atlas / atlas / cluster / slurm / slurm-submit.py
"-t", "--time", help="time limit")
slurm_parser.add_argument(
    "--wrap", help="wrap command string in a sh script and submit")
slurm_parser.add_argument(
    "-C", "--constraint", help="specify a list of constraints")
slurm_parser.add_argument(
    "--mem", help="minimum amount of real memory")

args = parser.parse_args()

if args.help:
    parser.print_help()
    sys.exit(0)

jobscript = sys.argv[-1]
job_properties = read_job_properties(jobscript)

extras = ""
if args.positional:
    for m in args.positional:
        if m is not None:
            extras = extras + " " + m

arg_dict = dict(args.__dict__)


# Process resources
if "resources" in job_properties:
    resources = job_properties["resources"]
    if arg_dict["time"] is None:
        if "runtime" in resources:
            arg_dict["time"] = resources["runtime"]
github slowkow / snakefiles / bsub.py
import argparse
from snakemake.utils import read_job_properties


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("jobscript")
    parser.add_argument("-e", help="Write bsub stderr here")
    parser.add_argument("-o", help="Write bsub stdout here")
    args = parser.parse_args()

    job_properties = read_job_properties(args.jobscript)

    # By default, we use 1 thread.
    threads = job_properties.get('threads', 1)

    # Memory and runtime default to 0 MB and 0 minutes when unspecified.
    mem = int(job_properties['resources'].get('mem', '0'))
    runtime = int(job_properties['resources'].get('runtime', '0'))

    # Let the user specify the queue.
    queue = job_properties['resources'].get('queue', None)

    # Otherwise, choose an appropriate queue based on required resources.
    if not queue:
        queue = get_queue(threads, mem, runtime)
    
    # If we fail to find a queue, exit with an error.
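The excerpt ends just before the submission; a hedged sketch of how the gathered values might be turned into a bsub call (the exact options in slowkow's script may differ) could continue main() like this:

    # Hypothetical continuation (not the project's actual code): assemble the
    # bsub command from the values gathered above and submit the jobscript.
    import subprocess  # the excerpt omits the file's imports

    cmd = ["bsub", "-q", queue, "-n", str(threads),
           "-R", "rusage[mem={}]".format(mem),
           "-o", args.o or "bsub.%J.out",
           "-e", args.e or "bsub.%J.err",
           "sh", args.jobscript]
    subprocess.run(cmd, check=True)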
github BuysDB / SingleCellMultiOmics / singlecellmultiomics / snakemake_workflows / _general / sge_wrapper.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import sys
import re
import subprocess
import uuid
from snakemake.utils import read_job_properties

# load
## loading job stdout & stderr path
log_path = sys.argv[-2]
## loading job script (provided by snakemake)
job_script = sys.argv[-1]
job_properties = read_job_properties(job_script)

# getting job parameters from snakemake-generated job script
try:
    threads = job_properties['threads']
except KeyError:
    threads = 1
n = threads

try:
    time = job_properties['cluster']['time'] # runtime is time in hours
except KeyError:
    try:
        time = job_properties['params']['runtime'].replace('h','') + ':00:00'
    except KeyError:
        time = '12:00:00'
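The excerpt gathers a thread count and a wall-time string; a hedged sketch of how they could be handed to SGE via qsub (standard SGE options are assumed here, and the project's real command may differ):

# Hypothetical continuation (not the project's actual code): request the
# gathered resources from SGE and submit the snakemake jobscript.
qsub_cmd = ["qsub", "-V", "-cwd",
            "-pe", "smp", str(n),
            "-l", "h_rt={}".format(time),
            "-o", log_path, "-e", log_path,
            job_script]
subprocess.check_output(qsub_cmd)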
github EnvGen / snakemake-workflows / scheduling / Snakefile_sbatch.py
def __init__(self, snakebashfile, dependencies=None, config=None):
        self.scriptname = snakebashfile
        job_properties = read_job_properties(snakebashfile)
        self.rule = job_properties['rule']
        self.ifiles = job_properties['input']
        self.ofiles = job_properties['output']
        self.params = job_properties['params']
        if dependencies is None or len(dependencies) < 1:
            self.dependencies = None
        else:
            # expects snakemake like list of numbers
            self.dependencies = dependencies
            assert len(self.dependencies) >= 1
        self.config = config
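The constructor stores a list of job-ID dependencies; a hedged, self-contained sketch of how such a list is typically chained into a SLURM dependency option (this is standard sbatch syntax, not necessarily what the project does):

# Hypothetical follow-up (not the project's code): a dependency list like the
# one stored above can be chained into a single sbatch option.
dependencies = ["12345", "12346"]
dep_opt = ("--dependency=afterok:" + ":".join(dependencies)) if dependencies else ""
print(dep_opt)  # --dependency=afterok:12345:12346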
github crazyhottommy / ChIP-seq-analysis / snakemake_ChIPseq_pipeline / msub_cluster.py
#!/usr/bin/env python3

## In order to submit all the jobs to the moab queuing system, one needs to write a wrapper.
## This wrapper is inspired by Daniel Park https://github.com/broadinstitute/viral-ngs/blob/master/pipes/Broad_LSF/cluster-submitter.py
## I asked him questions on the snakemake google group and he kindly answered: https://groups.google.com/forum/#!topic/snakemake/1QelazgzilY

import sys
import re
from snakemake.utils import read_job_properties

## snakemake will generate a jobscript containing all the (shell) commands from your Snakefile. 
## I think that's something baked into snakemake's code itself. It passes the jobscript as the last parameter.
## https://bitbucket.org/snakemake/snakemake/wiki/Documentation#markdown-header-job-properties

jobscript = sys.argv[-1]
props = read_job_properties(jobscript)
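The excerpt ends once props is read; a hedged sketch of a possible next step, using standard PBS/Moab msub resource options (these are assumptions, not the project's actual command):

## Hypothetical continuation (not the project's code): name the job after the
## rule and request one node with the rule's thread count via msub.
threads = props.get("threads", 1)
jobname = "snakejob_{}".format(props["rule"])
msub_cmd = "msub -N {name} -l nodes=1:ppn={ppn} {script}".format(
    name=jobname, ppn=threads, script=jobscript)
# the command string would then be run, e.g. with os.system(msub_cmd)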
github giesselmann / nanopype / profiles / mxq / mxq-submit.py
#
# Written by Pay Giesselmann
# ---------------------------------------------------------------------------------
import sys, subprocess
from snakemake.utils import read_job_properties


if __name__ == '__main__':
    jobscript = sys.argv[-1]
    job_properties = read_job_properties(jobscript)
    
    # default resources
    threads = '1'
    runtime = '60'
    memory = '16000'
    # parse resources
    if job_properties["type"] == "group":
        group_name = job_properties["groupid"]
    else:
        group_name = job_properties["rule"]
    if "threads" in job_properties:
        threads = str(job_properties["threads"])
    if "resources" in job_properties:
        resources = job_properties["resources"]
        if "mem_mb" in resources: memory = str(resources["mem_mb"])
        if "time_min" in resources: runtime = str(resources["time_min"])
github BuysDB / SingleCellMultiOmics / singlecellmultiomics / snakemake_workflows / _general / slurm_wrapper.py
# -*- coding: utf-8 -*-
from singlecellmultiomics.utils.submission import submit_job

import os
import sys
import re
import subprocess
import uuid
from snakemake.utils import read_job_properties

# load
## loading job stdout & stderr path
log_path = sys.argv[-2]
## loading job script (provided by snakemake)
job_script = sys.argv[-1]
job_properties = read_job_properties(job_script)

# getting job parameters from snakemake-generated job script
try:
    threads = job_properties['threads']
except KeyError:
    threads = 1
n = threads

try:
    time = int(job_properties['resources']['time'])
except KeyError:
    try:
        time = int(job_properties['cluster']['time']) # runtime is time in hours
    except KeyError:
        try:
            time = job_properties['params']['runtime'].replace('h','')
github broadinstitute / viral-ngs / pipes / Broad_UGER / cluster-submitter.py
#!/usr/bin/env python3
import os
import sys
import re
import getpass
from snakemake.utils import read_job_properties

LOGDIR = sys.argv[-2]
jobscript = sys.argv[-1]
mo = re.match(r'(\S+)/snakejob\.\S+\.(\d+)\.sh', jobscript)
assert mo
sm_tmpdir, sm_jobid = mo.groups()
props = read_job_properties(jobscript)

# Blacklist problematic nodes; this list is stored as filenames
# in /broad/hptmp/[username]/blacklisted-nodes/
whoami = getpass.getuser()
blacklisted_node_dir = os.path.join("/broad/hptmp", whoami, "blacklisted-nodes")
if not os.path.exists(blacklisted_node_dir):
    os.makedirs(blacklisted_node_dir)
def hard_blacklist_node(node):
    blacklist_path = os.path.join(blacklisted_node_dir, node)
    with open(blacklist_path, 'a'):
        os.utime(blacklist_path, None)
# Always blacklist 'sgi1'; it cannot perform basic operations like
# allocating memory
hard_blacklist_node('sgi1')
blacklisted_nodes = os.listdir(blacklisted_node_dir)
github inodb / snakemake-workflows / scheduling / Snakefile_sbatch.py
def __init__(self, snakebashfile, dependencies=None, config=None):
        self.scriptname = snakebashfile
        job_properties = read_job_properties(snakebashfile)
        self.rule = job_properties['rule']
        self.ifiles = job_properties['input']
        self.ofiles = job_properties['output']
        if dependencies is None or len(dependencies) < 1:
            self.dependencies = None
        else:
            # expects snakemake like list of numbers
            self.dependencies = dependencies
            assert len(self.dependencies) >= 1
        self.config = config
github crazyhottommy / pyflow-ChIPseq / bsub_cluster.py
import os
import sys
import re
import errno
from snakemake.utils import read_job_properties

## snakemake will generate a jobscript containing all the (shell) commands from your Snakefile. 
## I think that's something baked into snakemake's code itself. It passes the jobscript as the last parameter.
## https://bitbucket.org/snakemake/snakemake/wiki/Documentation#markdown-header-job-properties

## make a directory for the logs from the cluster 
try:
    os.makedirs("bsub_log")
except OSError as exception:
    if exception.errno != errno.EEXIST:
        raise


jobscript = sys.argv[-1]
job_properties = read_job_properties(jobscript)

## the jobscript is something like snakejob.index_bam.23.sh
mo = re.match(r'(\S+)/snakejob\.\S+\.(\d+)\.sh', jobscript)
assert mo
sm_tmpdir, sm_jobid = mo.groups()

## set up jobname. 
jobname = "{rule}-{jobid}".format(rule = job_properties["rule"], jobid = sm_jobid)

## it is safer to use the get method in case the key is not present
# job_properties is a dictionary of dictionaries. The job name is set in the
# Snakefile under the params directive, associating the sample name with the job.

jobname_tag_sample = job_properties.get('params', {}).get('jobname')
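A hedged sketch of how the pieces above could come together into the final submission (the project's actual bsub options may differ):

## Hypothetical continuation (not the project's exact command): append the
## optional sample tag to the job name and submit, writing cluster logs into
## the bsub_log directory created above.
if jobname_tag_sample:
    jobname = jobname + "-" + jobname_tag_sample
os.system("bsub -J {jn} -o bsub_log/{jn}.out -e bsub_log/{jn}.err < {js}".format(
    jn=jobname, js=jobscript))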