How to use the toil.subprocess function in toil

To help you get started, we've selected a few toil examples showing how subprocess is used in public projects.
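All of the snippets below follow the same basic pattern: build an argument list, run it with subprocess.check_output or subprocess.Popen, decode the bytes that come back, and handle failure explicitly. Here is a minimal sketch of that pattern; run_command is a hypothetical helper (not part of toil) and the bjobs call is just an example command.

import subprocess

def run_command(cmd):
    """Run cmd (a list of strings) and return decoded stdout, or None on failure."""
    try:
        output = subprocess.check_output(cmd)
    except (OSError, subprocess.CalledProcessError):
        # The command is missing or exited non-zero; callers treat None as "no data".
        return None
    return output.decode('utf-8')

# Hypothetical usage, mirroring the LSF helpers below:
lines = run_command(["bjobs", "-o", "jobid stat delimiter='|'"])
if lines is not None:
    for line in lines.split('\n'):
        print(line)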

github.com/DataBiosphere/toil: src/toil/batchSystems/lsfHelper.py
def apply_bparams(fn):
    """
    Apply fn to the list of output lines from `bparams -a`, returning the result.
    """
    cmd = ["bparams", "-a"]
    try:
        output = subprocess.check_output(cmd).decode('utf-8')
    except (OSError, subprocess.CalledProcessError):
        # bparams is missing or exited non-zero; there is nothing to parse.
        return None
    return fn(output.split("\n"))
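A caller passes apply_bparams a function that consumes the whole list of lines; the example below is hypothetical (get_lsf_param is not part of toil) and simply picks out lines that mention a given parameter name.

# Hypothetical caller; get_lsf_param is not part of toil.
def get_lsf_param(name):
    def find(lines):
        return [line for line in lines if name in line]
    return apply_bparams(find)

matching = get_lsf_param("MAX_JOB_NUM")   # None if bparams is unavailable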

github.com/DataBiosphere/toil: src/toil/batchSystems/lsf.py
def getRunningJobIDs(self):
    times = {}
    with self.runningJobsLock:
        currentjobs = dict((str(self.batchJobIDs[x][0]), x) for x in
                           self.runningJobs)
    process = subprocess.Popen(
            ["bjobs", "-o", "jobid stat start_time delimiter='|'"],
            stdout=subprocess.PIPE)
    stdout, _ = process.communicate()

    for curline in stdout.decode('utf-8').split('\n'):
        items = curline.strip().split('|')
        if items[0] in currentjobs and items[1] == 'RUN':
            jobstart = parse(items[2], default=datetime.now(tzlocal()))
            times[currentjobs[items[0]]] = datetime.now(tzlocal()) \
                - jobstart
    return times
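To make the parsing loop concrete, here is a self-contained sketch using a made-up bjobs output string in the pipe-delimited "jobid stat start_time" format that the -o option requests; real output also includes a header line, which the membership check on currentjobs skips naturally.

from datetime import datetime
from dateutil.parser import parse
from dateutil.tz import tzlocal

# Made-up sample output and job table; only job 101 is tracked and running.
sample = "JOBID|STAT|START_TIME\n101|RUN|Jan 1 00:00\n102|PEND|-\n"
currentjobs = {'101': 'toil-job-1'}

times = {}
for curline in sample.split('\n'):
    items = curline.strip().split('|')
    if items[0] in currentjobs and items[1] == 'RUN':
        jobstart = parse(items[2], default=datetime.now(tzlocal()))
        times[currentjobs[items[0]]] = datetime.now(tzlocal()) - jobstart
print(times)   # {'toil-job-1': <time elapsed since Jan 1 00:00 this year>}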

github.com/DataBiosphere/toil: src/toil/provisioners/node.py
if sshOptions:
    # add specified options to ssh command
    assert isinstance(sshOptions, list)
    commandTokens.extend(sshOptions)
# specify host
user = kwargs.pop('user', 'core')   # CHANGED: Is this needed?
commandTokens.append('%s@%s' % (user, str(self.effectiveIP)))
appliance = kwargs.pop('appliance', None)
if appliance:
    # run the args in the appliance
    tty = kwargs.pop('tty', None)
    ttyFlag = '-t' if tty else ''
    commandTokens += ['docker', 'exec', '-i', ttyFlag, 'toil_leader']

inputString = kwargs.pop('input', None)
if inputString is not None:
    kwargs['stdin'] = subprocess.PIPE
collectStdout = kwargs.pop('collectStdout', None)
if collectStdout:
    kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.PIPE

logger.debug('Node %s: %s', self.effectiveIP, ' '.join(args))
args = list(map(pipes.quote, args))
commandTokens += args
logger.debug('Full command %s', ' '.join(commandTokens))
popen = subprocess.Popen(commandTokens, **kwargs)
stdout, stderr = popen.communicate(input=inputString)
# at this point the process has already exited, no need for a timeout
resultValue = popen.wait()
# ssh has been throwing random 255 errors - why?
if resultValue != 0:
    logger.debug('SSH Error (%s) %s' % (resultValue, stderr))

github.com/DataBiosphere/toil: src/toil/batchSystems/parasol.py
def _runParasol(self, command, autoRetry=True):
    """
    Issues a parasol command, using Popen to capture the output. If the command
    fails it pings parasol until it gets a response, then reissues the parasol
    command, repeating this pattern up to N times. The final exit value
    reflects this.
    """
    command = list(concat(self.parasolCommand, command))
    while True:
        logger.debug('Running %r', command)
        process = subprocess.Popen(command,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   bufsize=-1)
        stdout, stderr = process.communicate()
        status = process.wait()
        for line in stderr.decode('utf-8').split('\n'):
            if line: logger.warn(line)
        if status == 0:
            return 0, stdout.decode('utf-8').split('\n')
        message = 'Command %r failed with exit status %i' % (command, status)
        if autoRetry:
            logger.warn(message)
        else:
            logger.error(message)
            return status, None
        logger.warn('Waiting 10s before trying again')
        time.sleep(10)
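The (status, lines) return shape keeps call sites simple. A hypothetical call site, not from toil and assuming batch is an object exposing _runParasol as above, might look like this:

# Hypothetical call site; `batch` and the 'list jobs' subcommand are assumptions.
status, lines = batch._runParasol(['list', 'jobs'], autoRetry=False)
if status == 0:
    for line in lines:
        if line:
            logger.debug('parasol job: %s', line)
else:
    logger.error('parasol job listing failed with exit status %i', status)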

github.com/DataBiosphere/toil: src/toil/batchSystems/singleMachine.py
            # TODO: The following does not yet properly populate self.runningJobs so it is not possible to kill
            # running jobs in forkless mode - see the "None" value in place of popen
            info = Info(time.time(), None, killIntended=False)
            try:
                self.runningJobs[jobID] = info
                try:
                    toil_worker.workerScript(jobStore, jobStore.config, jobName, jobStoreID, 
                                             redirectOutputToLogFile=not self.debugWorker) # Call the worker
                finally:
                    self.runningJobs.pop(jobID)
            finally:
                if not info.killIntended:
                    self.outputQueue.put((jobID, 0, time.time() - startTime))
        else:
            with self.popenLock:
                popen = subprocess.Popen(jobCommand,
                                         shell=True,
                                         env=dict(os.environ, **environment))
            info = Info(time.time(), popen, killIntended=False)
            try:
                self.runningJobs[jobID] = info
                try:
                    statusCode = popen.wait()
                    if statusCode != 0 and not info.killIntended:
                        log.error("Got exit code %i (indicating failure) "
                                  "from job %s.", statusCode, self.jobs[jobID])
                finally:
                    self.runningJobs.pop(jobID)
            finally:
                if not info.killIntended:
                    self.outputQueue.put((jobID, statusCode, time.time() - startTime))

github.com/DataBiosphere/toil: src/toil/wdl/wdl_functions.py
def process_single_outfile(f, fileStore, workDir, outDir):
    if os.path.exists(f):
        output_f_path = f
    elif os.path.exists(os.path.abspath(f)):
        output_f_path = os.path.abspath(f)
    elif os.path.exists(os.path.join(workDir, 'execution', f)):
        output_f_path = os.path.join(workDir, 'execution', f)
    elif os.path.exists(os.path.join('execution', f)):
        output_f_path = os.path.join('execution', f)
    elif os.path.exists(os.path.join(workDir, f)):
        output_f_path = os.path.join(workDir, f)
    else:
        tmp = subprocess.check_output(['ls', '-lha', workDir]).decode('utf-8')
        exe = subprocess.check_output(['ls', '-lha', os.path.join(workDir, 'execution')]).decode('utf-8')
        raise RuntimeError('OUTPUT FILE: {} was not found!\n'
                           '{}\n\n'
                           '{}\n'.format(f, tmp, exe))
    output_file = fileStore.writeGlobalFile(output_f_path)
    preserveThisFilename = os.path.basename(output_f_path)
    fileStore.exportFile(output_file, "file://" + os.path.join(os.path.abspath(outDir), preserveThisFilename))
    return (output_file, preserveThisFilename)

github.com/DataBiosphere/toil: src/toil/batchSystems/torque.py
def _pbsVersion(self):
    """ Determines PBS/Torque version via pbsnodes
    """
    try:
        out = subprocess.check_output(["pbsnodes", "--version"]).decode('utf-8')

        if "PBSPro" in out:
            logger.debug("PBS Pro proprietary Torque version detected")
            self._version = "pro"
        else:
            logger.debug("Torque OSS version detected")
            self._version = "oss"
    except subprocess.CalledProcessError as e:
        if e.returncode != 0:
            logger.error("Could not determine PBS/Torque version")

    return self._version

github.com/DataBiosphere/toil: src/toil/common.py
region = provisioner._zone

registry = lookupEnvVar(name='docker registry',
                        envName='TOIL_DOCKER_REGISTRY',
                        defaultValue=dockerRegistry)

self.mtailImage = "%s/toil-mtail:%s" % (registry, dockerTag)
self.grafanaImage = "%s/toil-grafana:%s" % (registry, dockerTag)
self.prometheusImage = "%s/toil-prometheus:%s" % (registry, dockerTag)

self.startDashboard(clusterName=clusterName, zone=region)

# Always restart the mtail container, because metrics should start from scratch
# for each workflow
try:
    subprocess.check_call(["docker", "rm", "-f", "toil_mtail"])
except subprocess.CalledProcessError:
    pass

try:
    self.mtailProc = subprocess.Popen(["docker", "run", "--rm", "--interactive",
                                       "--net=host",
                                       "--name", "toil_mtail",
                                       "-p", "3903:3903",
                                       self.mtailImage],
                                      stdin=subprocess.PIPE, stdout=subprocess.PIPE)
except subprocess.CalledProcessError:
    logger.warn("Could not start toil metrics server.")
    self.mtailProc = None
except KeyboardInterrupt:
    self.mtailProc.terminate()

# On single machine, launch a node exporter instance to monitor CPU/RAM usage.