How to use the parsl.providers.SlurmProvider class in parsl

To help you get started, we've selected a few parsl examples based on popular ways SlurmProvider is used in public projects.

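Before the project examples below, here is a minimal, self-contained sketch of the typical pattern: a SlurmProvider supplies nodes to a HighThroughputExecutor, the config is loaded, and a Python app runs on the allocated workers. The partition name, block sizes, and walltime are placeholders to adapt to your own cluster.

import parsl
from parsl import python_app
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.providers import SlurmProvider

# Minimal sketch: 'YOUR_PARTITION' and the resource settings are placeholders.
config = Config(
    executors=[
        HighThroughputExecutor(
            label='slurm_htex',
            provider=SlurmProvider(
                partition='YOUR_PARTITION',
                nodes_per_block=1,
                init_blocks=1,
                max_blocks=1,
                walltime='00:10:00',
            ),
        )
    ]
)

parsl.load(config)

@python_app
def hello():
    return 'hello from a Slurm worker'

print(hello().result())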

Parsl/parsl: parsl/configs/stampede2_htex_multinode.py (view on GitHub)
from parsl.config import Config
from parsl.providers import SlurmProvider
from parsl.executors import HighThroughputExecutor
from parsl.addresses import address_by_hostname
from parsl.data_provider.globus import GlobusScheme


config = Config(
    executors=[
        HighThroughputExecutor(
            label='Stampede2_HTEX',
            address=address_by_hostname(),
            provider=SlurmProvider(
                nodes_per_block=2,
                init_blocks=1,
                min_blocks=1,
                partition='YOUR_PARTITION',
                # String of #SBATCH directives to prepend to the submit
                # script, e.g. '#SBATCH --constraint=knl,quad,cache'
                scheduler_options='',
                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init='',
                walltime='00:30:00'
            ),
            storage_access=[GlobusScheme(
                endpoint_uuid='ceea5ca0-89a9-11e7-a97f-22000a92523b',
                endpoint_path='/',
                local_path='/',
            )],
        )
    ],
)
funcx-faas/funcX: funcx/executor/parsl/configs/midway_ipp_multicore.py (view on GitHub)
from parsl.providers import SlurmProvider
from parsl.channels import SSHChannel
from parsl.launchers import SingleNodeLauncher

from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
from parsl.executors.ipp_controller import Controller

# This is an example config; replace the placeholder values below
# with your own literal values (e.g., 'USERNAME' -> 'your_username')

config = Config(
    executors=[
        IPyParallelExecutor(
            label='midway_ipp_multicore',
            workers_per_node=4,
            provider=SlurmProvider(
                'westmere',
                channel=SSHChannel(
                    hostname='swift.rcc.uchicago.edu',
                    username='USERNAME',     # Please replace USERNAME with your username
                    script_dir='/scratch/midway2/USERNAME/parsl_scripts',    # Please replace USERNAME with your username
                ),
                scheduler_options='',     # Input your scheduler_options if needed
                worker_init='',     # Input your worker_init if needed
                nodes_per_block=1,
                walltime="00:05:00",
                init_blocks=1,
                max_blocks=1,
                launcher=SingleNodeLauncher(),
            ),
            controller=Controller(public_ip='PUBLIC_IP'),    # Please replace PUBLIC_IP with your public ip
        )
    ],
)
Parsl/parsl: parsl/dataflow/workspace/cpu_stress_midway_slurm.py (view on GitHub)
import sys
import logging

from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.providers import SlurmProvider
from parsl.launchers import SrunLauncher
from parsl.addresses import address_by_hostname


def setup_logging():  # hypothetical name; the original def line was truncated
    # Send INFO-level log records to stdout and return the configured logger.
    logger = logging.getLogger()
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.INFO)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger

# config
config = Config(
    executors=[
        HighThroughputExecutor(
            label="midway_htex",
            cores_per_worker=1,
            address=address_by_hostname(),
            provider=SlurmProvider(
                'broadwl',    # partition name on Midway
                init_blocks=1,
                launcher=SrunLauncher(),
                scheduler_options='#SBATCH --exclusive',
                max_blocks=10,
                nodes_per_block=10,
                # tasks_per_node=1,  # For HighThroughputExecutor, this option should in most cases be 1
                parallelism=1.0,
                walltime='00:20:00',
                worker_init='module load Anaconda3/5.0.0.1; source activate py3501'
            ),
        )
    ],
    #strategy='htex_aggressive',
    #strategy='htex_totaltime',
    strategy='simple',
)
funcx-faas/funcX: funcx/executor/parsl/configs/cori_ipp_single_node.py (view on GitHub)
from parsl.providers import SlurmProvider
from parsl.channels import SSHChannel

from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
from parsl.executors.ipp_controller import Controller

# This is an example config; replace the placeholder values below
# with your own literal values (e.g., 'USERNAME' -> 'your_username')

config = Config(
    executors=[
        IPyParallelExecutor(
            label='cori_ipp_single_node',
            provider=SlurmProvider(
                'debug',
                channel=SSHChannel(
                    hostname='cori.nersc.gov',
                    username='USERNAME',     # Please replace USERNAME with your username
                    script_dir='/global/homes/y/USERNAME/parsl_scripts',    # Please replace USERNAME with your username
                ),
                nodes_per_block=1,
                init_blocks=1,
                max_blocks=1,
                scheduler_options='',     # Input your scheduler_options if needed
                worker_init='',     # Input your worker_init if needed
            ),
            controller=Controller(public_ip='PUBLIC_IP'),    # Please replace PUBLIC_IP with your public ip
        )
    ],
)
Parsl/parsl: parsl/dataflow/workspace/app_slurm_dag2.py (view on GitHub)
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.providers import SlurmProvider
from parsl.launchers import SrunLauncher
from parsl.addresses import address_by_hostname

# Fragment of a larger script that selects an executor and strategy from
# command-line arguments; shown here as a standalone config for the
# 'HighThroughput_Slurm' branch.
config = Config(
    executors=[
        HighThroughputExecutor(
            label="midway_htex",
            cores_per_worker=1,
            address=address_by_hostname(),
            heartbeat_period=1,     # default 30s
            heartbeat_threshold=2,  # default 120s
            provider=SlurmProvider(
                'broadwl',    # partition name on Midway
                launcher=SrunLauncher(),
                scheduler_options='#SBATCH --mem-per-cpu=16000',
                # scheduler_options='#SBATCH --exclusive',
                worker_init='module load Anaconda3/5.0.0.1; source activate parsl-dev',
                init_blocks=1,
                max_blocks=5,
                nodes_per_block=1,
                # tasks_per_node=1,  # For HighThroughputExecutor, this option should in most cases be 1
                parallelism=1.0,
                walltime='12:00:00',
            ),
        )
    ],
    # strategy='htex_aggressive',
    # strategy='htex_totaltime',
    strategy='simple',  # the original set strategy=args.strategy
)
funcx-faas/funcX: funcx/executor/parsl/configs/midway_ipp.py (view on GitHub)
from parsl.channels import SSHChannel
from parsl.providers import SlurmProvider

from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
from parsl.executors.ipp_controller import Controller

# This is an example config; replace the placeholder values below
# with your own literal values (e.g., 'USERNAME' -> 'your_username')

config = Config(
    executors=[
        IPyParallelExecutor(
            workers_per_node=4,
            provider=SlurmProvider(
                'westmere',
                channel=SSHChannel(
                    hostname='swift.rcc.uchicago.edu',
                    username='USERNAME',     # Please replace USERNAME with your username
                    script_dir='/scratch/midway2/USERNAME/parsl_scripts',    # Please replace USERNAME with your username
                ),
                init_blocks=1,
                min_blocks=1,
                max_blocks=2,
                nodes_per_block=1,
                parallelism=0.5,
                scheduler_options='',     # Input your scheduler_options if needed
                worker_init='',     # Input your worker_init if needed
            ),
            label='midway_ipp',
            controller=Controller(public_ip='PUBLIC_IP'),    # Please replace PUBLIC_IP with your public IP
        )
    ],
)
Parsl/parsl: parsl/configs/midway_htex_multinode.py (view on GitHub)
from parsl.config import Config
from parsl.providers import SlurmProvider
from parsl.launchers import SrunLauncher
from parsl.addresses import address_by_hostname
from parsl.executors import HighThroughputExecutor

config = Config(
    executors=[
        HighThroughputExecutor(
            label='Midway_HTEX_multinode',
            worker_debug=False,
            address=address_by_hostname(),
            provider=SlurmProvider(
                'broadwl',
                launcher=SrunLauncher(),
                nodes_per_block=2,
                init_blocks=1,
                min_blocks=1,
                max_blocks=1,
                # String of #SBATCH directives to prepend to the submit
                # script, e.g. '#SBATCH --constraint=knl,quad,cache'
                scheduler_options='',
                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init='',
                walltime='00:30:00'
            ),
        )
    ],
)
funcx-faas/funcX: funcx/executor/parsl/configs/cori_ipp_multinode.py (view on GitHub)
from parsl.providers import SlurmProvider
from parsl.channels import SSHChannel
from parsl.launchers import SrunLauncher

from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
from parsl.executors.ipp_controller import Controller

# This is an example config; replace the placeholder values below
# with your own literal values (e.g., 'USERNAME' -> 'your_username')

config = Config(
    executors=[
        IPyParallelExecutor(
            label='cori_ipp_multinode',
            provider=SlurmProvider(
                'debug',
                channel=SSHChannel(
                    hostname='cori.nersc.gov',
                    username='USERNAME',     # Please replace USERNAME with your username
                    script_dir='/global/homes/y/USERNAME/parsl_scripts',    # Please replace USERNAME with your username
                ),
                nodes_per_block=2,
                init_blocks=1,
                max_blocks=1,
                scheduler_options='',     # Input your scheduler_options if needed
                worker_init='',     # Input your worker_init if needed
                launcher=SrunLauncher(),
            ),
            controller=Controller(public_ip='PUBLIC_IP'),    # Please replace PUBLIC_IP with your public ip
        )
    ],
)
CoffeaTeam/coffea: coffea/processor/parsl/slurm_config.py (view on GitHub)
import os.path as osp

from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.providers import SlurmProvider
from parsl.channels import LocalChannel
from parsl.launchers import SrunLauncher
from parsl.addresses import address_by_hostname


def slurm_config(htex_label, partition, walltime, work_dir, x509_proxy,
                 cores_per_job, mem_per_core, initial_workers, max_workers,
                 jobs_per_worker):
    # NOTE: the enclosing function definition was truncated in this snippet;
    # the name and parameter list above are reconstructed from the names used below.
    wrk_init = '''
export X509_USER_PROXY=%s
    ''' % (osp.join(work_dir, x509_proxy))

    sched_opts = '''
    #SBATCH --cpus-per-task=%d
    #SBATCH --mem-per-cpu=%d
    ''' % (cores_per_job, mem_per_core, )

    slurm_htex = Config(
        executors=[
            HighThroughputExecutor(
                label=htex_label,
                address=address_by_hostname(),
                prefetch_capacity=0,
                max_workers=cores_per_job,
                provider=SlurmProvider(
                    channel=LocalChannel(),
                    launcher=SrunLauncher(),
                    init_blocks=initial_workers,
                    max_blocks=max_workers,
                    nodes_per_block=jobs_per_worker,
                    partition=partition,
                    scheduler_options=sched_opts,   # Enter scheduler_options if needed
                    worker_init=wrk_init,         # Enter worker_init if needed
                    walltime=walltime
                ),
            )
        ],
        strategy=None,
    )

    return slurm_htex
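
A possible way to use such a helper (assuming the reconstructed signature above; every argument value here is an illustrative placeholder):

import parsl

# Hypothetical call, matching the reconstructed parameter list above.
parsl.load(slurm_config(
    htex_label='coffea_parsl_slurm_htex',
    partition='YOUR_PARTITION',
    walltime='02:00:00',
    work_dir='/tmp',
    x509_proxy='x509up',
    cores_per_job=8,
    mem_per_core=2048,
    initial_workers=1,
    max_workers=4,
    jobs_per_worker=1,
))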