How to use funcx - common examples

To help you get started, we've selected a few funcX examples based on popular ways it is used in public projects.
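Most of these snippets come from the funcX endpoint, interchange, and manager internals. For orientation, here is a minimal sketch of the client-side workflow those components serve, assuming the FuncXClient API from the funcx SDK (register_function / run / get_result); the endpoint UUID is a placeholder you would replace with one of your own endpoints.

from funcx.sdk.client import FuncXClient

def double(x):
    return 2 * x

fxc = FuncXClient()                      # triggers a Globus Auth login on first use
endpoint_uuid = '<your-endpoint-uuid>'   # placeholder

func_uuid = fxc.register_function(double)
task_id = fxc.run(21, endpoint_id=endpoint_uuid, function_id=func_uuid)

# get_result raises while the task is still pending, so poll or wait briefly
result = fxc.get_result(task_id)
print(result)  # 42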


From funcx-faas/funcX: funcx/mock_broker/mock_tester.py
import getpass
import platform
import sys

import funcx
import requests


def test(address):
    # Report this host's Python version, OS, hostname, user, and funcX
    # version to the broker's /register route, then print the response.
    r = requests.post(address + '/register',
                      json={'python_v': "{}.{}".format(sys.version_info.major,
                                                       sys.version_info.minor),
                            'os': platform.system(),
                            'hname': platform.node(),
                            'username': getpass.getuser(),
                            'funcx_v': str(funcx.__version__)
                      }
    )
    print("Status code :", r.status_code)
    print("Json : ", r.json())
From funcx-faas/funcX: funcx/endpoint/endpoint.py
    funcx.set_stream_logger(level=logging.DEBUG if args.debug else logging.INFO)
    global logger
    logger = logging.getLogger('funcx')

    if args.version:
        logger.info("FuncX version: {}".format(funcx.__version__))

    logger.debug("Command: {}".format(args.command))

    args.config_file = os.path.join(args.config_dir, 'config.py')

    if args.command == "init":
        if args.force:
            logger.debug("Forcing re-authentication via GlobusAuth")
        funcx_client = FuncXClient(force_login=args.force)
        init_endpoint(args)
        return

    if not os.path.exists(args.config_file):
        logger.critical("Missing a config file at {}. Critical error. Exiting.".format(args.config_file))
        logger.info("Please run the following to create the appropriate config files : \n $> funcx-endpoint init")
        exit(-1)

    if args.command == "init":
        init_endpoint(args)
        exit(-1)

    logger.debug("Loading config files from {}".format(args.config_dir))

    import importlib.machinery
    global_config = importlib.machinery.SourceFileLoader('global_config',
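This fragment of the funcx-endpoint command dispatcher configures logging, handles the init command (creating a FuncXClient and forcing a fresh GlobusAuth login when the force flag is set), and refuses to run any other command until a config.py exists in the endpoint's config directory, which it then loads as a module via importlib.machinery.SourceFileLoader.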
From funcx-faas/funcX: funcx/executors/high_throughput/interchange.py
    optionals['suppress_failure'] = args.suppress_failure
    optionals['logdir'] = os.path.abspath(args.logdir)
    optionals['client_address'] = args.client_address
    optionals['client_ports'] = [int(i) for i in args.client_ports.split(',')]
    optionals['endpoint_id'] = args.endpoint_id
    optionals['config'] = args.config

    if args.debug:
        optionals['logging_level'] = logging.DEBUG
    if args.worker_ports:
        optionals['worker_ports'] = [int(i) for i in args.worker_ports.split(',')]
    if args.worker_port_range:
        optionals['worker_port_range'] = [int(i) for i in args.worker_port_range.split(',')]

    with daemon.DaemonContext():
        ic = Interchange(**optionals)
        ic.start()
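This is the command-line entry point for the high-throughput interchange: options gathered from the parsed arguments are passed straight through as keyword arguments, and python-daemon's DaemonContext detaches the process before the Interchange starts. A minimal non-daemonized sketch, assuming only the keyword arguments visible above (the address, ports, and log directory are placeholders):

from funcx.executors.high_throughput.interchange import Interchange

ic = Interchange(client_address='127.0.0.1',          # placeholder client/forwarder address
                 client_ports=[55001, 55002, 55003],  # placeholder ports (results and commands use indices 1 and 2)
                 logdir='/tmp/interchange_logs')
ic.start()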
From funcx-faas/funcX: funcx/endpoint/endpoint.py
logger.info("Endpoint registered with UUID: {}".format(reg_info['endpoint_id']))

            # Configure the parameters for the interchange
            optionals = {}
            optionals['client_address'] = reg_info['address']
            optionals['client_ports'] = reg_info['client_ports'].split(',')
            if 'endpoint_address' in global_config:
                optionals['interchange_address'] = global_config['endpoint_address']

            optionals['logdir'] = endpoint_dir
            # optionals['debug'] = True

            if args.debug:
                optionals['logging_level'] = logging.DEBUG

            ic = Interchange(endpoint_config.config, **optionals)
            ic.start()
            ic.stop()

            logger.critical("Interchange terminated.")
            time.sleep(10)

    stdout.close()
    stderr.close()

    logger.critical(f"Shutting down endpoint {endpoint_uuid}")
From funcx-faas/funcX: docs/conf.py
import requests
sys.path.insert(0, os.path.abspath('../'))
import funcx

# -- Project information -----------------------------------------------------

project = 'funcX'
copyright = '2019, The University of Chicago'
author = 'The funcX Team'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = funcx.__version__.rsplit('.', 1)[0]
# The full version, including alpha/beta/rc tags.
release = funcx.__version__


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'nbsphinx',
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    #'sphinx.ext.linkcode',
    'sphinx.ext.napoleon'
]
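Both the short version and the full release string in docs/conf.py are derived from funcx.__version__, so the built documentation always tracks the installed package; for example, a __version__ of '0.0.5' would yield version '0.0' and release '0.0.5'.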
From funcx-faas/funcX: funcx/endpoint/endpoint.py
    # Stop an endpoint
    stop = subparsers.add_parser('stop', help='Stops an active endpoint')
    stop.add_argument("name", help="Name of the endpoint to stop")

    # List all endpoints
    subparsers.add_parser('list', help='Lists all endpoints')

    args = parser.parse_args()

    funcx.set_stream_logger(level=logging.DEBUG if args.debug else logging.INFO)
    global logger
    logger = logging.getLogger('funcx')

    if args.version:
        logger.info("FuncX version: {}".format(funcx.__version__))

    logger.debug("Command: {}".format(args.command))

    args.config_file = os.path.join(args.config_dir, 'config.py')

    if args.command == "init":
        if args.force:
            logger.debug("Forcing re-authentication via GlobusAuth")
        funcx_client = FuncXClient(force_login=args.force)
        init_endpoint(args)
        return

    if not os.path.exists(args.config_file):
        logger.critical("Missing a config file at {}. Critical error. Exiting.".format(args.config_file))
        logger.info("Please run the following to create the appropriate config files : \n $> funcx-endpoint init")
        exit(-1)
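Together with the init handling shown earlier, this wires up the stop and list subcommands of funcx-endpoint: arguments are parsed once, log verbosity follows the debug flag, the version flag reports funcx.__version__, and every command other than init requires an existing config.py before proceeding.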
From funcx-faas/funcX: funcx/executors/high_throughput/funcx_manager.py
                # Only check if no messages were received.
                if time.time() > last_interchange_contact + self.heartbeat_threshold:
                    logger.critical("[TASK_PULL_THREAD] Missing contact with interchange beyond heartbeat_threshold")
                    kill_event.set()
                    logger.critical("Killing all workers")
                    for proc in self.worker_procs:
                        proc.kill()
                    logger.critical("[TASK_PULL_THREAD] Exiting")
                    break

            logger.debug("Task queues: {}".format(self.task_queues))
            logger.debug("To-Die Counts: {}".format(self.worker_map.to_die_count))
            logger.debug("Alive worker counts: {}".format(self.worker_map.total_worker_type_counts))

            new_worker_map = naive_scheduler(self.task_queues, self.worker_count, new_worker_map, self.worker_map.to_die_count, logger=logger)
            logger.debug("[SCHEDULER] New worker map: {}".format(new_worker_map))

            #  Count the workers of each type that need to be removed
            if new_worker_map is not None:
                spin_downs = self.worker_map.spin_down_workers(new_worker_map)

                for w_type in spin_downs:
                    self.remove_worker_init(w_type)

            # NOTE: Wipes the queue -- previous scheduling loops don't affect what's needed now.
            if new_worker_map is not None:
                self.next_worker_q = self.worker_map.get_next_worker_q(new_worker_map)

            current_worker_map = self.worker_map.get_worker_counts()
            for task_type in current_worker_map:
                if task_type == 'unused':
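This loop runs in the manager's task-pull thread. If the interchange has not been heard from within heartbeat_threshold seconds, it kills every worker process and exits; otherwise it logs the per-type task queues and worker counts, asks naive_scheduler for a new worker map, spins down surplus workers by type, and rebuilds the queue of workers to launch next.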
From funcx-faas/funcX: funcx/executors/high_throughput/interchange.py
        self.results_outgoing = self.context.socket(zmq.DEALER)
        self.results_outgoing.set_hwm(0)
        logger.info("Results outgoing on tcp://{}:{}".format(client_address, client_ports[1]))
        self.results_outgoing.connect("tcp://{}:{}".format(client_address, client_ports[1]))

        self.command_channel = self.context.socket(zmq.DEALER)
        self.command_channel.RCVTIMEO = 1000  # in milliseconds
        # self.command_channel.set_hwm(0)
        logger.info("Command channel on tcp://{}:{}".format(client_address, client_ports[2]))
        self.command_channel.connect("tcp://{}:{}".format(client_address, client_ports[2]))
        logger.info("Connected to client")

        self.pending_task_queue = {}
        self.containers = {}
        self.total_pending_task_count = 0
        self.fxs = FuncXClient()

        logger.info("Interchange address is {}".format(self.interchange_address))
        self.worker_ports = worker_ports
        self.worker_port_range = worker_port_range

        self.task_outgoing = self.context.socket(zmq.ROUTER)
        self.task_outgoing.set_hwm(0)
        self.results_incoming = self.context.socket(zmq.ROUTER)
        self.results_incoming.set_hwm(0)

        # initialize the last heartbeat time to start the loop
        self.last_heartbeat = time.time()
        self.max_heartbeats_missed = max_heartbeats_missed

        self.endpoint_id = endpoint_id
        if self.worker_ports:
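The interchange constructor sets up its ZeroMQ sockets: DEALER sockets (results_outgoing and the command channel) connect back to the client on two of the client ports, while ROUTER sockets (task_outgoing and results_incoming) face the workers on the configured worker_ports or worker_port_range. A FuncXClient instance and heartbeat bookkeeping (last_heartbeat, max_heartbeats_missed) complete the setup.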