How to use the funcx.executors.high_throughput.container_sched.naive_scheduler function in funcx

To help you get started, we’ve selected a few examples based on popular ways naive_scheduler is used in public projects.

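Both snippets on this page come from funcX's high-throughput executor manager, whose polling loop periodically calls naive_scheduler to recompute how many workers of each container type it should keep running. The sketch below calls the function directly with hand-built inputs so the argument list is easy to see. It follows the newer call signature shown in the second snippet; the exact shapes of the queue and count structures are assumptions for illustration, since in practice funcx_manager constructs them from its worker_map and incoming task stream.

import logging
import queue

from funcx.executors.high_throughput.container_sched import naive_scheduler

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("naive_scheduler_demo")

# Hypothetical inputs: one queue of pending tasks per worker/container type.
# These shapes are assumptions for illustration only.
task_queues = {"RAW": queue.Queue()}
task_queues["RAW"].put({"task_id": "task-1"})

outstanding_task_count = {"RAW": 1}   # tasks received but not yet completed, per type
max_worker_count = 4                  # cap on total workers this manager may run
current_worker_map = {"RAW": 1}       # workers allocated in the previous round, per type
to_die_count = {"RAW": 0}             # workers already marked for removal, per type

# Ask the scheduler for the next per-type worker allocation, as the manager loop does.
new_worker_map = naive_scheduler(task_queues,
                                 outstanding_task_count,
                                 max_worker_count,
                                 current_worker_map,
                                 to_die_count,
                                 logger=logger)
logger.debug("New worker map: %s", new_worker_map)

In the manager code below, the map returned by naive_scheduler is then handed to worker_map.spin_down_workers and worker_map.get_next_worker_q to decide which idle workers to remove and which worker types to launch next.
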
github funcx-faas / funcX / funcx / executors / high_throughput / funcx_manager.py
                # Only check if no messages were received.
                if time.time() > last_interchange_contact + self.heartbeat_threshold:
                    logger.critical("[TASK_PULL_THREAD] Missing contact with interchange beyond heartbeat_threshold")
                    kill_event.set()
                    logger.critical("Killing all workers")
                    for proc in self.worker_procs:
                        proc.kill()
                    logger.critical("[TASK_PULL_THREAD] Exiting")
                    break

            logger.debug("Task queues: {}".format(self.task_queues))
            logger.debug("To-Die Counts: {}".format(self.worker_map.to_die_count))
            logger.debug("Alive worker counts: {}".format(self.worker_map.total_worker_type_counts))

            new_worker_map = naive_scheduler(self.task_queues,
                                             self.worker_count,
                                             new_worker_map,
                                             self.worker_map.to_die_count,
                                             logger=logger)
            logger.debug("[SCHEDULER] New worker map: {}".format(new_worker_map))

            #  Count the workers of each type that need to be removed
            if new_worker_map is not None:
                spin_downs = self.worker_map.spin_down_workers(new_worker_map)

                for w_type in spin_downs:
                    self.remove_worker_init(w_type)

            # NOTE: Wipes the queue -- previous scheduling loops don't affect what's needed now.
            if new_worker_map is not None:
                self.next_worker_q = self.worker_map.get_next_worker_q(new_worker_map)

            current_worker_map = self.worker_map.get_worker_counts()
            for task_type in current_worker_map:
                if task_type == 'unused':

github funcx-faas / funcX / funcx / executors / high_throughput / funcx_manager.py
                poll_timer = min(self.heartbeat_period * 1000, poll_timer * 2)

                # Only check if no messages were received.
                if time.time() > last_interchange_contact + self.heartbeat_threshold:
                    logger.critical("[TASK_PULL_THREAD] Missing contact with interchange beyond heartbeat_threshold")
                    kill_event.set()
                    logger.critical("Killing all workers")
                    for proc in self.worker_procs.values():
                        proc.kill()
                    logger.critical("[TASK_PULL_THREAD] Exiting")
                    break

            logger.debug("To-Die Counts: {}".format(self.worker_map.to_die_count))
            logger.debug("Alive worker counts: {}".format(self.worker_map.total_worker_type_counts))

            new_worker_map = naive_scheduler(self.task_queues,
                                             self.outstanding_task_count,
                                             self.max_worker_count,
                                             new_worker_map,
                                             self.worker_map.to_die_count,
                                             logger=logger)
            logger.debug("[SCHEDULER] New worker map: {}".format(new_worker_map))

            # NOTE: Wipes the queue -- previous scheduling loops don't affect what's needed now.
            self.next_worker_q, need_more = self.worker_map.get_next_worker_q(new_worker_map)

            #  Count the workers of each type that need to be removed
            spin_downs = self.worker_map.spin_down_workers(new_worker_map,
                                                           worker_max_idletime=self.worker_max_idletime,
                                                           need_more=need_more,
                                                           scheduler_mode=self.scheduler_mode)