How to use the ray.shutdown function in ray

To help you get started, we've selected a few ray.shutdown examples, based on popular ways it is used in public projects.

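For orientation: ray.shutdown() disconnects the current process from Ray and, when ray.init() started Ray locally, also stops the processes it launched. A minimal sketch of the lifecycle:

import ray

ray.init()          # start (or connect to) a Ray instance
# ... submit tasks and create actors here ...
ray.shutdown()      # disconnect and stop locally started Ray processes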

github ray-project / ray / test / multi_node_test_2.py View on Github
import json

import pytest
import ray
from ray.cluster_utils import Cluster  # ray.test.cluster_utils in older releases


@pytest.fixture
def start_connected_longer_cluster():
    """Creates a cluster with a longer heartbeat timeout."""
    g = Cluster(
        initialize_head=True,
        connect=True,
        head_node_args={
            "num_cpus": 1,
            "_internal_config": json.dumps({
                "num_heartbeats_timeout": 20
            })
        })
    yield g
    # The code after the yield runs as teardown code.
    ray.shutdown()
    g.shutdown()
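A hypothetical test consuming this fixture (the assertion is illustrative; Cluster.remaining_processes_alive() reports whether all cluster processes are still running):

def test_cluster_starts(start_connected_longer_cluster):
    cluster = start_connected_longer_cluster
    assert cluster.remaining_processes_alive()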
github allwefantasy / pyjava / python / pyjava / api / mlsql.py View on Github
        if isinstance(_context, PythonContext):
            context = _context
        elif isinstance(_context, dict):
            if 'context' in _context:
                context = _context['context']
            else:
                # We are not in MLSQL.
                context = PythonContext([], {"pythonMode": "ray"})
                context.rayContext.is_in_mlsql = False
        else:
            raise Exception("context is not set")

        if url is not None:
            import ray
            # Drop any existing connection before attaching to the target cluster.
            ray.shutdown(exiting_interpreter=False)
            ray.init(redis_address=url)  # renamed to `address` in newer Ray
        return context.rayContext
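The snippet above targets an older Ray release; redis_address was later renamed to address. A sketch of the same reconnect pattern against current Ray:

import ray

ray.shutdown()               # drop any existing connection first
ray.init(address="auto")     # attach to an already running cluster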
github PartnershipOnAI / safelife / muzero / muzero.py View on Github
    def test(self, render, muzero_player):
        """
        Run test episodes with the current MuZero weights.

        Args:
            render: whether to display the environment during testing.
            muzero_player: player number of MuZero in multiplayer
                games; None lets MuZero play all players turn by turn.
        """
        print("\nTesting...")
        ray.init()
        self_play_workers = self_play.SelfPlay.remote(
            copy.deepcopy(self.muzero_weights),
            self.Game(self.config.seed + self.config.num_actors),
            self.config,
        )
        test_rewards = []
        for _ in range(self.config.test_episodes):
            history = ray.get(
                self_play_workers.play_game.remote(0, render, muzero_player)
            )
            test_rewards.append(sum(history.rewards))
        ray.shutdown()
        return test_rewards
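The same init → remote actor → get → shutdown lifecycle in a self-contained sketch (the Counter actor is made up for illustration):

import ray

ray.init()

@ray.remote
class Counter:
    def __init__(self):
        self.n = 0

    def increment(self):
        self.n += 1
        return self.n

counter = Counter.remote()
values = [ray.get(counter.increment.remote()) for _ in range(3)]
ray.shutdown()  # the actor is torn down together with the Ray instance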
github dkeras-project / dkeras / dkeras / benchmarks / model_benchmark.py View on Github
default='2,3,5,10,20,25,50,60,100', type=str)

    parser.add_argument("--model", help="Model options: {}".format(
        model_names.keys()),
                        default='resnet50', type=str)
    args = parser.parse_args()

    n_workers = args.n_workers
    test_type = args.test
    n_data = args.n_data
    model_name = args.model
    use_search = args.search
    search_pool = args.search_pool

    if ray.is_initialized():
        ray.shutdown()
    # ray.init(object_store_memory=args.object_store_memory)
    ray.init()

    default_shape = (224, 224, 3)

    try:
        search_pool = list(map(int, search_pool.split(',')))
    except (TypeError, ValueError):
        # int() raises ValueError on non-numeric input, not TypeError
        raise UserWarning("Search pool arg must be ints separated by commas")

    if not ((model_name == 'all') or (model_name in model_names.keys())):
        raise UserWarning(
            "Model name not found: {}, options: {}".format(
                model_name, model_names.keys()))

    if model_name != 'all':
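The is_initialized()/shutdown() pair above guarantees each benchmark run starts from a fresh Ray instance. The same idea as a reusable helper (fresh_ray is a hypothetical name):

import ray

def fresh_ray(**init_kwargs):
    """Restart Ray so the next run starts from a clean instance."""
    if ray.is_initialized():
        ray.shutdown()
    ray.init(**init_kwargs)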
github dcgym / iroko / run_ray.py View on Github
    log.info("Starting experiment.")
    if args.tune:
        tune_run(config, args.episodes, args.root_output, args.schedule)
    else:
        run_ray(config, args.episodes)

    # Wait until the topology is torn down completely
    # The flaky Mininet stop() call necessitates this
    # This is an unfortunate reality and may conflict with other ovs setups
    log.info("Waiting for environment to complete...")
    wait_for_ovs()
    # Take control back from root
    dc_utils.change_owner(args.root_output)
    # Ray doesn't play nice and prevents proper shutdown sometimes
    ray.shutdown()
    # time.sleep(1)
    # kill_ray()
    log.info("Experiment has completed.")
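Because a crashed experiment would otherwise leave Ray running, wrapping the run in try/finally is a common hardening of this pattern (run_experiment is a stand-in for the workload above):

import ray

ray.init()
try:
    run_experiment()  # stand-in for tune_run()/run_ray() above
finally:
    ray.shutdown()    # teardown happens even if the run raises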
github rlworkgroup / garage / src / garage / sampler / ray_sampler.py View on Github
    def shutdown_worker(self):
        """Shuts down the worker."""
        ray.shutdown()
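ray.shutdown() is safe to call unconditionally: it is a no-op when no Ray instance is attached, which is why teardown hooks like this one need no guard. A quick sketch:

import ray

ray.shutdown()  # no-op: nothing is running yet
ray.init()
ray.shutdown()
ray.shutdown()  # a second call is also harmless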
github StepNeverStop / RLs / gym_wrapper / wrapper_linux.py View on Github
    def close(self):
        '''
        close all environments.
        '''
        ray.get([env.close.remote() for env in self.envs])
        ray.shutdown()
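The ray.get barrier matters here: shutting down first would kill the in-flight close() calls. A self-contained sketch of the pattern (Env is a stand-in for the wrapped environments):

import ray

@ray.remote
class Env:
    def close(self):
        return True

ray.init()
envs = [Env.remote() for _ in range(4)]
ray.get([env.close.remote() for env in envs])  # wait for every close()
ray.shutdown()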
github HumanCompatibleAI / adversarial-policies / src / aprl / activations / tsne / fit_model.py View on Github
        results = []
        for stem, paths in activation_paths.items():
            output_dir = osp.join(output_root, stem)
            os.makedirs(output_dir)
            future = fit_tsne_helper.remote(
                paths, output_dir, num_components, num_observations, perplexity, data_type
            )
            results.append(future)

        ray.get(results)  # block until all jobs have finished
        utils.add_artifacts(_run, output_root, ingredient=fit_model_ex)
    finally:
        # Clean up temporary directory (if needed)
        if tmp_dir is not None:
            tmp_dir.cleanup()
        ray.shutdown()
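Fanning out remote calls, blocking on the collected futures, and shutting down in a finally block is the core pattern here; a stripped-down sketch (fit_one is a hypothetical stand-in for fit_tsne_helper):

import ray

@ray.remote
def fit_one(path):
    return len(path)  # placeholder work

ray.init()
try:
    futures = [fit_one.remote(p) for p in ["a", "b", "c"]]
    ray.get(futures)  # block until all jobs have finished
finally:
    ray.shutdown()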
github biocore-ntnu / pyranges / pyranges / multithreaded.py View on Github
            if len(dfs.keys()) == 2:
                df, df2 = dfs.values()
                # merge strands
                df = _merge_dfs.remote(df, df2)
            else:
                df = list(dfs.values())[0]  # dict views are not indexable in Python 3

            df = make_unary_sparse(kwargs, df)
            result = call_f_single(function, nparams, df, **kwargs)
            results.append(result)
            keys.append(c)

    results = get(results)

    if nb_cpu > 1:
        ray.shutdown()

    results = process_results(results, keys)

    return results
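pyranges only initializes Ray when nb_cpu > 1, so the matching shutdown is guarded the same way. The pattern in isolation (run_parallel is a hypothetical wrapper):

import ray

def run_parallel(work, nb_cpu=1):
    if nb_cpu > 1:
        ray.init(num_cpus=nb_cpu)
    try:
        return work()
    finally:
        if nb_cpu > 1:
            ray.shutdown()  # only tear down what we started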