How to use the ray.get function in ray

To help you get started, we’ve selected a few ray.get examples, based on popular ways the function is used in public projects.

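ray.get takes one or more object IDs (ObjectRefs in newer Ray releases) produced by .remote() calls, blocks until the corresponding tasks or actor methods have finished, and returns their results; given a list, it waits for all of them and preserves order. Before the project examples, here is a minimal self-contained sketch of that basic pattern (square and Counter are illustrative names, not taken from the projects below):

import ray

ray.init()

@ray.remote
def square(x):
    return x * x

@ray.remote
class Counter(object):
    def __init__(self):
        self.count = 0

    def increment(self):
        self.count += 1
        return self.count

# ray.get on a single reference blocks until the task finishes.
assert ray.get(square.remote(4)) == 16

# ray.get on a list waits for all results and preserves order.
assert ray.get([square.remote(i) for i in range(5)]) == [0, 1, 4, 9, 16]

# The same call retrieves the results of actor method invocations.
counter = Counter.remote()
assert ray.get(counter.increment.remote()) == 1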

github ray-project / ray / test / stress_tests.py
@ray.remote
class Actor(object):
    def ping(self):
        return

@ray.remote
class Worker(object):
    def __init__(self, actor):
        self.actor = actor

    def ping(self):
        # Block until the wrapped actor's ping task completes.
        return ray.get(self.actor.ping.remote())

a = Actor.remote()
workers = [Worker.remote(a) for _ in range(100)]
for _ in range(10):
    # ray.get on a list of object IDs waits for every ping to finish.
    out = ray.get([w.ping.remote() for w in workers])
    assert out == [None for _ in workers]

github diux-dev / cluster / psbench / ray_localsgd.py
def run_driver():
  ray.init(redis_address=args.ip)

  # start workers training asynchronously
  workers = [Worker.remote(i, args.num_workers) for i in
             range(args.num_workers)]

  workers[0].train.remote(100)
  print(ray.get(workers[0].get_params.remote()))

  def start_worker(w):
    w.train.remote(args.iters)
  print("First part done")

  threads = []
  for worker in workers:
    threads.append(threading.Thread(target=start_worker, args=(worker,)))
    threads[-1].start()

  while True:
    params0 = workers[0].get_params.remote()
    print(ray.get(params0))
    time.sleep(0.25)

github diux-dev / cluster / tf_numpy_benchmark / tf_numpy_benchmark.py
def tf_add0_to_ray_fast():
  params0 = np.ones((args_dim,), dtype=np.float32)
  # Round-trip the array through the Ray object store.
  ray_obj = ray.put(params0)
  params0 = ray.get(ray_obj)
  # ray.get returns a read-only view into the object store; mark it writable again.
  params0.flags['WRITEABLE'] = True

  with tf.device('/cpu:0'):
    params = tf.placeholder(tf.float32)
    result = params + 0

  for i in range(args.num_iters):
    with timeit():
      sess.run(result.op, feed_dict={params: params0})
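The WRITEABLE line above reflects a general behavior of ray.get that is easy to trip over: numpy arrays retrieved from the object store are zero-copy views and come back read-only. A small, self-contained sketch of that behavior (independent of the benchmark code above; the exact flag behavior can vary slightly across Ray versions):

import numpy as np
import ray

ray.init(ignore_reinit_error=True)

arr = np.ones((1024, 1024), dtype=np.float32)
ref = ray.put(arr)
fetched = ray.get(ref)

# Arrays fetched from the object store are typically zero-copy and read-only.
print(fetched.flags['WRITEABLE'])  # expected: False

# Taking a copy yields an ordinary, writable array without touching the store.
writable = fetched.copy()
writable += 1.0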

github ray-project / ray / python / ray / experimental / queue.py
def empty(self):
        """Whether the queue is empty."""
        # The queue is empty when the actor reports a size of zero.
        return not ray.get(self.actor.qsize.remote())

github rlgraph / rlgraph / rlgraph / execution / ray / ray_executor.py
        """
        Retrieves full episode-reward time series for a worker by id (or first worker in registry if None).

        Args:
            worker_index (Optional[int]): Index of worker to fetch.

        Returns:
            dict: Full results for this worker.
        """
        if worker_index is not None:
            ray_worker = self.ray_env_sample_workers[worker_index]
        else:
            # Otherwise, just pick the first worker.
            ray_worker = self.ray_env_sample_workers[0]

        task = ray_worker.get_workload_statistics.remote()
        metrics = ray.get(task)

        # Return full reward series.
        return dict(
            episode_rewards=metrics["episode_rewards"],
            episode_timesteps=metrics["episode_timesteps"]
        )

github dkeras-project / dkeras / dkeras / utils / qsub_functions.py
def wait_for_workers(n_workers, timeout=300):
    start_time = time.time()
    print("Waiting for {} workers".format(n_workers))
    while True:
        # ray.get blocks until the remote node-count task has finished.
        n_nodes = ray.get(_get_n_nodes.remote())
        if n_nodes >= n_workers:
            return True
        # Give up once the elapsed time exceeds the timeout.
        if time.time() - start_time >= timeout:
            return False

github modin-project / modin / modin / engines / ray / pandas_on_ray / frame / data.py
def combine_dtypes(cls, list_of_dtypes, column_names):
        # Compute dtypes by collecting and combining the dtypes reported by all of the
        # partitions. The dtypes reported by different partitions can differ because each
        # worker infers them from the limited data it sees. We use pandas to compute the
        # exact dtype over the whole column for each column.
        dtypes = (
            pandas.concat(ray.get(list_of_dtypes), axis=1)
            .apply(lambda row: find_common_type(row.values), axis=1)
            .squeeze(axis=0)
        )
        dtypes.index = column_names
        return dtypes

github openai / neural-mmo / forge / trinity / smith.py
def step(self, actions=None):
      recvs = [e.step.remote() for e in self.envs]
      # Block until every remote environment has finished its step.
      return ray.get(recvs)

github AboudyKreidieh / h-baselines / hbaselines / algorithms / on_policy.py
        Returns
        -------
        gym.spaces.*
            the action space of the training environment
        gym.spaces.*
            the observation space of the training environment
        gym.spaces.* or None
            the context space of the training environment (i.e. the same as the
            desired environmental goal)
        gym.spaces.* or None
            the full-state observation space of the training environment
        """
        sampler = self.sampler[0]

        if self.num_envs > 1:
            ac_space = ray.get(sampler.action_space.remote())
            ob_space = ray.get(sampler.observation_space.remote())
            co_space = ray.get(sampler.context_space.remote())
            all_ob_space = ray.get(sampler.all_observation_space.remote())
        else:
            ac_space = sampler.action_space()
            ob_space = sampler.observation_space()
            co_space = sampler.context_space()
            all_ob_space = sampler.all_observation_space()

        return ac_space, ob_space, co_space, all_ob_space
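One thing to note about the pattern above: each ray.get blocks before the next remote call is submitted, so the four space lookups run one after another. When the results are independent, you can submit all the remote calls first and pass the list to a single ray.get. A minimal sketch of the batched form (lookup is a hypothetical stand-in, not part of h-baselines):

import ray

ray.init(ignore_reinit_error=True)

@ray.remote
def lookup(name):
    # Hypothetical stand-in for a remote space lookup on the sampler actor.
    return "space:{}".format(name)

# Sequential: each ray.get blocks before the next task is even submitted.
ac_space = ray.get(lookup.remote("action"))
ob_space = ray.get(lookup.remote("observation"))

# Batched: submit everything first, then block once; results come back in order.
refs = [lookup.remote(n) for n in
        ("action", "observation", "context", "all_observation")]
ac_space, ob_space, co_space, all_ob_space = ray.get(refs)

For cheap lookups like these the difference is mostly stylistic, but for expensive tasks the batched form overlaps their execution.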

github ray-project / ray / python / ray / dataframe / dataframe.py
def _get_col_lengths(self):
        """Gets the lengths for each partition and caches it if it wasn't.

        Returns:
            A list of integers representing the length of each partition.
        """
        if self._col_length_cache is None:
            return None
        if isinstance(self._col_length_cache, ray.local_scheduler.ObjectID):
            self._col_length_cache = ray.get(self._col_length_cache)
        elif isinstance(self._col_length_cache, list) and \
                isinstance(self._col_length_cache[0],
                           ray.local_scheduler.ObjectID):
            self._col_length_cache = ray.get(self._col_length_cache)
        return self._col_length_cache