How to use the ray.put function in ray

To help you get started, we've selected a few ray.put examples based on popular ways the function is used in public projects.

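Before looking at the project snippets, here is a minimal, self-contained sketch of the core pattern: ray.put stores a value in Ray's object store once and returns an object ref, which can then be passed to any number of tasks or actors without re-serializing the value each time. The sum_matrix task below is our own illustration, not code from the projects listed.

import ray
import numpy as np

ray.init()

@ray.remote
def sum_matrix(matrix):
    # Ray resolves the object ref to the stored array before calling the task.
    return float(np.sum(matrix))

# Serialize the array into the object store once.
matrix_ref = ray.put(np.ones((1000, 1000)))

# Every task receives the same stored copy via the ref.
print(ray.get([sum_matrix.remote(matrix_ref) for _ in range(4)]))
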

github ray-project / ray / test / microbenchmarks.py
        elapsed_times.append(end_time - start_time)
    elapsed_times = np.sort(elapsed_times)
    average_elapsed_time = sum(elapsed_times) / 1000
    print("Time required to submit a trivial function call and get the "
          "result:")
    print("    Average: {}".format(average_elapsed_time))
    print("    90th percentile: {}".format(elapsed_times[900]))
    print("    99th percentile: {}".format(elapsed_times[990]))
    print("    worst:           {}".format(elapsed_times[999]))
    # average_elapsed_time should be about 0.0013.

    # Measure the time required to do a put.
    elapsed_times = []
    for _ in range(1000):
        start_time = time.time()
        ray.put(1)
        end_time = time.time()
        elapsed_times.append(end_time - start_time)
    elapsed_times = np.sort(elapsed_times)
    average_elapsed_time = sum(elapsed_times) / 1000
    print("Time required to put an int:")
    print("    Average: {}".format(average_elapsed_time))
    print("    90th percentile: {}".format(elapsed_times[900]))
    print("    99th percentile: {}".format(elapsed_times[990]))
    print("    worst:           {}".format(elapsed_times[999]))
    # average_elapsed_time should be about 0.00087.
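The benchmark above is an excerpt: it assumes that ray, time, and numpy are already imported and that ray.init() has been called, and the quoted averages come from the original comments and will vary by machine. A self-contained version of just the ray.put timing loop might look like this:

import time
import numpy as np
import ray

ray.init()

ray.put(1)  # warm-up so setup cost is not counted in the first sample

elapsed_times = []
for _ in range(1000):
    start_time = time.time()
    ray.put(1)
    end_time = time.time()
    elapsed_times.append(end_time - start_time)

elapsed_times = np.sort(elapsed_times)
print("Average:         ", np.mean(elapsed_times))
print("90th percentile: ", elapsed_times[900])
print("99th percentile: ", elapsed_times[990])
print("Worst:           ", elapsed_times[999])
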
github ray-project / ray / doc / examples / doc_code / torch_example.py
ray.get([NetworkActor.train.remote(), NetworkActor2.train.remote()])
# __torch_actor_end__
# yapf: enable

# yapf: disable
# __weight_average_start__
weights = ray.get(
    [NetworkActor.get_weights.remote(),
     NetworkActor2.get_weights.remote()])

from collections import OrderedDict
averaged_weights = OrderedDict(
    [(k, (weights[0][k] + weights[1][k]) / 2) for k in weights[0]])

weight_id = ray.put(averaged_weights)
[
    actor.set_weights.remote(weight_id)
    for actor in [NetworkActor, NetworkActor2]
]
ray.get([actor.train.remote() for actor in [NetworkActor, NetworkActor2]])
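In the snippet above, the averaged state dict is stored once with ray.put and both actors receive the same object ref; passing averaged_weights directly to each set_weights.remote call would serialize the dict once per actor instead. A simplified sketch of that broadcast pattern, using a stand-in ParameterActor of our own rather than the example's NetworkActor:

import ray

ray.init()

@ray.remote
class ParameterActor:
    def __init__(self):
        self.weights = None

    def set_weights(self, weights):
        # Ray resolves the object ref to the stored dict before this runs.
        self.weights = weights

    def get_weights(self):
        return self.weights

actors = [ParameterActor.remote() for _ in range(2)]

averaged_weights = {"layer1": 0.5, "layer2": 1.5}
weight_ref = ray.put(averaged_weights)  # serialize the dict once
ray.get([actor.set_weights.remote(weight_ref) for actor in actors])
print(ray.get(actors[0].get_weights.remote()))
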
github dkeras-project / dkeras / dkeras / dkeras.py
                sample_weight_mode=None,
                weighted_metrics=None,
                target_tensors=None):
        """

        :param optimizer: optimizer name or instance, as accepted by Keras Model.compile
        :param loss: loss function name, callable, or dict mapping outputs to losses
        :param metrics: list of metrics to evaluate during training and testing
        :param loss_weights: optional coefficients weighting the contribution of each loss
        :param sample_weight_mode: "temporal" for timestep-wise sample weighting, else None
        :param weighted_metrics: metrics to be weighted by sample_weight during training
        :param target_tensors: custom target tensors to use instead of Keras placeholders
        :return: None; the arguments are forwarded to the underlying Keras model's compile,
            either directly or via the remote data server when running distributed
        """
        if self.distributed:
            compile_data = ray.put([optimizer,
                                    loss,
                                    metrics,
                                    loss_weights,
                                    sample_weight_mode,
                                    weighted_metrics,
                                    target_tensors])
            self.data_server.push_compile.remote(compile_data)
        else:
            self.model.compile(optimizer,
                               loss=loss,
                               metrics=metrics,
                               loss_weights=loss_weights,
                               sample_weight_mode=sample_weight_mode)
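In the distributed branch, all of the compile arguments are bundled into a single list and stored with one ray.put call, so the remote data server can fetch the whole bundle with a single ray.get. push_compile and the data server are part of dkeras; the sketch below shows only the generic bundle-and-unpack pattern with made-up argument values:

import ray

ray.init()

# Bundle heterogeneous arguments into one object-store entry.
compile_data = ray.put(["adam", "mse", ["accuracy"], None, None, None, None])

# A consumer (here the driver itself) unpacks them with one ray.get.
optimizer, loss, metrics, *rest = ray.get(compile_data)
print(optimizer, loss, metrics)
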
github ray-project / ray / doc / examples / lbfgs / driver.py
def full_loss(theta):
    theta_id = ray.put(theta)
    loss_ids = [actor.loss.remote(theta_id) for actor in actors]
    return sum(ray.get(loss_ids))
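full_loss puts theta into the object store once per evaluation, so every actor reads the same stored copy instead of receiving its own serialized theta. To run the excerpt on its own, the actors list needs actors with a loss method; the LossActor below is a hypothetical stand-in, not the example's real network actor:

import numpy as np
import ray

ray.init()

@ray.remote
class LossActor:
    def __init__(self, data):
        self.data = data

    def loss(self, theta):
        # Toy quadratic loss over this actor's shard of the data.
        return float(np.sum((self.data - theta) ** 2))

actors = [LossActor.remote(np.random.rand(100)) for _ in range(4)]

def full_loss(theta):
    theta_id = ray.put(theta)
    loss_ids = [actor.loss.remote(theta_id) for actor in actors]
    return sum(ray.get(loss_ids))

print(full_loss(np.zeros(100)))
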
github ray-project / ray / rllib / optimizers / sync_batch_replay_optimizer.py
    def step(self):
        with self.update_weights_timer:
            if self.workers.remote_workers():
                weights = ray.put(self.workers.local_worker().get_weights())
                for e in self.workers.remote_workers():
                    e.set_weights.remote(weights)

        with self.sample_timer:
            if self.workers.remote_workers():
                batches = ray_get_and_free(
                    [e.sample.remote() for e in self.workers.remote_workers()])
            else:
                batches = [self.workers.local_worker().sample()]

            # Handle everything as if multiagent
            tmp = []
            for batch in batches:
                if isinstance(batch, SampleBatch):
                    batch = MultiAgentBatch({
                        DEFAULT_POLICY_ID: batch
github ray-project / ray / python / ray / ray_perf.py
    def do_put():
        for _ in range(10):
            ray.put(np.zeros(10 * 1024 * 1024, dtype=np.int64))
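do_put stores a roughly 80 MiB int64 array in the object store on every iteration. For large numpy arrays of primitive types Ray keeps the buffer in shared memory, and ray.get returns a zero-copy, read-only view of it; the sketch below illustrates that behavior (verify the read-only detail against your Ray version):

import numpy as np
import ray

ray.init()

array_ref = ray.put(np.zeros(10 * 1024 * 1024, dtype=np.int64))
array = ray.get(array_ref)

print(array.nbytes)           # 83886080 bytes, about 80 MiB
print(array.flags.writeable)  # False: the buffer lives in the shared object store

# Copy before mutating, since the stored buffer is immutable.
mutable = array.copy()
mutable[0] = 1
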
github ray-project / ray / rllib / optimizers / sync_samples_optimizer.py
    def step(self):
        with self.update_weights_timer:
            if self.workers.remote_workers():
                weights = ray.put(self.workers.local_worker().get_weights())
                for e in self.workers.remote_workers():
                    e.set_weights.remote(weights)

        with self.sample_timer:
            samples = []
            while sum(s.count for s in samples) < self.train_batch_size:
                if self.workers.remote_workers():
                    samples.extend(
                        ray_get_and_free([
                            e.sample.remote()
                            for e in self.workers.remote_workers()
                        ]))
                else:
                    samples.append(self.workers.local_worker().sample())
            samples = SampleBatch.concat_samples(samples)
            self.sample_timer.push_units_processed(samples.count)
github ray-project / ray / rllib / optimizers / async_replay_optimizer.py
            for i, (ev, (sample_batch, count)) in enumerate(completed):
                sample_timesteps += counts[i]

                # Send the data to the replay buffer
                random.choice(
                    self.replay_actors).add_batch.remote(sample_batch)

                # Update weights if needed
                self.steps_since_update[ev] += counts[i]
                if self.steps_since_update[ev] >= self.max_weight_sync_delay:
                    # Note that it's important to pull new weights once
                    # updated to avoid excessive correlation between actors
                    if weights is None or self.learner.weights_updated:
                        self.learner.weights_updated = False
                        with self.timers["put_weights"]:
                            weights = ray.put(
                                self.workers.local_worker().get_weights())
                    ev.set_weights.remote(weights)
                    self.num_weight_syncs += 1
                    self.steps_since_update[ev] = 0

                # Kick off another sample request
                self.sample_tasks.add(ev, ev.sample_with_count.remote())

        with self.timers["replay_processing"]:
            for ra, replay in self.replay_tasks.completed():
                self.replay_tasks.add(ra, ra.replay.remote())
                if self.learner.inqueue.full():
                    self.num_samples_dropped += 1
                else:
                    with self.timers["get_samples"]:
                        samples = ray_get_and_free(replay)
github ray-project / ray / python / ray / rllib / optimizers / async_samples_optimizer.py
                continue

            sample_timesteps += sample_batch.count

            # Put in replay buffer if enabled
            if self.replay_buffer_num_slots > 0:
                self.replay_batches.append(sample_batch)
                if len(self.replay_batches) > self.replay_buffer_num_slots:
                    self.replay_batches.pop(0)

            # Note that it's important to pull new weights once
            # updated to avoid excessive correlation between actors
            if weights is None or (self.learner.weights_updated
                                   and num_sent >= self.broadcast_interval):
                self.learner.weights_updated = False
                weights = ray.put(self.local_evaluator.get_weights())
                num_sent = 0
            ev.set_weights.remote(weights)
            self.num_weight_syncs += 1
            num_sent += 1

            # Kick off another sample request
            self.sample_tasks.add(ev, ev.sample.remote())

        while not self.learner.outqueue.empty():
            count = self.learner.outqueue.get()
            train_timesteps += count

        return sample_timesteps, train_timesteps
github ray-project / ray / python / ray / tune / util.py
def pin_in_object_store(obj):
    """Deprecated, use ray.put(value, weakref=False) instead."""

    obj_id = ray.put(obj, weakref=False)
    _pinned_objects.append(obj_id)
    return obj_id
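The weakref flag mentioned in the docstring belongs to older Ray releases; in more recent versions ray.put no longer accepts it, and an object stays in the object store for as long as some reference to the returned ObjectRef is held. A hedged sketch of the same pinning idea against a newer API (exact lifetime semantics are version-dependent):

import ray

ray.init()

_pinned_objects = []

def pin_in_object_store(obj):
    # Holding the ref in a module-level list keeps the object from being
    # released by Ray's reference counting.
    ref = ray.put(obj)
    _pinned_objects.append(ref)
    return ref

ref = pin_in_object_store({"config": 42})
print(ray.get(ref))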