How to use the ray.ObjectID function in ray

To help you get started, we've selected a few ray.ObjectID examples, drawn from popular ways it is used in public projects.

github ray-project / ray / python / ray / worker.py
if resources is None:
                raise ValueError("The resources dictionary is required.")
            for value in resources.values():
                assert isinstance(value, (int, float))
                if value < 0:
                    raise ValueError(
                        "Resource quantities must be nonnegative.")
                if (value >= 1 and isinstance(value, float)
                        and not value.is_integer()):
                    raise ValueError(
                        "Resource quantities must all be whole numbers.")

            # Submit the task to the local scheduler.
            task = ray.local_scheduler.Task(
                driver_id, ray.ObjectID(
                    function_id.id()), args_for_local_scheduler,
                num_return_vals, self.current_task_id, self.task_index,
                actor_creation_id, actor_creation_dummy_object_id, actor_id,
                actor_handle_id, actor_counter, is_actor_checkpoint_method,
                execution_dependencies, resources, self.use_raylet)
            # Increment the worker's task index to track how many tasks have
            # been submitted by the current task so far.
            self.task_index += 1
            self.local_scheduler_client.submit(task)

            return task.returns()
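
The resource checks at the top of this snippet are self-contained and easy to lift out. A standalone sketch of the same logic (the helper name validate_resources is hypothetical, not part of Ray):

def validate_resources(resources):
    # Hypothetical helper mirroring the checks in worker.py above.
    if resources is None:
        raise ValueError("The resources dictionary is required.")
    for value in resources.values():
        assert isinstance(value, (int, float))
        if value < 0:
            raise ValueError("Resource quantities must be nonnegative.")
        if value >= 1 and isinstance(value, float) and not value.is_integer():
            raise ValueError("Resource quantities must all be whole numbers.")

validate_resources({"CPU": 2, "GPU": 0.5})  # OK: fractional quantities below 1 pass
try:
    validate_resources({"CPU": 1.5})        # floats >= 1 must be whole numbers
except ValueError as e:
    print(e)

Note also that the task's function ID is wrapped in ray.ObjectID before being handed to the local scheduler, which is why function_id.id() can be passed to the ray.ObjectID constructor here.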
github ray-project / ray / python / ray / worker.py
local_scheduler_socket, worker.worker_id, is_worker, worker.use_raylet)

    # If this is a driver, set the current task ID, the task driver ID, and
    # the task index to 0.
    if mode in [SCRIPT_MODE, SILENT_MODE]:
        # If the user provided an object_id_seed, then set the current task
        # ID deterministically based on that seed (without altering the
        # state of the user's random number generator). Otherwise, set the
        # current task ID randomly to avoid object ID collisions.
        numpy_state = np.random.get_state()
        if object_id_seed is not None:
            np.random.seed(object_id_seed)
        else:
            # Try to use true randomness.
            np.random.seed(None)
        worker.current_task_id = ray.ObjectID(np.random.bytes(20))
        # Reset the state of the numpy random number generator.
        np.random.set_state(numpy_state)
        # Set other fields needed for computing task IDs.
        worker.task_index = 0
        worker.put_index = 1

        # Create an entry for the driver task in the task table. This task
        # is added immediately with status RUNNING. This allows us to push
        # errors related to this driver task back to the driver. For
        # example, if the driver creates an object that is later evicted,
        # we should notify the user that we're unable to reconstruct the
        # object, since we cannot rerun the driver.
        nil_actor_counter = 0

        driver_task = ray.local_scheduler.Task(
            worker.task_driver_id, ray.ObjectID(NIL_FUNCTION_ID), [], 0,
            worker.current_task_id, worker.task_index,
            ray.ObjectID(NIL_ACTOR_ID), ray.ObjectID(NIL_ACTOR_ID),
            ray.ObjectID(NIL_ACTOR_ID), ray.ObjectID(NIL_ACTOR_ID),
            nil_actor_counter, False, [], {"CPU": 0}, worker.use_raylet)

        # Add the driver task to the task table.
        if not worker.use_raylet:
            global_state._execute_command(
                driver_task.task_id(), "RAY.TASK_TABLE_ADD",
                driver_task.task_id().id(), TASK_STATUS_RUNNING,
                NIL_LOCAL_SCHEDULER_ID,
                driver_task.execution_dependencies_string(), 0,
                ray.local_scheduler.task_to_string(driver_task))
        else:
            global_state._execute_command(
                driver_task.task_id(), "RAY.TABLE_ADD",
                ray.gcs_utils.TablePrefix.RAYLET_TASK,
                ray.gcs_utils.TablePubsub.RAYLET_TASK,
                driver_task.task_id().id(),
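
The seed handling above is the interesting part: the driver's task ID is drawn from NumPy's RNG, optionally seeded for reproducibility, without disturbing the state of the user's generator. A standalone sketch of that pattern (the helper name deterministic_id_bytes is hypothetical):

import numpy as np

def deterministic_id_bytes(seed=None, length=20):
    # Mirror of the connect() logic above: draw `length` random bytes,
    # optionally seeded, while preserving the caller's RNG state.
    state = np.random.get_state()
    np.random.seed(seed)  # seed=None falls back to true randomness
    id_bytes = np.random.bytes(length)
    np.random.set_state(state)
    return id_bytes

assert deterministic_id_bytes(seed=0) == deterministic_id_bytes(seed=0)  # reproducible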
github ray-project / ray / python / ray / experimental / features.py
def _object_table_shard(shard_index):
    """Return the object table entries stored on one Redis shard."""
    redis_client = ray.global_state.redis_clients[shard_index]
    object_table_keys = redis_client.keys(OBJECT_LOCATION_PREFIX + b"*")
    results = {}
    for key in object_table_keys:
        object_id_binary = key[len(OBJECT_LOCATION_PREFIX):]
        results[binary_to_hex(object_id_binary)] = (
            ray.global_state._object_table(ray.ObjectID(object_id_binary)))

    return results
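
Each Redis key embeds the raw 20-byte object ID; binary_to_hex converts it to the hex string users normally see, while ray.ObjectID wraps the raw bytes for the table lookup. Assuming binary_to_hex is a thin wrapper around hexlify, the round trip looks like this (plain Python, no Ray required):

import binascii

object_id_binary = bytes(20)  # a 20-byte object ID, as embedded in the Redis key
hex_id = binascii.hexlify(object_id_binary).decode()
assert binascii.unhexlify(hex_id) == object_id_binary  # back to the raw bytes
print(hex_id)  # 40 hex characters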
github ray-project / ray / python / ray / worker.py
block until all the values for object_ids have been written to the
        local object store.

        Args:
            object_ids (List[object_id.ObjectID]): A list of the object IDs
                whose values should be retrieved.
            timeout (float): The maximum amount of time in seconds to
                wait before returning.

        Raises:
            Exception if running in LOCAL_MODE and any of the object IDs do not
            exist in the emulated object store.
        """
        # Make sure that the values are object IDs.
        for object_id in object_ids:
            if not isinstance(object_id, ObjectID):
                raise TypeError(
                    "Attempting to call `get` on the value {}, "
                    "which is not an ray.ObjectID.".format(object_id))

        if self.mode == LOCAL_MODE:
            return self.local_mode_manager.get_objects(object_ids)

        timeout_ms = int(timeout * 1000) if timeout is not None else -1
        data_metadata_pairs = self.core_worker.get_objects(
            object_ids, self.current_task_id, timeout_ms)
        return self.deserialize_objects(data_metadata_pairs, object_ids)
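
From user code, this guard is what rejects plain values passed to ray.get. A hedged usage sketch, assuming the same Ray version as the snippet:

import ray

ray.init()

oid = ray.put(42)          # ray.put returns a ray.ObjectID
assert ray.get(oid) == 42  # ray.get resolves it back to the stored value
# ray.get(42) would raise the TypeError shown above: 42 is not a ray.ObjectID.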
github ray-project / ray / python / ray / worker.py
final_results = self.retrieve_and_deserialize(plain_object_ids, 0)
        # Construct a dictionary mapping object IDs that we haven't gotten yet
        # to their original index in the object_ids argument.
        unready_ids = {
            plain_object_ids[i].binary(): i
            for (i, val) in enumerate(final_results)
            if val is plasma.ObjectNotAvailable
        }
        was_blocked = (len(unready_ids) > 0)
        # Try reconstructing any objects we haven't gotten yet. Try to get them
        # until at least get_timeout_milliseconds milliseconds passes, then
        # repeat.
        while len(unready_ids) > 0:
            for unready_id in unready_ids:
                self.local_scheduler_client.reconstruct_objects(
                    [ray.ObjectID(unready_id)], False)
            # Do another fetch for objects that aren't available locally yet,
            # in case they were evicted since the last fetch. We divide the
            # fetch into smaller fetches so as to not block the manager for a
            # prolonged period of time in a single call.
            object_ids_to_fetch = list(
                map(plasma.ObjectID, unready_ids.keys()))
            ray_object_ids_to_fetch = list(
                map(ray.ObjectID, unready_ids.keys()))
            for i in range(0, len(object_ids_to_fetch),
                           ray._config.worker_fetch_request_size()):
                if not self.use_raylet:
                    self.plasma_client.fetch(object_ids_to_fetch[i:(
                        i + ray._config.worker_fetch_request_size())])
                else:
                    self.local_scheduler_client.reconstruct_objects(
                        ray_object_ids_to_fetch[i:(
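
The slicing here batches the fetch into chunks of worker_fetch_request_size so that no single call blocks the manager for long. The batching pattern in isolation (the helper name chunks is hypothetical):

def chunks(items, size):
    # Yield successive fixed-size slices, like the fetch loop above.
    for i in range(0, len(items), size):
        yield items[i:i + size]

for batch in chunks(list(range(10)), 4):
    print(batch)  # [0, 1, 2, 3], then [4, 5, 6, 7], then [8, 9]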
github ray-project / ray / python / ray / worker.py
or they are ray.ObjectIDs.

        Returns:
            The retrieved arguments in addition to the arguments that were
                passed by value.

        Raises:
            RayError: This exception is raised if a task that
                created one of the arguments failed.
        """
        arguments = [None] * len(serialized_args)
        object_ids = []
        object_indices = []

        for (i, arg) in enumerate(serialized_args):
            if isinstance(arg, ObjectID):
                object_ids.append(arg)
                object_indices.append(i)
            else:
                # Pass the argument by value.
                arguments[i] = arg

        # Get the objects from the local object store.
        if len(object_ids) > 0:
            values = self.get_objects(object_ids)
            for i, value in enumerate(values):
                if isinstance(value, RayError):
                    raise value
                else:
                    arguments[object_indices[i]] = value

        return ray.signature.recover_args(arguments)
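
The partitioning idiom here, splitting arguments into ObjectIDs to resolve and plain values to keep, is reusable on its own. A minimal sketch with stand-in callables (resolve_args, is_ref, and get_objects are hypothetical names, not Ray API):

def resolve_args(serialized_args, is_ref, get_objects):
    # is_ref(arg) tests whether arg is an object reference;
    # get_objects(refs) fetches their values in one batch.
    arguments = [None] * len(serialized_args)
    ref_ids, ref_indices = [], []
    for i, arg in enumerate(serialized_args):
        if is_ref(arg):
            ref_ids.append(arg)
            ref_indices.append(i)
        else:
            arguments[i] = arg  # passed by value, keep as-is
    for index, value in zip(ref_indices, get_objects(ref_ids)):
        arguments[index] = value
    return arguments

resolve_args([1, "ref-a", 2], is_ref=lambda a: isinstance(a, str),
             get_objects=lambda refs: [r.upper() for r in refs])
# -> [1, 'REF-A', 2]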
github ray-project / ray / python / ray / experimental / serve / server.py
"Request SLO must be positive, it is {}".format(
                            request_slo_ms))
            except ValueError as e:
                await JSONResponse({"error": str(e)})(scope, receive, send)
                return

        result_object_id_bytes = await (
            self.serve_global_state.init_or_get_router()
            .enqueue_request.remote(
                service=endpoint_name,
                request_args=(scope, http_body_bytes),
                request_kwargs=dict(),
                request_context=TaskContext.Web,
                request_slo_ms=request_slo_ms))

        result = await ray.ObjectID(result_object_id_bytes)

        if isinstance(result, ray.exceptions.RayTaskError):
            await JSONResponse({
                "error": "internal error, please use python API to debug"
            })(scope, receive, send)
        else:
            await JSONResponse({"result": result})(scope, receive, send)