How to use the perception.DepthImage class in Perception

To help you get started, we've selected a few perception.DepthImage examples based on popular ways it is used in public projects.
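All of the snippets below follow the same basic pattern: wrap a NumPy depth array (in meters) in a DepthImage, tagged with the camera frame it was captured in. Here is a minimal, hypothetical sketch of that pattern; the array shape, values, and frame name are illustrative, not requirements.

import numpy as np
from perception import DepthImage

# synthetic 480x640 depth map in meters (values are illustrative)
depth_data = 0.5 + 0.1 * np.random.rand(480, 640).astype(np.float32)

# wrap the raw array; `frame` records which camera frame the data lives in
depth_im = DepthImage(depth_data, frame='camera')

# the wrapper supports the conversions used in the snippets below,
# e.g. a color visualization of the depth values
depth_color = depth_im.to_color()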


From BerkeleyAutomation/perception, in perception/primesense_sensor.py:
def _read_depth_images(self, num_images):
        """ Reads depth images from the device """
        depth_images = self._ros_read_images(self._depth_image_buffer, num_images,
                                             self.staleness_limit)
        for i in range(num_images):
            depth_images[i] = depth_images[i] * MM_TO_METERS  # convert mm to meters
            if self._flip_images:
                depth_images[i] = np.flipud(depth_images[i])
                depth_images[i] = np.fliplr(depth_images[i])
            depth_images[i] = DepthImage(depth_images[i], frame=self._frame)
        return depth_images
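A side note on the flips: applying np.flipud followed by np.fliplr is the same as rotating the image 180 degrees, so np.rot90(im, 2) is an equivalent single call:

import numpy as np

im = np.arange(6).reshape(2, 3)
assert np.array_equal(np.fliplr(np.flipud(im)), np.rot90(im, 2))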
From BerkeleyAutomation/perception, in perception/primesense_sensor.py:
def _read_depth_image(self):
        """ Reads a depth image from the device """
        # read raw uint16 buffer
        im_arr = self._depth_stream.read_frame()
        raw_buf = im_arr.get_buffer_as_uint16()
        buf_array = np.array([raw_buf[i] for i in range(PrimesenseSensor.DEPTH_IM_WIDTH * PrimesenseSensor.DEPTH_IM_HEIGHT)])

        # convert to image in meters
        depth_image = buf_array.reshape(PrimesenseSensor.DEPTH_IM_HEIGHT,
                                        PrimesenseSensor.DEPTH_IM_WIDTH)
        depth_image = depth_image * MM_TO_METERS # convert to meters
        if self._flip_images:
            depth_image = np.flipud(depth_image)
        else:
            depth_image = np.fliplr(depth_image)
        return DepthImage(depth_image, frame=self._frame)
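Copying the OpenNI buffer element by element through a Python list comprehension is slow. If the buffer object supports the buffer protocol (ctypes arrays do), np.frombuffer builds the same array in one step. A self-contained sketch of that alternative; the synthetic buffer and the MM_TO_METERS value of 0.001 are assumptions:

import numpy as np

# stand-in for the raw uint16 buffer returned by get_buffer_as_uint16()
raw_buf = np.arange(12, dtype=np.uint16).tobytes()

# one-step alternative to the per-element copy above
buf_array = np.frombuffer(raw_buf, dtype=np.uint16)
depth_image = buf_array.reshape(3, 4) * 0.001  # mm to meters, assuming MM_TO_METERS == 0.001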
From BerkeleyAutomation/perception, in perception/ensenso_sensor.py:
if msg.height != self._camera_intr.height:
            rescale_factor = float(msg.height) / self._camera_intr.height
            self._camera_intr = self._camera_intr.resize(rescale_factor)
            
        # read num points
        num_points = msg.height * msg.width
            
        # read buffer
        raw_tup = struct.Struct(self._format).unpack_from(msg.data, 0)
        raw_arr = np.array(raw_tup)

        # subsample depth values and reshape
        depth_ind = 2 + 4 * np.arange(num_points)
        depth_buf = raw_arr[depth_ind]
        depth_arr = depth_buf.reshape(msg.height, msg.width)
        depth_im = DepthImage(depth_arr, frame=self._frame)

        return depth_im
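The indexing 2 + 4 * np.arange(num_points) implies that each point in the buffer packs four floats with the depth (z) component at offset 2; what the fourth field holds depends on the message format. A toy illustration with made-up values:

import numpy as np

# two points, four floats each: (x, y, z, extra)
raw_arr = np.array([0.10, 0.20, 0.75, 0.0,
                    0.30, 0.40, 0.80, 0.0])
depth_ind = 2 + 4 * np.arange(2)
print(raw_arr[depth_ind])  # [0.75 0.8] -> just the z values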
From BerkeleyAutomation/meshrender, in meshrender/scene.py:
for render_mode in render_modes:
            # Then, convert them to an image wrapper class
            if render_mode == RenderMode.SEGMASK:
                images.append(BinaryImage((depth_im > 0.0).astype(np.uint8), frame=self.camera.intrinsics.frame, threshold=0))

            elif render_mode == RenderMode.COLOR:
                images.append(ColorImage(color_im, frame=self.camera.intrinsics.frame))

            elif render_mode == RenderMode.GRAYSCALE:
                images.append(ColorImage(color_im, frame=self.camera.intrinsics.frame).to_grayscale())

            elif render_mode == RenderMode.DEPTH:
                images.append(DepthImage(depth_im, frame=self.camera.intrinsics.frame))

            elif render_mode == RenderMode.SCALED_DEPTH:
                images.append(DepthImage(depth_im, frame=self.camera.intrinsics.frame).to_color())

            elif render_mode == RenderMode.RGBD:
                c = ColorImage(color_im, frame=self.camera.intrinsics.frame)
                d = DepthImage(depth_im, frame=self.camera.intrinsics.frame)
                images.append(RgbdImage.from_color_and_depth(c, d))

            elif render_mode == RenderMode.GD:
                g = ColorImage(color_im, frame=self.camera.intrinsics.frame).to_grayscale()
                d = DepthImage(depth_im, frame=self.camera.intrinsics.frame)
                images.append(GdImage.from_grayscale_and_depth(g, d))
            else:
                raise ValueError('Render mode {} not supported'.format(render_mode))

        return images
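The same wrapper composition works outside the renderer. A minimal, hypothetical pairing of a color and a depth array into an RgbdImage; the shapes and the frame name are illustrative, and the two frames must match:

import numpy as np
from perception import ColorImage, DepthImage, RgbdImage

color = ColorImage(np.zeros((48, 64, 3), dtype=np.uint8), frame='camera')
depth = DepthImage(np.ones((48, 64), dtype=np.float32), frame='camera')
rgbd = RgbdImage.from_color_and_depth(color, depth)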
From BerkeleyAutomation/perception, in perception/kinect2_sensor_bridge.py:
def _depth_image_callback(self, image_msg):
        """ subscribe to depth image topic and keep it up to date
        """
        depth_arr = self._process_image_msg(image_msg)
        depth = np.array(depth_arr, np.float32)
        self._cur_depth_im = DepthImage(depth, self._frame)
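For this callback to fire, it has to be registered as a ROS subscriber somewhere. A minimal, hypothetical wiring sketch; the node and topic names are assumptions that depend on your kinect2 bridge configuration:

import rospy
from sensor_msgs.msg import Image

def on_depth(image_msg):
    # convert and wrap the message, as _depth_image_callback does above
    pass

rospy.init_node('depth_listener')
rospy.Subscriber('/kinect2/sd/image_depth_rect', Image, on_depth)  # topic name is an assumption
rospy.spin()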
From BerkeleyAutomation/perception, in perception/realsense_sensor.py:
return None, None

        if self._filter_depth:
            depth_frame = self._filter_depth_frame(depth_frame)

        # convert to numpy arrays
        depth_image = self._to_numpy(depth_frame, np.float32)
        color_image = self._to_numpy(color_frame, np.uint8)

        # convert depth to meters
        depth_image *= self._depth_scale

        # bgr to rgb
        color_image = color_image[..., ::-1]

        depth = DepthImage(depth_image, frame=self._frame)
        color = ColorImage(color_image, frame=self._frame)
        return color, depth
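One detail worth noting: the color_image[..., ::-1] slice reverses the channel axis, converting the BGR layout the RealSense pipeline delivers into the RGB order the ColorImage wrapper expects. A two-line check:

import numpy as np

bgr = np.array([[[255, 0, 0]]], dtype=np.uint8)  # one blue pixel in BGR order
print(bgr[..., ::-1])  # [[[  0   0 255]]] -> the same pixel in RGB order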