How to use the perception.ColorImage class in Perception

To help you get started, we've selected a few Perception examples based on popular ways ColorImage is used in public projects.

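Before the examples, here is a minimal sketch of constructing a ColorImage directly, assuming perception is installed and exports ColorImage at the package root (as the snippets below do); the dimensions and frame name are illustrative:

import numpy as np
from perception import ColorImage

# ColorImage expects an HxWx3 uint8 array; dimensions here are illustrative
data = (255.0 * np.random.rand(480, 640, 3)).astype(np.uint8)
im = ColorImage(data, frame='camera')
print(im.height, im.width, im.channels)  # 480 640 3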

From BerkeleyAutomation/perception, tests/test_image.py:
def test_color_init(self):
    # valid data: ColorImage expects an HxWx3 uint8 array
    random_valid_data = (255.0 * np.random.rand(IM_HEIGHT, IM_WIDTH, 3)).astype(np.uint8)
    im = ColorImage(random_valid_data)
    self.assertEqual(im.height, IM_HEIGHT)
    self.assertEqual(im.width, IM_WIDTH)
    self.assertEqual(im.channels, 3)
    self.assertTrue(np.allclose(im.data, random_valid_data))

    # invalid channels: a 2D array has no color channels
    random_data = np.random.rand(IM_HEIGHT, IM_WIDTH).astype(np.uint8)
    caught_bad_channels = False
    try:
        im = ColorImage(random_data)
    except Exception:
        caught_bad_channels = True
    self.assertTrue(caught_bad_channels)

    # invalid type: float32 data is rejected by the constructor
    random_data = np.random.rand(IM_HEIGHT, IM_WIDTH, 3).astype(np.float32)
    caught_bad_dtype = False
    try:
        im = ColorImage(random_data)
    except Exception:
        caught_bad_dtype = True
    self.assertTrue(caught_bad_dtype)
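The try/except flags above can be written more compactly with unittest's assertRaises context manager inside the same test method; a sketch of the same checks (Exception is deliberately broad, mirroring the original bare except rather than assuming a specific error type):

# same checks via assertRaises; Exception matches the original bare except
with self.assertRaises(Exception):
    ColorImage(np.random.rand(IM_HEIGHT, IM_WIDTH).astype(np.uint8))       # bad channels
with self.assertRaises(Exception):
    ColorImage(np.random.rand(IM_HEIGHT, IM_WIDTH, 3).astype(np.float32))  # bad dtype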

From BerkeleyAutomation/perception, tests/test_image.py:
def test_transform(self):
    random_valid_data = (255.0 * np.random.rand(IM_HEIGHT, IM_WIDTH, 3)).astype(np.uint8)
    im = ColorImage(random_valid_data)

    # translate by (2, 2) with zero rotation; pixel (0, 0) should land at (2, 2)
    translation = np.array([2, 2])
    im_tf = im.transform(translation, 0.0)
    self.assertTrue(np.allclose(im[0, 0], im_tf[2, 2]))
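Judging from this test, transform takes a 2D pixel translation and a rotation angle and returns a new image. Continuing with the im above, a hedged sketch (whether the angle is in radians or degrees is an assumption to check against the perception docs):

import numpy as np

# shift two pixels down and right with no rotation, as in the test
im_shifted = im.transform(np.array([2, 2]), 0.0)

# rotation only; angle units (radians assumed here) should be verified
im_rotated = im.transform(np.zeros(2), np.pi / 4)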

From BerkeleyAutomation/perception, tests/test_image.py:
def test_resize(self):
    random_valid_data = (255.0 * np.random.rand(IM_HEIGHT, IM_WIDTH, 3)).astype(np.uint8)
    im = ColorImage(random_valid_data)

    # upscaling doubles both dimensions
    big_scale = 2.0
    big_im = im.resize(big_scale)
    self.assertEqual(big_im.height, big_scale * IM_HEIGHT)
    self.assertEqual(big_im.width, big_scale * IM_WIDTH)

    # downscaling halves both dimensions
    small_scale = 0.5
    small_im = im.resize(small_scale)
    self.assertEqual(small_im.height, small_scale * IM_HEIGHT)
    self.assertEqual(small_im.width, small_scale * IM_WIDTH)
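resize takes a scale factor and returns a new image, leaving the original untouched; continuing with the im above:

half = im.resize(0.5)     # shrink to half size in each dimension
double = im.resize(2.0)   # grow to double size in each dimension
print(half.height, half.width, double.height, double.width)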

From BerkeleyAutomation/perception, perception/webcam_sensor.py:
"""
        if most_recent:
            for i in range(4):
                self._cap.grab()
        for i in range(1):
            if self._adjust_exposure:
                try:
                    command = 'v4l2-ctl -d /dev/video{} -c exposure_auto=1 -c exposure_auto_priority=0 -c exposure_absolute=100 -c saturation=60 -c gain=140'.format(self._device_id)
                    FNULL = open(os.devnull, 'w')
                    subprocess.call(shlex.split(command), stdout=FNULL, stderr=subprocess.STDOUT)
                except:
                    pass
            ret, frame = self._cap.read()
        rgb_data = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        return ColorImage(rgb_data, frame=self._frame), None, None
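A hedged sketch of driving this sensor end to end; the constructor arguments and the start/stop lifecycle methods are assumptions based on the usual perception sensor interface, so check them against the class before relying on this:

from perception import WebcamSensor  # assumed import path

sensor = WebcamSensor(device_id=0)   # device_id argument is an assumption
sensor.start()                       # assumed lifecycle method
try:
    # frames() returns (ColorImage, None, None) per the snippet above
    color_im, _, _ = sensor.frames(most_recent=True)
    color_im.save('frame.png')       # save() path is illustrative
finally:
    sensor.stop()                    # assumed lifecycle method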

From BerkeleyAutomation/meshrender, meshrender/scene.py:
                images.append(BinaryImage((depth_im > 0.0).astype(np.uint8), frame=self.camera.intrinsics.frame, threshold=0))

            elif render_mode == RenderMode.COLOR:
                images.append(ColorImage(color_im, frame=self.camera.intrinsics.frame))

            elif render_mode == RenderMode.GRAYSCALE:
                images.append(ColorImage(color_im, frame=self.camera.intrinsics.frame).to_grayscale())

            elif render_mode == RenderMode.DEPTH:
                images.append(DepthImage(depth_im, frame=self.camera.intrinsics.frame))

            elif render_mode == RenderMode.SCALED_DEPTH:
                images.append(DepthImage(depth_im, frame=self.camera.intrinsics.frame).to_color())

            elif render_mode == RenderMode.RGBD:
                c = ColorImage(color_im, frame=self.camera.intrinsics.frame)
                d = DepthImage(depth_im, frame=self.camera.intrinsics.frame)
                images.append(RgbdImage.from_color_and_depth(c, d))

            elif render_mode == RenderMode.GD:
                g = ColorImage(color_im, frame=self.camera.intrinsics.frame).to_grayscale()
                d = DepthImage(depth_im, frame=self.camera.intrinsics.frame)
                images.append(GdImage.from_grayscale_and_depth(g, d))
            else:
                raise ValueError('Render mode {} not supported'.format(render_mode))

        return images
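Stripped of the renderer, the per-mode wrapping above boils down to a few constructor and conversion calls; a standalone sketch with zero-filled stand-in buffers:

import numpy as np
from perception import ColorImage, DepthImage

color_buf = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a rendered color buffer
depth_buf = np.zeros((480, 640), dtype=np.float32)   # stand-in for a rendered depth buffer

c = ColorImage(color_buf, frame='camera')
g = c.to_grayscale()    # GRAYSCALE branch above
d = DepthImage(depth_buf, frame='camera')
scaled = d.to_color()   # SCALED_DEPTH branch above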

From BerkeleyAutomation/perception, perception/ensenso_sensor.py:
        Returns
        -------
        :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
            The ColorImage, DepthImage, and IrImage of the current frame.

        Raises
        ------
        RuntimeError
            If the Ensenso stream is not running.
        """
        # wait for a new image to arrive from the stream
        while self._cur_depth_im is None:
            time.sleep(0.01)

        # read the next image; a blank ColorImage stands in for the color channel
        depth_im = self._cur_depth_im
        color_im = ColorImage(np.zeros([depth_im.height,
                                        depth_im.width,
                                        3]).astype(np.uint8), frame=self._frame)
        self._cur_depth_im = None
        return color_im, depth_im, None
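One caveat with the loop above: it spins forever if the stream stops producing depth images. A sketch of the same wait with a deadline (the five-second timeout is illustrative):

import time

# same polling pattern, but fail loudly instead of blocking forever
deadline = time.time() + 5.0  # illustrative timeout
while self._cur_depth_im is None:
    if time.time() > deadline:
        raise RuntimeError('Ensenso stream produced no depth image within 5 s')
    time.sleep(0.01)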

From BerkeleyAutomation/perception, tools/generate_siamese_dataset.py:
def rotate_image(image):
    # rotate by a uniformly random angle; reshape=False keeps the original shape
    angle = np.random.uniform(0.0, 360.0)
    rotated_image_data = scipy.ndimage.rotate(image.data, angle, reshape=False)
    rotated_image = ColorImage(rotated_image_data, frame=image.frame)
    return rotated_image
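Because reshape=False crops rather than grows the canvas, the rotated image keeps the input dimensions; a quick usage sketch with illustrative sizes:

import numpy as np
from perception import ColorImage

data = (255.0 * np.random.rand(64, 64, 3)).astype(np.uint8)
im = ColorImage(data, frame='image')
rotated = rotate_image(im)
assert rotated.height == im.height and rotated.width == im.width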

From BerkeleyAutomation/perception, perception/colorized_phoxi_sensor.py:
        # per-pixel minimum depths; keep only points within 5 mm of the nearest
        min_depths_pp = icd_depths[unique_inv]
        depth_delta_mask = np.abs(min_depths_pp - sorted_dists) < 5e-3

        # create mask for points with missing depth or that lie outside the image
        valid_mask = np.logical_and(np.logical_and(icds[:,0] >= 0, icds[:,0] < self._webcam.color_intrinsics.width),
                                    np.logical_and(icds[:,1] >= 0, icds[:,1] < self._webcam.color_intrinsics.height))
        valid_mask = np.logical_and(valid_mask, sorted_depths != 0.0)
        valid_mask = np.logical_and(valid_mask, depth_delta_mask)
        valid_icds = icds[valid_mask]

        # look up webcam colors at the valid projected pixels, scatter them into
        # a flat buffer, then undo the depth sort and restore the image shape
        colors = color_im.data[valid_icds[:,1], valid_icds[:,0], :]
        color_im_data = np.zeros((target_shape[0] * target_shape[1], target_shape[2]), dtype=np.uint8)
        color_im_data[valid_mask] = colors
        color_im_data[order] = color_im_data.copy()
        color_im_data = color_im_data.reshape(target_shape)
        return ColorImage(color_im_data, frame=self._frame)
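The last four lines use a scatter-then-unsort pattern that is easy to misread: colors are written only where valid_mask holds, and color_im_data[order] = color_im_data.copy() inverts the earlier depth sort. A tiny numpy demo of the same two steps:

import numpy as np

values = np.array([10, 20, 30, 40], dtype=np.uint8)  # per-point colors, in sorted order
mask = np.array([True, False, True, True])           # which sorted points are valid
order = np.array([2, 0, 3, 1])                       # permutation applied by the earlier sort

out = np.zeros(4, dtype=np.uint8)
out[mask] = values[mask]   # scatter only the valid entries
out[order] = out.copy()    # undo the sort: entry i returns to original index order[i]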

From BerkeleyAutomation/perception, perception/primesense_sensor.py:
        # unpack the raw buffer into flat per-channel arrays (r_array is built
        # the same way from raw_buf[i][0] just above this excerpt)
        g_array = np.array([raw_buf[i][1] for i in range(PrimesenseSensor.COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)])
        b_array = np.array([raw_buf[i][2] for i in range(PrimesenseSensor.COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)])

        # stack the channels into a uint8 HxWx3 image
        color_image = np.zeros([PrimesenseSensor.COLOR_IM_HEIGHT, PrimesenseSensor.COLOR_IM_WIDTH, 3])
        color_image[:,:,0] = r_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,
                                             PrimesenseSensor.COLOR_IM_WIDTH)
        color_image[:,:,1] = g_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,
                                             PrimesenseSensor.COLOR_IM_WIDTH)
        color_image[:,:,2] = b_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,
                                             PrimesenseSensor.COLOR_IM_WIDTH)
        # flip to the expected orientation (axis depends on the _flip_images setting)
        if self._flip_images:
            color_image = np.flipud(color_image.astype(np.uint8))
        else:
            color_image = np.fliplr(color_image.astype(np.uint8))
        return ColorImage(color_image, frame=self._frame)
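The three per-channel assignments can be collapsed with np.dstack, which stacks 2D arrays along a new third axis; a behavior-equivalent sketch using the same names as the excerpt:

import numpy as np

h = PrimesenseSensor.COLOR_IM_HEIGHT
w = PrimesenseSensor.COLOR_IM_WIDTH
color_image = np.dstack([r_array.reshape(h, w),
                         g_array.reshape(h, w),
                         b_array.reshape(h, w)]).astype(np.uint8)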