How to use the perception.BinaryImage function in Perception

To help you get started, we’ve selected a few Perception examples based on how it is commonly used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

Source: BerkeleyAutomation/perception — tests/test_image.py (view on GitHub)
def test_binary_init(self):
        """Check BinaryImage construction.

        Verifies that a valid uint8 array is thresholded into binary data with
        the expected height/width/channels, and that invalid inputs (3-channel
        data, float32 dtype) cause the constructor to raise.

        NOTE(review): this excerpt appears truncated — the try/assert for the
        float32 dtype case is cut off after the last line below.
        """
        # valid data: random uint8 image in [0, 255]
        random_valid_data = (255.0 * np.random.rand(IM_HEIGHT, IM_WIDTH)).astype(np.uint8)
        # expected binarization: pixels above BINARY_THRESH become 255, others 0
        binary_data = 255 * (random_valid_data > BINARY_THRESH)
        im = BinaryImage(random_valid_data, threshold=BINARY_THRESH)
        self.assertEqual(im.height, IM_HEIGHT)
        self.assertEqual(im.width, IM_WIDTH)
        # a binary image is single-channel
        self.assertEqual(im.channels, 1)
        self.assertTrue(np.allclose(im.data, binary_data))

        # invalid channels: a 3-channel array must be rejected by the constructor
        random_data = np.random.rand(IM_HEIGHT, IM_WIDTH, 3).astype(np.uint8)
        caught_bad_channels = False
        try:
            im = BinaryImage(random_data)
        except:  # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt
            caught_bad_channels = True
        self.assertTrue(caught_bad_channels)

        # invalid type: float32 data should likewise be rejected
        random_data = np.random.rand(IM_HEIGHT, IM_WIDTH).astype(np.float32)
Source: BerkeleyAutomation/perception — tests/test_image.py (view on GitHub)
# NOTE(review): duplicated excerpt of test_binary_init — the enclosing `def`
# line is missing from this fragment and the first line below has lost its
# indentation during extraction.
self.assertTrue(np.allclose(im.data, binary_data))

        # invalid channels: a 3-channel array must be rejected by the constructor
        random_data = np.random.rand(IM_HEIGHT, IM_WIDTH, 3).astype(np.uint8)
        caught_bad_channels = False
        try:
            im = BinaryImage(random_data)
        except:  # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt
            caught_bad_channels = True
        self.assertTrue(caught_bad_channels)

        # invalid type: float32 data should likewise be rejected
        random_data = np.random.rand(IM_HEIGHT, IM_WIDTH).astype(np.float32)
        caught_bad_dtype = False
        try:
            im = BinaryImage(random_data)
        except:  # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt
            caught_bad_dtype = True
        self.assertTrue(caught_bad_dtype)
Source: BerkeleyAutomation/perception — tools/generate_siamese_dataset.py (view on GitHub)
# NOTE(review): fragment of a sampling loop from generate_siamese_dataset.py —
# the enclosing function signature is not visible here, and the first line
# below has lost its indentation during extraction.
cv2.fillConvexPoly(upper_mask, upper_polygon, 0)

        # keep the lower half only if enough mask pixels survive the cut
        if np.count_nonzero(lower_mask) > min_number_points:
            mask = BinaryImage(lower_mask)
            if preserve_scale:
                # crop while preserving the object's original scale
                img = normalize(image.mask_binary(mask), crop_size=crop_size)
            else:
                # rescale the object to fill the crop
                img = normalize_fill(image.mask_binary(mask), crop_size=crop_size)

            if rotate:
                img = rotate_image(img)

            samples.append(img)

        # same treatment for the upper half of the split mask
        if np.count_nonzero(upper_mask) > min_number_points:
            mask = BinaryImage(upper_mask)
            if preserve_scale:
                img = normalize(image.mask_binary(mask), crop_size=crop_size)
            else:
                img = normalize_fill(image.mask_binary(mask), crop_size=crop_size)

            if rotate:
                img = rotate_image(img)

            samples.append(img)

    # cap the output at the requested number of samples
    return samples[:n_samples]
Source: BerkeleyAutomation/perception — tools/generate_siamese_dataset.py (view on GitHub)
# NOTE(review): fragment from generate_siamese_dataset.py — the opening `try:`
# and the enclosing function are not visible here, and the first line below
# has lost its indentation during extraction.
lower_polygon = np.vstack((inter_points, lower_points))
            upper_polygon = np.vstack((inter_points, upper_points))
        except:  # NOTE(review): bare except + interactive debugger left in —
            # this will hang any non-interactive run; should be removed or
            # replaced with real error handling.
            import pdb
            pdb.set_trace()
        # sort each polygon's vertices around its centroid so fillConvexPoly
        # receives them in a consistent winding order
        lower_polygon = np.array(sorted(lower_polygon, key=get_key_function(np.mean(lower_polygon, axis=0))))
        upper_polygon = np.array(sorted(upper_polygon, key=get_key_function(np.mean(upper_polygon, axis=0))))

        # Create masks: start from the original mask and zero out one half
        lower_mask = orig_mask.data.copy()
        upper_mask = orig_mask.data.copy()
        cv2.fillConvexPoly(lower_mask, lower_polygon, 0)
        cv2.fillConvexPoly(upper_mask, upper_polygon, 0)

        # keep the lower half only if enough mask pixels survive the cut
        if np.count_nonzero(lower_mask) > min_number_points:
            mask = BinaryImage(lower_mask)
            if preserve_scale:
                # crop while preserving the object's original scale
                img = normalize(image.mask_binary(mask), crop_size=crop_size)
            else:
                # rescale the object to fill the crop
                img = normalize_fill(image.mask_binary(mask), crop_size=crop_size)

            if rotate:
                img = rotate_image(img)

            samples.append(img)

        # same treatment for the upper half of the split mask
        if np.count_nonzero(upper_mask) > min_number_points:
            mask = BinaryImage(upper_mask)
            if preserve_scale:
                img = normalize(image.mask_binary(mask), crop_size=crop_size)
            else:
                img = normalize_fill(image.mask_binary(mask), crop_size=crop_size)
Source: BerkeleyAutomation/perception — tools/capture_single_obj_dataset.py (view on GitHub)
# NOTE(review): fragment from capture_single_obj_dataset.py — the enclosing
# function signature is not visible here, and the first line below has lost
# its indentation during extraction.
all_points = point_cloud_world.data
    filtered_point_cloud_world = PointCloud(all_points,
                                            frame='world')  

    # compute the filtered depth image by reprojecting the world-frame
    # point cloud into the camera frame
    filtered_point_cloud_cam = T_camera_world.inverse() * filtered_point_cloud_world
    depth_im = camera_intr.project_to_image(filtered_point_cloud_cam)    

    # form segmask: binarize the segmented depth image, then drop pixels
    # that are invalid in the filtered depth image
    segmask = depth_im_seg.to_binary()
    valid_px_segmask = depth_im.invalid_pixel_mask().inverse()
    segmask = segmask.mask_binary(valid_px_segmask)
    segdata = segmask.data
    # morphological open (erode then dilate) to remove speckle noise
    segdata = cv2.erode(segdata, np.ones((10,10), np.uint8), iterations=1)
    segdata = cv2.dilate(segdata, np.ones((10,10), np.uint8), iterations=1)
    segmask = BinaryImage(ndimage.binary_fill_holes(segdata).astype(np.uint8) * 255)
    # NOTE(review): this immediately overwrites the fill-holes result above,
    # making that line dead code — likely one of the two assignments should
    # be removed. Verify which behavior is intended.
    segmask = BinaryImage(segdata)

    # inpaint missing regions in the color and depth images
    color_im = raw_color_im.inpaint(rescale_factor=inpaint_rescale_factor)
    depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)    
    return color_im, depth_im, segmask
Source: BerkeleyAutomation/perception — tools/capture_single_obj_dataset.py (view on GitHub)
# NOTE(review): duplicated excerpt of the capture_single_obj_dataset.py
# fragment above — the enclosing function signature is not visible here, and
# the first line below has lost its indentation during extraction.
filtered_point_cloud_world = PointCloud(all_points,
                                            frame='world')  

    # compute the filtered depth image by reprojecting the world-frame
    # point cloud into the camera frame
    filtered_point_cloud_cam = T_camera_world.inverse() * filtered_point_cloud_world
    depth_im = camera_intr.project_to_image(filtered_point_cloud_cam)    

    # form segmask: binarize the segmented depth image, then drop pixels
    # that are invalid in the filtered depth image
    segmask = depth_im_seg.to_binary()
    valid_px_segmask = depth_im.invalid_pixel_mask().inverse()
    segmask = segmask.mask_binary(valid_px_segmask)
    segdata = segmask.data
    # morphological open (erode then dilate) to remove speckle noise
    segdata = cv2.erode(segdata, np.ones((10,10), np.uint8), iterations=1)
    segdata = cv2.dilate(segdata, np.ones((10,10), np.uint8), iterations=1)
    segmask = BinaryImage(ndimage.binary_fill_holes(segdata).astype(np.uint8) * 255)
    # NOTE(review): this immediately overwrites the fill-holes result above,
    # making that line dead code — likely one of the two assignments should
    # be removed. Verify which behavior is intended.
    segmask = BinaryImage(segdata)

    # inpaint missing regions in the color and depth images
    color_im = raw_color_im.inpaint(rescale_factor=inpaint_rescale_factor)
    depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)    
    return color_im, depth_im, segmask