How to use the perception.RenderMode enum in Perception

To help you get started, we've selected a few perception.RenderMode examples based on popular ways it is used in public projects.

github BerkeleyAutomation / meshrender / examples / test_viewer.py
# Add the camera to the scene
scene.camera = camera

#====================================
# Render images
#====================================

# Render raw numpy arrays containing color and depth
color_image_raw, depth_image_raw = scene.render(render_color=True)

# Alternatively, just render a depth image
depth_image_raw = scene.render(render_color=False)

# Alternatively, collect wrapped images
wrapped_color, wrapped_depth, wrapped_segmask = scene.wrapped_render(
    [RenderMode.COLOR, RenderMode.DEPTH, RenderMode.SEGMASK]
)

wrapped_color.save('output/color.jpg')
# Colorize the float depth image before saving it in an 8-bit image format
wrapped_depth.to_color().save('output/depth.jpg')
wrapped_segmask.save('output/segmask.png')

# Test random variables
cfg = {
    'focal_length': {
        'min' : 520,
        'max' : 530,
    },
    'delta_optical_center': {
        'min' : 0.0,
        'max' : 0.0,
    },
    'radius': {
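
The excerpt above cuts off inside the random-variable config, but the rendering flow is complete. Pulling those pieces together, here is a minimal self-contained sketch, assuming the meshrender/perception APIs shown in these snippets; the mesh path and all camera values are placeholders.

import numpy as np
import trimesh
from autolab_core import RigidTransform
from perception import CameraIntrinsics, RenderMode
from meshrender import Scene, SceneObject, VirtualCamera

# Build a one-object scene; the mesh path is a placeholder
scene = Scene()
mesh = trimesh.load_mesh('data/example.obj')
scene.add_object('obj', SceneObject(mesh, RigidTransform(from_frame='obj', to_frame='world')))

# A simple pinhole camera one meter above the world origin (placeholder values)
camera_intr = CameraIntrinsics(frame='camera', fx=525.0, fy=525.0, cx=320.0, cy=240.0,
                               height=480, width=640)
T_camera_world = RigidTransform(translation=np.array([0.0, 0.0, 1.0]),
                                from_frame='camera', to_frame='world')
scene.camera = VirtualCamera(camera_intr, T_camera_world)

# wrapped_render returns one perception.Image subclass per requested mode,
# in the same order as the mode list
color_im, depth_im = scene.wrapped_render([RenderMode.COLOR, RenderMode.DEPTH])
color_im.save('output/color.png')
depth_im.to_color().save('output/depth.png')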
github BerkeleyAutomation / meshrender / meshrender / scene.py
        for mode in render_modes:
            if mode != RenderMode.DEPTH and mode != RenderMode.SCALED_DEPTH:
                render_color = True
                break

        color_im, depth_im = None, None
        if render_color:
            color_im, depth_im = self.render(render_color, front_and_back=front_and_back)
        else:
            depth_im = self.render(render_color)

        # For each specified render mode, add an Image object of the appropriate type
        images = []
        for render_mode in render_modes:
            # Then, convert them to an image wrapper class
            if render_mode == RenderMode.SEGMASK:
                images.append(BinaryImage((depth_im > 0.0).astype(np.uint8), frame=self.camera.intrinsics.frame, threshold=0))

            elif render_mode == RenderMode.COLOR:
                images.append(ColorImage(color_im, frame=self.camera.intrinsics.frame))

            elif render_mode == RenderMode.GRAYSCALE:
                images.append(ColorImage(color_im, frame=self.camera.intrinsics.frame).to_grayscale())

            elif render_mode == RenderMode.DEPTH:
                images.append(DepthImage(depth_im, frame=self.camera.intrinsics.frame))

            elif render_mode == RenderMode.SCALED_DEPTH:
                images.append(DepthImage(depth_im, frame=self.camera.intrinsics.frame).to_color())

            elif render_mode == RenderMode.RGBD:
                c = ColorImage(color_im, frame=self.camera.intrinsics.frame)
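
Note how the SEGMASK branch above is just a depth threshold: any pixel with positive rendered depth belongs to some object. A standalone sketch of that conversion on synthetic data (the frame name is a placeholder):

import numpy as np
from perception import BinaryImage, DepthImage

# Synthetic depth map with one "object" at 0.7 m
depth_data = np.zeros((480, 640), dtype=np.float32)
depth_data[100:200, 150:300] = 0.7

# Pixels with positive depth are foreground; threshold=0 keeps every nonzero pixel
segmask = BinaryImage((depth_data > 0.0).astype(np.uint8), frame='camera', threshold=0)
masked_depth = DepthImage(depth_data, frame='camera').mask_binary(segmask)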
github BerkeleyAutomation / perception / tools / capture_single_obj_dataset_stable_poses.py
        # capture
        sobj = SceneObject(obj.mesh, RigidTransform(from_frame='obj', to_frame='world'))
        s.add_object('obj', sobj)

        for j, stp in enumerate(stable_poses):
            logging.info('Test case %d of %d' %(j+1, len(stable_poses)))

            # Put object in stable pose, with (0,0,0) at target min point
            T_obj_world = stp.T_obj_table
            T_obj_world.to_frame = 'world'
            T_obj_world.translation = target_point
            sobj.T_obj_world = T_obj_world

            # Render target object
            depth_im, segmask = s.wrapped_render([RenderMode.DEPTH, RenderMode.SEGMASK])

            img_dir = os.path.join(sensor_dir, 'color_images')
            if not os.path.exists(img_dir):
                os.makedirs(img_dir)
            color_vis = depth_im.mask_binary(segmask).to_color()
            color_vis.save(os.path.join(img_dir, '{}_{:06d}.png'.format(obj_name, j)))

        s.remove_object('obj')

    del s
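
One subtlety above: assigning stp.T_obj_table to T_obj_world and then editing to_frame and translation mutates the transform in place, which also touches the stable pose unless the property returns a copy. A sketch of the same placement built as a fresh transform, where R_stp and target_point are hypothetical stand-ins for a stable-pose rotation and a workspace point:

import numpy as np
from autolab_core import RigidTransform

# Hypothetical stable-pose rotation and workspace target point
R_stp = np.eye(3)
target_point = np.array([0.5, 0.0, 0.0])

# Build the object-to-world pose directly instead of rewriting
# to_frame and translation on an existing transform
T_obj_world = RigidTransform(rotation=R_stp, translation=target_point,
                             from_frame='obj', to_frame='world')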
github BerkeleyAutomation / perception / tools / capture_single_obj_dataset.py
        sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)
        sensors[sensor_name] = sensor

        # start the sensor
        sensor.start()
        camera_intr = sensor.ir_intrinsics
        camera_intr = camera_intr.resize(im_rescale_factor)
        camera_intrs[sensor_name] = camera_intr

        # render image of static workspace
        scene = Scene()
        camera = VirtualCamera(camera_intr, T_camera_world)
        scene.camera = camera
        for obj_key, scene_obj in workspace_objects.items():
            scene.add_object(obj_key, scene_obj)
        workspace_ims[sensor_name] = scene.wrapped_render([RenderMode.DEPTH])[0]

        # save intrinsics and pose
        sensor_tf_filename = os.path.join(sensor_dir, 'T_{}_world.tf'.format(sensor_name))
        sensor_intr_filename = os.path.join(sensor_dir, '{}.intr'.format(sensor_name))
        T_camera_world.save(sensor_tf_filename)
        camera_intr.save(sensor_intr_filename)

        # save raw
        if save_raw:
            sensor_dir = os.path.join(output_dir, sensor_name)
            raw_dir = os.path.join(sensor_dir, 'raw')
            if not os.path.exists(raw_dir):
                os.mkdir(raw_dir)

            camera_intr_filename = os.path.join(raw_dir, 'camera_intr.intr')
            camera_intr.save(camera_intr_filename)
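
Saving the .intr and .tf files as above makes each capture self-describing, and both classes can read the files back. A minimal round-trip sketch, assuming the save/load APIs of perception.CameraIntrinsics and autolab_core.RigidTransform; every value and path below is a placeholder.

import numpy as np
from autolab_core import RigidTransform
from perception import CameraIntrinsics

# Placeholder calibration (in the snippet these come from the sensor driver)
camera_intr = CameraIntrinsics(frame='primesense', fx=525.0, fy=525.0,
                               cx=320.0, cy=240.0, height=480, width=640)
T_camera_world = RigidTransform(translation=np.array([0.0, 0.0, 1.0]),
                                from_frame='primesense', to_frame='world')

# Persist calibration next to the captured data
camera_intr.save('output/primesense.intr')
T_camera_world.save('output/T_primesense_world.tf')

# Reload later without touching the sensor
camera_intr = CameraIntrinsics.load('output/primesense.intr')
T_camera_world = RigidTransform.load('output/T_primesense_world.tf')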
github BerkeleyAutomation / meshrender / meshrender / random_variables.py
            if RenderMode.SEGMASK in self.render_modes:
                # Disable every object that isn't the target
                for obj in self.scene.objects.keys():
                    if obj != self.object_name:
                        self.scene.objects[obj].enabled = False

                # Compute the Seg Image
                seg_image = self.scene.wrapped_render([RenderMode.SEGMASK], front_and_back=front_and_back)[0]

                # Re-enable every object
                for obj in self.scene.objects.keys():
                    self.scene.objects[obj].enabled = True

            renders = {m: i for m, i in zip(self.render_modes, images)}
            if seg_image is not None:
                renders[RenderMode.SEGMASK] = seg_image

            samples.append(RenderSample(renders, camera_sample))

        self.scene.camera = orig_camera

        # not a list if only 1 sample
        if size == 1:
            return samples[0]
        return samples
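
One caveat in the disable/re-enable dance above: if wrapped_render raises partway through, every non-target object stays hidden. A defensive sketch of the same pattern with a finally block; the function name and arguments are hypothetical.

from perception import RenderMode

def render_target_segmask(scene, object_name, front_and_back=False):
    """Render a segmask for one object by hiding everything else.

    scene is a meshrender.Scene; object_name is a key in scene.objects.
    """
    hidden = [k for k in scene.objects if k != object_name]
    try:
        for k in hidden:
            scene.objects[k].enabled = False
        return scene.wrapped_render([RenderMode.SEGMASK],
                                    front_and_back=front_and_back)[0]
    finally:
        # Always restore visibility, even if rendering fails
        for k in hidden:
            scene.objects[k].enabled = True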
github BerkeleyAutomation / meshrender / meshrender / random_variables.py
            camera_sample = self.ws_rv.sample(size=1)

            # Compute the camera-to-world transform from the object-to-camera transform
            T_camera_world = camera_sample.camera_to_world_pose
            T_camera_world.translation += obj_xy

            # Set the scene's camera
            camera = VirtualCamera(camera_sample.camera_intr, T_camera_world)
            self.scene.camera = camera

            # Render the scene and grab the appropriate wrapped images
            images = self.scene.wrapped_render(self.render_modes, front_and_back=front_and_back)

            # If a segmask was requested, re-render the scene after disabling all other objects.
            seg_image = None
            if RenderMode.SEGMASK in self.render_modes:
                # Disable every object that isn't the target
                for obj in self.scene.objects.keys():
                    if obj != self.object_name:
                        self.scene.objects[obj].enabled = False

                # Compute the Seg Image
                seg_image = self.scene.wrapped_render([RenderMode.SEGMASK], front_and_back=front_and_back)[0]

                # Re-enable every object
                for obj in self.scene.objects.keys():
                    self.scene.objects[obj].enabled = True

            renders = {m: i for m, i in zip(self.render_modes, images)}
            if seg_image is not None:
                renders[RenderMode.SEGMASK] = seg_image
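
Stripped of the segmask special case, the loop above reduces to: sample a camera, point the scene at it, render, and record. A schematic version under the same assumptions (ws_rv yields samples with camera_intr and camera_to_world_pose attributes, as in the snippet):

from meshrender import VirtualCamera

def sample_renders(scene, ws_rv, render_modes, size=1, front_and_back=False):
    """Schematic of the snippet's camera-sampling loop."""
    orig_camera = scene.camera
    samples = []
    for _ in range(size):
        camera_sample = ws_rv.sample(size=1)
        scene.camera = VirtualCamera(camera_sample.camera_intr,
                                     camera_sample.camera_to_world_pose)
        images = scene.wrapped_render(render_modes, front_and_back=front_and_back)
        samples.append((camera_sample, dict(zip(render_modes, images))))
    scene.camera = orig_camera  # restore the caller's camera
    return samples[0] if size == 1 else samples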
github BerkeleyAutomation / perception / tools / capture_dataset.py
        sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)
        sensors[sensor_name] = sensor
        
        # start the sensor
        sensor.start()
        camera_intr = sensor.ir_intrinsics
        camera_intr = camera_intr.resize(im_rescale_factor)
        camera_intrs[sensor_name] = camera_intr        
        
        # render image of static workspace
        scene = Scene()
        camera = VirtualCamera(camera_intr, T_camera_world)
        scene.camera = camera
        for obj_key, scene_obj in workspace_objects.items():
            scene.add_object(obj_key, scene_obj)
        workspace_ims[sensor_name] = scene.wrapped_render([RenderMode.DEPTH])[0]

        # fix dataset config
        dataset_config['fields']['raw_color_ims']['height'] = camera_intr.height
        dataset_config['fields']['raw_color_ims']['width'] = camera_intr.width
        dataset_config['fields']['raw_depth_ims']['height'] = camera_intr.height
        dataset_config['fields']['raw_depth_ims']['width'] = camera_intr.width 
        dataset_config['fields']['color_ims']['height'] = camera_intr.height
        dataset_config['fields']['color_ims']['width'] = camera_intr.width 
        dataset_config['fields']['depth_ims']['height'] = camera_intr.height
        dataset_config['fields']['depth_ims']['width'] = camera_intr.width 
        dataset_config['fields']['segmasks']['height'] = camera_intr.height
        dataset_config['fields']['segmasks']['width'] = camera_intr.width 
       
        # open dataset
        sensor_dataset_filename = os.path.join(output_dir, sensor_name)
        datasets[sensor_name] = TensorDataset(sensor_dataset_filename,
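
The ten height/width assignments above differ only in the field name; a small helper keeps them in sync if image fields are added later. A sketch, assuming the dataset_config layout shown in the snippet:

def set_image_shapes(dataset_config, camera_intr):
    """Apply the camera's image shape to every image field in the config."""
    for field in ('raw_color_ims', 'raw_depth_ims',
                  'color_ims', 'depth_ims', 'segmasks'):
        dataset_config['fields'][field]['height'] = camera_intr.height
        dataset_config['fields'][field]['width'] = camera_intr.width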
github BerkeleyAutomation / meshrender / meshrender / scene.py
        render_modes : list of perception.RenderMode
            A list of the desired image types to return, from the perception
            module's RenderMode enum.

        front_and_back : bool
            If True, all surface normals are treated as if they're facing the camera.

        Returns
        -------
        list of perception.Image
            A list containing a corresponding Image sub-class for each type listed
            in render_modes.
        """

        # Render raw images
        render_color = False
        for mode in render_modes:
            if mode != RenderMode.DEPTH and mode != RenderMode.SCALED_DEPTH:
                render_color = True
                break

        color_im, depth_im = None, None
        if render_color:
            color_im, depth_im = self.render(render_color, front_and_back=front_and_back)
        else:
            depth_im = self.render(render_color)

        # For each specified render mode, add an Image object of the appropriate type
        images = []
        for render_mode in render_modes:
            # Then, convert them to an image wrapper class
            if render_mode == RenderMode.SEGMASK:
                images.append(BinaryImage((depth_im > 0.0).astype(np.uint8), frame=self.camera.intrinsics.frame, threshold=0))
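
Because the returned list preserves the order of render_modes, as the docstring above specifies, zipping the two gives a mode-keyed lookup. A short usage sketch, with scene standing for a meshrender.Scene configured as in the earlier examples:

from perception import RenderMode

modes = [RenderMode.COLOR, RenderMode.DEPTH, RenderMode.SEGMASK]
images = scene.wrapped_render(modes)

# The output list is ordered like the input list, so zip them into a dict
by_mode = dict(zip(modes, images))
depth_im = by_mode[RenderMode.DEPTH]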