How to use the perception.RgbdSensorFactory.sensor function in Perception

To help you get started, we've selected a few Perception examples based on popular ways RgbdSensorFactory.sensor is used in public projects.
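Every example on this page follows the same pattern: build a configuration dict that includes at least a 'frame' key, pass a sensor type string together with the dict to RgbdSensorFactory.sensor, and start the sensor before reading frames. A minimal sketch of that flow, reusing the 'realsense' type string and config keys from the first example below (the cam_id value is a placeholder; the example discovers it at runtime):

from perception import RgbdSensorFactory

cfg = {}
cfg['cam_id'] = 0                    # placeholder; discover the real id at runtime
cfg['filter_depth'] = True
cfg['frame'] = 'realsense_overhead'

sensor = RgbdSensorFactory.sensor('realsense', cfg)  # the type string selects the driver
sensor.start()                                       # open the device before reading
color_im, depth_im, _ = sensor.frames()              # color, depth, and optional IR images
sensor.stop()                                        # release the device when done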


github BerkeleyAutomation / perception / tools / test_realsense.py
import matplotlib.pyplot as plt

from perception import RgbdSensorFactory


def main():
    # discover_cams() is a helper defined earlier in this script.
    ids = discover_cams()
    assert ids, "[!] No camera detected."

    cfg = {}
    cfg['cam_id'] = ids[0]
    cfg['filter_depth'] = True
    cfg['frame'] = 'realsense_overhead'

    sensor = RgbdSensorFactory.sensor('realsense', cfg)
    sensor.start()
    camera_intr = sensor.color_intrinsics
    color_im, depth_im, _ = sensor.frames()
    sensor.stop()

    print("intrinsics matrix: {}".format(camera_intr.K))

    fig, axes = plt.subplots(1, 2)
    for ax, im in zip(axes, [color_im.data, depth_im.data]):
        ax.imshow(im)
        ax.axis('off')
    plt.show()
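Since frames() talks to hardware, it can fail mid-capture; wrapping the read in try/finally keeps the device from being left open. A defensive variant of the capture above (a sketch, not the repository's code):

sensor = RgbdSensorFactory.sensor('realsense', cfg)
sensor.start()
try:
    color_im, depth_im, _ = sensor.frames()
finally:
    sensor.stop()  # release the camera even if frames() raises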
github BerkeleyAutomation / perception / tools / capture_test_images.py
        # inside a loop over (sensor_name, sensor_config) pairs
        logger.info('Capturing images from sensor %s' %(sensor_name))
        save_dir = os.path.join(output_dir, sensor_name)
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)        
        
        # read params
        sensor_type = sensor_config['type']
        sensor_frame = sensor_config['frame']
        
        # read camera calib
        tf_filename = '%s_to_world.tf' %(sensor_frame)
        T_camera_world = RigidTransform.load(os.path.join(config['calib_dir'], sensor_frame, tf_filename))
        T_camera_world.save(os.path.join(save_dir, tf_filename))

        # setup sensor
        sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)

        # start the sensor
        sensor.start()
        camera_intr = sensor.ir_intrinsics
        camera_intr.save(os.path.join(save_dir, '%s.intr' %(sensor.frame)))

        # get raw images
        for i in range(sensor_config['num_images']):
            logger.info('Capturing image %d' %(i))
            message = 'Hit ENTER when ready.'
            utils.keyboard_input(message=message)
            
            # read images
            color, depth, ir = sensor.frames()

            # save processed images
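The excerpt is cut off at the save step; judging by the capture_images.py example at the bottom of this page, it continues roughly like this (the filenames are carried over from that example, not from this file):

            color.save(os.path.join(save_dir, 'color_%d.png' %(i)))
            depth.save(os.path.join(save_dir, 'depth_%d.npy' %(i)))
            if ir is not None:
                ir.save(os.path.join(save_dir, 'ir_%d.npy' %(i)))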
github BerkeleyAutomation / perception / tools / register_webcam.py
    T_cb_world = RigidTransform.load(config['chessboard_tf'])

    # Get camera sensor object
    # .iteritems() is Python 2; on Python 3 use .items()
    for sensor_frame, sensor_data in config['sensors'].iteritems():
        logging.info('Registering {}'.format(sensor_frame))
        sensor_config = sensor_data['sensor_config']
        reg_cfg = sensor_data['registration_config'].copy()
        reg_cfg.update(config['chessboard_registration'])

        try:
            # Open sensor
            sensor_type = sensor_config['type']
            sensor_config['frame'] = sensor_frame
            logging.info('Creating sensor')
            sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)
            logging.info('Starting sensor')
            sensor.start()
            intrinsics = sensor.color_intrinsics
            logging.info('Sensor initialized')

            # Register sensor
            resize_factor = reg_cfg['color_image_rescale_factor']
            nx, ny = reg_cfg['corners_x'], reg_cfg['corners_y']
            sx, sy = reg_cfg['size_x'], reg_cfg['size_y']

            img, _, _ = sensor.frames()
            resized_color_im = img.resize(resize_factor)
            corner_px = resized_color_im.find_chessboard(sx=nx, sy=ny)
            if corner_px is None:
                logging.error('No chessboard detected in sensor {}! Check camera exposure settings'.format(sensor_frame))
                exit(1)
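The try block opened above is cut off before its handler; one plausible completion (a sketch, not the repository's code) stops the sensor on success and logs per-sensor failures:

            # ... registration continues with the detected corners ...
            sensor.stop()
        except Exception as e:
            logging.error('Failed to register {}: {}'.format(sensor_frame, e))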
github BerkeleyAutomation / perception / tools / capture_single_obj_dataset.py
    for sensor_name, sensor_config in sensor_configs.iteritems():
        # read params
        sensor_type = sensor_config['type']
        sensor_frame = sensor_config['frame']

        sensor_dir = os.path.join(output_dir, sensor_name)
        if not os.path.exists(sensor_dir):
            os.makedirs(sensor_dir)

        # read camera calib
        tf_filename = '%s_to_world.tf' %(sensor_frame)
        T_camera_world = RigidTransform.load(os.path.join(sensor_config['calib_dir'], sensor_frame, tf_filename))
        sensor_poses[sensor_name] = T_camera_world

        # setup sensor
        sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)
        sensors[sensor_name] = sensor

        # start the sensor
        sensor.start()
        camera_intr = sensor.ir_intrinsics
        camera_intr = camera_intr.resize(im_rescale_factor)
        camera_intrs[sensor_name] = camera_intr

        # render image of static workspace
        scene = Scene()
        camera = VirtualCamera(camera_intr, T_camera_world)
        scene.camera = camera
        for obj_key, scene_obj in workspace_objects.iteritems():
            scene.add_object(obj_key, scene_obj)
        workspace_ims[sensor_name] = scene.wrapped_render([RenderMode.DEPTH])[0]
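With the sensors, poses, and intrinsics collected into dicts, a capture pass over every sensor would look roughly like this (a sketch built only from calls shown on this page; it assumes depth images support the same resize method the register_webcam example uses on color images):

    for sensor_name, sensor in sensors.items():
        color, depth, _ = sensor.frames()
        color = color.resize(im_rescale_factor)  # match the resized intrinsics
        depth = depth.resize(im_rescale_factor)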
github BerkeleyAutomation / perception / tools / register_camera.py
    rospy.init_node('register_camera', anonymous=True)
    logging.getLogger().addHandler(rl.RosStreamHandler())

    # get camera sensor object
    for sensor_frame, sensor_data in config['sensors'].iteritems():
        logging.info('Registering %s' %(sensor_frame))
        sensor_config = sensor_data['sensor_config']
        registration_config = sensor_data['registration_config'].copy()
        registration_config.update(config['chessboard_registration'])
        
        # open sensor
        try:
            sensor_type = sensor_config['type']
            sensor_config['frame'] = sensor_frame
            logging.info('Creating sensor')
            sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)
            logging.info('Starting sensor')
            sensor.start()
            ir_intrinsics = sensor.ir_intrinsics
            logging.info('Sensor initialized')

            # register
            reg_result = CameraChessboardRegistration.register(sensor, registration_config)
            T_camera_world = T_cb_world * reg_result.T_camera_cb

            logging.info('Final Result for sensor %s' %(sensor_frame))
            logging.info('Rotation: ')
            logging.info(T_camera_world.rotation)
            logging.info('Quaternion: ')
            logging.info(T_camera_world.quaternion)
            logging.info('Translation: ')
            logging.info(T_camera_world.translation)
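The capture scripts on this page load these calibrations back with RigidTransform.load, so the registration result is presumably persisted the same way; a sketch reusing their filename convention:

            tf_filename = '%s_to_world.tf' %(sensor_frame)
            T_camera_world.save(os.path.join(config['calib_dir'], sensor_frame, tf_filename))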
github BerkeleyAutomation / perception / tools / colorize_phoxi.py
    parser = argparse.ArgumentParser(description='Register a webcam to the Photoneo PhoXi')
    parser.add_argument('--config_filename', type=str, default='cfg/tools/colorize_phoxi.yaml', help='filename of a YAML configuration for registration')
    args = parser.parse_args()
    config_filename = args.config_filename
    config = YamlConfig(config_filename)

    sensor_data = config['sensors']
    phoxi_config = sensor_data['phoxi']
    phoxi_config['frame'] = 'phoxi'

    # Initialize ROS node
    rospy.init_node('colorize_phoxi', anonymous=True)
    logging.getLogger().addHandler(rl.RosStreamHandler())

    # Get PhoXi sensor set up
    phoxi = RgbdSensorFactory.sensor(phoxi_config['type'], phoxi_config)
    phoxi.start()

    # Capture PhoXi and webcam images
    phoxi_color_im, phoxi_depth_im, _ = phoxi.frames()

    # vis2d.figure()
    # vis2d.subplot(121)
    # vis2d.imshow(phoxi_color_im)
    # vis2d.subplot(122)
    # vis2d.imshow(phoxi_depth_im)
    # vis2d.show()

    phoxi_pc = phoxi.ir_intrinsics.deproject(phoxi_depth_im)
    colors = phoxi_color_im.data.reshape((phoxi_color_im.shape[0] * phoxi_color_im.shape[1], phoxi_color_im.shape[2])) / 255.0
    vis3d.figure()
    vis3d.points(phoxi_pc.data.T[::3], color=colors[::3], scale=0.001)
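The excerpt ends just before the point cloud is displayed; presumably it finishes with a blocking show call and releases the sensor (the vis3d.show call is an assumption, mirroring the commented-out vis2d.show above):

    vis3d.show()   # assumed: display the colored point cloud
    phoxi.stop()   # release the PhoXi, matching sensor.stop() elsewhere on this page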
github BerkeleyAutomation / perception / tools / capture_dataset.py
    sensors = {}
    sensor_poses = {}
    camera_intrs = {}
    workspace_ims = {}
    for sensor_name, sensor_config in sensor_configs.iteritems():
        # read params
        sensor_type = sensor_config['type']
        sensor_frame = sensor_config['frame']
        
        # read camera calib
        tf_filename = '%s_to_world.tf' %(sensor_frame)
        T_camera_world = RigidTransform.load(os.path.join(sensor_config['calib_dir'], sensor_frame, tf_filename))
        sensor_poses[sensor_name] = T_camera_world
        
        # setup sensor
        sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)
        sensors[sensor_name] = sensor
        
        # start the sensor
        sensor.start()
        camera_intr = sensor.ir_intrinsics
        camera_intr = camera_intr.resize(im_rescale_factor)
        camera_intrs[sensor_name] = camera_intr        
        
        # render image of static workspace
        scene = Scene()
        camera = VirtualCamera(camera_intr, T_camera_world)
        scene.camera = camera
        for obj_key, scene_obj in workspace_objects.iteritems():
            scene.add_object(obj_key, scene_obj)
        workspace_ims[sensor_name] = scene.wrapped_render([RenderMode.DEPTH])[0]
github BerkeleyAutomation / perception / tools / capture_images.py
        print('Capturing images from sensor %s' %(sensor_name))
        save_dir = os.path.join(output_dir, sensor_name)
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)        
        
        # read params
        sensor_type = sensor_config['type']
        sensor_frame = sensor_config['frame']
        
        # read camera calib
        tf_filename = '%s_to_world.tf' %(sensor_frame)
        T_camera_world = RigidTransform.load(os.path.join(config['calib_dir'], sensor_frame, tf_filename))
        T_camera_world.save(os.path.join(save_dir, tf_filename))

        # setup sensor
        sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)

        # start the sensor
        sensor.start()
        camera_intr = sensor.ir_intrinsics
        camera_intr.save(os.path.join(save_dir, '%s.intr' %(sensor.frame)))

        # get raw images
        for i in range(sensor_config['num_images']):
            logging.info('Capturing image %d' %(i))

            # save raw images
            color, depth, ir = sensor.frames()
            color.save(os.path.join(save_dir, 'color_%d.png' %(i)))
            depth.save(os.path.join(save_dir, 'depth_%d.npy' %(i)))
            if ir is not None:
                ir.save(os.path.join(save_dir, 'ir_%d.npy' %(i)))
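The excerpt ends inside the capture loop; once all images are saved, the sensor would be released the way the other examples do (an assumed closing step, not shown in this excerpt):

        sensor.stop()  # release the device once capture for this sensor finishes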