def camera_intrinsics(self, T_camera_world, f, cx, cy):
"""Generate shifted camera intrinsics to simulate cropping.
"""
# form intrinsics
camera_intr = CameraIntrinsics(self.frame, fx=f, fy=f,
cx=cx, cy=cy, skew=0.0,
height=self.im_height, width=self.im_width)
return camera_intr
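
# A minimal usage sketch (not from the original source), assuming the
# Berkeley AUTOLab perception/autolab_core API: a CameraIntrinsics built as
# above can project camera-frame 3D points to pixel coordinates. All numeric
# values here are illustrative.
import numpy as np
from autolab_core import Point
from perception import CameraIntrinsics

camera_intr = CameraIntrinsics('camera', fx=525.0, fy=525.0,
                               cx=319.5, cy=239.5, skew=0.0,
                               height=480, width=640)
point_camera = Point(np.array([0.1, -0.05, 0.7]), frame='camera')
pixel = camera_intr.project(point_camera)   # 2D pixel coordinates of the point
print(pixel.data)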
"""Reset the view to a good initial state.
The view is initially along the positive x-axis a sufficient distance from the scene.
"""
# Compute scene bounds and scale
bounds = self._compute_scene_bounds()
centroid = np.mean(bounds, axis=0)
extents = np.diff(bounds, axis=0).reshape(-1)
scale = (extents ** 2).sum() ** .5
width, height = self._size
# Set up reasonable camera intrinsics
fov = np.pi / 6.0
fl = height / (2.0 * np.tan(fov / 2))
ci = CameraIntrinsics(
frame = 'camera',
fx = fl,
fy = fl,
cx = width/2.0,
cy = height/2.0,
skew=0.0,
height=height,
width=width
)
# Move centroid if needed
if self._target_object and self._target_object in self.scene.objects:
obj = self.scene.objects[self._target_object]
if isinstance(obj, InstancedSceneObject):
centroid = np.mean(obj.raw_pose_data[3::4,:3], axis=0)
else:
    # fall back to the object's bounding-box center
    centroid = np.mean(obj.mesh.bounds, axis=0)
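
# Quick standalone check (not from the original source) of the pinhole
# relation used above: the focal length in pixels and the vertical field of
# view satisfy fl = height / (2 * tan(fov / 2)). Values are illustrative.
import numpy as np

height = 480
fov = np.pi / 6.0                           # 30-degree vertical FOV
fl = height / (2.0 * np.tan(fov / 2.0))     # ~895.8 pixels
fov_back = 2.0 * np.arctan(height / (2.0 * fl))
assert np.isclose(fov, fov_back)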
# Compute scale factors from the old image size to the new one
x_scale = float(new_width) / self.intrinsics.width
y_scale = float(new_height) / self.intrinsics.height
# Compute new intrinsics parameters
center_x = float(self.intrinsics.width-1)/2
center_y = float(self.intrinsics.height-1)/2
orig_cx_diff = self.intrinsics.cx - center_x
orig_cy_diff = self.intrinsics.cy - center_y
scaled_center_x = float(new_width-1) / 2
scaled_center_y = float(new_height-1) / 2
cx = scaled_center_x + x_scale * orig_cx_diff
cy = scaled_center_y + y_scale * orig_cy_diff
fx = self.intrinsics.fx * x_scale
fy = self.intrinsics.fy * y_scale
# Create new intrinsics
scaled_intrinsics = CameraIntrinsics(frame=self.intrinsics.frame,
fx=fx, fy=fy, skew=self.intrinsics.skew,
cx=cx, cy=cy, height=new_height, width=new_width)
self.intrinsics = scaled_intrinsics
self._cur_depth_im = None
self._cur_normal_map = None
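
# Standalone sketch of the rescaling math above (not part of the original
# class; the function name and values are illustrative): scale the focal
# lengths and keep the principal point at the same relative offset from the
# image center.
def rescale_intrinsics(fx, fy, cx, cy, old_w, old_h, new_w, new_h):
    x_scale = float(new_w) / old_w
    y_scale = float(new_h) / old_h
    old_cx, old_cy = (old_w - 1) / 2.0, (old_h - 1) / 2.0
    new_cx, new_cy = (new_w - 1) / 2.0, (new_h - 1) / 2.0
    return (fx * x_scale,
            fy * y_scale,
            new_cx + x_scale * (cx - old_cx),
            new_cy + y_scale * (cy - old_cy))

# Halving a 2064x1544 sensor image, as in the snippet that follows:
print(rescale_intrinsics(2244.0, 2244.0, 1023.0, 768.0, 2064, 1544, 1032, 772))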
# Set up camera intrinsics for the sensor
width, height = 2064, 1544
focal_x, focal_y = 2244., 2244.
center_x, center_y = 1023., 768.
if size == 'small':
width = 1032
height = 772
focal_x = focal_x / 2
focal_y = focal_y / 2
center_x = center_x / 2
center_y = center_y / 2
self._camera_intr = CameraIntrinsics(self._frame, focal_x, focal_y,
center_x, center_y,
height=height, width=width)
def _get_camera_intr(self):
"""Gets camera intrinsics.
Returns
-------
CameraIntrinsics object with intrinsics for current camera
"""
if self.intrinsics_stream is None:
return None
info = rospy.wait_for_message(self.intrinsics_stream, CameraInfo, timeout=10)
if self.image_type == "raw":
mat = np.asarray(info.K).reshape((3,3))
elif self.image_type == "rectified":
mat = np.asarray(info.P).reshape((3,4))
return CameraIntrinsics(info.header.frame_id,
mat[0,0],
fy=mat[1,1],
cx=mat[0,2],
cy=mat[1,2],
height=info.height, width=info.width)
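
# Hedged one-shot alternative (not from the original source): read a single
# CameraInfo message directly. wait_for_message requires an initialized ROS
# node, and the topic name below is an assumption -- substitute your camera
# driver's CameraInfo topic.
import numpy as np
import rospy
from sensor_msgs.msg import CameraInfo
from perception import CameraIntrinsics

rospy.init_node('intrinsics_reader')
info = rospy.wait_for_message('/camera/color/camera_info', CameraInfo, timeout=10)
K = np.asarray(info.K).reshape((3, 3))
camera_intr = CameraIntrinsics(info.header.frame_id,
                               fx=K[0, 0], fy=K[1, 1],
                               cx=K[0, 2], cy=K[1, 2],
                               height=info.height, width=info.width)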
import sys
import IPython
from autolab_core import RigidTransform
from visualization import Visualizer2D as vis2d
from visualization import Visualizer3D as vis3d
from perception import DepthImage, CameraIntrinsics
KSIZE = 9
if __name__ == '__main__':
depth_im_filename = sys.argv[1]
camera_intr_filename = sys.argv[2]
camera_intr = CameraIntrinsics.load(camera_intr_filename)
depth_im = DepthImage.open(depth_im_filename, frame=camera_intr.frame)
depth_im = depth_im.inpaint()
point_cloud_im = camera_intr.deproject_to_image(depth_im)
normal_cloud_im = point_cloud_im.normal_cloud_im(ksize=KSIZE)
vis3d.figure()
vis3d.points(point_cloud_im.to_point_cloud(), scale=0.0025)
alpha = 0.025
subsample = 20
for i in range(0, point_cloud_im.height, subsample):
for j in range(0, point_cloud_im.width, subsample):
p = point_cloud_im[i,j]
n = normal_cloud_im[i,j]
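
# Possible continuation (not in the original excerpt): draw each subsampled
# normal as a short segment starting at its surface point. This assumes
# Visualizer3D exposes a plot3d(points, ...) helper and that indexing the
# point/normal images yields 3-vectors; adapt to your version of the
# visualization package if needed.
import numpy as np

for i in range(0, point_cloud_im.height, subsample):
    for j in range(0, point_cloud_im.width, subsample):
        p = point_cloud_im[i, j]
        n = normal_cloud_im[i, j]
        if np.linalg.norm(n) == 0:
            continue  # skip pixels without a valid normal
        segment = np.array([p, p + alpha * n])
        vis3d.plot3d(segment, color=(1, 0, 0), tube_radius=0.0005)
vis3d.show()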
def _set_camera_properties(self, msg):
""" Set the camera intrinsics from an info msg. """
focal_x = msg.K[0]
focal_y = msg.K[4]
center_x = msg.K[2]
center_y = msg.K[5]
im_height = msg.height
im_width = msg.width
self._camera_intr = CameraIntrinsics(self._frame, focal_x, focal_y,
center_x, center_y,
height=im_height,
width=im_width)
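
# Hedged wiring sketch (not from the original source): a callback like
# _set_camera_properties is normally registered on the camera driver's
# CameraInfo topic, e.g. from the sensor's start() method. The topic name is
# an assumption; rospy and CameraInfo are assumed to be imported.
def start(self):
    """Start the sensor by subscribing to its CameraInfo stream."""
    self._camera_intr = None
    self._info_sub = rospy.Subscriber('/camera/color/camera_info',
                                      CameraInfo,
                                      self._set_camera_properties)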
Parameters
----------
intrinsics : perception.CameraIntrinsics
    The intrinsic properties of the camera, from Berkeley AUTOLab's perception module.
T_camera_world : autolab_core.RigidTransform
A transform from camera to world coordinates that indicates
the camera's pose. The camera frame's x axis points right,
its y axis points down, and its z axis points towards
the scene (i.e. standard OpenCV coordinates).
z_near : float
The near-plane clipping distance.
z_far : float
The far-plane clipping distance.
"""
if not isinstance(intrinsics, CameraIntrinsics):
raise ValueError('intrinsics must be an object of type CameraIntrinsics')
self._intrinsics = intrinsics
self.T_camera_world = T_camera_world
self._z_near = z_near
self._z_far = z_far
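
# Hedged construction sketch: the enclosing class is not shown in the excerpt
# above, so the name VirtualCamera is an assumption, and the pose and clipping
# values are illustrative. The rotation's columns give the camera axes in
# world coordinates (x right, y down, z toward the scene, looking straight
# down from 0.8 m above the origin).
import numpy as np
from autolab_core import RigidTransform
from perception import CameraIntrinsics

ci = CameraIntrinsics('camera', fx=525.0, fy=525.0, cx=319.5, cy=239.5,
                      skew=0.0, height=480, width=640)
T_camera_world = RigidTransform(rotation=np.array([[1.0,  0.0,  0.0],
                                                   [0.0, -1.0,  0.0],
                                                   [0.0,  0.0, -1.0]]),
                                translation=np.array([0.0, 0.0, 0.8]),
                                from_frame='camera', to_frame='world')
camera = VirtualCamera(ci, T_camera_world, z_near=0.05, z_far=10.0)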