if config is None:
    # Fall back to the default Azure Kinect sensor configuration when none is provided.
    config = o3d.io.AzureKinectSensorConfig()
self.align_depth_to_color = True
self.sensor = o3d.io.AzureKinectSensor(config)
if device < 0 or device > 255:
    print('Unsupported device id, fall back to 0')
    device = 0
if not self.sensor.connect(device):
    raise RuntimeError('Failed to connect to sensor')
config_dict = json.loads(open(configuration_path).read())
self.color_resolution = config_dict['color_resolution']
# Hard-coded color-camera intrinsics for each supported Azure Kinect color resolution.
if self.color_resolution == 'K4A_COLOR_RESOLUTION_720P':
    self.intrinsics = o3d.camera.PinholeCameraIntrinsic(1280, 720, 602.365, 602.393, 636.734, 363.352)
elif self.color_resolution == 'K4A_COLOR_RESOLUTION_1080P':
    self.intrinsics = o3d.camera.PinholeCameraIntrinsic(1920, 1080, 903.547, 903.589, 955.35, 545.277)
elif self.color_resolution == 'K4A_COLOR_RESOLUTION_1440P':
    self.intrinsics = o3d.camera.PinholeCameraIntrinsic(2560, 1440, 1204.73, 1204.79, 1273.97, 727.203)
elif self.color_resolution == 'K4A_COLOR_RESOLUTION_1536P':
    self.intrinsics = o3d.camera.PinholeCameraIntrinsic(2048, 1536, 963.783, 963.829, 1019.07, 773.662)
elif self.color_resolution == 'K4A_COLOR_RESOLUTION_2160P':
    self.intrinsics = o3d.camera.PinholeCameraIntrinsic(3840, 2160, 1807.09, 1807.18, 1911.2, 1091.05)
elif self.color_resolution == 'K4A_COLOR_RESOLUTION_3072P':
    self.intrinsics = o3d.camera.PinholeCameraIntrinsic(4096, 3072, 1927.57, 1927.66, 2038.65, 1547.82)
else:
    self.intrinsics = None
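For context, a minimal capture sketch built only from the documented Open3D Azure Kinect API; the device index 0, the 720P intrinsics, and the final visualization step are illustrative assumptions rather than part of the snippet above.

import open3d as o3d

config = o3d.io.AzureKinectSensorConfig()           # default sensor settings
sensor = o3d.io.AzureKinectSensor(config)
if not sensor.connect(0):                           # assumes a single device at index 0
    raise RuntimeError('Failed to connect to sensor')

intrinsics = o3d.camera.PinholeCameraIntrinsic(
    1280, 720, 602.365, 602.393, 636.734, 363.352)  # 720P values from the table above

rgbd = None
while rgbd is None:                                 # capture_frame returns None until a frame is ready
    rgbd = sensor.capture_frame(True)               # True = align depth to color

pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
    o3d.geometry.RGBDImage.create_from_color_and_depth(
        rgbd.color, rgbd.depth, convert_rgb_to_intensity=False),
    intrinsics)
o3d.visualization.draw_geometries([pcd])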
"../../TestData/RGBD/other_formats/TUM_color.png")
depth_raw = o3d.io.read_image(
"../../TestData/RGBD/other_formats/TUM_depth.png")
rgbd_image = o3d.geometry.RGBDImage.create_from_tum_format(
color_raw, depth_raw)
print(rgbd_image)
plt.subplot(1, 2, 1)
plt.title('TUM grayscale image')
plt.imshow(rgbd_image.color)
plt.subplot(1, 2, 2)
plt.title('TUM depth image')
plt.imshow(rgbd_image.depth)
plt.show()
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
    rgbd_image,
    o3d.camera.PinholeCameraIntrinsic(
        o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))
# Flip it, otherwise the pointcloud will be upside down
pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
o3d.visualization.draw_geometries([pcd])
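If the reconstructed TUM cloud needs to be saved or thinned out, the standard Open3D I/O and downsampling calls can follow; a small sketch, assuming Open3D 0.8+ (where voxel_down_sample is a PointCloud method) and an arbitrary output name and voxel size:

o3d.io.write_point_cloud("tum_cloud.ply", pcd)       # persist the cloud to disk
pcd_down = pcd.voxel_down_sample(voxel_size=0.005)   # keep one point per 5 mm voxel
o3d.visualization.draw_geometries([pcd_down])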
# Matching Redwood color frame (path assumed from the standard Open3D test data layout).
color_raw = o3d.io.read_image("../../TestData/RGBD/color/00000.jpg")
depth_raw = o3d.io.read_image("../../TestData/RGBD/depth/00000.png")
rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
    color_raw, depth_raw)
print(rgbd_image)
plt.subplot(1, 2, 1)
plt.title('Redwood grayscale image')
plt.imshow(rgbd_image.color)
plt.subplot(1, 2, 2)
plt.title('Redwood depth image')
plt.imshow(rgbd_image.depth)
plt.show()
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
    rgbd_image,
    o3d.camera.PinholeCameraIntrinsic(
        o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))
# Flip it, otherwise the pointcloud will be upside down
pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
o3d.visualization.draw_geometries([pcd])
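create_from_color_and_depth also accepts explicit depth-handling arguments; a variant of the Redwood conversion above with those parameters written out (the values shown match Open3D's documented defaults):

rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
    color_raw, depth_raw,
    depth_scale=1000.0,             # raw depth values are in millimetres
    depth_trunc=3.0,                # discard depth beyond 3 m
    convert_rgb_to_intensity=True)  # store color as single-channel intensity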
pipeline1 = rs.pipeline()
rs_config.enable_device(connect_device[0])
pipeline_profile1 = pipeline1.start(rs_config)
intr1 = pipeline_profile1.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
pinhole_camera1_intrinsic = o3d.camera.PinholeCameraIntrinsic(intr1.width, intr1.height, intr1.fx, intr1.fy, intr1.ppx, intr1.ppy)
cam1 = rgbdTools.Camera(intr1.fx, intr1.fy, intr1.ppx, intr1.ppy)
# print('cam1 intrinsics:')
# print(intr1.width, intr1.height, intr1.fx, intr1.fy, intr1.ppx, intr1.ppy)
pipeline2 = rs.pipeline()
rs_config.enable_device(connect_device[1])
pipeline_profile2 = pipeline2.start(rs_config)
intr2 = pipeline_profile2.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
pinhole_camera2_intrinsic = o3d.camera.PinholeCameraIntrinsic(intr2.width, intr2.height, intr2.fx, intr2.fy, intr2.ppx, intr2.ppy)
cam2 = rgbdTools.Camera(intr2.fx, intr2.fy, intr2.ppx, intr2.ppy)
# print('cam2 intrinsics:')
# print(intr2.width, intr2.height, intr2.fx, intr2.fy, intr2.ppx, intr2.ppy)
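# A hedged sketch of grabbing one aligned frame pair from each started pipeline;
# rs.align maps depth into the color frame. The variable names below (frames1,
# color1, depth1, ...) are illustrative and not part of the original script.
align = rs.align(rs.stream.color)

frames1 = align.process(pipeline1.wait_for_frames())
color1 = np.asanyarray(frames1.get_color_frame().get_data())
depth1 = np.asanyarray(frames1.get_depth_frame().get_data())

frames2 = align.process(pipeline2.wait_for_frames())
color2 = np.asanyarray(frames2.get_color_frame().get_data())
depth2 = np.asanyarray(frames2.get_depth_frame().get_data())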
print('Calculating Transformation Matrix:')
cam1_point = []
cam2_point = []
for view in range(chessBoard_num):
    cam1_rgb = cv2.imread('./output/cam1_color_' + str(view) + '.png')
    cam1_rgb_array = np.asanyarray(cam1_rgb)
    cam1_depth = cv2.imread('./output/cam1_depth_' + str(view) + '.png', -1)
    cam1_depth_array = np.asanyarray(cam1_depth)
    cam2_rgb = cv2.imread('./output/cam2_color_' + str(view) + '.png')
    cam2_rgb_array = np.asanyarray(cam2_rgb)
    cam2_depth = cv2.imread('./output/cam2_depth_' + str(view) + '.png', -1)
    cam2_depth_array = np.asanyarray(cam2_depth)
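The loop above only loads the saved per-view chessboard images; the correspondence extraction and transform computation are not shown in this excerpt. For reference, a self-contained sketch of the standard SVD-based (Kabsch) estimate of the rigid transform mapping cam2 points onto cam1 points, assuming cam1_point and cam2_point end up as N x 3 arrays of matched 3D corner positions; estimate_rigid_transform is a hypothetical helper, not part of the original code.

import numpy as np

def estimate_rigid_transform(src, dst):
    """Least-squares rigid transform T (4x4) with dst ~ R @ src + t, via SVD."""
    src = np.asarray(src, dtype=np.float64)
    dst = np.asarray(dst, dtype=np.float64)
    src_c = src - src.mean(axis=0)             # center both point sets
    dst_c = dst - dst.mean(axis=0)
    U, _, Vt = np.linalg.svd(src_c.T @ dst_c)  # 3x3 cross-covariance matrix
    R = Vt.T @ U.T
    if np.linalg.det(R) < 0:                   # fix an improper rotation (reflection)
        Vt[-1, :] *= -1
        R = Vt.T @ U.T
    t = dst.mean(axis=0) - R @ src.mean(axis=0)
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = t
    return T

# e.g. transformation = estimate_rigid_transform(np.array(cam2_point), np.array(cam1_point))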
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/Python/Advanced/camera_trajectory.py
import numpy as np
import open3d as o3d
if __name__ == "__main__":
    print("Testing camera in open3d ...")
    intrinsic = o3d.camera.PinholeCameraIntrinsic(
        o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
    print(intrinsic.intrinsic_matrix)
    print(o3d.camera.PinholeCameraIntrinsic())
    x = o3d.camera.PinholeCameraIntrinsic(640, 480, 525, 525, 320, 240)
    print(x)
    print(x.intrinsic_matrix)
    o3d.io.write_pinhole_camera_intrinsic("test.json", x)
    y = o3d.io.read_pinhole_camera_intrinsic("test.json")
    print(y)
    print(np.asarray(y.intrinsic_matrix))

    print("Read a trajectory and combine all the RGB-D images.")
    pcds = []
    trajectory = o3d.io.read_pinhole_camera_trajectory(
        "../../TestData/RGBD/trajectory.log")
    o3d.io.write_pinhole_camera_trajectory("test.json", trajectory)
    print(trajectory)
    print(trajectory.parameters[0].extrinsic)
    print(np.asarray(trajectory.parameters[0].extrinsic))
    for i in range(5):