m2 = int(round(corners2[53-p_2d][1]))
n2 = int(round(corners2[53-p_2d][0]))
if cam2_depth_array[m2, n2] > 0:
    x2, y2, z2 = rgbdTools.getPosition(cam2, cam2_depth_array, m2, n2)
else:
    continue
cam1_point.append([x1,y1,z1])
cam2_point.append([x2,y2,z2])
Transformation = registration.rigid_transform_3D(np.asarray(cam1_point),np.asarray(cam2_point))
print(Transformation)
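# Hedged sketch, not part of the original snippet: assuming Transformation is a
# 4x4 homogeneous matrix, it can be applied to one camera's cloud to bring it
# into the other camera's frame. The mapping direction depends on
# rigid_transform_3D's convention, so verify it against the calibration points.
cam2_pcd = o3d.geometry.PointCloud()
cam2_pcd.points = o3d.utility.Vector3dVector(np.asarray(cam2_point))
cam2_pcd.transform(Transformation)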
geometrie_added = False
vis = o3d.visualization.Visualizer()
vis.create_window("Pointcloud")
pointcloud = o3d.geometry.PointCloud()
try:
    time_begin = time.time()
    while True:
        time_start = time.time()
        pointcloud.clear()
        frames1 = pipeline1.wait_for_frames()
        frames2 = pipeline2.wait_for_frames()
        aligned_frames1 = align.process(frames1)
        aligned_frames2 = align.process(frames2)
        color_frame1 = aligned_frames1.get_color_frame()
        depth_frame1 = aligned_frames1.get_depth_frame()
soutput = model(sinput)
timer.toc()
print(
    f'Time to process a room with {voxel_size}m voxel downsampling '
    f'containing {len(sinput)} voxels: {timer.min_time}'
)
# Get the per-point prediction from the network output
_, pred = soutput.F.max(1)
pred = pred.cpu().numpy()
# Map color
colors = np.array([SCANNET_COLOR_MAP[VALID_CLASS_IDS[l]] for l in pred])
# Create a point cloud file
pred_pcd = o3d.geometry.PointCloud()
coordinates = soutput.C.numpy()[:, :3] # last column is the batch index
pred_pcd.points = o3d.utility.Vector3dVector(coordinates * 0.02)
pred_pcd.colors = o3d.utility.Vector3dVector(colors / 255)
# Move the original point cloud
pcd = o3d.io.read_point_cloud(config.file_name)
pcd.points = o3d.utility.Vector3dVector(np.array(pcd.points) + np.array([0, 5, 0]))
# Visualize the input point cloud and the prediction
o3d.visualization.draw_geometries([pcd, pred_pcd])
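# Optional sketch, not in the original snippet: persist the colored prediction
# cloud to disk; "prediction.ply" is a hypothetical output path.
o3d.io.write_point_cloud("prediction.ply", pred_pcd)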
def _load_ssv_hc(args):
    ssv, feats, feat_labels, pt_type, radius = args
    vert_dc = dict()
    # TODO: replace by poisson disk sampling
    for k in feats:
        pcd = o3d.geometry.PointCloud()
        verts = ssv.load_mesh(k)[1].reshape(-1, 3)
        pcd.points = o3d.utility.Vector3dVector(verts)
        pcd = pcd.voxel_down_sample(voxel_size=pts_feat_ds_dict[pt_type][k])
        vert_dc[k] = np.asarray(pcd.points)
    sample_feats = np.concatenate([[feat_labels[ii]] * len(vert_dc[k])
                                   for ii, k in enumerate(feats)])
    sample_pts = np.concatenate([vert_dc[k] for k in feats])
    if not ssv.load_skeleton():
        raise ValueError(f"Couldn't find skeleton of {ssv}")
    nodes, edges = ssv.skeleton['nodes'] * ssv.scaling, ssv.skeleton['edges']
    hc = HybridCloud(nodes, edges, vertices=sample_pts, features=sample_feats)
    # cache verts2node
    _ = hc.verts2node
    if radius is not None:
        # add edges within radius
        kdt = spatial.cKDTree(hc.nodes)
def geometry_generator():
    mesh = o3d.geometry.TriangleMesh.create_sphere()
    verts = np.asarray(mesh.vertices)
    colors = np.random.uniform(0, 1, size=verts.shape)
    mesh.vertex_colors = o3d.utility.Vector3dVector(colors)
    mesh.compute_vertex_normals()
    pcl = o3d.geometry.PointCloud()
    pcl.points = mesh.vertices
    pcl.colors = mesh.vertex_colors
    pcl.normals = mesh.vertex_normals
    yield pcl
    yield o3d.geometry.LineSet.create_from_triangle_mesh(mesh)
    yield mesh
def to_pointcloud(arr):
    pc = o3.geometry.PointCloud()
    pc.points = o3.utility.Vector3dVector(arr.T)
    return pc
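# Usage sketch, not part of the original helper (assumes `import open3d as o3`
# and `import numpy as np`): the helper transposes its input, so it expects a
# 3xN coordinate array; `xyz` below is a hypothetical example.
xyz = np.random.rand(3, 1000)
pc = to_pointcloud(xyz)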
o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
# Read RGBD images
rgbd_images = []
depth_image_path = get_file_list(os.path.join(path, "depth/"),
                                 extension=".png")
color_image_path = get_file_list(os.path.join(path, "image/"),
                                 extension=".jpg")
assert (len(depth_image_path) == len(color_image_path))
for i in range(len(depth_image_path)):
    depth = o3d.io.read_image(os.path.join(depth_image_path[i]))
    color = o3d.io.read_image(os.path.join(color_image_path[i]))
    rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
        color, depth, convert_rgb_to_intensity=False)
    if debug_mode:
        pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
            rgbd_image,
            o3d.camera.PinholeCameraIntrinsic(
                o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))
        o3d.visualization.draw_geometries([pcd])
    rgbd_images.append(rgbd_image)
# Read camera pose and mesh
camera = o3d.io.read_pinhole_camera_trajectory(
    os.path.join(path, "scene/key.log"))
mesh = o3d.io.read_triangle_mesh(
    os.path.join(path, "scene", "integrated.ply"))
# Before full optimization, let's just visualize texture map
# with given geometry, RGBD images, and camera poses.
option = o3d.color_map.ColorMapOptimizationOption()
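# Hedged continuation, not in the original snippet: assuming the legacy
# o3d.color_map.color_map_optimization entry point that pairs with
# ColorMapOptimizationOption, zero iterations simply projects the RGBD colors
# onto the mesh using the given camera trajectory.
option.maximum_iteration = 0
o3d.color_map.color_map_optimization(mesh, rgbd_images, camera, option)
o3d.visualization.draw_geometries([mesh])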
# get camera intrinsics
intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
# print(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
RealSense = rgbdTools.Camera(fx = intr.fx, fy = intr.fy, cx = intr.ppx, cy = intr.ppy)
TablePlane = keyPoints.Plane()
# print(type(pinhole_camera_intrinsic))
cv2.namedWindow('Color Stream', cv2.WINDOW_AUTOSIZE)
# cv2.namedWindow('Depth Stream', cv2.WINDOW_AUTOSIZE)
time_start = time.time()
globalPointcloud = o3d.geometry.PointCloud()
Point2 = o3d.geometry.PointCloud()
Point3 = o3d.geometry.PointCloud()
Point4 = o3d.geometry.PointCloud()
prePoint3 = o3d.geometry.PointCloud()
prePoint4 = o3d.geometry.PointCloud()
temPoint3 = o3d.geometry.PointCloud()
color_l = [(255,0,0),(255,0,255),(0,0,255),(0,255,255),(255,255,0),(96,96,96),(1,97,0),(227,207,87),(176,224,230),
           (106,90,205),(56,94,15),(61,89,171),(51,161,201),(178,34,34),(138,43,226)]
pointcloud = o3d.geometry.PointCloud()
vis = o3d.visualization.Visualizer()
vis.create_window("Pointcloud",640,480)
geometrie_added = False
i = 0
print("Read SUN dataset")
color_raw = o3d.io.read_image(
    "../../TestData/RGBD/other_formats/SUN_color.jpg")
depth_raw = o3d.io.read_image(
    "../../TestData/RGBD/other_formats/SUN_depth.png")
rgbd_image = o3d.geometry.RGBDImage.create_from_sun_format(
    color_raw, depth_raw)
print(rgbd_image)
plt.subplot(1, 2, 1)
plt.title('SUN grayscale image')
plt.imshow(rgbd_image.color)
plt.subplot(1, 2, 2)
plt.title('SUN depth image')
plt.imshow(rgbd_image.depth)
plt.show()
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
    rgbd_image,
    o3d.camera.PinholeCameraIntrinsic(
        o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))
# Flip it, otherwise the pointcloud will be upside down
pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
o3d.visualization.draw_geometries([pcd])
def pointcloud_generator():
    pts = np.random.uniform(-30, 30, size=(int(1e5), 3))
    pcl = o3d.geometry.PointCloud()
    pcl.points = o3d.utility.Vector3dVector(pts)
    yield 'uniform', pcl
    yield 'moebius', o3d.geometry.TriangleMesh.create_moebius().sample_points_uniformly(int(1e5))
    yield 'bunny', meshes.bunny().scale(10).sample_points_uniformly(int(1e5))
    yield 'fragment', o3d.io.read_point_cloud('../../TestData/fragment.ply')
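# Usage sketch, not part of the original generator: visualize each generated
# sample in turn; the yielded name is only used for the window title here.
for name, pcl in pointcloud_generator():
    o3d.visualization.draw_geometries([pcl], window_name=name)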
lines_holes = np.unique(lines_holes)
# extract cuboid lines
cuboid_lines = []
for cuboid in annos['cuboids']:
    for planeID in cuboid['planeID']:
        cuboid_lineID = np.where(np.array(annos['planeLineMatrix'][planeID]))[0].tolist()
        cuboid_lines.extend(cuboid_lineID)
cuboid_lines = np.unique(cuboid_lines)
cuboid_lines = np.setdiff1d(cuboid_lines, lines_holes)
# visualize junctions
connected_junctions = junctions[np.unique(junction_pairs)]
connected_colors = np.repeat(colormap[0].reshape(1, 3), len(connected_junctions), axis=0)
junction_set = open3d.geometry.PointCloud()
junction_set.points = open3d.utility.Vector3dVector(connected_junctions)
junction_set.colors = open3d.utility.Vector3dVector(connected_colors)
# visualize line segments
line_colors = np.repeat(colormap[5].reshape(1, 3), len(junction_pairs), axis=0)
# color holes
if len(lines_holes) != 0:
    line_colors[lines_holes] = colormap[6]
# color cuboids
if len(cuboid_lines) != 0:
    line_colors[cuboid_lines] = colormap[2]
line_set = open3d.geometry.LineSet()
line_set.points = open3d.utility.Vector3dVector(junctions)
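# Hedged completion, since the original snippet is truncated here: attach the
# junction-pair indices and the per-line colors built above, then draw the
# junctions and line segments together.
line_set.lines = open3d.utility.Vector2iVector(junction_pairs)
line_set.colors = open3d.utility.Vector3dVector(line_colors)
open3d.visualization.draw_geometries([junction_set, line_set])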