How to use the open3d.utility module in open3d

To help you get started, we’ve selected a few open3d.utility examples based on popular ways it is used in public projects.

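Before the project excerpts, here is a minimal, self-contained sketch of what open3d.utility is most often used for: its container classes (Vector3dVector, Vector3iVector, Vector2iVector, DoubleVector) convert NumPy arrays and Python lists into the types that Open3D geometries expect. The data below is made up purely for illustration.

import numpy as np
import open3d as o3d

# Made-up data: 100 random points with random colors in [0, 1].
xyz = np.random.rand(100, 3)
rgb = np.random.rand(100, 3)

pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(xyz)  # (N, 3) float array -> point container
pcd.colors = o3d.utility.Vector3dVector(rgb)  # (N, 3) float array -> color container

# Converting back to NumPy is just as direct.
xyz_back = np.asarray(pcd.points)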

github Ryan-Zirui-Wang / farthest-point-sampling / main_group.py
    fps_v0.fit()
    print("FPS sampling finished.")

    labels = fps_v0.group(radius)
    print("FPS grouping finished.")

    pcd_obj = o3d.geometry.PointCloud()
    pcd_obj.points = o3d.utility.Vector3dVector(pcd_xyz)

    pcd_color = np.zeros_like(pcd_xyz)
    for i, l in enumerate(labels):
        color = colormap[l]
        pcd_color[i] = color

    pcd_obj.colors = o3d.utility.Vector3dVector(pcd_color)

    return pcd_obj
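
The returned pcd_obj is an ordinary Open3D point cloud, so (although the excerpt stops here) it can be shown with the standard viewer:

o3d.visualization.draw_geometries([pcd_obj])
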
github Jiang-Muyun / Open3D-Semantic-KITTI-Vis / src / kitti_base.py
    def extract_points(self,voxel_size = 0.01):
        # filter in range points based on fov, x,y,z range setting
        combined = self.points_basic_filter(self.points)
        points = self.points[combined]
        label = self.sem_label[combined]

        pcd = open3d.geometry.PointCloud()
        pcd.points = open3d.utility.Vector3dVector(points[:,:3])

        # approximate_class must be set to true
        # see this issue for more info https://github.com/intel-isl/Open3D/issues/1085
        pcd, trace = pcd.voxel_down_sample_and_trace(voxel_size,self.min_bound,self.max_bound,approximate_class=True)
        to_index_org = np.max(trace, 1)

        pts = points[to_index_org]
        sem_label = label[to_index_org]
        self.pts = pts
        colors = np.array([self.sem_color_map[x] for x in sem_label])
        pcd.colors = open3d.utility.Vector3dVector(colors/255.0)

        return pcd,sem_label
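
The excerpt uses voxel_down_sample_and_trace because it needs the index trace to carry the semantic labels over to the downsampled points; note that the return signature of this call has varied between Open3D releases, so check the documentation of the version you have installed. When no trace is needed, plain voxel downsampling is simpler. A one-line sketch, assuming pcd is any populated open3d.geometry.PointCloud and 0.05 is an arbitrary voxel size:

downsampled = pcd.voxel_down_sample(0.05)  # keep one representative point per 5 cm voxel
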
github intel-isl / Open3D / examples / Python / Advanced / surface_reconstruction_ball_pivoting.py
    pcd = gt_mesh.sample_points_poisson_disk(2000)
    radii = [0.005, 0.01, 0.02, 0.04]
    yield pcd, radii

    gt_mesh = meshes.armadillo()
    gt_mesh.compute_vertex_normals()
    pcd = gt_mesh.sample_points_poisson_disk(2000)
    radii = [5, 10]
    yield pcd, radii


if __name__ == "__main__":
    for pcd, radii in problem_generator():
        o3d.visualization.draw_geometries([pcd])
        rec_mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(
            pcd, o3d.utility.DoubleVector(radii))
        o3d.visualization.draw_geometries([pcd, rec_mesh])
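
The open3d.utility piece here is DoubleVector, which wraps the Python list of ball radii for the ball-pivoting reconstruction; the radii are expressed in the units of the point cloud, which is presumably why the unscaled Armadillo example uses much larger values than the first one. The wrapper converts back to a plain list just as easily:

radii_vec = o3d.utility.DoubleVector([0.005, 0.01, 0.02, 0.04])
print(list(radii_vec))  # [0.005, 0.01, 0.02, 0.04]
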
github bertjiazheng / Structured3D / visualize_3d.py
    # extract cuboid lines
    cuboid_lines = []
    for cuboid in annos['cuboids']:
        for planeID in cuboid['planeID']:
            cuboid_lineID = np.where(np.array(annos['planeLineMatrix'][planeID]))[0].tolist()
            cuboid_lines.extend(cuboid_lineID)
    cuboid_lines = np.unique(cuboid_lines)
    cuboid_lines = np.setdiff1d(cuboid_lines, lines_holes)

    # visualize junctions
    connected_junctions = junctions[np.unique(junction_pairs)]
    connected_colors = np.repeat(colormap[0].reshape(1, 3), len(connected_junctions), axis=0)

    junction_set = open3d.geometry.PointCloud()
    junction_set.points = open3d.utility.Vector3dVector(connected_junctions)
    junction_set.colors = open3d.utility.Vector3dVector(connected_colors)

    # visualize line segments
    line_colors = np.repeat(colormap[5].reshape(1, 3), len(junction_pairs), axis=0)

    # color holes
    if len(lines_holes) != 0:
        line_colors[lines_holes] = colormap[6]

    # color cuboids
    if len(cuboid_lines) != 0:
        line_colors[cuboid_lines] = colormap[2]

    line_set = open3d.geometry.LineSet()
    line_set.points = open3d.utility.Vector3dVector(junctions)
    line_set.lines = open3d.utility.Vector2iVector(junction_pairs)
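
This example pairs the float and integer containers: Vector3dVector holds the 3D junction coordinates, while Vector2iVector holds index pairs that define the line segments of a LineSet. A stripped-down sketch of the same pattern with made-up coordinates:

import numpy as np
import open3d

points = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]], dtype=np.float64)
pairs = np.array([[0, 1], [1, 2]])  # each row indexes two points

line_set = open3d.geometry.LineSet()
line_set.points = open3d.utility.Vector3dVector(points)
line_set.lines = open3d.utility.Vector2iVector(pairs)
line_set.colors = open3d.utility.Vector3dVector(np.tile([1.0, 0.0, 0.0], (len(pairs), 1)))  # one color per line
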
github agisoft-llc / metashape-scripts / src / align_model_to_model.py
def to_point_cloud(vs):
    pc = o3d.geometry.PointCloud()
    pc.points = o3d.utility.Vector3dVector(vs.copy())
    return pc
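
A tiny helper like this is handy whenever a project keeps geometry as NumPy arrays. A hypothetical call, assuming mesh is an existing open3d.geometry.TriangleMesh:

pc = to_point_cloud(np.asarray(mesh.vertices))
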
github AoLyu / 3D-Object-Reconstruction-with-RealSense-D435 / ObjectRecognitionUsingPointNet / client.py
                    choose = np.array(range(len(obj_pt)))
                    choose = np.pad(choose,(0,npoints-len(choose)),'wrap')
                point_set = obj_pt[choose,:]
                # print(point_set.shape)
                str_encode = point_set.tostring()
                length = str.encode(str(len(str_encode)).ljust(16))
                s.send(length)
                s.send(str_encode)
                label_index = recvall(s,2)
                print('the object is',obj_list[int(label_index)])
            
            if len(obj_pt2) >0:
                print(len(obj_pt2))
                obj_pcd.paint_uniform_color(color_list[int(label_index)+1])

            pcd.points = o3d.utility.Vector3dVector(np.array(Pt))
            pcd.colors = o3d.utility.Vector3dVector(np.array(colors))

        else:                
            depth = o3d.geometry.Image(depth_image)
            color = o3d.geometry.Image(color_image)
            rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(color, depth, convert_rgb_to_intensity = False)
            pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd, pinhole_camera_intrinsic)

             
        # print('obj_num:',obj_num)

        depth_color_frame = rs.colorizer().colorize(depth_frame)
        depth_color_image = np.asanyarray(depth_color_frame.get_data())

        cv2.imshow('Color Stream', color_image1)
        cv2.imshow('Depth Stream', depth_color_image )
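
The else branch above builds the cloud directly from an RGB-D pair rather than from NumPy points. A self-contained sketch of that path with synthetic images and the default PrimeSense intrinsic (the image size and the constant 1 m depth are made up for illustration):

import numpy as np
import open3d as o3d

color_np = np.zeros((480, 640, 3), dtype=np.uint8)     # synthetic black color image
depth_np = np.full((480, 640), 1000, dtype=np.uint16)  # constant depth of 1000 mm

color = o3d.geometry.Image(color_np)
depth = o3d.geometry.Image(depth_np)
rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
    color, depth, convert_rgb_to_intensity=False)

intrinsic = o3d.camera.PinholeCameraIntrinsic(
    o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd, intrinsic)
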
github chrischoy / FCGF / lib / data_loaders.py
      xyz0 = self.apply_transform(xyz0, T0)
      xyz1 = self.apply_transform(xyz1, T1)
    else:
      trans = np.identity(4)

    # Voxelization
    sel0 = ME.utils.sparse_quantize(xyz0 / self.voxel_size, return_index=True)
    sel1 = ME.utils.sparse_quantize(xyz1 / self.voxel_size, return_index=True)

    # Make point clouds using voxelized points
    pcd0 = make_open3d_point_cloud(xyz0)
    pcd1 = make_open3d_point_cloud(xyz1)

    # Select features and points using the returned voxelized indices
    pcd0.colors = o3d.utility.Vector3dVector(color0[sel0])
    pcd1.colors = o3d.utility.Vector3dVector(color1[sel1])
    pcd0.points = o3d.utility.Vector3dVector(np.array(pcd0.points)[sel0])
    pcd1.points = o3d.utility.Vector3dVector(np.array(pcd1.points)[sel1])
    # Get matches
    matches = get_matching_indices(pcd0, pcd1, trans, matching_search_voxel_size)

    # Get features
    npts0 = len(pcd0.colors)
    npts1 = len(pcd1.colors)

    feats_train0, feats_train1 = [], []

    feats_train0.append(np.ones((npts0, 1)))
    feats_train1.append(np.ones((npts1, 1)))

    feats0 = np.hstack(feats_train0)
    feats1 = np.hstack(feats_train1)
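
make_open3d_point_cloud is a helper from the FCGF repository, not part of Open3D itself; judging from how it is used here, it presumably just wraps the Vector3dVector assignment. A guess at its shape, for readers who want to reproduce the pattern:

def make_open3d_point_cloud(xyz):
    # Hypothetical reimplementation; the real helper lives elsewhere in the FCGF repository.
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(xyz)
    return pcd
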
github bertjiazheng / Structured3D / visualize_3d.py
            lineIDs = np.where(np.array(annos['planeLineMatrix'][planeID]))[0].tolist()
            junction_pairs = [np.where(np.array(annos['lineJunctionMatrix'][lineID]))[0].tolist() for lineID in lineIDs]
            polygon = convert_lines_to_vertices(junction_pairs)
            vertices, faces = clip_polygon(polygon, vertices_holes, junctions, plane_anno)
            polygons.append([vertices, faces, planeID, plane_anno['normal'], plane_anno['type'], semantic['type']])

    plane_set = []
    for i, (vertices, faces, planeID, normal, plane_type, semantic_type) in enumerate(polygons):
        # ignore the room ceiling
        if plane_type == 'ceiling' and semantic_type not in ['door', 'window']:
            continue

        plane_vis = open3d.geometry.TriangleMesh()

        plane_vis.vertices = open3d.utility.Vector3dVector(vertices)
        plane_vis.triangles = open3d.utility.Vector3iVector(faces)

        if args.color == 'normal':
            if np.dot(normal, [1, 0, 0]) > eps:
                plane_vis.paint_uniform_color(colormap[0])
            elif np.dot(normal, [-1, 0, 0]) > eps:
                plane_vis.paint_uniform_color(colormap[1])
            elif np.dot(normal, [0, 1, 0]) > eps:
                plane_vis.paint_uniform_color(colormap[2])
            elif np.dot(normal, [0, -1, 0]) > eps:
                plane_vis.paint_uniform_color(colormap[3])
            elif np.dot(normal, [0, 0, 1]) > eps:
                plane_vis.paint_uniform_color(colormap[4])
            elif np.dot(normal, [0, 0, -1]) > eps:
                plane_vis.paint_uniform_color(colormap[5])
            else:
                plane_vis.paint_uniform_color(colormap[6])
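
For triangle meshes, Vector3dVector carries the float vertex positions while Vector3iVector carries the integer face indices, as in the plane visualization above. A minimal, self-contained sketch with a single made-up triangle:

import numpy as np
import open3d

vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=np.float64)
faces = np.array([[0, 1, 2]])  # one triangle over the three vertices

mesh = open3d.geometry.TriangleMesh()
mesh.vertices = open3d.utility.Vector3dVector(vertices)
mesh.triangles = open3d.utility.Vector3iVector(faces)
mesh.compute_vertex_normals()
mesh.paint_uniform_color([0.7, 0.1, 0.1])  # any RGB triple in [0, 1]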