How to use the open3d.PointCloud class in open3d

To help you get started, we’ve selected a few open3d.PointCloud examples based on popular ways it is used in public projects.

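These examples use the legacy Open3D Python API (roughly 0.7 and earlier), where PointCloud, Vector3dVector, and draw_geometries sit at the top level of the open3d module; in current releases the same functionality lives under open3d.geometry, open3d.utility, and open3d.visualization. As a starting point, here is a minimal sketch of the pattern every example below shares, assuming the legacy API and nothing more than a NumPy array of XYZ coordinates (the array here is random, purely for illustration):

import numpy as np
import open3d

# N x 3 array of XYZ coordinates
xyz = np.random.rand(1000, 3)

pcd = open3d.PointCloud()
pcd.points = open3d.Vector3dVector(xyz)   # assign the coordinates
pcd.colors = open3d.Vector3dVector(xyz)   # optional per-point RGB, floats in [0, 1]
open3d.draw_geometries([pcd])             # open an interactive viewer window

All of the examples follow this shape: construct an empty PointCloud, fill its points (and optionally colors or normals) through Vector3dVector, then hand the cloud to Open3D's processing or visualization functions.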

github F2Wang / ObjectDatasetTools / register_segmented.py
        aruco_center = get_aruco_center(cad,depth)
        # remove plane and anything underneath the plane from the pointcloud
        sol = findplane(cad,depth)
        distance = point_to_plane(depth,sol)
        sol = fitplane(sol,depth[(distance > -0.01) & (distance < 0.01)])
        # record the plane equation on the first frame for point projections
        if Filename == 0:
            plane_equation = sol
        distance = point_to_plane(depth,sol)
        mask[distance < 0.002] = 0

        # use statistical outlier remover to remove isolated noise from the scene
        distance2center = np.linalg.norm(depth - aruco_center, axis=2)
        mask[distance2center > MAX_RADIUS] = 0
        source = open3d.PointCloud()
        source.points = open3d.Vector3dVector(depth[mask>0])
        source.colors = open3d.Vector3dVector(cad[mask>0])

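        # statistical_outlier_removal returns the filtered cloud and the indices of the points that were kept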
        cl,ind = open3d.statistical_outlier_removal(source,nb_neighbors=500, std_ratio=0.5)

        if downsample == True:
            pcd_down = open3d.voxel_down_sample(cl, voxel_size = voxel_size)
            open3d.estimate_normals(pcd_down, open3d.KDTreeSearchParamHybrid(radius = 0.002 * 2, max_nn = 30))
            pcds.append(pcd_down)
        else:
            pcds.append(cl)
    return pcds
github xiangruhuang / Learning2Sync / src / data_processing / process_redwood.py
def draw(vertex):
    import open3d
    pcd = open3d.PointCloud()
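    # vertex appears to be a 3 x N array, so transpose it to the N x 3 layout Vector3dVector expects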
    pcd.points = open3d.Vector3dVector(vertex.T)
    open3d.draw_geometries([pcd])
github intel-isl / Open3D-PointNet2-Semantic3D / kitti_visualize.py
import time
import argparse

import open3d
import pykitti

if __name__ == "__main__":
    # Parser
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--kitti_root", default="", help="Checkpoint file", required=True
    )
    flags = parser.parse_args()

    basedir = flags.kitti_root
    date = "2011_09_26"
    drive = "0001"

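    # one point cloud and one visualizer window are created up front; the loop below overwrites pcd.points for each LiDAR sweep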
    pcd = open3d.PointCloud()
    vis = open3d.Visualizer()
    vis.create_window()
    vis.add_geometry(pcd)

    render_option = vis.get_render_option()
    render_option.point_size = 0.01

    data = pykitti.raw(basedir, date, drive)
    to_reset_view_point = True
    for points_with_intensity in data.velo:
        points = points_with_intensity[:, :3]
        pcd.points = open3d.Vector3dVector(points)

        vis.update_geometry()
        if to_reset_view_point:
            vis.reset_view_point(True)
            to_reset_view_point = False
        # refresh the visualizer so the new sweep is actually drawn
        vis.poll_events()
        vis.update_renderer()
github xiangruhuang / Learning2Sync / src / data_processing / process_redwood.py
    depth_paths, T, pose_paths = getData(args.shapeid)
    n = len(depth_paths)
    print('found %d clean depth images...' % n)
    intrinsic = np.array([[525.0,0,319.5],[0,525.0,239.5],[0,0,1]])
    np.random.seed(816)
    indices = np.random.permutation(n)
    print(indices[:100])
    #indices = sorted(indices)
    make_dirs(PATH_MAT.format(args.shapeid, 0))
    import open3d
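    # accumulate every posed depth frame into a single combined cloud, re-downsampling after each merge to keep it small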
    pcd_combined = open3d.PointCloud()
    for i, idx in enumerate(indices):
        # import ipdb; ipdb.set_trace()  # debugging breakpoint; disabled so the loop can run unattended
        print('%d / %d' % (i, len(indices)))
        mesh = Mesh.read(depth_paths[idx], mode='depth', intrinsic = intrinsic)
        pcd = open3d.PointCloud()
        pcd.points = open3d.Vector3dVector(mesh.vertex.T)
        pcd.transform(inverse(T[idx]))
        #pcd = open3d.voxel_down_sample(pcd, voxel_size=0.02)
        pcd_combined += pcd
        pcd_combined = open3d.voxel_down_sample(pcd_combined, voxel_size=0.02)
        sio.savemat(PATH_MAT.format(args.shapeid, i), mdict={
            'vertex': mesh.vertex, 
            'validIdx_rowmajor': mesh.validIdx, 
            'pose': T[idx], 
            'depth_path': depth_paths[idx], 
            'pose_path': pose_paths[idx]})
        if i <= 50 and i >= 40:
            pcd_combined_down = open3d.voxel_down_sample(pcd_combined, voxel_size=0.02)
            open3d.draw_geometries([pcd_combined_down])
github intel-isl / Open3D-PointNet2-Semantic3D / dataset / kitti_dataset.py
def __init__(self, points, box_size_x, box_size_y):
        self.box_size_x = box_size_x
        self.box_size_y = box_size_y

        # Crop the region of interest centered at origin
        # TODO: This is a special treatment, since we only care about the origin now
        min_z = -2
        max_z = 5
        min_x = -self.box_size_x / 2.0
        max_x = -min_x
        min_y = -self.box_size_y / 2.0
        max_y = -min_y
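        # wrap the raw points in a point cloud so crop_point_cloud can cut out the box defined above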
        pcd = open3d.PointCloud()
        pcd.points = open3d.Vector3dVector(points)
        region_pcd = open3d.crop_point_cloud(
            pcd, [min_x, min_y, min_z], [max_x, max_y, max_z]
        )
        self.points = np.asarray(region_pcd.points)

        # Load label. In pure test set, fill with zeros.
        self.labels = np.zeros(len(self.points)).astype(bool)

        # Load colors. If not use_color, fill with zeros.
        self.colors = np.zeros_like(self.points)

        # Sort according to x to speed up computation of boxes and z-boxes
        sort_idx = np.argsort(self.points[:, 0])
        self.points = self.points[sort_idx]
        self.labels = self.labels[sort_idx]
github AoLyu / 3D-Object-Reconstruction-with-RealSense-D435 / MutiView3DReconstruction / MutiViewReconstruction.py
path_to_rgbd = './bear/'

RealSense = rgbdTools.Camera(fx = 616.8676, fy = 617.0631, cx = 319.5701, cy = 233.0649)
TablePlane = keyPoints.Plane()
cloud_number = 8
voxel_size = 0.0001
Point2_list = []
Point3_list = []
Point4_list = []

for j in range(cloud_number):
    print('Processing No.',j,'view .')
    Point2 = o3d.PointCloud()
    Point3 = o3d.PointCloud()
    Point4 = o3d.PointCloud()
    Color2 = []
    Pt2 = []
    Pt3 = []
    Pt4 = []
    
    img = cv2.imread(path_to_rgbd + 'color/color_{}.png'.format(j))
    rgb = np.array(img)
    depth = cv2.imread(path_to_rgbd + 'depth/depth_{}.png'.format(j),-1)
    depth = np.asarray(depth)
    xl,yl,rl = keyPoints.getCircles(img)

    if j == 0:
        a,b,c,d = keyPoints.calculatePlane(RealSense,depth,xl,yl,rl)
        TablePlane.getParam(a,b,c,d)

    for ind,x in enumerate(xl):
github intel-isl / Open3D-PointNet2-Semantic3D / predict.py
            print(
                "Batch size: {}, time: {}".format(current_batch_size, time.time() - s)
            )

            # Save to collector for file output
            points_raw_collector.extend(points_raw)
            pd_labels_collector.extend(pd_labels)

            # Increment confusion matrix
            cm.increment_from_list(gt_labels.flatten(), pd_labels.flatten())

        # Save sparse point cloud and predicted labels
        file_prefix = os.path.basename(semantic_file_data.file_path_without_ext)

        points_raw_collector = np.array(points_raw_collector)
        pcd = open3d.PointCloud()
        pcd.points = open3d.Vector3dVector(points_raw_collector.reshape((-1, 3)))
        pcd_path = os.path.join(output_dir, file_prefix + ".pcd")
        open3d.write_point_cloud(pcd_path, pcd)
        print("Exported pcd to {}".format(pcd_path))

        pd_labels_collector = np.array(pd_labels_collector).astype(int)
        pd_labels_path = os.path.join(output_dir, file_prefix + ".labels")
        np.savetxt(pd_labels_path, pd_labels_collector.flatten(), fmt="%d")
        print("Exported labels to {}".format(pd_labels_path))

    cm.print_metrics()
github facebookresearch / pyrobot / robots / LoCoBot / locobot_navigation / orb_slam2_ros / scripts / orb_slam2_ros / pcdlib.py
        type=int)
    args = parser.parse_args()
    rospy.init_node('reconstruct_world',
                    anonymous=True)
    rgb_dir = os.path.join(args.img_dir, 'RGBImgs')
    depth_dir = os.path.join(args.img_dir, 'DepthImgs')
    pcd_processor = PointCloudProcessor(rgb_dir=rgb_dir,
                                        depth_dir=depth_dir,
                                        cfg_filename=args.cfg_filename,
                                        subsample_pixs=args.subsample_pixs,
                                        depth_threshold=(args.depth_min,
                                                         args.depth_max))
    time.sleep(2)
    points, colors = pcd_processor.get_current_pcd()
    if USE_OPEN3D:
        pcd = open3d.PointCloud()
        pcd.points = open3d.Vector3dVector(points)
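        # Open3D expects per-point colors as floats in [0, 1], hence the division by 255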
        pcd.colors = open3d.Vector3dVector(colors / 255.0)
        coord = open3d.create_mesh_coordinate_frame(1, [0, 0, 0])
        open3d.draw_geometries([pcd, coord])
github romaintha / pytorch_pointnet / infer.py
    points = points.cuda()
    points = points.unsqueeze(dim=0)
    model = model.eval()
    preds, feature_transform = model(points)
    if task == 'segmentation':
        preds = preds.view(-1, num_classes)
    preds = preds.data.max(1)[1]

    points = points.cpu().numpy().squeeze()
    preds = preds.cpu().numpy()

    if task == 'classification':
        print('Detected class: %s' % preds)
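        # pad 2-D inputs with a zero z-coordinate so Open3D can render them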
        if points.shape[1] == 2:
            points = np.hstack([points, np.zeros((points.shape[0], 1))])
        pcd = open3d.PointCloud()
        pcd.points = open3d.Vector3dVector(points)
        open3d.draw_geometries([pcd])
    elif task == 'segmentation':
        colors = [(random.randrange(256)/255, random.randrange(256)/255, random.randrange(256)/255)
                  for _ in range(num_classes)]
        rgb = [colors[p] for p in preds]
        rgb = np.array(rgb)

        pcd = open3d.PointCloud()
        pcd.points = open3d.Vector3dVector(points)
        pcd.colors = open3d.Vector3dVector(rgb)
        open3d.draw_geometries([pcd])