How to use the open3d.registration.registration_icp function in open3d

To help you get started, we’ve selected a few open3d examples based on popular ways registration_icp is used in public projects.

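At its core, o3d.registration.registration_icp takes a source cloud, a target cloud, a maximum correspondence distance, an initial 4x4 transformation, and a transformation-estimation method (optionally followed by convergence criteria), and returns a RegistrationResult. The snippet below is a minimal, self-contained sketch assuming an Open3D release (0.12 or earlier) where the registration module still lives at o3d.registration; the file paths and threshold values are placeholders.

import numpy as np
import open3d as o3d

# Load the two clouds to align (paths are placeholders).
source = o3d.io.read_point_cloud("source.pcd")
target = o3d.io.read_point_cloud("target.pcd")

threshold = 0.02             # maximum correspondence distance
trans_init = np.identity(4)  # initial guess for the alignment

# Point-to-point ICP; the result carries the fitness, inlier RMSE and 4x4 transform.
reg = o3d.registration.registration_icp(
    source, target, threshold, trans_init,
    o3d.registration.TransformationEstimationPointToPoint(),
    o3d.registration.ICPConvergenceCriteria(max_iteration=50))
print(reg)
print(reg.transformation)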

github agisoft-llc / metashape-scripts / src / align_model_to_model.py
def icp_registration(source, target, voxel_size, transform_init, max_iterations):
    # See http://www.open3d.org/docs/release/tutorial/Basic/icp_registration.html#icp-registration
    threshold = 8.0 * voxel_size
    reg_p2p = o3d.registration.registration_icp(
        source, target, threshold, transform_init,
        o3d.registration.TransformationEstimationPointToPoint(),
        o3d.registration.ICPConvergenceCriteria(max_iteration=max_iterations))
    return reg_p2p
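
A hedged usage sketch for the helper above; it assumes numpy and open3d are imported as np and o3d, and the file names, voxel size, and iteration count are illustrative only.

# Illustrative call of icp_registration (values are assumptions, not from the project).
source = o3d.io.read_point_cloud("model_a.ply")
target = o3d.io.read_point_cloud("model_b.ply")
reg = icp_registration(source, target, voxel_size=0.05,
                       transform_init=np.identity(4), max_iterations=50)
print(reg.transformation)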
github intel-isl / Open3D / examples / Python / Advanced / colored_pointcloud_registration.py
if __name__ == "__main__":

    print("1. Load two point clouds and show initial pose")
    source = o3d.io.read_point_cloud("../../TestData/ColoredICP/frag_115.ply")
    target = o3d.io.read_point_cloud("../../TestData/ColoredICP/frag_116.ply")

    # draw initial alignment
    current_transformation = np.identity(4)
    draw_registration_result_original_color(source, target,
                                            current_transformation)

    # point to plane ICP
    current_transformation = np.identity(4)
    print("2. Point-to-plane ICP registration is applied on original point")
    print("   clouds to refine the alignment. Distance threshold 0.02.")
    result_icp = o3d.registration.registration_icp(
        source, target, 0.02, current_transformation,
        o3d.registration.TransformationEstimationPointToPlane())
    print(result_icp)
    draw_registration_result_original_color(source, target,
                                            result_icp.transformation)

    # colored pointcloud registration
    # This is an implementation of the following paper:
    # J. Park, Q.-Y. Zhou, V. Koltun,
    # Colored Point Cloud Registration Revisited, ICCV 2017
    voxel_radius = [0.04, 0.02, 0.01]
    max_iter = [50, 30, 14]
    current_transformation = np.identity(4)
    print("3. Colored point cloud registration")
    for scale in range(3):
        iter = max_iter[scale]
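
The excerpt stops inside the multi-scale loop. Based on the Open3D colored point cloud registration tutorial, each scale typically continues along the lines sketched below (a paraphrase, not the verbatim remainder of the file): downsample at the current radius, estimate normals, run registration_colored_icp, and carry the transform into the next scale.

        radius = voxel_radius[scale]

        # Downsample both clouds at the current scale.
        source_down = source.voxel_down_sample(radius)
        target_down = target.voxel_down_sample(radius)

        # Colored ICP needs normals on both downsampled clouds.
        source_down.estimate_normals(
            o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30))
        target_down.estimate_normals(
            o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30))

        # Joint geometric + photometric objective (Park et al., ICCV 2017).
        result_icp = o3d.registration.registration_colored_icp(
            source_down, target_down, radius, current_transformation,
            o3d.registration.ICPConvergenceCriteria(relative_fitness=1e-6,
                                                    relative_rmse=1e-6,
                                                    max_iteration=iter))
        current_transformation = result_icp.transformation
        print(result_icp)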
github intel-isl / Open3D / examples / Python / ReconstructionSystem / refine_registration.py
            result_icp = o3d.registration.registration_icp(
                source_down, target_down, distance_threshold,
                current_transformation,
                o3d.registration.TransformationEstimationPointToPoint(),
                o3d.registration.ICPConvergenceCriteria(max_iteration=iter))
        else:
            source_down.estimate_normals(
                o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size[scale] *
                                                     2.0,
                                                     max_nn=30))
            target_down.estimate_normals(
                o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size[scale] *
                                                     2.0,
                                                     max_nn=30))
            if config["icp_method"] == "point_to_plane":
                result_icp = o3d.registration.registration_icp(
                    source_down, target_down, distance_threshold,
                    current_transformation,
                    o3d.registration.TransformationEstimationPointToPlane(),
                    o3d.registration.ICPConvergenceCriteria(max_iteration=iter))
            if config["icp_method"] == "color":
                result_icp = o3d.registration.registration_colored_icp(
                    source_down, target_down, voxel_size[scale],
                    current_transformation,
                    o3d.registration.ICPConvergenceCriteria(
                        relative_fitness=1e-6,
                        relative_rmse=1e-6,
                        max_iteration=iter))
        current_transformation = result_icp.transformation
        if i == len(max_iter) - 1:
            information_matrix = o3d.registration.get_information_matrix_from_point_clouds(
                source_down, target_down, voxel_size[scale] * 1.4,
github intel-isl / Open3D / examples / Python / ReconstructionSystem / refine_registration.py
def multiscale_icp(source,
                   target,
                   voxel_size,
                   max_iter,
                   config,
                   init_transformation=np.identity(4)):
    current_transformation = init_transformation
    for i, scale in enumerate(range(len(max_iter))):  # multi-scale approach
        iter = max_iter[scale]
        distance_threshold = config["voxel_size"] * 1.4
        print("voxel_size %f" % voxel_size[scale])
        source_down = source.voxel_down_sample(voxel_size[scale])
        target_down = target.voxel_down_sample(voxel_size[scale])
        if config["icp_method"] == "point_to_point":
            result_icp = o3d.registration.registration_icp(
                source_down, target_down, distance_threshold,
                current_transformation,
                o3d.registration.TransformationEstimationPointToPoint(),
                o3d.registration.ICPConvergenceCriteria(max_iteration=iter))
        else:
            source_down.estimate_normals(
                o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size[scale] *
                                                     2.0,
                                                     max_nn=30))
            target_down.estimate_normals(
                o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size[scale] *
                                                     2.0,
                                                     max_nn=30))
            if config["icp_method"] == "point_to_plane":
                result_icp = o3d.registration.registration_icp(
                    source_down, target_down, distance_threshold,
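
This excerpt is truncated as well, but it already shows what multiscale_icp expects: per-scale voxel sizes and iteration counts, plus a config dict with at least a "voxel_size" and an "icp_method" key ("point_to_point", "point_to_plane", or "color"). A hedged sketch of a call, with every value illustrative:

# Illustrative inputs for multiscale_icp (all values are assumptions).
config = {
    "voxel_size": 0.05,              # base voxel size used for the distance threshold
    "icp_method": "point_to_plane",  # or "point_to_point" / "color"
}
voxel_size = [0.04, 0.02, 0.01]      # one entry per scale, coarse to fine
max_iter = [50, 30, 14]              # ICP iterations per scale
result = multiscale_icp(source, target, voxel_size, max_iter, config)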
github intel-isl / Open3D / examples / Python / Advanced / interactive_visualization.py
    assert (len(picked_id_source) >= 3 and len(picked_id_target) >= 3)
    assert (len(picked_id_source) == len(picked_id_target))
    corr = np.zeros((len(picked_id_source), 2))
    corr[:, 0] = picked_id_source
    corr[:, 1] = picked_id_target

    # estimate rough transformation using correspondences
    print("Compute a rough transform using the correspondences given by user")
    p2p = o3d.registration.TransformationEstimationPointToPoint()
    trans_init = p2p.compute_transformation(source, target,
                                            o3d.utility.Vector2iVector(corr))

    # point-to-point ICP for refinement
    print("Perform point-to-point ICP refinement")
    threshold = 0.03  # 3cm distance threshold
    reg_p2p = o3d.registration.registration_icp(
        source, target, threshold, trans_init,
        o3d.registration.TransformationEstimationPointToPoint())
    draw_registration_result(source, target, reg_p2p.transformation)
    print("")
github intel-isl / Open3D / examples / Python / Basic / icp_registration.py
if __name__ == "__main__":
    source = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd")
    target = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_1.pcd")
    threshold = 0.02
    trans_init = np.asarray([[0.862, 0.011, -0.507, 0.5],
                             [-0.139, 0.967, -0.215, 0.7],
                             [0.487, 0.255, 0.835, -1.4], [0.0, 0.0, 0.0, 1.0]])
    draw_registration_result(source, target, trans_init)
    print("Initial alignment")
    evaluation = o3d.registration.evaluate_registration(source, target,
                                                        threshold, trans_init)
    print(evaluation)

    print("Apply point-to-point ICP")
    reg_p2p = o3d.registration.registration_icp(
        source, target, threshold, trans_init,
        o3d.registration.TransformationEstimationPointToPoint())
    print(reg_p2p)
    print("Transformation is:")
    print(reg_p2p.transformation)
    print("")
    draw_registration_result(source, target, reg_p2p.transformation)

    print("Apply point-to-plane ICP")
    reg_p2l = o3d.registration.registration_icp(
        source, target, threshold, trans_init,
        o3d.registration.TransformationEstimationPointToPlane())
    print(reg_p2l)
    print("Transformation is:")
    print(reg_p2l.transformation)
    print("")
github bing-jian / gmmreg / expts / lounge_expts.py
    pcloud_transformed.points = Vector3dVector(transformed)
    pcloud_transformed.paint_uniform_color([0, 0, 1]) # blue
    if visualize:
        draw_geometries([pcloud_transformed, down_model, down_scene])
        matrix = np.loadtxt(os.path.join(TMP_PATH, 'final_rigid_matrix.txt'))
        transformed = np.dot(model, matrix[:3,:3].T) + matrix[:3, 3].T
        pcloud_transformed.points = Vector3dVector(transformed)
        pcloud_transformed.paint_uniform_color([0, 0, 1]) # blue
        draw_geometries([pcloud_transformed, pcloud_scene])

    if icp_refine:
        print("Apply point-to-point ICP")
        threshold = 0.02
        trans_init = matrix
        t1 = time.time()
        reg_p2p = registration_icp(
                down_model, down_scene, threshold, trans_init,
                TransformationEstimationPointToPoint())
        t2 = time.time()
        print("ICP Run time : %s seconds" % (t2 - t1))
        print(reg_p2p)
        print("Transformation by ICP is:")
        print(reg_p2p.transformation)
        print("")
        R = np.dot(gt[:3,:3].T, reg_p2p.transformation[:3,:3])
        theta = np.arccos((np.trace(R) - 1) * 0.5)
        print("pose difference (in degrees) after icp-refinement:", theta * 180 / np.pi)
        if visualize:
            draw_registration_result(pcloud_model, pcloud_scene, reg_p2p.transformation)
    return res, theta_before * 180 / np.pi, theta_after * 180 / np.pi