How to use the open3d.visualization module in open3d

To help you get started, we’ve selected a few open3d examples, based on popular ways it is used in public projects.
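
Before diving into the project excerpts, here is a minimal, self-contained sketch of the most common entry point, o3d.visualization.draw_geometries. It uses a randomly generated point cloud so it has no file dependencies; the point count and value range are arbitrary choices for illustration.

import numpy as np
import open3d as o3d

# Build a small synthetic point cloud (values are arbitrary).
points = np.random.uniform(-1.0, 1.0, size=(1000, 3))
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)

# Blocking call: opens a window and returns once it is closed.
o3d.visualization.draw_geometries([pcd])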


github gdlg / simple-waymo-open-dataset-reader / examples / visualise_pcl.py
if len(sys.argv) != 2:
    print("""Usage: python visual_pcl.py 
Visualise the LIDAR points.""")
    sys.exit(0)

# Open a .tfrecord
filename = sys.argv[1]
datafile = WaymoDataFileReader(filename)

# Generate a table of the offset of all frame records in the file.
table = datafile.get_record_table()

print("There are %d frames in this file." % len(table))

# Initialise the visualiser
vis = o3d.visualization.VisualizerWithKeyCallback()

vis.create_window()
pcd = o3d.geometry.PointCloud()

once = True

datafile_iter = iter(datafile)

def display_next_frame(event=None):
    global once

    frame = next(datafile_iter)

    # Get the top laser information
    laser_name = dataset_pb2.LaserName.TOP
    laser = utils.get(frame.lasers, laser_name)
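
The excerpt above is cut off inside display_next_frame; the rest of the original script is not shown here. As a rough sketch, a key callback of this kind is typically registered and driven like the following (the key binding and the closing calls are assumptions, not taken from the source):

# Sketch only: wire the callback into the visualiser (key choice is an assumption).
vis.register_key_callback(ord("N"), display_next_frame)
display_next_frame()   # load the first frame before entering the event loop
vis.add_geometry(pcd)
vis.run()
vis.destroy_window()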
github intel-isl / Open3D / examples / Python / Advanced / color_map_optimization.py
    option = o3d.color_map.ColorMapOptimizationOption()
    option.maximum_iteration = 0
    o3d.color_map.color_map_optimization(mesh, rgbd_images, camera, option)
    o3d.visualization.draw_geometries([mesh])
    o3d.io.write_triangle_mesh(
        os.path.join(path, "scene", "color_map_before_optimization.ply"), mesh)

    # Optimize texture and save the mesh as texture_mapped.ply
    # This is implementation of following paper
    # Q.-Y. Zhou and V. Koltun,
    # Color Map Optimization for 3D Reconstruction with Consumer Depth Cameras,
    # SIGGRAPH 2014
    option.maximum_iteration = 300
    option.non_rigid_camera_coordinate = True
    o3d.color_map.color_map_optimization(mesh, rgbd_images, camera, option)
    o3d.visualization.draw_geometries([mesh])
    o3d.io.write_triangle_mesh(
        os.path.join(path, "scene", "color_map_after_optimization.ply"), mesh)
github intel-isl / Open3D / examples / Python / Basic / mesh_properties.py
print("#" * 80)
    for name, mesh in mesh_generator(edge_cases=True):
        check_properties(name, mesh)

    # fix triangle orientation
    print("#" * 80)
    print("Fix triangle orientation")
    print("#" * 80)
    for name, mesh in mesh_generator(edge_cases=False):
        mesh.compute_vertex_normals()
        triangles = np.asarray(mesh.triangles)
        rnd_idx = np.random.rand(*triangles.shape).argsort(axis=1)
        rnd_idx[0] = (0, 1, 2)
        triangles = np.take_along_axis(triangles, rnd_idx, axis=1)
        mesh.triangles = o3d.utility.Vector3iVector(triangles)
        o3d.visualization.draw_geometries([mesh])
        success = mesh.orient_triangles()
        print("%s oriented: %s" % (name, "yes" if success else "no"))
        o3d.visualization.draw_geometries([mesh])

    # intersection tests
    print("#" * 80)
    print("Intersection tests")
    print("#" * 80)
    np.random.seed(30)
    bbox = o3d.geometry.TriangleMesh.create_box(20, 20, 20).translate(
        (-10, -10, -10))
    meshes = [o3d.geometry.TriangleMesh.create_box() for _ in range(20)]
    meshes.append(o3d.geometry.TriangleMesh.create_sphere())
    meshes.append(o3d.geometry.TriangleMesh.create_cone())
    meshes.append(o3d.geometry.TriangleMesh.create_torus())
    dirs = [np.random.uniform(-0.1, 0.1, size=(3,)) for _ in meshes]
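
The excerpt stops before the intersection checks themselves run. A hedged sketch of what a pairwise test looks like with the TriangleMesh API (the original example animates the meshes inside the bounding box, which is omitted here):

# Sketch: pairwise intersection test between the generated meshes.
for i in range(len(meshes)):
    for j in range(i + 1, len(meshes)):
        if meshes[i].is_intersecting(meshes[j]):
            print("mesh %d intersects mesh %d" % (i, j))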
github AoLyu / 3D-Object-Reconstruction-with-RealSense-D435 / Realtime_registrition_of_two_cam / Realtime.py
            else:
                continue
            m2 = int(round(corners2[53-p_2d][1]))
            n2 = int(round(corners2[53-p_2d][0]))
            if cam2_depth_array[m2,n2] > 0:
                x2,y2,z2 = rgbdTools.getPosition(cam2,cam2_depth_array,m2,n2)
            else:
                continue
            cam1_point.append([x1,y1,z1])
            cam2_point.append([x2,y2,z2])                     

    Transformation = registration.rigid_transform_3D(np.asarray(cam1_point),np.asarray(cam2_point))
    print(Transformation)

    geometrie_added = False
    vis = o3d.visualization.Visualizer()
    vis.create_window("Pointcloud")
    pointcloud = o3d.geometry.PointCloud()

    try:
        time_beigin = time.time()
        while True:
            time_start = time.time()
            pointcloud.clear()

            frames1 = pipeline1.wait_for_frames()
            frames2 = pipeline2.wait_for_frames()

            aligned_frames1 = align.process(frames1)
            aligned_frames2 = align.process(frames2)

            color_frame1 = aligned_frames1.get_color_frame()
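
The excerpt ends while the aligned frames are still being unpacked. The per-frame refresh that such a live loop relies on is the standard non-blocking Visualizer pattern; a sketch of that step, reusing the geometrie_added flag defined above to guard the one-time add_geometry call:

# Sketch: per-iteration refresh inside the while loop, after pointcloud is rebuilt.
if not geometrie_added:
    vis.add_geometry(pointcloud)
    geometrie_added = True
vis.update_geometry(pointcloud)  # older Open3D releases take no argument here
vis.poll_events()
vis.update_renderer()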
github intel-isl / Open3D / examples / Python / Basic / mesh_sampling.py
def mesh_generator():
    yield meshes.plane()
    yield o3d.geometry.TriangleMesh.create_sphere()
    yield meshes.bunny()
    yield meshes.armadillo()


if __name__ == "__main__":
    plane = meshes.plane()
    o3d.visualization.draw_geometries([plane])

    print('Uniform sampling can yield clusters of points on the surface')
    pcd = plane.sample_points_uniformly(number_of_points=500)
    o3d.visualization.draw_geometries([pcd])

    print(
        'Poisson disk sampling can evenly distribute the points on the surface.'
    )
    print('The method implements sample elimination.')
    print('Therefore, the method starts with a sampled point cloud and removes '
          'points to satisfy the sampling criterion.')
    print('The method supports two options to provide the initial point cloud')
    print('1) Default via the parameter init_factor: The method first samples '
          'uniformly a point cloud from the mesh with '
          'init_factor x number_of_points and uses this for the elimination')
    pcd = plane.sample_points_poisson_disk(number_of_points=500, init_factor=5)
    o3d.visualization.draw_geometries([pcd])

    print(
        '2) one can provide their own point cloud and pass it to the '
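
The excerpt is cut off mid-sentence, before option 2 is demonstrated. A sketch of what that call would look like, assuming the pcl keyword of sample_points_poisson_disk in this generation of Open3D:

# Option 2 (sketch): provide an initial point cloud instead of using init_factor.
pcd_init = plane.sample_points_uniformly(number_of_points=2500)
pcd = plane.sample_points_poisson_disk(number_of_points=500, pcl=pcd_init)
o3d.visualization.draw_geometries([pcd])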
github intel-isl / Open3D / examples / Python / Basic / visualization.py
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details

# examples/Python/Basic/visualization.py

import numpy as np
import open3d as o3d

if __name__ == "__main__":

    print("Load a ply point cloud, print it, and render it")
    pcd = o3d.io.read_point_cloud("../../TestData/fragment.ply")
    o3d.visualization.draw_geometries([pcd])

    print("Let's draw some primitives")
    mesh_box = o3d.geometry.TriangleMesh.create_box(width=1.0,
                                                    height=1.0,
                                                    depth=1.0)
    mesh_box.compute_vertex_normals()
    mesh_box.paint_uniform_color([0.9, 0.1, 0.1])
    mesh_sphere = o3d.geometry.TriangleMesh.create_sphere(radius=1.0)
    mesh_sphere.compute_vertex_normals()
    mesh_sphere.paint_uniform_color([0.1, 0.1, 0.7])
    mesh_cylinder = o3d.geometry.TriangleMesh.create_cylinder(radius=0.3,
                                                              height=4.0)
    mesh_cylinder.compute_vertex_normals()
    mesh_cylinder.paint_uniform_color([0.1, 0.9, 0.1])
    mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
        size=0.6, origin=[-2, -2, -2])
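
The excerpt stops right after the primitives are created; presumably the next step in the original script renders them in a single window, along the lines of:

# Draw all primitives together in one scene.
o3d.visualization.draw_geometries(
    [mesh_box, mesh_sphere, mesh_cylinder, mesh_frame])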
github intel-isl / Open3D / examples / Python / ReconstructionSystem / integrate_scene.py
            frame_id_abs = fragment_id * config['n_frames_per_fragment'] + frame_id
            print(
                "Fragment %03d / %03d :: integrate rgbd frame %d (%d of %d)." %
                (fragment_id, n_fragments - 1, frame_id_abs, frame_id + 1,
                 len(pose_graph_rgbd.nodes)))
            rgbd = read_rgbd_image(color_files[frame_id_abs],
                                   depth_files[frame_id_abs], False, config)
            pose = np.dot(pose_graph_fragment.nodes[fragment_id].pose,
                          pose_graph_rgbd.nodes[frame_id].pose)
            volume.integrate(rgbd, intrinsic, np.linalg.inv(pose))
            poses.append(pose)

    mesh = volume.extract_triangle_mesh()
    mesh.compute_vertex_normals()
    if config["debug_mode"]:
        o3d.visualization.draw_geometries([mesh])

    mesh_name = join(path_dataset, config["template_global_mesh"])
    o3d.io.write_triangle_mesh(mesh_name, mesh, False, True)

    traj_name = join(path_dataset, config["template_global_traj"])
    write_poses_to_log(traj_name, poses)
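
The volume being integrated above is created earlier in the original script and does not appear in the excerpt. For context, a typical scalable TSDF volume setup with the legacy o3d.integration API used in this generation of Open3D looks roughly like this (the voxel parameters are assumptions):

# Sketch: typical ScalableTSDFVolume construction (parameter values are assumptions).
volume = o3d.integration.ScalableTSDFVolume(
    voxel_length=3.0 / 512.0,
    sdf_trunc=0.04,
    color_type=o3d.integration.TSDFVolumeColorType.RGB8)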
github Jiang-Muyun / Open3D-Semantic-KITTI-Vis / src / kitti_base.py
    def __init__(self, cfg, new_config=False, width=800, height=800):
        self.vis = open3d.visualization.VisualizerWithKeyCallback()
        self.vis.create_window(width=width, height=height, left=100)
        self.vis.get_render_option().load_from_json('config/render_option.json')
        self.cfg = cfg

        self.new_config = new_config
        # Update the json file, otherwise there will be errors when the window size changes
        print('Load config file [%s]'%(self.cfg))
        data = json.load(open(self.cfg,'r'))
        data['intrinsic']['width'] = width
        data['intrinsic']['height'] = height
        data['intrinsic']['intrinsic_matrix'][6] = (width-1)/2
        data['intrinsic']['intrinsic_matrix'][7] = (height-1)/2
        json.dump(data, open(self.cfg,'w'),indent=4)
        self.param = open3d.io.read_pinhole_camera_parameters(self.cfg)

        self.pcd = open3d.geometry.PointCloud()
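
After loading the pinhole camera parameters, the class presumably applies them to the view once geometry has been added; a sketch of that step with the standard ViewControl API:

# Sketch: push the loaded intrinsics/extrinsics into the current view.
# (This only works when the window size matches the intrinsics, which is why the
# json file is rewritten with the actual width/height above.)
ctr = self.vis.get_view_control()
ctr.convert_from_pinhole_camera_parameters(self.param)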