How to use pyrender - common examples

To help you get started, we’ve selected a few pyrender examples based on popular ways it is used in public projects.

github mmatl / pyrender / tests / unit / test_offscreen.py
def test_offscreen_renderer(tmpdir):

    # Fuze trimesh
    fuze_trimesh = trimesh.load('examples/models/fuze.obj')
    fuze_mesh = Mesh.from_trimesh(fuze_trimesh)

    # Drill trimesh
    drill_trimesh = trimesh.load('examples/models/drill.obj')
    drill_mesh = Mesh.from_trimesh(drill_trimesh)
    drill_pose = np.eye(4)
    drill_pose[0,3] = 0.1
    drill_pose[2,3] = -np.min(drill_trimesh.vertices[:,2])

    # Wood trimesh
    wood_trimesh = trimesh.load('examples/models/wood.obj')
    wood_mesh = Mesh.from_trimesh(wood_trimesh)

    # Water bottle trimesh
    bottle_gltf = trimesh.load('examples/models/WaterBottle.glb')
    bottle_trimesh = bottle_gltf.geometry[list(bottle_gltf.geometry.keys())[0]]
    bottle_mesh = Mesh.from_trimesh(bottle_trimesh)
    bottle_pose = np.array([
        [1.0, 0.0, 0.0, 0.1],
        [0.0, 0.0, -1.0, -0.16],
        [0.0, 1.0, 0.0, 0.13],
        [0.0, 0.0, 0.0, 1.0],
    ])
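
The excerpt ends before the meshes are combined into a scene and rendered. As a rough sketch of how that usually looks with pyrender's documented Scene and OffscreenRenderer API (not the test's exact code), the remaining steps could be:

import numpy as np
from pyrender import Scene, PerspectiveCamera, DirectionalLight, OffscreenRenderer

scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02]))
scene.add(fuze_mesh)
scene.add(drill_mesh, pose=drill_pose)
scene.add(wood_mesh)
scene.add(bottle_mesh, pose=bottle_pose)
scene.add(DirectionalLight(color=np.ones(3), intensity=5.0))

cam_pose = np.eye(4)
cam_pose[:3, 3] = [0.0, 0.3, 0.6]      # arbitrary viewpoint, pulled back from the models
scene.add(PerspectiveCamera(yfov=np.pi / 3.0), pose=cam_pose)

r = OffscreenRenderer(viewport_width=640, viewport_height=480)
color, depth = r.render(scene)         # color: (H, W, 3) uint8, depth: (H, W) float32
r.delete()                             # release the GL context when finished
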
github xuelin-chen / pcl2pcl-gan-pub / pc2pc / tools / render_results / render_test_results.py
def render_big_gallery(results_dir, nb=30, pts_colors=[0.5,0.5,0.5], draw_text=False):
    '''
    pts_colors: [0,0,0]
    return np array of a big image
    '''

    cam = PerspectiveCamera(yfov=(YFOV))
    cam_pose = CAM_POSE
    
    point_l = PointLight(color=np.ones(3), intensity=POINT_LIGHT_INTENSITY)
    scene = Scene(bg_color=np.array([1,1,1,0]))

    # cam and light
    _ = scene.add(cam, pose=cam_pose)
    _ = scene.add(point_l, pose=cam_pose)

    input_ply_filenames = get_all_filnames(results_dir, nb)

    r = OffscreenRenderer(viewport_width=640*2, viewport_height=480*2, point_size=POINT_SIZE)
    pc_pose = PC_POSE

    images = []
    for _, input_pf in enumerate(input_ply_filenames):

        input_pc = read_ply_xyz(input_pf)

        colors = np.array(pts_colors)
        colors = np.tile(colors, (input_pc.shape[0], 1))

        input_pc_node = add_point_cloud_mesh_to_scene(input_pc, scene, pc_pose, colors)

        renderred_color, _ = r.render(scene)
        
        scene.remove_node(input_pc_node)
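
read_ply_xyz, get_all_filnames and add_point_cloud_mesh_to_scene are helper functions defined elsewhere in that repository. For orientation, a minimal stand-in for the point-cloud helper, built on pyrender's documented Mesh.from_points (the project's real implementation may differ), could look like:

from pyrender import Mesh, Node

def add_point_cloud_mesh_to_scene(point_cloud, scene, pose, colors):
    # point_cloud: (N, 3) xyz array; colors: (N, 3) per-point RGB
    mesh = Mesh.from_points(point_cloud, colors=colors)
    node = Node(mesh=mesh, matrix=pose)
    scene.add_node(node)
    return node

The on-screen size of each point is set by the point_size argument passed to OffscreenRenderer above, not by the point-cloud mesh itself.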

github mmatl / pyrender / tests / unit / test_offscreen.py
    boxv_trimesh.visual.vertex_colors = boxv_vertex_colors
    boxv_mesh = Mesh.from_trimesh(boxv_trimesh, smooth=False)
    boxf_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3))
    boxf_face_colors = np.random.uniform(size=boxf_trimesh.faces.shape)
    boxf_trimesh.visual.face_colors = boxf_face_colors
    # Instanced
    poses = np.tile(np.eye(4), (2,1,1))
    poses[0,:3,3] = np.array([-0.1, -0.10, 0.05])
    poses[1,:3,3] = np.array([-0.15, -0.10, 0.05])
    boxf_mesh = Mesh.from_trimesh(boxf_trimesh, poses=poses, smooth=False)

    points = trimesh.creation.icosphere(radius=0.05).vertices
    point_colors = np.random.uniform(size=points.shape)
    points_mesh = Mesh.from_points(points, colors=point_colors)

    direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)
    spot_l = SpotLight(color=np.ones(3), intensity=10.0,
                       innerConeAngle=np.pi / 16, outerConeAngle=np.pi / 6)

    cam = PerspectiveCamera(yfov=(np.pi / 3.0))
    cam_pose = np.array([
        [0.0, -np.sqrt(2) / 2, np.sqrt(2) / 2, 0.5],
        [1.0, 0.0, 0.0, 0.0],
        [0.0, np.sqrt(2) / 2, np.sqrt(2) / 2, 0.4],
        [0.0, 0.0, 0.0, 1.0]
    ])

    scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02]))

    fuze_node = Node(mesh=fuze_mesh, translation=np.array([
        0.1, 0.15, -np.min(fuze_trimesh.vertices[:,2])
    ]))
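
The test then attaches the remaining meshes, lights and camera and renders the scene offscreen, much as in the first example. One detail worth knowing when reproducing it: OffscreenRenderer.render accepts render flags, for example to keep an alpha channel or do a depth-only pass. A hedged sketch, assuming the scene and nodes built above:

from pyrender import OffscreenRenderer, RenderFlags

scene.add_node(fuze_node)
for mesh in (boxv_mesh, boxf_mesh, points_mesh):
    scene.add(mesh)
scene.add(direc_l, pose=cam_pose)
scene.add(spot_l, pose=cam_pose)
scene.add(cam, pose=cam_pose)

r = OffscreenRenderer(viewport_width=640, viewport_height=480)
color, depth = r.render(scene)                               # default RGB + depth pass
rgba, _ = r.render(scene, flags=RenderFlags.RGBA)            # keep the alpha channel
depth_only = r.render(scene, flags=RenderFlags.DEPTH_ONLY)   # returns only the depth buffer
r.delete()
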
github musyoku / gqn-dataset-renderer / opengl / gif_mnist_dice.py
    axis_orthogonal = fig.add_subplot(1, 2, 2)
    ims = []

    scene = build_scene(
        floor_textures,
        wall_textures,
        fix_light_position=args.fix_light_position)
    place_dice(
        scene,
        mnist_images,
        discrete_position=args.discrete_position,
        rotate_dice=args.rotate_dice)

    camera_distance = 5
    perspective_camera = PerspectiveCamera(yfov=math.pi / 4)
    perspective_camera_node = Node(
        camera=perspective_camera, translation=np.array([0, 1, 1]))
    orthographic_camera = OrthographicCamera(xmag=3, ymag=3)
    orthographic_camera_node = Node(camera=orthographic_camera)

    rad_step = math.pi / 36
    total_frames = int(math.pi * 2 / rad_step)
    current_rad = 0
    for _ in range(total_frames):
        scene.add_node(perspective_camera_node)

        # Perspective camera
        camera_xz = camera_distance * np.array(
            (math.sin(current_rad), math.cos(current_rad)))
        # Compute yaw and pitch
        camera_direction = np.array([camera_xz[0], 0, camera_xz[1]])
        yaw, pitch = compute_yaw_and_pitch(camera_direction)
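
The loop body is cut off here. After computing yaw and pitch, the script positions the perspective camera on the ring, renders the frame, and collects it for the GIF. A rough sketch of that remainder, where r is assumed to be an OffscreenRenderer created earlier, axis_perspective is assumed to be the other subplot, and the camera height of 1 is an assumption rather than the script's exact value:

perspective_camera_node.translation = np.array(
    [camera_xz[0], 1, camera_xz[1]])                       # assumed camera height
perspective_camera_node.rotation = genearte_camera_quaternion(yaw, pitch)

image = r.render(scene)[0]                                 # r: OffscreenRenderer (assumed)
im = axis_perspective.imshow(image, interpolation="none", animated=True)
ims.append([im])

scene.remove_node(perspective_camera_node)
current_rad += rad_step

After the loop, the collected frames can be written out with matplotlib.animation.ArtistAnimation(fig, ims, ...).save(...).
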
github musyoku / gqn-dataset-renderer / opengl / rooms_ring_camera.py
def build_scene(floor_textures, wall_textures, fix_light_position=False):
    scene = Scene(
        bg_color=np.array([153 / 255, 226 / 255, 249 / 255]),
        ambient_light=np.array([0.5, 0.5, 0.5, 1.0]))

    floor_trimesh = trimesh.load("{}/floor.obj".format(object_directory))
    mesh = Mesh.from_trimesh(floor_trimesh, smooth=False)
    node = Node(
        mesh=mesh,
        rotation=pyrender.quaternion.from_pitch(-math.pi / 2),
        translation=np.array([0, 0, 0]))
    texture_path = random.choice(floor_textures)
    set_random_texture(node, texture_path)
    scene.add_node(node)

    texture_path = random.choice(wall_textures)

    wall_trimesh = trimesh.load("{}/wall.obj".format(object_directory))
    mesh = Mesh.from_trimesh(wall_trimesh, smooth=False)
    node = Node(mesh=mesh, translation=np.array([0, 1.15, -3.5]))
    set_random_texture(node, texture_path)
    scene.add_node(node)

    mesh = Mesh.from_trimesh(wall_trimesh, smooth=False)
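
The excerpt breaks off while preparing the last wall; set_random_texture itself is a helper defined elsewhere in that repository. Purely as an illustration, a plausible minimal version built on pyrender's documented Texture and MetallicRoughnessMaterial classes is sketched below (the real implementation may differ, and this assumes the OBJ meshes carry UV coordinates):

import numpy as np
from PIL import Image
from pyrender import Texture, MetallicRoughnessMaterial

def set_random_texture(node, texture_path):
    # Hypothetical stand-in: use the chosen image as the base-color texture
    # of every primitive in the node's mesh.
    image = np.asarray(Image.open(texture_path).convert("RGB"))
    texture = Texture(source=image, source_channels="RGB")
    material = MetallicRoughnessMaterial(baseColorTexture=texture)
    for primitive in node.mesh.primitives:
        primitive.material = material
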
github musyoku / gqn-dataset-renderer / opengl / rooms_free_camera_no_object_rotations.py
    # Place objects
    directions = [-1.0, 0.0, 1.0]
    available_positions = []
    for z in directions:
        for x in directions:
            available_positions.append((x, z))
    available_positions = np.array(available_positions)
    num_objects = random.choice(range(args.max_num_objects)) + 1
    indices = np.random.choice(
        np.arange(len(available_positions)), replace=False, size=num_objects)
    for xz in available_positions[indices]:
        node = random.choice(objects)()
        node.mesh.primitives[0].color_0 = random.choice(colors)
        if args.discrete_position == False:
            xz += np.random.uniform(-0.25, 0.25, size=xz.shape)
        parent = Node(children=[node], translation=np.array([xz[0], 0, xz[1]]))
        scene.add_node(parent)

    return scene
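
objects and colors come from earlier in the script; each entry in objects is a callable that returns a pyrender Node holding one shape, which is why the loop above calls random.choice(objects)(). A hypothetical factory in that style (the repository defines its own set of shapes) could be:

import numpy as np
import trimesh
from pyrender import Mesh, Node

def create_box_node():
    # Hypothetical object factory: returns a fresh Node whose box sits on the floor plane.
    box = trimesh.creation.box(extents=0.5 * np.ones(3))
    mesh = Mesh.from_trimesh(box, smooth=False)
    return Node(mesh=mesh, translation=np.array([0, 0.25, 0]))

objects = [create_box_node]     # the real script registers several such factories
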
github musyoku / gqn-dataset-renderer / opengl / gif_rooms.py
        fix_light_position=args.fix_light_position)
    place_objects(
        scene,
        colors,
        objects,
        min_num_objects=args.num_objects,
        max_num_objects=args.num_objects,
        discrete_position=args.discrete_position,
        rotate_object=args.rotate_object)

    camera_distance = 5
    perspective_camera = PerspectiveCamera(yfov=math.pi / 4)
    perspective_camera_node = Node(
        camera=perspective_camera, translation=np.array([0, 1, 1]))
    orthographic_camera = OrthographicCamera(xmag=3, ymag=3)
    orthographic_camera_node = Node(camera=orthographic_camera)

    rad_step = math.pi / 36
    total_frames = int(math.pi * 2 / rad_step)
    current_rad = 0
    for _ in range(total_frames):
        scene.add_node(perspective_camera_node)

        # Perspective camera
        camera_xz = camera_distance * np.array(
            (math.sin(current_rad), math.cos(current_rad)))
        # Compute yaw and pitch
        camera_direction = np.array([camera_xz[0], 0, camera_xz[1]])
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        perspective_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
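
compute_yaw_and_pitch and genearte_camera_quaternion are helpers defined by this project, not part of the core pyrender API. Purely to illustrate what the first one computes, a plausible version is sketched below; the repository's own implementation may use different sign conventions:

import math
import numpy as np

def compute_yaw_and_pitch(direction):
    # Hypothetical sketch: yaw about the vertical (y) axis and pitch toward the
    # ground plane for a camera placed at `direction`, looking back at the origin.
    x, y, z = direction
    norm = np.linalg.norm(direction)
    yaw = math.atan2(x, z)
    pitch = -math.asin(y / norm) if norm > 0 else 0.0
    return yaw, pitch
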
github musyoku / gqn-dataset-renderer / opengl / rooms_ring_camera.py
        mesh=mesh,
        rotation=pyrender.quaternion.from_yaw(math.pi),
        translation=np.array([0, 1.15, 3.5]))
    set_random_texture(node, texture_path)
    scene.add_node(node)

    mesh = Mesh.from_trimesh(wall_trimesh, smooth=False)
    node = Node(
        mesh=mesh,
        rotation=pyrender.quaternion.from_yaw(-math.pi / 2),
        translation=np.array([3.5, 1.15, 0]))
    set_random_texture(node, texture_path)
    scene.add_node(node)

    mesh = Mesh.from_trimesh(wall_trimesh, smooth=False)
    node = Node(
        mesh=mesh,
        rotation=pyrender.quaternion.from_yaw(math.pi / 2),
        translation=np.array([-3.5, 1.15, 0]))
    set_random_texture(node, texture_path)
    scene.add_node(node)

    light = DirectionalLight(color=np.ones(3), intensity=10)
    if fix_light_position == True:
        translation = np.array([1, 1, 1])
    else:
        xz = np.random.uniform(-1, 1, size=2)
        translation = np.array([xz[0], 1, xz[1]])
    yaw, pitch = compute_yaw_and_pitch(translation)
    node = Node(
        light=light,
        rotation=genearte_camera_quaternion(yaw, pitch),
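
The final excerpt stops mid-call; in the repository the light node is presumably completed and added to the scene in the same way as the wall nodes above. A practical note for running any of these offscreen examples on a headless machine: pyrender's OffscreenRenderer can use EGL or OSMesa instead of a windowed OpenGL context, selected through an environment variable that must be set before pyrender is imported:

import os
os.environ["PYOPENGL_PLATFORM"] = "egl"   # or "osmesa"; set before importing pyrender

import pyrender
r = pyrender.OffscreenRenderer(viewport_width=640, viewport_height=480)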