How to use the pyrealsense2.align function in pyrealsense2

To help you get started, we've selected a few pyrealsense2 examples, based on popular ways it is used in public projects.

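For reference, here is a minimal, self-contained sketch of the pattern every example below shares: construct rs.align with the stream you want depth remapped onto (almost always the color stream), then pass each frameset through align.process(). The resolutions, formats, and frame rate are arbitrary illustration values, not requirements.

import numpy as np
import pyrealsense2 as rs

# Align depth frames to the color stream (depth-to-color is the most common direction).
align = rs.align(rs.stream.color)

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
pipeline.start(config)

try:
    frames = pipeline.wait_for_frames()
    aligned = align.process(frames)              # frameset with depth remapped into the color viewport
    depth_frame = aligned.get_depth_frame()
    color_frame = aligned.get_color_frame()
    if depth_frame and color_frame:
        depth_image = np.asanyarray(depth_frame.get_data())  # uint16 depth, now pixel-aligned to color
        color_image = np.asanyarray(color_frame.get_data())  # uint8 BGR
finally:
    pipeline.stop()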

github AoLyu / 3D-Object-Reconstruction-with-RealSense-D435 / Realtime_registrition_of_two_cam / Realtime.py View on Github external
import sys
import open3d as o3d
import pyrealsense2 as rs  # used below but missing from the excerpt's imports
from tools import rgbdTools,registration


if __name__ == '__main__':

    chessBoard_num = 4

    resolution_width = 1280 # pixels
    resolution_height = 720 # pixels
    frame_rate = 15  # fps

    align = rs.align(rs.stream.color)
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, resolution_width, resolution_height, rs.format.z16, frame_rate)
    rs_config.enable_stream(rs.stream.color, resolution_width, resolution_height, rs.format.bgr8, frame_rate)

    connect_device = []
    for d in rs.context().devices:
        if d.get_info(rs.camera_info.name).lower() != 'platform camera':
            connect_device.append(d.get_info(rs.camera_info.serial_number))

    if len(connect_device) < 2:
        print('Registration needs two cameras connected, but fewer were found.')
        exit()
    
    pipeline1 = rs.pipeline()
    rs_config.enable_device(connect_device[0])
    pipeline_profile1 = pipeline1.start(rs_config)
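
The excerpt stops right after the first pipeline is started. A plausible continuation (hypothetical, but following the same pattern) starts a second pipeline for the other serial number and feeds both framesets through the one align object:

    # Hypothetical continuation of the excerpt: start the second camera the same way.
    pipeline2 = rs.pipeline()
    rs_config.enable_device(connect_device[1])
    pipeline_profile2 = pipeline2.start(rs_config)

    try:
        # One rs.align instance can process framesets from either camera.
        frames1 = align.process(pipeline1.wait_for_frames())
        frames2 = align.process(pipeline2.wait_for_frames())
        depth1, color1 = frames1.get_depth_frame(), frames1.get_color_frame()
        depth2, color2 = frames2.get_depth_frame(), frames2.get_color_frame()
    finally:
        pipeline1.stop()
        pipeline2.stop()
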
github kevinzakka / walle / walle / cameras / realsense.py View on Github external
            self._width = RealSenseD415.WIDTH_H
            self._fps = RealSenseD415.FPS_H
        self._resolution = (self._width, self._height)

        # pipeline and config
        self._pipe = rs.pipeline()
        self._cfg = rs.config()

        # post-processing
        self._spatial_filter = rs.spatial_filter()
        self._hole_filling = rs.hole_filling_filter()
        self._temporal_filter = rs.temporal_filter()

        # misc
        self._colorizer = rs.colorizer()
        self._align = rs.align(rs.stream.color)
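
The excerpt only shows construction. A hypothetical frame-grab method for a class wired up like this (the method name and return values are illustrative, not walle's actual API, and numpy is assumed to be imported as np) might look like:

    # Hypothetical method; not part of the walle excerpt above.
    def frames(self):
        frameset = self._pipe.wait_for_frames()
        frameset = self._align.process(frameset)            # remap depth into the color viewport
        color = frameset.get_color_frame()
        depth = frameset.get_depth_frame()

        # Depth post-processing filters are chained per frame.
        depth = self._spatial_filter.process(depth)
        depth = self._temporal_filter.process(depth)
        depth = self._hole_filling.process(depth)

        depth_colorized = self._colorizer.colorize(depth)   # 8-bit visualization of depth
        return (np.asanyarray(color.get_data()),
                np.asanyarray(depth.get_data()),
                np.asanyarray(depth_colorized.get_data()))
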
github AoLyu / 3D-Object-Reconstruction-with-RealSense-D435 / ObjectRecognitionUsingPointNet / client.py View on Github external
    npoints = 1024
    obj_list = list(range(8))
    obj_list = ['box','polar bear','duck','turtle','whale','dog','elephant','horse']
    obj_list = ['盒子','北极熊','小黄鸭','小海龟','独角鲸','灰狗','大象','马']  # Chinese labels: box, polar bear, duck, turtle, narwhal, grey dog, elephant, horse
    color_list = [[96/255,96/255,96/255],[1,97/255,0],[227/255,207/255,87/255],[176/255,224/255,230/255],
                [106/255,90/255,205/255],[56/255,94/255,15/255],[61/255,89/255,171/255],[51/255,161/255,201/255],
                [178/255,34/255,34/255],[138/255,43/255,226/255]]
    


    s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    s.connect(('titanxp.sure-to.win',8899))
    print(s.recv(1024).decode('utf-8'))

    align = rs.align(rs.stream.color)
    #align = rs.align(rs.stream.depth)

    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 15)
    pipeline = rs.pipeline()
    profile = pipeline.start(config)

    # get camera intrinsics
    intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
    # print(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
    pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
    # print(type(pinhole_camera_intrinsic))
    
    cv2.namedWindow('Color Stream', cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow('Depth Stream', cv2.WINDOW_AUTOSIZE)
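
The excerpt ends just after the display windows are created. A sketch of the capture loop that would typically follow (assuming numpy as np and cv2 are imported as in the full script):

    # Sketch of the capture loop; not part of the excerpt above.
    try:
        while True:
            frames = pipeline.wait_for_frames()
            aligned = align.process(frames)
            depth_frame = aligned.get_depth_frame()
            color_frame = aligned.get_color_frame()
            if not depth_frame or not color_frame:
                continue

            color_image = np.asanyarray(color_frame.get_data())   # rgb8 as configured above
            depth_image = np.asanyarray(depth_frame.get_data())   # uint16 depth units

            cv2.imshow('Color Stream', cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR))
            cv2.imshow('Depth Stream', cv2.convertScaleAbs(depth_image, alpha=0.03))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        pipeline.stop()
        cv2.destroyAllWindows()
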
github AoLyu / 3D-Object-Reconstruction-with-RealSense-D435 / Basic / recordBag.py View on Github external
    if backgroundColorFlag:
        opt.background_color = np.asarray([0, 0, 0])
        backgroundColorFlag = 0
    else:
        opt.background_color = np.asarray([1, 1, 1])
        backgroundColorFlag = 1
    # backgroundColorFlag now mirrors the background color (0 = black, 1 = white)
    return False

if __name__=="__main__":

    bag_name = time.strftime('%Y-%m-%d_%H:%M:%S',time.localtime(time.time()))
    
    output_path = bag_name + '.bag'

    align = rs.align(rs.stream.color)

    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)

    config.enable_record_to_file(output_path) 

    pipeline = rs.pipeline()
    profile = pipeline.start(config)

    # get camera intrinsics
    intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
    print(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
    pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
    # print(type(pinhole_camera_intrinsic))
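
Because enable_record_to_file was set before pipeline.start, every frame pulled in the loop that follows is also written to the .bag. A hedged sketch of that loop:

    # Sketch only: frames are recorded to the bag as they are pulled; align only affects processing.
    try:
        while True:
            frames = pipeline.wait_for_frames()
            aligned = align.process(frames)
            depth_frame = aligned.get_depth_frame()
            color_frame = aligned.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            # ... build / visualize the point cloud from the aligned frames here ...
    except KeyboardInterrupt:
        pass
    finally:
        pipeline.stop()        # stopping the pipeline flushes and closes the bag file
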
github bionicdl-sustech / DeepClawBenchmark / driver / sensors / camera / RealsenseController.py View on Github external
        self.fps = self._cfg["FRAME_ARGS"]["fps"]
        self.serial_id = self._cfg["DEVICE_CONFIGURATION"]["serial_id"]

        self.points = rs.points()
        self.pipeline = rs.pipeline()
        config = rs.config()
        if self.serial_id != '':
            config.enable_device(serial=self.serial_id)
        config.enable_stream(rs.stream.infrared, 1, 1280, 720, rs.format.y8, self.fps)
        config.enable_stream(rs.stream.infrared, 2, 1280, 720, rs.format.y8, self.fps)
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, self.fps)
        config.enable_stream(rs.stream.color, self.width, self.height, rs.format.bgr8, self.fps)

        self.profile = self.pipeline.start(config)
        align_to = rs.stream.color
        self.align = rs.align(align_to)
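
A hypothetical accessor for a controller wired up like this (the method name is illustrative and numpy as np is assumed), showing that only depth and color go through self.align while the infrared streams come straight from the raw frameset:

    # Hypothetical accessor; not part of the DeepClawBenchmark excerpt.
    def get_aligned_images(self):
        frames = self.pipeline.wait_for_frames()
        aligned = self.align.process(frames)                 # depth resampled into the color viewport
        depth = np.asanyarray(aligned.get_depth_frame().get_data())
        color = np.asanyarray(aligned.get_color_frame().get_data())
        ir_left = np.asanyarray(frames.get_infrared_frame(1).get_data())    # infrared is left unaligned
        ir_right = np.asanyarray(frames.get_infrared_frame(2).get_data())
        return color, depth, ir_left, ir_right
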
github intel-isl / Open3D / examples / Python / ReconstructionSystem / sensors / realsense_pcd_visualizer.py View on Github external
    depth_sensor.set_option(rs.option.visual_preset, Preset.HighAccuracy)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_scale = depth_sensor.get_depth_scale()

    # We will not display the background of objects more than
    #  clipping_distance_in_meters meters away
    clipping_distance_in_meters = 3  # 3 meter
    clipping_distance = clipping_distance_in_meters / depth_scale
    # print(depth_scale)

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to other frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    vis = o3d.visualization.Visualizer()
    vis.create_window()

    pcd = o3d.geometry.PointCloud()
    flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]

    # Streaming loop
    frame_count = 0
    try:
        while True:

            dt0 = datetime.now()

            # Get frameset of color and depth
            frames = pipeline.wait_for_frames()
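
The excerpt cuts off right after wait_for_frames. Inside that loop, the example goes on to align the frameset, truncate depth at the clipping distance, and rebuild the point cloud each iteration; here is a paraphrased sketch (not the verbatim Open3D code), where intrinsic stands for an o3d.camera.PinholeCameraIntrinsic built from the color stream, which the full example constructs outside this excerpt:

            # Paraphrased continuation of the loop body.
            aligned_frames = align.process(frames)
            depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue

            depth_image = o3d.geometry.Image(np.asanyarray(depth_frame.get_data()))
            color_image = o3d.geometry.Image(np.asanyarray(color_frame.get_data()))
            rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
                color_image, depth_image,
                depth_scale=1.0 / depth_scale,            # librealsense scale -> Open3D convention
                depth_trunc=clipping_distance_in_meters,  # drop everything beyond the clipping distance
                convert_rgb_to_intensity=False)
            temp = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd, intrinsic)
            temp.transform(flip_transform)
            pcd.points = temp.points
            pcd.colors = temp.colors

            if frame_count == 0:
                vis.add_geometry(pcd)
            vis.update_geometry(pcd)
            vis.poll_events()
            vis.update_renderer()
            frame_count += 1
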
github AoLyu / 3D-Object-Reconstruction-with-RealSense-D435 / MutiView3DReconstruction(paper) / interfaceVersion.py View on Github external
import _init_paths
import pyrealsense2 as rs 
from lib import rgbdTools,keyPoints,registration,template
import cv2
import numpy as np 
from scipy import optimize
import open3d as o3d 
import copy
import sys
import os 
import time


if __name__=="__main__":
    align = rs.align(rs.stream.color)
    #align = rs.align(rs.stream.depth)

    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 6)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 6)
    pipeline = rs.pipeline()
    profile = pipeline.start(config)

    # get camera intrinsics
    intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
    # print(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
    pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
    RealSense = rgbdTools.Camera(fx = intr.fx, fy = intr.fy, cx =  intr.ppx, cy = intr.ppy)
    TablePlane = keyPoints.Plane()
    # print(type(pinhole_camera_intrinsic))
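
The commented-out line near the top of this example shows the other alignment direction. A quick hedged sketch of the difference, assuming the pipeline above is running:

    # Sketch: the two alignment directions side by side.
    align_to_color = rs.align(rs.stream.color)   # depth resampled into the color viewport
    align_to_depth = rs.align(rs.stream.depth)   # color resampled into the depth viewport

    frames = pipeline.wait_for_frames()
    d2c = align_to_color.process(frames).get_depth_frame()
    c2d = align_to_depth.process(frames).get_color_frame()
    # Both are 640x480 here because the two streams share a resolution; with different stream
    # resolutions, each aligned frame takes on the size of its target stream.
    print(d2c.get_width(), d2c.get_height(), c2d.get_width(), c2d.get_height())
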
github AoLyu / 3D-Object-Reconstruction-with-RealSense-D435 / Python / captureRGBDpt.py View on Github external
import pyrealsense2 as rs
import numpy as np
import cv2
import time
from open3d import *
import os

if __name__=="__main__":
    align = rs.align(rs.stream.color)

    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 15)
    pipeline = rs.pipeline()
    profile = pipeline.start(config)

    # get camera intrinsics
    intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
    print(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
    pinhole_camera_intrinsic = PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
    # print(type(pinhole_camera_intrinsic))
    
    geometrie_added = False
    vis = Visualizer()
    vis.create_window("Pointcloud",640,480)
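
A sketch of the capture loop that would typically follow, using the older flat Open3D API implied by the wildcard import above (names such as Image, PointCloud, and create_rgbd_image_from_color_and_depth are from that older API):

    # Sketch only; not part of the excerpt above.
    pcd = PointCloud()
    try:
        while True:
            frames = pipeline.wait_for_frames()
            aligned = align.process(frames)
            depth_frame = aligned.get_depth_frame()
            color_frame = aligned.get_color_frame()
            if not depth_frame or not color_frame:
                continue

            depth = Image(np.asanyarray(depth_frame.get_data()))
            color = Image(np.asanyarray(color_frame.get_data()))
            rgbd = create_rgbd_image_from_color_and_depth(color, depth, convert_rgb_to_intensity=False)
            temp = create_point_cloud_from_rgbd_image(rgbd, pinhole_camera_intrinsic)

            pcd.points = temp.points
            pcd.colors = temp.colors
            if not geometrie_added:
                vis.add_geometry(pcd)
                geometrie_added = True
            vis.update_geometry()      # newer Open3D versions take the geometry as an argument
            vis.poll_events()
            vis.update_renderer()
    finally:
        pipeline.stop()
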
github nateroblin33 / 3d-bone-tracking-tool / files / speedcam.py View on Github external
# OPTIONAL code for using Default Webcam/Facetime Camera
    # NOTE: If used, comment out the later "vs = color_image" assignment near line 134 (that line number has changed and is no longer exact)
    #vs = cv2.VideoCapture(args["video"])
    #vs = cv2.VideoCapture(-1)
 
    # Allow the camera or video file to warm up
time.sleep(2.0)

# Print start time for log
dt = datetime.now()
print("Time Started: " + str(dt))

# Aligns streams
align_to = rs.stream.color
align = rs.align(align_to)

# Function to normalize vector
def normalize(v):
    norm = np.linalg.norm(v)
    if norm == 0:
        return v
    return v / norm

# Function to calculate Roll
def roll(p1, p2, p3, baseline):
    vecx = [(p1[0]+p2[0])/2 - p3[0], (p1[1]+p2[1])/2 - p3[1], (p1[2]+p2[2])/2 - p3[2]]
    v1 = [p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2]]
    v2 = [p3[0]-p1[0], p3[1]-p1[1], p3[2]-p1[2]]
    X = normalize(vecx)
    vecz = [(v1[1]*v2[2])-(v1[2]*v2[1]), (v1[2]*v2[0])-(v1[0]*v2[2]), (v1[0]*v2[1])-(v1[1]*v2[0])]
    Z = normalize(vecz)
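
The roll() excerpt consumes 3-D points p1, p2, p3. With aligned frames, such points are usually obtained by deprojecting pixels through the stream intrinsics; a hypothetical helper, not part of speedcam.py:

# Hypothetical helper: convert a pixel in the aligned depth frame into the kind of
# 3-D point that roll() takes as p1/p2/p3.
def pixel_to_point(aligned_depth_frame, x, y):
    depth = aligned_depth_frame.get_distance(x, y)   # depth in meters at that pixel
    intrin = aligned_depth_frame.profile.as_video_stream_profile().get_intrinsics()
    # After rs.align, depth pixels line up with the color image, so this point is
    # expressed in the color camera's coordinate frame.
    return rs.rs2_deproject_pixel_to_point(intrin, [x, y], depth)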