## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.
###############################################
##      OpenCV and NumPy integration         ##
###############################################
import pyrealsense2 as rs
import numpy as np
import cv2
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
pipeline.start(config)
try:
    while True:
        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        if not depth_frame or not color_frame:
            continue
        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
finally:
    pipeline.stop()
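# Sketch (not from the original snippet): how the loop above typically
# renders the two images; call this inside the while-loop, before the
# `finally` block.
def show_frames(depth_image, color_image):
    # Apply a JET color map on the depth image (converted to 8-bit first)
    depth_colormap = cv2.applyColorMap(
        cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
    # Stack color and depth side by side and display them
    images = np.hstack((color_image, depth_colormap))
    cv2.imshow('RealSense', images)
    cv2.waitKey(1)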
# Reconstructed helper (the wrapper name is illustrative): converts RealSense
# intrinsics into an Open3D camera model; requires `import open3d as o3d`.
def get_intrinsic_matrix(frame):
    intrinsics = frame.profile.as_video_stream_profile().get_intrinsics()
    out = o3d.camera.PinholeCameraIntrinsic(640, 480, intrinsics.fx,
                                            intrinsics.fy, intrinsics.ppx,
                                            intrinsics.ppy)
    return out
# `Preset` mirrors the D400 visual-preset values used below (as in Open3D's
# realsense_recorder example).
from enum import IntEnum

class Preset(IntEnum):
    Custom = 0
    Default = 1
    Hand = 2
    HighAccuracy = 3
    HighDensity = 4
    MediumDensity = 5

if __name__ == "__main__":
    # Create a pipeline
    pipeline = rs.pipeline()
    # Create a config and configure the pipeline to stream
    # different resolutions of color and depth streams
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
    # Start streaming
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    # Use the HighAccuracy preset for recording
    depth_sensor.set_option(rs.option.visual_preset, Preset.HighAccuracy)
    # Get the depth sensor's depth scale (see rs-align example for explanation)
    depth_scale = depth_sensor.get_depth_scale()
    # We will not display the background of objects more than
    # clipping_distance_in_meters meters away
    clipping_distance_in_meters = 3  # 3 meters
    clipping_distance = clipping_distance_in_meters / depth_scale
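    # Sketch of how clipping_distance is typically applied (assumes an
    # aligned depth_image and color_image, as in the conversion loops in the
    # other snippets): pixels beyond the clip, or invalid (0), turn grey.
    grey_color = 153
    depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
    bg_removed = np.where(
        (depth_image_3d > clipping_distance) | (depth_image_3d <= 0),
        grey_color, color_image)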
def change_background_color(vis):
    # Toggle the render background; only the `else` branch appeared in the
    # original fragment, the black branch is an assumed counterpart.
    global backgroundColorFlag
    opt = vis.get_render_option()
    if backgroundColorFlag:
        opt.background_color = np.asarray([0, 0, 0])
        backgroundColorFlag = 0
    else:
        opt.background_color = np.asarray([1, 1, 1])
        backgroundColorFlag = 1
    # background_color tracks backgroundColorFlag
    return False

key_to_callback = {}
key_to_callback[ord(" ")] = saveCurrentRGBD
key_to_callback[ord("Q")] = breakLoop
key_to_callback[ord("K")] = change_background_color
if __name__=="__main__":
align = rs.align(rs.stream.color)
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)
config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 15)
pipeline = rs.pipeline()
profile = pipeline.start(config)
# get camera intrinsics
intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
# print(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
# print(type(pinhole_camera_intrinsic))
geometrie_added = False
vis = o3d.visualization.VisualizerWithKeyCallback()
vis.create_window("Pointcloud",640,480)
pointcloud = o3d.geometry.PointCloud()
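    # Sketch of the per-frame update loop that typically follows (the exact
    # capture loop is not in this snippet): fuse depth and color into an
    # RGBD image and back-project it with the intrinsics above.
    while True:
        frames = pipeline.wait_for_frames()
        aligned = align.process(frames)
        depth_frame = aligned.get_depth_frame()
        color_frame = aligned.get_color_frame()
        if not depth_frame or not color_frame:
            continue
        depth = o3d.geometry.Image(np.asanyarray(depth_frame.get_data()))
        color = o3d.geometry.Image(np.asanyarray(color_frame.get_data()))
        rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
            color, depth, convert_rgb_to_intensity=False)
        pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
            rgbd, pinhole_camera_intrinsic)
        pointcloud.points = pcd.points
        pointcloud.colors = pcd.colors
        if not geometrie_added:
            vis.add_geometry(pointcloud)
            geometrie_added = True
        vis.update_geometry(pointcloud)
        vis.poll_events()
        vis.update_renderer()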
if __name__=="__main__":
bag_name = time.strftime('%Y-%m-%d_%H:%M:%S',time.localtime(time.time()))
output_path = bag_name + '.bag'
align = rs.align(rs.stream.color)
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
config.enable_record_to_file(output_path)
pipeline = rs.pipeline()
profile = pipeline.start(config)
# get camera intrinsics
intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
print(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
# print(type(pinhole_camera_intrinsic))
cv2.namedWindow('Color Stream', cv2.WINDOW_AUTOSIZE)
cv2.namedWindow('Depth Stream', cv2.WINDOW_AUTOSIZE)
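    # Sketch of the capture/display loop that typically follows: the
    # pipeline records to the bag file automatically; here the frames are
    # also shown in the two windows created above. Key handling is
    # illustrative only.
    try:
        while True:
            frames = pipeline.wait_for_frames()
            aligned = align.process(frames)
            depth_frame = aligned.get_depth_frame()
            color_frame = aligned.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())
            # The color stream is RGB here; convert for OpenCV's BGR windows
            cv2.imshow('Color Stream', cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR))
            cv2.imshow('Depth Stream',
                       cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03),
                                         cv2.COLORMAP_JET))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        pipeline.stop()
        cv2.destroyAllWindows()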
## Brown = N ##
## Gray = A ##
## ##
##############################################
# Construct the argument parser and parse the arguments
# Gives the option to use "python mbts.py --video [FILENAME.EXTENSION]" in a terminal
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=4, help="max buffer size")
args = vars(ap.parse_args())
# Sets up streaming settings for Intel RealSense D435
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Starts stream with configured settings (DOES NOT DISPLAY THE STREAM)
profile = pipeline.start(config)
# Define bounds for each color supported in HSV/HSB values
# NOTE: HSV/HSB is NOT the same as HSL
# NOTE: HSV/HSB values typically range from (0,0%,0%) to (360,100%,100%), but they are scaled here to be from (0,0,0) to (180,255,255)
# Conversion from real HSV/HSB to this scale can be done by halving the Hue and then taking the appropriate percentage (%) of 255 for Saturation and Value/Brightness
# BLACK (k)
blackLower = (0, 0, 0)
blackUpper = (180, 255, 49)
# WHITE (i)
whiteLower = (0, 0, 240)
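# Sketch: these bounds are meant for cv2.inRange on an HSV-converted frame
# (assumes a color_image from the stream, as in the earlier loops; the
# snippet is cut off before whiteUpper, which is left as-is).
hsv = cv2.cvtColor(color_image, cv2.COLOR_BGR2HSV)
black_mask = cv2.inRange(hsv, blackLower, blackUpper)  # 255 where pixel counts as "black"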
framecount = 0
detectframecount = 0
time1 = 0
time2 = 0
LABELS = ('background',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
pipeline.start(config)
net = cv2.dnn.readNet('lrmodel/MobileNetSSD/MobileNetSSD_deploy.xml', 'lrmodel/MobileNetSSD/MobileNetSSD_deploy.bin')
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
try:
    while True:
        t1 = time.perf_counter()
        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
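        # Sketch of how this detection loop typically continues (not in the
        # original snippet): run the OpenVINO MobileNet-SSD on the color
        # image; the preprocessing values are the usual MobileNet-SSD ones.
        color_frame = frames.get_color_frame()
        if not depth_frame or not color_frame:
            continue
        color_image = np.asanyarray(color_frame.get_data())
        blob = cv2.dnn.blobFromImage(color_image, 0.007843, (300, 300), 127.5)
        net.setInput(blob)
        detections = net.forward()  # shape (1, 1, N, 7): [_, class_id, conf, x1, y1, x2, y2]
finally:
    pipeline.stop()  # assumed cleanup; closes the try above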
def init_device():
    # Configure depth stream
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    print('config')
    # Start streaming
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is:", depth_scale)
    return pipeline, depth_scale
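# Usage sketch for init_device() (assumes numpy imported as np): raw z16
# depth values multiplied by depth_scale give distances in meters.
pipeline, depth_scale = init_device()
frames = pipeline.wait_for_frames()
depth_image = np.asanyarray(frames.get_depth_frame().get_data())
depth_in_meters = depth_image * depth_scale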
import numpy as np  # fundamental package for scientific computing
import json
# In order to import cv2 under Python 3 when you also have ROS installed,
# drop ROS's Python 2 path and prefer the Anaconda site-packages
import os
import sys
if os.path.exists("/opt/ros/kinetic/lib/python2.7/dist-packages"):
    sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
anaconda_site = os.path.expanduser('~/anaconda3/lib/python3.7/site-packages')
if os.path.exists(anaconda_site):
    sys.path.append(anaconda_site)
import cv2
######################################################
## These parameters are reconfigurable ##
######################################################
STREAM_TYPE = [rs.stream.depth, rs.stream.color]  # rs2_stream identifies the type of data provided by the RealSense device
FORMAT = [rs.format.z16, rs.format.bgr8]  # rs2_format identifies how binary data is encoded within a frame
WIDTH = 848  # Defines the number of columns for each frame, or zero for auto-resolve
HEIGHT = 480  # Defines the number of lines for each frame, or zero for auto-resolve
FPS = 30  # Defines the rate of frames per second
DISPLAY_WINDOW_NAME = 'Input/output depth'
OPTION_WINDOW_NAME = 'Filter options'
USE_PRESET_FILE = True
PRESET_FILE = "../cfg/d4xx-default.json"
# List of filters to be applied, in this order.
# Depth Frame (input)
# >> Decimation Filter (reduces depth frame density)
# >> Threshold Filter (removes values outside recommended range)
# >> Depth2Disparity Transform** (transform the scene into disparity domain)
# >> Spatial Filter (edge-preserving spatial smoothing)
# >> Temporal Filter (reduces temporal noise)
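# Sketch of the filter chain described above, built from pyrealsense2's
# processing blocks (default options; the back-transform to depth is the
# usual final step of this chain):
decimation = rs.decimation_filter()
threshold = rs.threshold_filter()
depth_to_disparity = rs.disparity_transform(True)
spatial = rs.spatial_filter()
temporal = rs.temporal_filter()
disparity_to_depth = rs.disparity_transform(False)

def apply_filters(depth_frame):
    # Apply the filters in the order listed above
    frame = decimation.process(depth_frame)
    frame = threshold.process(frame)
    frame = depth_to_disparity.process(frame)
    frame = spatial.process(frame)
    frame = temporal.process(frame)
    return disparity_to_depth.process(frame)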
######################################################
## Python implementation of rs-depth.c example ##
## https://github.com/IntelRealSense/librealsense/tree/master/examples/C/depth
######################################################
# First import the library
import pyrealsense2 as rs
import sys
######################################################
## These parameters are reconfigurable ##
######################################################
STREAM_TYPE = rs.stream.depth  # rs2_stream identifies the type of data provided by the RealSense device
FORMAT = rs.format.z16  # rs2_format identifies how binary data is encoded within a frame
WIDTH = 640  # Defines the number of columns for each frame, or zero for auto-resolve
HEIGHT = 480  # Defines the number of lines for each frame, or zero for auto-resolve
FPS = 30  # Defines the rate of frames per second
HEIGHT_RATIO = 20  # Defines the height ratio between the original frame and the new frame
WIDTH_RATIO = 10  # Defines the width ratio between the original frame and the new frame
MAX_DEPTH = 1  # Approximate the coverage of pixels within this range (meters)
ROW_LENGTH = int(WIDTH / WIDTH_RATIO)
pixels = " .:nhBXWW" # The text-based representation of depth
######################################################
## Main program starts here ##
######################################################
try:
    # Create a context object. This object owns the handles to all connected RealSense devices
    pipeline = rs.pipeline()
    config = rs.config()
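    # Sketch of how this rs-depth port typically continues: enable the
    # stream with the parameters above and render each depth frame as text
    # using the `pixels` ramp (one illustrative mapping, not the exact
    # coverage computation of the C example).
    config.enable_stream(STREAM_TYPE, WIDTH, HEIGHT, FORMAT, FPS)
    pipeline.start(config)
    while True:
        frames = pipeline.wait_for_frames()
        depth = frames.get_depth_frame()
        if not depth:
            continue
        rows = []
        for y in range(0, HEIGHT, HEIGHT_RATIO):
            row = ""
            for x in range(0, WIDTH, WIDTH_RATIO):
                d = depth.get_distance(x, y)
                # Distances inside MAX_DEPTH pick a character from the ramp
                idx = int(d / MAX_DEPTH * (len(pixels) - 1)) if 0 < d < MAX_DEPTH else 0
                row += pixels[idx]
            rows.append(row)
        print("\n".join(rows))
except Exception as e:
    print(e)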
import pyrealsense2 as rs
import numpy as np
import cv2
from datetime import datetime
import open3d as o3d  # avoid the wildcard import; use the namespaced API
import os
if __name__=="__main__":
align = rs.align(rs.stream.color)
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)
config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 15)
pipeline = rs.pipeline()
profile = pipeline.start(config)
# get camera intrinsics
intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
print(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
pinhole_camera_intrinsic = PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
# print(type(pinhole_camera_intrinsic))
geometrie_added = False
vis = Visualizer()
vis.create_window("Pointcloud",640,480)
pointcloud = PointCloud()
i = 0