Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): this fragment appears to be the body of a camera-setup routine;
# its enclosing `def`/`try:` is not visible in this view and the indentation has
# been flattened — the `except:` near the bottom has no matching `try:` here.
global detectframecount
global time1
global time2
global cam
global window_name
global depth_scale
global align_to
global align
# Configure depth and color streams
# Or
# Open USB Camera streams
if camera_mode == 0:
# camera_mode 0: Intel RealSense — 640x480 depth (z16) + color (bgr8) at 30 FPS
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
profile = pipeline.start(config)
# depth_scale converts raw z16 depth units into meters for this device
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
# Align depth frames to the color stream viewpoint
align_to = rs.stream.color
align = rs.align(align_to)
window_name = "RealSense"
elif camera_mode == 1:
# camera_mode 1: generic USB camera through OpenCV VideoCapture
cam = cv2.VideoCapture(0)
if cam.isOpened() != True:
print("USB Camera Open Error!!!")
sys.exit(0)
cam.set(cv2.CAP_PROP_FPS, 30)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
window_name = "USB Camera"
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Configure depth and color streams RealSense D435
# NOTE(review): this creates and starts a second pipeline even when
# camera_mode == 0 already started one above — confirm this is intended.
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
pipeline.start(config)
except:
# NOTE(review): bare `except:` also swallows KeyboardInterrupt/SystemExit;
# `except Exception:` would be safer — left unchanged in this doc-only pass.
import traceback
traceback.print_exc()
# Stop streaming
if pipeline != None:
pipeline.stop()
print("\n\nFinished\n\n")
sys.exit()
def init():
    """Initialize the OpenGL clear color to a uniform light gray (RGBA 0.7)."""
    grey = 0.7
    glClearColor(grey, grey, grey, grey)
from numba import njit
# In order to import cv2 under python3 when you also have ROS Kinetic installed:
# the ROS Kinetic dist-packages directory shadows python3's cv2, so it must be
# dropped from sys.path before cv2 is imported.
import os
import sys

_ROS_CV2_DIR = "/opt/ros/kinetic/lib/python2.7/dist-packages"
# Bug fix: remove the entry only if it is actually on sys.path. The original
# tested for the directory on disk, which raised ValueError whenever the dir
# existed but was not on sys.path.
if _ROS_CV2_DIR in sys.path:
    sys.path.remove(_ROS_CV2_DIR)

# Bug fix: os.path.exists() does not expand "~", so the original check could
# never succeed; expand the user's home directory first. Also avoid appending
# a duplicate entry.
_CONDA_SITE = os.path.expanduser("~/anaconda3/lib/python3.7/site-packages")
if os.path.exists(_CONDA_SITE) and _CONDA_SITE not in sys.path:
    sys.path.append(_CONDA_SITE)
import cv2
######################################################
## Depth parameters - reconfigurable ##
######################################################
# Sensor-specific parameter, for D435: https://www.intelrealsense.com/depth-camera-d435/
STREAM_TYPE = [rs.stream.depth, rs.stream.color] # rs2_stream identifies the type of data provided by the RealSense device
FORMAT = [rs.format.z16, rs.format.bgr8] # rs2_format identifies how binary data is encoded within a frame
WIDTH = 640 # Number of columns for each frame, or zero for auto resolve
HEIGHT = 480 # Number of lines for each frame, or zero for auto resolve
FPS = 30 # Rate of frames per second
DEPTH_RANGE = [0.1, 8.0] # [min, max] depth in meters — replace with your sensor's specifics
USE_PRESET_FILE = True # Load sensor options from PRESET_FILE at startup
PRESET_FILE = "../cfg/d4xx-default.json"
# List of filters to be applied, in this order.
# https://github.com/IntelRealSense/librealsense/blob/master/doc/post-processing-filters.md
# Each entry: [enabled flag, human-readable name, filter instance]
# NOTE(review): this list literal is not closed within this view — it continues
# (and is presumably terminated) beyond the visible lines.
filters = [
[True, "Decimation Filter", rs.decimation_filter()],
[True, "Threshold Filter", rs.threshold_filter()],
[True, "Depth to Disparity", rs.disparity_transform(True)],
[True, "Spatial Filter", rs.spatial_filter()],
def _config_pipe(self):
    """Configure the pipeline to stream color and depth from this device.

    Enables the device identified by ``self._serial`` on the stored rs
    config, then enables an RGB8 color stream and a Z16 depth stream at
    the instance's width, height and frame rate.
    """
    self._cfg.enable_device(self._serial)
    # (stream type, pixel format) pairs, enabled in order: color, then depth.
    stream_specs = (
        (rs.stream.color, rs.format.rgb8),
        (rs.stream.depth, rs.format.z16),
    )
    for stream, pixel_format in stream_specs:
        self._cfg.enable_stream(
            stream, self._width, self._height, pixel_format, self._fps,
        )
import pyrealsense2 as rs
import numpy as np
import cv2
import time
# NOTE(review): star import — PinholeCameraIntrinsic, Visualizer and PointCloud
# below come from open3d; explicit imports would keep the namespace clearer.
from open3d import *
import os
if __name__=="__main__":
# NOTE(review): indentation of this script body has been flattened in this view.
# Align depth frames to the color stream viewpoint.
align = rs.align(rs.stream.color)
config = rs.config()
# 640x480 depth (z16) and color (rgb8) streams at 15 FPS
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)
config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 15)
pipeline = rs.pipeline()
profile = pipeline.start(config)
# get camera intrinsics
intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
print(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
# Build the Open3D intrinsic object from the RealSense color-stream intrinsics
pinhole_camera_intrinsic = PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
# print(type(pinhole_camera_intrinsic))
geometrie_added = False # set True once the point cloud has been added to the visualizer
vis = Visualizer()
vis.create_window("Pointcloud",640,480)
pointcloud = PointCloud()
i = 0 # frame counter
def rotation(self):
    """Return the 3x3 float32 rotation matrix for the current view.

    Combines a rotation about the X axis by ``self.pitch`` with a rotation
    about the Y axis by ``self.yaw`` (yaw applied after pitch).
    """
    pitch_mat, _ = cv2.Rodrigues((self.pitch, 0, 0))
    yaw_mat, _ = cv2.Rodrigues((0, self.yaw, 0))
    combined = yaw_mat @ pitch_mat
    return combined.astype(np.float32)
@property
def pivot(self):
    """Pivot point of the view: the translation pushed back along +Z
    by the current viewing distance."""
    depth_offset = np.array((0, 0, self.distance), dtype=np.float32)
    return self.translation + depth_offset
# Application/view state — AppState is defined elsewhere in the file
state = AppState()
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
# 640x480 depth (z16) and color (bgr8) streams at 30 FPS
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
pipeline.start(config)
# Get stream profile and camera intrinsics
profile = pipeline.get_active_profile()
depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
depth_intrinsics = depth_profile.get_intrinsics()
w, h = depth_intrinsics.width, depth_intrinsics.height
# Processing blocks
pc = rs.pointcloud()
# Decimation magnitude scales as 2**state.decimate — presumably the
# downsampling factor for depth frames; confirm against rs docs.
decimate = rs.decimation_filter()
decimate.set_option(rs.option.filter_magnitude, 2 ** state.decimate)
colorizer = rs.colorizer()