def main():
    global capture
    global Classifier
    global FacenetHelpers
    Classifier = Classifier()
    FacenetHelpers = FacenetHelpers()
    # Connect to the camera defined in the configuration file
    try:
        capture = WebcamVideoStream(src=Classifier._configs["Cameras"][0]["URL"]).start()
        print("-- CONNECTED TO WEBCAM")
    except Exception as e:
        print("-- FAILED TO CONNECT TO WEBCAM")
        print(str(e))
        sys.exit()
    # Serve the camera feed over HTTP until interrupted
    try:
        server = ThreadedHTTPServer((Classifier._configs["Cameras"][0]["Stream"], Classifier._configs["Cameras"][0]["StreamPort"]), CamHandler)
        print("-- Server started on " + Classifier._configs["Cameras"][0]["Stream"] + ":" + str(Classifier._configs["Cameras"][0]["StreamPort"]))
        server.serve_forever()
    except KeyboardInterrupt:
        server.socket.close()
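The snippet above leans on a CamHandler request handler and a ThreadedHTTPServer class defined elsewhere in that project. A rough sketch of what they typically look like, assuming the handler streams MJPEG frames from the global capture object (the boundary string and handler body here are assumptions, not the original code):

from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
import cv2

class CamHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        # Serve an endless multipart stream of JPEG frames from the global capture
        self.send_response(200)
        self.send_header("Content-type", "multipart/x-mixed-replace; boundary=--jpgboundary")
        self.end_headers()
        while True:
            frame = capture.read()
            ok, jpg = cv2.imencode(".jpg", frame)
            if not ok:
                continue
            data = jpg.tobytes()
            self.wfile.write(b"--jpgboundary\r\n")
            self.send_header("Content-type", "image/jpeg")
            self.send_header("Content-length", str(len(data)))
            self.end_headers()
            self.wfile.write(data)
            self.wfile.write(b"\r\n")

class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle each client connection in its own thread."""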
def run_video(self):
    if self.verbose:
        print('No image path specified, reading from camera video feed')
    camera = WebcamVideoStream(src=self.source).start()
    # Set stream size -- TODO: figure out why this isn't working
    # camera.stream.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    # camera.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    timeout = 0
    if self.tuning:
        # Build a settings window with one trackbar per HSV threshold channel
        cv2.namedWindow('Settings')
        cv2.resizeWindow('Settings', 700, 350)
        cv2.createTrackbar('Lower H', 'Settings', self.settings['lower'][0], 255,
                           lambda val: self.update_thresh(True, 0, val))
        cv2.createTrackbar('Lower S', 'Settings', self.settings['lower'][1], 255,
                           lambda val: self.update_thresh(True, 1, val))
        cv2.createTrackbar('Lower V', 'Settings', self.settings['lower'][2], 255,
                           lambda val: self.update_thresh(True, 2, val))
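The trackbars above bind to an update_thresh method that is not shown. A hypothetical sketch, assuming it does nothing more than store the new HSV bound in the settings dict the trackbars read from:

class ThresholdTuner:
    """Hypothetical stand-in for the object whose update_thresh the trackbars call."""

    def __init__(self):
        # Same layout the snippet reads from: settings['lower'][channel]
        self.settings = {'lower': [0, 0, 0], 'upper': [255, 255, 255]}

    def update_thresh(self, is_lower, channel, val):
        # Trackbar callback: is_lower selects the bound, channel selects H/S/V
        key = 'lower' if is_lower else 'upper'
        self.settings[key][channel] = val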
def __init__(self, path):
    # Export path
    self.path = path
    self.img_count = 0
    if not os.path.exists(self.path):
        os.makedirs(self.path)
    self.recording_csv = open(self.path + 'annotations.csv', 'a')
    # Initialize ROS Subscriber
    rospy.Subscriber('cmd', Point, self.callback, queue_size=1)
    # Open Camera stream
    self.videoStream = WebcamVideoStream().start()
    time.sleep(1.0)
    #rospy.spin()
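The subscriber above registers self.callback, which is not shown. A hypothetical sketch of that method (meant to live on the same class), assuming it saves the latest frame and logs the commanded geometry_msgs Point next to the image name; the filename scheme and CSV columns are guesses:

import cv2

def callback(self, cmd):
    # cmd is a geometry_msgs/Point published on the 'cmd' topic
    frame = self.videoStream.read()
    filename = 'img_{:06d}.jpg'.format(self.img_count)
    cv2.imwrite(self.path + filename, frame)
    self.recording_csv.write('{},{},{},{}\n'.format(filename, cmd.x, cmd.y, cmd.z))
    self.img_count += 1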
def __init__(self, id):
    # self.cap = cv2.VideoCapture(id)
    self.cap = WebcamVideoStream(src=id).start()
    self.cfgfile = "cfg/yolov3.cfg"
    # self.cfgfile = 'cfg/yolov3-tiny.cfg'
    self.weightsfile = "yolov3.weights"
    # self.weightsfile = 'yolov3-tiny.weights'
    self.confidence = float(0.6)
    self.nms_thesh = float(0.8)
    self.num_classes = 80
    self.classes = load_classes('data/coco.names')
    self.colors = pkl.load(open("pallete", "rb"))
    self.model = Darknet(self.cfgfile)
    self.CUDA = torch.cuda.is_available()
    self.model.load_weights(self.weightsfile)
    self.model.net_info["height"] = 160
    self.inp_dim = int(self.model.net_info["height"])
    self.width = 1280   # 640
    self.height = 720   # 360
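The detector above never shows how frames leave the threaded stream and reach the network. A sketch under assumptions: the resize and normalisation below is generic YOLOv3-style preprocessing, not necessarily what this repo's own helpers do, and the detection/NMS step is omitted because it is repo-specific.

import cv2
import torch

def prep_frame(frame, inp_dim, use_cuda):
    # Resize to the network input size and convert BGR HWC uint8 -> RGB CHW float in [0, 1]
    img = cv2.resize(frame, (inp_dim, inp_dim))
    img = img[:, :, ::-1].transpose((2, 0, 1)).copy()
    tensor = torch.from_numpy(img).float().div(255.0).unsqueeze(0)
    return tensor.cuda() if use_cuda else tensor

# e.g. tensor = prep_frame(self.cap.read(), self.inp_dim, self.CUDA)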
def run_video(self):
    if self.verbose:
        print("No image path specified, reading from camera video feed")
    camera = WebcamVideoStream(src=self.source).start()
    timeout = 0
    if self.tuning:
        cv2.namedWindow("Settings")
        cv2.resizeWindow("Settings", 700, 350)
        cv2.createTrackbar("Lower H", "Settings", self.lower[0], 255,
                           lambda val: self.update_setting(True, 0, val))
        cv2.createTrackbar("Lower S", "Settings", self.lower[1], 255,
                           lambda val: self.update_setting(True, 1, val))
        cv2.createTrackbar("Lower V", "Settings", self.lower[2], 255,
                           lambda val: self.update_setting(True, 2, val))
        cv2.createTrackbar("Upper H", "Settings", self.upper[0], 255,
                           lambda val: self.update_setting(False, 0, val))
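Once the trackbars have updated self.lower and self.upper, the frame loop presumably applies them as an HSV mask. A minimal sketch of that step, assuming cv2.inRange thresholding (which the Lower/Upper H-S-V naming suggests, though the original loop is not shown):

import cv2
import numpy as np

def hsv_mask(frame_bgr, lower, upper):
    # lower/upper are the three-element H, S, V bounds set by the trackbars
    hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
    return cv2.inRange(hsv, np.array(lower, dtype=np.uint8),
                       np.array(upper, dtype=np.uint8))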
fps.update()

# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

# do a bit of cleanup
stream.release()
cv2.destroyAllWindows()

# create a *threaded* video stream, allow the camera sensor to warm up,
# and start the FPS counter
print("[INFO] sampling THREADED frames from webcam...")
vs = WebcamVideoStream(src=0).start()
fps = FPS().start()

# loop over some frames...this time using the threaded stream
while fps._numFrames < args["num_frames"]:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=400)

    # check to see if the frame should be displayed to our screen
    if args["display"] > 0:
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

    # update the FPS counter
    fps.update()
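The benchmark reads args["num_frames"] and args["display"] without showing the parser. A minimal argparse setup that supplies those keys (the flag names are chosen here for illustration, not taken from the snippet):

import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-n", "--num-frames", type=int, default=100,
                help="number of frames to loop over for the FPS test")
ap.add_argument("-d", "--display", type=int, default=-1,
                help="whether or not frames should be displayed")
args = vars(ap.parse_args())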
def __init__(self, path):
    # Export path
    self.path = path
    if not os.path.exists(self.path):
        os.makedirs(self.path)
    self.recording_csv = open(self.path + 'annotations.csv', 'a')
    # Initialize ROS Subscriber
    rospy.init_node('data_saver', anonymous=True)
    rospy.Subscriber('cmd', Point, self.callback)
    # Open Camera stream
    self.videoStream = WebcamVideoStream().start()
    time.sleep(1.0)
    rospy.spin()
from imutils.video import WebcamVideoStream
import numpy as np
import argparse
import imutils
import time
import cv2

print("[INFO] starting video thread...")
wvs = WebcamVideoStream().start()
time.sleep(1.0)

# loop over frames from the video file stream
while True:
    frame = wvs.read()
    #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #frame = np.dstack([frame, frame, frame])
    cv2.imshow("Frame", frame)
    # quit on 'q' so the cleanup below is actually reached
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
    #fps.update()

# do a bit of cleanup
cv2.destroyAllWindows()
wvs.stop()
for i in range(detections.size(1)):
    j = 0
    while detections[0, i, j, 0] >= 0.6:
        pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
        cv2.rectangle(frame,
                      (int(pt[0]), int(pt[1])),
                      (int(pt[2]), int(pt[3])),
                      COLORS[i % 3], 2)
        cv2.putText(frame, labelmap[i - 1], (int(pt[0]), int(pt[1])),
                    FONT, 2, (255, 255, 255), 2, cv2.LINE_AA)
        j += 1
return frame
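This drawing loop appears to be the body of the predict() helper used in the next snippet, and it relies on several names defined elsewhere (detections, scale, COLORS, FONT, labelmap). Plausible supporting definitions, offered only as a sketch:

import cv2
import torch

FONT = cv2.FONT_HERSHEY_SIMPLEX
COLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]    # cycled by class index (i % 3)
labelmap = ["aeroplane", "bicycle", "bird", "boat", "bottle"]  # class names; the loop uses labelmap[i - 1]

height, width = 480, 640                              # frame size
scale = torch.Tensor([width, height, width, height])  # maps relative box coords to pixels
# detections is the detector's output tensor; the loop treats
# detections[0, i, j] as [score, x1, y1, x2, y2] for class i, detection j.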
# start video stream thread, allow buffer to fill
print("[INFO] starting threaded video stream...")
stream = WebcamVideoStream(src=0).start()  # default camera
time.sleep(1.0)

# start fps timer
fps = FPS().start()

# loop over frames from the video file stream
while True:
    # grab next frame
    frame = stream.read()
    key = cv2.waitKey(1) & 0xFF

    # update FPS counter
    fps.update()
    frame = predict(frame)

    # keybindings for display
    if key == ord('p'):  # pause
        while True:
            key2 = cv2.waitKey(1) & 0xFF
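            # (assumed continuation; the original snippet is cut off here)
            if key2 == ord('p'):  # a second 'p' resumes playback
                break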