from collections import defaultdict
from imutils.video import FPS
import cv2
import imagezmq
import traceback
# instantiate image_hub
image_hub = imagezmq.ImageHub()
image_count = 0
sender_image_counts = defaultdict(int) # dict for counts by sender
first_image = True
try:
    while True:  # receive images until Ctrl-C is pressed
        sent_from, image = image_hub.recv_image()
        if first_image:
            fps = FPS().start()  # start FPS timer after first image is received
            first_image = False
        fps.update()
        image_count += 1  # global count of all images received
        sender_image_counts[sent_from] += 1  # count images for each RPi name
        cv2.imshow(sent_from, image)  # display images, one window per sent_from
        cv2.waitKey(1)
        image_hub.send_reply(b"OK")  # REP reply
except (KeyboardInterrupt, SystemExit):
    pass  # Ctrl-C was pressed to end program; FPS stats computed below
except Exception as ex:
    print('Python error with no Exception handler:')
    print('Traceback error:', ex)
    traceback.print_exc()
finally:
    # stop the timer and display FPS information
    fps.stop()
    print()
    print('Total images received: {}'.format(image_count))
    for sender, count in sender_image_counts.items():
        print('{}: {} images'.format(sender, count))
    print('Elapsed time: {:.2f} seconds'.format(fps.elapsed()))
    print('Approximate FPS: {:.2f}'.format(fps.fps()))
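The receiver above is one half of imagezmq's REQ/REP pair. For context, a minimal sender sketch; the tcp address is an assumption (substitute the hub machine's IP), not part of the snippet above:

import socket
import time
import cv2
import imagezmq

# connect to the hub; the address below is a placeholder
sender = imagezmq.ImageSender(connect_to='tcp://127.0.0.1:5555')
rpi_name = socket.gethostname()  # name the hub uses to key its counts
cap = cv2.VideoCapture(0)
time.sleep(2.0)  # allow the camera sensor to warm up
while True:
    ret, image = cap.read()
    if not ret:
        break
    sender.send_image(rpi_name, image)  # blocks until the hub replies b"OK"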
# create a *threaded* video stream, allow the camera sensor to warm up,
# and start the FPS counter
vs = PiVideoStream(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT))
# Camera settings
vs.camera.shutter_speed = SHUTTER
vs.exposure = EXPOSURE
vs.camera.awb_mode = AWB_MODE
vs.camera.awb_gains = AWB_GAINS
# Start camera
vs.start()
time.sleep(2.0)
fps = FPS().start()
# loop over some frames... this time using the threaded stream
while True:
    # grab the frame from the threaded video stream
    frame = vs.read()
    # stop looping once the stream stops returning frames
    if frame is None:
        break
    # check to see if the frame should be displayed to our screen
    if args["display"] > 0:
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # break out of the loop on a key press
        if key == ord("q"):
            break
    # update the FPS counter
    fps.update()
frame_resize = cv2.resize(frame, None, fx=1 / args.downsample, fy=1 / args.downsample)
img_shape = frame_resize.shape
fast_style = FastStyle(args.checkpoint, img_shape, args.device)
if args.video_out is not None:
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # the concatenated (side-by-side) output is twice as wide
    if args.concat:
        shp = (int(2 * img_shape[1] * args.scale), int(img_shape[0] * args.scale))
    else:
        shp = (int(img_shape[1] * args.scale), int(img_shape[0] * args.scale))
    # cv2.VideoWriter expects the frame size as (width, height)
    out = cv2.VideoWriter(args.video_out, fourcc, args.fps, shp)
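For completeness, a hedged sketch of how such a writer is typically fed inside the frame loop; styled is a placeholder name for the stylized frame, not a name from the snippet:

if args.video_out is not None:
    if args.concat:
        # original and styled frames side by side, matching the doubled
        # width chosen for shp above
        out.write(cv2.resize(cv2.hconcat([frame, styled]), shp))
    else:
        out.write(cv2.resize(styled, shp))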
fps = FPS().start()
count = 0
while True:
    ret, frame = cap.read()
    if ret:
        if args.zoom > 1:
            # enlarge the frame, then center-crop back to the original size;
            # e.g. zoom=2 turns 480x640 into 960x1280, and the offsets
            # (240, 320) slice out the central 480x640 region
            o_h, o_w, _ = frame.shape
            frame = cv2.resize(frame, None, fx=args.zoom, fy=args.zoom)
            h, w, _ = frame.shape
            off_h, off_w = int((h - o_h) / 2), int((w - o_w) / 2)
            frame = frame[off_h:h - off_h, off_w:w - off_w, :]
        # resize image and detect face
        frame_resize = cv2.resize(frame, None, fx=1 / args.downsample, fy=1 / args.downsample)
infer_network.load_model(args.model, args.device, CPU_EXTENSION, num_requests=2)
current_inference, next_inference = 0, 1
# Get the input blob shape
in_n, in_c, in_h, in_w = infer_network.get_input_shape()
# Get the output blob name
_ = infer_network.get_output_name()
# Handle the input stream
cap = cv2.VideoCapture(args.input)
cap.open(args.input)
_, frame = cap.read()
fps = FPS().start()
# Process frames until the video ends or the process is exited
while cap.isOpened():
    # Read the next frame
    flag, frame = cap.read()
    if not flag:
        break
    fh = frame.shape[0]
    fw = frame.shape[1]
    key_pressed = cv2.waitKey(60)
    # Pre-process the frame into the NCHW layout the network expects
    image_resize = cv2.resize(frame, (in_w, in_h))
    image = image_resize.transpose((2, 0, 1))
    image = image.reshape(in_n, in_c, in_h, in_w)
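The same resize/transpose/reshape sequence can be factored into a small helper; a minimal sketch, assuming the shape values come from get_input_shape() as above:

import cv2

def preprocess(frame, in_n, in_c, in_h, in_w):
    # HxWxC (BGR) -> resized HxWxC -> CxHxW -> NxCxHxW blob
    image = cv2.resize(frame, (in_w, in_h))
    image = image.transpose((2, 0, 1))
    return image.reshape(in_n, in_c, in_h, in_w)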
import argparse
import imutils
import cv2
import timeit
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-n", "--num-frames", type=int, default=100,
    help="# of frames to loop over for FPS test")
ap.add_argument("-d", "--display", type=int, default=-1,
    help="whether or not frames should be displayed")
args = vars(ap.parse_args())
# grab a pointer to the video stream and initialize the FPS counter
print("[INFO] sampling frames from webcam...")
stream = cv2.VideoCapture(1)
fps = FPS().start()
tic = timeit.default_timer()
font = cv2.FONT_HERSHEY_SIMPLEX
# loop over some frames
while fps._numFrames < args["num_frames"]:
    # grab the frame from the stream and resize it to have a maximum
    # width of 400 pixels
    (grabbed, frame) = stream.read()
    frame = imutils.resize(frame, width=400)
    # check to see if the frame should be displayed to our screen
    if args["display"] > 0:
        # overlay the time elapsed since sampling started
        toc = timeit.default_timer()
        cv2.putText(frame, "%0.3f" % (toc - tic), (50, 200), font, 2,
            (255, 255, 255), 4, cv2.LINE_AA)
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
    # update the FPS counter so the loop can terminate
    fps.update()
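Once the loop exits, the usual imutils epilogue stops the timer and reports throughput; a minimal sketch using the FPS API:

# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# clean up
stream.release()
cv2.destroyAllWindows()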
def calculate_fps(self, frames_no=100):
    fps = FPS().start()
    # don't display the window while benchmarking
    if self.debug:
        self.debug = not self.debug
    for i in range(0, frames_no):
        self.where_lane_be()
        fps.update()
    fps.stop()
    # restore the display window afterwards
    if not self.debug:
        self.debug = not self.debug
    print('Time taken: {:.2f}'.format(fps.elapsed()))
# load the pre-trained EAST text detector
print("[INFO] loading EAST text detector...")
net = cv2.dnn.readNet(args["east"])
# if a video path was not supplied, grab the reference to the webcam
if not args.get("video", False):
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(1.0)
# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])
# start the FPS throughput estimator
fps = FPS().start()
# loop over frames from the video stream
while True:
    # grab the current frame, then handle it depending on whether we are
    # using a VideoStream or a VideoCapture object
    frame = vs.read()
    frame = frame[1] if args.get("video", False) else frame
    # check to see if we have reached the end of the stream
    if frame is None:
        break
    # resize the frame, maintaining the aspect ratio
    frame = imutils.resize(frame, width=1000)
    orig = frame.copy()
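    # a hedged sketch of the conventional next step (not in this snippet):
    # form a blob and run the EAST forward pass; the 320x320 size, mean
    # values, and output layer names are the standard choices for the
    # frozen EAST graph, assumed here rather than taken from the snippet
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (320, 320)), 1.0,
        (320, 320), (123.68, 116.78, 103.94), swapRB=True, crop=False)
    net.setInput(blob)
    (scores, geometry) = net.forward([
        "feature_fusion/Conv_7/Sigmoid",  # text/no-text confidence scores
        "feature_fusion/concat_3"])       # bounding-box geometry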
def main():
    os.makedirs('original', exist_ok=True)
    os.makedirs('landmarks', exist_ok=True)
    cap = cv2.VideoCapture(args.filename)
    fps = video.FPS().start()
    count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        # stop once the video has been fully consumed
        if not ret:
            break
        frame_resize = cv2.resize(frame, None, fx=1 / DOWNSAMPLE_RATIO, fy=1 / DOWNSAMPLE_RATIO)
        gray = cv2.cvtColor(frame_resize, cv2.COLOR_BGR2GRAY)
        faces = detector(gray, 1)
        black_image = np.zeros(frame.shape, np.uint8)
        t = time.time()
        # proceed only if exactly one face is detected
        if len(faces) == 1:
            for face in faces:
                detected_landmarks = predictor(gray, face).parts()
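                # a minimal sketch (not from the original snippet): render the
                # landmarks onto black_image, scaling each point back up by
                # DOWNSAMPLE_RATIO since detection ran on the downsampled
                # frame; the radius and white colour are arbitrary choices
                for p in detected_landmarks:
                    x = int(p.x * DOWNSAMPLE_RATIO)
                    y = int(p.y * DOWNSAMPLE_RATIO)
                    cv2.circle(black_image, (x, y), 2, (255, 255, 255), -1)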
# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# initialize the video stream, allow the camera sensor to warm up,
# and initialize the FPS counter
print("[INFO] starting video stream...")
url = 'd.mp4'
# vs = cv2.VideoCapture('http://d3tj01z94i74qz.cloudfront.net/cam0/videos/cam0_30_fps.mp4')
# vs = cv2.VideoCapture(url)
vs = VideoStream(url).start()
time.sleep(1.0)
fps = FPS().start()
# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    if frame is None:
        print("End of stream..")
        break
    # ret, frame = vs.read()
    frame = imutils.resize(frame, width=400)
    # grab the frame dimensions and convert it to a blob
    (h, w) = frame.shape[:2]
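    # a hedged sketch of the usual continuation (not in this snippet): for a
    # 300x300 Caffe MobileNet-SSD, the standard scale factor is 0.007843 and
    # the standard mean is 127.5, assumed here
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
        0.007843, (300, 300), 127.5)
    # pass the blob through the network and obtain the detections
    net.setInput(blob)
    detections = net.forward()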