# imports and camera setup assumed by this snippet; the (640, 480)
# resolution is taken from the PiRGBArray call below
from picamera.array import PiRGBArray
from picamera import PiCamera
from imutils.video import FPS
import imutils
import time
import cv2

camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 20
rawCapture = PiRGBArray(camera, size=(640, 480))
stream = camera.capture_continuous(rawCapture, format="bgr",
use_video_port=True)
# allow the camera to warmup and start the FPS counter
print("[INFO] sampling frames from `picamera` module...")
time.sleep(2.0)
fps = FPS().start()
# loop over some frames
for (i, f) in enumerate(stream):
# grab the frame from the stream and resize it to have a maximum
# width of 400 pixels
frame = f.array
frame = imutils.resize(frame, width=400)
# check to see if the frame should be displayed to our screen
if args["display"] > 0:
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# clear the stream in preparation for the next frame and update
# the FPS counter
rawCapture.truncate(0)
fps.update()
# check to see if the desired number of frames have been reached
if i == args["num_frames"]:
break
# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
vs = VideoStream(src=0).start()
# vs = VideoStream(usePiCamera=True).start()
fileStream = False
time.sleep(1.0)
# loop over the frames from the video stream
while True:
# if this is a video file stream, then we need to check
# if there are any more frames left in the buffer to process
if fileStream and not vs.more():
break
# grab the frame from the threaded video file stream, resize
# it, and convert it to grayscale
frame = vs.read()
frame = imutils.resize(frame, width=450)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale frame
rects = detector(gray, 0)
# loop over the face detections
for rect in rects:
# determine the facial landmarks for the face region, then
# convert the facial landmark (x, y)-coordinates to a NumPy
# array
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
# extract the left and right eye coordinates, then use the
# coordinates to compute the eye aspect ratio for both eyes
leftEye = shape[lStart:lEnd]
rightEye = shape[rStart:rEnd]
leftEAR = eye_aspect_ratio(leftEye)
rightEAR = eye_aspect_ratio(rightEye)
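# A minimal sketch of the eye_aspect_ratio() helper used above, following the
# usual landmark-based definition (the two vertical eye distances over the
# horizontal one); scipy is an assumed dependency here, and (lStart, lEnd) /
# (rStart, rEnd) typically come from face_utils.FACIAL_LANDMARKS_IDXS.
from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    # distances between the two pairs of vertical eye landmarks
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # distance between the horizontal eye corners
    C = dist.euclidean(eye[0], eye[3])
    # the ratio falls toward zero as the eye closes
    return (A + B) / (2.0 * C)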
self._camConnectionDts = self.utcNow()
ret, current_frame = self.cap.read()
# if the current frame can't be read, skip to the next loop iteration
if (not ret) or (current_frame is None):  # the connection broke, or the stream came to an end
self.logger.warning("bad frame")
bad_frames += 1
continue
else:
bad_frames = 0
if self.scaleFrameTo is not None:
current_frame = imutils.resize(current_frame, width=self.scaleFrameTo[0], height=self.scaleFrameTo[1])
# get timestamp of the frame
instant = time.time()
frameHeight, frameWidth = current_frame.shape[:2]
if self.camFps is None:
self.camFps = self.cap.get(cv.CAP_PROP_FPS)
self.logger.info("FPS = {}".format(self.camFps))
# adding frame to pre-recording buffer
if self.preAlarmRecordingSecondsQty > 0:
self._addPreAlarmFrame(current_frame)
if emptyFrame is None:
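# A hypothetical sketch of the _addPreAlarmFrame() helper called above,
# assuming a ring buffer sized from the camera FPS and the configured
# pre-alarm window; deque and every name not shown above are assumptions.
from collections import deque

def _addPreAlarmFrame(self, frame):
    # lazily create a buffer holding ~preAlarmRecordingSecondsQty seconds
    if getattr(self, "_preAlarmBuffer", None) is None:
        size = int((self.camFps or 25) * self.preAlarmRecordingSecondsQty)
        self._preAlarmBuffer = deque(maxlen=max(size, 1))
    # a bounded deque discards the oldest frame automatically
    self._preAlarmBuffer.append(frame)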
redUpper = (179, 255, 255)
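# the remaining HSV bounds referenced below are not part of this snippet;
# these values are illustrative assumptions to tune for your camera and
# lighting (OpenCV hue runs 0-179)
redLower = (160, 100, 100)
blueLower, blueUpper = (100, 100, 100), (130, 255, 255)
greenLower, greenUpper = (40, 100, 100), (80, 255, 255)
yellowLower, yellowUpper = (20, 100, 100), (35, 255, 255)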
pts = deque(maxlen=args["buffer"])
# grab the reference to the webcam
camera = cv2.VideoCapture(0)
# keep looping
while True:
# grab the current frame
(grabbed, frame) = camera.read()
# resize the frame, blur it, and convert
# it to the HSV color space
frame = imutils.resize(frame, width=600)
blurred = cv2.GaussianBlur(frame, (11, 11), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
# create a mask for each tracked color and then
# perform erosions and dilations to make the
# tracking smoother
mask_blue = cv2.inRange(hsv, blueLower, blueUpper)
mask_green = cv2.inRange(hsv, greenLower, greenUpper)
mask_yellow = cv2.inRange(hsv, yellowLower, yellowUpper)
mask_red = cv2.inRange(hsv, redLower, redUpper)
# combine the masks with bitwise OR so overlapping colors can't wrap around uint8
mask_total = cv2.bitwise_or(cv2.bitwise_or(mask_blue, mask_green), cv2.bitwise_or(mask_yellow, mask_red))
mask_total = cv2.erode(mask_total, None, iterations=2)
mask_total = cv2.dilate(mask_total, None, iterations=2)
firstFrame = None
# loop over the frames of the video
while True:
# grab the current frame and initialize the occupied/unoccupied
# text
(grabbed, frame) = camera.read()
text = "Unoccupied"
# if the frame could not be grabbed, then we have reached the end
# of the video
if not grabbed:
break
# resize the frame, convert it to grayscale, and blur it
frame = imutils.resize(frame, width=600)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
# if the first frame is None, initialize it
if firstFrame is None:
firstFrame = gray
continue
# compute the absolute difference between the current frame and
# first frame
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
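# The comments above promise contour finding; a minimal sketch of the usual
# next step, with imutils.grab_contours() absorbing the OpenCV 3/4
# return-signature difference (the 500 px^2 minimum area is an assumption).
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
for c in cnts:
    # ignore small contours that are likely sensor noise
    if cv2.contourArea(c) < 500:
        continue
    # a sufficiently large region changed, so mark the frame occupied
    text = "Occupied"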
image_gray = cv2.cvtColor(image_resized, cv2.COLOR_BGR2GRAY)
# Do thresholding
image_thresh = image_gray.copy()  # copy so thresholding doesn't overwrite the grayscale image
T = mahotas.thresholding.otsu(image_gray)  # find an optimal threshold value T from the image
image_thresh[image_thresh > T] = 255  # pixels brighter than T become white
image_thresh[image_thresh <= T] = 0  # and the remaining pixels become black
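# Equivalent without the mahotas dependency: OpenCV's THRESH_OTSU flag
# chooses the threshold automatically (returned as the first value).
T_cv, image_thresh_cv = cv2.threshold(image_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)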
print("[INFO] search took: {:.2f}s".format(sr.search_time))
# initialize the results montage
montage = ResultsMontage((240, 320), 5, 20)
# loop over the individual results
for (i, (score, resultID, resultIdx)) in enumerate(sr.results):
# load the result image and display it
print("[RESULT] {result_num}. {result} - {score:.2f}".format(result_num=i + 1,
result=resultID, score=score))
result = cv2.imread("{}/{}".format(args["dataset"], resultID))
montage.addResult(result, text="#{}".format(i + 1),
highlight=resultID in queryRelevant)
# show the output image of results
cv2.imshow("Results", imutils.resize(montage.montage, height=700))
cv2.waitKey(0)
searcher.finish()
log.error('weather_image_matching: %s appears to be corrupted' % str(url_img_name))
return 0
screenshot_img = cv2.imread(screenshot_name, 3)
if screenshot_img is None:
log.error('weather_image_matching: %s appears to be corrupted' % str(screenshot_name))
return 0
height, width, _ = weather_icon.shape
fort_img = imutils.resize(screenshot_img, width=int(screenshot_img.shape[1] * 2))
height_f, width_f, _ = screenshot_img.shape
# integer division keeps the slice indices ints under Python 3
screenshot_img = screenshot_img[0:height_f // 7, 0:width_f]
resized = imutils.resize(weather_icon, width=int(weather_icon.shape[1] * 1))
crop = cv2.Canny(resized, 100, 200)
if crop.mean() == 255 or crop.mean() == 0:
return 0
(tH, tW) = crop.shape[:2]
screenshot_img = cv2.blur(screenshot_img, (3, 3))
screenshot_img = cv2.Canny(screenshot_img, 50, 100)
found = None
for scale in np.linspace(0.2, 1, 5)[::-1]:
resized = imutils.resize(screenshot_img, width=int(screenshot_img.shape[1] * scale))
r = screenshot_img.shape[1] / float(resized.shape[1])
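# A sketch of how such a multi-scale loop typically continues, assuming
# normalized correlation template matching over the Canny edge maps;
# `found` is the best-match bookkeeping variable initialized above.
if resized.shape[0] < tH or resized.shape[1] < tW:
    break
result = cv2.matchTemplate(resized, crop, cv2.TM_CCOEFF_NORMED)
(_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)
# keep the highest correlation score seen across all scales
if found is None or maxVal > found[0]:
    found = (maxVal, maxLoc, r)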
def detect(self, frame):
# Resizing for speed and better accuracy
self.original_shape = frame.shape
frame = imutils.resize(frame,
width=min(400, frame.shape[1]))
self.resized_shape = frame.shape
# Detection
rects, _ = self.hog.detectMultiScale(
frame, winStride=(4, 4),
padding=(8, 8), scale=1.05)
# Convert dets to xmin,ymin,xmax,ymax format
rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
# Non-Max Suppression (xmin, ymin, xmax, ymax)
self.detects = utils.nms(rects=rects, overlapThresh=0.65)
self.scale_detections()
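# A hypothetical sketch of scale_detections(), assuming it maps boxes from
# the resized frame back to the original resolution using the shapes saved
# above; the attribute name `scaled_detects` is an assumption.
def scale_detections(self):
    # ratios between the original and resized frame dimensions
    ry = self.original_shape[0] / float(self.resized_shape[0])
    rx = self.original_shape[1] / float(self.resized_shape[1])
    self.scaled_detects = np.array(
        [[int(x1 * rx), int(y1 * ry), int(x2 * rx), int(y2 * ry)]
         for (x1, y1, x2, y2) in self.detects])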
print("[INFO] search took: {:.2f}s".format(sr.search_time))
# initialize the results montage
montage = ResultsMontage((240, 320), 5, 20)
# loop over the individual results
for (i, (score, resultID, resultIdx)) in enumerate(sr.results):
# load the result image and display it
print("[RESULT] {result_num}. {result} - {score:.2f}".format(result_num=i + 1,
result=resultID, score=score))
result = cv2.imread("{}/{}".format(args["dataset"], resultID))
montage.addResult(result, text="#{}".format(i + 1),
highlight=resultID in queryRelevant)
# show the output image of results
cv2.imshow("Results", imutils.resize(montage.montage, height=700))
cv2.waitKey(0)
searcher.finish()