break
frame = cv2.flip(frame, 1)
blurred = cv2.GaussianBlur(frame, (5, 5), 5)
# hsv = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
# HSV bounds for the green target colour
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
mask = cv2.inRange(hsv, greenLower, greenUpper)
mask = cv2.erode(mask, None, iterations=1)
mask = cv2.dilate(mask, None, iterations=1)
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# for each contour, check whether it approximates a four-sided polygon (the target)
for cnt in cnts:
    peri = cv2.arcLength(cnt, True)
    approx = cv2.approxPolyDP(cnt, 0.04 * peri, True)  # 0.05
    # print(len(approx))
    if len(approx) == 4:
        target = approx
        (x, y, w, h) = cv2.boundingRect(approx)
        # cv2.rectangle(hsv, (x, y), (x + w // 4, y + h // 4), (255, 255, 255), 13)
cv2.imshow('frame', hsv)
# frame = cv2.cvtColor(hsv, cv2.COLOR_BGR2HSV)
# out.write(hsv)
# break
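
# A minimal, self-contained sketch (not taken from the snippet above) of the pattern the
# snippets on this page keep repeating: cv2.findContours() returns 2 values on OpenCV 2.4
# and 4.x but 3 values on OpenCV 3.x, and imutils.grab_contours() simply picks the contour
# list in either case. An equivalent without imutils, under those assumptions:
import cv2
import numpy as np

def grab_contours_compat(result):
    # result is whatever cv2.findContours() returned for this OpenCV version
    return result[0] if len(result) == 2 else result[1]

demo_mask = np.zeros((100, 100), dtype=np.uint8)        # synthetic mask for illustration
cv2.rectangle(demo_mask, (20, 20), (80, 80), 255, -1)   # one white square
demo_cnts = grab_contours_compat(
    cv2.findContours(demo_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))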
# pad the image to ensure digits caught on the border of the image
# are retained
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.copyMakeBorder(gray, 8, 8, 8, 8,
                          cv2.BORDER_REPLICATE)
# threshold the image to reveal the digits
thresh = cv2.threshold(gray, 0, 255,
                       cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
# find contours in the image, keeping only the four largest
# ones
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:4]
# loop over the contours
for c in cnts:
    # compute the bounding box for the contour, then extract
    # the digit
    (x, y, w, h) = cv2.boundingRect(c)
    roi = gray[y - 5:y + h + 5, x - 5:x + w + 5]
    # display the character, making it large enough for us
    # to see, then wait for a keypress
    cv2.imshow("ROI", imutils.resize(roi, width=28))
    key = cv2.waitKey(0)
    # if the '`' key is pressed, then ignore the character
    if key == ord("`"):
        continue
edged = cv2.Canny(last_image, 50, 200, 255)
last_image = edged
if show_all_steps:
    cv2.imshow("Edged", edged)
if True:
    threshold_value = 127  # 127: dark conditions, 200: good light conditions
    _, thresh = cv2.threshold(last_image, threshold_value, 255, THRESHOLD_TYPE["BINARY"])
    if show_all_steps:
        cv2.imshow('Threshed', thresh)
    last_image = thresh
all_contours = cv2.findContours(last_image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
all_contours = imutils.grab_contours(all_contours)
print("Found {} contours".format(len(all_contours)))
if show_all_steps:
    cv2.drawContours(image, all_contours, -1, (0, 255, 0), 3)  # in green
    cv2.imshow('Contours', image)
digit_contours = []
# loop over the digit area candidates
for c in all_contours:
    # compute the bounding box of the contour
    (x, y, w, h) = cv2.boundingRect(c)
    print("Found Contours x:{} y:{} w:{} h:{}".format(x, y, w, h))
    # if the contour is sufficiently large, it is likely a digit
    if w >= 15 and h >= 50:  # <= That's the tricky part
        print("\tAdding Contours x:{} y:{} w:{} h:{}".format(x, y, w, h))
        digit_contours.append(c)
def tracker(image, lowthresh, highthresh):
    blurred = cv2.GaussianBlur(image, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lowthresh, highthresh)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    # the largest contour is assumed to be the tracked object; this raises if the mask
    # is empty or the contour has zero area
    c = max(cnts, key=cv2.contourArea)
    ((x, y), radius) = cv2.minEnclosingCircle(c)
    M = cv2.moments(c)
    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
    return x, y, center, radius, M, cnts
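
# A hedged usage sketch for tracker() above. The HSV bounds, camera index, and radius
# check are assumptions for illustration, not values from the original project; tracker()
# raises when nothing matches the mask, so the call is guarded here.
import cv2
import imutils

green_lower = (29, 86, 6)
green_upper = (64, 255, 255)
cap = cv2.VideoCapture(0)
while True:
    grabbed, frame = cap.read()
    if not grabbed:
        break
    frame = imutils.resize(frame, width=600)
    try:
        x, y, center, radius, M, cnts = tracker(frame, green_lower, green_upper)
    except (ValueError, ZeroDivisionError):
        continue  # nothing detected in this frame
    if radius > 10:
        cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
        cv2.circle(frame, center, 5, (0, 0, 255), -1)
    cv2.imshow("tracker", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()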
# convert the resized image to grayscale, blur it slightly,
# and threshold it
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
#thresh = cv2.threshold(blurred, 135, 255, cv2.THRESH_TOZERO)[1]
thresh = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY_INV, 7, 2)
cv2.imshow("Image", thresh)
cv2.waitKey(0)
# find contours in the thresholded image and initialize the
# shape detector
im2, contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_NONE)
print("{} {} {}".format(np.shape(im2), np.shape(contours), np.shape(hierarchy)))
cnts = imutils.grab_contours((im2, contours, hierarchy))
#print("{} {} {}".format(np.shape(cnts[0]), np.shape(cnts[1]), np.shape(cnts[2])))
"""
for idx in range(len(contours)):
print(np.shape(contours[idx]))
"""
cv2.drawContours(resized, contours, -1, (255,255,0), 3)
cv2.imshow("drawContours", resized)
cv2.waitKey(0)
changed_img = np.zeros(np.shape(im2), dtype=np.uint8)
for idx in range(len(contours)):
    if len(contours[idx]) > 60:
        # assumed completion: redraw the long contours on the blank canvas prepared above
        cv2.drawContours(changed_img, contours, idx, 255, -1)
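
# A hedged side-by-side of the two thresholding strategies the snippet above chooses
# between: a single global threshold versus cv2.adaptiveThreshold. Adaptive thresholding
# derives a per-pixel cut-off from the local blockSize x blockSize neighbourhood minus the
# constant C, which copes better with uneven lighting. Parameter values are illustrative.
import cv2

def threshold_both_ways(gray, global_value=135, block_size=7, c=2):
    fixed = cv2.threshold(gray, global_value, 255, cv2.THRESH_BINARY_INV)[1]
    adaptive = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                     cv2.THRESH_BINARY_INV, block_size, c)
    return fixed, adaptive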
def compare_ssim_debug(image_a, image_b, color=(255, 0, 0)):
"""
Args:
image_a, image_b: opencv image or PIL.Image
color: (r, g, b) eg: (255, 0, 0) for red
Refs:
https://www.pyimagesearch.com/2017/06/19/image-difference-with-opencv-and-python/
"""
ima, imb = conv2cv(image_a), conv2cv(image_b)
score, diff = compare_ssim(ima, imb, full=True)
diff = (diff * 255).astype('uint8')
_, thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cv2color = tuple(reversed(color))
im = ima.copy()
for c in cnts:
x, y, w, h = cv2.boundingRect(c)
cv2.rectangle(im, (x, y), (x+w, y+h), cv2color, 2)
# todo: show image
cv2pil(im).show()
return im
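
# A hedged usage sketch for compare_ssim_debug(). It assumes compare_ssim comes from
# scikit-image (newer releases: `from skimage.metrics import structural_similarity as
# compare_ssim`; older ones exposed skimage.measure.compare_ssim) and that the project
# helpers conv2cv()/cv2pil() convert between OpenCV and PIL images. File names below are
# illustrative.
import cv2

before = cv2.imread("before.png")
after = cv2.imread("after.png")
highlighted = compare_ssim_debug(before, after, color=(0, 255, 0))  # green boxes on diffs
cv2.imwrite("diff_boxes.png", highlighted)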
image = cv2.imread(args["image"])
resized = imutils.resize(image, width=1024)
ratio = image.shape[0] / float(resized.shape[0])
# convert the resized image to grayscale, blur it slightly,
# and threshold it
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
thresh = cv2.threshold(blurred, 170, 255, cv2.THRESH_BINARY)[1]
cv2.imshow("Image", thresh)
cv2.waitKey(0)
# find contours in the thresholded image and initialize the
# shape detector
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
print(np.shape(cnts))
sd = ShapeDetector()
# loop over the contours
for c in cnts:
    # compute the center of the contour, then detect the name of the
    # shape using only the contour
    M = cv2.moments(c)
    if M["m00"] == 0:
        continue
    cX = int((M["m10"] / M["m00"]) * ratio)
    cY = int((M["m01"] / M["m00"]) * ratio)
    shape = sd.detect(c)
    if shape is None or shape not in ["square", "rectangle"]:
        continue
    print(shape)
    # multiply the contour (x, y)-coordinates by the resize ratio
    # before drawing or measuring on the full-size image
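
# A hedged sketch of the step the trailing comment above refers to: the contour was found
# on the width-1024 copy, so its coordinates are multiplied by `ratio` before anything is
# drawn on the full-size image. The label and colour choices are illustrative.
import cv2

def draw_on_original(image, c, ratio, label):
    # scale the contour back up, then draw its outline and the detected shape name
    c = (c.astype("float") * ratio).astype("int32")
    cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
    x, y = c[0][0]
    cv2.putText(image, label, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (255, 255, 255), 2)
    return image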
def detect_symbol(self, avg_area=None):
""" Attempts to detect a symbol in self.roi
based on:
* https://gurus.pyimagesearch.com/lesson-sample-advanced-contour-properties/
* http://qtandopencv.blogspot.com/2015/11/analyze-tic-tac-toe-by-computer-vision.html
"""
imgcopy = self.roi.copy()
cnts = cv2.findContours(imgcopy, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
lSolidity = []
#if self.title == "mm":
# pdb.set_trace()
for (i, c) in enumerate(cnts):
# compute the area of the contour along with the bounding box
# to compute the aspect ratio
area = cv2.contourArea(c)
# if there are multiple contours detected, check if the detected contour is at
# least 6% of total area
# also ignore the contour if it is larger than 70% of total area or less than 6% of total area
ratio = area/self.area
if ((len(cnts) > 1 and i>=0 and (area < self.area*0.01)) or ratio > 0.70 or ratio < 0.06):
continue
(x, y, w, h) = cv2.boundingRect(c)
# compute the convex hull of the contour, then use the area of the
# original contour and the area of the convex hull to compute the
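
# A hedged sketch of the quantity the comment above is building toward: solidity, i.e.
# contour area divided by convex hull area. The referenced lessons use it to separate
# stroke-like symbols with concavities (low solidity, e.g. an X) from filled, convex blobs
# (high solidity, e.g. an O). The 0.9 cut-off below is illustrative only.
import cv2

def solidity_of(c):
    hull = cv2.convexHull(c)
    hull_area = cv2.contourArea(hull)
    if hull_area == 0:
        return 0.0
    return cv2.contourArea(c) / float(hull_area)

# e.g. inside the loop above: lSolidity.append(solidity_of(c)), then classify with
# something like: symbol = "O" if solidity_of(c) > 0.9 else "X"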
if "extra" in categories:
cv2.rectangle(roi, (410, 758), (1835, 847), color=(255, 255, 255), thickness=-1)
# preparing the ends of the interval of blue colors allowed, BGR format
lower_blue = numpy.array([132, 97, 66], dtype=numpy.uint8)
upper_blue = numpy.array([207, 142, 92], dtype=numpy.uint8)
# find the colors within the specified boundaries
mask = cv2.inRange(image, lower_blue, upper_blue)
# apply roi, result is a black and white image where the white rectangles are the options enabled
result = cv2.bitwise_and(roi, mask)
# obtain countours, needed to calculate the rectangles' positions
cnts = cv2.findContours(result, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = grab_contours(cnts)
# discard contours whose area is smaller than 190x45 = 8550 (i.e. not a sorting option)
cnts = list(filter(lambda x: cv2.contourArea(x) > 8550, cnts))
# loop over the contours and extract regions
regions = []
for c in cnts:
    # calculate the contour's perimeter
    perimeter = cv2.arcLength(c, True)
    # approximate the contour to a polygon with the specified precision
    approx = cv2.approxPolyDP(c, 0.04 * perimeter, True)
    if len(approx) == 4:
        # if approx is a rectangle, get its bounding box
        x, y, w, h = cv2.boundingRect(approx)
        # print values
        Logger.log_debug("Region x:{}, y:{}, w:{}, h:{}".format(x, y, w, h))
        # append to the regions list (assumed: the bounding box tuple)
        regions.append((x, y, w, h))
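
# A hedged sketch of one way the collected regions might be consumed (purely illustrative):
# cropping each enabled option out of the original screenshot with plain numpy slicing.
import cv2

crops = [image[y:y + h, x:x + w] for (x, y, w, h) in regions]
for idx, crop in enumerate(crops):
    cv2.imwrite("region_{}.png".format(idx), crop)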