How to use the imutils.is_cv2 function in imutils

To help you get started, we’ve selected a few imutils examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github housekorea / pfc / cv / pfc_cv_measurer.py View on Github external
def find_contours(self):
		# Find external contours in the pre-processed (eroded) image and walk
		# over each detected object.  NOTE(review): this snippet is truncated;
		# the for-loop body continues past what is visible here.
		copy_image = self.IMAGES['ERODE_IMG'].copy()
		f_contours = cv2.findContours(copy_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

		# findContours returns (cnts, hierarchy) on OpenCV 2.x but
		# (img, cnts, hierarchy) on 3.x.  NOTE(review): this indexing breaks on
		# OpenCV 4.x (2-tuple again); imutils.grab_contours(f_contours) would
		# cover all three versions.
		f_contours = f_contours[0] if imutils.is_cv2() else f_contours[1]

		# No contours at all: save the intermediate images for debugging and
		# abort the process entirely.
		if len(f_contours) == 0:
			# print("Print f_contour : " + str(len(f_contours)))
			self.save_images(debug_save=True)
			sys.exit()


		# Sort the contours left-to-right (imutils.contours helper).
		(f_contours,_) = contours.sort_contours(f_contours)

		detect_object_cnt = 0
		object_stack = []
		for (i,c) in enumerate(f_contours):
			# Minimum-area rotated bounding box per contour; the BoxPoints API
			# moved from cv2.cv.BoxPoints (2.x) to cv2.boxPoints (3.x+).
			box = cv2.minAreaRect(c)
			box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
			box = self.get_order_points(box)
github mythrex / OMR-Scanner / server / bin / module / grader.py View on Github external
# apply perspective transform to the shape
paper = four_point_transform(image, docCnts.reshape(4, 2))
warped = four_point_transform(gray, docCnts.reshape(4, 2))

# Binarise the warped image.  Adaptive thresholding is used instead of
# Otsu thresholding so the result is robust to uneven lighting.
thresh = cv2.adaptiveThreshold(
    warped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)

# find contours in thresholded image
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)

# cv2.findContours returns (cnts, hierarchy) on OpenCV 2.x/4.x but
# (img, cnts, hierarchy) on 3.x.  The old `cnts[0] if imutils.is_cv2()
# else cnts[1]` indexing broke on 4.x; grab_contours handles all versions.
cnts = imutils.grab_contours(cnts)

# A correctly segmented answer sheet yields many contours; too few means
# the paper outline was not detected properly.
try:
    if len(cnts) <= 240:
        raise ge.PaperContourError(
            'The contour of paper is not detected properly. May be only one external contour in the paper is detected.')
except ge.PaperContourError as e:
    sys.exit(e.message)

# find question contours
questions = gu.find_questions(cnts, paper)

# We are now sorting from left to right by taking a batch of 16 contours
# that are basically a whole row and then sorting them from increasing order of x
questionCnts = gu.find_ques_cnts(questions)
github Johk3 / Rotmg-Bot / trader.py View on Github external
# the shapes can be approximated better
            # Load the screenshot and shrink it to width 300 so contour shapes
            # are smoother; keep the scale ratio to map coordinates back later.
            image = cv2.imread(imgpath)
            resized = imutils.resize(image, width=300)
            ratio = image.shape[0] / float(resized.shape[0])

            # blur the resized image slightly, then convert it to both
            # grayscale and the L*a*b* color spaces
            blurred = cv2.GaussianBlur(resized, (5, 5), 0)
            gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
            lab = cv2.cvtColor(blurred, cv2.COLOR_BGR2LAB)
            thresh = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY)[1]

            # find contours in the thresholded image
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            # NOTE(review): cnts[1] is only correct on OpenCV 3.x; on 2.x/4.x
            # findContours returns a 2-tuple.  imutils.grab_contours(cnts)
            # handles every version.
            cnts = cnts[0] if imutils.is_cv2() else cnts[1]

            # initialize the shape detector and color labeler
            cl = ColorLabeler()
            i = 0
            for c in cnts:
                # compute the center of the contour
                # (only contours with index 1..8 are processed here)
                if i >= 1 and i <= 8:
                    # NOTE(review): M["m00"] can be zero for a degenerate
                    # contour and would raise ZeroDivisionError -- confirm
                    # the inputs rule that out.
                    M = cv2.moments(c)
                    cX = int((M["m10"] / M["m00"]) * ratio)
                    cY = int((M["m01"] / M["m00"]) * ratio)

                    # detect the shape of the contour and label the color
                    color = cl.label(lab, c, True)

                    # multiply the contour (x, y)-coordinates by the resize ratio,
                    # then draw the contours and the name of the shape and labeled
github Ubotica / telloCV / tracker.py View on Github external
# color space
        # Blur the frame, then convert to HSV so the tracked color can be
        # segmented with a simple inRange threshold.
        blurred = cv2.GaussianBlur(self.frame, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

        # construct a mask for the color then perform
        # a series of dilations and erosions to remove any small
        # blobs left in the mask
        mask = cv2.inRange(hsv, self.color_lower, self.color_upper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)

        # find contours in the mask and initialize the current
        # (x, y) center of the ball
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        # NOTE(review): cnts[1] assumes the OpenCV 3.x 3-tuple return; on
        # 2.x/4.x findContours returns a 2-tuple.  Prefer
        # imutils.grab_contours(cnts).
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        center = None

        # only proceed if at least one contour was found
        if len(cnts) > 0:
            # find the largest contour in the mask, then use
            # it to compute the minimum enclosing circle and
            # centroid
            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            # NOTE(review): raises ZeroDivisionError when M["m00"] == 0
            # (degenerate contour) -- confirm this cannot happen here.
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

            # only proceed if the radius meets a minimum size
            if radius > 10:
                # draw the circle and centroid on the frame,
                # then update the list of tracked points
github jrosebr1 / imutils / demos / opencv_versions.py View on Github external
# Demo: query the installed OpenCV major version via the imutils helpers.
# Fix 1: `import imutils` was missing although every call below uses it.
import imutils
import cv2

# print the current OpenCV version on your system
print("Your OpenCV version: {}".format(cv2.__version__))

# check to see if you are using OpenCV 2.X
print("Are you using OpenCV 2.X? {}".format(imutils.is_cv2()))

# check to see if you are using OpenCV 3.X
print("Are you using OpenCV 3.X? {}".format(imutils.is_cv3(or_better=False)))

# check to see if you are using OpenCV 4.X
print("Are you using OpenCV 4.X? {}".format(imutils.is_cv4(or_better=False)))

# check to see if you are using *at least* OpenCV 2.X
print("Are you using at least OpenCV 2.X? {}".format(imutils.is_cv2(or_better=True)))

# check to see if you are using *at least* OpenCV 3.X
print("Are you using at least OpenCV 3.X? {}".format(imutils.is_cv3(or_better=True)))

# check to see if you are using *at least* OpenCV 4.X
# Fix 2: the original passed or_better=False here, which answers
# "exactly 4.X" while the message claims "at least 4.X" (compare the
# is_cv2/is_cv3 "at least" checks above, which pass True).
print("Are you using at least OpenCV 4.X? {}".format(imutils.is_cv4(or_better=True)))

# should throw a deprecation warning
print("Checking for OpenCV 3: {}".format(imutils.check_opencv_version("3")))
github OmalPerera / Human-detection-system-with-raspberry-Pi / pi_surveillance.py View on Github external
    continue

    # accumulate the weighted average between the current frame and
    # previous frames, then compute the difference between the current
    # frame and running average
    cv2.accumulateWeighted(gray, avg, 0.5)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

    # threshold the delta image, dilate the thresholded image to fill
    # in holes, then find contours on thresholded image
    thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
        cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    # NOTE(review): cnts[1] matches the OpenCV 3.x layout only; 2.x/4.x
    # return a 2-tuple.  imutils.grab_contours(cnts) works on all versions.
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]

    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < conf["min_area"]:
            continue

        # draw the original bounding boxes
        # NOTE(review): `rects` is read here but only (re)assigned further
        # down -- presumably populated earlier in the full file; verify it
        # holds the current frame's detections before drawing.
        for (x, y, w, h) in rects:
            cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)

        # apply non-maxima suppression to the bounding boxes using a
        # fairly large overlap threshold to try to maintain overlapping
        # boxes that are still people
        rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
        pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
github grasslandnetwork / node_lite / multi_object_tracking.py View on Github external
# accumulate the weighted average between the current frame and
                    # previous frames, then compute the difference between the current
                    # frame and running average
                    cv2.accumulateWeighted(gray, avg, 0.5)
                    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

                    # 5x5 square structuring element used for the dilation below
                    kernel = np.ones((5,5),np.uint8)
                    # threshold the delta image, dilate the thresholded image to fill
                    # in holes, then find contours on thresholded image
                    thresh = cv2.threshold(frameDelta, delta_thresh, 255,
                            cv2.THRESH_BINARY)[1]
                    #thresh = cv2.dilate(thresh, None, iterations=2)
                    thresh = cv2.dilate(thresh, kernel, iterations=2)
                    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
                    # NOTE(review): cnts[1] assumes OpenCV 3.x; 2.x/4.x return
                    # 2-tuples -- prefer imutils.grab_contours(cnts).
                    cnts = cnts[0] if imutils.is_cv2() else cnts[1]



                # reset the rectangle list when tracking centroids and nothing
                # was detected in this frame
                if track_centroids and frame_dict.get("detected") == 0:
                    rects = []

                # loop over the contours
                for c in cnts:
                    # if the contour is too small, ignore it
                    if cv2.contourArea(c) < min_area:
                        continue

                    (x, y, w, h) = cv2.boundingRect(c)

                    if display:
                        # compute the bounding box for the contour, draw it on the frame,
github ghostbbbmt / Traffic-Sign-Detection / resources / main_old.py View on Github external
def findContour(image):
    """Return the external contours of a binary/edge image.

    Wraps cv2.findContours so the caller always receives just the contour
    list, regardless of OpenCV version: 2.x and 4.x return
    (contours, hierarchy) while 3.x returns (image, contours, hierarchy).
    """
    cnts = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # grab_contours extracts the contour list from whichever tuple shape
    # findContours produced; the old `cnts[0] if imutils.is_cv2() else
    # cnts[1]` indexing broke on OpenCV 4.x.
    cnts = imutils.grab_contours(cnts)
    return cnts
github StatueFungus / autonomous_driving / src / detectionlib / visualizer.py View on Github external
'''
            Zeichnet einen Text auf das Bild (self.image).

            Parameter
            ---------
            text : String
                Anzuzeigender Text
            size : int
                Groesse des Textes
            color : Tupel
                Farbe des Textes >> (255,0,0)
            position : Tupel
                Position des Textes >> (x,y)

        '''
	if imutils.is_cv2():
		# OpenCV 2.x: the anti-aliased line-type constant is cv2.CV_AA.
		cv2.putText(self.image, text,position, cv2.FONT_HERSHEY_COMPLEX, size, color, 2, cv2.CV_AA)
	elif imutils.is_cv3():
		# OpenCV 3.x renamed the constant to cv2.LINE_AA.
		# (Fix: the original mixed tab- and space-indentation across these
		# branches, which raises TabError under Python 3.)
		cv2.putText(self.image, text,position, cv2.FONT_HERSHEY_COMPLEX, size, color, 2, cv2.LINE_AA)
	# NOTE(review): OpenCV 4.x matches neither branch, so nothing is drawn
	# -- confirm whether a final else using cv2.LINE_AA is intended.
github ghostbbbmt / Traffic-Sign-Detection / resources / signRecognition.py View on Github external
# pre-process the image by resizing it, converting it to
        # grayscale, blurring it, and computing an edge map
        image = imutils.resize(image, height=500)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(blurred, 50, 200, 255)

        # find contours in the edge map, then sort them by their
        # size in descending order
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
        # The is_cv2() and is_cv3() are simple functions that can be used to
        # automatically determine the OpenCV version of the current environment
        # cnts[0] or cnts[1] hold contours
        # NOTE(review): this indexing breaks on OpenCV 4.x (2-tuple return);
        # imutils.grab_contours(cnts) handles 2.x/3.x/4.x uniformly.
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        displayCnt = None

        # loop over the contours
        for c in cnts:
                # approximate the contour
                peri = cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, 0.02 * peri, True)

                # if the contour has four vertices, then we have found
                # the thermostat display
                if len(approx) == 4:
                        displayCnt = approx
                        break

        # NOTE(review): displayCnt remains None when no 4-vertex contour
        # exists -- downstream use should guard against that case.
        # extract the sign borders, apply a perspective transform

imutils

A series of convenience functions that make basic image processing operations — such as translation, rotation, resizing, skeletonization, displaying Matplotlib images, sorting contours, detecting edges, and much more — easier with OpenCV on both Python 2.7 and Python 3.

MIT
Latest version published 4 years ago

Package Health Score

64 / 100
Full package analysis