How to use imutils - 10 common examples

To help you get started, we've selected a few imutils examples based on popular ways the library is used in public projects.
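
Before diving into the project snippets, here is a minimal, self-contained sketch of the core imutils helpers they rely on (the image path is a placeholder for your own file):

import cv2
import imutils

# load an image from disk (placeholder path)
image = cv2.imread("example.jpg")

# resize to a width of 400 pixels, preserving the aspect ratio
resized = imutils.resize(image, width=400)

# rotate 90 degrees, growing the canvas so nothing is clipped
rotated = imutils.rotate_bound(image, 90)

# shift 25 pixels right and 50 pixels down
shifted = imutils.translate(image, 25, 50)

cv2.imshow("Resized", resized)
cv2.waitKey(0)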

github hj3yoo / mtg_card_detector / transform_data.py (View on GitHub)
            card_x = int(card.x + 0.5)
            card_y = int(card.y + 0.5)

            # Scale & rotate card image
            img_card = cv2.resize(card.img, (int(len(card.img[0]) * card.scale), int(len(card.img) * card.scale)))
            # Add a random glaring on individual card - it happens frequently in real life as MTG cards can reflect
            # the lights very well.
            if aug is not None:
                seq = iaa.Sequential([
                    iaa.SimplexNoiseAlpha(first=iaa.Add(random.randrange(128)), size_px_max=[1, 3],
                                          upscale_method="cubic"),  # Lighting
                ])
                img_card = seq.augment_image(img_card)
            mask_scale = cv2.resize(card_mask, (int(len(card_mask[0]) * card.scale), int(len(card_mask) * card.scale)))
            img_mask = cv2.bitwise_and(img_card, mask_scale)
            img_rotate = imutils.rotate_bound(img_mask, card.theta / math.pi * 180)
            
            # Calculate the position of the card image in relation to the background
            # Crop the card image if it's out of boundary
            card_w = len(img_rotate[0])
            card_h = len(img_rotate)
            card_crop_x1 = max(0, card_w // 2 - card_x)
            card_crop_x2 = min(card_w, card_w // 2 + len(img_result[0]) - card_x)
            card_crop_y1 = max(0, card_h // 2 - card_y)
            card_crop_y2 = min(card_h, card_h // 2 + len(img_result) - card_y)
            img_card_crop = img_rotate[card_crop_y1:card_crop_y2, card_crop_x1:card_crop_x2]

            # Calculate the position of the corresponding area in the background
            bg_crop_x1 = max(0, card_x - (card_w // 2))
            bg_crop_x2 = min(len(img_result[0]), int(card_x + (card_w / 2) + 0.5))
            bg_crop_y1 = max(0, card_y - (card_h // 2))
            bg_crop_y2 = min(len(img_result), int(card_y + (card_h / 2) + 0.5))
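
The imutils call doing the heavy lifting above is rotate_bound, which expands the output canvas so the rotated card is never clipped; note the snippet converts card.theta from radians to degrees first. A minimal sketch of the same pattern (the path and angle are illustrative):

import math
import cv2
import imutils

img = cv2.imread("card.png")  # placeholder path
theta = math.pi / 6           # rotation angle in radians, as in the snippet
rotated = imutils.rotate_bound(img, theta / math.pi * 180)

# the output canvas is larger than the input, so no corner is lost
print(img.shape, "->", rotated.shape)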
github dloperab / PyImageSearch-CV-DL-CrashCourse / 02-opencv-tutorial / opencv_tutorial_01.py (View on GitHub)
# let's rotate an image 45 degrees clockwise using OpenCV by first
# computing the image center, then constructing the rotation matrix,
# and then finally applying the affine warp
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, -45, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow("OpenCV Rotation", rotated)
cv2.waitKey(0)

# rotation can also be easily accomplished via imutils with less code
rotated = imutils.rotate(image, -45)
cv2.imshow("Imutils Rotation", rotated)
cv2.waitKey(0)

# OpenCV doesn't "care" if our rotated image is clipped after rotation
# so we can instead use another imutils convenience function to help us out
rotated = imutils.rotate_bound(image, 45)
cv2.imshow("Imutils Bound Rotation", rotated)
cv2.waitKey(0)

# apply a Gaussian blur with an 11x11 kernel to the image to smooth it,
# useful when reducing high frequency noise
blurred = cv2.GaussianBlur(image, (11, 11), 0)
cv2.imshow("Blurred", blurred)
cv2.waitKey(0)

# draw a 2px thick red rectangle surrounding the face
output = image.copy()
cv2.rectangle(output, (320, 60), (420, 160), (0, 0, 255), 2)
cv2.imshow("Rectangle", output)
cv2.waitKey(0)

# draw a blue 20px (filled in) circle on the image centered at
# x=300,y=150
output = image.copy()
cv2.circle(output, (300, 150), 20, (255, 0, 0), -1)
cv2.imshow("Circle", output)
cv2.waitKey(0)
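
One subtlety worth knowing: imutils.rotate follows OpenCV's convention, where positive angles rotate counter-clockwise (hence the -45 above for a clockwise turn), while imutils.rotate_bound treats positive angles as clockwise and grows the canvas to avoid clipping. A small sketch (placeholder image path):

import cv2
import imutils

image = cv2.imread("example.jpg")  # placeholder path

ccw = imutils.rotate(image, 45)       # counter-clockwise, corners may be clipped
cw = imutils.rotate_bound(image, 45)  # clockwise, canvas grows so nothing is clipped

cv2.imshow("rotate", ccw)
cv2.imshow("rotate_bound", cw)
cv2.waitKey(0)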
github dloperab / PyImageSearch-CV-DL-CrashCourse / 02-opencv-tutorial / opencv_tutorial_01.py (View on GitHub)
# manually computing the aspect ratio can be a pain, so let's use the
# imutils library instead
resized = imutils.resize(image, width=300)
cv2.imshow("Imutils Resize", resized)
cv2.waitKey(0)
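
imutils.resize computes the matching height (or width) for you so the aspect ratio is preserved, which with plain cv2.resize you would have to do by hand. A quick comparison (placeholder path):

import cv2
import imutils

image = cv2.imread("example.jpg")  # placeholder path
(h, w) = image.shape[:2]

# manual aspect-ratio bookkeeping with cv2
r = 300.0 / w
manual = cv2.resize(image, (300, int(h * r)))

# the imutils one-liner produces the same output size
auto = imutils.resize(image, width=300)
print(manual.shape == auto.shape)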

github BerkeleyAutomation / perception / tools / generate_siamese_dataset.py (View on GitHub)
def normalize(color_im, crop_size=(512, 512)):

    # Center object in frame
    color_data = color_im.data
    nzp = color_im.nonzero_pixels().astype(np.int32)
    centroid = np.mean(nzp, axis=0)
    cx, cy = color_data.shape[1] // 2, color_data.shape[0] // 2
    color_data = imutils.translate(color_data, cx - round(centroid[1]), cy - round(centroid[0]))
    color_im = ColorImage(color_data, color_im.frame)

    # Crop about center to 512x512
    cx, cy = color_data.shape[1] // 2, color_data.shape[0] // 2
    crop_x = crop_size[0] // 2   # integer division keeps the crop coordinates as ints under Python 3
    crop_y = crop_size[1] // 2
    color_data = imcrop(color_data, (cx - crop_x, cy - crop_y, cx + crop_x, cy + crop_y))
    color_im = ColorImage(color_data, color_im.frame)

    return color_im
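
imutils.translate(image, x, y) shifts the image x pixels to the right and y pixels down (negative values move left/up) by wrapping cv2.warpAffine with the appropriate translation matrix. A minimal sketch of the centering trick used above, with synthetic data:

import numpy as np
import imutils

# a black image with a bright blob away from the center (synthetic data)
img = np.zeros((100, 100, 3), dtype=np.uint8)
img[10:20, 70:80] = 255

# centroid of the non-zero pixels, in (row, col) order
centroid = np.argwhere(img[:, :, 0] > 0).mean(axis=0)

# shift so the blob's centroid lands on the image center
cx, cy = img.shape[1] // 2, img.shape[0] // 2
centered = imutils.translate(img, cx - int(round(centroid[1])),
                             cy - int(round(centroid[0])))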
github arnavkj1995 / face_inpainting / preprocess_test_images.py (View on GitHub)
        filename = os.path.join(images_dir_path, imgs)

        img = io.imread(filename)
        arr = np.array(img) 
        H, W, C = arr.shape   # we assume that we are getting face cropped images

        # Ask the detector to find the bounding boxes of each face. The 1 in the
        # second argument indicates that we should upsample the image 1 time. This
        # will make everything bigger and allow us to detect more faces.
        dets = detector(img, 1)
        #print("Number of faces detected: {}".format(len(dets)))
        
        for k, d in enumerate(dets):
            # Get the landmarks/parts for the face in box d.
            shape = predictor(img, d)
            shape = face_utils.shape_to_np(shape)
     
            face_part = img[d.top():d.bottom(), d.left():d.right()]
            face_part = imresize(face_part, [128,128])

            key_point_matrix = visualize_facial_landmarks(img, shape)
            key_point_matrix = key_point_matrix[d.top():d.bottom(), d.left():d.right()]
            key_point_matrix = imresize(key_point_matrix, [128,128])

            imsave('test_images/img' + str(counter) + '.png', face_part)
            imsave('test_images/ky' + str(counter) + '.png', key_point_matrix)
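
face_utils.shape_to_np converts dlib's landmark object into a plain (68, 2) NumPy array of (x, y) coordinates, which is what lets the snippet index and crop with it. A hedged sketch (both file paths are placeholders; the predictor model must be downloaded separately):

import cv2
import dlib
from imutils import face_utils

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")  # placeholder

image = cv2.imread("face.jpg")  # placeholder path
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

for rect in detector(gray, 1):
    shape = face_utils.shape_to_np(predictor(gray, rect))  # (68, 2) array
    for (x, y) in shape:
        cv2.circle(image, (x, y), 1, (0, 255, 0), -1)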
github apollos / opencv-practice / SB_Code / chapter21-breaking_captchas / test_model.py (View on GitHub)
# import the necessary packages
from keras.models import load_model
from imutils import paths
import numpy as np
import argparse
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True,
	help="path to input directory of images")
ap.add_argument("-m", "--model", required=True,
	help="path to input model")
args = vars(ap.parse_args())

# load the pre-trained network
print("[INFO] loading pre-trained network...")
model = load_model(args["model"])

# randomly sample a few of the input images
imagePaths = list(paths.list_images(args["input"]))
imagePaths = np.random.choice(imagePaths, size=(10,),
	replace=False)

# loop over the image paths
for imagePath in imagePaths:
	# load the image and convert it to grayscale, then pad the image
	# to ensure digits caught on the border of the image are
	# retained
	image = cv2.imread(imagePath)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	gray = cv2.copyMakeBorder(gray, 20, 20, 20, 20,
		cv2.BORDER_REPLICATE)

	# threshold the image to reveal the digits
	thresh = cv2.threshold(gray, 0, 255,
		cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
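
paths.list_images walks a directory tree and yields every file with an image extension, which is what makes the random sampling above possible. A minimal sketch (the directory name is a placeholder):

from imutils import paths
import numpy as np

# recursively collect every image file under a directory (placeholder name)
imagePaths = list(paths.list_images("dataset"))

# sample 10 paths without replacement, as the snippet above does
sample = np.random.choice(imagePaths, size=(10,), replace=False)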
github jeffbass / imagezmq / tests / timing_send_jpg_buf.py (View on GitHub)
# imports needed by this snippet
import socket
import time
import traceback
import cv2
import RPi.GPIO as GPIO
from imutils.video import VideoStream
import imagezmq

# use either of the formats below to specify the address of the display computer
sender = imagezmq.ImageSender(connect_to='tcp://jeff-macbook:5555')
# sender = imagezmq.ImageSender(connect_to='tcp://192.168.1.190:5555')

# optionally, turn on the LED area lighting
use_led = False  # set to True or False as needed
# optionally, flip the image vertically
flip = True  # set to True or False as needed

if use_led:
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(18, GPIO.OUT)
    GPIO.output(18, True)  # turn on LEDs

rpi_name = socket.gethostname()  # send RPi hostname with each image
picam = VideoStream(usePiCamera=True).start()
time.sleep(2.0)  # allow camera sensor to warm up
jpeg_quality = 95  # 0 to 100, higher is better quality, 95 is cv2 default
try:
    while True:  # send images as stream until Ctrl-C
        image = picam.read()
        if flip:
            image = cv2.flip(image, -1)
        ret_code, jpg_buffer = cv2.imencode(
            ".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality])
        sender.send_jpg(rpi_name, jpg_buffer)
except (KeyboardInterrupt, SystemExit):
    pass  # Ctrl-C was pressed to end program
except Exception as ex:
    print('Python error with no Exception handler:')
    print('Traceback error:', ex)
    traceback.print_exc()
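
imutils.video.VideoStream polls the camera in a background thread, so read() always returns the most recent frame without blocking; usePiCamera=True switches from cv2.VideoCapture to the picamera module. A plain-webcam sketch:

import time
import cv2
from imutils.video import VideoStream

vs = VideoStream(src=0).start()  # default webcam; usePiCamera=True on a Raspberry Pi
time.sleep(2.0)                  # let the camera sensor warm up

while True:
    frame = vs.read()            # latest frame, non-blocking
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cv2.destroyAllWindows()
vs.stop()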
github EdgarNg1024 / PaperHelper / test_grader_1.py (View on GitHub)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
questionCnts = []

# loop over each of the contours
for c in cnts:
    # compute the bounding box of the contour, then use it to derive
    # the aspect ratio
    (x, y, w, h) = cv2.boundingRect(c)
    ar = w / float(h)

    # to be considered a bubble, the contour's bounding box must not be too
    # small (at least 20 pixels wide and tall here) and its aspect ratio
    # must be approximately 1
    if w >= 20 and h >= 20 and ar >= 0.9 and ar <= 1.1:
        questionCnts.append(c)

# sort the bubble contours from top to bottom, then initialize
# the count of correct answers
questionCnts = contours.sort_contours(questionCnts,
                                      method="top-to-bottom")[0]
correct = 0

# each question has 5 answer choices, so process the bubbles in batches of 5
for (q, i) in enumerate(np.arange(0, len(questionCnts), 5)):
    # sort the contours for the current question from left to right, then
    # initialize the index of the bubbled-in answer
    cnts = contours.sort_contours(questionCnts[i:i + 5])[0]
    bubbled = None

    # loop over the sorted contours, from left to right
    for (j, c) in enumerate(cnts):
        # construct a mask that reveals only the current bubble
        mask = np.zeros(thresh.shape, dtype="uint8")
        cv2.drawContours(mask, [c], -1, 255, -1)

        # apply the mask to the thresholded image, then count the number of
        # non-zero pixels in the bubble area
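
The cnts[0] if imutils.is_cv2() else cnts[1] idiom above exists because cv2.findContours returns a tuple of different shapes in OpenCV 2, 3, and 4; newer imutils releases offer grab_contours, which handles all three. A sketch (placeholder path):

import cv2
import imutils

gray = cv2.imread("sheet.png", cv2.IMREAD_GRAYSCALE)  # placeholder path
thresh = cv2.threshold(gray, 0, 255,
                       cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)  # version-agnostic across OpenCV 2/3/4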
github bthicks / OMR-Grader / test_box.py (View on GitHub)
        """Grades a list of bubbles from the test box.

        Args:
            bubbles (list): A list of lists, where each list is a group of 
                bubble contours.
            box (numpy.ndarray): An ndarray representing the test box.

        """
        for (i, group) in enumerate(bubbles):
            # Split a group of bubbles by question.
            group = self.group_by_question(group, self.groups[i])

            # Sort bubbles in each question based on box orientation then grade.
            for (j, question) in enumerate(group, 1):
                question_num = j + (i * len(group))
                question, _ = cutils.sort_contours(question,
                    method=self.orientation)

                self.grade_question(question, question_num, i, box)
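
The cutils name here is presumably imutils.contours imported under an alias; its sort_contours orders contours by bounding-box position and accepts "left-to-right", "right-to-left", "top-to-bottom", or "bottom-to-top", returning the sorted contours and their bounding boxes. A sketch with synthetic data:

import numpy as np
import cv2
from imutils import contours

# two filled squares at different x positions (synthetic data)
img = np.zeros((100, 200), dtype=np.uint8)
cv2.rectangle(img, (120, 20), (160, 60), 255, -1)
cv2.rectangle(img, (20, 20), (60, 60), 255, -1)

cnts = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
(sorted_cnts, boxes) = contours.sort_contours(cnts, method="left-to-right")
print([b[0] for b in boxes])  # x coordinates now in ascending order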
github jeffbass / imagezmq / tests / timing_receive_images.py (View on GitHub)
# import the necessary packages
from collections import defaultdict
from imutils.video import FPS
import traceback
import cv2
import imagezmq

# instantiate image_hub
image_hub = imagezmq.ImageHub()

image_count = 0
sender_image_counts = defaultdict(int)  # dict for counts by sender
first_image = True

try:
    while True:  # receive images until Ctrl-C is pressed
        sent_from, image = image_hub.recv_image()
        if first_image:
            fps = FPS().start()  # start FPS timer after first image is received
            first_image = False
        fps.update()
        image_count += 1  # global count of all images received
        sender_image_counts[sent_from] += 1  # count images for each RPi name
        cv2.imshow(sent_from, image)  # display images 1 window per sent_from
        cv2.waitKey(1)
        image_hub.send_reply(b"OK")  # REP reply
except (KeyboardInterrupt, SystemExit):
    pass  # Ctrl-C was pressed to end program; FPS stats computed below
except Exception as ex:
    print('Python error with no Exception handler:')
    print('Traceback error:', ex)
    traceback.print_exc()
finally:
    # stop the timer and display FPS information
    print()
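
imutils.video.FPS is a small stopwatch: start() stamps the start time, update() counts a frame, and after stop() the elapsed() and fps() methods report throughput, which is how the receive loop above measures its frame rate. A minimal sketch:

import time
from imutils.video import FPS

fps = FPS().start()
for _ in range(100):   # stand-in for a frame-processing loop
    time.sleep(0.01)
    fps.update()
fps.stop()

print("elapsed: {:.2f}s".format(fps.elapsed()))
print("approx. FPS: {:.2f}".format(fps.fps()))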

imutils

A series of convenience functions that make basic image processing operations such as translation, rotation, resizing, skeletonization, displaying Matplotlib images, sorting contours, and detecting edges easier with OpenCV on both Python 2.7 and Python 3.

License: MIT