How to use the imutils.rotate_bound function in imutils

To help you get started, we’ve selected a few imutils examples based on popular ways it is used in public projects.

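Before diving into the project snippets, here is a minimal sketch of the call itself (the image path below is a placeholder). rotate_bound(image, angle) rotates the image clockwise by angle degrees and enlarges the output canvas so no corners are clipped:

import cv2
import imutils

image = cv2.imread("example.jpg")          # placeholder input image
rotated = imutils.rotate_bound(image, 30)  # 30 degrees clockwise, no clipping
print(image.shape, "->", rotated.shape)    # the bounded output is larger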

github hj3yoo / mtg_card_detector / transform_data.py View on Github external
            card_x = int(card.x + 0.5)
            card_y = int(card.y + 0.5)

            # Scale & rotate card image
            img_card = cv2.resize(card.img, (int(len(card.img[0]) * card.scale), int(len(card.img) * card.scale)))
            # Add random glare to individual cards - this happens frequently in
            # real life, since MTG cards reflect light very well.
            if aug is not None:
                seq = iaa.Sequential([
                    iaa.SimplexNoiseAlpha(first=iaa.Add(random.randrange(128)), size_px_max=[1, 3],
                                          upscale_method="cubic"),  # Lighting
                ])
                img_card = seq.augment_image(img_card)
            mask_scale = cv2.resize(card_mask, (int(len(card_mask[0]) * card.scale), int(len(card_mask) * card.scale)))
            img_mask = cv2.bitwise_and(img_card, mask_scale)
            img_rotate = imutils.rotate_bound(img_mask, card.theta / math.pi * 180)
            
            # Calculate the position of the card image in relation to the background
            # Crop the card image if it's out of boundary
            card_w = len(img_rotate[0])
            card_h = len(img_rotate)
            card_crop_x1 = max(0, card_w // 2 - card_x)
            card_crop_x2 = min(card_w, card_w // 2 + len(img_result[0]) - card_x)
            card_crop_y1 = max(0, card_h // 2 - card_y)
            card_crop_y2 = min(card_h, card_h // 2 + len(img_result) - card_y)
            img_card_crop = img_rotate[card_crop_y1:card_crop_y2, card_crop_x1:card_crop_x2]

            # Calculate the position of the corresponding area in the background
            bg_crop_x1 = max(0, card_x - (card_w // 2))
            bg_crop_x2 = min(len(img_result[0]), int(card_x + (card_w / 2) + 0.5))
            bg_crop_y1 = max(0, card_y - (card_h // 2))
            bg_crop_y2 = min(len(img_result), int(card_y + (card_h / 2) + 0.5))
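Because rotate_bound grows the canvas to fit the rotated card, the crop bounds above are computed from the rotated image's dimensions rather than the original card's. A quick standalone check of that growth (the card size here is hypothetical):

import numpy as np
import imutils

card = np.zeros((88, 63, 3), dtype=np.uint8)  # hypothetical card-sized image
rotated = imutils.rotate_bound(card, 30)
assert rotated.shape[0] > card.shape[0] and rotated.shape[1] > card.shape[1]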
github dloperab / PyImageSearch-CV-DL-CrashCourse / 02-opencv-tutorial / opencv_tutorial_01.py View on Github external
# computing the image center, then constructing the rotation matrix,
# and then finally applying the affine warp
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, -45, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow("OpenCV Rotation", rotated)
cv2.waitKey(0)

# rotation can also be easily accomplished via imutils with less code
rotated = imutils.rotate(image, -45)
cv2.imshow("Imutils Rotation", rotated)
cv2.waitKey(0)

# OpenCV doesn't "care" if our rotated image is clipped after rotation
# so we can instead use another imutils convenience function to help us out
rotated = imutils.rotate_bound(image, 45)
cv2.imshow("Imutils Bound Rotation", rotated)
cv2.waitKey(0)

# apply a Gaussian blur with an 11x11 kernel to the image to smooth it,
# useful when reducing high frequency noise
blurred = cv2.GaussianBlur(image, (11, 11), 0)
cv2.imshow("Blurred", blurred)
cv2.waitKey(0)

# draw a 2px thick red rectangle surrounding the face
output = image.copy()
cv2.rectangle(output, (320, 60), (420, 160), (0, 0, 255), 2)
cv2.imshow("Rectangle", output)
cv2.waitKey(0)

# draw a blue 20px (filled in) circle on the image centered at
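The bounded rotation shown above works because imutils computes the rotated bounding box before warping. The core idea of rotate_bound looks roughly like this (a sketch of the technique, not a verbatim copy of the library source):

import numpy as np
import cv2

def rotate_bound_sketch(image, angle):
    (h, w) = image.shape[:2]
    (cX, cY) = (w / 2.0, h / 2.0)
    # negate the angle so that positive values rotate clockwise
    M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
    cos, sin = np.abs(M[0, 0]), np.abs(M[0, 1])
    # size of the bounding box that contains the rotated image
    nW = int((h * sin) + (w * cos))
    nH = int((h * cos) + (w * sin))
    # shift the transform so the result stays centered in the new canvas
    M[0, 2] += (nW / 2) - cX
    M[1, 2] += (nH / 2) - cY
    return cv2.warpAffine(image, M, (nW, nH))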
github hanskrupakar / COCO-Style-Dataset-Generator-GUI / coco_dataset_generator / extras / extract_frames.py View on Github external
for f in glob.glob(os.path.join(args.videos_dir, '*')):

        print(f)
        cap = cv2.VideoCapture(f)
        if not os.path.isdir(os.path.join(args.frames_dir, f.split('/')[-1][:-4])):
            os.mkdir(os.path.join(args.frames_dir, f.split('/')[-1][:-4]))
        
        i = 0
        ret, frame = cap.read()
        while ret:

            i += 1

            if args.rotation is not None:
                frame = imutils.rotate_bound(frame, int(args.rotation))

            frame_path = os.path.join(args.frames_dir, f.split('/')[-1][:-4],
                                      f.split('/')[-1][:-4] + '_%d.jpg' % i)
            # only write frames that haven't been extracted yet
            if not os.path.exists(frame_path):
                cv2.imwrite(frame_path, frame)

            ret, frame = cap.read()

        cap.release()
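The f.split('/')[-1][:-4] pattern above assumes Unix path separators and a three-letter file extension; a more portable way to derive the clip name (a sketch) would be:

import os

name = os.path.splitext(os.path.basename(f))[0]  # any extension, any OS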
github takefetter / Get_PalmPrint / process_palm.py View on Github external
# Get the angle of inclination
    ellipse = _, _, angle = cv2.fitEllipse(largest_contour)

    # original = cv2.bitwise_and(original, original, mask=black_and_white)

    # Vertical adjustment correction
    '''
    This variable is used when the result of hand segmentation is upside down. Set it to 0 or 180 to correct the actual angle.
    The issue arises because the angle returned by cv2.fitEllipse lies between 0 and 180 degrees, rather than the full 360.
    '''
    vertical_adjustment_correction = 0
    if CORRECTION_NEEDED: vertical_adjustment_correction = 180

    # Rotate the image to get hand upright
    if angle >= 90:
        black_and_white = im.rotate_bound(black_and_white, vertical_adjustment_correction + 180 - angle)
        original = im.rotate_bound(original, vertical_adjustment_correction + 180 - angle)
        final_Contour = im.rotate_bound(final_Contour, vertical_adjustment_correction + 180 - angle)
    else:
        black_and_white = im.rotate_bound(black_and_white, vertical_adjustment_correction - angle)
        original = im.rotate_bound(original, vertical_adjustment_correction - angle)
        final_Contour = im.rotate_bound(final_Contour, vertical_adjustment_correction - angle)

    original = cv2.bitwise_and(original, original, mask=black_and_white)
    # cv2.imshow('Extracted Hand', final_Contour)
    #cv2.imshow('Original image', original)

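To make the correction logic concrete: if cv2.fitEllipse reports angle = 120, the angle >= 90 branch rotates by 180 - 120 = 60 degrees (plus another 180 when CORRECTION_NEEDED is set), which stands the hand upright. A hypothetical check:

angle = 120                  # hypothetical fitEllipse angle
correction = 0               # becomes 180 when the hand comes out upside down
rotation = correction + 180 - angle if angle >= 90 else correction - angle
assert rotation == 60        # rotate_bound by 60 degrees turns the hand upright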
github charlielito / snapchat-filters-opencv / main_dlib.py View on Github external
def apply_sprite(image, path2sprite, w, x, y, angle, ontop=True):
    sprite = cv2.imread(path2sprite, -1)  # -1 keeps the alpha channel
    # print(sprite.shape)
    sprite = rotate_bound(sprite, angle)
    (sprite, y_final) = adjust_sprite2head(sprite, w, y, ontop)
    image = draw_sprite(image, sprite, x, y_final)
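rotate_bound handles the 4-channel sprite loaded with cv2.imread(path, -1) as well: the alpha channel rotates with the image, and the corner regions exposed by the rotation are filled with zeros, i.e. fully transparent, which keeps the overlay clean. A quick check on a hypothetical sprite:

import numpy as np
import imutils

sprite = np.full((40, 40, 4), 255, dtype=np.uint8)  # opaque white RGBA square
rotated = imutils.rotate_bound(sprite, 45)
assert rotated.shape[2] == 4   # alpha channel is preserved
assert rotated[0, 0, 3] == 0   # exposed corners are transparent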
github takefetter / Get_PalmPrint / process_palm.py View on Github external
    # Find the center of the palm
    # Reference: http://answers.opencv.org/question/180668/how-to-find-the-center-of-one-palm-in-the-picture/
    # The image is already black and white, so this thresholding step is skipped:
    # cv2.threshold(black_and_white, black_and_white, 200, 255, cv2.THRESH_BINARY)

    distance = cv2.distanceTransform(black_and_white, cv2.DIST_L2, 5, cv2.CV_32F)
    # Calculates the distance to the closest zero pixel for each pixel of the source image.
    maxdist = 0
    # rows,cols = img.shape
    for i in range(distance.shape[0]):
        for j in range(distance.shape[1]):
            if distance[i][j] > maxdist:
                maxdist = distance[i][j]
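The nested scan above walks every pixel to find the maximum distance-transform value, which is the radius of the largest circle inscribed in the palm. OpenCV can do the same search in a single vectorized call; a sketch:

# minVal and minLoc are unused; maxLoc is the palm-center candidate (x, y)
_, maxdist, _, max_loc = cv2.minMaxLoc(distance)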
github AruniRC / detectron-self-train / tools / convert_fddb_to_json.py View on Github external
def load_vid(vid_file, rotation=None):
    vid = []
    if rotation is None:
        rotation = _ffmpeg_extract_rotation(vid_file)
    videogen = cv2.VideoCapture(vid_file)
    while True:
        ret, im = videogen.read()
        if not ret:
            break
        im = imutils.rotate_bound(im, rotation - 360)
        vid.append(im)
    videogen.release()
    return vid
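One detail worth noting: rotating by rotation - 360 is the same rotation as rotating by rotation, because cv2.getRotationMatrix2D works in degrees and angles that differ by a full turn produce the same matrix (up to floating-point rounding). A quick check on a hypothetical frame:

import numpy as np
import imutils

frame = np.zeros((48, 64, 3), dtype=np.uint8)   # hypothetical frame
a = imutils.rotate_bound(frame, 90)
b = imutils.rotate_bound(frame, 90 - 360)
assert a.shape == b.shape                       # same rotation either way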
github febert / visual_mpc / python_visual_mpc / data_preparation / gather_data.py View on Github external
    # setting used in softmotion30_v1
    elif 'crop_before_shrink' in conf:
        raw_image_height = conf['raw_image_height']
        img = img[rowstart:rowstart + raw_image_height, colstart:colstart + raw_image_height]

        target_res = conf['target_res']
        img = cv2.resize(img, target_res, interpolation=cv2.INTER_AREA)
    else:
        raise NotImplementedError

    # assert img.shape == (64,64,3)
    img = img[...,::-1]  #bgr => rgb

    if conf['sourcetags'][i_src] == 'aux1':
        img = imutils.rotate_bound(img, 180)

    # plt.imshow(img)
    # plt.show()
    return img
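Since a 180-degree rotation cannot enlarge the bounding box, rotate_bound(img, 180) leaves the shape unchanged here; an equivalent and cheaper pure-NumPy alternative (a sketch, producing essentially the same result without interpolation) is:

import numpy as np

img = np.rot90(img, 2)  # two quarter turns == 180-degree rotation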
github garethnisbet / T-BOTS / Python / Development / T-Bot_Tracking / disparity.py View on Github external
from time import sleep
import cv2
import imutils
from scipy.ndimage.filters import gaussian_filter
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
plt.ion()


low_threshold = 50
high_threshold = 150
# loading the stereo pair
left  = cv2.imread('frames/00015.png')
left = cv2.cvtColor(left, cv2.COLOR_BGR2GRAY)
cleft = cv2.Canny(left,low_threshold,high_threshold)
left_r = imutils.rotate_bound(cleft, 90)
right = cv2.imread('frames/00016.png')
right = cv2.cvtColor(right, cv2.COLOR_BGR2GRAY)
cright = cv2.Canny(right,low_threshold,high_threshold)
right_r = imutils.rotate_bound(cright, 90)

stereo = cv2.StereoBM_create(numDisparities = 32, blockSize = 17)
disparity = stereo.compute(left_r, right_r)
disparity = imutils.rotate_bound(disparity, -90)
plt.figure(figsize=(16,6))
plt.subplot(1,3,1)
plt.imshow(cleft+cright)
plt.subplot(1,3,2)
plt.imshow(cleft-cright)
plt.subplot(1,3,3)
plt.imshow(cright)
#plt.imshow(disparity)
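cv2.StereoBM matches along horizontal scanlines, so the 90-degree rotate_bound calls suggest this camera pair is vertically offset: rotating turns it into a standard left/right pair, and the final rotate_bound(disparity, -90) rotates the disparity map back into the original orientation. The rotate-in, rotate-out pattern in isolation (a sketch):

import numpy as np
import imutils

img = np.zeros((120, 160), dtype=np.uint8)     # hypothetical single-channel frame
upright = imutils.rotate_bound(img, 90)        # vertical offsets become horizontal
restored = imutils.rotate_bound(upright, -90)  # undo the rotation afterwards
assert restored.shape == img.shape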

imutils

A series of convenience functions that make basic image processing tasks, such as translation, rotation, resizing, skeletonization, displaying Matplotlib images, sorting contours, and edge detection, easier with OpenCV on both Python 2.7 and Python 3.

License: MIT