How to use the imutils.rotate function in imutils

To help you get started, we’ve selected a few imutils examples based on popular ways the library is used in public projects.

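For orientation, here is a minimal, self-contained sketch of the two rotation helpers the snippets below rely on. The image path is a placeholder; imutils.rotate keeps the original canvas (so corners can be clipped), while imutils.rotate_bound grows the canvas so the whole rotated frame stays visible.

import cv2
import imutils

# load any test image (the path is illustrative)
image = cv2.imread("example.jpg")

# rotate about the center by 30 degrees; positive angles rotate counter-clockwise
# and the output keeps the input's width and height, so corners may be cut off
rotated = imutils.rotate(image, angle=30)

# rotate_bound expands the output so nothing is clipped; for this helper a
# positive angle rotates clockwise
rotated_full = imutils.rotate_bound(image, 30)

cv2.imshow("rotate", rotated)
cv2.imshow("rotate_bound", rotated_full)
cv2.waitKey(0)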

github dloperab / PyImageSearch-CV-DL-CrashCourse / 02-opencv-tutorial / opencv_tutorial_01.py
# imutils library instead
resized = imutils.resize(image, width=300)
cv2.imshow("Imutils Resize", resized)
cv2.waitKey(0)

# let's rotate an image 45 degrees clockwise using OpenCV by first
# computing the image center, then constructing the rotation matrix,
# and then finally applying the affine warp
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, -45, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow("OpenCV Rotation", rotated)
cv2.waitKey(0)

# rotation can also be easily accomplished via imutils with less code
rotated = imutils.rotate(image, -45)
cv2.imshow("Imutils Rotation", rotated)
cv2.waitKey(0)

# OpenCV doesn't "care" if our rotated image is clipped after rotation
# so we can instead use another imutils convenience function to help us out
rotated = imutils.rotate_bound(image, 45)
cv2.imshow("Imutils Bound Rotation", rotated)
cv2.waitKey(0)

# apply a Gaussian blur with an 11x11 kernel to the image to smooth it,
# useful when reducing high frequency noise
blurred = cv2.GaussianBlur(image, (11, 11), 0)
cv2.imshow("Blurred", blurred)
cv2.waitKey(0)

# draw a 2px thick red rectangle surrounding the face
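
The snippet above performs the same 45 degree clockwise rotation twice: once with raw OpenCV calls and once with imutils.rotate. Under the hood, imutils.rotate is essentially a thin wrapper around those same OpenCV calls; a rough sketch of the equivalence (not necessarily the library's exact source) looks like this, while rotate_bound additionally recomputes the output size from the angle's sine and cosine so no pixels are lost:

import cv2

def rotate_like_imutils(image, angle, center=None, scale=1.0):
    # grab the image dimensions and default to rotating about the center
    (h, w) = image.shape[:2]
    if center is None:
        center = (w // 2, h // 2)

    # build the rotation matrix and apply the affine warp, keeping the
    # original width and height (which is why corners can get clipped)
    M = cv2.getRotationMatrix2D(center, angle, scale)
    return cv2.warpAffine(image, M, (w, h))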

github PoseNet-Mobile-Robot / Mobile-Robotics / train.py
def test(self, img, need_rotate_angle=270, num_random_crops=20):
        if img.shape[2] != 3:
            print("We only accept 3-dimensional rgb images")
        if img.shape[0] > img.shape[1]:
            img = imutils.rotate(img, need_rotate_angle)
            img = imutils.resize(img, height=256)

        input_size = self.network_input_size # 224 here
        input_batch = np.zeros((num_random_crops,input_size,input_size,3))
        if num_random_crops == 1:
            rand_range = [img.shape[0]-input_size, img.shape[1]-input_size] # height, width
            for index in range(num_random_crops):
                h = np.random.randint(rand_range[0])
                w = np.random.randint(rand_range[1])
                input_batch[index, :] = img[h:h+input_size, w:w+input_size, :]
                t_r_output = self.sess.run([self.regression_out],
                                   feed_dict={self.image_inputs: input_batch})
            return np.mean(t_r_output, axis=0)
        else:
            tf_output = self.sess.run([self.regression_out],
                                      feed_dict={self.image_inputs: gen_data.centeredCrop(img, input_size)} )
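
The test method above rotates portrait frames by 270 degrees so that every image reaches the network in the same landscape orientation before cropping. A standalone sketch of that normalization step, using illustrative names rather than the repository's own:

import imutils

def normalize_orientation(img, rotate_angle=270, target_height=256):
    # portrait frames (height > width) are rotated and resized so the
    # fixed-size crops that follow always see a landscape image
    if img.shape[0] > img.shape[1]:
        img = imutils.rotate(img, rotate_angle)
        img = imutils.resize(img, height=target_height)
    return img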

github PoseNet-Mobile-Robot / Mobile-Robotics / gen_data_cam.py
def preprocess(images):
    images_out = [] #final result
    #Resize and crop and compute mean!
    images_cropped = []
    for i in tqdm(range(len(images))):
        X = cv2.imread(images[i])
        if X.shape[0] < X.shape[1]:
            X = imutils.resize(X, height=256)
        else:
            X = imutils.resize(X, width=256)
        X = imutils.rotate(X, angle=270)
        #X = cv2.resize(X, (455, 256))
        X = centeredCrop(X, 224)
        images_cropped.append(X)
    #compute images mean
    N = 0
    mean = np.zeros((1, 3, 224, 224))
    for X in tqdm(images_cropped):
        mean[0][0] += X[:,:,0]
        mean[0][1] += X[:,:,1]
        mean[0][2] += X[:,:,2]
        N += 1
    mean[0] /= N
    #Subtract mean from all images
    for X in tqdm(images_cropped):
        X = np.transpose(X,(2,0,1))
        X = X - mean
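
This preprocessing routine rotates every frame by a fixed 270 degrees. Like cv2.getRotationMatrix2D, imutils.rotate treats positive angles as counter-clockwise, so 270 here amounts to a 90 degree clockwise turn. A quick, illustrative way to confirm the convention on a throwaway array:

import numpy as np
import imutils

# synthetic 256x256 image with a bright band along the top edge
probe = np.zeros((256, 256, 3), dtype="uint8")
probe[0:10, :, :] = 255

# 270 degrees counter-clockwise equals 90 degrees clockwise, so after the
# rotation the bright band sits along the right edge of the image
rotated_probe = imutils.rotate(probe, 270)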

github damien911224 / theWorldInSafety / actionRecognition / twis / twisSubModelServer.py
        if end_index % self.print_term == 0:
            with self.print_lock:
                print('{:10s}|{:12s}| Until {:07d}|Delay {:.6f} Seconds'.format('Session', 'Dumping', end_index, self.average_delay))

        index = self.start_index
        for frame in frames:
            file_name = os.path.join(self.image_folder, 'show_{:07d}.jpg'.format(index))
            new_frame = cv2.resize(frame, self.show_size, interpolation=cv2.INTER_AREA)
            if self.is_rotated:
                new_frame = imutils.rotate(new_frame, self.rotating_angle)
            cv2.imwrite(file_name, new_frame)

            file_name = os.path.join(self.image_folder, 'img_{:07d}.jpg'.format(index))
            new_frame = cv2.resize(frame, self.new_size, interpolation=cv2.INTER_AREA)
            if self.is_rotated:
                new_frame = imutils.rotate(new_frame, self.rotating_angle)
            cv2.imwrite(file_name, new_frame)
            index += 1

github fpt-corp / DiRa / Reference / Source code final 2018-2019 / MTA-R4F-Digital_race_2019 / src_25_5_13_56 / mpu / src / Demo.py
    if x > 579:
        x = 579
    if x < 20:
        x = 20
    if y > 579:
        y = 579
    if y < 20:
        y = 20
    HEIGHT = 40
    WIDTH = 40
    car2 = cv2.imread("1.png", 1)
    car = car2.copy()
    car[car2>100]=0
    car[car2<=100]=255
    car = cv2.resize(car, (WIDTH, HEIGHT))
    car = imutils.rotate(car, angle)
    img[x-HEIGHT//2:x+HEIGHT//2, y-WIDTH//2:y+WIDTH//2, :] = car
    return img
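
Because imutils.rotate keeps the original 40x40 canvas, the corners of the car sprite are clipped at most angles. If that matters, one possible variant (a sketch, not the repository's code) is to rotate with imutils.rotate_bound and scale the result back down to the paste size:

import cv2
import imutils

def rotate_sprite(sprite, angle, width=40, height=40):
    # rotate_bound enlarges the canvas so no sprite pixels are lost, then the
    # result is resized back to the fixed region used for pasting
    rotated = imutils.rotate_bound(sprite, angle)
    return cv2.resize(rotated, (width, height))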

github damien911224 / theWorldInSafety / actionRecognition / twis / twisModelServer.py
def dumpFrames(self, frames):
        end_index = self.start_index + len(frames) - 1
        if end_index % self.print_term == 0:
            with self.print_lock:
                print('{:10s}|{:12s}| Until {:07d}|Delay {:.6f} Seconds'.format('Session', 'Dumping', end_index, self.average_delay))

        index = self.start_index
        for frame in frames:
            file_name = os.path.join(self.image_folder, 'show_{:07d}.jpg'.format(index))
            new_frame = cv2.resize(frame, self.show_size, interpolation=cv2.INTER_AREA)
            if self.is_rotated:
                new_frame = imutils.rotate(new_frame, self.rotating_angle)
            cv2.imwrite(file_name, new_frame)

            file_name = os.path.join(self.image_folder, 'img_{:07d}.jpg'.format(index))
            new_frame = cv2.resize(frame, self.new_size, interpolation=cv2.INTER_AREA)
            if self.is_rotated:
                new_frame = imutils.rotate(new_frame, self.rotating_angle)
            cv2.imwrite(file_name, new_frame)
            index += 1

github AstronomyLiveYt / SatTraker / SatTrakerBetaV5.py
def prepare_img_for_tkinter(self):
        if self.collect_images is True:
            self.imgtk = []
            self.img = []
            ret, self.img = self.cap.read()
            if ret is True:
                if trackSettings.flip == 'VerticalFlip':
                    self.img = cv2.flip(self.img, 0)
                if trackSettings.flip == 'HorizontalFlip':
                    self.img = cv2.flip(self.img, 1)
                if trackSettings.flip == 'VerticalHorizontalFlip':
                    self.img = cv2.flip(self.img, -1)
                self.img = imutils.rotate(self.img, trackSettings.rotate)
                #remember current time of the frame
                self.dnow = datetime.datetime.now()
                self.height, self.width = self.img.shape[:2]
                self.displayimg.bind("<MouseWheel>", self._on_mousewheel)
                self.displayimg.bind("<Motion>", self.mouse_position)
                self.displayimg.bind("<Button-1>", self.left_click)
                self.displayimg.bind("<Button-3>", self.right_click)
                self.mousebox = [(int(trackSettings.mousecoords[0]-(trackSettings.boxSize/2)),int(trackSettings.mousecoords[1]-(trackSettings.boxSize/2))),
                    (int(trackSettings.mousecoords[0]+(trackSettings.boxSize/2)),int(trackSettings.mousecoords[1]+(trackSettings.boxSize/2)))]
                self.centerbox = [(int(trackSettings.mainviewX-5),int(trackSettings.mainviewY - 5)),
                    (int(trackSettings.mainviewX+5),int(trackSettings.mainviewY+5))]
                # make sure mouse coordinates are within bounds
                for idx, coord in enumerate(self.mousebox):
                    if coord[0] < 0:
                        x = 0
                    elif coord[0] > self.width:

github HaohanWang / PAR_experiments / MNIST_R / mnist_r.py
def rotateImg(x, angle):
    x = x.reshape(x.shape[0],28,28)
    rotatedx = []
    for dig in x:
        rotated = imutils.rotate(dig, angle)
        rotatedx.append(rotated.reshape(28*28))
    rox = np.array(rotatedx)
    return rox
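
A hedged usage sketch for the helper above, assuming a batch of flattened 28x28 digits (the random array merely stands in for real MNIST data):

import numpy as np

batch = np.random.rand(64, 28 * 28).astype("float32")  # placeholder batch
rotated_batch = rotateImg(batch, 30)  # each digit rotated 30 degrees counter-clockwise
# rotated_batch keeps the flattened (64, 784) layout of the input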

github IcedDoggie / Micro-Expression-with-Deep-Learning / augmentation.py
def rotation(degree, image):
	# M = cv2.getRotationMatrix2D((image.shape[0]/2, image.shape[1]/2), degree, 1 )
	# images_aug = cv2.warpAffine(image, M, (image.shape[0], image.shape[1]))
	images_aug = imutils.rotate(image, degree)
	images_aug = cv2.cvtColor(images_aug, cv2.COLOR_BGR2GRAY)


	return images_aug
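
A short usage sketch for the rotation helper above; the input path is illustrative:

import cv2

frame = cv2.imread("sample_face.jpg")  # any BGR image read with OpenCV
augmented = rotation(10, frame)  # rotated 10 degrees counter-clockwise, then converted to grayscale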

imutils

A series of convenience functions that make basic image processing tasks such as translation, rotation, resizing, skeletonization, displaying Matplotlib images, sorting contours, and edge detection easier with OpenCV, on both Python 2.7 and Python 3.
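
A minimal sketch combining a few of the helpers the description mentions (the file name is a placeholder):

import cv2
import imutils

image = cv2.imread("photo.jpg")
shifted = imutils.translate(image, 25, -30)  # shift 25 px right and 30 px up
smaller = imutils.resize(image, width=400)   # resize while preserving aspect ratio
spun = imutils.rotate(image, 90)             # rotate 90 degrees counter-clockwise
edges = imutils.auto_canny(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))  # edge map with automatic thresholds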

MIT