How to use the skimage.transform.warp function in skimage

To help you get started, we’ve selected a few examples showing how skimage.transform.warp is used in popular public projects.

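Before diving into the project snippets, here is a minimal, self-contained sketch of the pattern most of them share: construct or estimate a geometric transform, then hand it (or its inverse) to warp. The image and parameter values below are illustrative only and not taken from any of the projects.

import numpy as np
from skimage import data
from skimage.transform import AffineTransform, warp

image = data.camera()                       # sample grayscale image
tform = AffineTransform(scale=(1.1, 1.1),
                        rotation=np.deg2rad(15),
                        translation=(20, -10))

# warp() expects the inverse mapping: for every output pixel it asks
# where in the input image the value should come from.
warped = warp(image, tform.inverse,
              output_shape=image.shape,     # keep the original canvas size
              mode='constant', cval=0,      # fill pixels outside the input with 0
              preserve_range=False)         # result is float in [0, 1]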

github zsdonghao / text-to-image / tensorlayer / prepro.py
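This snippet is the tail of a tensorlayer preprocessing helper: it converts the source and destination points to NumPy arrays, rescales the image to [0, 1] if needed, estimates a ProjectiveTransform from the point correspondences, and applies it with transform.warp.
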
References
    -----------
    - `scikit-image : geometric transformations `_
    - `scikit-image : examples `_
    """
    if type(src) is list:   # convert to numpy
        src = np.array(src)
    if type(dst) is list:
        dst = np.array(dst)
    if np.max(x)>1:         # convert to [0, 1]
        x = x/255

    m = transform.ProjectiveTransform()
    m.estimate(dst, src)
    warped = transform.warp(x, m, map_args=map_args, output_shape=output_shape, order=order,
                            mode=mode, cval=cval, clip=clip, preserve_range=preserve_range)
    return warped
github sshuair / torchsat / torchvision_multi / transform_multi.py
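Part of a piecewise-affine augmentation: destination control points are jittered with random offsets, a PiecewiseAffineTransform is estimated between the source and destination grids, and the image is warped with preserve_range=True before being cast back with astype.
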
            else:
                dst_rows[i, j] = src_rows[i, j] + np.random.uniform(-1, 1) * warp_up_down

    dst = np.dstack([dst_cols.flat, dst_rows.flat])[0]

    # dst_rows_new = np.ndarray.transpose(dst_rows)
    # dst_cols_new = np.ndarray.transpose(dst_cols)
    # dst_new = np.dstack([dst_cols_new.flat, dst_rows_new.flat])[0]

    out_rows = rows
    out_cols = cols

    tform = transform.PiecewiseAffineTransform()
    tform.estimate(src, dst)

    img_new = transform.warp(image, tform, output_shape=(out_rows, out_cols), order=order, preserve_range=True)

    img_new = img_new.astype(type)
    return img_new
github bolme / pyvision / src / pyvision / types / Affine.py
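Inside pyvision's affine-transform class: OpenCV-backed images are warped with skimage.transform.warp using the precomputed inverse transform (self.inverse), and the float result is rescaled to 0-255 and converted to uint8.
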
            result = Image(mat[:,:self.size[0],:self.size[1]])
            
        elif im_a.getType() == TYPE_OPENCV2:
            # Transform an opencv 2 image
            src = im_a.asOpenCV2()
            import skimage.transform
            dst = skimage.transform.warp(src, self.inverse, output_shape=(self.size[1],self.size[0]))
            dst = 255*dst
            dst = dst.astype(np.uint8)
            result = pv.Image(dst)

        elif im_a.getType() == TYPE_OPENCV2BW:
            # Transform a bw opencv 2 image
            src = im_a.asOpenCV2BW()
            import skimage.transform
            dst = skimage.transform.warp(src, self.inverse, output_shape=(self.size[1],self.size[0]))
            dst = 255*dst
            dst = dst.astype(np.uint8)
            result = pv.Image(dst)

        else:
            raise NotImplementedError("Unhandled image type for affine transform.")

        
        # Check to see if there is an aff_prev list for this object
        if use_orig and hasattr(prev_im,'aff_prev'):
            # Create one if not
            result.aff_prev = copy.copy(prev_im.aff_prev)
        else:
            result.aff_prev = []
            
        # Append the prev image and new transform
github scikit-image / scikit-image / doc / examples / features_detection / plot_corner.py
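From scikit-image's own corner-detection example: a checkerboard is warped with the inverse of an AffineTransform to build a distorted test image, to which Harris corner detection and sub-pixel refinement are then applied.
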
.. [1] https://en.wikipedia.org/wiki/Corner_detection
.. [2] https://en.wikipedia.org/wiki/Interest_point_detection

"""
from matplotlib import pyplot as plt

from skimage import data
from skimage.feature import corner_harris, corner_subpix, corner_peaks
from skimage.transform import warp, AffineTransform
from skimage.draw import ellipse


tform = AffineTransform(scale=(1.3, 1.1), rotation=1, shear=0.7,
                        translation=(210, 50))
image = warp(data.checkerboard(), tform.inverse, output_shape=(350, 350))
rr, cc = ellipse(310, 175, 10, 100)
image[rr, cc] = 1
image[180:230, 10:60] = 1
image[230:280, 60:110] = 1

coords = corner_peaks(corner_harris(image), min_distance=5)
coords_subpix = corner_subpix(image, coords, window_size=13)

fig, ax = plt.subplots()
ax.imshow(image, interpolation='nearest', cmap=plt.cm.gray)
ax.plot(coords[:, 1], coords[:, 0], '.b', markersize=3)
ax.plot(coords_subpix[:, 1], coords_subpix[:, 0], '+r', markersize=15)
ax.axis((0, 350, 350, 0))
plt.show()
github google / makerfaire-booth / 2018 / burger / experimental / dek / template_matching / blob.py
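Template matching: keypoint descriptors from two images are matched, a SimilarityTransform is fitted robustly with ransac, and the second image is warped onto the first via the model's inverse before a structural-similarity score is computed. (Note that this excerpt is Python 2 code.)
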
        keypoints2 = descriptor_extractor.keypoints
        descriptors2 = descriptor_extractor.descriptors

        matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

        src = keypoints2[matches12[:, 1]][:, ::-1]
        dst = keypoints1[matches12[:, 0]][:, ::-1]

        model_robust, inliers = \
            ransac((src, dst), SimilarityTransform,
                   min_samples=4, residual_threshold=2)
        if not model_robust:
            print "bad"
            continue
        img2_transformed = transform.warp(img2, model_robust.inverse, mode='constant', cval=1)
        img1_padded_float = img1_padded.astype(numpy.float64)/255.
        sub = img2_transformed - img1_padded_float
        print compare_ssim(img2_transformed, img1_padded_float, win_size=5, multichannel=True)
        fig, axes = plt.subplots(2, 2, figsize=(7, 6), sharex=True, sharey=True)
        ax = axes.ravel()

        ax[0].imshow(img1_padded_float)
        ax[1].imshow(img2)
        ax[1].set_title("Template image")
        ax[2].imshow(img2_transformed)
        ax[2].set_title("Matched image")
        ax[3].imshow(sub)
        ax[3].set_title("Subtracted image")
        # plt.gray()

        # ax = plt.gca()
github jaeoh2 / Road-Lane-Instance-Segmentation-PyTorch / dataset.py
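Random data augmentation: a random AffineTransform (scale, translation, shear) is built from a sampled intensity, and the same transform is applied with warp to the image, the label image, and every instance mask.
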
    def random_transform(self):
        intensity = self.flags['intensity']
        def _get_delta(intensity):
            delta = np.radians(intensity)
            rand_delta = np.random.uniform(low=-delta, high=delta)
            return rand_delta

        trans_M = AffineTransform(scale=(.9, .9),
                                 translation=(-_get_delta(intensity), _get_delta(intensity)),
                                 shear=_get_delta(intensity))
        self.img = img_as_float32(self.img)
        self.label_img = img_as_float32(self.label_img)
        self.ins_img = img_as_float32(self.ins_img)

        self.img = warp(self.img, trans_M)
        self.label_img = warp(self.label_img, trans_M)
        for i in range(len(self.ins_img)):
            self.ins_img[i] = warp(self.ins_img[i], trans_M)
github Zulko / moviepy / examples / star_worms.py
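A trapezoidal warp for a MoviePy effect: a ProjectiveTransform is estimated from the image corners to a trapezoid defined by cx and cy, and tf.warp applies its inverse to the frame.
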
def trapzWarp(pic,cx,cy,ismask=False):
    """ Complicated function (will be latex packaged as a fx) """
    Y,X = pic.shape[:2]
    src = np.array([[0,0],[X,0],[X,Y],[0,Y]])
    dst = np.array([[cx*X,cy*Y],[(1-cx)*X,cy*Y],[X,Y],[0,Y]])
    tform = tf.ProjectiveTransform()
    tform.estimate(src,dst)
    im = tf.warp(pic, tform.inverse, output_shape=(Y,X))
    return im if ismask else (im*255).astype('uint8')
github alphacharlie / mlxd / mlxview.py
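Aligning a low-resolution thermal (IR) frame with a camera image: the IR data is contrast-stretched and colour-mapped, then warped with an AffineTransform (scale, rotation, offset) to match the camera frame's shape.
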
        sleep(0.25)
        ir_raw = fifo.read()
        ir_trimmed = ir_raw[0:128]
        ir = np.frombuffer(ir_trimmed, np.uint16)
        ir = ir.reshape((16, 4))[::-1, ::-1]
        ir = img_as_float(ir)  
        p2, p98 = np.percentile(ir, (2, 98))
        ir = exposure.rescale_intensity(ir, in_range=(p2, p98))
        ir = exposure.equalize_hist(ir)
        
        cmap = plt.get_cmap('spectral')
        rgba_img = cmap(ir)
        rgb_img = np.delete(rgba_img, 3, 2)    
        # align the IR array with the image
        tform = transform.AffineTransform(scale=SCALE, rotation=ROT, translation=OFFSET)
        ir_aligned = transform.warp(rgb_img, tform.inverse, mode='constant', output_shape=im.shape)
        ir_byte = img_as_ubyte(ir_aligned)

        o.update(np.getbuffer(ir_byte))

    print('Error! Closing...')
    camera.remove_overlay(o)
    fifo.close()
github joseph-zhong / LipReading / src / models / extern / prnet / api.py
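Face cropping for PRNet: a similarity transform is estimated from three points around the detected face to the network's input resolution, the image is warped to produce the cropped input, and the predicted vertices are mapped back through the inverse of tform.params.
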
                print('warning: no detected face')
                return None

            d = detected_faces[0].rect ## only use the first detected face (assume that each input image only contains one face)
            left = d.left(); right = d.right(); top = d.top(); bottom = d.bottom()
            old_size = (right - left + bottom - top)/2
            center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 + old_size*0.14])
            size = int(old_size*1.58)

        # crop image
        src_pts = np.array([[center[0]-size/2, center[1]-size/2], [center[0] - size/2, center[1]+size/2], [center[0]+size/2, center[1]-size/2]])
        DST_PTS = np.array([[0,0], [0,self.resolution_inp - 1], [self.resolution_inp - 1, 0]])
        tform = estimate_transform('similarity', src_pts, DST_PTS)
        
        image = image/255.
        cropped_image = warp(image, tform.inverse, output_shape=(self.resolution_inp, self.resolution_inp))

        # run our net
        #st = time()
        cropped_pos = self.net_forward(cropped_image)
        #print 'net time:', time() - st

        # restore 
        cropped_vertices = np.reshape(cropped_pos, [-1, 3]).T
        z = cropped_vertices[2,:].copy()/tform.params[0,0]
        cropped_vertices[2,:] = 1
        vertices = np.dot(np.linalg.inv(tform.params), cropped_vertices)
        vertices = np.vstack((vertices[:2,:], z))
        pos = np.reshape(vertices.T, [self.resolution_op, self.resolution_op, 3])
        
        return pos