How to use the skimage.transform module in skimage

To help you get started, we've selected a few skimage.transform examples based on popular ways it is used in public projects.


github DaFun / Image-Enhancement / web_serving / inference.py
        im_input = self.preprocess(data)
        # im_input = cv2.imdecode(img, -1)  # -1 means read as is, no conversions.
        if im_input.shape[2] == 4:
            im_input = im_input[:, :, :3]  # drop the alpha channel

        im_input = np.flip(im_input, 2)  # OpenCV reads BGR, convert back to RGB.

        # img_as_float rescales uint8 and uint16 inputs alike to floats in
        # [0, 1], so the HDR (uint16) and LDR branches reduce to one call.
        im_input = skimage.img_as_float(im_input)

        # order=0 selects nearest-neighbor interpolation.
        lowres_input = skimage.transform.resize(im_input, [256, 256], order=0)
        im_input = im_input[np.newaxis, :, :, :]
        lowres_input = lowres_input[np.newaxis, :, :, :]


        fullres = self.graph.get_tensor_by_name('import/fullres_input:0')
        lowres = self.graph.get_tensor_by_name('import/lowres_input:0')
        out = self.graph.get_tensor_by_name('import/output_img:0')

        feed_dict = {
            fullres: im_input,
            lowres: lowres_input
        }

        y_out = self.sess.run(out, feed_dict=feed_dict)

        img = Image.fromarray(y_out, 'RGB')
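
The snippet's one skimage.transform call is resize with order=0 (nearest-neighbor), which builds a fixed 256x256 low-resolution companion to the full-resolution input. A minimal standalone sketch of that step, using a random array in place of the decoded image:

import numpy as np
import skimage
import skimage.transform

# Stand-in for the decoded photo; img_as_float maps any input dtype to [0, 1].
im_input = skimage.img_as_float(np.random.rand(480, 640, 3))

# order=0 selects nearest-neighbor interpolation; the output shape is
# fixed at 256x256 regardless of the input resolution.
lowres_input = skimage.transform.resize(im_input, [256, 256], order=0)

# Add the batch dimension the TensorFlow graph expects.
im_input = im_input[np.newaxis, :, :, :]
lowres_input = lowres_input[np.newaxis, :, :, :]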
github pranv / ARC / image_augmenter.py
        scale_x = random.uniform(scale_x_min, scale_x_max)
        if not scale_axis_equally:
            scale_y = random.uniform(scale_y_min, scale_y_max)
        else:
            scale_y = scale_x
        rotation = np.deg2rad(random.randint(rotation_deg_min, rotation_deg_max))
        shear = np.deg2rad(random.randint(shear_deg_min, shear_deg_max))
        translation_x = random.randint(translation_x_px_min, translation_x_px_max)
        translation_y = random.randint(translation_y_px_min, translation_y_px_max)

        # create three affine transformation matrices
        # 1st one moves the image to the top left, 2nd one transforms it, 3rd one
        # moves it back to the center.
        # The movement is necessary because rotation is applied to the top left
        # and not to the image's center (same for scaling and shear).
        matrix_to_topleft = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
        matrix_transforms = tf.AffineTransform(scale=(scale_x, scale_y),
                                               rotation=rotation, shear=shear,
                                               translation=(translation_x,
                                                            translation_y))
        matrix_to_center = tf.SimilarityTransform(translation=[shift_x, shift_y])

        # Combine the three matrices into one affine transformation (one matrix)
        matrix = matrix_to_topleft + matrix_transforms + matrix_to_center

        # one matrix is ready, add it to the result
        result.append(matrix.inverse)

    return result
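
The shift-transform-shift pattern above is the standard way to rotate, scale, or shear about the image center in skimage, since AffineTransform operates about the origin (the top-left corner). A minimal sketch of the same idea on a sample image; the 30-degree angle is illustrative:

import numpy as np
import skimage.transform as tf
from skimage import data

image = data.camera()
shift_y, shift_x = np.array(image.shape[:2]) / 2.0

# Move the center to the origin, transform there, then move back.
matrix_to_topleft = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
matrix_transforms = tf.AffineTransform(rotation=np.deg2rad(30))
matrix_to_center = tf.SimilarityTransform(translation=[shift_x, shift_y])
matrix = matrix_to_topleft + matrix_transforms + matrix_to_center

# warp() wants the inverse mapping (output coords -> input coords),
# which is why the snippet stores matrix.inverse.
rotated = tf.warp(image, matrix.inverse)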
github Wuziyi616 / CFUN / utils.py
from distutils.version import LooseVersion

import skimage
import skimage.transform


def resize(image, output_shape, order=1, mode='constant', cval=0, clip=True,
           preserve_range=False, anti_aliasing=False, anti_aliasing_sigma=None):
    """A wrapper for Scikit-Image resize().

    Scikit-Image generates warnings on every call to resize() if it doesn't
    receive the right parameters. The right parameters depend on the version
    of skimage. This solves the problem by using different parameters per
    version. And it provides a central place to control resizing defaults.
    """
    if LooseVersion(skimage.__version__) >= LooseVersion("0.14"):
        # New in 0.14: anti_aliasing. Default it to False for backward
        # compatibility with skimage 0.13.
        return skimage.transform.resize(
            image, output_shape,
            order=order, mode=mode, cval=cval, clip=clip,
            preserve_range=preserve_range, anti_aliasing=anti_aliasing,
            anti_aliasing_sigma=anti_aliasing_sigma)
    else:
        return skimage.transform.resize(
            image, output_shape,
            order=order, mode=mode, cval=cval, clip=clip,
            preserve_range=preserve_range)
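
A hedged usage sketch of the wrapper; the array and target shape are made up, and the point is simply that callers never touch the version check:

import numpy as np

image = np.random.rand(512, 512, 3)
# The same call works on skimage 0.13 and 0.14+; anti_aliasing is
# silently dropped on versions that don't support it.
small = resize(image, (128, 128), anti_aliasing=True)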
github deepinsight / insightface / alignment / img_helper.py
import cv2
import numpy as np
from skimage import transform as stf


def transform(data, center, output_size, scale, rotation):
    scale_ratio = float(output_size)/scale
    rot = float(rotation)*np.pi/180.0
    #translation = (output_size/2-center[0]*scale_ratio, output_size/2-center[1]*scale_ratio)
    t1 = stf.SimilarityTransform(scale=scale_ratio)
    cx = center[0]*scale_ratio
    cy = center[1]*scale_ratio
    t2 = stf.SimilarityTransform(translation=(-1*cx, -1*cy))
    t3 = stf.SimilarityTransform(rotation=rot)
    t4 = stf.SimilarityTransform(translation=(output_size/2, output_size/2))
    t = t1 + t2 + t3 + t4
    # Keep the top two rows of the 3x3 matrix: the 2x3 affine matrix
    # that cv2.warpAffine expects.
    trans = t.params[0:2]
    #print('M', scale, rotation, trans)
    cropped = cv2.warpAffine(data, trans, (output_size, output_size), borderValue=0.0)
    return cropped, trans
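
A hedged usage sketch; the image, center, and scale values are hypothetical, and transform here is the helper defined above, not skimage's:

import numpy as np

# Crop an upright 112x112 patch whose 180px source region is centered
# on (200, 150) in the input image.
img = np.zeros((400, 400, 3), dtype=np.uint8)
cropped, M = transform(img, center=(200, 150), output_size=112, scale=180, rotation=0)
print(cropped.shape)  # (112, 112, 3)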
github mlagunas / material-appearance-similarity / plot_umap.py
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
from skimage import transform


def _imscatter(x, y, image, color=None, ax=None, zoom=1.):
    """ Auxiliary function to plot an image in the location [x, y]
        image should be an np.array in the form H*W*3 for RGB
    """
    if ax is None:
        ax = plt.gca()
    try:
        image = plt.imread(image)
        size = min(image.shape[0], image.shape[1])
        image = transform.resize(image[:size, :size], (256, 256))
    except TypeError:
        # Likely already an array...
        pass
    im = OffsetImage(image, zoom=zoom)
    x, y = np.atleast_1d(x, y)
    artists = []
    for x0, y0 in zip(x, y):
        edgecolor = dict(boxstyle='round,pad=0.05',
                         edgecolor=color, lw=4) \
            if color is not None else None
        ab = AnnotationBbox(im, (x0, y0),
                            xycoords='data',
                            frameon=False,
                            bboxprops=edgecolor,
                            )
        artists.append(ax.add_artist(ab))
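
transform.resize is what makes arbitrary inputs uniform here: the image is cropped to a square and resized to 256x256 before plotting. A hedged usage sketch; 'thumb.png' is a throwaway file written just for the demo:

import matplotlib.pyplot as plt
import numpy as np

plt.imsave('thumb.png', np.random.rand(64, 64, 3))  # hypothetical thumbnail

fig, ax = plt.subplots()
_imscatter(0.5, 0.5, 'thumb.png', ax=ax, zoom=0.5)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
plt.show()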
github philkr / voc-classification / src / data.py
		try:
			from progressbar import ProgressBar, Percentage, Bar, ETA
			progress = ProgressBar(widgets=["Writing %s   " % path.basename(hf5_file), Percentage(), Bar(), ETA()])
		except ImportError:
			print("Writing %s" % path.basename(hf5_file))
			progress = lambda x: x  # fall back: iterate without a progress bar
		
		voc_data = VOCData(voc_dir, image_set)
		for i in progress(range(len(voc_data))):
			lbl, im = voc_data[i]
			if resize is not None:
				from skimage import transform
				try:
					W, H = resize
				except TypeError:  # a single size was given, not a (W, H) pair
					W, H = resize, resize
				im = transform.resize(im, (H,W))
				im = (255*im).astype(np.uint8)
			
			# Read and write the image
			f.create_dataset('/data/%d'%i, data=im[:,:,::-1].transpose((2,0,1)))
			
			# Write classification labels
			f.create_dataset('/cls/%d'%i, data=lbl.astype(np.uint8))
		f.close()
		# Clean up the file
		atexit.register(try_remove, hf5_file)
	fast_hdf5_input_param = dict(source=hf5_file, batch_size=kwargs.get('batch_size', 1), group_name='data')
	data = L.TransformingFastHDF5Input(fast_hdf5_input_param=fast_hdf5_input_param, transform_param = kwargs.get('transform_param', {}))
	fast_hdf5_input_param = dict(source=hf5_file, batch_size=kwargs.get('batch_size', 1), group_name='cls')
	cls  = L.FastHDF5Input(fast_hdf5_input_param=fast_hdf5_input_param)
	return data, cls
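
One detail worth copying from this snippet: transform.resize returns a float image in [0, 1] whatever the input dtype, so the result has to be rescaled and cast before being stored as 8-bit data. A minimal sketch of that round-trip on a random uint8 image:

import numpy as np
from skimage import transform

im = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)

im = transform.resize(im, (224, 224))   # float64 in [0, 1]
im = (255 * im).astype(np.uint8)        # back to uint8 for storage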
github conormdurkan / autoregressive-energy-machines / tensorflow / utils / data_generators_2D.py
import imageio
import numpy as np
from skimage import color, transform


def create_einstein_data(n, im_path='../img/einstein.jpg'):
    image = imageio.imread(im_path)
    image = color.rgb2gray(image)
    image = transform.resize(image, (512, 512))

    grid = np.array(
        [(x, y) for x in range(image.shape[0]) for y in range(image.shape[1])]
    )

    # Sample pixel locations with probability proportional to intensity.
    rotation_matrix = np.array([[0, -1], [1, 0]])  # rotates points 90 degrees
    p = image.reshape(-1) / image.reshape(-1).sum()
    ix = np.random.choice(range(len(grid)), size=n, replace=True, p=p)
    points = grid[ix].astype(np.float32)
    points += np.random.rand(n, 2)  # dequantize
    points /= image.shape[0]  # scale to [0, 1]

    data = (points @ rotation_matrix).astype(np.float32)
    data[:, 1] += 1
    return data
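
A hedged usage sketch, assuming an einstein.jpg actually exists at the default path:

import matplotlib.pyplot as plt

# Draw 10,000 points from the image-intensity density and plot them.
points = create_einstein_data(10000)
plt.scatter(points[:, 0], points[:, 1], s=1, alpha=0.3)
plt.gca().set_aspect('equal')
plt.show()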
github scikit-image / scikit-image / doc / examples / plot_censure.py
"""
The CENSURE feature detector is a scale-invariant center-surround detector
(CENSURE) that claims to outperform other detectors and is capable of real-time
implementation.

"""
from skimage import data
from skimage import transform as tf
from skimage.feature import CENSURE
from skimage.color import rgb2gray
import matplotlib.pyplot as plt


img1 = rgb2gray(data.astronaut())
tform = tf.AffineTransform(scale=(1.5, 1.5), rotation=0.5,
                           translation=(150, -200))
img2 = tf.warp(img1, tform)

detector = CENSURE()

fig, ax = plt.subplots(nrows=1, ncols=2)

plt.gray()

detector.detect(img1)

ax[0].imshow(img1)
ax[0].axis('off')
ax[0].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
              2 ** detector.scales, facecolors='none', edgecolors='r')

detector.detect(img2)

ax[1].imshow(img2)
ax[1].axis('off')
ax[1].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
              2 ** detector.scales, facecolors='none', edgecolors='r')

plt.show()
github aleju / imgaug / imgaug / augmenters / geometric.py
            shift_x = width / 2.0 - 0.5
            shift_y = height / 2.0 - 0.5
            scale_x, scale_y = scale_samples[0][i], scale_samples[1][i]
            translate_x, translate_y = translate_samples[0][i], translate_samples[1][i]
            if ia.is_single_float(translate_y):
                translate_y_px = int(np.round(translate_y * keypoints_on_image.shape[0]))
            else:
                translate_y_px = translate_y
            if ia.is_single_float(translate_x):
                translate_x_px = int(np.round(translate_x * keypoints_on_image.shape[1]))
            else:
                translate_x_px = translate_x
            rotate = rotate_samples[i]
            shear = shear_samples[i]
            if scale_x != 1.0 or scale_y != 1.0 or translate_x_px != 0 or translate_y_px != 0 or rotate != 0 \
                    or shear != 0:
                matrix_to_topleft = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
                matrix_transforms = tf.AffineTransform(
                    scale=(scale_x, scale_y),
                    translation=(translate_x_px, translate_y_px),
                    rotation=math.radians(rotate),
                    shear=math.radians(shear)
                )
                matrix_to_center = tf.SimilarityTransform(translation=[shift_x, shift_y])
                matrix = (matrix_to_topleft + matrix_transforms + matrix_to_center)
                if self.fit_output:
                    matrix, output_shape = self._tf_to_fit_output(keypoints_on_image.shape, matrix)
                else:
                    output_shape = keypoints_on_image.shape

                coords = keypoints_on_image.get_coords_array()
                coords_aug = tf.matrix_transform(coords, matrix.params)
                result.append(ia.KeypointsOnImage.from_coords_array(coords_aug, shape=output_shape))
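
Keypoints ride through the same matrix as the image via matrix_transform, which applies a 3x3 homogeneous matrix to an (N, 2) array of (x, y) coordinates. A minimal sketch with illustrative points and angle:

import numpy as np
import skimage.transform as tf

coords = np.array([[0.0, 0.0], [10.0, 0.0], [0.0, 10.0]])
matrix = tf.AffineTransform(rotation=np.deg2rad(90))

# Rotates each point 90 degrees about the origin:
# (0,0)->(0,0), (10,0)->(0,10), (0,10)->(-10,0)
coords_aug = tf.matrix_transform(coords, matrix.params)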
github TenteEEEE / quiche_pantie_patch / src / models / caronauff.py
        patch = affine_transform_by_arr(patch, arrx, arry)
        pantie[-250:, 485:, :] = 0

        # Affine transform matrix for whole image
        arrx = np.zeros(100)
        arrx[10:] += np.linspace(0, 1, 90)**2 * 195
        arrx[50:80] += np.sin(np.linspace(0, 1 * np.pi, 30)) * -20
        arry = np.zeros(100)
        arry[20:80] += np.sin(np.linspace(0, 1 * np.pi, 60) + np.pi / 2) * -70
        arrx -= 110
        pantie = affine_transform_by_arr(pantie, arrx, arry, smoothx=True, mvx=20, smoothy=True, mvy=20)
        pantie = pantie[10:, 27:-30]
        
        # paste patch
        patch = skt.rotate(patch, 90)[:, 68:155]
        patch = skt.resize(patch[:, :, :], (150, 80), anti_aliasing=True, mode='reflect')
        pantie[240:240 + patch.shape[0], :patch.shape[1], :] = patch
        pantie = resize(pantie, [1.04, 0.77])  # presumably the project's ratio-based resize helper, not skimage's
        pantie = np.rot90(pantie, -1)
        pantie = np.uint8(pantie*255)
        pantie = np.bitwise_and(pantie, self.mask)
        pantie = np.concatenate((pantie[::-1],pantie),axis=0)
        return Image.fromarray(pantie)
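
The skimage.transform pieces in this last snippet are rotate and resize; a minimal standalone sketch of just those two calls on a random RGBA patch:

import numpy as np
import skimage.transform as skt

patch = np.random.rand(200, 150, 4)  # stand-in for the pantie patch

# rotate() takes degrees and returns a float image of the same shape.
patch = skt.rotate(patch, 90)

# anti_aliasing smooths before downsampling; mode='reflect' pads edges.
patch = skt.resize(patch, (150, 80), anti_aliasing=True, mode='reflect')
print(patch.shape)  # (150, 80, 4)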