How to use the fastai.transforms.CoordTransform class in fastai

To help you get started, we’ve selected a few fastai examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github alecrubin / pytorch-serverless / fastai / transforms.py View on Github external
class RandomDihedral(CoordTransform):
	"""
	Applies a random symmetry of the square (the dihedral group D8 of
	order eight): a rotation by a random multiple of 90 degrees,
	optionally followed by a left-right reflection.
	"""

	def set_state(self):
		# Draw the random parameters once per image so x and y receive
		# the same transform: quarter-turn count and whether to mirror.
		self.store.rot_times = random.randint(0, 3)
		self.store.do_flip = random.random() < 0.5

	def do_transform(self, x, is_y):
		rotated = np.rot90(x, self.store.rot_times)
		if not self.store.do_flip:
			return rotated
		return np.fliplr(rotated).copy()


class RandomFlip(CoordTransform):
	"""Horizontally mirrors the image with probability `p` (default 0.5)."""

	def __init__(self, tfm_y=TfmType.NO, p=0.5):
		super().__init__(tfm_y=tfm_y)
		self.p = p

	def set_state(self):
		# One random draw per image, reused for x and (optionally) y.
		self.store.do_flip = random.random() < self.p

	def do_transform(self, x, is_y):
		if self.store.do_flip:
			return np.fliplr(x).copy()
		return x


class RandomLighting(Transform):
	def __init__(self, b, c, tfm_y=TfmType.NO):
		"""b: max brightness jitter; c: max contrast jitter; tfm_y: y-transform type."""
		super().__init__(tfm_y)
		self.b = b
		self.c = c

	def set_state(self):
		self.store.b_rand = rand0(self.b)
github alecrubin / pytorch-serverless / fastai / transforms.py View on Github external
---------
			pad : int
					size of padding on top, bottom, left and right
			mode:
					type of cv2 padding modes. (e.g., constant, reflect, wrap, replicate. etc. )
	"""

	def __init__(self, pad, mode=cv2.BORDER_REFLECT, tfm_y=TfmType.NO):
		"""pad: border width in pixels on every side; mode: a cv2 border mode."""
		super().__init__(tfm_y)
		self.pad = pad
		self.mode = mode

	def do_transform(self, im, is_y):
		# Pad equally on all four sides with the configured border mode.
		p = self.pad
		return cv2.copyMakeBorder(im, p, p, p, p, self.mode)


class CenterCrop(CoordTransform):
	""" A class that represents a Center Crop.

	This transforms (optionally) transforms x,y at with the same parameters.
	Arguments
	---------
			sz: int
					size of the crop.
			tfm_y : TfmType
					type of y transformation.
	"""

	def __init__(self, sz, tfm_y=TfmType.NO, sz_y=None):
		"""sz: crop size for x; sz_y: optional separate crop size for y."""
		super().__init__(tfm_y)
		self.min_sz = sz
		self.sz_y = sz_y

	def do_transform(self, x, is_y):
github alecrubin / pytorch-serverless / fastai / transforms.py View on Github external
self.modes = (mode, cv2.BORDER_CONSTANT)
		else:
			self.modes = (mode, mode)

	def set_state(self):
		"""Sample per-image parameters: rotation angle and whether to rotate."""
		angle = rand0(self.deg)
		apply_it = random.random() < self.p
		self.store.rdeg = angle
		self.store.rp = apply_it

	def do_transform(self, x, is_y):
		# y targets use the second border mode and nearest-neighbour
		# interpolation so label values are not blended.
		if not self.store.rp:
			return x
		border = self.modes[1] if is_y else self.modes[0]
		interp = cv2.INTER_NEAREST if is_y else cv2.INTER_AREA
		return rotate_cv(x, self.store.rdeg, mode=border, interpolation=interp)


class RandomDihedral(CoordTransform):
	"""
	Applies one of the eight symmetries of the square (dihedral group D8):
	0-3 quarter-turn rotations combined with an optional left-right flip.
	"""

	def set_state(self):
		# Per-image random draws, shared between the x and y transforms.
		self.store.rot_times = random.randint(0, 3)
		self.store.do_flip = random.random() < 0.5

	def do_transform(self, x, is_y):
		out = np.rot90(x, self.store.rot_times)
		if self.store.do_flip:
			out = np.fliplr(out).copy()
		return out


class RandomFlip(CoordTransform):
	def __init__(self, tfm_y=TfmType.NO, p=0.5):
github alecrubin / pytorch-serverless / fastai / transforms.py View on Github external
self.b, self.c = b, c

	def set_state(self):
		"""Sample brightness and contrast offsets for this image."""
		brightness = rand0(self.b)
		contrast = rand0(self.c)
		self.store.b_rand = brightness
		self.store.c_rand = contrast

	def do_transform(self, x, is_y):
		# Only pixel-valued y targets receive lighting changes; other y
		# types (masks, coords) pass through untouched.
		if is_y and self.tfm_y != TfmType.PIXEL:
			return x
		b = self.store.b_rand
		c = self.store.c_rand
		# Map the symmetric draw onto a positive contrast factor:
		# negative draws give factors below 1, positive draws above 1.
		if c < 0:
			c = -1 / (c - 1)
		else:
			c = c + 1
		return lighting(x, b, c)


class RandomRotateZoom(CoordTransform):
	"""
			Selects between a rotate, zoom, stretch, or no transform.
			Arguments:
					deg - maximum degrees of rotation.
					zoom - maximum fraction of zoom.
					stretch - maximum fraction of stretch.
					ps - probabilities for each transform. List of length 4. The order for these probabilities is as listed respectively (4th probability is 'no transform'.
	"""

	def __init__(self, deg, zoom, stretch, ps=None, mode=cv2.BORDER_REFLECT, tfm_y=TfmType.NO):
		super().__init__(tfm_y)
		if ps is None: ps = [0.25, 0.25, 0.25, 0.25]
		assert len(ps) == 4, 'does not have 4 probabilities for p, it has %d'%len(ps)
		self.transforms = RandomRotate(deg, p=1, mode=mode, tfm_y=tfm_y), RandomZoom(zoom, tfm_y=tfm_y), RandomStretch(
				stretch, tfm_y=tfm_y)
		self.pass_t = PassThru()
github alecrubin / pytorch-serverless / fastai / transforms.py View on Github external
tfm_y: TfmType
					type of y transformation.
	"""

	def __init__(self, sz, tfm_y=TfmType.NO, sz_y=None):
		"""sz: target size for x; sz_y: optional separate target size for y."""
		super().__init__(tfm_y)
		self.sz = sz
		self.sz_y = sz_y

	def do_transform(self, x, is_y):
		if is_y:
			# Pixel-valued y keeps area interpolation; other y types use
			# nearest-neighbour so label values are not interpolated.
			interp = cv2.INTER_AREA if self.tfm_y == TfmType.PIXEL else cv2.INTER_NEAREST
			return scale_min(x, self.sz_y, interp)
		return scale_min(x, self.sz, cv2.INTER_AREA)


class RandomScale(CoordTransform):
	""" Scales an image so that the min size is a random number between [sz, sz*max_zoom]

	This transforms (optionally) scales x,y at with the same parameters.
	Arguments:
			sz: int
					target size
			max_zoom: float
					float >= 1.0
			p : float
					a probability for doing the random sizing
			tfm_y: TfmType
					type of y transform
	"""

	def __init__(self, sz, max_zoom, p=0.75, tfm_y=TfmType.NO, sz_y=None):
		super().__init__(tfm_y)
github alecrubin / pytorch-serverless / fastai / transforms.py View on Github external
self.store.kernel = (kernel_size, kernel_size)

	def do_transform(self, x, is_y):
		if not self.apply_transform:
			return x
		return cv2.GaussianBlur(src=x, ksize=self.store.kernel, sigmaX=0)


class Cutout(Transform):
	"""Masks out `n_holes` square regions of side `length` from the image."""

	def __init__(self, n_holes, length, tfm_y=TfmType.NO):
		super().__init__(tfm_y)
		self.n_holes = n_holes
		self.length = length

	def do_transform(self, img, is_y):
		# Delegates to the module-level cutout() helper.
		return cutout(img, self.n_holes, self.length)


class GoogleNetResize(CoordTransform):
	""" Randomly crops an image with an aspect ratio and returns a squared resized image of size targ

	Arguments:
			targ_sz: int
					target size
			min_area_frac: float < 1.0
					minimum area of the original image for cropping
			min_aspect_ratio : float
					minimum aspect ratio
			max_aspect_ratio : float
					maximum aspect ratio
			flip_hw_p : float
					probability for flipping magnitudes of height and width
			tfm_y: TfmType
					type of y transform
	"""
github alecrubin / pytorch-serverless / fastai / transforms.py View on Github external
---------
			sz: int
					size of the crop.
			tfm_y : TfmType
					type of y transformation.
	"""

	def __init__(self, sz, tfm_y=TfmType.NO, sz_y=None):
		"""sz: crop size for x; sz_y: optional distinct crop size for y."""
		super().__init__(tfm_y)
		self.min_sz = sz
		self.sz_y = sz_y

	def do_transform(self, x, is_y):
		# y may carry its own crop size (sz_y); x uses min_sz.
		size = self.sz_y if is_y else self.min_sz
		return center_crop(x, size)


class RandomCrop(CoordTransform):
	""" A class that represents a Random Crop transformation.

	This transforms (optionally) transforms x,y at with the same parameters.
	Arguments
	---------
			targ: int
					target size of the crop.
			tfm_y: TfmType
					type of y transformation.
	"""

	def __init__(self, targ_sz, tfm_y=TfmType.NO, sz_y=None):
		"""targ_sz: target crop size; sz_y: optional separate size for y."""
		super().__init__(tfm_y)
		self.targ_sz = targ_sz
		self.sz_y = sz_y

	def set_state(self):
github alecrubin / pytorch-serverless / fastai / transforms.py View on Github external
assert self.cum_ps[3] == 1, 'probabilites do not sum to 1; they sum to %d'%self.cum_ps[3]

	def set_state(self):
		"""Pick one of the candidate transforms by cumulative probability."""
		self.store.trans = self.pass_t  # default: identity transform
		draw = self.cum_ps[3] * random.random()
		self.store.choice = draw
		# First cumulative threshold the draw falls under wins.
		for candidate, threshold in zip(self.transforms, self.cum_ps):
			if draw < threshold:
				self.store.trans = candidate
				break
		self.store.trans.set_state()

	def do_transform(self, x, is_y):
		# Delegate to whichever transform set_state() selected.
		chosen = self.store.trans
		return chosen.do_transform(x, is_y)


class RandomZoom(CoordTransform):
	"""Zooms by a factor drawn uniformly from [zoom_min, zoom_max]."""

	def __init__(self, zoom_max, zoom_min=0, mode=cv2.BORDER_REFLECT, tfm_y=TfmType.NO):
		# `mode` is accepted for API consistency but not stored/used here.
		super().__init__(tfm_y)
		self.zoom_max = zoom_max
		self.zoom_min = zoom_min

	def set_state(self):
		span = self.zoom_max - self.zoom_min
		self.store.zoom = self.zoom_min + span * random.random()

	def do_transform(self, x, is_y):
		return zoom_cv(x, self.store.zoom)


class RandomStretch(CoordTransform):
	def __init__(self, max_stretch, tfm_y=TfmType.NO):
		"""max_stretch: upper bound on the random stretch fraction."""
		super().__init__(tfm_y)
		self.max_stretch = max_stretch
github alecrubin / pytorch-serverless / fastai / transforms.py View on Github external
super().__init__(tfm_y)
		self.max_stretch = max_stretch

	def set_state(self):
		"""Sample stretch amount and axis (0 = first arg, 1 = second)."""
		amount = self.max_stretch * random.random()
		direction = random.randint(0, 1)
		self.store.stretch = amount
		self.store.stretch_dir = direction

	def do_transform(self, x, is_y):
		s = self.store.stretch
		# Stretch along exactly one direction, chosen in set_state().
		if self.store.stretch_dir == 0:
			return stretch_cv(x, s, 0)
		return stretch_cv(x, 0, s)


class PassThru(CoordTransform):
	"""Identity transform: returns its input unchanged."""

	def do_transform(self, x, is_y):
		return x


class RandomBlur(Transform):
	"""
	Adds a gaussian blur to the image at chance.
	Multiple blur strengths can be configured, one of them is used by random chance.
	"""

	def __init__(self, blur_strengths=5, probability=0.5, tfm_y=TfmType.NO):
		# Blur strength must be an odd number, because it is used as a kernel size.
		super().__init__(tfm_y)
		self.blur_strengths = (np.array(blur_strengths, ndmin=1)*2)-1
		if np.any(self.blur_strengths < 0):
			raise ValueError("all blur_strengths must be > 0")
github alecrubin / pytorch-serverless / fastai / transforms.py View on Github external
y1[y[0]:y[2], y[1]:y[3]] = 1.
		return y1

	def map_y(self, y0, x):
		# Rasterize the bounding box onto an image-sized mask, run the
		# same transform as the image, then read the box back out.
		square = CoordTransform.make_square(y0, x)
		transformed = self.do_transform(square, True)
		return to_bb(transformed)

	def transform_coord(self, x, ys):
		# Each group of 4 values in `ys` is one bounding box. Boxes are
		# mapped before x is transformed, matching the stored transform state.
		boxes = partition(ys, 4)
		mapped = [self.map_y(box, x) for box in boxes]
		x = self.do_transform(x, False)
		return x, np.concatenate(mapped)


class AddPadding(CoordTransform):
	""" A class that represents adding paddings to an image.

	The default padding is border_reflect
	Arguments
	---------
			pad : int
					size of padding on top, bottom, left and right
			mode:
					type of cv2 padding modes. (e.g., constant, reflect, wrap, replicate. etc. )
	"""

	def __init__(self, pad, mode=cv2.BORDER_REFLECT, tfm_y=TfmType.NO):
		"""pad: pixels of padding on each side; mode: a cv2 border mode."""
		super().__init__(tfm_y)
		self.pad = pad
		self.mode = mode

	def do_transform(self, im, is_y):