def transform(self, image, mask):
    aug = Compose([
        HorizontalFlip(p=0.9),
        RandomBrightness(p=0.5, limit=0.3),
        RandomContrast(p=0.5, limit=0.3),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=20,
                         p=0.7, border_mode=0, interpolation=4),  # 0 = cv2.BORDER_CONSTANT, 4 = cv2.INTER_LANCZOS4
    ])
    augmented = aug(image=image, mask=mask)
    return augmented['image'], augmented['mask']
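
# Usage sketch (added, not from the original repo): a hedged minimal example of how
# an albumentations Compose like the one above is applied. It expects an HxWxC uint8
# image and an HxW mask as numpy arrays and returns a dict; the dummy arrays below
# are assumptions for illustration only.
import numpy as np
import albumentations as A

sample_image = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
sample_mask = np.random.randint(0, 2, (256, 256), dtype=np.uint8)
demo_aug = A.Compose([A.HorizontalFlip(p=0.9), A.ShiftScaleRotate(p=0.7)])
result = demo_aug(image=sample_image, mask=sample_mask)
aug_image, aug_mask = result['image'], result['mask']  # spatial transforms stay in sync
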
def get_light_augmentations(image_size):
    return A.Compose([
        A.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.1,
                           rotate_limit=15,
                           border_mode=cv2.BORDER_CONSTANT, value=0),
        A.RandomSizedCrop(min_max_height=(int(image_size[0] * 0.85), image_size[0]),
                          height=image_size[0],
                          width=image_size[1], p=0.3),
        # Brightness/contrast augmentations
        A.OneOf([
            A.RandomBrightnessContrast(brightness_limit=0.25,
                                       contrast_limit=0.2),
            IndependentRandomBrightnessContrast(brightness_limit=0.1,
                                                contrast_limit=0.1),
            A.RandomGamma(gamma_limit=(75, 125)),
            A.NoOp()
        ]),
        A.HorizontalFlip(p=0.5),
    ])
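
# Usage sketch (added): get_light_augmentations returns a regular albumentations
# Compose, so it can be applied directly. Assumptions: A is albumentations, cv2 is
# OpenCV, IndependentRandomBrightnessContrast is a custom transform defined
# elsewhere in the source repo, and the dummy image is illustrative only.
import numpy as np

dummy_image = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)
light_aug = get_light_augmentations(image_size=(512, 512))
light_image = light_aug(image=dummy_image)['image']
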
normalization = albu.Normalize(mean=mean, std=std, p=1)

train_augmentations = albu.Compose(
    [
        albu.RandomSizedCrop(
            min_max_height=(
                int(0.5 * (train_parameters["height_crop_size"])),
                int(2 * (train_parameters["height_crop_size"])),
            ),
            height=train_parameters["height_crop_size"],
            width=train_parameters["width_crop_size"],
            w2h_ratio=1.0,
            p=1,
        ),
        albu.ShiftScaleRotate(
            border_mode=cv2.BORDER_CONSTANT, rotate_limit=10, scale_limit=0, p=0.5, mask_value=ignore_index
        ),
        albu.RandomBrightnessContrast(p=0.5),
        albu.RandomGamma(p=0.5),
        albu.ImageCompression(quality_lower=20, quality_upper=100, p=0.5),
        albu.GaussNoise(p=0.5),
        albu.Blur(p=0.5),
        albu.CoarseDropout(p=0.5, max_height=26, max_width=16),
        albu.OneOf([albu.HueSaturationValue(p=0.5), albu.RGBShift(p=0.5)], p=0.5),
        normalization,
    ],
    p=1,
)

val_augmentations = albu.Compose(
    [
trnmdf['Sequence2'] = np.log(trnmdf[::-1].groupby(['PatientID']).cumcount() + 1)
tstmdf['Sequence2'] = np.log(tstmdf[::-1].groupby(['PatientID']).cumcount() + 1)
trnseq = trnmdf[['SOPInstanceUID', 'Sequence1', 'Sequence2']]
tstseq = tstmdf[['SOPInstanceUID', 'Sequence1', 'Sequence2']]
trnseq.columns = tstseq.columns = ['Image', 'Sequence1', 'Sequence2']
trndf = trndf.merge(trnseq, on='Image').sort_index()
valdf = valdf.merge(trnseq, on='Image').sort_index()
test = test.merge(tstseq, on='Image').sort_index()
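
# Illustration sketch (added, toy data rather than the original metadata tables) of
# what the Sequence2 feature above computes: the log of each slice's 1-based
# position counted from the end of its PatientID group, in the current row order.
import numpy as np
import pandas as pd

toy = pd.DataFrame({'PatientID': ['a', 'a', 'a', 'b', 'b']})
toy['Sequence2'] = np.log(toy[::-1].groupby(['PatientID']).cumcount() + 1)
print(toy)
#   PatientID  Sequence2
# 0         a   1.098612   # log(3): three slices remain from here to the end of group 'a'
# 1         a   0.693147   # log(2)
# 2         a   0.000000   # log(1)
# 3         b   0.693147
# 4         b   0.000000
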
# Data loaders
transform_train = Compose([
    HorizontalFlip(p=0.5),
    ShiftScaleRotate(shift_limit=0.1, scale_limit=0.01,
                     rotate_limit=30, p=0.7, border_mode=cv2.BORDER_REPLICATE),
    ToTensor()
])
transform_test = Compose([
    ToTensor()
])
trndataset = IntracranialDataset(trndf, path=dir_train_img, transform=transform_train, labels=True)
valdataset = IntracranialDataset(valdf, path=dir_train_img, transform=transform_test, labels=False)
tstdataset = IntracranialDataset(test, path=dir_test_img, transform=transform_test, labels=False)
num_workers = 16
trnloader = DataLoader(trndataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
valloader = DataLoader(valdataset, batch_size=batch_size*4, shuffle=False, num_workers=num_workers)
tstloader = DataLoader(tstdataset, batch_size=batch_size*4, shuffle=False, num_workers=num_workers)
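
# Usage sketch (added): pulling one batch from trnloader to sanity-check shapes.
# The exact item structure returned by IntracranialDataset is defined elsewhere in
# the source repo; the dict keys used below ('image', 'labels') are an assumption.
batch = next(iter(trnloader))
print(batch['image'].shape, batch['labels'].shape)
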
def hard_augmentations():
    return A.Compose(
        [
            # D4 augmentations (rotations/transpose) plus grid shuffling
            A.RandomRotate90(),
            A.Transpose(),
            A.RandomGridShuffle(),
            A.ShiftScaleRotate(
                scale_limit=0.1, rotate_limit=45, border_mode=cv2.BORDER_CONSTANT, mask_value=0, value=0
            ),
            A.ElasticTransform(border_mode=cv2.BORDER_CONSTANT, alpha_affine=5, mask_value=0, value=0),
            # Add occasional blur/noise
            A.OneOf([A.GaussianBlur(), A.GaussNoise(), A.IAAAdditiveGaussianNoise(), A.NoOp()]),
            # Dropout-style augmentations
            A.OneOf([A.CoarseDropout(), A.MaskDropout(max_objects=10), A.NoOp()]),
            # Spatial-preserving (color) augmentations:
            A.OneOf(
                [
                    A.RandomBrightnessContrast(brightness_by_max=True),
                    A.CLAHE(),
                    A.HueSaturationValue(),
                    A.RGBShift(),
                    A.RandomGamma(),
                    A.NoOp(),
        im = np.concatenate((i1, i2), 1)
        im = Image.fromarray(im)
        os.system('pkill eog')  # if you use the GNOME image viewer
        display(im)
        im.close()
        time.sleep(0.1)
    except:
        continue
'''
mean_img = [0.22363983, 0.18190407, 0.2523437]
std_img = [0.32451536, 0.2956294, 0.31335256]

transform_train = Compose([
    HorizontalFlip(p=0.5),
    ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05,
                     rotate_limit=20, p=0.3, border_mode=cv2.BORDER_REPLICATE),
    Transpose(p=0.5),
    Normalize(mean=mean_img, std=std_img, max_pixel_value=255.0, p=1.0),
    ToTensor()
])
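
# Usage sketch (added): applying transform_train above to a single image. With
# Normalize followed by albumentations' ToTensor, the uint8 HxWxC input comes back
# as a normalized CHW torch tensor under the 'image' key. The dummy array is an
# assumption for illustration only.
import numpy as np

dummy = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
tensor_img = transform_train(image=dummy)['image']  # torch.Tensor of shape (3, 224, 224)
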
from tqdm import tqdm

meanls = []
stdls = []
for tt, imname in enumerate(imgls):
    img = cv2.imread(imname)
    try:
        img = autocropmin(img, threshold=0)
    except:
        try:
def get_augmentations(augmentation, p):
    if augmentation == 'valid':
        augmentations = Compose([
            HorizontalFlip(p=0.5),
            RandomBrightness(p=0.2, limit=0.2),
            RandomContrast(p=0.1, limit=0.2),
            ShiftScaleRotate(shift_limit=0.1625, scale_limit=0.6, rotate_limit=0, p=0.7)
        ], p=p)
    else:
        raise ValueError("Unknown Augmentations")
    return augmentations
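
# Usage sketch (added): 'valid' is the only branch implemented above; any other
# value raises ValueError. The outer p is the probability of applying the whole
# Compose when it is called.
valid_aug = get_augmentations('valid', p=1.0)
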
def get_augumentation(phase, width=512, height=512, min_area=0., min_visibility=0.):
    list_transforms = []
    if phase == 'train':
        list_transforms.extend([
            albu.OneOf([
                albu.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.1,
                                      rotate_limit=15,
                                      border_mode=cv2.BORDER_CONSTANT, value=0),
                albu.NoOp()
            ]),
            albu.augmentations.transforms.RandomResizedCrop(
                height=height,
                width=width, p=1.0),
            albu.OneOf([
                albu.RandomBrightnessContrast(brightness_limit=0.5,
                                              contrast_limit=0.4),
                albu.RandomGamma(gamma_limit=(50, 150)),
                albu.NoOp()
            ]),
            albu.OneOf([
                albu.RGBShift(r_shift_limit=20, b_shift_limit=15,
                              g_shift_limit=15),
normalization = albu.Normalize(mean=mean, std=std, p=1)

train_augmentations = albu.Compose(
    [
        albu.RandomSizedCrop(
            min_max_height=(
                int(0.5 * (train_parameters["height_crop_size"])),
                int(2 * (train_parameters["height_crop_size"])),
            ),
            height=train_parameters["height_crop_size"],
            width=train_parameters["width_crop_size"],
            w2h_ratio=1.0,
            p=1,
        ),
        albu.ShiftScaleRotate(
            border_mode=cv2.BORDER_CONSTANT, rotate_limit=10, scale_limit=0, p=0.5, mask_value=ignore_index
        ),
        albu.HorizontalFlip(p=0.5),
        normalization,
    ],
    p=1,
)

val_augmentations = albu.Compose(
    [
        albu.PadIfNeeded(
            min_height=1024, min_width=2048, border_mode=cv2.BORDER_CONSTANT, mask_value=ignore_index, p=1
        ),
        normalization,
    ],
    p=1,
)
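
# Usage sketch (added): how pipelines like train_augmentations / val_augmentations
# above are typically applied in a segmentation setting. mean, std, ignore_index and
# train_parameters come from configuration not shown here; the dummy arrays are
# assumptions for illustration only.
import numpy as np

image = np.random.randint(0, 256, (1024, 2048, 3), dtype=np.uint8)
mask = np.random.randint(0, 19, (1024, 2048), dtype=np.uint8)
sample = val_augmentations(image=image, mask=mask)
padded_image, padded_mask = sample["image"], sample["mask"]  # padded to at least 1024x2048, image normalized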