How to use the albumentations.HorizontalFlip transform in albumentations

To help you get started, we've selected a few HorizontalFlip examples based on popular ways it is used in public projects.

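Before the project examples, here is a minimal, self-contained sketch of what HorizontalFlip does on its own. The arrays below are placeholders, not data from any of the projects:

import numpy as np
import albumentations as A

# Placeholder inputs; in practice these come from your dataset
image = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
mask = np.random.randint(0, 2, (256, 256), dtype=np.uint8)

# p=1.0 forces the flip; the default is p=0.5 (flip half the time)
flip = A.HorizontalFlip(p=1.0)
augmented = flip(image=image, mask=mask)
flipped_image, flipped_mask = augmented['image'], augmented['mask']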

github lyakaap / Landmark2019-1st-and-3rd-Place-Solution / experiments / v7.py
from albumentations import Compose, HorizontalFlip, Normalize, RandomBrightness


def build_transforms(mean=(0.485, 0.456, 0.406),
                     std=(0.229, 0.224, 0.225),
                     divide_by=255.0,
                     scale_range=0.1,
                     brightness_range=0.1):

    from src import torch_custom

    # CSAIL ResNet
    # norm = Normalize(mean=(102.9801, 115.9465, 122.7717), std=(1., 1., 1.), max_pixel_value=1, p=1.0)
    norm = Normalize(mean=mean, std=std, max_pixel_value=divide_by)

    train_transform = Compose([
        torch_custom.RandomCropThenScaleToOriginalSize(limit=scale_range, p=1.0),
        RandomBrightness(limit=brightness_range, p=0.5),
        HorizontalFlip(p=0.5),
        norm,
    ])
    eval_transform = Compose([
        norm,
    ])

    return train_transform, eval_transform
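A sketch of how the returned pipelines would typically be consumed, assuming the repo's src package is importable; the placeholder image is not part of the snippet:

import numpy as np

train_transform, eval_transform = build_transforms()

# Albumentations pipelines take named arrays and return a dict
image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)  # placeholder
train_image = train_transform(image=image)['image']
eval_image = eval_transform(image=image)['image']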
github Erlemar / Understanding-Clouds-from-Satellite-Images / augs.py
import albumentations as albu


def get_training_augmentation1(image_size: tuple = (320, 640)):
    """Build the training augmentation pipeline.

    Args:
        image_size: target (height, width) passed to albu.Resize.

    Returns:
        An albu.Compose of flip, shift/scale/rotate, distortion,
        and resize transforms.
    """
    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.3, rotate_limit=15, shift_limit=0.1, p=0.5, border_mode=0),
        albu.GridDistortion(p=0.5),
        albu.OpticalDistortion(p=0.5, distort_limit=0.1, shift_limit=0.2),
        albu.Resize(*image_size),
    ]
    return albu.Compose(train_transform)
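Because every transform in this list is mask-aware, the same call can carry the segmentation mask so the geometric changes stay in sync with the image; a sketch with placeholder arrays (the input shapes are assumptions):

import numpy as np

aug = get_training_augmentation1(image_size=(320, 640))

image = np.random.randint(0, 256, (1400, 2100, 3), dtype=np.uint8)  # placeholder
mask = np.zeros((1400, 2100), dtype=np.uint8)                       # placeholder

out = aug(image=image, mask=mask)
image_aug, mask_aug = out['image'], out['mask']  # both resized to (320, 640)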
github nyoki-mtl / pytorch-segmentation / src / train.py
# Resume from a saved training history when one exists; the guarding `if`
# was cut from this snippet, so the condition name here is assumed
if resume:
    with open(log_dir.joinpath('history.pkl'), 'rb') as f:
        history_dict = pickle.load(f)
        best_metrics = history_dict['best_metrics']
        loss_history = history_dict['loss']
        iou_history = history_dict['iou']
        start_epoch = len(iou_history)
        # Fast-forward the LR scheduler to the resumed epoch
        for _ in range(start_epoch):
            scheduler.step()
else:
    start_epoch = 0
    best_metrics = 0
    loss_history = []
    iou_history = []

# Dataset
affine_augmenter = albu.Compose([albu.HorizontalFlip(p=.5),
                                 # Rotate(5, p=.5)
                                 ])
# image_augmenter = albu.Compose([albu.GaussNoise(p=.5),
#                                 albu.RandomBrightnessContrast(p=.5)])
image_augmenter = None
train_dataset = Dataset(affine_augmenter=affine_augmenter, image_augmenter=image_augmenter,
                        net_type=net_type, **data_config)
valid_dataset = Dataset(split='valid', net_type=net_type, **data_config)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4,
                          pin_memory=True, drop_last=True)
valid_loader = DataLoader(valid_dataset, batch_size=1, shuffle=False, num_workers=4, pin_memory=True)

# To device
model = model.to(device)

# Pretrained model
github n01z3 / kaggle-pneumothorax-segmentation / legacy / predict_fold.py
sublist = []
for index, row in tqdm.tqdm(sample_df.iterrows(), total=len(sample_df)):
    image_id = row['ImageId']
    if image_id in masks_:
        tta_img = []
        tta_preds = []
        img_path = os.path.join('input/test_png', image_id + '.png')

        img = imread(img_path)
        width, height = img.shape[0], img.shape[1]
        img = imresize(img, (IMG_SIZE, IMG_SIZE), interp='bilinear')

        # Build the TTA batch: the original image plus a horizontally flipped copy
        aug_HFlip = albumentations.HorizontalFlip(p=1)
        # aug_Rot90 = albumentations.Transpose(p=1)
        augmented1 = aug_HFlip(image=img)
        # augmented2 = aug_Rot90(image=img)

        img1 = augmented1['image']
        # img2 = augmented2['image']

        tta_img.append(img)
        tta_img.append(img1)
        # tta_img.append(img2)
        inx = 0
        for img in tta_img:
            img = img[np.newaxis, np.newaxis, :, :]
            img = torch.FloatTensor(img)
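One step the fragment leaves implicit: predictions made on the flipped copy must be flipped back before they can be merged with the original. A minimal sketch of that inverse step (the function name is illustrative, not from the repo):

import numpy as np

def merge_hflip_tta(pred_orig, pred_flip):
    # A horizontal flip is its own inverse, so flipping the second
    # prediction back along the width (last) axis re-aligns it
    return (pred_orig + pred_flip[..., ::-1]) / 2.0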
github Diyago / Severstal-Steel-Defect-Detection / logs / exp 1 val dice 0.75047 LB 0.90386 / code / common_blocks / dataloader.py
from albumentations import Compose, HorizontalFlip, Normalize
from albumentations.pytorch import ToTensor


def get_transforms(phase, mean, std):
    list_transforms = []
    if phase == "train":
        list_transforms.extend(
            [
                # CropNonEmptyMaskIfExists(crop_image_size, crop_image_size),
                HorizontalFlip(p=0.5),  # only horizontal flip as of now
            ]
        )
    list_transforms.extend(
        [
            Normalize(mean=mean, std=std, p=1),
            ToTensor(),
        ]
    )
    list_trfms = Compose(list_transforms)
    return list_trfms
github BloodAxe / Catalyst-Inria-Segmentation-Example / inria / augmentations.py
import albumentations as A


def safe_augmentations():
    return A.Compose([A.HorizontalFlip(), A.RandomBrightnessContrast(), A.Normalize()])
github ternaus / iglovikov_segmentation / configs / fpn_resnext50_32x4d_cityscapes_2gpu_a.py
normalization = albu.Normalize(mean=mean, std=std, p=1)

train_augmentations = albu.Compose(
    [
        albu.RandomSizedCrop(
            min_max_height=(
                int(0.5 * (train_parameters["height_crop_size"])),
                int(2 * (train_parameters["height_crop_size"])),
            ),
            height=train_parameters["height_crop_size"],
            width=train_parameters["width_crop_size"],
            w2h_ratio=1.0,
            p=1,
        ),
        albu.HorizontalFlip(p=0.5),
        normalization,
    ],
    p=1,
)

val_augmentations = albu.Compose(
    [
        albu.PadIfNeeded(
            min_height=1024, min_width=2048, border_mode=cv2.BORDER_CONSTANT, mask_value=ignore_index, p=1
        ),
        normalization,
    ],
    p=1,
)

test_augmentations = albu.Compose([normalization], p=1)
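The validation pipeline pads image and mask up to the full Cityscapes resolution, filling padded mask pixels with ignore_index (typically excluded from the loss). A sketch with placeholder arrays; ignore_index is defined elsewhere in the config:

import numpy as np

image = np.random.randint(0, 256, (768, 1536, 3), dtype=np.uint8)  # placeholder
mask = np.zeros((768, 1536), dtype=np.uint8)                       # placeholder

padded = val_augmentations(image=image, mask=mask)
assert padded['image'].shape[:2] == (1024, 2048)
assert padded['mask'].shape == (1024, 2048)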
github microsoft / seismic-deeplearning / experiments / interpretation / dutchf3_patch / train.py
    basic_aug = Compose(
        [
            # ...earlier transforms truncated in the original snippet...
            Resize(
                config.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT, config.TRAIN.AUGMENTATIONS.RESIZE.WIDTH, always_apply=True,
            ),
            PadIfNeeded(
                min_height=config.TRAIN.AUGMENTATIONS.PAD.HEIGHT,
                min_width=config.TRAIN.AUGMENTATIONS.PAD.WIDTH,
                border_mode=config.OPENCV_BORDER_CONSTANT,
                always_apply=True,
                mask_value=255,
            ),
        ]
    )
    if config.TRAIN.AUGMENTATION:
        train_aug = Compose([basic_aug, HorizontalFlip(p=0.5)])
        val_aug = basic_aug
    else:
        train_aug = val_aug = basic_aug

    # Training and Validation Loaders:
    TrainPatchLoader = get_patch_loader(config)
    logging.info(f"Using {TrainPatchLoader}")

    train_set = TrainPatchLoader(config, split="train", is_transform=True, augmentations=train_aug, debug=debug,)
    logger.info(train_set)

    n_classes = train_set.n_classes
    val_set = TrainPatchLoader(config, split="val", is_transform=True, augmentations=val_aug, debug=debug,)

    logger.info(val_set)
github darraghdog / rsna / scripts / resnext101v13 / trainorig.py
png = glob.glob(os.path.join(dir_train_img1, '*.jpg'))
png = [os.path.basename(png)[:-4] for png in png]
png = np.array(png)
train = train.set_index('Image').loc[png].reset_index()

# get fold
valdf = train[train['fold']==fold].reset_index(drop=True)
trndf = train[train['fold']!=fold].reset_index(drop=True)

# Data loaders
mean_img = [0.22363983, 0.18190407, 0.2523437 ]
std_img = [0.32451536, 0.2956294,  0.31335256]
transform_train = Compose([
    #ShiftScaleRotate(),
    #CenterCrop(height = SIZE//10, width = SIZE//10, p=0.3),
    HorizontalFlip(p=0.5),
    ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05,
                     rotate_limit=20, p=0.3, border_mode=cv2.BORDER_REPLICATE),
    Transpose(p=0.5),
    Normalize(mean=mean_img, std=std_img, max_pixel_value=255.0, p=1.0),
    ToTensor()
])

HFLIPVAL = 1.0 if HFLIP == 'T' else 0.0
TRANSPOSEVAL = 1.0 if TRANSPOSE == 'P' else 0.0
transform_test = Compose([
    HorizontalFlip(p=HFLIPVAL),
    Transpose(p=TRANSPOSEVAL),
    Normalize(mean=mean_img, std=std_img, max_pixel_value=255.0, p=1.0),
    ToTensor()
])
github darraghdog / rsna / scripts / trainorig.py
png = [os.path.basename(png)[:-4] for png in png]
train_imgs = set(train.Image.tolist())
png = [p for p in png if p in train_imgs]
logger.info('Number of images to train on {}'.format(len(png)))
png = np.array(png)
train = train.set_index('Image').loc[png].reset_index()

# get fold
valdf = train[train['fold']==fold].reset_index(drop=True)
trndf = train[train['fold']!=fold].reset_index(drop=True)

# Data loaders
mean_img = [0.22363983, 0.18190407, 0.2523437 ]
std_img = [0.32451536, 0.2956294,  0.31335256]
transform_train = Compose([
    HorizontalFlip(p=0.5),
    ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05,
                     rotate_limit=20, p=0.3, border_mode=cv2.BORDER_REPLICATE),
    Transpose(p=0.5),
    Normalize(mean=mean_img, std=std_img, max_pixel_value=255.0, p=1.0),
    ToTensor()
])

HFLIPVAL = 1.0 if HFLIP == 'T' else 0.0
TRANSPOSEVAL = 1.0 if TRANSPOSE == 'P' else 0.0
transform_test = Compose([
    HorizontalFlip(p=HFLIPVAL),
    Transpose(p=TRANSPOSEVAL),
    Normalize(mean=mean_img, std=std_img, max_pixel_value=255.0, p=1.0),
    ToTensor()
])