How to use the albumentations.Compose function in albumentations

To help you get started, we’ve selected a few albumentations.Compose examples based on popular ways the function is used in public projects.

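Compose chains a list of transforms into a single callable. You pass targets by keyword (image, mask, bboxes, keypoints) and read the augmented results back from the returned dict. A minimal sketch using a random placeholder image:

import numpy as np
import albumentations as A

# Each transform fires independently with its own probability `p`.
transform = A.Compose([
    A.HorizontalFlip(p=0.5),
    A.RandomBrightnessContrast(p=0.2),
])

image = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
augmented = transform(image=image)
image_aug = augmented["image"]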

github albumentations-team / albumentations / tests / test_serialization.py
import albumentations as A


def test_transform_pipeline_serialization(seed, image, mask):
    aug = A.Compose(
        [
            A.OneOrOther(
                A.Compose(
                    [
                        A.Resize(1024, 1024),
                        A.RandomSizedCrop(min_max_height=(256, 1024), height=512, width=512, p=1),
                        A.OneOf(
                            [
                                A.RandomSizedCrop(min_max_height=(256, 512), height=384, width=384, p=0.5),
                                A.RandomSizedCrop(min_max_height=(256, 512), height=512, width=512, p=0.5),
                            ]
                        ),
                    ]
                ),
                A.Compose(
                    [
                        A.Resize(1024, 1024),
                        A.RandomSizedCrop(min_max_height=(256, 1025), height=256, width=256, p=1),
                        A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1),
                    ]
                ),
            )
        ]
    )
    # ... remainder of the test truncated in the source listing
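
The test above exercises albumentations' serialization support: a composed pipeline can be round-tripped through a plain dict. A minimal sketch using the library's to_dict/from_dict helpers:

import albumentations as A

pipeline = A.Compose([A.Resize(256, 256), A.HorizontalFlip(p=0.5)])

# Serialize to a JSON-friendly dict and rebuild an equivalent pipeline.
serialized = A.to_dict(pipeline)
restored = A.from_dict(serialized)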
github ternaus / iglovikov_segmentation / configs / fpn_resnext101_32x8d_cityscapes_2V100.py
pad_factor = 64
imread_library = "cv2"  # can be cv2 or jpeg4py

optimizer = RAdam(
    [
        {"params": model.decoder.parameters(), "lr": train_parameters["lr"]},
        # decrease the encoder lr so that large gradients at the start of
        # training do not perturb the pre-trained weights
        {"params": model.encoder.parameters(), "lr": train_parameters["lr"] / 100},
    ],
    weight_decay=1e-3,
)

normalization = albu.Normalize(mean=mean, std=std, p=1)

train_augmentations = albu.Compose(
    [
        albu.RandomSizedCrop(
            min_max_height=(
                int(0.5 * (train_parameters["height_crop_size"])),
                int(2 * (train_parameters["height_crop_size"])),
            ),
            height=train_parameters["height_crop_size"],
            width=train_parameters["width_crop_size"],
            w2h_ratio=1.0,
            p=1,
        ),
        albu.ShiftScaleRotate(
            border_mode=cv2.BORDER_CONSTANT, rotate_limit=10, scale_limit=0, p=0.5, mask_value=ignore_index
        ),
        albu.RandomBrightnessContrast(p=0.5),
        albu.RandomGamma(p=0.5),
    ]
)
# ... (snippet truncated in the source listing)
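
For segmentation configs like this one, the composed pipeline is applied by calling it with both image and mask: spatial transforms (crop, shift/scale/rotate) are applied identically to both, while pixel-level transforms (brightness, gamma) touch only the image. A sketch with placeholder arrays:

import numpy as np

image = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)
mask = np.zeros((512, 512), dtype=np.uint8)

sample = train_augmentations(image=image, mask=mask)
image_aug, mask_aug = sample["image"], sample["mask"]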
github darraghdog / rsna / scripts / resnext101v5 / trainorig.py
n_classes = 6
label_cols = ['epidural', 'intraparenchymal', 'intraventricular', 'subarachnoid', 'subdural', 'any']

train = pd.read_csv(os.path.join(path_data, 'train.csv.gz'))
test = pd.read_csv(os.path.join(path_data, 'test.csv.gz'))
# collect image ids (filenames without extension) from the training image folder
png = glob.glob(os.path.join(dir_train_img, '*.jpg'))
png = [os.path.basename(p)[:-4] for p in png]
png = np.array(png)
train = train.set_index('Image').loc[png].reset_index()

# get fold
valdf = train[train['fold']==fold].reset_index(drop=True)
trndf = train[train['fold']!=fold].reset_index(drop=True)

# Data loaders
transform_train = Compose([
    HorizontalFlip(p=0.5),
    ShiftScaleRotate(shift_limit=0.1, scale_limit=0.01,
                     rotate_limit=30, p=0.7, border_mode=cv2.BORDER_REPLICATE),
    ToTensor()
])


transform_test = Compose([
    ToTensor()
])

trndataset = IntracranialDataset(trndf, path=dir_train_img, transform=transform_train, labels=True)
valdataset = IntracranialDataset(valdf, path=dir_train_img, transform=transform_test, labels=False)
tstdataset = IntracranialDataset(test, path=dir_test_img, transform=transform_test, labels=False)

num_workers = 16
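
Note that ToTensor here comes from albumentations.pytorch; it converts the augmented numpy image to a CHW torch tensor as the final step of the Compose pipeline (newer albumentations releases replace it with ToTensorV2).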
github spytensor / pytorch-image-classification / dataset / augmentations.py
def get_test_transform(image_size):
    longest_size = max(image_size[0], image_size[1])
    return A.Compose([
        # Resize(int(config.img_height * 1.5), int(config.img_weight * 1.5)),
        A.CenterCrop(config.img_height, config.img_weight),  # 'img_weight' is the attribute name used in this project's config
        A.LongestMaxSize(longest_size, interpolation=cv2.INTER_CUBIC),
        A.PadIfNeeded(image_size[0], image_size[1],
                      border_mode=cv2.BORDER_CONSTANT, value=0),
        A.Normalize(),
        ToTensor()
    ])
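
The helper returns a Compose object, so it is used like any other albumentations pipeline. A sketch, assuming the crop size configured in config.img_height / config.img_weight fits the input image:

import numpy as np

transform = get_test_transform((224, 224))
img = np.random.randint(0, 256, (300, 300, 3), dtype=np.uint8)
out = transform(image=img)["image"]  # letterboxed, normalized, CHW tensor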
github Erlemar / Understanding-Clouds-from-Satellite-Images / augs.py
    Args:
        image_size:

    Returns:

    """
    train_transform = [
        albu.Resize(*image_size),
        albu.HorizontalFlip(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.3, rotate_limit=15, shift_limit=0.1, p=0.5, border_mode=0),
        albu.GridDistortion(p=0.5),
        albu.OpticalDistortion(p=0.5, distort_limit=0.1, shift_limit=0.2),
        albu.Blur(),
        albu.RandomBrightnessContrast()
    ]
    return albu.Compose(train_transform)
github darraghdog / rsna / scripts / efficientnetb0v5 / trainorig.py
        img = augmented['image']
        if self.labels:
            labels = torch.tensor(
                self.data.loc[idx, label_cols])
            return {'image': img, 'labels': labels}
        else:
            return {'image': img}

np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
if n_gpu > 0:
    torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.deterministic = True
# Data loaders
transform_train = Compose([
    ShiftScaleRotate(),
    ToTensor()
])

transform_test = Compose([
    ToTensor()
])

logger.info('Load Dataframes')
dir_train_img = os.path.join(path_data, 'stage_1_train_images_jpg')
dir_test_img = os.path.join(path_data, 'stage_1_test_images_jpg')

# Parameters
n_classes = 6
label_cols = ['epidural', 'intraparenchymal', 'intraventricular', 'subarachnoid', 'subdural', 'any']
github catalyst-team / mlcomp / examples / batteries / top / experiment.py
def transforms_train():
    return A.Compose([
        A.Resize(height=307, width=950),
        A.RandomSizedCrop(min_max_height=(230, 307), height=256, width=896, w2h_ratio=950 / 307),
        A.HorizontalFlip(),
        # A.JpegCompression(quality_lower=60, quality_upper=100),
        # A.RandomGamma(),
        A.Normalize(mean=(Experiment.mean, Experiment.mean, Experiment.mean),
                    std=(Experiment.std, Experiment.std, Experiment.std)),
        ChannelTranspose()
    ])
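
ChannelTranspose is a custom transform from the mlcomp example code rather than an albumentations class; judging by its name, it transposes the image from HWC to CHW layout, the job ToTensorV2 does in stock albumentations.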
github cswin / AWC / src / train.py
from albumentations import (
    Compose, OneOf, Transpose, HorizontalFlip, VerticalFlip, RandomRotate90,
    CLAHE, HueSaturationValue,
    IAAAdditiveGaussianNoise, GaussNoise,
    RandomBrightnessContrast,
    IAASharpen, IAAEmboss
)

from models.unet import UNet
from models.discriminator import FCDiscriminator
from dataset.refuge_Vmiccai import REFUGE
from pytorch_utils import (adjust_learning_rate, adjust_learning_rate_D,
                           calc_mse_loss, Weighted_Jaccard_loss, dice_loss)
from models import optim_weight_ema
from arguments import get_arguments


aug_student = Compose([
    OneOf([
        Transpose(p=0.5),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        RandomRotate90(p=0.5)], p=0.2),

    OneOf([
        IAAAdditiveGaussianNoise(p=0.5),
        GaussNoise(p=0.5),
    ], p=0.2),

    OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(p=0.5),
        IAAEmboss(p=0.5),
        RandomBrightnessContrast(p=0.5),
    ]),
])
# ... (snippet truncated in the source listing)
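
A note on reading nested blocks like these: each OneOf first fires with its own probability p (0.2 above), and when it fires it picks exactly one child, using the children's p values as relative selection weights. A minimal sketch:

import albumentations as A

# The OneOf block runs 20% of the time; when it runs, one child is picked,
# with the children's p values normalized into selection weights.
aug = A.Compose([
    A.OneOf([
        A.Blur(blur_limit=3, p=0.5),
        A.MedianBlur(blur_limit=3, p=0.5),
    ], p=0.2),
])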
github mrlzla / kaggle_salt / dataset / albumentations.py
def strong_aug(p=1.0):
    return Compose([
        HorizontalFlip(),
        Blur(blur_limit=3),
        #GridDistortion(p=.2),
        #ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.3, rotate_limit=0.0, p=0.7),
        # OneOf([
        #     GridDistortion(p=.2),
        #     ElasticTransform(p=.2)
        # ], p=0.2),
        RandomContrast(p=.5, limit=0.2),
        RandomBrightness(p=.5, limit=0.2),
        RandomGamma(),
        # RandomSizedCrop(p=0.9, min_max_height=(65, 101), height=101, width=101),
    ], p=p)
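
Compose itself also takes a probability, which is what the p argument of strong_aug controls: strong_aug(p=1.0) always applies the pipeline, while e.g. strong_aug(p=0.5) would skip the whole block half the time.

aug = strong_aug(p=0.9)                # whole pipeline applied 90% of the time
augmented = aug(image=image)["image"]  # image is any HWC uint8 numpy array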