How to use the RandomBrightnessContrast function in albumentations

To help you get started, we’ve selected a few examples that show how RandomBrightnessContrast is used in popular open-source projects.
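
Before diving into the project examples, here is a minimal, self-contained sketch of the transform on a single image. The file path and the limit values are placeholders, not taken from the projects below.

import albumentations as A
import cv2

# Placeholder path; any RGB image works.
image = cv2.cvtColor(cv2.imread("image.jpg"), cv2.COLOR_BGR2RGB)

# Illustrative limits: brightness and contrast each vary by up to +/-0.2;
# p=1.0 forces the transform to fire so the effect is easy to inspect.
transform = A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=1.0)
augmented_image = transform(image=image)["image"]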

Example from albumentations-team / albumentations / tests / test_serialization.py
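This test from the library's own suite builds a nested Compose pipeline containing RandomBrightnessContrast(p=0.5), serializes it with A.to_dict, restores it with A.from_dict, and checks that both pipelines produce identical images and keypoints under the same seed.
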
def test_transform_pipeline_serialization_with_keypoints(seed, image, keypoints, keypoint_format, labels):
    aug = A.Compose(
        [
            A.OneOrOther(
                A.Compose([A.RandomRotate90(), A.OneOf([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5)])]),
                A.Compose([A.Rotate(p=0.5), A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1)]),
            ),
            A.HorizontalFlip(p=1),
            A.RandomBrightnessContrast(p=0.5),
        ],
        keypoint_params={"format": keypoint_format, "label_fields": ["labels"]},
    )
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, keypoints=keypoints, labels=labels)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, keypoints=keypoints, labels=labels)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["keypoints"], deserialized_aug_data["keypoints"])

Example from albumentations-team / albumentations / tests / test_serialization.py
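The bounding-box variant of the same round-trip test: only the target type and the bbox_params configuration differ from the keypoint version above.
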
def test_transform_pipeline_serialization_with_bboxes(seed, image, bboxes, bbox_format, labels):
    aug = A.Compose(
        [
            A.OneOrOther(
                A.Compose([A.RandomRotate90(), A.OneOf([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5)])]),
                A.Compose([A.Rotate(p=0.5), A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1)]),
            ),
            A.HorizontalFlip(p=1),
            A.RandomBrightnessContrast(p=0.5),
        ],
        bbox_params={"format": bbox_format, "label_fields": ["labels"]},
    )
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, bboxes=bboxes, labels=labels)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, bboxes=bboxes, labels=labels)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["bboxes"], deserialized_aug_data["bboxes"])
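
Outside the test harness, the same round trip can be sketched in a few lines; the pipeline below is illustrative, not taken from the tests.

import json
import albumentations as A

# Any serializable pipeline works; this one mirrors the tests above.
aug = A.Compose([A.HorizontalFlip(p=1), A.RandomBrightnessContrast(p=0.5)])

payload = json.dumps(A.to_dict(aug))         # pipeline -> plain dict -> JSON
restored = A.from_dict(json.loads(payload))  # JSON -> dict -> pipeline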

Example from albumentations-team / albumentations / tests / pytorch / test_image_only_transforms.py
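An excerpt (it begins midway through a parametrized test) that appears to pair each CPU transform with a torch-based counterpart; RandomBrightnessContrast is given fixed brightness and contrast ranges so the two implementations can be compared for identical output.
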
                A.RandomBrightnessContrast((1.33, 1.33), (0.77, 0.77), True),
                ATorch.RandomBrightnessContrastTorch((1.33, 1.33), (0.77, 0.77), True),
            ],
            [A.ChannelDropout(), ATorch.ChannelDropoutTorch()],
            [A.RandomGamma(), ATorch.RandomGammaTorch()],
            [A.ChannelShuffle(), ATorch.ChannelShuffleTorch()],
            [A.ToGray(), ATorch.ToGrayTorch()],
            [A.ToFloat(), ATorch.ToFloatTorch()],
            [A.FromFloat("uint8"), ATorch.FromFloatTorch()],
        ],
    ),
)
def test_image_transforms_rgb(images, augs):
    image, image_torch = images
    dtype = image.dtype

    aug_cpu, aug_torch = augs

Example from TAMU-VITA / DeblurGANv2 / aug.py
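In DeblurGANv2, RandomBrightnessContrast (with default parameters) is one of several photometric options inside an OneOf block of the 'strong' augmentation scope, so at most one of them is applied per sample.
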
def get_transforms(size: int, scope: str = 'weak', crop='random'):
    augs = {'strong': albu.Compose([albu.HorizontalFlip(),
                                    albu.ShiftScaleRotate(shift_limit=0.0, scale_limit=0.2, rotate_limit=20, p=.4),
                                    albu.ElasticTransform(),
                                    albu.OpticalDistortion(),
                                    albu.OneOf([
                                        albu.CLAHE(clip_limit=2),
                                        albu.IAASharpen(),
                                        albu.IAAEmboss(),
                                        albu.RandomBrightnessContrast(),
                                        albu.RandomGamma()
                                    ], p=0.5),
                                    albu.OneOf([
                                        albu.RGBShift(),
                                        albu.HueSaturationValue(),
                                    ], p=0.5),
                                    ]),
            'weak': albu.Compose([albu.HorizontalFlip(),
                                  ]),
            }

    aug_fn = augs[scope]
    crop_fn = {'random': albu.RandomCrop(size, size),
               'center': albu.CenterCrop(size, size)}[crop]
    normalize = albu.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

Example from ternaus / iglovikov_segmentation / configs / fpn_se_resnext101_32x4d_cityscapes_2V100.py
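A training configuration for Cityscapes segmentation in which RandomBrightnessContrast(p=0.5) is a standalone step alongside cropping, shift-scale-rotate, gamma, compression, noise, blur, and dropout augmentations.
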
train_augmentations = albu.Compose(
    [
        albu.RandomSizedCrop(
            min_max_height=(
                int(0.5 * (train_parameters["height_crop_size"])),
                int(2 * (train_parameters["height_crop_size"])),
            ),
            height=train_parameters["height_crop_size"],
            width=train_parameters["width_crop_size"],
            w2h_ratio=1.0,
            p=1,
        ),
        albu.ShiftScaleRotate(
            border_mode=cv2.BORDER_CONSTANT, rotate_limit=10, scale_limit=0, p=0.5, mask_value=ignore_index
        ),
        albu.RandomBrightnessContrast(p=0.5),
        albu.RandomGamma(p=0.5),
        albu.ImageCompression(quality_lower=20, quality_upper=100, p=0.5),
        albu.GaussNoise(p=0.5),
        albu.Blur(p=0.5),
        albu.CoarseDropout(p=0.5, max_height=26, max_width=16),
        albu.OneOf([albu.HueSaturationValue(p=0.5), albu.RGBShift(p=0.5)], p=0.5),
        normalization,
    ],
    p=1,
)

val_augmentations = albu.Compose(
    [
        albu.PadIfNeeded(
            min_height=1024, min_width=2048, border_mode=cv2.BORDER_CONSTANT, mask_value=ignore_index, p=1
        ),

Example from lopuhin / kaggle-kuzushiji-2019 / kuzushiji / classify / dataset.py
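In this Kuzushiji classification dataset (the excerpt starts mid-function), RandomBrightnessContrast() with default limits follows a project-specific crop and HueSaturationValue during training, inside a Compose configured with COCO-format bounding boxes.
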
for sign in [-1, 1])
    if train:
        transforms = [
            LongestMaxSizeRandomSizedCrop(
                max_size=train_initial_size,
                min_max_height=crop_min_max_height,
                width=crop_width,
                height=crop_height,
                w2h_ratio=crop_width / crop_height,
            ),
            A.HueSaturationValue(
                hue_shift_limit=color_hue_aug,
                sat_shift_limit=color_sat_aug,
                val_shift_limit=color_val_aug,
            ),
            A.RandomBrightnessContrast(),
            A.RandomGamma(),
        ]
    else:
        transforms = [
            A.LongestMaxSize(max_size=test_height),
        ]
    if normalize:
        transforms.append(A.Normalize())
    transforms.extend([
        ToTensor(),
    ])
    return A.Compose(
        transforms,
        bbox_params={
            'format': 'coco',
            'min_area': 0,

Example from toodef / neural-pipeline / examples / files / img_segmentation.py
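A segmentation example where RandomBrightnessContrast(brightness_limit=0.4, contrast_limit=0.4) is combined with RandomGamma and RGBShift, and the resulting pipeline is applied to image and mask together.
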
from neural_pipeline.monitoring import LogMonitor
from neural_pipeline.train_config import AbstractMetric, MetricsProcessor, MetricsGroup, TrainStage, ValidationStage, TrainConfig
from neural_pipeline.utils.fsm import FileStructManager
from neural_pipeline.builtin.monitors.tensorboard import TensorboardMonitor

###################################
# Define dataset and augmentations
# The dataset used in this example is from PicsArt hackathon (https://picsart.ai/en/contest)
###################################

datasets_dir = 'data/dataset'
base_dir = os.path.join(datasets_dir, 'picsart_hack_online_data')

preprocess = OneOf([RandomCrop(height=224, width=224), Resize(width=224, height=224)], p=1)
transforms = Compose([HorizontalFlip(), VerticalFlip(), RandomRotate90()], p=0.5)
aug = Compose([RandomBrightnessContrast(brightness_limit=0.4, contrast_limit=0.4), RandomGamma(), RGBShift(), transforms])


def augmentate(item: {}):
    res = preprocess(image=item['data'], mask=item['target'])
    res = aug(image=res['image'], mask=res['mask'])
    return {'data': res['image'], 'target': res['mask']}


def augmentate_and_to_pytorch(item: {}):
    res = augmentate(item)
    return {'data': torch.from_numpy(np.moveaxis(res['data'].astype(np.float32) / 255., -1, 0)),
            'target': torch.from_numpy(np.expand_dims(res['target'].astype(np.float32) / 255, axis=0))}


class PicsartDataset(AbstractDataset):
    def __init__(self, images_pathes: [], aug: callable):

Example from BloodAxe / pytorch-toolbelt / examples / segmentation-inria / models / factory.py
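Training augmentations for the Inria segmentation example: after D4 or flip spatial transforms, RandomBrightnessContrast with default parameters competes inside an OneOf with CLAHE, HueSaturationValue, RGBShift, and RandomGamma.
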
# D4 Augmentations
        A.Compose([
            A.Transpose(),
            A.RandomRotate90(),
        ], p=float(use_d4)),
        # In case we don't want to use D4 augmentations, we use flips
        A.HorizontalFlip(p=float(not use_d4)),

        # Spatial-preserving augmentations:
        A.OneOf([
            A.Cutout(),
            A.GaussNoise(),
        ]),
        A.OneOf([
            A.RandomBrightnessContrast(),
            A.CLAHE(),
            A.HueSaturationValue(),
            A.RGBShift(),
            A.RandomGamma()
        ]),
        # Weather effects
        # A.OneOf([
        #     A.RandomFog(),
        #     A.RandomRain(),
        #     A.RandomSunFlare()
        # ]),

        # Normalize image to make use of pretrained model
        A.Normalize()
    ])

Example from lyakaap / Landmark2019-1st-and-3rd-Place-Solution / src / data_utils.py
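A transform factory (the excerpt starts mid-signature) in which RandomBrightnessContrast takes its brightness_limit and contrast_limit from function arguments, sandwiched between an affine warp and optional blur and CLAHE steps.
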
clahe_p=0.0,
                     blur_p=0.0,
                     autoaugment=False,
                     ):
    norm = Normalize(mean=mean, std=std, max_pixel_value=divide_by)

    if autoaugment:
        train_transform = Compose([
            torch_custom.AutoAugmentWrapper(p=1.0),
            torch_custom.RandomCropThenScaleToOriginalSize(limit=scale_limit, p=1.0),
            norm,
        ])
    else:
        train_transform = Compose([
            IAAAffine(rotate=(-rotate_limit, rotate_limit), shear=(-shear_limit, shear_limit), mode='constant'),
            RandomBrightnessContrast(brightness_limit=brightness_limit, contrast_limit=contrast_limit),
            MotionBlur(p=blur_p),
            CLAHE(p=clahe_p),
            torch_custom.RandomCropThenScaleToOriginalSize(limit=scale_limit, p=1.0),
            norm,
        ])
    eval_transform = Compose([norm])

    return train_transform, eval_transform

Example from spytensor / pytorch-image-classification / dataset / augmentations.py
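Classification augmentations (the excerpt starts mid-pipeline) where RandomBrightnessContrast(brightness_limit=0.5, contrast_limit=0.4) sits in an OneOf next to a custom per-channel variant, RandomGamma, and an explicit NoOp skip branch.
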
A.NoOp()
        ]),

        A.OneOf([
            A.RandomSizedCrop(min_max_height=(int(image_size[0] * 0.75), image_size[0]),
                              height=image_size[0],
                              width=image_size[1], p=0.3),
            A.NoOp()
        ]),

        A.ISONoise(p=0.5),
        A.JpegCompression(p=0.3, quality_lower=75),

        # Brightness/contrast augmentations
        A.OneOf([
            A.RandomBrightnessContrast(brightness_limit=0.5,
                                       contrast_limit=0.4),
            IndependentRandomBrightnessContrast(brightness_limit=0.25,
                                                contrast_limit=0.24),
            A.RandomGamma(gamma_limit=(50, 150)),
            A.NoOp()
        ]),

        A.OneOf([
            A.RGBShift(r_shift_limit=40, b_shift_limit=30, g_shift_limit=30),
            A.HueSaturationValue(hue_shift_limit=10,
                                 sat_shift_limit=10),
            A.ToGray(p=0.2),
            A.NoOp()
        ]),
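
Taken together, these projects suggest a recurring pattern: RandomBrightnessContrast is usually wrapped in A.OneOf with other photometric transforms, sometimes with A.NoOp as an explicit skip branch, so that at most one of them fires per sample. A minimal sketch of that pattern, with illustrative limits:

import albumentations as A

# At most one photometric transform is applied per call; the NoOp branch
# lets some samples pass through unchanged.
photometric = A.OneOf([
    A.RandomBrightnessContrast(brightness_limit=0.5, contrast_limit=0.4),
    A.RandomGamma(gamma_limit=(50, 150)),
    A.NoOp(),
], p=1.0)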