import cv2
import numpy as np
import torch
from albumentations import Compose, HorizontalFlip, Normalize, PadIfNeeded, Resize

# FOR DISTRIBUTED: initialize the backend. torch.distributed.launch provides the
# required environment variables (RANK, WORLD_SIZE, MASTER_ADDR, MASTER_PORT),
# which is why init_method="env://" is required here.
torch.distributed.init_process_group(backend="nccl", init_method="env://")

scheduler_step = config.TRAIN.END_EPOCH // config.TRAIN.SNAPSHOTS
torch.backends.cudnn.benchmark = config.CUDNN.BENCHMARK

# Seed every RNG the training loop touches so runs are reproducible.
torch.manual_seed(config.SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(config.SEED)
np.random.seed(seed=config.SEED)

# Setup augmentations: normalize, resize, then pad to the configured shape.
basic_aug = Compose(
    [
        Normalize(mean=(config.TRAIN.MEAN,), std=(config.TRAIN.STD,), max_pixel_value=1),
        Resize(
            config.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT,
            config.TRAIN.AUGMENTATIONS.RESIZE.WIDTH,
            always_apply=True,
        ),
        PadIfNeeded(
            min_height=config.TRAIN.AUGMENTATIONS.PAD.HEIGHT,
            min_width=config.TRAIN.AUGMENTATIONS.PAD.WIDTH,
            border_mode=cv2.BORDER_CONSTANT,
            always_apply=True,
            mask_value=255,
        ),
    ]
)
if config.TRAIN.AUGMENTATION:
    train_aug = Compose([basic_aug, HorizontalFlip(p=0.5)])
    val_aug = basic_aug
else:
    # Assumption: with augmentation disabled, both splits fall back to the basic pipeline.
    train_aug = val_aug = basic_aug
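# The init_process_group call above only works when the script is started through a
# launcher that populates RANK, WORLD_SIZE, MASTER_ADDR and MASTER_PORT. A minimal
# sketch of that wiring, assuming the legacy torch.distributed.launch entry point;
# the script name "train.py" and the argument handling below are illustrative, not
# taken from the original code:
#
#   python -m torch.distributed.launch --nproc_per_node=4 train.py
#
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", type=int, default=0)  # injected by torch.distributed.launch
args, _ = parser.parse_known_args()
torch.cuda.set_device(args.local_rank)  # pin this process to its own GPU before any CUDA work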
def test_transform_pipeline_serialization(seed, image, mask):
    aug = A.Compose(
        [
            A.OneOrOther(
                A.Compose(
                    [
                        A.Resize(1024, 1024),
                        A.RandomSizedCrop(min_max_height=(256, 1024), height=512, width=512, p=1),
                        A.OneOf(
                            [
                                A.RandomSizedCrop(min_max_height=(256, 512), height=384, width=384, p=0.5),
                                A.RandomSizedCrop(min_max_height=(256, 512), height=512, width=512, p=0.5),
                            ]
                        ),
                    ]
                ),
                A.Compose(
                    [
                        A.Resize(1024, 1024),
                        A.RandomSizedCrop(min_max_height=(256, 1025), height=256, width=256, p=1),
                        A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1),
                    ]
                ),
            ),
            A.HorizontalFlip(p=1),
            A.RandomBrightnessContrast(p=0.5),
        ]
    )
    # Round-trip the pipeline through its dict representation and check that the
    # deserialized copy produces identical outputs (same pattern as the keypoint and bbox variants below).
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, mask=mask)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, mask=mask)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"])
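# The same round-trip also works through files. A minimal sketch, assuming an
# albumentations version that exposes A.save / A.load; the helper name and the
# file path are illustrative:
def _roundtrip_via_file(aug, path="/tmp/aug_pipeline.json"):
    A.save(aug, path)    # serialize the whole Compose tree (JSON by default)
    return A.load(path)  # rebuild an equivalent pipeline from the file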
def test_keypoint_rotate90(keypoint, expected, factor):
    actual = F.keypoint_rot90(keypoint, factor, rows=100, cols=200)
    assert actual == expected
# Pairs of a CPU (NumPy-backed) albumentations transform and its torch-backed
# counterpart, as used by the parametrized equivalence tests below.
[A.PadIfNeeded(333, 512), ATorch.PadIfNeededTorch(333, 512)],
[A.Crop(11, 5, 72, 36), ATorch.CropTorch(11, 5, 72, 36)],
[A.VerticalFlip(), ATorch.VerticalFlipTorch()],
[A.HorizontalFlip(), ATorch.HorizontalFlipTorch()],
[A.Flip(), ATorch.FlipTorch()],
[A.Transpose(), ATorch.TransposeTorch()],
[
    A.LongestMaxSize(interpolation=cv2.INTER_NEAREST),
    ATorch.LongestMaxSizeTorch(interpolation=cv2.INTER_NEAREST),
],
[
    A.SmallestMaxSize(interpolation=cv2.INTER_NEAREST),
    ATorch.SmallestMaxSizeTorch(interpolation=cv2.INTER_NEAREST),
],
[
    A.Resize(100, 100, interpolation=cv2.INTER_NEAREST),
    ATorch.ResizeTorch(100, 100, interpolation=cv2.INTER_NEAREST),
],
def test_transform_pipeline_serialization_with_keypoints(seed, image, keypoints, keypoint_format, labels):
    aug = A.Compose(
        [
            A.OneOrOther(
                A.Compose([A.RandomRotate90(), A.OneOf([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5)])]),
                A.Compose([A.Rotate(p=0.5), A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1)]),
            ),
            A.HorizontalFlip(p=1),
            A.RandomBrightnessContrast(p=0.5),
        ],
        keypoint_params={"format": keypoint_format, "label_fields": ["labels"]},
    )
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, keypoints=keypoints, labels=labels)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, keypoints=keypoints, labels=labels)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["keypoints"], deserialized_aug_data["keypoints"])
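# The dict passed as keypoint_params above is shorthand for an A.KeypointParams
# object. A minimal sketch of the equivalent object form; the pipeline contents
# and the "xy" format here are illustrative, not taken from the test:
_kp_pipeline = A.Compose(
    [A.HorizontalFlip(p=1)],
    keypoint_params=A.KeypointParams(format="xy", label_fields=["labels"]),
)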
def test_transform_pipeline_serialization_with_bboxes(seed, image, bboxes, bbox_format, labels):
    aug = A.Compose(
        [
            A.OneOrOther(
                A.Compose([A.RandomRotate90(), A.OneOf([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5)])]),
                A.Compose([A.Rotate(p=0.5), A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1)]),
            ),
            A.HorizontalFlip(p=1),
            A.RandomBrightnessContrast(p=0.5),
        ],
        bbox_params={"format": bbox_format, "label_fields": ["labels"]},
    )
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, bboxes=bboxes, labels=labels)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, bboxes=bboxes, labels=labels)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["bboxes"], deserialized_aug_data["bboxes"])
# Additional pairs of CPU and torch-backed color / dtype transforms.
[
    A.RandomBrightnessContrast((1.33, 1.33), (0.77, 0.77), True),
    ATorch.RandomBrightnessContrastTorch((1.33, 1.33), (0.77, 0.77), True),
],
[A.ChannelDropout(), ATorch.ChannelDropoutTorch()],
[A.RandomGamma(), ATorch.RandomGammaTorch()],
[A.ChannelShuffle(), ATorch.ChannelShuffleTorch()],
[A.ToGray(), ATorch.ToGrayTorch()],
[A.ToFloat(), ATorch.ToFloatTorch()],
[A.FromFloat("uint8"), ATorch.FromFloatTorch()],
        ],
    ),
)
def test_image_transforms_rgb(images, augs):
    image, image_torch = images
    dtype = image.dtype
    aug_cpu, aug_torch = augs
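# A minimal sketch of how one CPU/torch pair could be compared, assuming the torch
# transforms accept a CHW tensor and return the result under the same "image" key;
# the helper name, the layout conversion and the tolerance are assumptions, not
# taken from the original suite:
def _check_pair_equivalence(image, image_torch, aug_cpu, aug_torch):
    res_cpu = aug_cpu(image=image)["image"]
    res_torch = aug_torch(image=image_torch)["image"]
    res_torch = res_torch.permute(1, 2, 0).cpu().numpy()  # CHW tensor -> HWC array
    np.testing.assert_allclose(res_cpu, res_torch, atol=1)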
aug = A.Compose(
    [
        A.OneOrOther(
            A.Compose(
                [
                    A.RandomSizedCrop(min_max_height=(256, 1025), height=512, width=512, p=1),
                    A.OneOf(
                        [
                            A.RandomSizedCrop(min_max_height=(256, 512), height=384, width=384, p=0.5),
                            A.RandomSizedCrop(min_max_height=(256, 512), height=512, width=512, p=0.5),
                        ]
                    ),
                ]
            ),
            A.Compose(
                [
                    A.RandomSizedCrop(min_max_height=(256, 1025), height=256, width=256, p=1),
                    A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1),
                ]
            ),
        ),
        A.HorizontalFlip(p=1),
        A.RandomBrightnessContrast(p=0.5),
    ]
)
res = aug(image=np.zeros((1248, 1248, 3), dtype=np.uint8))
assert res["image"].shape[0] in (256, 384, 512)
assert res["image"].shape[1] in (256, 384, 512)
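# Why 256, 384 and 512 are the only possible sides: the second OneOrOther branch
# crops to 256x256, while the first branch crops to 512x512 and its OneOf may then
# re-crop to 384x384 or 512x512; the trailing HorizontalFlip and
# RandomBrightnessContrast never change the spatial size.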