import cv2
import numpy as np

# translation_xy and change_transform_origin are affine-matrix helpers defined elsewhere
# in this repository; the import path below is assumed.
from augmentor.transform import translation_xy, change_transform_origin


def translate(image, boxes, prob=0.5, border_value=(128, 128, 128)):
    """Randomly translate an image and its (x1, y1, x2, y2) boxes with probability `prob`."""
    boxes = boxes.astype(np.float32)
    random_prob = np.random.uniform()
    if random_prob < (1 - prob):
        return image, boxes
    h, w = image.shape[:2]
    if boxes.shape[0] != 0:
        # Bound the random shift so that no box is pushed outside the image.
        min_x1, min_y1 = np.min(boxes, axis=0)[:2]
        max_x2, max_y2 = np.max(boxes, axis=0)[2:]
        translation_matrix = translation_xy(min=(min(-min_x1 // 2, 0), min(-min_y1 // 2, 0)),
                                            max=(max((w - max_x2) // 2, 1), max((h - max_y2) // 2, 1)),
                                            prob=1.)
    else:
        # No boxes: allow a shift of up to an eighth of the image size.
        translation_matrix = translation_xy(min=(min(-w // 8, 0), min(-h // 8, 0)),
                                            max=(max(w // 8, 1), max(h // 8, 1)))
    translation_matrix = change_transform_origin(translation_matrix, (w / 2, h / 2))
    image = cv2.warpAffine(
        image,
        translation_matrix[:2, :],
        dsize=(w, h),
        flags=cv2.INTER_CUBIC,
        borderMode=cv2.BORDER_CONSTANT,
        borderValue=border_value,
    )
    if boxes.shape[0] != 0:
        new_boxes = []
        for box in boxes:
            x1, y1, x2, y2 = box
            # Transform all four corners and take the axis-aligned extremes as the new box.
            points = translation_matrix.dot([
                [x1, x2, x1, x2],
                [y1, y2, y2, y1],
                [1, 1, 1, 1],
            ])
            min_x, min_y = np.min(points, axis=1)[:2]
            max_x, max_y = np.max(points, axis=1)[:2]
            new_boxes.append([min_x, min_y, max_x, max_y])
        boxes = np.array(new_boxes)
    return image, boxes
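A minimal usage sketch for translate, assuming the augmentor.transform helpers above are importable; the grey canvas, the single box, and prob=1. are made-up values for illustration only.

dummy_image = np.full((480, 640, 3), 128, dtype=np.uint8)   # grey 480x640 canvas
dummy_boxes = np.array([[100, 120, 300, 360]], dtype=np.float32)
# prob=1. forces the translation branch on every call
shifted_image, shifted_boxes = translate(dummy_image, dummy_boxes, prob=1.)
print(shifted_image.shape, shifted_boxes)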
from augmentor.color import VisualEffect
from augmentor.misc import MiscEffect


def create_generators(args):
    """
    Create generators for training and validation.

    Args
        args: parseargs object containing configuration for generators.
    """
    common_args = {
        'batch_size': args.batch_size,
        'phi': args.phi,
    }
    # create random transform generator for augmenting training data
    if args.random_transform:
        misc_effect = MiscEffect()
        visual_effect = VisualEffect()
    else:
        misc_effect = None
        visual_effect = None
    if args.dataset_type == 'pascal':
        from generators.pascal import PascalVocGenerator
        train_generator = PascalVocGenerator(
            args.pascal_path,
            'trainval',
            skip_difficult=True,
            misc_effect=misc_effect,
            visual_effect=visual_effect,
            **common_args
        )
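A hedged example of calling create_generators without a command line: an argparse.Namespace stands in for the parsed arguments, and only the attribute names are taken from the function body above; the values are placeholders.

from argparse import Namespace

args = Namespace(batch_size=2, phi=0, random_transform=True,
                 dataset_type='pascal', pascal_path='datasets/VOC0712')
create_generators(args)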
    # Inside MiscEffect's __call__ (fragment): crop, then translate, then return the augmented pair.
    image, boxes = crop(image, boxes, prob=self.crop_prob)
    image, boxes = translate(image, boxes, prob=self.translate_prob, border_value=self.border_value)
    return image, boxes
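Those two calls chain crop and translate inside a callable effect object. The class below is an illustrative stand-in, not the repository's actual MiscEffect, showing how such a composer can be structured; it assumes a crop function with the same (image, boxes, prob) signature as translate is available.

class SimpleMiscEffect:
    # Illustrative composer only; the real MiscEffect may chain more augmentations.
    def __init__(self, crop_prob=0.5, translate_prob=0.5, border_value=(128, 128, 128)):
        self.crop_prob = crop_prob
        self.translate_prob = translate_prob
        self.border_value = border_value

    def __call__(self, image, boxes):
        image, boxes = crop(image, boxes, prob=self.crop_prob)
        image, boxes = translate(image, boxes, prob=self.translate_prob, border_value=self.border_value)
        return image, boxes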
if __name__ == '__main__':
    from generators.pascal import PascalVocGenerator

    train_generator = PascalVocGenerator(
        'datasets/VOC0712',
        'trainval',
        skip_difficult=True,
        batch_size=1,
        shuffle_groups=False
    )
    misc_effect = MiscEffect()
    for i in range(train_generator.size()):
        image = train_generator.load_image(i)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        annotations = train_generator.load_annotations(i)
        boxes = annotations['bboxes']
        # draw the original boxes in red
        for box in boxes.astype(np.int32):
            cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 2)
        src_image = image.copy()
        # cv2.namedWindow('src_image', cv2.WINDOW_NORMAL)
        cv2.imshow('src_image', src_image)
        # image, boxes = misc_effect(image, boxes)
        image, boxes = multi_scale(image, boxes)
        image = image.copy()
        # draw the transformed boxes in green
        for box in boxes.astype(np.int32):
            cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 1)
        # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
        cv2.imshow('image', image)
        cv2.waitKey(0)
# Variant of the demo above: load COCO instead of Pascal VOC and apply the full MiscEffect pipeline.
if __name__ == '__main__':
    # train_generator = PascalVocGenerator(
    #     'datasets/VOC0712',
    #     'trainval',
    #     skip_difficult=True,
    #     batch_size=1,
    #     shuffle_groups=False
    # )
    from generators.coco import CocoGenerator

    train_generator = CocoGenerator(
        '/home/adam/.keras/datasets/coco/2017_118_5',
        'train2017',
        batch_size=1,
        shuffle_groups=False
    )
    misc_effect = MiscEffect()
    for i in range(train_generator.size()):
        image = train_generator.load_image(i)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        annotations = train_generator.load_annotations(i)
        boxes = annotations['bboxes']
        # draw the original boxes in red
        for box in boxes.astype(np.int32):
            cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 2)
        src_image = image.copy()
        # cv2.namedWindow('src_image', cv2.WINDOW_NORMAL)
        cv2.imshow('src_image', src_image)
        image, boxes = misc_effect(image, boxes)
        # image, boxes = multi_scale(image, boxes)
        image = image.copy()
        # draw the augmented boxes in green
        for box in boxes.astype(np.int32):
            cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 1)
        # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
        cv2.imshow('image', image)
        cv2.waitKey(0)
"""
filename = self.image_names[image_index] + '.xml'
try:
tree = ET.parse(os.path.join(self.data_dir, 'Annotations', filename))
return self.__parse_annotations(tree.getroot())
except ET.ParseError as e:
raise_from(ValueError('invalid annotations file: {}: {}'.format(filename, e)), None)
except ValueError as e:
raise_from(ValueError('invalid annotations file: {}: {}'.format(filename, e)), None)
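The same wrap-and-rethrow pattern can be exercised on its own with an in-memory XML string instead of a VOC annotation file; the string and the file name below are made up for the demonstration.

import xml.etree.ElementTree as ET
from six import raise_from

broken_xml = '<annotation><object><name>dog</name>'  # deliberately truncated XML
try:
    try:
        ET.fromstring(broken_xml)
    except ET.ParseError as e:
        raise_from(ValueError('invalid annotations file: {}: {}'.format('000001.xml', e)), None)
except ValueError as wrapped:
    print(wrapped)  # the parse error resurfaces as a ValueError naming the file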
if __name__ == '__main__':
    from augmentor.misc import MiscEffect
    from augmentor.color import VisualEffect

    misc_effect = MiscEffect(border_value=0)
    visual_effect = VisualEffect()
    generator = PascalVocGenerator(
        'datasets/VOC0712',
        'trainval',
        skip_difficult=True,
        misc_effect=misc_effect,
        visual_effect=visual_effect,
        batch_size=1
    )
    for inputs, targets in generator:
        print('hi')
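If the generator cycles indefinitely like a Keras-style sequence (an assumption here, since the loop above never breaks), bounding the iteration is safer for a quick inspection:

for step, (inputs, targets) in enumerate(generator):
    print('batch', step, type(inputs), type(targets))
    if step >= 4:
        break  # stop after five batches instead of looping forever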