How to use the tensorpack.dataflow.imgaug module in tensorpack

To help you get started, we've selected a few tensorpack examples based on popular ways the imgaug module is used in public projects.
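
All of the snippets below build on the same pattern: imgaug augmentors compose into a list that is applied either to single images or to a whole dataflow. A minimal sketch (the image path is a placeholder; augment_return_params mirrors its use in the detection examples below):

import cv2
from tensorpack.dataflow import imgaug

augs = imgaug.AugmentorList([
    imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
    imgaug.CenterCrop((224, 224)),
])
im = cv2.imread('path/to/image.jpg', cv2.IMREAD_COLOR).astype('float32')
im, params = augs.augment_return_params(im)  # params can replay the transform on coords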


github tensorpack / tensorpack / examples / ImageNetModels / alexnet.py
def get_data(name, batch):
    isTrain = name == 'train'
    if isTrain:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.RandomCrop(224),
            imgaug.Lighting(0.1,
                            eigval=np.asarray(
                                [0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                            eigvec=np.array(
                                [[-0.5675, 0.7192, 0.4009],
                                 [-0.5808, -0.0045, -0.8140],
                                 [-0.5836, -0.6948, 0.4203]],
                                dtype='float32')[::-1, ::-1]),
            imgaug.Flip(horiz=True)]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224))]
    return get_imagenet_dataflow(args.data, name, batch, augmentors)
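
imgaug.Lighting implements AlexNet-style PCA lighting noise. The eigval/eigvec constants were computed on RGB ImageNet statistics in fb.resnet.torch and are reversed with [::-1] here because these pipelines feed BGR images (a comment in the shufflenet example below says the same). Conceptually the augmentor does something like this sketch (not tensorpack's exact implementation):

import numpy as np

def pca_lighting(img, std, eigval, eigvec, rng=np.random):
    alpha = rng.randn(3) * std          # per-eigenvector strength
    delta = eigvec.dot(alpha * eigval)  # one 3-vector offset for the whole image
    return img + delta                  # broadcasts over H x W x 3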
github iamhankai / ghostnet / tensorflow / main.py
def get_data(name, batch):
    isTrain = name == 'train'
    image_shape = 224

    if isTrain:
        augmentors = [
            # use lighter augs if model is too small
            GoogleNetResize(crop_area_fraction=0.49 if args.width_ratio < 1 else 0.08,
                            target_shape=image_shape),
            imgaug.RandomOrderAug(
                [imgaug.BrightnessScale((0.6, 1.4), clip=False),
                 imgaug.Contrast((0.6, 1.4), clip=False),
                 imgaug.Saturation(0.4, rgb=False),
                ]),
            imgaug.Flip(horiz=True),
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(int(image_shape * 256 / 224), cv2.INTER_CUBIC),
            imgaug.CenterCrop((image_shape, image_shape)),
        ]
    return get_imagenet_dataflow(args.data_dir, name, batch, augmentors,
                                 meta_dir=args.meta_dir)
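
GoogleNetResize is the inception-style random crop-and-resize helper from tensorpack's ImageNet example code; crop_area_fraction is the minimum fraction of the image area a crop may cover, so 0.49 (instead of the usual 0.08) is a deliberately gentler augmentation for a small model. It behaves roughly like this hypothetical sketch (not the actual implementation):

import cv2
import numpy as np

def inception_style_crop(im, crop_area_fraction=0.08, target_shape=224,
                         rng=np.random):
    # sample a crop covering crop_area_fraction..100% of the image area with a
    # random aspect ratio, then resize the crop to target_shape x target_shape
    h, w = im.shape[:2]
    for _ in range(10):  # retry until a sampled crop fits inside the image
        area = h * w * rng.uniform(crop_area_fraction, 1.0)
        aspect = rng.uniform(0.75, 1.333)
        ch, cw = int(np.sqrt(area / aspect)), int(np.sqrt(area * aspect))
        if ch <= h and cw <= w:
            y0, x0 = rng.randint(h - ch + 1), rng.randint(w - cw + 1)
            return cv2.resize(im[y0:y0 + ch, x0:x0 + cw],
                              (target_shape, target_shape),
                              interpolation=cv2.INTER_CUBIC)
    side = min(h, w)  # fallback: plain center crop
    y0, x0 = (h - side) // 2, (w - side) // 2
    return cv2.resize(im[y0:y0 + side, x0:x0 + side],
                      (target_shape, target_shape))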
github itmessager / Figma_RCNN / detection / tensorpacks / data.py
    height, width: integer
    file_name: str, full path to the image
    boxes: numpy array of kx4 floats
    attrs: numpy array of k integers, -1 -> negative, 0 -> ignore, 1 -> positive
    """

    # Valid training images should have at least one fg box.
    # But this filter shall not be applied for testing.
    # num = len(roidbs)
    # roidbs = list(filter(lambda img: len(img['boxes'][img['is_crowd'] == 0]) > 0, roidbs))
    # logger.info("Filtered {} images which contain no non-crowd groudtruth boxes. Total #images for training: {}".format(
    #    num - len(roidbs), len(roidbs)))

    ds = DataFromList(roidbs_wider, shuffle=True)

    aug = imgaug.AugmentorList(
        [CustomResize(cfg.PREPROC.TRAIN_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE),
         imgaug.Flip(horiz=True)])

    def preprocess(roidb):
        fname = roidb['img']
        boxes = roidb['bbox']
        male = roidb['male']
        longhair = roidb['longhair']
        sunglass = roidb['sunglass']
        hat = roidb['hat']
        tshirt = roidb['tshirt']
        longsleeve = roidb['longsleeve']
        formal = roidb['formal']
        shorts = roidb['shorts']
        jeans = roidb['jeans']
        longpants = roidb['longpants']
        skirt = roidb['skirt']
        facemask = roidb['facemask']
github armandmcqueen / tensorpack-mask-rcnn / MaskRCNN_no_batch / data.py
    roidbs = DetectionDataset().load_training_roidbs(cfg.DATA.TRAIN)
    print_class_histogram(roidbs)

    # Valid training images should have at least one fg box.
    # But this filter shall not be applied for testing.
    num = len(roidbs)
    roidbs = list(filter(lambda img: len(img['boxes'][img['is_crowd'] == 0]) > 0, roidbs))
    logger.info("Filtered {} images which contain no non-crowd groudtruth boxes. Total #images for training: {}".format(
        num - len(roidbs), len(roidbs)))

    ds = DataFromList(roidbs, shuffle=True)

    aug = imgaug.AugmentorList(
        [CustomResize(cfg.PREPROC.TRAIN_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE),
         imgaug.Flip(horiz=True)])

    def preprocess(roidb):
        fname, boxes, klass, is_crowd = roidb['file_name'], roidb['boxes'], roidb['class'], roidb['is_crowd']
        boxes = np.copy(boxes)
        im = cv2.imread(fname, cv2.IMREAD_COLOR)
        assert im is not None, fname
        im = im.astype('float32')
        # assume floatbox as input
        assert boxes.dtype == np.float32, "Loader has to return floating point boxes!"

        # augmentation:
        im, params = aug.augment_return_params(im)
        points = box_to_point8(boxes)
        points = aug.augment_coords(points, params)
        boxes = point8_to_box(points)
        assert np.min(np_area(boxes)) > 0, "Some boxes have zero area!"
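
Note the box_to_point8 / point8_to_box round-trip: each kx4 box is expanded into its four corner points, the points are warped with exactly the same params as the image, and the result is re-boxed by taking per-box min/max. A sketch of these helpers consistent with their use here (the real definitions live elsewhere in the example code, so treat this as illustrative):

import numpy as np

def box_to_point8(boxes):
    # (k, 4) x1y1x2y2 boxes -> (4k, 2) corner points
    b = boxes[:, [0, 1, 2, 3, 0, 3, 2, 1]]
    return b.reshape((-1, 2))

def point8_to_box(points):
    # (4k, 2) corner points -> (k, 4) axis-aligned boxes
    p = points.reshape((-1, 4, 2))
    return np.concatenate((p.min(axis=1), p.max(axis=1)), axis=1)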
github PatWie / tensorflow-recipes / LearningToSeeInTheDark / sony_dataset.py
            gt_float = gt_float[shp.h0 * 2: shp.h0 * 2 + shp.h * 2,
                                shp.w0 * 2: shp.w0 * 2 + shp.w * 2, :]

            yield [gt_float, input_float]


class CenterCropRaw(RandomCropRaw):
    def __init__(self, ds, patch_size=512):
        # note: super(RandomCropRaw, self) intentionally skips RandomCropRaw.__init__
        # and runs the grandparent's init, so only the CenterCrop below is installed
        super(RandomCropRaw, self).__init__(ds)
        self.aug = imgaug.CenterCrop(patch_size)


if __name__ == '__main__':
    if False:  # flip to True to exercise the training-pipeline demo below
        ds = SonyDataset('/scratch/wieschol/seeindark/dataset/Sony')
        ds = RandomCropRaw(ds)
        aus = [imgaug.Flip(horiz=True), imgaug.Flip(vert=True), imgaug.Transpose()]
        ds = AugmentImageComponents(ds, aus, index=(0, 1), copy=False)
        ds.reset_state()
        next(ds.get_data())
        ds = PrefetchDataZMQ(ds, nr_proc=10)
        ds = BatchData(ds, 8)
        ds = PrintData(ds)
        ds.reset_state()
        next(ds.get_data())
        # TestDataSpeed(ds).start()

    val_ds = SonyDataset('/scratch/wieschol/seeindark/dataset/Sony', subset='test', num=10)
    val_ds = CenterCropRaw(val_ds)
    val_ds.reset_state()
    for k, dp in enumerate(val_ds.get_data()):
        print(k)
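
AugmentImageComponents (note the plural) is what keeps the pair consistent above: it draws augmentation parameters once per datapoint and applies them to every component listed in index, so the ground-truth and input crops receive identical flips and transposes. A minimal self-contained sketch with toy arrays (not the Sony data):

import numpy as np
from tensorpack.dataflow import AugmentImageComponents, DataFromList, imgaug

pairs = [[np.zeros((32, 32, 3), 'float32'), np.ones((32, 32, 3), 'float32')]]
ds = DataFromList(pairs, shuffle=False)
ds = AugmentImageComponents(
    ds, [imgaug.Flip(horiz=True), imgaug.Transpose()], index=(0, 1))
ds.reset_state()
gt, inp = next(ds.get_data())  # both components got the same random transform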
github osmr / imgclsmob / tensorflow_ / tensorflowcv / models / others / tpack / shufflenet.py
def get_data(name, batch):
    isTrain = name == 'train'
    if isTrain:
        augmentors = [
            # ... (first entries of the training list elided in this excerpt)
            imgaug.RandomOrderAug(
                [imgaug.BrightnessScale((0.6, 1.4), clip=False),
                 imgaug.Contrast((0.6, 1.4), clip=False),
                 imgaug.Saturation(0.4, rgb=False),
                 # rgb-bgr conversion for the constants copied from fb.resnet.torch
                 imgaug.Lighting(0.1,
                                 eigval=np.asarray(
                                     [0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                                 eigvec=np.array(
                                     [[-0.5675, 0.7192, 0.4009],
                                      [-0.5808, -0.0045, -0.8140],
                                      [-0.5836, -0.6948, 0.4203]],
                                     dtype='float32')[::-1, ::-1]
                                 )]),
            imgaug.Flip(horiz=True),
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ]
    return get_imagenet_dataflow(
        args.data, name, batch, augmentors)
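
imgaug.RandomOrderAug applies its sub-augmentors in a random order for every image, so the color jitters compose differently each time. A small demo on dummy data (augment_return_params as used in the detection examples above):

import numpy as np
from tensorpack.dataflow import imgaug

color_aug = imgaug.AugmentorList([
    imgaug.RandomOrderAug(
        [imgaug.BrightnessScale((0.6, 1.4), clip=False),
         imgaug.Contrast((0.6, 1.4), clip=False),
         imgaug.Saturation(0.4, rgb=False)])])
im = (np.random.rand(224, 224, 3) * 255).astype('float32')
im, _ = color_aug.augment_return_params(im)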
github tensorpack / tensorpack / examples / ImageNetModels / imagenet_utils.py
def fbresnet_augmentor(isTrain):
    """
    Augmentor used in fb.resnet.torch, for BGR images in range [0, 255].
    """
    interpolation = cv2.INTER_CUBIC
    if isTrain:
        augmentors = [
            GoogleNetResize(),
            imgaug.RandomOrderAug(
                [imgaug.BrightnessScale((0.6, 1.4), clip=False),
                 imgaug.Contrast((0.6, 1.4), clip=False),
                 imgaug.Saturation(0.4, rgb=False),
                 # rgb-bgr conversion for the constants copied from fb.resnet.torch
                 imgaug.Lighting(0.1,
                                 eigval=np.asarray(
                                     [0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                                 eigvec=np.array(
                                     [[-0.5675, 0.7192, 0.4009],
                                      [-0.5808, -0.0045, -0.8140],
                                      [-0.5836, -0.6948, 0.4203]],
                                     dtype='float32')[::-1, ::-1]
                                 )]),
            imgaug.Flip(horiz=True),
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256, interp=interpolation),
            imgaug.CenterCrop((224, 224)),
        ]
    return augmentors
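
Either branch returns a plain list of augmentors; callers typically hand it to AugmentImageComponent (singular: only the image component is transformed) before batching. A sketch of the usual wiring, assuming an existing dataflow ds whose first component is the image:

from tensorpack.dataflow import AugmentImageComponent, BatchData

augmentors = fbresnet_augmentor(isTrain=True)
ds = AugmentImageComponent(ds, augmentors, copy=False)
ds = BatchData(ds, 64)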