How to use the gluoncv.data.VOCDetection class in gluoncv

To help you get started, we’ve selected a few gluoncv.data.VOCDetection examples based on popular ways it is used in public projects.
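Before the project snippets below, here is a minimal sketch of the class on its own. It assumes the PASCAL VOC data already sits under the default ~/.mxnet/datasets/voc (for example, downloaded with gluoncv's pascal_voc.py script):

# Minimal sketch: load VOC2007 trainval and inspect one sample.
# Assumes the dataset already lives under ~/.mxnet/datasets/voc.
from gluoncv import data as gdata

dataset = gdata.VOCDetection(splits=[(2007, 'trainval')])
print('num images:', len(dataset))
print('classes:', dataset.classes)

img, label = dataset[0]
print('image shape:', img.shape)    # HWC uint8 mxnet NDArray
print('label shape:', label.shape)  # rows of [xmin, ymin, xmax, ymax, class id, difficult]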


github dmlc / gluon-cv / tests / unittests / test_data_transforms.py
def test_transforms_presets_center_net():
    im_fname = gcv.utils.download('https://github.com/dmlc/web-data/blob/master/' +
                                  'gluoncv/detection/biking.jpg?raw=true', path='biking.jpg')
    x, orig_img = center_net.load_test(im_fname, short=512)
    x1, orig_img1 = center_net.transform_test(mx.image.imread(im_fname), short=512)
    np.testing.assert_allclose(x.asnumpy(), x1.asnumpy())
    np.testing.assert_allclose(orig_img, orig_img1)
    if not osp.isdir(osp.expanduser('~/.mxnet/datasets/voc')):
        return
    train_dataset = gcv.data.VOCDetection(splits=((2007, 'trainval'), (2012, 'trainval')))
    val_dataset = gcv.data.VOCDetection(splits=[(2007, 'test')])
    width, height = (512, 512)
    net = gcv.model_zoo.get_model('center_net_resnet18_v1b_voc', pretrained=False, pretrained_base=False)
    net.initialize()
    num_workers = 0
    batch_size = 4
    batchify_fn = Tuple([Stack() for _ in range(6)])
    train_loader = gluon.data.DataLoader(
        train_dataset.transform(center_net.CenterNetDefaultTrainTransform(width, height, num_class=len(train_dataset.classes), scale_factor=net.scale)),
        batch_size, True, batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)
    val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
    val_loader = gluon.data.DataLoader(
        val_dataset.transform(center_net.CenterNetDefaultValTransform(width, height)),
        batch_size, False, batchify_fn=val_batchify_fn, last_batch='keep', num_workers=num_workers)

    for loader in [train_loader, val_loader]:
        for i, batch in enumerate(loader):
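To round this out, a rough sketch of iterating the loaders built above (the break condition and shape printing are illustrative additions, not part of the original test):

# Illustrative continuation: walk a couple of batches from each loader to
# make sure the transforms and batchify functions run end to end.
for loader in [train_loader, val_loader]:
    for i, batch in enumerate(loader):
        if i >= 2:  # a couple of batches is enough for a smoke test
            break
        print([x.shape for x in batch])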
github awslabs / autogluon / autogluon / task / object_detection / dataset.py
import numpy as np

from mxnet import gluon, nd
from mxnet.gluon.data.vision import transforms
from gluoncv.data import transforms as gcv_transforms
from gluoncv import data as gdata
from gluoncv.data.batchify import Tuple, Stack, Pad
from gluoncv.data.transforms.presets.ssd import SSDDefaultTrainTransform, SSDDefaultValTransform

from ... import dataset
from ...utils.data_analyzer import DataAnalyzer

__all__ = ['get_dataset', 'batchify_fn', 'batchify_val_fn']

_dataset = {'voc': gdata.VOCDetection}


_transform_fns = {'SSDDefaultTrainTransform': SSDDefaultTrainTransform,
                  'SSDDefaultValTransform': SSDDefaultValTransform}


def get_transform_fn(name, *args):
    """Returns a transform function by name

    Parameters
    ----------
    name : str
        Name of the transform_fn.

    Returns
    -------
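Outside of AutoGluon's own wrappers, the two registries above are enough to sketch a lookup by name; a hedged example (VOC2007 test data is assumed to be under the default ~/.mxnet/datasets/voc, and the wrapper signatures themselves are not shown here):

# Illustrative lookup using the registries above; only the registry access
# mirrors the snippet, the rest is an assumed usage pattern.
val_dataset = _dataset['voc'](splits=[(2007, 'test')])
val_transform = _transform_fns['SSDDefaultValTransform'](512, 512)
val_data = val_dataset.transform(val_transform)
print('validation samples:', len(val_data))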
github dmlc / gluon-cv / docs / tutorials / datasets / detection_custom.py
"""VOCtemplate
└── VOC2018
    ├── Annotations
    │   └── 000001.xml
    ├── ImageSets
    │   └── Main
    │       └── train.txt
    └── JPEGImages
        └── 000001.jpg
"""

##############################################################################
# And an example of an annotation file:
with open('VOCtemplate/VOC2018/Annotations/000001.xml', 'r') as fid:
    print(fid.read())

##############################################################################
# As long as your dataset can match the PASCAL VOC convention, it is convenient to
# derive a custom dataset from ``VOCDetection``
from gluoncv.data import VOCDetection
class VOCLike(VOCDetection):
    CLASSES = ['person', 'dog']
    def __init__(self, root, splits, transform=None, index_map=None, preload_label=True):
        super(VOCLike, self).__init__(root, splits, transform, index_map, preload_label)

dataset = VOCLike(root='VOCtemplate', splits=((2018, 'train'),))
print('length of dataset:', len(dataset))
print('label example:')
print(dataset[0][1])

##############################################################################
# The last column indicates the difficulty of the labeled object.
# You can ignore the ``difficult`` field in the xml file if it is not relevant to you:
"""<difficult>0</difficult>"""
github zzdang / cascade_rcnn_gluon / scripts / detection / cascade_rcnn / train_cascade_rcnn_2.py
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017')
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
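For reference, the VOC07MApMetric built by this helper is normally updated batch by batch with detector outputs and the padded ground-truth labels; the loop below is only a hedged sketch (net and val_loader are placeholders, and gluoncv detectors return class ids, scores and boxes in that order):

# Hedged sketch of consuming the metric above during validation.
# `net` and `val_loader` are placeholders; padded labels carry ground-truth
# boxes, class ids and difficult flags in their last columns.
val_metric.reset()
for x, y in val_loader:
    ids, scores, bboxes = net(x)
    gt_bboxes = y[:, :, :4]
    gt_labels = y[:, :, 4:5]
    gt_difficults = y[:, :, 5:6] if y.shape[-1] > 5 else None
    val_metric.update(bboxes, ids, scores, gt_bboxes, gt_labels, gt_difficults)
map_name, mean_ap = val_metric.get()
print(dict(zip(map_name, mean_ap)))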
github zzdang / cascade_rcnn_gluon / scripts / detection / faster_rcnn / train_faster_rcnn.py
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017', use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
github zzdang / cascade_rcnn_gluon / scripts / detection / cascade_rcnn / train_cascade_rfcn.py
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017')
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
github CortexFoundation / CortexTheseus / cvm-runtime / python / mrt / dataset.py
    def _load_data(self):
        assert len(self.ishape) == 4
        N, C, H, W = self.ishape
        assert C == 3
        val_dataset = gdata.VOCDetection(
            root=path.join(self.root_dir, 'VOCdevkit'),
            splits=[('2007', 'test')])
        val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
        self.data = gluon.data.DataLoader(
            val_dataset.transform(YOLO3DefaultValTransform(W, H)),
            N, False, batchify_fn=val_batchify_fn,
            last_batch='discard', num_workers=30)
github dmlc / gluon-cv / scripts / detection / ssd / eval_ssd.py
def get_dataset(dataset, data_shape):
    if dataset.lower() == 'voc':
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(
            val_dataset, args.save_prefix + '_eval', cleanup=True,
            data_shape=(data_shape, data_shape))
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return val_dataset, val_metric
github ijkguo / mx-rcnn / gluon_demo.py
def get_voc_names(args):
    args.rcnn_num_classes = len(gdata.VOCDetection.CLASSES)
    return gdata.VOCDetection.CLASSES
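Because CLASSES is a class attribute, it can be read without constructing a dataset or downloading any VOC files:

# VOCDetection.CLASSES is the fixed list of the 20 PASCAL VOC categories.
from gluoncv import data as gdata

print(len(gdata.VOCDetection.CLASSES))   # 20
print(gdata.VOCDetection.CLASSES[:5])    # first few classes: aeroplane, bicycle, bird, boat, bottle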
github dmlc / gluon-cv / scripts / detection / center_net / train_center_net.py
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(root=args.dataset_root + "/coco", splits='instances_train2017')
        val_dataset = gdata.COCODetection(root=args.dataset_root + "/coco", splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(
            val_dataset, args.save_prefix + '_eval', cleanup=True,
            data_shape=(args.data_shape, args.data_shape), post_affine=get_post_transform)
        # coco validation is slow, consider increasing the validation interval
        if args.val_interval == 1:
            args.val_interval = 10
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    if args.num_samples < 0: