How to use the gluoncv.utils.viz.plot_bbox function in gluoncv

To help you get started, we’ve selected a few gluoncv examples based on popular ways this function is used in public projects.

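As a quick orientation before the project examples, here is a minimal sketch of the basic call. `plot_bbox` takes an image (NumPy array or MXNet NDArray), an (N, 4) array of `xmin, ymin, xmax, ymax` boxes, and optional scores, labels, and class names, and returns a matplotlib axes. The blank image and the class names below are illustrative placeholders, not from any of the projects.

import matplotlib.pyplot as plt
import mxnet as mx
from gluoncv import utils

# a minimal sketch: draw two labeled boxes on a blank 300x300 image
img = mx.nd.zeros((300, 300, 3), dtype='uint8')
bboxes = mx.nd.array([[20, 30, 120, 150], [160, 60, 280, 220]])  # xmin, ymin, xmax, ymax
labels = mx.nd.array([0, 1])  # indices into class_names
ax = utils.viz.plot_bbox(img, bboxes, labels=labels, class_names=['cat', 'dog'])
plt.show()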

github Guanghan / mxnet-centernet / unit_test / test_load_coco.py (View on GitHub)
import matplotlib.pyplot as plt
from gluoncv import data, utils

train_dataset = data.COCODetection(splits=['instances_train2017'])
val_dataset = data.COCODetection(splits=['instances_val2017'])
print('Num of training images:', len(train_dataset))
print('Num of validation images:', len(val_dataset))

# load some samples
train_image, train_label = train_dataset[0]
bounding_boxes = train_label[:, :4]
class_ids = train_label[:, 4:5]
print('Image size (height, width, RGB):', train_image.shape)
print('Num of objects:', bounding_boxes.shape[0])
print('Bounding boxes (num_boxes, x_min, y_min, x_max, y_max):\n',
      bounding_boxes)
print('Class IDs (num_boxes, ):\n', class_ids)

# Visualize some samples
utils.viz.plot_bbox(train_image.asnumpy(), bounding_boxes, scores=None,
                    labels=class_ids, class_names=train_dataset.classes)
plt.show()
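As a small follow-up (assuming the snippet above has run), the numeric class IDs can be mapped back to readable category names through the dataset's `classes` attribute:

# map numeric class IDs back to readable COCO category names
for cid in class_ids[:, 0].astype('int'):
    print(train_dataset.classes[cid])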
github dmlc / gluon-cv / tests / unittests / test_utils_viz.py (View on GitHub)
import mxnet as mx
import numpy as np
import gluoncv as gcv

def test_viz_bbox():
    img = mx.nd.zeros((300, 300, 3), dtype=np.uint8)
    bbox = mx.nd.array([[10, 20, 200, 500], [150, 200, 400, 300]])
    scores = mx.nd.array([0.8, 0.001])
    labels = mx.nd.array([1, 3])
    class_names = ['a', 'b', 'c']
    ax = gcv.utils.viz.plot_bbox(img, bbox, scores=scores, labels=labels, class_names=class_names)
    ax = gcv.utils.viz.plot_bbox(img, bbox, ax=ax, reverse_rgb=True)
    ax = gcv.utils.viz.plot_bbox(img, bbox / 500, ax=ax, reverse_rgb=True, absolute_coordinates=False)

    img_output = gcv.utils.viz.cv_plot_bbox(img, bbox, scores=scores, labels=labels, class_names=class_names)
    img_output = gcv.utils.viz.cv_plot_bbox(img, bbox)
    img_output = gcv.utils.viz.cv_plot_bbox(img, bbox / 500, absolute_coordinates=False)
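The test above also exercises two useful switches: `reverse_rgb` swaps the channel order for images loaded in BGR, and `absolute_coordinates=False` treats box coordinates as fractions of the image size. Unlike `plot_bbox`, `cv_plot_bbox` draws with OpenCV and returns the annotated image array, which suits video loops better than matplotlib. A hedged sketch, assuming OpenCV is installed; the window name is a placeholder:

import cv2
import mxnet as mx
import numpy as np
import gluoncv as gcv

img = mx.nd.zeros((300, 300, 3), dtype=np.uint8)
bbox = mx.nd.array([[10, 20, 200, 250]])
# cv_plot_bbox draws the box on the image and returns a NumPy array
out = gcv.utils.viz.cv_plot_bbox(img, bbox, labels=mx.nd.array([0]), class_names=['a'])
cv2.imshow('detections', out)
cv2.waitKey(0)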
github dmlc / gluon-cv / scripts / detection / ssd / demo_ssd.py (View on GitHub)
import matplotlib.pyplot as plt
import gluoncv as gcv
from gluoncv.data.transforms import presets

# `args` and `ctx` come from the script's argument parsing and context setup
image_list = [x.strip() for x in args.images.split(',') if x.strip()]

if args.pretrained.lower() in ['true', '1', 'yes', 't']:
    net = gcv.model_zoo.get_model(args.network, pretrained=True)
else:
    net = gcv.model_zoo.get_model(args.network, pretrained=False, pretrained_base=False)
    net.load_parameters(args.pretrained)
net.set_nms(0.45, 200)
net.collect_params().reset_ctx(ctx)

for image in image_list:
    x, img = presets.ssd.load_test(image, short=512)
    x = x.as_in_context(ctx[0])
    ids, scores, bboxes = [xx[0].asnumpy() for xx in net(x)]
    ax = gcv.utils.viz.plot_bbox(img, bboxes, scores, ids, thresh=args.thresh,
                                 class_names=net.classes)
    plt.show()
github dmlc / gluon-cv / docs / tutorials / detection / train_ssd_voc.py (View on GitHub)
import matplotlib.pyplot as plt

from gluoncv.utils import viz

# train_image and train_label come from train_dataset[0] earlier in the tutorial
bboxes = train_label[:, :4]
cids = train_label[:, 4:5]

ax = viz.plot_bbox(
    train_image.asnumpy(),
    bboxes,
    labels=cids,
    class_names=train_dataset.classes)
plt.show()

##############################################################################
# Validation images look very similar to the training images because both
# sets were randomly split from the same pool of annotated images
val_image, val_label = val_dataset[0]
bboxes = val_label[:, :4]
cids = val_label[:, 4:5]
ax = viz.plot_bbox(
    val_image.asnumpy(),
    bboxes,
    labels=cids,
    class_names=train_dataset.classes)
plt.show()

##############################################################################
# For SSD networks, data augmentation is critical to performance (see the explanations in the paper [Liu16]_).
# GluonCV provides a large set of image and bounding-box transform functions
# for this purpose, and they are straightforward to use.
from gluoncv.data.transforms import presets
from gluoncv import utils
from mxnet import nd

##############################################################################
width, height = 512, 512  # suppose we use 512 as base training size
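Continuing from the imports and size above, the tutorial's next step applies the default SSD training transform to the sample loaded earlier, then undoes the ImageNet normalization so the augmented image can be displayed with plot_bbox:

train_transform = presets.ssd.SSDDefaultTrainTransform(width, height)
train_image2, train_label2 = train_transform(train_image, train_label)

# the transform returns a normalized CHW tensor; convert back to HWC in (0, 255) for display
train_image2 = train_image2.transpose((1, 2, 0)) * nd.array((0.229, 0.224, 0.225)) + nd.array((0.485, 0.456, 0.406))
train_image2 = (train_image2 * 255).clip(0, 255)
ax = viz.plot_bbox(train_image2.asnumpy(), train_label2[:, :4],
                   labels=train_label2[:, 4:5],
                   class_names=train_dataset.classes)
plt.show()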
github apache / incubator-tvm / tutorials / frontend / deploy_ssd_gluoncv.py (View on GitHub)
def run(graph, lib, params, ctx):
    # build the TVM graph runtime module from the compiled graph
    m = graph_runtime.create(graph, lib, ctx)
    # set inputs; `x` is the preprocessed image tensor from earlier in the tutorial
    tvm_input = tvm.nd.array(x.asnumpy(), ctx=ctx)
    m.set_input('data', tvm_input)
    m.set_input(**params)
    # execute
    m.run()
    # get outputs
    class_IDs, scores, bounding_boxs = m.get_output(0), m.get_output(1), m.get_output(2)
    return class_IDs, scores, bounding_boxs

for target, ctx in target_list:
    graph, lib, params = build(target)
    class_IDs, scores, bounding_boxs = run(graph, lib, params, ctx)

######################################################################
# Display result

ax = utils.viz.plot_bbox(img, bounding_boxs.asnumpy()[0], scores.asnumpy()[0],
                         class_IDs.asnumpy()[0], class_names=block.classes)
plt.show()
github dmlc / gluon-cv / docs / tutorials / instance / demo_mask_rcnn.py (View on GitHub)
# :py:func:`gluoncv.utils.viz.expand_mask` resizes each segmentation mask
# and pastes it into its bounding box location in the original image.
# :py:func:`gluoncv.utils.viz.plot_mask` overlays the resulting
# segmentation masks on an image.

ids, scores, bboxes, masks = [xx[0].asnumpy() for xx in net(x)]

# paint segmentation mask on images directly
width, height = orig_img.shape[1], orig_img.shape[0]
masks, _ = utils.viz.expand_mask(masks, bboxes, (width, height), scores)
orig_img = utils.viz.plot_mask(orig_img, masks)

# identical to Faster RCNN object detection
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1)
ax = utils.viz.plot_bbox(orig_img, bboxes, scores, ids,
                         class_names=net.classes, ax=ax)
plt.show()
github dmlc / gluon-cv / docs / tutorials / detection / skip_fintune.py (View on GitHub)
# This strategy applies to SSD, YOLO and Mask R-CNN models.
# Here we use Mask R-CNN and reset the classes so it detects persons only.

net = model_zoo.get_model('mask_rcnn_resnet50_v1b_coco', pretrained=True)
net.reset_class(classes=['person'], reuse_weights=['person'])
ids, scores, bboxes, masks = [xx[0].asnumpy() for xx in net(x)]

# paint segmentation mask on images directly
width, height = orig_img.shape[1], orig_img.shape[0]
masks, _ = utils.viz.expand_mask(masks, bboxes, (width, height), scores)
orig_img = utils.viz.plot_mask(orig_img, masks)

# identical to Faster RCNN object detection
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1)
ax = utils.viz.plot_bbox(orig_img, bboxes, scores, ids,
                         class_names=net.classes, ax=ax)
plt.show()
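As a closing note on the snippet above, `reuse_weights` also accepts a dict that maps new class names to pretrained ones, so classes can be renamed or remapped without finetuning. A hedged sketch with a model-zoo SSD network; the model name and mapping below are illustrative:

from gluoncv import model_zoo

net = model_zoo.get_model('ssd_512_mobilenet1.0_voc', pretrained=True)
# keep two classes; 'vehicle' reuses the weights of the pretrained 'car' class
net.reset_class(classes=['person', 'vehicle'],
                reuse_weights={'person': 'person', 'vehicle': 'car'})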