    parser.add_argument('--pretrained', type=str, default='True',
                        help='Load weights from previously saved parameters.')
    parser.add_argument('--thresh', type=float, default=0.5,
                        help='Threshold of object score when visualizing the bboxes.')
    args = parser.parse_args()
    return args

if __name__ == '__main__':
    args = parse_args()
    # context list
    ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
    ctx = [mx.cpu()] if not ctx else ctx
    # grab some image if not specified
    if not args.images.strip():
        gcv.utils.download("https://cloud.githubusercontent.com/assets/3307514/" +
                           "20012568/cbc2d6f6-a27d-11e6-94c3-d35a9cb47609.jpg", 'street.jpg')
        image_list = ['street.jpg']
    else:
        image_list = [x.strip() for x in args.images.split(',') if x.strip()]

    if args.pretrained.lower() in ['true', '1', 'yes', 't']:
        net = gcv.model_zoo.get_model(args.network, pretrained=True)
    else:
        net = gcv.model_zoo.get_model(args.network, pretrained=False, pretrained_base=False)
        net.load_parameters(args.pretrained)
    net.set_nms(0.45, 200)
    net.collect_params().reset_ctx(ctx=ctx)

    for image in image_list:
        ax = None
        x, img = presets.yolo.load_test(image, short=512)
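        # The loop body below is a minimal sketch of the remaining demo steps
        # (forward pass and visualization), following the usual GluonCV demo
        # pattern; it assumes `from matplotlib import pyplot as plt` at the top
        # of the script and is not necessarily the exact original code.
        x = x.as_in_context(ctx[0])
        ids, scores, bboxes = [xx[0].asnumpy() for xx in net(x)]
        ax = gcv.utils.viz.plot_bbox(img, bboxes, scores, ids, thresh=args.thresh,
                                     class_names=net.classes, ax=ax)
        plt.show()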
######################################################################
# Load a pretrained model
# -------------------------
#
# Let's get a Faster RCNN model trained on the COCO
# dataset with a ResNet-50 backbone.
net = model_zoo.get_model('faster_rcnn_resnet50_v1b_coco', pretrained=True)
######################################################################
# Pre-process an image
# --------------------
# Similar to the Faster RCNN inference tutorial, we grab and preprocess a demo image
im_fname = utils.download('https://github.com/dmlc/web-data/blob/master/' +
                          'gluoncv/detection/biking.jpg?raw=true',
                          path='biking.jpg')
x, orig_img = data.transforms.presets.rcnn.load_test(im_fname)
######################################################################
# Reset classes to exactly what we want
# -------------------------------------
# The original COCO model has 80 classes
print('coco classes: ', net.classes)
net.reset_class(classes=['bicycle', 'backpack'], reuse_weights=['bicycle', 'backpack'])
# now net has 2 classes as desired
print('new classes: ', net.classes)
######################################################################
# Inference and display
# ---------------------
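# The code below is a minimal sketch of this step following the standard
# GluonCV pattern: run the network on the preprocessed image and plot the
# detected boxes. It reuses the names defined above (``net``, ``x``,
# ``orig_img``, ``utils``) and imports matplotlib locally; the exact original
# code for this step is not shown in this excerpt.
from matplotlib import pyplot as plt

box_ids, scores, bboxes = net(x)
ax = utils.viz.plot_bbox(orig_img, bboxes[0], scores[0], box_ids[0],
                         class_names=net.classes)
plt.show()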
def download_ade(path, overwrite=False):
    _AUG_DOWNLOAD_URLS = [
        ('http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip', '219e1696abb36c8ba3a3afe7fb2f4b4606a897c7'),
        ('http://data.csail.mit.edu/places/ADEchallenge/release_test.zip', 'e05747892219d10e9243933371a497e905a4860c'),
    ]
    download_dir = os.path.join(path, 'downloads')
    makedirs(download_dir)
    for url, checksum in _AUG_DOWNLOAD_URLS:
        filename = download(url, path=download_dir, overwrite=overwrite, sha1_hash=checksum)
        # extract
        with zipfile.ZipFile(filename, "r") as zip_ref:
            zip_ref.extractall(path=path)
def download_aug(path, overwrite=False):
    _AUG_DOWNLOAD_URLS = [
        ('http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz', '7129e0a480c2d6afb02b517bb18ac54283bfaa35')]
    makedirs(path)
    for url, checksum in _AUG_DOWNLOAD_URLS:
        filename = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        # extract
        with tarfile.open(filename) as tar:
            tar.extractall(path=path)
            shutil.move(os.path.join(path, 'benchmark_RELEASE'),
                        os.path.join(path, 'VOCaug'))
            filenames = ['VOCaug/dataset/train.txt', 'VOCaug/dataset/val.txt']
            # generate trainval.txt
            with open(os.path.join(path, 'VOCaug/dataset/trainval.txt'), 'w') as outfile:
                for fname in filenames:
                    fname = os.path.join(path, fname)
                    with open(fname) as infile:
                        for line in infile:
                            outfile.write(line)
def download_coco(path, overwrite=False):
    _DOWNLOAD_URLS = [
        ('http://images.cocodataset.org/zips/train2017.zip',
         '10ad623668ab00c62c096f0ed636d6aff41faca5'),
        ('http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
         '8551ee4bb5860311e79dace7e79cb91e432e78b3'),
        ('http://images.cocodataset.org/zips/val2017.zip',
         '4950dc9d00dbe1c933ee0170f5797584351d2a41'),
        # ('http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip',
        #  '46cdcf715b6b4f67e980b529534e79c2edffe084'),
        # test2017.zip, for those who want to attend the competition.
        # ('http://images.cocodataset.org/zips/test2017.zip',
        #  '4e443f8a2eca6b1dac8a6c57641b67dd40621a49'),
    ]
    makedirs(path)
    for url, checksum in _DOWNLOAD_URLS:
        filename = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        # extract
        with zipfile.ZipFile(filename) as zf:
            zf.extractall(path=path)
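# A minimal, hypothetical entry point for the download helper above. The
# original script's argument parsing is not shown in this excerpt, so the
# flag names and defaults here are illustrative only.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Download COCO 2017 to a target directory.')
    parser.add_argument('--download-dir', type=str, default='~/mscoco',
                        help='directory to store the downloaded dataset (assumed flag name)')
    parser.add_argument('--overwrite', action='store_true',
                        help='re-download files even if they already exist')
    args = parser.parse_args()
    download_coco(os.path.expanduser(args.download_dir), overwrite=args.overwrite)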
#
# First, we download the video and sample the video frames at a speed of 1 frame per second.
#
# .. raw:: html
#
#     <div align="center">
#         <img src="../../_static/action_basketball_demo.gif">
#     </div>
#
#     <br>
from gluoncv.utils import try_import_cv2
cv2 = try_import_cv2()
url = 'https://github.com/bryanyzhu/tiny-ucf101/raw/master/v_Basketball_g01_c01.avi'
video_fname = utils.download(url)
cap = cv2.VideoCapture(video_fname)
cnt = 0
video_frames = []
while cap.isOpened():
    ret, frame = cap.read()
    cnt += 1
    if ret and cnt % 25 == 0:
        video_frames.append(frame)
    if not ret:
        break
cap.release()
print('We evenly extract %d frames from the video %s.' % (len(video_frames), video_fname))
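################################################################
# What follows is a minimal, hypothetical sketch of the next step: preprocess
# the sampled frames and classify the clip with a pretrained Kinetics400 model
# from the GluonCV model zoo. The preprocessing uses plain cv2/numpy (resize to
# 224x224, ImageNet mean/std normalization) rather than the tutorial's exact
# transform, and it assumes the network accepts a clip shaped
# (batch, channel, time, height, width) with this temporal length.
import numpy as np
import mxnet as mx
from gluoncv.model_zoo import get_model

frames = [cv2.resize(f, (224, 224))[:, :, ::-1] for f in video_frames]  # BGR -> RGB
clip = np.stack(frames).astype('float32') / 255.0                       # (T, H, W, C)
clip = (clip - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
clip = clip.transpose((3, 0, 1, 2))[np.newaxis]                          # (1, C, T, H, W)

net = get_model('i3d_resnet50_v1_kinetics400', pretrained=True)
pred = net(mx.nd.array(clip))
top5 = mx.nd.topk(pred, k=5)[0].astype('int').asnumpy()
for idx in top5:
    print(net.classes[idx])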
################################################################
#
print('image shape:', first_img.shape)
print('Label example:')
print(record_dataset[0][1])
##############################################################################
#
# .. _pascal_voc_like:
#
# 2. Derive from PASCAL VOC format
# --------------------------------
# If you have a custom dataset that fully complies with the `Pascal VOC `_ object detection format,
# that is good news, because it can be adapted to the GluonCV format very quickly.
#
# We provide a template for you to peek at the structure.
fname = utils.download('https://github.com/dmlc/web-data/blob/master/gluoncv/datasets/VOCtemplate.zip?raw=true', 'VOCtemplate.zip')
with zipfile.ZipFile(fname) as zf:
    zf.extractall('.')
##############################################################################
# A VOC-like dataset will have the following structure:
#
"""
VOCtemplate
└── VOC2018
├── Annotations
│ └── 000001.xml
├── ImageSets
│ └── Main
│ └── train.txt
└── JPEGImages
└── 000001.jpg
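##############################################################################
# With a structure like the one above, the dataset can be loaded by deriving
# from ``VOCDetection``. The snippet below is a minimal sketch assuming the
# template downloaded above and a hypothetical two-class label set; adjust
# ``CLASSES`` and the split year/name to match your own data.
from gluoncv.data import VOCDetection

class VOCLike(VOCDetection):
    CLASSES = ['person', 'dog']

    def __init__(self, root, splits, transform=None, index_map=None, preload_label=True):
        super(VOCLike, self).__init__(root, splits, transform, index_map, preload_label)

dataset = VOCLike(root='VOCtemplate', splits=((2018, 'train'),))
print('length of dataset:', len(dataset))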
def build_rec_process(img_dir, train=False, num_thread=1):
    from gluoncv.utils import download, makedirs
    rec_dir = os.path.abspath(os.path.join(img_dir, '../rec'))
    makedirs(rec_dir)
    prefix = 'train' if train else 'val'
    print('Building ImageRecord file for ' + prefix + ' ...')
    # to_path = rec_dir

    # download lst file and im2rec script
    script_path = os.path.join(rec_dir, 'im2rec.py')
    script_url = 'https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py'
    download(script_url, script_path)

    lst_path = os.path.join(rec_dir, prefix + '.lst')
    lst_url = 'http://data.mxnet.io/models/imagenet/resnet/' + prefix + '.lst'
    download(lst_url, lst_path)

    # execution
    import sys
    cmd = [
        sys.executable,
        script_path,
        rec_dir,
        img_dir,
        '--recursive',
        '--pass-through',
        '--pack-label',
        '--num-thread',