# import and create face feature extractor
import face_features
feature_extractor = face_features.FaceFeatureExtractor()
# Compute features for all image paths in args.images_list
all_feats = {'paths': [], 'rois': [], 'feats': []}
with open(args.images_list) as fin:
for img_path in fin:
img_path = img_path.replace('\n', '')
if len(img_path) > 0:
full_path = os.path.join(args.dataset_base_path, img_path)
print ('Computing features for file %s' % (full_path))
# read image
img = imutils.acquire_image(full_path)
# run face detector
detections = face_detector.detect_faces(img)
if numpy.all(detections != None):
for det in detections:
# The coordinates should already be integers, but some basic
# conversion is needed for compatibility with all face detectors.
# We also have to get rid of the detection score det[4]
det = [int(det[0]), int(det[1]), int(det[2]), int(det[3])]
# crop image to detected face area.
crop_img = img[det[1]:det[3], det[0]:det[2], :]
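# Hedged sketch of how this loop might continue (the snippet is cut off here):
# the face crop is fed to the feature extractor created above and the result is
# accumulated in all_feats. feature_compute() and the all_feats keys appear in
# the surrounding snippets; the exact bookkeeping is an assumption.
feat = feature_extractor.feature_compute(crop_img)
all_feats['paths'].append(img_path)
all_feats['rois'].append(det)
all_feats['feats'].append(feat)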
else:
from_dataset = False
roi = None
uri = -1
# create empty dictionary for the image information
img = dict()
# if the image is brand new ..
if uri == -1:
# and no roi was specified ...
if roi is None:
# read image
theim = imutils.acquire_image(impath)
# run face detector, but only get the best detection.
# multiple detections are not supported for on-the-fly training images
det = self.face_detector.detect_faces(theim, return_best=True)
if numpy.all(det != None):
# if a face is found, save it
print ('Single ROI detected')
# The coordinates should already be integers, but some basic
# conversion is needed for compatibility with all face detectors.
# We also have to get rid of the detection score det[4]
det = [int(det[0][0]), int(det[0][1]), int(det[0][2]), int(det[0][3])]
print ('final det ' + str(det))
img["path"] = impath
img["roi"] = det
if 'uri' in req_params['extra_params']:
uri = req_params['extra_params']['uri']
else:
uri = -1
if 'roi' in req_params['extra_params']:
# if request specifies a ROI ...
roi = req_params['extra_params']['roi']
roi = numpy.array([int(x) for x in roi]).reshape(len(roi)//2, 2)
xl, yl = roi.min(axis=0)
xu, yu = roi.max(axis=0)
roi = [xl, yl, xu, yu]
print ('Request specifies ROI ' + str(roi))
# ... check there is a face in the ROI
theim = imutils.acquire_image(impath)
crop_img = theim[yl:yu, xl:xu, :]
det = self.face_detector.detect_faces(crop_img, return_best=True)
if numpy.all(det == None):
print ('No detection found in specified ROI')
return self.prepare_success_json_str_(False)
else:
# If found, replace the previous with a more accurate one
det = det[0]
# The coordinates should already be integers, but some basic
# conversion is needed for compatibility with all face detectors.
# We also have to get rid of the detection score det[4]
det = [int(det[0]), int(det[1]), int(det[2]), int(det[3])]
roi = [det[0]+xl, det[1]+yl, det[2]+xl, det[3]+yl]
print ('Automatically adjusting ROI to more accurate region ' + str(roi))
else:
roi = None
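# Self-contained illustration (not part of the original snippets) of the ROI
# normalisation used above: a flat list of polygon point coordinates is reshaped
# into (x, y) pairs and reduced to an axis-aligned [xl, yl, xu, yu] box.
# The sample values below are made up for illustration.
import numpy

roi = [30, 40, 90, 40, 90, 120, 30, 120]  # x,y pairs of a 4-point polygon
pts = numpy.array([int(x) for x in roi]).reshape(len(roi) // 2, 2)
xl, yl = pts.min(axis=0)
xu, yu = pts.max(axis=0)
print([xl, yl, xu, yu])  # -> [30, 40, 90, 120]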
# 'shot' is assumed to hold the (begin, end) frame names of the current shot
shot_begin = shot[0] + '.jpg'
shot_end = shot[1] + '.jpg'
shot_begin_index = video_frames_list.index(shot_begin)
shot_end_index = video_frames_list.index(shot_end)
shot_detections = []
shot_tracks = []
shot_images = []
#####
# Compute face detections in shot
#####
for index in range(shot_begin_index, shot_end_index+1):
img_name = video_frames_list[index]
full_path = os.path.join(args.video_frames_path, img_name)
# read image
img = imutils.acquire_image(full_path)
shot_images.append(img)
# run face detector
detections = face_detector.detect_faces(img)
shot_detections.append(detections)
if numpy.all(detections != None):
shot_tracks.append([-1] * len(detections)) # init all track numbers with -1 ...
else:
shot_tracks.append(None) # ... or None if there are no detections
#####
# Compute face tracks in shot
#####
# The code below uses two pointers to the array of images: index A and index B.
# Index A points to the current image
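# Hedged sketch (an assumption, not the snippet's actual two-pointer tracker) of
# one way detections in consecutive frames can be linked into face tracks:
# two boxes are joined when their intersection-over-union exceeds a threshold.
def bbox_iou(a, b):
    # a and b are [x1, y1, x2, y2] boxes
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - inter
    return inter / float(union) if union > 0 else 0.0

def link_detections(prev_dets, cur_dets, iou_thresh=0.5):
    # for every current detection, return the index of the best-matching
    # previous detection, or -1 if nothing overlaps enough
    matches = []
    for cur in cur_dets:
        best, best_iou = -1, iou_thresh
        for idx, prev in enumerate(prev_dets):
            iou = bbox_iou(prev, cur)
            if iou > best_iou:
                best, best_iou = idx, iou
        matches.append(best)
    return matches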
"""
Body of the thread that runs the face feature extraction for
a list of images.
Arguments:
image_list: List of images to be processed. Each item in the list corresponds to a dictionary with
at least two keys: "path" and "roi". The "path" should contain the full path to the image
file to be processed and "roi" the coordinates of the bounding-box of a face detected on
the image.
"""
list_of_feats = []
if len(image_list) > 0:
try:
# init feature extractor
feature_extractor = face_features.FaceFeatureExtractor()
for image in image_list:
# read image
theim = imutils.acquire_image(image["path"])
det = image["roi"]
# crop image to face detection bounding-box
crop_img = theim[det[1]:det[3], det[0]:det[2], :]
# extract features
feat = feature_extractor.feature_compute(crop_img)
# reshape for compatibility with ranking function
feat = numpy.reshape(feat, (1, settings.FEATURES_VECTOR_SIZE))
# add to list of features to be returned
list_of_feats.append(feat)
except Exception as e:
print ('Exception in group_feature_extractor: ' + str(e))
list_of_feats = []
pass
return list_of_feats
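# Hedged usage sketch (the surrounding framework is an assumption): running the
# thread body above, here referred to by the name group_feature_extractor taken
# from the exception message, in a worker thread. The image paths and ROIs are
# placeholders; each item carries the "path" and "roi" keys described in the
# docstring.
import threading

results = []
image_list = [
    {"path": "/data/images/person_001.jpg", "roi": [34, 50, 120, 150]},
    {"path": "/data/images/person_002.jpg", "roi": [10, 22, 96, 130]},
]
worker = threading.Thread(
    target=lambda: results.extend(group_feature_extractor(image_list)))
worker.start()
worker.join()
print('Extracted %d feature vectors' % len(results))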