def get_facial_landmark_vectors_from_frame(frame):
    # print('Fetching face detections and landmarks...')
    bboxes, landmarks = face_detection_model.update(frame)
    dets = []
    if bboxes is None:
        print('no detections')
        return (None, None)
    # assume only 1 face per frame
    facial_points = []
    for k, bbox in enumerate(bboxes):
        pre_b = dlib.rectangle(int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))
        dets.append(pre_b)
        shape = shape_predictor(frame, pre_b)
        if shape is None:
            continue
        for i in np.arange(0, 68):
            part = shape.part(i)
            # mouth_points.append((part2.x, part2.y))
            facial_points.append(part.x)
            facial_points.append(part.y)
        # stop after the first face that yielded landmarks
        if len(facial_points) > 0:
            break
    # print('Returning (' + str(len(dets)) + ', ' + str(len(facial_points)) + ')')
    return dets, facial_points
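# --- Sketch of the module-level objects assumed by get_facial_landmark_vectors_from_frame.
# The wrapper class and the model file path below are illustrative assumptions, not part of
# the original project; any detector exposing update(frame) -> (bboxes, landmarks) would do.
import dlib
import numpy as np

shape_predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")  # assumed path

class FrontalFaceDetectionModel:
    """Hypothetical stand-in for face_detection_model, built on dlib's HOG detector."""

    def __init__(self):
        self._detector = dlib.get_frontal_face_detector()

    def update(self, frame):
        rects = self._detector(frame, 1)
        if len(rects) == 0:
            return None, None
        bboxes = [(r.left(), r.top(), r.right(), r.bottom()) for r in rects]
        return bboxes, None  # landmarks are fitted afterwards by shape_predictor

face_detection_model = FrontalFaceDetectionModel()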
def detect_mouth_and_write(saveDir, wordFileName, frameNumer, frame, detector, predictor,
                           dontWriteMouthIfExists=True, prevFace=dlib.rectangle(30, 30, 220, 220),
                           verbose=False):
    # Image Name
    mouthImageName = os.path.join(saveDir, "/".join(wordFileName.split(
        "/")[-3:]).split('.')[0] + "_{0:02d}_mouth".format(frameNumer) + ".jpg")
    # If file is not supposed to be written if it exists
    if dontWriteMouthIfExists:
        # Check if file exists
        if os.path.isfile(mouthImageName):
            if verbose:
                print("Mouth image", mouthImageName,
                      "exists, so not detected. (detect_mouth_and_write)")
            # Return if file exists
            return prevFace
vehicle_vel_bool.append(0)
vehicle_frame_counter.append(0)
which_conflict.append([])
which_intersect.append([-1])

if points_ped:
    # initiate tracker
    tracker_ped = [dlib.correlation_tracker() for _ in xrange(len(points_ped))]
    # Provide the tracker the initial position of the object
    [tracker_ped[i].start_track(frame, dlib.rectangle(*rect)) for i, rect in enumerate(points_ped)]

if points_veh:
    # initiate tracker
    tracker_veh = [dlib.correlation_tracker() for _ in xrange(len(points_veh))]
    # Provide the tracker the initial position of the object
    [tracker_veh[i].start_track(frame, dlib.rectangle(*rect)) for i, rect in enumerate(points_veh)]

print "press 'r' to see output "
print "press 'q' to quit "

if cv2.waitKey(-1) & 0xFF == ord('r'):
    cv2.destroyWindow("Select objects to be tracked here.")
    cv2.destroyWindow("Objects to be tracked.")
    print "\nResumed\n"
    break

if cv2.waitKey(-1) & 0xFF == ord('q'):
    exit()

if points_ped or points_veh:
    if points_ped:
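# --- Hedged sketch of the per-frame update loop that typically follows the tracker
# initialisation above; `cap` (a cv2.VideoCapture) and the drawing step are assumptions.
import cv2

trackers = (tracker_ped if points_ped else []) + (tracker_veh if points_veh else [])
while True:
    ok, frame = cap.read()
    if not ok:
        break
    for tracker in trackers:
        tracker.update(frame)             # returns a tracking confidence score
        pos = tracker.get_position()      # dlib.drectangle with float coordinates
        cv2.rectangle(frame, (int(pos.left()), int(pos.top())),
                      (int(pos.right()), int(pos.bottom())), (255, 0, 0), 2)
    cv2.imshow("tracking", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break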
def do_alignment(img, bbox):
    fh, temp_file = tempfile.mkstemp('.jpg')
    os.close(fh)
    temp_file_no_ext = ".".join(temp_file.rsplit('.')[:-1])
    d = dlib.rectangle(bbox.x, bbox.y, bbox.x + bbox.w, bbox.y + bbox.h)
    # num_pts = len(source_pts)
    num_pts = 5
    try:
        landmark_predictor = landmark_predictors[str(num_pts)]
    except KeyError:
        raise Exception("Incorrect number of landmarks")
    detection_object = landmark_predictor(img, d)
    chip_size = 150
    border = 0.2
    dlib.save_face_chip(img, detection_object, temp_file_no_ext, chip_size, border)
    # Playing with OpenCV's geometric transforms - they don't work out of the box
    # due to faces not being a plane.
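# --- Sketch of the landmark_predictors lookup used by do_alignment, keyed by the number
# of points; the model file names are dlib's standard releases, the paths are assumptions.
import dlib

landmark_predictors = {
    "5": dlib.shape_predictor("shape_predictor_5_face_landmarks.dat"),
    "68": dlib.shape_predictor("shape_predictor_68_face_landmarks.dat"),
}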
# self.old_time = time.time()
_, image = self.cap.read()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
small = cv2.resize(gray, (0, 0), fx=self.imageScale, fy=self.imageScale)
if self.count % 4 == 0:
    rects = self.detector(small, 1)
self.count += 1
messages = ""
data = []
# loop over the face detections
if len(rects):
    rect = rects[0]
    # get points from the predictor
    # this part is important
    faceRectangle = dlib.rectangle(int(rect.left() / self.imageScale), int(rect.top() / self.imageScale),
                                   int(rect.right() / self.imageScale), int(rect.bottom() / self.imageScale))
    shape = self.predictor(gray, faceRectangle)
    shape = face_utils.shape_to_np(shape)
    ## till here
    data = []
    x = np.array(shape[27] - shape[30])
    reference_scale_i = 1 / (np.sqrt(x.dot(x)) * self.reference_scale_multiplier)
    # side of lips
    for i in [5, 9, 11, 48, 50, 52, 54, 56, 58, 61, 63, 65, 67]:  # range(50,67):
        x = np.array(shape[27] - shape[i])
        y = list(map(lambda z: z * reference_scale_i, x))
        data += y
    # print(len(data), data)
    messages = "shapes|"
    # loop over the (x, y)-coordinates for the facial landmarks
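# --- For context, roughly what face_utils.shape_to_np (imutils) does with the dlib
# full_object_detection returned by the predictor; a minimal equivalent sketch:
import numpy as np

def shape_to_np(shape, dtype="int"):
    coords = np.zeros((shape.num_parts, 2), dtype=dtype)
    for i in range(shape.num_parts):
        coords[i] = (shape.part(i).x, shape.part(i).y)
    return coords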
def performFaceDetection(img, scale=1.0):
    if scale != 1.0:
        img = cv2.resize(img, (0, 0), fx=scale, fy=scale)
    # perform CNN detection
    dets = dlib_detector(img, 1)
    # rescale boxes back to the original image coordinates
    return [dlib.rectangle(top=int(d.top() / scale),
                           bottom=int(d.bottom() / scale),
                           left=int(d.left() / scale),
                           right=int(d.right() / scale)) for d in dets]
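# --- Usage sketch for performFaceDetection; the detector and image path are assumptions
# (the comment above mentions CNN detection, but any dlib detector that returns plain
# rectangles, such as the HOG frontal face detector, fits this rescaling code).
import cv2
import dlib

dlib_detector = dlib.get_frontal_face_detector()
img = cv2.imread("group_photo.jpg")
faces = performFaceDetection(img, scale=0.5)  # detect on a half-size image, boxes come back full-size
for f in faces:
    cv2.rectangle(img, (f.left(), f.top()), (f.right(), f.bottom()), (0, 255, 0), 2)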
r"""
Convert a `menpo.shape.PointCloud` to a `dlib.rect`.
Parameters
----------
pg : `menpo.shape.PointDirectedGraph`
The Menpo PointDirectedGraph to convert into a rect. No check is done
to see if the PointDirectedGraph actually is a rectangle.
Returns
-------
bounding_rect : `dlib.rect`
A dlib Rectangle.
"""
min_p, max_p = pg.bounds()
return dlib.rectangle(left=int(min_p[1]), top=int(min_p[0]),
right=int(max_p[1]), bottom=int(max_p[0]))
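# --- The conversion above maps menpo's (y, x) point ordering onto dlib's
# (left, top, right, bottom); the reverse mapping is sketched here only to make the
# index swap explicit (rect_to_bounds is not part of the original code):
import numpy as np
import dlib

def rect_to_bounds(rect):
    min_p = np.array([rect.top(), rect.left()])      # (y_min, x_min)
    max_p = np.array([rect.bottom(), rect.right()])  # (y_max, x_max)
    return min_p, max_p

min_p, max_p = rect_to_bounds(dlib.rectangle(left=30, top=10, right=120, bottom=90))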
ret, frame = stream.read()
if ret is False:
    break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # dlib and opencv use different channel representations
detections = self._detect(frame, 0)
if len(detections) > 0:  # else the buffer will preserve the zeros initialisation
    bbox = detections[0]
    left, top, right, bottom = _get_bbox_corners(bbox, frame.shape, self._gpu)
    # print(left, top, right, bottom, frame.shape)
    if self._align is True:
        face_coords = dlib.rectangle(left, top, right, bottom)
        landmarks5 = self._fitter5(frame, face_coords)
        face_img = dlib.get_face_chip(frame, landmarks5, 256)
        face_img = np.asarray(face_img)
    else:
        face_img = frame[top:bottom, left:right]
        face_img = cv2.resize(face_img, (160, 160), interpolation=cv2.INTER_CUBIC)
    face_chip_area = dlib.rectangle(0, 0, face_img.shape[0], face_img.shape[1])
    landmarks68 = self._fitter68(face_img, face_chip_area)
    arr = _dlib_parts_to_numpy(landmarks68)[48:68]
    top_left, bottom_right = _get_array_bounds(arr, face_img.shape, border=self._border)
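# --- _dlib_parts_to_numpy is referenced but not shown in the excerpt; a plausible minimal
# implementation matching how it is used above (68-point landmarks -> (68, 2) array):
import numpy as np

def _dlib_parts_to_numpy(landmarks):
    return np.array([[p.x, p.y] for p in landmarks.parts()])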
def draw_object_boxes(read_image, response):
    height, width = read_image.shape[:2]
    for (i, r) in enumerate(response):
        # r["box"] holds a normalised [top, left, bottom, right] box
        box = dlib.rectangle(int(r["box"][1] * width), int(r["box"][0] * height),
                             int(r["box"][3] * width), int(r["box"][2] * height))
        top = box.top()
        right = box.right()
        bottom = box.bottom()
        left = box.left()
        cv2.rectangle(read_image, (left, top), (right, bottom), (0, 255, 0), 2)
        y = top - 15 if top - 15 > 15 else top + 15
        cv2.putText(read_image, r["category"], (left, y), cv2.FONT_HERSHEY_SIMPLEX,
                    0.75, (0, 255, 0), 2)
    return read_image
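# --- Usage sketch for draw_object_boxes; the image path and the response entry are
# assumptions, shaped to match the normalised [top, left, bottom, right] indexing above.
import cv2

image = cv2.imread("street.jpg")
response = [{"box": [0.10, 0.25, 0.60, 0.55], "category": "person"}]
annotated = draw_object_boxes(image, response)
cv2.imwrite("street_annotated.jpg", annotated)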
cv2.circle(img, pt_pos, 2, (0, 255, 0), 1)
face_descriptor = facerec.compute_face_descriptor(img, shape)
d_test2 = numpy.array(face_descriptor)
# compute the Euclidean distance to each known descriptor
dist = []
for i in descriptors:
    dist_ = numpy.linalg.norm(i - d_test2)
    dist.append(dist_)
if min(dist) > is_not_candidate:
    this_is = "Unknown"
else:
    num = dist.index(min(dist))  # index of the smallest distance
    this_is = candidate[num][0:4]
# print(min(dist))
left_top = (d.left(), d.top())
right_bottom = (d.right(), d.bottom())
cv2.rectangle(img, left_top, right_bottom, (0, 255, 0), 2, cv2.LINE_AA)
text_point = (d.left(), d.top() - 5)
cv2.putText(img, this_is, text_point, cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 2, 1)  # label the face
if this_is == file[0:4]:
    right_num += 1
else:
    print("Processing file: ", img_path, " ERROR !")
    cv2.imwrite(os.path.join(faceRect_ERROR_path, file + "_to_" + this_is + ".jpg"), img)
cv2.imwrite(os.path.join(faceRect_path, file), img)
count += 1
accuracy = right_num / count
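# --- Sketch of how the descriptors / candidate gallery used above could be built with
# dlib's face recognition model; the directory layout and model paths are assumptions.
import os
import dlib
import numpy

detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
facerec = dlib.face_recognition_model_v1("dlib_face_recognition_resnet_model_v1.dat")

descriptors, candidate = [], []
for name in os.listdir("gallery"):
    img = dlib.load_rgb_image(os.path.join("gallery", name))
    for d in detector(img, 1):
        shape = sp(img, d)
        descriptors.append(numpy.array(facerec.compute_face_descriptor(img, shape)))
        candidate.append(name)  # the filename prefix serves as the identity label above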