How to use the dlib.hit_enter_to_continue function

To help you get started, we’ve selected a few dlib examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github ghostwriternr / lowpolify / scripts / lowpolify.py View on Github external
# pts = my_reduce(pts, 5)
    # print(len(pts))
    # NOTE(review): excerpt from inside a larger function -- 'pts', 'im',
    # 'show', 'detector', 'predictor' and 'win' are defined above this view,
    # and the first line's indentation looks mangled by extraction.
    # Run the dlib face detector; the second argument upsamples the image once.
    dets = detector(im, 1)
    # print("Number of faces detected: {}".format(len(dets)))
    if show:
        win.clear_overlay()
        win.set_image(im)
    # Stack every landmark (x, y) of every detected face onto 'pts'.
    for k, d in enumerate(dets):
        shape = predictor(im, d)
        for i in range(shape.num_parts):
            pts = np.vstack([pts, [shape.part(i).x, shape.part(i).y]])
        if show:
            win.add_overlay(shape)
    if show:
        win.add_overlay(dets)
        # Block until the user presses ENTER so the overlay can be inspected.
        dlib.hit_enter_to_continue()
    # Construct Delaunay Triangulation from these set of points.
    # Reference: https://en.wikipedia.org/wiki/Delaunay_triangulation
    tris = Delaunay(pts, incremental=True)
    # tris_vertices = pts[tris.simplices]
    # for tri in range(tris_vertices.shape[0]):
    #     x_coords = []
    #     y_coords = []
    #     print(tris_vertices[tri])
    #     for coord in range(tris_vertices.shape[1]):
    #         x_coords.append(tris_vertices[tri][coord][0])
    #         y_coords.append(tris_vertices[tri][coord][1])
    # divideHighVariance(tris, im)
    # Finalize the incremental triangulation before handing it back.
    tris.close()
    # exit(0)
    # Return triangulation
    return tris
github mit-nlp / MITIE / python_examples / train_object_detector.py View on Github external
# results.
# Walk every JPEG in the faces folder, run the detector on it, and show the
# hits in a dlib window, pausing for ENTER between images.
print("Showing detections on the images in the faces folder...")
win = dlib.image_window()

jpg_pattern = os.path.join(faces_folder, "*.jpg")
detection_fmt = "Detection {}: Left: {} Top: {} Right: {} Bottom: {}"
for image_path in glob.glob(jpg_pattern):
    print("Processing file: {}".format(image_path))
    img = io.imread(image_path)
    dets = detector(img)
    print("Number of faces detected: {}".format(len(dets)))
    for det_index, det in enumerate(dets):
        print(detection_fmt.format(
            det_index, det.left(), det.top(), det.right(), det.bottom()))

    # Replace the previous image/overlays, then wait for the user.
    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()







# Finally, note that you don't have to use the XML based input to
# train_simple_object_detector().  If you have already loaded your training
# images and bounding boxes for the objects then you can call it as shown
# below.

# You just need to put your images into a list.
images = [
    io.imread(faces_folder + name)
    for name in ('/2008_002506.jpg', '/2009_004587.jpg')
]
# Then for each image you make a list of rectangles which give the pixel
github ck090 / FaceRecognition_DeepNeuralNetworks / dlib2a.py View on Github external
# Outline each detected face and its fitted landmarks in the display window.
for face_index, rect in enumerate(detected_faces):
    # Each detection is a rectangle exposing left/top/right/bottom edges.
    print("- Face #{} found at Left: {} Top: {} Right: {} Bottom: {}".format(
        face_index, rect.left(), rect.top(), rect.right(), rect.bottom()))

    # Draw a box around this face.
    win.add_overlay(rect)

    # Fit the landmark model to this face region.
    pose_landmarks = face_pose_predictor(image, rect)

    # Overlay the landmarks as well.
    win.add_overlay(pose_landmarks)

# Keep the window open until the user presses ENTER.
dlib.hit_enter_to_continue()
github mit-nlp / MITIE / dlib / python_examples / face_landmark_detection.py View on Github external
# second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    # NOTE(review): excerpt from inside a per-image loop -- 'img', 'detector',
    # 'predictor' and 'win' are defined earlier in the full script.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))
        # Get the landmarks/parts for the face in box d.
        shape = predictor(img, d)
        print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
                                                  shape.part(1)))
        # Draw the face landmarks on the screen.
        win.add_overlay(shape)

    # Also draw the detector's boxes, then wait for ENTER before moving on.
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()
github Jim-Lin / like-japanavgirls / dark-classifier / util / find-biggest-face-landmarks.py View on Github external
left = 0 if (biggest_face.left()-EXTEND) < 0 else (biggest_face.left()-EXTEND)
	# NOTE(review): excerpt from inside a larger block -- 'biggest_face',
	# 'EXTEND', 'top', 'bottom', 'width', 'image' and 'win' come from code
	# above this view; the first line's indentation looks mangled.
	# Clamp the extended right edge to the image width.
	right = width if (biggest_face.right()+EXTEND) > width else (biggest_face.right()+EXTEND)
	# Crop the (clamped, padded) face region and save it to disk.
	crop = image[top:bottom, left:right]
	io.imsave("cropped.jpg", crop)

	face_rect = biggest_face
	# Draw a box around each face we found
	win.add_overlay(face_rect)

	# Get the face's pose
	pose_landmarks = face_pose_predictor(image, face_rect)

	# Draw the face landmarks on the screen.
	win.add_overlay(pose_landmarks)

# Block until the user presses ENTER so the window stays visible.
dlib.hit_enter_to_continue()
github anubhavshrimal / Face-Recognition / demo-python-files / find_face.py View on Github external
# Display the image in a desktop window.
win.set_image(image)

# Report and outline every detected face.
face_fmt = "- Face #{} found at Left: {} Top: {} Right: {} Bottom: {}"
for idx, rect in enumerate(detected_faces):
    # Each detection exposes its top/left/right/bottom edge coordinates.
    print(face_fmt.format(idx, rect.left(), rect.top(),
                          rect.right(), rect.bottom()))

    # Draw a box around this face.
    win.add_overlay(rect)

# Wait until the user hits ENTER to close the window.
dlib.hit_enter_to_continue()
github coneypo / Dlib_examples / face_detector / face_detector_v1.py View on Github external
win.set_image(img)

# Run the face detector on the image; the second argument upsamples once.
faces = detector(img, 1)

print("人脸数 / faces in all:", len(faces))

# Print the bounding-box coordinates of every detected face.
for face_no, box in enumerate(faces):
    print("第", face_no + 1, "个人脸的矩形框坐标:",
          "left:", box.left(), '\t', "right:", box.right(), '\t',
          "top:", box.top(), '\t', "bottom:", box.bottom())

# Draw the rectangle outlines in the window.
win.add_overlay(faces)

# Keep the window open until the user presses ENTER.
dlib.hit_enter_to_continue()
github worldveil / photomosaic / face_montage.py View on Github external
# extract keypoints
        # NOTE(review): excerpt from inside a per-image loop -- 'resized',
        # 'rect', 'path', 'keypoint_finder', 'face_embedder',
        # 'num_embedding_jitters', 'is_target_face', 'matches', 'interactive',
        # 'win' and 'seen_paths' are defined above this view.
        keypoints = keypoint_finder(resized, rect)

        # embed the face in 128D
        embedding = np.array(face_embedder.compute_face_descriptor(resized, keypoints, num_embedding_jitters)).reshape(1, -1)
        
        if is_target_face(embedding):
            matches.append((resized, Image(path), path, rect, keypoints))
            if len(matches) % 5 == 0:
                print("Have found %s matches so far" % len(matches))
            if interactive:
                win.clear_overlay()
                # Channel reorder [2, 1, 0] -- presumably BGR -> RGB for
                # display; confirm against how 'resized' was loaded.
                win.set_image(resized[:, :, [2, 1, 0]])
                win.add_overlay(rect)
                win.add_overlay(keypoints)
                dlib.hit_enter_to_continue()
        elif interactive:
            # Non-matching face: still show the frame (no overlays) and pause.
            win.clear_overlay()
            win.set_image(resized[:, :, [2, 1, 0]])
            dlib.hit_enter_to_continue()

    seen_paths.add(path)

# save as temporary measure
with open('cache/matches.pkl', 'wb') as pf:
    pickle.dump(matches, pf)

# now that we have matches, we can actually create our montage
# first load each image and align
aligned_images = []
def get_taken_at_sort_key(m):
github mit-nlp / MITIE / python_examples / face_landmark_detection.py View on Github external
# second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    # NOTE(review): excerpt from inside a per-image loop -- 'img', 'detector',
    # 'predictor' and 'win' are defined earlier in the full script.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))
        # Get the landmarks/parts for the face in box d.
        shape = predictor(img, d)
        print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
                                                  shape.part(1)))
        # Draw the face landmarks on the screen.
        win.add_overlay(shape)

    # Also draw the detector's boxes, then wait for ENTER before moving on.
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()
github mit-nlp / MITIE / python_examples / train_shape_predictor.py View on Github external
# second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    # NOTE(review): excerpt from inside a per-image loop -- 'img', 'detector',
    # 'predictor' and 'win' are defined earlier in the full script.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))
        # Get the landmarks/parts for the face in box d.
        shape = predictor(img, d)
        print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
                                                  shape.part(1)))
        # Draw the face landmarks on the screen.
        win.add_overlay(shape)

    # Also draw the detector's boxes, then wait for ENTER before moving on.
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()