How to use the dlib.face_recognition_model_v1 function in dlib

To help you get started, we’ve selected a few dlib examples based on popular ways it’s used in public projects.

github incluit / OpenVino-Driver-Behaviour / third-party / dlib / python_examples / face_recognition.py
"   ./face_recognition.py shape_predictor_5_face_landmarks.dat dlib_face_recognition_resnet_model_v1.dat ../examples/faces\n"
        "You can download a trained facial shape predictor and recognition model from:\n"
        "    http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\n"
        "    http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2")
    exit()

predictor_path = sys.argv[1]
face_rec_model_path = sys.argv[2]
faces_folder_path = sys.argv[3]

# Load all the models we need: a detector to find the faces, a shape predictor
# to find face landmarks so we can precisely localize the face, and finally the
# face recognition model.
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)

win = dlib.image_window()

# Now process all the images
for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
    print("Processing file: {}".format(f))
    img = dlib.load_rgb_image(f)

    win.clear_overlay()
    win.set_image(img)

    # Ask the detector to find the bounding boxes of each face. The 1 in the
    # second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
github worldveil / photomosaic / face_montage.py
"""
Usage:
    
    run face_montage.py \
        --target-face-dir media/faces/will \
        --other-face-dir media/faces/other \
        --photos-dir media/pics \
        --output-size 800 \
        --savedir media/output/montage_will/ \
        --sort-by-photo-age
"""

# load detector, keypoints, and face embedder
face_detector = dlib.get_frontal_face_detector()
keypoint_finder = dlib.shape_predictor(faces.WEIGHTS_2_PATH['landmarks_5'])
face_embedder = dlib.face_recognition_model_v1(faces.WEIGHTS_2_PATH['face_recognition'])

p = argparse.ArgumentParser()

# required
p.add_argument("--target-face-dir", dest='target_face_dir', type=str, required=True, help="We'll train a model on the single face in photos in this directory")
p.add_argument("--other-face-dir", dest='other_face_dir', type=str, required=True, help="Directory of negative examples of other faces")
p.add_argument("--photos-dir", dest='photos_dir', type=str, required=True, help="Directory of photos to use in the actual montage")
p.add_argument("--output-size", dest='output_size', type=int, required=True, help="Dimensions of the square images to output")
p.add_argument("--savedir", dest='savedir', type=str, required=True, help="Directory where to save the face-aligned images")

p.add_argument("--start-closeness", dest='start_closeness', type=float, default=0.4, help="Starting closenness (in range 0.2 - 0.49)")
p.add_argument("--end-closeness", dest='end_closeness', type=float, default=0.49, help="Ending closeness (in range 0.2 - 0.49)")

# optional
p.add_argument("--sort-by-photo-age", dest='sort_by_photo_age', action='store_true', default=False, help="Should we sort by photo age? Otherwise random order.")
github mrlathome / faceutils / ros / scripts / face_api.py
import dlib
import os
import cv2
import random
import time
import pickle

_base_dir = os.path.dirname(__file__)
_data_dir = os.path.join(_base_dir, "data")

_detector = dlib.get_frontal_face_detector()
_predictor = dlib.shape_predictor(
    os.path.join(_data_dir, "shape_predictor_68_face_landmarks.dat"))
_recognizer = dlib.face_recognition_model_v1(
    os.path.join(_data_dir, "dlib_face_recognition_resnet_model_v1.dat"))

# Pickle files must be opened in binary mode under Python 3
with open(os.path.join(_data_dir, "gender_model.pickle"), 'rb') as f:
    _classifier = pickle.load(f)


def predict_gender(encoding):
    result = _classifier(dlib.vector(encoding))
    if result > 0.5:
        return "male"

    if result < -0.5:
        return "female"

    return "unknown"
github inspurer / WorkAttendanceSystem / V1.0 / face_recognize_punchcard.py
import dlib          # dlib, the face recognition library
import numpy as np   # NumPy, for numerical data processing
import cv2           # OpenCV, for image processing
import pandas as pd  # Pandas, for data processing
import wx
import os
import csv
import datetime
import _thread

# face recognition model, the object maps human faces into 128D vectors
facerec = dlib.face_recognition_model_v1("model/dlib_face_recognition_resnet_model_v1.dat")

# Dlib face detector and landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('model/shape_predictor_68_face_landmarks.dat')

loading = 'icon/loading.png'
pun_fail = 'icon/pun_fail.png'
pun_repeat = 'icon/pun_repeat.png'
pun_success = 'icon/pun_success.png'

path_logcat_csv = "data/logcat.csv"
def read_csv_to_recoders():
    recodes = []
    if os.path.exists(path_logcat_csv):
        with open(path_logcat_csv, "r", newline="") as csvfiler:
            reader = csv.reader(csvfiler)
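
Further down, the punch-card flow compares a live 128D feature against the stored ones; the comparison itself is just Euclidean distance under a cutoff. An illustrative helper (the name, and the 0.6 cutoff dlib's author suggests, are not from this snippet):

def is_same_person(feature_a, feature_b, threshold=0.6):
    # Illustrative: descriptors of the same person usually fall below 0.6
    return np.linalg.norm(np.array(feature_a) - np.array(feature_b)) < threshold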
github kaschmo / sh_face_rec / sh_face_rec / facerecognizer.py
    # MTCNN detector
    MTCNN_face_detector = MTCNNDetector(minsize = cf.getint('MTCNN_MINSIZE'), thresholds = [0.6, 0.7, 0.8], scale_factor = cf.getfloat('MTCNN_SCALE_FACTOR'), bb_margin = cf.getint('MTCNN_BB_MARGIN'))

    # Models for landmark detection
    predictor_68_point_model = model_path + "shape_predictor_68_face_landmarks.dat"
    #pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)
    predictor_5_point_model = model_path + "shape_predictor_5_face_landmarks.dat"
    #pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)
    if cf['LANDMARK_DET'] == "68_point":
        pose_predictor = dlib.shape_predictor(predictor_68_point_model)
    else:
        pose_predictor = dlib.shape_predictor(predictor_5_point_model)
    # Model for generating the 128D face embedding
    face_recognition_model = model_path + "dlib_face_recognition_resnet_model_v1.dat"
    face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
    face_encoding_num_jitters = 1  # how many times to resample when encoding

    # Model file for the KNN classifier
    knn_model_path = model_path + cf['KNN_MODEL_NAME']
    # allowed distance for a positive recognition
    knn_distance_threshold = cf.getfloat('KNN_TRESHOLD')


    def __init__(self):
        fileConfig('sh_face_rec/logging.conf')
        self.logger = logging.getLogger("FaceRecognizer")
        self.logger.info("Initializing MTCNN Detector")
        #init required models        
        with open(self.knn_model_path, 'rb') as f:
            self.knn_clf = pickle.load(f)
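
face_encoding_num_jitters maps to the optional third argument of compute_face_descriptor: each jitter re-crops the face slightly and the resulting descriptors are averaged, trading speed for a small accuracy gain. A sketch, assuming `img` (an RGB array) and `shape` (a landmark detection result) exist:

descriptor = face_encoder.compute_face_descriptor(img, shape, face_encoding_num_jitters)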
github smitshilu / AISecurityCamera / start.py
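                # 0.6 is the Euclidean-distance cutoff dlib's author recommends for a match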
                if val < 0.6:
                    name = names[idx]

                print(name)
                # Save image for future use
                if name == "Unknown":
                    cv2.imwrite(os.path.join(os.getcwd(), "unknown", str(int(time.time())) + ".png"), frame)
                    pygame.mixer.music.play()
                else:
                    return


if __name__ == '__main__':
    detect_faces = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    face_rec = dlib.face_recognition_model_v1('dlib_face_recognition_resnet_model_v1.dat')

    video_capture = cv2.VideoCapture(0)
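    # Property ids 3 and 4 are cv2.CAP_PROP_FRAME_WIDTH and cv2.CAP_PROP_FRAME_HEIGHT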
    video_capture.set(3, 320)
    video_capture.set(4, 240)

    saved_face_descriptor = []
    names = []

    for face in glob.glob(os.path.join(os.getcwd(), "face_encodings", "*.npy")):
        temp = np.load(face)
        saved_face_descriptor.append(temp)
        names.append(os.path.basename(face[:-4]))
    
    while True:
        if motion_detected():
            main(video_capture, saved_face_descriptor, names, detect_faces, predictor, face_rec)
github coneypo / Dlib_face_recognition_from_camera / face_reco_from_camera.py
import dlib         # dlib, the face processing library
import numpy as np  # NumPy, for numerical data processing
import cv2          # OpenCV, for image processing
import pandas as pd # Pandas, for data processing
import os
import time
from PIL import Image, ImageDraw, ImageFont  # ImageDraw annotates existing images; ImageFont loads TrueType fonts

# 1. Dlib frontal face detector
detector = dlib.get_frontal_face_detector()

# 2. Dlib facial landmark predictor
predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat')

# 3. Dlib ResNet face recognition model, extracts 128D feature vectors
face_reco_model = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat")


class Face_Recognizer:
    def __init__(self):
        # Save the features of all faces registered in the database
        self.features_known_list = []

        # Save the names of known faces
        self.name_known_cnt = 0
        self.name_known_list = []

        # Save the positions and names of all faces captured in the current camera frame
        self.pos_camera_list = []
        self.name_camera_list = []
        # Number of faces captured in the current camera frame
        self.faces_cnt = 0
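
To populate features_known_list, each registered image has to pass through the three models loaded at module level. A minimal sketch of that extraction step (the helper name is illustrative, not from the repository):

def extract_128d_feature(path_img):
    # Illustrative helper: OpenCV loads BGR, dlib expects RGB
    img_rgb = cv2.cvtColor(cv2.imread(path_img), cv2.COLOR_BGR2RGB)
    faces = detector(img_rgb, 1)
    if not faces:
        return None
    shape = predictor(img_rgb, faces[0])
    return face_reco_model.compute_face_descriptor(img_rgb, shape)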
github smitshilu / AISecurityCamera / save_new_face.py
    else:
        print("Either no face or more than one face detected. Please check the image file again.")

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--file", type=str, help="give file for to extract face from it", required=True)
    parser.add_argument("-n", "--name", type=str, help="give name for the person", required=True)

    args = parser.parse_args()

    file = args.file
    person_name = args.name

    detect_faces = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    face_rec = dlib.face_recognition_model_v1('dlib_face_recognition_resnet_model_v1.dat')

    main(file, person_name, detect_faces, predictor)
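
The companion start.py loads .npy encodings from a face_encodings folder, so main() has to end in a saving step along these lines. A hedged sketch, not the repository's exact main() (assumes cv2, dlib, numpy as np, and os are imported):

def save_encoding(image_path, name):
    frame = cv2.imread(image_path)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detect_faces.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 1:
        # Convert the Haar box to a dlib rectangle, and BGR to RGB for dlib
        x, y, w, h = faces[0]
        rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        shape = predictor(rgb, rect)
        descriptor = np.array(face_rec.compute_face_descriptor(rgb, shape))
        np.save(os.path.join(os.getcwd(), "face_encodings", name + ".npy"), descriptor)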
github inspurer / WorkAttendanceSystem / V1.0 / useless / face_feature_storage.py
import dlib
from skimage import io
import csv
import numpy as np
import pandas as pd

path_face_img = "data/face_img_database/"
path_face_feature = "data/face_feature_database/"

# detector to find the faces
detector = dlib.get_frontal_face_detector()

# shape predictor to find the face landmarks
predictor = dlib.shape_predictor("model/shape_predictor_68_face_landmarks.dat")

# face recognition model, the object maps human faces into 128D vectors
facerec = dlib.face_recognition_model_v1("model/dlib_face_recognition_resnet_model_v1.dat")


# Return the 128D feature of a single face image
def return_128d_features(path_img):
    # skimage.io.imread already returns an RGB image, which is what dlib
    # expects, so no colour conversion is needed here
    img = io.imread(path_img)
    dets = detector(img, 1)

    print("Detecting faces in image:", path_img, "\n")
    # A cropped face image may no longer contain a detectable face,
    # so only compute features when a face was actually found
    if len(dets) != 0:
        shape = predictor(img, dets[0])
        face_descriptor = facerec.compute_face_descriptor(img, shape)
    else:
        face_descriptor = 0
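
The module imports csv and defines path_face_feature, so persisting a descriptor presumably reduces to writing its 128 floats as one CSV row. An illustrative sketch built on return_128d_features (the function name is not from the snippet):

def save_feature_to_csv(path_img, path_csv):
    feature = return_128d_features(path_img)
    if isinstance(feature, int):  # return_128d_features gives 0 when no face is found
        return False
    with open(path_csv, "w", newline="") as csvfile:
        csv.writer(csvfile).writerow(list(feature))
    return True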