How to use the dlib.cnn_face_detection_model_v1 function in dlib

To help you get started, we’ve selected a few dlib examples based on popular ways it is used in public projects.
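A minimal, self-contained sketch of the API before the project snippets (the local model path is an assumption; the weights can be downloaded from http://dlib.net/files/mmod_human_face_detector.dat.bz2 and decompressed first):

import dlib

# assumed local path; fetch and bunzip2 mmod_human_face_detector.dat.bz2 beforehand
detector = dlib.cnn_face_detection_model_v1("mmod_human_face_detector.dat")

img = dlib.load_rgb_image("test.jpg")  # the detector expects an RGB numpy array
detections = detector(img, 1)          # second argument: upsample count before detecting

for d in detections:
    # each result is an mmod_rectangle: a plain rect plus a confidence score
    r = d.rect
    print(r.left(), r.top(), r.right(), r.bottom(), d.confidence)

Higher upsample counts find smaller faces, at a steep cost in time and (GPU) memory.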

github ageitgey / face_recognition / face_recognition / api.py
print("Please install `face_recognition_models` with this command before using `face_recognition`:\n")
    print("pip install git+https://github.com/ageitgey/face_recognition_models")
    quit()

ImageFile.LOAD_TRUNCATED_IMAGES = True

face_detector = dlib.get_frontal_face_detector()

predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)

predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location()
pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)

cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location()
cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

face_recognition_model = face_recognition_models.face_recognition_model_location()
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)


def _rect_to_css(rect):
    """
    Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order

    :param rect: a dlib 'rect' object
    :return: a plain tuple representation of the rect in (top, right, bottom, left) order
    """
    return rect.top(), rect.right(), rect.bottom(), rect.left()


def _css_to_rect(css):
    """
    Convert a tuple in (top, right, bottom, left) order to a dlib `rect` object

    :param css: plain tuple representation of the rect in (top, right, bottom, left) order
    :return: a dlib `rect` object
    """
    return dlib.rectangle(css[3], css[0], css[1], css[2])
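One caveat where these helpers meet the CNN detector: _rect_to_css expects a plain dlib.rectangle, while cnn_face_detector returns mmod_rectangle objects, so the wrapped rect must be unpacked first. A sketch, assuming img is an RGB numpy array:

detections = cnn_face_detector(img, 1)
css_boxes = [_rect_to_css(face.rect) for face in detections]  # note face.rect, not face itself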
github Team-Neighborhood / awesome-face-detection / dlib-cnn.py
from __future__ import print_function
import numpy as np
import cv2
import dlib
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--with_draw', help='draw the detections (True/False)', default='True')
args = parser.parse_args()

detector_cnn = dlib.cnn_face_detection_model_v1('./models/mmod_human_face_detector.dat')

bgr_img = cv2.imread('./test.jpg', 1)
print(bgr_img.shape)

### detection
list_time = []
for idx in range(10):
    rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
    start = cv2.getTickCount()
    (h, w) = bgr_img.shape[:2]

    rgb_img = cv2.resize(rgb_img, None, fx=0.5, fy=0.5)
    mmod_rects = detector_cnn(rgb_img, 1)

    time = (cv2.getTickCount() - start) / cv2.getTickFrequency() * 1000
    list_time.append(time)
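Because detection runs on the half-size image, the boxes must be scaled back up before they can be drawn on the original frame. A minimal sketch of that mapping, reusing the names from the snippet above:

scale = 1 / 0.5  # inverse of the resize factor used above
for mmod in mmod_rects:
    r = mmod.rect
    x1, y1 = int(r.left() * scale), int(r.top() * scale)
    x2, y2 = int(r.right() * scale), int(r.bottom() * scale)
    cv2.rectangle(bgr_img, (x1, y1), (x2, y2), (0, 255, 0), 2)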
github richmondu / libfaceid / libfaceid / detector.py
    def __init__(self, path, optimize, minfacesize):
        import dlib # lazy loading
        self._optimize = optimize
        self._minfacesize = minfacesize
        self._detector = dlib.cnn_face_detection_model_v1(path + 'mmod_human_face_detector.dat')
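The constructor above only loads the model. A detect method in the same style might run the detector and filter by the stored minimum face size; the method below is a hedged sketch, not libfaceid's actual API:

    def detect(self, frame):
        import cv2  # lazy import, matching the class's style
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV frames are BGR; dlib expects RGB
        faces = []
        for d in self._detector(rgb, 1):
            r = d.rect
            if r.width() >= self._minfacesize and r.height() >= self._minfacesize:
                faces.append((r.left(), r.top(), r.width(), r.height()))
        return faces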
github spmallick / learnopencv / FaceDetectionComparison / run-all.py
if DNN=="CAFFE":
    modelFile = "res10_300x300_ssd_iter_140000_fp16.caffemodel"
    configFile = "deploy.prototxt"
    net = cv2.dnn.readNetFromCaffe(configFile, modelFile)
else:
    modelFile = "opencv_face_detector_uint8.pb"
    configFile = "opencv_face_detector.pbtxt"
    net = cv2.dnn.readNetFromTensorflow(modelFile, configFile)

conf_threshold = 0.7

# DLIB HoG
hogFaceDetector = dlib.get_frontal_face_detector()

# DLIB MMOD
dnnFaceDetector = dlib.cnn_face_detection_model_v1("./mmod_human_face_detector.dat")


def detectFaceOpenCVHaar(faceCascade, frame, inHeight=300, inWidth=0):
    frameOpenCVHaar = frame.copy()
    frameHeight = frameOpenCVHaar.shape[0]
    frameWidth = frameOpenCVHaar.shape[1]
    if not inWidth:
        inWidth = int((frameWidth / frameHeight) * inHeight)

    scaleHeight = frameHeight / inHeight
    scaleWidth = frameWidth / inWidth

    frameOpenCVHaarSmall = cv2.resize(frameOpenCVHaar, (inWidth, inHeight))
    frameGray = cv2.cvtColor(frameOpenCVHaarSmall, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(frameGray)
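The same script defines analogous wrappers for the dlib detectors. A reconstruction of the MMOD version in the same style (a sketch, not the verbatim source):

def detectFaceDlibMMOD(detector, frame, inHeight=300, inWidth=0):
    frameDlibMMOD = frame.copy()
    frameHeight = frameDlibMMOD.shape[0]
    frameWidth = frameDlibMMOD.shape[1]
    if not inWidth:
        inWidth = int((frameWidth / frameHeight) * inHeight)

    scaleHeight = frameHeight / inHeight
    scaleWidth = frameWidth / inWidth

    frameSmall = cv2.resize(frameDlibMMOD, (inWidth, inHeight))
    frameRgb = cv2.cvtColor(frameSmall, cv2.COLOR_BGR2RGB)

    bboxes = []
    for faceRect in detector(frameRgb, 0):
        r = faceRect.rect
        # map the detection back to the original resolution
        x1, y1 = int(r.left() * scaleWidth), int(r.top() * scaleHeight)
        x2, y2 = int(r.right() * scaleWidth), int(r.bottom() * scaleHeight)
        bboxes.append([x1, y1, x2, y2])
        cv2.rectangle(frameDlibMMOD, (x1, y1), (x2, y2), (0, 255, 0), 2)
    return frameDlibMMOD, bboxes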
github yu4u / age-gender-estimation / lap / create_lap_dataset.py
def crop():
    detector_model_path = model_root.joinpath("mmod_human_face_detector.dat")

    if not detector_model_path.is_file():
        model_root.mkdir(parents=True, exist_ok=True)  # requires Python 3.5 or above
        detector_model_url = "http://dlib.net/files/mmod_human_face_detector.dat.bz2"
        detector_model_bz2 = str(detector_model_path) + ".bz2"
        print("downloading {}".format(detector_model_path.name))
        urllib.request.urlretrieve(detector_model_url, detector_model_bz2, reporthook)

        with open(detector_model_bz2, "rb") as source, open(str(detector_model_path), "wb") as dest:
            dest.write(bz2.decompress(source.read()))

    detector = dlib.cnn_face_detection_model_v1(str(detector_model_path))

    for image_dir, crop_dir in [[train_image_dir, train_crop_dir], [validation_image_dir, validation_crop_dir]]:
        for image_path in image_dir.glob("*.jpg"):
            frame = cv2.imread(str(image_path))
            img_h, img_w, _ = frame.shape
            factor = 800 / max(img_h, img_w)
            frame_resized = cv2.resize(frame, None, fx=factor, fy=factor)
            frame_rgb = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2RGB)
            dets = detector(frame_rgb, 1)

            if len(dets) != 1:
                print("{} faces were detected for {}".format(len(dets), image_path.name))
                rects = [[d.rect.left(), d.rect.right(), d.rect.top(), d.rect.bottom()] for d in dets]
                print(rects)
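When exactly one face is found, the crop itself is straightforward. A sketch of the happy path inside the loop above (the bound clamping and output path are assumptions):

            if len(dets) == 1:
                r = dets[0].rect
                # clamp to the image bounds; detections can extend slightly outside them
                x1, y1 = max(r.left(), 0), max(r.top(), 0)
                x2 = min(r.right(), frame_resized.shape[1])
                y2 = min(r.bottom(), frame_resized.shape[0])
                cv2.imwrite(str(crop_dir.joinpath(image_path.name)), frame_resized[y1:y2, x1:x2])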
github visionjo / FIW_KRT / src / scripts / dlib_cnn_face_detect_align.py
import os
import fiwtools.utils.io as myio
import dlib
from skimage import io
import glob
import pandas as pd


print("DLIB's CNN FACE DETECTOR: START")
dir_root = "Families_In_The_Wild/Database/"
dir_images = dir_root + "Images/"
dir_det_out = dir_root + "F0664/"

# instantiate CNN face detector
f_model = "~/Documents/dlib-19.6/mmod_human_face_detector.dat"
cnn_face_detector = dlib.cnn_face_detection_model_v1(f_model)

dir_fids = glob.glob(dir_images + "F0664/")
dir_fids.sort()

f_pids = glob.glob(dir_images + "F0664/P*.jpg")
f_pids.sort()

f_prefix = [f.replace(dir_images, "").replace("/", "_").replace(".jpg", "_face") for f in f_pids]

# f_pids =list( np.array(f_pids)[ids])
fids = [myio.file_base(myio.filepath(p)) for p in f_pids]
pids = [myio.file_base(p) for p in f_pids]
# f_prefix =list( np.array(f_prefix)[ids])
npids = len(f_pids)
print("Processing {} images".format(npids))
github distant-viewing / dvt / dvt / frame.py
        rect = rect_overlap(face, h)
        if (rect[0] < rect[2]) and (rect[3] < rect[1]):
            c_size = (face[2] - face[0]) * (face[1] - face[3])
            h_size = (h[2] - h[0]) * (h[1] - h[3])
            o_size = (rect[2] - rect[0]) * (rect[1] - rect[3])
            prop = o_size / (c_size + h_size - o_size)
            if overlap < prop:
                overlap = prop
                best_score = score

    return overlap, best_score


class FaceFrameAnnotator(FrameAnnotator):
    name = 'face'
    cfd = dlib.cnn_face_detection_model_v1(frm.cnn_face_detector_model_location())
    hfd = dlib.get_frontal_face_detector()

    def __init__(self):
        #prototxt = "/home/taylor/face-test/resnet50_256.prototxt"
        #caffemodel = "/home/taylor/face-test/resnet50_256.caffemodel"
        prototxt = "/home/taylor/face-test/caffe_model/resnet50_scratch_caffe/resnet50_scratch.prototxt"
        caffemodel = "/home/taylor/face-test/caffe_model/resnet50_scratch_caffe/resnet50_scratch.caffemodel"

        caffe.set_device(0)
        caffe.set_mode_gpu()
        self.net = caffe.Net(prototxt, 1, weights=caffemodel)
        self.net.blobs['data'].reshape(1, 3, 224, 224)

        self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1))
        self.transformer.set_mean('data', np.array([91.4953, 103.8827, 131.0912]))
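The prop computed near the top of this snippet is the standard intersection-over-union between two boxes in (top, right, bottom, left) order. Restated on its own:

def iou(a, b):
    # a and b are boxes in (top, right, bottom, left) order, as in the snippet above
    top, right = max(a[0], b[0]), min(a[1], b[1])
    bottom, left = min(a[2], b[2]), max(a[3], b[3])
    if top >= bottom or left >= right:
        return 0.0
    inter = (bottom - top) * (right - left)
    area_a = (a[2] - a[0]) * (a[1] - a[3])
    area_b = (b[2] - b[0]) * (b[1] - b[3])
    return inter / (area_a + area_b - inter)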
github deepfakes / faceswap / lib / FaceLandmarksExtractor / FaceLandmarksExtractor.py
def initialize(detector, scale_to=2048):
    global dlib_detectors
    global keras_model
    global is_initialized
    if not is_initialized:
        dlib_cnn_face_detector_path = os.path.join(os.path.dirname(__file__), "mmod_human_face_detector.dat")
        if not os.path.exists(dlib_cnn_face_detector_path):
            raise Exception("Error: Unable to find %s, reinstall the lib!" % dlib_cnn_face_detector_path)
        
        if detector == "cnn" or detector == "all":
            dlib_cnn_face_detector = dlib.cnn_face_detection_model_v1(dlib_cnn_face_detector_path)
            # dlib and TF compete for VRAM, so dlib must do the first allocation to prevent an OOM error
            dlib_cnn_face_detector(np.zeros((scale_to, scale_to, 3), dtype=np.uint8), 0)
            dlib_detectors.append(dlib_cnn_face_detector)
        
        if detector == "hog" or detector == "all":
            dlib_face_detector = dlib.get_frontal_face_detector()
            dlib_face_detector ( np.zeros ( (scale_to, scale_to, 3), dtype=np.uint8), 0 )
            dlib_detectors.append(dlib_face_detector)        
    
        keras_model_path = os.path.join(os.path.dirname(__file__), "2DFAN-4.h5")
        if not os.path.exists(keras_model_path):
            print("Error: Unable to find %s, reinstall the lib!" % keras_model_path)
        else:
            print("Info: initializing keras model...")
            keras_model = keras.models.load_model(keras_model_path, custom_objects={'TorchBatchNorm2D': TorchBatchNorm2D})
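The zero-image call after loading the CNN detector is worth highlighting: it forces dlib to claim its GPU memory before Keras/TensorFlow initializes, avoiding the OOM race the comment describes. The pattern in isolation (the model path is assumed):

import numpy as np
import dlib

detector = dlib.cnn_face_detection_model_v1("mmod_human_face_detector.dat")  # assumed local path
# run one dummy detection so dlib's CUDA allocation happens before any TF session starts
detector(np.zeros((2048, 2048, 3), dtype=np.uint8), 0)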
github elggem / ros_people_model / scripts / vis_dlib_cnn.py
if __name__ == "__main__":
    initializeModel()
    rospy.init_node('vis_dlib_cnn', anonymous=True)
    bridge = CvBridge()

    CNN_SCALE = rospy.get_param('~scale', 0.4)
    CNN_FRATE = 1.0/rospy.get_param('~fps', 5.0)
    CNN_PADDING = rospy.get_param('~padding', 0.1)

    # Publishers
    pub = rospy.Publisher('/vis_dlib_cnn', Features, queue_size=10)
    # Subscribers
    rospy.Subscriber(rospy.get_param('~topic_name', '/camera/image_raw'), Image, imageCallback)

    # Dlib
    dlib_cnn_detector = dlib.cnn_face_detection_model_v1(DLIB_CNN_MODEL_FILE)
    # Launch detectors
    rospy.Timer(rospy.Duration(CNN_FRATE), faceDetectCNNCallback)

    rospy.spin()
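faceDetectCNNCallback is registered above but not shown. A hedged sketch of what such a timer callback could look like given the parameters in this snippet (the cached latest_frame and the Features packaging are assumptions, not the node's actual code):

def faceDetectCNNCallback(event):
    if latest_frame is None:  # assumed to be cached by imageCallback
        return
    small = cv2.resize(latest_frame, None, fx=CNN_SCALE, fy=CNN_SCALE)
    rgb = cv2.cvtColor(small, cv2.COLOR_BGR2RGB)
    scale = 1.0 / CNN_SCALE
    boxes = []
    for d in dlib_cnn_detector(rgb, 0):
        r = d.rect
        boxes.append((int(r.left() * scale), int(r.top() * scale),
                      int(r.right() * scale), int(r.bottom() * scale)))
    # package the boxes into a Features message and publish via pub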