How to use the dlib.shape_predictor function in dlib

To help you get started, we’ve selected a few dlib examples, based on popular ways it is used in public projects.
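The core pattern is the same across all of them: dlib.shape_predictor loads a trained landmark model from a .dat file and maps an image plus a face rectangle to a set of landmark points. A minimal sketch, assuming the standard 68-point model has been downloaded and unpacked from http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2:

import dlib

# Load the face detector and the 68-point landmark model
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# Detect faces, then predict landmarks inside each detection box
img = dlib.load_rgb_image("face.jpg")
for rect in detector(img, 1):  # upsample once to catch smaller faces
    shape = predictor(img, rect)  # full_object_detection with 68 parts
    points = [(p.x, p.y) for p in shape.parts()]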

danmohaha / CVPRW2019_Face_Artifacts / demo.py (view on GitHub)
import os

import dlib
import tensorflow as tf
import yaml
from easydict import EasyDict as edict
# ResoNet, Solver and the lib helpers are repo-local modules imported
# elsewhere in demo.py

print('***********')
print('Detecting DeepFake images, prob == -1 denotes opt out')
print('***********')
# Parse config
cfg_file = 'cfgs/res50.yml'
with open(cfg_file, 'r') as f:
    cfg = edict(yaml.safe_load(f))  # yaml.load without a Loader is deprecated
sample_num = 10

# Employ dlib to extract face area and landmark points
pwd = os.path.dirname(__file__)
front_face_detector = dlib.get_frontal_face_detector()
lmark_predictor = dlib.shape_predictor(pwd + '/dlib_model/shape_predictor_68_face_landmarks.dat')

tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
# init session
sess = tf.Session(config=tfconfig)
# Build network
reso_net = ResoNet(cfg=cfg, is_train=False)
reso_net.build()
# Build solver
solver = Solver(sess=sess, cfg=cfg, net=reso_net)
solver.init()


def im_test(im):
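    # im[:, :, (2, 1, 0)] reorders OpenCV's BGR channels to RGB before alignment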
    face_info = lib.align(im[:, :, (2,1,0)], front_face_detector, lmark_predictor)
    # Samples
charlielito / snapchat-filters-opencv / faceswap / Webcam_face_swapping.py (view on GitHub)
import sys

import cv2
import dlib
from imutils import face_utils
from utils import applyAffineTransform, rectContains, calculateDelaunayTriangles, warpTriangle, face_swap3

if __name__ == '__main__':

    # Make sure OpenCV is version 3.0 or above
    (major_ver, minor_ver, subminor_ver) = cv2.__version__.split('.')

    if int(major_ver) < 3:
        print('ERROR: Script needs OpenCV 3.0 or higher', file=sys.stderr)
        sys.exit(1)

    print("[INFO] loading facial landmark predictor...")
    model = "shape_predictor_68_face_landmarks.dat"
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(model)

    # Read the source image; its face will be swapped into the webcam frames
    # filename1 = 'ted_cruz.jpg'
    filename1 = 'brad.jpg'
    # filename1 = 'hillary_clinton.jpg'

    img1 = cv2.imread(filename1)
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    # detect faces in the grayscale frame
    rects1 = detector(gray1, 0)
    shape1 = predictor(gray1, rects1[0])
    points1 = face_utils.shape_to_np(shape1)  # (N, 2) NumPy array of landmarks
    # convert to a list of (x, y) tuples
    points1 = list(map(tuple, points1))

    video_capture = cv2.VideoCapture(0)
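
If imutils is not available, face_utils.shape_to_np is easy to approximate. A minimal sketch (not imutils' exact source) that works on any dlib full_object_detection:

import numpy as np

def shape_to_np(shape, dtype="int"):
    # copy each dlib landmark into an (N, 2) NumPy array of (x, y) coords
    coords = np.zeros((shape.num_parts, 2), dtype=dtype)
    for i in range(shape.num_parts):
        coords[i] = (shape.part(i).x, shape.part(i).y)
    return coords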
zhangodie / face_morpher / morphing_face.py (view on GitHub)
import sys

import dlib

# DLIB_MODEL_PATH is defined earlier in the original script; the standard
# 68-point model file name is assumed here
DLIB_MODEL_PATH = 'shape_predictor_68_face_landmarks.dat'

try:
	BOTTOM_IMAGE = sys.argv[1]
	MASK_IMAGE = sys.argv[2]
	alpha = float(sys.argv[3])

except (IndexError, ValueError):
	BOTTOM_IMAGE = 'img0.png'
	MASK_IMAGE = 'img1.png'
	alpha = 0.5

#----------------------------------------------------------------------------
# Model loading

# dlib face bounding-box detector; MTCNN is a possible alternative
face_detector = dlib.get_frontal_face_detector()

# dlib landmark model (68 points); alternatives: Face++ (106 points), STASM (77 points)
shape_predictor = dlib.shape_predictor(DLIB_MODEL_PATH)

#----------------------------------------------------------------------------
# Face regions (dlib landmark index ranges)

"""LEFT_FACE = list(range(0, 9)) + list(range(17, 22))
RIGHT_FACE = list(range(9, 17)) + list(range(22, 27))"""
JAW_POINTS = list(range(0, 27))
JAW_END = 17
FACE_END = 68

# Polygon filling with cv2.fillConvexPoly
OVERLAY_POINTS = [JAW_POINTS]  # LEFT_FACE, RIGHT_FACE,

#----------------------------------------------------------------------------
# Function that returns the 68 landmark coordinates
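
The snippet cuts off before that function; as for the mask, the OVERLAY_POINTS groups above are typically rasterized with cv2.fillConvexPoly. A hedged sketch (draw_mask is a hypothetical name, not this repository's code; landmarks is a (68, 2) int32 array):

import cv2
import numpy as np

def draw_mask(im, landmarks):
    # fill the convex hull of each landmark group with white (255)
    mask = np.zeros(im.shape[:2], dtype=np.uint8)
    for group in OVERLAY_POINTS:
        hull = cv2.convexHull(landmarks[group])
        cv2.fillConvexPoly(mask, hull, 255)
    return mask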
shravankumar147 / Facial-Landmark-Detection / facial_landmark_detection.py (view on GitHub)
# Reconstructed preamble: imports and the start of the argument parser
# (the original excerpt begins mid-call; flag names are assumed)
import argparse
import os

import cv2
import dlib
import imutils
import matplotlib.pyplot as plt
from imutils import face_utils

ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
	help="path to facial landmark predictor")
ap.add_argument("-i", "--image", required=True,
	help="path to input image")
args = vars(ap.parse_args())

# download the 68-point model if it is not already present
# (note: wget fetches a .bz2 archive that still needs to be extracted)
if not os.path.isfile(args["shape_predictor"]):
	cmd = "wget -c --progress=bar http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
	os.system(cmd)

# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])

# load the input image, resize it, and convert it to grayscale
image = plt.imread(args["image"])
orig = image
image = imutils.resize(image, width=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# detect faces in the grayscale image
rects = detector(gray, 1)

# loop over the face detections
for (i, rect) in enumerate(rects):
	# determine the facial landmarks for the face region, then
	# convert the facial landmark (x, y)-coordinates to a NumPy
	# array
	shape = predictor(gray, rect)
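
The excerpt stops just after the predictor call; a typical continuation of the loop, sketched in the imutils style rather than copied from the repository, converts the landmarks to NumPy and draws them:

	shape = face_utils.shape_to_np(shape)
	(x, y, w, h) = face_utils.rect_to_bb(rect)
	cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
	for (px, py) in shape:
		cv2.circle(image, (px, py), 1, (0, 0, 255), -1)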
kendricktan / iffse / analyze.py (view on GitHub)
import sys
import dlib

import skimage.io as skio
import skimage.draw as skdr

import numpy as np

from PIL import Image

from facemaps.cv.faces import align_face_to_template

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(
    './pretrained/shape_predictor_68_face_landmarks.dat')


def maybe_face_bounding_box(img):
    """
    Returns a bounding box if exactly
    one face is found; in any other
    case it returns None.
    """
    global detector

    dets = detector(img, 1)
    if len(dets) == 1:
        return dets[0]
    return None
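
Downstream code feeds the single box to the predictor before alignment. A hedged usage sketch (the file name is a placeholder):

img = skio.imread("face.jpg")
box = maybe_face_bounding_box(img)
if box is not None:
    shape = predictor(img, box)  # landmarks passed on to align_face_to_template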
akshaybhatia10 / ComputerVision-Projects / FacialExpressionDetection / yawnDetector.py (view on GitHub)
import cv2
import numpy as np
import dlib

# Path to shape predictor file
PATH = 'shape_predictor_68_face_landmarks.dat'
predictor = dlib.shape_predictor(PATH)
detector = dlib.get_frontal_face_detector()  # calling it returns one rectangle per detected face


class TooManyFaces(Exception):
	pass

class NoFaces(Exception):
	pass


def get_landmarks(image):
	points = detector(image, 1)

	if len(points) > 1:
		raise TooManyFaces
	if len(points) == 0:
		raise NoFaces
hay / facetool / facetool / classify.py (view on GitHub)
def _load_image(self, image_path):
        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor(self.predictor_path)
        fa = FaceAligner(predictor, desiredFaceWidth=160)
        image = cv2.imread(image_path, cv2.IMREAD_COLOR)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        rects = detector(gray, 2)
        rect_nums = len(rects)
        XY, aligned_images = [], []

        if rect_nums == 0:
            aligned_images.append(image)

            return aligned_images, image, rect_nums, XY
        else:
            for i in range(rect_nums):
                aligned_image = fa.align(image, gray, rects[i])
                aligned_images.append(aligned_image)
                (x, y, w, h) = rect_to_bb(rects[i])
bogireddytejareddy / micro-expression-recognition / CASME-SQUARE / Late_MicroExpFuseNet.py (view on GitHub)
import dlib
import numpy
import pylab
import matplotlib
matplotlib.use('Agg')  # must run before pyplot is imported
import matplotlib.pyplot as plt

from keras.layers import Concatenate, Input, concatenate, add, multiply, maximum
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils, generic_utils
from keras import backend as K
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed
from sklearn import preprocessing

K.set_image_dim_ordering('th')

# DLib Face Detection
predictor_path = "shape_predictor_68_face_landmarks.dat"
predictor = dlib.shape_predictor(predictor_path)
detector = dlib.get_frontal_face_detector()

class TooManyFaces(Exception):
	pass

class NoFaces(Exception):
	pass

def get_landmark(img):
	rects = detector(img, 1)
	if len(rects) > 1:
		raise TooManyFaces
	if len(rects) == 0:
		raise NoFaces  # rects[0] below would otherwise raise IndexError
	ans = numpy.matrix([[p.x, p.y] for p in predictor(img, rects[0]).parts()])
	return ans
parai / dms / models / gaze.py (view on GitHub)
def get_landmarks_predictor():
    """Get a singleton dlib face landmark predictor."""
    global _landmarks_predictor
    if not _landmarks_predictor:
        dat_path = _get_dlib_data_file('shape_predictor_5_face_landmarks.dat')
        # dat_path = _get_dlib_data_file('shape_predictor_68_face_landmarks.dat')
        _landmarks_predictor = dlib.shape_predictor(dat_path)
    return _landmarks_predictor
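
Either model file works here because both return a full_object_detection; the 5-point model is far smaller and is sufficient for alignment and encoding, while expression or per-region work needs the 68-point model. A hedged usage sketch (img and face_rect are placeholders):

predictor = get_landmarks_predictor()  # the model is loaded only on first call
shape = predictor(img, face_rect)      # 5 parts: four eye corners plus the nose base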
kaschmo / sh_face_rec / sh_face_rec / trainclassifier.py (view on GitHub)
logger = logging.getLogger("trainClassifier")
# handler = logging.StreamHandler()
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# handler.setFormatter(formatter)
# logger.addHandler(handler)


MTCNN_face_detector = MTCNNDetector(minsize = cf.getint('MTCNN_MINSIZE'), thresholds = [0.6, 0.7, 0.8], scale_factor = cf.getfloat('MTCNN_SCALE_FACTOR'), bb_margin = cf.getint('MTCNN_BB_MARGIN'))

#MTCNN_face_detector = MTCNNDetector(minsize = 40, thresholds = [0.6, 0.7, 0.8], scale_factor = 0.709, bb_margin = 20)
predictor_68_point_model = model_dir + "shape_predictor_68_face_landmarks.dat"
#pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)
predictor_5_point_model = model_dir + "shape_predictor_5_face_landmarks.dat"
#pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)
if cf['LANDMARK_DET'] == "68_point":
    pose_predictor = dlib.shape_predictor(predictor_68_point_model)
else:
    pose_predictor = dlib.shape_predictor(predictor_5_point_model)

face_recognition_model = model_dir + "dlib_face_recognition_resnet_model_v1.dat"
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
face_encoding_num_jitters = 1  # how many times to re-sample the face when encoding

def image_files_in_folder(folder):
    return [os.path.join(folder, f) for f in os.listdir(folder) if re.match(r'.*\.(jpg|jpeg|png)$', f, flags=re.I)]


if __name__ == "__main__":
    logger.info("Training knn model in %s", os.path.join(model_dir, model_name))
    X = []
    y = []
    start = time.time()
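
The excerpt ends as the training loop begins; one 128-D encoding per face would typically be computed with the objects defined above. A hedged sketch (encode_face is a hypothetical helper, not this repository's code; assumes numpy is imported as np):

def encode_face(img, rect):
    # landmark the face, then compute dlib's 128-D face descriptor
    shape = pose_predictor(img, rect)
    descriptor = face_encoder.compute_face_descriptor(
        img, shape, face_encoding_num_jitters)
    return np.array(descriptor)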