How to use dlib - 9 common examples

To help you get started, we've selected a few dlib examples based on popular ways the library is used in public projects.

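Most of the snippets below share the same pattern: create a detector, run it over an image or video frame, and hand each returned rectangle to a shape predictor or tracker. Here is a minimal sketch of that pattern; the image path is a placeholder, and the 68-point model file must be downloaded separately from dlib.net.

import dlib

detector = dlib.get_frontal_face_detector()  # built-in HOG face detector
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

image = dlib.load_rgb_image("face.jpg")  # placeholder input image
for det in detector(image, 1):  # second arg: upsample the image once
    shape = predictor(image, det)
    print(det, "->", shape.num_parts, "landmarks")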

github celebrity-audio-collection / videoprocess / unused / test_lip.py
def get_facial_landmark_vectors_from_frame(frame):
    # Assumes module-level globals: `face_detection_model` (an object whose
    # .update(frame) returns bounding boxes and landmarks), a dlib
    # `shape_predictor`, `import dlib`, and `import numpy as np`.
    # print('Fetching face detections and landmarks...')

    bboxes, landmarks = face_detection_model.update(frame)

    dets = []

    if bboxes is None:
        print('no detections')
        return (None, None)
    # assume only 1 face per frame
    facial_points = []
    for k, bbox in enumerate(bboxes):
        pre_b = dlib.rectangle(int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))
        dets.append(pre_b)
        shape = shape_predictor(frame, pre_b)

        if shape is None:
            continue

        for i in np.arange(0, 68):
            part = shape.part(i)
            # mouth_points.append((part2.x, part2.y))
            facial_points.append(part.x)
            facial_points.append(part.y)

        if len(facial_points) > 0:
            break

    # print('Returning (' + str(len(dets)) + ', ' + str(len(facial_points)) + ')')
    return dets, facial_points
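The function flattens the 68 landmarks of the first detected face into an interleaved [x0, y0, x1, y1, ...] list of 136 values. A hypothetical caller might reshape that back into coordinate pairs like so (the variable names here are illustrative, not from the repo):

dets, facial_points = get_facial_landmark_vectors_from_frame(frame)
if facial_points:
    landmarks = np.array(facial_points).reshape(-1, 2)  # shape (68, 2)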
github previtus / AttentionPipeline / _side_projects / hog_tests / hog_using_dlib.py
import sys
import dlib
from PIL import Image
import numpy as np
from timeit import default_timer as timer

# (563, 511, 3)   total time: 0.433351890999802
#file_name = "/home/ekmek/intership_project/hog_tests/face_example.jpg"
file_name = '/home/ekmek/intership_project/video_parser_v1/_videos_to_test/small_dataset/input/frames/s0216.jpg'

show = True


if show:
    win = dlib.image_window()

# Create a HOG face detector using the built-in dlib class
face_detector = dlib.get_frontal_face_detector()

# Load the image into an array
#image = io.imread(file_name)
image = Image.open(file_name)

image = np.array(image)
print(image.shape)

# Run the HOG face detector on the image data.
# The result will be the bounding boxes of the faces in our image.

start = timer()
detected_faces = face_detector(image, 1)  # second arg: upsample the image once before detecting
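The snippet cuts off right after the detector call. A plausible completion, consistent with the timing comment near the top and the image_window created earlier, might look like this (the timing print and overlay calls are assumptions, not part of the original file):

end = timer()
print("total time:", end - start)
print("Found {} faces.".format(len(detected_faces)))

if show:
    win.set_image(image)  # display the frame
    win.add_overlay(detected_faces)  # draw the detected boxes
    dlib.hit_enter_to_continue()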
github wkentaro-archive / rfcn / tests / utils_tests / test_label_rois.py
def test_label_rois():
    # Depends on dlib, numpy as np, nose.tools, fcn, and the repo's own
    # rfcn.utils and get_instance_segmentation_data helpers.
    img, lbl_cls, lbl_inst = get_instance_segmentation_data()

    rects = []
    dlib.find_candidate_object_locations(img, rects)
    rois = []
    for rect in rects:
        x1, y1, x2, y2 = rect.left(), rect.top(), rect.right(), rect.bottom()
        rois.append((x1, y1, x2, y2))
    rois = np.array(rois)

    roi_clss, roi_inst_masks = rfcn.utils.label_rois(
        rois, lbl_inst, lbl_cls, overlap_thresh=0.5)

    n_rois = len(rois)
    nose.tools.assert_equal(len(roi_clss), n_rois)
    nose.tools.assert_equal(len(roi_inst_masks), n_rois)
    np.testing.assert_equal(np.unique(roi_clss), [0, 1])

    viz_imgs = []
    colors = fcn.utils.labelcolormap(21)
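dlib.find_candidate_object_locations implements selective-search-style region proposals: it appends dlib.rectangle candidates to the list you pass in. A minimal standalone sketch, with an illustrative image path and min_size value:

import dlib

img = dlib.load_rgb_image("scene.jpg")  # placeholder input image
rects = []
dlib.find_candidate_object_locations(img, rects, min_size=500)  # min_size filters tiny proposals
print("proposals:", len(rects))
r = rects[0]
print("first box:", r.left(), r.top(), r.right(), r.bottom())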
github apollos / opencv-practice / object_detection_made_easy / test_detector.py
# python test_detector.py --detector output/stop_sign_detector.svm --testing stop_sign_testing

# import the necessary packages
from imutils import paths
import argparse
import dlib
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--detector", required=True, help="Path to trained object detector")
ap.add_argument("-t", "--testing", required=True, help="Path to directory of testing images")
args = vars(ap.parse_args())

# load the detector
detector = dlib.simple_object_detector(args["detector"])

# loop over the testing images
for testingPath in paths.list_images(args["testing"]):
	# load the image and make predictions
	image = cv2.imread(testingPath)
	boxes = detector(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

	# loop over the bounding boxes and draw them
	for b in boxes:
		# left/top/right/bottom are corner coordinates, not width/height
		(x1, y1, x2, y2) = (b.left(), b.top(), b.right(), b.bottom())
		cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)

	# show the image
	cv2.imshow("Image", image)
	cv2.waitKey(0)
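dlib.simple_object_detector loads a HOG + linear SVM detector produced by dlib's own training tooling. For context, a hedged sketch of how such a .svm file is typically trained; "training.xml" stands in for an imglab-style annotation file and the option values are illustrative:

import dlib

options = dlib.simple_object_detector_training_options()
options.add_left_right_image_flips = True  # augment with mirrored examples
options.C = 5  # SVM regularization strength
options.num_threads = 4
options.be_verbose = True

dlib.train_simple_object_detector("training.xml", "stop_sign_detector.svm", options)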
github charlielito / snapchat-filters-opencv / faceswap / Webcam_face_swapping.py
import sys
import time
import cv2
import dlib
from imutils import face_utils
from utils import applyAffineTransform, rectContains, calculateDelaunayTriangles, warpTriangle, face_swap3

if __name__ == '__main__' :

    # Make sure OpenCV is version 3.0 or above
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

    if int(major_ver) < 3:
        print('ERROR: Script needs OpenCV 3.0 or higher', file=sys.stderr)
        sys.exit(1)

    print("[INFO] loading facial landmark predictor...")
    model = "shape_predictor_68_face_landmarks.dat"
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(model)

    # Read images: will swap image1 into image2
    #filename1 = 'ted_cruz.jpg'
    filename1 = 'brad.jpg'
    #filename1 = 'hillary_clinton.jpg'

    img1 = cv2.imread(filename1)
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    # detect faces in the grayscale frame
    rects1 = detector(gray1, 0)
    shape1 = predictor(gray1, rects1[0])
    points1 = face_utils.shape_to_np(shape1)  # numpy array of (x, y) rows
    # convert the array to a list of tuples
    points1 = list(map(tuple, points1))
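The file then opens the webcam (the VideoCapture line is from the original source); the per-frame loop below is a hedged sketch of how the swap would typically be applied, since the rest of the loop is not shown here and the face_swap3 signature is assumed:

    video_capture = cv2.VideoCapture(0)

    # Hedged sketch of the per-frame loop (not part of the excerpt above):
    while True:
        ret, frame = video_capture.read()
        if not ret:
            break
        gray2 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects2 = detector(gray2, 0)
        if len(rects2) > 0:
            shape2 = predictor(gray2, rects2[0])
            points2 = list(map(tuple, face_utils.shape_to_np(shape2)))
            frame = face_swap3(img1, frame, points1, points2)  # signature assumed
        cv2.imshow("Face swap", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break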
github danmohaha / CVPRW2019_Face_Artifacts / demo.py
# Requires os, yaml, dlib, tensorflow as tf, easydict's edict, and the repo's
# own ResoNet, Solver, and lib modules.
print('***********')
print('Detecting DeepFake images, prob == -1 denotes opt out')
print('***********')
# Parse config
cfg_file = 'cfgs/res50.yml'
with open(cfg_file, 'r') as f:
    cfg = edict(yaml.load(f, Loader=yaml.FullLoader))  # Loader required by PyYAML >= 5
sample_num = 10

# Employ dlib to extract face area and landmark points
pwd = os.path.dirname(__file__)
front_face_detector = dlib.get_frontal_face_detector()
lmark_predictor = dlib.shape_predictor(pwd + '/dlib_model/shape_predictor_68_face_landmarks.dat')

tfconfig = tf.ConfigProto(allow_soft_placement=True)  # TensorFlow 1.x API
tfconfig.gpu_options.allow_growth = True
# init session
sess = tf.Session(config=tfconfig)
# Build network
reso_net = ResoNet(cfg=cfg, is_train=False)
reso_net.build()
# Build solver
solver = Solver(sess=sess, cfg=cfg, net=reso_net)
solver.init()


def im_test(im):
    face_info = lib.align(im[:, :, (2,1,0)], front_face_detector, lmark_predictor)
    # Samples
github voletiv / lipreading-in-the-wild-experiments / process-lrw / process_lrw_functions.py
def detect_mouth_and_write(saveDir, wordFileName, frameNumer, frame, detector, predictor,
        dontWriteMouthIfExists=True, prevFace=dlib.rectangle(30, 30, 220, 220),
        verbose=False):

    # Image Name
    mouthImageName = os.path.join(saveDir, "/".join(wordFileName.split(
                                  "/")[-3:]).split('.')[0] + \
                                  "_{0:02d}_mouth".format(frameNumer) + ".jpg")

    # Optionally skip the write if the mouth image already exists
    if dontWriteMouthIfExists:
        # Check if file exists
        if os.path.isfile(mouthImageName):
            if verbose:
                print("Mouth image", mouthImageName,
                      "exists, so not detected. (detect_mouth_and_write)")
            # Return the previous face if the file exists
            return prevFace
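In dlib's 68-point layout the mouth is landmarks 48 through 67, so the detection step this function presumably performs next follows the standard pattern (a hedged sketch, not from the repo):

rects = detector(frame, 0)
face = rects[0] if len(rects) > 0 else prevFace  # fall back to the last known face
shape = predictor(frame, face)
mouth_points = [(shape.part(i).x, shape.part(i).y) for i in range(48, 68)]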
github alokwhitewolf / MultiObjectTracker / tracker.py
							vehicle_vel_bool.append(0)
							vehicle_frame_counter.append(0)
							which_conflict.append([])
							which_intersect.append([-1])

				if points_ped:
					# initiate one correlation tracker per selected pedestrian
					tracker_ped = [dlib.correlation_tracker() for _ in range(len(points_ped))]
					# Provide the tracker the initial position of the object
					[tracker_ped[i].start_track(frame, dlib.rectangle(*rect)) for i, rect in enumerate(points_ped)]

				if points_veh:
					# initiate one correlation tracker per selected vehicle
					tracker_veh = [dlib.correlation_tracker() for _ in range(len(points_veh))]
					# Provide the tracker the initial position of the object
					[tracker_veh[i].start_track(frame, dlib.rectangle(*rect)) for i, rect in enumerate(points_veh)]

				print("press 'r' to see output ")
				print("press 'q' to quit ")


				if cv2.waitKey(-1) & 0xFF == ord('r'):
					cv2.destroyWindow("Select objects to be tracked here.")
					cv2.destroyWindow("Objects to be tracked.")
					print "\nResumed\n"
					break
				if cv2.waitKey(-1) & 0xFF == ord('q'):
					exit()

		if points_ped or points_veh:

			if points_ped:
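Once started, a correlation_tracker is advanced one frame at a time with update(), which returns a confidence score, and get_position() yields the current box as a dlib.drectangle with float coordinates. A small self-contained helper sketching the per-frame update (not from the repo):

import cv2
import dlib

def draw_tracked_box(tracker, frame):
    # update() returns a confidence value; low values suggest the target was lost
    confidence = tracker.update(frame)
    pos = tracker.get_position()  # dlib.drectangle with float coordinates
    cv2.rectangle(frame,
                  (int(pos.left()), int(pos.top())),
                  (int(pos.right()), int(pos.bottom())),
                  (0, 255, 0), 2)
    return confidence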
github singnet / face-services / services / face_align_server.py
def do_alignment(img, bbox):
    # Requires os, tempfile, dlib, and a module-level `landmark_predictors`
    # dict mapping landmark counts ("5", "68") to dlib shape predictors.
    fh, temp_file = tempfile.mkstemp('.jpg')
    os.close(fh)
    temp_file_no_ext = ".".join(temp_file.rsplit('.')[:-1])

    d = dlib.rectangle(bbox.x, bbox.y, bbox.x + bbox.w, bbox.y + bbox.h)

    # num_pts = len(source_pts)
    num_pts = 5
    try:
        landmark_predictor = landmark_predictors[str(num_pts)]
    except KeyError:
        raise Exception("Incorrect number of landmarks")

    detection_object = landmark_predictor(img, d)

    chip_size = 150
    border = 0.2
    # save_face_chip writes the aligned crop to temp_file_no_ext + ".jpg"
    dlib.save_face_chip(img, detection_object, temp_file_no_ext, chip_size, border)

    # Playing with OpenCV's geometric transforms - they don't work out of the box
    # due to faces not being a plane.
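When the aligned crop is needed in memory rather than on disk, dlib also provides get_face_chip, which returns the chip as a numpy array. A short hedged sketch, reusing the names from the function above:

# Same alignment as save_face_chip, but kept in memory instead of written out.
chip = dlib.get_face_chip(img, detection_object, size=150, padding=0.2)
print(chip.shape)  # (150, 150, 3)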