How to use the dlib.get_face_chip function in dlib

To help you get started, we’ve selected a few dlib examples based on popular ways it is used in public projects.

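To see the function in isolation first, here is a minimal, self-contained sketch of the usual pipeline: detect a face, fit dlib's 5-point landmark model, then cut out an aligned chip. The image path is a placeholder, and the model file name is dlib's standard shape_predictor_5_face_landmarks.dat (downloadable from dlib.net).

import dlib

# Placeholder image path; any RGB numpy array works as input.
img = dlib.load_rgb_image("face.jpg")

# Detect faces, then fit the 5-point landmark model needed for alignment
# (this sketch assumes at least one face is found).
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor("shape_predictor_5_face_landmarks.dat")
dets = detector(img, 1)
shape = sp(img, dets[0])

# Cut out a rotated, scaled, centered crop of the face.
chip = dlib.get_face_chip(img, shape, size=150, padding=0.25)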

foamliu / FaceNet / pre_process.py (view on GitHub)
        # print(filename)
        img = cv.imread(filename)
        img = img[:, :, ::-1]
        dets = detector(img, 1)

        num_faces = len(dets)
        if num_faces == 0:
            return image_name

        # Find the 5 face landmarks we need to do the alignment.
        faces = dlib.full_object_detections()
        for detection in dets:
            faces.append(sp(img, detection))

        # It is also possible to get a single chip
        image = dlib.get_face_chip(img, faces[0], size=img_size)
        image = image[:, :, ::-1]
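
When every detected face matters rather than just faces[0], dlib also offers a plural variant; a one-line sketch, assuming the img, faces, and img_size from the snippet above:

        # get_face_chips returns one aligned chip per full_object_detection in faces.
        chips = dlib.get_face_chips(img, faces, size=img_size)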
incluit / OpenVino-Driver-Behaviour / third-party / dlib / python_examples / face_recognition.py (view on GitHub)
        # In particular, a padding of 0.5 would double the width of the cropped
        # area, a value of 1 would triple it, and so forth.

        # There is another overload of compute_face_descriptor that can take
        # as an input an aligned image. 
        #
        # Note that it is important to generate the aligned image as
        # dlib.get_face_chip would do it i.e. the size must be 150x150, 
        # centered and scaled.
        #
        # Here is a sample usage of that

        print("Computing descriptor on aligned image ..")
        
        # Let's generate the aligned image using get_face_chip
        face_chip = dlib.get_face_chip(img, shape)        

        # Now we simply pass this chip (aligned image) to the api
        face_descriptor_from_prealigned_image = facerec.compute_face_descriptor(face_chip)                
        print(face_descriptor_from_prealigned_image)        
        
        dlib.hit_enter_to_continue()
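
For context, a sketch of the setup this excerpt assumes, using dlib's standard model file names (the paths are assumptions) and the img and shape computed earlier in the example. Both routes below should yield essentially the same 128-dimensional descriptor:

# Standard dlib models (file names as published on dlib.net; paths assumed).
sp = dlib.shape_predictor("shape_predictor_5_face_landmarks.dat")
facerec = dlib.face_recognition_model_v1("dlib_face_recognition_resnet_model_v1.dat")

# Route 1: compute_face_descriptor aligns internally from the landmarks.
descriptor_a = facerec.compute_face_descriptor(img, shape)

# Route 2: align explicitly; get_face_chip's default 150x150 output is exactly
# what the prealigned overload expects.
face_chip = dlib.get_face_chip(img, shape)
descriptor_b = facerec.compute_face_descriptor(face_chip)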
foamliu / FaceNet / data_generator.py (view on GitHub)
        for i_batch in range(length):
            sample = self.samples[i + i_batch]
            for j, role in enumerate(['a', 'p', 'n']):
                image_name = sample[role]
                filename = os.path.join(self.image_folder, image_name)
                image = cv.imread(filename)  # BGR
                image = image[:, :, ::-1]  # RGB
                dets = self.detector(image, 1)

                num_faces = len(dets)
                if num_faces > 0:
                    # Find the 5 face landmarks we need to do the alignment.
                    faces = dlib.full_object_detections()
                    for detection in dets:
                        faces.append(self.sp(image, detection))
                    image = dlib.get_face_chip(image, faces[0], size=img_size)
                else:
                    image = cv.resize(image, (img_size, img_size), interpolation=cv.INTER_CUBIC)

                if self.usage == 'train':
                    image = aug_pipe.augment_image(image)

                batch_inputs[j, i_batch] = preprocess_input(image)

        return [batch_inputs[0], batch_inputs[1], batch_inputs[2]], batch_dummy_target
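
The detect-align-or-resize pattern above also appears in lfw_eval.py below; a hedged refactor into a standalone helper (detector, sp, and the cv/dlib imports are assumed from the surrounding module):

def align_or_resize(image, detector, sp, img_size):
    # Return an aligned face chip, or a plain cubic resize when no face is found.
    dets = detector(image, 1)
    if len(dets) > 0:
        faces = dlib.full_object_detections()
        for detection in dets:
            faces.append(sp(image, detection))
        return dlib.get_face_chip(image, faces[0], size=img_size)
    return cv.resize(image, (img_size, img_size), interpolation=cv.INTER_CUBIC)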
kaschmo / sh_face_rec / sh_face_rec / presencedetector.py (view on GitHub)
    def detectPresence(self, frame):
        if frame.hasFace:
            #FUTURE: harder acceptance criteria for presence?
            #FUTURE: this approach only detects one person!
            #TODO: work with the face ignore list for the painting

            for i in range(len(frame.faceNames)):
                faceChip = dlib.get_face_chip(frame.getRGB(), frame.faceLandmarks[i])
                face = Face(img = faceChip, name = frame.faceNames[i], timestamp = frame.timestamp, encoding = frame.faceEmbeddings[i], distance = frame.faceDistances[i])
                if face.name != 'unknown': 
                    if face.name == self.ignoreFaceList[0]: 
                        self.logger.info("Ignoring %s.", face.name)   
                        continue                                       
                    self.lastKnownFace = face 
                    newFace = True
                    for fl in range(len(self.knownFaceList)):
                        if self.knownFaceList[fl].name == face.name:
                            self.logger.info(" %s already detected.", face.name)
                            newFace = False
                    if newFace:
                        self.logger.info("New Person %s. Adding to list", face.name)
                        self.knownFaceList.append(face)
                        #trigger OH
                        self.OHInterface.setPresent(face.name)
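
The call in this snippet passes only the image and the landmarks, so dlib's documented defaults apply; spelled out, it is equivalent to:

                # Defaults from dlib's signature: a 150x150 chip with 0.25 padding.
                faceChip = dlib.get_face_chip(frame.getRGB(), frame.faceLandmarks[i],
                                              size=150, padding=0.25)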
georgesterpu / pyVSR / pyVSR / Features / roi.py (view on GitHub)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # dlib and opencv use different channel representations

            detections = self._detect(frame, 0)

            if len(detections) > 0:  # else the buffer will preserve the zeros initialisation

                bbox = detections[0]
                left, top, right, bottom = _get_bbox_corners(bbox, frame.shape, self._gpu)
                # print(left, top, right, bottom, frame.shape)

                if self._align is True:
                    face_coords = dlib.rectangle(left, top, right, bottom)
                    landmarks5 = self._fitter5(frame, face_coords)

                    face_img = dlib.get_face_chip(frame, landmarks5, 256)
                    face_img = np.asarray(face_img)

                else:

                    face_img = frame[top:bottom, left:right]
                    face_img = cv2.resize(face_img, (160, 160), interpolation=cv2.INTER_CUBIC)

                face_chip_area = dlib.rectangle(0, 0, face_img.shape[1], face_img.shape[0])  # rectangle(left, top, right, bottom): width is shape[1], height is shape[0]
                landmarks68 = self._fitter68(face_img, face_chip_area)

                arr = _dlib_parts_to_numpy(landmarks68)[48:68]
                top_left, bottom_right = _get_array_bounds(arr, face_img.shape, border=self._border)

                mouth_crop = face_img[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0], :]
                mouth_crop_resized = cv2.resize(mouth_crop, (self._xres, self._yres), interpolation=cv2.INTER_AREA)
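
The helper _dlib_parts_to_numpy is not shown in this excerpt; a plausible minimal implementation, assuming it converts a dlib full_object_detection into an (N, 2) array of (x, y) coordinates (the [48:68] slice then selects the mouth points of the 68-point scheme):

import numpy as np

def _dlib_parts_to_numpy(landmarks):
    # full_object_detection.parts() yields dlib.point objects with .x and .y.
    return np.array([(p.x, p.y) for p in landmarks.parts()])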
foamliu / FaceNet / lfw_eval.py (view on GitHub)
                batch_inputs = np.empty((3, 1, img_size, img_size, channel), dtype=np.float32)

                for j, role in enumerate(['a', 'p', 'n']):
                    image_name = sample[role]
                    filename = os.path.join(lfw_folder, image_name)
                    image = cv.imread(filename)
                    image = image[:, :, ::-1]  # RGB
                    dets = self.detector(image, 1)

                    num_faces = len(dets)
                    if num_faces > 0:
                        # Find the 5 face landmarks we need to do the alignment.
                        faces = dlib.full_object_detections()
                        for detection in dets:
                            faces.append(self.sp(image, detection))
                        image = dlib.get_face_chip(image, faces[0], size=img_size)
                    else:
                        image = cv.resize(image, (img_size, img_size), interpolation=cv.INTER_CUBIC)

                    batch_inputs[j, 0] = preprocess_input(image)

                y_pred = model.predict([batch_inputs[0], batch_inputs[1], batch_inputs[2]])
                a = y_pred[0, 0:128]
                p = y_pred[0, 128:256]
                n = y_pred[0, 256:384]

                self.out_queue.put({'image_name': sample['a'], 'embedding': a})
                self.out_queue.put({'image_name': sample['p'], 'embedding': p})
                self.out_queue.put({'image_name': sample['n'], 'embedding': n})
                self.signal_queue.put(SENTINEL)

                if self.in_queue.qsize() == 0: