How to use the mtcnn.detect_face function in mtcnn

To help you get started, we’ve selected a few mtcnn.detect_face examples based on popular ways the function is used in public projects.
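The snippets below all follow the same Caffe-based pattern: load the three cascade networks (P-Net, R-Net, O-Net), get the image into the channel order the detector expects, then call mtcnn.detect_face. Here is a minimal, hedged sketch of that pattern; the model paths, minsize value, and image file are illustrative assumptions, while the threshold and factor values are taken from the examples below.

import caffe
import cv2
import mtcnn

# Sketch only: the model directory and minsize are illustrative assumptions.
caffe_model_path = "./mtcnn"
minsize = 20                      # smallest face (in pixels) to search for
threshold = [0.8, 0.8, 0.6]       # per-stage score thresholds (as in the examples below)
factor = 0.709                    # image-pyramid scale factor (as in the examples below)

caffe.set_mode_cpu()
PNet = caffe.Net(caffe_model_path + "/det1.prototxt", caffe_model_path + "/det1.caffemodel", caffe.TEST)
RNet = caffe.Net(caffe_model_path + "/det2.prototxt", caffe_model_path + "/det2.caffemodel", caffe.TEST)
ONet = caffe.Net(caffe_model_path + "/det3.prototxt", caffe_model_path + "/det3.caffemodel", caffe.TEST)

img = cv2.imread("face.jpg")      # OpenCV loads images as BGR
img_rgb = img[:, :, ::-1].copy()  # reverse the channels, as the examples below do element by element

# boundingboxes: [N, 5] = (x1, y1, x2, y2, score); points: facial landmarks
boundingboxes, points = mtcnn.detect_face(img_rgb, minsize, PNet, RNet, ONet, threshold, False, factor)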


github wujiyang / BossRecognition / detectionAndAlign.py
def fac_detection_alignment(img, minsize, PNet, RNet, ONet, threshold, factor):
    '''
    return all the aligned faces with the given image
    '''
    

    # face detection
    boundingboxes, points = mtcnn.detect_face(img, minsize, PNet, RNet, ONet, threshold, False, factor)

    if(boundingboxes.shape[0] == 0):
        alignfaces = []
    else:
        # face alignment
        alignfaces = faceAlign(img, points)
        # original image with facial rects
        img = mtcnn.drawBoxes(img, boundingboxes)

    return alignfaces, img
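A hedged usage sketch for the function above, reusing the network handles and parameter values from the setup sketch at the top of this page (they are not taken from this repository):

# Sketch only: drive fac_detection_alignment() with illustrative parameters.
img = cv2.imread("group.jpg")
faces, annotated = fac_detection_alignment(img, 20, PNet, RNet, ONet, [0.8, 0.8, 0.6], 0.709)
for i, face in enumerate(faces):
    cv2.imwrite("face_%d.jpg" % i, face)   # each aligned crop
cv2.imwrite("annotated.jpg", annotated)    # original image with detection boxes drawn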
github taylorlu / Facenet-Caffe / clustering.py
    try:
        if(image.shape[2]!=3 and image.shape[2]!=4):
            return [],[]

        if(image.shape[2]==4):
            # drop the alpha channel of RGBA input
            image = image[:,:,:-1]

    except Exception as e:
        return [],[]

    # swap channels 0 and 2 (BGR <-> RGB) before running detection
    img_matlab = image.copy()
    tmp = img_matlab[:,:,2].copy()
    img_matlab[:,:,2] = img_matlab[:,:,0]
    img_matlab[:,:,0] = tmp

    # boundingboxes: [None, 5] => the last dim is probability.
    boundingboxes, points = mtcnn.detect_face(img_matlab, minsize, PNet, RNet, ONet, threshold, False, factor)
    boundingboxes = boundingboxes.astype(np.int32)
    warpedFaces = []

    for i in range(boundingboxes.shape[0]):

        left = boundingboxes[i][0]
        right = boundingboxes[i][2]
        top = boundingboxes[i][1]
        bottom = boundingboxes[i][3]

        old_size = (right-left+bottom-top)/2.0
        centerX = right - (right-left)/2.0
        centerY = bottom - (bottom-top)/2 + old_size*0.1
        size = int(old_size*1.15)
        
        x1 = int(centerX-size/2)
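The crop math above takes the mean of the box width and height as old_size, shifts the centre down by 10% of it, and cuts a square 1.15 times larger. The rest of the loop is cut off by the excerpt; a hedged sketch of how such a square is typically clamped to the image before collecting the crop might look like this (not the repository's code):

        # Sketch only: clamp the square window to the image bounds.
        y1 = int(centerY - size/2)
        x2 = min(x1 + size, img_matlab.shape[1])
        y2 = min(y1 + size, img_matlab.shape[0])
        x1 = max(x1, 0)
        y1 = max(y1, 0)
        warpedFaces.append(img_matlab[y1:y2, x1:x2])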
github taylorlu / Facenet-Caffe / tf2caffe.py
    caffe_model_path = "./mtcnn"

    threshold = [0.8, 0.8, 0.6]
    factor = 0.709

    caffe.set_mode_cpu()
    PNet = caffe.Net(caffe_model_path+"/det1.prototxt", caffe_model_path+"/det1.caffemodel", caffe.TEST)
    RNet = caffe.Net(caffe_model_path+"/det2.prototxt", caffe_model_path+"/det2.caffemodel", caffe.TEST)
    ONet = caffe.Net(caffe_model_path+"/det3.prototxt", caffe_model_path+"/det3.caffemodel", caffe.TEST)

    img_matlab = img.copy()
    tmp = img_matlab[:,:,2].copy()
    img_matlab[:,:,2] = img_matlab[:,:,0]
    img_matlab[:,:,0] = tmp

    boundingboxes, points = mtcnn.detect_face(img_matlab, minsize, PNet, RNet, ONet, threshold, False, factor)
    warped = img_matlab[int(boundingboxes[0][1]):int(boundingboxes[0][3]),
                        int(boundingboxes[0][0]):int(boundingboxes[0][2])]
    print(int(boundingboxes[0][1]), int(boundingboxes[0][3]), int(boundingboxes[0][0]), int(boundingboxes[0][2]))
    return warped
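One thing the snippet above does not guard against is an empty detection result: indexing boundingboxes[0] fails when no face is found. A minimal hedged check, reusing the same variable names, could sit just before the crop:

    # Sketch only: bail out before indexing if detect_face() found nothing.
    if boundingboxes.shape[0] == 0:
        return None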
github taylorlu / Facenet-Caffe / faceServer.py
def mtcnnDetect(image):

    if(image.shape[2]!=3 and image.shape[2]!=4):
        return [],[],[]

    if(image.shape[2]==4):
        image = image[:,:,:-1]

    img_matlab = image.copy()
    tmp = img_matlab[:,:,2].copy()
    img_matlab[:,:,2] = img_matlab[:,:,0]
    img_matlab[:,:,0] = tmp

    # boundingboxes: [None, 5] => the last dim is probability.
    boundingboxes, points = mtcnn.detect_face(img_matlab, minsize, PNet, RNet, ONet, threshold, False, factor)
    boundingboxes = boundingboxes.astype(np.int32)
    vectors = []

    for i in range(boundingboxes.shape[0]):

        left = boundingboxes[i][0]
        right = boundingboxes[i][2]
        top = boundingboxes[i][1]
        bottom = boundingboxes[i][3]
        
        old_size = (right-left+bottom-top)/2.0
        centerX = right - (right-left)/2.0
        centerY = bottom - (bottom-top)/2 + old_size*0.1
        size = int(old_size*1.15)
        
        x1 = int(centerX-size/2)
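As the comments in these snippets note, the fifth column of boundingboxes is the detection score. A common follow-up, sketched here with the same variable names (the 0.9 cutoff and the landmark layout are assumptions), is to drop low-confidence boxes before the int32 cast truncates that column:

    # Sketch only: keep confident detections before casting to int32.
    boundingboxes, points = mtcnn.detect_face(img_matlab, minsize, PNet, RNet, ONet, threshold, False, factor)
    keep = boundingboxes[:, 4] > 0.9                # illustrative score cutoff
    boundingboxes = boundingboxes[keep].astype(np.int32)
    points = points[:, keep]                        # assumes landmarks come back as (10, num_faces)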