How to use the dlib.correlation_tracker function in dlib

To help you get started, we’ve selected a few dlib.correlation_tracker examples based on popular ways the function is used in public projects.
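
Before the project excerpts, here is a minimal, self-contained sketch of the dlib.correlation_tracker lifecycle. The video path and the initial box are placeholders; the calls themselves are the standard dlib/OpenCV API: create the tracker, seed it with start_track, then call update once per frame and read the box back with get_position.

import cv2
import dlib

cam = cv2.VideoCapture("video.mp4")  # placeholder path

tracker = dlib.correlation_tracker()

ret, frame = cam.read()
# Seed the tracker with an initial bounding box (left, top, right, bottom)
tracker.start_track(frame, dlib.rectangle(100, 100, 200, 200))

while True:
    ret, frame = cam.read()
    if not ret:
        break
    # update() returns the peak-to-sidelobe ratio, a rough confidence score
    tracker.update(frame)
    pos = tracker.get_position()  # a drectangle with float coordinates
    cv2.rectangle(frame,
                  (int(pos.left()), int(pos.top())),
                  (int(pos.right()), int(pos.bottom())),
                  (0, 255, 0), 2)
    cv2.imshow("Tracking", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break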


github alokwhitewolf / MultiObjectTracker / tracker.py (View on GitHub)
						if mode:
							velocity.append(0)
							vehicle_vel_bool.append(0)
							vehicle_frame_counter.append(0)
							which_conflict.append([])
							which_intersect.append([-1])

				if points_ped:
					# Initialize one tracker per selected pedestrian
					tracker_ped = [dlib.correlation_tracker() for _ in range(len(points_ped))]
					# Seed each tracker with the object's initial bounding box
					for i, rect in enumerate(points_ped):
						tracker_ped[i].start_track(frame, dlib.rectangle(*rect))

				if points_veh:
					# Initialize one tracker per selected vehicle
					tracker_veh = [dlib.correlation_tracker() for _ in range(len(points_veh))]
					# Seed each tracker with the object's initial bounding box
					for i, rect in enumerate(points_veh):
						tracker_veh[i].start_track(frame, dlib.rectangle(*rect))

				print "press 'r' to see output "
				print "press 'q' to quit "


				if cv2.waitKey(-1) & 0xFF == ord('r'):
					cv2.destroyWindow("Select objects to be tracked here.")
					cv2.destroyWindow("Objects to be tracked.")
					print "\nResumed\n"
					break
				if cv2.waitKey(-1) & 0xFF == ord('q'):
					exit()

		if points_ped or points_veh:
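
The excerpt above only creates and seeds the trackers. A per-frame update loop for those lists would look roughly like the following sketch (variable names follow the excerpt; the drawing step is an assumption):

for tracker in tracker_ped + tracker_veh:
    tracker.update(frame)
    pos = tracker.get_position()
    cv2.rectangle(frame,
                  (int(pos.left()), int(pos.top())),
                  (int(pos.right()), int(pos.bottom())),
                  (0, 255, 0), 2)
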
github bikz05 / object-tracker / object-tracker-single.py (View on GitHub)
    cv2.destroyWindow("Image")

    # Co-ordinates of objects to be tracked 
    # will be stored in a list named `points`
    points = get_points.run(img) 

    if not points:
        print("ERROR: No object to be tracked.")
        exit()

    cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
    cv2.imshow("Image", img)

    # Initial co-ordinates of the object to be tracked 
    # Create the tracker object
    tracker = dlib.correlation_tracker()
    # Provide the tracker the initial position of the object
    tracker.start_track(img, dlib.rectangle(*points[0]))

    while True:
        # Read frame from device or file
        retval, img = cam.read()
        if not retval:
            print("ERROR: Cannot capture frame from device; terminating.")
            exit()
        # Update the tracker  
        tracker.update(img)
        # Get the position of the object, draw a 
        # bounding box around it and display it.
        rect = tracker.get_position()
        pt1 = (int(rect.left()), int(rect.top()))
        pt2 = (int(rect.right()), int(rect.bottom()))
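
The excerpt stops right after computing the two corner points; the typical continuation draws the box and displays the frame, along these lines (color and key handling are assumptions):

        cv2.rectangle(img, pt1, pt2, (255, 255, 255), 3)
        cv2.imshow("Image", img)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
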
github mohamed-mamdouh95 / pedestrainTracker / pedestrianTracker.py (View on GitHub)
options = {"model": "cfg/yolo.cfg", "load": "bin/yolo.weights", "threshold": 0.1}
tfnet = TFNet(options)
it = 1
img = cv2.imread("./test/"+str(it)+".jpg")
result = tfnet.return_predict(img)
points = []
for i in range(len(result)):
    if result[i]['label'] == 'person':
        points.append((result[i]['topleft']['x'], result[i]['topleft']['y'],
                       result[i]['bottomright']['x'], result[i]['bottomright']['y']))
print(points)
if not points:
    print("ERROR: No object to be tracked.")
    exit()
tracker = [dlib.correlation_tracker() for _ in range(len(points))]
# Seed each tracker with its detection's bounding box
for i, rect in enumerate(points):
    tracker[i].start_track(img, dlib.rectangle(*rect))

while it < 112:
   
    img = cv2.imread("./test/"+str(it)+".jpg")
    x = 1
    for i in range(len(tracker)):
        tracker[i].update(img)
        # Get the position of the object, draw a
        # bounding box around it and display it.
        rect = tracker[i].get_position()
        pt1 = (int(rect.left()), int(rect.top()))
        pt2 = (int(rect.right()), int(rect.bottom()))
        cv2.rectangle(img, pt1, pt2, (255, 255, 255), 3)
        cv2.putText(img, str(x), pt1, cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
        x = x + 1
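
Because detection runs only on the first frame here, people who enter the scene later are never picked up. A common remedy is to re-run the detector every N frames and re-seed the trackers; a rough sketch, where the interval and the naive replace-everything policy are assumptions:

REDETECT_EVERY = 10  # assumed interval

if it % REDETECT_EVERY == 0:
    result = tfnet.return_predict(img)
    boxes = [(r['topleft']['x'], r['topleft']['y'],
              r['bottomright']['x'], r['bottomright']['y'])
             for r in result if r['label'] == 'person']
    # Naive policy: restart all trackers from the fresh detections
    tracker = [dlib.correlation_tracker() for _ in boxes]
    for t, rect in zip(tracker, boxes):
        t.start_track(img, dlib.rectangle(*rect))
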
github Suraj520 / CognitiveAnnotationTool / main.py (View on GitHub)
    cv2.imshow("Image", img)
    cv2.destroyWindow("Image")

    
    points = run(img, multi=True)

    if not points:
        print("ERROR: No object to be annotated.")
        exit()

    cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
    cv2.imshow("Image", img)

    # Initial co-ordinates of the objects to be tracked with the dlib correlation tracker
    # Create one tracker per object
    tracker = [dlib.correlation_tracker() for _ in range(len(points))]
    # Seed each tracker with its object's initial bounding box
    for i, rect in enumerate(points):
        tracker[i].start_track(img, dlib.rectangle(*rect))
    alpha = 0
    while True:
        User = str(UserName.get())
        # Read frame from device or file
        retval, img = cam.read()
        if not retval:
            print("Device not accessible")
            exit()
        # Update each tracker
        for i in range(len(tracker)):
            tracker[i].update(img)
            # Get the position of the object, draw a
            # bounding box around it and display it.
            rect = tracker[i].get_position()
github PJunhyuk / people-counting-pose / video_tracking.py (View on GitHub)
            people_real_num = people_real_num + 1
            for point_i in range(0, point_num):
                if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0:  # a (0, 0) coordinate marks a missing keypoint
                    draw.ellipse(ellipse_set(person_conf_multi, people_i, point_i), fill=point_color)
                    people_x.append(person_conf_multi[people_i][point_i][0])
                    people_y.append(person_conf_multi[people_i][point_i][1])
            if i == 0:
                target_points.append((int(min(people_x)), int(min(people_y)), int(max(people_x)), int(max(people_y))))
            else:
                is_new_person = True
                for k in range(len(tracker)):
                    rect = tracker[k].get_position()
                    # Not a new person if the centroid falls inside an existing tracker's box
                    if (rect.left() < np.mean(people_x) < rect.right() and
                            rect.top() < np.mean(people_y) < rect.bottom()):
                        is_new_person = False
                if is_new_person:
                    print('is_new_person!')
                    new_rect = (int(min(people_x)), int(min(people_y)), int(max(people_x)), int(max(people_y)))
                    tracker.append(dlib.correlation_tracker())
                    # Seed the newly appended tracker with the person's bounding box
                    tracker[-1].start_track(image, dlib.rectangle(*new_rect))

    ##########

    if i == 0:
        # Initial co-ordinates of the object to be tracked
        # Create the tracker object
        tracker = [dlib.correlation_tracker() for _ in range(len(target_points))]
        # Seed each tracker with its object's initial bounding box
        for i, rect in enumerate(target_points):
            tracker[i].start_track(image, dlib.rectangle(*rect))

    #####
github gdiepen / face-recognition / track multiple faces / demo - track multiple faces.py (View on GitHub)
                        #detected as a face. If both of these conditions hold
                        #we have a match
                        if ( ( t_x <= x_bar   <= (t_x + t_w)) and 
                             ( t_y <= y_bar   <= (t_y + t_h)) and 
                             ( x   <= t_x_bar <= (x   + w  )) and 
                             ( y   <= t_y_bar <= (y   + h  ))):
                            matchedFid = fid


                    #If no matched fid, then we have to create a new tracker
                    if matchedFid is None:

                        print("Creating new tracker " + str(currentFaceID))

                        #Create and store the tracker 
                        tracker = dlib.correlation_tracker()
                        tracker.start_track(baseImage,
                                            dlib.rectangle( x-10,
                                                            y-20,
                                                            x+w+10,
                                                            y+h+20))

                        faceTrackers[ currentFaceID ] = tracker

                        #Start a new thread that is used to simulate 
                        #face recognition. This is not yet implemented in this
                        #version :)
                        t = threading.Thread(target=doRecognizePerson,
                                             args=(faceNames, currentFaceID))
                        t.start()

                        #Increase the currentFaceID counter
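
A companion pattern in multi-face trackers like this one is pruning trackers that drift away: update() returns the peak-to-sidelobe ratio, which works as a tracking-quality score. A sketch using the names from the excerpt (the threshold of 7 is a common heuristic, not a value taken from this file):

fidsToDelete = []
for fid in faceTrackers.keys():
    trackingQuality = faceTrackers[fid].update(baseImage)
    # A low peak-to-sidelobe ratio suggests the tracker has lost the face
    if trackingQuality < 7:
        fidsToDelete.append(fid)

for fid in fidsToDelete:
    print("Removing tracker " + str(fid))
    faceTrackers.pop(fid, None)
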
github celebrity-audio-collection / videoprocess / RetinaFaceModel / src / recognizer_video.py (View on GitHub)
                # Pick the class with the highest predicted probability
                j = np.argmax(preds)
                proba = preds[j]
                # Compare this vector against the source class's vectors to verify it actually belongs to that class
                match_class_idx = (labels == j)
                match_class_idx = np.where(match_class_idx)[0]
                selected_idx = np.random.choice(match_class_idx, comparing_num)
                compare_embeddings = embeddings[selected_idx]
                # Calculate cosine similarity
                cos_similarity = CosineSimilarity(embedding, compare_embeddings)
                if cos_similarity < cosine_threshold and proba > proba_threshold:
                    name = le.classes_[j]
                    text = "{}".format(name)
                    # print("Recognized: {} <{:.2f}>".format(name, proba*100))
                # Start tracking
                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(int(box[0]), int(box[1]), int(box[2]), int(box[3]))
                tracker.start_track(rgb, rect)
                trackers.append(tracker)
                texts.append(text)

                y = bbox[1] - 10 if bbox[1] - 10 > 10 else bbox[1] + 10
                cv2.putText(frame, text, (bbox[0], y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
                cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), (255, 0, 0), 2)
    else:
        for tracker, text in zip(trackers, texts):
            pos = tracker.get_position()

            # unpack the position object
            startX = int(pos.left())
            startY = int(pos.top())
            endX = int(pos.right())
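
The excerpt cuts off while unpacking the tracked position; the usual continuation finishes the unpacking and redraws the box with the label cached in texts (a sketch consistent with the variables above):

            endY = int(pos.bottom())

            # Redraw the tracked box with the label recognized earlier
            cv2.rectangle(frame, (startX, startY), (endX, endY), (255, 0, 0), 2)
            y = startY - 10 if startY - 10 > 10 else startY + 10
            cv2.putText(frame, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
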
github PacktPublishing / Mastering-OpenCV-4-with-Python / Chapter11 / 01-chapter-content / face_tracking / object_tracking_correlation_filters.py (View on GitHub)
# Create the video capture to read from the webcam:
capture = cv2.VideoCapture(0)

# Set window name:
window_name = "Object tracking using dlib correlation filter algorithm"

# Create the window:
cv2.namedWindow(window_name)

# We bind mouse events to the created window:
cv2.setMouseCallback(window_name, mouse_event_handler)

# The first step is to initialize the correlation tracker:
tracker = dlib.correlation_tracker()

# This variable indicates whether we are currently tracking the object:
tracking_state = False

while True:
    # Capture frame from webcam:
    ret, frame = capture.read()

    # Draw basic instructions for the user:
    draw_text_info()

    # Once the user has selected two points, set and draw the rectangle delimiting the object to track:
    if len(points) == 2:
        cv2.rectangle(frame, points[0], points[1], (0, 0, 255), 3)
        dlib_rectangle = dlib.rectangle(points[0][0], points[0][1], points[1][0], points[1][1])
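
The excerpt ends once the selection rectangle is built. The remaining steps in a demo like this are starting the tracker on a key press and updating it on every frame while tracking is active; a sketch of that continuation (the key bindings are assumptions):

    key = cv2.waitKey(1) & 0xFF
    # Assumed binding: '1' starts tracking inside the selected rectangle
    if key == ord('1') and len(points) == 2:
        tracker.start_track(frame, dlib_rectangle)
        tracking_state = True
    # Assumed binding: '2' stops tracking
    if key == ord('2'):
        tracking_state = False

    if tracking_state:
        tracker.update(frame)
        pos = tracker.get_position()
        cv2.rectangle(frame,
                      (int(pos.left()), int(pos.top())),
                      (int(pos.right()), int(pos.bottom())),
                      (0, 255, 0), 3)

    cv2.imshow(window_name, frame)
    if key == ord('q'):
        break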