How to use the esper.pose_detect.pose_detect function in esper

To help you get started, we’ve selected a few examples showing how esper.pose_detect.pose_detect is used in public projects.

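As a quick orientation before the full example: judging from the calls below, pose_detect takes a list of videos plus a parallel list of frame-number lists, and returns one list of pose results per video (the snippets index the result as all_poses[0]). The sketch below only illustrates that call pattern; load_videos and the placeholder frame numbers are hypothetical, not part of the esper API.

from esper.pose_detect import pose_detect

# Hypothetical setup, for illustration only: `videos` stands in for whatever
# video objects your project already has, and `frames_per_video` lists the
# frame numbers to run pose detection on for each corresponding video.
videos = load_videos()                # hypothetical helper, not part of esper
frames_per_video = [[0, 8, 16, 24]]   # one list of frame numbers per video

# The examples below suggest the result is parallel to the inputs:
# all_poses[i] holds the pose results for the frames requested in videos[i].
all_poses = pose_detect(videos, frames_per_video)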

Example from scanner-research/esper-tv, app/esper/animatedness.py (view on GitHub):
            # (snippet begins mid-statement: this comprehension is the tail of an
            #  expression that builds matching_shots and matching_faces)
            for (vid_shots, vid_faces, vid_indices) in zip(stitched_shots, shot_faces, indices)
        ])
        log.debug('shots: {} --> {}'.format(len(stitched_shots[0]), len(matching_shots[0])))

    with Timer('Computing sparse poses to find shots with hands in view'):
        pose_frame_per_shot, matching_shots, matching_faces = unzip([
            unzip(
                sorted(
                    [(face.person.frame.number, shot, face)
                     for (face, shot) in zip(vid_faces, vid_shots)],
                    key=itemgetter(0)))
            for (vid_faces, vid_shots) in zip(matching_faces, matching_shots)
        ])

    with Timer('Computing sparse poses'):
        all_poses = pose_detect(videos, pose_frame_per_shot)
        assert (len(all_poses[0]) == len(matching_faces[0]))
        matching_poses = match_poses_to_faces(all_poses, matching_faces)
        assert (len(matching_poses[0]) == len(matching_faces[0]))

    with Timer('Filtering invalid poses'):
        filtered_poses, indices = filter_invalid_poses(matching_poses)
        filtered_shots = [[vid_shots[i] for i in vid_indices]
                          for vid_shots, vid_indices in zip(matching_shots, indices)]
        log.debug('shots: {} --> {}'.format(len(matching_poses[0]), len(filtered_poses[0])))

    with Timer('Computing dense poses for animatedness'):
        pose_frames_per_shot = [
            sum([
                list(range(shot.min_frame, shot.max_frame + 1, POSE_STRIDE)) for shot in vid_shots
            ], []) for vid_shots in filtered_shots
        ]
        all_dense_poses = pose_detect(videos, pose_frames_per_shot)

    with Timer('Tracking poses'):
        all_tracks = pose_track(videos, filtered_shots, filtered_poses, all_dense_poses)

    for video, vid_tracks in zip(videos, all_tracks):
        scores = [(track.id, animated_score(track)) for track in vid_tracks]
        print(sorted(scores, key=itemgetter(1)))
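Taken together, the example runs sparse pose detection to find shots with hands in view, filters out invalid poses, re-runs pose_detect densely (every POSE_STRIDE frames across each surviving shot), tracks the resulting poses, and finally ranks each track by its animatedness score.

One helper worth noting is unzip, which the snippet uses throughout but which is not in the Python standard library. A minimal stand-in consistent with how the code above uses it (transposing a list of tuples into a tuple of lists) could look like this; the real esper implementation may differ:

def unzip(pairs):
    # Transpose [(a1, b1, ...), (a2, b2, ...), ...]
    # into ([a1, a2, ...], [b1, b2, ...], ...).
    return tuple(map(list, zip(*pairs)))

For example, unzip([(1, 'a'), (2, 'b')]) yields ([1, 2], ['a', 'b']), which matches the multi-target unpacking seen in lines like pose_frame_per_shot, matching_shots, matching_faces = unzip([...]).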