How to use the scenedetect.platform.tqdm function in scenedetect

To help you get started, we’ve selected a few scenedetect examples based on popular ways the library is used in public projects.

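All of the examples below follow the same pattern: scenedetect.platform.tqdm resolves to the tqdm class when the optional tqdm package is installed, and to a falsy placeholder otherwise, so the progress bar is created behind an "if tqdm and ..." guard and is only updated and closed if it exists. Here is a minimal, self-contained sketch of that pattern; the function name and frame count are illustrative and not part of scenedetect:

from scenedetect.platform import tqdm  # the tqdm class if installed, a falsy value otherwise

def process_frames(num_frames, show_progress=True):
    # Only create the bar when tqdm is available and the caller asked for it.
    progress_bar = None
    if tqdm and show_progress:
        progress_bar = tqdm(total=num_frames, unit='frames')
    try:
        for _ in range(num_frames):
            # ... per-frame work would go here ...
            if progress_bar:
                progress_bar.update(1)
    finally:
        if progress_bar:
            progress_bar.close()

process_frames(100)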

Breakthrough / PySceneDetect / scenedetect / video_splitter.py (view on GitHub)
            ' -c / --copy option with the split-video to use mkvmerge, which'
            ' generates less accurate output, but supports multiple input videos.')
        raise NotImplementedError()

    arg_override = arg_override.replace('\\"', '"')

    ret_val = None
    arg_override = arg_override.split(' ')
    filename_template = Template(output_file_template)
    scene_num_format = '%0'
    scene_num_format += str(max(3, math.floor(math.log(len(scene_list), 10)) + 1)) + 'd'

    try:
        progress_bar = None
        total_frames = scene_list[-1][1].get_frames() - scene_list[0][0].get_frames()
        if tqdm and not hide_progress:
            progress_bar = tqdm(total=total_frames, unit='frame', miniters=1)
        processing_start_time = time.time()
        for i, (start_time, end_time) in enumerate(scene_list):
            duration = (end_time - start_time)
            # Fix FFmpeg start timecode frame shift.
            start_time -= 1
            call_list = ['ffmpeg']
            if suppress_output:
                call_list += ['-v', 'quiet']
            elif i > 0:
                # Only show ffmpeg output for the first call, which will display any
                # errors if it fails, and then break the loop. We only show error messages
                # for the remaining calls.
                call_list += ['-v', 'error']
            call_list += [
                '-y',
                '-ss',
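
The excerpt above is cut off before the ffmpeg command is assembled and run. As a rough sketch of the overall shape, and not the actual split_video_ffmpeg implementation (the ffmpeg arguments below are deliberately simplified), each scene gets its own ffmpeg call, after which the frame-based progress bar advances by that scene's duration:

import subprocess

from scenedetect.platform import tqdm

def split_scenes_sketch(input_path, scene_list, hide_progress=False):
    # scene_list is a list of (start, end) FrameTimecode pairs.
    total_frames = scene_list[-1][1].get_frames() - scene_list[0][0].get_frames()
    progress_bar = None
    if tqdm and not hide_progress:
        progress_bar = tqdm(total=total_frames, unit='frame')
    for i, (start_time, end_time) in enumerate(scene_list):
        duration = end_time - start_time
        call_list = [
            'ffmpeg', '-v', 'error', '-y',
            '-ss', str(start_time.get_seconds()),
            '-i', input_path,
            '-t', str(duration.get_seconds()),
            'Scene-%03d.mp4' % (i + 1)]
        if subprocess.call(call_list) != 0:
            break
        if progress_bar:
            progress_bar.update(duration.get_frames())
    if progress_bar:
        progress_bar.close()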
Breakthrough / PySceneDetect / scenedetect / scene_manager.py (view on GitHub)
        if isinstance(end_time, FrameTimecode):
            end_frame = end_time.get_frames()
        elif end_time is not None:
            end_frame = int(end_time)

        if end_frame is not None:
            total_frames = end_frame

        if start_frame is not None and not isinstance(start_time, FrameTimecode):
            total_frames -= start_frame

        if total_frames < 0:
            total_frames = 0

        progress_bar = None
        if tqdm and show_progress:
            progress_bar = tqdm(
                total=total_frames, unit='frames')
        try:

            while True:
                if end_frame is not None and curr_frame >= end_frame:
                    break
                # We don't compensate for frame_skip here as the frame_skip option
                # is not allowed when using a StatsManager - thus, processing is
                # *always* required for *all* frames when frame_skip > 0.
                if (self._is_processing_required(self._num_frames + start_frame)
                        or self._is_processing_required(self._num_frames + start_frame + 1)):
                    ret_val, frame_im = frame_source.read()
                else:
                    ret_val = frame_source.grab()
                    frame_im = None
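
In the detection loop above, the bar is sized in frames and, in the part of the method not shown here, advanced once per decoded frame. The same read-loop pattern works outside of scenedetect as well; the sketch below applies it to a plain OpenCV capture instead of scenedetect's VideoManager:

import cv2

from scenedetect.platform import tqdm

def count_frames(video_path, show_progress=True):
    cap = cv2.VideoCapture(video_path)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    progress_bar = None
    if tqdm and show_progress:
        progress_bar = tqdm(total=total_frames, unit='frames')
    num_frames = 0
    try:
        while True:
            ret_val, frame_im = cap.read()
            if not ret_val:
                break
            num_frames += 1
            if progress_bar:
                progress_bar.update(1)
    finally:
        if progress_bar:
            progress_bar.close()
        cap.release()
    return num_frames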
Breakthrough / PySceneDetect / scenedetect / cli / context.py (view on GitHub)
        imwrite_param = []
        if self.image_param is not None:
            imwrite_param = [self.imwrite_params[self.image_extension], self.image_param]

        # Reset video manager and downscale factor.
        self.video_manager.release()
        self.video_manager.reset()
        self.video_manager.set_downscale_factor(1)
        self.video_manager.start()

        # Setup flags and init progress bar if available.
        completed = True
        logging.info('Generating output images (%d per scene)...', self.num_images)
        progress_bar = None
        if tqdm and not self.quiet_mode:
            progress_bar = tqdm(
                total=len(scene_list) * self.num_images, unit='images')

        filename_template = Template(image_name_template)


        scene_num_format = '%0'
        scene_num_format += str(max(3, math.floor(math.log(len(scene_list), 10)) + 1)) + 'd'
        image_num_format = '%0'
        image_num_format += str(math.floor(math.log(self.num_images, 10)) + 2) + 'd'

        timecode_list = dict()

        fps = scene_list[0][0].framerate

        timecode_list = [
            [
Breakthrough / PySceneDetect / scenedetect / cli / context.py (view on GitHub)
        imwrite_param = []
        if self.image_param is not None:
            imwrite_param = [self.imwrite_params[self.image_extension], self.image_param]

        # Reset video manager and downscale factor.
        self.video_manager.release()
        self.video_manager.reset()
        self.video_manager.set_downscale_factor(1)
        self.video_manager.start()

        # Setup flags and init progress bar if available.
        completed = True
        logging.info('Generating output images (%d per scene)...', self.num_images)
        progress_bar = None
        if tqdm and not self.quiet_mode:
            progress_bar = tqdm(
                total=len(scene_list) * self.num_images, unit='images')

        filename_template = Template(image_name_template)


        scene_num_format = '%0'
        scene_num_format += str(max(3, math.floor(math.log(len(scene_list), 10)) + 1)) + 'd'
        image_num_format = '%0'
        image_num_format += str(math.floor(math.log(self.num_images, 10)) + 2) + 'd'

        timecode_list = dict()
        self.image_filenames = dict()

        for i in range(len(scene_list)):
            timecode_list[i] = []
            self.image_filenames[i] = []
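
For image export, both context.py excerpts size the bar as scenes times images-per-scene and, further down in the method (not shown), advance it once per image written. The sketch below reproduces just that counting pattern; write_image is a hypothetical callback standing in for the real seek-and-imwrite logic:

from scenedetect.platform import tqdm

def save_scene_images(scene_list, num_images, write_image, quiet_mode=False):
    # write_image(scene_idx, image_idx) is a hypothetical callback; in the real
    # code this is a seek on the VideoManager followed by cv2.imwrite().
    progress_bar = None
    if tqdm and not quiet_mode:
        progress_bar = tqdm(total=len(scene_list) * num_images, unit='images')
    for i in range(len(scene_list)):
        for j in range(num_images):
            write_image(i, j)
            if progress_bar:
                progress_bar.update(1)
    if progress_bar:
        progress_bar.close()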
Breakthrough / PySceneDetect / scenedetect / scene_manager.py (view on GitHub)
            valid_extensions = str(list(available_extensions.keys()))
            raise RuntimeError(
                'Invalid image extension, must be one of (case-sensitive): %s' %
                valid_extensions)

    # Reset video manager and downscale factor.
    video_manager.release()
    video_manager.reset()
    video_manager.set_downscale_factor(downscale_factor)
    video_manager.start()

    # Setup flags and init progress bar if available.
    completed = True
    progress_bar = None
    if tqdm and show_progress:
        progress_bar = tqdm(
            total=len(scene_list) * num_images, unit='images')

    filename_template = Template(image_name_template)

    scene_num_format = '%0'
    scene_num_format += str(max(3, math.floor(math.log(len(scene_list), 10)) + 1)) + 'd'
    image_num_format = '%0'
    image_num_format += str(math.floor(math.log(num_images, 10)) + 2) + 'd'

    timecode_list = dict()

    for i in range(len(scene_list)):
        timecode_list[i] = []

    if num_images == 1:
        for i, (start_time, end_time) in enumerate(scene_list):
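
Finally, the scene_num_format and image_num_format strings built in several of these excerpts are ordinary printf-style zero-padding patterns sized from the number of scenes or images. A quick illustrative check of the scene_num_format construction:

import math

def zero_pad_format(count, minimum=3):
    # Mirrors the '%0Nd' construction above: at least `minimum` digits,
    # or enough digits to number `count` items.
    return '%0' + str(max(minimum, math.floor(math.log(count, 10)) + 1)) + 'd'

print(zero_pad_format(12))    # '%03d' -> 001 .. 012
print(zero_pad_format(2500))  # '%04d' -> 0001 .. 2500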