How to use the tqdm.auto.tqdm.write function in tqdm

To help you get started, we've selected a few tqdm.write examples based on popular ways the function is used in public projects.

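All of the projects below use the same primitive: tqdm.write prints a message without corrupting an active progress bar, by clearing the bar, emitting the line, and redrawing the bar beneath it. A minimal sketch of that behaviour (the loop and message are illustrative):

import time
from tqdm.auto import tqdm

for i in tqdm(range(5), desc='processing'):
    time.sleep(0.1)
    # A bare print() here would interleave with the bar's redraws;
    # tqdm.write() keeps the bar intact below the message.
    tqdm.write('finished step %d' % i)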

github jkoutsikakis / pytorch-wrapper / pytorch_wrapper / tuner.py
def _print_hyper_parameters(hyper_parameters):
    """
    Prints parameters.

    :param hyper_parameters: Dict with the hyper parameters.
    """

    tqdm.write('-' * 80)
    tqdm.write('Hyper-Parameters')
    tqdm.write('-' * 80)
    tqdm.write(pprint.pformat(hyper_parameters))
    tqdm.write('-' * 80)
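tqdm.write also accepts file and end keyword arguments, so a banner like the one above can be routed to another stream. A small sketch, assuming the standard tqdm signature (the message is illustrative):

import sys
from tqdm.auto import tqdm

# Route the message to stderr instead of stdout; any active bar is still preserved.
tqdm.write('warning: tuning run restarted', file=sys.stderr)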
github AnyBody-Research-Group / AnyPyTools / anypytools / abcutils.py
            if self.cached_tasklist and self.cached_arg_hash == arg_hash:
                tasklist = self.cached_tasklist
            else:
                self.cached_arg_hash = arg_hash
                tasklist = list(
                    _Task.from_macrofolderlist(macrolist, folderlist, logfile)
                )
        else:
            raise ValueError("Nothing to process for " + str(macrolist))

        # Start the scheduler
        try:
            with tqdm(total=len(tasklist), disable=self.silent) as pbar:
                for task in self._schedule_processes(tasklist):
                    if task.has_error() and not self.silent:
                        tqdm.write(task_summery(task))
                        if hasattr(pbar, "container"):
                            pbar.container.children[0].bar_style = "danger"
                    pbar.update()
        except KeyboardInterrupt as e:
            tqdm.write("KeyboardInterrupt: User aborted")
            time.sleep(1)
        finally:
            if not self.silent:
                tqdm.write(tasklist_summery(tasklist))

        self.cleanup_logfiles(tasklist)
        # Cache the processed tasklist for restarting later
        self.cached_tasklist = tasklist
        # self.summery.final_summery(process_time, tasklist)
        task_output = [task.get_output() for task in tasklist]
        return AnyPyProcessOutputList(task_output)
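Two details in this snippet are worth noting: tqdm.write(task_summery(task)) reports a failed task above the bar without breaking it, and the hasattr(pbar, "container") guard exists because tqdm.auto only produces a widget-backed bar (whose container holds ipywidgets children) inside a notebook; in a terminal the attribute is absent. A reduced sketch of the same pattern with an illustrative task loop (the child index of the bar widget can vary between tqdm versions):

from tqdm.auto import tqdm

def run_all(tasks, silent=False):
    with tqdm(total=len(tasks), disable=silent) as pbar:
        for task in tasks:
            if not task() and not silent:  # each task is a callable returning True on success
                tqdm.write('task failed')
                if hasattr(pbar, 'container'):
                    # Notebook only: colour the widget bar red.
                    pbar.container.children[0].bar_style = 'danger'
            pbar.update()

run_all([lambda: True, lambda: False])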
github jkoutsikakis / pytorch-wrapper / pytorch_wrapper / system.py
            for callback in self.callbacks:
                callback.on_evaluation_start(self.training_context)

            current_results = {}
            for current_dataset_name in self.evaluation_data_loaders:
                auto_tqdm.write(current_dataset_name)
                current_dataset_results = self.training_context['system'].evaluate(
                    self.evaluation_data_loaders[current_dataset_name],
                    self.evaluators,
                    self.batch_input_key,
                    self.training_context['_verbose']
                )
                current_results[current_dataset_name] = current_dataset_results
                for evaluator_name in self.evaluators:
                    auto_tqdm.write(str(current_results[current_dataset_name][evaluator_name]))

            self.training_context['_results_history'].append(current_results)

            for callback in self.callbacks:
                callback.on_evaluation_end(self.training_context)
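auto_tqdm here is presumably the alias from tqdm.auto import tqdm as auto_tqdm, which resolves to the notebook widget bar under Jupyter and the plain console bar otherwise, so the write calls are safe in both environments. A reduced sketch of the same reporting loop, with illustrative results standing in for the evaluators:

from tqdm.auto import tqdm as auto_tqdm

# Illustrative stand-in for current_results.
results = {'dev': {'accuracy': 0.91}, 'test': {'accuracy': 0.89}}
for dataset_name, dataset_results in results.items():
    auto_tqdm.write(dataset_name)
    for score in dataset_results.values():
        auto_tqdm.write(str(score))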
github jkoutsikakis / pytorch-wrapper / pytorch_wrapper / system.py
def _train_epoch(self):
    """
    Trains the model for a single epoch.
    """

    self.training_context['_current_epoch'] += 1

    self.training_context['system'].model.train(True)

    for callback in self.callbacks:
        callback.on_epoch_start(self.training_context)

    if self.training_context['_verbose']:
        pre_time = time.time()
        auto_tqdm.write('-' * 80)
        auto_tqdm.write('')
        auto_tqdm.write('Epoch: %d' % (self.training_context['_current_epoch']))
        auto_tqdm.write('')
        auto_tqdm.write('Training...')
        auto_tqdm.write('')

        pbar = auto_tqdm(total=len(self.train_data_loader), ncols=NCOLS)

    cum_loss = 0
    self.training_context['optimizer'].zero_grad()

    for i, batch in enumerate(self.train_data_loader):
        perform_opt_step = (i % self.gradient_accumulation_steps == 0) or (i == (len(self.train_data_loader) - 1))
        cum_loss += self._train_batch(batch, perform_opt_step)

        if self.training_context['_verbose']:
            train_loss = cum_loss / (i + 1)
            pbar.update(1)
            pbar.set_postfix(ordered_dict=OrderedDict([('loss', '%5.4f' % train_loss)]))

    for callback in self.callbacks:
        callback.on_epoch_end(self.training_context)

    if self.training_context['_verbose']:
        pbar.close()
        auto_tqdm.write('Time elapsed: %d' % (time.time() - pre_time))
        auto_tqdm.write('')
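The epoch loop drives two complementary channels: pbar.set_postfix repaints the live bar line with the running loss, while auto_tqdm.write emits permanent lines (the epoch banner and the elapsed time) that scroll above the bar. A condensed, self-contained sketch of that split, with a simulated loss standing in for _train_batch:

import time
from collections import OrderedDict
from tqdm.auto import tqdm as auto_tqdm

pre_time = time.time()
auto_tqdm.write('Training...')
pbar = auto_tqdm(total=100)
cum_loss = 0.0
for i in range(100):
    time.sleep(0.01)               # simulated work
    cum_loss += 0.5 / (i + 1)      # simulated batch loss
    pbar.update(1)
    # Live status, overwritten on every iteration.
    pbar.set_postfix(ordered_dict=OrderedDict([('loss', '%5.4f' % (cum_loss / (i + 1)))]))
pbar.close()
# Permanent line, printed once the bar is closed.
auto_tqdm.write('Time elapsed: %d' % (time.time() - pre_time))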