How to use the tqdm.tqdm.monitor_interval attribute in tqdm

To help you get started, we’ve selected a few tqdm examples based on popular ways it is used in public projects.


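Every snippet below shares the same one-line pattern: assign tqdm.monitor_interval = 0 at module level, before any progress bar is created. This disables tqdm's background monitor thread, which has been reported to deadlock when an exception is raised inside a wrapped iterator (tqdm issues #469 and #481, cited in the snippets themselves). A minimal, self-contained sketch of the pattern:

from tqdm import tqdm

# Workaround for https://github.com/tqdm/tqdm/issues/469 (also #481):
# disable the monitor thread before any bar exists, so an exception
# escaping the wrapped iterator cannot deadlock against it.
tqdm.monitor_interval = 0

for item in tqdm(range(1000000), desc="Processing"):
    pass  # real work goes here
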
github haotianteng / Chiron / chiron / utils / extract_sig_ref.py View on Github
    os.makedirs(FLAGS.output_dir)
    raw_folder = os.path.abspath(os.path.join(out_folder, 'raw'))
    ref_folder = os.path.abspath(os.path.join(out_folder, 'reference'))
    log_folder = os.path.abspath(os.path.join(out_folder, 'log'))
    if not os.path.isdir(raw_folder):
        os.mkdir(raw_folder)
    if not os.path.isdir(ref_folder):
        os.mkdir(ref_folder)
    if not os.path.isdir(log_folder):
        os.mkdir(log_folder)
    FLAGS.raw_folder = raw_folder
    FLAGS.ref_folder = ref_folder
    FLAGS.log_folder = log_folder
    set_logger(os.path.join(FLAGS.log_folder,'extract.log'))
    FLAGS.count = 0
    tqdm.monitor_interval = 0
    if FLAGS.threads == 0:
        FLAGS.threads = cpu_count()
    pool = Pool(FLAGS.threads)
    if FLAGS.polya is not None:
        FLAGS.polya_pair = {}
        with open(FLAGS.polya,'r') as f:
            for line in f:
                split_line = line.split(',')
                FLAGS.polya_pair[(os.path.basename(split_line[0]),split_line[1])] = int(split_line[2])
    else:
        FLAGS.polya_pair = None
    if FLAGS.recursive:
        dir_list = os.walk(root_folder)
    else:
        dir_list = [root_folder]
    for dir_tuple in tqdm(dir_list, desc="Subdirectory processing:", position=0):
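
The same pattern generalizes to any worker pool. A minimal sketch in the same spirit (the process function and items are illustrative stand-ins, not Chiron code):

from multiprocessing import Pool, cpu_count
from tqdm import tqdm

tqdm.monitor_interval = 0  # disable the monitor thread, as above

def process(n):
    # Stand-in for per-file work such as signal extraction.
    return n * n

if __name__ == "__main__":
    items = list(range(1000))
    with Pool(cpu_count()) as pool:
        # imap_unordered yields results as workers finish, so the bar
        # tracks real progress; total= is required because the result
        # iterator has no len().
        for _ in tqdm(pool.imap_unordered(process, items),
                      total=len(items), desc="Extracting"):
            pass
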
github ARTFL-Project / text-pair / lib / textpair / generate_ngrams.py View on Github
import configparser
import json
import os
import sys
from collections import defaultdict
from glob import glob
from math import floor

from text_preprocessing import PreProcessor
from tqdm import tqdm

from mmh3 import hash as hash32

# https://github.com/tqdm/tqdm/issues/481
tqdm.monitor_interval = 0
PHILO_TEXT_OBJECT_LEVELS = {"doc": 1, "div1": 2, "div2": 3, "div3": 4, "para": 5, "sent": 6, "word": 7}


class Ngrams:
    """Generate Ngrams"""

    def __init__(
        self,
        text_object_level="doc",
        ngram=3,
        gap=0,
        stemmer=True,
        lemmatizer="",
        stopwords=None,
        numbers=False,
        language="french",
github alan-turing-institute / ThermodynamicAnalyticsToolkit / src / TATi / models / trajectories / trajectory_training.py View on Github
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
###

import logging
import tensorflow as tf
import time

try:
    from tqdm import tqdm # allows progress bar
    tqdm_present = True
    # workaround: otherwise we get deadlock on exceptions,
    # see https://github.com/tqdm/tqdm/issues/469
    tqdm.monitor_interval = 0
except ImportError:
    tqdm_present = False

from TATi.models.trajectories.trajectory_base import TrajectoryBase

class TrajectoryTraining(TrajectoryBase):
    """Refines the Trajectory class to perform a training trajectory."""
    def __init__(self, trajectory_state):
        super(TrajectoryTraining, self).__init__(trajectory_state)
        self.optimizer = None

    def init_accumulator(self):
        self._init_accumulator(self.state.FLAGS.optimizer)

    def _print_parameters(self, session, feed_dict):
        for walker_index in range(self.state.FLAGS.number_walkers):
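
The try/except guard above keeps tqdm strictly optional. A minimal sketch of how such a tqdm_present flag is typically consumed downstream (the loop is illustrative, not TATi code):

try:
    from tqdm import tqdm
    tqdm_present = True
    tqdm.monitor_interval = 0  # deadlock workaround, tqdm issue #469
except ImportError:
    tqdm_present = False

steps = range(100)
# Wrap the iterable only when tqdm is installed; otherwise fall back
# to the plain iterable so the loop body stays unchanged.
for step in tqdm(steps) if tqdm_present else steps:
    pass  # one training step per iteration
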
github allenai / allennlp / allennlp / common / tqdm.py View on Github
try:
    SHELL = str(type(get_ipython()))  # type:ignore # noqa: F821
except:  # noqa: E722
    SHELL = ""

if "zmqshell.ZMQInteractiveShell" in SHELL:
    from tqdm import tqdm_notebook as _tqdm
else:
    from tqdm import tqdm as _tqdm

# This is necessary to stop tqdm from hanging
# when exceptions are raised inside iterators.
# It should have been fixed in 4.2.1, but it still
# occurs.
# TODO(Mark): Remove this once tqdm cleans up after itself properly.
# https://github.com/tqdm/tqdm/issues/469
_tqdm.monitor_interval = 0


class Tqdm:
    # These defaults are the same as the argument defaults in tqdm.
    default_mininterval: float = 0.1

    @staticmethod
    def set_default_mininterval(value: float) -> None:
        Tqdm.default_mininterval = value

    @staticmethod
    def set_slower_interval(use_slower_interval: bool) -> None:
        """
        If ``use_slower_interval`` is ``True``, we will dramatically slow down ``tqdm's`` default
        output rate.  ``tqdm's`` default output rate is great for interactively watching progress,
        but it is not great for log files.  You might want to set this if you are primarily going
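
Assuming the Tqdm wrapper above is importable, slowing the refresh rate for log-file output takes a single call before any bar is created; this sketch uses only the set_default_mininterval method shown in the snippet:

# Refresh at most once every 10 seconds, which keeps redirected log
# files from filling up with carriage-return progress updates.
Tqdm.set_default_mininterval(10.0)
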
github BloodAxe / segmentation-networks-benchmark / torch_train.py View on Github
from lib.metrics import JaccardScore, PixelAccuracy
from lib.models import linknet, unet16, unet11
from lib.models.dilated_linknet import DilatedLinkNet34
from lib.models.duc_hdc import ResNetDUCHDC, ResNetDUC
from lib.models.gcn152 import GCN152, GCN34
from lib.models.linknext import LinkNext
from lib.models.psp_net import PSPNet
from lib.models.squeezenet import SqueezeNet
from lib.models.tiramisu import FCDenseNet67
from lib.models.unet import UNet
from lib.models.unet_abn import UNetABN
from lib.models.zf_unet import ZF_UNET
from lib.train_utils import AverageMeter, PRCurveMeter
from lib.common import count_parameters

tqdm.monitor_interval = 0  # Workaround for https://github.com/tqdm/tqdm/issues/481


def get_dataset(dataset_name, dataset_dir, grayscale, patch_size, keep_in_mem=False):
    dataset_name = dataset_name.lower()

    if dataset_name == 'inria':
        return INRIA(dataset_dir, grayscale, patch_size, keep_in_mem)

    if dataset_name == 'inria-1024':
        if patch_size != 1024:
            raise ValueError('Patch size must be 1024')
        return INRIASliced(dataset_dir, grayscale)

    if dataset_name == 'inria-512':
        if patch_size != 512:
            raise ValueError('Patch size must be 512')
github alan-turing-institute / ThermodynamicAnalyticsToolkit / src / TATi / models / trajectories / trajectory_sampling_hamiltonian.py View on Github
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
###

import logging
import numpy as np
import tensorflow as tf
import time

try:
    from tqdm import tqdm # allows progress bar
    tqdm_present = True
    # workaround: otherwise we get deadlock on exceptions,
    # see https://github.com/tqdm/tqdm/issues/469
    tqdm.monitor_interval = 0
except ImportError:
    tqdm_present = False

from TATi.models.trajectories.trajectory_sampling import TrajectorySampling


class TrajectorySamplingHamiltonian(TrajectorySampling):
    """This implements sampling of a trajectory using Hamiltonian dynamics.
    
    Due to the Metropolis-Hastings criterion it behaves quite differently
    compared to a Langevin dynamics based sampler. Therefore a number
    of extra functions are needed for the book-keeping of all values
    associated with the criterion evaluation.

    Args:
github joeybose / Flexible-Fairness-Constraints / eval_reddit.py View on Github
from torch.nn.init import xavier_normal, xavier_uniform
from torch.distributions import Categorical
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn.metrics import f1_score
from sklearn import preprocessing
import numpy as np
import random
import argparse
import pickle
import json
import logging
import sys, os
import subprocess
from tqdm import tqdm
tqdm.monitor_interval = 0
from utils import *
from preprocess_movie_lens import make_dataset
import joblib
from collections import Counter
import ipdb
sys.path.append('../')
import gc
from collections import OrderedDict
from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn.dummy import DummyClassifier
from model import *
from train_reddit import corrupt_reddit_batch,mask_fairDiscriminators

def optimizer(params, mode, *args, **kwargs):
    if mode == 'SGD':
        opt = optim.SGD(params, *args, momentum=0., **kwargs)
github joeybose / Flexible-Fairness-Constraints / main_movielens.py View on Github
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torch.nn.init import xavier_normal, xavier_uniform
from torch.distributions import Categorical
from sklearn.metrics import precision_recall_fscore_support
import numpy as np
import random
import argparse
import pickle
import json
import logging
import sys, os
import subprocess
from tqdm import tqdm
tqdm.monitor_interval = 0
from utils import *
from preprocess_movie_lens import *
from transD_movielens import *
import joblib
from collections import Counter, OrderedDict
import ipdb
sys.path.append('../')
import gc
from model import *

# ftensor = torch.FloatTensor
ltensor = torch.LongTensor

v2np = lambda v: v.data.cpu().numpy()
USE_SPARSE_EMB = True
github XiSHEN0220 / ArtMiner / feature_learning / train_shuffle.py View on Github
parser.add_argument(
	'--nbImgEpoch', type=int , default = 200, help='how many images for each epoch')
	
parser.add_argument(
	'--batchSize', type=int , default = 4, help='batch size')
	
parser.add_argument(
	'--cuda', action='store_true', help='cuda setting')

parser.add_argument(
	'--shuffle', action='store_true', help='shuffle data or not')

parser.add_argument(
	'--nbSearchImgEpoch', type=int, default = 2000, help='maximum number of searching image in one epoch')

args = parser.parse_args()
tqdm.monitor_interval = 0
print(args)


## Dataset, Minimum dimension, Total patch during the training
imgList = sorted(os.listdir(args.searchDir))
nbPatchTotal = args.nbSearchImgEpoch
imgFeatMin = args.searchRegion + 2 * args.margin + 1 ## Minimum dimension of feature map in an image
iterEpoch = int(args.nbImgEpoch * 4. / args.batchSize) 
msg = '\n\nAlgo Description : \n\n In each Epoch, \n\t1. {:d} {:d}X{:d} features are utilized to search candidate regions; \n\t2. we validate on the outermost part in {:d}X{:d} region; \n\t3. We train on 4 corners in the {:d}X{:d} region for the top {:d} pairs; \n\t4. Batch size is {:d}, thus each epoch we do {:d} update. \n\n'.format(nbPatchTotal, args.searchRegion, args.searchRegion, args.validRegion, args.validRegion, args.trainRegion, args.trainRegion, args.nbImgEpoch, args.batchSize, iterEpoch)
print(msg)


## ImageNet Pre-processing
transform = transforms.Compose([
	transforms.ToTensor(),
	transforms.Normalize(mean = [ 0.485, 0.456, 0.406 ],
github NDAR / nda-tools / nda-validationtool-client.py View on Github
                self.upload = self.upload_queue.get()
                if self.upload == "STOP":
                    self.upload_queue.put("STOP")
                    self.shutdown_flag.set()
                    break
                file_id, credentials, bucket, key, full_path, file_size = self.upload_config()
                if credentials:
                    session = boto3.session.Session(
                        aws_access_key_id=credentials['access_key'],
                        aws_secret_access_key=credentials['secret_key'],
                        aws_session_token=credentials['session_token'],
                        region_name='us-east-1'
                    )
                    s3 = session.client('s3')
                    s3_transfer = S3Transfer(s3)
                    tqdm.monitor_interval = 0
                    s3_transfer.upload_file(full_path, bucket, key,
                                            callback=self.UpdateProgress(self.progress_queue)
                                            )
                    api_request(self, "PUT",
                                "/".join([self.api, self.submission_id, "files", file_id])
                                + "?submissionFileStatus=Complete")
                    self.progress_queue.put(None)
                else:
                    print('There was an error uploading {} after {} retry attempts'.format(full_path,
                                                                                           self.upload_tries))
                    continue
                self.upload_tries = 0
                self.upload = None
                self.upload_queue.task_done()
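
For comparison, tqdm can also drive the upload progress directly: boto3's upload_file accepts a Callback that is invoked with the number of bytes transferred since the previous call, which matches tqdm's update contract. A minimal sketch (the bucket and key are placeholders, not nda-tools code):

import os
import boto3
from tqdm import tqdm

tqdm.monitor_interval = 0  # same workaround as in the snippet above

def upload_with_progress(s3_client, path, bucket, key):
    size = os.path.getsize(path)
    with tqdm(total=size, unit="B", unit_scale=True, desc=key) as bar:
        # boto3 calls Callback with the incremental byte count, so
        # bar.update can be passed in directly.
        s3_client.upload_file(path, bucket, key, Callback=bar.update)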