How to use the tqdm.tqdm function in tqdm

To help you get started, we've selected a few tqdm.tqdm examples based on popular ways the library is used in public projects.
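
If you have not used tqdm before, the core idea is simple: wrap any iterable in tqdm() and iterate as usual. Note that some projects write `import tqdm` and call `tqdm.tqdm(...)`, while others use `from tqdm import tqdm`; both appear in the examples below. Here is a minimal sketch first (assuming only that tqdm is installed; the names are illustrative), before the real-world excerpts.

from time import sleep
from tqdm import tqdm

# Wrapping an iterable gives a live progress bar. desc labels the bar, and
# the total is inferred automatically when the iterable has a length.
for item in tqdm(range(100), desc="Processing"):
    sleep(0.01)  # stand-in for real work

# Generators have no length, so pass total explicitly if you want a
# percentage and ETA rather than a bare counter.
records = (str(i) for i in range(1000))
for record in tqdm(records, total=1000, desc="Parsing", unit=" records"):
    pass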


github sfleischer / SingleOrNah / singles_app / api / app.py
def get_stories(self, dst, executor, future_to_item, user, username):
        """Scrapes the user's stories."""
        if self.logged_in and 'story' in self.media_types:
            # Get the user's stories.
            stories = self.fetch_stories(user['id'])

            # Downloads the user's stories and sends them to the executor.
            iter = 0
            for item in tqdm.tqdm(stories, desc='Searching {0} for stories'.format(username), unit=" media",
                                  disable=self.quiet):
                future = executor.submit(self.download, item, dst)
                future_to_item[future] = item

                iter = iter + 1
                if self.maximum != 0 and iter >= self.maximum:
                    break
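
The excerpt above wraps the list of stories with three keyword arguments worth noting: desc labels the bar, unit renames the count suffix (" media" instead of the default "it"), and disable switches the bar off when quiet output was requested. A stripped-down sketch of the same pattern follows; the items and quiet names are hypothetical.

import tqdm

def download_all(items, quiet=False):
    # disable=True turns the bar into a no-op wrapper, so callers that
    # asked for quiet output get no progress noise at all.
    for item in tqdm.tqdm(items, desc="Searching for stories", unit=" media",
                          disable=quiet):
        pass  # submit the real download here

download_all(range(50))
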
github liuziwei7 / aerial-recognition / code / data_ml_functions / dataFunctions.py
executor = ProcessPoolExecutor(max_workers=params.num_workers)
    futures = []
    paramsDict = vars(params)
    keysToKeep = ['image_format', 'target_img_size', 'metadata_length', 'category_names']
    paramsDict = {keepKey: paramsDict[keepKey] for keepKey in keysToKeep}
    
    for currDir in walkDirs:
        isTrain = (currDir == 'train') or (currDir == 'val')
        if isTrain:
            outDir = params.directories['train_data']
        else:
            outDir = params.directories['test_data']

        print('Queuing sequences in: ' + currDir)
        for root, dirs, files in tqdm(os.walk(os.path.join(params.directories['dataset'], currDir))):
            if len(files) > 0:
                slashes = [i for i,ltr in enumerate(root) if ltr == '/']
                        
            for file in files:
                if file.endswith('_rgb.json'): #skip _msrgb images
                    task = partial(_process_file, file, slashes, root, isTrain, outDir, paramsDict)
                    futures.append(executor.submit(task))

    print('Wait for all preprocessing tasks to complete...')
    results = []
    [results.extend(future.result()) for future in tqdm(futures)]
    allTrainFeatures = [np.array(r[0]) for r in results if r[0] is not None]
    
    metadataTrainSum = np.zeros(params.metadata_length)
    for features in allTrainFeatures:
        metadataTrainSum += features
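
Here tqdm tracks a list of concurrent.futures futures: iterating the list and calling result() on each future advances the bar in submission order. A self-contained sketch of the same idea using only the standard library (square and the worker count are illustrative); as_completed is shown as an alternative that advances the bar as work actually finishes.

from concurrent.futures import ProcessPoolExecutor, as_completed
from tqdm import tqdm

def square(x):
    return x * x

if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(square, i) for i in range(100)]

        # Wrapping the futures list, as the project above does, ticks the bar
        # in submission order; as_completed ticks it as results arrive and
        # therefore needs an explicit total.
        results = [f.result()
                   for f in tqdm(as_completed(futures), total=len(futures),
                                 desc="Preprocessing")]
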
github ly015 / intrinsic_flow / scripts / data_generation / create_seg.py
# train a linear regressor, which predict neck point location from left/right shoulder locations
    print('training regressor...')
    pts_dfm = np.array(joint_label.values()) #(N,18,2)
    v = (pts_dfm[:,[1,2,5],:].reshape(-1,6) >= 0).all(axis=1)
    x_train = (pts_dfm[v])[:,[2,5]].reshape(-1,4) #shoulder points
    y_train = (pts_dfm[v])[:,1].reshape(-1,2) #neck points
    reg = RidgeCV(normalize=False)
    reg.fit(x_train, y_train)

    pts_hmr = np.array(joint_label_hmr.values())
    x_test = pts_hmr[:,[2,5],:].reshape(-1,4)
    y_test = reg.predict(x_test).reshape(-1,2)

    # generate adapted joint label
    joint_label_adapt = {}
    for idx, sid in enumerate(tqdm.tqdm(joint_label_hmr.keys())):
        p_h = np.array(joint_label_hmr[sid])
        p_d = np.array(joint_label[sid])
        if (p_h[[2,5],:] >= 0).all():
            p_h[1,:] = y_test[idx]
        
        inv = (p_d < 0).any(axis=1) | (p_h < 0).any(axis=1) | (p_h > 255).any(axis=1) # invalid joint points in joint_dfm will also be marked as invalid in joint_hmr
        p_h[inv,:] = -1
        joint_label_adapt[sid] = p_h.tolist()
    
    io.save_data(joint_label_adapt, fn_out)
github didi / delta / egs / iemocap / emo / v1 / local / python / mocap_data_collect.py
def read_iemocap_mocap():
    for session in sessions:
        path_to_wav = os.path.join(iemocap_path, session, 'dialog', 'wav')
        files2 = os.listdir(path_to_wav)

        files = []
        for f in files2:
            if f.endswith(".wav"):
                if 'perturb' not in f:
                    if f[0] == '.':
                        files.append(f[2:-4])
                    else:
                        files.append(f[:-4])
        print('Collect ', session)
        with Pool(cpu_num) as p:
            r = list(tqdm.tqdm(p.imap(collect, files), total=len(files)))
                        
    sort_key = get_field(data, "id")
    return np.array(data)[np.argsort(sort_key)]
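
The pattern above combines multiprocessing.Pool.imap with tqdm: imap yields results lazily and has no length, so total=len(files) is passed explicitly, and wrapping the whole expression in list() drains the iterator while the bar advances. A minimal, runnable version (collect and the file names are stand-ins):

from multiprocessing import Pool
from tqdm import tqdm

def collect(name):
    return name.upper()  # stand-in for the real feature extraction

if __name__ == "__main__":
    files = ["Ses01F_impro01", "Ses01M_impro02", "Ses02F_impro03"]
    with Pool(2) as p:
        # imap has no __len__, so pass total for a percentage and ETA.
        results = list(tqdm(p.imap(collect, files), total=len(files)))
    print(results)
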
github javiribera / locating-objects-without-bboxes / object-locator / train.py
# Time to do validation?
    if (epoch + 1) % args.val_freq != 0:
        epoch += 1
        continue

    # === VALIDATION ===

    # Set the module in evaluation mode
    model.eval()

    judge = Judge(r=args.radius)
    sum_term1 = 0
    sum_term2 = 0
    sum_term3 = 0
    sum_loss = 0
    iter_val = tqdm(valset_loader,
                    desc=f'Validating Epoch {epoch} ({len(valset)} images)')
    for batch_idx, (imgs, dictionaries) in enumerate(iter_val):

        # Pull info from this batch and move to device
        imgs = imgs.to(device)
        target_locations = [dictt['locations'].to(device)
                            for dictt in dictionaries]
        target_counts = [dictt['count'].to(device)
                        for dictt in dictionaries]
        target_orig_heights = [dictt['orig_height'].to(device)
                               for dictt in dictionaries]
        target_orig_widths = [dictt['orig_width'].to(device)
                              for dictt in dictionaries]

        with torch.no_grad():
            target_counts = torch.stack(target_counts)
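
In this validation loop, tqdm wraps a PyTorch DataLoader and the description is built with an f-string so it carries run-time context (epoch number and dataset size). A minimal sketch assuming PyTorch is installed; the tensors are random placeholders.

import torch
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm

valset = TensorDataset(torch.randn(256, 3), torch.randint(0, 2, (256,)))
valset_loader = DataLoader(valset, batch_size=32)

epoch = 0
# DataLoader defines __len__ (the number of batches), so tqdm shows a full bar.
for imgs, labels in tqdm(valset_loader,
                         desc=f'Validating Epoch {epoch} ({len(valset)} images)'):
    with torch.no_grad():
        pass  # forward pass and metric accumulation go here
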
github Calamari-OCR / calamari / calamari_ocr / ocr / predictor.py
Parameters
        ----------
        datas : list of array_like
            list of images
        progress_bar : bool, optional
            Show or hide a progress bar
        apply_preproc : bool, optional
            Apply the `data_preproc` to the `datas` before they are predicted by the DNN
        Yields
        -------
        PredictionResult
            A single PredictionResult
        """

        if progress_bar:
            out = tqdm(self.network.predict_dataset(input_dataset), desc="Prediction", total=len(input_dataset))
        else:
            out = self.network.predict_dataset(input_dataset)

        for p in out:
            yield PredictionResult(p.decoded, codec=self.codec, text_postproc=self.text_postproc,
                                   out_to_in_trans=self.out_to_in_trans, data_proc_params=p.params,
                                   ground_truth=p.ground_truth)
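
The predictor above builds two branches: with a progress bar (tqdm with a desc and an explicit total, apparently because predict_dataset yields results lazily) or the raw generator. The disable argument can collapse this into a single call; a sketch with hypothetical names:

from tqdm import tqdm

def predict_all(samples, progress_bar=True):
    # disable=not progress_bar is equivalent to the if/else above: when the
    # bar is disabled, tqdm simply passes the iterable through.
    for sample in tqdm(samples, desc="Prediction", total=len(samples),
                       disable=not progress_bar):
        yield sample * 2  # stand-in for the real prediction

print(list(predict_all(list(range(10)), progress_bar=False)))
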
github monologg / JointBERT / predict.py
# Convert input file to TensorDataset
    pad_token_label_id = args.ignore_index
    tokenizer = load_tokenizer(args)
    lines = read_input_file(pred_config)
    dataset = convert_input_file_to_tensor_dataset(lines, pred_config, args, tokenizer, pad_token_label_id)

    # Predict
    sampler = SequentialSampler(dataset)
    data_loader = DataLoader(dataset, sampler=sampler, batch_size=pred_config.batch_size)

    all_slot_label_mask = None
    intent_preds = None
    slot_preds = None

    for batch in tqdm(data_loader, desc="Predicting"):
        batch = tuple(t.to(device) for t in batch)
        with torch.no_grad():
            inputs = {"input_ids": batch[0],
                      "attention_mask": batch[1],
                      "intent_label_ids": None,
                      "slot_labels_ids": None}
            if args.model_type != "distilbert":
                inputs["token_type_ids"] = batch[2]
            outputs = model(**inputs)
            _, (intent_logits, slot_logits) = outputs[:2]

            # Intent Prediction
            if intent_preds is None:
                intent_preds = intent_logits.detach().cpu().numpy()
            else:
                intent_preds = np.append(intent_preds, intent_logits.detach().cpu().numpy(), axis=0)
github albertogaspar / dts / dts / utils / split.py
:param horizon: int
        Forecasting horizon, the number of future steps that have to be forecasted
    :param multivariate_output: if True, the target array will not have shape
        (n_samples, output_sequence_len) but (n_samples, output_sequence_len, n_features)
    :param shuffle: if True shuffle the data on the first axis
    :param other_horizon:
    :return: tuple
        Return two numpy.arrays: the inputs and the targets for the model.
        The inputs have shape (n_samples, input_sequence_len, n_features);
        the targets have shape (n_samples, output_sequence_len).
    """
    if data.ndim == 2:
        data = np.expand_dims(data, 0)
    inputs = []
    targets = []
    for X in tqdm(data):  # for each array of shape (n_samples, n_features)
        n_used_samples = X.shape[0] - horizon - window_size + 1
        for i in range(n_used_samples):
            inputs.append(X[i: i + window_size])
            # TARGET FEATURE SHOULD BE THE FIRST
            if multivariate_output:
                if other_horizon is None:
                    targets.append(
                        X[i + window_size: i + window_size + horizon])
                else:
                    targets.append(
                        X[i + 1: i + window_size + 1])
            else:
                if other_horizon is None:
                    targets.append(
                        X[i + window_size: i + window_size + horizon, 0])
                else:
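
The windowing function above puts the bar only on the outer loop over arrays. If the inner loop is also long, tqdm supports nested bars, and leave=False clears the inner bar each time it finishes. A sketch with synthetic data:

import numpy as np
from tqdm import tqdm

data = np.random.rand(3, 500, 4)   # (n_series, n_samples, n_features)
window_size, horizon = 24, 6

inputs, targets = [], []
for X in tqdm(data, desc="series"):                 # one tick per series
    n_used_samples = X.shape[0] - horizon - window_size + 1
    # leave=False clears the inner bar after each series so the nested
    # bars do not stack up in the terminal.
    for i in tqdm(range(n_used_samples), desc="windows", leave=False):
        inputs.append(X[i:i + window_size])
        targets.append(X[i + window_size:i + window_size + horizon, 0])
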
github coderholic / django-cities / cities / management / commands / cities.py
total = sum(1 for _ in data)

        data = self.get_data('city')

        self.build_country_index()
        self.build_region_index()
        self.build_hierarchy()

        city_index = {}
        for obj in tqdm(City.objects.all(),
                        disable=self.options.get('quiet'),
                        total=City.objects.all().count(),
                        desc="Building city index"):
            city_index[obj.id] = obj

        for item in tqdm(data, disable=self.options.get('quiet'), total=total, desc="Importing districts"):
            if not self.call_hook('district_pre', item):
                continue

            _type = item['featureCode']
            if _type not in district_types:
                continue

            defaults = {
                'name': item['name'],
                'name_std': item['asciiName'],
                'location': Point(float(item['longitude']), float(item['latitude'])),
                'population': int(item['population']),
            }

            if hasattr(District, 'code'):
                defaults['code'] = item['admin3Code']
github Academic-Hammer / SciTSR / scitsr / train.py
def train_epoch(self, epoch, dataset, should_print=False):
        self.model.train()
        loss_list = []
        for index, data in tqdm(enumerate(dataset)):
            torch.cuda.empty_cache()
            self._to_device(data)
            # if index % 10 == 0:
            percent = index / len(dataset) * 100
            if should_print:
                print('[Epoch %d] Train | Data %d (%d%%): loss: | path: %s' % \
                (epoch, index, percent, data.path), ' ' * 20, end='\r')
            # try:
            outputs = self.model(data.nodes, data.edges, data.adj, data.incidence)
            # except Exception as e:
                # print(e, data.path)
            loss = self.criterion(outputs, data.labels)
            loss_list.append(loss.item())

            if should_print:
                print('[Epoch %d] Train | Data %d (%d%%): loss: %.3f | path: %s' % \
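
One detail worth noting in the example above: tqdm(enumerate(dataset)) cannot infer a length, because enumerate objects have no __len__, so the bar shows only a raw counter. Passing total, or nesting the calls the other way around, restores the percentage and ETA. A small sketch:

from tqdm import tqdm

dataset = list(range(200))

# Pass total explicitly when wrapping enumerate...
for index, data in tqdm(enumerate(dataset), total=len(dataset)):
    pass

# ...or wrap the dataset itself and enumerate the result.
for index, data in enumerate(tqdm(dataset)):
    pass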