How to use the tqdm.tqdm function in tqdm

To help you get started, we’ve selected a few tqdm examples, based on popular ways it is used in public projects.
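
The examples below share a handful of call shapes. As a quick orientation, here is a minimal, self-contained sketch of the most common one: wrapping any iterable in tqdm.tqdm and labelling the bar with desc (the sleep only stands in for real work):

import time

import tqdm

# Wrap any iterable: tqdm renders a progress bar that advances
# once per iteration and infers the total from len() when it can.
for _ in tqdm.tqdm(range(100), desc="processing"):
    time.sleep(0.01)  # placeholder for real work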

github hukkelas / DeepPrivacy / deep_privacy / metrics / fid.py (View on Github)
import multiprocessing

import torch
import tqdm


def preprocess_images(images, use_multiprocessing):
    """Resizes and shifts the dynamic range of images to 0-1
    Args:
        images: np.array, shape: (N, H, W, 3), dtype: float32 between 0-1 or np.uint8
        use_multiprocessing: If multiprocessing should be used to pre-process the images
    Return:
        final_images: torch.tensor, shape: (N, 3, 299, 299), dtype: torch.float32 between 0-1
    """
    if use_multiprocessing:
        with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
            jobs = []
            for im in tqdm.tqdm(images, desc="Starting FID jobs"):
                job = pool.apply_async(preprocess_image, (im,))
                jobs.append(job)
            final_images = torch.zeros(images.shape[0], 3, 299, 299)
            for idx, job in enumerate(tqdm.tqdm(jobs, desc="finishing jobs")):
                im = job.get()
                final_images[idx] = im
    else:
        final_images = torch.zeros((len(images), 3, 299, 299), dtype=torch.float32)
        for idx in range(len(images)):
            im = preprocess_image(images[idx])
            final_images[idx] = im
    assert final_images.shape == (images.shape[0], 3, 299, 299)
    assert final_images.max() <= 1.0
    assert final_images.min() >= 0.0
    assert final_images.dtype == torch.float32
    return final_images
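
A note on the pattern above: the first bar only tracks job submission, which is nearly instant, while the second bar tracks real completion because job.get() blocks on each result. A more compact variant of the same dispatch-and-collect idea, sketched here on the assumption that preprocess_image is picklable at module level, uses Pool.imap so a single bar follows finished work:

import multiprocessing

import tqdm

# Sketch only: imap yields results in submission order as workers
# finish, so one bar tracks completion directly. preprocess_image
# and images are the names from the snippet above.
with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
    results = list(tqdm.tqdm(pool.imap(preprocess_image, images),
                             total=len(images), desc="Preprocessing"))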

github FederatedAI / FATE / research / federated_object_detection_benchmark / model / model_wrapper.py (View on Github)
    def eval(self, dataloader, faster_rcnn, test_num=10000):
        pred_bboxes, pred_labels, pred_scores = list(), list(), list()
        gt_bboxes, gt_labels, gt_difficults = list(), list(), list()
        total_losses = list()
        for ii, (imgs, sizes, gt_bboxes_, gt_labels_, scale, gt_difficults_) \
                in tqdm.tqdm(enumerate(dataloader)):
            img = imgs.cuda().float()
            bbox = gt_bboxes_.cuda()
            label = gt_labels_.cuda()
            sizes = [sizes[0][0].item(), sizes[1][0].item()]
            pred_bboxes_, pred_labels_, pred_scores_ = \
                faster_rcnn.predict(imgs, [sizes])
            losses = self.trainer.forward(img, bbox, label, float(scale))
            total_losses.append(losses.total_loss.item())
            gt_bboxes += list(gt_bboxes_.numpy())
            gt_labels += list(gt_labels_.numpy())
            gt_difficults += list(gt_difficults_.numpy())
            pred_bboxes += pred_bboxes_
            pred_labels += pred_labels_
            pred_scores += pred_scores_
            if ii == test_num:
                break
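
Because tqdm wraps enumerate(dataloader) in this snippet, it cannot call len() on the resulting iterator, so the bar shows a bare counter with no percentage or ETA. Two equivalent fixes, sketched on the assumption that the dataloader implements __len__:

import tqdm

# Option 1: wrap the sized dataloader itself, then enumerate the bar.
for ii, batch in enumerate(tqdm.tqdm(dataloader)):
    ...

# Option 2: keep the original shape and pass the total explicitly.
for ii, batch in tqdm.tqdm(enumerate(dataloader), total=len(dataloader)):
    ...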

github SeuTao / RSNA2019_1st_place_solution / 2DNet / src / prepare_data.py (View on Github)
def prepare_images(imgs_path, subfolder):
    for i in tqdm.tqdm(imgs_path):
        prepare_and_save(i, subfolder)

github FederatedAI / FATE / research / federated_object_detection_benchmark / model / model_wrapper.py (View on Github)
    def train_one_epoch(self):
        """
        Return:
            total_loss: the total loss during training
        """
        pred_bboxes, pred_labels, pred_scores = list(), list(), list()
        gt_bboxes, gt_labels, gt_difficults = list(), list(), list()
        self.trainer.reset_meters()
        for ii, (img, sizes, bbox_, label_, scale, gt_difficults_) in \
                tqdm.tqdm(enumerate(self.dataloader)):
            scale = at.scalar(scale)
            img, bbox, label = img.cuda().float(), bbox_.cuda(), label_.cuda()
            self.trainer.train_step(img, bbox, label, scale)
            if (ii + 1) % self.opt.plot_every == 0:
                sizes = [sizes[0][0].item(), sizes[1][0].item()]
                pred_bboxes_, pred_labels_, pred_scores_ = \
                    self.faster_rcnn.predict(img, [sizes])
                pred_bboxes += pred_bboxes_
                pred_labels += pred_labels_
                pred_scores += pred_scores_
                gt_bboxes += list(bbox_.numpy())
                gt_labels += list(label_.numpy())
                gt_difficults += list(gt_difficults_.numpy())

        return self.trainer.get_meter_data()['total_loss']
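
In a training loop like this one, tqdm can also surface running metrics on the bar itself via set_postfix. A minimal sketch, with loss_value standing in for whatever the trainer actually reports:

import tqdm

pbar = tqdm.tqdm(dataloader, desc="train")
for batch in pbar:
    loss_value = 0.0  # placeholder for the real training step
    pbar.set_postfix(loss=f"{loss_value:.4f}")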

github FederatedAI / FATE / research / federated_object_detection_benchmark / model / model_wrapper.py (View on Github)
    def eval(self, dataloader, yolo, test_num=10000):
        labels = []
        sample_metrics = []  # List of tuples (TP, confs, pred)
        total_losses = list()
        Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
        for batch_i, (_, imgs, targets) in enumerate(tqdm.tqdm(dataloader, desc="Detecting objects")):
            # Extract labels
            labels += targets[:, 1].tolist()
            # Move targets to the evaluation device
            targets = Variable(targets.to(self.device), requires_grad=False)

            imgs = Variable(imgs.type(Tensor), requires_grad=False)
            with torch.no_grad():
                loss, outputs = yolo(imgs, targets)
                outputs = non_max_suppression(outputs, conf_thres=0.5, nms_thres=0.5)
                total_losses.append(loss.item())
            targets = targets.to("cpu")
            # Rescale targets from normalized xywh to absolute xyxy pixels
            targets[:, 2:] = xywh2xyxy(targets[:, 2:])
            targets[:, 2:] *= int(self.model_config['img_size'])
            sample_metrics += get_batch_statistics(outputs, targets, iou_threshold=0.5)
        if len(sample_metrics) > 0:
            true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in zip(*sample_metrics)]
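
The desc argument used above ("Detecting objects") labels the bar. When bars are nested, for example epochs over batches, passing leave=False to the inner bar clears it on completion so the output stays readable. A minimal sketch:

import tqdm

for epoch in tqdm.tqdm(range(3), desc="epochs"):
    for batch in tqdm.tqdm(range(100), desc="batches", leave=False):
        pass  # placeholder for per-batch work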