How to use the dataset.TrainDataset class in dataset

To help you get started, we’ve selected a few dataset.TrainDataset examples, based on popular ways it is used in public projects.
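
Across these projects, TrainDataset is not a shared library API: each repo defines its own TrainDataset as a torch.utils.data.Dataset subclass and feeds it to a torch.utils.data.DataLoader. A minimal sketch of that common pattern follows; all names and shapes here are illustrative only, not taken from any one repo:

import torch
from torch.utils.data import Dataset, DataLoader

class TrainDataset(Dataset):
    """Toy stand-in: real projects index image files under `root` here."""
    def __init__(self, root):
        self.samples = list(range(100))  # placeholder for discovered files

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        # real implementations load and transform an (image, label) pair
        x = torch.randn(3, 32, 32)
        y = torch.tensor(self.samples[idx] % 10)
        return x, y

train_loader = DataLoader(TrainDataset("data/train"),
                          batch_size=16, shuffle=True, drop_last=True)
for images, labels in train_loader:
    print(images.shape, labels.shape)  # torch.Size([16, 3, 32, 32]) torch.Size([16])
    break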

github sunreef / BlindSR / src / manager.py
def init_train_data(self):
        batch_size = TRAINING_BATCH_SIZE
        if self.args.network_type == 'discriminator':
            batch_size = 1

        train_folder = self.args.train_input
        train_dataset = TrainDataset(train_folder)
        self.train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

        valid_folder = self.args.valid_input
        valid_dataset = ValidDataset(valid_folder)
        self.valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size)
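
Two details worth noting in this example: validation gets its own ValidDataset and DataLoader without shuffling, and the discriminator is trained with a batch size of 1 while other network types use TRAINING_BATCH_SIZE.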

github CSAILVision / semantic-segmentation-pytorch / dataset.py
def __init__(self, root_dataset, odgt, opt, batch_per_gpu=1, **kwargs):
        super(TrainDataset, self).__init__(odgt, opt, **kwargs)
        self.root_dataset = root_dataset
        # downsampling rate of the segmentation label
        self.segm_downsampling_rate = opt.segm_downsampling_rate
        self.batch_per_gpu = batch_per_gpu

        # classify images into two classes: 1. h > w and 2. h <= w
        self.batch_record_list = [[], []]

        # override dataset length when training with batch_per_gpu > 1
        self.cur_idx = 0
        self.if_shuffled = False
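
Here TrainDataset extends a shared base class (note the super().__init__ call forwarding odgt and opt) and prepares two batch_record_list buckets so portrait (h > w) and landscape (h <= w) images can be batched separately; cur_idx and if_shuffled support its custom length and shuffling logic when batch_per_gpu > 1.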

github nmhkahn / CARN-pytorch / carn / solver.py
else:
            self.refiner = model(multi_scale=True, 
                                 group=cfg.group)
        
        if cfg.loss_fn in ["MSE"]: 
            self.loss_fn = nn.MSELoss()
        elif cfg.loss_fn in ["L1"]: 
            self.loss_fn = nn.L1Loss()
        elif cfg.loss_fn in ["SmoothL1"]:
            self.loss_fn = nn.SmoothL1Loss()

        self.optim = optim.Adam(
            filter(lambda p: p.requires_grad, self.refiner.parameters()), 
            cfg.lr)
        
        self.train_data = TrainDataset(cfg.train_data_path, 
                                       scale=cfg.scale, 
                                       size=cfg.patch_size)
        self.train_loader = DataLoader(self.train_data,
                                       batch_size=cfg.batch_size,
                                       num_workers=1,
                                       shuffle=True, drop_last=True)
        
        
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.refiner = self.refiner.to(self.device)
        self.loss_fn = self.loss_fn.to(self.device)

        self.cfg = cfg
        self.step = 0
        
        self.writer = SummaryWriter(log_dir=os.path.join("runs", cfg.ckpt_name))
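
This solver wires everything together in its constructor: a patch-based TrainDataset built from a path, a scale factor, and a patch size; a shuffled DataLoader with drop_last=True so every batch holds exactly cfg.batch_size patches; an Adam optimizer over only the trainable parameters; and a SummaryWriter logging under runs/<ckpt_name>.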

github CSAILVision / unifiedparsing / train.py
def create_multi_source_train_data_loader(args):
    training_records = broden_dataset.record_list['train']

    # 0: object, part, scene
    # 1: material
    multi_source_iters = []
    for idx_source in range(len(training_records)):
        dataset = TrainDataset(training_records[idx_source], idx_source, args,
                               batch_per_gpu=args.batch_size_per_gpu)
        loader_object_part_scene = torchdata.DataLoader(
            dataset,
            batch_size=args.num_gpus,  # we have modified data_parallel
            shuffle=False,  # we do not use this param
            collate_fn=user_scattered_collate,
            num_workers=int(args.workers),
            drop_last=True,
            pin_memory=True)
        multi_source_iters.append(iter(loader_object_part_scene))

    # sample from multi source
    nr_record = [len(records) for records in training_records]
    sample_prob = np.asarray(nr_record) / np.sum(nr_record)
    while True:  # TODO(LYC): set random seed.
        source_idx = np.random.choice(len(training_records), 1, p=sample_prob)[0]
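
One DataLoader is built per data source, with batch_size set to the number of GPUs because the project’s modified data_parallel scatters whole per-GPU record lists (hence user_scattered_collate and shuffle=False). Sources are then sampled in proportion to their record counts via sample_prob; the while True loop presumably keeps drawing batches from the chosen source’s iterator indefinitely.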

github SeuTao / kaggle-competition-solutions / CVPR19_iMetCollection_7th_solution / main_apex.py
def make_loader(df: pd.DataFrame, image_transform, name='train') -> DataLoader:
        return DataLoader(
            TrainDataset(train_root, df, debug=args.debug, name=name),
            shuffle=True,
            batch_size=args.batch_size,
            num_workers=16,
        )

github SeuTao / kaggle-competition-solutions / CVPR19_iMetCollection_7th_solution / main-mixup.py
def make_loader(df: pd.DataFrame, image_transform, name='train') -> DataLoader:
        return DataLoader(
            TrainDataset(train_root, df, image_transform, debug=args.debug, name='train'),
            shuffle=True,
            batch_size=args.batch_size,
            num_workers=args.workers,
        )
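
The two iMet helpers differ only slightly: main-mixup.py passes image_transform through to TrainDataset and reads num_workers from args, while main_apex.py drops the transform argument and hardcodes num_workers=16.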

github XiaLiPKU / EMANet / dataset.py
def test_dt():
    train_dt = TrainDataset()
    print('train', len(train_dt))
    for i in range(10):
        img, lbl = train_dt[i]
        print(img.shape, lbl.shape, img.mean(), np.unique(lbl))

    val_dt = ValDataset()
    print('val', len(val_dt))
    for i in range(10):
        img, lbl = val_dt[i]
        print(img.shape, lbl.shape, img.mean(), np.unique(lbl))
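
test_dt is a quick smoke test: it instantiates both datasets, prints their lengths, and for the first ten samples of each checks the array shapes, the mean pixel value, and the set of label ids present.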

(main.py in the same iMet solution defines a make_loader identical to the main_apex.py example above.)

github XiaLiPKU / EMANet / train.py
def __init__(self, dt_split):
        torch.manual_seed(66)
        torch.cuda.manual_seed_all(66)
        torch.cuda.set_device(settings.DEVICE)

        self.log_dir = settings.LOG_DIR
        self.model_dir = settings.MODEL_DIR
        ensure_dir(self.log_dir)
        ensure_dir(self.model_dir)
        logger.info('set log dir as %s' % self.log_dir)
        logger.info('set model dir as %s' % self.model_dir)

        self.step = 1
        self.writer = SummaryWriter(osp.join(self.log_dir, 'train.events'))
        dataset = TrainDataset(split=dt_split)
        self.dataloader = DataLoader(
            dataset, batch_size=settings.BATCH_SIZE, pin_memory=True,
            num_workers=settings.NUM_WORKERS, shuffle=True, drop_last=True)

        self.net = EMANet(settings.N_CLASSES, settings.N_LAYERS).cuda()
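        # (assumption: get_params with keys '1x' and '1y' separates backbone weight
        # tensors from biases, so the bias group below gets no weight decay)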
        self.opt = SGD(
            params=[
                {
                    'params': get_params(self.net, key='1x'),
                    'lr': 1 * settings.LR,
                    'weight_decay': settings.WEIGHT_DECAY,
                },
                {
                    'params': get_params(self.net, key='1y'),
                    'lr': 1 * settings.LR,
                    'weight_decay': 0,