How to use the fastprogress.fastprogress.MAX_COLS attribute in fastprogress

To help you get started, we’ve selected a few fastprogress examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github fastai / fastai_dev / dev / examples / train_imagenette.py View on Github external
from fastai2.basics import *
from fastai2.vision.all import *
from fastai2.callback.all import *
from fastai2.distributed import *
from fastprogress import fastprogress
from torchvision.models import *
from fastai2.vision.models.xresnet import *

# Let cudnn autotune convolution algorithms for the fixed input sizes used
# during training — faster after the first few iterations.
torch.backends.cudnn.benchmark = True
# Cap the fastprogress progress-bar width so output stays readable in
# narrow terminals and captured logs.
fastprogress.MAX_COLS = 80

def get_dbunch(size, woof, bs, workers=None):
    """Build a DataBunch for Imagenette/Imagewoof at the requested image size.

    Picks the dataset variant (160px/320px/full) that best matches `size`,
    downloads it if needed, and assembles a DataBlock with a train/val split
    by grandparent folder, random resized crops and horizontal flips.

    size: target image side in pixels; woof: use Imagewoof instead of
    Imagenette; bs: batch size; workers: dataloader worker count (defaults
    to min(8, cpus per GPU)).
    """
    if size <= 128:
        url = URLs.IMAGEWOOF_160 if woof else URLs.IMAGENETTE_160
    elif size <= 224:
        url = URLs.IMAGEWOOF_320 if woof else URLs.IMAGENETTE_320
    else:
        url = URLs.IMAGEWOOF if woof else URLs.IMAGENETTE
    source = untar_data(url)

    # Split the CPU budget evenly across distributed processes.
    if workers is None:
        gpus = num_distrib() or 1
        workers = min(8, num_cpus() // gpus)

    block = DataBlock(
        blocks=(ImageBlock, CategoryBlock),
        splitter=GrandparentSplitter(valid_name='val'),
        get_items=get_image_files,
        get_y=parent_label,
    )

    item_tfms = [RandomResizedCrop(size, min_scale=0.35), FlipItem(0.5)]
    return block.databunch(source, path=source, item_tfms=item_tfms,
                           bs=bs, num_workers=workers)
github fastai / fastai / examples / train_imagenette.py View on Github external
from fastai.script import *
from fastai.vision import *
from fastai.callbacks import *
from fastai.distributed import *
from fastprogress import fastprogress
from torchvision.models import *
from fastai.vision.models.xresnet import *
from fastai.vision.models.presnet import *

# Let cudnn autotune convolution algorithms for the fixed input sizes used
# during training — faster after the first few iterations.
torch.backends.cudnn.benchmark = True
# Cap the fastprogress progress-bar width so output stays readable in
# narrow terminals and captured logs.
fastprogress.MAX_COLS = 80

def get_data(size, woof, bs, workers=None):
    """Return a normalized ImageDataBunch for Imagenette/Imagewoof.

    Chooses the dataset variant (160px/320px/full) matching `size`, downloads
    it if needed, splits train/val by folder, applies horizontal flips,
    presizes with random-resized crops and normalizes with ImageNet stats.

    size: target image side; woof: Imagewoof instead of Imagenette;
    bs: batch size; workers: dataloader workers (defaults to
    min(8, cpus per GPU)).
    """
    if size <= 128:
        url = URLs.IMAGEWOOF_160 if woof else URLs.IMAGENETTE_160
    elif size <= 192:
        url = URLs.IMAGEWOOF_320 if woof else URLs.IMAGENETTE_320
    else:
        url = URLs.IMAGEWOOF if woof else URLs.IMAGENETTE
    path = untar_data(url)

    # Split the CPU budget evenly across distributed processes.
    if workers is None:
        gpus = num_distrib() or 1
        workers = min(8, num_cpus() // gpus)

    src = ImageList.from_folder(path).split_by_folder(valid='val')
    labeled = src.label_from_folder().transform(([flip_lr(p=0.5)], []), size=size)
    bunch = labeled.databunch(bs=bs, num_workers=workers).presize(size, scale=(0.35, 1))
    return bunch.normalize(imagenet_stats)
github fastai / fastai / examples / train_imagenette_adv.py View on Github external
from fastai.script import *
from fastai.vision import *
from fastai.callbacks import *
from fastai.distributed import *
from fastai.callbacks.tracker import *
# Let cudnn autotune convolution algorithms for the fixed input sizes used
# during training — faster after the first few iterations.
torch.backends.cudnn.benchmark = True
import time
from fastprogress import fastprogress
from fastai.general_optimizer import *

# Cap the fastprogress progress-bar width so output stays readable in
# narrow terminals and captured logs.
fastprogress.MAX_COLS = 80

def get_data(size, woof, bs, workers=None, use_lighting=False):
    path = Path('/mnt/fe2_disk')
    if   size<=128: path = path/('imagewoof-160' if woof else 'imagenette-160')
    elif size<=192: path = path/('imagewoof-320' if woof else 'imagenette-320')
    else          : path = path/('imagewoof'     if woof else 'imagenette'    )

    n_gpus = num_distrib() or 1
    if workers is None: workers = min(8, num_cpus()//n_gpus)

    tfms = [flip_lr(p=0.5)]
    if use_lighting:
        tfms += [brightness(change=(0.4,0.6)), contrast(scale=(0.7,1.3))]
    return (ImageList.from_folder(path).split_by_folder(valid='val')
            .label_from_folder().transform((tfms, []), size=size)
            .databunch(bs=bs, num_workers=workers)