# Use GPUs if available.
if torch.cuda.is_available():
    model.cuda()
    model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
    cudnn.benchmark = True  # let cuDNN auto-tune convolution algorithms for fixed-size inputs
# Define loss function and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(),
                      lr=args.lr,
                      momentum=args.momentum,
                      nesterov=args.nesterov,
                      weight_decay=args.weight_decay)
log_dir = 'logs/' + datetime.now().strftime('%B%d %H:%M:%S')
train_writer = SummaryWriter(os.path.join(log_dir, 'train'))
test_writer = SummaryWriter(os.path.join(log_dir, 'test'))
# Save the argparse command line to a file.
with open(os.path.join(log_dir, 'commandline_args.txt'), 'w') as f:
    f.write('\n'.join(sys.argv[1:]))
best_acc = 0  # best test accuracy
for epoch in range(start_epoch, args.epochs):
    # Learning rate schedule.
    lr = adjust_learning_rate(optimizer, epoch + 1)
    train_writer.add_scalar('lr', lr, epoch)
    # Train for one epoch.
    train(train_loader, model, criterion, optimizer, train_writer, epoch)
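# `adjust_learning_rate` is not defined in this snippet; a minimal sketch of
# one common step-decay rule, assuming decay by 10x every 30 epochs (both the
# factor and the interval are assumptions, not taken from the original):
def adjust_learning_rate(optimizer, epoch):
    lr = args.lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr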
from torch.utils.data import DataLoader
import utils.simul_transforms as simul_transforms
import utils.transforms as expanded_transforms
from config import ckpt_path
from datasets.cityscapes import CityScapes
from datasets.cityscapes.config import num_classes, ignored_label
from datasets.cityscapes.utils import colorize_mask
from models import FCN8ResNet
from utils.io import rmrf_mkdir
from utils.loss import CrossEntropyLoss2d
from utils.training import calculate_mean_iu
cudnn.benchmark = True
exp_name = 'fcn8resnet_cityscapes224*448'
writer = SummaryWriter('exp/' + exp_name)
pil_to_tensor = standard_transforms.ToTensor()
train_record = {'best_val_loss': 1e20, 'corr_mean_iu': 0, 'corr_epoch': -1}
train_args = {
    'batch_size': 16,
    'epoch_num': 800,  # training is stopped manually once the val loss plateaus, so this is just a large value
    'pretrained_lr': 1e-6,  # lr for the pretrained layers of the model
    'new_lr': 1e-6,  # lr for the newly added layers of the model
    'weight_decay': 5e-4,
    'snapshot': 'epoch_184_loss_0.8953_mean_iu_0.3923_lr_0.00001000.pth',  # empty string means training from scratch; otherwise the snapshot filename
    'print_freq': 50,
    'input_size': (224, 448),  # (height, width)
}
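# 'pretrained_lr' and 'new_lr' imply per-parameter-group learning rates; a
# minimal sketch of the usual wiring (the optimizer choice and the attribute
# names `pretrained` / `new_layers` are assumptions, not from this snippet):
# optimizer = optim.SGD([
#     {'params': net.pretrained.parameters(), 'lr': train_args['pretrained_lr']},
#     {'params': net.new_layers.parameters(), 'lr': train_args['new_lr']},
# ], momentum=0.9, weight_decay=train_args['weight_decay'])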
val_args = {
    'batch_size': 8,
}
from torch.autograd import Variable
from torch.backends import cudnn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
import utils.joint_transforms as joint_transforms
import utils.transforms as extended_transforms
from datasets import cityscapes
from models import *
from utils import check_mkdir, evaluate, AverageMeter, CrossEntropyLoss2d
cudnn.benchmark = True
ckpt_path = '../../ckpt'
exp_name = 'cityscapes-fcn8s (caffe vgg)'
writer = SummaryWriter(os.path.join(ckpt_path, 'exp', exp_name))
args = {
    'train_batch_size': 12,
    'epoch_num': 500,
    'lr': 1e-10,
    'weight_decay': 5e-4,
    'input_size': (256, 512),
    'momentum': 0.99,
    'lr_patience': 100,  # a large patience effectively fixes the lr
    'snapshot': '',  # empty string means no snapshot
    'print_freq': 20,
    'val_batch_size': 16,
    'val_save_to_img_file': False,
    'val_img_sample_rate': 0.05  # randomly sample a fraction of validation results to display
}
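# The ReduceLROnPlateau import above pairs with 'lr_patience'; a minimal
# sketch of the usual wiring (the optimizer and the call site are assumptions,
# not shown in this snippet):
# scheduler = ReduceLROnPlateau(optimizer, 'min', patience=args['lr_patience'], min_lr=1e-10)
# ...once per epoch, after validation:
# scheduler.step(val_loss)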
def __init__(self, logging_dir=None, prefix='val', roc_path=None, class_names=None):
    self.prefix = prefix
    self.roc_path = roc_path
    self.class_names = class_names
    try:
        from tensorboard import SummaryWriter
        self.summary_writer = SummaryWriter(logging_dir)
    except ImportError:
        logging.error('You can install tensorboard via `pip install tensorboard`.')
def __init__(self, logging_dir, prefix=None, layers_list=None):
    self.prefix = prefix
    self.layers_list = layers_list
    try:
        from tensorboard import SummaryWriter
        self.summary_writer = SummaryWriter(logging_dir)
    except ImportError:
        logging.error('You can install tensorboard via `pip install tensorboard`.')
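# Note for both `__init__` variants above: on ImportError, `self.summary_writer`
# is never assigned, so any later use raises AttributeError. A minimal guard
# (an assumption, not in the original) is to set it to None in the except
# branch and check before each logging call:
# except ImportError:
#     self.summary_writer = None
#     logging.error('You can install tensorboard via `pip install tensorboard`.')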
parser.add_argument('--imgsize', type=int, default=256, help='image size')
parser.add_argument('--batchsize', type=int, default=20, help='batch size')
parser.add_argument('--workers', type=int, default=6, help='number of workers')
parser.add_argument('--nepoch', type=int, default=50, help='number of epochs')
parser.add_argument('--lr', type=float, default=0.002, help='learning rate, default=0.002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam, default=0.5')
parser.add_argument('--outf', type=str, default="filler_pano_pc", help='output folder')
parser.add_argument('--model', type=str, default="", help='model path')
parser.add_argument('--cepoch', type=int, default=0, help='current epoch')
mean = torch.from_numpy(np.array([0.45725039, 0.44777581, 0.4146058]).astype(np.float32))
opt = parser.parse_args()
print(opt)
writer = SummaryWriter(opt.outf + '/runs/' + datetime.now().strftime('%B%d %H:%M:%S'))
try:
    os.makedirs(opt.outf)
except OSError:
    pass
tf = transforms.Compose([
    # pass (height, width) as a single size tuple; a second positional int
    # would be taken as the interpolation mode
    transforms.Scale((opt.imgsize, opt.imgsize * 2)),
    transforms.ToTensor(),
])
mist_tf = transforms.Compose([
    transforms.ToTensor(),
])
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam, default=0.5')
parser.add_argument('--outf', type=str, default="filler_pano_pc_full", help='output folder')
parser.add_argument('--model', type=str, default="", help='model path')
parser.add_argument('--cepoch', type=int, default=0, help='current epoch')
parser.add_argument('--loss', type=str, default="perceptual", help="loss type: 'perceptual' or 'l1'")
parser.add_argument('--init', type=str, default="iden", help='init method')
parser.add_argument('--l1', type=float, default=0, help='weight of the extra l1 loss')
parser.add_argument('--color_coeff', type=float, default=0, help='weight of the color-match loss')
parser.add_argument('--cascade', action='store_true', help='use the cascaded model')
mean = torch.from_numpy(np.array([0.57441127, 0.54226291, 0.50356019]).astype(np.float32))
opt = parser.parse_args()
print(opt)
writer = SummaryWriter(opt.outf + '/runs/' + datetime.now().strftime('%B%d %H:%M:%S'))
try:
    os.makedirs(opt.outf)
except OSError:
    pass
tf = transforms.Compose([
    transforms.ToTensor(),
])
mist_tf = transforms.Compose([
    transforms.ToTensor(),
])
d = PairDataset(root=opt.dataroot, transform=tf, mist_transform=mist_tf)
d_test = PairDataset(root=opt.dataroot, transform=tf, mist_transform=mist_tf, train=False)
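# A minimal sketch of wiring the two datasets into loaders, assuming the usual
# `from torch.utils.data import DataLoader` import and the --batchsize and
# --workers options defined elsewhere in this script:
# dataloader = DataLoader(d, batch_size=opt.batchsize, shuffle=True, num_workers=opt.workers)
# dataloader_test = DataLoader(d_test, batch_size=opt.batchsize, shuffle=False, num_workers=opt.workers)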
import torchvision.transforms as standard_transforms
import torchvision.utils as vutils
from tensorboard import SummaryWriter
from torch import optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import utils.joint_transforms as joint_transforms
import utils.transforms as extended_transforms
from datasets import voc
from models import *
from utils import check_mkdir, evaluate, AverageMeter, CrossEntropyLoss2d
from math import sqrt  # needed for the 'lr' computation below
ckpt_path = '../../ckpt'
exp_name = 'voc-psp_net'
writer = SummaryWriter(os.path.join(ckpt_path, 'exp', exp_name))
args = {
    'train_batch_size': 1,
    'lr': 1e-2 / sqrt(16 / 4),
    'lr_decay': 0.9,
    'max_iter': 3e4,
    'longer_size': 512,
    'crop_size': 473,
    'stride_rate': 2 / 3.,
    'weight_decay': 1e-4,
    'momentum': 0.9,
    'snapshot': '',
    'print_freq': 10,
    'val_save_to_img_file': True,
    'val_img_sample_rate': 0.01,  # randomly sample a fraction of validation results to display
    'val_img_display_size': 384,
}
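# 'lr_decay' and 'max_iter' match the common polynomial ("poly") learning rate
# schedule; a minimal sketch of that rule (an assumption, not shown here):
# lr = args['lr'] * (1 - float(curr_iter) / args['max_iter']) ** args['lr_decay']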
parser.add_argument('--imgsize', type=int, default=256, help='image size')
parser.add_argument('--batchsize', type=int, default=36, help='batch size')
parser.add_argument('--workers', type=int, default=6, help='number of workers')
parser.add_argument('--nepoch', type=int, default=50, help='number of epochs')
parser.add_argument('--lr', type=float, default=0.002, help='learning rate, default=0.002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam, default=0.5')
parser.add_argument('--outf', type=str, default="filler", help='output folder')
parser.add_argument('--model', type=str, default="", help='model path')
mean = torch.from_numpy(np.array([0.45725039, 0.44777581, 0.4146058]).astype(np.float32))
opt = parser.parse_args()
print(opt)
writer = SummaryWriter(opt.outf + '/runs/' + datetime.now().strftime('%B%d %H:%M:%S'))
try:
    os.makedirs(opt.outf)
except OSError:
    pass
if opt.debug:
    d = Places365Dataset(root=opt.dataroot, transform=transforms.Compose([
        vision_utils.RandomScale(opt.imgsize, int(opt.imgsize * 1.5)),
        transforms.RandomCrop(opt.imgsize),
        transforms.ToTensor(),
    ]), train=False)
else:
    d = Places365Dataset(root=opt.dataroot, transform=transforms.Compose([
        vision_utils.RandomScale(opt.imgsize, int(opt.imgsize * 1.5)),
        transforms.RandomCrop(opt.imgsize),
        transforms.ToTensor(),
    ]))