How to use the tensorboard.FileWriter function in tensorboard

To help you get started, we’ve selected a few tensorboard.FileWriter examples, based on popular ways it is used in public projects.

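All of the snippets below use the standalone tensorboard package (imported as "import tensorboard" or "from tensorboard import FileWriter"), not tf.summary.FileWriter from TensorFlow. The typical pattern looks roughly like the sketch below; the summary.scalar helper and the add_summary/flush/close methods are assumed to mirror TensorFlow's writer interface (the a3c.py example further down imports both FileWriter and summary from this package), and the log directory is a placeholder.

import os
from tensorboard import FileWriter, summary  # standalone tensorboard package

logdir = os.path.join('experiments', 'log', 'train')  # hypothetical directory
writer = FileWriter(logdir=logdir, flush_secs=10)     # flush to disk every 10 seconds

for step in range(100):
    loss = 1.0 / (step + 1)                           # placeholder metric
    # summary.scalar builds a Summary protobuf; add_summary records it at a global step
    writer.add_summary(summary.scalar('loss', loss), step)

writer.flush()
writer.close()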

github shijx12 / AR-Tree / age / train.py View on Github
        model.cuda(args.gpu)
    if args.optimizer == 'adam':
        optimizer_class = optim.Adam
    elif args.optimizer == 'adagrad':
        optimizer_class = optim.Adagrad
    elif args.optimizer == 'adadelta':
        optimizer_class = optim.Adadelta
    elif args.optimizer == 'SGD':
        optimizer_class = optim.SGD
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = optimizer_class(params=params, lr=args.lr, weight_decay=args.l2reg)
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='max', factor=0.5, patience=10, verbose=True)
    criterion = nn.CrossEntropyLoss()
    trpack = [model, params, criterion, optimizer]

    train_summary_writer = tensorboard.FileWriter(
        logdir=os.path.join(args.save_dir, 'log', 'train'), flush_secs=10)
    valid_summary_writer = tensorboard.FileWriter(
        logdir=os.path.join(args.save_dir, 'log', 'valid'), flush_secs=10)
    tsw, vsw = train_summary_writer, valid_summary_writer

    num_train_batches = data.train_size // data.batch_size 
    logging.info(f'num_train_batches: {num_train_batches}')
    validate_every = num_train_batches // 10
    best_valid_accuracy = 0
    iter_count = 0
    tic = time.time()

    for epoch_num in range(args.max_epoch):
        for batch_iter, train_batch in enumerate(data.train_minibatch_generator()):
            progress = epoch_num + batch_iter/num_train_batches
            iter_count += 1
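
The snippet is cut off before the writers are used, but the usual pattern is to emit scalar summaries from inside the loop: the train writer (tsw) on every iteration and the valid writer (vsw) every validate_every batches. A rough, hypothetical continuation (the metric values are placeholders, and summary.scalar/add_summary are assumed as in the intro sketch):

            # hypothetical continuation, assumes: from tensorboard import summary
            train_loss, train_accuracy = 0.52, 0.81   # placeholder values from the training step
            tsw.add_summary(summary.scalar('loss', train_loss), iter_count)
            tsw.add_summary(summary.scalar('accuracy', train_accuracy), iter_count)
            if (batch_iter + 1) % validate_every == 0:
                valid_accuracy = 0.78                  # placeholder validation metric
                vsw.add_summary(summary.scalar('accuracy', valid_accuracy), iter_count)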
github shijx12 / AR-Tree / sst / train.py View on Github
    if args.gpu > -1:
        logging.info(f'Using GPU {args.gpu}')
        model.cuda(args.gpu)
    if args.optimizer == 'adam':
        optimizer_class = optim.Adam
    elif args.optimizer == 'adagrad':
        optimizer_class = optim.Adagrad
    elif args.optimizer == 'adadelta':
        optimizer_class = optim.Adadelta
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = optimizer_class(params=params, weight_decay=args.l2reg)
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='max', factor=0.5, patience=20, verbose=True)
    criterion = nn.CrossEntropyLoss()
    trpack = [model, params, criterion, optimizer]

    train_summary_writer = tensorboard.FileWriter(
        logdir=os.path.join(args.save_dir, 'log', 'train'), flush_secs=10)
    valid_summary_writer = tensorboard.FileWriter(
        logdir=os.path.join(args.save_dir, 'log', 'valid'), flush_secs=10)
    tsw, vsw = train_summary_writer, valid_summary_writer

    num_train_batches = len(train_loader)
    logging.info(f'num_train_batches: {num_train_batches}')
    validate_every = num_train_batches // 10
    best_valid_accuracy = 0
    iter_count = 0
    tic = time.time()

    for batch_iter, train_batch in enumerate(train_loader):
        progress = train_loader.epoch
        if progress > args.max_epoch:
            break
github jihunchoi / shortcut-stacked-encoder-pytorch / train_snli.py View on Github
def train(args):
    experiment_name = (f'w{args.word_dim}_lh{args.lstm_hidden_dims}'
                       f'_mh{args.mlp_hidden_dim}_ml{args.mlp_num_layers}'
                       f'_d{args.dropout_prob}')
    save_dir = os.path.join(args.save_root_dir, experiment_name)
    train_summary_writer = tensorboard.FileWriter(
        logdir=os.path.join(save_dir, 'log', 'train'))
    valid_summary_writer = tensorboard.FileWriter(
        logdir=os.path.join(save_dir, 'log', 'valid'))

    lstm_hidden_dims = [int(d) for d in args.lstm_hidden_dims.split(',')]

    logging.info('Loading data...')
    text_field = data.Field(lower=True, include_lengths=True,
                            batch_first=False)
    label_field = data.Field(sequential=False)
    if not os.path.exists(args.data_dir):
        os.makedirs(args.data_dir)
    dataset_splits = datasets.SNLI.splits(
        text_field=text_field, label_field=label_field, root=args.data_dir)
    text_field.build_vocab(*dataset_splits, vectors=args.pretrained)
    label_field.build_vocab(*dataset_splits)
    train_loader, valid_loader, _ = data.BucketIterator.splits(
        datasets=dataset_splits, batch_size=args.batch_size, device=args.gpu)
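
Note how train_snli.py points the two writers at sibling 'train' and 'valid' subdirectories of the same experiment folder, so TensorBoard treats them as two runs whose curves can be overlaid. After a validation pass, the code would then log to the valid writer under the same tag used for training, along the lines of this hypothetical call (the tag, value, and step are placeholders):

# hypothetical logging call, assumes: from tensorboard import summary
valid_summary_writer.add_summary(summary.scalar('accuracy', 0.83), 1000)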
github luoyetx / WGAN / main.py View on Github
    modD.bind(data_shapes=image_iter.provide_data,
              inputs_need_grad=True)
    modD.init_params(mx.init.Normal(0.002))
    modD.init_optimizer(
        optimizer='sgd',
        optimizer_params={
            'learning_rate': lr,
        })
    # train
    logging.info('Start training')
    metricD = WGANMetric()
    metricG = WGANMetric()
    fix_noise_batch = mx.io.DataBatch([mx.random.normal(0, 1, shape=(batch_size, z_dim, 1, 1))], [])
    # visualization with TensorBoard if possible
    if use_tb:
        writer = FileWriter('tmp/exp')
    for epoch in range(epoches):
        image_iter.reset()
        metricD.reset()
        metricG.reset()
        for i, batch in enumerate(image_iter):
            # clip weight
            for params in modD._exec_group.param_arrays:
                for param in params:
                    mx.nd.clip(param, -wclip, wclip, out=param)
            # forward G
            rbatch = rand_iter.next()
            modG.forward(rbatch, is_train=True)
            outG = modG.get_outputs()
            # fake
            modD.forward(mx.io.DataBatch(outG, label=[]), is_train=True)
            fw_g = modD.get_outputs()[0].asnumpy()
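
Here the writer is only created when use_tb is set, and the visible window ends before anything is written to it. Later in the epoch loop the discriminator and generator metrics would typically be pushed to it roughly as follows (a hedged sketch: summary is assumed to be importable from the same package, and metricD.get()/metricG.get() are assumed to return a (name, value) pair as MXNet metrics usually do):

        # hypothetical end-of-epoch logging, assumes: from tensorboard import summary
        if use_tb:
            writer.add_summary(summary.scalar('wgan/errD', metricD.get()[1]), epoch)
            writer.add_summary(summary.scalar('wgan/errG', metricG.get()[1]), epoch)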
github shijx12 / AR-Tree / imdb / train.py View on Github
    elif args.optimizer == 'adagrad':
        optimizer_class = optim.Adagrad
    elif args.optimizer == 'adadelta':
        optimizer_class = optim.Adadelta
    elif args.optimizer == 'sgd':
        optimizer_class = optim.SGD
    elif args.optimizer == 'RMSprop':
        optimizer_class = optim.RMSprop
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = optimizer_class(params=params, lr=args.lr, weight_decay=args.l2reg)

    scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=args.lrd_every_epoch)

    criterion = nn.CrossEntropyLoss()

    train_summary_writer = tensorboard.FileWriter(
        logdir=os.path.join(args.save_dir, 'log', 'train'), flush_secs=10)
    valid_summary_writer = tensorboard.FileWriter(
        logdir=os.path.join(args.save_dir, 'log', 'valid'), flush_secs=10)

    def run_iter(batch, is_training):
        model.train(is_training)
        words, length = batch.text
        label = batch.label
        length = wrap_with_variable(length, volatile=not is_training, gpu=args.gpu)
        logits, supplements = model(words=words, length=length, display=not is_training)
        label_pred = logits.max(1)[1]
        accuracy = torch.eq(label, label_pred).float().mean()
        loss = criterion(input=logits, target=label)
        if is_training:
            optimizer.zero_grad()
            loss.backward()
github happywu / A3C / a3c.py View on Github
import logging
import os
import threading
import gym
from datetime import datetime
import time
from a3cmodule import A3CModule
from tensorboard import summary
from tensorboard import FileWriter

T = 0
TMAX = 80000000
t_max = 32

logdir = './a3c_logs/'
summary_writer = FileWriter(logdir)

parser = argparse.ArgumentParser(description='Traing A3C with OpenAI Gym')
parser.add_argument('--test', action='store_true',
                    help='run testing', default=False)
parser.add_argument('--log-file', type=str, help='the name of log file')
parser.add_argument('--log-dir', type=str, default="./log",
                    help='directory of the log file')
parser.add_argument('--model-prefix', type=str,
                    help='the prefix of the model to load')
parser.add_argument('--save-model-prefix', type=str,
                    help='the prefix of the model to save')
parser.add_argument('--load-epoch', type=int,
                    help="load the model on an epoch using the model-prefix")

parser.add_argument('--kv-store', type=str,
                    default='device', help='the kvstore type')
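
This script imports summary and FileWriter directly and builds a single module-level summary_writer that the A3C worker threads can share. Recording a per-episode scalar with it would look roughly like the following (the variable names and values are placeholders, not taken from the original script):

# hypothetical per-episode logging with the module-level writer
episode, episode_reward = 1, 21.0   # placeholder values
summary_writer.add_summary(summary.scalar('episode_reward', episode_reward), episode)
summary_writer.flush()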
github lvzhaoyang / DeeperInverseCompositionalAlgorithm / code / Logger.py View on Github
    def __init__(self, logging_dir, logfile_name, print_freq = 10):

        self.log_dir = logging_dir
        self.print_freq = print_freq

        if not os.path.isdir(logging_dir):
            os.makedirs(logging_dir)

        self.summary_writer = tensorboard.FileWriter(logdir=logging_dir)

        # standard logger to print to terminal
        logfile = osp.join(logging_dir,'log.txt')
        stdout = Logger(logfile)
        sys.stdout = stdout
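
Since the writer is stored on the object, the rest of this Logger class presumably wraps it in small helper methods. A hypothetical example of such a wrapper, in the same style (not part of the original file; assumes from tensorboard import summary):

    def write_scalar(self, tag, value, step):
        # hypothetical helper, not in the original class
        self.summary_writer.add_summary(summary.scalar(tag, value), step)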
github shijx12 / AR-Tree / imdb / train.py View on Github
    elif args.optimizer == 'adadelta':
        optimizer_class = optim.Adadelta
    elif args.optimizer == 'sgd':
        optimizer_class = optim.SGD
    elif args.optimizer == 'RMSprop':
        optimizer_class = optim.RMSprop
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = optimizer_class(params=params, lr=args.lr, weight_decay=args.l2reg)

    scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=args.lrd_every_epoch)

    criterion = nn.CrossEntropyLoss()

    train_summary_writer = tensorboard.FileWriter(
        logdir=os.path.join(args.save_dir, 'log', 'train'), flush_secs=10)
    valid_summary_writer = tensorboard.FileWriter(
        logdir=os.path.join(args.save_dir, 'log', 'valid'), flush_secs=10)

    def run_iter(batch, is_training):
        model.train(is_training)
        words, length = batch.text
        label = batch.label
        sample_num = args.sample_num

        length = wrap_with_variable(length, volatile=not is_training, gpu=args.gpu)
        logits, supplements = model(words=words, length=length, display=not is_training)
        label_pred = logits.max(1)[1]
        accuracy = torch.eq(label, label_pred).float().mean()
        sv_loss = criterion(input=logits, target=label)
        if is_training:
            ###########################
            # rl training loss for sampled trees