How to use visdom - 10 common examples

To help you get started, we've selected ten visdom examples, based on popular ways the library is used in public projects.
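Most of the examples below follow the same basic pattern: create a visdom.Visdom client (optionally pointing it at a specific server, port, or environment) and push data into named windows. Here is a minimal sketch of that pattern, assuming a visdom server is already running on the default port 8097; the window and environment names are purely illustrative.

import numpy as np
import visdom

# Connect to a running server (default: http://localhost:8097).
vis = visdom.Visdom(env='demo')
assert vis.check_connection(), "start the server first: python -m visdom.server"

# Create a line window, then stream further points into it.
win = vis.line(X=np.array([0]), Y=np.array([0.0]),
               opts=dict(title='loss', xlabel='step', ylabel='value'))
for step in range(1, 10):
    vis.line(X=np.array([step]), Y=np.array([1.0 / step]),
             win=win, update='append')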

Example 1: pytorch/ignite - tests/ignite/contrib/handlers/test_visdom_logger.py
import pytest

@pytest.fixture()
def visdom_server():
    # Spin up a standalone visdom server for the tests and tear it down afterwards.
    import time
    import subprocess

    # Download the static assets the visdom server serves (one-time setup).
    from visdom.server import download_scripts
    download_scripts()

    hostname = "localhost"
    port = 8098
    p = subprocess.Popen("visdom --hostname {} -port {}".format(hostname, port), shell=True)
    time.sleep(5)  # give the server a few seconds to start accepting connections
    yield (hostname, port)
    p.terminate()
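A hedged sketch of how a test might consume this fixture: request it by name, connect a plain visdom client to the yielded address, and push a throwaway point. The test name and window id below are illustrative, not ignite's actual tests.

import numpy as np
import visdom

def test_can_log_to_visdom_server(visdom_server):
    hostname, port = visdom_server
    vis = visdom.Visdom(server="http://" + hostname, port=port)
    assert vis.check_connection()
    # push one point into a throwaway window, then clean it up
    vis.line(X=np.array([0]), Y=np.array([0.0]), win="smoke_test")
    vis.close(win="smoke_test")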
Example 2: ming71/toolbox - visdom-train-example.py
                ac = test('/py/mnist/data/test_list.txt', 28, '%s/lr-adaptivenet_%03d.pth' % (save_path, i + 1))
                vis.line(X=np.array([i]), Y=np.array([ac]), win='accurancy', update='append', name='lr-0.001',
                         opts=dict(linecolor=np.array([[218, 165, 32]]), showlegend=True))


if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument('--epoch', default=1, type=int, help='number of epochs')
    parser.add_argument('--img_size', default=28, type=int, help='image size')
    parser.add_argument('--train_path', default='/py/mnist/data/traintrain_list.txt', help='path to the training list txt')
    parser.add_argument('--save_path', default='/py/mnist/weights', help='path to save the model')
    parser.add_argument('--augmentation', default=False, help='use data augmentation')
    opt = parser.parse_args()
    print(opt)
    vis = visdom.Visdom(env='MNIST')
    train(opt.epoch, opt.train_path, opt.img_size, opt.save_path, opt.augmentation)
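The key visdom call in this example is the per-epoch vis.line(..., update='append') that grows an accuracy curve in a single window. A self-contained sketch of that append pattern (the accuracy values below are made up for illustration):

import numpy as np
import visdom

vis = visdom.Visdom(env='MNIST')

for epoch, acc in enumerate([0.91, 0.93, 0.95]):
    # each call appends one point to the same named window
    vis.line(X=np.array([epoch]), Y=np.array([acc]),
             win='accuracy', update='append', name='lr-0.001',
             opts=dict(showlegend=True))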
Example 3: guanfuchen/facedet - train.py
def train():
    vis = visdom.Visdom()

    num_classes = 2
    net = facebox.FaceBox(num_classes=num_classes)
    if os.path.exists('weight/facebox.pt'):
        net.load_state_dict(torch.load('weight/facebox.pt', map_location=lambda storage, loc: storage))
    facebox_box_coder = facebox.FaceBoxCoder(net)

    root = os.path.expanduser('~/Data/WIDER')
    train_dataset = wider_face_loader.WiderFaceLoader(root=root, boxcoder=facebox_box_coder)
    train_dataloader = data.DataLoader(train_dataset, batch_size=1, shuffle=True)

    # optimizer = optim.SGD(net.parameters(), lr=1e-5, momentum=0.9, weight_decay=5e-4)
    optimizer = optim.Adam(net.parameters(), lr=1e-5, weight_decay=1e-4)
    criterion = facebox.FaceBoxLoss(num_classes=num_classes)

    for epoch in range(100):
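The snippet stops at the start of the epoch loop, but the vis client created at the top is what the rest of train() would report into. Purely as an illustration (not the repository's actual plotting code), the running loss could be pushed like this:

import numpy as np

def plot_loss(vis, step, loss_value, win='facebox_train_loss'):
    # append the current loss to a single training-loss window
    vis.line(X=np.array([step]), Y=np.array([loss_value]),
             win=win, update='append',
             opts=dict(title='training loss', xlabel='iteration'))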
Example 4: chenjun2hao/CLPR.pytorch - train.py
MEANS))
    # ocr dataset
    else:
        cfg = ocr
        converter = strLabelConverter(args.alphabets)
        dataset = LPDataset(
            root=args.root,
            csv_root=None,
            transform=SSDAugmentation(cfg['min_dim'], MEANS),
            target_transform=converter.encode
        )


    if args.visdom:
        import visdom
        viz = visdom.Visdom()

    ssd_net = build_ssd('train', cfg['min_dim'], cfg['num_classes'])
    net = ssd_net
    ctccriterion = OcrLoss(args.alphabets)


    if args.resume:
        print('Resuming training, loading {}...'.format(args.resume))
        # ssd_net.load_weights(args.resume)
        checkpoint = torch.load(args.resume)
        net.load_state_dict(checkpoint['model'])
        ctccriterion.load_state_dict(checkpoint['ocr'])
    else:
        vgg_weights = torch.load(args.save_folder + args.basenet)
        print('Loading base network...')
        ssd_net.vgg.load_state_dict(vgg_weights)
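Here visdom is optional: the client is only created when --visdom is passed. SSD-style training scripts usually pair such a viz client with small helpers that create a multi-trace window once and then append to it every iteration; a sketch of that common pattern follows (the helper names are illustrative, not necessarily this repository's code).

import torch

def create_vis_plot(viz, xlabel, ylabel, title, legend):
    # one window with one trace per legend entry
    return viz.line(X=torch.zeros((1,)), Y=torch.zeros((1, len(legend))),
                    opts=dict(xlabel=xlabel, ylabel=ylabel,
                              title=title, legend=legend))

def update_vis_plot(viz, iteration, loc_loss, conf_loss, window):
    # append localization, confidence, and total loss at this iteration
    viz.line(X=torch.ones((1, 3)) * iteration,
             Y=torch.Tensor([loc_loss, conf_loss, loc_loss + conf_loss]).unsqueeze(0),
             win=window, update='append')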
Example 5: atapour/monocularDepth-Inference - util/visualizer.py
def __init__(self, opt):
        # self.opt = opt
        self.display_id = opt.display_id
        self.use_html = False and not opt.no_html
        self.win_size = opt.display_winsize
        self.name = 'inference'
        self.opt = opt
        self.saved = False
        if self.display_id > 0:
            import visdom
            self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port)

        if self.use_html:
            self.web_dir = os.path.join(opt.checkpoints_dir, 'web')
            self.img_dir = os.path.join(self.web_dir, 'images')
            print('create web directory %s...' % self.web_dir)
            util.mkdirs([self.web_dir, self.img_dir])
        self.log_name = os.path.join(opt.checkpoints_dir, 'loss_log.txt')
        with open(self.log_name, "a") as log_file:
            now = time.strftime("%c")
            log_file.write('================ Training Loss (%s) ================\n' % now)
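The client created here (self.vis) is what the visualizer's display methods push images through. As a rough sketch, assuming visuals is a dict of HxWxC uint8 numpy arrays (the method below is illustrative, not the repository's exact implementation):

def display_current_results(self, visuals):
    if self.display_id > 0:
        for idx, (label, image_numpy) in enumerate(visuals.items()):
            # visdom expects CxHxW, so transpose before sending
            self.vis.image(image_numpy.transpose([2, 0, 1]),
                           win=str(self.display_id + idx),
                           opts=dict(title=label))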
Example 6: hrhodin/NeuralSceneDecomposition - python/configs/train_detect_encode_decode.py
def run(self, config_dict_file, config_dict):
        # create visualization windows
        try:
            import visdom
            port = 3557
            vis = visdom.Visdom(port=port)
            if not vis.check_connection():
                vis = None
                print("WARNING: Visdom server not running. Please run 'python -m visdom.server -port port' to see visual output")
            else:
                print("Visdom connected, reporting progress there!")
        except ImportError:
            vis = None
            print("WARNING: No visdom package is found. Please install it with command: \n pip install visdom to see visual output")
            #raise RuntimeError("WARNING: No visdom package is found. Please install it with command: \n pip install visdom to see visual output")
        vis_windows = {}
    
        # save path and config files
        save_path = self.get_parameter_description(config_dict)
        utils_io.savePythonFile(config_dict_file, save_path)
        utils_io.savePythonFile(__file__, save_path)
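Note that vis may end up as None when the server is unreachable or visdom is not installed, so downstream plotting code has to guard on it; vis_windows then maps plot names to window handles so each plot is created once and appended to afterwards. A small illustrative helper for that pattern (names are assumptions, not the repository's code):

import numpy as np

def plot_scalar(vis, vis_windows, key, x, y):
    if vis is None:
        return
    if key not in vis_windows:
        # first value: create the window and remember its handle
        vis_windows[key] = vis.line(X=np.array([x]), Y=np.array([y]),
                                    opts=dict(title=key))
    else:
        vis.line(X=np.array([x]), Y=np.array([y]),
                 win=vis_windows[key], update='append')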
Example 7: bradyz/pytorch_starter - logger.py
def __init__(self, use_visdom):
        self.vis = visdom.Visdom() if use_visdom else None
        self.use_visdom = use_visdom

        self.epoch = 0

        self.metrics = {
                'loss_train', 'loss_test', 'accuracy_train', 'accuracy_test'}
        self.plots = {
                name: ScalarPlot(self.vis, name, use_visdom)
                for name in self.metrics}
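ScalarPlot is defined elsewhere in that repository; a minimal sketch of what such a per-metric wrapper might look like, purely for illustration:

import numpy as np

class ScalarPlot:
    def __init__(self, vis, name, use_visdom):
        self.vis = vis
        self.name = name
        self.use_visdom = use_visdom

    def update(self, step, value):
        if not self.use_visdom:
            return
        # append one point to this metric's window
        self.vis.line(X=np.array([step]), Y=np.array([value]),
                      win=self.name, update='append',
                      opts=dict(title=self.name))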
Example 8: yifita/3PU_pytorch - main.py
# 3. dataset.curr_threshold
    stage, progress = get_stage_progress(model.step)
    start_ratio = STEP_RATIO ** (stage + 1)
    dataset.set_max_ratio(start_ratio)
    if progress > 0.5:
        dataset.set_combined()
        if progress > 0.6:
            model.chamfer_criteria.set_threshold(CD_THRESHOLD)
    else:
        model.chamfer_criteria.unset_threshold()
        dataset.unset_combined()

    dataloader = data.DataLoader(dataset, batch_size=1, pin_memory=True)

    # visualization
    vis_logger = visdom.Visdom(env=FLAGS.id)
    for epoch in range(start_epoch + 1, MAX_EPOCH):
        for i, examples in enumerate(dataloader):
            input_pc, label_pc, ratio = examples
            ratio = ratio.item()
            # 1xBx3xN
            input_pc = input_pc[0].to(DEVICE)
            label_pc = label_pc[0].to(DEVICE)
            model.set_input(input_pc, ratio, label_pc=label_pc)
            # run gradient descent and increment model.step
            model.optimize()
            new_stage, new_progress = get_stage_progress(model.step)
            # advance to the next training stage with an added ratio
            if stage + 1 == new_stage:
                dataset.add_next_ratio()
                dataset.unset_combined()
                model.chamfer_criteria.unset_threshold()
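Besides scalar curves, visdom can also render the point clouds this loop trains on directly, for example as a 3D scatter of one Nx3 cloud pushed to the per-experiment environment created above with visdom.Visdom(env=FLAGS.id). The helper and window names below are assumptions, not the repository's code.

import numpy as np

def show_point_cloud(vis_logger, pc_nx3, win='input_pc'):
    # pc_nx3: numpy array of shape (N, 3)
    vis_logger.scatter(X=pc_nx3, win=win,
                       opts=dict(markersize=2, title=win))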
Example 9: luyao777/HBP-pytorch - HBP_fc.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Fine-tune the fc layer only for  HBP(Hierarchical Bilinear Pooling for Fine-Grained Visual Recognition).
Usage:
    CUDA_VISIBLE_DEVICES=0,1 python HBP_fc.py --base_lr 1.0 --batch_size 12 --epochs 120 --weight_decay 0.000005 | tee 'hbp_fc.log'
"""

import os
import torch
import torchvision
import cub200
import visdom
import argparse

vis = visdom.Visdom(env=u'HBP_fc',use_incoming_socket=False)
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)

class HBP(torch.nn.Module):
    def __init__(self):
        torch.nn.Module.__init__(self)
        # Convolution and pooling layers of VGG-16.
        self.features = torchvision.models.vgg16(pretrained=True).features
        self.features_conv5_1 = torch.nn.Sequential(*list(self.features.children())
                                            [:-5])  
        self.features_conv5_2 = torch.nn.Sequential(*list(self.features.children())
                                            [-5:-3])  
        self.features_conv5_3 = torch.nn.Sequential(*list(self.features.children())
                                            [-3:-1])     
        self.bilinear_proj = torch.nn.Sequential(torch.nn.Conv2d(512,8192,kernel_size=1,bias=False),
                                        torch.nn.BatchNorm2d(8192),
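Because the client above is created once at module level with env=u'HBP_fc' and use_incoming_socket=False (no callbacks are needed), any metric logged anywhere in the script lands in that environment. For illustration only, per-epoch accuracies could be appended to a two-trace window like this (names are assumptions, not the repository's exact code):

import numpy as np

def log_accuracy(epoch, train_acc, test_acc):
    # uses the module-level `vis` client defined above
    vis.line(X=np.array([[epoch, epoch]]), Y=np.array([[train_acc, test_acc]]),
             win='hbp_fc_accuracy', update='append',
             opts=dict(title='accuracy', legend=['train', 'test']))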
Example 10: 1Konny/Beta-VAE - solver.py
else:
            raise NotImplementedError('only support model H or B')

        self.net = cuda(net(self.z_dim, self.nc), self.use_cuda)
        self.optim = optim.Adam(self.net.parameters(), lr=self.lr,
                                    betas=(self.beta1, self.beta2))

        self.viz_name = args.viz_name
        self.viz_port = args.viz_port
        self.viz_on = args.viz_on
        self.win_recon = None
        self.win_kld = None
        self.win_mu = None
        self.win_var = None
        if self.viz_on:
            self.viz = visdom.Visdom(port=self.viz_port)

        self.ckpt_dir = os.path.join(args.ckpt_dir, args.viz_name)
        if not os.path.exists(self.ckpt_dir):
            os.makedirs(self.ckpt_dir, exist_ok=True)
        self.ckpt_name = args.ckpt_name
        if self.ckpt_name is not None:
            self.load_checkpoint(self.ckpt_name)

        self.save_output = args.save_output
        self.output_dir = os.path.join(args.output_dir, args.viz_name)
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir, exist_ok=True)

        self.gather_step = args.gather_step
        self.display_step = args.display_step
        self.save_step = args.save_step
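The window handles initialized to None above (win_recon, win_kld, win_mu, win_var) are the usual create-then-update bookkeeping: the first plot call stores the returned window id, and later calls append to it. A sketch of that pattern for the reconstruction-loss window, mirroring the solver's style rather than its exact code:

def viz_recon_line(self, iters, recon_losses):
    # iters, recon_losses: 1-D torch tensors gathered during training
    if self.win_recon is None:
        self.win_recon = self.viz.line(X=iters, Y=recon_losses,
                                       env=self.viz_name + '_lines',
                                       opts=dict(title='reconstruction loss'))
    else:
        self.win_recon = self.viz.line(X=iters, Y=recon_losses,
                                       env=self.viz_name + '_lines',
                                       win=self.win_recon, update='append')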

visdom

A tool for visualizing live, rich data for Torch and Numpy (Apache-2.0).