How to use the lab.logger.section function in lab

To help you get started, we’ve selected a few lab examples based on popular ways the library is used in public projects.
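
Before diving into the full samples, here is a minimal sketch of the pattern they all share: open a section as a context manager, do the work inside it, and report progress as you go. The section name, step count, and the loop body below are invented for illustration.

import time

from lab import logger

# A section groups a block of work, labels it in the console output,
# and can show a progress indicator over a known number of steps.
with logger.section("Prepare data", total_steps=100):
    for step in range(100):
        time.sleep(0.01)           # stand-in for the real work
        logger.progress(step + 1)  # update the section's progress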

github vpj / lab / backend / samples / mnist_tensorflow.py View on Github external
def test(session: tf.Session, loss_value, accuracy_value, batches):
    with logger.section("Test", total_steps=batches):
        test_loss = 0
        correct = 0
        batch_idx = -1
        while True:
            batch_idx += 1
            try:
                l, a = session.run([loss_value, accuracy_value])
                test_loss += l
                correct += a
            except tf.errors.OutOfRangeError:
                break
            logger.progress(batch_idx + 1)

        logger.store(test_loss=test_loss / batches)
        logger.store(accuracy=correct / batches)

github vpj / lab / backend / samples / mnist_pytorch.py View on Github external
def test(model, device, test_loader):
    with logger.section("Test", total_steps=len(test_loader)):
        model.eval()
        test_loss = 0
        correct = 0
        with torch.no_grad():
            for batch_idx, (data, target) in enumerate(test_loader):
                data, target = data.to(device), target.to(device)
                output = model(data)
                test_loss += F.nll_loss(output, target, reduction='sum').item()
                pred = output.argmax(dim=1, keepdim=True)
                correct += pred.eq(target.view_as(pred)).sum().item()
                logger.progress(batch_idx + 1)

        # Add test loss and accuracy to logger
        logger.store(test_loss=test_loss / len(test_loader.dataset))
        logger.store(accuracy=correct / len(test_loader.dataset))

github vpj / lab / backend / samples / mnist_tensorflow.py View on Github external
def train(args, session: tf.Session, loss_value, train_op, batches, epoch):
    with logger.section("Train", total_steps=batches):
        batch_idx = -1
        while True:
            batch_idx += 1
            try:
                l, _ = session.run([loss_value, train_op])
            except tf.errors.OutOfRangeError:
                break

            # Add training loss to the logger.
            # The logger will queue the values and output the mean
            logger.store(train_loss=l)
            logger.progress(batch_idx + 1)
            logger.set_global_step(epoch * batches + batch_idx)

            # Print output to the console
            if batch_idx % args.log_interval == 0:

github vpj / lab / samples / getting_started.py View on Github external
import time

from lab import logger
from lab.experiment.tensorflow import Experiment


# Create the sample experiment
EXPERIMENT = Experiment(name="Sample",
                        python_file=__file__,
                        comment="Sample lab experiment",
                        check_repo_dirty=False)

# Sections are used to keep track of
# what's going on from the console output.
# They are also useful for organizing the code into sections
# when separating them into functions is difficult.
with logger.section("Create model"):
    # Indicate that this section failed. You don't have to set
    #  this if it is successful.
    logger.set_successful(False)

    # Sleep for a second.
    time.sleep(1)

# Print sample info
logger.info(one=1,
            two=2,
            string="string")

# ### Set logger indicators

# Reward is queued; this is useful when you want to track the moving
# average of something.
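
The sample above is cut off before the indicator is actually registered. Judging from the mnist_pytorch example further down on this page, a queued indicator is declared with logger.add_indicator; the indicator name and queue size here are only illustrative.

# Keep the last few values in a queue and log their mean,
# which gives a moving average of the reward.
logger.add_indicator("reward", queue_limit=10, is_print=True)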

github vpj / lab / backend / lab / experiment / tensorflow.py View on Github external
            # Load checkpoint if we are starting from the middle
            with logger.section("Loading checkpoint") as m:
                is_successful = self.__checkpoint_saver.load(session)
                logger.set_successful(is_successful)
                if is_successful:
                    global_step = self.__checkpoint_saver.max_step

        self._start(global_step)

        if global_step == 0:
            # initialize variables and clear summaries if we are starting from scratch
            with logger.section("Clearing summaries"):
                self.clear_summaries()
            with logger.section("Clearing checkpoints"):
                self.clear_checkpoints()
            with logger.section("Initializing variables"):
                tf_util.init_variables(session)

        self.create_writer(session)

github vpj / lab / samples / mnist_configs.py View on Github external
def test_loader(c: Configs):
    with logger.section("Testing data"):
        return _data_loader(False, c.test_batch_size, c.data_loader_args)

github vpj / lab / samples / mnist_loop.py View on Github external
def model_optimizer(c: Configs):
    with logger.section("Create model"):
        m: Net = Net()
        m.to(c.device)

    with logger.section("Create optimizer"):
        o = optim.SGD(m.parameters(), lr=c.learning_rate, momentum=c.momentum)

    return m, o

github vpj / lab / samples / mnist_configs.py View on Github external
@Configs.calc()
def set_seed(c: Configs):
    with logger.section("Setting seed"):
        torch.manual_seed(c.seed)

github vpj / lab / backend / samples / mnist_pytorch.py View on Github external
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST('./data', train=True, download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307,), (0.3081,))
                           ])),
            batch_size=args.batch_size, shuffle=True, **kwargs)
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('./data', train=False, transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307,), (0.3081,))
            ])),
            batch_size=args.test_batch_size, shuffle=True, **kwargs)

    # Model creation
    with logger.section("Create model"):
        model = Net().to(device)
        optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

    # Specify indicators
    logger.add_indicator("train_loss", queue_limit=10, is_print=True)
    logger.add_indicator("test_loss", is_histogram=False, is_print=True)
    logger.add_indicator("accuracy", is_histogram=False, is_print=True)
    for name, param in model.named_parameters():
        if param.requires_grad:
            logger.add_indicator(name, is_histogram=True, is_print=False)
            logger.add_indicator(f"{name}_grad", is_histogram=True, is_print=False)

    # Start the experiment
    EXPERIMENT.start_train()

    # Loop through the monitored iterator