How to use trains - 10 common examples

To help you get started, we’ve selected a few trains examples, based on popular ways the library is used in public projects.

github allegroai / trains / trains / model.py View on Github
    def set_upload_destination(self, uri):
        """
        Files are uploaded separately to the destination storage (e.g. s3, gc, file) and then
        a link to the uploaded model is stored in the model object.
        Notice: credentials for the upload destination will be pulled from the
        global configuration file (i.e. ~/trains.conf)

        :param uri: upload destination (string). Example: 's3://bucket/directory/' or 'file:///tmp/debug/'
        :return: True if the destination scheme is supported (i.e. s3://, file://, gc://, etc.)
        """
        if not uri:
            return

        # Test if we can update the model.
        self._validate_update()

        # Create the storage helper
        storage = StorageHelper.get(uri)

        # Verify that we can upload to this destination
        try:
            uri = storage.verify_upload(folder_uri=uri)
        except Exception:
            raise ValueError("Could not set destination uri to: %s [Check write permissions]" % uri)

        # store default uri
        self._get_base_model().upload_storage_uri = uri
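
A minimal usage sketch for this method, assuming the public OutputModel API that exposes it (the bucket name below is hypothetical):

from trains import Task, OutputModel

task = Task.init(project_name='examples', task_name='model upload example')
output_model = OutputModel(task=task)
# point uploaded weight files at an S3 bucket; credentials are read from ~/trains.conf
output_model.set_upload_destination(uri='s3://my-bucket/models/')
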
github allegroai / trains / trains / logger.py View on Github
        try:
            level = int(level)
        except (TypeError, ValueError):
            self._task.log.log(level=logging.ERROR,
                               msg='Logger failed casting log level "%s" to integer' % str(level))
            level = logging.INFO

        if not running_remotely():
            # noinspection PyBroadException
            try:
                record = self._task.log.makeRecord(
                    "console", level=level, fn='', lno=0, func='', msg=msg, args=args, exc_info=None
                )
                # find the task handler that matches our task
                if not self._task_handler:
                    self._task_handler = [h for h in LoggerRoot.get_base_logger().handlers
                                          if isinstance(h, TaskHandler) and h.task_id == self._task.id][0]
                self._task_handler.emit(record)
            except Exception:
                LoggerRoot.get_base_logger().warning(msg='Logger failed sending log: [level %s]: "%s"'
                                                         % (str(level), str(msg)))

        if not omit_console:
            # if we are here and we grabbed the stdout, we need to print the real thing
            if DevWorker.report_stdout and not running_remotely():
                # noinspection PyBroadException
                try:
                    # make sure we are writing to the original stdout
                    StdStreamPatch.stdout_original_write(str(msg)+'\n')
                except Exception:
                    pass
            else:
                print(str(msg))

        # if task was not started, we have to start it
        self._start_task_if_needed()
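
The code above comes from the logger's internals; a minimal sketch of the public call path, assuming the Logger.report_text API, looks like this:

import logging
from trains import Task

task = Task.init(project_name='examples', task_name='console logging example')
logger = task.get_logger()
# printed to the console and also sent to the task's log on the trains server
logger.report_text('training started', level=logging.INFO)
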
github allegroai / trains / trains / binding / matplotlib_bind.py View on Github
    def patch_matplotlib():
        # only once
        if PatchedMatplotlib._patched_original_plot is not None:
            return True
        # noinspection PyBroadException
        try:
            # we support matplotlib version 2.0.0 and above
            import matplotlib
            PatchedMatplotlib._matplot_major_version = int(matplotlib.__version__.split('.')[0])
            if PatchedMatplotlib._matplot_major_version < 2:
                LoggerRoot.get_base_logger().warning(
                    'matplotlib binding supports version 2.0 and above, found version {}'.format(
                        matplotlib.__version__))
                return False

            if running_remotely():
                # disable GUI backend - make headless
                matplotlib.rcParams['backend'] = 'agg'
                import matplotlib.pyplot
                matplotlib.pyplot.switch_backend('agg')
            import matplotlib.pyplot as plt
            import matplotlib.figure as figure
            from matplotlib import _pylab_helpers
            if six.PY2:
                PatchedMatplotlib._patched_original_plot = staticmethod(plt.show)
                PatchedMatplotlib._patched_original_imshow = staticmethod(plt.imshow)
                PatchedMatplotlib._patched_original_figure = staticmethod(figure.Figure.show)
            else:
                PatchedMatplotlib._patched_original_plot = plt.show
                PatchedMatplotlib._patched_original_imshow = plt.imshow
                PatchedMatplotlib._patched_original_figure = figure.Figure.show
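
Because this patching is applied automatically when a task is initialized, a regular plt.show() is enough to report a figure; a minimal sketch:

import matplotlib.pyplot as plt
from trains import Task

task = Task.init(project_name='examples', task_name='matplotlib auto-report')
plt.plot([1, 2, 3, 4], [1, 4, 9, 16])
plt.title('auto-reported figure')
# the patched plt.show() sends the figure to the trains server as a plot
plt.show()
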
github allegroai / trains / trains / task.py View on Github
    def add_tags(self, tags):
        """
        Add tags to this task. Old tags are not deleted.

        In remote, this is a no-op.

        :param tags: An iterable or space separated string of new tags (string) to add.
        :type tags: str or iterable of str
        """

        if not running_remotely() or not self.is_main_task():
            if isinstance(tags, six.string_types):
                tags = tags.split(" ")

            self.data.tags.extend(tags)
            self._edit(tags=list(set(self.data.tags)))
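
A minimal usage sketch: tags can be passed either as an iterable of strings or as a single space-separated string.

from trains import Task

task = Task.init(project_name='examples', task_name='tagging example')
task.add_tags(['pytorch', 'mnist'])   # iterable of tags
task.add_tags('baseline cpu-only')    # space-separated string
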
github allegroai / trains / examples / tensorflow_eager.py View on Github
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os
import sys
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from trains import Task

tf.compat.v1.enable_eager_execution()

task = Task.init(project_name='examples', task_name='Tensorflow eager mode')


FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('data_num', 100, """Flag of type integer""")
tf.app.flags.DEFINE_string('img_path', './img', """Flag of type string""")


layers = tf.keras.layers
FLAGS = None


class Discriminator(tf.keras.Model):
  """GAN Discriminator.
  A network to differentiate between generated and real handwritten digits.
  """
github allegroai / trains / examples / tensorboard_toy.py View on Github
# TRAINS - Example of tensorboard with tensorflow (without any actual training)
#
import tensorflow as tf
import numpy as np
from PIL import Image

from trains import Task
task = Task.init(project_name='examples', task_name='tensorboard toy example')


k = tf.placeholder(tf.float32)

# Make a normal distribution, with a shifting mean
mean_moving_normal = tf.random_normal(shape=[1000], mean=(5*k), stddev=1)
# Record that distribution into a histogram summary
tf.summary.histogram("normal/moving_mean", mean_moving_normal)
tf.summary.scalar("normal/value", mean_moving_normal[-1])

# Make a normal distribution with shrinking variance
variance_shrinking_normal = tf.random_normal(shape=[1000], mean=0, stddev=1-(k))
# Record that distribution too
tf.summary.histogram("normal/shrinking_variance", variance_shrinking_normal)
tf.summary.scalar("normal/variance_shrinking_normal", variance_shrinking_normal[-1])
github allegroai / trains / examples / hyper_parameters_example.py View on Github
import sys
from argparse import ArgumentParser

from absl import app
from absl import flags
from absl import logging

from trains import Task


FLAGS = flags.FLAGS

flags.DEFINE_string('echo', None, 'Text to echo.')
flags.DEFINE_string('another_str', 'My string', 'A string', module_name='test')

task = Task.init(project_name='examples', task_name='hyper-parameters example')

flags.DEFINE_integer('echo3', 3, 'Text to echo.')
flags.DEFINE_string('echo5', '5', 'Text to echo.', module_name='test')


parameters = {
    'list': [1, 2, 3],
    'dict': {'a': 1, 'b': 2},
    'tuple': (1, 2, 3),
    'int': 3,
    'float': 2.2,
    'string': 'my string',
}
parameters = task.connect(parameters)

# adding new parameter after connect (will be logged as well)
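# (a sketch, not from the original file: the parameter name below is hypothetical;
#  changes to the dict returned by task.connect() are reported as well)
parameters['new_param'] = 'this is a new parameter'
print(parameters['new_param'])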
github allegroai / trains / examples / pytorch_matplotlib.py View on Github
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from PIL import Image
import matplotlib.pyplot as plt

import torchvision.transforms as transforms
import torchvision.models as models

import copy
from trains import Task


task = Task.init(project_name='examples', task_name='pytorch with matplotlib example', task_type=Task.TaskTypes.testing)


######################################################################
# Next, we need to choose which device to run the network on and import the
# content and style images. Running the neural transfer algorithm on large
# images takes longer and will go much faster when running on a GPU. We can
# use ``torch.cuda.is_available()`` to detect if there is a GPU available.
# Next, we set the ``torch.device`` for use throughout the tutorial. Also the ``.to(device)``
# method is used to move tensors or modules to a desired device.

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

######################################################################
# Loading the Images
# ------------------
#
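# (a sketch of the loading step, which the excerpt cuts off: it shows the
#  .to(device) call mentioned above; the image size is illustrative)
imsize = 512 if torch.cuda.is_available() else 128

loader = transforms.Compose([
    transforms.Resize(imsize),   # scale the imported image
    transforms.ToTensor()])      # transform it into a torch tensor


def image_loader(image_name):
    image = Image.open(image_name)
    # fake batch dimension required to fit the network's input dimensions
    image = loader(image).unsqueeze(0)
    return image.to(device, torch.float)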
github allegroai / trains / examples / pytorch_mnist.py View on Github
# TRAINS - Example of Pytorch mnist training integration
#
from __future__ import print_function
import argparse
import os
from tempfile import gettempdir

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms

from trains import Task
task = Task.init(project_name='examples', task_name='pytorch mnist train')


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4 * 4 * 50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4 * 4 * 50)