How to use the tensorpack.utils.logger.warn function in tensorpack

To help you get started, we’ve selected a few tensorpack examples based on popular ways `logger.warn` is used in public projects.
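
Before the project snippets, here is a minimal sketch of the call itself (the message text is invented for illustration). tensorpack's logger module is a thin wrapper around Python's standard logging module, so pass logger.warn a single preformatted string:

from tensorpack.utils import logger

# logger.warn forwards to the standard logging module; format the
# message yourself rather than passing extra positional arguments.
logger.warn("Dataset directory '{}' does not exist!".format("/data/svhn"))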


github pkumusic / E-DRL / tensorpack / dataflow / dataset /
# File:
# Author: Yuxin Wu 

import os
import random
import numpy as np
from six.moves import range

from ...utils import logger, get_rng, get_dataset_path
from ..base import RNGDataFlow

try:
    import scipy.io
    __all__ = ['SVHNDigit']
except ImportError:
    logger.warn("Cannot import scipy. SVHNDigit dataset won't be available!")
    __all__ = []


class SVHNDigit(RNGDataFlow):
    """
    SVHN Cropped Digit Dataset.
    Produces images of shape 32x32x3 and labels in 0-9.
    """
    _Cache = {}

    def __init__(self, name, data_dir=None, shuffle=True):
        """
        :param name: 'train', 'test', or 'extra'
        :param data_dir: a directory containing the original {train,test,extra}_32x32.mat
        """
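
For reference, a hedged usage sketch of the class above (it assumes the {train,test,extra}_32x32.mat files are available in the default dataset directory; recent tensorpack versions can download them automatically):

from tensorpack.dataflow.dataset import SVHNDigit

ds = SVHNDigit('train')   # 'train', 'test', or 'extra', per the docstring
ds.reset_state()          # required before iterating any DataFlow
for img, label in ds:
    print(img.shape, label)  # (32, 32, 3) and an int in 0-9
    break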
github armandmcqueen / tensorpack-mask-rcnn / tensorpack / callbacks /
def __new__(cls, logdir=None, max_queue=10, flush_secs=120, **kwargs):
        if logdir is None:
            logdir = logger.get_logger_dir()

        if logdir is not None:
            return super(TFEventWriter, cls).__new__(cls)
        else:
            logger.warn("logger directory was not set. Ignore TFEventWriter.")
            return NoOpMonitor("TFEventWriter")
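
A hedged usage sketch of the fallback above: when no logger directory has been set, TFEventWriter() constructed without an explicit logdir degrades to a no-op monitor instead of raising; setting the directory first makes it a real writer. The directory name is hypothetical:

from tensorpack.utils import logger
from tensorpack.callbacks import TFEventWriter

logger.set_logger_dir('train_log/run1')  # hypothetical directory
writer = TFEventWriter()                 # picks up the logger directory automatically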
github qq456cvb / doudizhu-C / TensorPack / ValueSL /
            is_pair = True
            minor_type = 1
            for target in minor_cards_targets:
                target_val = Card.char2value_3_17(target) - 3
                _, _, _, _, _, _, minor_response_prob = func(
                    [state.copy().reshape(1, -1), last_out_cards.reshape(1, -1), np.array([minor_type])])
                stats[6].feed(int(target_val == np.argmax(minor_response_prob)))
                cards = [target]
                if is_pair:
                    if target not in handcards:
                        logger.warn('something wrong...')
                        logger.warn('minor: {}'.format(target))
                        logger.warn('main_cards: {}'.format(main_cards))
                        logger.warn('handcards: {}'.format(handcards))

                # correct for one-hot state
                cards_onehot = Card.char2onehot60(cards)

                # print(s.shape)
                # print(cards_onehot.shape)
                discard_onehot_from_s_60(state, cards_onehot)
    return stats
github tensorpack / tensorpack / tensorpack / utils /
    try:
        output = subprocess.check_output(
            cmd, stderr=subprocess.STDOUT,
            shell=True, timeout=timeout)
        return output, 0
    except subprocess.TimeoutExpired as e:
        logger.warn("Command '{}' timeout!".format(cmd))
        if e.output:
            return e.output, -1
        else:
            return "", -1
    except subprocess.CalledProcessError as e:
        logger.warn("Command '{}' failed, return code={}".format(cmd, e.returncode))
        return e.output, e.returncode
    except Exception:
        logger.warn("Command '{}' failed to run.".format(cmd))
        return "", -2
github pkumusic / E-DRL / tensorpack / tfutils /
def _read_checkpoint_vars(model_path):
        """ return a set of strings """
        reader = tf.train.NewCheckpointReader(model_path)
        ckpt_vars = reader.get_variable_to_shape_map().keys()
        for v in ckpt_vars:
            if v.startswith('towerp'):
                logger.warn("Found {} in checkpoint. Anything from prediction tower shouldn't be saved.".format(
        return set(ckpt_vars)
github armandmcqueen / tensorpack-mask-rcnn / tensorpack / callbacks /
def _get_value_to_set(self):
        try:
            last = self.trainer.monitors.get_history(self.stat_name)[-1]
        except (KeyError, IndexError):
            logger.warn(
                "[StatMonitorParamSetter] No history data available for key '{}'.".format(self.stat_name))
            return None
        if len(self.history) and last[0] == self.history[-1][0]:
            logger.warn("StatMonitorParamSetter is triggered, but no new data has been added since last time.")
            return None

        self.history.append(last)

        if len(self.history) < self.history.maxlen:
            return None

        values = [k[1] for k in self.history]
        hist_first = values[0]
        if not self.reverse:
            hist_min = min(values)
            if hist_min < hist_first - self.threshold:  # small enough
                return None
        else:
            hist_max = max(values)
            if hist_max > hist_first + self.threshold:  # large enough
                return None
github tensorpack / tensorpack / tensorpack / graph_builder /
def log_failure(name, reason):
            logger.warn("[ReplicatedTrainer] Do not know how to sync variable '{}' across GPUs. "
                        "Reason: {} ".format(name, reason))
            assert name not in trainable_names, \
                "The aforementioned variable is trainable, so this is probably a fatal error."
                "[ReplicatedTrainer] This variable is non-trainable. "
                "Ignore this warning if you know it's OK to leave it out-of-sync.")
github tensorpack / tensorpack / tensorpack / models /
def wrapped_func(*args, **kwargs):
            assert args[0] is not None, args
            if use_scope:
                name, inputs = args[0], args[1]
                args = args[1:]  # actual positional args used to call func
                assert isinstance(name, six.string_types), "First argument for \"{}\" should be a string. ".format(
                    func.__name__) + "Did you forget to specify the name of the layer?"
            else:
                assert not log_shape
                if isinstance(args[0], six.string_types):
                    if use_scope is False:
                            "Please call layer {} without the first scope name argument, "
                            "or register the layer with use_scope=None to allow calling it "
                            "with scope names.".format(func.__name__))
                    name, inputs = args[0], args[1]
                    args = args[1:]  # actual positional args used to call func
                else:
                    inputs = args[0]
                    name = None
            if not (isinstance(inputs, (tf.Tensor, tf.Variable)) or
                    (isinstance(inputs, (list, tuple)) and
                        isinstance(inputs[0], (tf.Tensor, tf.Variable)))):
                raise ValueError("Invalid inputs to layer: " + str(inputs))

            # use kwargs from current argument scope
            actual_args = copy.copy(get_arg_scope()[func.__name__])
            # explicit kwargs overwrite argscope
github tensorpack / tensorpack / tensorpack / graph_builder /
            2. the op which syncs variables from GPU 0 to the other GPUs.
                It has to be run before training starts,
                and you can optionally run it later to sync non-trainable variables.
        """
        assert len(grad_list) == len(self.towers)
        raw_devices = ['/gpu:{}'.format(k) for k in self.towers]


        dtypes = set([x[0].dtype.base_dtype for x in grad_list[0]])
        dtypes_nccl_supported = [tf.float32, tf.float64]
        if get_tf_version_tuple() >= (1, 8):
            dtypes_nccl_supported.append(tf.float16)
        valid_for_nccl = all([k in dtypes_nccl_supported for k in dtypes])
        if self._mode == 'nccl' and not valid_for_nccl:
            logger.warn("Cannot use mode='nccl' because some gradients have unsupported types. Fallback to mode='cpu'")
            self._mode = 'cpu'

        if self._mode in ['nccl', 'hierarchical']:
            all_grads, all_vars = split_grad_list(grad_list)
            # use allreduce from tf-benchmarks
            # from .batch_allreduce import AllReduceSpecAlgorithm
            # algo = AllReduceSpecAlgorithm('nccl', list(range(8)), 0, 10)
            # all_grads, warmup_ops = algo.batch_all_reduce(all_grads, 1, True, False)
            # print("WARMUP OPS", warmup_ops)

            if self._mode == 'nccl':
                all_grads = allreduce_grads(all_grads, average=self._average)  # #gpu x #param
                packer = GradientPacker(len(raw_devices))
                succ = packer.compute_strategy(all_grads[0])
                if succ:
github armandmcqueen / tensorpack-mask-rcnn / tensorpack / graph_builder /
        Being deprecated.
        You're recommended to return a cost tensor directly from the
        :meth:`build_graph` method instead.

        This function takes the `self.cost` tensor defined by :meth:`build_graph`
        and automatically adds the losses in the
        ``tf.GraphKeys.REGULARIZATION_LOSSES`` collection to it.
        """
        log_deprecated(
            "get_cost() and self.cost",
            "Return the cost tensor directly in build_graph() instead!")
        cost = self._get_cost()
        reg_cost = regularize_cost_from_collection()
        if reg_cost.op.type != 'Const':
            logger.warn("Regularization losses found in collection, and a 'cost' tensor was "
                        "not returned by `build_graph`. Therefore applying regularization automatically!")
            return tf.add(cost, reg_cost, name='cost_with_regularizer')
        else:
            return cost
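
The docstring above recommends returning the cost from build_graph directly. A minimal sketch of that pattern (the model, loss, and optimizer are invented for illustration; a TF1-style API and a recent tensorpack ModelDesc are assumed):

import tensorflow as tf
from tensorpack import ModelDesc

class MyModel(ModelDesc):
    def inputs(self):
        return [tf.TensorSpec([None, 784], tf.float32, 'image'),
                tf.TensorSpec([None], tf.int32, 'label')]

    def build_graph(self, image, label):
        logits = tf.layers.dense(image, 10)
        # Return the cost tensor directly -- no self.cost, no get_cost().
        return tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=logits),
            name='cost')

    def optimizer(self):
        return tf.train.AdamOptimizer(1e-3)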