How to use the reader.ptb_producer function in reader

To help you get started, we’ve selected a few reader.ptb_producer examples, based on popular ways it is used in public projects.

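The typical pattern looks like the minimal sketch below, assuming the reader module from the TensorFlow 1.x PTB tutorial: ptb_producer(raw_data, batch_size, num_steps, name=None) returns an (input, targets) pair of [batch_size, num_steps] integer tensors backed by an input queue, with the targets shifted one step to the right, so the queue-runner threads must be started before the tensors are evaluated.

import tensorflow as tf
import reader  # the TensorFlow PTB tutorial reader module

raw_data = list(range(1000))  # stand-in for a list of word ids
batch_size, num_steps = 4, 5

# x and y are [batch_size, num_steps] integer tensors; y is x shifted one step
x, y = reader.ptb_producer(raw_data, batch_size, num_steps, name='PTBProducer')

with tf.Session() as sess:
    # ptb_producer is queue-based, so start the queue-runner threads first
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    inputs, targets = sess.run([x, y])
    coord.request_stop()
    coord.join(threads)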

github jingli9111 / RUM-Tensorflow / ptb_task.py
def __init__(self, config, data, name=None):
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
        self.input_data, self.targets = reader.ptb_producer(
            data, batch_size, num_steps, name=name)
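In these snippets, epoch_size counts how many (batch_size, num_steps) batches the producer can yield per epoch: the stock reader lays the data out as batch_size rows of len(data) // batch_size word ids, one position is reserved for the one-step-shifted targets, and each batch consumes num_steps positions per row, giving ((len(data) // batch_size) - 1) // num_steps. For example, 10,000 word ids with batch_size = 20 and num_steps = 35 yield (10000 // 20 - 1) // 35 = 14 batches per epoch.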
github lverwimp / tf-lm / scripts / word_lm_rescore_nbest.py
def __init__(self, config, data, name=None):
		flattened_data = [word for sentence in data for word in sentence] # flatten list of lists
		self.batch_size = batch_size = config['batch_size']
		self.num_steps = num_steps = config['num_steps']
		self.epoch_size = ((len(flattened_data) // batch_size) - 1) // num_steps

		# input_data = Tensor of size batch_size x num_steps, same for targets (but shifted 1 step to the right)
		self.input_data, self.targets = reader.ptb_producer(data, config, name=name)
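Note that tf-lm appears to ship its own variant of the reader: here ptb_producer takes the whole config dict (from which it presumably reads batch_size and num_steps itself) rather than the separate batch_size and num_steps arguments used by the stock TensorFlow PTB reader in the other examples.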
github jingli9111 / RUM-Tensorflow / main.py
def __init__(self, config, data, name=None):
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
        self.input_data, self.targets = reader.ptb_producer(
            data, batch_size, num_steps, name=name)
github cmdowney / psrnn / ptb_word_lm.py
def __init__(self, config, data, name=None):
    self.batch_size = batch_size = config.batch_size
    self.num_steps = num_steps = config.num_steps
    self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
    self.input_data, self.targets = reader.ptb_producer(
        data, batch_size, num_steps, name=name)
github woodfrog / ActionRecognition / rnn_practice / tf_rnn_tut / ptb_word_lm.py
def __init__(self, config, data, name=None):
        '''
        num_steps: the number of timesteps (or unrolled steps)
        '''
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
        self.input_data, self.targets = reader.ptb_producer(
            data, batch_size, num_steps, name=name)
github rdangovs / rotational-unit-of-memory / tasks / LM / main.py
def __init__(self, config, data, name=None):
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
        self.input_data, self.targets = reader.ptb_producer(
            data, batch_size, num_steps, name=name)
github mirceamironenco / BayesianRecurrentNN / bayesian_rnn.py
def __init__(self, config, data, name=None):
		self.batch_size = batch_size = config.batch_size
		self.num_steps = num_steps = config.num_steps
		self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
		self.input_data, self.targets = reader.ptb_producer(
			data, batch_size, num_steps, name=name)
github lverwimp / tf-lm / scripts / word_lm.py
def __init__(self, config, data, name=None):
		self.batch_size = batch_size = config['batch_size']
		self.num_steps = num_steps = config['num_steps']
		self.epoch_size = ((len(data) // batch_size) - 1) // num_steps

		# input_data = Tensor of size batch_size x num_steps, same for targets (but shifted 1 step to the right)
		self.input_data, self.targets = reader.ptb_producer(data, config, name=name)
github widiot / tensorflow-practices / lstm / nl-modeling / train.py
    train_data_len = len(train_data)  # size of the dataset
    train_batch_len = train_data_len // TRAIN_BATCH_SIZE  # number of batches
    train_epoch_size = (train_batch_len - 1) // TRAIN_NUM_STEP  # training steps per epoch

    valid_data_len = len(valid_data)
    valid_batch_len = valid_data_len // EVAL_BATCH_SIZE
    valid_epoch_size = (valid_batch_len - 1) // EVAL_NUM_STEP

    test_data_len = len(test_data)
    test_batch_len = test_data_len // EVAL_BATCH_SIZE
    test_epoch_size = (test_batch_len - 1) // EVAL_NUM_STEP

    # Define the variable initializer
    initializer = tf.random_uniform_initializer(-0.05, 0.05)

    # Define the model used for training
    with tf.variable_scope(
            'language_model', reuse=None, initializer=initializer):
        train_model = PTBModel(True, TRAIN_BATCH_SIZE, TRAIN_NUM_STEP)

    # Define the model used for evaluation, reusing the training variables
    with tf.variable_scope(
            'language_model', reuse=True, initializer=initializer):
        eval_model = PTBModel(False, EVAL_BATCH_SIZE, EVAL_NUM_STEP)

    # Generate the data queues: after the models whose batch_size/num_steps
    # they read, but before the queue-runner threads are started
    train_queue = reader.ptb_producer(train_data, train_model.batch_size,
                                      train_model.num_steps)
    valid_queue = reader.ptb_producer(valid_data, eval_model.batch_size,
                                      eval_model.num_steps)
    test_queue = reader.ptb_producer(test_data, eval_model.batch_size,
                                     eval_model.num_steps)
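Because ptb_producer is built on TF1 input queues, these train/valid/test queues only start yielding batches once tf.train.start_queue_runners has been called inside the session, which is the ordering constraint the comment above refers to.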