How to use the reader.ptb_iterator function in reader

To help you get started, we’ve selected a few reader.ptb_iterator examples, based on popular ways it is used in public projects.

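Before turning to the examples, it helps to know the iterator's contract. In these projects, reader is the reader.py module from TensorFlow's PTB tutorial, and ptb_iterator(raw_data, batch_size, num_steps) takes a flat list of word ids and yields (x, y) pairs of NumPy arrays of shape (batch_size, num_steps), where y is x shifted one step ahead. A minimal sketch of that behavior:

import numpy as np

def ptb_iterator(raw_data, batch_size, num_steps):
    """Yield (x, y) minibatches of shape (batch_size, num_steps)."""
    raw_data = np.array(raw_data, dtype=np.int32)
    batch_len = len(raw_data) // batch_size
    # Lay the flat word-id stream out as batch_size contiguous rows.
    data = raw_data[:batch_size * batch_len].reshape(batch_size, batch_len)
    epoch_size = (batch_len - 1) // num_steps
    if epoch_size == 0:
        raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
    for i in range(epoch_size):
        x = data[:, i * num_steps:(i + 1) * num_steps]
        # Targets are the inputs shifted one position ahead.
        y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1]
        yield (x, y)

This also explains the expression ((len(data) // batch_size) - 1) // num_steps that every snippet below computes for epoch_size: it is the number of (x, y) pairs the iterator yields per epoch.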

github qinyao-he / bit-rnn / train.py
def run_epoch(session, m, data, eval_op, verbose=False):
    """Runs the model on the given data."""
    epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = m.initial_state.eval()
    for step, (x, y) in enumerate(
            reader.ptb_iterator(data, m.batch_size, m.num_steps)):
        cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                     {m.input_data: x,
                                      m.targets: y,
                                      m.initial_state: state})
        costs += cost
        iters += m.num_steps

        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * m.batch_size / (time.time() - start_time)))

    return np.exp(costs / iters)
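Note how this snippet threads state: the final_state from one batch is fed back as initial_state for the next. That is valid because ptb_iterator slices each row into consecutive windows, so batch t+1 begins exactly where batch t's targets ended. A quick check of that property, using the sketch above on toy data:

batches = list(ptb_iterator(list(range(100)), batch_size=4, num_steps=5))
(x0, y0), (x1, y1) = batches[0], batches[1]
# Each row of the next inputs starts where the previous targets ended.
assert (x1[:, 0] == y0[:, -1]).all()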
github ujjax / fast-slow-lstm / main.py
def run_epoch(model, data, is_train=False, lr=1.0):
    """Runs the model on the given data."""
    if is_train:
        model.is_train = True
    else:
        model.eval()
    
    epoch_size = ((len(data) // model.batch_size) - 1) // model.num_steps
    start_time = time.time()
    #hidden = model.init_hidden()
    costs = 0.0
    iters = 0.0

    for step, (x, y) in enumerate(reader.ptb_iterator(data, model.batch_size, model.num_steps)):
        inputs = Variable(torch.from_numpy(x.astype(np.int64)).transpose(0, 1).contiguous()).cuda()
        inputs = torch.transpose(inputs, 0, 1)
        
        model.zero_grad()
        #hidden = repackage_hidden(hidden)
        outputs, hidden = model(inputs)
        targets = Variable(torch.from_numpy(y.astype(np.int64)).transpose(0, 1).contiguous()).cuda()
        tt = torch.squeeze(targets.view(-1, model.batch_size * model.num_steps))

        loss = criterion(outputs.view(-1, model.vocab_size), tt)
        costs += loss.data[0] * model.num_steps
        iters += model.num_steps

        if is_train:
            loss.backward()
            torch.nn.utils.clip_grad_norm(model.parameters(), 0.25)
github xiaohu2015 / DeepLearning_tutorials / examples / lstm_model_ptb / ptb_lstm_model.py
def model_run_epoch(sess, model, data, eval_op, verbose=True):
    """Runs the model for one epoch on the given data"""
    epoch_size = ((len(data)// model.batch_size) - 1) // model.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = sess.run(model.initial_state)
    for step, (x, y) in enumerate(ptb_iterator(data, model.batch_size, model.num_steps)):
        feed_dict = {model.input: x, model.target: y, model.initial_state: state}
        cost, state, _ = sess.run([model.cost, model.final_state, eval_op],
                                    feed_dict=feed_dict)
        costs += cost
        iters += model.num_steps
        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters), iters * model.batch_size / (time.time() - start_time)))
    return np.exp(costs/iters)
github deeplearningathome / rnn_text_writer / ptb_word_lm.py
def run_epoch(session, model, data, is_train=False, verbose=False):
  """Runs the model on the given data."""
  epoch_size = ((len(data) // model.batch_size) - 1) // model.num_steps
  start_time = time.time()
  costs = 0.0
  iters = 0
  state = session.run(model.initial_state)

  for step, (x, y) in enumerate(reader.ptb_iterator(data, model.batch_size, model.num_steps)):
    if is_train:
      fetches = [model.cost, model.final_state, model.train_op]
    else:
      fetches = [model.cost, model.final_state]
    feed_dict = {}
    feed_dict[model.input_data] = x
    feed_dict[model.targets] = y
    for layer_num, (c, h) in enumerate(model.initial_state):
      feed_dict[c] = state[layer_num].c
      feed_dict[h] = state[layer_num].h

    if is_train:
      cost, state, _ = session.run(fetches, feed_dict)
    else:
      cost, state = session.run(fetches, feed_dict)
    # accumulate the running cost, as in the other run_epoch examples above
    costs += cost
    iters += model.num_steps

    if verbose and step % (epoch_size // 10) == 10:
      print("%.3f perplexity: %.3f speed: %.0f wps" %
            (step * 1.0 / epoch_size, np.exp(costs / iters),
             iters * model.batch_size / (time.time() - start_time)))

  return np.exp(costs / iters)
github DeNeutoy / act-tensorflow / src / epoch.py
def run_epoch(session, m, data, eval_op, max_steps=None, verbose=False):
    """Runs the model on the given data."""
    epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    num_batch_steps_completed = 0

    for step, (x, y) in enumerate(reader.ptb_iterator(data, m.batch_size, m.num_steps)):
        cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                     {m.input_data: x,
                                      m.targets: y})
        costs += cost
        iters += m.num_steps
        num_batch_steps_completed += 1

        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * m.batch_size / (time.time() - start_time)))

        if max_steps is not None and iters > max_steps:
            break

    return (costs / iters)
github katsugeneration / tensor-fsmn / ptb.py
def _run_epoch(self, session, data, eval_op, verbose=False):
        epoch_size = ((len(data) // self._batch_size) - 1) // self._num_steps
        start_time = time.time()
        costs = 0.0
        iters = 0

        for step, (x, y) in enumerate(reader.ptb_iterator(data, self._batch_size, self._num_steps)):
            fetches, feed_dict = self._one_loop_setup(eval_op)
            feed_dict[self._input_data] = x
            feed_dict[self._targets] = y

            res = session.run(fetches, feed_dict)
            self.train_writer.add_summary(res[2], step // 13)
            cost = res[0]

            costs += cost
            iters += self._num_steps

            if verbose and step % (epoch_size // 10) == 10:
                sys.stdout.write("%.3f perplexity: %.3f speed: %.0f wps\n" %
                    (step * 1.0 / epoch_size, np.exp(costs / iters),
                    iters * self._batch_size / (time.time() - start_time)))
                sys.stdout.flush()
github katsugeneration / tensor-fsmn / ptb.py
def predict(self, session, data, word_to_id):
        def _get_word_fromid(word_to_id, search_id):
            for word, wid in word_to_id.items():
                if wid == search_id:
                    return word

        for step, (x, y) in enumerate(reader.ptb_iterator(data, self._batch_size, self._num_steps)):
            fetches, feed_dict = self._one_loop_setup(self._logits)
            feed_dict[self._input_data] = x
            feed_dict[self._targets] = y

            res = session.run(fetches, feed_dict)
            label = res[1]
            label = np.argmax(label, 1)
            y = np.reshape(y, (self._batch_size * self._num_steps))
            for pre, real in zip(label, y):
                sys.stdout.write("Predict %s : Real %s\n" % (_get_word_fromid(word_to_id, pre), _get_word_fromid(word_to_id, real)))
github team79 / MyDLRoad / lstm.py
def run_epoch( session, model, data, train_op, output_log ) :
    total_costs = 0.0
    iters = 0
    state = session.run( model.initial_state )

    # step = 0
    # [x,y] = reader.ptb_producer( data, model.batch_size, model.num_steps )
    # coord = tf.train.Coordinator()
    # tf.train.start_queue_runners(session, coord=coord)
    for step, ( x, y ) in enumerate(
        reader.ptb_iterator( data, model.batch_size, model.num_steps )
    ): 
        # [a,b] = session.run([x,y])
        # if a.size != model.batch_size * model.num_steps :
        #     break
        cost, state, _ = session.run(
            [ model.cost, model.final_state, train_op ],
            {
                model.input_data : x,
                model.targets : y,
                model.initial_state : state
            }
        )
        total_costs += cost
        iters += model.num_steps
        step += 1
        if output_log and step % 100 == 0 :
            # report running perplexity, in the style of the examples above
            print( "After %d steps, perplexity is %.3f" % ( step, np.exp( total_costs / iters ) ) )

    return np.exp( total_costs / iters )
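All of these run_epoch variants are driven the same way: load the PTB word-id streams with the tutorial reader, then loop over epochs. A minimal driver sketch; m, m.train_op, num_epochs, and the data path are placeholders rather than code from any of the repos above:

import tensorflow as tf

# ptb_raw_data returns the train/valid/test id streams plus the vocabulary size.
train_data, valid_data, test_data, vocab_size = reader.ptb_raw_data("data/ptb")

with tf.Session() as session:
    tf.initialize_all_variables().run()
    for epoch in range(num_epochs):
        train_pp = run_epoch(session, m, train_data, m.train_op, verbose=True)
        valid_pp = run_epoch(session, m, valid_data, tf.no_op())
        print("Epoch %d: train perplexity %.3f, valid perplexity %.3f"
              % (epoch + 1, train_pp, valid_pp))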