How to use the tflearn.is_training function in tflearn

To help you get started, we’ve selected a few tflearn examples, based on popular ways it is used in public projects.
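tflearn.is_training does not build anything by itself: it flips a graph-wide boolean that layers such as dropout and batch normalization read at run time, so a single call switches the whole graph between training and inference behaviour. The recurring pattern in the examples below is is_training(False, session=sess) before an evaluation loop and is_training(True, session=sess) to resume training. Here is a minimal sketch (the layer sizes are illustrative, not taken from the examples):

import tensorflow as tf
import tflearn

# Dropout is only active while the shared training flag is on.
net = tflearn.input_data(shape=[None, 16])
net = tflearn.fully_connected(net, 32, activation='relu')
net = tflearn.dropout(net, 0.5)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    tflearn.is_training(True, session=sess)   # enable dropout for training
    # ... run training steps here ...
    tflearn.is_training(False, session=sess)  # disable dropout for evaluation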


github mindgarage / Ovation / templates / heirarchical_attention_sentiment_analysis_classification.py (View on Github)
def evaluate(sess, dataset, model, step, max_dev_itr=100, verbose=True,
             mode='val'):
    results_dir = model.val_results_dir if mode == 'val' \
        else model.test_results_dir
    samples_path = os.path.join(results_dir,
                                '{}_samples_{}.txt'.format(mode, step))
    history_path = os.path.join(results_dir,
                                '{}_history.txt'.format(mode))

    avg_val_loss, avg_val_pco = 0.0, 0.0
    print("Running Evaluation {}:".format(mode))
    tflearn.is_training(False, session=sess)

    # This is needed to reset the local variables initialized by
    # TF for calculating streaming Pearson Correlation and MSE
    sess.run(tf.local_variables_initializer())
    all_dev_review, all_dev_score, all_dev_gt = [], [], []
    dev_itr = 0
    while (dev_itr < max_dev_itr and max_dev_itr != 0) \
            or mode in ['test', 'train']:
        val_batch = dataset.next_batch(FLAGS.batch_size, rescale=[0.0, 1.0],
                                       pad=model.args["sequence_length"])
        val_loss, val_pco, val_mse, val_ratings = \
            model.evaluate_step(sess, val_batch.text, val_batch.ratings,
                                val_batch.lengths)
        avg_val_loss += val_mse
        avg_val_pco += val_pco[0]
        all_dev_review += id2seq(val_batch.text, dataset.vocab_i2w)
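Two details in this template are worth copying: the flag is switched off once, before the loop, so every batch in the evaluation runs with dropout disabled, and tf.local_variables_initializer() is re-run to zero the streaming Pearson correlation and MSE state left over from the previous evaluation.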

github TensorMSA / tensormsa_old / tflearn / helpers / trainer.py (View on Github)
def evaluate_flow(session, ops_to_evaluate, dataflow):
    if not isinstance(ops_to_evaluate, list):
        ops_to_evaluate = [ops_to_evaluate]
    tflearn.is_training(False, session)
    dataflow.reset()
    dataflow.start()
    res = [0. for i in ops_to_evaluate]
    feed_batch = dataflow.next()

    while feed_batch:
        r = session.run(ops_to_evaluate, feed_batch)
        current_batch_size = get_current_batch_size(feed_batch, dataflow)
        for i in range(len(r)):
            res[i] += r[i] * current_batch_size
        feed_batch = dataflow.next()
    res = [r / dataflow.n_samples for r in res]
    return res
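Because the last batch of a dataset is usually smaller than the rest, evaluate_flow weights each batch result by its actual size and divides by dataflow.n_samples at the end. With batches of 64 and 36 samples scoring 0.75 and 0.50, for example, this yields the exact dataset mean (0.75·64 + 0.50·36) / 100 = 0.66 rather than the naive average 0.625.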

github mindgarage / Ovation / templates / sentiment_analysis_regression.py (View on Github)
def evaluate(sess, dataset, model, step, max_dev_itr=100, verbose=True,
             mode='val'):
    results_dir = model.val_results_dir if mode == 'val' \
        else model.test_results_dir
    samples_path = os.path.join(results_dir,
                                '{}_samples_{}.txt'.format(mode, step))
    history_path = os.path.join(results_dir,
                                '{}_history.txt'.format(mode))

    avg_val_loss, avg_val_pco = 0.0, 0.0
    print("Running Evaluation {}:".format(mode))
    tflearn.is_training(False, session=sess)

    # This is needed to reset the local variables initialized by
    # TF for calculating streaming Pearson Correlation and MSE
    sess.run(tf.local_variables_initializer())
    all_dev_review, all_dev_score, all_dev_gt = [], [], []
    dev_itr = 0
    while (dev_itr < max_dev_itr and max_dev_itr != 0) \
            or mode in ['test', 'train']:
        val_batch = dataset.next_batch(FLAGS.batch_size, rescale=[0.0, 1.0],
                                       pad=model.args["sequence_length"])
        val_loss, val_pco, val_mse, val_ratings = \
            model.evaluate_step(sess, val_batch.text, val_batch.ratings)
        avg_val_loss += val_mse
        avg_val_pco += val_pco[0]
        all_dev_review += id2seq(val_batch.text, dataset.vocab_i2w)
        all_dev_score += val_ratings.tolist()

github mindgarage / Ovation / templates / attention_blstm_quora.py (View on Github)
        if mode == 'train' and dataset.epochs_completed == 1: break

    result_set = (all_dev_x1, all_dev_x2, all_dev_sims, all_dev_gt)
    avg_loss = avg_val_loss / dev_itr
    avg_pco = avg_val_pco / dev_itr
    if verbose:
        print("{}:\t Loss: {}\tPco{}".format(mode, avg_loss, avg_pco))

    with open(samples_path, 'w') as sf, open(history_path, 'a') as hf:
        for x1, x2, sim, gt in zip(all_dev_x1, all_dev_x2,
                                   all_dev_sims, all_dev_gt):
            sf.write('{}\t{}\t{}\t{}\n'.format(x1, x2, sim, gt))
        hf.write('STEP:{}\tTIME:{}\tPCO:{}\tMSE:{}\n'.format(
            step, datetime.datetime.now().isoformat(),
            avg_pco, avg_loss))
    tflearn.is_training(True, session=sess)
    return avg_loss, avg_pco, result_set
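The closing is_training(True, session=sess) is the other half of the pattern: this evaluate function is called from inside a training loop, so the template restores the training flag before returning, re-enabling dropout for the next training step.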

github tflearn / tflearn / tflearn / helpers / evaluator.py (View on Github)
""" Evaluate.

        Evaluate a list of tensors over a whole dataset. Generally,
        the 'ops' argument is a list of average performance metrics
        (such as mean accuracy, top-3 error, etc.).

        Arguments:
            feed_dict: `dict`. The feed dictionary of data.
            ops: list of `Tensors`. The tensors to evaluate.
            batch_size: `int`. A batch size.

        Returns:
            The mean result per tensor, averaged over all batches.

        """
        tflearn.is_training(False, self.session)
        coord = tf.train.Coordinator()
        inputs = tf.get_collection(tf.GraphKeys.INPUTS)
        # Data Preprocessing
        dprep_dict = {}
        dprep_collection = tf.get_collection(tf.GraphKeys.DATA_PREP)
        for i in range(len(inputs)):
            # Support for custom inputs not using dprep/daug
            if len(dprep_collection) > i:
                if dprep_collection[i] is not None:
                    dprep_dict[inputs[i]] = dprep_collection[i]
        # Data Flow
        df = data_flow.FeedDictFlow(feed_dict, coord,
                                    batch_size=batch_size,
                                    dprep_dict=dprep_dict,
                                    daug_dict=None,
                                    index_array=None,
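This helper sits behind tflearn's high-level model API: both this Evaluator.evaluate method and the evaluate_flow helper shown earlier turn the training flag off for you, so with the high-level API an explicit toggle is usually unnecessary. A sketch of the usual entry point, assuming a trained tflearn.DNN instance named model and test arrays test_x / test_y (the names are illustrative):

# model.evaluate batches the data and switches off training
# mode internally, as the helpers above show.
score = model.evaluate(test_x, test_y, batch_size=128)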

github ZhengyaoJiang / PGPortfolio / pgportfolio / learn / rollingtrainer.py (View on Github)
def __rolling_logging(self):
    fast_train = self.train_config["fast_train"]
    if not fast_train:
        tflearn.is_training(False, self._agent.session)

        v_pv, v_log_mean = self._evaluate("validation",
                                          self._agent.portfolio_value,
                                          self._agent.log_mean)
        t_pv, t_log_mean = self._evaluate("test",
                                          self._agent.portfolio_value,
                                          self._agent.log_mean)
        loss_value = self._evaluate("training", self._agent.loss)

        logging.info('training loss is %s\n' % loss_value)
        logging.info('the portfolio value on validation asset is %s\nlog_mean is %s\n' %
                     (v_pv, v_log_mean))
        logging.info('the portfolio value on test asset is %s\nlog_mean is %s' %
                     (t_pv, t_log_mean))
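Here the toggle guards logging-time evaluation inside PGPortfolio's rolling trainer: unless fast_train is set (which skips these extra passes to save time), the agent's network is put into inference mode before computing the validation, test, and training-loss metrics.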

github ZhengyaoJiang / PGPortfolio / pgportfolio / learn / nnagent.py (View on Github)
def decide_by_history(self, history, last_w):
        assert isinstance(history, np.ndarray),\
            "the history should be a numpy array, not %s" % type(history)
        assert not np.any(np.isnan(last_w))
        assert not np.any(np.isnan(history))
        tflearn.is_training(False, self.session)
        history = history[np.newaxis, :, :, :]
        return np.squeeze(self.session.run(self.__net.output, feed_dict={self.__net.input_tensor: history,
                                                                         self.__net.previous_w: last_w[np.newaxis, 1:],
                                                                         self.__net.input_num: 1}))
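decide_by_history shows the pure-inference case: every call disables training mode, adds a batch dimension to the history window with np.newaxis, and feeds the previous weight vector minus its first entry (which in PGPortfolio appears to be the cash weight) to get a single portfolio decision back via np.squeeze.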

github locuslab / icnn / multi-label-cls / icnn_ebundle.py (View on Github)
            if len(self.proj) > 0:
                self.sess.run(self.proj)
            else:
                print("Warning: Not projecting any weights.")
            self.trainWriter.add_summary(summary, i)

            trainW.writerow((i, trainF1, l_yN))
            trainF.flush()

            print(" + trainF1: {:0.2f}".format(trainF1))
            print(" + loss: {:0.5e}".format(l_yN))
            print(" + time: {:0.2f} s".format(time.time()-start))

            if i % np.ceil(nTrain/args.trainBatchSz) == 0:
                print("=== Testing ===")
                tflearn.is_training(True)
                def fg(yhats):
                    fd = {self.x_: valX, self.y_: valY}
                    e, ge = self.sess.run([self.E_, self.dE_dy_], feed_dict=fd)
                    return e, ge

                y0 = np.full(valY.shape, 0.5)
                yN, G, h, lam, ys, _ = bundle_entropy.solveBatch(
                    fg, y0, nIter=args.inference_nIter)
                testF1 = util.macroF1(valY, yN)
                l_yN = crossEntr(valY, yN)
                print(" + testF1: {:0.4f}".format(testF1))
                testW.writerow((i, testF1, l_yN))
                testF.flush()

                if testF1 > bestTestF1:
                    print('+ Saving best model.')
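Two things distinguish this last example: is_training is called with no session argument, in which case tflearn falls back to the current default session, and the flag is set to True even though the block that follows is the test pass. The other examples on this page flip it to False around evaluation, so treat that combination as specific to this codebase rather than as the general pattern.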