How to use the tflearn.input_data function in tflearn

To help you get started, we've selected a few examples of tflearn.input_data, based on popular ways it is used in public projects.

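In each of the snippets below, tflearn.input_data creates the network's input layer: shape describes one batch of inputs, with None in the first position standing in for a variable batch size. As a quick orientation, here is a minimal end-to-end sketch on made-up data (the shapes and layer sizes are illustrative only, not taken from any of the projects below):

import numpy as np
import tflearn
from tflearn.data_utils import to_categorical

# Made-up data: 100 samples with 16 features each, two classes
X = np.random.rand(100, 16).astype(np.float32)
Y = to_categorical(np.random.randint(0, 2, size=100), 2)

# Input layer: None = variable batch size, 16 = features per sample
net = tflearn.input_data(shape=[None, 16])
net = tflearn.fully_connected(net, 32, activation='relu')
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')

model = tflearn.DNN(net)
model.fit(X, Y, n_epoch=5, batch_size=16, show_metric=True)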

github RishalAggarwal / 3D-Convnet-for-Alzheimer-s-Detection / 3D Convolutional Network for Alzheimer's Detection / test cnn.py
import os

import numpy as np
import tensorflow as tf
import tflearn

data_dir = 'path_to_test_data'

datanp = []   # images
truenp = []   # labels

for file in os.listdir(data_dir):
    data = np.load(os.path.join(data_dir, file))
    datanp.append(data[0][0])
    truenp.append(data[0][1])

datanp = np.asarray(datanp)   # convert the list of volumes to an array so .shape works
sh = datanp.shape

tf.reset_default_graph()

net = tflearn.input_data(shape=[None, sh[1], sh[2], sh[3], sh[4]])
net = tflearn.conv_3d(net, 16, 5, strides=2, activation='leaky_relu', padding='VALID',
                      weights_init='xavier', regularizer='L2', weight_decay=0.01)
net = tflearn.max_pool_3d(net, kernel_size=3, strides=2, padding='VALID')
net = tflearn.conv_3d(net, 32, 3, strides=2, padding='VALID',
                      weights_init='xavier', regularizer='L2', weight_decay=0.01)
net = tflearn.normalization.batch_normalization(net)
net = tflearn.activations.leaky_relu(net)
net = tflearn.max_pool_3d(net, kernel_size=2, strides=2, padding='VALID')
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 1024, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.1, beta=0.1)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.6)
net = tflearn.fully_connected(net, 512, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.2, beta=0.2)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.7)
net = tflearn.fully_connected(net, 128, weights_init='xavier', regularizer='L2')
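The excerpt stops before the network's output layer. Since this is the project's test script, the remaining steps would typically add a classification head, wrap the graph in tflearn.DNN, load previously trained weights, and run prediction; the sketch below makes those assumptions and is not the repository's actual code (the checkpoint path and the two-class output are hypothetical):

# Hypothetical completion -- not taken from the repository
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net)               # estimator layer so tflearn.DNN can build the graph
model = tflearn.DNN(net)
model.load('path_to_trained_model')         # hypothetical checkpoint path
predictions = model.predict(datanp)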

github tflearn / tflearn / examples / nlp / lstm_generator_cityname.py
import os
import ssl
from six import moves

import tflearn
from tflearn.data_utils import *


path = "US_Cities.txt"
if not os.path.isfile(path):
    context = ssl._create_unverified_context()
    moves.urllib.request.urlretrieve("https://raw.githubusercontent.com/tflearn/tflearn.github.io/master/resources/US_Cities.txt", path, context=context)

maxlen = 20

string_utf8 = open(path, "r", encoding="utf-8").read()  # read the corpus as unicode text
X, Y, char_idx = \
    string_to_semi_redundant_sequences(string_utf8, seq_maxlen=maxlen, redun_step=3)

g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512)
g = tflearn.dropout(g, 0.5)
g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                       learning_rate=0.001)

m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                              seq_maxlen=maxlen,
                              clip_gradients=5.0,
                              checkpoint_path='model_us_cities')

for i in range(40):
    seed = random_sequence_from_string(string_utf8, maxlen)
    m.fit(X, Y, validation_set=0.1, batch_size=128,

github duoergun0729 / 1book / code / 16-5.py
    global max_sys_call
    # Data preprocessing
    # Sequence padding

    trainX = pad_sequences(trainX, maxlen=max_sequences_len, value=0.)
    testX = pad_sequences(testX, maxlen=max_sequences_len, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY_old=testY
    testY = to_categorical(testY, nb_classes=2)

    # Network building
    print "GET max_sequences_len embedding %d" % max_sequences_len
    print "GET max_sys_call embedding %d" % max_sys_call

    net = tflearn.input_data([None, max_sequences_len])
    net = tflearn.embedding(net, input_dim=max_sys_call+1, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.3)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.1,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=32, run_id="maidou")

    y_predict_list = model.predict(testX)
    #print y_predict_list
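model.predict returns per-class probabilities, so they still have to be mapped back to labels before comparing with testY_old; a hedged sketch of that post-processing (the repository's exact metric code may differ):

y_pred = [int(p[1] > p[0]) for p in y_predict_list]   # pick the more likely of the two classes
correct = sum(1 for a, b in zip(y_pred, testY_old) if a == b)
print("accuracy: %.4f" % (float(correct) / len(testY_old)))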

github tflearn / tflearn / examples / nlp / lstm.py
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.datasets import imdb

# IMDB Dataset loading
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=100, value=0.)
testX = pad_sequences(testX, maxlen=100, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY)
testY = to_categorical(testY)

# Network building
net = tflearn.input_data([None, 100])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
          batch_size=32)
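After fit returns, the same DNN object can score new reviews; a short usage sketch that is not part of the original example:

# Per-class probabilities for the held-out reviews
predictions = model.predict(testX)
print("first review positive probability:", predictions[0][1])

# Built-in evaluation against the one-hot test labels
print("test accuracy:", model.evaluate(testX, testY, batch_size=32))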

github ahmetozlu / aipa / tensorflow chatbot / chatbot-v1.py
    output_row[classes.index(doc[1])] = 1

    training.append([bag, output_row])

# shuffle our features and turn into np.array
random.shuffle(training)
training = np.array(training)

# create train and test lists
train_x = list(training[:,0])
train_y = list(training[:,1])

# reset underlying graph data
tf.reset_default_graph()
# Build neural network
net = tflearn.input_data(shape=[None, len(train_x[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
net = tflearn.regression(net)

# Define model and setup tensorboard
model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')
# Start training (apply gradient descent algorithm)
model.fit(train_x, train_y, n_epoch=1000, batch_size=8, show_metric=True)
model.save('model.tflearn')

def clean_up_sentence(sentence):
    # tokenize the pattern
    sentence_words = nltk.word_tokenize(sentence)
    # stem each word
    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]

github agakshat / maddpg / actorcritic.py
def initializeCriticNetwork(self):
    inputs = tflearn.input_data(shape=[None, self.input_dim])
    action = tflearn.input_data(shape=[None, self.output_dim])
    net = tflearn.fully_connected(inputs, 64)
    net = tflearn.layers.normalization.batch_normalization(net)
    net = tflearn.activations.relu(net)

    temp1 = tflearn.fully_connected(net, 64)
    temp2 = tflearn.fully_connected(action, 64)
    net = tflearn.activation(tf.matmul(net, temp1.W) + tf.matmul(action, temp2.W) + temp2.b,
                             activation='relu')

    w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
    out = tflearn.fully_connected(net, 1, weights_init=w_init)
    return inputs, action, out

github cgarciae / tensorbuilder / examples / tflearn_patch.py
# Here is an example using the `tflearn` patch
import tflearn
from tensorbuilder import tb
import tensorbuilder.patches.tflearn.patch

model = (
	tflearn.input_data(shape=[None, 784]).builder()
	.fully_connected(64)
	.dropout(0.5)
	.fully_connected(10, activation='softmax')
	.regression(optimizer='adam', loss='categorical_crossentropy')
	.map(tflearn.DNN)
	.tensor()
)

print(model)

github tobybreckon / fire-detection-cnn / firenet.py
import tensorflow as tf
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization

def construct_firenet(x, y, training=False):

    # Build network as per architecture in [Dunnings/Breckon, 2018]

    network = tflearn.input_data(shape=[None, y, x, 3], dtype=tf.float32)

    network = conv_2d(network, 64, 5, strides=4, activation='relu')

    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)

    network = conv_2d(network, 128, 4, activation='relu')

    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)

    network = conv_2d(network, 256, 1, activation='relu')

    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)

github GeniSysAI / NLU / tools / Model.py
def createDNNLayers(self, x, y):
        
        net = tflearn.input_data(shape=[None, len(x[0])])

        for i in range(self._confs["ClassifierSettings"]['FcLayers']):
            net = tflearn.fully_connected(net, self._confs["ClassifierSettings"]['FcUnits'])

        net = tflearn.fully_connected(net, len(y[0]), activation=str(self._confs["ClassifierSettings"]['Activation']))
        net = tflearn.regression(net)

        return net
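The number and width of the hidden layers come from the instance's configuration; here is a hypothetical ClassifierSettings block that would satisfy the code above (the key names are taken from the code, the values are invented for illustration):

# Hypothetical configuration -- values chosen for illustration only
_confs = {
    "ClassifierSettings": {
        "FcLayers": 2,            # number of hidden fully_connected layers
        "FcUnits": 8,             # units in each hidden layer
        "Activation": "softmax"   # activation of the output layer
    }
}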

github tflearn / tflearn / examples / images / variational_autoencoder.py
from scipy.stats import norm
import tensorflow as tf

import tflearn

# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=True)

# Params
original_dim = 784 # MNIST images are 28x28 pixels
hidden_dim = 256
latent_dim = 2

# Building the encoder
encoder = tflearn.input_data(shape=[None, 784], name='input_images')
encoder = tflearn.fully_connected(encoder, hidden_dim, activation='relu')
z_mean = tflearn.fully_connected(encoder, latent_dim)
z_std = tflearn.fully_connected(encoder, latent_dim)

# Sampler: Normal (gaussian) random distribution
eps = tf.random_normal(tf.shape(z_std), dtype=tf.float32, mean=0., stddev=1.0,
                       name='epsilon')
z = z_mean + tf.exp(z_std / 2) * eps

# Building the decoder (with scope to re-use these layers later)
decoder = tflearn.fully_connected(z, hidden_dim, activation='relu',
                                  scope='decoder_h')
decoder = tflearn.fully_connected(decoder, original_dim, activation='sigmoid',
                                  scope='decoder_out')

# Define VAE Loss
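The excerpt ends where the loss is about to be defined. A standard VAE loss combines a reconstruction term with the KL divergence between the learned latent distribution and a unit Gaussian; the sketch below follows that recipe and may differ in detail from the example's actual code:

def vae_loss(x_reconstructed, x_true):
    # Reconstruction loss: pixel-wise binary cross-entropy
    encode_decode_loss = x_true * tf.log(1e-10 + x_reconstructed) \
                         + (1 - x_true) * tf.log(1e-10 + 1 - x_reconstructed)
    encode_decode_loss = -tf.reduce_sum(encode_decode_loss, 1)
    # KL divergence between the latent distribution and N(0, 1)
    kl_div_loss = 1 + z_std - tf.square(z_mean) - tf.exp(z_std)
    kl_div_loss = -0.5 * tf.reduce_sum(kl_div_loss, 1)
    return tf.reduce_mean(encode_decode_loss + kl_div_loss)

net = tflearn.regression(decoder, optimizer='rmsprop', learning_rate=0.001,
                         loss=vae_loss, metric=None, name='target_images')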