I am trying to use a deep neural network architecture to classify against a binary label value: 0 and +1. Here is my code to do it in TensorFlow. This question also carries forward from the discussion in a previous question.
import tensorflow as tf
import numpy as np
from preprocess import create_feature_sets_and_labels
train_x,train_y,test_x,test_y = create_feature_sets_and_labels()
x = tf.placeholder('float', [None, 5])
y = tf.placeholder('float')
n_nodes_hl1 = 500
n_nodes_hl2 = 500
# n_nodes_hl3 = 500
n_classes = 1
batch_size = 100
def neural_network_model(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([5, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}

    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}

    # hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
    #                   'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}

    # output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
    #                 'biases': tf.Variable(tf.random_normal([n_classes]))}

    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)

    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)

    # l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    # l3 = tf.nn.relu(l3)

    # output = tf.transpose(tf.add(tf.matmul(l3, output_layer['weights']), output_layer['biases']))
    output = tf.add(tf.matmul(l2, output_layer['weights']), output_layer['biases'])
    return output
def train_neural_network(x):
    prediction = tf.sigmoid(neural_network_model(x))
    cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(prediction, y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    hm_epochs = 10
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())

        for epoch in range(hm_epochs):
            epoch_loss = 0
            i = 0
            while i < len(train_x):
                start = i
                end = i + batch_size
                batch_x = np.array(train_x[start:end])
                batch_y = np.array(train_y[start:end])

                _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                              y: batch_y})
                epoch_loss += c
                i += batch_size
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        # correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        # accuracy = tf.reduce_mean(tf.cast(correct, 'float'))

        predicted_class = tf.greater(prediction, 0.5)
        correct = tf.equal(predicted_class, tf.equal(y, 1.0))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))

        # print(test_x.shape)
        # accuracy = tf.nn.l2_loss(prediction - y, name="squared_error_test_cost") / test_x.shape[0]
        print('Accuracy:', accuracy.eval({x: test_x, y: test_y}))

train_neural_network(x)
Specifically (carrying over the discussion from the previous question), I removed one layer, hidden_3_layer. I changed

prediction = neural_network_model(x)

to

prediction = tf.sigmoid(neural_network_model(x))

and added the predicted_class / correct / accuracy part according to Neil's answer. I also changed all -1s to 0s in my csv.
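One thing I am not sure about: as far as I know, tf.nn.sigmoid_cross_entropy_with_logits applies the sigmoid internally and expects raw, unscaled logits, so with the code above the sigmoid effectively gets applied twice before the cost. A variant I have been considering (just a sketch, not what the runs below use) would keep the logits and the sigmoid output separate:

# Sketch of a variant: feed raw logits to the cost, and use the
# sigmoid output only for predicted_class / accuracy.
logits = neural_network_model(x)
prediction = tf.sigmoid(logits)
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits, y))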
This is my trace:
('Epoch', 0, 'completed out of', 10, 'loss:', 37.312037646770477)
('Epoch', 1, 'completed out of', 10, 'loss:', 37.073578298091888)
('Epoch', 2, 'completed out of', 10, 'loss:', 37.035196363925934)
('Epoch', 3, 'completed out of', 10, 'loss:', 37.035196363925934)
('Epoch', 4, 'completed out of', 10, 'loss:', 37.035196363925934)
('Epoch', 5, 'completed out of', 10, 'loss:', 37.035196363925934)
('Epoch', 6, 'completed out of', 10, 'loss:', 37.035196363925934)
('Epoch', 7, 'completed out of', 10, 'loss:', 37.035196363925934)
('Epoch', 8, 'completed out of', 10, 'loss:', 37.035196363925934)
('Epoch', 9, 'completed out of', 10, 'loss:', 37.035196363925934)
('Accuracy:', 0.42608696)
As you can see, the loss stops decreasing after the first few epochs, so I don't know whether the network is actually learning anything.
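To check whether the outputs are saturating, I figure I could inspect the range of the sigmoid outputs on the test set (a minimal diagnostic sketch; these lines would go inside the with tf.Session() block after training):

# Diagnostic: if min/mean/max all collapse towards 0 or 1,
# the network is predicting (almost) the same class for everything.
preds = sess.run(prediction, feed_dict={x: np.array(test_x)})
print('pred min/mean/max:', preds.min(), preds.mean(), preds.max())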
Here are results from multiple re-runs; they vary wildly:
('Epoch', 0, 'completed out of', 10, 'loss:', 26.513012945652008)
('Epoch', 1, 'completed out of', 10, 'loss:', 26.513012945652008)
('Epoch', 2, 'completed out of', 10, 'loss:', 26.513012945652008)
('Epoch', 3, 'completed out of', 10, 'loss:', 26.513012945652008)
('Epoch', 4, 'completed out of', 10, 'loss:', 26.513012945652008)
('Epoch', 5, 'completed out of', 10, 'loss:', 26.513012945652008)
('Epoch', 6, 'completed out of', 10, 'loss:', 26.513012945652008)
('Epoch', 7, 'completed out of', 10, 'loss:', 26.513012945652008)
('Epoch', 8, 'completed out of', 10, 'loss:', 26.513012945652008)
('Epoch', 9, 'completed out of', 10, 'loss:', 26.513012945652008)
('Accuracy:', 0.60124224)
another:
('Epoch', 0, 'completed out of', 10, 'loss:', 22.873702049255371)
('Epoch', 1, 'completed out of', 10, 'loss:', 22.873702049255371)
('Epoch', 2, 'completed out of', 10, 'loss:', 22.873702049255371)
('Epoch', 3, 'completed out of', 10, 'loss:', 22.873702049255371)
('Epoch', 4, 'completed out of', 10, 'loss:', 22.873702049255371)
('Epoch', 5, 'completed out of', 10, 'loss:', 22.873702049255371)
('Epoch', 6, 'completed out of', 10, 'loss:', 22.873702049255371)
('Epoch', 7, 'completed out of', 10, 'loss:', 22.873702049255371)
('Epoch', 8, 'completed out of', 10, 'loss:', 22.873702049255371)
('Epoch', 9, 'completed out of', 10, 'loss:', 22.873702049255371)
('Accuracy:', 1.0)
and another:
('Epoch', 0, 'completed out of', 10, 'loss:', 23.163824260234833)
('Epoch', 1, 'completed out of', 10, 'loss:', 22.88000351190567)
('Epoch', 2, 'completed out of', 10, 'loss:', 22.873702049255371)
('Epoch', 3, 'completed out of', 10, 'loss:', 22.873702049255371)
('Epoch', 4, 'completed out of', 10, 'loss:', 22.873702049255371)
('Epoch', 5, 'completed out of', 10, 'loss:', 22.873702049255371)
('Epoch', 6, 'completed out of', 10, 'loss:', 22.873702049255371)
('Epoch', 7, 'completed out of', 10, 'loss:', 22.873702049255371)
('Epoch', 8, 'completed out of', 10, 'loss:', 22.873702049255371)
('Epoch', 9, 'completed out of', 10, 'loss:', 22.873702049255371)
('Accuracy:', 0.99627328)
I have also seen an accuracy value of 0.0. -_-
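Since each run starts from different random weights, I assume fixing the seeds would at least make re-runs comparable (a sketch; these calls would need to go before the graph is built):

# Fix the random seeds so re-runs start from identical weights,
# separating the run-to-run swings from the model itself.
np.random.seed(0)
tf.set_random_seed(0)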
---------------EDIT---------------
Some details about the data and how it is processed. I am using daily stock data for IBM from Yahoo! Finance over an (almost) 20-year period, which amounts to roughly 5200 rows. With test_size = 0.2 that leaves about 4160 training rows, i.e. about 42 batches of 100 per epoch, so each epoch_loss above is a sum over roughly 42 batch losses.
Here is how I am processing it:
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import csv
import pickle
def create_feature_sets_and_labels(test_size = 0.2):
    df = pd.read_csv("ibm.csv")
    df = df.iloc[::-1]
    features = df.values
    testing_size = int(test_size*len(features))

    train_x = list(features[1:,1:6][:-testing_size])
    train_y = list(features[1:,7][:-testing_size])
    test_x = list(features[1:,1:6][-testing_size:])
    test_y = list(features[1:,7][-testing_size:])

    scaler = MinMaxScaler(feature_range=(-5,5))
    train_x = scaler.fit_transform(train_x)
    train_y = scaler.fit_transform(train_y)
    test_x = scaler.fit_transform(test_x)
    test_y = scaler.fit_transform(test_y)

    return train_x, train_y, test_x, test_y

if __name__ == "__main__":
    train_x, train_y, test_x, test_y = create_feature_sets_and_labels()
    with open('stockdata.pickle', 'wb') as f:
        pickle.dump([train_x, train_y, test_x, test_y], f)
Column 0 is the date, so it is not used as a feature; neither is column 7, which is the label column. I normalized the data using sklearn's MinMaxScaler() over a range of -5 to 5.
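For reference, the scaler is applied to the label column too, and MinMaxScaler with feature_range=(-5, 5) maps a 0/1 column to -5/+5, which no longer matches the 0/1 values the cost and accuracy code expect. A quick standalone check (a sketch, with made-up 0/1 labels):

# Standalone check: what does the scaler do to binary 0/1 labels?
import numpy as np
from sklearn.preprocessing import MinMaxScaler

labels = np.array([[0.], [1.], [0.], [1.]])  # made-up 0/1 labels
scaler = MinMaxScaler(feature_range=(-5, 5))
print(scaler.fit_transform(labels).ravel())  # -> [-5.  5. -5.  5.]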
-------------EDIT 2-------------------
I've also noticed that the accuracy does not change when the data is presented in non-normalized form.