I'm trying to create a categorical classification neural network (NN). I have been given a dataset with 169307 rows. My output labels are [0, 1, 2]. I one-hot encoded them, but I'm not able to operate on them using neural nets; I encounter a ValueError. I guess I have made a mistake in reshaping my "target" column, which I converted into a list `l`. Here is my complete code:
# coding: utf-8
# In[349]:
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder
# In[382]:
df =pd.read_csv("train_data.csv")
num_labels = 3
# In[392]:
import numpy as np
nb_classes = 3
targets = np.array([0,1,2]).reshape(-1)
one_hot_targets = np.eye(nb_classes)[targets]
one_hot_targets
# In[420]:
target = df["target"]
feat=df.drop(['target','connection_id'],axis=1)
target[10]
l=[]
l=target.values.tolist()
l=np.array(l)
l[9]
# In[410]:
# In[394]:
logs_path="Server_attack"
# In[395]:
#Hyperparameters
batch_size=100
learning_rate=0.5
training_epochs=10
# In[396]:
X=tf.placeholder(tf.float32,[None,41])
Y_=tf.placeholder(tf.float32,[None,3])
lr=tf.placeholder(tf.float32)
XX=tf.reshape(X,[41,-1])
# In[397]:
#5Layer Neural Network
L=200
M=100
N=60
O=30
# In[398]:
#Weights and Biases
W1=tf.Variable(tf.truncated_normal([41,L],stddev=0.1))
B1=tf.Variable(tf.ones([L]))
W2=tf.Variable(tf.truncated_normal([L,M],stddev=0.1))
B2=tf.Variable(tf.ones([M]))
W3=tf.Variable(tf.truncated_normal([M,N],stddev=0.1))
B3=tf.Variable(tf.ones([N]))
W4=tf.Variable(tf.truncated_normal([N,O],stddev=0.1))
B4=tf.Variable(tf.ones([O]))
W5=tf.Variable(tf.truncated_normal([O,3],stddev=0.1))
B5=tf.Variable(tf.ones([3]))
# In[399]:
Y1=tf.nn.relu(tf.matmul(XX,W1)+B1)
Y2=tf.nn.relu(tf.matmul(Y1,W2)+B2)
Y3=tf.nn.relu(tf.matmul(Y2,W3)+B3)
Y4=tf.nn.relu(tf.matmul(Y3,W4)+B4)
Ylogits=tf.nn.relu(tf.matmul(Y4,W5)+B5)
Y=tf.nn.softmax(Ylogits)
# In[400]:
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits,labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)
# In[401]:
correct_prediction=tf.equal(tf.argmax(Y,1),tf.argmax(Y_,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
# In[402]:
train_step=tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
# In[403]:
#TensorBoard Parameters
tf.summary.scalar("cost",cross_entropy)
tf.summary.scalar("accuracy",accuracy)
summary_op=tf.summary.merge_all()
# In[404]:
init = tf.global_variables_initializer()
sess=tf.Session()
sess.run(init)
# In[417]:
with tf.Session() as sess:
    sess.run(init)
    writer = tf.summary.FileWriter(logs_path,graph=tf.get_default_graph())
    for epoch in range(training_epochs):
        batch_count=int(len(feature)/batch_size)
        for i in range(batch_count):
            #batch_x,batch_y=feature.iloc[i, :].values.tolist(),target[i]
            batch_x = np.expand_dims(np.array(feature.iloc[i, :].values.tolist()), axis=0)
            batch_y = np.expand_dims(l, axis=0)
            # batch_y = np.reshape(batch_y,(1, 3))
            _,summary = sess.run([train_step,summary_op],
                                 {X:batch_x,Y:batch_y,learning_rate:0.001}
                                 )
            writer.add_summary(summary, epoch * batch_count + i)
        print("Epoch: ", epoch)
Error:
ValueError: Cannot feed value of shape (1, 169307) for Tensor 'Softmax_16:0', which has shape '(41, 3)'
Please guide me.
You actually didn't do the conversion. You've only created a 3x3 identity matrix, `one_hot_targets`, but never used it. As a result, `batch_y` is an array of the raw `df["target"]` values, not one-hot vectors.

Your `batch_x` also doesn't seem correct, but `feature` is not defined in the snippet, so I can't say what exactly that is.

[Update] How to do one-hot encoding:
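A minimal sketch following the question's own `np.eye` approach (assuming `df["target"]` holds integer labels 0, 1, 2, as stated): index the identity matrix with the whole target column instead of with `[0, 1, 2]`, so you get one one-hot row per sample, then slice the rows belonging to each batch:

import numpy as np

nb_classes = 3
# Index the identity matrix with every label -> one row per sample, shape (169307, 3)
one_hot_targets = np.eye(nb_classes)[df["target"].values]

# Inside the batch loop, take the rows for batch i instead of feeding all of l:
batch_y = one_hot_targets[i*batch_size:(i+1)*batch_size]

`pd.get_dummies(df["target"]).values` produces the same (169307, 3) array. Also note that your error message says the feed is going to 'Softmax_16:0', i.e. the softmax output `Y`; the one-hot batch belongs in the placeholder `Y_`.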