I am trying to implement a multivariate regression in TensorFlow, where I have 192 examples with 6 features and one output variable. From my model I get a (192, 6) matrix while it should be (192, 1). Does anybody know what is wrong with my code? I have provided it below.
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

rng = np.random

# Parameters
learning_rate = 0.0001
training_epochs = 50
display_step = 5
# Data_ABX3 holds the dataset: columns 0-5 are the features, column 24 is the target
train_X = Data_ABX3[0:192, 0:6]
train_Y = Data_ABX3[0:192, [24]]
# placeholders for a tensor that will be always fed.
X = tf.placeholder('float', shape = [None, 6])
Y = tf.placeholder('float', shape = [None, 1])
# Training Data
n_samples = train_Y.shape[0]
# Set model weights
W = tf.cast(tf.Variable(rng.randn(1, 6), name="weight"), tf.float32)
b = tf.Variable(rng.randn(), name="bias")
# Construct a linear model
pred = tf.add(tf.multiply(X, W), b)
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
# Gradient descent
# Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Accuracy
# #accuracy = tf.contrib.metrics.streaming_accuracy(Y, pred)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)

    # Fit all training data
    for epoch in range(training_epochs):
        #for (x, y) in zip(train_X, train_Y):
        sess.run(optimizer, feed_dict={X: train_X, Y: train_Y})

        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c),
                  "W=", sess.run(W), "b=", sess.run(b))

    print("Optimization Finished!")

    #training_cost = 0
    #for (x, y) in zip(train_X, train_Y):
    #    tr_cost = sess.run(cost, feed_dict={X: x, Y: y})
    #    training_cost += tr_cost
    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')

    # Graphic display
    plt.plot(train_Y, train_X * sess.run(W) + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()
Please use tf.matmul instead of tf.multiply in your pred equation. tf.multiply does an element-wise multiplication, so it produces a matrix with the same dimensions as train_X, i.e. (192, 6). tf.matmul, on the other hand, performs a matrix multiplication, and the result follows the usual matrix multiplication rule: with W reshaped to (6, 1), (192, 6) x (6, 1) gives the (192, 1) output you expect.
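To see the difference concretely, here is a minimal sketch (assuming TensorFlow 1.x; the names W_row and W_col are only illustrative) that contrasts the shapes the two ops produce:

import numpy as np
import tensorflow as tf

X = tf.placeholder('float32', shape=[None, 6])
W_row = tf.constant(np.random.randn(1, 6).astype('float32'))  # row vector, as in your code
W_col = tf.constant(np.random.randn(6, 1).astype('float32'))  # column vector

elementwise = tf.multiply(X, W_row)  # broadcasts element-wise: shape stays (None, 6)
matvec = tf.matmul(X, W_col)         # matrix product: (None, 6) x (6, 1) -> (None, 1)

print(elementwise.shape)  # (?, 6) -- why your pred had 6 columns
print(matvec.shape)       # (?, 1) -- matches the shape of Y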
I am not sure what your data looks like, so I added random data and changed the code to meet all the dimension requirements; in particular, W now has shape (6, 1) so that tf.matmul(X, W) produces a (None, 1) output. If you can explain your intention, that will help in pinpointing the issue.
EDIT
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# Parameters
learning_rate = 0.0001
training_epochs = 50
display_step = 5
# Stand-in random data: columns 0-5 are the features, column 7 is the target
Data_ABX3 = np.random.random((193, 8)).astype('f')
train_X = Data_ABX3[0:192, 0:6]
train_Y = Data_ABX3[0:192, [7]]
# placeholders for a tensor that will be always fed.
X = tf.placeholder('float32', shape = [None, 6])
Y = tf.placeholder('float32', shape = [None, 1])
# Training Data
n_samples = train_Y.shape[0]
# Set model weights: W is a (6, 1) column vector so tf.matmul(X, W) yields shape (None, 1)
W = tf.cast(tf.Variable(np.random.randn(6, 1), name="weight"), tf.float32)
b = tf.Variable(np.random.randn(), name="bias")
# Sanity check: the matrix product already has the desired shape
mult_node = tf.matmul(X, W)
print(mult_node.shape)  # prints (?, 1)
# Construct a linear model
pred = tf.add(tf.matmul(X, W), b)
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
# Gradient descent
# Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Accuracy
# #accuracy = tf.contrib.metrics.streaming_accuracy(Y, pred)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)

    # Fit all training data
    for epoch in range(training_epochs):
        sess.run(optimizer, feed_dict={X: train_X, Y: train_Y})

        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c),
                  "W=", sess.run(W), "b=", sess.run(b))

    print("Optimization Finished!")

    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')

    # Evaluate the fitted model on the training inputs: shape (192, 1)
    line = sess.run(tf.add(tf.matmul(train_X, W), b))

    # Graphic display
    plt.plot(train_Y, line, label='Fitted line')
    plt.legend()
    plt.show()