Calculating Multivariate regression using TensorFlow

Posted 2019-09-28 04:30

I want to implement multivariate regression in TensorFlow. I have 192 samples, 6 features, and one output variable. My model produces a (192, 6) matrix, but it should be (192, 1). Does anyone know what is wrong with my code? My code is below.

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

rng = np.random  # assumed: `rng` in the original snippet refers to numpy.random

# Parameters
learning_rate = 0.0001
training_epochs = 50
display_step = 5

train_X = Data_ABX3[0:192, 0:6]
train_Y = Data_ABX3[0:192, [24]]


# placeholders for a tensor that will be always fed.
X = tf.placeholder('float', shape = [None, 6])
Y = tf.placeholder('float', shape = [None, 1])


# Training Data

n_samples = train_Y.shape[0]


# Set model weights
W = tf.cast(tf.Variable(rng.randn(1, 6), name="weight"), tf.float32)
b = tf.Variable(rng.randn(), name="bias")

# Construct a linear model
pred = tf.add(tf.multiply(X, W), b)

# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
# Gradient descent
#  Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Accuracy
# #accuracy = tf.contrib.metrics.streaming_accuracy(Y, pred)

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:

    # Run the initializer
    sess.run(init)

    # Fit all training data
    for epoch in range(training_epochs):
        #for (x, y) in zip(train_X, train_Y):
        sess.run(optimizer, feed_dict={X: train_X, Y: train_Y})

        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
                "W=", sess.run(W), "b=", sess.run(b))

    print("Optimization Finished!")
    #training_cost = 0
    #for (x, y) in zip(train_X, train_Y):
    #     tr_cost = sess.run(cost, feed_dict={X: x, Y: y})
    #     training_cost += tr_cost
    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')

    # Graphic display
    plt.plot(train_Y, train_X * sess.run(W) + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()

Answer 1:

Please use tf.matmul instead of tf.multiply in the pred equation. tf.multiply performs element-wise multiplication, so it produces a matrix with the same dimensions as train_X, whereas tf.matmul performs matrix multiplication, and the shape of the result follows the usual rules of matrix multiplication.
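A quick way to see the shape difference (a minimal sketch with random stand-in data, not the asker's Data_ABX3):

import numpy as np
import tensorflow as tf

X = tf.constant(np.random.randn(192, 6), dtype=tf.float32)    # (samples, features)
W_row = tf.constant(np.random.randn(1, 6), dtype=tf.float32)  # weights as a row vector
W_col = tf.constant(np.random.randn(6, 1), dtype=tf.float32)  # weights as a column vector

elementwise = tf.multiply(X, W_row)  # broadcasts -> shape (192, 6), the asker's problem
matmul_pred = tf.matmul(X, W_col)    # matrix product -> shape (192, 1), as desired

print(elementwise.shape)  # (192, 6)
print(matmul_pred.shape)  # (192, 1)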

I don't know what your data looks like, so I added random data and adjusted the code so that all the dimensions line up. If you can tell me more about your intent, it will be easier to see the problem.

Edit

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# Parameters
learning_rate = 0.0001
training_epochs = 50
display_step = 5

Data_ABX3 = np.random.random((193, 8)).astype('f')

train_X = Data_ABX3[0:192, 0:6]
train_Y = Data_ABX3[0:192, [7]]


# placeholders for a tensor that will be always fed.
X = tf.placeholder('float32', shape = [None, 6])
Y = tf.placeholder('float32', shape = [None, 1])

# Training Data
n_samples = train_Y.shape[0]

# Set model weights
W = tf.cast(tf.Variable(np.random.randn(6, 1), name="weight"), tf.float32)
b = tf.Variable(np.random.randn(), name="bias")

mult_node = tf.matmul(X, W)
print(mult_node.shape)
# Construct a linear model
pred = tf.add(tf.matmul(X, W), b)

# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
# Gradient descent
#  Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Accuracy
# #accuracy = tf.contrib.metrics.streaming_accuracy(Y, pred)

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:

    # Run the initializer
    sess.run(init)

    # Fit all training data
    for epoch in range(training_epochs):
        #for (x, y) in zip(train_X, train_Y):
        sess.run(optimizer, feed_dict={X: train_X, Y: train_Y})

        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
                "W=", sess.run(W), "b=", sess.run(b))

    print("Optimization Finished!")
    #training_cost = 0
    #for (x, y) in zip(train_X, train_Y):
    #     tr_cost = sess.run(cost, feed_dict={X: x, Y: y})
    #     training_cost += tr_cost
    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')

    line = sess.run(tf.add(tf.matmul(train_X, W), b))
    # Graphic display
    plt.plot(train_Y, line, label='Fitted line')
    plt.legend()
    plt.show()


Source: Calculating Multivariate regression using TensorFlow