import tensorflow as tf
x = tf.placeholder(tf.float32, [None,4]) # input vector
w1 = tf.Variable(tf.random_normal([4,2])) # weights between first and second layers
b1 = tf.Variable(tf.zeros([2])) # biases added to hidden layer
w2 = tf.Variable(tf.random_normal([2,1])) # weights between second and third layer
b2 = tf.Variable(tf.zeros([1])) # biases added to third (output) layer
def feedForward(x,w,b): # function for forward propagation
Input = tf.add(tf.matmul(x,w), b)
Output = tf.sigmoid(Input)
return Output
Out1 = feedForward(x,w1,b1) # output of first layer
Out2 = feedForward(Out1,w2,b2) # output of second layer
MHat = 50*Out2 # final prediction is in the range (0,50)
M = tf.placeholder(tf.float32, [None,1]) # placeholder for actual (target value of marks)
J = tf.reduce_mean(tf.square(MHat - M)) # cost function -- mean square errors
train_step = tf.train.GradientDescentOptimizer(0.05).minimize(J) # minimize J using Gradient Descent
sess = tf.InteractiveSession() # create interactive session
tf.global_variables_initializer().run() # initialize all weight and bias variables with specified values
xs = [[1,3,9,7],
[7,9,8,2], # x training data
[2,4,6,5]]
Ms = [[47],
[43], # M training data
[39]]
for _ in range(1000): # performing learning process on training data 1000 times
sess.run(train_step, feed_dict = {x:xs, M:Ms})
>>> print(sess.run(MHat, feed_dict = {x:[[1,15,9,7]]}))
[[50.]]
>>> print(sess.run(MHat, feed_dict = {x:[[3,8,1,2]]}))
[[50.]]
>>> print(sess.run(MHat, feed_dict = {x:[[6,7,10,9]]}))
[[50.]]
在这段代码中,我试图根据学生在考试前一天的睡眠、学习、使用电子产品和玩耍的时间来预测学生在 50 分中获得的分数 M。这 4 个特征属于输入特征向量 x。
为了解决这个回归问题,我使用了一个深度神经网络,输入层有 4 个感知器(输入特征),隐藏层有两个感知器,输出层有一个感知器。
我使用 sigmoid 作为激活函数。但是,对于我输入的所有可能的输入向量,我得到了与 M 完全相同的预测([[50.0]])。
有人可以告诉我上面的代码有什么问题,为什么我每次都得到相同的结果?