我正在使用 keras 中的反向传播和随机梯度下降来训练神经网络。然而,网络生成的图形根本不接近目标函数,我不知道为什么。
我在此处添加了代码,并在下面添加了目标函数和生成的 NN 近似图。
import math
import random
import matplotlib.pyplot as plt
import numpy
from matplotlib import pyplot
numpy.random.seed(7)
random.seed = 775
print(type(random.seed))
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD
THEANO_FLAGS = ""
r = Sequential()
numpy.array
def setup_nn():
r.add(Dense(1, activation='sigmoid', input_dim=1, init='uniform'))
r.add(Dense(50, activation='sigmoid', input_dim=1, init='uniform'))
r.add(Dense(output_dim=1, activation='linear', input_dim=50))
sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=False)
r.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])
def target_function(X):
a = math.sin(X*3)
return a*10
def trainRandomX(samplesize):
X = []
Y = []
for j in range(0, samplesize):
xj = random.random()
X.append(xj)
Y.append(target_function(xj))
# X=numpy.array(X)
# Y=numpy.array(Y)
r.fit(X, Y, batch_size=100, nb_epoch=1)
return
def testRandomX():
X = [random.random()]
Y = target_function(X[0])
X = numpy.array(X)
Ypred = r.predict(X, batch_size=1)
error = Ypred[0][0] - Y
print("error: ", error)
# print(Ypred)
return [X, Ypred[0][0]]
setup_nn()
plt.interactive(False)
# for i in range(0, 1):
trainRandomX(10000)
error = 0
X = []
Y = []
for i in range(0, 20):
# error += abs(testRandomX())
XY = testRandomX()
X.append(XY[0][0])
Y.append(XY[1])
pyplot.plot(X, Y, 'o')
def plotfunction():
X = []
Y = []
for i in range(0, 100):
x = i / 100
X.append(x)
Y.append(target_function(x))
pyplot.plot(X, Y, '.')
plotfunction()
print("average error: ", error / 20)
plt.show()
这里的情节:大点是神经网络的近似值。为什么它们不能更好地对应目标函数?

