Overfitting a random walk with an ANN in Keras

data-mining machine-learning neural-network keras overfitting
2021-09-21 16:56:01

I am trying to build a neural network that overfits the path of a random walk. So far, I have not been able to get a network that nails/overfits the training data. I would like to know which parameters I should explore, or what guidelines I should follow, to accomplish this (admittedly odd) task.

Code:

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD,Adam
from keras import regularizers
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline  
import random
import math

model=Sequential()
num_units=100
model.add(Dense(num_units,input_shape=(1,),activation='tanh'))  # scalar input: the step index
model.add(Dense(num_units,activation='tanh'))
model.add(Dense(num_units,activation='tanh'))
model.add(Dense(num_units,activation='tanh'))
model.add(Dense(1, activation='tanh'))  # output layer: 1 unit
model.compile(Adam(),'mean_squared_error',metrics=['mse'])

num_of_steps=3000
step_size=0.01
x_all=list(range(0,num_of_steps))

########################## random walk generation
random_walk=[0]
for i in x_all[1:]:
    f=np.random.uniform(0,1,1)
    if f<0.5:
        random_walk.append(random_walk[i-1]+step_size)  # step up
    else:
        random_walk.append(random_walk[i-1]-step_size)  # step down
########################

x1=list(range(1,1000,2))  # odd indices: training inputs
x2=list(range(2,1000,2))  # even indices: held-out inputs
y1=[random_walk[x] for x in x1]
y2=[random_walk[x] for x in x2]

model.fit(x1,y1,epochs=2500,verbose=1)
fit1=model.predict(x1)
fit2=model.predict(x2)
plt.plot(x1,y1,'k')    # training data (black)
plt.plot(x2, y2, 'r')  # held-out data (red)
plt.scatter(x1, fit1, facecolors='none', edgecolors='g')  # predictions on training inputs
plt.scatter(x2, fit2, facecolors='none', edgecolors='b')  # predictions on held-out inputs

Here is an example of the results I get:

[plot: training data (black), held-out data (red), and the network's predictions (green and blue circles)]

I would like to get a much better fit to my sample data (x1, y1).

2 Answers

Try changing the kernel regularizer. Either reduce its effect or turn it off completely, then retrain and see what happens.
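For reference, this is roughly what toggling a kernel regularizer on a Dense layer looks like in Keras; the layer width and the l2 factor below are illustrative choices, not values taken from the question:

from keras.models import Sequential
from keras.layers import Dense
from keras import regularizers

# With an L2 penalty on the weights (pushes the fit to be smoother):
regularized_layer = Dense(100, activation='tanh',
                          kernel_regularizer=regularizers.l2(0.01))

# Without any penalty (the Keras default), the layer is free to overfit:
plain_layer = Dense(100, activation='tanh')

model = Sequential()
model.add(Dense(100, input_shape=(1,), activation='tanh'))
model.add(plain_layer)  # swap in regularized_layer to compare the two
model.add(Dense(1))
model.compile('adam', 'mean_squared_error')

Retraining with and without the penalty and comparing how closely each version matches the training set should show which direction helps.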

Edit: I expect the kernel regularizer will be your best bet. You can also try changing the depth of the network (the number of layers) and how many units each layer gets. I would expect that if you put more units into the layers that currently have 10, you will see more overfitting.

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD,Adam
from keras import regularizers
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline  
import random
import math

model=Sequential()
num_units=300  # wider layers than in the question
act='relu'
model.add(Dense(num_units,input_shape=(1,),activation=act)) 
model.add(Dense(num_units,activation=act))
model.add(Dense(num_units,activation=act))
model.add(Dense(num_units,activation=act))
model.add(Dense(num_units,activation=act))
model.add(Dense(num_units,activation=act))

model.add(Dense(1, activation='tanh')) #output layer 1 unit
model.compile(Adam(),'mean_squared_error',metrics=['mse'])

num_of_steps=3000
step_size=0.01
x_all=list(range(0,num_of_steps))

########################## random walk generation
random_walk=[0]
for i in x_all[1:]:
    f=np.random.uniform(0,1,1)
    if f<0.5:
        random_walk.append(random_walk[i-1]+step_size)  # step up
    else:
        random_walk.append(random_walk[i-1]-step_size)  # step down
########################

x1=list(range(1,1000,2))  # odd indices: training inputs
x2=list(range(2,1000,2))  # even indices: held-out inputs
# rescale the inputs to roughly [-1, 1] before feeding them to the network
x1_norm=[x/(len(x1)+0.0) -1.0 for x in x1]
x2_norm=[x/(len(x2)+0.0) -1.0 for x in x2]

y1=[random_walk[x] for x in x1]
y2=[random_walk[x] for x in x2]

model.fit(x1_norm,y1,epochs=2500,verbose=0)
fit1=model.predict(x1_norm)
fit2=model.predict(x2_norm)
plt.plot(x1_norm,y1,'k')    # training data (black)
plt.plot(x2_norm, y2, 'r')  # held-out data (red)
plt.scatter(x1_norm, fit1, facecolors='none', edgecolors='g')  # predictions on training inputs
plt.scatter(x2_norm, fit2, facecolors='none', edgecolors='b')  # predictions on held-out inputs