I am implementing the CartPole problem from OpenAI Gym using deep Q-learning (DQN). I learned the relevant material from tutorials (videos and other sources) and wrote my own implementation, which I think should work, but the agent does not learn. I would really appreciate it if someone could point out what I am doing wrong.
Note that I already have both a target network and a policy network (my understanding of how the target network is usually kept in sync is sketched right after the code). The code is below.
import numpy as np
import gym
import random
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers import Dense
from collections import deque
env = gym.make('CartPole-v0')
EPISODES = 2000
BATCH_SIZE = 32
DISCOUNT = 0.95
UPDATE_TARGET_EVERY = 5
STATE_SIZE = env.observation_space.shape[0]
ACTION_SIZE = env.action_space.n
SHOW_EVERY = 50
class DQNAgents:
    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size
        self.replay_memory = deque(maxlen=2000)
        self.gamma = 0.95
        self.epsilon = 1
        self.epsilon_decay = 0.995
        self.epsilon_min = 0.01
        self.model = self._build_model()
        self.target_model = self.model
        self.target_update_counter = 0
        print('Initialize the agent')

    def _build_model(self):
        model = Sequential()
        model.add(Dense(20, input_dim=self.state_size, activation='relu'))
        model.add(Dense(10, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse', optimizer=Adam(lr=0.001))
        return model

    def update_replay_memory(self, current_state, action, reward, next_state, done):
        self.replay_memory.append((current_state, action, reward, next_state, done))

    def train(self, terminal_state):
        # Sample from replay memory
        minibatch = random.sample(self.replay_memory, BATCH_SIZE)

        # Picks the current states from the randomly selected minibatch
        current_states = np.array([t[0] for t in minibatch])
        current_qs_list = self.model.predict(current_states)  # gives the Q values for the policy network
        new_state = np.array([t[3] for t in minibatch])
        future_qs_list = self.target_model.predict(new_state)

        X = []
        Y = []

        # This loop will run 32 times (actually minibatch times)
        for index, (current_state, action, reward, next_state, done) in enumerate(minibatch):
            if not done:
                new_q = reward + DISCOUNT * np.max(future_qs_list)
            else:
                new_q = reward

            # Update Q value for given state
            current_qs = current_qs_list[index]
            current_qs[action] = new_q

            X.append(current_state)
            Y.append(current_qs)

        # Fitting the weights, i.e. reducing the loss using gradient descent
        self.model.fit(np.array(X), np.array(Y), batch_size=BATCH_SIZE, verbose=0, shuffle=False)

        # Update target network counter every episode
        if terminal_state:
            self.target_update_counter += 1

        # If counter reaches set value, update target network with weights of main network
        if self.target_update_counter > UPDATE_TARGET_EVERY:
            self.target_model.set_weights(self.model.get_weights())
            self.target_update_counter = 0

    def get_qs(self, state):
        return self.model.predict(np.array(state).reshape(-1, *state.shape))[0]
''' We start here'''
agent = DQNAgents(STATE_SIZE, ACTION_SIZE)
for e in range(EPISODES):
    done = False
    current_state = env.reset()
    time = 0
    total_reward = 0

    while not done:
        if np.random.random() > agent.epsilon:
            action = np.argmax(agent.get_qs(current_state))
        else:
            action = env.action_space.sample()

        next_state, reward, done, _ = env.step(action)
        agent.update_replay_memory(current_state, action, reward, next_state, done)

        if len(agent.replay_memory) < BATCH_SIZE:
            pass
        else:
            agent.train(done)

        time += 1
        current_state = next_state
        total_reward += reward

    print(f'episode : {e}, steps {time}, epsilon : {agent.epsilon}')

    if agent.epsilon > agent.epsilon_min:
        agent.epsilon *= agent.epsilon_decay
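(Regarding the target network mentioned at the top: my understanding of the usual pattern is roughly the minimal sketch below, where the target network is a separate copy of the policy network that gets a hard weight update every few episodes. The names build_net, policy_net and target_net are only illustrative and are not part of my code above.)

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam

def build_net(state_size, action_size):
    # same architecture as _build_model above
    net = Sequential()
    net.add(Dense(20, input_dim=state_size, activation='relu'))
    net.add(Dense(10, activation='relu'))
    net.add(Dense(action_size, activation='linear'))
    net.compile(loss='mse', optimizer=Adam(lr=0.001))
    return net

policy_net = build_net(4, 2)                      # CartPole-v0: 4 state variables, 2 actions
target_net = build_net(4, 2)                      # a second, independent network
target_net.set_weights(policy_net.get_weights())  # start the two networks in sync

# ... then, every UPDATE_TARGET_EVERY episodes:
target_net.set_weights(policy_net.get_weights())  # periodic hard update of the target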
The results of the first 50 or so episodes are below (look at the step counts: they should increase and eventually reach the maximum of 199; a short rolling-average sketch follows the log).
episode : 0, steps 14, epsilon : 1
episode : 1, steps 13, epsilon : 0.995
episode : 2, steps 17, epsilon : 0.990025
episode : 3, steps 12, epsilon : 0.985074875
episode : 4, steps 29, epsilon : 0.9801495006250001
episode : 5, steps 14, epsilon : 0.9752487531218751
episode : 6, steps 11, epsilon : 0.9703725093562657
episode : 7, steps 13, epsilon : 0.9655206468094844
episode : 8, steps 11, epsilon : 0.960693043575437
episode : 9, steps 14, epsilon : 0.9558895783575597
episode : 10, steps 39, epsilon : 0.9511101304657719
episode : 11, steps 14, epsilon : 0.946354579813443
episode : 12, steps 19, epsilon : 0.9416228069143757
episode : 13, steps 16, epsilon : 0.9369146928798039
episode : 14, steps 14, epsilon : 0.9322301194154049
episode : 15, steps 18, epsilon : 0.9275689688183278
episode : 16, steps 31, epsilon : 0.9229311239742362
episode : 17, steps 14, epsilon : 0.918316468354365
episode : 18, steps 21, epsilon : 0.9137248860125932
episode : 19, steps 9, epsilon : 0.9091562615825302
episode : 20, steps 26, epsilon : 0.9046104802746175
episode : 21, steps 20, epsilon : 0.9000874278732445
episode : 22, steps 53, epsilon : 0.8955869907338783
episode : 23, steps 24, epsilon : 0.8911090557802088
episode : 24, steps 14, epsilon : 0.8866535105013078
episode : 25, steps 40, epsilon : 0.8822202429488013
episode : 26, steps 10, epsilon : 0.8778091417340573
episode : 27, steps 60, epsilon : 0.8734200960253871
episode : 28, steps 17, epsilon : 0.8690529955452602
episode : 29, steps 11, epsilon : 0.8647077305675338
episode : 30, steps 42, epsilon : 0.8603841919146962
episode : 31, steps 16, epsilon : 0.8560822709551227
episode : 32, steps 12, epsilon : 0.851801859600347
episode : 33, steps 12, epsilon : 0.8475428503023453
episode : 34, steps 10, epsilon : 0.8433051360508336
episode : 35, steps 30, epsilon : 0.8390886103705794
episode : 36, steps 21, epsilon : 0.8348931673187264
episode : 37, steps 24, epsilon : 0.8307187014821328
episode : 38, steps 33, epsilon : 0.8265651079747222
episode : 39, steps 32, epsilon : 0.8224322824348486
episode : 40, steps 15, epsilon : 0.8183201210226743
episode : 41, steps 20, epsilon : 0.8142285204175609
episode : 42, steps 37, epsilon : 0.810157377815473
episode : 43, steps 11, epsilon : 0.8061065909263957
episode : 44, steps 30, epsilon : 0.8020760579717637
episode : 45, steps 11, epsilon : 0.798065677681905
episode : 46, steps 34, epsilon : 0.7940753492934954
episode : 47, steps 12, epsilon : 0.7901049725470279
episode : 48, steps 26, epsilon : 0.7861544476842928
episode : 49, steps 19, epsilon : 0.7822236754458713
episode : 50, steps 20, epsilon : 0.778312557068642
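(For reference, this is roughly how I summarize the step counts to see the trend. episode_steps is a hypothetical list that would be filled with time at the end of each episode; it is not in the code above. If the agent were learning, the rolling average should climb towards 199.)

import numpy as np

# hypothetical list of per-episode step counts; the first ten values are taken from the log above
episode_steps = [14, 13, 17, 12, 29, 14, 11, 13, 11, 14]

window = 5
rolling = np.convolve(episode_steps, np.ones(window) / window, mode='valid')
print(rolling)  # rolling average of step counts; it stays flat instead of trending towards 199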