This post in the deep reinforcement learning series shows how to implement the policy gradient method in TensorFlow 2.0 with a custom loss function. I hope it offers a useful reference for developers working on the same problem; let's go through it together!
This article implements policy gradient with a custom TensorFlow 2.0 loss function, loss = -log(prob) * Vt, where prob is the probability the policy assigns to the action actually taken and Vt is the discounted return credited to that step.
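As a quick numeric illustration of what this loss computes for a single time step (the numbers below are made up for illustration, not taken from a training run):

import numpy as np

prob = np.array([0.7, 0.3])  # policy output pi(a|s) over the two CartPole actions
action = 1                   # the action that was actually sampled
Vt = 2.5                     # discounted return credited to this step

loss = -np.log(prob[action]) * Vt
print(loss)  # -log(0.3) * 2.5 is roughly 3.01; a large Vt amplifies the push toward this action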
Training currently tops out at a score of 193 and is still unstable; I'm still revising it, so discussion is welcome. The code in this article also draws on 莫烦 (Morvan)'s code.

import gym
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, optimizers

action_dim = 2  # number of discrete actions
state_dim = 4   # dimension of the state vector
env = gym.make('CartPole-v0')
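As an optional sanity check that isn't in the original code, the two dimensions hard-coded above can be read straight off the environment created above:

print(env.observation_space.shape)  # (4,)  -> state_dim = 4
print(env.action_space.n)           # 2     -> action_dim = 2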
class PGModel(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.dense1 = layers.Dense(128, input_dim=state_dim, activation='relu')
        self.dropout = layers.Dropout(0.1)
        self.all_acts = layers.Dense(units=action_dim)

    def call(self, inputs):
        x = self.dense1(inputs)
        x = self.dropout(x)
        x = self.all_acts(x)
        output = tf.nn.softmax(x)  # turn the logits into action probabilities
        return output

class PG():
    def __init__(self):
        self.model = PGModel()
        # compile once here; recompiling every episode would reset Adam's internal state
        self.model.compile(loss=self.def_loss, optimizer=optimizers.Adam(0.001))

    def choose_action(self, s):
        # sample an action according to the probabilities the current policy outputs
        prob = self.model.predict(np.array([s]))[0]
        return np.random.choice(len(prob), p=prob)

    def discount_reward(self, rewards, gamma=0.95):
        # discounted rewards: work backwards from the last step to recover the true returns
        out = np.zeros_like(rewards, dtype=np.float64)
        dis_reward = 0
        for i in reversed(range(len(rewards))):
            dis_reward = rewards[i] + gamma * dis_reward  # G_t = r_t + gamma * G_{t+1}
            out[i] = dis_reward
        # normalize the returns to stabilize training
        return (out - np.mean(out)) / np.std(out)

    def def_loss(self, y_true, y_pred):
        # custom loss: y_true is the one-hot action actually taken and y_pred the
        # predicted probabilities, so this cross-entropy is exactly -log(prob of the
        # taken action); Vt enters via sample_weight in fit(), giving -log(prob) * Vt
        neg_log_prob = tf.keras.losses.categorical_crossentropy(y_true, y_pred)
        return neg_log_prob

    def train(self, records):  # train on one finished episode
        s_batch = np.array([record[0] for record in records])  # one state per step
        # one-hot encode the action taken at each step
        a_batch = np.array([[1 if record[1] == i else 0 for i in range(action_dim)]
                            for record in records], dtype=np.float32)
        r_batch = self.discount_reward([record[2] for record in records])
        # sample_weight scales each step's loss by its discounted return Vt
        self.model.fit(s_batch, a_batch, sample_weight=r_batch, verbose=1)
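The compile()/fit(sample_weight=...) route hides the actual update inside Keras. For readers who want the gradient step spelled out, here is a rough hand-written equivalent using tf.GradientTape; this is my own variant rather than the article's code, and it assumes the same PGModel and the s_batch/a_batch/r_batch arrays built in train():

def train_step(model, s_batch, a_batch, r_batch, optimizer=tf.keras.optimizers.Adam(0.001)):
    a_batch = tf.cast(a_batch, tf.float32)   # one-hot actions
    r_batch = tf.cast(r_batch, tf.float32)   # normalized discounted returns Vt
    with tf.GradientTape() as tape:
        probs = model(s_batch)                          # (batch, action_dim)
        taken = tf.reduce_sum(probs * a_batch, axis=1)  # prob of the action taken
        # policy gradient loss: mean over the trajectory of -log(prob) * Vt
        loss = tf.reduce_mean(-tf.math.log(taken + 1e-8) * r_batch)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss

Calling train_step(pg.model, s_batch, a_batch, r_batch) once per episode would play the same role as the fit() call above.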
episodes = 2000
score_list = []
pg = PG()
for i in range(episodes):
    score = 0
    records = []
    s = env.reset()
    while True:
        a = pg.choose_action(s)
        next_s, r, done, _ = env.step(a)
        records.append((s, a, r))  # store (state, action, reward) for this step
        s = next_s
        score += r
        if done:
            pg.train(records)  # update the policy once the episode ends
            score_list.append(score)
            print("episode:", i, "score:", score, "maxscore:", max(score_list))
            break
    # CartPole-v0 is considered solved once the recent average score exceeds 195
    if np.mean(score_list[-10:]) > 195:
        pg.model.save('CarPoleModel')  # subclassed models need the SavedModel format, not HDF5
        break
env.close()
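Once the threshold is reached and the model is saved, a quick way to sanity-check the policy is to reload it and act greedily. This snippet is an addition of mine, not part of the original article, and it assumes the 'CarPoleModel' SavedModel directory written by the loop above:

import gym
import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model('CarPoleModel', compile=False)  # skip restoring the custom loss
env = gym.make('CartPole-v0')
s = env.reset()
score = 0
while True:
    env.render()
    prob = model.predict(np.array([s]))[0]
    a = int(np.argmax(prob))  # act greedily instead of sampling at evaluation time
    s, r, done, _ = env.step(a)
    score += r
    if done:
        break
print('evaluation score:', score)
env.close()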
That wraps up this deep reinforcement learning series post on implementing policy gradient with a custom TensorFlow 2.0 loss function; I hope it helps!