This article walks through reinforcement learning with DQN on the Atari 2600 game Breakout. I hope it offers a useful reference for developers working on similar problems; if that's you, let's work through it together!
I wrote the code in a procedural style, so it is fairly short and easy to follow. If there is enough interest, I may publish a follow-up post that breaks it down step by step. After a bit over five million training steps the agent scores around seventy points; if you want the trained network weights, leave a comment. Happy learning!
"""
"""
import hiddenlayer as hl
import torch
import pandas as pd
import numpy as np
import gym
import time
import PIL.Image as Image
import torch.nn as nn
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
import warnings
warnings.filterwarnings("ignore")
torch.set_printoptions(profile="full")class DQBReplayer:def __init__(self,capacity):self.memory = pd.DataFrame(index=range(capacity),columns=['observation','action','reward','next_observation','done'])self.i=0self.count=0self.capacity=capacitydef store(self,*args):self.memory.loc[self.i]=argsself.i=(self.i+1)%self.capacityself.count=min(self.count+1,self.capacity)def sample(self,size):indics=np.random.choice(self.count,size=size)return (np.stack(self.memory.loc[indics,field]) for field in self.memory.columns)#为什么#是第indics行和feild列class DQN_net(nn.Module):def __init__(self, ):super(DQN_net, self).__init__()self.conv= nn.Sequential(nn.Conv2d(in_channels=4, out_channels=32, kernel_size=8, stride=4),nn.ReLU(),nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2, ),nn.ReLU(),nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, ),nn.ReLU(),)self.classifier = nn.Sequential(nn.Linear(3136, 512),nn.ReLU(),nn.Linear(512, 4),)def forward(self, x):x=self.conv(x)x=x.view(x.size(0),-1)output=self.classifier(x)return outputclass DQN(nn.Module):def __init__(self,input_shape,env):super(DQN,self).__init__()self.replayer_start_size = 100000#十万self.upon_times=20self.replayer=DQBReplayer(capacity=self.replayer_start_size)self.action_n=env.action_space.nself.image_stack=input_shape[2]self.gamma=0.99self.image_shape=(input_shape[0],input_shape[1])self.e_net=DQN_net()self.t_net =DQN_net()self.learn_step=0self.max_learn_step=650000#六十五万self.epsilon=1.self.start_learn=Falseself.canvasl = hl.Canvas()self.history = hl.History()def get_nex_state(self,state=None,observation=None):img=Image.fromarray(observation,"RGB")img=img.resize(self.image_shape).convert('L')img=np.asarray(img.getdata(),dtype=np.uint8,).reshape(img.size[1],img.size[0])if state is None:next_state = np.array([img,]*self.image_stack)else:next_state=np.append(state[1:],[img,],axis=0)return next_statedef decide(self,state,step):if self.start_learn == False:#前5万步随机选择action = np.random.randint(0, 4)return actionelse:self.epsilon -= 0.0000053if step<30:#每局前三十步随机选择,中间30万,以一定概率(1-epsilon)通过神经网络选择,最后30万次以0.99概率通过神经网络选择action = np.random.randint(0, 4)elif np.random.random() < max(self.epsilon, 0.0005):action=np.random.randint(0,4)else:state=state/128-1.y=torch.Tensor(state)y = y.float().unsqueeze(0)y=y.to(device)x=self.e_net(y).detach()if self.learn_step%2000==0:print("q value{}".format(x))action = torch.argmax(x).data.item()return actionLoad_Net=int(input("load or not(if this is ur first time to run, input 0,else 1):"))for j in range(9,15,1):# 如果训练到j=n时中断,可以把j的起始值设为n-1再运行,并输入1,接着训练sum_reward=0device=torch.device("cuda" if torch.cuda.is_available() else"cpu")#store_count=0env=gym.make("BreakoutDeterministic-v4")env.unwrappednet=DQN([84,84,4],env).to(device)load_net_path="e_3_data\\part"+str(j)+"\\complete_all_training.pkl"save_net_path="e_3_data\\part"+str(j+1)+"\\"net.t_net.load_state_dict(net.e_net.state_dict())if Load_Net==1:print("u choose to load old net and the path is:",load_net_path)net.e_net = torch.load(load_net_path)net.t_net = torch.load(load_net_path)max_score=0mse = nn.MSELoss()opt = torch.optim.RMSprop(net.e_net.parameters(), lr=0.00015) #12/13 lr=0.00015 效果if Load_Net == 0:Load_Net = 1print(net.t_net)#i是已经玩过的游戏局数for i in range(20000):lives=5action_list = [0, 0, 0, 0]#用于统计选择四种动作的频率if net.learn_step>net.max_learn_step:torch.save(net.e_net, save_net_path+"complete_all_training.pkl")print("complete_all_training")breakobservation = env.reset()state=net.get_nex_state(None,observation)epoch_reward=0#每局游戏的累计奖励if 
i%net.upon_times==0:print("{} times_game".format(i))if i%10==0:time_start = time.time() # 开始计时for step in range(500000):#step为一局游戏的步数# env.render() # 环境展示,为节省时间,不展示action=net.decide(state,step=step)action_list[action]+=1observation,r,done,info=env.step(action)next_state=net.get_nex_state(state,observation)# print(info)epoch_reward+=r# net.learn(state,action,r,next_state,done)dead = info['lives'] < liveslives = info['lives']r=1*rif dead:r=-1net.replayer.store(state, action, r, next_state, done)net.learn_step += 1if net.learn_step >=net.replayer_start_size // 2 and net.learn_step % 4 == 0:if net.start_learn == False:net.start_learn = Trueprint("start_learn")sample_n = 32 # 每批训练32个样本states, actions, rewards, next_states, dones = net.replayer.sample(sample_n)states = states / 128 - 1.next_states = next_states / 128 - 1.rewards = torch.Tensor(np.clip(rewards,-1,1)).unsqueeze(1).to(device)states = torch.Tensor(states).to(device)next_states = torch.Tensor(next_states).to(device)actions = torch.Tensor(actions).long().unsqueeze(1).to(device)dones = torch.Tensor(dones).unsqueeze(1).to(device)# print("learn{}".format(states))q = net.e_net(states).gather(1, actions)q_next = net.t_net(next_states).detach().max(1)[0].reshape(sample_n, 1)# 这里会不会有错?tq = rewards + net.gamma * (1 - dones) * q_nextloss = mse(q, tq)opt.zero_grad()loss.backward()opt.step()if net.learn_step % (net.upon_times * 5) == 0:net.t_net.load_state_dict(net.e_net.state_dict())if net.learn_step % 100 == 0:loss_record = loss.item()a_r = torch.mean(rewards, 0).item()net.history.log((net.learn_step), loss=loss_record,avg_reward=a_r)with net.canvasl:net.canvasl.draw_plot(net.history["loss"])net.canvasl.draw_plot(net.history["avg_reward"])state = next_stateif done:sum_reward+=epoch_rewardif epoch_reward > max_score:name = "epsiode_" + str(net.learn_step) + "_reward_" + str(epoch_reward) + ".pkl"torch.save(net.e_net, save_net_path+name)max_score = epoch_rewardelif i % 1000 == 0:name ="No."+str(i)+".pkl"torch.save(net.e_net, save_net_path + name)if i%10==0:time_end = time.time() # 结束计时print("sum_time {:.2f}s---avg_reward:{:.2f}---epsilon {:.2f}---action_p{}".format(time_end-time_start,sum_reward/(10),net.epsilon,[round(pro/sum(action_list),2) for pro in action_list]))sum_reward=0break
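If you only want to watch a trained agent play rather than resume training, a short greedy evaluation loop is enough. The sketch below is a minimal example I am adding for illustration, not part of the original script: it assumes the classes above are defined in the same file, and `checkpoint_path` is a placeholder for any of the `.pkl` files written by `torch.save(net.e_net, ...)` during training.

# Minimal greedy-evaluation sketch (assumes DQN, DQN_net and the imports above are in scope).
# checkpoint_path is a placeholder -- point it at any .pkl saved by the training loop.
checkpoint_path = "e_3_data\\part10\\complete_all_training.pkl"

eval_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
eval_env = gym.make("BreakoutDeterministic-v4")
eval_net = DQN([84, 84, 4], eval_env).to(eval_device)
eval_net.e_net = torch.load(checkpoint_path, map_location=eval_device)
eval_net.e_net.eval()

obs = eval_env.reset()
state = eval_net.get_nex_state(None, obs)  # stack of 4 grayscale 84x84 frames
total_reward, done = 0.0, False
with torch.no_grad():
    while not done:
        # same normalization as in training: scale the uint8 pixels to roughly [-1, 1)
        x = torch.Tensor(state / 128 - 1.).unsqueeze(0).to(eval_device)
        action = torch.argmax(eval_net.e_net(x)).item()  # greedy action, no epsilon
        obs, r, done, info = eval_env.step(action)
        state = eval_net.get_nex_state(state, obs)
        total_reward += r
print("evaluation reward:", total_reward)

Because the training loop saves the whole DQN_net module with torch.save, torch.load returns a ready-to-use network; the DQN wrapper is only constructed here so that get_nex_state can be reused for the frame preprocessing and stacking.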
That concludes this article on reinforcement learning with DQN on Atari 2600 Breakout. I hope it proves helpful to fellow programmers!