1. PPO Optimization
For an introduction to PPO and a hands-on walkthrough, see the author's earlier article 强化学习_06_pytorch-PPO实践(Pendulum-v1). The main optimizations over that PPO implementation are:

- batch_normalize: normalize the advantage inside the mini_batch collate function, which speeds up how the model learns from the advantage signal (a sketch of this collate function follows the list)
- policyNet samples from a Beta distribution (0~1), with an added MaxMinScale step that maps the Beta output into the action space
- collect several episodes, compute the advantage for each in turn, then merge everything into a single DataLoader for the update loop, which speeds up convergence
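The mini_batch collate function itself lives in the repo and is not reproduced here. Below is a minimal sketch of the advantage-normalization idea, assuming the DataLoader hands the collate function a list of (state, action, old_log_prob, advantage, td_target) tuples; the subsampling detail and names are guesses, not the repo's exact implementation:

import torch

def mini_batch(batch, mini_batch_size=128):
    # batch: list of (state, action, old_log_prob, advantage, td_target) tuples
    batch = batch[:mini_batch_size]
    state, action, old_log_prob, adv, td_target = map(torch.stack, zip(*batch))
    # per-mini-batch advantage normalization: zero mean, unit variance
    adv = (adv - adv.mean()) / (adv.std() + 1e-8)
    return state, action, old_log_prob, adv, td_target

Normalizing per mini-batch rather than over the whole buffer keeps the advantage scale stable across updates, at the cost of a small bias within each batch.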
1.1 PPO2 Code
The full implementation is on Github: PPO2.py.
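The class below calls policyNet(...).get_dist(state, action_bound) and valueNet(...); both live in the repo. To make the Beta-policy idea concrete, here is a minimal sketch of such an actor head (the layer layout, activations, and the +1 shift on the Beta parameters are assumptions, not the repo's exact code):

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Beta

class policyNet(nn.Module):
    # Beta-distribution actor: outputs (alpha, beta); samples lie in (0, 1)
    def __init__(self, state_dim, hidden_layers_dim, action_dim, dist_type='beta'):
        super().__init__()
        self.dist_type = dist_type
        layers, in_dim = [], state_dim
        for h in hidden_layers_dim:
            layers += [nn.Linear(in_dim, h), nn.ReLU()]
            in_dim = h
        self.backbone = nn.Sequential(*layers)
        self.alpha_head = nn.Linear(in_dim, action_dim)
        self.beta_head = nn.Linear(in_dim, action_dim)

    def get_dist(self, state, action_bound=1.0):
        x = self.backbone(state)
        # softplus + 1 keeps alpha, beta > 1, so the Beta density is unimodal
        alpha = F.softplus(self.alpha_head(x)) + 1.0
        beta = F.softplus(self.beta_head(x)) + 1.0
        return Beta(alpha, beta)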
import os
import typing as typ
from typing import List
from collections import deque
from functools import partial

import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader

# repo-internal helpers: policyNet, valueNet, compute_advantage, mini_batch, memDataset


class PPO2:
    """PPO2: PPO with the clipped surrogate objective."""
    def __init__(self,
                 state_dim: int,
                 actor_hidden_layers_dim: typ.List,
                 critic_hidden_layers_dim: typ.List,
                 action_dim: int,
                 actor_lr: float,
                 critic_lr: float,
                 gamma: float,
                 PPO_kwargs: typ.Dict,
                 device: torch.device,
                 reward_func: typ.Optional[typ.Callable] = None):
        dist_type = PPO_kwargs.get('dist_type', 'beta')
        self.dist_type = dist_type
        self.actor = policyNet(state_dim, actor_hidden_layers_dim, action_dim, dist_type=dist_type).to(device)
        self.critic = valueNet(state_dim, critic_hidden_layers_dim).to(device)
        self.actor_lr = actor_lr
        self.critic_lr = critic_lr
        self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=actor_lr)
        self.critic_opt = torch.optim.Adam(self.critic.parameters(), lr=critic_lr)
        self.gamma = gamma
        self.lmbda = PPO_kwargs['lmbda']
        self.k_epochs = PPO_kwargs['k_epochs']  # epochs to train on one batch of collected sequences
        self.eps = PPO_kwargs['eps']  # clipping range of the PPO objective
        self.sgd_batch_size = PPO_kwargs.get('sgd_batch_size', 512)
        self.minibatch_size = PPO_kwargs.get('minibatch_size', 128)
        self.action_bound = PPO_kwargs.get('action_bound', 1.0)
        self.action_low = -1 * self.action_bound
        self.action_high = self.action_bound
        if 'action_space' in PPO_kwargs:
            # prefer the env's true bounds when an action_space is passed in
            self.action_low = PPO_kwargs['action_space'].low
            self.action_high = PPO_kwargs['action_space'].high
        self.count = 0
        self.device = device
        self.reward_func = reward_func
        self.min_batch_collate_func = partial(mini_batch, mini_batch_size=self.minibatch_size)

    def _action_fix(self, act):
        if self.dist_type == 'beta':
            # beta samples in 0~1 -> low ~ high
            return act * (self.action_high - self.action_low) + self.action_low
        return act

    def _action_return(self, act):
        if self.dist_type == 'beta':
            # low ~ high -> 0~1
            return (act - self.action_low) / (self.action_high - self.action_low)
        return act

    def policy(self, state):
        state = torch.FloatTensor(np.array([state])).to(self.device)
        action_dist = self.actor.get_dist(state, self.action_bound)
        action = action_dist.sample()
        action = self._action_fix(action)
        return action.cpu().detach().numpy()[0]

    def _one_deque_pp(self, samples: deque):
        state, action, reward, next_state, done = zip(*samples)
        state = torch.FloatTensor(np.stack(state)).to(self.device)
        action = torch.FloatTensor(np.stack(action)).to(self.device)
        reward = torch.tensor(np.stack(reward)).view(-1, 1).to(self.device)
        if self.reward_func is not None:
            reward = self.reward_func(reward)
        next_state = torch.FloatTensor(np.stack(next_state)).to(self.device)
        done = torch.FloatTensor(np.stack(done)).view(-1, 1).to(self.device)
        old_v = self.critic(state)
        td_target = reward + self.gamma * self.critic(next_state) * (1 - done)
        td_delta = td_target - old_v
        advantage = compute_advantage(self.gamma, self.lmbda, td_delta, done).to(self.device)
        # recompute the TD target from the (GAE) advantage
        td_target = advantage + old_v
        action_dists = self.actor.get_dist(state, self.action_bound)
        old_log_probs = action_dists.log_prob(self._action_return(action))
        return state, action, old_log_probs, advantage, td_target

    def data_prepare(self, samples_list: List[deque]):
        # compute the advantage per episode, then merge everything
        state_pt_list = []
        action_pt_list = []
        old_log_probs_pt_list = []
        advantage_pt_list = []
        td_target_pt_list = []
        for sample in samples_list:
            state_i, action_i, old_log_probs_i, advantage_i, td_target_i = self._one_deque_pp(sample)
            state_pt_list.append(state_i)
            action_pt_list.append(action_i)
            old_log_probs_pt_list.append(old_log_probs_i)
            advantage_pt_list.append(advantage_i)
            td_target_pt_list.append(td_target_i)
        state = torch.concat(state_pt_list)
        action = torch.concat(action_pt_list)
        old_log_probs = torch.concat(old_log_probs_pt_list)
        advantage = torch.concat(advantage_pt_list)
        td_target = torch.concat(td_target_pt_list)
        return state, action, old_log_probs, advantage, td_target

    def update(self, samples_list: List[deque]):
        state, action, old_log_probs, advantage, td_target = self.data_prepare(samples_list)
        if len(old_log_probs.shape) == 2:
            old_log_probs = old_log_probs.sum(dim=1)
        d_set = memDataset(state, action, old_log_probs, advantage, td_target)
        train_loader = DataLoader(
            d_set,
            batch_size=self.sgd_batch_size,
            shuffle=True,
            drop_last=True,
            collate_fn=self.min_batch_collate_func
        )
        for _ in range(self.k_epochs):
            for state_, action_, old_log_prob, adv, td_v in train_loader:
                action_dists = self.actor.get_dist(state_, self.action_bound)
                log_prob = action_dists.log_prob(self._action_return(action_))
                if len(log_prob.shape) == 2:
                    log_prob = log_prob.sum(dim=1)
                # ratio = e^(log pi_new - log pi_old)
                ratio = torch.exp(log_prob - old_log_prob.detach())
                surr1 = ratio * adv
                surr2 = torch.clamp(ratio, 1 - self.eps, 1 + self.eps) * adv
                actor_loss = torch.mean(-torch.min(surr1, surr2)).float()
                critic_loss = torch.mean(
                    F.mse_loss(self.critic(state_).float(), td_v.detach().float())
                ).float()
                self.actor_opt.zero_grad()
                self.critic_opt.zero_grad()
                actor_loss.backward()
                critic_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 0.5)
                torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 0.5)
                self.actor_opt.step()
                self.critic_opt.step()
        return True

    def save_model(self, file_path):
        if not os.path.exists(file_path):
            os.makedirs(file_path)
        act_f = os.path.join(file_path, 'PPO_actor.ckpt')
        critic_f = os.path.join(file_path, 'PPO_critic.ckpt')
        torch.save(self.actor.state_dict(), act_f)
        torch.save(self.critic.state_dict(), critic_f)

    def load_model(self, file_path):
        act_f = os.path.join(file_path, 'PPO_actor.ckpt')
        critic_f = os.path.join(file_path, 'PPO_critic.ckpt')
        self.actor.load_state_dict(torch.load(act_f, map_location='cpu'))
        self.critic.load_state_dict(torch.load(critic_f, map_location='cpu'))
        self.actor.to(self.device)
        self.critic.to(self.device)
        self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=self.actor_lr)
        self.critic_opt = torch.optim.Adam(self.critic.parameters(), lr=self.critic_lr)

    def train(self):
        self.training = True
        self.actor.train()
        self.critic.train()

    def eval(self):
        self.training = False
        self.actor.eval()
        self.critic.eval()
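compute_advantage is imported from the repo's utilities and not shown above. Purely as a reference for what it computes, here is a minimal GAE sketch matching the call signature compute_advantage(gamma, lmbda, td_delta, done); the episode-boundary handling is an assumption:

import numpy as np
import torch

def compute_advantage(gamma, lmbda, td_delta, done):
    # GAE: A_t = delta_t + gamma * lmbda * (1 - done_t) * A_{t+1}
    td_delta = td_delta.detach().cpu().numpy()
    done = done.detach().cpu().numpy()
    advantage_list, advantage = [], 0.0
    # walk the trajectory backwards, resetting the running sum at terminals
    for delta, d in zip(td_delta[::-1], done[::-1]):
        advantage = delta + gamma * lmbda * (1.0 - d) * advantage
        advantage_list.append(advantage)
    advantage_list.reverse()
    return torch.FloatTensor(np.array(advantage_list))

Detaching td_delta first keeps the critic out of the actor's gradient path when the advantage later multiplies the surrogate ratio.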
2. PyTorch Practice
2.1 Building and Training the Agent
PPO2 mainly trains on result sequences collected over several rounds. Relative to the earlier PPO setup: more training episodes, a slightly lower learning rate, and slightly deeper Actor and Critic networks. Details: Github: test_ppo.Hopper_v4_ppo2_test
import os
from os.path import dirname
import sys
import gymnasium as gym
import torch
# the author's GitHub RL library
from RLAlgo.PPO import PPO
from RLAlgo.PPO2 import PPO2
from RLUtils import train_on_policy, random_play, play, Config, gym_env_desc

env_name = 'Hopper-v4'
gym_env_desc(env_name)
print('gym.__version__ =', gym.__version__)
path_ = os.path.dirname(__file__)
env = gym.make(
    env_name,
    exclude_current_positions_from_observation=True,
    # healthy_reward=0
)
cfg = Config(
    env,
    # environment
    save_path=os.path.join(path_, 'test_models', 'PPO_Hopper-v4_test2'),
    seed=42,
    # network
    actor_hidden_layers_dim=[256, 256, 256],
    critic_hidden_layers_dim=[256, 256, 256],
    # agent
    actor_lr=1.5e-4,
    critic_lr=5.5e-4,
    gamma=0.99,
    # training
    num_episode=12500,
    off_buffer_size=512,
    off_minimal_size=510,
    max_episode_steps=500,
    PPO_kwargs={
        'lmbda': 0.9,
        'eps': 0.25,
        'k_epochs': 4,
        'sgd_batch_size': 128,
        'minibatch_size': 12,
        'actor_bound': 1,
        'dist_type': 'beta'
    }
)
agent = PPO2(
    state_dim=cfg.state_dim,
    actor_hidden_layers_dim=cfg.actor_hidden_layers_dim,
    critic_hidden_layers_dim=cfg.critic_hidden_layers_dim,
    action_dim=cfg.action_dim,
    actor_lr=cfg.actor_lr,
    critic_lr=cfg.critic_lr,
    gamma=cfg.gamma,
    PPO_kwargs=cfg.PPO_kwargs,
    device=cfg.device,
    reward_func=None
)
agent.train()
train_on_policy(env, agent, cfg, wandb_flag=False, train_without_seed=True,
                test_ep_freq=1000, online_collect_nums=cfg.off_buffer_size,
                test_episode_count=5)

2.2 Observing the Trained Agent
Finally, load the best network saved during training and watch it play.
agent.load_model(cfg.save_path)
agent.eval()
env_ = gym.make(env_name, exclude_current_positions_from_observation=True,
                render_mode='human')
play(env_, agent, cfg, episode_count=3, play_without_seed=True, render=True)