
In recent years, deep reinforcement learning (DRL) has made remarkable progress in artificial intelligence and has become a powerful tool for solving complex decision-making problems. This article walks through PyTorch implementations of several popular DRL algorithms, including Q-learning, DQN (Deep Q-Network), PPO (Proximal Policy Optimization), DDPG (Deep Deterministic Policy Gradient), TD3 (Twin Delayed Deep Deterministic Policy Gradient), and SAC (Soft Actor-Critic). We discuss the principles behind these algorithms, their strengths and weaknesses, and how they are applied to practical problems.
Deep reinforcement learning combines the strengths of deep learning and reinforcement learning, allowing an agent to learn optimal policies in complex environments. The sections below cover several of the main DRL algorithms.
PyTorch, a flexible and powerful deep learning framework, is well suited to implementing these algorithms. The key points of each implementation in PyTorch are as follows:
Q-learning is one of the most fundamental reinforcement learning algorithms; it learns an optimal policy by iteratively updating a table of Q-values. Although Q-learning itself does not require deep learning, understanding it makes the more complex DRL algorithms easier to follow.
import torch

class QLearning:
    def __init__(self, state_dim, action_dim, learning_rate=0.1, gamma=0.99):
        # Tabular Q-values: one row per state, one column per action
        self.Q = torch.zeros((state_dim, action_dim))
        self.lr = learning_rate
        self.gamma = gamma

    def update(self, state, action, reward, next_state):
        # One-step TD target: r + gamma * max_a' Q(s', a')
        target = reward + self.gamma * torch.max(self.Q[next_state])
        self.Q[state, action] += self.lr * (target - self.Q[state, action])

    def get_action(self, state):
        # Greedy action with respect to the current Q-table
        return torch.argmax(self.Q[state]).item()
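As a usage sketch, the tabular agent above can be trained with an ε-greedy exploration loop. The snippet below assumes a Gymnasium-style discrete environment such as FrozenLake-v1; the environment name, episode count, and exploration rate are illustrative assumptions, not part of the original article.

import random
import gymnasium as gym  # assumed dependency; any discrete-state env with the Gym API works

env = gym.make("FrozenLake-v1")           # hypothetical choice of environment
agent = QLearning(env.observation_space.n, env.action_space.n)
epsilon = 0.1                              # illustrative exploration rate

for episode in range(5000):
    state, _ = env.reset()
    done = False
    while not done:
        # epsilon-greedy exploration around the greedy policy
        if random.random() < epsilon:
            action = env.action_space.sample()
        else:
            action = agent.get_action(state)
        next_state, reward, terminated, truncated, _ = env.step(action)
        agent.update(state, action, reward, next_state)
        state = next_state
        done = terminated or truncated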
DQN approximates the Q-function with a deep neural network, which greatly extends Q-learning's ability to handle complex environments.
import torch
import torch.nn as nn

class DQN(nn.Module):
    def __init__(self, state_dim, action_dim):
        super(DQN, self).__init__()
        self.fc1 = nn.Linear(state_dim, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, action_dim)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        return self.fc3(x)

class DQNAgent:
    def __init__(self, state_dim, action_dim, learning_rate=1e-3, gamma=0.99):
        self.q_network = DQN(state_dim, action_dim)
        # The target network starts as a copy of the online network
        self.target_network = DQN(state_dim, action_dim)
        self.target_network.load_state_dict(self.q_network.state_dict())
        self.optimizer = torch.optim.Adam(self.q_network.parameters(), lr=learning_rate)
        self.gamma = gamma

    def update(self, state, action, reward, next_state, done):
        state = torch.FloatTensor(state)
        next_state = torch.FloatTensor(next_state)
        q_values = self.q_network(state)
        next_q_values = self.target_network(next_state).detach()
        # Build the TD target; detach so gradients only flow through q_values
        target = q_values.clone().detach()
        target[action] = reward + (1 - done) * self.gamma * next_q_values.max()
        loss = nn.MSELoss()(q_values, target)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def get_action(self, state):
        state = torch.FloatTensor(state)
        q_values = self.q_network(state)
        return torch.argmax(q_values).item()
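The update above operates on a single transition and never refreshes the target network; in practice DQN is trained from a replay buffer with periodic target synchronisation. The following is a minimal training-loop sketch under those assumptions; the CartPole-v1 environment, the deque-based buffer, and the sync interval are illustrative choices, not part of the original article.

import random
from collections import deque
import gymnasium as gym  # assumed dependency

env = gym.make("CartPole-v1")                       # hypothetical environment choice
agent = DQNAgent(env.observation_space.shape[0], env.action_space.n)
buffer = deque(maxlen=10_000)                       # illustrative replay buffer
epsilon, step = 0.1, 0

for episode in range(300):
    state, _ = env.reset()
    done = False
    while not done:
        action = env.action_space.sample() if random.random() < epsilon else agent.get_action(state)
        next_state, reward, terminated, truncated, _ = env.step(action)
        done = terminated or truncated
        buffer.append((state, action, reward, next_state, float(done)))
        # Replay a random past transition (batching is omitted to match the per-sample update above)
        if len(buffer) >= 1000:
            agent.update(*random.choice(buffer))
        # Periodically sync the target network with the online network
        step += 1
        if step % 500 == 0:
            agent.target_network.load_state_dict(agent.q_network.state_dict())
        state = next_state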
PPO is a policy-gradient algorithm that improves training stability by constraining each policy update with a clipped, trust-region-like objective.
import torch
import torch.nn as nn
import torch.optim as optim

class PPO(nn.Module):
    def __init__(self, state_dim, action_dim):
        super(PPO, self).__init__()
        self.actor = nn.Sequential(
            nn.Linear(state_dim, 64), nn.Tanh(),
            nn.Linear(64, 64), nn.Tanh(),
            nn.Linear(64, action_dim), nn.Softmax(dim=-1)
        )
        self.critic = nn.Sequential(
            nn.Linear(state_dim, 64), nn.Tanh(),
            nn.Linear(64, 64), nn.Tanh(),
            nn.Linear(64, 1)
        )

    def forward(self, state):
        return self.actor(state), self.critic(state)

class PPOAgent:
    def __init__(self, state_dim, action_dim, lr=3e-4, gamma=0.99, epsilon=0.2):
        self.ppo = PPO(state_dim, action_dim)
        self.optimizer = optim.Adam(self.ppo.parameters(), lr=lr)
        self.gamma = gamma
        self.epsilon = epsilon

    def update(self, states, actions, rewards, next_states, dones, old_probs):
        states = torch.FloatTensor(states)
        actions = torch.LongTensor(actions)
        # In this simplified version the rewards are used directly as the advantage signal;
        # a full implementation would use discounted returns or GAE advantages instead.
        rewards = torch.FloatTensor(rewards)
        next_states = torch.FloatTensor(next_states)
        old_probs = torch.FloatTensor(old_probs)
        for _ in range(10):  # several epochs of updates on the same batch
            new_probs, state_values = self.ppo(states)
            new_probs = new_probs.gather(1, actions.unsqueeze(1)).squeeze(1)
            # Probability ratio between the new and old policies
            ratio = new_probs / old_probs
            surr1 = ratio * rewards
            surr2 = torch.clamp(ratio, 1 - self.epsilon, 1 + self.epsilon) * rewards
            actor_loss = -torch.min(surr1, surr2).mean()
            critic_loss = nn.MSELoss()(state_values.squeeze(-1), rewards)
            loss = actor_loss + 0.5 * critic_loss
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

    def get_action(self, state):
        state = torch.FloatTensor(state)
        probs, _ = self.ppo(state)
        return torch.multinomial(probs, 1).item()
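The update above expects the action probabilities recorded under the behaviour (old) policy. Below is a brief rollout sketch showing one way such a batch might be gathered; the CartPole-v1 environment, the rollout length, and the use of raw per-step rewards as the advantage signal (matching the simplified update above) are assumptions for illustration.

import torch
import gymnasium as gym  # assumed dependency

env = gym.make("CartPole-v1")                # hypothetical environment choice
agent = PPOAgent(env.observation_space.shape[0], env.action_space.n)

states, actions, rewards, next_states, dones, old_probs = [], [], [], [], [], []
state, _ = env.reset()
for _ in range(2048):                         # illustrative rollout length
    with torch.no_grad():
        probs, _ = agent.ppo(torch.FloatTensor(state))
    action = torch.multinomial(probs, 1).item()
    next_state, reward, terminated, truncated, _ = env.step(action)
    states.append(state); actions.append(action); rewards.append(reward)
    next_states.append(next_state); dones.append(float(terminated or truncated))
    old_probs.append(probs[action].item())    # probability of the action under the old policy
    state = next_state if not (terminated or truncated) else env.reset()[0]

agent.update(states, actions, rewards, next_states, dones, old_probs)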
DDPG is an algorithm for continuous action spaces that combines ideas from DQN with the deterministic policy gradient.
import torch
import torch.nn as nn
import torch.optim as optim

class Actor(nn.Module):
    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        self.fc1 = nn.Linear(state_dim, 400)
        self.fc2 = nn.Linear(400, 300)
        self.fc3 = nn.Linear(300, action_dim)
        self.max_action = max_action

    def forward(self, state):
        a = torch.relu(self.fc1(state))
        a = torch.relu(self.fc2(a))
        return self.max_action * torch.tanh(self.fc3(a))

class Critic(nn.Module):
    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        self.fc1 = nn.Linear(state_dim + action_dim, 400)
        self.fc2 = nn.Linear(400, 300)
        self.fc3 = nn.Linear(300, 1)

    def forward(self, state, action):
        q = torch.cat([state, action], 1)
        q = torch.relu(self.fc1(q))
        q = torch.relu(self.fc2(q))
        return self.fc3(q)

class DDPGAgent:
    def __init__(self, state_dim, action_dim, max_action, lr=1e-4, gamma=0.99, tau=0.001):
        self.actor = Actor(state_dim, action_dim, max_action)
        self.actor_target = Actor(state_dim, action_dim, max_action)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=lr)

        self.critic = Critic(state_dim, action_dim)
        self.critic_target = Critic(state_dim, action_dim)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=lr)

        self.gamma = gamma
        self.tau = tau

    def select_action(self, state):
        state = torch.FloatTensor(state.reshape(1, -1))
        return self.actor(state).cpu().data.numpy().flatten()

    def update(self, replay_buffer, batch_size=100):
        # Sample a batch of transitions from the replay buffer
        state, action, next_state, reward, done = replay_buffer.sample(batch_size)

        # Compute the target Q-value using the target networks
        target_Q = self.critic_target(next_state, self.actor_target(next_state))
        target_Q = reward + (1 - done) * self.gamma * target_Q.detach()

        # Update the critic
        current_Q = self.critic(state, action)
        critic_loss = nn.MSELoss()(current_Q, target_Q)
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        # Update the actor (maximise Q for the actor's own actions)
        actor_loss = -self.critic(state, self.actor(state)).mean()
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()

        # Soft-update the target networks
        for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
        for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
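The DDPG update assumes a replay_buffer object with a sample(batch_size) method returning tensors in the order (state, action, next_state, reward, done). That interface is not defined in the article, so the buffer below is a minimal sketch of one way it could look; the class name and capacity are assumptions.

import random
import numpy as np
import torch

class ReplayBuffer:
    """Minimal replay buffer matching the interface assumed by DDPGAgent.update."""
    def __init__(self, capacity=100_000):
        self.storage = []
        self.capacity = capacity

    def add(self, state, action, next_state, reward, done):
        if len(self.storage) >= self.capacity:
            self.storage.pop(0)                      # drop the oldest transition
        self.storage.append((state, action, next_state, reward, done))

    def sample(self, batch_size):
        batch = random.sample(self.storage, batch_size)
        state, action, next_state, reward, done = map(np.array, zip(*batch))
        # Rewards and done flags get a trailing dimension so they broadcast against Q-values
        return (torch.FloatTensor(state),
                torch.FloatTensor(action),
                torch.FloatTensor(next_state),
                torch.FloatTensor(reward).unsqueeze(1),
                torch.FloatTensor(done).unsqueeze(1))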
TD3 is an improved version of DDPG that boosts performance and stability through three tricks: twin Q-networks (clipped double Q-learning), delayed policy updates, and target policy smoothing.
import torch
import torch.nn as nn
import torch.optim as optim

class Actor(nn.Module):
    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        self.fc1 = nn.Linear(state_dim, 400)
        self.fc2 = nn.Linear(400, 300)
        self.fc3 = nn.Linear(300, action_dim)
        self.max_action = max_action

    def forward(self, state):
        a = torch.relu(self.fc1(state))
        a = torch.relu(self.fc2(a))
        return self.max_action * torch.tanh(self.fc3(a))

class Critic(nn.Module):
    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        self.fc1 = nn.Linear(state_dim + action_dim, 400)
        self.fc2 = nn.Linear(400, 300)
        self.fc3 = nn.Linear(300, 1)

    def forward(self, state, action):
        sa = torch.cat([state, action], 1)
        q = torch.relu(self.fc1(sa))
        q = torch.relu(self.fc2(q))
        return self.fc3(q)

class TD3Agent:
    def __init__(self, state_dim, action_dim, max_action, lr=1e-3, gamma=0.99, tau=0.005,
                 policy_noise=0.2, noise_clip=0.5, policy_freq=2):
        self.actor = Actor(state_dim, action_dim, max_action)
        self.actor_target = Actor(state_dim, action_dim, max_action)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=lr)

        self.critic1 = Critic(state_dim, action_dim)
        self.critic2 = Critic(state_dim, action_dim)
        self.critic1_target = Critic(state_dim, action_dim)
        self.critic2_target = Critic(state_dim, action_dim)
        self.critic1_target.load_state_dict(self.critic1.state_dict())
        self.critic2_target.load_state_dict(self.critic2.state_dict())
        self.critic1_optimizer = optim.Adam(self.critic1.parameters(), lr=lr)
        self.critic2_optimizer = optim.Adam(self.critic2.parameters(), lr=lr)

        self.max_action = max_action
        self.gamma = gamma
        self.tau = tau
        self.policy_noise = policy_noise
        self.noise_clip = noise_clip
        self.policy_freq = policy_freq
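The TD3 code above stops at the constructor. The training step below is a sketch, not from the original article, of how the three TD3 tricks would typically be applied to this class; it reuses the replay-buffer interface assumed for DDPG, and the standalone function, its name, and the iteration argument are illustrative choices.

import torch
import torch.nn as nn

def td3_train_step(agent, replay_buffer, iteration, batch_size=100):
    """One TD3 update: clipped double Q-learning, target policy smoothing, delayed actor update."""
    state, action, next_state, reward, done = replay_buffer.sample(batch_size)

    with torch.no_grad():
        # Target policy smoothing: add clipped noise to the target action
        noise = (torch.randn_like(action) * agent.policy_noise).clamp(-agent.noise_clip, agent.noise_clip)
        next_action = (agent.actor_target(next_state) + noise).clamp(-agent.max_action, agent.max_action)
        # Clipped double Q-learning: take the minimum of the two target critics
        target_Q = torch.min(agent.critic1_target(next_state, next_action),
                             agent.critic2_target(next_state, next_action))
        target_Q = reward + (1 - done) * agent.gamma * target_Q

    # Update both critics towards the shared target
    critic1_loss = nn.MSELoss()(agent.critic1(state, action), target_Q)
    critic2_loss = nn.MSELoss()(agent.critic2(state, action), target_Q)
    agent.critic1_optimizer.zero_grad(); critic1_loss.backward(); agent.critic1_optimizer.step()
    agent.critic2_optimizer.zero_grad(); critic2_loss.backward(); agent.critic2_optimizer.step()

    # Delayed policy update: refresh the actor and target networks every policy_freq steps
    if iteration % agent.policy_freq == 0:
        actor_loss = -agent.critic1(state, agent.actor(state)).mean()
        agent.actor_optimizer.zero_grad(); actor_loss.backward(); agent.actor_optimizer.step()

        for net, target in ((agent.critic1, agent.critic1_target),
                            (agent.critic2, agent.critic2_target),
                            (agent.actor, agent.actor_target)):
            for param, target_param in zip(net.parameters(), target.parameters()):
                target_param.data.copy_(agent.tau * param.data + (1 - agent.tau) * target_param.data)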

