This article gives a detailed walkthrough of how to save, reuse, and migrate PyTorch models. The steps are laid out in order and the details are handled along the way; follow along step by step and hopefully it clears up your questions.
import torch
import torch.nn as nn

# Define the model structure
class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv = nn.Sequential(            # input:  [batch, 1, 28, 28]
            nn.Conv2d(1, 8, 5, padding=2),    # -> [batch, 8, 28, 28]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),               # -> [batch, 8, 14, 14]
            nn.Conv2d(8, 16, 5),              # -> [batch, 16, 10, 10]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),               # -> [batch, 16, 5, 5]
        )
        self.fc = nn.Sequential(
            nn.Flatten(),
            nn.Linear(16 * 5 * 5, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, 64),
            nn.ReLU(inplace=True),
            nn.Linear(64, 10)
        )

    def forward(self, X):
        return self.fc(self.conv(X))
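To sanity-check the shape comments above, you can push a dummy batch through the network; a quick sketch, assuming MNIST-style 1x28x28 inputs:

# Quick shape check for the LeNet defined above
model = LeNet()
dummy = torch.randn(4, 1, 28, 28)   # a fake batch of 4 grayscale 28x28 images
out = model(dummy)
print(out.shape)                    # torch.Size([4, 10])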
# Inspect the model parameters.
# model.state_dict() stores the parameters as a dictionary
# (in fact an OrderedDict from the collections module).
model = LeNet()
print("Model's state_dict:")
for param_tensor in model.state_dict():
    print(param_tensor, "\t", model.state_dict()[param_tensor].size())
# The 'fc' and 'conv' prefixes in the parameter names come from the attribute
# names used when defining the nn.Sequential() containers.
# The number in each name is the layer's position inside that Sequential().
print(model.state_dict().keys())    # print the keys
print(model.state_dict().values())  # print the values

# The optimizer's state_dict can be printed in the same way
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
print("Optimizer's state_dict:")
for var_name in optimizer.state_dict():
    print(var_name, "\t", optimizer.state_dict()[var_name])
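Since state_dict() really is an OrderedDict, you can treat it like any ordered mapping; a small sketch that confirms the type and counts the total number of parameters:

from collections import OrderedDict

sd = model.state_dict()
print(isinstance(sd, OrderedDict))           # True
total = sum(t.numel() for t in sd.values())  # total number of parameter values
print("total parameters:", total)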
import os
from copy import deepcopy

# PyTorch's official convention is to use the .pt or .pth suffix for saved models
model_save_dir = './model_logs/'
os.makedirs(model_save_dir, exist_ok=True)
model_save_path = os.path.join(model_save_dir, 'LeNet.pt')
torch.save(model.state_dict(), model_save_path)

# To keep the best model seen so far during training, deep-copy its state first:
best_model_state = deepcopy(model.state_dict())
torch.save(best_model_state, model_save_path)

# The version below is WRONG: best_model_state merely references the tensors in
# model.state_dict(), so it keeps changing as training updates the model.
best_model_state = model.state_dict()
torch.save(best_model_state, model_save_path)
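To see why the deepcopy matters, here is a tiny experiment (a sketch using the LeNet above): an in-place update to a parameter is immediately visible through a state_dict taken earlier, because the dict's tensors share storage with the live model:

sd = model.state_dict()             # tensors share storage with the live model
before = sd['fc.5.bias'].clone()
with torch.no_grad():
    model.fc[5].bias.add_(1.0)      # simulate one training update
print(torch.equal(before, sd['fc.5.bias']))   # False: sd changed with the model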
def inference(data_iter, device, model_save_dir):
    model = LeNet()  # initialize a fresh model with random weights
    model.to(device)
    model_save_path = os.path.join(model_save_dir, 'LeNet.pt')
    # If a saved model exists locally, load its parameters over the fresh ones
    if os.path.exists(model_save_path):
        loaded_paras = torch.load(model_save_path, map_location=device)
        model.load_state_dict(loaded_paras)
    model.eval()
    with torch.no_grad():
        # run inference
        acc_sum, n = 0., 0
        for x, y in data_iter:
            x, y = x.to(device), y.to(device)
            logits = model(x)
            acc_sum += (logits.argmax(1) == y).float().sum().item()
            n += len(y)
        print("Accuracy in test data is :", acc_sum / n)
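The inference and training code both call a load_dataset helper that the article does not show. Here is a minimal stand-in, assuming torchvision's FashionMNIST (any 1x28x28 dataset works):

import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

def load_dataset(batch_size=64):
    # Minimal sketch of the article's unshown helper, assuming FashionMNIST
    transform = transforms.ToTensor()
    train_set = torchvision.datasets.FashionMNIST(
        root='./data', train=True, download=True, transform=transform)
    test_set = torchvision.datasets.FashionMNIST(
        root='./data', train=False, download=True, transform=transform)
    train_iter = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_iter = DataLoader(test_set, batch_size=batch_size, shuffle=False)
    return train_iter, test_iter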
class MyModel:
    def __init__(self, batch_size=64, epochs=5, learning_rate=0.001,
                 model_save_dir='./MODEL'):
        self.batch_size = batch_size
        self.epochs = epochs
        self.learning_rate = learning_rate
        self.model_save_dir = model_save_dir
        self.model = LeNet()

    def train(self):
        train_iter, test_iter = load_dataset(self.batch_size)
        # Only the network weights are saved during training; when retraining,
        # only the weights are loaded to initialize the network.
        # This is the key part -- start:
        if not os.path.exists(self.model_save_dir):
            os.makedirs(self.model_save_dir)
        model_save_path = os.path.join(self.model_save_dir, 'model.pt')
        if os.path.exists(model_save_path):
            loaded_paras = torch.load(model_save_path, map_location='cpu')
            self.model.load_state_dict(loaded_paras)
            print("#### Successfully loaded the existing model, retraining...")
        # key part -- end
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
        loss_fn = nn.CrossEntropyLoss()
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model.to(device)
        for epoch in range(self.epochs):
            for i, (x, y) in enumerate(train_iter):
                x, y = x.to(device), y.to(device)
                logits = self.model(x)
                loss = loss_fn(logits, y)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                if i % 100 == 0:
                    acc = (logits.argmax(1) == y).float().mean()
                    print("Epochs[{}/{}]---batch[{}/{}]---acc {:.4}---loss {:.4}".format(
                        epoch, self.epochs, i, len(train_iter), acc, loss.item()))
            print("Epochs[{}/{}]--acc on test {:.4}".format(
                epoch, self.epochs, self.evaluate(test_iter, self.model, device)))
            torch.save(self.model.state_dict(), model_save_path)

    @staticmethod
    def evaluate(data_iter, model, device):
        with torch.no_grad():
            acc_sum, n = 0.0, 0
            for x, y in data_iter:
                x, y = x.to(device), y.to(device)
                logits = model(x)
                acc_sum += (logits.argmax(1) == y).float().sum().item()
                n += len(y)
            return acc_sum / n
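Running it is then a two-liner; a usage sketch:

if __name__ == '__main__':
    trainer = MyModel(batch_size=64, epochs=5)
    trainer.train()   # loads ./MODEL/model.pt if present, then (re)trains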
# When saving, the optimizer state, loss value, etc. can be bundled together with
# the model weights, and then restored all at once when the model is reloaded
model_save_path = os.path.join(model_save_dir, 'LeNet.pt')
torch.save({
    'epoch': epoch,
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
    'loss': loss,
    # ... anything else you want to keep
}, model_save_path)

# Loading works as follows
checkpoint = torch.load(model_save_path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
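With such a checkpoint, interrupted training can pick up where it left off; a sketch of the resume path (total_epochs is an illustrative name for your planned epoch count):

# Resume training from a checkpoint (sketch)
checkpoint = torch.load(model_save_path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start_epoch = checkpoint['epoch'] + 1   # continue from the next epoch
model.train()                           # back to training mode (model.eval() for inference)
for epoch in range(start_epoch, total_epochs):
    ...  # the usual training loop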
# Define a new model, NewLeNet. It differs from LeNet only in having one extra
# fully connected layer.
class NewLeNet(nn.Module):
    def __init__(self):
        super(NewLeNet, self).__init__()
        self.conv = nn.Sequential(            # input:  [batch, 1, 28, 28]
            nn.Conv2d(1, 8, 5, padding=2),    # -> [batch, 8, 28, 28]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),               # -> [batch, 8, 14, 14]
            nn.Conv2d(8, 16, 5),              # -> [batch, 16, 10, 10]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),               # -> [batch, 16, 5, 5]
        )
        self.fc = nn.Sequential(
            nn.Flatten(),
            nn.Linear(16 * 5 * 5, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, 64),   # up to this layer the structure matches LeNet,
                                  # so LeNet's parameters can be reused
            nn.ReLU(inplace=True),
            nn.Linear(64, 32),
            nn.ReLU(inplace=True),
            nn.Linear(32, 10)
        )

    def forward(self, X):
        return self.fc(self.conv(X))
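Comparing the two state_dicts shows exactly which parameters can be carried over, which motivates the matching function below; a quick sketch:

old_sd, new_sd = LeNet().state_dict(), NewLeNet().state_dict()
for k in new_sd:
    if k in old_sd and old_sd[k].size() == new_sd[k].size():
        print("reusable from LeNet:", k)
    else:
        print("needs fresh initialization:", k)  # e.g. fc.5.weight: [32,64] vs [10,64]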
# Define the substitution function: match the two networks and copy parameters
# wherever both the name and the size agree
def para_state_dict(model, model_save_dir):
    state_dict = deepcopy(model.state_dict())
    model_save_path = os.path.join(model_save_dir, 'model.pt')
    if os.path.exists(model_save_path):
        loaded_paras = torch.load(model_save_path, map_location='cpu')
        for key in state_dict:  # iterate over the new model's parameters
            if key in loaded_paras and state_dict[key].size() == loaded_paras[key].size():
                print("Successfully initialized parameter:", key)
                state_dict[key] = loaded_paras[key]
    return state_dict
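Note that model.load_state_dict(loaded_paras, strict=False) alone would not work here: fc.5.weight exists in both models but with different shapes, and load_state_dict rejects shape mismatches even with strict=False. The explicit size check above is what lets us skip that layer. Usage is then simply:

new_model = NewLeNet()
state_dict = para_state_dict(new_model, './MODEL')  # assumes LeNet's weights sit in ./MODEL/model.pt
new_model.load_state_dict(state_dict)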
# Updated training code after the model migration
# (in this version, self.model is assumed to be a NewLeNet())
def train(self):
    train_iter, test_iter = load_dataset(self.batch_size)
    if not os.path.exists(self.model_save_dir):
        os.makedirs(self.model_save_dir)
    model_save_path = os.path.join(self.model_save_dir, 'model_new.pt')
    old_model = os.path.join(self.model_save_dir, 'model.pt')
    if os.path.exists(old_model):
        # call the migration code to carry LeNet's early layers over to NewLeNet
        state_dict = para_state_dict(self.model, self.model_save_dir)
        self.model.load_state_dict(state_dict)
        print("#### Successfully loaded the existing model, retraining...")
    optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
    loss_fn = nn.CrossEntropyLoss()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    self.model.to(device)
    for epoch in range(self.epochs):
        for i, (x, y) in enumerate(train_iter):
            x, y = x.to(device), y.to(device)
            logits = self.model(x)
            loss = loss_fn(logits, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if i % 100 == 0:
                acc = (logits.argmax(1) == y).float().mean()
                print("Epochs[{}/{}]---batch[{}/{}]---acc {:.4}---loss {:.4}".format(
                    epoch, self.epochs, i, len(train_iter), acc, loss.item()))
        print("Epochs[{}/{}]--acc on test {:.4}".format(
            epoch, self.epochs, self.evaluate(test_iter, self.model, device)))
        torch.save(self.model.state_dict(), model_save_path)
# Updated inference, run without any further training
def inference(data_iter, device, model_save_dir='./MODEL'):
    model = NewLeNet()  # initialize a fresh model with random weights
    print("Randomly initialized conv.0.bias:", model.state_dict()['conv.0.bias'])
    model.to(device)
    state_dict = para_state_dict(model, model_save_dir)  # migrate the saved parameters
    model.load_state_dict(state_dict)
    model.eval()
    print("conv.0.bias after loading the local model:", model.state_dict()['conv.0.bias'])
    with torch.no_grad():
        acc_sum, n = 0.0, 0
        for x, y in data_iter:
            x, y = x.to(device), y.to(device)
            logits = model(x)
            acc_sum += (logits.argmax(1) == y).float().sum().item()
            n += len(y)
        print("Accuracy in test data is :", acc_sum / n)
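A hypothetical call, reusing the load_dataset sketch from earlier:

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
_, test_iter = load_dataset(batch_size=64)
inference(test_iter, device, model_save_dir='./MODEL')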
That concludes this walkthrough of saving, reusing, and migrating PyTorch models. To truly master these techniques you will need to practice them yourself; for more articles on related topics, follow the 亿速云 industry news channel.