To run distributed training with PyTorch on CentOS, you can follow these steps:

1. Install PyTorch with CUDA support, for example via conda, picking the cudatoolkit that matches your installed CUDA version:
conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch
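Before going further it is worth confirming that the installation can see your GPUs and that the NCCL backend is available. A minimal sanity check, assuming the conda environment above is active:

import torch
print(torch.__version__)                      # installed PyTorch version
print(torch.cuda.is_available())              # should print True
print(torch.cuda.device_count())              # number of visible GPUs
print(torch.distributed.is_nccl_available())  # NCCL backend is needed for multi-GPU training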
2. Make sure environment variables such as PYTHONPATH and LD_LIBRARY_PATH are set correctly; in particular, LD_LIBRARY_PATH should include your CUDA library directory so that the CUDA runtime and NCCL can be found.
3. Use the python -m torch.distributed.launch command to start distributed training:
python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE YOUR_TRAINING_SCRIPT.py
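Note that depending on the PyTorch version, the launcher either passes the local rank to your script as a --local_rank command-line argument or exports it as the LOCAL_RANK environment variable (newer releases, and the torchrun replacement, use the environment variable). A small sketch that handles both, assuming the script is started by one of these launchers:

import argparse
import os

parser = argparse.ArgumentParser()
# older torch.distributed.launch passes --local_rank; newer launchers export LOCAL_RANK
parser.add_argument('--local_rank', type=int,
                    default=int(os.environ.get('LOCAL_RANK', 0)))
args = parser.parse_args()
local_rank = args.local_rank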
4. In your training script, call torch.distributed.init_process_group() to initialize the distributed environment. Below is a simple distributed training example:
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, DistributedSampler
from torchvision import datasets, transforms
# Initialize the distributed environment (init_method='env://' reads the variables
# exported by the launcher, e.g. MASTER_ADDR, MASTER_PORT, WORLD_SIZE, RANK)
world_size = torch.cuda.device_count()   # number of GPUs on this node
rank = int(os.environ['LOCAL_RANK'])     # local rank of this process on the node
torch.cuda.set_device(rank)              # bind this process to its own GPU
torch.distributed.init_process_group(backend='nccl', init_method='env://')
# Define the model
class SimpleModel(nn.Module):
    def __init__(self):
        super(SimpleModel, self).__init__()
        self.fc = nn.Linear(784, 10)

    def forward(self, x):
        x = x.view(-1, 784)
        return self.fc(x)

# Move the model to this process's GPU and wrap it with DistributedDataParallel
model = SimpleModel().to(rank)
model = nn.parallel.DistributedDataParallel(model, device_ids=[rank])
# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

# Load the data; DistributedSampler gives each process a different shard of the dataset
transform = transforms.Compose([transforms.ToTensor()])
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
train_sampler = DistributedSampler(train_dataset)
train_loader = DataLoader(dataset=train_dataset, batch_size=64, sampler=train_sampler)
# Train the model
for epoch in range(5):
    train_sampler.set_epoch(epoch)   # reshuffle with a different seed each epoch
    running_loss = 0.0
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data[0].to(rank), data[1].to(rank)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print(f'Epoch {epoch + 1}, Loss: {running_loss / len(train_loader)}')  # each process prints its own average loss
# Clean up the distributed environment
torch.distributed.destroy_process_group()
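In practice you will usually also want to persist the trained weights. A common pattern is to save the checkpoint only from rank 0 (placed before the destroy_process_group() call), so that the processes do not all write the same file; the filename here is just a placeholder:

if torch.distributed.get_rank() == 0:
    # model.module is the original model wrapped by DistributedDataParallel
    torch.save(model.module.state_dict(), 'model.pt')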
With the steps above, you should be able to run PyTorch distributed training successfully on CentOS.