1. DataParallel
import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if cfg.cuda:                         # cfg is the config object of the surrounding training script
    model = model.to(device)
    loss = loss.to(device)
    if torch.cuda.device_count() > 1:
        device_ids = range(torch.cuda.device_count())
        model = torch.nn.DataParallel(model, device_ids=device_ids)
        model = model.to(device)
# when loading a checkpoint saved from a DataParallel model, strip the "module." prefix from the keys
model.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(checkout).items()})
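Conversely, the "module." prefix never ends up in the checkpoint if the wrapped network is saved through model.module. A small sketch (the file name is illustrative):

if isinstance(model, torch.nn.DataParallel):
    torch.save(model.module.state_dict(), "checkpoint.pth")   # keys carry no "module." prefix
else:
    torch.save(model.state_dict(), "checkpoint.pth")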
2. DistributedDataParallel (DDP), Linux only (the NCCL backend used below requires Linux)
import torch
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler

# 1) initialize the default process group
torch.distributed.init_process_group(backend="nccl")
# 2) bind each process to its own GPU (local_rank defaults to 0)
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
# 3) use DistributedSampler so every process reads a different shard of the data
train_loader = DataLoader(dataset=dataset,
                          batch_size=batch_size,
                          sampler=DistributedSampler(dataset))
# 4) move the model to its GPU before wrapping it
model.to(device)  # cpu -> gpu
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    model = torch.nn.parallel.DistributedDataParallel(model,
                                                      device_ids=[local_rank],
                                                      output_device=local_rank,
                                                      broadcast_buffers=False,
                                                      find_unused_parameters=True)

python -m torch.distributed.launch --nproc_per_node=4 train.py  # launch: one process per GPU
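With the launcher above, each process receives its local_rank as a command-line argument. A minimal way to read it, plus the per-epoch call that reshuffles the DistributedSampler (num_epochs and the loop body are placeholders):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", type=int, default=0)   # filled in by torch.distributed.launch
args = parser.parse_args()
local_rank = args.local_rank

for epoch in range(num_epochs):               # num_epochs is a placeholder
    train_loader.sampler.set_epoch(epoch)     # give the sampler the epoch so shards are reshuffled
    for batch in train_loader:
        ...                                   # forward / backward as usual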
3. Model inference
3.1 Setting the GPU
# -*- coding: utf-8 -*-
import torch
from torchvision import models

def main():
    model = models.resnet50(pretrained=True).eval()
    model = model.cuda(3)                 # put the model on GPU 3
    x = torch.randn(1, 3, 224, 224)
    x = x.cuda(3)                         # the input must live on the same GPU as the model
    out = model(x)
    print(out.size())
    print(out.get_device())               # prints 3

if __name__ == '__main__':
    main()
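For pure inference it also helps to disable gradient tracking; a small variant of the forward call above (optional, not part of the original example):

with torch.no_grad():        # no autograd graph is built, which saves memory
    out = model(x)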
Setting the GPU, method 2
# -*- coding: utf-8 -*-
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"   # must be set before torch initializes CUDA
import torch
from torchvision import models

device = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu')

if __name__ == '__main__':
    resnet18 = models.resnet18(pretrained=True)
    resnet18.to(device).eval()
    x = torch.randn(1, 3, 224, 224).to(device)
    print(x.device)
    out = resnet18(x)
    print(out.size())
    print(out.device)
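Note that CUDA_VISIBLE_DEVICES remaps device indices inside the process, so cuda:0 is the first visible card, not necessarily physical GPU 0. A minimal illustration (the chosen GPU ids are arbitrary):

import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2,3"   # must run before CUDA is initialized
import torch
print(torch.cuda.device_count())             # 2: only the visible cards are counted
x = torch.randn(1).to("cuda:0")              # lands on physical GPU 2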
Moving data between GPUs
# -*- coding: utf-8 -*-
import os; os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
import torch

device0 = torch.device("cuda:0")
device1 = torch.device("cuda:1")

if __name__ == '__main__':
    x = torch.randn(1, 3, 224, 224).to(device0)
    print(x.get_device())     # 0
    x = x.cuda(device1)       # copy the tensor to GPU 1
    print(x.get_device())     # 1
3.2 Multi-threading + GPUs
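No example is given here; the sketch below is one possible reading of this heading, assuming one Python thread per GPU, each running its own model replica for inference (the model choice and tensor shapes are illustrative):

# -*- coding: utf-8 -*-
import copy
import threading
import torch
from torchvision import models

def worker(model_cpu, gpu_id, batch):
    # each thread keeps its own replica and its own data on one GPU
    model = copy.deepcopy(model_cpu).cuda(gpu_id).eval()
    with torch.no_grad():
        out = model(batch.cuda(gpu_id))
    print(gpu_id, out.size())

if __name__ == '__main__':
    base = models.resnet18(pretrained=True).eval()      # built once on the CPU
    threads = [threading.Thread(target=worker,
                                args=(base, i, torch.randn(1, 3, 224, 224)))
               for i in range(torch.cuda.device_count())]
    for t in threads:
        t.start()
    for t in threads:
        t.join()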
1. Single machine, multiple GPUs (one machine multi-GPU)
1.1 DataParallel
DataParallel (DP): parameter-server mode, with one card acting as the reducer; it is extremely simple to use, a single line of code. Its hard-to-accept drawback is that, because DataParallel follows a parameter-server style algorithm, all losses are computed on the master card, so load imbalance is severe; with a large model (e.g. bert-large) the master card can be full while the other cards are not even half used, which is extremely wasteful of resources.
Note that both the model and the data have to be moved onto the GPU first, as in the sketch below.
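A minimal sketch of that one-line wrap, with the model and each batch moved to the GPU first (train_loader, criterion and optimizer are placeholders from a surrounding training script):

model = model.cuda()                     # move the model to the default GPU first
model = torch.nn.DataParallel(model)     # the "one line": replicate across all visible GPUs
for inputs, targets in train_loader:     # placeholder DataLoader
    inputs, targets = inputs.cuda(), targets.cuda()
    outputs = model(inputs)              # inputs are scattered, outputs gathered on cuda:0
    loss = criterion(outputs, targets)   # loss lives on the master card, hence the imbalance
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()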