mp.py
import os

import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel as DDP

os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'


def example(rank, world_size):
    # Initialize the process group; gloo also works on CPU-only machines,
    # but `.to(rank)` below assumes one GPU per process
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
    # Create the model and place it on this rank's device
    model = nn.Linear(10, 10).to(rank)
    # Wrap the model in DDP so gradients are synchronized across ranks
    ddp_model = DDP(model, device_ids=[rank])
    loss_fn = nn.MSELoss()
    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)
    # Run forward and backward passes
    for _ in range(1000):
        optimizer.zero_grad()  # clear gradients left over from the previous step
        outputs = ddp_model(torch.randn(20, 10).to(rank))
        labels = torch.randn(20, 10).to(rank)
        loss_fn(outputs, labels).backward()
        optimizer.step()
    dist.destroy_process_group()


def main():
    world_size = 2
    # spawn() prepends the rank to args, so example() receives (rank, world_size)
    mp.spawn(example,
             args=(world_size,),
             nprocs=world_size,
             join=True)


if __name__ == "__main__":
    main()
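
For comparison, the same per-process function can be driven by the torchrun launcher instead of mp.spawn. A minimal sketch follows; the name torchrun_main is hypothetical and this variant is not part of the file above. torchrun starts the workers itself and exports RANK, WORLD_SIZE, MASTER_ADDR and MASTER_PORT to each of them.

# Alternative launch (sketch): `torchrun --nproc_per_node=2 mp.py`.
# torchrun exports RANK, WORLD_SIZE, MASTER_ADDR and MASTER_PORT to every
# worker, so no mp.spawn call is needed. Note that the hard-coded
# MASTER_ADDR/MASTER_PORT assignments at the top of this file would override
# torchrun's values; since every worker sets the same ones, a single-node
# run still rendezvous correctly.
def torchrun_main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    example(rank, world_size)  # example() initializes the process group itself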