# train.py (forked from Khrylx/AgentFormer)
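# Typical invocation, following the upstream AgentFormer README (the config
# name is a placeholder for whatever YAML config your fork provides):
#   python train.py --cfg <config_name> --gpu 0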
import os
import sys
import argparse
import time
import numpy as np
import torch
from torch import optim
from torch.utils.tensorboard import SummaryWriter
sys.path.append(os.getcwd())
from data.dataloader import data_generator
from model.model_lib import model_dict
from lib.torch import *
from lib.config import Config
from lib.utils import prepare_seed, print_log, AverageMeter, convert_secs2time, get_timestring
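# get_scheduler (used below) is assumed to come from the lib.torch star import,
# as it does from utils/torch.py in upstream AgentFormer.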

torch.backends.cudnn.enabled = True
torch.backends.cudnn.deterministic = True
# benchmark=True lets cuDNN pick algorithms non-deterministically, which would
# defeat deterministic=True and the seeding done by prepare_seed() below.
torch.backends.cudnn.benchmark = False


def logging(cfg, epoch, total_epoch, iter, total_iter, ep, seq, frame, losses_str, log):
    print_log('{} | Epo: {:02d}/{:02d}, '
              'It: {:04d}/{:04d}, '
              'EP: {:s}, ETA: {:s}, seq {:s}, frame {:05d}, {}'
              .format(cfg, epoch, total_epoch, iter, total_iter,
                      convert_secs2time(ep),
                      convert_secs2time(ep / iter * (total_iter * (total_epoch - epoch) - iter)),
                      seq, frame, losses_str), log)
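

# train() runs one pass over the training split: for each sample produced by
# the generator it does a forward pass, computes the loss, and takes an
# optimizer step; every cfg.print_freq samples it logs running loss averages
# to the console and to TensorBoard.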
def train(epoch):
    global tb_ind
    since_train = time.time()
    generator.shuffle()
    train_loss_meter = {x: AverageMeter() for x in cfg.loss_cfg.keys()}
    train_loss_meter['total_loss'] = AverageMeter()
    last_generator_index = 0
    while not generator.is_epoch_end():
        # Get data from the generator (a dictionary, or None for skipped samples)
        data = generator()
        if data is not None:
            seq, frame = data['seq'], data['frame']
            model.set_data(data)
            # Forward pass
            model()
            # Compute loss
            total_loss, loss_dict, loss_unweighted_dict = model.compute_loss()
            # Backward pass and optimization step
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()
            train_loss_meter['total_loss'].update(total_loss.item())
            for key in loss_unweighted_dict.keys():
                train_loss_meter[key].update(loss_unweighted_dict[key])

        if generator.index - last_generator_index > cfg.print_freq:
            ep = time.time() - since_train
            losses_str = ' '.join([f'{x}: {y.avg:.3f} ({y.val:.3f})' for x, y in train_loss_meter.items()])
            logging(args.cfg, epoch, cfg.num_epochs, generator.index, generator.num_total_samples, ep, seq, frame, losses_str, log)
            for name, meter in train_loss_meter.items():
                tb_logger.add_scalar('model_' + name, meter.avg, tb_ind)
            tb_ind += 1
            last_generator_index = generator.index

    # per-epoch LR decay and any per-epoch annealing the model defines
    scheduler.step()
    model.step_annealer()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', default=None)
    parser.add_argument('--start_epoch', type=int, default=0)
    parser.add_argument('--tmp', action='store_true', default=False)
    parser.add_argument('--gpu', type=int, default=0)
    args = parser.parse_args()

    """ setup """
    cfg = Config(args.cfg, args.tmp, create_dirs=True)
    prepare_seed(cfg.seed)
    torch.set_default_dtype(torch.float32)
    device = torch.device('cuda', index=args.gpu) if torch.cuda.is_available() else torch.device('cpu')
    if torch.cuda.is_available():
        torch.cuda.set_device(args.gpu)
    time_str = get_timestring()

    log = open(os.path.join(cfg.log_dir, 'log.txt'), 'a+')
    print_log("time str: {}".format(time_str), log)
    print_log("python version : {}".format(sys.version.replace('\n', ' ')), log)
    print_log("torch version : {}".format(torch.__version__), log)
    print_log("cudnn version : {}".format(torch.backends.cudnn.version()), log)
    tb_logger = SummaryWriter(cfg.tb_dir)
    tb_ind = 0

    # Data generator for the train phase, with the train split
    generator = data_generator(cfg, log, split='train', phase='training')
    # Get the model
    model_id = cfg.get('model_id', 'agentformer')
    model = model_dict[model_id](cfg)
    # Get the optimizer and scheduler
    optimizer = optim.Adam(model.parameters(), lr=cfg.lr)
    scheduler_type = cfg.get('lr_scheduler', 'linear')
    if scheduler_type == 'linear':
        scheduler = get_scheduler(optimizer, policy='lambda', nepoch_fix=cfg.lr_fix_epochs, nepoch=cfg.num_epochs)
    elif scheduler_type == 'step':
        scheduler = get_scheduler(optimizer, policy='step', decay_step=cfg.decay_step, decay_gamma=cfg.decay_gamma)
    else:
        raise ValueError('unknown scheduler type!')
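
    # (In upstream AgentFormer's torch helpers, the 'lambda' policy holds the
    # learning rate fixed for lr_fix_epochs epochs and then decays it linearly
    # to zero by num_epochs, while 'step' multiplies it by decay_gamma every
    # decay_step epochs; this fork is assumed to keep that behavior.)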

    if args.start_epoch > 0:
        cp_path = cfg.model_path % args.start_epoch
        print_log(f'loading model from checkpoint: {cp_path}', log)
        model_cp = torch.load(cp_path, map_location='cpu')
        model.load_state_dict(model_cp['model_dict'])
        if 'opt_dict' in model_cp:
            optimizer.load_state_dict(model_cp['opt_dict'])
        if 'scheduler_dict' in model_cp:
            scheduler.load_state_dict(model_cp['scheduler_dict'])
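    # Note: optimizer and scheduler states are restored only when present in
    # the checkpoint, so a checkpoint containing just 'model_dict' still loads
    # (only the model weights are resumed in that case).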
""" start training """
model.set_device(device)
model.train()
for i in range(args.start_epoch, cfg.num_epochs):
train(i)
""" save model """
if cfg.model_save_freq > 0 and (i + 1) % cfg.model_save_freq == 0:
cp_path = cfg.model_path % (i + 1)
model_cp = {'model_dict': model.state_dict(), 'opt_dict': optimizer.state_dict(), 'scheduler_dict': scheduler.state_dict(), 'epoch': i + 1}
torch.save(model_cp, cp_path)
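
# Resuming: if a checkpoint was written at epoch E (i.e. cfg.model_save_freq
# divides E), training can be restarted from the same config with:
#   python train.py --cfg <config_name> --start_epoch E --gpu 0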