-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtrain.py
More file actions
60 lines (52 loc) · 2.69 KB
/
train.py
File metadata and controls
60 lines (52 loc) · 2.69 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
from utils.datasets_utils import get_dataset
from utils.config import load_config
import torch
from models.unet import TrajectoryDenoiser_CondEmbed
from models.training import train_one_epoch
from models.diffusion.ddpm import DDPM
import gc,logging,os
# Training entry point: loads config + data, builds the denoiser and DDPM,
# runs the epoch loop, and checkpoints the best (lowest-loss) model.
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
# Device to use later on: prefer CUDA, fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Only query the CUDA device name when a GPU is actually selected —
# torch.cuda.get_device_name raises on CPU-only machines.
if DEVICE.type == "cuda":
    logging.info("Using device: %s", torch.cuda.get_device_name(DEVICE))
else:
    logging.info("Using device: cpu")
# Load configuation file (conditional model)
# config = load_config("ethucy-conditional-past-social.yaml")
config = load_config("ethucy-conditional-past.yaml")
# Load configuation file (unconditional model)
# config = load_config("ethucy-unconditional.yaml")
# Get the data (batched splits plus scene calibration artifacts).
batched_train_data, batched_val_data, batched_test_data, homography, reference_image = get_dataset(config["dataset"])
# Instantiate the denoiser (UNet-style model with conditional embedding).
denoiser = TrajectoryDenoiser_CondEmbed(num_res_blocks=config["model"]["num_res_blocks"],
                                        base_channels=config["model"]["base_ch"],
                                        base_channels_multiples=config["model"]["base_ch_mult"],
                                        apply_attention=config["model"]["apply_attention"],
                                        dropout_rate=config["model"]["dropout_rate"],
                                        time_multiple=config["model"]["time_emb_mult"],
                                        condition=config["model"]["condition"])
denoiser.to(DEVICE)
# The optimizer (Adam with weight decay)
optimizer = torch.optim.AdamW(denoiser.parameters(), lr=config["train"]["initial_lr"])
# Instantiate the diffusion model
diffusionmodel = DDPM(timesteps=config["diffusion"]["timesteps"])
diffusionmodel.to(DEVICE)
# Training loop: track the best epoch loss seen so far and checkpoint on improvement.
best_loss = float("inf")
for epoch in range(1, config["train"]["epochs"] + 1):
    # Free cached GPU memory and cycle-collect before each epoch to limit peak usage.
    torch.cuda.empty_cache()
    gc.collect()
    # One epoch of training
    epoch_loss = train_one_epoch(denoiser, diffusionmodel, batched_train_data, optimizer, DEVICE,
                                 epoch=epoch, total_epochs=config["train"]["epochs"])
    if epoch_loss < best_loss:
        best_loss = epoch_loss
        # Save best checkpoint (optimizer state included so training can resume).
        checkpoint_dict = {
            "opt": optimizer.state_dict(),
            "model": denoiser.state_dict()
        }
        # Create the save directory if it does not exist (no-op when present).
        os.makedirs(config["model"]["save_dir"], exist_ok=True)
        # os.path.join is robust to save_dir with or without a trailing slash;
        # plain "+" concatenation would fuse the directory and filename otherwise.
        save_path = os.path.join(config["model"]["save_dir"],
                                 config["model"]["model_name"].format(config["model"]["condition"],
                                                                      config["train"]["epochs"]))
        torch.save(checkpoint_dict, save_path)
        del checkpoint_dict