quick-start.py — example training/recording script (128 lines, 3.64 KB)
'''Train CIFAR10 with PyTorch.

Quick-start script: trains a classifier on CIFAR-10 while recording the
training data, periodic checkpoints, and the run configuration through a
project SummaryWriter, so the run can later be visualized.
'''
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
# >>>>>>>>>> Define summary writer
# Project-local writer (not torch.utils.tensorboard) — records datasets,
# checkpoints and config for the visualization step at the bottom.
from writer.summary_writer import SummaryWriter
log_dir = "path/to/content" # User define: directory where run artifacts are written — set before running
writer = SummaryWriter(log_dir)
# <<<<<<<<<< Define summary writer
# Use the GPU when available; all tensors/modules below are moved here.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Data
print('==> Preparing data..')
# Training-time augmentation + per-channel CIFAR-10 normalization stats.
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# Deterministic transform (no augmentation) for evaluation and recording.
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(
root='./data', train=True, download=True, transform=transform_train)
# Same training split but with the test transform — the recorded copy must
# not carry random augmentation.
record_trainset = torchvision.datasets.CIFAR10(
root='./data', train=True, download=True, transform=transform_test)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(
root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(
testset, batch_size=100, shuffle=False, num_workers=2)
# CIFAR-10 class names, index-aligned with the dataset labels.
classes = ('plane', 'car', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck')
# >>>>>>>>>>Record Data
writer.add_training_data(record_trainset) # use test_transform
writer.add_testing_data(testset)
# <<<<<<<<<<Record Data
print('==> Building model..')
# NOTE(review): ResNet18 is not imported anywhere in this file — this is a
# placeholder; the user must import/define their own model before running.
net = ResNet18() # choose your own model
net = net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01,
momentum=0.9, weight_decay=5e-4)
# Training
def train():
    """Run one epoch of SGD over ``trainloader``.

    Uses the module-level ``net``, ``criterion``, ``optimizer`` and
    ``device``; updates ``net``'s parameters in place and returns None.
    """
    net.train()  # enable training mode (dropout / batch-norm statistics)
    # Iterate the loader directly — the original enumerated it and
    # discarded the batch index.
    for inputs, targets in trainloader:
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
prev_id = None  # id of the most recently *recorded* checkpoint (None before the first)
idxs = list(range(len(trainset)))  # record every training-sample index
# Epochs 1..200 inclusive, matching EPOCH_START/EPOCH_END in the config
# below. (The original ``range(1, 200, 1)`` stopped at 199 and never ran
# epoch 200.)
for epoch in range(1, 201):
    train()
    if epoch % 10 == 0:
        # >>>>>>>>>>record checkpoint for every 10 epochs
        writer.add_checkpoint_data(net.state_dict(), idxs, prev_id)
        # Advance prev_id only when a checkpoint is actually written, so
        # each checkpoint links back to the previous checkpoint. The
        # original updated prev_id on every epoch, making it point at an
        # epoch that was never recorded.
        prev_id = epoch
        # <<<<<<<<<<record checkpoint for every 10 epochs
# >>>>>>>>>> Record Config
# Run configuration recorded alongside the checkpoints; EPOCH_* fields
# describe the loop above (epochs 1..200, one checkpoint record period).
config_dict = {
    "SETTING": "normal",
    "CLASSES": classes,
    "GPU": "1",
    "DATASET": "cifar10",
    "EPOCH_START": 1,
    "EPOCH_END": 200,
    "EPOCH_PERIOD": 1,
    "TRAINING": {
        "NET": "resnet18",
        "num_class": 10,
        # CIFAR-10 has 50,000 training and 10,000 test images; the
        # original value 60000 is the MNIST training-set size.
        "train_num": 50000,
        "test_num": 10000,
    },
    # Hyper-parameters for the visualization model (DeepDebugger).
    "VISUALIZATION": {
        "PREPROCESS": 1,
        "BOUNDARY": {
            "B_N_EPOCHS": 0,
            "L_BOUND": 0.5,
        },
        "INIT_NUM": 300,
        "ALPHA": 1,
        "BETA": 1,
        "MAX_HAUSDORFF": 0.33,
        "LAMBDA": 1,
        "S_LAMBDA": 1,
        "ENCODER_DIMS": [512, 256, 256, 256, 2],
        "DECODER_DIMS": [2, 256, 256, 256, 512],
        "N_NEIGHBORS": 15,
        "MAX_EPOCH": 20,
        "S_N_EPOCHS": 5,
        "T_N_EPOCHS": 20,
        "PATIENT": 3,
        "RESOLUTION": 300,
        "VIS_MODEL_NAME": "DeepDebugger",
        "EVALUATION_NAME": "test_evaluation_DeepDebugger"
    }
}
# <<<<<<<<<< Record Config
# >>>>>>>>>> Choose a visualization method to visualize embedding
from singleVis.Strategy import DeepDebugger
# Pass the configuration assembled above. The original passed an undefined
# name ``config`` (NameError at runtime) — the dict is bound to
# ``config_dict``.
dd = DeepDebugger(config_dict)
dd.visualize_embedding()
# <<<<<<<<<< Choose a visualization method to visualize embedding
# Next start server and frontend