-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: neural_network.lua
More file actions
81 lines (66 loc) · 2.47 KB
/
neural_network.lua
File metadata and controls
81 lines (66 loc) · 2.47 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
require 'torch'
require 'cunn'
require 'nn'
require 'optim'
require 'image'
require 'dataset-mnist'
require 'pl'
require 'paths'
----------------------------------------------------------------------
-- parse command-line options
--
-- NOTE(review): the original declared the short flag "-m" twice (for both
-- --model and --momentum); pl.lapp cannot disambiguate duplicate short
-- flags, so --momentum is long-form only here. All other flags unchanged.
local opt = lapp[[
-s,--save (default "logs") subdirectory to save logs
-n,--network (default "") reload pretrained network
-m,--model (default "convnet") type of model to train: convnet | mlp | linear
-f,--full use the full dataset
-p,--plot plot while training
-o,--optimization (default "SGD") optimization: SGD | LBFGS
-r,--learningRate (default 0.05) learning rate, for SGD only
-b,--batchSize (default 10) batch size
--momentum (default 0) momentum, for SGD only
-i,--maxIter (default 3) maximum nb of iterations per batch, for LBFGS
--coefL1 (default 0) L1 penalty on the weights
--coefL2 (default 0) L2 penalty on the weights
-t,--threads (default 4) number of threads
]]
----------------------------------------------------------------------
-- get/create dataset
--
-- Number of examples to load; the reduced set keeps quick experiments fast.
-- Declared before the branch so both arms assign the same file-local names.
local nbTrainingPatches, nbTestingPatches
if opt.full then
   nbTrainingPatches = 60000
   nbTestingPatches = 10000
else
   nbTrainingPatches = 2000
   nbTestingPatches = 1000
   -- message fixed: the declared flag is -f/--full, not "-full"
   print('<warning> only using 2000 samples to train quickly (use flag --full to use 60000 samples)')
end
-- geometry: width and height of input images
geometry = {32,32}
-- create training set and normalize
-- NOTE(review): `mean` and `std` are nil at this point, so normalizeGlobal
-- presumably computes the statistics from the data itself (and the test set
-- is normalized with its own statistics, not the training set's) -- confirm
-- this is the intended behavior.
trainData = mnist.loadTrainSet(nbTrainingPatches, geometry)
trainData:normalizeGlobal(mean, std)
-- create test set and normalize
testData = mnist.loadTestSet(nbTestingPatches, geometry)
testData:normalizeGlobal(mean, std)
----------------------------------------------------------------------
-- define model to train
-- on the 10-class classification problem
--
-- Output classes: the ten MNIST digit labels (1-indexed).
classes = {'1','2','3','4','5','6','7','8','9','10'}
-- Two-layer MLP: the 32x32 input is flattened to 1024, fed through a
-- hidden layer of 300 tanh units, then a tanh output with one unit per
-- class. nn.Sequential:add() returns the container, so the layers are
-- attached as a single call chain.
model = nn.Sequential()
   :add(nn.Reshape(1024))
   :add(nn.Linear(1024, 300))
   :add(nn.Tanh())
   :add(nn.Linear(300, #classes))
   :add(nn.Tanh())
print('<mnist> using model:')
print(model)
----------------------------------------------------------------------
-- loss function: mean squared error
-- (the model's tanh output layer pairs with MSECriterion; ClassNLLCriterion
-- would require a LogSoftMax output instead)
--
-- NOTE(review): the original built a ClassNLLCriterion and immediately
-- overwrote it with MSECriterion -- the dead assignment is removed, and
-- the stale "negative log-likelihood" comment corrected to match.
criterion = nn.MSECriterion()
-- Plain stochastic gradient descent over the training set.
trainer = nn.StochasticGradient(model, criterion)
-- Honor the -r/--learningRate flag parsed above; the original hard-coded
-- 0.01 here, silently ignoring the option.
trainer.learningRate = opt.learningRate
trainer:train(trainData)