-- run.lua (forked from e-lab/clustering-learning)
----------------------------------------------------------------------
-- Author: Eugenio Culurciello, Aysegul Dundar
-- This script trains a 2-layer unsupervised network with clustering learning
-- This code was used for the paper: http://arxiv.org/abs/1306.0152
----------------------------------------------------------------------
require 'pl'
require 'image'
require 'nnx'
require 'optim'
require 'trainLayer' -- functions for Clustering Learning on video
require 'whiten'
require 'unsup' -- standard kmeans
-- Title ---------------------------------------------------------------------
print [[
********************************************************************************
>>>>>>>>>>>>>>>>>> Clustering learning on multiple datasets <<<<<<<<<<<<<<<<<<<<
********************************************************************************
]]
----------------------------------------------------------------------
print '==> processing options'
opt = lapp[[
-r,--learningRate (default 1e-3) learning rate
-d,--learningRateDecay (default 1e-7) learning rate decay (in # samples)
-w,--weightDecay (default 0) L2 penalty on the weights
-m,--momentum (default 0) momentum
-b,--batchsize (default 1000) batch size for k-means
-t,--threads (default 8) number of threads
-s,--size (default extra) dataset: small or full or extra
-o,--save (default results) save directory
-c,--colorbypass (default true) subsample the input and feed it directly to the classifier
-a,--nsamples (default 40000) samples for the kmeans
-l,--plot (default true) plot training/testing curves
-e,--niter (default 15) number of iterations for k-means
-n,--loss (default nll) type of loss function to minimize: nll | mse | margin
--whitening (default true) whitening applied to first layer
-f,--dataset (default cifar) dataset: cifar or svhn
-v,--verbose (default true) display information and print stuff
]]
opt.initstd = 0.1
opt.batchSize = 1 -- mini batch for the stochastic gradient
if opt.whitening == 'false' then opt.whitening = false end -- lapp returns the option as a string, and the string 'false' is truthy in Lua
torch.setdefaulttensortype('torch.FloatTensor')
opt.threads = tonumber(opt.threads)
if opt.threads > 1 then
torch.setnumthreads(opt.threads)
print('<trainer> using ' .. opt.threads .. ' threads')
end
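-- Example invocation (assuming the Torch7 'th' launcher and that the dataset
-- scripts used below sit next to this file):
--   th run.lua --dataset svhn --whitening false --nsamples 20000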
----------------------------------------------------------------------
-- loading and processing dataset:
if opt.dataset=='cifar' then
dofile '1_data_cifar.lua'
else
dofile '1_data_svhn.lua'
end
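-- Note: 1_data_cifar.lua / 1_data_svhn.lua are assumed to define the globals
-- used below: trainData/testData (tables with a .data tensor of images and a
-- .labels tensor) and the sample counts trsize/tesize used when reshaping the
-- final features.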
-- input image dataset params:
ivch = trainData.data[1]:size(1) -- channels
----------------------------------------------------------------------
print '==> generating CL unsupervised network:'
-- compute network CL train time
time = sys.clock()
----------------------------------------------------------------------
-- 1st layer network
opt.model = '1st-layer'
dofile '2_model.lua'
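-- Note: 2_model.lua is assumed to define the layer hyper-parameters referenced
-- below (nk1/nk2: number of kernels, is1/is2: kernel sizes, fanin, feat_group,
-- ss3: bypass pooling size) and to build the global 'model'.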
----------------------------------------------------------------------
print '==> generating filters for layer 1:'
kernels1, counts1, M, P = trainLayer(1, trainData.data, opt.nsamples, nk1, is1, opt.verbose)
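-- A minimal sketch of what trainLayer (from trainLayer.lua) is assumed to do:
-- sample opt.nsamples random is1 x is1 patches from the training images and
-- cluster them with standard k-means from the unsup package. Names here
-- (patches, img) are illustrative only.
--
--   local patches = torch.Tensor(opt.nsamples, ivch*is1*is1)
--   for i = 1, opt.nsamples do
--      local img = trainData.data[math.random(trainData.data:size(1))]
--      local x = math.random(img:size(3) - is1 + 1)
--      local y = math.random(img:size(2) - is1 + 1)
--      patches[i] = img[{ {}, {y, y+is1-1}, {x, x+is1-1} }]:reshape(ivch*is1*is1)
--   end
--   local kernels = unsup.kmeans(patches, nk1, opt.niter, opt.batchsize)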
-- set up the network and load the learned kernels:
model.modules[1].bias:zero() -- start with zero bias
model.modules[1].weight = kernels1:reshape(nk1, ivch, is1, is1)
----------------------------------------------------------------------
print '==> process dataset through 1st layer:'
if opt.whitening then
trainData2, testData2 = whitenprocessLayer(model, trainData.data, testData.data, M, P, opt.verbose)
else
trainData2, testData2 = processLayer(model, trainData.data, testData.data, opt.verbose)
end
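-- Note: M and P returned by trainLayer are assumed to be the patch mean and
-- the (Z)CA whitening transform, so that a patch x is whitened as (x - M) * P
-- before convolution; whitenprocessLayer applies the same transform to the data.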
----------------------------------------------------------------------
print '==> Computing connection tables based on co-occurrence of features and generating filters'
cTable2, kernels2 = createCoCnx(2, trainData2, nk1, feat_group, fanin, opt.nsamples, is2, opt.verbose)
nk2 = cTable2:max()
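-- Note: cTable2 is assumed to be a connection table in the
-- nn.SpatialConvolutionMap format, an nConnections x 2 tensor of
-- {input feature, output feature} pairs, so its maximum entry gives the
-- number of layer-2 output feature maps (nk2).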
----------------------------------------------------------------------
-- 2nd layer
opt.model = '2nd-layer'
dofile '2_model.lua'
-- set up the network and load the learned kernels:
model.modules[1].bias:zero() -- start with zero bias
model.modules[1].weight = kernels2:reshape(kernels2:size(1), is2, is2)
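-- Note: with a map-based convolution each row of cTable2 has its own
-- is2 x is2 kernel, which is why the weights are nConnections x is2 x is2
-- here rather than nOutput x nInput x is2 x is2 as in a fully connected
-- convolution.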
----------------------------------------------------------------------
print '==> process dataset through 2nd layer:'
trainData2, testData2 = processLayer(model, trainData2, testData2, opt.verbose)
----------------------------------------------------------------------
-- compute network CL training time
time = sys.clock() - time
print("<net> time to CL train network = " .. (time*1000) .. 'ms')
-- colorbypass: downsample the raw input and feed it to the classifier alongside the learned features
model = nn.Sequential()
model:add(nn.SpatialDownSampling(ss3,ss3,ss3,ss3))
trainData3, testData3 = processLayer(model, trainData.data, testData.data, opt.verbose)
l1netoutsize = testData2:size(2)*testData2:size(3)*testData2:size(4)
cdatasize = trainData3:size(2)*trainData3:size(3)*trainData3:size(4)
-- concatenate final network output
trainData.data = torch.cat(trainData3:reshape(trsize, cdatasize),
                           trainData2:reshape(trsize, l1netoutsize)):float()
testData.data = torch.cat(testData3:reshape(tesize, cdatasize),
                          testData2:reshape(tesize, l1netoutsize)):float()
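-- Optional sanity check: torch.cat with no dimension argument concatenates
-- 2D tensors along their last (feature) dimension, so each sample should now
-- have cdatasize + l1netoutsize features.
assert(trainData.data:size(2) == cdatasize + l1netoutsize)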
----------------------------------------------------------------------
-- classifier for train/test:
----------------------------------------------------------------------
print "==> creating classifier"
opt.model = '2mlp-classifier'
dofile '2_model.lua'
dofile '3_loss.lua'
dofile '4_train.lua'
dofile '5_test.lua'
----------------------------------------------------------------------
print "==> training classifier"
while true do
train()
test()
end