-
Notifications
You must be signed in to change notification settings - Fork 0
/
dca.m
55 lines (52 loc) · 1.89 KB
/
dca.m
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
function M = dca(L, options, lambda, s, X, Y, nn)
%% Distance metric learning using DCA (difference-of-convex algorithm)
% INPUT:
%  L: initial solution (factor of the metric, so that M = L*L')
%  options:
%       MaxFunEvals: maximum number of iterations
%       max_iter: maximum number of iterations of DCA
%       optTol: stopping tolerance on the change of the metric
%       Display: 'off' to suppress per-iteration output
%  lambda: the regularization hyper-parameter
%  s: (s < 1) the parameter for the Ramp loss function
%  X: (d x n) the input examples
%  Y: (n x 1) the class labels
%  nn: number of examples to keep in miss and hit sets
% OUTPUT:
%  M: the Mahalanobis matrix
% =========================================================================
% Created by: Bac Nguyen ([email protected])
% Date : November 6, 2016
% =========================================================================
verbose = ~strcmp(options.Display, 'off');
if verbose
    fprintf('-----------------------------------------------------\n');
    fprintf('%6s %15s %15s %14s\n', '#iter', 'G(x)', 'H(x)', ' F(x)');
    fprintf('-----------------------------------------------------\n');
end

bestObj = Inf;
M       = L*L';

% DCA main loop: minimize F(L) = G(L) - H(L)
for it = 1:options.max_iter
    % refresh the hit/miss neighbor sets in the current metric space
    [hitSet, missSet] = get_hit_mit_sets(L'*X, Y, nn, nn);

    % linearize the concave part (-H) at the current point
    [hVal, hGrad, gVal] = H_func(L, s, X, hitSet, missSet);
    gVal = gVal + lambda*(L(:)'*L(:));

    % keep the best Mahalanobis matrix found so far (smallest F = G - H)
    objVal = gVal - hVal;
    if objVal < bestObj
        M       = L*L';
        bestObj = objVal;
    end

    if verbose
        fprintf('%6d %15.5f %15.5f %14.5f\n', it, gVal, hVal, objVal);
    end

    % minimize the convex majorizer by subgradient descent
    oldL = L;
    L    = subgrad_descent(options, X, Y, L, lambda, hGrad, hitSet, missSet);

    % stop once the metric no longer changes significantly
    if norm(oldL*oldL' - L*L', 'fro') < options.optTol
        break;
    end
end
end