from __future__ import division
import torch
import numpy as np
# from scipy.sparse.linalg import svds
from scipy.optimize import minimize_scalar


def EVBMF(Y, sigma2=None, H=None):
    """Analytical solution to Empirical Variational Bayes Matrix Factorization.

    This function computes the analytical solution to empirical VBMF.
    It is based on the paper and MATLAB code by Nakajima et al.:
    "Global analytic solution of fully-observed variational Bayesian
    matrix factorization."

    Notes
    -----
    If sigma2 is unspecified, it is estimated by minimizing the free energy.
    If H is unspecified, it is set to the smaller of the two sides of the
    input Y.

    Parameters
    ----------
    Y : torch.Tensor
        Input matrix to be factorized. Y has shape (L, M), where L <= M.
    sigma2 : float or None (default=None)
        Variance of the noise on Y.
    H : int or None (default=None)
        Maximum rank of the factorized matrices.

    Returns
    -------
    S : torch.Tensor
        Diagonal matrix of the estimated (shrunken) singular values; its side
        length is the estimated rank of Y. An empty (0, 0) tensor is returned
        when no singular value exceeds the threshold.

    References
    ----------
    .. [1] Nakajima, Shinichi, et al. "Global analytic solution of fully-observed
       variational Bayesian matrix factorization." Journal of Machine Learning
       Research 14.Jan (2013): 1-37.
    .. [2] Nakajima, Shinichi, et al. "Perfect dimensionality recovery by
       variational Bayesian PCA." Advances in Neural Information Processing
       Systems. 2012.
    """
    L, M = Y.shape  # the method assumes L <= M
    if H is None:
        H = L

    alpha = L / M
    tauubar = 2.5129 * np.sqrt(alpha)

    # SVD of the input matrix, truncated to rank H
    U, s, V = torch.svd(Y)
    U = U[:, :H]
    s = s[:H]
    V = V[:, :H].t()

    # Residual energy not captured by the leading H singular values
    residual = 0.
    if H < L:
        residual = torch.sum(Y**2) - torch.sum(s**2)

    # Estimate the noise variance when sigma2 is unspecified, by minimizing
    # the free energy over a bracketing interval for sigma2
    if sigma2 is None:
        xubar = (1 + tauubar) * (1 + alpha / tauubar)
        eH_ub = int(np.min([np.ceil(L / (1 + alpha)) - 1, H])) - 1
        upper_bound = (torch.sum(s**2) + residual) / (L * M)
        lower_bound = np.max([s[eH_ub + 1]**2 / (M * xubar),
                              torch.mean(s[eH_ub + 1:]**2) / M])

        scale = 1.  # /lower_bound
        s = s * np.sqrt(scale)
        residual = residual * scale
        lower_bound = lower_bound * scale
        upper_bound = upper_bound * scale

        sigma2_opt = minimize_scalar(EVBsigma2,
                                     args=(L, M, s, residual, xubar),
                                     bounds=[lower_bound, upper_bound],
                                     method='Bounded')
        sigma2 = sigma2_opt.x

    # Threshold on the singular values (gamma term)
    threshold = np.sqrt(M * sigma2 * (1 + tauubar) * (1 + alpha / tauubar))
    pos = int(torch.sum(s > threshold))
    if pos == 0:
        return torch.zeros(0, 0)  # no singular value exceeds the threshold

    # Formula (15) from [2]: posterior means of the retained singular values
    d = torch.mul(s[:pos] / 2,
                  1 - (L + M) * sigma2 / s[:pos]**2 +
                  torch.sqrt((1 - ((L + M) * sigma2) / s[:pos]**2)**2 -
                             (4 * L * M * sigma2**2) / s[:pos]**4))

    return torch.diag(d)
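

# A hypothetical convenience wrapper, not part of the original code: the EVBMF
# rank estimate is the number of singular values that survive the threshold,
# i.e. the side length of the diagonal matrix returned above. A minimal
# sketch; the name `estimated_rank` is our own.
def estimated_rank(Y, sigma2=None, H=None):
    S = EVBMF(Y, sigma2=sigma2, H=H)
    return S.shape[0]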


def EVBsigma2(sigma2, L, M, s, residual, xubar):
    """Free energy as a function of the noise variance sigma2 (up to additive
    constants); minimized by EVBMF to estimate sigma2."""
    H = len(s)

    alpha = L / M
    x = s**2 / (M * sigma2)

    # Split the scaled squared singular values at the threshold xubar
    z1 = x[x > xubar]
    z2 = x[x <= xubar]
    tau_z1 = tau(z1, alpha)

    term1 = torch.sum(z2 - torch.log(z2))
    term2 = torch.sum(z1 - tau_z1)
    term3 = torch.sum(torch.log((tau_z1 + 1) / z1))
    term4 = alpha * torch.sum(torch.log(tau_z1 / alpha + 1))

    obj = (term1 + term2 + term3 + term4
           + residual / (M * sigma2) + (L - H) * np.log(sigma2))

    return obj


def tau(x, alpha):
    # Larger root of the quadratic t**2 - (x - (1 + alpha)) * t + alpha = 0
    return 0.5 * (x - (1 + alpha) + torch.sqrt((x - (1 + alpha))**2 - 4 * alpha))
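

if __name__ == "__main__":
    # A minimal usage sketch, not part of the original file: build a random
    # rank-5 matrix, add Gaussian noise, and check that EVBMF recovers the
    # rank. The sizes, seed, and noise level are illustrative assumptions.
    torch.manual_seed(0)
    L, M, true_rank = 50, 80, 5
    A = torch.randn(L, true_rank)
    B = torch.randn(true_rank, M)
    Y = torch.mm(A, B) + 0.1 * torch.randn(L, M)  # noisy low-rank, L <= M

    S = EVBMF(Y)
    print("estimated rank:", S.shape[0])  # should be 5 for this well-separated example
    print("shrunken singular values:", torch.diag(S))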