sem_opt_classic_old.py
from sem_model import SEMData, SEMModel
import numpy as np
from scipy.optimize import minimize
from functools import partial
from scipy.stats import multivariate_normal, norm
import random
from sem_opt_abc import SEMOptABC


class SEMOptClassic(SEMOptABC):

    def __init__(self, mod: SEMModel, data: SEMData, estimator, regularization=None):
        """
        :param mod: SEM model specification
        :param data: dataset the model is fitted to
        :param estimator: name of the loss function to use (a key of loss_functions())
        :param regularization: optional regularisation (applied to the 'Beta' parameters in optimize)
        """
        super().__init__(mod, data, estimator, regularization)

        # Loss function and its additional parameters
        self.loss_func = self.get_loss_function(estimator)

    @staticmethod
    def loss_functions() -> dict:
        """
        Create the dictionary of possible loss functions, keyed by estimator name
        :return: dict mapping estimator names to loss-function callables
        """
        tmp_dict = dict()
        tmp_dict['ULS'] = SEMOptClassic.unweighted_least_squares
        tmp_dict['GLS'] = SEMOptClassic.general_least_squares
        tmp_dict['WLS'] = SEMOptClassic.general_least_squares
        tmp_dict['MLW'] = SEMOptClassic.ml_wishart
        tmp_dict['MLN'] = SEMOptClassic.ml_normal
        tmp_dict['Naive'] = SEMOptClassic.naive_loss
        return tmp_dict
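
    # Illustrative lookup (assumption: SEMOptABC.get_loss_function, defined in the
    # base class rather than here, selects the callable from this dictionary by
    # estimator name):
    #
    #     SEMOptClassic.loss_functions()['ULS']   # -> unweighted_least_squares
    #
    # Note that 'GLS' and 'WLS' currently map to the same implementation.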

    def optimize(self, opt_method='SLSQP', bounds=None, alpha=0):
        """
        :param opt_method: scipy.optimize.minimize method name
        :param bounds: parameter bounds (not used; self.param_bounds is passed to the optimiser instead)
        :param alpha: weight of the regularisation term
        :return: loss at the initial parameters and loss at the optimised parameters
        """
        def func_to_min(params):
            """ Sum of the loss function and the regularisation term """
            p_beta = [p for i, p in enumerate(params)
                      if self.param_pos[i][0] == 'Beta']
            return self.loss_func(self, params) + \
                alpha * self.regularization(p_beta)

        # Specify initial parameters and the function to minimise
        params_init = self.params

        # Minimisation
        options = {'maxiter': 1e3}
        cons = ({'type': 'ineq', 'fun': lambda p: self.constraint_all(p)})
        res = minimize(func_to_min, params_init,
                       constraints=cons,
                       method=opt_method, options=options,
                       bounds=self.param_bounds)

        # Save results
        self.params = res.x
        return func_to_min(params_init), func_to_min(self.params)
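
    # Usage sketch (hedged: SEMModel/SEMData construction is not shown in this file,
    # so the first two calls below are illustrative assumptions, not the actual API):
    #
    #     mod = SEMModel(...)                   # hypothetical construction
    #     data = SEMData(mod, ...)              # hypothetical construction
    #     opt = SEMOptClassic(mod, data, 'MLW')
    #     loss_start, loss_end = opt.optimize(opt_method='SLSQP', alpha=0)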

    def unweighted_least_squares(self, params):
        m_sigma = self.calculate_sigma(params)
        m_cov = self.m_cov
        t = m_sigma - m_cov
        loss = np.trace(np.matmul(t, t.T))
        print(loss)
        return loss
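
    # The ULS loss computed above is tr[(m_sigma - m_cov)(m_sigma - m_cov)^T], i.e.
    # the squared Frobenius norm of the difference between the model-implied
    # covariance m_sigma and the observed covariance m_cov. The print(loss) call
    # appears to be leftover debug output.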

    def naive_loss(self, params):
        m_sigma = self.calculate_sigma(params)
        m_cov = self.m_cov
        t = m_cov - m_sigma
        return np.linalg.norm(t)
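
    # np.linalg.norm of a 2-D array defaults to the Frobenius norm, so the 'Naive'
    # loss above is ||m_cov - m_sigma||_F, the square root of the ULS loss.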

    def general_least_squares(self, params):
        m_sigma = self.calculate_sigma(params)
        m_cov = self.m_cov
        w = np.linalg.inv(m_cov)
        t = (m_cov - m_sigma) @ w
        loss = np.trace(np.matmul(t, t.T))
        return loss
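
    # As coded, the GLS loss is tr(T T^T) with T = (m_cov - m_sigma) @ inv(m_cov):
    # the covariance residual weighted by the inverse of the observed covariance.
    # Textbook GLS fit functions usually carry an extra 1/2 factor, which does not
    # change the location of the minimum.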

    def weighted_least_squares(self, params, weights):
        m_sigma = self.calculate_sigma(params)
        m_cov = self.m_cov
        t = m_sigma - m_cov
        w = np.linalg.inv(weights.T * weights)
        return np.trace(np.matmul(np.matmul(t, w), t.T))
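
    # Note: `weights.T * weights` is an element-wise product, not a matrix product;
    # if `weights` is intended to be a weight matrix, `weights.T @ weights` may have
    # been meant. This method is also not registered in loss_functions() above
    # ('WLS' maps to general_least_squares instead).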

    def ml_normal(self, params, alpha=0.01):
        """
        Maximum likelihood under a multivariate normal distribution
        :param params: current parameter vector
        :param alpha: not used in the body of this method
        :return: loss value (negative log-likelihood ratio)
        """
        m_sigma = self.calculate_sigma(params)
        m_cov = self.m_cov

        # TODO: needs to be removed - a kind of regularisation
        if self.constraint_sigma(params) < 0:
            return 10 ** 20

        m_profiles = self.m_profiles
        log_likelihood_sigma = self.ml_norm_log_likelihood(m_sigma, m_profiles)
        log_likelihood_cov = self.ml_norm_log_likelihood(m_cov, m_profiles)
        loss = -(log_likelihood_sigma - log_likelihood_cov)

        # TODO: Strange moment
        if loss < 0:
            return self.min_loss

        # Remember the best loss_func value and the parameters that produced it
        if (loss < self.min_loss) and (loss > 0):
            self.min_loss = loss
            self.min_params = params

        return loss
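
    # The ml_normal loss above is the (negated) difference of two multivariate-normal
    # log-likelihoods of the observed profiles: one under the model-implied covariance
    # m_sigma and one under the observed covariance m_cov. When m_cov is the
    # maximum-likelihood sample covariance this difference is non-negative, which is
    # presumably why negative values are treated as numerical artefacts and replaced
    # with the best loss seen so far.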