Make project python3 compatible #19

Open · wants to merge 3 commits into master
1 change: 1 addition & 0 deletions requirements.txt
@@ -2,3 +2,4 @@
numpy
scipy
scikit-learn
Cython
10 changes: 5 additions & 5 deletions skbayes/linear_models/precision_inversion_tester.py
@@ -37,22 +37,22 @@ def inversion_checker(X,alpha,beta):
beta = 1000
print('\n Example 1: beta = {0} \n'.format(beta))
S,v1, v2 = inversion_checker(X, alpha, beta)
print "Previous inversion method \n"
print("Previous inversion method \n")
print (v1)
print '\n Current inversion method \n'
print("\n Current inversion method \n")
print (v2)

# large beta case
beta = 1e+16
print('\n Example 2: beta = {0} \n'.format(beta))
S,v1, v2 = inversion_checker(X, alpha, beta)
print "Previous inversion method \n"
print("Previous inversion method \n")
print (v1)
print '\n Current inversion method \n'
print("\n Current inversion method \n")
print (v2)


X = np.random.random([5,5]) + 0.00000001*np.eye(5)
#print np.linalg.inv(X)
#print pinvh(X)
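
For readers skimming this tester, here is a minimal, self-contained Python 3 sketch of the kind of comparison it performs. The posterior-precision formula S = alpha*I + beta*X.T.dot(X), the helper name compare_inversions, and the pairing of np.linalg.inv with scipy's pinvh are illustrative assumptions, not the repository's actual inversion_checker:

# Illustrative sketch only (assumed formula and helper name): compares a
# direct matrix inverse with scipy's symmetric pseudo-inverse pinvh on a
# posterior precision matrix S = alpha*I + beta*X.T.dot(X), which becomes
# poorly conditioned when beta is very large.
import numpy as np
from scipy.linalg import pinvh

def compare_inversions(X, alpha, beta):
    S = alpha * np.eye(X.shape[1]) + beta * np.dot(X.T, X)
    v1 = np.linalg.inv(S)   # plain inverse, can lose accuracy for ill-conditioned S
    v2 = pinvh(S)           # pseudo-inverse specialised for symmetric matrices
    return S, v1, v2

X = np.random.random([10, 5])
for beta in (1000, 1e+16):
    S, v1, v2 = compare_inversions(X, alpha=1.0, beta=beta)
    print('beta = {0}: max |inv - pinvh| = {1}'.format(beta, np.abs(v1 - v2).max()))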


2 changes: 1 addition & 1 deletion skbayes/linear_models/variational_regression.py
@@ -131,7 +131,7 @@ def fit(self,X,y):
# --------- Convergence Check ---------

if self.verbose is True:
print "Iteration {0} is completed".format(i)
print("Iteration {0} is completed".format(i))

# check convergence
converged = self._check_convergence(Mw,Mw_old)
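The change above just converts a Python 2 print statement to the Python 3 print() function. If the module also needs to keep running under Python 2.7, the same effect can be had with a __future__ import at the top of the file; this is a suggestion, not something the PR does:

# Optional compatibility shim: with this import, print() is a function under
# Python 2.7 as well, so calls like the one converted above work on both interpreters.
from __future__ import print_function

print("Iteration {0} is completed".format(0))
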
50 changes: 29 additions & 21 deletions skbayes/rvm_ard_models/fast_rvm.py
@@ -596,7 +596,7 @@ def predict_proba(self,X):
----------
X: array-like of size [n_samples_test,n_features]
Matrix of explanatory variables (test set)

Returns
-------
probs: numpy array of size [n_samples_test]
@@ -618,7 +618,7 @@ def predict_proba(self,X):
prob = pr / np.reshape(np.sum(pr, axis = 1), (pr.shape[0],1))
return prob


def _predict_proba(self,X,y_hat,sigma):
'''
Calculates predictive distribution
@@ -627,26 +627,34 @@ def _predict_proba(self,X,y_hat,sigma):
ks = 1. / ( 1. + np.pi * var/ 8)**0.5
pr = expit(y_hat * ks)
return pr


def _sparsity_quality(self,X,Xa,y,B,A,Aa,active,Sn):
'''
Calculates sparsity & quality parameters for each feature
'''
XB = X.T*B
YB = y*B
XSX = np.dot(np.dot(Xa,Sn),Xa.T)
bxy = np.dot(XB,y)
Q = bxy - np.dot( np.dot(XB,XSX), YB)
S = np.sum( XB*X.T,1 ) - np.sum( np.dot( XB,XSX )*XB,1 )
qi = np.copy(Q)
si = np.copy(S)
Qa,Sa = Q[active], S[active]
qi[active] = Aa * Qa / (Aa - Sa )
si[active] = Aa * Sa / (Aa - Sa )
return [si,qi,S,Q]



def _sparsity_quality(self, X, Xa, y, B, A, Aa, active, Sn):
'''Calculates sparsity & quality parameters for each feature.'''
XB = X.T*B
XSX = np.dot(Xa, Sn)
XSX = np.dot(XSX, Xa.T)

S = np.dot(XB, XSX)
del XSX

Q = -np.dot(S, y*B)
Q += np.dot(XB, y)

S *= XB
S = -np.sum(S, 1)
S += np.sum(XB*X.T, 1)
del XB

qi = np.copy(Q)
si = np.copy(S)
Qa, Sa = Q[active], S[active]
qi[active] = Aa * Qa / (Aa - Sa)
si[active] = Aa * Sa / (Aa - Sa)

return [si, qi, S, Q]


def _posterior_dist(self,X,y,A,intercept_prior):
'''
Uses Laplace approximation for calculating posterior distribution
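A note on the rewritten _sparsity_quality above: it computes the same sparsity (S) and quality (Q) factors as the original version, but reuses the S buffer for the intermediate product and frees large temporaries with del to lower peak memory. The standalone check below is my own sketch with arbitrarily shaped random inputs (n samples, m candidate features, k active features are assumptions), not code from the PR; it illustrates that the two formulations agree numerically:

# Standalone equivalence check for the old vs. refactored S/Q computation.
import numpy as np

n, m, k = 50, 10, 4
X  = np.random.randn(n, m)          # full design matrix
Xa = X[:, :k]                       # columns of currently active features
y  = np.random.randn(n)
B  = np.random.rand(n) + 0.1        # per-sample noise precision (diagonal of B)
Sn = np.eye(k) * 0.5                # posterior covariance of active weights

# original formulation (single expressions, larger temporaries)
XB    = X.T * B
XSX   = np.dot(np.dot(Xa, Sn), Xa.T)
Q_old = np.dot(XB, y) - np.dot(np.dot(XB, XSX), y * B)
S_old = np.sum(XB * X.T, 1) - np.sum(np.dot(XB, XSX) * XB, 1)

# refactored formulation (mirrors the PR: reuse buffers, delete temporaries early)
XB  = X.T * B
XSX = np.dot(np.dot(Xa, Sn), Xa.T)
S_new = np.dot(XB, XSX)
del XSX
Q_new = -np.dot(S_new, y * B) + np.dot(XB, y)
S_new *= XB
S_new = -np.sum(S_new, 1) + np.sum(XB * X.T, 1)

assert np.allclose(Q_old, Q_new) and np.allclose(S_old, S_new)
print('old and new _sparsity_quality computations agree')
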
4 changes: 2 additions & 2 deletions skbayes/rvm_ard_models/vrvm.py
@@ -155,11 +155,11 @@ def fit(self,X,y):

# print progress report if required
if self.verbose is True:
print "Iteration {0} is completed, lower bound equals {1}".format(i,self.lower_bound[-1])
print("Iteration {0} is completed, lower bound equals {1}".format(i,self.lower_bound[-1]))

if np.sum( abs(Mw - Mw0) > self.tol) == 0 or i == self.n_iter - 1:
if self.verbose is True:
print "Mean Field Approximation completed"
print("Mean Field Approximation completed")
break
Mw0 = Mw
