Skip to content

Commit

Permalink
manage imports
Browse files Browse the repository at this point in the history
  • Loading branch information
santiaago committed Mar 22, 2013
1 parent 6e577d0 commit 7231623
Show file tree
Hide file tree
Showing 8 changed files with 55 additions and 64 deletions.
15 changes: 7 additions & 8 deletions hw1.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@

'''The Perceptron Learning Algorithm
In this problem, you will create your own target function f and data set D to see
how the Perceptron Learning Algorithm works. Take d = 2 so you can visualize the
Expand All @@ -12,26 +11,26 @@

'''Part 1
Take N = 10. Run the Perceptron Learning Algorithm to find g and measure
the dierence between f and g as Pr(f(x) =6 g(x)) (you can either calculate
this exactly, or approximate it by generating a suciently large separate set of
the difference between f and g as Pr(f(x) != g(x)) (you can either calculate
this exactly, or approximate it by generating a sufficiently large separate set of
points to evaluate it). Repeat the experiment for 1000 runs (as specified above)
and take the average. Start the PLA with the weight vector w being all zeros,
and at each iteration have the algorithm choose a point randomly from the set
of misclassified points.
How many iterations does it take on average for the PLA to converge for N = 10
training points? Pick the value closest to your results (again, closest is the
answer that makes the expression |your answer - given option| closest to 0).'''

from numpy import array

from random import uniform
from random import randint

from tools import build_training_set
from tools import data
from tools import randomline
from tools import target_function
from tools import build_training_set
from tools import sign

from numpy import array

from tools import target_function

def build_misclassified_set(t_set,w):
'''returns a tuple of index of t_set items
Expand Down
51 changes: 24 additions & 27 deletions hw2.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,34 @@
from numpy import array
from numpy import dot
from numpy import sign
from numpy import transpose
from numpy.linalg import pinv # pseudo inverse aka dagger
from numpy.linalg import norm

from random import randint

from hw1 import PLA

from tools import build_training_set
from tools import build_training_set_fmultipleparams
from tools import data
from tools import input_data_matrix
from tools import linear_regression
from tools import pseudo_inverse
from tools import print_avg
from tools import randomline
from tools import sign
from tools import target_function
from tools import target_vector

N_COINS = 1000
N_TOSS = 10
N_EXPERIMENT = 100000
HEAD = 0
TAILS = 1

from random import randint

#--------------------------------------------------
#Hoeffding inequality
#--------------------------------------------------------------------------
#Hoeffding

def hoeffding_inequality():
'''average experiment on N_EXPERIMENT times
Expand Down Expand Up @@ -46,7 +66,6 @@ def hoeffding_inequality():
v1 = 0.500339
vrand = 0.500511
vmin = 0.03751'''


def fractionOfHeads(c):
'fractions of Heads in list c'
Expand Down Expand Up @@ -85,28 +104,6 @@ def flip_coin(n = N_TOSS):
#--------------------------------------------------------------------------
#Linear regression

from tools import data
from tools import randomline
from tools import target_function
from tools import build_training_set
from tools import build_training_set_fmultipleparams
from tools import sign
from tools import print_avg
from tools import linear_regression
from tools import target_vector
from tools import input_data_matrix
from tools import pseudo_inverse

from hw1 import PLA

from numpy import array
from numpy import transpose
from numpy import dot
from numpy import sign
from numpy.linalg import pinv as pinv # pseudo inverse aka dagger
from numpy.linalg import norm


verbose_lr = False

def run_linear_regression(N_samples,N_points):
Expand Down
1 change: 0 additions & 1 deletion hw3.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@

from math import exp

EPSILON = .05
Expand Down
6 changes: 3 additions & 3 deletions hw4.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@

from math import log
from math import sqrt
from math import sin
from math import sqrt
from math import pi

from tools import data_interval
from random import uniform

from tools import data_interval

def generalization_error(dvc,confidence,gen_err, iterations=10):
'from the VC generalization bound and using the Sample Complexity algorithm page:57'
N = 1000 #start with an initial sample of 1000
Expand Down
7 changes: 3 additions & 4 deletions hw5.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,15 @@
from copy import copy

from math import exp
from math import sqrt
from math import log

from random import shuffle
from math import sqrt

from numpy import array
from numpy import dot
from numpy import array
from numpy.linalg import norm

from random import shuffle

from tools import target_random_function
from tools import randomline
from tools import data
Expand Down
13 changes: 6 additions & 7 deletions hw6.py
Original file line number Diff line number Diff line change
@@ -1,18 +1,17 @@

from numpy import array
from numpy import transpose
from numpy import dot
from numpy import sign
from numpy.linalg import pinv as pinv # pseudo inverse aka dagger
from numpy import transpose
from numpy import identity
from numpy.linalg import inv
from numpy.linalg import norm
from numpy import identity
from numpy.linalg import pinv # pseudo inverse aka dagger

from tools import target_vector
from tools import input_data_matrix
from tools import pseudo_inverse
from tools import data_from_file
from tools import input_data_matrix
from tools import linear_regression
from tools import pseudo_inverse
from tools import target_vector

KA = -3
LAMBDA = 10**KA
Expand Down
22 changes: 11 additions & 11 deletions hw7.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,18 @@
from math import sqrt
from math import fabs

from numpy import dot
from numpy import sign
from numpy import array

#cvxopt Python Software for Convex Optimization: http://abel.ee.ucla.edu/cvxopt/
from cvxopt import solvers
from cvxopt import spmatrix
from cvxopt import matrix

from numpy import array
from numpy import dot
from numpy import sign

from math import fabs
from math import sqrt

from hw1 import evaluate_diff_f_g
from hw1 import PLA

from tools import build_training_set
from tools import data
from tools import data_from_file
Expand All @@ -19,9 +22,6 @@
from tools import target_function
from tools import target_vector

from hw1 import evaluate_diff_f_g
from hw1 import PLA

def compute_Eval(wlin, X, y):
'fraction of in sample points which got classified incorrectly from Validation data set'
N = len(y)
Expand All @@ -35,7 +35,7 @@ def compute_Eval(wlin, X, y):
return nEin / (len(vEin) *1.0)

def compute_Eout_from_data(w,t_set_out,N_points):
'number of out-of-sample points misclassifed / total number of out-of-sample points from data'
'number of out-of-sample points misclassified/total number of out-of-sample points from data'

X_matrix = input_data_matrix(t_set_out)
y_vector = target_vector(t_set_out)
Expand Down
4 changes: 1 addition & 3 deletions hw8.py
Original file line number Diff line number Diff line change
Expand Up @@ -283,7 +283,6 @@ def run_rbf_kernel(dTrain,dTest):
def run_reg_linear_reg_one_vs_all(dTrain,dTest):

lda = 1.0

for i in range(0,10):
dTrain_current = getDataOneVsAll(dTrain,i)
t_set = []
Expand Down Expand Up @@ -341,8 +340,7 @@ def run_reg_linear_reg_one_vs_one(dTrain,dTest):
t_setout = transform_t_set(t_setout)
wt,xt,yt = linear_regression(len(t_setout),t_setout,lda1)
print 'For 1 vs 5 with transformation Ein = %s'%(compute_Ein(wtrans,Xtrans,ytrans))
print 'For 1 vs 5 with transformation Eout = %s'%(compute_Ein(wtrans,xt,yt))

print 'For 1 vs 5 with transformation Eout = %s'%(compute_Ein(wtrans,xt,yt))
print '--------------------------------------------------'
print 'lambda is: %s'%(lda2)
# in sample with no transform
Expand Down

0 comments on commit 7231623

Please sign in to comment.