Merge remote-tracking branch 'upstream/master'
Conflicts: src/Run.py, src/model/perceptron.py
Showing 6 changed files with 265 additions and 33 deletions.
@@ -0,0 +1,113 @@
import numpy as np

from util.activation_functions import Activation


class LogisticLayer():
    """
    A layer of a neural network.

    Parameters
    ----------
    n_in : int
        number of units from the previous layer (or input data)
    n_out : int
        number of units of the current layer (or output)
    activation : string
        activation function of every unit in the layer
    is_classifier_layer : bool
        to do classification or regression

    Attributes
    ----------
    n_in : positive int
        number of units from the previous layer
    n_out : positive int
        number of units of the current layer
    weights : ndarray
        weight matrix
    activation : functional
        activation function
    activation_string : string
        the name of the activation function
    is_classifier_layer : bool
        to do classification or regression
    deltas : ndarray
        partial derivatives
    size : positive int
        number of units in the current layer
    shape : tuple
        shape of the layer, which is also the shape of the weight matrix
    """

    def __init__(self, n_in, n_out, weights=None,
                 activation='sigmoid', is_classifier_layer=False):

        # Get activation function from string
        self.activation_string = activation
        self.activation = Activation.get_activation(self.activation_string)

        self.n_in = n_in
        self.n_out = n_out

        self.inp = np.ndarray((n_in + 1, 1))
        self.inp[0] = 1
        self.outp = np.ndarray((n_out, 1))
        self.deltas = np.zeros((n_out, 1))

        # You can have better initialization here
        if weights is None:
            self.weights = np.random.rand(n_in, n_out) / 10
        else:
            self.weights = weights

        self.is_classifier_layer = is_classifier_layer

        # Some handy properties of the layer
        self.size = self.n_out
        self.shape = self.weights.shape

    def forward(self, inp):
        """
        Compute the forward step over the input using the layer's weights.

        Parameters
        ----------
        inp : ndarray
            a numpy array (1, n_in + 1) containing the input of the layer

        Changes
        -------
        outp : ndarray
            a numpy array (1, n_out) containing the output of the layer
        """

        # Here you have to implement the forward pass
        pass

    def computeDerivative(self, nextDerivatives, nextWeights):
        """
        Compute the derivatives (backward step).

        Parameters
        ----------
        nextDerivatives : ndarray
            a numpy array containing the derivatives from the next layer
        nextWeights : ndarray
            a numpy array containing the weights from the next layer

        Changes
        -------
        deltas : ndarray
            a numpy array containing the partial derivatives of this layer
        """

        # Here the implementation of the partial derivative calculation
        pass

    def updateWeights(self):
        """
        Update the weights of the layer.
        """

        # Here the implementation of the weight updating mechanism
        pass

    def _fire(self, inp):
        return Activation.sigmoid(np.dot(np.array(inp), self.weights))
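The forward, computeDerivative and updateWeights stubs above are left to be implemented. For orientation only, here is a minimal, self-contained numpy sketch of the computations they correspond to, not the repository's solution. It assumes a sigmoid activation, a weight matrix of shape (n_in + 1, n_out) that absorbs the bias carried in inp[0], a next layer whose weights are passed without their bias row, and a hypothetical learning rate; none of these choices is fixed by the skeleton.

import numpy as np

rng = np.random.default_rng(0)
n_in, n_out = 4, 3

# Assumption: the bias in inp[0] is absorbed into row 0 of the weight matrix,
# so the weights are (n_in + 1, n_out) rather than (n_in, n_out).
weights = rng.random((n_in + 1, n_out)) / 10

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

# Forward step: net input followed by the sigmoid activation.
inp = np.concatenate(([1.0], rng.random(n_in)))   # bias + n_in features
outp = sigmoid(np.dot(inp, weights))              # shape (n_out,)

# Backward step: the error signal from the layer above, weighted by that
# layer's weights, times the sigmoid derivative expressed through outp.
next_derivatives = rng.random(2)                  # deltas of a 2-unit next layer
next_weights = rng.random((n_out, 2))             # its weights, bias row omitted
deltas = outp * (1 - outp) * np.dot(next_weights, next_derivatives)

# Weight update: a plain gradient step with a hypothetical learning rate.
learning_rate = 0.01
weights += learning_rate * np.outer(inp, deltas)

Inside the class, the same steps would store inp and outp on the layer, write the result of the backward step into self.deltas, and move self.weights in updateWeights.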
@@ -0,0 +1,93 @@
# -*- coding: utf-8 -*-

import sys
import logging

import numpy as np

from util.activation_functions import Activation
from model.classifier import Classifier

logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                    level=logging.DEBUG,
                    stream=sys.stdout)


class LogisticRegression(Classifier):
    """
    A digit-7 recognizer based on the logistic regression algorithm.

    Parameters
    ----------
    train : list
    valid : list
    test : list
    learningRate : float
    epochs : positive int

    Attributes
    ----------
    trainingSet : list
    validationSet : list
    testSet : list
    weight : list
    learningRate : float
    epochs : positive int
    """

    def __init__(self, train, valid, test, learningRate=0.01, epochs=50):

        self.learningRate = learningRate
        self.epochs = epochs

        self.trainingSet = train
        self.validationSet = valid
        self.testSet = test

    def train(self, verbose=True):
        """Train the logistic regression classifier.

        Parameters
        ----------
        verbose : boolean
            Print logging messages with the validation accuracy if verbose is True.
        """

        # Here you have to implement the training method "epochs" times
        # Please use the LogisticLayer class
        pass

    def classify(self, testInstance):
        """Classify a single instance.

        Parameters
        ----------
        testInstance : list of floats

        Returns
        -------
        bool :
            True if the testInstance is recognized as a 7, False otherwise.
        """

        # Here you have to implement the classification method given an instance
        pass

    def evaluate(self, test=None):
        """Evaluate a whole dataset.

        Parameters
        ----------
        test : the dataset to be classified
            if no test data is given, the test set associated with the classifier is used

        Returns
        -------
        list :
            List of classification decisions for the dataset's entries.
        """
        if test is None:
            test = self.testSet.input
        # Once you can classify an instance, just use map for the whole test
        # set.
        return list(map(self.classify, test))
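The train and classify methods, and the use of LogisticLayer, are likewise left open. As a rough sketch only, the same model can be written with plain numpy as stochastic gradient descent on a single weight vector with a sigmoid output thresholded at 0.5. The data shapes, the label encoding (1 for a seven, 0 otherwise) and the absence of a bias term are assumptions made for illustration; the intended solution routes the computation through LogisticLayer instead.

import numpy as np

rng = np.random.default_rng(0)

# Hypothetical stand-ins for the data the classifier receives: each instance
# is a list of floats, each label is 1 for a "7" and 0 otherwise.
inputs = rng.random((100, 784))
labels = rng.integers(0, 2, size=100)

learningRate, epochs = 0.01, 50
weight = np.zeros(inputs.shape[1])

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

# Training: one stochastic gradient step per training instance, "epochs" times.
for _ in range(epochs):
    for x, t in zip(inputs, labels):
        output = sigmoid(np.dot(weight, x))
        weight += learningRate * (t - output) * x

# Classification: threshold the sigmoid output at 0.5.
def classify(instance):
    return sigmoid(np.dot(weight, np.asarray(instance))) > 0.5

# Evaluation mirrors the evaluate method above: map classify over the set.
predictions = list(map(classify, inputs))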