diff --git a/ex3/displayData.m b/ex3/displayData.m
new file mode 100644
index 0000000..160697e
--- /dev/null
+++ b/ex3/displayData.m
@@ -0,0 +1,59 @@
+function [h, display_array] = displayData(X, example_width)
+%DISPLAYDATA Display 2D data in a nice grid
+% [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data
+% stored in X in a nice grid. It returns the figure handle h and the
+% displayed array if requested.
+
+% Set example_width automatically if not passed in
+if ~exist('example_width', 'var') || isempty(example_width)
+    example_width = round(sqrt(size(X, 2)));
+end
+
+% Gray Image
+colormap(gray);
+
+% Compute rows, cols
+[m n] = size(X);
+example_height = (n / example_width);
+
+% Compute number of items to display
+display_rows = floor(sqrt(m));
+display_cols = ceil(m / display_rows);
+
+% Between images padding
+pad = 1;
+
+% Setup blank display
+display_array = - ones(pad + display_rows * (example_height + pad), ...
+                       pad + display_cols * (example_width + pad));
+
+% Copy each example into a patch on the display array
+curr_ex = 1;
+for j = 1:display_rows
+    for i = 1:display_cols
+        if curr_ex > m,
+            break;
+        end
+        % Copy the patch
+
+        % Get the max value of the patch
+        max_val = max(abs(X(curr_ex, :)));
+        display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ...
+                      pad + (i - 1) * (example_width + pad) + (1:example_width)) = ...
+            reshape(X(curr_ex, :), example_height, example_width) / max_val;
+        curr_ex = curr_ex + 1;
+    end
+    if curr_ex > m,
+        break;
+    end
+end
+
+% Display Image
+h = imagesc(display_array, [-1 1]);
+
+% Do not show axis
+axis image off
+
+drawnow;
+
+end
diff --git a/ex3/ex3.m b/ex3/ex3.m
new file mode 100644
index 0000000..da858ca
--- /dev/null
+++ b/ex3/ex3.m
@@ -0,0 +1,69 @@
+%% Machine Learning Online Class - Exercise 3 | Part 1: One-vs-all
+
+% Instructions
+% ------------
+%
+% This file contains code that helps you get started on the
+% exercise. You will need to complete the following functions
+% in this exercise:
+%
+%    lrCostFunction.m (logistic regression cost function)
+%    oneVsAll.m
+%    predictOneVsAll.m
+%    predict.m
+%
+% For this exercise, you will not need to change any code in this file,
+% or any other files other than those mentioned above.
+%
+
+%% Initialization
+clear ; close all; clc
+
+%% Setup the parameters you will use for this part of the exercise
+input_layer_size = 400;   % 20x20 Input Images of Digits
+num_labels = 10;          % 10 labels, from 1 to 10
+                          % (note that we have mapped "0" to label 10)
+
+%% =========== Part 1: Loading and Visualizing Data =============
+% We start the exercise by first loading and visualizing the dataset.
+% You will be working with a dataset that contains handwritten digits.
+%
+
+% Load Training Data
+fprintf('Loading and Visualizing Data ...\n')
+
+load('ex3data1.mat'); % training data stored in arrays X, y
+m = size(X, 1);
+
+% Randomly select 100 data points to display
+rand_indices = randperm(m);
+sel = X(rand_indices(1:100), :);
+
+displayData(sel);
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+%% ============ Part 2: Vectorize Logistic Regression ============
+% In this part of the exercise, you will reuse your logistic regression
+% code from the last exercise. Your task here is to make sure that your
+% regularized logistic regression implementation is vectorized. After
+% that, you will implement one-vs-all classification for the handwritten
+% digit dataset.
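+%
+% As a reference, a minimal (non-authoritative) sketch of the vectorized
+% hypothesis used throughout this part, assuming theta is (n+1) x 1 and X
+% already carries a leading column of ones:
+%
+%     h = sigmoid(X * theta);   % m x 1 vector of predictions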
+%
+
+fprintf('\nTraining One-vs-All Logistic Regression...\n')
+
+lambda = 0.1;
+[all_theta] = oneVsAll(X, y, num_labels, lambda);
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% ================ Part 3: Predict for One-Vs-All ================
+% After ...
+pred = predictOneVsAll(all_theta, X);
+
+fprintf('\nTraining Set Accuracy: %f\n', mean(double(pred == y)) * 100);
+
diff --git a/ex3/ex3_nn.m b/ex3/ex3_nn.m
new file mode 100644
index 0000000..073bc4d
--- /dev/null
+++ b/ex3/ex3_nn.m
@@ -0,0 +1,88 @@
+%% Machine Learning Online Class - Exercise 3 | Part 2: Neural Networks
+
+% Instructions
+% ------------
+%
+% This file contains code that helps you get started on the
+% exercise. You will need to complete the following functions
+% in this exercise:
+%
+%    lrCostFunction.m (logistic regression cost function)
+%    oneVsAll.m
+%    predictOneVsAll.m
+%    predict.m
+%
+% For this exercise, you will not need to change any code in this file,
+% or any other files other than those mentioned above.
+%
+
+%% Initialization
+clear ; close all; clc
+
+%% Setup the parameters you will use for this exercise
+input_layer_size = 400;   % 20x20 Input Images of Digits
+hidden_layer_size = 25;   % 25 hidden units
+num_labels = 10;          % 10 labels, from 1 to 10
+                          % (note that we have mapped "0" to label 10)
+
+%% =========== Part 1: Loading and Visualizing Data =============
+% We start the exercise by first loading and visualizing the dataset.
+% You will be working with a dataset that contains handwritten digits.
+%
+
+% Load Training Data
+fprintf('Loading and Visualizing Data ...\n')
+
+load('ex3data1.mat');
+m = size(X, 1);
+
+% Randomly select 100 data points to display
+sel = randperm(size(X, 1));
+sel = sel(1:100);
+
+displayData(X(sel, :));
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+%% ================ Part 2: Loading Parameters ================
+% In this part of the exercise, we load some pre-initialized
+% neural network parameters.
+
+fprintf('\nLoading Saved Neural Network Parameters ...\n')
+
+% Load the weights into variables Theta1 and Theta2
+load('ex3weights.mat');
+
+%% ================= Part 3: Implement Predict =================
+% After training the neural network, we would like to use it to predict
+% the labels. You will now implement the "predict" function to use the
+% neural network to predict the labels of the training set. This lets
+% you compute the training set accuracy.
+
+pred = predict(Theta1, Theta2, X);
+
+fprintf('\nTraining Set Accuracy: %f\n', mean(double(pred == y)) * 100);
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+% To give you an idea of the network's output, you can also run
+% through the examples one at a time to see what it is predicting.
+
+% Randomly permute examples
+rp = randperm(m);
+
+for i = 1:m
+    % Display
+    fprintf('\nDisplaying Example Image\n');
+    displayData(X(rp(i), :));
+
+    pred = predict(Theta1, Theta2, X(rp(i),:));
+    fprintf('\nNeural Network Prediction: %d (digit %d)\n', pred, mod(pred, 10));
+
+    % Pause
+    fprintf('Program paused. Press enter to continue.\n');
+    pause;
+end
+
diff --git a/ex3/ex3data1.mat b/ex3/ex3data1.mat
new file mode 100644
index 0000000..371bd0c
Binary files /dev/null and b/ex3/ex3data1.mat differ
diff --git a/ex3/ex3weights.mat b/ex3/ex3weights.mat
new file mode 100644
index 0000000..ace2a09
Binary files /dev/null and b/ex3/ex3weights.mat differ
diff --git a/ex3/fmincg.m b/ex3/fmincg.m
new file mode 100644
index 0000000..34bf539
--- /dev/null
+++ b/ex3/fmincg.m
@@ -0,0 +1,175 @@
+function [X, fX, i] = fmincg(f, X, options, P1, P2, P3, P4, P5)
+% Minimize a continuous differentiable multivariate function. Starting point
+% is given by "X" (D by 1), and the function named in the string "f", must
+% return a function value and a vector of partial derivatives. The Polack-
+% Ribiere flavour of conjugate gradients is used to compute search directions,
+% and a line search using quadratic and cubic polynomial approximations and the
+% Wolfe-Powell stopping criteria is used together with the slope ratio method
+% for guessing initial step sizes. Additionally a bunch of checks are made to
+% make sure that exploration is taking place and that extrapolation will not
+% be unboundedly large. The "length" gives the length of the run: if it is
+% positive, it gives the maximum number of line searches, if negative its
+% absolute gives the maximum allowed number of function evaluations. You can
+% (optionally) give "length" a second component, which will indicate the
+% reduction in function value to be expected in the first line-search (defaults
+% to 1.0). The function returns when either its length is up, or if no further
+% progress can be made (ie, we are at a minimum, or so close that due to
+% numerical problems, we cannot get any closer). If the function terminates
+% within a few iterations, it could be an indication that the function value
+% and derivatives are not consistent (ie, there may be a bug in the
+% implementation of your "f" function). The function returns the found
+% solution "X", a vector of function values "fX" indicating the progress made
+% and "i" the number of iterations (line searches or function evaluations,
+% depending on the sign of "length") used.
+%
+% Usage: [X, fX, i] = fmincg(f, X, options, P1, P2, P3, P4, P5)
+%
+% See also: checkgrad
+%
+% Copyright (C) 2001 and 2002 by Carl Edward Rasmussen. Date 2002-02-13
+%
+%
+% (C) Copyright 1999, 2000 & 2001, Carl Edward Rasmussen
+%
+% Permission is granted for anyone to copy, use, or modify these
+% programs and accompanying documents for purposes of research or
+% education, provided this copyright notice is retained, and note is
+% made of any changes that have been made.
+%
+% These programs and documents are distributed without any warranty,
+% express or implied. As the programs were written for research
+% purposes only, they have not been tested to the degree that would be
+% advisable in any important application. All use of these programs is
+% entirely at the user's own risk.
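+%
+% Example usage (a sketch, not part of the original interface notes; costFn
+% stands for any user-supplied handler returning [J, grad], such as the
+% lrCostFunction handle built in oneVsAll.m, and n is the feature count):
+%
+%   options = optimset('GradObj', 'on', 'MaxIter', 50);
+%   theta = fmincg(@(t) costFn(t), zeros(n + 1, 1), options);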
+% +% [ml-class] Changes Made: +% 1) Function name and argument specifications +% 2) Output display +% + +% Read options +if exist('options', 'var') && ~isempty(options) && isfield(options, 'MaxIter') + length = options.MaxIter; +else + length = 100; +end + + +RHO = 0.01; % a bunch of constants for line searches +SIG = 0.5; % RHO and SIG are the constants in the Wolfe-Powell conditions +INT = 0.1; % don't reevaluate within 0.1 of the limit of the current bracket +EXT = 3.0; % extrapolate maximum 3 times the current bracket +MAX = 20; % max 20 function evaluations per line search +RATIO = 100; % maximum allowed slope ratio + +argstr = ['feval(f, X']; % compose string used to call function +for i = 1:(nargin - 3) + argstr = [argstr, ',P', int2str(i)]; +end +argstr = [argstr, ')']; + +if max(size(length)) == 2, red=length(2); length=length(1); else red=1; end +S=['Iteration ']; + +i = 0; % zero the run length counter +ls_failed = 0; % no previous line search has failed +fX = []; +[f1 df1] = eval(argstr); % get function value and gradient +i = i + (length<0); % count epochs?! +s = -df1; % search direction is steepest +d1 = -s'*s; % this is the slope +z1 = red/(1-d1); % initial step is red/(|s|+1) + +while i < abs(length) % while not finished + i = i + (length>0); % count iterations?! + + X0 = X; f0 = f1; df0 = df1; % make a copy of current values + X = X + z1*s; % begin line search + [f2 df2] = eval(argstr); + i = i + (length<0); % count epochs?! + d2 = df2'*s; + f3 = f1; d3 = d1; z3 = -z1; % initialize point 3 equal to point 1 + if length>0, M = MAX; else M = min(MAX, -length-i); end + success = 0; limit = -1; % initialize quanteties + while 1 + while ((f2 > f1+z1*RHO*d1) | (d2 > -SIG*d1)) & (M > 0) + limit = z1; % tighten the bracket + if f2 > f1 + z2 = z3 - (0.5*d3*z3*z3)/(d3*z3+f2-f3); % quadratic fit + else + A = 6*(f2-f3)/z3+3*(d2+d3); % cubic fit + B = 3*(f3-f2)-z3*(d3+2*d2); + z2 = (sqrt(B*B-A*d2*z3*z3)-B)/A; % numerical error possible - ok! + end + if isnan(z2) | isinf(z2) + z2 = z3/2; % if we had a numerical problem then bisect + end + z2 = max(min(z2, INT*z3),(1-INT)*z3); % don't accept too close to limits + z1 = z1 + z2; % update the step + X = X + z2*s; + [f2 df2] = eval(argstr); + M = M - 1; i = i + (length<0); % count epochs?! + d2 = df2'*s; + z3 = z3-z2; % z3 is now relative to the location of z2 + end + if f2 > f1+z1*RHO*d1 | d2 > -SIG*d1 + break; % this is a failure + elseif d2 > SIG*d1 + success = 1; break; % success + elseif M == 0 + break; % failure + end + A = 6*(f2-f3)/z3+3*(d2+d3); % make cubic extrapolation + B = 3*(f3-f2)-z3*(d3+2*d2); + z2 = -d2*z3*z3/(B+sqrt(B*B-A*d2*z3*z3)); % num. error possible - ok! + if ~isreal(z2) | isnan(z2) | isinf(z2) | z2 < 0 % num prob or wrong sign? + if limit < -0.5 % if we have no upper limit + z2 = z1 * (EXT-1); % the extrapolate the maximum amount + else + z2 = (limit-z1)/2; % otherwise bisect + end + elseif (limit > -0.5) & (z2+z1 > limit) % extraplation beyond max? + z2 = (limit-z1)/2; % bisect + elseif (limit < -0.5) & (z2+z1 > z1*EXT) % extrapolation beyond limit + z2 = z1*(EXT-1.0); % set to extrapolation limit + elseif z2 < -z3*INT + z2 = -z3*INT; + elseif (limit > -0.5) & (z2 < (limit-z1)*(1.0-INT)) % too close to limit? + z2 = (limit-z1)*(1.0-INT); + end + f3 = f2; d3 = d2; z3 = -z2; % set point 3 equal to point 2 + z1 = z1 + z2; X = X + z2*s; % update current estimates + [f2 df2] = eval(argstr); + M = M - 1; i = i + (length<0); % count epochs?! 
+ d2 = df2'*s; + end % end of line search + + if success % if line search succeeded + f1 = f2; fX = [fX' f1]'; + fprintf('%s %4i | Cost: %4.6e\r', S, i, f1); + s = (df2'*df2-df1'*df2)/(df1'*df1)*s - df2; % Polack-Ribiere direction + tmp = df1; df1 = df2; df2 = tmp; % swap derivatives + d2 = df1'*s; + if d2 > 0 % new slope must be negative + s = -df1; % otherwise use steepest direction + d2 = -s'*s; + end + z1 = z1 * min(RATIO, d1/(d2-realmin)); % slope ratio but max RATIO + d1 = d2; + ls_failed = 0; % this line search did not fail + else + X = X0; f1 = f0; df1 = df0; % restore point from before failed line search + if ls_failed | i > abs(length) % line search failed twice in a row + break; % or we ran out of time, so we give up + end + tmp = df1; df1 = df2; df2 = tmp; % swap derivatives + s = -df1; % try steepest + d1 = -s'*s; + z1 = 1/(1-d1); + ls_failed = 1; % this line search failed + end + if exist('OCTAVE_VERSION') + fflush(stdout); + end +end +fprintf('\n'); diff --git a/ex3/lrCostFunction.m b/ex3/lrCostFunction.m new file mode 100644 index 0000000..c494b1d --- /dev/null +++ b/ex3/lrCostFunction.m @@ -0,0 +1,58 @@ +function [J, grad] = lrCostFunction(theta, X, y, lambda) +%LRCOSTFUNCTION Compute cost and gradient for logistic regression with +%regularization +% J = LRCOSTFUNCTION(theta, X, y, lambda) computes the cost of using +% theta as the parameter for regularized logistic regression and the +% gradient of the cost w.r.t. to the parameters. + +% Initialize some useful values +m = length(y); % number of training examples + +% You need to return the following variables correctly +J = 0; +grad = zeros(size(theta)); + +% ====================== YOUR CODE HERE ====================== +% Instructions: Compute the cost of a particular choice of theta. +% You should set J to the cost. +% Compute the partial derivatives and set grad to the partial +% derivatives of the cost w.r.t. each parameter in theta +% +% Hint: The computation of the cost function and gradients can be +% efficiently vectorized. For example, consider the computation +% +% sigmoid(X * theta) +% +% Each row of the resulting matrix will contain the value of the +% prediction for that example. You can make use of this to vectorize +% the cost function and gradient computations. 
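+%
+%           For instance, a minimal sketch (one possible form, not the
+%           only acceptable one) of the vectorized regularized cost,
+%           writing h = sigmoid(X * theta):
+%
+%               J = (1/m) * (-y' * log(h) - (1 - y)' * log(1 - h)) ...
+%                   + (lambda/(2*m)) * sum(theta(2:end).^2);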
+%
+% Hint: When computing the gradient of the regularized cost function,
+%       there are many possible vectorized solutions, but one solution
+%       looks like:
+%           grad = (unregularized gradient for logistic regression)
+%           temp = theta;
+%           temp(1) = 0;   % because we don't add anything for j = 0
+%           grad = grad + YOUR_CODE_HERE (using the temp variable)
+%
+
+% Vectorized regularized cost; the bias term theta(1) is excluded from
+% the regularization sum.
+J = (1/m).* ( (-y'*log(sigmoid(X*theta)) - (1 - y)'*log(1-sigmoid(X*theta))) + (lambda/2)*sum(power(theta(2:end),2)));
+
+% Gradient: the bias parameter theta(1) is not regularized.
+grad(1) = (1/m).*((sigmoid(X*theta)-y)'*X(:,1));
+
+grad(2:end) = (1/m).*(((sigmoid(X*theta)-y)'*X(:,2:end))' + lambda*theta(2:end));
+
+% =============================================================
+
+grad = grad(:);
+
+end
diff --git a/ex3/ml_login_data.mat b/ex3/ml_login_data.mat
new file mode 100644
index 0000000..45a59f6
--- /dev/null
+++ b/ex3/ml_login_data.mat
@@ -0,0 +1,11 @@
+# Created by Octave 3.2.4, Sat Oct 20 14:58:09 2012 中国标准时间
+# name: login
+# type: string
+# elements: 1
+# length: 19
+x.wangyan@gmail.com
+# name: password
+# type: string
+# elements: 1
+# length: 10
+CgKSs5ur5p
diff --git a/ex3/oneVsAll.m b/ex3/oneVsAll.m
new file mode 100644
index 0000000..68bfc00
--- /dev/null
+++ b/ex3/oneVsAll.m
@@ -0,0 +1,69 @@
+function [all_theta] = oneVsAll(X, y, num_labels, lambda)
+%ONEVSALL trains multiple logistic regression classifiers and returns all
+%the classifiers in a matrix all_theta, where the i-th row of all_theta
+%corresponds to the classifier for label i
+% [all_theta] = ONEVSALL(X, y, num_labels, lambda) trains num_labels
+% logistic regression classifiers and returns each of these classifiers
+% in a matrix all_theta, where the i-th row of all_theta corresponds
+% to the classifier for label i
+
+% Some useful variables
+m = size(X, 1);
+n = size(X, 2);
+
+% You need to return the following variables correctly
+all_theta = zeros(num_labels, n + 1);
+
+% Add ones to the X data matrix
+X = [ones(m, 1) X];
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: You should complete the following code to train num_labels
+%               logistic regression classifiers with regularization
+%               parameter lambda.
+%
+% Hint: theta(:) will return a column vector.
+%
+% Hint: You can use y == c to obtain a vector of 1's and 0's that tell us
+%       whether the ground truth is true/false for this class.
+%
+% Note: For this assignment, we recommend using fmincg to optimize the cost
+%       function. It is okay to use a for-loop (for c = 1:num_labels) to
+%       loop over the different classes.
+%
+%       fmincg works similarly to fminunc, but is more efficient when we
+%       are dealing with a large number of parameters.
+%
+% Example Code for fmincg:
+%
+%     % Set Initial theta
+%     initial_theta = zeros(n + 1, 1);
+%
+%     % Set options for fminunc
+%     options = optimset('GradObj', 'on', 'MaxIter', 50);
+%
+%     % Run fmincg to obtain the optimal theta
+%     % This function will return theta and the cost
+%     [theta] = ...
+%         fmincg (@(t)(lrCostFunction(t, X, (y == c), lambda)), ...
+%                 initial_theta, options);
+%
+
+initial_theta = zeros(n + 1, 1);
+
+options = optimset('GradObj', 'on', 'MaxIter', 50);
+
+% Train one regularized classifier per class; row c of all_theta holds the
+% parameters of the "class c vs. rest" classifier.
+for c = 1:num_labels
+    [all_theta(c,:), cost] = ...
+        fmincg (@(t)(lrCostFunction(t, X, (y == c), lambda)), ...
+                initial_theta, options);
+end
+
+% =========================================================================
+
+end
diff --git a/ex3/predict.m b/ex3/predict.m
new file mode 100644
index 0000000..f576aaf
--- /dev/null
+++ b/ex3/predict.m
@@ -0,0 +1,39 @@
+function p = predict(Theta1, Theta2, X)
+%PREDICT Predict the label of an input given a trained neural network
+% p = PREDICT(Theta1, Theta2, X) outputs the predicted label of X given the
+% trained weights of a neural network (Theta1, Theta2)
+
+% Useful values
+m = size(X, 1);
+num_labels = size(Theta2, 1);
+
+% You need to return the following variables correctly
+p = zeros(size(X, 1), 1);
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: Complete the following code to make predictions using
+%               your learned neural network. You should set p to a
+%               vector containing labels between 1 and num_labels.
+%
+% Hint: The max function might come in useful. In particular, the max
+%       function can also return the index of the max element, for more
+%       information see 'help max'. If your examples are in rows, then, you
+%       can use max(A, [], 2) to obtain the max for each row.
+%
+X = [ones(m, 1) X];           % add the bias unit to every example
+A = sigmoid(X*Theta1');       % hidden layer activations
+A = [ones(m, 1) A];           % add the bias unit to the hidden layer
+B = sigmoid(A*Theta2');       % output layer activations (m x num_labels)
+[temp, p] = max(B, [], 2);    % prediction = index of the largest output
+
+% =========================================================================
+
+end
diff --git a/ex3/predictOneVsAll.m b/ex3/predictOneVsAll.m
new file mode 100644
index 0000000..18de4cc
--- /dev/null
+++ b/ex3/predictOneVsAll.m
@@ -0,0 +1,42 @@
+function p = predictOneVsAll(all_theta, X)
+%PREDICTONEVSALL Predict the label for a trained one-vs-all classifier. The labels
+%are in the range 1..K, where K = size(all_theta, 1).
+% p = PREDICTONEVSALL(all_theta, X) will return a vector of predictions
+% for each example in the matrix X. Note that X contains the examples in
+% rows. all_theta is a matrix where the i-th row is a trained logistic
+% regression theta vector for the i-th class. You should set p to a vector
+% of values from 1..K (e.g., p = [1; 3; 1; 2] predicts classes 1, 3, 1, 2
+% for 4 examples)
+
+m = size(X, 1);
+num_labels = size(all_theta, 1);
+
+% You need to return the following variables correctly
+p = zeros(size(X, 1), 1);
+
+% Add ones to the X data matrix
+X = [ones(m, 1) X];
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: Complete the following code to make predictions using
+%               your learned logistic regression parameters (one-vs-all).
+%               You should set p to a vector of predictions (from 1 to
+%               num_labels).
+%
+% Hint: This code can be done all vectorized using the max function.
+%       In particular, the max function can also return the index of the
+%       max element, for more information see 'help max'. If your examples
+%       are in rows, then, you can use max(A, [], 2) to obtain the max
+%       for each row.
+%
+
+A = sigmoid(X*all_theta');   % class scores for each example (m x num_labels)
+
+[temp, p] = max(A, [], 2);   % prediction = index of the highest-scoring class
+
+% =========================================================================
+
+end
diff --git a/ex3/sigmoid.m b/ex3/sigmoid.m
new file mode 100644
index 0000000..6deca13
--- /dev/null
+++ b/ex3/sigmoid.m
@@ -0,0 +1,6 @@
+function g = sigmoid(z)
+%SIGMOID Compute sigmoid function
+% g = SIGMOID(z) computes the sigmoid of z.
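+%
+% Example (applies elementwise to scalars, vectors, and matrices):
+%
+%   sigmoid(0)          % 0.5
+%   sigmoid([-2 0 2])   % approx. [0.1192 0.5000 0.8808]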
+ +g = 1.0 ./ (1.0 + exp(-z)); +end diff --git a/ex3/submit.m b/ex3/submit.m new file mode 100644 index 0000000..18d7005 --- /dev/null +++ b/ex3/submit.m @@ -0,0 +1,574 @@ +function submit(partId, webSubmit) +%SUBMIT Submit your code and output to the ml-class servers +% SUBMIT() will connect to the ml-class server and submit your solution + + fprintf('==\n== [ml-class] Submitting Solutions | Programming Exercise %s\n==\n', ... + homework_id()); + if ~exist('partId', 'var') || isempty(partId) + partId = promptPart(); + end + + if ~exist('webSubmit', 'var') || isempty(webSubmit) + webSubmit = 0; % submit directly by default + end + + % Check valid partId + partNames = validParts(); + if ~isValidPartId(partId) + fprintf('!! Invalid homework part selected.\n'); + fprintf('!! Expected an integer from 1 to %d.\n', numel(partNames) + 1); + fprintf('!! Submission Cancelled\n'); + return + end + + if ~exist('ml_login_data.mat','file') + [login password] = loginPrompt(); + save('ml_login_data.mat','login','password'); + else + load('ml_login_data.mat'); + [login password] = quickLogin(login, password); + save('ml_login_data.mat','login','password'); + end + + if isempty(login) + fprintf('!! Submission Cancelled\n'); + return + end + + fprintf('\n== Connecting to ml-class ... '); + if exist('OCTAVE_VERSION') + fflush(stdout); + end + + % Setup submit list + if partId == numel(partNames) + 1 + submitParts = 1:numel(partNames); + else + submitParts = [partId]; + end + + for s = 1:numel(submitParts) + thisPartId = submitParts(s); + if (~webSubmit) % submit directly to server + [login, ch, signature, auxstring] = getChallenge(login, thisPartId); + if isempty(login) || isempty(ch) || isempty(signature) + % Some error occured, error string in first return element. + fprintf('\n!! Error: %s\n\n', login); + return + end + + % Attempt Submission with Challenge + ch_resp = challengeResponse(login, password, ch); + + [result, str] = submitSolution(login, ch_resp, thisPartId, ... + output(thisPartId, auxstring), source(thisPartId), signature); + + partName = partNames{thisPartId}; + + fprintf('\n== [ml-class] Submitted Assignment %s - Part %d - %s\n', ... + homework_id(), thisPartId, partName); + fprintf('== %s\n', strtrim(str)); + + if exist('OCTAVE_VERSION') + fflush(stdout); + end + else + [result] = submitSolutionWeb(login, thisPartId, output(thisPartId), ... + source(thisPartId)); + result = base64encode(result); + + fprintf('\nSave as submission file [submit_ex%s_part%d.txt (enter to accept default)]:', ... + homework_id(), thisPartId); + saveAsFile = input('', 's'); + if (isempty(saveAsFile)) + saveAsFile = sprintf('submit_ex%s_part%d.txt', homework_id(), thisPartId); + end + + fid = fopen(saveAsFile, 'w'); + if (fid) + fwrite(fid, result); + fclose(fid); + fprintf('\nSaved your solutions to %s.\n\n', saveAsFile); + fprintf(['You can now submit your solutions through the web \n' ... + 'form in the programming exercises. Select the corresponding \n' ... + 'programming exercise to access the form.\n']); + + else + fprintf('Unable to save to %s\n\n', saveAsFile); + fprintf(['You can create a submission file by saving the \n' ... + 'following text in a file: (press enter to continue)\n\n']); + pause; + fprintf(result); + end + end + end +end + +% ================== CONFIGURABLES FOR EACH HOMEWORK ================== + +function id = homework_id() + id = '3'; +end + +function [partNames] = validParts() + partNames = { 'Vectorized Logistic Regression ', ... + 'One-vs-all classifier training', ... 
+ 'One-vs-all classifier prediction', ... + 'Neural network prediction function' ... + }; +end + +function srcs = sources() + % Separated by part + srcs = { { 'lrCostFunction.m' }, ... + { 'oneVsAll.m' }, ... + { 'predictOneVsAll.m' }, ... + { 'predict.m' } }; +end + +function out = output(partId, auxdata) + % Random Test Cases + X = [ones(20,1) (exp(1) * sin(1:1:20))' (exp(0.5) * cos(1:1:20))']; + y = sin(X(:,1) + X(:,2)) > 0; + Xm = [ -1 -1 ; -1 -2 ; -2 -1 ; -2 -2 ; ... + 1 1 ; 1 2 ; 2 1 ; 2 2 ; ... + -1 1 ; -1 2 ; -2 1 ; -2 2 ; ... + 1 -1 ; 1 -2 ; -2 -1 ; -2 -2 ]; + ym = [ 1 1 1 1 2 2 2 2 3 3 3 3 4 4 4 4 ]'; + t1 = sin(reshape(1:2:24, 4, 3)); + t2 = cos(reshape(1:2:40, 4, 5)); + + if partId == 1 + [J, grad] = lrCostFunction([0.25 0.5 -0.5]', X, y, 0.1); + out = sprintf('%0.5f ', J); + out = [out sprintf('%0.5f ', grad)]; + elseif partId == 2 + out = sprintf('%0.5f ', oneVsAll(Xm, ym, 4, 0.1)); + elseif partId == 3 + out = sprintf('%0.5f ', predictOneVsAll(t1, Xm)); + elseif partId == 4 + out = sprintf('%0.5f ', predict(t1, t2, Xm)); + end +end + + +% ====================== SERVER CONFIGURATION =========================== + +% ***************** REMOVE -staging WHEN YOU DEPLOY ********************* +function url = site_url() + url = 'http://class.coursera.org/ml-2012-002'; +end + +function url = challenge_url() + url = [site_url() '/assignment/challenge']; +end + +function url = submit_url() + url = [site_url() '/assignment/submit']; +end + +% ========================= CHALLENGE HELPERS ========================= + +function src = source(partId) + src = ''; + src_files = sources(); + if partId <= numel(src_files) + flist = src_files{partId}; + for i = 1:numel(flist) + fid = fopen(flist{i}); + if (fid == -1) + error('Error opening %s (is it missing?)', flist{i}); + end + line = fgets(fid); + while ischar(line) + src = [src line]; + line = fgets(fid); + end + fclose(fid); + src = [src '||||||||']; + end + end +end + +function ret = isValidPartId(partId) + partNames = validParts(); + ret = (~isempty(partId)) && (partId >= 1) && (partId <= numel(partNames) + 1); +end + +function partId = promptPart() + fprintf('== Select which part(s) to submit:\n'); + partNames = validParts(); + srcFiles = sources(); + for i = 1:numel(partNames) + fprintf('== %d) %s [', i, partNames{i}); + fprintf(' %s ', srcFiles{i}{:}); + fprintf(']\n'); + end + fprintf('== %d) All of the above \n==\nEnter your choice [1-%d]: ', ... + numel(partNames) + 1, numel(partNames) + 1); + selPart = input('', 's'); + partId = str2num(selPart); + if ~isValidPartId(partId) + partId = -1; + end +end + +function [email,ch,signature,auxstring] = getChallenge(email, part) + str = urlread(challenge_url(), 'post', {'email_address', email, 'assignment_part_sid', [homework_id() '-' num2str(part)], 'response_encoding', 'delim'}); + + str = strtrim(str); + r = struct; + while(numel(str) > 0) + [f, str] = strtok (str, '|'); + [v, str] = strtok (str, '|'); + r = setfield(r, f, v); + end + + email = getfield(r, 'email_address'); + ch = getfield(r, 'challenge_key'); + signature = getfield(r, 'state'); + auxstring = getfield(r, 'challenge_aux_data'); +end + +function [result, str] = submitSolutionWeb(email, part, output, source) + + result = ['{"assignment_part_sid":"' base64encode([homework_id() '-' num2str(part)], '') '",' ... + '"email_address":"' base64encode(email, '') '",' ... + '"submission":"' base64encode(output, '') '",' ... + '"submission_aux":"' base64encode(source, '') '"' ... 
+ '}']; + str = 'Web-submission'; +end + +function [result, str] = submitSolution(email, ch_resp, part, output, ... + source, signature) + + params = {'assignment_part_sid', [homework_id() '-' num2str(part)], ... + 'email_address', email, ... + 'submission', base64encode(output, ''), ... + 'submission_aux', base64encode(source, ''), ... + 'challenge_response', ch_resp, ... + 'state', signature}; + + str = urlread(submit_url(), 'post', params); + + % Parse str to read for success / failure + result = 0; + +end + +% =========================== LOGIN HELPERS =========================== + +function [login password] = loginPrompt() + % Prompt for password + [login password] = basicPrompt(); + + if isempty(login) || isempty(password) + login = []; password = []; + end +end + + +function [login password] = basicPrompt() + login = input('Login (Email address): ', 's'); + password = input('Password: ', 's'); +end + +function [login password] = quickLogin(login,password) + disp(['You are currently logged in as ' login '.']); + cont_token = input('Is this you? (y/n - type n to reenter password)','s'); + if(isempty(cont_token) || cont_token(1)=='Y'||cont_token(1)=='y') + return; + else + [login password] = loginPrompt(); + end +end + +function [str] = challengeResponse(email, passwd, challenge) + str = sha1([challenge passwd]); +end + +% =============================== SHA-1 ================================ + +function hash = sha1(str) + + % Initialize variables + h0 = uint32(1732584193); + h1 = uint32(4023233417); + h2 = uint32(2562383102); + h3 = uint32(271733878); + h4 = uint32(3285377520); + + % Convert to word array + strlen = numel(str); + + % Break string into chars and append the bit 1 to the message + mC = [double(str) 128]; + mC = [mC zeros(1, 4-mod(numel(mC), 4), 'uint8')]; + + numB = strlen * 8; + if exist('idivide') + numC = idivide(uint32(numB + 65), 512, 'ceil'); + else + numC = ceil(double(numB + 65)/512); + end + numW = numC * 16; + mW = zeros(numW, 1, 'uint32'); + + idx = 1; + for i = 1:4:strlen + 1 + mW(idx) = bitor(bitor(bitor( ... + bitshift(uint32(mC(i)), 24), ... + bitshift(uint32(mC(i+1)), 16)), ... + bitshift(uint32(mC(i+2)), 8)), ... 
+ uint32(mC(i+3))); + idx = idx + 1; + end + + % Append length of message + mW(numW - 1) = uint32(bitshift(uint64(numB), -32)); + mW(numW) = uint32(bitshift(bitshift(uint64(numB), 32), -32)); + + % Process the message in successive 512-bit chs + for cId = 1 : double(numC) + cSt = (cId - 1) * 16 + 1; + cEnd = cId * 16; + ch = mW(cSt : cEnd); + + % Extend the sixteen 32-bit words into eighty 32-bit words + for j = 17 : 80 + ch(j) = ch(j - 3); + ch(j) = bitxor(ch(j), ch(j - 8)); + ch(j) = bitxor(ch(j), ch(j - 14)); + ch(j) = bitxor(ch(j), ch(j - 16)); + ch(j) = bitrotate(ch(j), 1); + end + + % Initialize hash value for this ch + a = h0; + b = h1; + c = h2; + d = h3; + e = h4; + + % Main loop + for i = 1 : 80 + if(i >= 1 && i <= 20) + f = bitor(bitand(b, c), bitand(bitcmp(b), d)); + k = uint32(1518500249); + elseif(i >= 21 && i <= 40) + f = bitxor(bitxor(b, c), d); + k = uint32(1859775393); + elseif(i >= 41 && i <= 60) + f = bitor(bitor(bitand(b, c), bitand(b, d)), bitand(c, d)); + k = uint32(2400959708); + elseif(i >= 61 && i <= 80) + f = bitxor(bitxor(b, c), d); + k = uint32(3395469782); + end + + t = bitrotate(a, 5); + t = bitadd(t, f); + t = bitadd(t, e); + t = bitadd(t, k); + t = bitadd(t, ch(i)); + e = d; + d = c; + c = bitrotate(b, 30); + b = a; + a = t; + + end + h0 = bitadd(h0, a); + h1 = bitadd(h1, b); + h2 = bitadd(h2, c); + h3 = bitadd(h3, d); + h4 = bitadd(h4, e); + + end + + hash = reshape(dec2hex(double([h0 h1 h2 h3 h4]), 8)', [1 40]); + + hash = lower(hash); + +end + +function ret = bitadd(iA, iB) + ret = double(iA) + double(iB); + ret = bitset(ret, 33, 0); + ret = uint32(ret); +end + +function ret = bitrotate(iA, places) + t = bitshift(iA, places - 32); + ret = bitshift(iA, places); + ret = bitor(ret, t); +end + +% =========================== Base64 Encoder ============================ +% Thanks to Peter John Acklam +% + +function y = base64encode(x, eol) +%BASE64ENCODE Perform base64 encoding on a string. +% +% BASE64ENCODE(STR, EOL) encode the given string STR. EOL is the line ending +% sequence to use; it is optional and defaults to '\n' (ASCII decimal 10). +% The returned encoded string is broken into lines of no more than 76 +% characters each, and each line will end with EOL unless it is empty. Let +% EOL be empty if you do not want the encoded string broken into lines. +% +% STR and EOL don't have to be strings (i.e., char arrays). The only +% requirement is that they are vectors containing values in the range 0-255. +% +% This function may be used to encode strings into the Base64 encoding +% specified in RFC 2045 - MIME (Multipurpose Internet Mail Extensions). The +% Base64 encoding is designed to represent arbitrary sequences of octets in a +% form that need not be humanly readable. A 65-character subset +% ([A-Za-z0-9+/=]) of US-ASCII is used, enabling 6 bits to be represented per +% printable character. +% +% Examples +% -------- +% +% If you want to encode a large file, you should encode it in chunks that are +% a multiple of 57 bytes. This ensures that the base64 lines line up and +% that you do not end up with padding in the middle. 57 bytes of data fills +% one complete base64 line (76 == 57*4/3): +% +% If ifid and ofid are two file identifiers opened for reading and writing, +% respectively, then you can base64 encode the data with +% +% while ~feof(ifid) +% fwrite(ofid, base64encode(fread(ifid, 60*57))); +% end +% +% or, if you have enough memory, +% +% fwrite(ofid, base64encode(fread(ifid))); +% +% See also BASE64DECODE. 
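+%
+% Quick sanity check (a sketch; 'TWFu' is the classic RFC test vector for
+% the string 'Man'):
+%
+%   base64encode('Man', '')   % returns 'TWFu'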
+ +% Author: Peter John Acklam +% Time-stamp: 2004-02-03 21:36:56 +0100 +% E-mail: pjacklam@online.no +% URL: http://home.online.no/~pjacklam + + if isnumeric(x) + x = num2str(x); + end + + % make sure we have the EOL value + if nargin < 2 + eol = sprintf('\n'); + else + if sum(size(eol) > 1) > 1 + error('EOL must be a vector.'); + end + if any(eol(:) > 255) + error('EOL can not contain values larger than 255.'); + end + end + + if sum(size(x) > 1) > 1 + error('STR must be a vector.'); + end + + x = uint8(x); + eol = uint8(eol); + + ndbytes = length(x); % number of decoded bytes + nchunks = ceil(ndbytes / 3); % number of chunks/groups + nebytes = 4 * nchunks; % number of encoded bytes + + % add padding if necessary, to make the length of x a multiple of 3 + if rem(ndbytes, 3) + x(end+1 : 3*nchunks) = 0; + end + + x = reshape(x, [3, nchunks]); % reshape the data + y = repmat(uint8(0), 4, nchunks); % for the encoded data + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % Split up every 3 bytes into 4 pieces + % + % aaaaaabb bbbbcccc ccdddddd + % + % to form + % + % 00aaaaaa 00bbbbbb 00cccccc 00dddddd + % + y(1,:) = bitshift(x(1,:), -2); % 6 highest bits of x(1,:) + + y(2,:) = bitshift(bitand(x(1,:), 3), 4); % 2 lowest bits of x(1,:) + y(2,:) = bitor(y(2,:), bitshift(x(2,:), -4)); % 4 highest bits of x(2,:) + + y(3,:) = bitshift(bitand(x(2,:), 15), 2); % 4 lowest bits of x(2,:) + y(3,:) = bitor(y(3,:), bitshift(x(3,:), -6)); % 2 highest bits of x(3,:) + + y(4,:) = bitand(x(3,:), 63); % 6 lowest bits of x(3,:) + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % Now perform the following mapping + % + % 0 - 25 -> A-Z + % 26 - 51 -> a-z + % 52 - 61 -> 0-9 + % 62 -> + + % 63 -> / + % + % We could use a mapping vector like + % + % ['A':'Z', 'a':'z', '0':'9', '+/'] + % + % but that would require an index vector of class double. + % + z = repmat(uint8(0), size(y)); + i = y <= 25; z(i) = 'A' + double(y(i)); + i = 26 <= y & y <= 51; z(i) = 'a' - 26 + double(y(i)); + i = 52 <= y & y <= 61; z(i) = '0' - 52 + double(y(i)); + i = y == 62; z(i) = '+'; + i = y == 63; z(i) = '/'; + y = z; + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % Add padding if necessary. + % + npbytes = 3 * nchunks - ndbytes; % number of padding bytes + if npbytes + y(end-npbytes+1 : end) = '='; % '=' is used for padding + end + + if isempty(eol) + + % reshape to a row vector + y = reshape(y, [1, nebytes]); + + else + + nlines = ceil(nebytes / 76); % number of lines + neolbytes = length(eol); % number of bytes in eol string + + % pad data so it becomes a multiple of 76 elements + y = [y(:) ; zeros(76 * nlines - numel(y), 1)]; + y(nebytes + 1 : 76 * nlines) = 0; + y = reshape(y, 76, nlines); + + % insert eol strings + eol = eol(:); + y(end + 1 : end + neolbytes, :) = eol(:, ones(1, nlines)); + + % remove padding, but keep the last eol string + m = nebytes + neolbytes * (nlines - 1); + n = (76+neolbytes)*nlines - neolbytes; + y(m+1 : n) = ''; + + % extract and reshape to row vector + y = reshape(y, 1, m+neolbytes); + + end + + % output is a character array + y = char(y); + +end diff --git a/ex3/submitWeb.m b/ex3/submitWeb.m new file mode 100644 index 0000000..8611707 --- /dev/null +++ b/ex3/submitWeb.m @@ -0,0 +1,20 @@ +% submitWeb Creates files from your code and output for web submission. +% +% If the submit function does not work for you, use the web-submission mechanism. 
+% Call this function to produce a file for the part you wish to submit. Then,
+% submit the file to the class servers using the "Web Submission" button on the
+% Programming Exercises page on the course website.
+%
+% You should call this function without arguments (submitWeb), to receive
+% an interactive prompt for submission; optionally you can call it with the partID
+% if you so wish. Make sure your working directory is set to the directory
+% containing the submitWeb.m file and your assignment files.
+
+function submitWeb(partId)
+  if ~exist('partId', 'var') || isempty(partId)
+    partId = [];
+  end
+
+  submit(partId, 1);
+end
+
diff --git a/ex4/checkNNGradients.m b/ex4/checkNNGradients.m
new file mode 100644
index 0000000..f9930aa
--- /dev/null
+++ b/ex4/checkNNGradients.m
@@ -0,0 +1,52 @@
+function checkNNGradients(lambda)
+%CHECKNNGRADIENTS Creates a small neural network to check the
+%backpropagation gradients
+% CHECKNNGRADIENTS(lambda) Creates a small neural network to check the
+% backpropagation gradients; it will output the analytical gradients
+% produced by your backprop code and the numerical gradients (computed
+% using computeNumericalGradient). These two gradient computations should
+% result in very similar values.
+%
+
+if ~exist('lambda', 'var') || isempty(lambda)
+    lambda = 0;
+end
+
+input_layer_size = 3;
+hidden_layer_size = 5;
+num_labels = 3;
+m = 5;
+
+% We generate some 'random' test data
+Theta1 = debugInitializeWeights(hidden_layer_size, input_layer_size);
+Theta2 = debugInitializeWeights(num_labels, hidden_layer_size);
+% Reusing debugInitializeWeights to generate X
+X = debugInitializeWeights(m, input_layer_size - 1);
+y = 1 + mod(1:m, num_labels)';
+
+% Unroll parameters
+nn_params = [Theta1(:) ; Theta2(:)];
+
+% Short hand for cost function
+costFunc = @(p) nnCostFunction(p, input_layer_size, hidden_layer_size, ...
+                               num_labels, X, y, lambda);
+
+[cost, grad] = costFunc(nn_params);
+numgrad = computeNumericalGradient(costFunc, nn_params);
+
+% Visually examine the two gradient computations. The two columns
+% you get should be very similar.
+disp([numgrad grad]);
+fprintf(['The above two columns you get should be very similar.\n' ...
+         '(Left-Your Numerical Gradient, Right-Analytical Gradient)\n\n']);
+
+% Evaluate the norm of the difference between two solutions.
+% If you have a correct implementation, and assuming you used EPSILON = 0.0001
+% in computeNumericalGradient.m, then diff below should be less than 1e-9
+diff = norm(numgrad-grad)/norm(numgrad+grad);
+
+fprintf(['If your backpropagation implementation is correct, then \n' ...
+         'the relative difference will be small (less than 1e-9). \n' ...
+         '\nRelative Difference: %g\n'], diff);
+
+end
diff --git a/ex4/computeNumericalGradient.m b/ex4/computeNumericalGradient.m
new file mode 100644
index 0000000..c3abeac
--- /dev/null
+++ b/ex4/computeNumericalGradient.m
@@ -0,0 +1,29 @@
+function numgrad = computeNumericalGradient(J, theta)
+%COMPUTENUMERICALGRADIENT Computes the gradient using "finite differences"
+%and gives us a numerical estimate of the gradient.
+% numgrad = COMPUTENUMERICALGRADIENT(J, theta) computes the numerical
+% gradient of the function J around theta. Calling y = J(theta) should
+% return the function value at theta.
+
+% Notes: The following code implements numerical gradient checking, and
+%        returns the numerical gradient. It sets numgrad(i) to (a numerical
+%        approximation of) the partial derivative of J with respect to the
+%        i-th input argument, evaluated at theta. (i.e., numgrad(i) should
+%        be (approximately) the partial derivative of J with respect
+%        to theta(i).)
+%
+
+numgrad = zeros(size(theta));
+perturb = zeros(size(theta));
+e = 1e-4;
+for p = 1:numel(theta)
+    % Set perturbation vector
+    perturb(p) = e;
+    loss1 = J(theta - perturb);
+    loss2 = J(theta + perturb);
+    % Compute Numerical Gradient
+    numgrad(p) = (loss2 - loss1) / (2*e);
+    perturb(p) = 0;
+end
+
+end
diff --git a/ex4/debugInitializeWeights.m b/ex4/debugInitializeWeights.m
new file mode 100644
index 0000000..a71b5ab
--- /dev/null
+++ b/ex4/debugInitializeWeights.m
@@ -0,0 +1,22 @@
+function W = debugInitializeWeights(fan_out, fan_in)
+%DEBUGINITIALIZEWEIGHTS Initialize the weights of a layer with fan_in
+%incoming connections and fan_out outgoing connections using a fixed
+%strategy; this will help you later in debugging
+% W = DEBUGINITIALIZEWEIGHTS(fan_out, fan_in) initializes the weights
+% of a layer with fan_in incoming connections and fan_out outgoing
+% connections using a fixed set of values
+%
+% Note that W should be set to a matrix of size (fan_out, 1 + fan_in) as
+% the first column of W handles the "bias" terms
+%
+
+% Set W to zeros
+W = zeros(fan_out, 1 + fan_in);
+
+% Initialize W using "sin", this ensures that W is always of the same
+% values and will be useful for debugging
+W = reshape(sin(1:numel(W)), size(W)) / 10;
+
+% =========================================================================
+
+end
diff --git a/ex4/displayData.m b/ex4/displayData.m
new file mode 100644
index 0000000..160697e
--- /dev/null
+++ b/ex4/displayData.m
@@ -0,0 +1,59 @@
+function [h, display_array] = displayData(X, example_width)
+%DISPLAYDATA Display 2D data in a nice grid
+% [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data
+% stored in X in a nice grid. It returns the figure handle h and the
+% displayed array if requested.
+
+% Set example_width automatically if not passed in
+if ~exist('example_width', 'var') || isempty(example_width)
+    example_width = round(sqrt(size(X, 2)));
+end
+
+% Gray Image
+colormap(gray);
+
+% Compute rows, cols
+[m n] = size(X);
+example_height = (n / example_width);
+
+% Compute number of items to display
+display_rows = floor(sqrt(m));
+display_cols = ceil(m / display_rows);
+
+% Between images padding
+pad = 1;
+
+% Setup blank display
+display_array = - ones(pad + display_rows * (example_height + pad), ...
+                       pad + display_cols * (example_width + pad));
+
+% Copy each example into a patch on the display array
+curr_ex = 1;
+for j = 1:display_rows
+    for i = 1:display_cols
+        if curr_ex > m,
+            break;
+        end
+        % Copy the patch
+
+        % Get the max value of the patch
+        max_val = max(abs(X(curr_ex, :)));
+        display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ...
+                      pad + (i - 1) * (example_width + pad) + (1:example_width)) = ...
+            reshape(X(curr_ex, :), example_height, example_width) / max_val;
+        curr_ex = curr_ex + 1;
+    end
+    if curr_ex > m,
+        break;
+    end
+end
+
+% Display Image
+h = imagesc(display_array, [-1 1]);
+
+% Do not show axis
+axis image off
+
+drawnow;
+
+end
diff --git a/ex4/ex4.m b/ex4/ex4.m
new file mode 100644
index 0000000..f7b3fa9
--- /dev/null
+++ b/ex4/ex4.m
@@ -0,0 +1,234 @@
+%% Machine Learning Online Class - Exercise 4 Neural Network Learning
+
+% Instructions
+% ------------
+%
+% This file contains code that helps you get started on the
+% exercise. You will need to complete the following functions
+% in this exercise:
+%
+%    sigmoidGradient.m
+%    randInitializeWeights.m
+%    nnCostFunction.m
+%
+% For this exercise, you will not need to change any code in this file,
+% or any other files other than those mentioned above.
+%
+
+%% Initialization
+clear ; close all; clc
+
+%% Setup the parameters you will use for this exercise
+input_layer_size = 400;   % 20x20 Input Images of Digits
+hidden_layer_size = 25;   % 25 hidden units
+num_labels = 10;          % 10 labels, from 1 to 10
+                          % (note that we have mapped "0" to label 10)
+
+%% =========== Part 1: Loading and Visualizing Data =============
+% We start the exercise by first loading and visualizing the dataset.
+% You will be working with a dataset that contains handwritten digits.
+%
+
+% Load Training Data
+fprintf('Loading and Visualizing Data ...\n')
+
+load('ex4data1.mat');
+m = size(X, 1);
+
+% Randomly select 100 data points to display
+sel = randperm(size(X, 1));
+sel = sel(1:100);
+
+displayData(X(sel, :));
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% ================ Part 2: Loading Parameters ================
+% In this part of the exercise, we load some pre-initialized
+% neural network parameters.
+
+fprintf('\nLoading Saved Neural Network Parameters ...\n')
+
+% Load the weights into variables Theta1 and Theta2
+load('ex4weights.mat');
+
+% Unroll parameters
+nn_params = [Theta1(:) ; Theta2(:)];
+
+%% ================ Part 3: Compute Cost (Feedforward) ================
+% For the neural network, you should start by implementing the
+% feedforward part of the neural network that returns the cost only. You
+% should complete the code in nnCostFunction.m to return the cost. After
+% implementing the feedforward to compute the cost, you can verify that
+% your implementation is correct by verifying that you get the same cost
+% as us for the fixed debugging parameters.
+%
+% We suggest implementing the feedforward cost *without* regularization
+% first so that it will be easier for you to debug. Later, in part 4, you
+% will get to implement the regularized cost.
+%
+fprintf('\nFeedforward Using Neural Network ...\n')
+
+% Weight regularization parameter (we set this to 0 here).
+lambda = 0;
+
+J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, ...
+                   num_labels, X, y, lambda);
+
+fprintf(['Cost at parameters (loaded from ex4weights): %f '...
+         '\n(this value should be about 0.287629)\n'], J);
+
+fprintf('\nProgram paused. Press enter to continue.\n');
+pause;
+
+%% =============== Part 4: Implement Regularization ===============
+% Once your cost function implementation is correct, you should now
+% continue to implement the regularization with the cost.
+%
+
+fprintf('\nChecking Cost Function (w/ Regularization) ... \n')
+
+% Weight regularization parameter (we set this to 1 here).
+lambda = 1;
+
+J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, ...
+                   num_labels, X, y, lambda);
+
+fprintf(['Cost at parameters (loaded from ex4weights): %f '...
+         '\n(this value should be about 0.383770)\n'], J);
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% ================ Part 5: Sigmoid Gradient ================
+% Before you start implementing the neural network, you will first
+% implement the gradient for the sigmoid function. You should complete the
+% code in the sigmoidGradient.m file.
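+%
+% A minimal sketch (one possible implementation, not the only acceptable
+% one), using the identity g'(z) = g(z) * (1 - g(z)), which applies
+% elementwise to vectors and matrices:
+%
+%     g = sigmoid(z) .* (1 - sigmoid(z));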
+%
+
+fprintf('\nEvaluating sigmoid gradient...\n')
+
+g = sigmoidGradient([1 -0.5 0 0.5 1]);
+fprintf('Sigmoid gradient evaluated at [1 -0.5 0 0.5 1]:\n ');
+fprintf('%f ', g);
+fprintf('\n\n');
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% ================ Part 6: Initializing Parameters ================
+% In this part of the exercise, you will be starting to implement a
+% two-layer neural network that classifies digits. You will start by
+% implementing a function to initialize the weights of the neural network
+% (randInitializeWeights.m)
+
+fprintf('\nInitializing Neural Network Parameters ...\n')
+
+initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size);
+initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels);
+
+% Unroll parameters
+initial_nn_params = [initial_Theta1(:) ; initial_Theta2(:)];
+
+
+%% =============== Part 7: Implement Backpropagation ===============
+% Once your cost matches up with ours, you should proceed to implement the
+% backpropagation algorithm for the neural network. You should add to the
+% code you've written in nnCostFunction.m to return the partial
+% derivatives of the parameters.
+%
+fprintf('\nChecking Backpropagation... \n');
+
+% Check gradients by running checkNNGradients
+checkNNGradients;
+
+fprintf('\nProgram paused. Press enter to continue.\n');
+pause;
+
+
+%% =============== Part 8: Implement Regularization ===============
+% Once your backpropagation implementation is correct, you should now
+% continue to implement the regularization with the cost and gradient.
+%
+
+fprintf('\nChecking Backpropagation (w/ Regularization) ... \n')
+
+% Check gradients by running checkNNGradients
+lambda = 3;
+checkNNGradients(lambda);
+
+% Also output the costFunction debugging values
+debug_J = nnCostFunction(nn_params, input_layer_size, ...
+                         hidden_layer_size, num_labels, X, y, lambda);
+
+fprintf(['\n\nCost at (fixed) debugging parameters (w/ lambda = 3): %f ' ...
+         '\n(this value should be about 0.576051)\n\n'], debug_J);
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% =================== Part 8: Training NN ===================
+% You have now implemented all the code necessary to train a neural
+% network. To train your neural network, we will now use "fmincg", which
+% is a function which works similarly to "fminunc". Recall that these
+% advanced optimizers are able to train our cost functions efficiently as
+% long as we provide them with the gradient computations.
+%
+fprintf('\nTraining Neural Network... \n')
+
+% After you have completed the assignment, change the MaxIter to a larger
+% value to see how more training helps.
+options = optimset('MaxIter', 50);
+
+% You should also try different values of lambda
+lambda = 1;
+
+% Create "short hand" for the cost function to be minimized
+costFunction = @(p) nnCostFunction(p, ...
+                                   input_layer_size, ...
+                                   hidden_layer_size, ...
+                                   num_labels, X, y, lambda);
+
+% Now, costFunction is a function that takes in only one argument (the
+% neural network parameters)
+[nn_params, cost] = fmincg(costFunction, initial_nn_params, options);
+
+% Obtain Theta1 and Theta2 back from nn_params
+Theta1 = reshape(nn_params(1:hidden_layer_size * (input_layer_size + 1)), ...
+                 hidden_layer_size, (input_layer_size + 1));
+
+Theta2 = reshape(nn_params((1 + (hidden_layer_size * (input_layer_size + 1))):end), ...
+                 num_labels, (hidden_layer_size + 1));
+
+fprintf('Program paused. 
Press enter to continue.\n'); +pause; + + +%% ================= Part 9: Visualize Weights ================= +% You can now "visualize" what the neural network is learning by +% displaying the hidden units to see what features they are capturing in +% the data. + +fprintf('\nVisualizing Neural Network... \n') + +displayData(Theta1(:, 2:end)); + +fprintf('\nProgram paused. Press enter to continue.\n'); +pause; + +%% ================= Part 10: Implement Predict ================= +% After training the neural network, we would like to use it to predict +% the labels. You will now implement the "predict" function to use the +% neural network to predict the labels of the training set. This lets +% you compute the training set accuracy. + +pred = predict(Theta1, Theta2, X); + +fprintf('\nTraining Set Accuracy: %f\n', mean(double(pred == y)) * 100); + + diff --git a/ex4/ex4data1.mat b/ex4/ex4data1.mat new file mode 100644 index 0000000..371bd0c Binary files /dev/null and b/ex4/ex4data1.mat differ diff --git a/ex4/ex4weights.mat b/ex4/ex4weights.mat new file mode 100644 index 0000000..ace2a09 Binary files /dev/null and b/ex4/ex4weights.mat differ diff --git a/ex4/fmincg.m b/ex4/fmincg.m new file mode 100644 index 0000000..34bf539 --- /dev/null +++ b/ex4/fmincg.m @@ -0,0 +1,175 @@ +function [X, fX, i] = fmincg(f, X, options, P1, P2, P3, P4, P5) +% Minimize a continuous differentialble multivariate function. Starting point +% is given by "X" (D by 1), and the function named in the string "f", must +% return a function value and a vector of partial derivatives. The Polack- +% Ribiere flavour of conjugate gradients is used to compute search directions, +% and a line search using quadratic and cubic polynomial approximations and the +% Wolfe-Powell stopping criteria is used together with the slope ratio method +% for guessing initial step sizes. Additionally a bunch of checks are made to +% make sure that exploration is taking place and that extrapolation will not +% be unboundedly large. The "length" gives the length of the run: if it is +% positive, it gives the maximum number of line searches, if negative its +% absolute gives the maximum allowed number of function evaluations. You can +% (optionally) give "length" a second component, which will indicate the +% reduction in function value to be expected in the first line-search (defaults +% to 1.0). The function returns when either its length is up, or if no further +% progress can be made (ie, we are at a minimum, or so close that due to +% numerical problems, we cannot get any closer). If the function terminates +% within a few iterations, it could be an indication that the function value +% and derivatives are not consistent (ie, there may be a bug in the +% implementation of your "f" function). The function returns the found +% solution "X", a vector of function values "fX" indicating the progress made +% and "i" the number of iterations (line searches or function evaluations, +% depending on the sign of "length") used. +% +% Usage: [X, fX, i] = fmincg(f, X, options, P1, P2, P3, P4, P5) +% +% See also: checkgrad +% +% Copyright (C) 2001 and 2002 by Carl Edward Rasmussen. Date 2002-02-13 +% +% +% (C) Copyright 1999, 2000 & 2001, Carl Edward Rasmussen +% +% Permission is granted for anyone to copy, use, or modify these +% programs and accompanying documents for purposes of research or +% education, provided this copyright notice is retained, and note is +% made of any changes that have been made. 
+% +% These programs and documents are distributed without any warranty, +% express or implied. As the programs were written for research +% purposes only, they have not been tested to the degree that would be +% advisable in any important application. All use of these programs is +% entirely at the user's own risk. +% +% [ml-class] Changes Made: +% 1) Function name and argument specifications +% 2) Output display +% + +% Read options +if exist('options', 'var') && ~isempty(options) && isfield(options, 'MaxIter') + length = options.MaxIter; +else + length = 100; +end + + +RHO = 0.01; % a bunch of constants for line searches +SIG = 0.5; % RHO and SIG are the constants in the Wolfe-Powell conditions +INT = 0.1; % don't reevaluate within 0.1 of the limit of the current bracket +EXT = 3.0; % extrapolate maximum 3 times the current bracket +MAX = 20; % max 20 function evaluations per line search +RATIO = 100; % maximum allowed slope ratio + +argstr = ['feval(f, X']; % compose string used to call function +for i = 1:(nargin - 3) + argstr = [argstr, ',P', int2str(i)]; +end +argstr = [argstr, ')']; + +if max(size(length)) == 2, red=length(2); length=length(1); else red=1; end +S=['Iteration ']; + +i = 0; % zero the run length counter +ls_failed = 0; % no previous line search has failed +fX = []; +[f1 df1] = eval(argstr); % get function value and gradient +i = i + (length<0); % count epochs?! +s = -df1; % search direction is steepest +d1 = -s'*s; % this is the slope +z1 = red/(1-d1); % initial step is red/(|s|+1) + +while i < abs(length) % while not finished + i = i + (length>0); % count iterations?! + + X0 = X; f0 = f1; df0 = df1; % make a copy of current values + X = X + z1*s; % begin line search + [f2 df2] = eval(argstr); + i = i + (length<0); % count epochs?! + d2 = df2'*s; + f3 = f1; d3 = d1; z3 = -z1; % initialize point 3 equal to point 1 + if length>0, M = MAX; else M = min(MAX, -length-i); end + success = 0; limit = -1; % initialize quanteties + while 1 + while ((f2 > f1+z1*RHO*d1) | (d2 > -SIG*d1)) & (M > 0) + limit = z1; % tighten the bracket + if f2 > f1 + z2 = z3 - (0.5*d3*z3*z3)/(d3*z3+f2-f3); % quadratic fit + else + A = 6*(f2-f3)/z3+3*(d2+d3); % cubic fit + B = 3*(f3-f2)-z3*(d3+2*d2); + z2 = (sqrt(B*B-A*d2*z3*z3)-B)/A; % numerical error possible - ok! + end + if isnan(z2) | isinf(z2) + z2 = z3/2; % if we had a numerical problem then bisect + end + z2 = max(min(z2, INT*z3),(1-INT)*z3); % don't accept too close to limits + z1 = z1 + z2; % update the step + X = X + z2*s; + [f2 df2] = eval(argstr); + M = M - 1; i = i + (length<0); % count epochs?! + d2 = df2'*s; + z3 = z3-z2; % z3 is now relative to the location of z2 + end + if f2 > f1+z1*RHO*d1 | d2 > -SIG*d1 + break; % this is a failure + elseif d2 > SIG*d1 + success = 1; break; % success + elseif M == 0 + break; % failure + end + A = 6*(f2-f3)/z3+3*(d2+d3); % make cubic extrapolation + B = 3*(f3-f2)-z3*(d3+2*d2); + z2 = -d2*z3*z3/(B+sqrt(B*B-A*d2*z3*z3)); % num. error possible - ok! + if ~isreal(z2) | isnan(z2) | isinf(z2) | z2 < 0 % num prob or wrong sign? + if limit < -0.5 % if we have no upper limit + z2 = z1 * (EXT-1); % the extrapolate the maximum amount + else + z2 = (limit-z1)/2; % otherwise bisect + end + elseif (limit > -0.5) & (z2+z1 > limit) % extraplation beyond max? 
+      z2 = (limit-z1)/2;            % bisect
+    elseif (limit < -0.5) & (z2+z1 > z1*EXT)          % extrapolation beyond limit
+      z2 = z1*(EXT-1.0);            % set to extrapolation limit
+    elseif z2 < -z3*INT
+      z2 = -z3*INT;
+    elseif (limit > -0.5) & (z2 < (limit-z1)*(1.0-INT))    % too close to limit?
+      z2 = (limit-z1)*(1.0-INT);
+    end
+    f3 = f2; d3 = d2; z3 = -z2;     % set point 3 equal to point 2
+    z1 = z1 + z2; X = X + z2*s;     % update current estimates
+    [f2 df2] = eval(argstr);
+    M = M - 1; i = i + (length<0);  % count epochs?!
+    d2 = df2'*s;
+  end                               % end of line search
+
+  if success                        % if line search succeeded
+    f1 = f2; fX = [fX' f1]';
+    fprintf('%s %4i | Cost: %4.6e\r', S, i, f1);
+    s = (df2'*df2-df1'*df2)/(df1'*df1)*s - df2;       % Polack-Ribiere direction
+    tmp = df1; df1 = df2; df2 = tmp;                  % swap derivatives
+    d2 = df1'*s;
+    if d2 > 0                       % new slope must be negative
+      s = -df1;                     % otherwise use steepest direction
+      d2 = -s'*s;
+    end
+    z1 = z1 * min(RATIO, d1/(d2-realmin));            % slope ratio but max RATIO
+    d1 = d2;
+    ls_failed = 0;                  % this line search did not fail
+  else
+    X = X0; f1 = f0; df1 = df0;     % restore point from before failed line search
+    if ls_failed | i > abs(length)  % line search failed twice in a row
+      break;                        % or we ran out of time, so we give up
+    end
+    tmp = df1; df1 = df2; df2 = tmp;                  % swap derivatives
+    s = -df1;                       % try steepest
+    d1 = -s'*s;
+    z1 = 1/(1-d1);
+    ls_failed = 1;                  % this line search failed
+  end
+  if exist('OCTAVE_VERSION')
+    fflush(stdout);
+  end
+end
+fprintf('\n');
diff --git a/ex4/ml_login_data.mat b/ex4/ml_login_data.mat
new file mode 100644
index 0000000..b825f2d
--- /dev/null
+++ b/ex4/ml_login_data.mat
@@ -0,0 +1,11 @@
+# Created by Octave 3.2.4, Tue Oct 23 20:11:37 2012 China Standard Time
+# name: login
+# type: string
+# elements: 1
+# length: 19
+x.wangyan@gmail.com
+# name: password
+# type: string
+# elements: 1
+# length: 10
+CgKSs5ur5p
diff --git a/ex4/nnCostFunction.m b/ex4/nnCostFunction.m
new file mode 100644
index 0000000..b7207dd
--- /dev/null
+++ b/ex4/nnCostFunction.m
@@ -0,0 +1,119 @@
+function [J grad] = nnCostFunction(nn_params, ...
+                                   input_layer_size, ...
+                                   hidden_layer_size, ...
+                                   num_labels, ...
+                                   X, y, lambda)
+%NNCOSTFUNCTION Implements the neural network cost function for a two-layer
+%neural network which performs classification
+%   [J grad] = NNCOSTFUNCTION(nn_params, input_layer_size, hidden_layer_size, ...
+%   num_labels, X, y, lambda) computes the cost and gradient of the neural
+%   network. The parameters for the neural network are "unrolled" into the
+%   vector nn_params and need to be converted back into the weight matrices.
+%
+%   The returned parameter grad should be an "unrolled" vector of the
+%   partial derivatives of the neural network.
+%
+
+% Reshape nn_params back into the parameters Theta1 and Theta2, the weight
+% matrices for our two-layer neural network
+Theta1 = reshape(nn_params(1:hidden_layer_size * (input_layer_size + 1)), ...
+                 hidden_layer_size, (input_layer_size + 1));
+
+Theta2 = reshape(nn_params((1 + (hidden_layer_size * (input_layer_size + 1))):end), ...
+                 num_labels, (hidden_layer_size + 1));
+
+% Setup some useful variables
+m = size(X, 1);
+
+% You need to return the following variables correctly
+J = 0;
+Theta1_grad = zeros(size(Theta1));
+Theta2_grad = zeros(size(Theta2));
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: You should complete the code by working through the
+%               following parts.
+%
+% Part 1: Feedforward the neural network and return the cost in the
+%         variable J.
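+%         For reference, the unregularized cost being computed is
+%           J = (1/m) * sum_{i=1..m} sum_{k=1..K} [ -y_k(i)*log(h_k(i))
+%                                                   - (1-y_k(i))*log(1-h_k(i)) ]
+%         where h(i) is the network output (a3 below) for example i and
+%         y(i) is the one-hot encoding of that example's label.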
+%         After implementing Part 1, you can verify that your
+%         cost function computation is correct by checking the cost
+%         computed in ex4.m
+%
+% Part 2: Implement the backpropagation algorithm to compute the gradients
+%         Theta1_grad and Theta2_grad. You should return the partial derivatives of
+%         the cost function with respect to Theta1 and Theta2 in Theta1_grad and
+%         Theta2_grad, respectively. After implementing Part 2, you can check
+%         that your implementation is correct by running checkNNGradients
+%
+%         Note: The vector y passed into the function is a vector of labels
+%               containing values from 1..K. You need to map this vector into a
+%               binary vector of 1's and 0's to be used with the neural network
+%               cost function.
+%
+%         Hint: We recommend implementing backpropagation using a for-loop
+%               over the training examples if you are implementing it for the
+%               first time.
+%
+% Part 3: Implement regularization with the cost function and gradients.
+%
+%         Hint: You can implement this around the code for
+%               backpropagation. That is, you can compute the gradients for
+%               the regularization separately and then add them to Theta1_grad
+%               and Theta2_grad from Part 2.
+%
+
+% Feedforward: a1 is the input layer with a bias column; a3 is the
+% network output h(x)
+a1 = [ones(m, 1) X];
+z2 = a1*Theta1';
+a2 = [ones(m, 1) sigmoid(z2)];
+z3 = a2*Theta2';
+a3 = sigmoid(z3);
+
+% Map the labels 1..K to one-hot rows of Y
+Y = zeros(m, num_labels);
+for i = 1:num_labels
+  Y(:,i) = (y==i);
+end
+
+% Unregularized cost
+J = (1/m)*(sum(sum((-Y).*log(a3) - (1-Y).*(log(1-a3)))));
+
+% Regularization term; the bias columns of Theta1 and Theta2 are excluded
+Theta1_temp = Theta1(:,2:end);
+Theta2_temp = Theta2(:,2:end);
+Jtheta = (lambda/(2*m))*(sum(Theta1_temp(:).^2) + sum(Theta2_temp(:).^2));
+
+J = J + Jtheta;
+
+% Backpropagation: sigma3 is the output-layer error; sigma2 propagates it
+% back through Theta2 and the sigmoid derivative, dropping the bias term
+sigma3 = a3 - Y;
+sigma2 = (sigma3*Theta2).*sigmoidGradient([ones(size(z2, 1), 1) z2]);
+sigma2 = sigma2(:, 2:end);
+delta2 = sigma3'*a2;
+delta1 = sigma2'*a1;
+
+% Gradients; regularization applies to every column except the first,
+% since the bias weights are not regularized
+Theta1_grad = delta1./m;
+Theta1_grad(:,2:end) = Theta1_grad(:,2:end) + (lambda/m)*Theta1(:,2:end);
+Theta2_grad = delta2./m;
+Theta2_grad(:,2:end) = Theta2_grad(:,2:end) + (lambda/m)*Theta2(:,2:end);
+
+% -------------------------------------------------------------
+
+% =========================================================================
+
+% Unroll gradients
+grad = [Theta1_grad(:) ; Theta2_grad(:)];
+
+
+end
diff --git a/ex4/predict.m b/ex4/predict.m
new file mode 100644
index 0000000..9ec3f6d
--- /dev/null
+++ b/ex4/predict.m
@@ -0,0 +1,20 @@
+function p = predict(Theta1, Theta2, X)
+%PREDICT Predict the label of an input given a trained neural network
+%   p = PREDICT(Theta1, Theta2, X) outputs the predicted label of X given the
+%   trained weights of a neural network (Theta1, Theta2)
+
+% Useful values
+m = size(X, 1);
+num_labels = size(Theta2, 1);
+
+% You need to return the following variables correctly
+p = zeros(size(X, 1), 1);
+
+h1 = sigmoid([ones(m, 1) X] * Theta1');     % hidden layer activations
+h2 = sigmoid([ones(m, 1) h1] * Theta2');    % output layer activations
+[dummy, p] = max(h2, [], 2);                % predicted label: index of the largest output
+
+% =========================================================================
+
+
+end
diff --git a/ex4/randInitializeWeights.m b/ex4/randInitializeWeights.m
new file mode 100644
index 0000000..dd3e354
--- /dev/null
+++ b/ex4/randInitializeWeights.m
@@ -0,0 +1,33 @@
+function W = randInitializeWeights(L_in, L_out)
+%RANDINITIALIZEWEIGHTS Randomly initialize the weights of a layer with L_in
+%incoming connections and L_out outgoing connections
+%   W = RANDINITIALIZEWEIGHTS(L_in, L_out) randomly initializes the weights
+%   of a layer with L_in incoming connections and L_out outgoing
+%   connections.
+%
+%   Note that W should be set to a matrix of size(L_out, 1 + L_in) as
+%   the first column of W handles the "bias" terms
+%
+
+% You need to return the following variables correctly
+W = zeros(L_out, 1 + L_in);
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: Initialize W randomly so that we break the symmetry while
+%               training the neural network.
+%
+% Note: The first column of W corresponds to the parameters for the bias units
+%
+
+% One effective heuristic is epsilon_init = sqrt(6) / sqrt(L_in + L_out);
+% the fixed value 0.12 below works well for this exercise
+epsilon_init = 0.12;
+W = rand(L_out, 1 + L_in) * 2 * epsilon_init - epsilon_init;
+
+% =========================================================================
+
+end
diff --git a/ex4/sigmoid.m b/ex4/sigmoid.m
new file mode 100644
index 0000000..6deca13
--- /dev/null
+++ b/ex4/sigmoid.m
@@ -0,0 +1,6 @@
+function g = sigmoid(z)
+%SIGMOID Compute sigmoid function
+%   g = SIGMOID(z) computes the sigmoid of z.
+
+g = 1.0 ./ (1.0 + exp(-z));
+end
diff --git a/ex4/sigmoidGradient.m b/ex4/sigmoidGradient.m
new file mode 100644
index 0000000..99b2189
--- /dev/null
+++ b/ex4/sigmoidGradient.m
@@ -0,0 +1,33 @@
+function g = sigmoidGradient(z)
+%SIGMOIDGRADIENT returns the gradient of the sigmoid function
+%evaluated at z
+%   g = SIGMOIDGRADIENT(z) computes the gradient of the sigmoid function
+%   evaluated at z. This should work regardless of whether z is a matrix or
+%   a vector. In particular, if z is a vector or matrix, you should return
+%   the gradient for each element.
+
+g = zeros(size(z));
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: Compute the gradient of the sigmoid function evaluated at
+%               each value of z (z can be a matrix, vector or scalar).
+
+% The sigmoid derivative satisfies g'(z) = g(z).*(1 - g(z))
+g = sigmoid(z).*(1 - sigmoid(z));
+
+% =============================================================
+
+end
diff --git a/ex4/submit.m b/ex4/submit.m
new file mode 100644
index 0000000..e4d2166
--- /dev/null
+++ b/ex4/submit.m
@@ -0,0 +1,578 @@
+function submit(partId, webSubmit)
+%SUBMIT Submit your code and output to the ml-class servers
+%   SUBMIT() will connect to the ml-class server and submit your solution
+
+  fprintf('==\n== [ml-class] Submitting Solutions | Programming Exercise %s\n==\n', ...
+          homework_id());
+  if ~exist('partId', 'var') || isempty(partId)
+    partId = promptPart();
+  end
+
+  if ~exist('webSubmit', 'var') || isempty(webSubmit)
+    webSubmit = 0; % submit directly by default
+  end
+
+  % Check valid partId
+  partNames = validParts();
+  if ~isValidPartId(partId)
+    fprintf('!! Invalid homework part selected.\n');
+    fprintf('!! Expected an integer from 1 to %d.\n', numel(partNames) + 1);
+    fprintf('!! Submission Cancelled\n');
+    return
+  end
+
+  if ~exist('ml_login_data.mat','file')
+    [login password] = loginPrompt();
+    save('ml_login_data.mat','login','password');
+  else
+    load('ml_login_data.mat');
+    [login password] = quickLogin(login, password);
+    save('ml_login_data.mat','login','password');
+  end
+
+  if isempty(login)
+    fprintf('!! Submission Cancelled\n');
+    return
+  end
+
+  fprintf('\n== Connecting to ml-class ... ');
+  if exist('OCTAVE_VERSION')
+    fflush(stdout);
+  end
+
+  % Setup submit list
+  if partId == numel(partNames) + 1
+    submitParts = 1:numel(partNames);
+  else
+    submitParts = [partId];
+  end
+
+  for s = 1:numel(submitParts)
+    thisPartId = submitParts(s);
+    if (~webSubmit) % submit directly to server
+      [login, ch, signature, auxstring] = getChallenge(login, thisPartId);
+      if isempty(login) || isempty(ch) || isempty(signature)
+        % Some error occurred, error string in first return element.
+        fprintf('\n!! Error: %s\n\n', login);
+        return
+      end
+
+      % Attempt Submission with Challenge
+      ch_resp = challengeResponse(login, password, ch);
+
+      [result, str] = submitSolution(login, ch_resp, thisPartId, ...
+                                     output(thisPartId, auxstring), source(thisPartId), signature);
+
+      partName = partNames{thisPartId};
+
+      fprintf('\n== [ml-class] Submitted Assignment %s - Part %d - %s\n', ...
+              homework_id(), thisPartId, partName);
+      fprintf('== %s\n', strtrim(str));
+
+      if exist('OCTAVE_VERSION')
+        fflush(stdout);
+      end
+    else
+      [result] = submitSolutionWeb(login, thisPartId, output(thisPartId), ...
+                                   source(thisPartId));
+      result = base64encode(result);
+
+      fprintf('\nSave as submission file [submit_ex%s_part%d.txt (enter to accept default)]:', ...
+              homework_id(), thisPartId);
+      saveAsFile = input('', 's');
+      if (isempty(saveAsFile))
+        saveAsFile = sprintf('submit_ex%s_part%d.txt', homework_id(), thisPartId);
+      end
+
+      fid = fopen(saveAsFile, 'w');
+      if (fid)
+        fwrite(fid, result);
+        fclose(fid);
+        fprintf('\nSaved your solutions to %s.\n\n', saveAsFile);
+        fprintf(['You can now submit your solutions through the web \n' ...
+                 'form in the programming exercises. Select the corresponding \n' ...
+                 'programming exercise to access the form.\n']);
+
+      else
+        fprintf('Unable to save to %s\n\n', saveAsFile);
+        fprintf(['You can create a submission file by saving the \n' ...
+                 'following text in a file: (press enter to continue)\n\n']);
+        pause;
+        fprintf(result);
+      end
+    end
+  end
+end
+
+% ================== CONFIGURABLES FOR EACH HOMEWORK ==================
+
+function id = homework_id()
+  id = '4';
+end
+
+function [partNames] = validParts()
+  partNames = { 'Feedforward and Cost Function', ...
+                'Regularized Cost Function', ...
+                'Sigmoid Gradient', ...
+                'Neural Network Gradient (Backpropagation)' ...
+                'Regularized Gradient' ...
+              };
+end
+
+function srcs = sources()
+  % Separated by part
+  srcs = { { 'nnCostFunction.m' }, ...
+           { 'nnCostFunction.m' }, ...
+           { 'sigmoidGradient.m' }, ...
+           { 'nnCostFunction.m' }, ...
+           { 'nnCostFunction.m' } };
+end
+
+function out = output(partId, auxstring)
+  % Random Test Cases
+  X = reshape(3 * sin(1:1:30), 3, 10);
+  Xm = reshape(sin(1:32), 16, 2) / 5;
+  ym = 1 + mod(1:16,4)';
+  t1 = sin(reshape(1:2:24, 4, 3));
+  t2 = cos(reshape(1:2:40, 4, 5));
+  t = [t1(:) ; t2(:)];
+  if partId == 1
+    [J] = nnCostFunction(t, 2, 4, 4, Xm, ym, 0);
+    out = sprintf('%0.5f ', J);
+  elseif partId == 2
+    [J] = nnCostFunction(t, 2, 4, 4, Xm, ym, 1.5);
+    out = sprintf('%0.5f ', J);
+  elseif partId == 3
+    out = sprintf('%0.5f ', sigmoidGradient(X));
+  elseif partId == 4
+    [J, grad] = nnCostFunction(t, 2, 4, 4, Xm, ym, 0);
+    out = sprintf('%0.5f ', J);
+    out = [out sprintf('%0.5f ', grad)];
+  elseif partId == 5
+    [J, grad] = nnCostFunction(t, 2, 4, 4, Xm, ym, 1.5);
+    out = sprintf('%0.5f ', J);
+    out = [out sprintf('%0.5f ', grad)];
+  end
+end
+
+
+% ====================== SERVER CONFIGURATION ===========================
+
+% ***************** REMOVE -staging WHEN YOU DEPLOY *********************
+function url = site_url()
+  url = 'http://class.coursera.org/ml-2012-002';
+end
+
+function url = challenge_url()
+  url = [site_url() '/assignment/challenge'];
+end
+
+function url = submit_url()
+  url = [site_url() '/assignment/submit'];
+end
+
+% ========================= CHALLENGE HELPERS =========================
+
+function src = source(partId)
+  src = '';
+  src_files = sources();
+  if partId <= numel(src_files)
+    flist = src_files{partId};
+    for i = 1:numel(flist)
+      fid = fopen(flist{i});
+      if (fid == -1)
+        error('Error opening %s (is it missing?)', flist{i});
+      end
+      line = fgets(fid);
+      while ischar(line)
+        src = [src line];
+        line = fgets(fid);
+      end
+      fclose(fid);
+      src = [src '||||||||'];
+    end
+  end
+end
+
+function ret = isValidPartId(partId)
+  partNames = validParts();
+  ret = (~isempty(partId)) && (partId >= 1) && (partId <= numel(partNames) + 1);
+end
+
+function partId = promptPart()
+  fprintf('== Select which part(s) to submit:\n');
+  partNames = validParts();
+  srcFiles = sources();
+  for i = 1:numel(partNames)
+    fprintf('==   %d) %s [', i, partNames{i});
+    fprintf(' %s ', srcFiles{i}{:});
+    fprintf(']\n');
+  end
+  fprintf('==   %d) All of the above \n==\nEnter your choice [1-%d]: ', ...
+          numel(partNames) + 1, numel(partNames) + 1);
+  selPart = input('', 's');
+  partId = str2num(selPart);
+  if ~isValidPartId(partId)
+    partId = -1;
+  end
+end
+
+function [email,ch,signature,auxstring] = getChallenge(email, part)
+  str = urlread(challenge_url(), 'post', {'email_address', email, 'assignment_part_sid', [homework_id() '-' num2str(part)], 'response_encoding', 'delim'});
+
+  str = strtrim(str);
+  r = struct;
+  while(numel(str) > 0)
+    [f, str] = strtok (str, '|');
+    [v, str] = strtok (str, '|');
+    r = setfield(r, f, v);
+  end
+
+  email = getfield(r, 'email_address');
+  ch = getfield(r, 'challenge_key');
+  signature = getfield(r, 'state');
+  auxstring = getfield(r, 'challenge_aux_data');
+end
+
+function [result, str] = submitSolutionWeb(email, part, output, source)
+
+  result = ['{"assignment_part_sid":"' base64encode([homework_id() '-' num2str(part)], '') '",' ...
+            '"email_address":"' base64encode(email, '') '",' ...
+            '"submission":"' base64encode(output, '') '",' ...
+            '"submission_aux":"' base64encode(source, '') '"' ...
+            '}'];
+  str = 'Web-submission';
+end
+
+function [result, str] = submitSolution(email, ch_resp, part, output, ...
+                                        source, signature)
+
+  params = {'assignment_part_sid', [homework_id() '-' num2str(part)], ...
+            'email_address', email, ...
+            'submission', base64encode(output, ''), ...
+            'submission_aux', base64encode(source, ''), ...
+            'challenge_response', ch_resp, ...
+            'state', signature};
+
+  str = urlread(submit_url(), 'post', params);
+
+  % Parse str to read for success / failure
+  result = 0;
+
+end
+
+% =========================== LOGIN HELPERS ===========================
+
+function [login password] = loginPrompt()
+  % Prompt for password
+  [login password] = basicPrompt();
+
+  if isempty(login) || isempty(password)
+    login = []; password = [];
+  end
+end
+
+
+function [login password] = basicPrompt()
+  login = input('Login (Email address): ', 's');
+  password = input('Password: ', 's');
+end
+
+function [login password] = quickLogin(login,password)
+  disp(['You are currently logged in as ' login '.']);
+  cont_token = input('Is this you? (y/n - type n to reenter password)','s');
+  if(isempty(cont_token) || cont_token(1)=='Y'||cont_token(1)=='y')
+    return;
+  else
+    [login password] = loginPrompt();
+  end
+end
+
+function [str] = challengeResponse(email, passwd, challenge)
+  str = sha1([challenge passwd]);
+end
+
+% =============================== SHA-1 ================================
+
+function hash = sha1(str)
+
+  % Initialize variables
+  h0 = uint32(1732584193);
+  h1 = uint32(4023233417);
+  h2 = uint32(2562383102);
+  h3 = uint32(271733878);
+  h4 = uint32(3285377520);
+
+  % Convert to word array
+  strlen = numel(str);
+
+  % Break string into chars and append the bit 1 to the message
+  mC = [double(str) 128];
+  mC = [mC zeros(1, 4-mod(numel(mC), 4), 'uint8')];
+
+  numB = strlen * 8;
+  if exist('idivide')
+    numC = idivide(uint32(numB + 65), 512, 'ceil');
+  else
+    numC = ceil(double(numB + 65)/512);
+  end
+  numW = numC * 16;
+  mW = zeros(numW, 1, 'uint32');
+
+  idx = 1;
+  for i = 1:4:strlen + 1
+    mW(idx) = bitor(bitor(bitor( ...
+                  bitshift(uint32(mC(i)), 24), ...
+                  bitshift(uint32(mC(i+1)), 16)), ...
+                  bitshift(uint32(mC(i+2)), 8)), ...
+                  uint32(mC(i+3)));
+    idx = idx + 1;
+  end
+
+  % Append length of message
+  mW(numW - 1) = uint32(bitshift(uint64(numB), -32));
+  mW(numW) = uint32(bitshift(bitshift(uint64(numB), 32), -32));
+
+  % Process the message in successive 512-bit chunks
+  for cId = 1 : double(numC)
+    cSt = (cId - 1) * 16 + 1;
+    cEnd = cId * 16;
+    ch = mW(cSt : cEnd);
+
+    % Extend the sixteen 32-bit words into eighty 32-bit words
+    for j = 17 : 80
+      ch(j) = ch(j - 3);
+      ch(j) = bitxor(ch(j), ch(j - 8));
+      ch(j) = bitxor(ch(j), ch(j - 14));
+      ch(j) = bitxor(ch(j), ch(j - 16));
+      ch(j) = bitrotate(ch(j), 1);
+    end
+
+    % Initialize hash value for this chunk
+    a = h0;
+    b = h1;
+    c = h2;
+    d = h3;
+    e = h4;
+
+    % Main loop
+    for i = 1 : 80
+      if(i >= 1 && i <= 20)
+        f = bitor(bitand(b, c), bitand(bitcmp(b), d));
+        k = uint32(1518500249);
+      elseif(i >= 21 && i <= 40)
+        f = bitxor(bitxor(b, c), d);
+        k = uint32(1859775393);
+      elseif(i >= 41 && i <= 60)
+        f = bitor(bitor(bitand(b, c), bitand(b, d)), bitand(c, d));
+        k = uint32(2400959708);
+      elseif(i >= 61 && i <= 80)
+        f = bitxor(bitxor(b, c), d);
+        k = uint32(3395469782);
+      end
+
+      t = bitrotate(a, 5);
+      t = bitadd(t, f);
+      t = bitadd(t, e);
+      t = bitadd(t, k);
+      t = bitadd(t, ch(i));
+      e = d;
+      d = c;
+      c = bitrotate(b, 30);
+      b = a;
+      a = t;
+
+    end
+    h0 = bitadd(h0, a);
+    h1 = bitadd(h1, b);
+    h2 = bitadd(h2, c);
+    h3 = bitadd(h3, d);
+    h4 = bitadd(h4, e);
+
+  end
+
+  hash = reshape(dec2hex(double([h0 h1 h2 h3 h4]), 8)', [1 40]);
+
+  hash = lower(hash);
+
+end
+
+% Add two 32-bit values modulo 2^32
+function ret = bitadd(iA, iB)
+  ret = double(iA) + double(iB);
+  ret = bitset(ret, 33, 0);
+  ret = uint32(ret);
+end
+
+% Rotate a 32-bit value left by 'places' bits
+function ret = bitrotate(iA, places)
+  t = bitshift(iA, places - 32);
+  ret = bitshift(iA, places);
+  ret = bitor(ret, t);
+end
+
+% =========================== Base64 Encoder ============================
+% Thanks to Peter John Acklam
+%
+
+function y = base64encode(x, eol)
+%BASE64ENCODE Perform base64 encoding on a string.
+%
+%   BASE64ENCODE(STR, EOL) encodes the given string STR. EOL is the line ending
+%   sequence to use; it is optional and defaults to '\n' (ASCII decimal 10).
+%   The returned encoded string is broken into lines of no more than 76
+%   characters each, and each line will end with EOL unless it is empty. Let
+%   EOL be empty if you do not want the encoded string broken into lines.
+%
+%   STR and EOL don't have to be strings (i.e., char arrays). The only
+%   requirement is that they are vectors containing values in the range 0-255.
+%
+%   This function may be used to encode strings into the Base64 encoding
+%   specified in RFC 2045 - MIME (Multipurpose Internet Mail Extensions). The
+%   Base64 encoding is designed to represent arbitrary sequences of octets in a
+%   form that need not be humanly readable. A 65-character subset
+%   ([A-Za-z0-9+/=]) of US-ASCII is used, enabling 6 bits to be represented per
+%   printable character.
+%
+%   Examples
+%   --------
+%
+%   If you want to encode a large file, you should encode it in chunks that are
+%   a multiple of 57 bytes. This ensures that the base64 lines line up and
+%   that you do not end up with padding in the middle. 57 bytes of data fills
+%   one complete base64 line (76 == 57*4/3):
+%
+%   If ifid and ofid are two file identifiers opened for reading and writing,
+%   respectively, then you can base64 encode the data with
+%
+%      while ~feof(ifid)
+%         fwrite(ofid, base64encode(fread(ifid, 60*57)));
+%      end
+%
+%   or, if you have enough memory,
+%
+%      fwrite(ofid, base64encode(fread(ifid)));
+%
+%   See also BASE64DECODE.
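+%
+%   A worked example: the three bytes of 'Cat' (0x43 0x61 0x74) regroup
+%   into the four 6-bit values 16, 54, 5 and 52, which map to
+%
+%      base64encode('Cat', '')   % returns 'Q2F0'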
+
+%   Author:      Peter John Acklam
+%   Time-stamp:  2004-02-03 21:36:56 +0100
+%   E-mail:      pjacklam@online.no
+%   URL:         http://home.online.no/~pjacklam
+
+  if isnumeric(x)
+    x = num2str(x);
+  end
+
+  % make sure we have the EOL value
+  if nargin < 2
+    eol = sprintf('\n');
+  else
+    if sum(size(eol) > 1) > 1
+      error('EOL must be a vector.');
+    end
+    if any(eol(:) > 255)
+      error('EOL can not contain values larger than 255.');
+    end
+  end
+
+  if sum(size(x) > 1) > 1
+    error('STR must be a vector.');
+  end
+
+  x = uint8(x);
+  eol = uint8(eol);
+
+  ndbytes = length(x);               % number of decoded bytes
+  nchunks = ceil(ndbytes / 3);       % number of chunks/groups
+  nebytes = 4 * nchunks;             % number of encoded bytes
+
+  % add padding if necessary, to make the length of x a multiple of 3
+  if rem(ndbytes, 3)
+    x(end+1 : 3*nchunks) = 0;
+  end
+
+  x = reshape(x, [3, nchunks]);      % reshape the data
+  y = repmat(uint8(0), 4, nchunks);  % for the encoded data
+
+  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+  % Split up every 3 bytes into 4 pieces
+  %
+  %    aaaaaabb bbbbcccc ccdddddd
+  %
+  % to form
+  %
+  %    00aaaaaa 00bbbbbb 00cccccc 00dddddd
+  %
+  y(1,:) = bitshift(x(1,:), -2);                 % 6 highest bits of x(1,:)
+
+  y(2,:) = bitshift(bitand(x(1,:), 3), 4);       % 2 lowest bits of x(1,:)
+  y(2,:) = bitor(y(2,:), bitshift(x(2,:), -4));  % 4 highest bits of x(2,:)
+
+  y(3,:) = bitshift(bitand(x(2,:), 15), 2);      % 4 lowest bits of x(2,:)
+  y(3,:) = bitor(y(3,:), bitshift(x(3,:), -6));  % 2 highest bits of x(3,:)
+
+  y(4,:) = bitand(x(3,:), 63);                   % 6 lowest bits of x(3,:)
+
+  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+  % Now perform the following mapping
+  %
+  %   0  - 25  ->  A-Z
+  %   26 - 51  ->  a-z
+  %   52 - 61  ->  0-9
+  %   62       ->  +
+  %   63       ->  /
+  %
+  % We could use a mapping vector like
+  %
+  %   ['A':'Z', 'a':'z', '0':'9', '+/']
+  %
+  % but that would require an index vector of class double.
+  %
+  z = repmat(uint8(0), size(y));
+  i = y <= 25;            z(i) = 'A'      + double(y(i));
+  i = 26 <= y & y <= 51;  z(i) = 'a' - 26 + double(y(i));
+  i = 52 <= y & y <= 61;  z(i) = '0' - 52 + double(y(i));
+  i = y == 62;            z(i) = '+';
+  i = y == 63;            z(i) = '/';
+  y = z;
+
+  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+  % Add padding if necessary.
+  %
+  npbytes = 3 * nchunks - ndbytes;   % number of padding bytes
+  if npbytes
+    y(end-npbytes+1 : end) = '=';    % '=' is used for padding
+  end
+
+  if isempty(eol)
+
+    % reshape to a row vector
+    y = reshape(y, [1, nebytes]);
+
+  else
+
+    nlines = ceil(nebytes / 76);     % number of lines
+    neolbytes = length(eol);         % number of bytes in eol string
+
+    % pad data so it becomes a multiple of 76 elements
+    y = [y(:) ; zeros(76 * nlines - numel(y), 1)];
+    y(nebytes + 1 : 76 * nlines) = 0;
+    y = reshape(y, 76, nlines);
+
+    % insert eol strings
+    eol = eol(:);
+    y(end + 1 : end + neolbytes, :) = eol(:, ones(1, nlines));
+
+    % remove padding, but keep the last eol string
+    m = nebytes + neolbytes * (nlines - 1);
+    n = (76+neolbytes)*nlines - neolbytes;
+    y(m+1 : n) = '';
+
+    % extract and reshape to row vector
+    y = reshape(y, 1, m+neolbytes);
+
+  end
+
+  % output is a character array
+  y = char(y);
+
+end
diff --git a/ex4/submitWeb.m b/ex4/submitWeb.m
new file mode 100644
index 0000000..8611707
--- /dev/null
+++ b/ex4/submitWeb.m
@@ -0,0 +1,20 @@
+% submitWeb Creates files from your code and output for web submission.
+%
+% If the submit function does not work for you, use the web-submission mechanism.
+% Call this function to produce a file for the part you wish to submit. Then,
+% submit the file to the class servers using the "Web Submission" button on the
+% Programming Exercises page on the course website.
+%
+% You should call this function without arguments (submitWeb) to receive
+% an interactive prompt for submission; optionally you can call it with the partId
+% if you so wish. Make sure your working directory is set to the directory
+% containing the submitWeb.m file and your assignment files.
+
+function submitWeb(partId)
+  if ~exist('partId', 'var') || isempty(partId)
+    partId = [];
+  end
+
+  submit(partId, 1);
+end
+
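+% Usage sketch (illustrative): to create a web-submission file for part 3,
+% the 'Sigmoid Gradient' part listed in submit.m's validParts(), run
+%
+%   submitWeb(3);
+%
+% or call submitWeb() with no arguments and pick a part at the prompt.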