From 8a3a895d038e535bf05be4850a5491ef2f089d6a Mon Sep 17 00:00:00 2001
From: JonathanAMichaels
Date: Wed, 10 Jan 2018 16:46:31 -0800
Subject: [PATCH] Affiliation correction

---
 examples/geneticRNN_Example_CO.m    |   2 +-
 examples/geneticRNN_Example_CO.m~   | 120 ----------------------------
 examples/geneticRNN_Example_DNMS.m  |   2 +-
 examples/geneticRNN_Example_DNMS.m~ |  83 -------------------
 geneticRNN_learn_model.m            |   2 +-
 geneticRNN_run_model.m              |   2 +-
 6 files changed, 4 insertions(+), 207 deletions(-)
 delete mode 100644 examples/geneticRNN_Example_CO.m~
 delete mode 100644 examples/geneticRNN_Example_DNMS.m~

diff --git a/examples/geneticRNN_Example_CO.m b/examples/geneticRNN_Example_CO.m
index 32e6421..7e42838 100644
--- a/examples/geneticRNN_Example_CO.m
+++ b/examples/geneticRNN_Example_CO.m
@@ -5,7 +5,7 @@
 %
 %
 % Copyright (c) Jonathan A Michaels 2018
-% German Primate Center
+% Stanford University
 % jonathanamichaels AT gmail DOT com
 
 
diff --git a/examples/geneticRNN_Example_CO.m~ b/examples/geneticRNN_Example_CO.m~
deleted file mode 100644
index 9c3b16e..0000000
--- a/examples/geneticRNN_Example_CO.m~
+++ /dev/null
@@ -1,120 +0,0 @@
-% hebbRNN_Example_CO
-%
-% This function illustrates an example of reward-modulated Hebbian learning
-% in recurrent neural networks to complete a center-out reaching task.
-%
-%
-% Copyright (c) Jonathan A Michaels 2016
-% German Primate Center
-% jonathanamichaels AT gmail DOT com
-%
-% If used in published work please see repository README.md for citation
-% and license information: https://github.com/JonathanAMichaels/hebbRNN
-
-
-clear
-close all
-
-numConds = 8; % Number of peripheral targets. Try changing this number to alter the difficulty!
-totalTime = 50; % Total trial time
-moveTime = 25;
-L = [3 3]; % Length of each segment of the arm
-
-%% Populate target function passthrough data
-% This is information that the user can define and passthrough to the
-% network output function
-targetFunPassthrough.L = L;
-targetFunPassthrough.kinTimes = 1:totalTime;
-
-%% General inputs and output
-inp = cell(1,numConds);
-targ = cell(1,numConds);
-ang = linspace(0, 2*pi - 2*pi/numConds, numConds);
-blankTime = 5;
-for cond = 1:numConds
-    inp{cond} = zeros(numConds+1, totalTime);
-    inp{cond}(cond,:) = 0.5;
-    inp{cond}(numConds+1,1:totalTime-moveTime-1) = 1;
-    targ{cond} = [[zeros(totalTime-moveTime,1); nan(blankTime,1); ones(moveTime-blankTime,1)]*sin(ang(cond)) ...
-        [zeros(totalTime-moveTime,1); nan(blankTime,1); ones(moveTime-blankTime,1)]*cos(ang(cond))]';
-end
-% In the center-out reaching task the network needs to produce the joint angle
-% velocities of a two-segment arm to reach to a number of peripheral
-% targets spaced along a circle in the 2D plane, based on the desired target
-% specified by the input.
-
-%% Initialize network parameters
-N = 100; % Number of neurons
-B = size(targ{1},1); % Outputs
-I = size(inp{1},1); % Inputs
-p = 1; % Sparsity
-g = 1.4; % Spectral scaling
-dt = 1; % Time step
-tau = 10; % Time constant
-
-%% Initialize learning parameters
-evalOpts = [2 1]; % Plotting level and frequency of evaluation
-targetFun = @geneticRNN_COTargetFun; % handle of custom target function
-
-policyInitInputs = {N, B, I, p, g, dt, tau};
-policyInitInputsOptional = {'feedback', true, 'actFun', 'tanh', 'energyCost', 0.1};
-
-mutationPower = 1e-2;
-populationSize = 10000;
-truncationSize = 500;
-fitnessFunInputs = targ;
-
-%% Train network
-% This step should take about 5 minutes, depending on your processor.
-% Can be stopped at any time by pressing the STOP button.
-% Look inside to see information about the many optional parameters.
-[net, learnStats] = geneticRNN_learn_model(mutationPower, populationSize, truncationSize, fitnessFunInputs, policyInitInputs, ...
-    'input', inp, ...
-    'evalOpts', evalOpts, ...
-    'policyInitInputsOptional', policyInitInputsOptional, ...
-    'targetFun', targetFun, 'targetFunPassthrough', targetFunPassthrough);
-
-% run model
-[Z0, Z1, R, dR, X, kin] = geneticRNN_run_model(net(1), 'input', inp, 'targetFun', targetFun, 'targetFunPassthrough', targetFunPassthrough);
-
-
-
-
-%% Plot center-out reaching results
-c = lines(length(inp));
-figure(1)
-for cond = 1:length(inp)
-    h(cond) = filledCircle([targ{cond}(1,end) targ{cond}(2,end)], 0.2, 100, [0.9 0.9 0.9]);
-    h(cond).EdgeColor = c(cond,:);
-    hold on
-end
-for cond = 1:length(inp)
-    plot(Z1{cond}(1,:), Z1{cond}(2,:), 'Color', c(cond,:), 'Linewidth', 2)
-end
-axis([-1.3 1.3 -1.3 1.3])
-axis square
-
-%% Play short movie showing trained movements for all directions
-figure(2)
-set(gcf, 'Color', 'white')
-for cond = 1:length(inp)
-    for t = 1:length(targetFunPassthrough.kinTimes)-1
-        clf
-        for cond2 = 1:length(inp)
-            h(cond2) = filledCircle([targ{cond2}(1,1) targ{cond2}(2,1)], 0.2, 100, [0.9 0.9 0.9]);
-            h(cond2).EdgeColor = c(cond2,:);
-            hold on
-        end
-
-        line([kin(cond).initvals(1) kin(cond).posL1(t,1)], ...
-            [kin(cond).initvals(2) kin(cond).posL1(t,2)], 'LineWidth', 8, 'Color', 'black')
-        line([kin(cond).posL1(t,1) Z1{cond}(1,targetFunPassthrough.kinTimes(t))], ...
-            [kin(cond).posL1(t,2) Z1{cond}(2,targetFunPassthrough.kinTimes(t))], 'LineWidth', 8, 'Color', 'black')
-
-        axis([-1.2 4.5 kin(cond).initvals(2) 1.2])
-        axis off
-        drawnow
-        pause(0.02)
-    end
-    pause(0.5)
-end
\ No newline at end of file
diff --git a/examples/geneticRNN_Example_DNMS.m b/examples/geneticRNN_Example_DNMS.m
index a11545f..f69e846 100644
--- a/examples/geneticRNN_Example_DNMS.m
+++ b/examples/geneticRNN_Example_DNMS.m
@@ -6,7 +6,7 @@
 %
 %
 % Copyright (c) Jonathan A Michaels 2018
-% German Primate Center
+% Stanford University
 % jonathanamichaels AT gmail DOT com
 
 
diff --git a/examples/geneticRNN_Example_DNMS.m~ b/examples/geneticRNN_Example_DNMS.m~
deleted file mode 100644
index 048a75f..0000000
--- a/examples/geneticRNN_Example_DNMS.m~
+++ /dev/null
@@ -1,83 +0,0 @@
-% geneticRNN_Example_DNMS
-%
-% This function illustrates an example of a simple genetic learning algorithm
-% in a recurrent neural network to complete a delayed nonmatch-to-sample
-% task.
-%
-%
-% Copyright (c) Jonathan A Michaels 2018
-% German Primate Center
-% jonathanamichaels AT gmail DOT com
-
-
-clear
-close all
-
-%% Generate inputs and outputs
-inp = cell(1,4);
-targ = cell(1,4);
-level = 1;
-cue1Time = 1:20;
-cue2Time = 41:60;
-totalTime = 100;
-checkTime = 81:100;
-target1 = 1;
-target2 = -1;
-for type = 1:4
-    inp{type} = zeros(2, totalTime);
-    if type == 1
-        inp{type}(1, [cue1Time cue2Time]) = level;
-        targ{type} = [nan(1, checkTime(1)-1) ones(1, totalTime-checkTime(1)+1)]*target1;
-    elseif type == 2
-        inp{type}(2, [cue1Time cue2Time]) = level;
-        targ{type} = [nan(1, checkTime(1)-1) ones(1, totalTime-checkTime(1)+1)]*target1;
-    elseif type == 3
-        inp{type}(1, cue1Time) = level;
-        inp{type}(2, cue2Time) = level;
-        targ{type} = [nan(1, checkTime(1)-1) ones(1, totalTime-checkTime(1)+1)]*target2;
-    elseif type == 4
-        inp{type}(2, cue1Time) = level;
-        inp{type}(1, cue2Time) = level;
-        targ{type} = [nan(1, checkTime(1)-1) ones(1, totalTime-checkTime(1)+1)]*target2;
-    end
-end
-% In the delayed nonmatch-to-sample task the network receives two temporally
-% separated inputs. Each input lasts 200ms and there is a 200ms gap between them.
-% The goal of the task is to respond with one value if the inputs were
-% identical, and a different value if they were not. This response must be
-% independent of the order of the signals and therefore requires the
-% network to remember the first input!
-
-%% Initialize network parameters
-N = 100; % Number of neurons
-B = size(targ{1},1); % Outputs
-I = size(inp{1},1); % Inputs
-p = 1; % Sparsity
-g = 1.2; % Spectral scaling
-dt = 10; % Time step
-tau = 50; % Time constant
-
-%% Policy initialization parameters
-policyInitInputs = {N, B, I, p, g, dt, tau};
-policyInitInputsOptional = {'feedback', false};
-
-%% Initialize learning parameters
-mutationPower = 1e-2; % Standard deviation of normally distributed noise to add
-populationSize = 1000; % Number of individuals in each generation
-truncationSize = 10; % Number of individuals to save for next generation
-fitnessFunInputs = targ; % Target data for fitness calculation
-policyInitFun = @geneticRNN_create_model;
-evalOpts = [2 1]; % Plotting level and frequency of evaluation
-
-%% Train network
-% This step should take about 5 minutes, depending on your processor.
-% Should stopped at the desired time by pressing the STOP button and waiting for 1 iteration
-% Look inside to see information about the many optional parameters.
-[net, learnStats] = geneticRNN_learn_model_2(mutationPower, populationSize, truncationSize, fitnessFunInputs, policyInitInputs, ...
-    'input', inp, ...
-    'evalOpts', evalOpts, ...
-    'policyInitInputsOptional', policyInitInputsOptional);
-
-%% Run network
-[Z0, Z1, R, X, kin] = geneticRNN_run_model(net, 'input', inp);
-
diff --git a/geneticRNN_learn_model.m b/geneticRNN_learn_model.m
index b5029b5..623dabc 100644
--- a/geneticRNN_learn_model.m
+++ b/geneticRNN_learn_model.m
@@ -68,7 +68,7 @@
 %
 %
 % Copyright (c) Jonathan A Michaels 2018
-% German Primate Center
+% Stanford University
 % jonathanamichaels AT gmail DOT com
 
 
diff --git a/geneticRNN_run_model.m b/geneticRNN_run_model.m
index c8ad375..5787c7c 100644
--- a/geneticRNN_run_model.m
+++ b/geneticRNN_run_model.m
@@ -27,7 +27,7 @@
 %
 %
 % Copyright (c) Jonathan A Michaels 2018
-% German Primate Center
+% Stanford University
 % jonathanamichaels AT gmail DOT com