diff --git a/.vscode/launch.json b/.vscode/launch.json
new file mode 100644
index 0000000..705f6cb
--- /dev/null
+++ b/.vscode/launch.json
@@ -0,0 +1,19 @@
+{
+    // Use IntelliSense to learn about possible attributes.
+    // Hover to view descriptions of existing attributes.
+    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+    "version": "0.2.0",
+    "configurations": [
+
+        {
+            "type": "julia",
+            "request": "launch",
+            "name": "Run active Julia file",
+            "program": "${file}",
+            "stopOnEntry": false,
+            "cwd": "${workspaceFolder}",
+            // assumes the LSM-v1 environment sits at the workspace root
+            "juliaEnv": "${workspaceFolder}/LSM-v1"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/archive/LSM-v2/Project.toml b/archive/LSM-v2/Project.toml
new file mode 100644
index 0000000..f608643
--- /dev/null
+++ b/archive/LSM-v2/Project.toml
@@ -0,0 +1,4 @@
+[deps]
+CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
+Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
+SpikingNN = "a2976702-bddd-11e9-29f3-e11e525b718e"
diff --git a/archive/LSM-v3/Project.toml b/archive/LSM-v3/Project.toml
new file mode 100644
index 0000000..03f6800
--- /dev/null
+++ b/archive/LSM-v3/Project.toml
@@ -0,0 +1,2 @@
+[deps]
+DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa"
diff --git a/archive/LSM-v4/Project.toml b/archive/LSM-v4/Project.toml
new file mode 100644
index 0000000..55be087
--- /dev/null
+++ b/archive/LSM-v4/Project.toml
@@ -0,0 +1,4 @@
+[deps]
+Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
+Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
+SpikingNeuralNetworks = "9d8b7fda-1049-58bc-9481-071a9f369938"
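Each archived prototype above now carries its own Project.toml, so running one means activating and instantiating that environment first. A minimal sketch, assuming the repository root as the working directory (the archive scripts themselves are added further down in this diff):

    using Pkg

    # activate the archived prototype's environment; its Project.toml lists
    # only [deps], so instantiate() resolves and installs compatible versions
    Pkg.activate("archive/LSM-v4")
    Pkg.instantiate()

    include("archive/min_lsm_v4.jl")

The same pattern applies to LSM-v2 and LSM-v3; src/LiquidStateMachine.jl below does the equivalent for LSM-v1 with Pkg.activate("LSM-v1").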
diff --git a/src/liquid_util.jl b/archive/liquid_util.jl
similarity index 100%
rename from src/liquid_util.jl
rename to archive/liquid_util.jl
diff --git a/src/lsm.jl b/archive/lsm.jl
similarity index 100%
rename from src/lsm.jl
rename to archive/lsm.jl
diff --git a/archive/min_lsm_v2.jl b/archive/min_lsm_v2.jl
new file mode 100644
index 0000000..649fb45
--- /dev/null
+++ b/archive/min_lsm_v2.jl
@@ -0,0 +1,62 @@
+# from: https://github.com/darsnack/SpikingNN.jl/blob/master/examples/stdp-test.jl
+# install with: ] add https://github.com/darsnack/SpikingNN.jl
+
+# PROS:
+# GPU integration
+# Clean code
+# CONS:
+
+# NOTE: setup below reconstructed from the upstream stdp-test.jl example — verify parameters there
+
+using SpikingNN
+using Plots
+
+# simulation parameters
+T = 1000
+
+# neuron parameters
+η₀ = 5.0
+τᵣ = 1.0
+vth = 0.5
+
+# create a two-neuron population with STDP learning
+weights = [0 1; 0 0]
+pop = Population(weights; cell = () -> SRM0(η₀, τᵣ),
+                 synapse = Synapse.Alpha,
+                 threshold = () -> Threshold.Ideal(vth),
+                 learner = STDP(0.5, 0.5, size(weights, 1)))
+
+# create step input currents
+input = InputPopulation([ConstantRate(0.8)])
+
+# create network
+net = Network(Dict([:input => input, :pop => pop]))
+connect!(net, :input, :pop; weights = [1 0], synapse = Synapse.Alpha)
+
+# simulate
+w = Float64[]
+voltages = Dict([(i, Float64[]) for i in 1:2])
+cb = () -> begin
+    push!(w, net[:pop].weights[1, 2])
+    for id in 1:size(pop)
+        push!(voltages[id], getvoltage(pop[id]))
+    end
+end
+@time outputs = simulate!(net, T; cb = cb, dense = true)
+
+weight_plot = plot(1:T, w, label = "")
+title!("Synaptic Weights Over Simulation")
+xlabel!("Time (sec)")
+ylabel!("Weight")
+
+raster_plot = rasterplot(outputs[:pop])
+title!("Raster Plot")
+xlabel!("Time (sec)")
+
+plot(voltages[1], label = "Input")
+voltage_plot = plot!(voltages[2], label = "Neuron")
+title!("Membrane Voltages")
+xlabel!("Time (sec)")
+ylabel!("Voltage (V)")
+
+plot(weight_plot, raster_plot, voltage_plot, layout = grid(3, 1))
\ No newline at end of file
diff --git a/archive/min_lsm_v3.jl b/archive/min_lsm_v3.jl
new file mode 100644
index 0000000..dcf41e8
--- /dev/null
+++ b/archive/min_lsm_v3.jl
@@ -0,0 +1,35 @@
+# from: https://docs.sciml.ai/SciMLTutorialsOutput/html/models/08-spiking_neural_systems.html
+
+using DifferentialEquations
+# load error :/
+using Plots
+gr()
+
+function lif(u, p, t)
+    gL, EL, C, Vth, I = p
+    (-gL*(u-EL)+I)/C
+end
+
+function thr(u, t, integrator)
+    integrator.u > integrator.p[4]
+end
+
+function reset!(integrator)
+    integrator.u = integrator.p[2]
+end
+
+threshold = DiscreteCallback(thr, reset!)
+current_step = PresetTimeCallback([2, 15], integrator -> integrator.p[5] += 210.0)
+cb = CallbackSet(current_step, threshold)
+
+u0 = -75.0
+tspan = (0.0, 40.0)
+# p = (gL, EL, C, Vth, I)
+p = [10.0, -75.0, 5.0, -55.0, 0.0]
+
+prob = ODEProblem(lif, u0, tspan, p, callback=cb)
+
+sol = solve(prob)
+
+plot(sol)
+
diff --git a/archive/min_lsm_v4.jl b/archive/min_lsm_v4.jl
new file mode 100644
index 0000000..f45eb54
--- /dev/null
+++ b/archive/min_lsm_v4.jl
@@ -0,0 +1,45 @@
+# install: julia> using Pkg; Pkg.add(url="https://github.com/AStupidBear/SpikingNeuralNetworks.jl")
+
+using Plots
+using Random
+using SpikingNeuralNetworks
+SNN.@load_units
+
+# N = 1000
+# E1 = SNN.IF(;N = N)
+# E2 = SNN.IF(;N = N)
+# EE = SNN.SpikingSynapse(E1, E2, :ge)
+# for n = 1:E1.N SNN.connect!(EE, n, n) end
+# SNN.monitor([E1, E2], [:fire])
+# SNN.monitor(EE, [:W])
+
+# @time for t = 1:N
+#     E1.v[t] = 100
+#     E2.v[N - t + 1] = 100
+#     SNN.train!([E1, E2], [EE], 0.5ms, (t - 1) * 0.5ms)
+# end
+# SNN.raster([E1, E2])
+# ΔW = EE.records[:W][end]
+# plot(ΔW)
+
+# spike_train = E1.:fire
+# plot(spike_train)
+
+E = SNN.IF(;N = 3200, param = SNN.IFParameter(;El = -49mV))
+I = SNN.IF(;N = 800, param = SNN.IFParameter(;El = -49mV))
+EE = SNN.SpikingSynapse(E, E, :ge; σ = 60*0.27/10, p = 0.02)
+EI = SNN.SpikingSynapse(E, I, :ge; σ = 60*0.27/10, p = 0.02)
+IE = SNN.SpikingSynapse(I, E, :gi; σ = -20*4.5/10, p = 0.02)
+II = SNN.SpikingSynapse(I, I, :gi; σ = -20*4.5/10, p = 0.02)
+P = [E, I]
+C = [EE, EI, IE, II]
+
+SNN.monitor([E, I], [:fire])
+SNN.monitor([EE, EI, IE, II], [:W])
+
+SNN.sim!(P, C; duration = 1second)
+SNN.raster(P)
+SNN.train!(P, C; duration = 1second)
+
+ΔW = EE.records[:W][end]
+plot(ΔW)
\ No newline at end of file
diff --git a/src/params.jl b/archive/params.jl
similarity index 100%
rename from src/params.jl
rename to archive/params.jl
diff --git a/src/spike_interpreter.jl b/archive/spike_interpreter.jl
similarity index 100%
rename from src/spike_interpreter.jl
rename to archive/spike_interpreter.jl
diff --git a/src/util.jl b/archive/util.jl
similarity index 100%
rename from src/util.jl
rename to archive/util.jl
diff --git a/src/waspenetUtil/ClNeuron.jl b/archive/waspenetUtil/ClNeuron.jl
similarity index 100%
rename from src/waspenetUtil/ClNeuron.jl
rename to archive/waspenetUtil/ClNeuron.jl
diff --git a/src/LiquidStateMachine.jl b/src/LiquidStateMachine.jl
index 9c7bda7..c41188d 100644
--- a/src/LiquidStateMachine.jl
+++ b/src/LiquidStateMachine.jl
@@ -1,30 +1,43 @@
 module LiquidStateMachine
 
-using CUDA
-using Distributions
-# using DifferentialEquations
+using Pkg
+
+Pkg.activate("LSM-v1")
+
 using Flux
-using LinearAlgebra
-using Random
-using StableRNGs
-using WaspNet
-using Zygote
+using ChainRulesCore
+using Plots
+using SparseArrays
+using Statistics
 
-include("util.jl")
-export genPositive, discretize
+include("min_lsm_v1.jl")
+export LSM
 
-include("params.jl")
+# using CUDA
+# using Distributions
+# # using DifferentialEquations
+# using Flux
+# using LinearAlgebra
+# using Random
+# using StableRNGs
+# using WaspNet
+# using Zygote
 
-include("spike_interpreter.jl")
-export SpikeTrainGenerator, SpikeTrainDecipher
include("util.jl") +# export genPositive, discretize -include("lsm.jl") -export LSM, LSM_Params +# include("params.jl") +# include("spike_interpreter.jl") +# export SpikeTrainGenerator, SpikeTrainDecipher -using Plots +# include("lsm.jl") +# export LSM, LSM_Params + + +# using Plots -include("liquid_util.jl") -export SP,eigen_spectrum +# include("liquid_util.jl") +# export SP,eigen_spectrum end # module diff --git a/src/min_lsm_v1.jl b/src/min_lsm_v1.jl index 8d2c8ec..18c0891 100644 --- a/src/min_lsm_v1.jl +++ b/src/min_lsm_v1.jl @@ -15,7 +15,6 @@ using ChainRulesCore using Plots using SparseArrays using Statistics -# using Zygote mutable struct SpikingLIF tau :: Float64 @@ -125,11 +124,6 @@ function create_random(N::Int, p::Float64) return flist end -# nlist = [SpikingLIF(8.0, 1.0, 1.0) for i=1:N] -# snet = SpNet(nlist, cm, Wnn, 1.00) -# spsim = SpikeSim(N, 0.01) -# vin = 0.8 .+ 0.4*randn(N) - struct Reservoir N :: Int nlist :: Vector{SpikingLIF} @@ -153,7 +147,7 @@ function Reservoir(N::Int, out_n_neurons::Int) Reservoir(N, nlist, SpNet(nlist, cm, Wnn, 1.00), SpikeSim(N, 0.01), rand(1:N, out_n_neurons), 0.0) end -function (res::Reservoir)(x::Vector{Float64}) +function (res::Reservoir)(x)::Vector{Float64} # vin = 0.8 .+ 0.4*randn(N) activity = [] out = [] @@ -181,50 +175,25 @@ function (res::Reservoir)(x::Vector{Float64}) return readout_input end -# res(x::Matrix{Float64}) i want this function to use the res(::Vector{Float64}) function for each row of x and regroup into a matrix of size i x 2 -function (res::Reservoir)(x::Matrix{Float64}) - readout_input = [res(x[i,:]) for i in 1:size(x,1)] - readout_input = hcat(readout_input...) - return readout_input -end - -# activity = [] -# out = [] - -# @time for i=1:10000 -# out = next_step!(snet, spsim, vin) -# act = [j for j=1:N if out[j] > 0.5] -# for a in act -# # println("$i $a") -# push!(activity, (i,a)) -# end -# end -# println("$(length(activity))") - -# times = [t for (t, n) in activity] -# neurons = [n for (t, n) in activity] - -# sparse_activity = sparse(neurons, times, 1) -# padded_activity = hcat(sparse_activity, sparse(zeros(Int, size(sparse_activity, 1), 10000 - size(sparse_activity, 2)))) - -# println(padded_activity) - -# scatter(times, neurons, markersize=1, xlabel="Time", ylabel="Neuron index", legend=false) +function (res::Reservoir)(x::Matrix{Float64})::Matrix{Float64} + out_spike_train = [] -# readout_input_width = 20 - -# readout_neurons = rand(1:N, readout_input_width) -# readout_input = [padded_activity[:,i] for i in readout_neurons] + ignore_derivatives() do + readout_matrix = hcat([res(x[:,col]) for col in axes(x)[2]]...) 
+
+        out_spike_train = readout_matrix
+    end
 
-# summed_activity = sum.(readout_input)
+    return out_spike_train
+end
 
-reservoir_size = 1000
-readout_input_width = 20
+# struct LSM
+#     lsm :: Chain
+# end
 
-readout_output_width = 1 # binary classification
-readout = Chain(Dense(readout_input_width, readout_output_width, relu), softmax)
+# LSM(reservoir::Reservoir, readout::Chain) = LSM(Chain(reservoir, readout))
+# (lsm::LSM)(x::Vector{Float64}) = lsm.lsm(x)
 
-reservoir = Reservoir(reservoir_size, readout_input_width)
+# vin = 0.8 .+ 0.4*randn(N)
 
-function ChainRulesCore.rrule(::typeof(reservoir), x)
-    y = reservoir(x)
+# `reservoir` is now constructed at the bottom of this file, so dispatch on the
+# Reservoir type; `typeof(reservoir)` would hit an undefined global at include time
+function ChainRulesCore.rrule(res::Reservoir, x)
+    y = res(x)
@@ -234,35 +203,13 @@
     return y, pb
 end
 
-# function ChainRulesCore.rrule(reservoir::Reservoir, x)
-#     bar_pullback(Δy) = Tangent{Reservoir}(;grad_dummy=Δy), Δy, Δy
-#     return reservoir(x), bar_pullback
-# end
-
-# z = res(vin)
-# y = readout(z)
-
-# lsm = Chain(reservoir, readout)
-
-# lsm(x) = readout(res(x))
-# Flux.@functor CustomModel
-# lsm = CustomModel(lsm)
-
-# function xor_fn(x)
-#     num_categories = 4
-#     y = zeros(1)
-#     if x[1:250] != x[2]
-#         y[1] = x[1]
-#         y[2] = x[2]
-#     end
-#     return y
-# end
+# lsm = LSM(reservoir, readout)
 
 function multiplex(n)
     multiplexed_bits = rand(0:1, 4, n)
 
-    vector_ones = fill(1, 250)
-    vector_zeros = fill(0, 250)
+    vector_ones = fill(1.0, 250)
+    vector_zeros = fill(0.0, 250)
 
     function int_to_vec(b)
         if b == 1
 
     return res_compatible_in_data
 end
 
-n_of_examples = 100
-# input_HI_signal = rand(0:4, n_of_examples)
-input_data = multiplex(n_of_examples)
-
 function col_xor(col)
     channels = [mean(col[1:250]), mean(col[251:500]), mean(col[501:750]), mean(col[751:1000])]
 
     if sum(channels) == 1
-        return 1
+        return [0.0, 1.0]
     else
-        return 0
+        return [1.0, 0.0]
     end
 end
 
-target_data = mapslices(col_xor, input_data, dims=1)
+# function loss(x, y)
+#     z = reservoir(x)
+#     ŷ = readout(z)
+#     Flux.mse(ŷ, y)
+# end
 
-# input_data = rand(100, reservoir_size) # 10 samples of 20-dimensional input data
-# output_data = rand(100, 2)
+# loss(float.(input_data[:,1]), float.(target_data[:,1]))
 
-function loss(x, y)
-    z = reservoir(x)
-    ŷ = readout(z)
-    Flux.mse(ŷ, y)
-end
+# loss_t = []
 
-# gradient(reservoir, input_data[1,:])
-# rrule(reservoir, float.(input_data[:,1]))
-opt = ADAM(0.01)
+function tune_readout()
 
-loss(float.(input_data[:,1]), float.(target_data[:,1]))
+    test_x = multiplex(30)
+    test_y = mapslices(col_xor, test_x, dims=1)
 
-loss_t = []
+    for i in 1:10
+
+        pre_w = copy(Flux.params(readout)[1])
+
+        n_of_examples = 100
+        input_data = multiplex(n_of_examples)
+        target_data = mapslices(col_xor, input_data, dims=1)
 
-function tune_readout()
-    for i in 1:10
         println("Running epoch $i")
-        for j in 1:size(input_data, 2)
-            println("Running sample $j")
-            @time Flux.train!(loss, Flux.params(readout), [(float.(input_data[:,j]), float.(target_data[:,j]))], opt)
+        # explicit-style Flux training: the do-block is the loss and receives
+        # the model itself; only the readout has trainable parameters
+        @time Flux.train!(lsm, [(input_data, target_data)], opt) do m, x, y
+            Flux.mse(m(x), y)
         end
+        # flux_train(input_data, target_data)
+
+        ŷ = lsm(test_x)
+        mse = Flux.mse(ŷ, test_y)
+        append!(mse_t, mse)
+        println("Current mse: $mse")
+        println("MSE hist: $mse_t")
 
-        append!(loss_t, mean([loss(float.(input_data[:,j]), float.(target_data[:,j])) for j in 36:53]))
+        post_w = copy(Flux.params(readout)[1])
+
+        Δw = post_w - pre_w
+
+        display(heatmap(Δw))
+        println(Δw)
+        println(sum(Δw))
+
+        # append!(loss_t, mean([loss(float.(input_data[:,j]), float.(target_data[:,j])) for j in 36:53]))
+        # print(loss_t)
     end
 end
 
-@time tune_readout()
+# function Flux.Optimise.update!(opt, xs::Zygote.Params, gs::Zygote.Params)
+#     for (x, g) in zip(xs, gs)
+#         update!(opt, x, g)
+#     end
+# end
+
+# function Flux.Optimise.update!(opt, xs::Tuple, gs)
+#     for (x, g) in zip(xs, gs)
+#         update!(opt, x, g)
+#     end
+# end
 
-plot(loss_t)
+# function flux_train(x, y)
+#     grads = gradient(()-> Flux.mse(read_out(reservoir(x)), y), Θ)
 
-# for i in 1:100
-#     println("Running epoch $i")
-#     @time Flux.train!(loss, Flux.params(readout), [(input_data, output_data)], opt)
+#     println("Gradients:")
+#     println(keys(grads))
+
+#     update!(opt, Θ, grads)
 # end
 
+reservoir_size = 1000
+readout_input_width = 50
+
+readout_output_width = 2 # binary classification
+hidden_width = 7
+
+# Make all vars Float64 (see todo.txt)
+layers = Chain(Dense(readout_input_width, hidden_width, sigmoid), Dense(hidden_width, readout_output_width, sigmoid))
+readout = Chain(layers, softmax)
+
+# W_in = randn(Float64, readout_input_width, hidden_width)
+# b_in = randn(Float64, 1, hidden_width)
+# W_out = randn(Float64, hidden_width, readout_output_width)
+# b_out = randn(Float64, 1, readout_output_width)
+
+# function read_out(x::Matrix{Float64})
+#     z = σ.(x * W_in .+ b_in)
+#     y = σ.(z * W_out .+ b_out)
+
+#     y_sftmx = softmax(y)
+#     return y_sftmx
+# end
+
+# θ = (W_in, b_in, W_out, b_out)
+
+# read_out(multiplex(readout_input_width))
+
+reservoir = Reservoir(reservoir_size, readout_input_width)
+
+lsm = Chain(reservoir, readout)
+
+θ = Flux.params(lsm)
+
+# Flux.setup expects an Optimisers.jl rule: Adam, not the legacy ADAM
+opt = Flux.setup(Adam(0.04), lsm)
+
+mse_t = []
+@time tune_readout()
+
+plot(mse_t)
diff --git a/todo.txt b/todo.txt
index 03e12d8..2526f1f 100644
--- a/todo.txt
+++ b/todo.txt
@@ -1 +1,4 @@
-remove spike train generator to allow to feed differnet noise into lsm
+[ ] Ensure Float64 in readout layers
+[ ] Ensure readout is training
+[ ] Add STDP rules
+[ ] Add LIM rules
\ No newline at end of file
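A note on the gradient plumbing in src/min_lsm_v1.jl: the reservoir is a spiking network simulated with in-place updates, which Zygote cannot differentiate, so the file both wraps the matrix forward pass in ignore_derivatives() and defines a ChainRulesCore.rrule for Reservoir, leaving the readout Chain as the only thing Flux.train! actually optimizes. The sketch below isolates that pattern; FrozenBox, the dimensions, and the zero-tangent pullback are illustrative stand-ins, not this repo's API:

    using Flux, ChainRulesCore

    # stand-in for the spiking reservoir: a fixed projection that runs in the
    # forward pass but is deliberately kept out of the gradient computation
    struct FrozenBox
        W::Matrix{Float32}   # fixed weights, never trained
    end
    (f::FrozenBox)(x) = tanh.(f.W * x)

    # calls to a FrozenBox hit this rrule: the forward pass runs normally, but
    # the pullback reports no tangents, so Zygote never traces the internals
    function ChainRulesCore.rrule(f::FrozenBox, x)
        y = f(x)
        frozen_pullback(ȳ) = (NoTangent(), ZeroTangent())
        return y, frozen_pullback
    end

    box     = FrozenBox(randn(Float32, 20, 10))
    readout = Chain(Dense(20 => 8, sigmoid), Dense(8 => 2), softmax)
    model   = Chain(box, readout)

    # explicit-style training, as in tune_readout(): setup builds optimiser
    # state, and the do-block loss receives the model itself
    opt_state = Flux.setup(Adam(0.01), model)
    x, y = randn(Float32, 10, 16), rand(Float32, 2, 16)
    Flux.train!(model, [(x, y)], opt_state) do m, xb, yb
        Flux.mse(m(xb), yb)   # only the readout's parameters receive updates
    end

Because the readout sits downstream of the frozen block, its gradients are computed before the box's pullback even runs; stopping the tangents there costs nothing and only prevents Zygote from erroring on the mutating spike simulation.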