diff --git a/test/batching.jl b/test/batching.jl
index e89b319f..19794d98 100644
--- a/test/batching.jl
+++ b/test/batching.jl
@@ -9,8 +9,8 @@ import Random
 Random.seed!(1234);
 
 t_start = 0.0
-t_step = 0.1
-t_stop = 150.0
+t_step = 0.01
+t_stop = 50.0
 tData = t_start:t_step:t_stop
 
 # generate training data
diff --git a/test/fmu_params.jl b/test/fmu_params.jl
index 3ee3dc47..7076c7f6 100644
--- a/test/fmu_params.jl
+++ b/test/fmu_params.jl
@@ -10,8 +10,8 @@ import Random
 Random.seed!(1234);
 
 t_start = 0.0
-t_step = 0.1
-t_stop = 15.0
+t_step = 0.01
+t_stop = 5.0
 tData = t_start:t_step:t_stop
 
 # generate training data
@@ -66,7 +66,7 @@ numStates = length(fmu.modelDescription.stateValueReferences)
 
 # the "Chain" for training
 net = Chain(FMUParameterRegistrator(fmu, p_refs, p),
-            x -> fmu(x=x, dx=:all)) # , fmuLayer(p))
+            x -> fmu(x=x, dx_refs=:all)) # , fmuLayer(p))
 
 optim = Adam(ETA)
 solver = Tsit5()
diff --git a/test/hybrid_CS.jl b/test/hybrid_CS.jl
index d42ff630..cf3c5c79 100644
--- a/test/hybrid_CS.jl
+++ b/test/hybrid_CS.jl
@@ -9,8 +9,8 @@ import Random
 Random.seed!(1234);
 
 t_start = 0.0
-t_step = 0.1
-t_stop = 15.0
+t_step = 0.01
+t_stop = 5.0
 tData = t_start:t_step:t_stop
 
 # generate training data
diff --git a/test/hybrid_ME.jl b/test/hybrid_ME.jl
index 7edaa91d..b4f1abca 100644
--- a/test/hybrid_ME.jl
+++ b/test/hybrid_ME.jl
@@ -10,8 +10,8 @@ import Random
 Random.seed!(1234);
 
 t_start = 0.0
-t_step = 0.1
-t_stop = 15.0
+t_step = 0.01
+t_stop = 5.0
 tData = t_start:t_step:t_stop
 
 # generate training data
diff --git a/test/hybrid_ME_dis.jl b/test/hybrid_ME_dis.jl
index 50aae01d..fe1d8953 100644
--- a/test/hybrid_ME_dis.jl
+++ b/test/hybrid_ME_dis.jl
@@ -10,8 +10,8 @@ import Random
 Random.seed!(5678);
 
 t_start = 0.0
-t_step = 0.1
-t_stop = 15.0
+t_step = 0.01
+t_stop = 5.0
 tData = t_start:t_step:t_stop
 
 # generate training data
@@ -163,6 +163,7 @@ for i in 1:length(nets)
 
     # train it ...
     p_net = Flux.params(problem)
+    @test length(p_net) == 1
 
     @test problem !== nothing
 
@@ -175,7 +176,12 @@ for i in 1:length(nets)
 
     iterCB = 0
     lastLoss = losssum(p_net[1])
-    @info "[ $(iterCB)] Loss: $lastLoss"
+    @info "Start-Loss for net #$i: $lastLoss"
+
+    if length(p_net[1]) == 0
+        @info "The following warning is not an issue, because training on zero parameters must throw a warning:"
+    end
+
     FMIFlux.train!(losssum, p_net, Iterators.repeated((), NUMSTEPS), optim; cb=()->callb(p_net), gradient=GRADIENT)
 
     # check results
diff --git a/test/multi.jl b/test/multi.jl
index 45e8b37a..cec3c176 100644
--- a/test/multi.jl
+++ b/test/multi.jl
@@ -10,8 +10,8 @@ import Random
 Random.seed!(1234);
 
 t_start = 0.0
-t_step = 0.1
-t_stop = 15.0
+t_step = 0.01
+t_stop = 5.0
 tData = t_start:t_step:t_stop
 
 # generate training data
diff --git a/test/multi_threading.jl b/test/multi_threading.jl
index 4bdadda9..06014b34 100644
--- a/test/multi_threading.jl
+++ b/test/multi_threading.jl
@@ -10,8 +10,8 @@ import Random
 Random.seed!(5678);
 
 t_start = 0.0
-t_step = 0.1
-t_stop = 15.0
+t_step = 0.01
+t_stop = 5.0
 tData = t_start:t_step:t_stop
 
 # generate training data
diff --git a/test/optim.jl b/test/optim.jl
index 70649065..4f5007a1 100644
--- a/test/optim.jl
+++ b/test/optim.jl
@@ -11,8 +11,8 @@ import Random
 Random.seed!(1234);
 
 t_start = 0.0
-t_step = 0.1
-t_stop = 15.0
+t_step = 0.01
+t_stop = 5.0
 tData = t_start:t_step:t_stop
 
 # generate training data
diff --git a/test/runtests.jl b/test/runtests.jl
index ff6beb2a..bc12d500 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -27,9 +27,9 @@ global x0 = [2.0, 0.0]
 
 # training data for pendulum experiment
 function syntTrainingData(tData)
-    posData = cos.(tData)* 1.0
-    velData = sin.(tData)*-1.0
-    accData = cos.(tData)*-1.0
+    posData = cos.(tData*3.0)* 2.0
+    velData = sin.(tData*3.0)*-6.0
+    accData = cos.(tData*3.0)*-18.0
     return posData, velData, accData
 end
 
diff --git a/test/train_modes.jl b/test/train_modes.jl
index 44e62df6..8df4e24f 100644
--- a/test/train_modes.jl
+++ b/test/train_modes.jl
@@ -11,8 +11,8 @@ import Random
 Random.seed!(5678);
 
 t_start = 0.0
-t_step = 0.1
-t_stop = 15.0
+t_step = 0.01
+t_stop = 5.0
 tData = t_start:t_step:t_stop
 
 # generate training data