From d2c92eaec16fc3e9aae261c07b85d621a881a2be Mon Sep 17 00:00:00 2001
From: ThummeTo <83663542+ThummeTo@users.noreply.github.com>
Date: Sun, 7 Jul 2024 08:38:05 +0200
Subject: [PATCH] V0.12.1-1 (#136)

* notebook adjusted library versions

* spelling correction, parameters

* adjustments in notebook
---
 .../SciMLUsingFMUs/SciMLUsingFMUs.jl          | 257 ++++++++++--------
 .../pluto-src/SciMLUsingFMUs/src/20000.jld2   | Bin 0 -> 11996 bytes
 2 files changed, 138 insertions(+), 119 deletions(-)
 create mode 100644 examples/pluto-src/SciMLUsingFMUs/src/20000.jld2

diff --git a/examples/pluto-src/SciMLUsingFMUs/SciMLUsingFMUs.jl b/examples/pluto-src/SciMLUsingFMUs/SciMLUsingFMUs.jl
index f746d5d9..f644e5a6 100644
--- a/examples/pluto-src/SciMLUsingFMUs/SciMLUsingFMUs.jl
+++ b/examples/pluto-src/SciMLUsingFMUs/SciMLUsingFMUs.jl
@@ -32,15 +32,15 @@ using FMIFlux.Flux # default Julia Machine Learning library
 # ╔═╡ 98c608d9-c60e-4eb6-b611-69d2ae7054c9
 using FMIFlux.DifferentialEquations # the mighty (O)DE solver suite
 
+# ╔═╡ ddc9ce37-5f93-4851-a74f-8739b38ab092
+using ProgressLogging: @withprogress, @logprogress, @progressid, uuid4
+
 # ╔═╡ de7a4639-e3b8-4439-924d-7d801b4b3eeb
-using BenchmarkTools
+using BenchmarkTools # default benchmarking library
 
 # ╔═╡ 45c4b9dd-0b04-43ae-a715-cd120c571424
 using Plots
 
-# ╔═╡ ddc9ce37-5f93-4851-a74f-8739b38ab092
-using ProgressLogging: @withprogress, @logprogress, @progressid, uuid4
-
 # ╔═╡ 1470df0f-40e1-45d5-a4cc-519cc3b28fb8
 md"""
 # Scientific Machine Learning $br using Functional Mock-Up Units
@@ -58,8 +58,8 @@ by Tobias Thummerer (University of Augsburg)
 
 If there is something YOU know about a physical system, AI shouldn’t need to learn it. How to integrate YOUR system knowledge into an ML development process is the core topic of this hands-on workshop. The entire workshop revolves around a challenging use case from robotics: modeling a robot that is able to write arbitrary messages with a pen. After introducing the topic and the considered use case, participants can experiment with their very own hybrid model topology.
 
 # Introduction
-This workshop focuses on the integration of Functional Mock-Up Units (FMUs) into a machine learning topology. FMUs are simulation models that can be generated within a variety of modeling tools, see the [FMI homepage](https://fmi-standard.org/). Together with deep neural networks that complement and improve the FMU prediction, so called *NeuralFMUs* can be created.
-The workshop itself evolves around the hybrid modeling of a *Selective Compliance Assembly Robot Arm* (SCARA), that is able to write user defined words on a sheet of paper. A ready to use physical simulation model (FMU) for the SCARA is given and shortly highlighted in this workshop. However, this model – as any simulation model – shows some deviations if compared to measurements from the real system. These deviations results from unmodeled slip-stick-friction: The pen sticks to the paper until a force limit is reached, but then moves jerkily. A hard to model physical effect – but not for a NeuralFMU.
+This workshop focuses on the integration of Functional Mock-Up Units (FMUs) into a machine learning topology. FMUs are simulation models that can be generated within a variety of modeling tools, see the [FMI homepage](https://fmi-standard.org/). Together with deep neural networks that complement and improve the FMU prediction, so-called *neural FMUs* can be created.
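To make the term concrete before diving in: a neural FMU wraps the FMU as a layer inside an otherwise ordinary `Flux.Chain`, and the resulting hybrid ODE is solved and trained like any other neural ODE. A minimal sketch only (the layer sizes and the FMU evaluation pattern are illustrative placeholders; the actual topology is assembled step by step later in this notebook, using the same `ME_NeuralFMU` constructor shown there):

```julia
# rough sketch, not the notebook's final topology
model = Chain(x -> fmu(x=x, dx_refs=:all),  # the FMU computes the state derivatives
              Dense(6, 16, tanh),           # ANN layers learn a correction on top
              Dense(16, 6))
neuralFMU = ME_NeuralFMU(fmu, model, (tStart, tStop), solver; saveat=tSave)
```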
+The workshop itself revolves around the hybrid modeling of a *Selective Compliance Assembly Robot Arm* (SCARA) that is able to write user-defined words on a sheet of paper. A ready-to-use physical simulation model (FMU) for the SCARA is given and shortly highlighted in this workshop. However, this model – as any simulation model – shows some deviations when compared to measurements from the real system. These deviations result from unmodeled slip-stick friction: the pen sticks to the paper until a force limit is reached, but then moves jerkily. A hard-to-model physical effect – but not for a neural FMU.
 
 More advanced code snippets are hidden by default and marked with a ghost `👻`. Computations that are disabled for performance reasons are marked with `ℹ️`. They offer a hint on how to enable the idle computation by activating the corresponding checkbox marked with `🎬`.
 
@@ -116,6 +116,16 @@ md"""
 Some additional libraries for machine learning and ODE solvers:
 """
 
+# ╔═╡ 3e2579c2-39ce-4249-ad75-228f82e616da
+md"""
+To visualize a progress bar during training:
+"""
+
+# ╔═╡ 93fab704-a8dd-47ec-ac88-13f32be99460
+md"""
+And to do some benchmarking:
+"""
+
 # ╔═╡ 5cb505f7-01bd-4824-8876-3e0f5a922fb7
 md"""
 Load in the plotting libraries ...
@@ -132,11 +142,6 @@ md"""
 # ╔═╡ e6e91a22-7724-46a3-88c1-315c40660290
 plotlyjs()
 
-# ╔═╡ 3e2579c2-39ce-4249-ad75-228f82e616da
-md"""
-To visualize the progress bar:
-"""
-
 # ╔═╡ 44500f0a-1b89-44af-b135-39ce0fec5810
 md"""
 Next, we define some helper functions that are not important for following the workshop – they are hidden by default. However, they are here if you want to explore what it takes to write fully working code. If you do this workshop for the first time, it is recommended to skip the hidden part and directly go on.
 """
 
@@ -169,8 +174,8 @@ data_train = FMIZoo.RobotRR(:train)
 
 # ╔═╡ 33223393-bfb9-4e9a-8ea6-a3ab6e2f22aa
 begin
 
-# define the prinintg messages used at different places in this notebook
-LIVE_RESULTS_MESSAGE = md"""ℹ️ Live plotting are disabled to safe performance. Checkbox `Plot Results`."""
+# define the printing messages used at different places in this notebook
+LIVE_RESULTS_MESSAGE = md"""ℹ️ Live plotting is disabled to save performance. Checkbox `Plot Results`."""
 LIVE_TRAIN_MESSAGE = md"""ℹ️ Live training is disabled to save performance. Checkbox `Start Training`."""
 BENCHMARK_MESSAGE = md"""ℹ️ Live benchmarks are disabled to save performance. Checkbox `Start Benchmark`."""
 HIDDEN_CODE_MESSAGE = md"""> 👻 Hidden Code | You probably want to skip this code section on the first run."""
 
@@ -224,6 +229,16 @@ function fmiSingleInstanceMode!(fmu::FMU2,
 	return nothing
 end
 
+	function prepareSolveFMU(fmu, parameters)
+		FMIFlux.prepareSolveFMU(fmu, nothing, fmu.type,
+			fmu.executionConfig.instantiate,
+			fmu.executionConfig.freeInstance,
+			fmu.executionConfig.terminate,
+			fmu.executionConfig.reset,
+			fmu.executionConfig.setup,
+			parameters)
+	end
+
 function dividePath(values)
 	last_value = values[1]
 	paths = []
 
@@ -312,7 +327,7 @@ plotRobot(data_train.solution, t_train_plot)
 
 # ╔═╡ d8ca5f66-4f55-48ab-a6c9-a0be662811d9
 md"""
-> 👁️ Interesstingly, the first part of the word "trai" is not significantly affected by the slip-stick-effect, the actual TCP trajectory (green) lays quite good on the target position (black dashed). However, the "n" is very jerky. This can be explained by the increasing lever, the motor needs more torque to overcome the static friction the further away the TCP (orange) is from the robot base (red).
+> 👁️ Interestingly, the first part of the word "trai" is not significantly affected by the slip-stick effect: the actual TCP trajectory (green) lies quite well on the target position (black dashed). However, the "n" is very jerky. This can be explained by the increasing lever arm: the further away the TCP (orange) is from the robot base (red), the more torque the motor needs to overcome the static friction.
 
 Let's extract a start and stop time, as well as saving points for the later solving process:
 """
 
@@ -321,14 +336,14 @@ tSave = data_train.t # time points to save the solution at
 
 # ╔═╡ 8f45871f-f72a-423f-8101-9ce93e5a885b
-tStart = tSave[1] # start time for simulation of FMU and NeuralFMU
+tStart = tSave[1] # start time for simulation of FMU and neural FMU
 
 # ╔═╡ 57c039f7-5b24-4d63-b864-d5f808110b91
-tStop = tSave[end] # stop time for simulation of FMU and NeuralFMU
+tStop = tSave[end] # stop time for simulation of FMU and neural FMU
 
 # ╔═╡ 4510022b-ad28-4fc2-836b-e4baf3c14d26
 md"""
-Finally, also the start state can be grabbed from *FMIZoo.jl*, as well as some default parameters for the simulation model we load in the next section. How to interpretate the six states is discussed in the next section where the model is loaded.
+Finally, the start state can also be grabbed from *FMIZoo.jl*, as well as some default parameters for the simulation model we load in the next section. How to interpret the six states is discussed in the next section, where the model is loaded.
 """
 
 # ╔═╡ 9589416a-f9b3-4b17-a381-a4f660a5ee4c
 
@@ -478,11 +493,11 @@ Now that we know our model and data a little bit better, it's time to care about
 
 # Experiments: $br Hybrid Model Topology
 
-Today is opposite day! Instead of deriving a topology step by step, the final NeuralFMU topology is presented in the picture below... however, three experiments are intended to make clear why it looks the way it looks.
+Today is opposite day! Instead of deriving a topology step by step, the final neural FMU topology is presented in the picture below... however, three experiments are intended to make clear why it looks the way it looks.
 
 ![](https://github.com/ThummeTo/FMIFlux.jl/blob/main/examples/pluto-src/SciMLUsingFMUs/src/plan_complete.png?raw=true)
 
-The first experiment is on choosing a good interface between FMU and ANN. The second is on online data pre- and post-processing. And the third one on gates, that allow to control the influence of ANN and FMU on the resulting hybrid model dynamics. After you completed all three, you are equipped with the knowledge to cope the final challenge: Build your own NeuralFMU and train it!
+The first experiment is on choosing a good interface between FMU and ANN. The second is on online data pre- and post-processing. The third one is on gates that allow controlling the influence of ANN and FMU on the resulting hybrid model dynamics. After you have completed all three, you are equipped with the knowledge to cope with the final challenge: build your own neural FMU and train it!
""" # ╔═╡ 786c4652-583d-43e9-a101-e28c0b6f64e4 @@ -524,7 +539,7 @@ x1 = FMIZoo.getState(data_train, tStart+1.0) # ╔═╡ f4e66f76-76ff-4e21-b4b5-c1ecfd846329 begin using FMIFlux.FMISensitivity.ReverseDiff - FMIFlux.prepareSolveFMU(fmu, nothing, fmu.type) + prepareSolveFMU(fmu, parameters) jac_rwd = ReverseDiff.jacobian(x -> model(x), x1); A_rwd = jac_rwd[1:length(x1), :] end @@ -532,7 +547,7 @@ end # ╔═╡ ea655baa-b4d8-4fce-b699-6a732dc06051 begin using FMIFlux.FMISensitivity.ForwardDiff - FMIFlux.prepareSolveFMU(fmu, nothing, fmu.type) + prepareSolveFMU(fmu, parameters) jac_fwd = ForwardDiff.jacobian(x -> model(x), x1); A_fwd = jac_fwd[1:length(x1), :] end @@ -580,7 +595,7 @@ For forward-mode automatic differentiation (using *ForwardDiff.jl*), it's the sa # ╔═╡ cae2e094-b6a2-45e4-9afd-a6b78e912ab7 md""" -We can determine further jacobians for FMUs, for example the Jacobian $C = \frac{\partial y}{\partial x}$ states (using *ReverseDiff.jl*): +We can determine further Jacobians for FMUs, for example the Jacobian $C = \frac{\partial y}{\partial x}$ states (using *ReverseDiff.jl*): """ # ╔═╡ ac0afa6c-b6ec-4577-aeb6-10d1ec63fa41 @@ -635,13 +650,13 @@ end # ╔═╡ 3bc2b859-d7b1-4b79-88df-8fb517a6929d md""" -Gradient and Jaobian computation takes a little longer of course. We use reverse-mode automatic differentiation via `ReverseDiff.jl` here: +Gradient and Jacobian computation takes a little longer of course. We use reverse-mode automatic differentiation via `ReverseDiff.jl` here: """ # ╔═╡ a501d998-6fd6-496f-9718-3340c42b08a6 begin if BENCHMARK - FMIFlux.prepareSolveFMU(fmu, nothing, fmu.type) + prepareSolveFMU(fmu, parameters) function ben_rwd(x) return ReverseDiff.jacobian(model, x + rand(6)*1e-12); end @@ -660,7 +675,7 @@ Further, forward-mode automatic differentiation is available too via `ForwardDif # ╔═╡ e342be7e-0806-4f72-9e32-6d74ed3ed3f2 begin if BENCHMARK - FMIFlux.prepareSolveFMU(fmu, nothing, fmu.type) + prepareSolveFMU(fmu, parameters) function ben_fwd(x) return ForwardDiff.jacobian(model, x + rand(6)*1e-12); end @@ -705,7 +720,7 @@ end md""" In general, it looks like the velocity isn't saturated too much by `tanh`. This is a good thing and not always the case! However, the very beginning of the trajectory is saturated too much (the peak value of $\approx -3$ is saturated to $\approx -1$). This is bad, because the hybrid model velocity is *slower* at this point in time and it won't reach the same angle over time as the original FMU. -We can add shift (=addition) and scale (=multiplication) operations before and after the ANN to bypass this issue. See how you can influence the output *after* the `tanh` (and the ANN repectively) to match the ranges. The goal is to choose pre- and post-processing parameters so that the signal ranges needed by the FMU are preserved by the hybrid model. +We can add shift (=addition) and scale (=multiplication) operations before and after the ANN to bypass this issue. See how you can influence the output *after* the `tanh` (and the ANN respectively) to match the ranges. The goal is to choose pre- and post-processing parameters so that the signal ranges needed by the FMU are preserved by the hybrid model. """ # ╔═╡ bf6bf640-54bc-44ef-bd4d-b98e934d416e @@ -749,7 +764,7 @@ The left plot shows the negative spike at the very beginning in more detail. In # ╔═╡ b864631b-a9f3-40d4-a6a8-0b57a37a476d md""" -> 💡 In many machine larning applications, pre- and post-processing is done offline. 
If we combine machine learning and physical models, we need to pre- and post-process online at the interfaces. This does at least improve training performance and is a necessity if the nominal values become very large or very small. +> 💡 In many machine learning applications, pre- and post-processing is done offline. If we combine machine learning and physical models, we need to pre- and post-process online at the interfaces. This does at least improve training performance and is a necessity if the nominal values become very large or very small. """ # ╔═╡ 0fb90681-5d04-471a-a7a8-4d0f3ded7bcf @@ -803,15 +818,15 @@ begin # (2) consumption from FMU (gate=1.0 | open) # (3) acceleration from ANN (gate=0.0 | closed) # (4) consumption from ANN (gate=0.0 | closed) - # the acelerations [1,3] and consumptions [2,4] are paired + # the accelerations [1,3] and consumptions [2,4] are paired gates = ScaleSum([GATE_INIT_FMU, GATE_INIT_ANN], [[1,2]]) # gates with sum - # setup the NeuralFMU topology + # setup the neural FMU topology model_gates = Flux.f64(Chain(dx -> cache(dx), # cache `dx` Dense(1, 16, tanh), Dense(16, 1, tanh), # pre-process `dx` dx -> cacheRetrieve(1, dx), # dynamics FMU | dynamics ANN - gates)) # stack toget + gates)) # stack together model_input = collect([v] for v in data_train.da1) model_output = collect(model_gates(inp) for inp in model_input) @@ -820,7 +835,7 @@ begin fig = plot(; ylims=(-3,1), legend=:bottomright) plot!(fig, data_train.t, collect(v[1] for v in model_input), label="dα1 of FMU") plot!(fig, data_train.t, collect(v[1] for v in ANN_output), label="dα1 of ANN") - plot!(fig, data_train.t, collect(v[1] for v in model_output), label="dα1 of NeuralFMU") + plot!(fig, data_train.t, collect(v[1] for v in model_output), label="dα1 of neural FMU") return fig end @@ -836,6 +851,11 @@ This equals the parallel topology: $((GATE_INIT_FMU==1 && GATE_INIT_ANN==1) ? " The neural FMU dynamics equal the FMU dynamics: $((GATE_INIT_FMU==1 && GATE_INIT_ANN==0) ? "✔️" : "❌") """ +# ╔═╡ 1cd976fb-db40-4ebe-b40d-b996e16fc213 +md""" +> 💡 Gates allow to make parts of the architecture *learnable* while still keeping the training results interpretable. +""" + # ╔═╡ 93771b35-4edd-49e3-bed1-a3ccdb7975e6 md""" > 💭 **Further reading:** Optimizing the gates together with the ANN parameters seems a useful strategy if we don't know how FMU and ANN need to interact in the later application. Technically, we keep a part of the architecture *parameterizable* and therefore learnable. How far can we push this game? @@ -843,11 +863,6 @@ md""" > Actually to the point, that the combination of FMU and ANN is described by a single *connection* equation, that is able to express all possible combinations of both models with each other - so a connection between every pair of inputs and outputs. This is discussed in detail as part of our article [*Learnable & Interpretable Model Combination in Dynamic Systems Modeling*](https://doi.org/10.48550/arXiv.2406.08093). """ -# ╔═╡ 1cd976fb-db40-4ebe-b40d-b996e16fc213 -md""" -> 💡 Gates allow to make parts of the architecture *learnable* while still keeping the training results interpretable. -""" - # ╔═╡ e79badcd-0396-4a44-9318-8c6b0a94c5c8 md""" Time to take care of the big picture next. 
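Before moving on, the gate mechanism from this experiment can be boiled down to a few lines. A standalone sketch in plain Julia (not the notebook's `ScaleSum` layer, but the same weighted sum it computes; the gate values are the initialization used above):

```julia
# a gated blend of two dynamics signals - what ScaleSum([gate_fmu, gate_ann], [[1,2]]) computes
gate_fmu, gate_ann = 1.0, 0.0                  # FMU gate open, ANN gate closed
blend(dx_fmu, dx_ann) = gate_fmu * dx_fmu + gate_ann * dx_ann
blend(0.5, -0.1)                               # == 0.5, exactly the FMU dynamics
```

Because both gates are plain scalars, they can be optimized together with the ANN weights, which is what keeps the trained topology interpretable.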
@@ -913,8 +928,8 @@ ANN gates shall be initialized with $(GATES_INIT), meaning the ANN contributes $ # ╔═╡ c0ac7902-0716-4f18-9447-d18ce9081ba5 md""" -## Resulting NeuralFMU -Even if this looks a little confusing at first glance, our final NeuralFMU topology looks like this: +## Resulting neural FMU +Even if this looks a little confusing at first glance, our final neural FMU topology looks like this: """ # ╔═╡ 84215a73-1ab0-416d-a9db-6b29cd4f5d2a @@ -930,9 +945,9 @@ function build_topology(gates_init, add_y_refs, nl, lw) end # pre- and post-processing - preProcess = ShiftScale(ANN_input_Vals) # we put in the derivatives recorded above, FMIFlux shift and scales so we have a data mean of 0 and a standard deivation of 1 + preProcess = ShiftScale(ANN_input_Vals) # we put in the derivatives recorded above, FMIFlux shift and scales so we have a data mean of 0 and a standard deviation of 1 #preProcess.scale[:] *= 0.1 # add some additional "buffer" - postProcess = ScaleShift(preProcess; indices=[1,2]) # initialize the postPrcess as inverse of the preProcess, but only take indices 2 and 3 (we don't need 1, the vehcile velocity) + postProcess = ScaleShift(preProcess; indices=[1,2]) # initialize the postProcess as inverse of the preProcess, but only take indices 1 and 2 # cache cache = CacheLayer() # allocate a cache layer @@ -972,14 +987,14 @@ We can evaluate it, by putting in our start state `x0`. The model computes the r # ╔═╡ f02b9118-3fb5-4846-8c08-7e9bbca9d208 md""" -On basis of this `Chain`, we can build a NeuralFMU very easy: +On basis of this `Chain`, we can build a neural FMU very easy: """ # ╔═╡ d347d51b-743f-4fec-bed7-6cca2b17bacb md""" # Training -After setting everything up, we can give it a try and train our created NeuralFMU. Deepending on the chosen optimization hyper parameters, this will be more or less successful. Feel free to play around a bit, but keep in mind that for real application design, you should do hyper parameter optimization instead of playing around by yourself. +After setting everything up, we can give it a try and train our created neural FMU. Depending on the chosen optimization hyperparameters, this will be more or less successful. Feel free to play around a bit, but keep in mind that for real application design, you should do hyper parameter optimization instead of playing around by yourself. """ # ╔═╡ d60d2561-51a4-4f8a-9819-898d70596e0c @@ -991,7 +1006,7 @@ Besides the already introduced hyperparameters - the depth, width and initial ga For this example, we use the well-known `Adam`-Optimizer with a step size `eta` of $(@bind ETA Select([1e-4 => "1e-4", 1e-3 => "1e-3", 1e-2 => "1e-2"])). ### Batching -Because data has a significant length, gradient computation over the entire simulation trajectory might not be effective. The most common approach is to *cut* data into slices and train on these subsets instead of the entire trajctory at once. In this example, data is cut in pieces with length of $(@bind BATCHDUR Select([0.05, 0.1, 0.15, 0.2])) seconds. +Because data has a significant length, gradient computation over the entire simulation trajectory might not be effective. The most common approach is to *cut* data into slices and train on these subsets instead of the entire trajectory at once. In this example, data is cut in pieces with length of $(@bind BATCHDUR Select([0.05, 0.1, 0.15, 0.2])) seconds. 
""" # ╔═╡ c97f2dea-cb18-409d-9ae8-1d03647a6bb3 @@ -1043,7 +1058,7 @@ md""" ## Summary To summarize, your ANN has a **depth of $(NUM_LAYERS) layers** with a **width of $(LAYERS_WIDTH)** each. The **ANN gates are initialized with $(GATES_INIT*100)%**, so all FMU gates are initialized with $(100-GATES_INIT*100)%. You decided to batch your data with a **batch element length of $(BATCHDUR)** seconds. Besides the state derivatives, you **put $(length(y_refs)) additional variables** in the ANN. Adam optimizer will try to find a good minimum with **`eta` is $(ETA)**. -Batching takes a few seconds and training a few minutes (depending on the number of training steps), so this is not triggered automatically. If you are ready to go, choose a number of training steps and check the checkbox `Start Training`. This will start a training of $(@bind STEPS Select([0, 10, 100, 1000, 2500, 5000, 10000])) training steps. +Batching takes a few seconds and training a few minutes (depending on the number of training steps), so this is not triggered automatically. If you are ready to go, choose a number of training steps and check the checkbox `Start Training`. This will start a training of $(@bind STEPS Select([0, 10, 100, 1000, 2500, 5000, 10000])) training steps. Alternatively, you can change the training mode to `demo` which loads parameters from a pre-trained model. """ # ╔═╡ abc57328-4de8-42d8-9e79-dd4020769dd9 @@ -1069,7 +1084,7 @@ final_model(x0) # ╔═╡ 91473bef-bc23-43ed-9989-34e62166d455 begin neuralFMU = ME_NeuralFMU( - fmu, # the FMU used in the NeuralFMU + fmu, # the FMU used in the neural FMU final_model, # the model we specified above (tStart, tStop),# start and stop time for solving solver; # the solver (Tsit5) @@ -1078,9 +1093,12 @@ end # ╔═╡ 404ca10f-d944-4a9f-addb-05efebb4f159 begin + import Downloads + demo_path = Downloads.download("https://github.com/ThummeTo/FMIFlux.jl/blob/main/examples/pluto-src/SciMLUsingFMUs/src/20000.jld2?raw=true") + # in demo mode, we load parameters from a pre-trained model if MODE == :demo - fmiLoadParameters(neuralFMU, "C:\\Users\\thummeto\\Documents\\Dissertation\\Publikationen\\MODPROD 2024\\results\\20000.jld2") + fmiLoadParameters(neuralFMU, demo_path) end HIDDEN_CODE_MESSAGE @@ -1118,7 +1136,7 @@ function train(eta, batchdur, steps) return md"""⚠️ Number of training steps is `0`, no training.""" end - FMIFlux.prepareSolveFMU(fmu, nothing, fmu.type) + prepareSolveFMU(fmu, parameters) train_t = data_train.t train_data = collect([data_train.i2[i], data_train.i1[i]] for i in 1:length(train_t)) @@ -1126,7 +1144,7 @@ function train(eta, batchdur, steps) #@info @info "Started batching ..." 
- batch = batchDataSolution(neuralFMU, # our NeuralFMU model + batch = batchDataSolution(neuralFMU, # our neural FMU model t -> FMIZoo.getState(data_train, t), # a function returning a start state for a given time point `t`, to determine start states for batch elements train_t, # data time points train_data; # data cumulative consumption @@ -1145,11 +1163,11 @@ function train(eta, batchdur, steps) maxiters = round(Int, 1e5*batchdur) - _loss = p -> FMIFlux.Losses.loss(neuralFMU, # the NeuralFMU to simulate + _loss = p -> FMIFlux.Losses.loss(neuralFMU, # the neural FMU to simulate batch; # the batch to take an element from - p=p, # the NeuralFMU training parameters (given as input) + p=p, # the neural FMU training parameters (given as input) lossFct=lossFct, # our custom loss function - batchIndex=scheduler.elementIndex, # the index of the batch element to take, determined by the choosen scheduler + batchIndex=scheduler.elementIndex, # the index of the batch element to take, determined by the chosen scheduler logLoss=true, # log losses after every evaluation showProgress=false, parameters=parameters, @@ -1212,12 +1230,10 @@ md""" # ╔═╡ ff106912-d18c-487f-bcdd-7b7af2112cab md""" # Results -Now it's time to find out if it worked! - -ℹ️ Plotting results makes the notebook slow, so it's deactivated by default. Activate it to plot results of your training. +Now it's time to find out if it worked! Plotting results makes the notebook slow, so it's deactivated by default. Activate it to plot results of your training. ## Training results -Let's check out the *training* results of the freshly trained NeuralFMU. +Let's check out the *training* results of the freshly trained neural FMU. """ # ╔═╡ 51eeb67f-a984-486a-ab8a-a2541966fa72 @@ -1289,7 +1305,7 @@ begin "rRPositionControl_Elasticity.tCP.N", "rRPositionControl_Elasticity.tCP.a_x", "rRPositionControl_Elasticity.tCP.a_y"], - showProgress=true, maxiters=1e7, saveat=data_train.t, solver=Tsit5()); + showProgress=true, maxiters=1e6, saveat=data_train.t, solver=Tsit5()); nothing end @@ -1317,10 +1333,13 @@ begin end # ╔═╡ b0ce7b92-93e0-4715-8324-3bf4ff42a0b3 -begin +let if LIVE_RESULTS + loss_fmu = loss(fmu_train, data_train) + loss_nfmu = loss(result_train, data_train) + md""" -The loss function value of the FMU on training data is $(round(loss(fmu_train, data_train); digits=6)), of the NeuralFMU it is $(round(loss(result_train, data_train); digits=6)). +The loss function value of the FMU on training data is $(round(loss_fmu; digits=6)), of the neural FMU it is $(round(loss_nfmu; digits=6)). The neural FMU is about $(round(loss_fmu/loss_nfmu; digits=1)) times more accurate. 
""" else LIVE_RESULTS_MESSAGE @@ -1333,7 +1352,7 @@ let fig = plot(; dpi=300, size=(200*3,60*3)) plotPaths!(fig, data_train.tcp_px, data_train.tcp_py, data_train.tcp_norm_f, label="Data", color=:black, style=:dash) plotPaths!(fig, collect(v[1] for v in fmu_train.values.saveval), collect(v[2] for v in fmu_train.values.saveval), collect(v[3] for v in fmu_train.values.saveval), label="FMU", color=:orange) - plotPaths!(fig, collect(v[1] for v in result_train.values.saveval), collect(v[2] for v in result_train.values.saveval), collect(v[3] for v in result_train.values.saveval), label="NeuralFMU", color=:blue) + plotPaths!(fig, collect(v[1] for v in result_train.values.saveval), collect(v[2] for v in result_train.values.saveval), collect(v[3] for v in result_train.values.saveval), label="Neural FMU", color=:blue) else LIVE_RESULTS_MESSAGE end @@ -1345,7 +1364,7 @@ let fig = plot(; dpi=300, size=(40*10,40*10), xlims=(0.165, 0.205), ylims=(-0.035, 0.005)) plotPaths!(fig, data_train.tcp_px, data_train.tcp_py, data_train.tcp_norm_f, label="Data", color=:black, style=:dash) plotPaths!(fig, collect(v[1] for v in fmu_train.values.saveval), collect(v[2] for v in fmu_train.values.saveval), collect(v[3] for v in fmu_train.values.saveval), label="FMU", color=:orange) - plotPaths!(fig, collect(v[1] for v in result_train.values.saveval), collect(v[2] for v in result_train.values.saveval), collect(v[3] for v in result_train.values.saveval), label="NeuralFMU", color=:blue) + plotPaths!(fig, collect(v[1] for v in result_train.values.saveval), collect(v[2] for v in result_train.values.saveval), collect(v[3] for v in result_train.values.saveval), label="Neural FMU", color=:blue) else LIVE_RESULTS_MESSAGE end @@ -1357,7 +1376,7 @@ let fig = plot(; dpi=300, size=(50*10,40*10), xlims=(0.245, 0.295), ylims=(-0.04, 0.0)) plotPaths!(fig, data_train.tcp_px, data_train.tcp_py, data_train.tcp_norm_f, label="Data", color=:black, style=:dash) plotPaths!(fig, collect(v[1] for v in fmu_train.values.saveval), collect(v[2] for v in fmu_train.values.saveval), collect(v[3] for v in fmu_train.values.saveval), label="FMU", color=:orange) - plotPaths!(fig, collect(v[1] for v in result_train.values.saveval), collect(v[2] for v in result_train.values.saveval), collect(v[3] for v in result_train.values.saveval), label="NeuralFMU", color=:blue) + plotPaths!(fig, collect(v[1] for v in result_train.values.saveval), collect(v[2] for v in result_train.values.saveval), collect(v[3] for v in result_train.values.saveval), label="Neural FMU", color=:blue) else LIVE_RESULTS_MESSAGE end @@ -1366,7 +1385,7 @@ end # ╔═╡ b489f97d-ee90-48c0-af06-93b66a1f6d2e md""" ## Validation results -Let's check out the *validation* results of the freshly trained NeuralFMU. +Let's check out the *validation* results of the freshly trained neural FMU. 
""" # ╔═╡ 4dad3e55-5bfd-4315-bb5a-2680e5cbd11c @@ -1382,7 +1401,7 @@ begin recordValues=["rRPositionControl_Elasticity.tCP.p_x", "rRPositionControl_Elasticity.tCP.p_y", "rRPositionControl_Elasticity.tCP.N"], - showProgress=true, maxiters=1e7, saveat=data_validation.t, solver=Tsit5()); + showProgress=true, maxiters=1e6, saveat=data_validation.t, solver=Tsit5()); nothing end @@ -1400,7 +1419,7 @@ begin recordValues=["rRPositionControl_Elasticity.tCP.p_x", "rRPositionControl_Elasticity.tCP.p_y", "rRPositionControl_Elasticity.tCP.N"], - showProgress=true, maxiters=1e7, saveat=data_validation.t); + showProgress=true, maxiters=1e6, saveat=data_validation.t); nothing else LIVE_RESULTS_MESSAGE @@ -1410,8 +1429,10 @@ end # ╔═╡ 8d9dc86e-f38b-41b1-80c6-b2ab6f488a3a begin if LIVE_RESULTS + loss_fmu = loss(fmu_validation, data_validation) + loss_nfmu = loss(result_validation, data_validation) md""" -The loss function value of the FMU on validation data is $(round(loss(fmu_validation, data_validation); digits=6)), of the NeuralFMU it is $(round(loss(result_validation, data_validation); digits=6)). +The loss function value of the FMU on validation data is $(round(loss_fmu; digits=6)), of the neural FMU it is $(round(loss_nfmu; digits=6)). The neural FMU is about $(round(loss_fmu/loss_nfmu; digits=1)) times more accurate. """ else LIVE_RESULTS_MESSAGE @@ -1424,7 +1445,7 @@ let fig = plot(; dpi=300, size=(200*3,40*3)) plotPaths!(fig, data_validation.tcp_px, data_validation.tcp_py, data_validation.tcp_norm_f, label="Data", color=:black, style=:dash) plotPaths!(fig, collect(v[1] for v in fmu_validation.values.saveval), collect(v[2] for v in fmu_validation.values.saveval), collect(v[3] for v in fmu_validation.values.saveval), label="FMU", color=:orange) - plotPaths!(fig, collect(v[1] for v in result_validation.values.saveval), collect(v[2] for v in result_validation.values.saveval), collect(v[3] for v in result_validation.values.saveval), label="NeuralFMU", color=:blue) + plotPaths!(fig, collect(v[1] for v in result_validation.values.saveval), collect(v[2] for v in result_validation.values.saveval), collect(v[3] for v in result_validation.values.saveval), label="Neural FMU", color=:blue) else LIVE_RESULTS_MESSAGE end @@ -1436,7 +1457,7 @@ let fig = plot(; dpi=300, size=(35*10,50*10), xlims=(0.188, 0.223), ylims=(-0.025, 0.025)) plotPaths!(fig, data_validation.tcp_px, data_validation.tcp_py, data_validation.tcp_norm_f, label="Data", color=:black, style=:dash) plotPaths!(fig, collect(v[1] for v in fmu_validation.values.saveval), collect(v[2] for v in fmu_validation.values.saveval), collect(v[3] for v in fmu_validation.values.saveval), label="FMU", color=:orange) - plotPaths!(fig, collect(v[1] for v in result_validation.values.saveval), collect(v[2] for v in result_validation.values.saveval), collect(v[3] for v in result_validation.values.saveval), label="NeuralFMU", color=:blue) + plotPaths!(fig, collect(v[1] for v in result_validation.values.saveval), collect(v[2] for v in result_validation.values.saveval), collect(v[3] for v in result_validation.values.saveval), label="Neural FMU", color=:blue) else LIVE_RESULTS_MESSAGE end @@ -1498,6 +1519,7 @@ Tobias Thummerer, Johannes Stoljar and Lars Mikelsons. 2022. 
**NeuralFMU: presen PLUTO_PROJECT_TOML_CONTENTS = """ [deps] BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" +Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6" FMI = "14a09403-18e3-468f-ad8a-74f8dda2d9ac" FMIFlux = "fabad875-0d53-4e47-9446-963b74cae21f" FMIZoo = "724179cf-c260-40a9-bd27-cccc6fe2f195" @@ -1511,11 +1533,11 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" [compat] BenchmarkTools = "~1.5.0" FMI = "~0.13.3" -FMIFlux = "~0.12.1" +FMIFlux = "~0.12.2" FMIZoo = "~0.3.3" -JLD2 = "~0.4.48" +JLD2 = "~0.4.49" PlotlyJS = "~0.18.13" -Plots = "~1.40.4" +Plots = "~1.40.5" PlutoUI = "~0.7.59" ProgressLogging = "~0.1.4" """ @@ -1526,7 +1548,7 @@ PLUTO_MANIFEST_TOML_CONTENTS = """ julia_version = "1.10.3" manifest_format = "2.0" -project_hash = "08923028677604b1c11bc64fd352208482d3b0c5" +project_hash = "79772b37e2cae2421c7159b63f3cbe881b42eaeb" [[deps.ADTypes]] git-tree-sha1 = "016833eb52ba2d6bea9fcb50ca295980e728ee24" @@ -1604,9 +1626,9 @@ version = "0.4.0" [[deps.ArrayInterface]] deps = ["Adapt", "LinearAlgebra", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "5c9b74c973181571deb6442d41e5c902e6b9f38e" +git-tree-sha1 = "ed2ec3c9b483842ae59cd273834e5b46206d6dda" uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" -version = "7.12.0" +version = "7.11.0" [deps.ArrayInterface.extensions] ArrayInterfaceBandedMatricesExt = "BandedMatrices" @@ -1632,9 +1654,9 @@ version = "7.12.0" [[deps.ArrayLayouts]] deps = ["FillArrays", "LinearAlgebra"] -git-tree-sha1 = "8556500c18fcad8b4c44058e23fbc4a36143f6be" +git-tree-sha1 = "600078184f7de14b3e60efe13fc0ba5c59f6dca5" uuid = "4c555306-a7a7-4459-81d9-ec55ddd5c99a" -version = "1.10.1" +version = "1.10.0" weakdeps = ["SparseArrays"] [deps.ArrayLayouts.extensions] @@ -1797,9 +1819,9 @@ version = "0.1.13" [[deps.CodecZlib]] deps = ["TranscodingStreams", "Zlib_jll"] -git-tree-sha1 = "b8fe8546d52ca154ac556809e10c75e6e7430ac8" +git-tree-sha1 = "59939d8a997469ee05c4b4944560a820f9ba0d73" uuid = "944b1d66-785c-5afd-91f1-9de20f533193" -version = "0.7.5" +version = "0.7.4" [[deps.ColorSchemes]] deps = ["ColorTypes", "ColorVectorSpace", "Colors", "FixedPointNumbers", "PrecompileTools", "Random"] @@ -1871,9 +1893,9 @@ version = "0.2.3" [[deps.ConcurrentUtilities]] deps = ["Serialization", "Sockets"] -git-tree-sha1 = "ea32b83ca4fefa1768dc84e504cc0a94fb1ab8d1" +git-tree-sha1 = "6cbbd4d241d7e6579ab354737f4dd95ca43946e1" uuid = "f0e56b4a-5159-44fe-b623-3e5288b988bb" -version = "2.4.2" +version = "2.4.1" [[deps.ConstructionBase]] deps = ["LinearAlgebra"] @@ -2185,9 +2207,7 @@ version = "0.3.2" [[deps.FMIFlux]] deps = ["Colors", "DifferentiableEigen", "DifferentialEquations", "FMIImport", "FMISensitivity", "Flux", "Optim", "Printf", "ProgressMeter", "Requires", "Statistics", "ThreadPools"] -git-tree-sha1 = "792ac12176b79d9d2e31d61ad9cb0947cad8d69c" -repo-rev = "v0.12.2" -repo-url = "https://github.com/ThummeTo/FMIFlux.jl" +git-tree-sha1 = "1315f3bfe3e273eb35ea872d71869814349541cd" uuid = "fabad875-0d53-4e47-9446-963b74cae21f" version = "0.12.2" @@ -2378,16 +2398,16 @@ deps = ["Random"] uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" [[deps.GLFW_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Libglvnd_jll", "Xorg_libXcursor_jll", "Xorg_libXi_jll", "Xorg_libXinerama_jll", "Xorg_libXrandr_jll", "xkbcommon_jll"] -git-tree-sha1 = "3f74912a156096bd8fdbef211eff66ab446e7297" +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libglvnd_jll", "Xorg_libXcursor_jll", "Xorg_libXi_jll", "Xorg_libXinerama_jll", "Xorg_libXrandr_jll"] +git-tree-sha1 = 
"ff38ba61beff76b8f4acad8ab0c97ef73bb670cb" uuid = "0656b61e-2033-5cc2-a64a-77c0f6c09b89" -version = "3.4.0+0" +version = "3.3.9+0" [[deps.GPUArrays]] deps = ["Adapt", "GPUArraysCore", "LLVM", "LinearAlgebra", "Printf", "Random", "Reexport", "Serialization", "Statistics"] -git-tree-sha1 = "04661708f5301394a1f1be86a07a89e835900db6" +git-tree-sha1 = "5c9de6d5af87acd2cf719e214ed7d51e14017b7a" uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7" -version = "10.2.3" +version = "10.2.2" [[deps.GPUArraysCore]] deps = ["Adapt"] @@ -2444,9 +2464,9 @@ version = "1.3.14+0" [[deps.Graphs]] deps = ["ArnoldiMethod", "Compat", "DataStructures", "Distributed", "Inflate", "LinearAlgebra", "Random", "SharedArrays", "SimpleTraits", "SparseArrays", "Statistics"] -git-tree-sha1 = "ebd18c326fa6cee1efb7da9a3b45cf69da2ed4d9" +git-tree-sha1 = "334d300809ae0a68ceee3444c6e99ded412bf0b3" uuid = "86223c79-3864-5bf0-83f7-82e725a168b6" -version = "1.11.2" +version = "1.11.1" [[deps.Grisu]] git-tree-sha1 = "53bb909d1151e57e2484c3d1b53e19552b887fb2" @@ -2466,10 +2486,10 @@ version = "0.17.2" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" [[deps.HDF5_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "LibCURL_jll", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenMPI_jll", "OpenSSL_jll", "TOML", "Zlib_jll", "libaec_jll"] -git-tree-sha1 = "82a471768b513dc39e471540fdadc84ff80ff997" +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LLVMOpenMP_jll", "LazyArtifacts", "LibCURL_jll", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenMPI_jll", "OpenSSL_jll", "TOML", "Zlib_jll", "libaec_jll"] +git-tree-sha1 = "38c8874692d48d5440d5752d6c74b0c6b0b60739" uuid = "0234f1f7-429e-5d53-9886-15a909be8d59" -version = "1.14.3+3" +version = "1.14.2+1" [[deps.HTTP]] deps = ["Base64", "CodecZlib", "ConcurrentUtilities", "Dates", "ExceptionUnwrapping", "Logging", "LoggingExtras", "MbedTLS", "NetworkOptions", "OpenSSL", "Random", "SimpleBufferStream", "Sockets", "URIs", "UUIDs"] @@ -2588,9 +2608,9 @@ version = "1.0.0" [[deps.JLD2]] deps = ["FileIO", "MacroTools", "Mmap", "OrderedCollections", "Pkg", "PrecompileTools", "Reexport", "Requires", "TranscodingStreams", "UUIDs", "Unicode"] -git-tree-sha1 = "bdbe8222d2f5703ad6a7019277d149ec6d78c301" +git-tree-sha1 = "84642bc18a79d715b39d3724b03cbdd2e7d48c62" uuid = "033835bb-8acc-5ee8-8aae-3f567f8a3819" -version = "0.4.48" +version = "0.4.49" [[deps.JLFzf]] deps = ["Pipe", "REPL", "Random", "fzf_jll"] @@ -2715,9 +2735,9 @@ version = "1.3.1" [[deps.Latexify]] deps = ["Format", "InteractiveUtils", "LaTeXStrings", "MacroTools", "Markdown", "OrderedCollections", "Requires"] -git-tree-sha1 = "5b0d630f3020b82c0775a51d05895852f8506f50" +git-tree-sha1 = "e0b5cd21dc1b44ec6e64f351976f961e6f31d6c4" uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" -version = "0.16.4" +version = "0.16.3" [deps.Latexify.extensions] DataFramesExt = "DataFrames" @@ -3062,22 +3082,20 @@ version = "4.5.1" [[deps.NNlib]] deps = ["Adapt", "Atomix", "ChainRulesCore", "GPUArraysCore", "KernelAbstractions", "LinearAlgebra", "Pkg", "Random", "Requires", "Statistics"] -git-tree-sha1 = "1288e6db94d98f7d194454452176b82edb25b32c" +git-tree-sha1 = "78de319bce99d1d8c1d4fe5401f7cfc2627df396" uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" -version = "0.9.19" +version = "0.9.18" [deps.NNlib.extensions] NNlibAMDGPUExt = "AMDGPU" NNlibCUDACUDNNExt = ["CUDA", "cuDNN"] NNlibCUDAExt = "CUDA" NNlibEnzymeCoreExt = "EnzymeCore" - NNlibFFTWExt = 
"FFTW" [deps.NNlib.weakdeps] AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e" CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" EnzymeCore = "f151be2c-9106-41f4-ab19-57ee4f262869" - FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd" [[deps.NaNMath]] @@ -3145,9 +3163,9 @@ uuid = "510215fc-4207-5dde-b226-833fc4488ee2" version = "0.5.5" [[deps.OffsetArrays]] -git-tree-sha1 = "1a27764e945a152f7ca7efa04de513d473e9542e" +git-tree-sha1 = "e64b4f5ea6b7389f6f046d13d4896a8f9c1ba71e" uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" -version = "1.14.1" +version = "1.14.0" weakdeps = ["Adapt"] [deps.OffsetArrays.extensions] @@ -3176,10 +3194,10 @@ uuid = "05823500-19ac-5b8b-9628-191a04bc5112" version = "0.8.1+2" [[deps.OpenMPI_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"] -git-tree-sha1 = "e25c1778a98e34219a00455d6e4384e017ea9762" +deps = ["Artifacts", "CompilerSupportLibraries_jll", "Hwloc_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML", "Zlib_jll"] +git-tree-sha1 = "a9de2f1fc98b92f8856c640bf4aec1ac9b2a0d86" uuid = "fe0851c0-eecd-5654-98d4-656369965a5c" -version = "4.1.6+0" +version = "5.0.3+0" [[deps.OpenSSL]] deps = ["BitFlags", "Dates", "MozillaCACerts_jll", "OpenSSL_jll", "Sockets"] @@ -3328,10 +3346,10 @@ uuid = "f2990250-8cf9-495f-b13a-cce12b45703c" version = "2.2.4" [[deps.Plots]] -deps = ["Base64", "Contour", "Dates", "Downloads", "FFMPEG", "FixedPointNumbers", "GR", "JLFzf", "JSON", "LaTeXStrings", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "Pkg", "PlotThemes", "PlotUtils", "PrecompileTools", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "RelocatableFolders", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs", "UnicodeFun", "UnitfulLatexify", "Unzip"] -git-tree-sha1 = "442e1e7ac27dd5ff8825c3fa62fbd1e86397974b" +deps = ["Base64", "Contour", "Dates", "Downloads", "FFMPEG", "FixedPointNumbers", "GR", "JLFzf", "JSON", "LaTeXStrings", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "Pkg", "PlotThemes", "PlotUtils", "PrecompileTools", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "RelocatableFolders", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "TOML", "UUIDs", "UnicodeFun", "UnitfulLatexify", "Unzip"] +git-tree-sha1 = "082f0c4b70c202c37784ce4bfbc33c9f437685bf" uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -version = "1.40.4" +version = "1.40.5" [deps.Plots.extensions] FileIOExt = "FileIO" @@ -3583,9 +3601,9 @@ version = "0.6.43" [[deps.SciMLBase]] deps = ["ADTypes", "Accessors", "ArrayInterface", "CommonSolve", "ConstructionBase", "Distributed", "DocStringExtensions", "EnumX", "FunctionWrappersWrappers", "IteratorInterfaceExtensions", "LinearAlgebra", "Logging", "Markdown", "PrecompileTools", "Preferences", "Printf", "RecipesBase", "RecursiveArrayTools", "Reexport", "RuntimeGeneratedFunctions", "SciMLOperators", "SciMLStructures", "StaticArraysCore", "Statistics", "SymbolicIndexingInterface", "Tables"] -git-tree-sha1 = "281e82f2ae2b73262fed9e7a518711eb7feb7e59" +git-tree-sha1 = "7a6c5c8c38d2e37f45d4686c3598c20c1aebf48e" uuid = "0bca4576-84f4-4d90-8ffe-ffa030f20462" -version = "2.42.0" +version = "2.41.3" [deps.SciMLBase.extensions] SciMLBaseChainRulesCoreExt = "ChainRulesCore" @@ -3773,9 +3791,9 @@ weakdeps = ["OffsetArrays", "StaticArrays"] [[deps.StaticArrays]] deps = ["LinearAlgebra", "PrecompileTools", "Random", 
"StaticArraysCore"] -git-tree-sha1 = "eeafab08ae20c62c44c8399ccb9354a04b80db50" +git-tree-sha1 = "20833c5b7f7edf0e5026f23db7f268e4f23ec577" uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "1.9.7" +version = "1.9.6" weakdeps = ["ChainRulesCore", "Statistics"] [deps.StaticArrays.extensions] @@ -3875,9 +3893,9 @@ version = "5.2.2+0" [[deps.SymbolicIndexingInterface]] deps = ["Accessors", "ArrayInterface", "RuntimeGeneratedFunctions", "StaticArraysCore"] -git-tree-sha1 = "9c490ee01823dc443da25bf9225827e3cdd2d7e9" +git-tree-sha1 = "a5f6f138b740c9d93d76f0feddd3092e6ef002b7" uuid = "2efcf032-c050-4f8e-a9bb-153293bab1f5" -version = "0.3.26" +version = "0.3.22" [[deps.TOML]] deps = ["Dates"] @@ -4021,9 +4039,9 @@ weakdeps = ["ConstructionBase", "InverseFunctions"] [[deps.UnitfulLatexify]] deps = ["LaTeXStrings", "Latexify", "Unitful"] -git-tree-sha1 = "975c354fcd5f7e1ddcc1f1a23e6e091d99e99bc8" +git-tree-sha1 = "e2d817cc500e960fdbafcf988ac8436ba3208bfd" uuid = "45397f5d-5981-4c77-b2b3-fc36d6e9b728" -version = "1.6.4" +version = "1.6.3" [[deps.UnsafeAtomics]] git-tree-sha1 = "6331ac3440856ea1988316b46045303bef658278" @@ -4418,14 +4436,15 @@ version = "1.4.1+1" # ╟─85308992-04c4-4d20-a840-6220cab54680 # ╠═eaae989a-c9d2-48ca-9ef8-fd0dbff7bcca # ╠═98c608d9-c60e-4eb6-b611-69d2ae7054c9 +# ╟─3e2579c2-39ce-4249-ad75-228f82e616da +# ╠═ddc9ce37-5f93-4851-a74f-8739b38ab092 +# ╟─93fab704-a8dd-47ec-ac88-13f32be99460 # ╠═de7a4639-e3b8-4439-924d-7d801b4b3eeb # ╟─5cb505f7-01bd-4824-8876-3e0f5a922fb7 # ╠═45c4b9dd-0b04-43ae-a715-cd120c571424 # ╠═33d648d3-e66e-488f-a18d-e538ebe9c000 # ╟─1e9541b8-5394-418d-8c27-2831951c538d # ╠═e6e91a22-7724-46a3-88c1-315c40660290 -# ╟─3e2579c2-39ce-4249-ad75-228f82e616da -# ╠═ddc9ce37-5f93-4851-a74f-8739b38ab092 # ╟─44500f0a-1b89-44af-b135-39ce0fec5810 # ╟─33223393-bfb9-4e9a-8ea6-a3ab6e2f22aa # ╟─74d23661-751b-4371-bf6b-986149124e81 @@ -4515,8 +4534,8 @@ version = "1.4.1+1" # ╟─845a95c4-9a35-44ae-854c-57432200da1a # ╟─5a399a9b-32d9-4f93-a41f-8f16a4b102dc # ╟─fd1cebf1-5ccc-4bc5-99d4-1eaa30e9762e -# ╟─93771b35-4edd-49e3-bed1-a3ccdb7975e6 # ╟─1cd976fb-db40-4ebe-b40d-b996e16fc213 +# ╟─93771b35-4edd-49e3-bed1-a3ccdb7975e6 # ╟─e79badcd-0396-4a44-9318-8c6b0a94c5c8 # ╟─2a5157c5-f5a2-4330-b2a3-0c1ec0b7adff # ╟─4454c8d2-68ed-44b4-adfa-432297cdc957 diff --git a/examples/pluto-src/SciMLUsingFMUs/src/20000.jld2 b/examples/pluto-src/SciMLUsingFMUs/src/20000.jld2 new file mode 100644 index 0000000000000000000000000000000000000000..e73c9760199b277f01f2602e507d1b18a3cdefc8 GIT binary patch literal 11996 zcmeIYX*AYd^fzt@l?H_dic$y_LK$vt$*fSxR5YMSna5N_#*$efLuNA1^ES^Hmy64s zd3Fm4iR!u5|Nj48JujbE&sxuUbIy0Iea<@Hv-a8Nvp@TzB&Q&D?%@L)W1|x)c27+o zoRE89`{0Cvh4r%swr5Y+8(Z6$T9}^@Ixl=)nCirt|4b-jq4Po~E{dLeXli@nnmixX z|7-uZo`8>%oWkDSY`bZ8?xdok`R_0O{{L<0{D0qY_P<{0a)PD0lH5%iDmgleDeW%m zJ(M}rRMd`i{~6G-P*Dq?04mB-lqN$Z%19;pmzIiZ=Rqo}9n>!W8BtXU{5NPrYx61k zU;Y0y%)@7Et2y5243IB$H>y7R3vDjfe~(Qa0>h|#WIY`p6tlZn@9Om$I$u1yvHQ;) zN?a|_)Dm?Aza!jv^1C9iUCAtut!*CIUw}7HD3#2-m+H~q$5~_wxSi7}dz5MBIC_6) z1f7g)D1R8^*ABd!HSbpKTL%U1BU5{7i$K9XfqMK#624U6`bjO2i;R{@tfbG)5N4Vf zBOBX`NjnV=s@T@zP;t7>vzf=>nyiz(v=jw<=EMFyX;FZ;)jf^xhIqrF7lJhQp#*R| ztD_<#H3ZOJX(b^%h%Dq6EMZgKpzkVgK<+b`3C8a_DCQOEX@LPL=#5Qi+qtn7^F!hycgPublgTW<&N{&692E)xa=mm^yO4 z5_FH9-cGqxhWAn?WEtBl!8S%Y_@E>i{QR!mj(ygFtkniNYuW_R)Z|MEznzLHa}{Cp zRK0)-?0fv5#-h$o!>ssM9r%=5EALlAAbvETZF9d$0xGZ7$x)g0X($o|zs(6R-)Y8S(_AIln?#VX{;pzmp&w2qE;}Ygl)zm~ zyXqj;4tEyX(j3DE;b;p}n2>K7>i;>|M`-Oq;+4cpMqDL8yG_e0Y*PoE58PQ@GkbCO zg2yp-lXfV3^rPe6&k)r6L#Jk2)CBJCa}Jb8KPcQgEKRwMtK8bS%1(Rugpbx?Kc*ss`2 