v0.10.2 (#87)
* minor change tutorial

* Update hybrid_ME.jl

corrected ME continuous test

* Update hybrid_ME_dis.jl

corrected ME discontinuous test

* parameter sensitivities for FMUs

* new FMUParameterRegistrator Layer

* fixed test

* minor fix

* removed print message

* adjustments to new libraries

* version updates

* assert to warn for wrong saveat

* test

* test

* reactivated tests

* fixed multi-threading test
ThummeTo committed Jul 3, 2023
1 parent 568e585 commit d887406
Showing 13 changed files with 454 additions and 189 deletions.
10 changes: 5 additions & 5 deletions Project.toml
@@ -1,6 +1,6 @@
name = "FMIFlux"
uuid = "fabad875-0d53-4e47-9446-963b74cae21f"
version = "0.10.1"
version = "0.10.2"

[deps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
@@ -18,15 +18,15 @@ Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
ThreadPools = "b189fb0b-2eb5-4ed4-bc0c-d34c51242431"

[compat]
ChainRulesCore = "1.15.0"
ChainRulesCore = "1.16.0"
Colors = "0.12.8"
DiffEqCallbacks = "2.26.0"
DifferentialEquations = "7.7.0"
FMIImport = "0.15.2"
Flux = "0.13.14"
FMIImport = "0.15.6"
Flux = "0.13.16"
Optim = "1.7.0"
ProgressMeter = "1.7.0"
Requires = "1.3.0"
SciMLSensitivity = "7.27.0"
SciMLSensitivity = "7.31.0"
ThreadPools = "2.1.1"
julia = "1.6"
3 changes: 1 addition & 2 deletions README.md
@@ -44,9 +44,8 @@

## What Platforms are supported?
[*FMIFlux.jl*](https://github.com/ThummeTo/FMIFlux.jl) is tested (and testing) under Julia versions *1.6* (LTS) and *1.8* (latest) on Windows (latest) and Ubuntu (latest). MacOS should work, but untested.
-However, please use Julia versions *>= 1.7* if possible, because [*FMIFlux.jl*](https://github.com/ThummeTo/FMIFlux.jl) runs **a lot** faster with these newer Julia versions.
[*FMIFlux.jl*](https://github.com/ThummeTo/FMIFlux.jl) currently only works with FMI2-FMUs.
-All shipped examples are tested under Julia version *1.8* (latest) on Windows (latest).
+All shipped examples are automatically tested under Julia version *1.8* (latest) on Windows (latest).

## What FMI.jl-Library should I use?
![FMI.jl Family](https://github.com/ThummeTo/FMI.jl/blob/main/docs/src/assets/FMI_JL_family.png?raw=true "FMI.jl Family")
54 changes: 29 additions & 25 deletions src/FMIFlux.jl
@@ -12,36 +12,40 @@ if VERSION < v"1.7.0"
end

# ToDo: Quick-fixes until patch release SciMLSensitivity v0.7.29
-import SciMLSensitivity: FakeIntegrator, u_modified!, TrackedAffect
+import SciMLSensitivity: FakeIntegrator, u_modified!, TrackedAffect, set_u!
function u_modified!(::FakeIntegrator, ::Bool)
end

-# ToDo: Quick-fixes until patch release SciMLSensitivity v0.7.28
-function Base.hasproperty(f::TrackedAffect, s::Symbol)
-    if hasfield(TrackedAffect, s)
-        return true
-    else
-        _affect = getfield(f, :affect!)
-        return hasfield(typeof(_affect), s)
-    end
-end
-function Base.getproperty(f::TrackedAffect, s::Symbol)
-    if hasfield(TrackedAffect, s)
-        return getfield(f, s)
-    else
-        _affect = getfield(f, :affect!)
-        return getfield(_affect, s)
-    end
-end
-function Base.setproperty!(f::TrackedAffect, s::Symbol, value)
-    if hasfield(TrackedAffect, s)
-        return setfield!(f, s, value)
-    else
-        _affect = getfield(f, :affect!)
-        return setfield!(_affect, s, value)
-    end
-end
+function set_u!(integrator::FakeIntegrator, u)
+    #integrator.u = u
+end
+
+# ToDo: Quick-fixes until patch release SciMLSensitivity v0.7.28
+# function Base.hasproperty(f::TrackedAffect, s::Symbol)
+#     if hasfield(TrackedAffect, s)
+#         return true
+#     else
+#         _affect = getfield(f, :affect!)
+#         return hasfield(typeof(_affect), s)
+#     end
+# end
+# function Base.getproperty(f::TrackedAffect, s::Symbol)
+#     if hasfield(TrackedAffect, s)
+#         return getfield(f, s)
+#     else
+#         _affect = getfield(f, :affect!)
+#         return getfield(_affect, s)
+#     end
+# end
+# function Base.setproperty!(f::TrackedAffect, s::Symbol, value)
+#     if hasfield(TrackedAffect, s)
+#         return setfield!(f, s, value)
+#     else
+#         _affect = getfield(f, :affect!)
+#         return setfield!(_affect, s, value)
+#     end
+# end

using Requires, Flux

using FMIImport
102 changes: 53 additions & 49 deletions src/batch.jl
@@ -94,40 +94,40 @@ mutable struct FMU2EvaluationBatchElement <: FMU2BatchElement
end
end

-function run!(neuralFMU::ME_NeuralFMU, batchElement::FMU2SolutionBatchElement; lastBatchElement=nothing, kwargs...)
-
function startStateCallback(fmu, batchElement)
    #print("Setting state ... ")

    c = getCurrentComponent(fmu)

    if batchElement.initialState != nothing
        fmi2SetFMUstate(c, batchElement.initialState)
        c.eventInfo = deepcopy(batchElement.initialEventInfo)
        c.t = batchElement.tStart
    else
        batchElement.initialState = fmi2GetFMUstate(c)
        batchElement.initialEventInfo = deepcopy(c.eventInfo)
        @warn "Batch element does not provide a `initialState`, I try to simulate anyway. InitialState is overwritten."
    end
end

function stopStateCallback(fmu, batchElement)
    #print("\nGetting state ... ")

    c = getCurrentComponent(fmu)

    if batchElement.initialState != nothing
        fmi2GetFMUstate!(c, Ref(batchElement.initialState))
    else
        batchElement.initialState = fmi2GetFMUstate(c)
    end
    batchElement.initialEventInfo = deepcopy(c.eventInfo)

    #println("done @ $(batchElement.initialState) in componentState: $(c.state)!")
end

+function run!(neuralFMU::ME_NeuralFMU, batchElement::FMU2SolutionBatchElement; lastBatchElement=nothing, kwargs...)

    ignore_derivatives() do

neuralFMU.customCallbacksAfter = []
neuralFMU.customCallbacksBefore = []
@@ -163,8 +163,12 @@ function run!(neuralFMU::ME_NeuralFMU, batchElement::FMU2SolutionBatchElement; l
return batchElement.solution
end

-function run!(model, batchElement::FMU2EvaluationBatchElement)
-    batchElement.result = collect(model(f)[batchElement.indicesModel] for f in batchElement.features)
+function run!(model, batchElement::FMU2EvaluationBatchElement, p=nothing)
+    if isnothing(p) # implicit parameter model
+        batchElement.result = collect(model(f)[batchElement.indicesModel] for f in batchElement.features)
+    else # explicit parameter model
+        batchElement.result = collect(model(p)(f)[batchElement.indicesModel] for f in batchElement.features)
+    end
end
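
The new optional third argument selects between the two modes: with `p === nothing` the model is called directly, otherwise `model(p)` is expected to build the model from an explicit parameter vector first. A minimal usage sketch (the network and the prepared batch element `element` are illustrative assumptions, not from this commit):

    import Flux

    net = Flux.Chain(Flux.Dense(2, 16, tanh), Flux.Dense(16, 2))

    # implicit parameter model: the network closes over its own weights
    FMIFlux.run!(net, element)

    # explicit parameter model: `re(p0)` rebuilds the network from a flat
    # parameter vector, so the same batch can be scored for varying `p0`
    p0, re = Flux.destructure(net)
    FMIFlux.run!(re, element, p0)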

function plot(batchElement::FMU2SolutionBatchElement; targets::Bool=true, kwargs...)
@@ -297,8 +301,8 @@ function batchDataSolution(neuralFMU::NeuralFMU, x0_fun, train_t::AbstractArray{
iStop = timeToIndex(train_t, tStart + batchDuration)

startElement = FMIFlux.FMU2SolutionBatchElement()
-    startElement.tStart = tStart
-    startElement.tStop = tStart + batchDuration
+    startElement.tStart = train_t[iStart]
+    startElement.tStop = train_t[iStop]
startElement.xStart = x0_fun(tStart)

startElement.saveat = train_t[iStart:iStop]
@@ -314,12 +318,12 @@ function batchDataSolution(neuralFMU::NeuralFMU, x0_fun, train_t::AbstractArray{
FMIFlux.run!(neuralFMU, batch[i-1]; lastBatchElement=batch[i], solverKwargs...)

        # overwrite start state
-        batch[i].tStart = tStart + (i-1) * batchDuration
-        batch[i].tStop = tStart + i * batchDuration
+        iStart = timeToIndex(train_t, tStart + (i-1) * batchDuration)
+        iStop = timeToIndex(train_t, tStart + i * batchDuration)
+        batch[i].tStart = train_t[iStart]
+        batch[i].tStop = train_t[iStop]
        batch[i].xStart = x0_fun(batch[i].tStart)

-        iStart = timeToIndex(train_t, batch[i].tStart)
-        iStop = timeToIndex(train_t, batch[i].tStop)
-
        batch[i].saveat = train_t[iStart:iStop]
        batch[i].targets = targets[iStart:iStop]

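Both boundary times are now snapped onto the sampling grid of `train_t` before they are stored, so `tStart`/`tStop` always coincide with entries of `saveat`. A small sketch of the effect, using a stand-in for the package's `timeToIndex` helper (assumed here to return the index of the nearest sample):

    train_t = collect(0.0:0.1:10.0)
    timeToIndex(ts, t) = argmin(abs.(ts .- t))   # stand-in, assumption

    raw_t = 0.0 + 1 * 0.33           # raw batch boundary, off the grid
    iStart = timeToIndex(train_t, raw_t)
    train_t[iStart]                  # 0.3, snapped onto the sampling grid
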
@@ -335,7 +339,7 @@ function batchDataSolution(neuralFMU::NeuralFMU, x0_fun, train_t::AbstractArray{
end

function batchDataEvaluation(train_t::AbstractArray{<:Real}, targets::AbstractArray, features::Union{AbstractArray, Nothing}=nothing;
-    batchDuration::Real=(train_t[end]-train_t[1]), indicesModel=1:length(targets[1]), plot::Bool=false)
+    batchDuration::Real=(train_t[end]-train_t[1]), indicesModel=1:length(targets[1]), plot::Bool=false, round_digits=3)

batch = Array{FMIFlux.FMU2EvaluationBatchElement,1}()

@@ -347,8 +351,8 @@ function batchDataEvaluation(train_t::AbstractArray{<:Real}, targets::AbstractAr
iStop = timeToIndex(train_t, tStart + batchDuration)

startElement = FMIFlux.FMU2EvaluationBatchElement()
-    startElement.tStart = tStart
-    startElement.tStop = tStart + batchDuration
+    startElement.tStart = train_t[iStart]
+    startElement.tStop = train_t[iStop]

startElement.saveat = train_t[iStart:iStop]
startElement.targets = targets[iStart:iStop]
Expand All @@ -364,12 +368,12 @@ function batchDataEvaluation(train_t::AbstractArray{<:Real}, targets::AbstractAr
for i in 2:floor(Integer, (train_t[end]-train_t[1])/batchDuration)
push!(batch, FMIFlux.FMU2EvaluationBatchElement())

-        # overwrite start state
-        batch[i].tStart = tStart + (i-1) * batchDuration
-        batch[i].tStop = tStart + i * batchDuration
-
-        iStart = timeToIndex(train_t, batch[i].tStart)
-        iStop = timeToIndex(train_t, batch[i].tStop)
+        iStart = timeToIndex(train_t, tStart + (i-1) * batchDuration)
+        iStop = timeToIndex(train_t, tStart + i * batchDuration)
+
+        batch[i].tStart = train_t[iStart]
+        batch[i].tStop = train_t[iStop]

batch[i].saveat = train_t[iStart:iStop]
batch[i].targets = targets[iStart:iStop]
if features != nothing
47 changes: 40 additions & 7 deletions src/layers.jl
@@ -4,6 +4,35 @@
#

using Statistics: mean, std
+import FMIImport: fmi2Real, fmi2ValueReferenceFormat
+
+### FMUParameterRegistrator ###
+
+"""
+ToDo.
+"""
+struct FMUParameterRegistrator
+    fmu::FMU2
+    p_refs::AbstractArray{<:fmi2ValueReference}
+    p::AbstractArray{<:Real}
+
+    function FMUParameterRegistrator(fmu::FMU2, p_refs::fmi2ValueReferenceFormat, p::AbstractArray{<:Real})
+        @assert length(p_refs) == length(p) "`p_refs` and `p` need to be the same length!"
+        p_refs = prepareValueReference(fmu, p_refs)
+        fmu.optim_p_refs = p_refs
+        fmu.optim_p = p
+        return new(fmu, p_refs, p)
+    end
+end
+export FMUParameterRegistrator
+
+function (l::FMUParameterRegistrator)(x)
+    l.fmu.optim_p = l.p
+    l.fmu.optim_p_refs = l.p_refs
+    return x
+end
+
+Flux.@functor FMUParameterRegistrator (p, )

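The new layer acts as an identity on its input; its only effect is registering `p`/`p_refs` on the FMU, so these FMU parameters become trainable alongside the network weights. A minimal sketch of how it might be placed in a chain (the loaded `fmu`, the value reference "mass.m", and the surrounding layers are illustrative placeholders):

    # assumes `fmu::FMU2` is already loaded and exposes a parameter "mass.m"
    net = Flux.Chain(FMUParameterRegistrator(fmu, ["mass.m"], [1.0]),
                     # ... FMU evaluation / further layers go here ...
                     Flux.Dense(2, 2))

    # due to `Flux.@functor FMUParameterRegistrator (p, )`, the registered
    # parameter vector is part of the trainable parameters:
    Flux.params(net)
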
### SHIFTSCALE ###

@@ -94,23 +123,27 @@ Flux.@functor ScaleShift (scale, shift)

struct ScaleSum{T}
    scale::AbstractArray{T}
+    groups::Union{AbstractVector{<:AbstractVector{<:Integer}}, Nothing}

-    function ScaleSum{T}(scale::AbstractArray{T}) where {T}
-        inst = new(scale)
+    function ScaleSum{T}(scale::AbstractArray{T}, groups::Union{AbstractVector{<:AbstractVector{<:Integer}}, Nothing}=nothing) where {T}
+        inst = new(scale, groups)
        return inst
    end

-    function ScaleSum(scale::AbstractArray{T}) where {T}
-        return ScaleSum{T}(scale)
+    function ScaleSum(scale::AbstractArray{T}, groups::Union{AbstractVector{<:AbstractVector{<:Integer}}, Nothing}=nothing) where {T}
+        return ScaleSum{T}(scale, groups)
    end
end
export ScaleSum

function (l::ScaleSum)(x)

-    x_proc = sum(x .* l.scale)
-
-    return [x_proc]
+    if isnothing(l.groups)
+        x_proc = sum(x .* l.scale)
+        return [x_proc]
+    else
+        return collect(sum(x[g] .* l.scale[g]) for g in l.groups)
+    end
end

Flux.@functor ScaleSum (scale, )
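
A quick sketch of the new grouped mode (values chosen for illustration): each index group produces its own weighted sum, while the ungrouped layer still collapses everything into a single sum.

    s = ScaleSum([1.0, 2.0, 3.0, 4.0], [[1, 2], [3, 4]])
    s([1.0, 1.0, 1.0, 1.0])            # -> [3.0, 7.0], one sum per group

    ScaleSum([1.0, 2.0])([1.0, 1.0])   # -> [3.0], single sum as before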
21 changes: 10 additions & 11 deletions src/losses.jl
@@ -34,14 +34,14 @@ end

function loss(model, batchElement::FMU2BatchElement;
logLoss::Bool=true,
-    lossFct=Flux.Losses.mse)
+    lossFct=Flux.Losses.mse, p=nothing)

model = nfmu.neuralODE.model[layers]

loss = 0.0

# evaluate model
-    result = run!(model, batchElement)
+    result = run!(model, batchElement, p=p)

# for i in 1:length(batchElement.targets[1])
# targets_model = collect(r[batchElement.indicesModel[i]] for r in batchElement.result)
Expand Down Expand Up @@ -71,22 +71,21 @@ function loss(nfmu::NeuralFMU, batch::AbstractArray{<:FMU2BatchElement};

solution = run!(nfmu, batch[batchIndex]; lastBatchElement=lastBatchElement, progressDescr="Sim. Batch $(batchIndex)/$(length(batch)) |", kwargs...)

-    if solution.success != true
-        @warn "Solving the NeuralFMU as part of the loss function failed. This is often because the ODE cannot be solved. Did you initialize the NeuralFMU model? Maybe additional errors are printed before this assertion."
-    end
-
-    loss = loss!(batch[batchIndex], lossFct; logLoss=logLoss)
-
-    return loss
+    if solution.success
+        return loss!(batch[batchIndex], lossFct; logLoss=logLoss)
+    else
+        @warn "Solving the NeuralFMU as part of the loss function failed. This is often because the ODE cannot be solved. Did you initialize the NeuralFMU model? Often additional solver errors/warnings are printed before this warning."
+        return Inf
+    end
end

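A failed ODE solve now yields `Inf` instead of a partially computed loss, so callers can detect it and skip the corresponding update step. A hedged sketch of such a guard (the qualified name and argument values are assumptions):

    l = FMIFlux.loss(nfmu, batch; batchIndex=1, lossFct=Flux.Losses.mse)
    if isinf(l)
        @info "ODE solve failed for this batch element, skipping update."
    end
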
function loss(model, batch::AbstractArray{<:FMU2BatchElement};
batchIndex::Integer=rand(1:length(batch)),
lossFct=Flux.Losses.mse,
-    logLoss::Bool=true)
+    logLoss::Bool=true, p=nothing)

-    run!(model, batch[batchIndex])
+    run!(model, batch[batchIndex], p)

loss = loss!(batch[batchIndex], lossFct; logLoss=logLoss)

Expand Down Expand Up @@ -127,7 +126,7 @@ function batch_loss(neuralFMU::ME_NeuralFMU, batch::AbstractArray{<:FMU2BatchEle
return accu
end

-function batch_loss(model, batch::AbstractArray{<:FMU2BatchElement}; update::Bool=false, logLoss::Bool=false, lossFct=nothing)
+function batch_loss(model, batch::AbstractArray{<:FMU2BatchElement}; update::Bool=false, logLoss::Bool=false, lossFct=nothing, p=nothing)

accu = 0.0

Expand All @@ -137,7 +136,7 @@ function batch_loss(model, batch::AbstractArray{<:FMU2BatchElement}; update::Boo
for i in 1:numBatch
b = batch[i]

-        run!(model, b)
+        run!(model, b, p)

accu += loss!(b, lossFct; logLoss=logLoss)
end
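
With `p` threaded through `run!` and `batch_loss`, a whole evaluation batch can be scored under explicit parameters. A minimal sketch (assuming `re` and `p0` from `Flux.destructure` as above, an already prepared `batch`, and the qualified name as an assumption):

    p0, re = Flux.destructure(net)
    accu = FMIFlux.batch_loss(re, batch; lossFct=Flux.Losses.mse, logLoss=true, p=p0)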

2 comments on commit d887406

@ThummeTo (Owner, Author)
@JuliaRegistrator

Registration pull request created: JuliaRegistries/General/86751

After the above pull request is merged, it is recommended that a tag is created on this repository for the registered package version.

This will be done automatically if the Julia TagBot GitHub Action is installed, or can be done manually through the GitHub interface, or via:

git tag -a v0.10.2 -m "<description of version>" d887406c2898338e35a241a12bc807d36906358c
git push origin v0.10.2
