diff --git a/IncrementalInference/src/CliqueStateMachine/services/CliqueStateMachine.jl b/IncrementalInference/src/CliqueStateMachine/services/CliqueStateMachine.jl index c527a9ce..0d055d8a 100644 --- a/IncrementalInference/src/CliqueStateMachine/services/CliqueStateMachine.jl +++ b/IncrementalInference/src/CliqueStateMachine/services/CliqueStateMachine.jl @@ -928,11 +928,9 @@ Notes function updateFromSubgraph_StateMachine(csmc::CliqStateMachineContainer) isParametricSolve = csmc.algorithm == :parametric - # set PPE and solved for all frontals + # set solved for all frontals if !isParametricSolve for sym in getCliqFrontalVarIds(csmc.cliq) - # set PPE in cliqSubFg - setVariablePosteriorEstimates!(csmc.cliqSubFg, sym) # set solved flag vari = getVariable(csmc.cliqSubFg, sym, csmc.solveKey) setSolvedCount!(vari, getSolvedCount(vari, csmc.solveKey) + 1, csmc.solveKey) @@ -951,7 +949,6 @@ function updateFromSubgraph_StateMachine(csmc::CliqStateMachineContainer) frsyms, csmc.logger; solveKey = csmc.solveKey, - updatePPE = !isParametricSolve, ) #solve finished change color diff --git a/IncrementalInference/src/Deprecated.jl b/IncrementalInference/src/Deprecated.jl index 74872dea..02674e3b 100644 --- a/IncrementalInference/src/Deprecated.jl +++ b/IncrementalInference/src/Deprecated.jl @@ -155,12 +155,81 @@ function sampleTangent(x::ManifoldKernelDensity, p = mean(x)) error("sampleTangent(x::ManifoldKernelDensity, p) should be replaced by sampleTangent(M<:AbstractManifold, x::ManifoldKernelDensity, p)") end -## ================================================================================================ -## ================================================================================================ +export setPPE!, setVariablePosteriorEstimates! +setPPE!(args...; kw...) = error("PPEs are obsolete (use `calcMeanMaxSuggested` provisionally), see DFG #1133") +setVariablePosteriorEstimates!(args...; kw...) = error("PPEs are obsolete (use `calcMeanMaxSuggested` provisionally), see DFG #1133") + +@deprecate calcPPE( + var::VariableCompute, + varType::StateType = getVariableType(var); + solveKey::Symbol = :default, + kwargs..., +) calcMeanMaxSuggested(var, solveKey) + +@deprecate calcPPE( + dfg::AbstractDFG, + label::Symbol; + solveKey::Symbol = :default, + kwargs..., +) calcMeanMaxSuggested(dfg, label, solveKey) + +export calcVariablePPE +const calcVariablePPE = calcPPE + +#FIXME The next functions use PPEs and should be updated or deprecated +# getPPESuggestedAll no external use +# findVariablesNear used in 1 rome example +""" + $SIGNATURES + +Return `::Tuple` with matching variable ID symbols and `Suggested` PPE values. + +Related -# TODO maybe upstream to DFG -DFG.MeanMaxPPE(solveKey::Symbol, suggested::StaticArray, max::StaticArray, mean::StaticArray) = - DFG.MeanMaxPPE(solveKey, Vector(suggested), Vector(max), Vector(mean)) +getVariablePPE +""" +function getPPESuggestedAll(dfg::AbstractDFG, regexFilter::Union{Nothing, Regex} = nothing) + # + # get values + vsyms = listVariables(dfg, regexFilter) |> sortDFG + slamPPE = map(x -> getVariablePPE(dfg, x).suggested, vsyms) + # sizes to convert to matrix + rumax = zeros(Int, 2) + for ppe in slamPPE + rumax[2] = length(ppe) + rumax[1] = maximum(rumax) + end + + # populate with values + XYT = zeros(length(slamPPE), rumax[1]) + for i = 1:length(slamPPE) + XYT[i, 1:length(slamPPE[i])] = slamPPE[i] + end + return (vsyms, XYT) +end + +""" + $SIGNATURES + +Find and return a `::Tuple` of variables and distances to `loc::Vector{<:Real}`. 
+ +Related + +findVariablesNearTimestamp +""" +function findVariablesNear( + dfg::AbstractDFG, + loc::Vector{<:Real}, + regexFilter::Union{Nothing, Regex} = nothing; + number::Int = 3, +) + # + + xy = getPPESuggestedAll(dfg, regexFilter) + dist = sum((xy[2][:, 1:length(loc)] .- loc') .^ 2; dims = 2) |> vec + prm = (dist |> sortperm)[1:number] + return (xy[1][prm], sqrt.(dist[prm])) +end ## ================================================================================================ diff --git a/IncrementalInference/src/ExportAPI.jl b/IncrementalInference/src/ExportAPI.jl index 4bddff4d..60b221bf 100644 --- a/IncrementalInference/src/ExportAPI.jl +++ b/IncrementalInference/src/ExportAPI.jl @@ -93,9 +93,6 @@ export CSMHistory, getLabel, getVariables, getVariableOrder, - getPPE, - getPPEDict, - getVariablePPE, isVariable, isFactor, getFactorType, @@ -292,12 +289,7 @@ export CSMHistory, reshapeVec2Mat export incrSuffix - -export calcPPE, calcVariablePPE -export setPPE!, setVariablePosteriorEstimates! -export getPPEDict -export getPPESuggested, getPPEMean, getPPEMax -export getPPESuggestedAll +export calcMeanMaxSuggested export loadDFG export findVariablesNear, defaultFixedLagOnTree! export fetchDataJSON diff --git a/IncrementalInference/src/IncrementalInference.jl b/IncrementalInference/src/IncrementalInference.jl index f83bb735..cd645f53 100644 --- a/IncrementalInference/src/IncrementalInference.jl +++ b/IncrementalInference/src/IncrementalInference.jl @@ -110,7 +110,6 @@ import DistributedFactorGraphs: addVariable!, addFactor!, ls, lsf, isInitialized import DistributedFactorGraphs: compare import DistributedFactorGraphs: rebuildFactorCache! import DistributedFactorGraphs: getDimension, getManifold, getPointType, getPointIdentity -import DistributedFactorGraphs: getPPE, getPPEDict import DistributedFactorGraphs: getPoint, getCoordinates import DistributedFactorGraphs: getVariableType import DistributedFactorGraphs: AbstractPointParametricEst, loadDFG diff --git a/IncrementalInference/src/parametric/services/ConsolidateParametricRelatives.jl b/IncrementalInference/src/parametric/services/ConsolidateParametricRelatives.jl index 549e2bb9..27dcce91 100644 --- a/IncrementalInference/src/parametric/services/ConsolidateParametricRelatives.jl +++ b/IncrementalInference/src/parametric/services/ConsolidateParametricRelatives.jl @@ -14,7 +14,7 @@ Notes DevNotes - TODO ensure type stability, likely returning types `Any` at this time. -- TODO MeanMaxPPE currently stored as coordinates, complicating fast calculation. +- TODO parametric estimates currently stored as coordinates, complicating fast calculation. Related: [`getMeasurementParametric`](@ref), [`approxConvBelief`](@ref), [`MutablePose2Pose2Gaussian`](@ref) """ @@ -45,9 +45,6 @@ function solveFactorParametric( # get variable points function _getParametric(vari::VariableCompute, key = :default) - # hasp = haskey(getPPEDict(vari), key) - # FIXME use PPE via Manifold points currently in coordinates - # hasp ? 
getPPE(vari, key).suggested : calcMean(getBelief(vari, key)) pt = calcMean(getBelief(vari, key)) return collect(getCoordinates(getVariableType(vari), pt)) diff --git a/IncrementalInference/src/parametric/services/ParametricManopt.jl b/IncrementalInference/src/parametric/services/ParametricManopt.jl index acc9fbe0..f515fcc0 100644 --- a/IncrementalInference/src/parametric/services/ParametricManopt.jl +++ b/IncrementalInference/src/parametric/services/ParametricManopt.jl @@ -575,8 +575,6 @@ function autoinitParametric!( vnd.initialized = true #fill in ppe as mean Xc::Vector{Float64} = collect(getCoordinates(getVariableType(xi), val)) - ppe = DFG.MeanMaxPPE(solveKey, Xc, Xc, Xc) - getPPEDict(xi)[solveKey] = ppe result = true diff --git a/IncrementalInference/src/parametric/services/ParametricUtils.jl b/IncrementalInference/src/parametric/services/ParametricUtils.jl index 2a231929..59a2ccbe 100644 --- a/IncrementalInference/src/parametric/services/ParametricUtils.jl +++ b/IncrementalInference/src/parametric/services/ParametricUtils.jl @@ -923,17 +923,13 @@ end """ $SIGNATURES -Update the fg from solution in vardict and add MeanMaxPPE (all just mean). Usefull for plotting +Update the fg from solution in vardict. Usefull for plotting """ function updateParametricSolution!(sfg, vardict::AbstractDict; solveKey::Symbol = :parametric) for (v, val) in vardict vnd = getState(getVariable(sfg, v), solveKey) # Update the variable node data value and covariance updateSolverDataParametric!(vnd, val.val, val.cov) - #fill in ppe as mean - Xc = collect(getCoordinates(getVariableType(sfg, v), val.val)) - ppe = DFG.MeanMaxPPE(solveKey, Xc, Xc, Xc) - getPPEDict(getVariable(sfg, v))[solveKey] = ppe end end @@ -948,10 +944,6 @@ function updateParametricSolution!(fg, M, labels::AbstractArray{Symbol}, vals, covar = isnothing(Σ) ? vnd.bw : covars[i] # Update the variable node data value and covariance updateSolverDataParametric!(vnd, val, covar)#FIXME add cov - #fill in ppe as mean - Xc = collect(getCoordinates(getVariableType(fg, v), val)) - ppe = DFG.MeanMaxPPE(solveKey, Xc, Xc, Xc) - getPPEDict(getVariable(fg, v))[solveKey] = ppe end end @@ -973,7 +965,7 @@ function createMvNormal(v::VariableCompute, key = :parametric) dims = vnd.dims return createMvNormal(vnd.val[1:dims, 1], vnd.bw[1:dims, 1:dims]) else - @warn "Trying MvNormal Fit, replace with PPE fits in future" + @warn "Trying MvNormal Fit" return fit(MvNormal, getState(v, key).val) end end @@ -1035,10 +1027,6 @@ function autoinitParametricOptim!( updateSolverDataParametric!(vnd, val, cov) vnd.initialized = true - #fill in ppe as mean - Xc = collect(getCoordinates(getVariableType(xi), val)) - ppe = DFG.MeanMaxPPE(:parametric, Xc, Xc, Xc) - getPPEDict(xi)[:parametric] = ppe # updateVariableSolverData!(dfg, xi, solveKey, true; warn_if_absent=false) # updateVariableSolverData!(dfg, xi.label, getState(xi, solveKey), :graphinit, true, Symbol[]; warn_if_absent=false) diff --git a/IncrementalInference/src/services/ApproxConv.jl b/IncrementalInference/src/services/ApproxConv.jl index b95264ce..f734b444 100644 --- a/IncrementalInference/src/services/ApproxConv.jl +++ b/IncrementalInference/src/services/ApproxConv.jl @@ -60,14 +60,12 @@ Notes - Fresh starting point will be used if first element in `fctLabels` is a unary `<:AbstractPriorObservation`. - This function will not change any values in `dfg`, and might have slightly less speed performance to meet this requirement. - pass in `tfg` to get a recoverable result of all convolutions in the chain. 
-- `setPPE` and `setPPEmethod` can be used to store PPE information in temporary `tfg` DevNotes - TODO strong requirement that this function is super efficient on single factor/variable case! - FIXME must consolidate with `accumulateFactorMeans` - TODO `solveKey` not fully wired up everywhere yet - tfg gets all the solveKeys inside the source `dfg` variables -- TODO add a approxConv on PPE option - Consolidate with [`accumulateFactorMeans`](@ref), `approxConvBinary` Related @@ -82,8 +80,6 @@ function approxConvBelief( solveKey::Symbol = :default, N::Int = length(measurement), tfg::AbstractDFG = LocalDFG(;solverParams=getSolverParams(dfg)), - setPPEmethod::Union{Nothing, Type{<:AbstractPointParametricEst}} = nothing, - setPPE::Bool = setPPEmethod !== nothing, path::AbstractVector{Symbol} = Symbol[], skipSolve::Bool = false, nullSurplus::Real = 0, @@ -149,9 +145,6 @@ function approxConvBelief( end # didn't return early so shift focus to using `tfg` more intensely initVariable!(tfg, varLbls[1], pts) - # use in combination with setPPE and setPPEmethod keyword arguments - ppemethod = setPPEmethod === nothing ? DFG.MeanMaxPPE : setPPEmethod - !setPPE ? nothing : setPPE!(tfg, varLbls[1], solveKey, ppemethod) # do chain of convolutions for idx = idxS:length(path) @@ -161,7 +154,6 @@ function approxConvBelief( addFactor!(tfg, fct) ptsBel = approxConvBelief(tfg, fct, path[idx + 1]; solveKey, N, skipSolve, keepCalcFactor) initVariable!(tfg, path[idx + 1], ptsBel) - !setPPE ? nothing : setPPE!(tfg, path[idx + 1], solveKey, ppemethod) end end diff --git a/IncrementalInference/src/services/FGOSUtils.jl b/IncrementalInference/src/services/FGOSUtils.jl index ce6afc9e..6a81d193 100644 --- a/IncrementalInference/src/services/FGOSUtils.jl +++ b/IncrementalInference/src/services/FGOSUtils.jl @@ -212,7 +212,6 @@ end # WIP # _getMeasurementRepresentation(::AbstractPriorObservation, coord::AbstractVector{<:Number}) = - """ $SIGNATURES @@ -223,20 +222,13 @@ Calculate new Parametric Point Estimates for a given variable. DevNotes - TODO update for manifold subgroups. 
- TODO standardize after AMP3D - -Related - -[`getPPE`](@ref), [`setPPE!`](@ref), [`getVariablePPE`](@ref) """ -function calcPPE( - var::VariableCompute, - varType::StateType = getVariableType(var); - ppeType::Type{<:DFG.MeanMaxPPE} = DFG.MeanMaxPPE, - solveKey::Symbol = :default, - ppeKey::Symbol = solveKey +function calcMeanMaxSuggested( + vari::VariableCompute, + solveKey::Symbol = :default ) - # - P = getBelief(var, solveKey) + varType = getVariableType(vari) + P = getBelief(vari, solveKey) maniDef = convert(MB.AbstractManifold, varType) manis = AMP._manifoldtuple(maniDef) # LEGACY, TODO REMOVE ops = buildHybridManifoldCallbacks(manis) @@ -246,44 +238,25 @@ function calcPPE( Pma = getKDEMax(P; addop = ops[1], diffop = ops[2]) # calculate point - ## TODO make PPE only use getCoordinates for now (IIF v0.25) + ## TODO use getCoordinates for now (IIF v0.25) Pme_ = getCoordinates(varType, Pme) # Pma_ = getCoordinates(M,Pme) - - ppes = getPPEDict(var) - id = if haskey(ppes, ppeKey) - ppes[ppeKey].id - else - nothing - end - - # suggested, max, mean, current time - # TODO, poor constructor argument assumptions on `ppeType` - return ppeType(; - id, - solveKey=ppeKey, - suggested=Pme_, - max=Pma, + + return ( mean=Pme_, + max=Pma, + suggested=Pme_, ) end -# calcPPE(var::VariableCompute; method::Type{<:AbstractPointParametricEst}=MeanMaxPPE, solveKey::Symbol=:default) = calcPPE(var, getVariableType(var), method=method, solveKey=solveKey) - - -function calcPPE( +function calcMeanMaxSuggested( dfg::AbstractDFG, - label::Symbol; + label::Symbol, solveKey::Symbol = :default, - ppeType::Type{<:AbstractPointParametricEst} = DFG.MeanMaxPPE, ) - # - var = getVariable(dfg, label) - return calcPPE(var, getVariableType(var); ppeType = ppeType, solveKey = solveKey) + return calcMeanMaxSuggested(getVariable(dfg, label), solveKey) end -const calcVariablePPE = calcPPE - """ $SIGNATURES @@ -378,58 +351,6 @@ function defaultFixedLagOnTree!( return getSolverParams(dfg) end -""" - $SIGNATURES - -Return `::Tuple` with matching variable ID symbols and `Suggested` PPE values. - -Related - -getVariablePPE -""" -function getPPESuggestedAll(dfg::AbstractDFG, regexFilter::Union{Nothing, Regex} = nothing) - # - # get values - vsyms = listVariables(dfg, regexFilter) |> sortDFG - slamPPE = map(x -> getVariablePPE(dfg, x).suggested, vsyms) - # sizes to convert to matrix - rumax = zeros(Int, 2) - for ppe in slamPPE - rumax[2] = length(ppe) - rumax[1] = maximum(rumax) - end - - # populate with values - XYT = zeros(length(slamPPE), rumax[1]) - for i = 1:length(slamPPE) - XYT[i, 1:length(slamPPE[i])] = slamPPE[i] - end - return (vsyms, XYT) -end - -""" - $SIGNATURES - -Find and return a `::Tuple` of variables and distances to `loc::Vector{<:Real}`. - -Related - -findVariablesNearTimestamp -""" -function findVariablesNear( - dfg::AbstractDFG, - loc::Vector{<:Real}, - regexFilter::Union{Nothing, Regex} = nothing; - number::Int = 3, -) - # - - xy = getPPESuggestedAll(dfg, regexFilter) - dist = sum((xy[2][:, 1:length(loc)] .- loc') .^ 2; dims = 2) |> vec - prm = (dist |> sortperm)[1:number] - return (xy[1][prm], sqrt.(dist[prm])) -end - """ $SIGNATURES @@ -507,71 +428,6 @@ function getFactorsAmongVariablesOnly( return usefcts end -""" - $SIGNATURES - -Calculate new and then set PPE estimates for variable from some distributed factor graph. - -DevNotes -- TODO solve key might be needed if one only wants to update one -- TODO consider a more fiting name. 
-- guess it would make sense that :default=>variableNodeData, goes with :default=>MeanMaxPPE - -Aliases -- `setVariablePosteriorEstimates!` - -DevNotes: - -JT - TODO if subfg is in the cloud or from another fg it has to be updated -it feels like a waste to update the whole variable for one field. -currently i could find mergeUpdateVariableSolverData() -might be handy to use a setter such as updatePointParametricEst(dfg, variable, solverkey) -This might also not be the correct place, if it is uncomment: -```` -if (subfg <: InMemoryDFGTypes) - updateVariable!(subfg, var) -end -``` - -Related - -[`calcPPE`](@ref), getVariablePPE, (updatePPE! ?) -""" -function setPPE!( - variable::VariableCompute, - solveKey::Symbol = :default, - ppeType::Type{T} = DFG.MeanMaxPPE, - newPPEVal::T = calcPPE(variable; ppeType = ppeType, solveKey = solveKey), -) where {T <: AbstractPointParametricEst} - # - # vnd = getState(variable, solveKey) - - #TODO in the future one can perhaps populate other solver data types here by looking at the typeof ppeDict entries - getPPEDict(variable)[solveKey] = newPPEVal - - return variable -end - -function setPPE!( - subfg::AbstractDFG, - label::Symbol, - solveKey::Symbol = :default, - ppeType::Type{T} = DFG.MeanMaxPPE, - newPPEVal::NothingUnion{T} = nothing, -) where {T <: AbstractPointParametricEst} - # - variable = getVariable(subfg, label) - # slight optimization to avoid double variable lookup (should be optimized out during code lowering) - newppe = if newPPEVal !== nothing - newPPEVal - else - calcPPE(variable; solveKey = solveKey, ppeType = ppeType) - end - return setPPE!(variable, solveKey, ppeType, newppe) -end - -const setVariablePosteriorEstimates! = setPPE! - ## ============================================================================ # Starting integration with Manifolds.jl, via ApproxManifoldProducts.jl first ## ============================================================================ diff --git a/IncrementalInference/src/services/FactorGraph.jl b/IncrementalInference/src/services/FactorGraph.jl index 7c448741..42ca90be 100644 --- a/IncrementalInference/src/services/FactorGraph.jl +++ b/IncrementalInference/src/services/FactorGraph.jl @@ -203,12 +203,10 @@ function setValKDE!( setinit::Bool = true, ipc::AbstractVector{<:Real} = [0.0;]; solveKey::Symbol = :default, - ppeType::Type{T} = DFG.MeanMaxPPE, -) where {P, T} +) where {P} vnd = getState(v, solveKey) # recover variableType information setValKDE!(vnd, val, setinit, ipc) - setPPE!(v; solveKey, ppeType) return nothing end function setValKDE!( diff --git a/IncrementalInference/src/services/GraphInit.jl b/IncrementalInference/src/services/GraphInit.jl index 2677ef2d..2eda84d3 100644 --- a/IncrementalInference/src/services/GraphInit.jl +++ b/IncrementalInference/src/services/GraphInit.jl @@ -176,8 +176,6 @@ function doautoinit!( Npts(bel) == getSolverParams(dfg).N ? bel : resample(bel, getSolverParams(dfg).N) # @info "MANIFOLD IS" bel.manifold isPartial(bel) string(bel._partial) string(getPoints(bel, false)[1]) setValKDE!(xi, bel_, true, ipc; solveKey) # getPoints(bel, false) - # Update the estimates (longer DFG function used so cloud is also updated) - setVariablePosteriorEstimates!(dfg, xi.label, solveKey) # Update the data in the event that it's not local # TODO perhaps use merge, but keeping to deepcopy as update variant used was set to copy. 
DFG.copytoState!(dfg, xi.label, solveKey, getState(xi, solveKey)) diff --git a/IncrementalInference/src/services/SolverUtilities.jl b/IncrementalInference/src/services/SolverUtilities.jl index 618b95ed..a905027f 100644 --- a/IncrementalInference/src/services/SolverUtilities.jl +++ b/IncrementalInference/src/services/SolverUtilities.jl @@ -190,6 +190,8 @@ function _buildGraphByFactorAndTypes!( return dfg, _dfgfct end +#FIXME this function depended on obsolete PPEs and may currently be broken +#TODO test """ $SIGNATURES @@ -236,7 +238,7 @@ end # the point is that only the (0,20) values in newFactor are needed, all calculations are abstracted away. ``` -See also: [`RoME.generateGraph_Honeycomb!`](@ref), [`accumulateFactorMeans`](@ref), [`getPPE`](@ref) +See also: [`RoME.generateGraph_Honeycomb!`](@ref), [`accumulateFactorMeans`](@ref) """ function _checkVariableByReference( fg::AbstractDFG, @@ -251,7 +253,7 @@ function _checkVariableByReference( nothing else DFG._getPriorType(srcType)( - MvNormal(getPPE(fg[srcLabel], refKey).suggested, diagm(ones(getDimension(srcType)))), + MvNormal(calcMeanMaxSuggested(fg, srcLabel, refKey).suggested, diagm(ones(getDimension(srcType)))), ) end, atol::Real = 1e-2, @@ -276,12 +278,9 @@ function _checkVariableByReference( accumulateFactorMeans(tfg, [:x0f1; :x0l0f1]) end - ppe = DFG.MeanMaxPPE(refKey, refVal, refVal, refVal) - - # now check if we already have a landmark at this location varLms = ls(fg, destRegex) |> sortDFG already = if doRef - ppeLms = getPPE.(getVariable.(fg, varLms), refKey) .|> x -> x.suggested + ppeLms = calcMeanMaxSuggested.(getVariable.(fg, varLms), refKey) .|> x -> x.suggested errmask = ppeLms .|> (x -> isapprox(x, refVal; atol = atol)) any(errmask) else @@ -322,10 +321,8 @@ function _checkVariableByReference( getMeasurementParametric(factor)[1] end - ppe = DFG.MeanMaxPPE(refKey, refVal, refVal, refVal) + ppe = (mean=refVal, max=refVal, suggested=refVal) # Nope does not exist, ppe, generated new variable label only return false, ppe, Symbol(destPrefix, srcNumber) end - -# diff --git a/IncrementalInference/src/services/SubGraphFunctions.jl b/IncrementalInference/src/services/SubGraphFunctions.jl index 1b5f3e3b..0fdc0052 100644 --- a/IncrementalInference/src/services/SubGraphFunctions.jl +++ b/IncrementalInference/src/services/SubGraphFunctions.jl @@ -133,7 +133,6 @@ function transferUpdateSubGraph!( src::AbstractDFG, syms::Vector{Symbol} = union(ls(src)...), logger = ConsoleLogger(); - updatePPE::Bool = true, solveKey::Symbol = :default, ) # @@ -152,15 +151,6 @@ function transferUpdateSubGraph!( getState(var, solveKey), ) - if updatePPE - # create ppe on new key using defaults, TODO improve - if haskey(getPPEDict(var), solveKey) - DFG.updatePPE!(dest, var, solveKey; warn_if_absent = false) - else - ppe = calcPPE(var; ppeKey = solveKey) - addPPE!(dest, var.label, ppe) - end - end end return nothing diff --git a/IncrementalInference/src/services/TetherUtils.jl b/IncrementalInference/src/services/TetherUtils.jl index a2719106..11d53263 100644 --- a/IncrementalInference/src/services/TetherUtils.jl +++ b/IncrementalInference/src/services/TetherUtils.jl @@ -141,7 +141,7 @@ function accumulateFactorMeans( nextsym = 1 < length(fctsyms) ? intersect(vars, ls(dfg, fctsyms[nextidx + 1])) : vars[end] currsym = 1 < length(fctsyms) ? 
setdiff(vars, nextsym)[1] : vars[1] - calcPPE(dfg, currsym; solveKey).suggested + calcMeanMaxSuggested(dfg, currsym, solveKey).suggested end srcsym = currsym diff --git a/IncrementalInference/test/runtests.jl b/IncrementalInference/test/runtests.jl index 69b51114..0f55aad7 100644 --- a/IncrementalInference/test/runtests.jl +++ b/IncrementalInference/test/runtests.jl @@ -88,7 +88,6 @@ include("testlocalconstraintexamples.jl") include("testManualInit.jl") include("testBasicTreeInit.jl") include("testSolveOrphanedFG.jl") -include("testSolveSetPPE.jl") include("testSolveKey.jl") end end diff --git a/IncrementalInference/test/testBasicGraphs.jl b/IncrementalInference/test/testBasicGraphs.jl index 5d27d695..c37207cb 100644 --- a/IncrementalInference/test/testBasicGraphs.jl +++ b/IncrementalInference/test/testBasicGraphs.jl @@ -329,7 +329,7 @@ addFactor!(fg, [:x0], Prior(Normal(1.0, 0.01))) # force a basic setup initAll!(fg) -@test isapprox( 1, getPPE(fg, :x0).suggested[1]; atol=0.1) +@test isapprox( 1, calcMeanMaxSuggested(fg, :x0, :default).suggested[1]; atol=0.1) ## @@ -348,8 +348,8 @@ tree = solveGraph!(fg) ## -@test isapprox( 1, getPPE(fg, :x0).suggested[1]; atol=0.1) -@test isapprox( 4, getPPE(fg, :x3).suggested[1]; atol=0.3) +@test isapprox( 1, calcMeanMaxSuggested(fg, :x0, :default).suggested[1]; atol=0.1) +@test isapprox( 4, calcMeanMaxSuggested(fg, :x3, :default).suggested[1]; atol=0.3) ## check contents of tree messages diff --git a/IncrementalInference/test/testBasicRecycling.jl b/IncrementalInference/test/testBasicRecycling.jl index cf40e70b..61683561 100644 --- a/IncrementalInference/test/testBasicRecycling.jl +++ b/IncrementalInference/test/testBasicRecycling.jl @@ -21,7 +21,7 @@ deleteFactor!.(fg, [Symbol("x$(i)lm0f1") for i=1:(N-1)]) tree = solveTree!(fg) for var in sortDFG(ls(fg)) - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg, var, :default).suggested println("Testing ", var,": ", sppe) @test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35) end @@ -39,7 +39,7 @@ fifoFreeze!(fg) tree = solveTree!(fg; recordcliqs=ls(fg)); for var in sortDFG(ls(fg)) - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg, var, :default).suggested println("Testing ", var,": ", sppe) @test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35) end @@ -76,7 +76,7 @@ hists = fetchCliqHistoryAll!(smtasks) @test !(IIF.solveUp_StateMachine in getindex.(hists[7], 3)) for var in sortDFG(ls(fg)) - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg, var, :default).suggested println("Testing ", var,": ", sppe) @test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35) end @@ -109,7 +109,7 @@ hists = fetchCliqHistoryAll!(smtasks) tree = solveTree!(fg, tree; recordcliqs=ls(fg), eliminationOrder); for var in sortDFG(ls(fg)) - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg, var, :default).suggested println("Testing ", var,": ", sppe) @test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.355) end @@ -163,7 +163,7 @@ hists = fetchCliqHistoryAll!(smtasks) @test !(IIF.solveUp_StateMachine in getindex.(hists[3], 3)) for var in sortDFG(ls(fg)) - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg, var, :default).suggested println("Testing ", var,": ", sppe) @test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35) end @@ -202,7 +202,7 @@ smtasks = Task[] tree = solveTree!(fg, 
tree; smtasks=smtasks, recordcliqs=ls(fg)); for var in sortDFG(ls(fg)) - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg, var, :default).suggested # println("Testing ", var,": ", sppe) @test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35) end @@ -213,7 +213,7 @@ deepcopyGraph!(fg, sfg, vsyms[4:6], fsyms[4:6]) tree = solveTree!(fg, tree; smtasks=smtasks, recordcliqs=ls(fg)); for var in sortDFG(ls(fg)) - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg, var, :default).suggested # println("Testing ", var,": ", sppe) @test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35) end @@ -224,7 +224,7 @@ deepcopyGraph!(fg, sfg, vsyms[7:8], fsyms[7:8]) tree = solveTree!(fg, tree; smtasks=smtasks, recordcliqs=ls(fg)); for var in sortDFG(ls(fg)) - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg, var, :default).suggested # println("Testing ", var,": ", sppe) @test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35) end @@ -235,7 +235,7 @@ deepcopyGraph!(fg, sfg, Symbol[], [fsyms[9]]) tree = solveTree!(fg, tree; smtasks=smtasks, recordcliqs=ls(fg)); for var in sortDFG(ls(fg)) - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg, var, :default).suggested println("Testing ", var,": ", sppe) @test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35) end @@ -245,7 +245,7 @@ addFactor!(fg, [:x4], Prior(Normal(4.1,0.1))) tree = solveTree!(fg, tree; smtasks=smtasks, recordcliqs=ls(fg)); for var in sortDFG(ls(fg)) - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg, var, :default).suggested # println("Testing ", var,": ", sppe) @test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.35) end @@ -256,7 +256,7 @@ addFactor!(fg, [:x4], Prior(Normal(3.9,0.1))) tree = solveTree!(fg, tree; smtasks=smtasks, recordcliqs=ls(fg)); for var in sortDFG(ls(fg)) - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg, var, :default).suggested # println("Testing ", var,": ", sppe) @test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.355) end diff --git a/IncrementalInference/test/testBasicTreeInit.jl b/IncrementalInference/test/testBasicTreeInit.jl index dd3b23ce..72ee02ed 100644 --- a/IncrementalInference/test/testBasicTreeInit.jl +++ b/IncrementalInference/test/testBasicTreeInit.jl @@ -101,7 +101,7 @@ tree = solveTree!(fg; smtasks=smtasks, verbose=true) for var in good_vars - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg,var).suggested println("Testing ", var,": ", sppe) @test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.15) end diff --git a/IncrementalInference/test/testCircular.jl b/IncrementalInference/test/testCircular.jl index f38da923..353a8780 100644 --- a/IncrementalInference/test/testCircular.jl +++ b/IncrementalInference/test/testCircular.jl @@ -20,7 +20,8 @@ solveTree!(fg); ## -sppes = map(var->getPPE(var).suggested[1], sortDFG(getVariables(fg),by=getLabel)) +sppes = map(var -> calcMeanMaxSuggested(fg, getLabel(var), :default).suggested[1], + sortDFG(getVariables(fg), by=getLabel)) gt = rem2pi.(collect(0:4), RoundNearest) diff --git a/IncrementalInference/test/testDERelative.jl b/IncrementalInference/test/testDERelative.jl index 397a37e1..784e1e00 100644 --- a/IncrementalInference/test/testDERelative.jl +++ b/IncrementalInference/test/testDERelative.jl @@ -165,15 +165,15 
@@ ref_ = (getBelief(fg, :x0) |> getPoints) ## temp graph solve check tfg = initfg() -tx3_ = approxConvBelief(fg, :x0f1, :x3; setPPE=true, tfg) +tx3_ = approxConvBelief(fg, :x0f1, :x3; tfg) pts_ = getPoints(tx3_) # initVariable!(tfg, :x3, pts) @cast pts[i,j] := pts_[j][i] -@test isapprox( x0_val_ref, getPPE(tfg, :x0).suggested ; atol = 0.1) -@test isapprox( x1_val_ref, getPPE(tfg, :x1).suggested ; atol = 0.1) -@test isapprox( x2_val_ref, getPPE(tfg, :x2).suggested ; atol = 0.1) +@test isapprox( x0_val_ref, calcMeanMaxSuggested(tfg, :x0, :default).suggested ; atol = 0.1) +@test isapprox( x1_val_ref, calcMeanMaxSuggested(tfg, :x1, :default).suggested ; atol = 0.1) +@test isapprox( x2_val_ref, calcMeanMaxSuggested(tfg, :x2, :default).suggested ; atol = 0.1) @test isapprox( x3_val_ref, mean(tx3_); atol=0.1) # using KernelDensityEstimatePlotting @@ -234,7 +234,7 @@ printCSMHistoryLogical(hists) # intended steps at writing are 5, 6 (upsolve) _, csmc = repeatCSMStep!(hists[1], 5; duplicate=true) -@test isapprox( 1, getPPESuggested(csmc.cliqSubFg, :x0)[1]; atol=0.1 ) +@test isapprox( 1, calcMeanMaxSuggested(csmc.cliqSubFg, :x0, :default).suggested[1]; atol=0.1 ) nval_x0 = mean(getBelief(csmc.cliqSubFg, :x0)) @test isapprox( x0_val_ref, nval_x0; atol=0.1 ) @@ -255,7 +255,7 @@ dens, ipc = propagateBelief( sfg, :x1, :;) _, csmc = repeatCSMStep!(hists[1], 6; duplicate=true) # @enter repeatCSMStep!(hists[1], 6; duplicate=true) -@test isapprox( x0_val_ref, getPPESuggested(csmc.cliqSubFg, :x0); atol=0.1 ) +@test isapprox( x0_val_ref, calcMeanMaxSuggested(csmc.cliqSubFg, :x0, :default).suggested; atol=0.1 ) nval_x0 = mean(getBelief(csmc.cliqSubFg, :x0)) @test isapprox( x0_val_ref, nval_x0; atol=0.1 ) @@ -266,18 +266,18 @@ nval_x0 = mean(getBelief(csmc.cliqSubFg, :x0)) # TODO CHECK vnd.val points istype SArray??? 
# intended steps at writing are 11,12 (post-root clique downsolve) -val0 = getPPESuggested( hists[1][11][4].cliqSubFg[:x0] ) +val0 = calcMeanMaxSuggested(hists[1][11][4].cliqSubFg, :x0, :default).suggested @test isapprox( x0_val_ref, val0; atol=0.1) -val0 = getPPESuggested( hists[1][12][4].cliqSubFg[:x0] ) +val0 = calcMeanMaxSuggested(hists[1][12][4].cliqSubFg, :x0, :default).suggested @test isapprox( x0_val_ref, val0; atol=0.1) ## -@test isapprox( getPPE(fg, :x0).suggested, x0_val_ref; atol = 0.1) -@test isapprox( getPPE(fg, :x1).suggested, x1_val_ref; atol = 0.1) -@test isapprox( getPPE(fg, :x2).suggested, x2_val_ref; atol = 0.1) -@test isapprox( getPPE(fg, :x3).suggested, x3_val_ref; atol = 0.1) +@test isapprox( calcMeanMaxSuggested(fg, :x0, :default).suggested, x0_val_ref; atol = 0.1) +@test isapprox( calcMeanMaxSuggested(fg, :x1, :default).suggested, x1_val_ref; atol = 0.1) +@test isapprox( calcMeanMaxSuggested(fg, :x2, :default).suggested, x2_val_ref; atol = 0.1) +@test isapprox( calcMeanMaxSuggested(fg, :x3, :default).suggested, x3_val_ref; atol = 0.1) ## @@ -373,14 +373,14 @@ x7_val_ref = sl(getVariable(fg, :x7) |> getTimestamp |> DateTime |> datetime2uni ## -@test isapprox( getPPESuggested(fg, :x0), x0_val_ref; atol=0.2) -@test isapprox( getPPESuggested(fg, :x1), x1_val_ref; atol=0.2) -@test isapprox( getPPESuggested(fg, :x2), x2_val_ref; atol=0.2) -@test isapprox( getPPESuggested(fg, :x3), x3_val_ref; atol=0.2) -@test isapprox( getPPESuggested(fg, :x4), x4_val_ref; atol=0.2) -@test isapprox( getPPESuggested(fg, :x5), x5_val_ref; atol=0.2) -@test isapprox( getPPESuggested(fg, :x6), x6_val_ref; atol=0.2) -@test isapprox( getPPESuggested(fg, :x7), x7_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(fg, :x0, :default).suggested, x0_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(fg, :x1, :default).suggested, x1_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(fg, :x2, :default).suggested, x2_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(fg, :x3, :default).suggested, x3_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(fg, :x4, :default).suggested, x4_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(fg, :x5, :default).suggested, x5_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(fg, :x6, :default).suggested, x6_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(fg, :x7, :default).suggested, x7_val_ref; atol=0.2) ## check forward and backward solving @@ -416,19 +416,19 @@ tfg = initfg() # initVariable!(fg, s, [0.1.*zeros(2) for _ in 1:100]) # end -pts = approxConv(fg, :x0f1, :x7, setPPE=true, tfg=tfg) +pts = approxConv(fg, :x0f1, :x7, tfg=tfg) initVariable!(tfg, :x7, pts) ## -@test isapprox( getPPESuggested(tfg, :x0), x0_val_ref; atol=0.2) -@test isapprox( getPPESuggested(tfg, :x1), x1_val_ref; atol=0.2) -@test isapprox( getPPESuggested(tfg, :x2), x2_val_ref; atol=0.2) -@test isapprox( getPPESuggested(tfg, :x3), x3_val_ref; atol=0.2) -@test isapprox( getPPESuggested(tfg, :x4), x4_val_ref; atol=0.2) -@test isapprox( getPPESuggested(tfg, :x5), x5_val_ref; atol=0.2) -@test isapprox( getPPESuggested(tfg, :x6), x6_val_ref; atol=0.2) -@test isapprox( getPPESuggested(tfg, :x7), x7_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(tfg, :x0, :default).suggested, x0_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(tfg, :x1, :default).suggested, x1_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(tfg, :x2, :default).suggested, x2_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(tfg, :x3, 
:default).suggested, x3_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(tfg, :x4, :default).suggested, x4_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(tfg, :x5, :default).suggested, x5_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(tfg, :x6, :default).suggested, x6_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(tfg, :x7, :default).suggested, x7_val_ref; atol=0.2) ## @@ -447,15 +447,15 @@ _, csmc = repeatCSMStep!(hists[2], 6; duplicate=true); ## # solveTree has weird problem in breaking correct init and inserting zeros??? -@test isapprox( getPPESuggested(fg, :x0), x0_val_ref; atol=0.2) -@test isapprox( getPPESuggested(fg, :x1), x1_val_ref; atol=0.2) -@test isapprox( getPPESuggested(fg, :x2), x2_val_ref; atol=0.2) -@test isapprox( getPPESuggested(fg, :x3), x3_val_ref; atol=0.2) -@test isapprox( getPPESuggested(fg, :x4), x4_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(fg, :x0, :default).suggested, x0_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(fg, :x1, :default).suggested, x1_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(fg, :x2, :default).suggested, x2_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(fg, :x3, :default).suggested, x3_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(fg, :x4, :default).suggested, x4_val_ref; atol=0.2) -@test isapprox( getPPESuggested(fg, :x5), x5_val_ref; atol=0.2) -@test isapprox( getPPESuggested(fg, :x6), x6_val_ref; atol=0.2) -@test isapprox( getPPESuggested(fg, :x7), x7_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(fg, :x5, :default).suggested, x5_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(fg, :x6, :default).suggested, x6_val_ref; atol=0.2) +@test isapprox( calcMeanMaxSuggested(fg, :x7, :default).suggested, x7_val_ref; atol=0.2) ## @@ -601,7 +601,7 @@ push!(forcepath, :x5x6ωβf1) push!(forcepath, :x6) push!(forcepath, :x6x7ωβf1) push!(forcepath, :x7) -pts = approxConv(fg, :x0f1, :x7, setPPE=true, tfg=tfg, path=forcepath) +pts = approxConv(fg, :x0f1, :x7, tfg=tfg, path=forcepath) ## @@ -636,7 +636,7 @@ sl = DifferentialEquations.solve(oder_.forwardProblem) ## check the approxConv is working right for sym in setdiff(ls(tfg), [:ωβ]) - @test getPPE(tfg, sym).suggested - sl(getVariable(fg, sym) |> getTimestamp |> DateTime |> datetime2unix) |> norm < 0.2 + @test calcMeanMaxSuggested(tfg, sym, :default).suggested - sl(getVariable(fg, sym) |> getTimestamp |> DateTime |> datetime2unix) |> norm < 0.2 end @@ -691,10 +691,9 @@ pts_ = approxConv(fg, :x0x1ωβf1, :ωβ) # ## Solve quality might not yet be good enough for this particular test case -# @test getPPE(fg, :ωβ).suggested - [0.7;-0.3] |> norm < 0.2 - +# @test calcMeanMaxSuggested(fg, :ωβ, :default).suggested - [0.7;-0.3] |> norm < 0.2 # for sym in setdiff(ls(tfg), [:ωβ]) -# @test getPPE(fg, sym).suggested - sl(getVariable(fg, sym) |> getTimestamp |> DateTime |> datetime2unix) |> norm < 0.2 +# @test calcMeanMaxSuggested(fg, sym, :default).suggested - sl(getVariable(fg, sym) |> getTimestamp |> DateTime |> datetime2unix) |> norm < 0.2 # end diff --git a/IncrementalInference/test/testDeadReckoningTether.jl b/IncrementalInference/test/testDeadReckoningTether.jl index c6c3bf2f..2b16cbaa 100644 --- a/IncrementalInference/test/testDeadReckoningTether.jl +++ b/IncrementalInference/test/testDeadReckoningTether.jl @@ -100,7 +100,7 @@ val = accumulateFactorMeans(fg, [:x0deadreckon_x0f1]) # must fix return type stability fval = float(val...) 
-@test isapprox(fval, calcVariablePPE(fg, :x0).suggested[1], atol=1e-4 ) +@test isapprox(fval, calcMeanMaxSuggested(fg, :x0).suggested[1], atol=1e-4 ) #TODO improve test rebaseFactorVariable!(fg, :x0deadreckon_x0f1, [:x1; :deadreckon_x0]) diff --git a/IncrementalInference/test/testDefaultDeconv.jl b/IncrementalInference/test/testDefaultDeconv.jl index 917681e5..3ebdbb57 100644 --- a/IncrementalInference/test/testDefaultDeconv.jl +++ b/IncrementalInference/test/testDefaultDeconv.jl @@ -62,8 +62,8 @@ solveTree!(fg); ## # make sure each variable is where it should be first -@test isapprox(getPPE(fg, :hypoA).suggested[1], 5, atol=1) -@test isapprox(getPPE(fg, :hypoB).suggested[1], 10,atol=1) +@test isapprox(calcMeanMaxSuggested(fg, :hypoA, :default).suggested[1], 5, atol=1) +@test isapprox(calcMeanMaxSuggested(fg, :hypoB, :default).suggested[1], 10,atol=1) X0_ = getBelief(fg, :x0) X0 = AMP._pointsToMatrixCoords(X0_.manifold, getPoints(X0_)) @@ -140,7 +140,7 @@ solveTree!(fg); ## make sure result is in the right place -@test abs(getPPE(fg, :x0).suggested[1]) < 1.0 +@test abs(calcMeanMaxSuggested(fg, :x0, :default).suggested[1]) < 1.0 X1_ = getBelief(fg, :x1) |> getPoints TensorCast.@cast X1[i,j] := X1_[j][i] diff --git a/IncrementalInference/test/testEuclidDistance.jl b/IncrementalInference/test/testEuclidDistance.jl index 9c4629c7..558681f3 100644 --- a/IncrementalInference/test/testEuclidDistance.jl +++ b/IncrementalInference/test/testEuclidDistance.jl @@ -26,7 +26,7 @@ tree = solveTree!(fg) ## -@test isapprox(getPPE(fg, :x0).suggested[1], 0, atol=1) +@test isapprox(calcMeanMaxSuggested(fg, :x0, :default).suggested[1], 0, atol=1) pts_ = getBelief(fg, :x1) |> getPoints @cast pts[i,j] := pts_[j][i] @@ -57,8 +57,8 @@ addFactor!(fg, [:x0;:x1], eud) tree = solveTree!(fg) -@test isapprox(getPPE(fg, :x0).suggested[1], 0, atol=1) -@test isapprox(getPPE(fg, :x0).suggested[1], 0, atol=1) +@test isapprox(calcMeanMaxSuggested(fg, :x0, :default).suggested[1], 0, atol=1) +@test isapprox(calcMeanMaxSuggested(fg, :x0, :default).suggested[1], 0, atol=1) pts_ = getBelief(fg, :x1) |> getPoints @cast pts[i,j] := pts_[j][i] @@ -179,7 +179,7 @@ points = [[100.0],] fg = IIF.generateGraph_EuclidDistance(points) solveTree!(fg) -@test isapprox(getPPE(fg, :x1).suggested[1], 100, atol=1) +@test isapprox(calcMeanMaxSuggested(fg, :x1, :default).suggested[1], 100, atol=1) pts_ = getBelief(fg, :l1) |> getPoints @cast pts[i,j] := pts_[j][i] diff --git a/IncrementalInference/test/testExpXstroke.jl b/IncrementalInference/test/testExpXstroke.jl index 766d8a34..93b2de61 100644 --- a/IncrementalInference/test/testExpXstroke.jl +++ b/IncrementalInference/test/testExpXstroke.jl @@ -51,7 +51,7 @@ hist = IIF.solveTree!(fg; smtasks=smtasks); #, recordcliqs=ls(fg)); for var in sortDFG(ls(fg)) - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg, var, :default).suggested println("Testing ", var,": ", sppe) @test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.2) end @@ -94,7 +94,7 @@ tree = IIF.solveTree!(fg; smtasks=smtasks); ## for var in sortDFG(ls(fg)) - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg, var, :default).suggested println("Testing ", var,": ", sppe) @test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.4) end @@ -118,7 +118,7 @@ smtasks = Task[] tree = IIF.solveTree!(fg; smtasks=smtasks); for var in sortDFG(ls(fg)) - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg, var, 
:default).suggested println("Testing ", var,": ", sppe) s = findfirst(r"\d", string(var))[1] @test isapprox(sppe[1], parse(Int,string(var)[s:end]), atol=0.45) diff --git a/IncrementalInference/test/testHasPriors913.jl b/IncrementalInference/test/testHasPriors913.jl index b986cbb7..022586d7 100644 --- a/IncrementalInference/test/testHasPriors913.jl +++ b/IncrementalInference/test/testHasPriors913.jl @@ -40,7 +40,7 @@ tree = solveTree!(fg); # tree = solveTree!(fg; smtasks, verbose=true, timeout=20, recordcliqs=ls(fg)); for i = 0:4 - ppe = getPPE(getVariable(fg, Symbol("x$i"))).suggested[1] + ppe = calcMeanMaxSuggested(fg, Symbol("x$i"), :default).suggested[1] @show i ppe @test isapprox(ppe, i; atol=0.7) end diff --git a/IncrementalInference/test/testMultiHypo3Door.jl b/IncrementalInference/test/testMultiHypo3Door.jl index 31eaeed0..01e74f1f 100644 --- a/IncrementalInference/test/testMultiHypo3Door.jl +++ b/IncrementalInference/test/testMultiHypo3Door.jl @@ -207,16 +207,16 @@ if false ## -# check the PPEs are the same -@test isapprox(getPPE(fg, :x0).suggested[1], x0; atol = 3.0) -@test isapprox(getPPE(fg, :x1).suggested[1], x1; atol = 3.0) -@test isapprox(getPPE(fg, :x2).suggested[1], x2; atol = 3.0) -@test isapprox(getPPE(fg, :x3).suggested[1], x3; atol = 3.0) - -@test isapprox(getPPE(fg, :l0).suggested[1], l0; atol = 3.0) -@test isapprox(getPPE(fg, :l1).suggested[1], l1; atol = 3.0) -@test isapprox(getPPE(fg, :l2).suggested[1], l2; atol = 3.0) -@test isapprox(getPPE(fg, :l3).suggested[1], l3; atol = 3.0) +# check the sugested are the same +@test isapprox(calcMeanMaxSuggested(fg, :x0, :default).suggested[1], x0; atol = 3.0) +@test isapprox(calcMeanMaxSuggested(fg, :x1, :default).suggested[1], x1; atol = 3.0) +@test isapprox(calcMeanMaxSuggested(fg, :x2, :default).suggested[1], x2; atol = 3.0) +@test isapprox(calcMeanMaxSuggested(fg, :x3, :default).suggested[1], x3; atol = 3.0) + +@test isapprox(calcMeanMaxSuggested(fg, :l0, :default).suggested[1], l0; atol = 3.0) +@test isapprox(calcMeanMaxSuggested(fg, :l1, :default).suggested[1], l1; atol = 3.0) +@test isapprox(calcMeanMaxSuggested(fg, :l2, :default).suggested[1], l2; atol = 3.0) +@test isapprox(calcMeanMaxSuggested(fg, :l3, :default).suggested[1], l3; atol = 3.0) end ## diff --git a/IncrementalInference/test/testMultihypoAndChain.jl b/IncrementalInference/test/testMultihypoAndChain.jl index 2bdf43a5..002c764b 100644 --- a/IncrementalInference/test/testMultihypoAndChain.jl +++ b/IncrementalInference/test/testMultihypoAndChain.jl @@ -71,9 +71,9 @@ tree = solveTree!(fg, eliminationOrder=eo) #, smtasks=smtasks, recordcliqs=ls(fg ## -@test isapprox(DFG.getPPESuggested(fg, :x0)[], 0, atol = 0.2) -@test isapprox(DFG.getPPESuggested(fg, :x1)[], 1, atol = 0.2) -@test isapprox(DFG.getPPESuggested(fg, :l1)[], 1, atol = 0.2) +@test isapprox(calcMeanMaxSuggested(fg, :x0, :default).suggested[], 0, atol = 0.2) +@test isapprox(calcMeanMaxSuggested(fg, :x1, :default).suggested[], 1, atol = 0.2) +@test isapprox(calcMeanMaxSuggested(fg, :l1, :default).suggested[], 1, atol = 0.2) L2 = getBelief(fg, :l2) npts = length(getPoints(L2)) @@ -82,7 +82,6 @@ L2_ = manikde!(ContinuousScalar, pts) # test that there is at least a mode present @test mmd(L2_, L2, ContinuousScalar) < 1e-3 -# @test isapprox(DFG.getPPESuggested(fg, :l2)[], 2, atol = 0.2) ## @@ -123,15 +122,14 @@ tree = solveTree!(fg) # expect x1 x2 to have at least one mode at 0 -@test getPPE(fg, :x1).suggested[1] - x1 |> abs < 1.2 -@test getPPE(fg, :x2).suggested[1] - x2 |> abs < 1.2 +@test 
calcMeanMaxSuggested(fg, :x1, :default).suggested[1] - x1 |> abs < 1.2 +@test calcMeanMaxSuggested(fg, :x2, :default).suggested[1] - x2 |> abs < 1.2 -@test getPPE(fg, :l1).suggested[1] - l1 |> abs < 1.2 -@test getPPE(fg, :l2).suggested[1] - l2 |> abs < 1.2 +@test calcMeanMaxSuggested(fg, :l1, :default).suggested[1] - l1 |> abs < 1.2 +@test calcMeanMaxSuggested(fg, :l2, :default).suggested[1] - l2 |> abs < 1.2 -# l1_0, l2_0 should be nearby around l1 and l2, but cannot confirm 100% -@test getPPE(fg, :l1_0).suggested[1] - l1 |> abs < 10 -@test getPPE(fg, :l2_0).suggested[1] - l2 |> abs < 10 +@test calcMeanMaxSuggested(fg, :l1_0, :default).suggested[1] - l1 |> abs < 10 +@test calcMeanMaxSuggested(fg, :l2_0, :default).suggested[1] - l2 |> abs < 10 ## diff --git a/IncrementalInference/test/testPartialNH.jl b/IncrementalInference/test/testPartialNH.jl index c82a0b14..11216cd0 100644 --- a/IncrementalInference/test/testPartialNH.jl +++ b/IncrementalInference/test/testPartialNH.jl @@ -50,8 +50,8 @@ solveTree!(fg); ## @warn "WIP on testPartialNH.jl during transition to Manifolds.jl" -@test isapprox( getPPE(fg, :x0).suggested, [0;0;0], atol=1) -@test isapprox( getPPE(fg, :x1).suggested, [10;0;0], atol=1) +@test isapprox( calcMeanMaxSuggested(fg, :x0, :default).suggested, [0;0;0], atol=1) +@test isapprox( calcMeanMaxSuggested(fg, :x1, :default).suggested, [10;0;0], atol=1) ## @@ -78,8 +78,8 @@ solveTree!(fg); ## @warn "WIP testPartialNH.jl during transition to Manifolds.jl" -@test isapprox( getPPE(fg, :x0).suggested, [0;0;0], atol=1) -@test isapprox( getPPE(fg, :x1).suggested, [10;0;0], atol=2) +@test isapprox( calcMeanMaxSuggested(fg, :x0, :default).suggested, [0;0;0], atol=1) +@test isapprox( calcMeanMaxSuggested(fg, :x1, :default).suggested, [10;0;0], atol=2) ## diff --git a/IncrementalInference/test/testSkipUpDown.jl b/IncrementalInference/test/testSkipUpDown.jl index e1942727..297ced5d 100644 --- a/IncrementalInference/test/testSkipUpDown.jl +++ b/IncrementalInference/test/testSkipUpDown.jl @@ -26,7 +26,7 @@ hists = fetchCliqHistoryAll!(smtasks) #test if values are still correct for var in sortDFG(ls(fg)) - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg, var, :default).suggested @test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.2) end @@ -43,7 +43,7 @@ hists = fetchCliqHistoryAll!(smtasks); #test if values are still correct for var in sortDFG(ls(fg)) - sppe = getVariable(fg,var) |> getPPE |> IIF.getPPESuggested + sppe = calcMeanMaxSuggested(fg, var, :default).suggested @test isapprox(sppe[1], parse(Int,string(var)[end]), atol=0.2) end diff --git a/IncrementalInference/test/testSolveKey.jl b/IncrementalInference/test/testSolveKey.jl index 72c759e0..433ee862 100644 --- a/IncrementalInference/test/testSolveKey.jl +++ b/IncrementalInference/test/testSolveKey.jl @@ -62,15 +62,6 @@ doautoinit!(fg, :b, solveKey=:testSolveKey) @test (:testSolveKey in listSolveKeys(getVariable(fg, :a))) @test (:testSolveKey in listSolveKeys(getVariable(fg, :b))) - -## - -@test isapprox( calcPPE(fg, :a, solveKey=:testSolveKey).suggested[1], 10, atol=1) -@test isapprox( calcPPE(fg, :b, solveKey=:testSolveKey).suggested[1], 20, atol=1) - - -## - end @@ -102,16 +93,6 @@ getSolverParams(fg).graphinit=true solveTree!(fg, solveKey=:testSolveKey ) ## - -@test isapprox( calcPPE(fg, :a, solveKey=:testSolveKey).suggested[1], 0, atol=2) -@test isapprox( calcPPE(fg, :b, solveKey=:testSolveKey).suggested[1], 10, atol=2) -@test isapprox( calcPPE(fg, :c, 
solveKey=:testSolveKey).suggested[1], 20, atol=2) -@test isapprox( calcPPE(fg, :d, solveKey=:testSolveKey).suggested[1], 30, atol=2) -@test isapprox( calcPPE(fg, :e, solveKey=:testSolveKey).suggested[1], 40, atol=2) - - -## - # using RoMEPlotting # Gadfly.set_default_plot_size(35cm,25cm) diff --git a/IncrementalInference/test/testSolveSetPPE.jl b/IncrementalInference/test/testSolveSetPPE.jl deleted file mode 100644 index 58af8a9a..00000000 --- a/IncrementalInference/test/testSolveSetPPE.jl +++ /dev/null @@ -1,62 +0,0 @@ -# test that PPE values are update with solve, see issue #548 - -using Test -using IncrementalInference -using DistributedFactorGraphs - -@testset "test PPE update during solve" begin - -fg = generateGraph_Kaess(graphinit=true) -initAll!(fg) - -# list of variables to check -vars = listVariables(fg) - -# fetch values before solve -before = Dict() -for vs in vars - before[vs] = getVariablePPE(getVariable(fg, vs)) |> getPPESuggested -end - -# do the solve -# getSolverParams(fg).dbg = true - -# tree = buildTreeReset!(fg) -# drawTree(tree, show=true) - -# solveCliqUp!(fg, tree, :l2) -# solveCliqUp!(fg, tree, :x3) -# solveCliqUp!(fg, tree, :x2) - -solveTree!(fg) - - -after = Dict() -for vs in vars - after[vs] = getVariablePPE(getVariable(fg, vs)) |> getPPESuggested -end - -# before and after should be noticably different, because first inferred values have been found -for vs in vars - errd = norm(before[vs] - after[vs]) - # @show vs, errd - @test 1e-5 < errd -end - -# force recalc and update each PPE -force = Dict() -for vs in vars - setVariablePosteriorEstimates!(fg, vs) - force[vs] = getVariablePPE(getVariable(fg, vs)) |> getPPESuggested - # these need to be close to the same as after - errd = norm(force[vs] - after[vs]) - # @show vs, errd - @test errd < 0.1 -end - - - -## suspect cliqSubFg updated, but not back to main dfg object... test via load graph - - -end diff --git a/IncrementalInference/test/testSpecialSampler.jl b/IncrementalInference/test/testSpecialSampler.jl index ecac3ba4..c53fff26 100644 --- a/IncrementalInference/test/testSpecialSampler.jl +++ b/IncrementalInference/test/testSpecialSampler.jl @@ -49,9 +49,8 @@ addFactor!(fg, [:x0;:x1], SpecialLinearOffset(Normal(10,1))) tree = solveTree!(fg) -@test getPPE(fg, :x0).suggested[1] |> abs < 1.0 - -@test getPPE(fg, :x1).suggested[1] - 10 |> abs < 3.0 +@test calcMeanMaxSuggested(fg, :x0, :default).suggested[1] |> abs < 1.0 +@test calcMeanMaxSuggested(fg, :x1, :default).suggested[1] - 10 |> abs < 3.0 diff --git a/IncrementalInference/test/testpartialconstraint.jl b/IncrementalInference/test/testpartialconstraint.jl index de26fe65..33ec2dd3 100644 --- a/IncrementalInference/test/testpartialconstraint.jl +++ b/IncrementalInference/test/testpartialconstraint.jl @@ -310,7 +310,7 @@ pts_ = getVal(fg, :x1) pts_ = getVal(fg, :x2) -ppe = getPPE(fg, :x2).mean +ppe = calcMeanMaxSuggested(fg, :x2, :default).mean X2 = getBelief(fg, :x2)
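
For context, a minimal sketch of the replacement API exercised by the updated tests above, assuming a trivial `ContinuousScalar` graph with a single `Prior` (the graph itself is illustrative, not part of the patch):

```julia
using IncrementalInference

# build and solve a one-variable graph
fg = initfg()
addVariable!(fg, :x0, ContinuousScalar)
addFactor!(fg, [:x0], Prior(Normal(1.0, 0.1)))
solveTree!(fg)

# calcMeanMaxSuggested replaces getPPE/calcPPE and returns a NamedTuple
# with fields mean, max, and suggested (suggested currently equals mean)
est = calcMeanMaxSuggested(fg, :x0, :default)
@assert isapprox(est.suggested[1], 1.0; atol = 0.1)
```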