diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 00000000..1d460077 --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,21 @@ +cff-version: 1.2.0 +message: "If you use this software, please cite the article below." +authors: + - name: "ModelPredictiveControl.jl contributors" +title: "ModelPredictiveControl.jl - An open source model predictive control package for Julia." +url: "https://github.com/JuliaControl/ModelPredictiveControl.jl" +preferred-citation: + type: generic + authors: + - family-names: "Gagnon" + given-names: "Francis" + - family-names: "Thivierge" + given-names: "Alex" + - family-names: "Desbiens" + given-names: "André" + - family-names: "Bagge Carlson" + given-names: "Fredrik" + title: "ModelPredictiveControl.jl: advanced process control made easy in Julia" + year: 2024 + doi: "10.1007/978-3-030-68928-5" + url: "https://arxiv.org/abs/2411.09764" \ No newline at end of file diff --git a/README.md b/README.md index 883bb278..4bdcd66c 100644 --- a/README.md +++ b/README.md @@ -84,7 +84,7 @@ for more detailed examples. - [x] move suppression - [x] input setpoint tracking - [x] terminal costs - - [x] economic costs (economic model predictive control) + - [x] custom economic costs (economic model predictive control) - [x] adaptive linear model predictive controller - [x] manual model modification - [x] automatic successive linearization of a nonlinear model @@ -95,7 +95,7 @@ for more detailed examples. - [x] manipulated inputs - [x] manipulated inputs increments - [x] terminal states to ensure nominal stability -- [ ] custom manipulated input constraints that are a function of the predictions +- [x] custom economic inequality constraints (soft or hard) - [x] supported feedback strategy: - [x] state estimator (see State Estimation features) - [x] internal model structure with a custom stochastic model diff --git a/docs/src/manual/nonlinmpc.md b/docs/src/manual/nonlinmpc.md index af14f6c5..96cd13d2 100644 --- a/docs/src/manual/nonlinmpc.md +++ b/docs/src/manual/nonlinmpc.md @@ -194,9 +194,9 @@ output vector of `plant` argument corresponds to the model output vector in `mpc We can now define the ``J_E`` function and the `empc` controller: ```@example 1 -function JE(UE, ŶE, _ , p) +function JE(Ue, Ŷe, _ , p) Ts = p - τ, ω = UE[1:end-1], ŶE[2:2:end-1] + τ, ω = Ue[1:end-1], Ŷe[2:2:end-1] return Ts*sum(τ.*ω) end empc = NonLinMPC(estim2; Hp, Hc, Nwt, Mwt=[0.5, 0], Cwt=Inf, Ewt=3.5e3, JE=JE, p=Ts) diff --git a/src/controller/construct.jl b/src/controller/construct.jl index 76b2b6c0..17fe3da4 100644 --- a/src/controller/construct.jl +++ b/src/controller/construct.jl @@ -1,5 +1,5 @@ "Include all the data for the constraints of [`PredictiveController`](@ref)" -struct ControllerConstraint{NT<:Real} +struct ControllerConstraint{NT<:Real, GCfunc<:Function} ẽx̂ ::Matrix{NT} fx̂ ::Vector{NT} gx̂ ::Matrix{NT} @@ -31,12 +31,14 @@ struct ControllerConstraint{NT<:Real} c_x̂min ::Vector{NT} c_x̂max ::Vector{NT} i_g ::BitVector + gc! ::GCfunc + nc ::Int end @doc raw""" setconstraint!(mpc::PredictiveController; ) -> mpc -Set the constraint parameters of the [`PredictiveController`](@ref) `mpc`. +Set the bound constraint parameters of the [`PredictiveController`](@ref) `mpc`. 
The predictive controllers support both soft and hard constraints, defined by: ```math @@ -146,7 +148,8 @@ function setconstraint!( C_Δumin = C_Deltaumin, C_Δumax = C_Deltaumax, ) model, con, optim = mpc.estim.model, mpc.con, mpc.optim - nu, ny, nx̂, Hp, Hc, nϵ = model.nu, model.ny, mpc.estim.nx̂, mpc.Hp, mpc.Hc, mpc.nϵ + nu, ny, nx̂, Hp, Hc = model.nu, model.ny, mpc.estim.nx̂, mpc.Hp, mpc.Hc + nϵ, nc = mpc.nϵ, con.nc notSolvedYet = (JuMP.termination_status(optim) == JuMP.OPTIMIZE_NOT_CALLED) if isnothing(Umin) && !isnothing(umin) size(umin) == (nu,) || throw(ArgumentError("umin size must be $((nu,))")) @@ -275,7 +278,8 @@ function setconstraint!( i_Ymin, i_Ymax = .!isinf.(con.Y0min), .!isinf.(con.Y0max) i_x̂min, i_x̂max = .!isinf.(con.x̂0min), .!isinf.(con.x̂0max) if notSolvedYet - con.i_b[:], con.i_g[:], con.A[:] = init_matconstraint_mpc(model, + con.i_b[:], con.i_g[:], con.A[:] = init_matconstraint_mpc( + model, nc, i_Umin, i_Umax, i_ΔŨmin, i_ΔŨmax, i_Ymin, i_Ymax, i_x̂min, i_x̂max, con.A_Umin, con.A_Umax, con.A_ΔŨmin, con.A_ΔŨmax, @@ -287,9 +291,10 @@ function setconstraint!( JuMP.delete(optim, optim[:linconstraint]) JuMP.unregister(optim, :linconstraint) @constraint(optim, linconstraint, A*ΔŨvar .≤ b) - setnonlincon!(mpc, model, optim) + set_nonlincon!(mpc, model, optim) else - i_b, i_g = init_matconstraint_mpc(model, + i_b, i_g = init_matconstraint_mpc( + model, nc, i_Umin, i_Umax, i_ΔŨmin, i_ΔŨmax, i_Ymin, i_Ymax, i_x̂min, i_x̂max ) @@ -302,8 +307,10 @@ end @doc raw""" - init_matconstraint_mpc(model::LinModel, - i_Umin, i_Umax, i_ΔŨmin, i_ΔŨmax, i_Ymin, i_Ymax, i_x̂min, i_x̂max, args... + init_matconstraint_mpc( + model::LinModel, nc::Int, + i_Umin, i_Umax, i_ΔŨmin, i_ΔŨmax, i_Ymin, i_Ymax, i_x̂min, i_x̂max, + args... ) -> i_b, i_g, A Init `i_b`, `i_g` and `A` matrices for the linear and nonlinear inequality constraints. @@ -315,19 +322,22 @@ The linear and nonlinear inequality constraints are respectively defined as: \mathbf{g(ΔŨ)} &≤ \mathbf{0} \end{aligned} ``` -`i_b` is a `BitVector` including the indices of ``\mathbf{b}`` that are finite numbers. -`i_g` is a similar vector but for the indices of ``\mathbf{g}`` (empty if `model` is a -[`LinModel`](@ref)). The method also returns the ``\mathbf{A}`` matrix if `args` is -provided. In such a case, `args` needs to contain all the inequality constraint matrices: +The argument `nc` is the number of custom nonlinear constraints in ``\mathbf{g_c}``. `i_b` +is a `BitVector` including the indices of ``\mathbf{b}`` that are finite numbers. `i_g` is a +similar vector but for the indices of ``\mathbf{g}``. The method also returns the +``\mathbf{A}`` matrix if `args` is provided. In such a case, `args` needs to contain all +the inequality constraint matrices: `A_Umin, A_Umax, A_ΔŨmin, A_ΔŨmax, A_Ymin, A_Ymax, A_x̂min, A_x̂max`. """ -function init_matconstraint_mpc(::LinModel{NT}, - i_Umin, i_Umax, i_ΔŨmin, i_ΔŨmax, i_Ymin, i_Ymax, i_x̂min, i_x̂max, args... +function init_matconstraint_mpc( + ::LinModel{NT}, nc::Int, + i_Umin, i_Umax, i_ΔŨmin, i_ΔŨmax, i_Ymin, i_Ymax, i_x̂min, i_x̂max, + args... 
) where {NT<:Real} i_b = [i_Umin; i_Umax; i_ΔŨmin; i_ΔŨmax; i_Ymin; i_Ymax; i_x̂min; i_x̂max] - i_g = BitVector() + i_g = trues(nc) if isempty(args) - A = zeros(NT, length(i_b), 0) + A = nothing else A_Umin, A_Umax, A_ΔŨmin, A_ΔŨmax, A_Ymin, A_Ymax, A_x̂min, A_x̂max = args A = [A_Umin; A_Umax; A_ΔŨmin; A_ΔŨmax; A_Ymin; A_Ymax; A_x̂min; A_x̂max] @@ -336,13 +346,15 @@ function init_matconstraint_mpc(::LinModel{NT}, end "Init `i_b, A` without outputs and terminal constraints if `model` is not a [`LinModel`](@ref)." -function init_matconstraint_mpc(::SimModel{NT}, - i_Umin, i_Umax, i_ΔŨmin, i_ΔŨmax, i_Ymin, i_Ymax, i_x̂min, i_x̂max, args... +function init_matconstraint_mpc( + ::SimModel{NT}, nc::Int, + i_Umin, i_Umax, i_ΔŨmin, i_ΔŨmax, i_Ymin, i_Ymax, i_x̂min, i_x̂max, + args... ) where {NT<:Real} i_b = [i_Umin; i_Umax; i_ΔŨmin; i_ΔŨmax] - i_g = [i_Ymin; i_Ymax; i_x̂min; i_x̂max] + i_g = [i_Ymin; i_Ymax; i_x̂min; i_x̂max; trues(nc)] if isempty(args) - A = zeros(NT, length(i_b), 0) + A = nothing else A_Umin, A_Umax, A_ΔŨmin, A_ΔŨmax, _ , _ , _ , _ = args A = [A_Umin; A_Umax; A_ΔŨmin; A_ΔŨmax] @@ -351,7 +363,7 @@ function init_matconstraint_mpc(::SimModel{NT}, end "By default, there is no nonlinear constraint, thus do nothing." -setnonlincon!(::PredictiveController, ::SimModel, ::JuMP.GenericModel) = nothing +set_nonlincon!(::PredictiveController, ::SimModel, ::JuMP.GenericModel) = nothing """ default_Hp(model::LinModel) @@ -625,7 +637,10 @@ function init_quadprog(::SimModel{NT}, Ẽ, S̃, M_Hp, Ñ_Hc, L_Hp) where {NT<: end """ - init_defaultcon_mpc(estim, C, S, N_Hc, E, ex̂, fx̂, gx̂, jx̂, kx̂, vx̂) -> con, S̃, Ñ_Hc, Ẽ + init_defaultcon_mpc( + estim, C, S, N_Hc, E, ex̂, fx̂, gx̂, jx̂, kx̂, vx̂, + gc!=(_,_,_,_,_,_)->nothing, nc=0 + ) -> con, S̃, Ñ_Hc, Ẽ Init `ControllerConstraint` struct with default parameters based on estimator `estim`. @@ -633,8 +648,9 @@ Also return `S̃`, `Ñ_Hc` and `Ẽ` matrices for the the augmented decision ve """ function init_defaultcon_mpc( estim::StateEstimator{NT}, - Hp, Hc, C, S, N_Hc, E, ex̂, fx̂, gx̂, jx̂, kx̂, vx̂, bx̂ -) where {NT<:Real} + Hp, Hc, C, S, N_Hc, E, ex̂, fx̂, gx̂, jx̂, kx̂, vx̂, bx̂, + gc!::GCfunc=(_,_,_,_,_,_)->nothing, nc=0 +) where {NT<:Real, GCfunc<:Function} model = estim.model nu, ny, nx̂ = model.nu, model.ny, estim.nx̂ nϵ = isinf(C) ? 0 : 1 @@ -661,16 +677,17 @@ function init_defaultcon_mpc( i_Ymin, i_Ymax = .!isinf.(Y0min), .!isinf.(Y0max) i_x̂min, i_x̂max = .!isinf.(x̂0min), .!isinf.(x̂0max) i_b, i_g, A = init_matconstraint_mpc( - model, + model, nc, i_Umin, i_Umax, i_ΔŨmin, i_ΔŨmax, i_Ymin, i_Ymax, i_x̂min, i_x̂max, A_Umin, A_Umax, A_ΔŨmin, A_ΔŨmax, A_Ymin, A_Ymax, A_x̂max, A_x̂min ) b = zeros(NT, size(A, 1)) # dummy b vector (updated just before optimization) - con = ControllerConstraint{NT}( + con = ControllerConstraint{NT, GCfunc}( ẽx̂ , fx̂ , gx̂ , jx̂ , kx̂ , vx̂ , bx̂ , U0min , U0max , ΔŨmin , ΔŨmax , Y0min , Y0max , x̂0min , x̂0max, A_Umin , A_Umax, A_ΔŨmin, A_ΔŨmax , A_Ymin , A_Ymax , A_x̂min , A_x̂max, - A , b , i_b , C_ymin , C_ymax , c_x̂min , c_x̂max , i_g + A , b , i_b , C_ymin , C_ymax , c_x̂min , c_x̂max , i_g, + gc! 
, nc ) return con, nϵ, S̃, Ñ_Hc, Ẽ end diff --git a/src/controller/execute.jl b/src/controller/execute.jl index 843ff10b..69e82fa4 100644 --- a/src/controller/execute.jl +++ b/src/controller/execute.jl @@ -111,15 +111,21 @@ julia> round.(getinfo(mpc)[:Ŷ], digits=3) ``` """ function getinfo(mpc::PredictiveController{NT}) where NT<:Real - model = mpc.estim.model + model = mpc.estim.model + nŶe, nUe = (mpc.Hp+1)*model.ny, (mpc.Hp+1)*model.nu info = Dict{Symbol, Union{JuMP._SolutionSummary, Vector{NT}, NT}}() Ŷ0, u0, û0 = similar(mpc.Yop), similar(model.uop), similar(model.uop) Ŷs = similar(mpc.Yop) x̂0, x̂0next = similar(mpc.estim.x̂0), similar(mpc.estim.x̂0) Ȳ, Ū = similar(mpc.Yop), similar(mpc.Uop) + Ŷe, Ue = Vector{NT}(undef, nŶe), Vector{NT}(undef, nUe) Ŷ0, x̂0end = predict!(Ŷ0, x̂0, x̂0next, u0, û0, mpc, model, mpc.ΔŨ) - U0 = mpc.S̃*mpc.ΔŨ + mpc.T_lastu0 - J = obj_nonlinprog!(U0, Ȳ, Ū, mpc, model, Ŷ0, mpc.ΔŨ) + Ŷe, Ue = extended_predictions!(Ŷe, Ue, Ū, mpc, model, Ŷ0, mpc.ΔŨ) + J = obj_nonlinprog!(Ȳ, Ū, mpc, model, Ŷe, Ue, mpc.ΔŨ) + U = Ū + U .= @views Ue[1:end-model.nu] + Ŷ = Ȳ + Ŷ .= @views Ŷe[model.ny+1:end] oldF = copy(mpc.F) predictstoch!(mpc, mpc.estim) Ŷs .= mpc.F # predictstoch! init mpc.F with Ŷs value if estim is an InternalModel @@ -127,7 +133,7 @@ function getinfo(mpc::PredictiveController{NT}) where NT<:Real info[:ΔU] = mpc.ΔŨ[1:mpc.Hc*model.nu] info[:ϵ] = mpc.nϵ == 1 ? mpc.ΔŨ[end] : NaN info[:J] = J - info[:U] = U0 + mpc.Uop + info[:U] = U info[:u] = info[:U][1:model.nu] info[:d] = mpc.d0 + model.dop info[:D̂] = mpc.D̂0 + mpc.Dop @@ -135,8 +141,8 @@ function getinfo(mpc::PredictiveController{NT}) where NT<:Real info[:Ŷ] = Ŷ0 + mpc.Yop info[:x̂end] = x̂0end + mpc.estim.x̂op info[:Ŷs] = Ŷs - info[:R̂y] = mpc.R̂y0 + mpc.Yop - info[:R̂u] = mpc.R̂u0 + mpc.Uop + info[:R̂y] = mpc.R̂y + info[:R̂u] = mpc.R̂u # --- non-Unicode fields --- info[:DeltaU] = info[:ΔU] info[:epsilon] = info[:ϵ] @@ -193,19 +199,19 @@ function initpred!(mpc::PredictiveController, model::LinModel, d, D̂, R̂y, R̂ if model.nd ≠ 0 mpc.d0 .= d .- model.dop mpc.D̂0 .= D̂ .- mpc.Dop - mpc.D̂E[1:model.nd] .= d - mpc.D̂E[model.nd+1:end] .= D̂ + mpc.D̂e[1:model.nd] .= d + mpc.D̂e[model.nd+1:end] .= D̂ mul!(F, mpc.G, mpc.d0, 1, 1) mul!(F, mpc.J, mpc.D̂0, 1, 1) end - mpc.R̂y0 .= R̂y .- mpc.Yop - Cy = F .- mpc.R̂y0 + mpc.R̂y .= R̂y + Cy = F .- (R̂y .- mpc.Yop) M_Hp_Ẽ = mpc.M_Hp*mpc.Ẽ mul!(q̃, M_Hp_Ẽ', Cy) r .= dot(Cy, mpc.M_Hp, Cy) if ~mpc.noR̂u - mpc.R̂u0 .= R̂u .- mpc.Uop - Cu = mpc.T_lastu0 .- mpc.R̂u0 + mpc.R̂u .= R̂u + Cu = mpc.T_lastu0 .- (R̂u .- mpc.Uop) L_Hp_S̃ = mpc.L_Hp*mpc.S̃ mul!(q̃, L_Hp_S̃', Cu, 1, 1) r .+= dot(Cu, mpc.L_Hp, Cu) @@ -217,7 +223,7 @@ end @doc raw""" initpred!(mpc::PredictiveController, model::SimModel, d, D̂, R̂y, R̂u) -Init `ŷ, F, d0, D̂0, D̂E, R̂y0, R̂u0` vectors when model is not a [`LinModel`](@ref). +Init `ŷ, F, d0, D̂0, D̂e, R̂y, R̂u` vectors when model is not a [`LinModel`](@ref). 
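The extended measured disturbance vector filled here follows the same definition as the one passed to the `JE` and `gc!` functions:
```math
\mathbf{D̂_e} = \begin{bmatrix} \mathbf{d}(k) \\ \mathbf{D̂} \end{bmatrix}
```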
""" function initpred!(mpc::PredictiveController, model::SimModel, d, D̂, R̂y, R̂u) mul!(mpc.T_lastu0, mpc.T, mpc.estim.lastu0) @@ -226,12 +232,12 @@ function initpred!(mpc::PredictiveController, model::SimModel, d, D̂, R̂y, R̂ if model.nd ≠ 0 mpc.d0 .= d .- model.dop mpc.D̂0 .= D̂ .- mpc.Dop - mpc.D̂E[1:model.nd] .= d - mpc.D̂E[model.nd+1:end] .= D̂ + mpc.D̂e[1:model.nd] .= d + mpc.D̂e[model.nd+1:end] .= D̂ end - mpc.R̂y0 .= (R̂y .- mpc.Yop) + mpc.R̂y .= R̂y if ~mpc.noR̂u - mpc.R̂u0 .= (R̂u .- mpc.Uop) + mpc.R̂u .= R̂u end return nothing end @@ -356,57 +362,76 @@ function predict!(Ŷ0, x̂0, x̂0next, u0, û0, mpc::PredictiveController, mod end """ - obj_nonlinprog!(U0 , Ȳ, _ , mpc::PredictiveController, model::LinModel, Ŷ0, ΔŨ) + extended_predictions!(Ŷe, Ue, Ū, mpc, model, Ŷ0, ΔŨ) -> Ŷe, Ue + +Compute the extended predictions `Ŷe` and `Ue` for the nonlinear optimization. + +The function mutates `Ŷe`, `Ue` and `Ū` in arguments, without assuming any initial values. +""" +function extended_predictions!(Ŷe, Ue, Ū, mpc, model, Ŷ0, ΔŨ) + ny, nu = model.ny, model.nu + # --- extended output predictions Ŷe = [ŷ(k); Ŷ] --- + Ŷe[1:ny] .= mpc.ŷ + Ŷe[ny+1:end] .= Ŷ0 .+ mpc.Yop + # --- extended manipulated inputs Ue = [U; u(k+Hp-1)] --- + U0 = Ū + U0 .= mul!(U0, mpc.S̃, ΔŨ) .+ mpc.T_lastu0 + Ue[1:end-nu] .= U0 .+ mpc.Uop + # u(k + Hp) = u(k + Hp - 1) since Δu(k+Hp) = 0 (because Hc ≤ Hp): + Ue[end-nu+1:end] .= @views Ue[end-2nu+1:end-nu] + return Ŷe, Ue +end + +""" + obj_nonlinprog!( _ , _ , mpc::PredictiveController, model::LinModel, Ŷe, Ue, ΔŨ) Nonlinear programming objective function when `model` is a [`LinModel`](@ref). -The function is called by the nonlinear optimizer of [`NonLinMPC`](@ref) controllers. It can +The method is called by the nonlinear optimizer of [`NonLinMPC`](@ref) controllers. It can also be called on any [`PredictiveController`](@ref)s to evaluate the objective function `J` -at specific input increments `ΔŨ` and predictions `Ŷ0` values. It mutates the `U0` and -`Ȳ` arguments. +at specific `Ue`, `Ŷe` and `ΔŨ`, values. It does not mutate any argument. """ function obj_nonlinprog!( - U0, Ȳ, _ , mpc::PredictiveController, model::LinModel, Ŷ0, ΔŨ::AbstractVector{NT} + _, _, mpc::PredictiveController, model::LinModel, Ŷe, Ue, ΔŨ::AbstractVector{NT} ) where NT <: Real - J = obj_quadprog(ΔŨ, mpc.H̃, mpc.q̃) + mpc.r[] - E_JE = obj_econ!(U0, Ȳ, mpc, model, Ŷ0, ΔŨ) - return J + E_JE + JQP = obj_quadprog(ΔŨ, mpc.H̃, mpc.q̃) + mpc.r[] + E_JE = obj_econ!(Ue, Ŷe, mpc, model) + return JQP + E_JE end """ - obj_nonlinprog!(U0, Ȳ, Ū, mpc::PredictiveController, model::SimModel, Ŷ0, ΔŨ) + obj_nonlinprog!(Ȳ, Ū, mpc::PredictiveController, model::SimModel, Ŷe, Ue, ΔŨ) -Nonlinear programming objective function when `model` is not a [`LinModel`](@ref). The +Nonlinear programming objective method when `model` is not a [`LinModel`](@ref). The function `dot(x, A, x)` is a performant way of calculating `x'*A*x`. This method mutates -`U0`, `Ȳ` and `Ū` arguments (input over `Hp`, and output and input setpoint tracking error, -respectively). +`Ȳ` and `Ū` arguments, without assuming any initial values (it recuperates the values in +`Ŷe` and `Ue` arguments). 
""" function obj_nonlinprog!( - U0, Ȳ, Ū, mpc::PredictiveController, model::SimModel, Ŷ0, ΔŨ::AbstractVector{NT} + Ȳ, Ū, mpc::PredictiveController, model::SimModel, Ŷe, Ue, ΔŨ::AbstractVector{NT} ) where NT<:Real + nu, ny = model.nu, model.ny # --- output setpoint tracking term --- - Ȳ .= mpc.R̂y0 .- Ŷ0 + Ȳ .= @views Ŷe[ny+1:end] + Ȳ .= mpc.R̂y .- Ȳ JR̂y = dot(Ȳ, mpc.M_Hp, Ȳ) # --- move suppression and slack variable term --- JΔŨ = dot(ΔŨ, mpc.Ñ_Hc, ΔŨ) - # --- input over prediction horizon --- - if !mpc.noR̂u || !iszero(mpc.E) - U0 .= mul!(U0, mpc.S̃, ΔŨ) .+ mpc.T_lastu0 - end # --- input setpoint tracking term --- if !mpc.noR̂u - Ū .= mpc.R̂u0 .- U0 + Ū .= @views Ue[1:end-nu] + Ū .= mpc.R̂u .- Ū JR̂u = dot(Ū, mpc.L_Hp, Ū) else JR̂u = 0.0 end # --- economic term --- - E_JE = obj_econ!(U0, Ȳ, mpc, model, Ŷ0, ΔŨ) + E_JE = obj_econ!(Ue, Ŷe, mpc, model) return JR̂y + JΔŨ + JR̂u + E_JE end "By default, the economic term is zero." -obj_econ!( _ , _ , ::PredictiveController, ::SimModel, _ , _ ) = 0.0 +obj_econ!( _ , _ , ::PredictiveController, ::SimModel) = 0.0 @doc raw""" optim_objective!(mpc::PredictiveController) -> ΔŨ diff --git a/src/controller/explicitmpc.jl b/src/controller/explicitmpc.jl index 71e829be..00e6e5e6 100644 --- a/src/controller/explicitmpc.jl +++ b/src/controller/explicitmpc.jl @@ -9,8 +9,8 @@ struct ExplicitMPC{NT<:Real, SE<:StateEstimator} <: PredictiveController{NT} Ñ_Hc::Hermitian{NT, Matrix{NT}} L_Hp::Hermitian{NT, Matrix{NT}} E::NT - R̂u0::Vector{NT} - R̂y0::Vector{NT} + R̂u::Vector{NT} + R̂y::Vector{NT} noR̂u::Bool S̃::Matrix{NT} T::Matrix{NT} @@ -30,7 +30,7 @@ struct ExplicitMPC{NT<:Real, SE<:StateEstimator} <: PredictiveController{NT} Ps::Matrix{NT} d0::Vector{NT} D̂0::Vector{NT} - D̂E::Vector{NT} + D̂e::Vector{NT} Uop::Vector{NT} Yop::Vector{NT} Dop::Vector{NT} @@ -49,7 +49,7 @@ struct ExplicitMPC{NT<:Real, SE<:StateEstimator} <: PredictiveController{NT} N_Hc = Hermitian(convert(Matrix{NT}, N_Hc), :L) L_Hp = Hermitian(convert(Matrix{NT}, L_Hp), :L) # dummy vals (updated just before optimization): - R̂y0, R̂u0, T_lastu0 = zeros(NT, ny*Hp), zeros(NT, nu*Hp), zeros(NT, nu*Hp) + R̂y, R̂u, T_lastu0 = zeros(NT, ny*Hp), zeros(NT, nu*Hp), zeros(NT, nu*Hp) noR̂u = iszero(L_Hp) S, T = init_ΔUtoU(model, Hp, Hc) E, G, J, K, V, B = init_predmat(estim, model, Hp, Hc) @@ -62,7 +62,7 @@ struct ExplicitMPC{NT<:Real, SE<:StateEstimator} <: PredictiveController{NT} H̃_chol = cholesky(H̃) Ks, Ps = init_stochpred(estim, Hp) # dummy vals (updated just before optimization): - d0, D̂0, D̂E = zeros(NT, nd), zeros(NT, nd*Hp), zeros(NT, nd + nd*Hp) + d0, D̂0, D̂e = zeros(NT, nd), zeros(NT, nd*Hp), zeros(NT, nd + nd*Hp) Uop, Yop, Dop = repeat(model.uop, Hp), repeat(model.yop, Hp), repeat(model.dop, Hp) nΔŨ = size(Ẽ, 2) ΔŨ = zeros(NT, nΔŨ) @@ -72,13 +72,13 @@ struct ExplicitMPC{NT<:Real, SE<:StateEstimator} <: PredictiveController{NT} ΔŨ, ŷ, Hp, Hc, nϵ, M_Hp, Ñ_Hc, L_Hp, Ewt, - R̂u0, R̂y0, noR̂u, + R̂u, R̂y, noR̂u, S̃, T, T_lastu0, Ẽ, F, G, J, K, V, B, H̃, q̃, r, H̃_chol, Ks, Ps, - d0, D̂0, D̂E, + d0, D̂0, D̂e, Uop, Yop, Dop, buffer ) diff --git a/src/controller/linmpc.jl b/src/controller/linmpc.jl index 8b84f8e4..770ba49c 100644 --- a/src/controller/linmpc.jl +++ b/src/controller/linmpc.jl @@ -19,8 +19,8 @@ struct LinMPC{ Ñ_Hc::Hermitian{NT, Matrix{NT}} L_Hp::Hermitian{NT, Matrix{NT}} E::NT - R̂u0::Vector{NT} - R̂y0::Vector{NT} + R̂u::Vector{NT} + R̂y::Vector{NT} noR̂u::Bool S̃::Matrix{NT} T::Matrix{NT} @@ -39,7 +39,7 @@ struct LinMPC{ Ps::Matrix{NT} d0::Vector{NT} D̂0::Vector{NT} - 
D̂E::Vector{NT} + D̂e::Vector{NT} Uop::Vector{NT} Yop::Vector{NT} Dop::Vector{NT} @@ -57,7 +57,7 @@ struct LinMPC{ N_Hc = Hermitian(convert(Matrix{NT}, N_Hc), :L) L_Hp = Hermitian(convert(Matrix{NT}, L_Hp), :L) # dummy vals (updated just before optimization): - R̂y0, R̂u0, T_lastu0 = zeros(NT, ny*Hp), zeros(NT, nu*Hp), zeros(NT, nu*Hp) + R̂y, R̂u, T_lastu0 = zeros(NT, ny*Hp), zeros(NT, nu*Hp), zeros(NT, nu*Hp) noR̂u = iszero(L_Hp) S, T = init_ΔUtoU(model, Hp, Hc) E, G, J, K, V, B, ex̂, gx̂, jx̂, kx̂, vx̂, bx̂ = init_predmat(estim, model, Hp, Hc) @@ -71,7 +71,7 @@ struct LinMPC{ q̃, r = zeros(NT, size(H̃, 1)), zeros(NT, 1) Ks, Ps = init_stochpred(estim, Hp) # dummy vals (updated just before optimization): - d0, D̂0, D̂E = zeros(NT, nd), zeros(NT, nd*Hp), zeros(NT, nd + nd*Hp) + d0, D̂0, D̂e = zeros(NT, nd), zeros(NT, nd*Hp), zeros(NT, nd + nd*Hp) Uop, Yop, Dop = repeat(model.uop, Hp), repeat(model.yop, Hp), repeat(model.dop, Hp) nΔŨ = size(Ẽ, 2) ΔŨ = zeros(NT, nΔŨ) @@ -81,12 +81,12 @@ struct LinMPC{ ΔŨ, ŷ, Hp, Hc, nϵ, M_Hp, Ñ_Hc, L_Hp, Ewt, - R̂u0, R̂y0, noR̂u, + R̂u, R̂y, noR̂u, S̃, T, T_lastu0, Ẽ, F, G, J, K, V, B, H̃, q̃, r, Ks, Ps, - d0, D̂0, D̂E, + d0, D̂0, D̂e, Uop, Yop, Dop, buffer ) @@ -109,7 +109,8 @@ The controller minimizes the following objective function at each discrete time + C ϵ^2 \end{aligned} ``` -in which the weight matrices are repeated ``H_p`` or ``H_c`` times by default: +subject to [`setconstraint!`](@ref) bounds, and in which the weight matrices are repeated +``H_p`` or ``H_c`` times by default: ```math \begin{aligned} \mathbf{M}_{H_p} &= \text{diag}\mathbf{(M,M,...,M)} \\ diff --git a/src/controller/nonlinmpc.jl b/src/controller/nonlinmpc.jl index 7e113d9a..4a0db080 100644 --- a/src/controller/nonlinmpc.jl +++ b/src/controller/nonlinmpc.jl @@ -23,8 +23,8 @@ struct NonLinMPC{ E::NT JE::JEfunc p::P - R̂u0::Vector{NT} - R̂y0::Vector{NT} + R̂u::Vector{NT} + R̂y::Vector{NT} noR̂u::Bool S̃::Matrix{NT} T::Matrix{NT} @@ -43,54 +43,55 @@ struct NonLinMPC{ Ps::Matrix{NT} d0::Vector{NT} D̂0::Vector{NT} - D̂E::Vector{NT} + D̂e::Vector{NT} Uop::Vector{NT} Yop::Vector{NT} Dop::Vector{NT} buffer::PredictiveControllerBuffer{NT} - function NonLinMPC{NT, SE, JM, JEFunc, P}( - estim::SE, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt, Ewt, JE::JEFunc, p::P, optim::JM - ) where {NT<:Real, SE<:StateEstimator, JM<:JuMP.GenericModel, JEFunc<:Function, P<:Any} + function NonLinMPC{NT, SE, JM, JEfunc, P}( + estim::SE, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt, Ewt, JE::JEfunc, gc, nc, p::P, optim::JM + ) where {NT<:Real, SE<:StateEstimator, JM<:JuMP.GenericModel, JEfunc<:Function, P<:Any} model = estim.model nu, ny, nd, nx̂ = model.nu, model.ny, model.nd, estim.nx̂ ŷ = copy(model.yop) # dummy vals (updated just before optimization) validate_JE(NT, JE) + gc! 
= get_mutating_gc(NT, gc) validate_weights(model, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt, Ewt) # convert `Diagonal` to normal `Matrix` if required: M_Hp = Hermitian(convert(Matrix{NT}, M_Hp), :L) N_Hc = Hermitian(convert(Matrix{NT}, N_Hc), :L) L_Hp = Hermitian(convert(Matrix{NT}, L_Hp), :L) # dummy vals (updated just before optimization): - R̂y0, R̂u0, T_lastu0 = zeros(NT, ny*Hp), zeros(NT, nu*Hp), zeros(NT, nu*Hp) + R̂y, R̂u, T_lastu0 = zeros(NT, ny*Hp), zeros(NT, nu*Hp), zeros(NT, nu*Hp) noR̂u = iszero(L_Hp) S, T = init_ΔUtoU(model, Hp, Hc) E, G, J, K, V, B, ex̂, gx̂, jx̂, kx̂, vx̂, bx̂ = init_predmat(estim, model, Hp, Hc) # dummy vals (updated just before optimization): F, fx̂ = zeros(NT, ny*Hp), zeros(NT, nx̂) con, nϵ, S̃, Ñ_Hc, Ẽ = init_defaultcon_mpc( - estim, Hp, Hc, Cwt, S, N_Hc, E, ex̂, fx̂, gx̂, jx̂, kx̂, vx̂, bx̂ + estim, Hp, Hc, Cwt, S, N_Hc, E, ex̂, fx̂, gx̂, jx̂, kx̂, vx̂, bx̂, gc!, nc ) H̃ = init_quadprog(model, Ẽ, S̃, M_Hp, Ñ_Hc, L_Hp) # dummy vals (updated just before optimization): q̃, r = zeros(NT, size(H̃, 1)), zeros(NT, 1) Ks, Ps = init_stochpred(estim, Hp) # dummy vals (updated just before optimization): - d0, D̂0, D̂E = zeros(NT, nd), zeros(NT, nd*Hp), zeros(NT, nd + nd*Hp) + d0, D̂0, D̂e = zeros(NT, nd), zeros(NT, nd*Hp), zeros(NT, nd + nd*Hp) Uop, Yop, Dop = repeat(model.uop, Hp), repeat(model.yop, Hp), repeat(model.dop, Hp) nΔŨ = size(Ẽ, 2) ΔŨ = zeros(NT, nΔŨ) buffer = PredictiveControllerBuffer{NT}(nu, ny, nd, Hp) - mpc = new{NT, SE, JM, JEFunc, P}( + mpc = new{NT, SE, JM, JEfunc, P}( estim, optim, con, ΔŨ, ŷ, Hp, Hc, nϵ, M_Hp, Ñ_Hc, L_Hp, Ewt, JE, p, - R̂u0, R̂y0, noR̂u, + R̂u, R̂y, noR̂u, S̃, T, T_lastu0, Ẽ, F, G, J, K, V, B, H̃, q̃, r, Ks, Ps, - d0, D̂0, D̂E, + d0, D̂0, D̂e, Uop, Yop, Dop, buffer ) @@ -112,28 +113,35 @@ controller minimizes the following objective function at each discrete time ``k` + \mathbf{(ΔU)}' \mathbf{N}_{H_c} \mathbf{(ΔU)} \\& + \mathbf{(R̂_u - U)}' \mathbf{L}_{H_p} \mathbf{(R̂_u - U)} + C ϵ^2 - + E J_E(\mathbf{U}_E, \mathbf{Ŷ}_E, \mathbf{D̂}_E, \mathbf{p}) + + E J_E(\mathbf{U_e}, \mathbf{Ŷ_e}, \mathbf{D̂_e}, \mathbf{p}) \end{aligned} ``` -See [`LinMPC`](@ref) for the variable definitions. The custom economic function ``J_E`` can -penalizes solutions with high economic costs. Setting all the weights to 0 except ``E`` -creates a pure economic model predictive controller (EMPC). The arguments of ``J_E`` include -the manipulated inputs, the predicted outputs and measured disturbances from ``k`` to -``k+H_p`` inclusively: +subject to [`setconstraint!`](@ref) bounds, and the custom inequality constraints: ```math - \mathbf{U}_E = \begin{bmatrix} \mathbf{U} \\ \mathbf{u}(k+H_p-1) \end{bmatrix} , \quad - \mathbf{Ŷ}_E = \begin{bmatrix} \mathbf{ŷ}(k) \\ \mathbf{Ŷ} \end{bmatrix} , \quad - \mathbf{D̂}_E = \begin{bmatrix} \mathbf{d}(k) \\ \mathbf{D̂} \end{bmatrix} +\mathbf{g_c}\Big(\mathbf{U_e}, \mathbf{Ŷ_e}, \mathbf{D̂_e}, \mathbf{p}, ϵ(k)\Big) ≤ \mathbf{0} +``` +The economic function ``J_E`` can penalizes solutions with high economic costs. Setting all +the weights to 0 except ``E`` creates a pure economic model predictive controller (EMPC). +As a matter of fact, ``J_E`` can be any nonlinear function to customize the objective, even +if there is no economic interpretation to it. 
The arguments of ``J_E`` and ``\mathbf{g_c}`` +include the manipulated inputs, predicted outputs and measured disturbances, extended from +``k`` to ``k + H_p`` (inclusively): +```math + \mathbf{U_e} = \begin{bmatrix} \mathbf{U} \\ \mathbf{u}(k+H_p-1) \end{bmatrix} , \quad + \mathbf{Ŷ_e} = \begin{bmatrix} \mathbf{ŷ}(k) \\ \mathbf{Ŷ} \end{bmatrix} , \quad + \mathbf{D̂_e} = \begin{bmatrix} \mathbf{d}(k) \\ \mathbf{D̂} \end{bmatrix} ``` since ``H_c ≤ H_p`` implies that ``\mathbf{Δu}(k+H_p) = \mathbf{0}`` or ``\mathbf{u}(k+H_p)= -\mathbf{u}(k+H_p-1)``. The vector ``\mathbf{D̂}`` includes the predicted measured disturbance -over ``H_p``. The argument ``\mathbf{p}`` is a custom parameter object of any type but use a -mutable one if you want to modify it later e.g.: a vector. +\mathbf{u}(k+H_p-1)``. The vector ``\mathbf{D̂}`` comprises the measured disturbance +predictions over ``H_p``. The argument ``\mathbf{p}`` is a custom parameter object of any +type, but use a mutable one if you want to modify it later e.g.: a vector. !!! tip - Replace any of the 4 arguments with `_` if not needed (see `JE` default value below). + Replace any of the arguments of ``J_E`` and ``\mathbf{g_c}`` functions with `_` if not + needed (see e.g. the default value of `JE` below). -This method uses the default state estimator : +See [`LinMPC`](@ref) for the definition of the other variables. This method uses the default +state estimator : - if `model` is a [`LinModel`](@ref), a [`SteadyKalmanFilter`](@ref) with default arguments; - else, an [`UnscentedKalmanFilter`](@ref) with default arguments. @@ -154,8 +162,13 @@ This method uses the default state estimator : - `L_Hp=diagm(repeat(Lwt,Hp))` : positive semidefinite symmetric matrix ``\mathbf{L}_{H_p}``. - `Cwt=1e5` : slack variable weight ``C`` (scalar), use `Cwt=Inf` for hard constraints only. - `Ewt=0.0` : economic costs weight ``E`` (scalar). -- `JE=(_,_,_,_)->0.0` : economic function ``J_E(\mathbf{U}_E, \mathbf{Ŷ}_E, \mathbf{D̂}_E, \mathbf{p})``. -- `p=model.p` : ``J_E`` function parameter ``\mathbf{p}`` (any type). +- `JE=(_,_,_,_)->0.0` : economic or custom cost function ``J_E(\mathbf{U_e}, \mathbf{Ŷ_e}, + \mathbf{D̂_e}, \mathbf{p})``. +- `gc=(_,_,_,_,_,_)->nothing` or `gc!` : custom inequality constraint function + ``\mathbf{g_c}(\mathbf{U_e}, \mathbf{Ŷ_e}, \mathbf{D̂_e}, \mathbf{p}, ϵ)``, mutating or + not (details in Extended Help). +- `nc=0` : number of custom inequality constraints. +- `p=model.p` : ``J_E`` and ``\mathbf{g_c}`` functions parameter ``\mathbf{p}`` (any type). - `optim=JuMP.Model(Ipopt.Optimizer)` : nonlinear optimizer used in the predictive controller, provided as a [`JuMP.Model`](https://jump.dev/JuMP.jl/stable/api/JuMP/#JuMP.Model) (default to [`Ipopt`](https://github.com/jump-dev/Ipopt.jl) optimizer). @@ -190,7 +203,18 @@ NonLinMPC controller with a sample time Ts = 10.0 s, Ipopt optimizer, UnscentedK state-space functions must be compatible with this feature. See [Automatic differentiation](https://jump.dev/JuMP.jl/stable/manual/nlp/#Automatic-differentiation) for common mistakes when writing these functions. - Note that if `Cwt≠Inf`, the attribute `nlp_scaling_max_gradient` of `Ipopt` is set to + If `LHS` represents the result of the left-hand side in the inequality + ``\mathbf{g_c}(\mathbf{U_e}, \mathbf{Ŷ_e}, \mathbf{D̂_e}, \mathbf{p}, ϵ) ≤ \mathbf{0}``, + the function `gc` can be implemented in two ways: + + 1. **Non-mutating function** (out-of-place): define it as `gc(Ue, Ŷe, D̂e, p, ϵ) -> LHS`. 
+ This syntax is simple and intuitive but it allocates more memory. + 2. **Mutating function** (in-place): define it as `gc!(LHS, Ue, Ŷe, D̂e, p, ϵ) -> nothing`. + This syntax reduces the allocations and potentially the computational burden as well. + + The keyword argument `nc` is the number of elements in the `LHS` vector, and `gc!`, an + alias for the `gc` argument (both accepts non-mutating and mutating functions). Note + that if `Cwt≠Inf`, the attribute `nlp_scaling_max_gradient` of `Ipopt` is set to `10/Cwt` (if not already set), to scale the small values of ``ϵ``. """ function NonLinMPC( @@ -205,13 +229,19 @@ function NonLinMPC( L_Hp = diagm(repeat(Lwt, Hp)), Cwt = DEFAULT_CWT, Ewt = DEFAULT_EWT, - JE::Function = (args...) -> 0.0, + JE::Function = (_,_,_,_) -> 0.0, + gc!::Function = (_,_,_,_,_,_) -> nothing, + gc ::Function = gc!, + nc::Int = 0, p = model.p, optim::JuMP.GenericModel = JuMP.Model(DEFAULT_NONLINMPC_OPTIMIZER, add_bridges=false), kwargs... ) estim = UnscentedKalmanFilter(model; kwargs...) - NonLinMPC(estim; Hp, Hc, Mwt, Nwt, Lwt, Cwt, Ewt, JE, p, M_Hp, N_Hc, L_Hp, optim) + return NonLinMPC( + estim; + Hp, Hc, Mwt, Nwt, Lwt, Cwt, Ewt, JE, gc, nc, p, M_Hp, N_Hc, L_Hp, optim + ) end function NonLinMPC( @@ -226,13 +256,19 @@ function NonLinMPC( L_Hp = diagm(repeat(Lwt, Hp)), Cwt = DEFAULT_CWT, Ewt = DEFAULT_EWT, - JE::Function = (args...) -> 0.0, + JE ::Function = (_,_,_,_) -> 0.0, + gc!::Function = (_,_,_,_,_,_) -> nothing, + gc ::Function = gc!, + nc::Int = 0, p = model.p, optim::JuMP.GenericModel = JuMP.Model(DEFAULT_NONLINMPC_OPTIMIZER, add_bridges=false), kwargs... ) estim = SteadyKalmanFilter(model; kwargs...) - NonLinMPC(estim; Hp, Hc, Mwt, Nwt, Lwt, Cwt, Ewt, JE, p, M_Hp, N_Hc, L_Hp, optim) + return NonLinMPC( + estim; + Hp, Hc, Mwt, Nwt, Lwt, Cwt, Ewt, JE, gc, nc, p, M_Hp, N_Hc, L_Hp, optim + ) end @@ -271,17 +307,26 @@ function NonLinMPC( L_Hp = diagm(repeat(Lwt, Hp)), Cwt = DEFAULT_CWT, Ewt = DEFAULT_EWT, - JE::JEFunc = (args...) -> 0.0, + JE ::JEfunc = (_,_,_,_) -> 0.0, + gc!::Function = (_,_,_,_,_,_) -> nothing, + gc ::Function = gc!, + nc = 0, p::P = estim.model.p, optim::JM = JuMP.Model(DEFAULT_NONLINMPC_OPTIMIZER, add_bridges=false), -) where {NT<:Real, SE<:StateEstimator{NT}, JM<:JuMP.GenericModel, JEFunc<:Function, P<:Any} +) where { + NT<:Real, + SE<:StateEstimator{NT}, + JM<:JuMP.GenericModel, + JEfunc<:Function, + P<:Any +} nk = estimate_delays(estim.model) if Hp ≤ nk @warn("prediction horizon Hp ($Hp) ≤ estimated number of delays in model "* "($nk), the closed-loop system may be unstable or zero-gain (unresponsive)") end - return NonLinMPC{NT, SE, JM, JEFunc, P}( - estim, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt, Ewt, JE, p, optim + return NonLinMPC{NT, SE, JM, JEfunc, P}( + estim, Hp, Hc, M_Hp, N_Hc, L_Hp, Cwt, Ewt, JE, gc, nc, p, optim ) end @@ -291,15 +336,63 @@ end Validate `JE` function argument signature. """ function validate_JE(NT, JE) + # Ue, Ŷe, D̂e, p if !hasmethod(JE, Tuple{Vector{NT}, Vector{NT}, Vector{NT}, Any}) error( - "the economic function has no method with type signature "* - "JE(UE::Vector{$(NT)}, ŶE::Vector{$(NT)}, D̂E::Vector{$(NT)}, p::Any)" + "the economic cost function has no method with type signature "* + "JE(Ue::Vector{$(NT)}, Ŷe::Vector{$(NT)}, D̂e::Vector{$(NT)}, p::Any)" ) end return nothing end +""" + validate_gc(NT, gc) -> ismutating + +Validate `gc` function argument signature and return `true` if it is mutating. 
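As an illustration, both of the following forms are accepted (the constraint bodies and the `p.umax`/`p.ymin` parameter fields are arbitrary placeholders, assuming `nc = 2`):
```julia
# non-mutating form, gc(Ue, Ŷe, D̂e, p, ϵ) -> LHS:
gc(Ue, Ŷe, D̂e, p, ϵ) = [Ue[1] - p.umax - ϵ, p.ymin - Ŷe[end] - ϵ]
# equivalent mutating form, gc!(LHS, Ue, Ŷe, D̂e, p, ϵ) -> nothing:
function gc!(LHS, Ue, Ŷe, D̂e, p, ϵ)
    LHS[1] = Ue[1] - p.umax - ϵ   # keep the first input below p.umax (softened by ϵ)
    LHS[2] = p.ymin - Ŷe[end] - ϵ # keep the last predicted output above p.ymin
    return nothing
end
```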
+""" +function validate_gc(NT, gc) + ismutating = hasmethod( + gc, + # LHS, Ue, Ŷe, D̂e, p, ϵ + Tuple{Vector{NT}, Vector{NT}, Vector{NT}, Vector{NT}, Any, NT} + ) + # Ue, Ŷe, D̂e, p, ϵ + if !(ismutating || hasmethod(gc, Tuple{Vector{NT}, Vector{NT}, Vector{NT}, Any, NT})) + error( + "the custom constraint function has no method with type signature "* + "gc(Ue::Vector{$(NT)}, Ŷe::Vector{$(NT)}, D̂e::Vector{$(NT)}, p::Any, ϵ::$(NT)) "* + "or mutating form gc!(LHS::Vector{$(NT)}, Ue::Vector{$(NT)}, Ŷe::Vector{$(NT)}, "* + "D̂e::Vector{$(NT)}, p::Any, ϵ::$(NT))" + ) + end + return ismutating +end + +"Get mutating custom constraint function `gc!` from the provided function in argument." +function get_mutating_gc(NT, gc) + ismutating_gc = validate_gc(NT, gc) + gc! = if ismutating_gc + gc + else + function gc!(LHS, Ue, Ŷe, D̂e, p, ϵ) + LHS .= gc(Ue, Ŷe, D̂e, p, ϵ) + return nothing + end + end + return gc! +end + +function test_custom_functions(JE, gc!, uop; Uop, dop, Dop, ΔŨ, p) + # TODO: contunue here (important to guide the user, sim! can be used on NonLinModel, + # but there is no similar function for the custom functions of NonLinMPC) + Ue = [Uop; uop] + D̂e = [dop; Dop] + Ŷ0, x̂0next = + Ŷ0, x̂0end = predict!(Ŷ0, x̂0, x̂0next, u0, û0, mpc, model, mpc.ΔŨ) + JE = JE(Uop, Uop, Dop, p) +end + """ addinfo!(info, mpc::NonLinMPC) -> info @@ -307,10 +400,10 @@ For [`NonLinMPC`](@ref), add `:sol` and the optimal economic cost `:JE`. """ function addinfo!(info, mpc::NonLinMPC) U, Ŷ, D̂, ŷ, d = info[:U], info[:Ŷ], info[:D̂], info[:ŷ], info[:d] - UE = [U; U[(end - mpc.estim.model.nu + 1):end]] - ŶE = [ŷ; Ŷ] - D̂E = [d; D̂] - info[:JE] = mpc.JE(UE, ŶE, D̂E, mpc.p) + Ue = [U; U[(end - mpc.estim.model.nu + 1):end]] + Ŷe = [ŷ; Ŷ] + D̂e = [d; D̂] + info[:JE] = mpc.JE(Ue, Ŷe, D̂e, mpc.p) info[:sol] = JuMP.solution_summary(mpc.optim, verbose=true) return info end @@ -344,28 +437,8 @@ function init_optimization!(mpc::NonLinMPC, model::SimModel, optim) Jfunc, gfunc = get_optim_functions(mpc, mpc.optim) @operator(optim, J, nΔŨ, Jfunc) @objective(optim, Min, J(ΔŨvar...)) - ny, nx̂, Hp = model.ny, mpc.estim.nx̂, mpc.Hp - if length(con.i_g) ≠ 0 - for i in eachindex(con.Y0min) - name = Symbol("g_Y0min_$i") - optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfunc[i]; name) - end - i_end_Ymin = 1Hp*ny - for i in eachindex(con.Y0max) - name = Symbol("g_Y0max_$i") - optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfunc[i_end_Ymin+i]; name) - end - i_end_Ymax = 2Hp*ny - for i in eachindex(con.x̂0min) - name = Symbol("g_x̂0min_$i") - optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfunc[i_end_Ymax+i]; name) - end - i_end_x̂min = 2Hp*ny + nx̂ - for i in eachindex(con.x̂0max) - name = Symbol("g_x̂0max_$i") - optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfunc[i_end_x̂min+i]; name) - end - end + init_nonlincon!(mpc, model, gfunc) + set_nonlincon!(mpc, model, mpc.optim) #TODO: check if this is really necessary !! 
return nothing end @@ -378,33 +451,38 @@ Inspired from: [User-defined operators with vector outputs](https://jump.dev/JuM """ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT<:Real model = mpc.estim.model - nu, ny, nx̂, Hp = model.nu, model.ny, mpc.estim.nx̂, mpc.Hp - ng, nΔŨ, nU, nŶ = length(mpc.con.i_g), length(mpc.ΔŨ), Hp*nu, Hp*ny - Nc = nΔŨ + 3 - ΔŨ_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nΔŨ), Nc) - Ŷ0_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nŶ), Nc) - U0_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nU), Nc) - g_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, ng), Nc) - x̂0_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nx̂), Nc) - x̂0next_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nx̂), Nc) - u0_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nu), Nc) - û0_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nu), Nc) - Ȳ_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nŶ), Nc) - Ū_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nU), Nc) + nu, ny, nx̂, nϵ, Hp = model.nu, model.ny, mpc.estim.nx̂, mpc.nϵ, mpc.Hp + ng, nc, nΔŨ, nU, nŶ = length(mpc.con.i_g), mpc.con.nc, length(mpc.ΔŨ), Hp*nu, Hp*ny + nUe, nŶe = nU + nu, nŶ + ny + Ncache = nΔŨ + 3 + ΔŨ_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nΔŨ), Ncache) + Ŷe_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nŶe), Ncache) + Ue_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nUe), Ncache) + Ȳ_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nŶ), Ncache) + Ū_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nU), Ncache) + x̂0_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nx̂), Ncache) + x̂0next_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nx̂), Ncache) + u0_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nu), Ncache) + û0_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nu), Ncache) + g_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, ng), Ncache) + gc_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nc), Ncache) function Jfunc(ΔŨtup::T...) where T<:Real ΔŨ1 = ΔŨtup[begin] ΔŨ, g = get_tmp(ΔŨ_cache, ΔŨ1), get_tmp(g_cache, ΔŨ1) for i in eachindex(ΔŨtup) ΔŨ[i] = ΔŨtup[i] # ΔŨ .= ΔŨtup seems to produce a type instability - end - Ŷ0 = get_tmp(Ŷ0_cache, ΔŨ1) + end + Ŷe, Ue = get_tmp(Ŷe_cache, ΔŨ1), get_tmp(Ue_cache, ΔŨ1) + Ȳ, Ū = get_tmp(Ȳ_cache, ΔŨ1), get_tmp(Ū_cache, ΔŨ1) x̂0, x̂0next = get_tmp(x̂0_cache, ΔŨ1), get_tmp(x̂0next_cache, ΔŨ1) - u0, û0 = get_tmp(u0_cache, ΔŨ1), get_tmp(û0_cache, ΔŨ1) - Ŷ0, x̂0end = predict!(Ŷ0, x̂0, x̂0next, u0, û0, mpc, model, ΔŨ) - g = get_tmp(g_cache, ΔŨ1) - g = con_nonlinprog!(g, mpc, model, x̂0end, Ŷ0, ΔŨ) - U0, Ȳ, Ū = get_tmp(U0_cache, ΔŨ1), get_tmp(Ȳ_cache, ΔŨ1), get_tmp(Ū_cache, ΔŨ1) - return obj_nonlinprog!(U0, Ȳ, Ū, mpc, model, Ŷ0, ΔŨ)::T + u0, û0 = get_tmp(u0_cache, ΔŨ1), get_tmp(û0_cache, ΔŨ1) + g, gc = get_tmp(g_cache, ΔŨ1), get_tmp(gc_cache, ΔŨ1) + Ŷ0, x̂0end = predict!(Ȳ, x̂0, x̂0next, u0, û0, mpc, model, ΔŨ) + Ŷe, Ue = extended_predictions!(Ŷe, Ue, Ū, mpc, model, Ŷ0, ΔŨ) + ϵ = (nϵ == 1) ? 
ΔŨ[end] : zero(JNT) # ϵ = 0 if nϵ == 0 (meaning no relaxation) + mpc.con.gc!(gc, Ue, Ŷe, mpc.D̂e, mpc.p, ϵ) + g = con_nonlinprog!(g, mpc, model, x̂0end, Ŷ0, gc, ϵ) + return obj_nonlinprog!(Ȳ, Ū, mpc, model, Ŷe, Ue, ΔŨ)::T end function gfunc_i(i, ΔŨtup::NTuple{N, T}) where {N, T<:Real} ΔŨ1 = ΔŨtup[begin] @@ -413,11 +491,16 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT for i in eachindex(ΔŨtup) ΔŨ[i] = ΔŨtup[i] # ΔŨ .= ΔŨtup seems to produce a type instability end - Ŷ0 = get_tmp(Ŷ0_cache, ΔŨ1) + Ŷe, Ue = get_tmp(Ŷe_cache, ΔŨ1), get_tmp(Ue_cache, ΔŨ1) + Ȳ, Ū = get_tmp(Ȳ_cache, ΔŨ1), get_tmp(Ū_cache, ΔŨ1) x̂0, x̂0next = get_tmp(x̂0_cache, ΔŨ1), get_tmp(x̂0next_cache, ΔŨ1) - u0, û0 = get_tmp(u0_cache, ΔŨ1), get_tmp(û0_cache, ΔŨ1) - Ŷ0, x̂0end = predict!(Ŷ0, x̂0, x̂0next, u0, û0, mpc, model, ΔŨ) - g = con_nonlinprog!(g, mpc, model, x̂0end, Ŷ0, ΔŨ) + u0, û0 = get_tmp(u0_cache, ΔŨ1), get_tmp(û0_cache, ΔŨ1) + g, gc = get_tmp(g_cache, ΔŨ1), get_tmp(gc_cache, ΔŨ1) + Ŷ0, x̂0end = predict!(Ȳ, x̂0, x̂0next, u0, û0, mpc, model, ΔŨ) + Ŷe, Ue = extended_predictions!(Ŷe, Ue, Ū, mpc, model, Ŷ0, ΔŨ) + ϵ = (nϵ == 1) ? ΔŨ[end] : zero(JNT) # ϵ = 0 if nϵ == 0 (meaning no relaxation) + mpc.con.gc!(gc, Ue, Ŷe, mpc.D̂e, mpc.p, ϵ) + g = con_nonlinprog!(g, mpc, model, x̂0end, Ŷ0, gc, ϵ) end return g[i]::T end @@ -425,9 +508,78 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT return Jfunc, gfunc end -"Set the nonlinear constraints on the output predictions `Ŷ` and terminal states `x̂end`." -function setnonlincon!( - mpc::NonLinMPC, ::NonLinModel, optim::JuMP.GenericModel{JNT} +function init_nonlincon!(mpc::NonLinMPC, ::LinModel, gfunc::Vector{<:Function}) + optim, con = mpc.optim, mpc.con + nΔŨ = length(mpc.ΔŨ) + if length(con.i_g) ≠ 0 + i_base = 0 + for i in 1:con.nc + name = Symbol("g_c_$i") + optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfunc[i_base+i]; name) + end + end + return nothing +end + +function init_nonlincon!(mpc::NonLinMPC, ::NonLinModel, gfunc::Vector{<:Function}) + optim, con = mpc.optim, mpc.con + ny, nx̂, Hp, nΔŨ = mpc.estim.model.ny, mpc.estim.nx̂, mpc.Hp, length(mpc.ΔŨ) + if length(con.i_g) ≠ 0 + i_base = 0 + for i in eachindex(con.Y0min) + name = Symbol("g_Y0min_$i") + optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfunc[i_base+i]; name) + end + i_base = 1Hp*ny + for i in eachindex(con.Y0max) + name = Symbol("g_Y0max_$i") + optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfunc[i_base+i]; name) + end + i_base = 2Hp*ny + for i in eachindex(con.x̂0min) + name = Symbol("g_x̂0min_$i") + optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfunc[i_base+i]; name) + end + i_base = 2Hp*ny + nx̂ + for i in eachindex(con.x̂0max) + name = Symbol("g_x̂0max_$i") + optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfunc[i_base+i]; name) + end + i_base = 2Hp*ny + 2nx̂ + for i in 1:con.nc + name = Symbol("g_c_$i") + optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfunc[i_base+i]; name) + end + end + return nothing +end + +""" + set_nonlincon!(mpc::NonLinMPC, ::LinModel, optim) + +Set the custom nonlinear inequality constraints for `LinModel`. 
+""" +function set_nonlincon!( + mpc::NonLinMPC, ::LinModel, optim::JuMP.GenericModel{JNT} +) where JNT<:Real + ΔŨvar = optim[:ΔŨvar] + con = mpc.con + nonlin_constraints = JuMP.all_constraints(optim, JuMP.NonlinearExpr, MOI.LessThan{JNT}) + map(con_ref -> JuMP.delete(optim, con_ref), nonlin_constraints) + for i in 1:con.nc + gfunc_i = optim[Symbol("g_c_$i")] + @constraint(optim, gfunc_i(ΔŨvar...) <= 0) + end + return nothing +end + +""" + set_nonlincon!(mpc::NonLinMPC, ::NonLinModel, optim) + +Also set output prediction `Ŷ` and terminal state `x̂end` constraints when not a `LinModel`. +""" +function set_nonlincon!( + mpc::NonLinMPC, ::SimModel, optim::JuMP.GenericModel{JNT} ) where JNT<:Real ΔŨvar = optim[:ΔŨvar] con = mpc.con @@ -449,19 +601,38 @@ function setnonlincon!( gfunc_i = optim[Symbol("g_x̂0max_$(i)")] @constraint(optim, gfunc_i(ΔŨvar...) <= 0) end + for i in 1:con.nc + gfunc_i = optim[Symbol("g_c_$i")] + @constraint(optim, gfunc_i(ΔŨvar...) <= 0) + end return nothing end """ - con_nonlinprog!(g, mpc::NonLinMPC, model::SimModel, x̂end, Ŷ, ΔŨ) -> g + con_nonlinprog!(g, mpc::NonLinMPC, model::LinModel, _ , _ , gc, ϵ) + +Nonlinear constrains for [`NonLinMPC`](@ref) when `model` is a [`LinModel`](@ref). + +The method mutates the `g` vectors in argument and returns it. Only the custom constraints +are include in the `g` vector. +""" +function con_nonlinprog!(g, mpc::NonLinMPC, ::LinModel, _ , _ , gc, ϵ) + for i in eachindex(g) + g[i] = gc[i] + end + return g +end + +""" + con_nonlinprog!(g, mpc::NonLinMPC, model::SimModel, x̂0end, Ŷ0, gc, ϵ) -> g Nonlinear constrains for [`NonLinMPC`](@ref) when `model` is not a [`LinModel`](@ref). -The method mutates the `g` vector in argument and returns it. +The method mutates the `g` vectors in argument and returns it. The output prediction, +the terminal state and the custom constraints are include in the `g` vector. """ -function con_nonlinprog!(g, mpc::NonLinMPC, ::SimModel, x̂0end, Ŷ0, ΔŨ) - nx̂, nŶ = mpc.estim.nx̂, length(Ŷ0) - ϵ = mpc.nϵ == 1 ? ΔŨ[end] : 0 # ϵ = 0 if Cwt=Inf (meaning: no relaxation) +function con_nonlinprog!(g, mpc::NonLinMPC, ::SimModel, x̂0end, Ŷ0, gc, ϵ) + nx̂, nŶ = length(x̂0end), length(Ŷ0) for i in eachindex(g) mpc.con.i_g[i] || continue if i ≤ nŶ @@ -473,31 +644,19 @@ function con_nonlinprog!(g, mpc::NonLinMPC, ::SimModel, x̂0end, Ŷ0, ΔŨ) elseif i ≤ 2nŶ + nx̂ j = i - 2nŶ g[i] = (mpc.con.x̂0min[j] - x̂0end[j]) - ϵ*mpc.con.c_x̂min[j] - else + elseif i ≤ 2nŶ + 2nx̂ j = i - 2nŶ - nx̂ g[i] = (x̂0end[j] - mpc.con.x̂0max[j]) - ϵ*mpc.con.c_x̂max[j] + else + j = i - 2nŶ - 2nx̂ + g[i] = gc[j] end end return g end -"No nonlinear constraints if `model` is a [`LinModel`](@ref), return `g` unchanged." -con_nonlinprog!(g, ::NonLinMPC, ::LinModel, _ , _ , _ ) = g - -"Evaluate the economic term of the objective function for [`NonLinMPC`](@ref)." -function obj_econ!(U0, Ȳ, mpc::NonLinMPC, model::SimModel, Ŷ0, ΔŨ) - if !iszero(mpc.E) - ny, Hp, ŷ, D̂E = model.ny, mpc.Hp, mpc.ŷ, mpc.D̂E - U = U0 - U .+= mpc.Uop - uend = @views U[(end-model.nu+1):end] - Ŷ = Ȳ - Ŷ .= Ŷ0 .+ mpc.Yop - UE = [U; uend] - ŶE = [ŷ; Ŷ] - E_JE = mpc.E*mpc.JE(UE, ŶE, D̂E, mpc.p) - else - E_JE = 0.0 - end +"Evaluate the economic term `E*JE` of the objective function for [`NonLinMPC`](@ref)." +function obj_econ!(Ue, Ŷe, mpc::NonLinMPC, model::SimModel) + E_JE = iszero(mpc.E) ? 
0.0 : mpc.E*mpc.JE(Ue, Ŷe, mpc.D̂e, mpc.p) return E_JE end \ No newline at end of file diff --git a/src/estimator/mhe/construct.jl b/src/estimator/mhe/construct.jl index 023af2d9..ca85cb8d 100644 --- a/src/estimator/mhe/construct.jl +++ b/src/estimator/mhe/construct.jl @@ -421,7 +421,7 @@ end @doc raw""" setconstraint!(estim::MovingHorizonEstimator; ) -> estim -Set the constraint parameters of the [`MovingHorizonEstimator`](@ref) `estim`. +Set the bound constraint parameters of the [`MovingHorizonEstimator`](@ref) `estim`. It supports both soft and hard constraints on the estimated state ``\mathbf{x̂}``, process noise ``\mathbf{ŵ}`` and sensor noise ``\mathbf{v̂}``: @@ -652,7 +652,7 @@ function setconstraint!( JuMP.delete(optim, optim[:linconstraint]) JuMP.unregister(optim, :linconstraint) @constraint(optim, linconstraint, A*Z̃var .≤ b) - setnonlincon!(estim, model, optim) + set_nonlincon!(estim, model, optim) else i_b, i_g = init_matconstraint_mhe(model, i_x̃min, i_x̃max, i_X̂min, i_X̂max, i_Ŵmin, i_Ŵmax, i_V̂min, i_V̂max @@ -714,10 +714,10 @@ function init_matconstraint_mhe(::SimModel{NT}, end "By default, no nonlinear constraints in the MHE, thus return nothing." -setnonlincon!(::MovingHorizonEstimator, ::SimModel, ::JuMP.GenericModel) = nothing +set_nonlincon!(::MovingHorizonEstimator, ::SimModel, ::JuMP.GenericModel) = nothing "Set the nonlinear constraints on the output predictions `Ŷ` and terminal states `x̂end`." -function setnonlincon!( +function set_nonlincon!( estim::MovingHorizonEstimator, ::NonLinModel, optim::JuMP.GenericModel{JNT} ) where JNT<:Real optim, con = estim.optim, estim.con @@ -994,7 +994,8 @@ see [`initpred!(::MovingHorizonEstimator, ::LinModel)`](@ref) and [`linconstrain !!! details "Extended Help" Using the augmented process model matrices ``\mathbf{Â, B̂_u, Ĉ^m, B̂_d, D̂_d^m}``, and the function ``\mathbf{S}(j) = ∑_{i=0}^j \mathbf{Â}^i``, the prediction matrices for the - sensor noises depend on the constant ``p``. For ``p=0``, the matrices are computed by: + sensor noises depend on the constant ``p``. For ``p=0``, the matrices are computed by + (notice the minus signs after the equalities): ```math \begin{aligned} \mathbf{E} &= - \begin{bmatrix} diff --git a/src/model/nonlinmodel.jl b/src/model/nonlinmodel.jl index 0032bc2a..8e17691d 100644 --- a/src/model/nonlinmodel.jl +++ b/src/model/nonlinmodel.jl @@ -143,10 +143,7 @@ function NonLinModel{NT}( p=NT[], solver=RungeKutta(4) ) where {NT<:Real} isnothing(solver) && (solver=EmptySolver()) - ismutating_f = validate_f(NT, f) - ismutating_h = validate_h(NT, h) - f! = ismutating_f ? f : f!(xnext, x, u, d, p) = (xnext .= f(x, u, d, p); nothing) - h! = ismutating_h ? h : h!(y, x, d, p) = (y .= h(x, d, p); nothing) + f!, h! = get_mutating_functions(NT, f, h) f!, h! = get_solver_functions(NT, solver, f!, h!, Ts, nu, nx, ny, nd) F, H, P, DS = get_types(f!, h!, p, solver) return NonLinModel{NT, F, H, P, DS}(f!, h!, Ts, nu, nx, ny, nd, p, solver) @@ -159,11 +156,27 @@ function NonLinModel( return NonLinModel{Float64}(f, h, Ts, nu, nx, ny, nd; p, solver) end -"Get the types of `f!`, `h!` and `solver` to construct a `NonLinModel`." -function get_types( - ::F, ::H, ::P, ::DS -) where {F<:Function, H<:Function, P<:Any, DS<:DiffSolver} - return F, H, P, DS +"Get the mutating functions `f!` and `h!` from the provided functions in argument." +function get_mutating_functions(NT, f, h) + ismutating_f = validate_f(NT, f) + f! 
= if ismutating_f + f + else + function f!(xnext, x, u, d, p) + xnext .= f(x, u, d, p) + return nothing + end + end + ismutating_h = validate_h(NT, h) + h! = if ismutating_h + h + else + function h!(y, x, d, p) + y .= h(x, d, p) + return nothing + end + end + return f!, h! end """ @@ -172,7 +185,12 @@ end Validate `f` function argument signature and return `true` if it is mutating. """ function validate_f(NT, f) - ismutating = hasmethod(f, Tuple{Vector{NT}, Vector{NT}, Vector{NT}, Vector{NT}, Any}) + ismutating = hasmethod( + f, + # ẋ or xnext, x, u, d, p + Tuple{ Vector{NT}, Vector{NT}, Vector{NT}, Vector{NT}, Any} + ) + # x, u, d, p if !(ismutating || hasmethod(f, Tuple{Vector{NT}, Vector{NT}, Vector{NT}, Any})) error( "the state function has no method with type signature "* @@ -190,7 +208,12 @@ end Validate `h` function argument signature and return `true` if it is mutating. """ function validate_h(NT, h) - ismutating = hasmethod(h, Tuple{Vector{NT}, Vector{NT}, Vector{NT}, Any}) + ismutating = hasmethod( + h, + # y, x, d, p + Tuple{Vector{NT}, Vector{NT}, Vector{NT}, Any} + ) + # x, d, p if !(ismutating || hasmethod(h, Tuple{Vector{NT}, Vector{NT}, Any})) error( "the output function has no method with type signature "* @@ -201,6 +224,13 @@ function validate_h(NT, h) return ismutating end +"Get the types of `f!`, `h!` and `solver` to construct a `NonLinModel`." +function get_types( + ::F, ::H, ::P, ::DS +) where {F<:Function, H<:Function, P<:Any, DS<:DiffSolver} + return F, H, P, DS +end + "Do nothing if `model` is a [`NonLinModel`](@ref)." steadystate!(::SimModel, _ , _ ) = nothing diff --git a/src/precompile.jl b/src/precompile.jl index 53e753d2..6968678e 100644 --- a/src/precompile.jl +++ b/src/precompile.jl @@ -112,8 +112,8 @@ preparestate!(nmpc_mhe, [55, 30]) u = nmpc_mhe([55, 30]) sim!(nmpc_mhe, 3, [55, 30]) -function JE( _ , ŶE, _ , R̂y) - Ŷ = ŶE[3:end] +function JE( _ , Ŷe, _ , R̂y) + Ŷ = Ŷe[3:end] Ȳ = R̂y - Ŷ return dot(Ȳ, Ȳ) end diff --git a/test/test_predictive_control.jl b/test/test_predictive_control.jl index 5213db7f..6cbb20f8 100644 --- a/test/test_predictive_control.jl +++ b/test/test_predictive_control.jl @@ -479,7 +479,7 @@ end nonlinmodel = NonLinModel(f, h, Ts, 2, 4, 2, 1, p=linmodel1, solver=nothing) nmpc1 = NonLinMPC(nonlinmodel, Hp=15) @test isa(nmpc1.estim, UnscentedKalmanFilter) - @test size(nmpc1.R̂y0, 1) == 15*nmpc1.estim.model.ny + @test size(nmpc1.R̂y, 1) == 15*nmpc1.estim.model.ny nmpc2 = NonLinMPC(nonlinmodel, Hp=15, Hc=4, Cwt=Inf) @test size(nmpc2.Ẽ, 2) == 4*nonlinmodel.nu nmpc3 = NonLinMPC(nonlinmodel, Hp=15, Hc=4, Cwt=1e6) @@ -491,7 +491,7 @@ end @test nmpc5.Ñ_Hc ≈ Diagonal(diagm([repeat(Float64[3, 4], 5); [1e3]])) nmpc6 = NonLinMPC(nonlinmodel, Hp=15, Lwt=[0,1]) @test nmpc6.L_Hp ≈ Diagonal(diagm(repeat(Float64[0, 1], 15))) - nmpc7 = NonLinMPC(nonlinmodel, Hp=15, Ewt=1e-3, JE=(UE,ŶE,D̂E,p) -> p*UE.*ŶE.*D̂E, p=2) + nmpc7 = NonLinMPC(nonlinmodel, Hp=15, Ewt=1e-3, JE=(Ue,Ŷe,D̂e,p) -> p*Ue.*Ŷe.*D̂e, p=2) @test nmpc7.E == 1e-3 @test nmpc7.JE([1,2],[3,4],[4,6],2) == 2*[1,2].*[3,4].*[4,6] optim = JuMP.Model(optimizer_with_attributes(Ipopt.Optimizer, "nlp_scaling_max_gradient"=>1.0)) @@ -538,7 +538,7 @@ end @test info[:Ŷ][end] ≈ r[1] atol=5e-2 Hp = 1000 R̂y = fill(r[1], Hp) - JE = (_ , ŶE, _ , R̂y) -> sum((ŶE[2:end] - R̂y).^2) + JE = (_ , Ŷe, _ , R̂y) -> sum((Ŷe[2:end] - R̂y).^2) nmpc = NonLinMPC(linmodel, Mwt=[0], Nwt=[0], Cwt=Inf, Ewt=1, JE=JE, p=R̂y, Hp=Hp, Hc=1) preparestate!(nmpc, [10]) u = moveinput!(nmpc)
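A minimal usage sketch of the new `gc`/`nc` keywords (the discrete model, sampling time, horizons and output bound below are made-up illustrations, not values taken from the tests above):

```julia
using ModelPredictiveControl
# hypothetical discrete-time SISO model (solver=nothing, so f returns the next state):
f(x, u, _, p) = p.A*x + p.Bu*u
h(x, _, p) = p.C*x
model = NonLinModel(f, h, 2.0, 1, 1, 1, 0; p=(A=0.8, Bu=0.5, C=1.0), solver=nothing)
# one soft custom constraint per extended output prediction: keep Ŷe below 10,
# hence nc = (Hp+1)*ny = 11 elements in the LHS vector:
gc(Ue, Ŷe, D̂e, p, ϵ) = Ŷe .- 10 .- ϵ
nmpc = NonLinMPC(model; Hp=10, Hc=2, gc=gc, nc=11)
```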