From 7273d2f59b79451f39f8fef390b85ca74c2bcd28 Mon Sep 17 00:00:00 2001 From: Alexis Montoison Date: Thu, 10 Mar 2022 10:37:03 -0500 Subject: [PATCH 01/16] [WIP] Quadratic constraints --- src/moi_nlp_model.jl | 28 +++++--- src/moi_nls_model.jl | 2 +- src/utils.jl | 155 ++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 166 insertions(+), 19 deletions(-) diff --git a/src/moi_nlp_model.jl b/src/moi_nlp_model.jl index 63e6745..05259e0 100644 --- a/src/moi_nlp_model.jl +++ b/src/moi_nlp_model.jl @@ -4,6 +4,9 @@ mutable struct MathOptNLPModel <: AbstractNLPModel{Float64, Vector{Float64}} meta::NLPModelMeta{Float64, Vector{Float64}} eval::Union{MOI.AbstractNLPEvaluator, Nothing} lincon::LinearConstraints + quadcon::Vector{QuadraticConstraint} + nquad::Int + nnln::Int obj::Objective counters::Counters end @@ -33,7 +36,10 @@ function MathOptNLPModel(jmodel::JuMP.Model; hessian::Bool = true, name::String (nnln == 0 ? 0 : sum(length(nl_con.hess_I) for nl_con in eval.constraints)) : 0 moimodel = backend(jmodel) - nlin, lincon, lin_lcon, lin_ucon = parser_MOI(moimodel) + nlin, lincon, lin_lcon, lin_ucon, nquad, quadcon, quad_lcon, quad_ucon = parser_MOI(moimodel, nvar) + + + quad_nnzh = nquad == 0 ? 0 : sum(length(quadcon[i].vals) for i = 1 : nquad) if (eval ≠ nothing) && eval.has_nlobj obj = Objective("NONLINEAR", 0.0, spzeros(Float64, nvar), COO(), 0) @@ -41,11 +47,11 @@ function MathOptNLPModel(jmodel::JuMP.Model; hessian::Bool = true, name::String obj = parser_objective_MOI(moimodel, nvar) end - ncon = nlin + nnln - lcon = vcat(lin_lcon, nl_lcon) - ucon = vcat(lin_ucon, nl_ucon) - nnzj = lincon.nnzj + nl_nnzj - nnzh = obj.nnzh + nl_nnzh + ncon = nlin + nquad + nnln + lcon = vcat(lin_lcon, quad_lcon, nl_lcon) + ucon = vcat(lin_ucon, quad_ucon, nl_ucon) + nnzj = lincon.nnzj + ... + nl_nnzj + nnzh = obj.nnzh + quad_nnzh + nl_nnzh meta = NLPModelMeta( nvar, @@ -62,11 +68,11 @@ function MathOptNLPModel(jmodel::JuMP.Model; hessian::Bool = true, name::String lin_nnzj = lincon.nnzj, nln_nnzj = nl_nnzj, minimize = objective_sense(jmodel) == MOI.MIN_SENSE, - islp = (obj.type == "LINEAR") && (nnln == 0), + islp = (obj.type == "LINEAR") && (nnln == 0) && (nquad == 0), name = name, ) - return MathOptNLPModel(meta, eval, lincon, obj, Counters()) + return MathOptNLPModel(meta, eval, lincon, quadcon, nquad, obj, Counters()) end function NLPModels.obj(nlp::MathOptNLPModel, x::AbstractVector) @@ -115,7 +121,11 @@ end function NLPModels.cons_nln!(nlp::MathOptNLPModel, x::AbstractVector, c::AbstractVector) increment!(nlp, :neval_cons_nln) - MOI.eval_constraint(nlp.eval, c, x) + for i = 1 : nlp.nquad + qcon = nlp.quadcon[i] + c[i] = 0.5 * coo_sym_dot(qcon.hessian.rows, qcon.hessian.cols, qcon.hessian.vals, x, x) + dot(qcon.b, x) + end + MOI.eval_constraint(nlp.eval, view(c, (nlp.nquad + 1):(nlp.meta.nnln)), x) return c end diff --git a/src/moi_nls_model.jl b/src/moi_nls_model.jl index 29bf10b..cb74649 100644 --- a/src/moi_nls_model.jl +++ b/src/moi_nls_model.jl @@ -40,7 +40,7 @@ function MathOptNLSModel(cmodel::JuMP.Model, F; hessian::Bool = true, name::Stri (nnln == 0 ? 
0 : sum(length(con.hess_I) for con in ceval.constraints)) : 0 moimodel = backend(cmodel) - nlin, lincon, lin_lcon, lin_ucon = parser_MOI(moimodel) + nlin, lincon, lin_lcon, lin_ucon, nquad, quadcon, quad_lcon, quad_ucon = parser_MOI(moimodel, nvar) nequ = nlinequ + nnlnequ Fnnzj = linequ.nnzj + nl_Fnnzj diff --git a/src/utils.jl b/src/utils.jl index 5910610..f2671c4 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -6,10 +6,18 @@ import NLPModels.increment!, NLPModels.decrement! using JuMP, MathOptInterface const MOI = MathOptInterface +# VariableIndex +const VI = MOI.VariableIndex # VariableIndex(value) + # ScalarAffineFunctions and VectorAffineFunctions -const SAF = MOI.ScalarAffineFunction{Float64} -const VAF = MOI.VectorAffineFunction{Float64} -const AF = Union{SAF, VAF} +const SAF = MOI.ScalarAffineFunction{Float64} # ScalarAffineFunction{T}(terms, constant) +const VAF = MOI.VectorAffineFunction{Float64} # VectorAffineFunction{T}(terms, constants) +const AF = Union{SAF, VAF} + +# ScalarQuadraticFunctions and VectorQuadraticFunctions +const SQF = MOI.ScalarQuadraticFunction{Float64} # ScalarQuadraticFunction{T}(affine_terms, quadratic_terms, constant) +const VQF = MOI.VectorQuadraticFunction{Float64} # VectorQuadraticFunction{T}(affine_terms, quadratic_terms, constants) +const QF = Union{SQF, VQF} # AffLinSets and VecLinSets const ALS = Union{ @@ -21,10 +29,11 @@ const ALS = Union{ const VLS = Union{MOI.Nonnegatives, MOI.Nonpositives, MOI.Zeros} const LS = Union{ALS, VLS} +# Objective const VI = MOI.VariableIndex -const SQF = MOI.ScalarQuadraticFunction{Float64} const OBJ = Union{VI, SAF, SQF} +# Coordinate Matrix mutable struct COO rows::Vector{Int} cols::Vector{Int} @@ -38,6 +47,21 @@ mutable struct LinearConstraints nnzj::Int end +mutable struct QuadraticConstraint + hessian::COO + b::SparseVector{Float64} +end + +mutable struct QuadraticConstraints + qcons::Vector{QuadraticConstraint} + nnzj::Int + jrows::Vector{Int} + jcols::Vector{Int} + nnzh::Int + hrows::Vector{Int} + hcols::Vector{Int} +end + mutable struct LinearEquations jacobian::COO constants::Vector{Float64} @@ -96,6 +120,52 @@ function coo_sym_dot( return xᵀAy end +""" + jacobian_quad(qcons) + +`qcons` is a vector of `QuadraticConstraint` where each constraint has the form ½xᵀQᵢx + xᵀbᵢ. +Compute the sparcity pattern of the jacobian [Q₁x + b₁; ...; Qₚx + bₚ]ᵀ of `qcons`. +""" +function jacobian_quad(qcons) + jrows = Int[] + jcols = Int[] + nquad = length(qcons) + for i = 1 : nquad + # rows of Qᵢx + bᵢ with nonzeros coefficients + vec = unique(con.hessian.rows ∪ con.b.nzind) + for elt ∈ vec + push!(elt, jcols) + push!(i, jrows) + end + end + nnzj = length(jrows) + return nnzj, jrows, jcols +end + +""" + hessian_quad(qcons) + +`qcons` is a vector of `QuadraticConstraint` where each constraint has the form ½xᵀQᵢx + xᵀbᵢ. +Compute the sparcity pattern of the hessian ΣᵢQᵢ of `qcons`. 
+""" +function hessian_quad(qcons) + set = Set{Tuple{Int,Int}}() + for con ∈ qcons + for tuple ∈ zip(con.rows, con.vals) + # Only disctinct tuples are stored in the set + push!(tuple, set) + end + end + nnzh = length(set) + hrows = zeros(Int, nnzh) + hcols = zeros(Int, nnzh) + for (index,tuple) in enumerate(set) + hrows[index] = tuple[1] + hcols[index] = tuple[2] + end + return nnzh, hrows, hcols +end + """ parser_SAF(fun, set, linrows, lincols, linvals, nlin, lin_lcon, lin_ucon) @@ -157,11 +227,64 @@ function parser_VAF(fun, set, linrows, lincols, linvals, nlin, lin_lcon, lin_uco end """ - parser_MOI(moimodel) + parser_SQF(fun, set, qcons, quad_lcon, quad_ucon) -Parse linear constraints of a `MOI.ModelLike`. +Parse a `ScalarQuadraticFunction` fun with its associated set. +`qcons`, `quad_lcon`, `quad_ucon` are updated. """ -function parser_MOI(moimodel) +function parser_SQF(fun, set, nvar, qcons, quad_lcon, quad_ucon) + + b = spzeros(Float64, nvar) + rows = Int[] + cols = Int[] + vals = Float64[] + + # Parse a ScalarAffineTerm{Float64}(coefficient, variable_index) + for term in fun.affine_terms + b[term.variable.value] = term.coefficient + end + + # Parse a ScalarQuadraticTerm{Float64}(coefficient, variable_index_1, variable_index_2) + for term in fun.quadratic_terms + i = term.variable_1.value + j = term.variable_2.value + if i ≥ j + push!(rows, i) + push!(cols, j) + else + push!(cols, j) + push!(rows, i) + end + push!(vals, term.coefficient) + end + + if typeof(set) in (MOI.Interval{Float64}, MOI.GreaterThan{Float64}) + push!(quad_lcon, -fun.constant + set.lower) + elseif typeof(set) == MOI.EqualTo{Float64} + push!(quad_lcon, -fun.constant + set.value) + else + push!(quad_lcon, -Inf) + end + + if typeof(set) in (MOI.Interval{Float64}, MOI.LessThan{Float64}) + push!(quad_ucon, -fun.constant + set.upper) + elseif typeof(set) == MOI.EqualTo{Float64} + push!(quad_ucon, -fun.constant + set.value) + else + push!(quad_ucon, Inf) + end + + nnzh = length(vals) + qcon = QuadraticConstraint(COO[rows, cols, vals], b) + push!(qcons, qcon) +end + +""" + parser_MOI(moimodel, nvar) + +Parse linear and quadratic constraints of a `MOI.ModelLike`. 
+""" +function parser_MOI(moimodel, nvar) # Variables associated to linear constraints nlin = 0 @@ -171,10 +294,16 @@ function parser_MOI(moimodel) lin_lcon = Float64[] lin_ucon = Float64[] + # Variables associated to quadratic constraints + nquad = 0 + qcons = QuadraticConstraint[] + quad_lcon = Float64[] + quad_ucon = Float64[] + contypes = MOI.get(moimodel, MOI.ListOfConstraintTypesPresent()) for (F, S) in contypes F == VI && continue - F <: AF || @warn("Function $F is not supported.") + F <: AF || F <: SQF || @warn("Function $F is not supported.") S <: LS || @warn("Set $S is not supported.") conindices = MOI.get(moimodel, MOI.ListOfConstraintIndices{F, S}()) @@ -189,13 +318,21 @@ function parser_MOI(moimodel) parser_VAF(fun, set, linrows, lincols, linvals, nlin, lin_lcon, lin_ucon) nlin += set.dimension end + if typeof(fun) <: SQF + parser_SQF(fun, set, nvar, qcons, quad_lcon, quad_ucon) + nquad += 1 + end end end coo = COO(linrows, lincols, linvals) nnzj = length(linvals) lincon = LinearConstraints(coo, nnzj) - return nlin, lincon, lin_lcon, lin_ucon + nnzj, jrows, jcols = jacobian_quad(qcons) + nnzh, hrows, hcols = hessian_quad(qcons) + quadcon = QuadraticConstraints(qcons, nnzj, jrows, jcols, nnzh, hrows, hcols) + + return nlin, lincon, lin_lcon, lin_ucon, nquad, quadcon, quad_lcon, quad_ucon end """ From 69903087cedc970bca79499d2be7dfeb734132ba Mon Sep 17 00:00:00 2001 From: tmigot Date: Fri, 6 May 2022 14:59:44 -0400 Subject: [PATCH 02/16] add straightforward test --- test/nlp_problems/hs61.jl | 12 ++++++++++++ test/runtests.jl | 2 +- test/test_moi_nlp_model.jl | 2 +- 3 files changed, 14 insertions(+), 2 deletions(-) create mode 100644 test/nlp_problems/hs61.jl diff --git a/test/nlp_problems/hs61.jl b/test/nlp_problems/hs61.jl new file mode 100644 index 0000000..5e422cc --- /dev/null +++ b/test/nlp_problems/hs61.jl @@ -0,0 +1,12 @@ +"HS61 model" +function hs61(args...; kwargs...) + nlp = Model() + @variable(nlp, x[i = 1:3], start = 0) + + @constraint(nlp, 3 * x[1] - 2 * x[2]^2 - 7 == 0) + @constraint(nlp, 4 * x[1] - x[3]^2 - 11 == 0) + + @NLobjective(nlp, Min, 4 * x[1]^2 + 2 * x[2]^2 + 2 * x[3]^2 - 33 * x[1] + 16 * x[2] - 24 * x[3]) + + return nlp +end diff --git a/test/runtests.jl b/test/runtests.jl index 5306ee7..13fa408 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -6,7 +6,7 @@ nlp_problems = setdiff(NLPModelsTest.nlp_problems, ["MGH01Feas"]) nls_problems = NLPModelsTest.nls_problems extra_nls_problems = ["HS30", "HS43", "MGH07", "nlsnohesspb"] -for problem in lowercase.(nlp_problems ∪ ["nohesspb"]) +for problem in lowercase.(nlp_problems ∪ ["nohesspb", "hs61"]) include(joinpath("nlp_problems", "$problem.jl")) end diff --git a/test/test_moi_nlp_model.jl b/test/test_moi_nlp_model.jl index 162694d..dd2dfb5 100644 --- a/test/test_moi_nlp_model.jl +++ b/test/test_moi_nlp_model.jl @@ -11,7 +11,7 @@ println("Testing MathOptNLPModel") "‖c(x₀)‖" ) # Test that every problem can be instantiated. 
-for prob in Symbol.(lowercase.(nlp_problems ∪ ["nohesspb"])) +for prob in Symbol.(lowercase.(nlp_problems ∪ ["nohesspb", "hs61"])) prob_fn = eval(prob) nlp = MathOptNLPModel(prob_fn(), hessian = (prob != :nohesspb)) n = nlp.meta.nvar From 2b1574d9b1ab2e956cb99e45322c8e57a60c9fad Mon Sep 17 00:00:00 2001 From: tmigot Date: Fri, 6 May 2022 15:01:50 -0400 Subject: [PATCH 03/16] debug utils functions --- src/utils.jl | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/src/utils.jl b/src/utils.jl index f2671c4..710bed8 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -54,6 +54,7 @@ end mutable struct QuadraticConstraints qcons::Vector{QuadraticConstraint} + nquad::Int nnzj::Int jrows::Vector{Int} jcols::Vector{Int} @@ -62,6 +63,9 @@ mutable struct QuadraticConstraints hcols::Vector{Int} end +Base.getindex(qcon::QuadraticConstraints, i::Integer) = qcon.qcons[i] +Base.length(qcon::QuadraticConstraints) = qcon.nquad + mutable struct LinearEquations jacobian::COO constants::Vector{Float64} @@ -124,7 +128,7 @@ end jacobian_quad(qcons) `qcons` is a vector of `QuadraticConstraint` where each constraint has the form ½xᵀQᵢx + xᵀbᵢ. -Compute the sparcity pattern of the jacobian [Q₁x + b₁; ...; Qₚx + bₚ]ᵀ of `qcons`. +Compute the sparsity pattern of the jacobian [Q₁x + b₁; ...; Qₚx + bₚ]ᵀ of `qcons`. """ function jacobian_quad(qcons) jrows = Int[] @@ -132,10 +136,10 @@ function jacobian_quad(qcons) nquad = length(qcons) for i = 1 : nquad # rows of Qᵢx + bᵢ with nonzeros coefficients - vec = unique(con.hessian.rows ∪ con.b.nzind) + vec = unique(qcons[i].hessian.rows ∪ qcons[i].b.nzind) for elt ∈ vec - push!(elt, jcols) - push!(i, jrows) + push!(jcols, elt) + push!(jrows, i) end end nnzj = length(jrows) @@ -146,14 +150,16 @@ end hessian_quad(qcons) `qcons` is a vector of `QuadraticConstraint` where each constraint has the form ½xᵀQᵢx + xᵀbᵢ. -Compute the sparcity pattern of the hessian ΣᵢQᵢ of `qcons`. +Compute the sparsity pattern of the hessian ΣᵢQᵢ of `qcons`. 
""" function hessian_quad(qcons) set = Set{Tuple{Int,Int}}() - for con ∈ qcons - for tuple ∈ zip(con.rows, con.vals) + nquad = length(qcons) + for i = 1 : nquad + con = qcons[i] + for tuple ∈ zip(con.hessian.rows, con.hessian.cols) # Only disctinct tuples are stored in the set - push!(tuple, set) + push!(set, tuple) end end nnzh = length(set) @@ -275,7 +281,7 @@ function parser_SQF(fun, set, nvar, qcons, quad_lcon, quad_ucon) end nnzh = length(vals) - qcon = QuadraticConstraint(COO[rows, cols, vals], b) + qcon = QuadraticConstraint(COO(rows, cols, vals), b) push!(qcons, qcon) end @@ -330,7 +336,7 @@ function parser_MOI(moimodel, nvar) nnzj, jrows, jcols = jacobian_quad(qcons) nnzh, hrows, hcols = hessian_quad(qcons) - quadcon = QuadraticConstraints(qcons, nnzj, jrows, jcols, nnzh, hrows, hcols) + quadcon = QuadraticConstraints(qcons, nquad, nnzj, jrows, jcols, nnzh, hrows, hcols) return nlin, lincon, lin_lcon, lin_ucon, nquad, quadcon, quad_lcon, quad_ucon end From 3bec27bbf12d81a53c30a49be642e362b4478cf4 Mon Sep 17 00:00:00 2001 From: tmigot Date: Fri, 6 May 2022 15:02:18 -0400 Subject: [PATCH 04/16] add API for quadratic constraints --- src/moi_nlp_model.jl | 71 +++++++++++++++++++++++++++++++------------- 1 file changed, 50 insertions(+), 21 deletions(-) diff --git a/src/moi_nlp_model.jl b/src/moi_nlp_model.jl index 05259e0..5c61341 100644 --- a/src/moi_nlp_model.jl +++ b/src/moi_nlp_model.jl @@ -4,9 +4,7 @@ mutable struct MathOptNLPModel <: AbstractNLPModel{Float64, Vector{Float64}} meta::NLPModelMeta{Float64, Vector{Float64}} eval::Union{MOI.AbstractNLPEvaluator, Nothing} lincon::LinearConstraints - quadcon::Vector{QuadraticConstraint} - nquad::Int - nnln::Int + quadcon::QuadraticConstraints obj::Objective counters::Counters end @@ -37,9 +35,6 @@ function MathOptNLPModel(jmodel::JuMP.Model; hessian::Bool = true, name::String moimodel = backend(jmodel) nlin, lincon, lin_lcon, lin_ucon, nquad, quadcon, quad_lcon, quad_ucon = parser_MOI(moimodel, nvar) - - - quad_nnzh = nquad == 0 ? 0 : sum(length(quadcon[i].vals) for i = 1 : nquad) if (eval ≠ nothing) && eval.has_nlobj obj = Objective("NONLINEAR", 0.0, spzeros(Float64, nvar), COO(), 0) @@ -50,8 +45,8 @@ function MathOptNLPModel(jmodel::JuMP.Model; hessian::Bool = true, name::String ncon = nlin + nquad + nnln lcon = vcat(lin_lcon, quad_lcon, nl_lcon) ucon = vcat(lin_ucon, quad_ucon, nl_ucon) - nnzj = lincon.nnzj + ... 
+ nl_nnzj - nnzh = obj.nnzh + quad_nnzh + nl_nnzh + nnzj = lincon.nnzj + quadcon.nnzj + nl_nnzj + nnzh = obj.nnzh + quadcon.nnzh + nl_nnzh meta = NLPModelMeta( nvar, @@ -66,13 +61,13 @@ function MathOptNLPModel(jmodel::JuMP.Model; hessian::Bool = true, name::String nnzh = nnzh, lin = collect(1:nlin), lin_nnzj = lincon.nnzj, - nln_nnzj = nl_nnzj, + nln_nnzj = quadcon.nnzj + nl_nnzj, minimize = objective_sense(jmodel) == MOI.MIN_SENSE, islp = (obj.type == "LINEAR") && (nnln == 0) && (nquad == 0), name = name, ) - return MathOptNLPModel(meta, eval, lincon, quadcon, nquad, obj, Counters()) + return MathOptNLPModel(meta, eval, lincon, quadcon, obj, Counters()) end function NLPModels.obj(nlp::MathOptNLPModel, x::AbstractVector) @@ -121,11 +116,11 @@ end function NLPModels.cons_nln!(nlp::MathOptNLPModel, x::AbstractVector, c::AbstractVector) increment!(nlp, :neval_cons_nln) - for i = 1 : nlp.nquad + for i = 1:(nlp.quadcon.nquad) qcon = nlp.quadcon[i] c[i] = 0.5 * coo_sym_dot(qcon.hessian.rows, qcon.hessian.cols, qcon.hessian.vals, x, x) + dot(qcon.b, x) end - MOI.eval_constraint(nlp.eval, view(c, (nlp.nquad + 1):(nlp.meta.nnln)), x) + MOI.eval_constraint(nlp.eval, view(c, (nlp.quadcon.nquad + 1):(nlp.meta.nnln)), x) return c end @@ -144,10 +139,13 @@ function NLPModels.jac_nln_structure!( rows::AbstractVector{<:Integer}, cols::AbstractVector{<:Integer}, ) + quad_nnzj, jrows, jcols = nlp.quadcon.nnzj, nlp.quadcon.jrows, nlp.quadcon.jcols + rows[1:quad_nnzj] .= jrows + cols[1:quad_nnzj] .= jcols jac_struct = MOI.jacobian_structure(nlp.eval) - for index = 1:(nlp.meta.nln_nnzj) + for index = (quad_nnzj + 1):(nlp.meta.nln_nnzj) row, col = jac_struct[index] - rows[index] = row + rows[index] = row + nlp.quadcon.nquad cols[index] = col end return rows, cols @@ -161,7 +159,19 @@ end function NLPModels.jac_nln_coord!(nlp::MathOptNLPModel, x::AbstractVector, vals::AbstractVector) increment!(nlp, :neval_jac_nln) - MOI.eval_constraint_jacobian(nlp.eval, vals, x) + quad_nnzj = nlp.quadcon.nnzj + k = 0 + for i = 1:(nlp.quadcon.nquad) + # rows of Qᵢx + bᵢ with nonzeros coefficients + qcon = nlp.quadcon[i] + vec = unique(qcon.hessian.rows ∪ qcon.b.nzind) # Can we improve here? Or store this information? 
+ nnzj = length(vec) + res = similar(x) # Avoid extra allocation + coo_sym_prod!(qcon.hessian.rows, qcon.hessian.cols, qcon.hessian.vals, x, res) + vals[(k + 1):(k + nnzj)] .= res[vec] .+ qcon.b[vec] + k += nnzj + end + MOI.eval_constraint_jacobian(nlp.eval, view(vals, (quad_nnzj + 1):(nlp.meta.nln_nnzj)), x) return vals end @@ -316,9 +326,12 @@ function NLPModels.hess_structure!( end end if (nlp.obj.type == "NONLINEAR") || (nlp.meta.nnln > 0) + quad_nnzh = nlp.quadcon.nnzh + rows[(1 + nlp.obj.nnzh):(nlp.obj.nnzh + quad_nnzh)] .= nlp.quadcon.hrows + cols[(1 + nlp.obj.nnzh):(nlp.obj.nnzh + quad_nnzh)] .= nlp.quadcon.hcols hesslag_struct = MOI.hessian_lagrangian_structure(nlp.eval) - for index = (nlp.obj.nnzh + 1):(nlp.meta.nnzh) - shift_index = index - nlp.obj.nnzh + for index = (nlp.obj.nnzh + quad_nnzh + 1):(nlp.meta.nnzh) + shift_index = index - nlp.obj.nnzh - quad_nnzh rows[index] = hesslag_struct[shift_index][1] cols[index] = hesslag_struct[shift_index][2] end @@ -338,12 +351,20 @@ function NLPModels.hess_coord!( vals[1:(nlp.obj.nnzh)] .= obj_weight .* nlp.obj.hessian.vals end if (nlp.obj.type == "NONLINEAR") || (nlp.meta.nnln > 0) + quad_nnzh = nlp.quadcon.nnzh + k = 0 + for i = 1:(nlp.quadcon.nquad) + qcon = nlp.quadcon[i] + nnzh = length(qcon.hessian.vals) + vals[(k + 1):(k + nnzh)] .= qcon.hessian.vals .* y[nlp.meta.nlin + i] + k += nnzh + end MOI.eval_hessian_lagrangian( nlp.eval, - view(vals, (nlp.obj.nnzh + 1):(nlp.meta.nnzh)), + view(vals, (nlp.obj.nnzh + quad_nnzh + 1):(nlp.meta.nnzh)), x, obj_weight, - view(y, nlp.meta.nln), + view(y, (nlp.meta.nlin + nlp.quadcon.nquad + 1):(nlp.meta.ncon)), ) end return vals @@ -383,7 +404,14 @@ function NLPModels.hprod!( hv .= 0.0 end if (nlp.obj.type == "NONLINEAR") || (nlp.meta.nnln > 0) - MOI.eval_hessian_lagrangian_product(nlp.eval, hv, x, v, obj_weight, view(y, nlp.meta.nln)) + for i = 1:(nlp.quadcon.nquad) + qcon = nlp.quadcon[i] + res = similar(x) # Avoid extra allocation + coo_sym_prod!(qcon.hessian.rows, qcon.hessian.cols, qcon.hessian.vals, v, res) + hv .+= res .* y[nlp.meta.nlin + i] + end + ind_nln = (nlp.meta.nlin + nlp.quadcon.nquad + 1):(nlp.meta.ncon) + MOI.eval_hessian_lagrangian_product(nlp.eval, hv, x, v, obj_weight, view(y, ind_nln)) end if nlp.obj.type == "QUADRATIC" nlp.meta.nnln == 0 && (hv .= 0.0) @@ -414,7 +442,8 @@ function NLPModels.hprod!( hv .*= obj_weight end if nlp.obj.type == "NONLINEAR" - MOI.eval_hessian_lagrangian_product(nlp.eval, hv, x, v, obj_weight, zeros(nlp.meta.nnln)) + nnln = nlp.meta.nnln - nlp.quadcon.nquad + MOI.eval_hessian_lagrangian_product(nlp.eval, hv, x, v, obj_weight, zeros(nnln)) end return hv end From 0f08e9aaca140a030aec685ff9f311f025d7bea5 Mon Sep 17 00:00:00 2001 From: tmigot Date: Fri, 6 May 2022 15:35:04 -0400 Subject: [PATCH 05/16] simplify `nquad` --- src/moi_nlp_model.jl | 6 +++--- src/moi_nls_model.jl | 2 +- src/utils.jl | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/moi_nlp_model.jl b/src/moi_nlp_model.jl index 5c61341..c208e9d 100644 --- a/src/moi_nlp_model.jl +++ b/src/moi_nlp_model.jl @@ -34,7 +34,7 @@ function MathOptNLPModel(jmodel::JuMP.Model; hessian::Bool = true, name::String (nnln == 0 ? 
0 : sum(length(nl_con.hess_I) for nl_con in eval.constraints)) : 0 moimodel = backend(jmodel) - nlin, lincon, lin_lcon, lin_ucon, nquad, quadcon, quad_lcon, quad_ucon = parser_MOI(moimodel, nvar) + nlin, lincon, lin_lcon, lin_ucon, quadcon, quad_lcon, quad_ucon = parser_MOI(moimodel, nvar) if (eval ≠ nothing) && eval.has_nlobj obj = Objective("NONLINEAR", 0.0, spzeros(Float64, nvar), COO(), 0) @@ -42,7 +42,7 @@ function MathOptNLPModel(jmodel::JuMP.Model; hessian::Bool = true, name::String obj = parser_objective_MOI(moimodel, nvar) end - ncon = nlin + nquad + nnln + ncon = nlin + quadcon.nquad + nnln lcon = vcat(lin_lcon, quad_lcon, nl_lcon) ucon = vcat(lin_ucon, quad_ucon, nl_ucon) nnzj = lincon.nnzj + quadcon.nnzj + nl_nnzj @@ -63,7 +63,7 @@ function MathOptNLPModel(jmodel::JuMP.Model; hessian::Bool = true, name::String lin_nnzj = lincon.nnzj, nln_nnzj = quadcon.nnzj + nl_nnzj, minimize = objective_sense(jmodel) == MOI.MIN_SENSE, - islp = (obj.type == "LINEAR") && (nnln == 0) && (nquad == 0), + islp = (obj.type == "LINEAR") && (nnln == 0) && (quadcon.nquad == 0), name = name, ) diff --git a/src/moi_nls_model.jl b/src/moi_nls_model.jl index cb74649..01f1160 100644 --- a/src/moi_nls_model.jl +++ b/src/moi_nls_model.jl @@ -40,7 +40,7 @@ function MathOptNLSModel(cmodel::JuMP.Model, F; hessian::Bool = true, name::Stri (nnln == 0 ? 0 : sum(length(con.hess_I) for con in ceval.constraints)) : 0 moimodel = backend(cmodel) - nlin, lincon, lin_lcon, lin_ucon, nquad, quadcon, quad_lcon, quad_ucon = parser_MOI(moimodel, nvar) + nlin, lincon, lin_lcon, lin_ucon, quadcon, quad_lcon, quad_ucon = parser_MOI(moimodel, nvar) nequ = nlinequ + nnlnequ Fnnzj = linequ.nnzj + nl_Fnnzj diff --git a/src/utils.jl b/src/utils.jl index 710bed8..4ce5e60 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -338,7 +338,7 @@ function parser_MOI(moimodel, nvar) nnzh, hrows, hcols = hessian_quad(qcons) quadcon = QuadraticConstraints(qcons, nquad, nnzj, jrows, jcols, nnzh, hrows, hcols) - return nlin, lincon, lin_lcon, lin_ucon, nquad, quadcon, quad_lcon, quad_ucon + return nlin, lincon, lin_lcon, lin_ucon, quadcon, quad_lcon, quad_ucon end """ From af04c2ee82d0582a1a1efa5adc9179fb7fa68feb Mon Sep 17 00:00:00 2001 From: tmigot Date: Wed, 11 May 2022 00:03:06 +0200 Subject: [PATCH 06/16] add hs100 --- test/nlp_problems/hs100.jl | 24 ++++++++++++++++++++++++ test/runtests.jl | 2 +- test/test_moi_nlp_model.jl | 2 +- 3 files changed, 26 insertions(+), 2 deletions(-) create mode 100644 test/nlp_problems/hs100.jl diff --git a/test/nlp_problems/hs100.jl b/test/nlp_problems/hs100.jl new file mode 100644 index 0000000..2dca312 --- /dev/null +++ b/test/nlp_problems/hs100.jl @@ -0,0 +1,24 @@ +function hs100(args...; kwargs...) 
+ nlp = Model() + x0 = [1, 2, 0, 4, 0, 1, 1] + @variable(nlp, x[i = 1:7], start = x0[i]) + + @NLconstraint(nlp, 127 - 2 * x[1]^2 - 3 * x[2]^4 - x[3] - 4 * x[4]^2 - 5 * x[5] ≥ 0) + @constraint(nlp, 282 - 7 * x[1] - 3 * x[2] - 10 * x[3]^2 - x[4] + x[5] ≥ 0) + @constraint(nlp, 196 - 23 * x[1] - x[2]^2 - 6 * x[6]^2 + 8 * x[7] ≥ 0) + @constraint(nlp, -4 * x[1]^2 - x[2]^2 + 3 * x[1] * x[2] - 2 * x[3]^2 - 5 * x[6] + 11 * x[7] ≥ 0) + + @NLobjective( + nlp, + Min, + (x[1] - 10)^2 + + 5 * (x[2] - 12)^2 + + x[3]^4 + + 3 * (x[4] - 11)^2 + + 10 * x[5]^6 + + 7 * x[6]^2 + + x[7]^4 - 4 * x[6] * x[7] - 10 * x[6] - 8 * x[7] + ) + + return nlp +end \ No newline at end of file diff --git a/test/runtests.jl b/test/runtests.jl index 13fa408..7a34589 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -6,7 +6,7 @@ nlp_problems = setdiff(NLPModelsTest.nlp_problems, ["MGH01Feas"]) nls_problems = NLPModelsTest.nls_problems extra_nls_problems = ["HS30", "HS43", "MGH07", "nlsnohesspb"] -for problem in lowercase.(nlp_problems ∪ ["nohesspb", "hs61"]) +for problem in lowercase.(nlp_problems ∪ ["nohesspb", "hs61", "hs100"]) include(joinpath("nlp_problems", "$problem.jl")) end diff --git a/test/test_moi_nlp_model.jl b/test/test_moi_nlp_model.jl index dd2dfb5..4f236ba 100644 --- a/test/test_moi_nlp_model.jl +++ b/test/test_moi_nlp_model.jl @@ -11,7 +11,7 @@ println("Testing MathOptNLPModel") "‖c(x₀)‖" ) # Test that every problem can be instantiated. -for prob in Symbol.(lowercase.(nlp_problems ∪ ["nohesspb", "hs61"])) +for prob in Symbol.(lowercase.(nlp_problems ∪ ["nohesspb", "hs61", "hs100"])) prob_fn = eval(prob) nlp = MathOptNLPModel(prob_fn(), hessian = (prob != :nohesspb)) n = nlp.meta.nvar From 2aaf3539f422a828134f8be0c922c563351eac1f Mon Sep 17 00:00:00 2001 From: tmigot Date: Wed, 11 May 2022 00:21:41 +0200 Subject: [PATCH 07/16] store `vec` --- src/moi_nlp_model.jl | 2 +- src/utils.jl | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/moi_nlp_model.jl b/src/moi_nlp_model.jl index c208e9d..863626f 100644 --- a/src/moi_nlp_model.jl +++ b/src/moi_nlp_model.jl @@ -164,7 +164,7 @@ function NLPModels.jac_nln_coord!(nlp::MathOptNLPModel, x::AbstractVector, vals: for i = 1:(nlp.quadcon.nquad) # rows of Qᵢx + bᵢ with nonzeros coefficients qcon = nlp.quadcon[i] - vec = unique(qcon.hessian.rows ∪ qcon.b.nzind) # Can we improve here? Or store this information? + vec = nlp.quadcon[i].vec # unique(qcon.hessian.rows ∪ qcon.b.nzind) # Can we improve here? Or store this information? nnzj = length(vec) res = similar(x) # Avoid extra allocation coo_sym_prod!(qcon.hessian.rows, qcon.hessian.cols, qcon.hessian.vals, x, res) diff --git a/src/utils.jl b/src/utils.jl index 4ce5e60..0abca16 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -49,6 +49,7 @@ end mutable struct QuadraticConstraint hessian::COO + vec::Vector{Int} b::SparseVector{Float64} end @@ -129,6 +130,7 @@ end `qcons` is a vector of `QuadraticConstraint` where each constraint has the form ½xᵀQᵢx + xᵀbᵢ. Compute the sparsity pattern of the jacobian [Q₁x + b₁; ...; Qₚx + bₚ]ᵀ of `qcons`. +This function also allocates `qcons[i].vec`. 
""" function jacobian_quad(qcons) jrows = Int[] @@ -136,8 +138,8 @@ function jacobian_quad(qcons) nquad = length(qcons) for i = 1 : nquad # rows of Qᵢx + bᵢ with nonzeros coefficients - vec = unique(qcons[i].hessian.rows ∪ qcons[i].b.nzind) - for elt ∈ vec + qcons[i].vec = unique(qcons[i].hessian.rows ∪ qcons[i].b.nzind) + for elt ∈ qcons[i].vec push!(jcols, elt) push!(jrows, i) end @@ -281,7 +283,7 @@ function parser_SQF(fun, set, nvar, qcons, quad_lcon, quad_ucon) end nnzh = length(vals) - qcon = QuadraticConstraint(COO(rows, cols, vals), b) + qcon = QuadraticConstraint(COO(rows, cols, vals), Int[], b) push!(qcons, qcon) end From 90ea72a79a38d83ce667d4210f5f937bdc7cd736 Mon Sep 17 00:00:00 2001 From: tmigot Date: Wed, 11 May 2022 01:14:45 +0200 Subject: [PATCH 08/16] implement hprod and jac_coord properly --- src/moi_nlp_model.jl | 28 ++++++++++++++++------------ src/utils.jl | 14 +++++++++----- 2 files changed, 25 insertions(+), 17 deletions(-) diff --git a/src/moi_nlp_model.jl b/src/moi_nlp_model.jl index 863626f..e62f052 100644 --- a/src/moi_nlp_model.jl +++ b/src/moi_nlp_model.jl @@ -157,18 +157,20 @@ function NLPModels.jac_lin_coord!(nlp::MathOptNLPModel, x::AbstractVector, vals: return vals end -function NLPModels.jac_nln_coord!(nlp::MathOptNLPModel, x::AbstractVector, vals::AbstractVector) +function NLPModels.jac_nln_coord!(nlp::MathOptNLPModel, x::AbstractVector, vals::AbstractVector{T}) where {T} increment!(nlp, :neval_jac_nln) quad_nnzj = nlp.quadcon.nnzj k = 0 for i = 1:(nlp.quadcon.nquad) - # rows of Qᵢx + bᵢ with nonzeros coefficients qcon = nlp.quadcon[i] - vec = nlp.quadcon[i].vec # unique(qcon.hessian.rows ∪ qcon.b.nzind) # Can we improve here? Or store this information? - nnzj = length(vec) - res = similar(x) # Avoid extra allocation - coo_sym_prod!(qcon.hessian.rows, qcon.hessian.cols, qcon.hessian.vals, x, res) - vals[(k + 1):(k + nnzj)] .= res[vec] .+ qcon.b[vec] + vec = qcon.vec + for j=1:length(vec) + vals[k + j] = qcon.b[j] + end + nnzj = length(qcon.hessian.vals) + for i=1:nnzj + vals[k + qcon.hessian.rows[i]] += qcon.hessian.vals[i] * x[qcon.hessian.cols[i]] + end k += nnzj end MOI.eval_constraint_jacobian(nlp.eval, view(vals, (quad_nnzj + 1):(nlp.meta.nln_nnzj)), x) @@ -327,8 +329,9 @@ function NLPModels.hess_structure!( end if (nlp.obj.type == "NONLINEAR") || (nlp.meta.nnln > 0) quad_nnzh = nlp.quadcon.nnzh - rows[(1 + nlp.obj.nnzh):(nlp.obj.nnzh + quad_nnzh)] .= nlp.quadcon.hrows - cols[(1 + nlp.obj.nnzh):(nlp.obj.nnzh + quad_nnzh)] .= nlp.quadcon.hcols + hrows, hcols = hessian_structure(nlp.quadcon.set) + rows[(1 + nlp.obj.nnzh):(nlp.obj.nnzh + quad_nnzh)] .= hrows + cols[(1 + nlp.obj.nnzh):(nlp.obj.nnzh + quad_nnzh)] .= hcols hesslag_struct = MOI.hessian_lagrangian_structure(nlp.eval) for index = (nlp.obj.nnzh + quad_nnzh + 1):(nlp.meta.nnzh) shift_index = index - nlp.obj.nnzh - quad_nnzh @@ -406,9 +409,10 @@ function NLPModels.hprod!( if (nlp.obj.type == "NONLINEAR") || (nlp.meta.nnln > 0) for i = 1:(nlp.quadcon.nquad) qcon = nlp.quadcon[i] - res = similar(x) # Avoid extra allocation - coo_sym_prod!(qcon.hessian.rows, qcon.hessian.cols, qcon.hessian.vals, v, res) - hv .+= res .* y[nlp.meta.nlin + i] + for (index,tuple) in enumerate(nlp.quadcon.set) + hv[tuple[1]] += qcon.hessian.vals[index] * v[tuple[2]] + end + hv .*= y[nlp.meta.nlin + i] end ind_nln = (nlp.meta.nlin + nlp.quadcon.nquad + 1):(nlp.meta.ncon) MOI.eval_hessian_lagrangian_product(nlp.eval, hv, x, v, obj_weight, view(y, ind_nln)) diff --git a/src/utils.jl b/src/utils.jl index 0abca16..bfa0842 
100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -60,8 +60,7 @@ mutable struct QuadraticConstraints jrows::Vector{Int} jcols::Vector{Int} nnzh::Int - hrows::Vector{Int} - hcols::Vector{Int} + set::Set{Tuple{Int,Int}} end Base.getindex(qcon::QuadraticConstraints, i::Integer) = qcon.qcons[i] @@ -164,6 +163,10 @@ function hessian_quad(qcons) push!(set, tuple) end end + return set +end + +function hessian_structure(set) nnzh = length(set) hrows = zeros(Int, nnzh) hcols = zeros(Int, nnzh) @@ -171,7 +174,7 @@ function hessian_quad(qcons) hrows[index] = tuple[1] hcols[index] = tuple[2] end - return nnzh, hrows, hcols + return hrows, hcols end """ @@ -337,8 +340,9 @@ function parser_MOI(moimodel, nvar) lincon = LinearConstraints(coo, nnzj) nnzj, jrows, jcols = jacobian_quad(qcons) - nnzh, hrows, hcols = hessian_quad(qcons) - quadcon = QuadraticConstraints(qcons, nquad, nnzj, jrows, jcols, nnzh, hrows, hcols) + set = hessian_quad(qcons) + nnzh = length(set) + quadcon = QuadraticConstraints(qcons, nquad, nnzj, jrows, jcols, nnzh, set) return nlin, lincon, lin_lcon, lin_ucon, quadcon, quad_lcon, quad_ucon end From 836a7b955ba74d88723e79ad3a1149a8dfdffdaf Mon Sep 17 00:00:00 2001 From: tmigot Date: Wed, 11 May 2022 05:01:59 +0200 Subject: [PATCH 09/16] add test problem and fix --- src/moi_nlp_model.jl | 21 +++++++++--------- test/test_moi_nlp_model.jl | 44 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 10 deletions(-) diff --git a/src/moi_nlp_model.jl b/src/moi_nlp_model.jl index e62f052..fa9b782 100644 --- a/src/moi_nlp_model.jl +++ b/src/moi_nlp_model.jl @@ -143,10 +143,10 @@ function NLPModels.jac_nln_structure!( rows[1:quad_nnzj] .= jrows cols[1:quad_nnzj] .= jcols jac_struct = MOI.jacobian_structure(nlp.eval) - for index = (quad_nnzj + 1):(nlp.meta.nln_nnzj) + for index = 1:(nlp.meta.nln_nnzj - quad_nnzj) row, col = jac_struct[index] - rows[index] = row + nlp.quadcon.nquad - cols[index] = col + rows[quad_nnzj + index] = row + nlp.quadcon.nquad + cols[quad_nnzj + index] = col end return rows, cols end @@ -157,19 +157,19 @@ function NLPModels.jac_lin_coord!(nlp::MathOptNLPModel, x::AbstractVector, vals: return vals end -function NLPModels.jac_nln_coord!(nlp::MathOptNLPModel, x::AbstractVector, vals::AbstractVector{T}) where {T} +function NLPModels.jac_nln_coord!(nlp::MathOptNLPModel, x::AbstractVector, vals::AbstractVector) increment!(nlp, :neval_jac_nln) quad_nnzj = nlp.quadcon.nnzj + vals .= 0.0 k = 0 for i = 1:(nlp.quadcon.nquad) qcon = nlp.quadcon[i] - vec = qcon.vec - for j=1:length(vec) - vals[k + j] = qcon.b[j] + for j=1:length(qcon.vec) + vals[k + j] = qcon.b[qcon.vec[j]] end nnzj = length(qcon.hessian.vals) for i=1:nnzj - vals[k + qcon.hessian.rows[i]] += qcon.hessian.vals[i] * x[qcon.hessian.cols[i]] + vals[k + i] += qcon.hessian.vals[i] * x[qcon.hessian.cols[i]] end k += nnzj end @@ -388,6 +388,7 @@ function NLPModels.hess_coord!( vals[(nlp.obj.nnzh + 1):(nlp.meta.nnzh)] .= 0.0 end if nlp.obj.type == "NONLINEAR" + vals .= 0.0 MOI.eval_hessian_lagrangian(nlp.eval, vals, x, obj_weight, zeros(nlp.meta.nnln)) end @@ -407,6 +408,8 @@ function NLPModels.hprod!( hv .= 0.0 end if (nlp.obj.type == "NONLINEAR") || (nlp.meta.nnln > 0) + ind_nln = (nlp.meta.nlin + nlp.quadcon.nquad + 1):(nlp.meta.ncon) + MOI.eval_hessian_lagrangian_product(nlp.eval, hv, x, v, obj_weight, view(y, ind_nln)) for i = 1:(nlp.quadcon.nquad) qcon = nlp.quadcon[i] for (index,tuple) in enumerate(nlp.quadcon.set) @@ -414,8 +417,6 @@ function NLPModels.hprod!( end hv .*= y[nlp.meta.nlin + i] end - 
ind_nln = (nlp.meta.nlin + nlp.quadcon.nquad + 1):(nlp.meta.ncon) - MOI.eval_hessian_lagrangian_product(nlp.eval, hv, x, v, obj_weight, view(y, ind_nln)) end if nlp.obj.type == "QUADRATIC" nlp.meta.nnln == 0 && (hv .= 0.0) diff --git a/test/test_moi_nlp_model.jl b/test/test_moi_nlp_model.jl index 4f236ba..cd4bb20 100644 --- a/test/test_moi_nlp_model.jl +++ b/test/test_moi_nlp_model.jl @@ -23,3 +23,47 @@ for prob in Symbol.(lowercase.(nlp_problems ∪ ["nohesspb", "hs61", "hs100"])) @printf("%-15s %4d %4d %10.4e %10.4e %10s\n", prob, n, m, fx, ngx, ncx) end println() + +function hs219(args...; kwargs...) + nlp = Model() + x0 = [10, 10, 10, 10] + @variable(nlp, x[i = 1:4], start = x0[i]) + + @constraint(nlp, x[1]^2 - x[2] - x[4]^2 == 0) + @NLconstraint(nlp, x[2] - x[1]^3 - x[3]^2 == 0) + + @NLobjective( + nlp, + Min, + -x[1] + ) + + return nlp +end + +@testset "Testing quadratic constraints with JuMP" begin + g(x) = [-1., 0., 0., 0.] + Hess(x) = zeros(4, 4) + function Hess(x, y) + H = zeros(4, 4) + H[1, 1] = 2 * y[1] - y[2] * 6 * x[1] + H[3, 3] = - 2 * y[2] + H[4, 4] = - 2 * y[1] + return H + end + J(x) = [ + 2x[1] -1 0 -2x[4]; + -3x[1]^2 1 -2x[3] 0 + ] + + jump = hs219() + nlp = MathOptNLPModel(jump) + x1 = nlp.meta.x0 + y1 = ones(nlp.meta.ncon) + v1 = 2 * ones(nlp.meta.nvar) + @test jac(nlp, x1) ≈ J(x1) + @test hess(nlp, x1) ≈ Hess(x1) + @test hess(nlp, x1, y1) ≈ Hess(x1, y1) + @test hprod(nlp, x1, x1) ≈ Hess(x1) * x1 + @test hprod(nlp, x1, y1, v1) ≈ Hess(x1, y1) * v1 +end From 494e3a06acedea776e2446d7c1afc2b6cf362cae Mon Sep 17 00:00:00 2001 From: tmigot Date: Wed, 11 May 2022 15:39:57 +0200 Subject: [PATCH 10/16] only evaluate the NLPEval when there is nonlinear cons --- src/moi_nlp_model.jl | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/src/moi_nlp_model.jl b/src/moi_nlp_model.jl index fa9b782..a292cd8 100644 --- a/src/moi_nlp_model.jl +++ b/src/moi_nlp_model.jl @@ -120,7 +120,9 @@ function NLPModels.cons_nln!(nlp::MathOptNLPModel, x::AbstractVector, c::Abstrac qcon = nlp.quadcon[i] c[i] = 0.5 * coo_sym_dot(qcon.hessian.rows, qcon.hessian.cols, qcon.hessian.vals, x, x) + dot(qcon.b, x) end - MOI.eval_constraint(nlp.eval, view(c, (nlp.quadcon.nquad + 1):(nlp.meta.nnln)), x) + if nlp.meta.nnln > nlp.quadcon.nquad + MOI.eval_constraint(nlp.eval, view(c, (nlp.quadcon.nquad + 1):(nlp.meta.nnln)), x) + end return c end @@ -142,11 +144,13 @@ function NLPModels.jac_nln_structure!( quad_nnzj, jrows, jcols = nlp.quadcon.nnzj, nlp.quadcon.jrows, nlp.quadcon.jcols rows[1:quad_nnzj] .= jrows cols[1:quad_nnzj] .= jcols - jac_struct = MOI.jacobian_structure(nlp.eval) - for index = 1:(nlp.meta.nln_nnzj - quad_nnzj) - row, col = jac_struct[index] - rows[quad_nnzj + index] = row + nlp.quadcon.nquad - cols[quad_nnzj + index] = col + if nlp.meta.nnln > nlp.quadcon.nquad + jac_struct = MOI.jacobian_structure(nlp.eval) + for index = 1:(nlp.meta.nln_nnzj - quad_nnzj) + row, col = jac_struct[index] + rows[quad_nnzj + index] = row + nlp.quadcon.nquad + cols[quad_nnzj + index] = col + end end return rows, cols end @@ -173,7 +177,9 @@ function NLPModels.jac_nln_coord!(nlp::MathOptNLPModel, x::AbstractVector, vals: end k += nnzj end - MOI.eval_constraint_jacobian(nlp.eval, view(vals, (quad_nnzj + 1):(nlp.meta.nln_nnzj)), x) + if nlp.meta.nnln > nlp.quadcon.nquad + MOI.eval_constraint_jacobian(nlp.eval, view(vals, (quad_nnzj + 1):(nlp.meta.nln_nnzj)), x) + end return vals end From 01863f70050ebdb297a4f24dc331b2a25560eaf6 Mon Sep 17 00:00:00 2001 From: Alexis Montoison Date: 
Thu, 12 May 2022 19:13:47 -0400 Subject: [PATCH 11/16] =?UTF-8?q?[MathOptNLPModel]=C2=A0Fix=20hprod!?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/moi_nlp_model.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/moi_nlp_model.jl b/src/moi_nlp_model.jl index a292cd8..ecc58ec 100644 --- a/src/moi_nlp_model.jl +++ b/src/moi_nlp_model.jl @@ -418,10 +418,10 @@ function NLPModels.hprod!( MOI.eval_hessian_lagrangian_product(nlp.eval, hv, x, v, obj_weight, view(y, ind_nln)) for i = 1:(nlp.quadcon.nquad) qcon = nlp.quadcon[i] - for (index,tuple) in enumerate(nlp.quadcon.set) - hv[tuple[1]] += qcon.hessian.vals[index] * v[tuple[2]] + for k = 1:length(qcon.hessian.vals) + hv[qcon.hessian.rows[k]] += qcon.hessian.vals[k] * v[qcon.hessian.cols[k] end - hv .*= y[nlp.meta.nlin + i] + hv[i] *= obj_weight * y[nlp.meta.nlin + i] end end if nlp.obj.type == "QUADRATIC" From 86ccf4438e0c1307694df4a61fa68e36fe732c38 Mon Sep 17 00:00:00 2001 From: Alexis Montoison Date: Thu, 12 May 2022 19:32:25 -0400 Subject: [PATCH 12/16] =?UTF-8?q?[MathOptNLPModel]=C2=A0Fix=20cases=20with?= =?UTF-8?q?=20nlp.eval=20=3D=3D=20nothing?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/moi_nlp_model.jl | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/src/moi_nlp_model.jl b/src/moi_nlp_model.jl index ecc58ec..40cfc02 100644 --- a/src/moi_nlp_model.jl +++ b/src/moi_nlp_model.jl @@ -333,16 +333,19 @@ function NLPModels.hess_structure!( cols[index] = nlp.obj.hessian.cols[index] end end - if (nlp.obj.type == "NONLINEAR") || (nlp.meta.nnln > 0) + if nlp.quadcon.nquad > 0 quad_nnzh = nlp.quadcon.nnzh hrows, hcols = hessian_structure(nlp.quadcon.set) rows[(1 + nlp.obj.nnzh):(nlp.obj.nnzh + quad_nnzh)] .= hrows cols[(1 + nlp.obj.nnzh):(nlp.obj.nnzh + quad_nnzh)] .= hcols - hesslag_struct = MOI.hessian_lagrangian_structure(nlp.eval) - for index = (nlp.obj.nnzh + quad_nnzh + 1):(nlp.meta.nnzh) - shift_index = index - nlp.obj.nnzh - quad_nnzh - rows[index] = hesslag_struct[shift_index][1] - cols[index] = hesslag_struct[shift_index][2] + end + if (nlp.obj.type == "NONLINEAR") || (nlp.meta.nnln > nlp.quadcon.nquad) + hesslag_struct = MOI.hessian_lagrangian_structure(nlp.eval) + for index = (nlp.obj.nnzh + quad_nnzh + 1):(nlp.meta.nnzh) + shift_index = index - nlp.obj.nnzh - quad_nnzh + rows[index] = hesslag_struct[shift_index][1] + cols[index] = hesslag_struct[shift_index][2] + end end end return rows, cols @@ -359,7 +362,7 @@ function NLPModels.hess_coord!( if nlp.obj.type == "QUADRATIC" vals[1:(nlp.obj.nnzh)] .= obj_weight .* nlp.obj.hessian.vals end - if (nlp.obj.type == "NONLINEAR") || (nlp.meta.nnln > 0) + if nlp.quadcon.nquad > 0 quad_nnzh = nlp.quadcon.nnzh k = 0 for i = 1:(nlp.quadcon.nquad) @@ -368,12 +371,14 @@ function NLPModels.hess_coord!( vals[(k + 1):(k + nnzh)] .= qcon.hessian.vals .* y[nlp.meta.nlin + i] k += nnzh end + end + if (nlp.obj.type == "NONLINEAR") || (nlp.meta.nnln > nlp.quadcon.nquad) MOI.eval_hessian_lagrangian( nlp.eval, view(vals, (nlp.obj.nnzh + quad_nnzh + 1):(nlp.meta.nnzh)), x, obj_weight, - view(y, (nlp.meta.nlin + nlp.quadcon.nquad + 1):(nlp.meta.ncon)), + view(y, (nlp.meta.nlin + nlp.quadcon.nquad + 1):(nlp.meta.ncon)) ) end return vals @@ -413,16 +418,17 @@ function NLPModels.hprod!( if (nlp.obj.type == "LINEAR") && (nlp.meta.nnln == 0) hv .= 0.0 end - if (nlp.obj.type == "NONLINEAR") || (nlp.meta.nnln > 0) - ind_nln = 
(nlp.meta.nlin + nlp.quadcon.nquad + 1):(nlp.meta.ncon) - MOI.eval_hessian_lagrangian_product(nlp.eval, hv, x, v, obj_weight, view(y, ind_nln)) + if nlp.quadcon.nquad > 0 for i = 1:(nlp.quadcon.nquad) - qcon = nlp.quadcon[i] - for k = 1:length(qcon.hessian.vals) - hv[qcon.hessian.rows[k]] += qcon.hessian.vals[k] * v[qcon.hessian.cols[k] - end - hv[i] *= obj_weight * y[nlp.meta.nlin + i] + qcon = nlp.quadcon[i] + for k = 1:length(qcon.hessian.vals) + hv[qcon.hessian.rows[k]] += qcon.hessian.vals[k] * v[qcon.hessian.cols[k]] end + hv[i] *= obj_weight * y[nlp.meta.nlin + i] + end + if (nlp.obj.type == "NONLINEAR") || (nlp.meta.nnln > nlp.quadcon.nquad) + ind_nln = (nlp.meta.nlin + nlp.quadcon.nquad + 1):(nlp.meta.ncon) + MOI.eval_hessian_lagrangian_product(nlp.eval, hv, x, v, obj_weight, view(y, ind_nln)) end if nlp.obj.type == "QUADRATIC" nlp.meta.nnln == 0 && (hv .= 0.0) From ef957701624e926610ce9aacdaf35bec96cd49a8 Mon Sep 17 00:00:00 2001 From: Alexis <35051714+amontoison@users.noreply.github.com> Date: Thu, 12 May 2022 19:36:11 -0400 Subject: [PATCH 13/16] Update test/nlp_problems/hs100.jl --- test/nlp_problems/hs100.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/nlp_problems/hs100.jl b/test/nlp_problems/hs100.jl index 2dca312..b4af448 100644 --- a/test/nlp_problems/hs100.jl +++ b/test/nlp_problems/hs100.jl @@ -21,4 +21,4 @@ function hs100(args...; kwargs...) ) return nlp -end \ No newline at end of file +end From c64ac7421aa4a598fa7ae6e89e40348069d8ab71 Mon Sep 17 00:00:00 2001 From: Alexis Montoison Date: Thu, 12 May 2022 19:41:30 -0400 Subject: [PATCH 14/16] Remove hessian_structure function --- src/utils.jl | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/src/utils.jl b/src/utils.jl index bfa0842..12913be 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -166,17 +166,6 @@ function hessian_quad(qcons) return set end -function hessian_structure(set) - nnzh = length(set) - hrows = zeros(Int, nnzh) - hcols = zeros(Int, nnzh) - for (index,tuple) in enumerate(set) - hrows[index] = tuple[1] - hcols[index] = tuple[2] - end - return hrows, hcols -end - """ parser_SAF(fun, set, linrows, lincols, linvals, nlin, lin_lcon, lin_ucon) From 4b24cab04d99e5b99b95040ffe8dc03f5626df0a Mon Sep 17 00:00:00 2001 From: Alexis Montoison Date: Thu, 12 May 2022 19:42:04 -0400 Subject: [PATCH 15/16] Optimize hessian_structure function for quadratic constraints --- src/moi_nlp_model.jl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/moi_nlp_model.jl b/src/moi_nlp_model.jl index 40cfc02..eea3e57 100644 --- a/src/moi_nlp_model.jl +++ b/src/moi_nlp_model.jl @@ -334,10 +334,10 @@ function NLPModels.hess_structure!( end end if nlp.quadcon.nquad > 0 - quad_nnzh = nlp.quadcon.nnzh - hrows, hcols = hessian_structure(nlp.quadcon.set) - rows[(1 + nlp.obj.nnzh):(nlp.obj.nnzh + quad_nnzh)] .= hrows - cols[(1 + nlp.obj.nnzh):(nlp.obj.nnzh + quad_nnzh)] .= hcols + for (index, tuple) in enumerate(nlp.quadcon.set) + rows[nlp.obj.nnzh + index] = tuple[1] + cols[nlp.obj.nnzh + index] = tuple[2] + end end if (nlp.obj.type == "NONLINEAR") || (nlp.meta.nnln > nlp.quadcon.nquad) hesslag_struct = MOI.hessian_lagrangian_structure(nlp.eval) From e141a9c0b4df1828ba00147e81bc09c93c1a18b6 Mon Sep 17 00:00:00 2001 From: Alexis Montoison Date: Thu, 12 May 2022 20:00:15 -0400 Subject: [PATCH 16/16] [MathOptNLPModel] hotfix --- src/moi_nlp_model.jl | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/src/moi_nlp_model.jl 
b/src/moi_nlp_model.jl index eea3e57..682f5ad 100644 --- a/src/moi_nlp_model.jl +++ b/src/moi_nlp_model.jl @@ -163,7 +163,6 @@ end function NLPModels.jac_nln_coord!(nlp::MathOptNLPModel, x::AbstractVector, vals::AbstractVector) increment!(nlp, :neval_jac_nln) - quad_nnzj = nlp.quadcon.nnzj vals .= 0.0 k = 0 for i = 1:(nlp.quadcon.nquad) @@ -178,7 +177,7 @@ function NLPModels.jac_nln_coord!(nlp::MathOptNLPModel, x::AbstractVector, vals: k += nnzj end if nlp.meta.nnln > nlp.quadcon.nquad - MOI.eval_constraint_jacobian(nlp.eval, view(vals, (quad_nnzj + 1):(nlp.meta.nln_nnzj)), x) + MOI.eval_constraint_jacobian(nlp.eval, view(vals, (nlp.quadcon.nnzj + 1):(nlp.meta.nln_nnzj)), x) end return vals end @@ -340,12 +339,11 @@ function NLPModels.hess_structure!( end end if (nlp.obj.type == "NONLINEAR") || (nlp.meta.nnln > nlp.quadcon.nquad) - hesslag_struct = MOI.hessian_lagrangian_structure(nlp.eval) - for index = (nlp.obj.nnzh + quad_nnzh + 1):(nlp.meta.nnzh) - shift_index = index - nlp.obj.nnzh - quad_nnzh - rows[index] = hesslag_struct[shift_index][1] - cols[index] = hesslag_struct[shift_index][2] - end + hesslag_struct = MOI.hessian_lagrangian_structure(nlp.eval) + for index = (nlp.obj.nnzh + nlp.quadcon.nnzh + 1):(nlp.meta.nnzh) + shift_index = index - nlp.obj.nnzh - nlp.quadcon.nnzh + rows[index] = hesslag_struct[shift_index][1] + cols[index] = hesslag_struct[shift_index][2] end end return rows, cols @@ -375,7 +373,7 @@ function NLPModels.hess_coord!( if (nlp.obj.type == "NONLINEAR") || (nlp.meta.nnln > nlp.quadcon.nquad) MOI.eval_hessian_lagrangian( nlp.eval, - view(vals, (nlp.obj.nnzh + quad_nnzh + 1):(nlp.meta.nnzh)), + view(vals, (nlp.obj.nnzh + nlp.quadcon.nnzh + 1):(nlp.meta.nnzh)), x, obj_weight, view(y, (nlp.meta.nlin + nlp.quadcon.nquad + 1):(nlp.meta.ncon)) @@ -420,11 +418,12 @@ function NLPModels.hprod!( end if nlp.quadcon.nquad > 0 for i = 1:(nlp.quadcon.nquad) - qcon = nlp.quadcon[i] - for k = 1:length(qcon.hessian.vals) - hv[qcon.hessian.rows[k]] += qcon.hessian.vals[k] * v[qcon.hessian.cols[k]] + qcon = nlp.quadcon[i] + for k = 1:length(qcon.hessian.vals) + hv[qcon.hessian.rows[k]] += qcon.hessian.vals[k] * v[qcon.hessian.cols[k]] + end + hv[i] *= y[nlp.meta.nlin + i] end - hv[i] *= obj_weight * y[nlp.meta.nlin + i] end if (nlp.obj.type == "NONLINEAR") || (nlp.meta.nnln > nlp.quadcon.nquad) ind_nln = (nlp.meta.nlin + nlp.quadcon.nquad + 1):(nlp.meta.ncon)
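
The patches above are exercised on JuMP models whose quadratic constraints are detected by parser_SQF and then evaluated through cons_nln!, jac_nln_coord! and the Hessian routines. The following is a minimal usage sketch, not part of the patch series: it assumes NLPModelsJuMP built with these patches applied, together with JuMP and NLPModels, and it simply restates the hs61 test problem added in PATCH 02/16.

using JuMP, NLPModels, NLPModelsJuMP

# Nonlinear objective with two quadratic equality constraints (hs61).
model = Model()
@variable(model, x[i = 1:3], start = 0)
@constraint(model, 3 * x[1] - 2 * x[2]^2 - 7 == 0)   # quadratic constraint, parsed by parser_SQF
@constraint(model, 4 * x[1] - x[3]^2 - 11 == 0)      # quadratic constraint, parsed by parser_SQF
@NLobjective(model, Min, 4 * x[1]^2 + 2 * x[2]^2 + 2 * x[3]^2 - 33 * x[1] + 16 * x[2] - 24 * x[3])

nlp = MathOptNLPModel(model)
x0 = nlp.meta.x0
c = cons(nlp, x0)                       # quadratic constraints evaluated in cons_nln!
J = jac(nlp, x0)                        # sparsity pattern of the quadratic block from jacobian_quad
H = hess(nlp, x0, ones(nlp.meta.ncon))  # Lagrangian Hessian including the Qᵢ blocks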