Add AbstractKKTSystem structure (#58)
* Add AbstractKKTSystem structure

* implement SparseReducedKKTSystem and SparseAugmentedKKTSystem
* refactor Solver

* Avoid unnecessary allocations by forcing specialization

* Deactivate logs in benchmark scripts (#59)

* deactivate logs in benchmark scripts

* benchmark: add verbose option to main script

* barrier iterations (#61)

* benchmark improvement (#60)

* allocation issue fixed

* added buffered option for NLPModels.jl

* ma27 fix

Co-authored-by: Sungho Shin <sshin@anl.gov>
frapac and sshin23 committed Aug 29, 2021
1 parent 1b95263 commit 0a471d4
Showing 13 changed files with 899 additions and 573 deletions.
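
The commit summary above introduces an AbstractKKTSystem abstraction with sparse reduced and augmented variants, so the interior-point solver can be written against a single interface. The sketch below is illustrative only: the abstract type and the two concrete names come from the commit message, but the fields and the assemble! helper are assumptions, not the code added in src/kktsystem.jl.

using SparseArrays

# Illustrative sketch -- field and helper names below are assumptions.
abstract type AbstractKKTSystem end

# Augmented (unreduced) form: keeps the bound-multiplier blocks explicit.
struct SparseAugmentedKKTSystem <: AbstractKKTSystem
    aug::SparseMatrixCSC{Float64,Int}   # assumed field: full augmented matrix
end

# Reduced form: condenses the bound multipliers into the (1,1) block.
struct SparseReducedKKTSystem <: AbstractKKTSystem
    aug::SparseMatrixCSC{Float64,Int}   # assumed field: reduced matrix
    pr_diag::Vector{Float64}            # assumed field: primal diagonal terms
end

# The solver only needs methods defined on the abstract type; dispatch picks
# the assembly routine that matches the concrete system.
assemble!(kkt::SparseAugmentedKKTSystem) = nothing   # placeholder body
assemble!(kkt::SparseReducedKKTSystem)   = nothing   # placeholder body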
2 changes: 1 addition & 1 deletion lib/MadNLPHSL/src/MadNLPHSL.jl
@@ -5,7 +5,7 @@ import MadNLP: @kwdef, Logger, @debug, @warn, @error,
AbstractOptions, AbstractLinearSolver, set_options!, SparseMatrixCSC, SubVector, StrideOneVector,
SymbolicException,FactorizationException,SolveException,InertiaException,
introduce, factorize!, solve!, improve!, is_inertia, inertia, findIJ, nnz,
get_tril_to_full, transform!
get_tril_to_full, transfer!

include(joinpath("..","deps","deps.jl"))

2 changes: 1 addition & 1 deletion lib/MadNLPHSL/src/ma27.jl
@@ -33,7 +33,7 @@ mutable struct Solver <: AbstractLinearSolver
info::Vector{Int32}

a::Vector{Float64}
a_view::SubVector{Float64}
a_view::StrideOneVector{Float64}
la::Int32
ikeep::Vector{Int32}

2 changes: 1 addition & 1 deletion lib/MadNLPHSL/src/ma77.jl
@@ -6,7 +6,7 @@ module MadNLPMa77
import ..MadNLPHSL:
@kwdef, Logger, @debug, @warn, @error, libhsl,
SparseMatrixCSC, SparseMatrixCSC, SubVector, StrideOneVector,
get_tril_to_full, transform!,
get_tril_to_full, transfer!,
AbstractOptions, AbstractLinearSolver, set_options!,
SymbolicException,FactorizationException,SolveException,InertiaException,
introduce, factorize!, solve!, improve!, is_inertia, inertia
12 changes: 6 additions & 6 deletions src/Interfaces/MOI_interface.jl
@@ -1131,13 +1131,13 @@ function NonlinearProgram(model::Optimizer)
set_g!(model,l,gl,gu)

obj_scale = get_obj_scale(model.sense)
obj(x::AbstractArray{Float64,1}) = obj_scale*eval_objective(model,x)
obj_grad!(f::AbstractArray{Float64,1},x::AbstractArray{Float64,1}) =
obj(x::StrideOneVector{Float64}) = obj_scale*eval_objective(model,x)
obj_grad!(f::StrideOneVector{Float64},x::StrideOneVector{Float64}) =
(eval_objective_gradient(model,f,x); obj_scale!=1. && (f.*=obj_scale))
con!(c::Array{Float64,1},x::AbstractArray{Float64,1}) = eval_constraint(model,c,x)
con_jac!(jac::AbstractArray{Float64,1},
x::AbstractArray{Float64,1})=eval_constraint_jacobian(model,jac,x)
lag_hess!(hess::AbstractArray{Float64,1},x::AbstractArray{Float64,1},l::AbstractArray{Float64,1},
con!(c::StrideOneVector{Float64},x::StrideOneVector{Float64}) = eval_constraint(model,c,x)
con_jac!(jac::StrideOneVector{Float64},
x::StrideOneVector{Float64})=eval_constraint_jacobian(model,jac,x)
lag_hess!(hess::StrideOneVector{Float64},x::StrideOneVector{Float64},l::StrideOneVector{Float64},
sig::Float64) = eval_hessian_lagrangian(model,hess,x,obj_scale*sig,l)
hess_sparsity!(I,J)= hessian_lagrangian_structure(model,I,J)
jac_sparsity!(I,J) = jacobian_structure(model,I,J)
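
This hunk narrows the callback signatures from AbstractArray{Float64,1} to StrideOneVector{Float64}, in line with the commit note about avoiding allocations by forcing specialization. A minimal sketch of what such a stride-one union alias can look like follows; the alias name and exact definition here are assumptions, and MadNLP's actual StrideOneVector may differ.

# Sketch only: a union of dense vectors and contiguous (stride-one) views.
const MyStrideOneVector{T} = Union{
    Vector{T},
    SubArray{T,1,Vector{T},Tuple{UnitRange{Int}},true},
}

# Annotating a callback with the narrower union lets Julia compile a
# specialized method for the containers the solver actually passes,
# rather than a generic AbstractArray method.
my_obj(x::MyStrideOneVector{Float64}) = sum(abs2, x)

my_obj(ones(4))               # plain Vector{Float64}
my_obj(view(ones(6), 2:5))    # contiguous SubArray of a Vector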
77 changes: 56 additions & 21 deletions src/Interfaces/NLPModels_interface.jl
@@ -1,43 +1,78 @@
# MadNLP.jl
# Created by Sungho Shin (sungho.shin@wisc.edu)

NonlinearProgram(model::AbstractNLPModel) = NonlinearProgram(
model.meta.nvar,model.meta.ncon,model.meta.nnzh,model.meta.nnzj,
0.,model.meta.x0,zeros(model.meta.ncon),model.meta.y0,
zeros(model.meta.nvar),zeros(model.meta.nvar),
model.meta.lvar,model.meta.uvar,model.meta.lcon,model.meta.ucon,
model.meta.minimize ? (x)->obj(model,x) : (x)->-obj(model,x),
model.meta.minimize ? (f,x)->grad!(model,x,f) : (f,x)->(grad!(model,x,f);f.*=-1.),
(c,x)->cons!(model,x,c),
(jac,x)->jac_coord!(model,x,jac),
model.meta.minimize ? (hess,x,l,sig)->hess_coord!(model,x,l,hess;obj_weight= sig) :
(hess,x,l,sig)->hess_coord!(model,x,l,hess;obj_weight= -sig),
(I,J)->hess_structure!(model,I,J),(I,J)->jac_structure!(model,I,J),INITIAL,Dict{Symbol,Any}())

status_translator = Dict(
const STATUS_TRANSLATOR = Dict(
SOLVE_SUCCEEDED=>:first_order,
SOLVED_TO_ACCEPTABLE_LEVEL=>:acceptable,
INFEASIBLE_PROBLEM_DETECTED=>:infeasible,
USER_REQUESTED_STOP=>:user,
MAXIMUM_ITERATIONS_EXCEEDED=>:max_iter,
MAXIMUM_WALLTIME_EXCEEDED=>:max_time)

function madnlp(model::AbstractNLPModel;kwargs...)

nlp = NonlinearProgram(model)
function NonlinearProgram(model::AbstractNLPModel; buffered=true)
buffered ? _nlp_model_to_nonlinear_program_buffered(model) : _nlp_model_to_nonlinear_program(model)
end

function _nlp_model_to_nonlinear_program(model)
return NonlinearProgram(
model.meta.nvar,model.meta.ncon,model.meta.nnzh,model.meta.nnzj,
0.,model.meta.x0,zeros(model.meta.ncon),model.meta.y0,
zeros(model.meta.nvar),zeros(model.meta.nvar),
model.meta.lvar,model.meta.uvar,model.meta.lcon,model.meta.ucon,
model.meta.minimize ? (x)->obj(model,x) : (x)->-obj(model,x),
model.meta.minimize ? (f,x)->grad!(model,x,f) : (f,x)->(grad!(model,x,f);f.*=-1.),
(c,x)->cons!(model,x,c),
(jac,x)->jac_coord!(model,x,jac),
model.meta.minimize ? (hess,x,l,sig)->hess_coord!(model,x,l,hess;obj_weight= sig) :
(hess,x,l,sig)->hess_coord!(model,x,l,hess;obj_weight= -sig),
(I,J)->hess_structure!(model,I,J),(I,J)->jac_structure!(model,I,J),INITIAL,Dict{Symbol,Any}())
end

function _nlp_model_to_nonlinear_program_buffered(model)
# buffers
xb = Vector{Float64}(undef,model.meta.nvar)
fb = Vector{Float64}(undef,model.meta.nvar)
cb = Vector{Float64}(undef,model.meta.ncon)
lb = cb
jacb= Vector{Float64}(undef,model.meta.nnzj)
hessb = Vector{Float64}(undef,model.meta.nnzh)

return NonlinearProgram(
model.meta.nvar,model.meta.ncon,model.meta.nnzh,model.meta.nnzj,
0.,model.meta.x0,zeros(model.meta.ncon),model.meta.y0,
zeros(model.meta.nvar),zeros(model.meta.nvar),
model.meta.lvar,model.meta.uvar,model.meta.lcon,model.meta.ucon,
model.meta.minimize ?
(x)->obj(model,copyto!(xb,x)) : (x)->-obj(model,copyto!(xb,x)),
model.meta.minimize ?
(f,x)->(grad!(model,copyto!(xb,x),fb);copyto!(f,fb)) :
(f,x)->(grad!(model,copyto!(xb,x),fb);copyto!(f,fb);f.*=-1.),
(c,x)->(cons!(model,copyto!(xb,x),cb);copyto!(c,cb)),
(jac,x)->(jac_coord!(model,copyto!(xb,x),jacb);copyto!(jac,jacb)),
model.meta.minimize ?
(hess,x,l,sig)->(hess_coord!(model,copyto!(xb,x),copyto!(lb,l),hessb;obj_weight= sig);
copyto!(hess,hessb)) :
(hess,x,l,sig)->(hess_coord!(model,copyto!(xb,x),copyto!(lb,l),hessb;obj_weight= -sig);
copyto!(hess,hessb)),
(I,J)->hess_structure!(model,I,J),(I,J)->jac_structure!(model,I,J),INITIAL,Dict{Symbol,Any}())
end


function madnlp(model::AbstractNLPModel;buffered=true, kwargs...)

nlp = NonlinearProgram(model;buffered=buffered)
ips = Solver(nlp;kwargs...)
# seems that some CUTEst model requires this
ips.f.=0
ips.f.=0
ips.c.=0
ips.jac.=0
ips.hess.=0
initialize!(ips.kkt)
ips.zl.=0
ips.zu.=0
# ------------------------------------------
optimize!(ips)

return GenericExecutionStats(
haskey(status_translator,nlp.status) ? status_translator[nlp.status] : :unknown,
haskey(STATUS_TRANSLATOR,nlp.status) ? STATUS_TRANSLATOR[nlp.status] : :unknown,
model,solution=nlp.x,
objective=nlp.obj_val, dual_feas=ips.inf_du, iter=ips.cnt.k,
primal_feas=ips.inf_pr, elapsed_time=ips.cnt.total_time, multipliers=nlp.l,
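
The new madnlp(model::AbstractNLPModel; buffered=true, kwargs...) entry point copies the solver's inputs into preallocated buffers before calling the NLPModels evaluators, which is what the buffered bullet in the commit message refers to. A minimal usage sketch, assuming an AbstractNLPModel built with ADNLPModels (any NLPModels-compatible model works the same way):

using ADNLPModels, MadNLP

# Rosenbrock test problem wrapped as an AbstractNLPModel.
nlp = ADNLPModel(x -> (x[1] - 1.0)^2 + 100.0 * (x[2] - x[1]^2)^2, [-1.2, 1.0])

stats = madnlp(nlp)                      # default: buffered callbacks
stats_raw = madnlp(nlp; buffered=false)  # pass solver vectors to the evaluators directly
println(stats.status)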
4 changes: 3 additions & 1 deletion src/MadNLP.jl
@@ -28,10 +28,12 @@ introduce() = "MadNLP version v$(version())"
# Linear solver dependencies
include("enums.jl")
include("utils.jl")
include("options.jl")
include("nonlinearprogram.jl")
include("matrixtools.jl")
include(joinpath("LinearSolvers","linearsolvers.jl"))
include(joinpath("interiorpointsolver.jl"))
include("kktsystem.jl")
include("interiorpointsolver.jl")
include(joinpath("Interfaces","interfaces.jl"))

# Initialize
5 changes: 5 additions & 0 deletions src/enums.jl
@@ -19,6 +19,11 @@
INERTIA_BASED = 2,
INERTIA_FREE = 3)

@enum(KKTLinearSystem::Int,
SPARSE_KKT_SYSTEM = 1,
SPARSE_UNREDUCED_KKT_SYSTEM = 2,
DENSE_KKT_SYSTEM = 3)

@enum(Status::Int,
SOLVE_SUCCEEDED = 1,
SOLVED_TO_ACCEPTABLE_LEVEL = 2,
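
The KKTLinearSystem enum added here gives a switch among sparse reduced, sparse unreduced, and dense KKT formulations. Assuming the choice is exposed as a solver option, a usage sketch could look like the line below; the kkt_system keyword name is an assumption for illustration, not confirmed by this diff.

using MadNLP

# Hypothetical option usage; reuses the `nlp` model from the earlier sketch.
# The `kkt_system` keyword name is an assumption, not confirmed by this diff.
madnlp(nlp; kkt_system=MadNLP.SPARSE_UNREDUCED_KKT_SYSTEM)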
(Diffs for the remaining changed files are not shown.)
