Add interface for custom scaling (#152)
frapac committed Mar 25, 2022
1 parent 653270a commit 9c07280
Showing 5 changed files with 82 additions and 21 deletions.
2 changes: 1 addition & 1 deletion lib/MadNLPGPU/src/MadNLPGPU.jl
@@ -12,7 +12,7 @@ import MadNLP

 import MadNLP:
     @kwdef, Logger, @debug, @warn, @error,
-    AbstractOptions, AbstractLinearSolver, set_options!, MadNLPLapackCPU,
+    AbstractOptions, AbstractLinearSolver, AbstractNLPModel, set_options!, MadNLPLapackCPU,
     SymbolicException,FactorizationException,SolveException,InertiaException,
     introduce, factorize!, solve!, improve!, is_inertia, inertia, tril_to_full!

9 changes: 7 additions & 2 deletions lib/MadNLPGPU/src/kernels.jl
@@ -31,11 +31,16 @@ end
 MadNLP.is_valid(src::CuArray) = true
 
 # Constraint scaling
-function MadNLP.set_con_scale!(con_scale::AbstractVector, jac::CuMatrix, nlp_scaling_max_gradient)
+function MadNLP.scale_constraints!(
+    nlp::AbstractNLPModel,
+    con_scale::AbstractVector,
+    jac::CuMatrix;
+    max_gradient=1e-8,
+)
     # Compute reduction on the GPU with built-in CUDA.jl function
     d_con_scale = maximum(abs, jac, dims=2)
     copyto!(con_scale, d_con_scale)
-    con_scale .= min.(1.0, nlp_scaling_max_gradient ./ con_scale)
+    con_scale .= min.(1.0, max_gradient ./ con_scale)
 end
 
 @kernel function _treat_fixed_variable_kernell!(dest, ind_fixed)
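
A minimal sketch of the row-wise reduction and clipping the new GPU method performs, run here on a plain CPU `Matrix` (the `CuMatrix` method above uses the same `maximum` reduction through CUDA.jl). The Jacobian entries and the `max_gradient` value are made up for illustration:

# Illustrative values; not part of the commit.
jac = [500.0 -20.0;
         0.0   3.0]
d_con_scale = maximum(abs, jac, dims=2)   # largest |entry| per row: [500.0; 3.0]
con_scale = vec(d_con_scale)
max_gradient = 100.0                      # stands in for nlp_scaling_max_gradient
con_scale .= min.(1.0, max_gradient ./ con_scale)
# con_scale == [0.2, 1.0]: rows with gradients above the cap are shrunk,
# well-scaled rows keep a scale of one.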
1 change: 1 addition & 0 deletions src/MadNLP.jl
@@ -31,6 +31,7 @@ include("enums.jl")
include("utils.jl")
include("options.jl")
include("matrixtools.jl")
include("scaling.jl")
include(joinpath("LinearSolvers","linearsolvers.jl"))
include(joinpath("KKT", "KKTsystem.jl"))
include("interiorpointsolver.jl")
20 changes: 2 additions & 18 deletions src/interiorpointsolver.jl
@@ -537,7 +537,7 @@ function initialize!(ips::AbstractInteriorPointSolver)
     compress_jacobian!(ips.kkt)
     if (ips.m > 0) && ips.opt.nlp_scaling
         jac = get_raw_jacobian(ips.kkt)
-        set_con_scale!(ips.con_scale, jac, ips.opt.nlp_scaling_max_gradient)
+        scale_constraints!(ips.nlp, ips.con_scale, jac; max_gradient=ips.opt.nlp_scaling_max_gradient)
         set_jacobian_scaling!(ips.kkt, ips.con_scale)
         ips.l./=ips.con_scale
     end
@@ -547,7 +547,7 @@
     eval_grad_f_wrapper!(ips, ips.f,ips.x)
     @trace(ips.logger,"Computing objective scaling.")
     if ips.opt.nlp_scaling
-        ips.obj_scale[] = min(1,ips.opt.nlp_scaling_max_gradient/norm(ips.f,Inf))
+        ips.obj_scale[] = scale_objective(ips.nlp, ips.f; max_gradient=ips.opt.nlp_scaling_max_gradient)
         ips.f.*=ips.obj_scale[]
     end

@@ -1505,22 +1505,6 @@ function initialize_variables!(x,xl,xu,bound_push,bound_fac)
     end
 end
 
-function set_con_scale!(con_scale::AbstractVector, jac::SparseMatrixCOO, nlp_scaling_max_gradient)
-    @simd for i in 1:nnz(jac)
-        row = @inbounds jac.I[i]
-        @inbounds con_scale[row] = max(con_scale[row], abs(jac.V[i]))
-    end
-    con_scale .= min.(1.0, nlp_scaling_max_gradient ./ con_scale)
-end
-function set_con_scale!(con_scale::AbstractVector, jac::Matrix, nlp_scaling_max_gradient)
-    for row in 1:size(jac, 1)
-        for col in 1:size(jac, 2)
-            @inbounds con_scale[row] = max(con_scale[row], abs(jac[row, col]))
-        end
-    end
-    con_scale .= min.(1.0, nlp_scaling_max_gradient ./ con_scale)
-end
-
 function adjust_boundary!(x_lr,xl_r,x_ur,xu_r,mu)
     adjusted = 0
     c1 = eps(Float64)*mu
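
For reference, the removed inline expression and the new `scale_objective` default implement the same Ipopt-style rule, obj_scale = min(1, max_gradient / ||grad f||_inf). A minimal self-contained sketch with illustrative numbers:

using LinearAlgebra

grad = [1.0e4, -2.0e3, 5.0e2]   # hypothetical objective gradient at the start point
max_gradient = 100.0            # stands in for ips.opt.nlp_scaling_max_gradient
obj_scale = min(1, max_gradient / norm(grad, Inf))
# norm(grad, Inf) == 1.0e4, so obj_scale == 0.01 and the rescaled gradient's
# largest entry becomes exactly max_gradient.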
71 changes: 71 additions & 0 deletions src/scaling.jl
@@ -0,0 +1,71 @@
+
+function _set_scaling!(con_scale::AbstractVector, jac::SparseMatrixCOO)
+    @simd for i in 1:nnz(jac)
+        row = @inbounds jac.I[i]
+        @inbounds con_scale[row] = max(con_scale[row], abs(jac.V[i]))
+    end
+end
+function _set_scaling!(con_scale::AbstractVector, jac::Matrix)
+    for row in 1:size(jac, 1)
+        for col in 1:size(jac, 2)
+            @inbounds con_scale[row] = max(con_scale[row], abs(jac[row, col]))
+        end
+    end
+end
+
+"""
+    scale_constraints!(
+        nlp::AbstractNLPModel,
+        con_scale::AbstractVector,
+        jac::AbstractMatrix;
+        max_gradient=1e-8,
+    )
+
+Compute the scaling of the constraints associated with the nonlinear
+model `nlp`. By default, Ipopt's scaling is applied. Users can write
+their own method to scale any custom `AbstractNLPModel` appropriately.
+
+### Notes
+This function assumes that the Jacobian `jac` has been evaluated before
+the call.
+"""
+function scale_constraints!(
+    nlp::AbstractNLPModel,
+    con_scale::AbstractVector,
+    jac::AbstractMatrix;
+    max_gradient=1e-8,
+)
+    fill!(con_scale, 0.0)
+    _set_scaling!(con_scale, jac)
+    con_scale .= min.(1.0, max_gradient ./ con_scale)
+end
+
+"""
+    scale_objective(
+        nlp::AbstractNLPModel,
+        grad::AbstractVector;
+        max_gradient=1e-8,
+    )
+
+Compute the scaling of the objective associated with the nonlinear
+model `nlp`. By default, Ipopt's scaling is applied. Users can write
+their own method to scale the objective of any custom
+`AbstractNLPModel` appropriately.
+
+### Notes
+This function assumes that the gradient `grad` has been evaluated before
+the call.
+"""
+function scale_objective(
+    nlp::AbstractNLPModel,
+    grad::AbstractVector;
+    max_gradient=1e-8,
+)
+    return min(1, max_gradient / norm(grad, Inf))
+end
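
The point of the new interface is that both functions dispatch on the model type, so a custom `AbstractNLPModel` can override Ipopt's rule. A hypothetical sketch, assuming NLPModels' parametric `AbstractNLPModel{T, S}`; the `MyModel` type and the no-op scaling chosen here are invented for illustration, not part of this commit:

import MadNLP
import NLPModels: AbstractNLPModel

# Hypothetical user-defined model; fields omitted for brevity.
struct MyModel <: AbstractNLPModel{Float64, Vector{Float64}} end

# Opt out of constraint scaling: keep every row scale at one.
function MadNLP.scale_constraints!(
    nlp::MyModel,
    con_scale::AbstractVector,
    jac::AbstractMatrix;
    max_gradient=1e-8,
)
    fill!(con_scale, 1.0)
    return con_scale
end

# Keep the objective unscaled as well.
MadNLP.scale_objective(nlp::MyModel, grad::AbstractVector; max_gradient=1e-8) = 1.0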
