From ff501e82f6b3963d349c59d66ced77c48b510ee1 Mon Sep 17 00:00:00 2001 From: odow Date: Sat, 25 Jan 2025 13:18:42 +1300 Subject: [PATCH 1/7] Add assert_is_solved_and_feasible --- .../algorithms/benders_decomposition.jl | 20 ++--- .../cutting_stock_column_generation.jl | 10 +-- docs/src/tutorials/algorithms/parallelism.md | 4 +- .../algorithms/tsp_lazy_constraints.jl | 6 +- .../applications/optimal_power_flow.jl | 8 +- .../tutorials/applications/power_systems.jl | 6 +- .../applications/two_stage_stochastic.jl | 6 +- .../tutorials/conic/arbitrary_precision.jl | 6 +- docs/src/tutorials/conic/dualization.jl | 8 +- docs/src/tutorials/conic/ellipse_approx.jl | 6 +- docs/src/tutorials/conic/ellipse_fitting.jl | 4 +- docs/src/tutorials/conic/experiment_design.jl | 6 +- .../tutorials/conic/logistic_regression.jl | 4 +- docs/src/tutorials/conic/min_ellipse.jl | 2 +- .../tutorials/conic/quantum_discrimination.jl | 4 +- docs/src/tutorials/conic/simple_examples.jl | 20 ++--- docs/src/tutorials/conic/start_values.jl | 2 +- docs/src/tutorials/conic/tips_and_tricks.jl | 28 +++---- .../tutorials/getting_started/debugging.jl | 2 +- .../design_patterns_for_larger_models.jl | 12 +-- .../getting_started_with_JuMP.jl | 2 +- .../getting_started_with_data_and_plotting.jl | 2 +- .../tutorials/getting_started/tolerances.jl | 12 +-- docs/src/tutorials/linear/basis.jl | 4 +- docs/src/tutorials/linear/callbacks.jl | 6 +- docs/src/tutorials/linear/cannery.jl | 2 +- .../linear/constraint_programming.jl | 16 ++-- docs/src/tutorials/linear/diet.jl | 2 +- .../src/tutorials/linear/facility_location.jl | 4 +- docs/src/tutorials/linear/factory_schedule.jl | 2 +- docs/src/tutorials/linear/finance.jl | 4 +- .../tutorials/linear/geographic_clustering.jl | 2 +- docs/src/tutorials/linear/knapsack.jl | 4 +- docs/src/tutorials/linear/lp_sensitivity.jl | 2 +- docs/src/tutorials/linear/mip_duality.jl | 8 +- docs/src/tutorials/linear/multi.jl | 2 +- .../linear/multi_commodity_network.jl | 2 +- .../linear/multi_objective_examples.jl | 6 +- .../linear/multi_objective_knapsack.jl | 2 +- .../tutorials/linear/multiple_solutions.jl | 4 +- docs/src/tutorials/linear/n-queens.jl | 2 +- docs/src/tutorials/linear/network_flows.jl | 6 +- docs/src/tutorials/linear/piecewise_linear.jl | 10 +-- docs/src/tutorials/linear/sudoku.jl | 4 +- docs/src/tutorials/linear/transp.jl | 2 +- docs/src/tutorials/nonlinear/classifiers.jl | 6 +- .../tutorials/nonlinear/complementarity.jl | 10 +-- .../tutorials/nonlinear/nested_problems.jl | 6 +- docs/src/tutorials/nonlinear/operator_ad.jl | 8 +- docs/src/tutorials/nonlinear/portfolio.jl | 2 +- .../tutorials/nonlinear/querying_hessians.jl | 2 +- .../src/tutorials/nonlinear/rocket_control.jl | 2 +- .../tutorials/nonlinear/simple_examples.jl | 10 +-- .../space_shuttle_reentry_trajectory.jl | 2 +- .../tutorials/nonlinear/tips_and_tricks.jl | 4 +- .../nonlinear/user_defined_hessians.jl | 2 +- .../transitioning_from_matlab.jl | 9 +-- src/optimizer_interface.jl | 74 +++++++++++++++++-- 58 files changed, 236 insertions(+), 177 deletions(-) diff --git a/docs/src/tutorials/algorithms/benders_decomposition.jl b/docs/src/tutorials/algorithms/benders_decomposition.jl index bca7412ccf7..bec989f7537 100644 --- a/docs/src/tutorials/algorithms/benders_decomposition.jl +++ b/docs/src/tutorials/algorithms/benders_decomposition.jl @@ -158,7 +158,7 @@ set_silent(model) @constraint(model, [i = 2:n-1], sum(y[i, :]) == sum(y[:, i])) @objective(model, Min, 0.1 * sum(x) - sum(y[1, :])) optimize!(model) -Test.@test 
is_solved_and_feasible(model) #src +assert_is_solved_and_feasible(model) #src solution_summary(model) # The optimal objective value is -5.1: @@ -210,7 +210,7 @@ function solve_subproblem(x_bar) @constraint(model, [i = 2:n-1], sum(y[i, :]) == sum(y[:, i])) @objective(model, Min, -sum(y[1, :])) optimize!(model) - @assert is_solved_and_feasible(model; dual = true) + assert_is_solved_and_feasible(model; dual = true) return (obj = objective_value(model), y = value.(y), π = reduced_cost.(x)) end @@ -241,7 +241,7 @@ ABSOLUTE_OPTIMALITY_GAP = 1e-6 println("Iteration Lower Bound Upper Bound Gap") for k in 1:MAXIMUM_ITERATIONS optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) lower_bound = objective_value(model) x_k = value.(x) ret = solve_subproblem(x_k) @@ -259,7 +259,7 @@ end # Finally, we can obtain the optimal solution: optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) x_optimal = value.(x) optimal_ret = solve_subproblem(x_optimal) iterative_solution = optimal_flows(optimal_ret.y) @@ -323,7 +323,7 @@ set_attribute(lazy_model, MOI.LazyConstraintCallback(), my_callback) # Now when we optimize!, our callback is run: optimize!(lazy_model) -@assert is_solved_and_feasible(lazy_model) +assert_is_solved_and_feasible(lazy_model) # For this model, the callback algorithm required more solves of the subproblem: @@ -378,7 +378,7 @@ subproblem function solve_subproblem(model, x) fix.(model[:x_copy], x) optimize!(model) - @assert is_solved_and_feasible(model; dual = true) + assert_is_solved_and_feasible(model; dual = true) return ( obj = objective_value(model), y = value.(model[:y]), @@ -391,7 +391,7 @@ end println("Iteration Lower Bound Upper Bound Gap") for k in 1:MAXIMUM_ITERATIONS optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) lower_bound = objective_value(model) x_k = value.(x) ret = solve_subproblem(subproblem, x_k) @@ -409,7 +409,7 @@ end # Finally, we can obtain the optimal solution: optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) x_optimal = value.(x) optimal_ret = solve_subproblem(subproblem, x_optimal) inplace_solution = optimal_flows(optimal_ret.y) @@ -486,7 +486,7 @@ end println("Iteration Lower Bound Upper Bound Gap") for k in 1:MAXIMUM_ITERATIONS optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) lower_bound = objective_value(model) x_k = value.(x) ret = solve_subproblem_with_feasibility(subproblem, x_k) @@ -510,7 +510,7 @@ end # Finally, we can obtain the optimal solution: optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) x_optimal = value.(x) optimal_ret = solve_subproblem(subproblem, x_optimal) feasible_inplace_solution = optimal_flows(optimal_ret.y) diff --git a/docs/src/tutorials/algorithms/cutting_stock_column_generation.jl b/docs/src/tutorials/algorithms/cutting_stock_column_generation.jl index 9e7809772b4..ea1df518bfc 100644 --- a/docs/src/tutorials/algorithms/cutting_stock_column_generation.jl +++ b/docs/src/tutorials/algorithms/cutting_stock_column_generation.jl @@ -235,7 +235,7 @@ set_silent(model) @objective(model, Min, sum(x)) @constraint(model, demand[i in 1:I], patterns[i]' * x >= data.pieces[i].d) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) solution_summary(model) # This solution requires 421 rolls. 
This solution is sub-optimal because the
@@ -253,7 +253,7 @@ solution_summary(model)
 unset_integer.(x)
 optimize!(model)
-@assert is_solved_and_feasible(model; dual = true)
+assert_is_solved_and_feasible(model; dual = true)
 π_13 = dual(demand[13])
 
 # Using the economic interpretation of the dual variable, we can say that a one
@@ -284,7 +284,7 @@ function solve_pricing(data::Data, π::Vector{Float64})
     @constraint(model, sum(data.pieces[i].w * y[i] for i in 1:I) <= data.W)
     @objective(model, Max, sum(π[i] * y[i] for i in 1:I))
     optimize!(model)
-    @assert is_solved_and_feasible(model)
+    assert_is_solved_and_feasible(model)
     number_of_rolls_saved = objective_value(model)
     if number_of_rolls_saved > 1 + 1e-8
         ## Benefit of pattern is more than the cost of a new roll plus some
@@ -315,7 +315,7 @@ solve_pricing(data, zeros(I))
 while true
     ## Solve the linear relaxation
     optimize!(model)
-    @assert is_solved_and_feasible(model; dual = true)
+    assert_is_solved_and_feasible(model; dual = true)
     ## Obtain a new dual vector
     π = dual.(demand)
     ## Solve the pricing problem
@@ -366,7 +366,7 @@ sum(ceil.(Int, solution.rolls))
 set_integer.(x)
 optimize!(model)
-@assert is_solved_and_feasible(model)
+assert_is_solved_and_feasible(model)
 solution = DataFrames.DataFrame([
     (pattern = p, rolls = value(x_p)) for (p, x_p) in enumerate(x)
 ])
diff --git a/docs/src/tutorials/algorithms/parallelism.md b/docs/src/tutorials/algorithms/parallelism.md
index d38f0f93539..5a1193bc783 100644
--- a/docs/src/tutorials/algorithms/parallelism.md
+++ b/docs/src/tutorials/algorithms/parallelism.md
@@ -141,7 +141,7 @@ julia> function a_good_way_to_use_threading()
               @variable(model, x >= i)
               @objective(model, Min, x)
               optimize!(model)
-              @assert is_solved_and_feasible(model)
+              assert_is_solved_and_feasible(model)
               Threads.lock(my_lock) do
                   push!(solutions, i => objective_value(model))
               end
@@ -357,7 +357,7 @@ julia> Distributed.@everywhere begin
               @objective(model, Min, x)
               set_lower_bound(x, i)
               optimize!(model)
-              @assert is_solved_and_feasible(sudoku)
+              assert_is_solved_and_feasible(model)
               return objective_value(model)
           end
       end
diff --git a/docs/src/tutorials/algorithms/tsp_lazy_constraints.jl b/docs/src/tutorials/algorithms/tsp_lazy_constraints.jl
index 342f749fcd1..f5afb9bbae3 100644
--- a/docs/src/tutorials/algorithms/tsp_lazy_constraints.jl
+++ b/docs/src/tutorials/algorithms/tsp_lazy_constraints.jl
@@ -201,7 +201,7 @@ subtour(x::AbstractMatrix{VariableRef}) = subtour(value.(x))
 iterative_model = build_tsp_model(d, n)
 optimize!(iterative_model)
-@assert is_solved_and_feasible(iterative_model)
+assert_is_solved_and_feasible(iterative_model)
 time_iterated = solve_time(iterative_model)
 cycle = subtour(iterative_model[:x])
 while 1 < length(cycle) < n
@@ -212,7 +212,7 @@ while 1 < length(cycle) < n
         sum(iterative_model[:x][i, j] for (i, j) in S) <= length(cycle) - 1,
     )
     optimize!(iterative_model)
-    @assert is_solved_and_feasible(iterative_model)
+    assert_is_solved_and_feasible(iterative_model)
     global time_iterated += solve_time(iterative_model)
     global cycle = subtour(iterative_model[:x])
 end
@@ -270,7 +270,7 @@ optimize!(lazy_model)
 
 #-
 
-@assert is_solved_and_feasible(lazy_model)
+assert_is_solved_and_feasible(lazy_model)
 objective_value(lazy_model)
 
 #-
 
diff --git a/docs/src/tutorials/applications/optimal_power_flow.jl b/docs/src/tutorials/applications/optimal_power_flow.jl
index 0284b187062..4985fa42f63 100644
--- a/docs/src/tutorials/applications/optimal_power_flow.jl
+++ b/docs/src/tutorials/applications/optimal_power_flow.jl
@@ -25,7 +25,7 @@
 # This 
tutorial takes a matrix-oriented approach focused on network nodes # that simplifies the construction of semidefinite programs. # Another approach is to formulate the problem focusing on network lines -# (known as a _branch model_) where it is easier to work with flow +# (known as a _branch model_) where it is easier to work with flow # constraints. A general approach is provided by # [PowerModels.jl](https://lanl-ansi.github.io/PowerModels.jl/stable/), # an open-source framework to a broad range of power flow model formulations @@ -137,7 +137,7 @@ println("Objective value (basic lower bound) : $basic_lower_bound") @constraint(model, sum(P_G) >= sum(P_Demand)) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) better_lower_bound = round(objective_value(model); digits = 2) println("Objective value (better lower bound): $better_lower_bound") @@ -281,7 +281,7 @@ P_G = real(S_G) # We're finally ready to solve our nonlinear AC-OPF problem: optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) Test.@test isapprox(objective_value(model), 3087.84; atol = 1e-2) #src solution_summary(model) @@ -420,7 +420,7 @@ optimize!(model) #- -Test.@test is_solved_and_feasible(model; allow_almost = true) +assert_is_solved_and_feasible(model; allow_almost = true) sdp_relaxation_lower_bound = round(objective_value(model); digits = 2) Test.@test isapprox(sdp_relaxation_lower_bound, 2753.04; rtol = 1e-3) #src println( diff --git a/docs/src/tutorials/applications/power_systems.jl b/docs/src/tutorials/applications/power_systems.jl index 9cd58397271..339136d4af0 100644 --- a/docs/src/tutorials/applications/power_systems.jl +++ b/docs/src/tutorials/applications/power_systems.jl @@ -115,7 +115,7 @@ function solve_economic_dispatch(generators::Vector, wind, scenario) @constraint(model, sum(g[i] for i in 1:N) + w == scenario.demand) ## Solve statement optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) ## return the optimal value of the objective function and its minimizers return ( g = value.(g), @@ -217,7 +217,7 @@ function solve_economic_dispatch_inplace( wind.variable_cost * w, ) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) push!(obj_out, objective_value(model)) push!(w_out, value(w)) push!(g1_out, value(g[1])) @@ -528,7 +528,7 @@ function solve_nonlinear_economic_dispatch( ) @constraint(model, sum(g[i] for i in 1:N) + sqrt(w) == scenario.demand) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) return ( g = value.(g), w = value(w), diff --git a/docs/src/tutorials/applications/two_stage_stochastic.jl b/docs/src/tutorials/applications/two_stage_stochastic.jl index ac7a8bdce70..48229ceaa95 100644 --- a/docs/src/tutorials/applications/two_stage_stochastic.jl +++ b/docs/src/tutorials/applications/two_stage_stochastic.jl @@ -86,7 +86,7 @@ set_silent(model) @expression(model, z[ω in Ω], 5y[ω] - 0.1 * (x - y[ω])) @objective(model, Max, -2x + sum(P[ω] * z[ω] for ω in Ω)) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) solution_summary(model) # The optimal number of pies to make is: @@ -159,7 +159,7 @@ function CVaR(Z::Vector{Float64}, P::Vector{Float64}; γ::Float64) @constraint(model, [i in 1:N], z[i] >= ξ - Z[i]) @objective(model, Max, ξ - 1 / γ * sum(P[i] * z[i] for i in 1:N)) optimize!(model) - @assert is_solved_and_feasible(model) + 
assert_is_solved_and_feasible(model) return objective_value(model) end @@ -218,7 +218,7 @@ set_silent(model) @constraint(model, [ω in Ω], z[ω] >= ξ - Z[ω]) @objective(model, Max, -2x + ξ - 1 / γ * sum(P[ω] * z[ω] for ω in Ω)) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) # When ``\gamma = 0.4``, the optimal number of pies to bake is: diff --git a/docs/src/tutorials/conic/arbitrary_precision.jl b/docs/src/tutorials/conic/arbitrary_precision.jl index 834383d3793..710f305e47e 100644 --- a/docs/src/tutorials/conic/arbitrary_precision.jl +++ b/docs/src/tutorials/conic/arbitrary_precision.jl @@ -78,7 +78,7 @@ print(model) # Let's solve and inspect the solution: optimize!(model) -@assert is_solved_and_feasible(model; dual = true) +assert_is_solved_and_feasible(model; dual = true) solution_summary(model) # The value of each decision variable is a `BigFloat`: @@ -103,7 +103,7 @@ value.(x) .- [3 // 7, 3 // 14] set_attribute(model, "tol_gap_abs", 1e-32) set_attribute(model, "tol_gap_rel", 1e-32) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value.(x) .- [3 // 7, 3 // 14] # ## Rational arithmetic @@ -145,7 +145,7 @@ print(model) # Let's solve and inspect the solution: optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) solution_summary(model) # The optimal values are given in exact rational arithmetic: diff --git a/docs/src/tutorials/conic/dualization.jl b/docs/src/tutorials/conic/dualization.jl index 798ffcddf9d..0352b66bc56 100644 --- a/docs/src/tutorials/conic/dualization.jl +++ b/docs/src/tutorials/conic/dualization.jl @@ -133,7 +133,7 @@ print(model_dual) set_optimizer(model_primal, SCS.Optimizer) optimize!(model_primal) -@assert is_solved_and_feasible(model_primal; dual = true) +assert_is_solved_and_feasible(model_primal; dual = true) # (There are five rows in the constraint matrix because SCS expects problems in # geometric conic form, and so JuMP has reformulated the `X, PSD` variable @@ -157,7 +157,7 @@ objective_value(model_primal) set_optimizer(model_dual, SCS.Optimizer) optimize!(model_dual) -@assert is_solved_and_feasible(model_dual; dual = true) +assert_is_solved_and_feasible(model_dual; dual = true) # and the solution we obtain is: @@ -187,7 +187,7 @@ objective_value(model_dual) set_optimizer(model_primal, Dualization.dual_optimizer(SCS.Optimizer)) optimize!(model_primal) -@assert is_solved_and_feasible(model_primal; dual = true) +assert_is_solved_and_feasible(model_primal; dual = true) # The performance is the same as if we solved `model_dual`, and the correct # solution is returned to `X`: @@ -203,7 +203,7 @@ dual.(primal_c) set_optimizer(model_dual, Dualization.dual_optimizer(SCS.Optimizer)) optimize!(model_dual) -@assert is_solved_and_feasible(model_dual; dual = true) +assert_is_solved_and_feasible(model_dual; dual = true) #- diff --git a/docs/src/tutorials/conic/ellipse_approx.jl b/docs/src/tutorials/conic/ellipse_approx.jl index 5b2ef3efc3e..8ce31604038 100644 --- a/docs/src/tutorials/conic/ellipse_approx.jl +++ b/docs/src/tutorials/conic/ellipse_approx.jl @@ -110,7 +110,7 @@ m, n = size(S) @constraint(model, [t; vec(Z)] in MOI.RootDetConeSquare(n)) @objective(model, Max, t) optimize!(model) -Test.@test is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) solution_summary(model) # ## Results @@ -212,7 +212,7 @@ f = [1 - S[i, :]' * Z * S[i, :] + 2 * S[i, :]' * z - s for i in 1:m] ## The former @objective(model, Max, t) 
@objective(model, Max, 1 * t + 0) optimize!(model) -Test.@test is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) Test.@test isapprox(D, value.(Z); atol = 1e-3) #src solve_time_1 = solve_time(model) @@ -235,7 +235,7 @@ print_active_bridges(model) remove_bridge(model, MOI.Bridges.Constraint.GeoMeanToPowerBridge) optimize!(model) -Test.@test is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) # This time, the solve took: diff --git a/docs/src/tutorials/conic/ellipse_fitting.jl b/docs/src/tutorials/conic/ellipse_fitting.jl index a2fc2c9f0a9..031d134605c 100644 --- a/docs/src/tutorials/conic/ellipse_fitting.jl +++ b/docs/src/tutorials/conic/ellipse_fitting.jl @@ -299,7 +299,7 @@ for (i, cluster) in enumerate(clusters) ) @objective(model, Min, ζ) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) Q, d, e = value.(model[:Q]), value.(model[:d]), value.(model[:e]) push!(ellipses_C1, Dict(:Q => Q, :d => d, :e => e)) end @@ -356,7 +356,7 @@ for (i, cluster) in enumerate(clusters) ) @objective(model, Min, ζ) optimize!(model) - @assert is_solved_and_feasible(model; allow_almost = true) + assert_is_solved_and_feasible(model; allow_almost = true) Q, d, e = value.(model[:Q]), value.(model[:d]), value.(model[:e]) push!(ellipses_C2, Dict(:Q => Q, :d => d, :e => e)) end diff --git a/docs/src/tutorials/conic/experiment_design.jl b/docs/src/tutorials/conic/experiment_design.jl index 7bf8c4356bc..4736eb375b3 100644 --- a/docs/src/tutorials/conic/experiment_design.jl +++ b/docs/src/tutorials/conic/experiment_design.jl @@ -136,7 +136,7 @@ for i in 1:q end @objective(aOpt, Min, sum(u)) optimize!(aOpt) -@assert is_solved_and_feasible(aOpt) +assert_is_solved_and_feasible(aOpt) objective_value(aOpt) #- @@ -178,7 +178,7 @@ set_silent(eOpt) @constraint(eOpt, sum(np) <= n) @objective(eOpt, Max, t) optimize!(eOpt) -@assert is_solved_and_feasible(eOpt) +assert_is_solved_and_feasible(eOpt) objective_value(eOpt) #- value.(np) @@ -209,7 +209,7 @@ set_silent(dOpt) E = V * LinearAlgebra.diagm(0 => np ./ n) * V' @constraint(dOpt, [t; 1; triangle_vec(E)] in MOI.LogDetConeTriangle(q)) optimize!(dOpt) -@assert is_solved_and_feasible(dOpt) +assert_is_solved_and_feasible(dOpt) objective_value(dOpt) #- value.(np) diff --git a/docs/src/tutorials/conic/logistic_regression.jl b/docs/src/tutorials/conic/logistic_regression.jl index e376f3413d0..7bf158b05af 100644 --- a/docs/src/tutorials/conic/logistic_regression.jl +++ b/docs/src/tutorials/conic/logistic_regression.jl @@ -188,7 +188,7 @@ model = build_logit_model(X, y, λ) set_optimizer(model, SCS.Optimizer) set_silent(model) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) #- @@ -231,7 +231,7 @@ sparse_model = build_sparse_logit_model(X, y, λ) set_optimizer(sparse_model, SCS.Optimizer) set_silent(sparse_model) optimize!(sparse_model) -@assert is_solved_and_feasible(sparse_model) +assert_is_solved_and_feasible(sparse_model) #- diff --git a/docs/src/tutorials/conic/min_ellipse.jl b/docs/src/tutorials/conic/min_ellipse.jl index 9cf1859c115..dc8fe7888bc 100644 --- a/docs/src/tutorials/conic/min_ellipse.jl +++ b/docs/src/tutorials/conic/min_ellipse.jl @@ -125,7 +125,7 @@ end # Now, solve the program: optimize!(model) -Test.@test is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) solution_summary(model) # ## Results diff --git a/docs/src/tutorials/conic/quantum_discrimination.jl b/docs/src/tutorials/conic/quantum_discrimination.jl index 
02ccba1e999..87d120d2075 100644 --- a/docs/src/tutorials/conic/quantum_discrimination.jl +++ b/docs/src/tutorials/conic/quantum_discrimination.jl @@ -98,7 +98,7 @@ E = [@variable(model, [1:d, 1:d] in HermitianPSDCone()) for i in 1:N] # Now we optimize: optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) solution_summary(model) # The probability of guessing correctly is: @@ -141,7 +141,7 @@ push!(E, E_N) # Then we can check that we get the same solution: optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) solution_summary(model) #- diff --git a/docs/src/tutorials/conic/simple_examples.jl b/docs/src/tutorials/conic/simple_examples.jl index ea06c1da0b3..2bdcffac2bc 100644 --- a/docs/src/tutorials/conic/simple_examples.jl +++ b/docs/src/tutorials/conic/simple_examples.jl @@ -66,7 +66,7 @@ function solve_max_cut_sdp(weights) @objective(model, Max, 0.25 * LinearAlgebra.dot(L, X)) @constraint(model, LinearAlgebra.diag(X) .== 1) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) V = svd_cholesky(value(X)) Random.seed!(N) r = rand(N) @@ -117,11 +117,11 @@ S, T = solve_max_cut_sdp([0 1 5 0; 1 0 0 9; 5 0 0 2; 0 9 2 0]) # ## Low-rank matrix completion -# The matrix completion problem seeks to find the missing entries of a matrix +# The matrix completion problem seeks to find the missing entries of a matrix # with a given (possibly random) subset of fixed entries, such that the completed # matrix has the lowest attainable rank. # -# For more details, see [RechtFazelParrilo2010](@cite). +# For more details, see [RechtFazelParrilo2010](@cite). function example_matrix_completion(; svdtol = 1e-6) rng = Random.MersenneTwister(1234) @@ -135,7 +135,7 @@ function example_matrix_completion(; svdtol = 1e-6) @constraint(model, [t; vec(X)] in MOI.NormNuclearCone(n, n)) @objective(model, Min, t) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) ## Return the approximate rank of the completed matrix to a given tolerance: return sum(LinearAlgebra.svdvals(value.(X)) .> svdtol) end @@ -163,7 +163,7 @@ function example_k_means_clustering() @constraint(model, [i = 1:m], sum(Z[i, :]) .== 1) @constraint(model, LinearAlgebra.tr(Z) == num_clusters) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) Z_val = value.(Z) current_cluster, visited = 0, Set{Int}() solution = [1, 1, 2, 1, 2, 2] #src @@ -216,12 +216,12 @@ function example_correlation_problem() @constraint(model, 0.4 <= ρ["B", "C"] <= 0.5) @objective(model, Max, ρ["A", "C"]) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) println("An upper bound for ρ_AC is $(value(ρ["A", "C"]))") Test.@test value(ρ["A", "C"]) ≈ 0.87195 atol = 1e-4 #src @objective(model, Min, ρ["A", "C"]) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) println("A lower bound for ρ_AC is $(value(ρ["A", "C"]))") Test.@test value(ρ["A", "C"]) ≈ -0.978 atol = 1e-3 #src return @@ -298,7 +298,7 @@ function example_minimum_distortion() fix(Q[1, 1], 0) @objective(model, Min, c²) optimize!(model) - Test.@test is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) Test.@test objective_value(model) ≈ 4 / 3 atol = 1e-4 ## Recover the minimal distorted embedding: X = [zeros(3) sqrt(value.(Q)[2:end, 2:end])] @@ -382,7 +382,7 @@ function example_theta_problem() J = ones(Int, 5, 5) 
@objective(model, Max, LinearAlgebra.dot(J, X)) optimize!(model) - Test.@test is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) Test.@test objective_value(model) ≈ sqrt(5) rtol = 1e-4 println("The Lovász number is: $(objective_value(model))") return @@ -413,7 +413,7 @@ function example_robust_uncertainty_sets() @constraint(model, [((1-ɛ)/ɛ) (u - μ)'; (u-μ) Σ] >= 0, PSDCone()) @objective(model, Max, c' * u) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) exact = μhat' * c + Γ1(𝛿 / 2, N) * LinearAlgebra.norm(c) + diff --git a/docs/src/tutorials/conic/start_values.jl b/docs/src/tutorials/conic/start_values.jl index e2b2975d979..3fa90967ae3 100644 --- a/docs/src/tutorials/conic/start_values.jl +++ b/docs/src/tutorials/conic/start_values.jl @@ -70,7 +70,7 @@ model = Model(SCS.Optimizer) @constraint(model, sum(x) <= 1) @objective(model, Max, sum(i * x[i] for i in 1:3)) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) # By looking at the log, we can see that SCS took 75 iterations to find the optimal # solution. Now we set the optimal solution as our starting point: diff --git a/docs/src/tutorials/conic/tips_and_tricks.jl b/docs/src/tutorials/conic/tips_and_tricks.jl index 20a27a94f1b..f4fbca4e028 100644 --- a/docs/src/tutorials/conic/tips_and_tricks.jl +++ b/docs/src/tutorials/conic/tips_and_tricks.jl @@ -92,7 +92,7 @@ set_silent(model) @constraint(model, [t; x] in SecondOrderCone()) @objective(model, Min, t) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value(t), value.(x) # ## Rotated Second-Order Cone @@ -115,7 +115,7 @@ set_silent(model) @constraint(model, [t; 0.5; residuals] in RotatedSecondOrderCone()) @objective(model, Min, t) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value(θ), value(t) # ## Exponential Cone @@ -138,7 +138,7 @@ set_silent(model) @objective(model, Min, z) @constraint(model, [x, 1, z] in MOI.ExponentialCone()) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value(z), exp(1.5) # ### Logarithm @@ -152,7 +152,7 @@ set_silent(model) @objective(model, Max, x) @constraint(model, [x, 1, z] in MOI.ExponentialCone()) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value(x), log(1.5) # ### Log-sum-exp @@ -214,7 +214,7 @@ set_silent(model) @constraint(model, A * x .<= b) @constraint(model, [i = 1:n], [t[i], x[i], 1] in MOI.ExponentialCone()) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) objective_value(model) # The [`MOI.ExponentialCone`](@ref) has a dual, the [`MOI.DualExponentialCone`](@ref), @@ -233,7 +233,7 @@ set_silent(model) @constraint(model, A * x .<= b) @constraint(model, [t; ones(n); x] in MOI.RelativeEntropyCone(2n + 1)) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) objective_value(model) # ## PowerCone @@ -255,7 +255,7 @@ set_silent(model) @constraint(model, [t, 1, x] in MOI.PowerCone(1 / 3)) @objective(model, Min, t) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value(t), value(x) # The [`MOI.PowerCone`](@ref) has a dual, the [`MOI.DualPowerCone`](@ref), @@ -278,7 +278,7 @@ function p_norm(x::Vector, p) @constraint(model, sum(r) == t) @objective(model, Min, t) optimize!(model) - @assert is_solved_and_feasible(model) + 
assert_is_solved_and_feasible(model) return value(t) end @@ -322,7 +322,7 @@ set_silent(model) @objective(model, Min, t) @constraint(model, t .* I - A in PSDCone()) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) objective_value(model) # ## GeometricMeanCone @@ -356,7 +356,7 @@ set_silent(model) @constraint(model, [t; vec(X)] in MOI.RootDetConeSquare(2)) @constraint(model, X .== [2 1; 1 3]) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value(t), sqrt(LinearAlgebra.det(value.(X))) # If `X` is symmetric, then you can use [`MOI.RootDetConeTriangle`](@ref) @@ -394,7 +394,7 @@ set_silent(model) @constraint(model, X .== [2 1; 1 3]) @constraint(model, u == 0.5) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value(t), 0.5 * log(LinearAlgebra.det(value.(X) ./ 0.5)) # If `X` is symmetric, then you can use [`MOI.LogDetConeTriangle`](@ref) @@ -415,7 +415,7 @@ set_silent(model) @constraint(model, X .== [2 1; 1 3]) @constraint(model, u == 0.5) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value(t), 0.5 * log(LinearAlgebra.det(value.(X) ./ 0.5)) # ## NormNuclearCone @@ -434,7 +434,7 @@ set_silent(model) @constraint(model, [t; vec(X)] in MOI.NormNuclearCone(2, 3)) @constraint(model, X .== [1 2 3; 4 5 6]) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value(t), sum(LinearAlgebra.svdvals(value.(X))) # ## NormSpectralCone @@ -453,7 +453,7 @@ set_silent(model) @constraint(model, [t; vec(X)] in MOI.NormSpectralCone(2, 3)) @constraint(model, X .== [1 2 3; 4 5 6]) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value(t), maximum(LinearAlgebra.svdvals(value.(X))) # ## Other Cones and Functions diff --git a/docs/src/tutorials/getting_started/debugging.jl b/docs/src/tutorials/getting_started/debugging.jl index 8d9bedba75b..ea21690e581 100644 --- a/docs/src/tutorials/getting_started/debugging.jl +++ b/docs/src/tutorials/getting_started/debugging.jl @@ -346,7 +346,7 @@ set_silent(model) # for variables with large positive or negative values in the optimal solution. 
optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) for var in all_variables(model) if var == objective continue diff --git a/docs/src/tutorials/getting_started/design_patterns_for_larger_models.jl b/docs/src/tutorials/getting_started/design_patterns_for_larger_models.jl index 1d6c574df57..1e1a15f05d0 100644 --- a/docs/src/tutorials/getting_started/design_patterns_for_larger_models.jl +++ b/docs/src/tutorials/getting_started/design_patterns_for_larger_models.jl @@ -55,7 +55,7 @@ model = Model(HiGHS.Optimizer) @objective(model, Max, sum(profit[i] * x[i] for i in 1:N)) @constraint(model, sum(weight[i] * x[i] for i in 1:N) <= capacity) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value.(x) # The benefits of this approach are: @@ -88,7 +88,7 @@ function solve_knapsack_1(profit::Vector, weight::Vector, capacity::Real) @objective(model, Max, sum(profit[i] * x[i] for i in 1:N)) @constraint(model, sum(weight[i] * x[i] for i in 1:N) <= capacity) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) return value.(x) end @@ -161,7 +161,7 @@ function solve_knapsack_2(data::KnapsackData) sum(v.weight * x[k] for (k, v) in data.objects) <= data.capacity, ) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) return value.(x) end @@ -233,7 +233,7 @@ function solve_knapsack_3(data::KnapsackData; binary_knapsack::Bool) sum(v.weight * x[k] for (k, v) in data.objects) <= data.capacity, ) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) return value.(x) end @@ -276,7 +276,7 @@ function solve_knapsack_4(data::KnapsackData, config::AbstractConfiguration) sum(v.weight * x[k] for (k, v) in data.objects) <= data.capacity, ) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) return value.(x) end @@ -364,7 +364,7 @@ function solve_knapsack_5(data::KnapsackData, config::AbstractConfiguration) add_knapsack_constraints(model, data, config) add_knapsack_objective(model, data, config) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) return value.(model[:x]) end diff --git a/docs/src/tutorials/getting_started/getting_started_with_JuMP.jl b/docs/src/tutorials/getting_started/getting_started_with_JuMP.jl index 9814bdaa54d..7c74dd1907e 100644 --- a/docs/src/tutorials/getting_started/getting_started_with_JuMP.jl +++ b/docs/src/tutorials/getting_started/getting_started_with_JuMP.jl @@ -523,5 +523,5 @@ c = [1, 3, 5, 2] @constraint(vector_model, A * x .== b) @objective(vector_model, Min, c' * x) optimize!(vector_model) -@assert is_solved_and_feasible(vector_model) +assert_is_solved_and_feasible(vector_model) objective_value(vector_model) diff --git a/docs/src/tutorials/getting_started/getting_started_with_data_and_plotting.jl b/docs/src/tutorials/getting_started/getting_started_with_data_and_plotting.jl index e617457cc8a..8018198d6ef 100644 --- a/docs/src/tutorials/getting_started/getting_started_with_data_and_plotting.jl +++ b/docs/src/tutorials/getting_started/getting_started_with_data_and_plotting.jl @@ -366,7 +366,7 @@ solution_summary(model) # Just to be sure, check that the solver found an optimal solution: -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) # ### Solution diff --git a/docs/src/tutorials/getting_started/tolerances.jl b/docs/src/tutorials/getting_started/tolerances.jl index 
46262fff53f..97d2a13d5b8 100644 --- a/docs/src/tutorials/getting_started/tolerances.jl +++ b/docs/src/tutorials/getting_started/tolerances.jl @@ -107,7 +107,7 @@ set_silent(model) @variable(model, x >= 0) @constraint(model, x == -1e-8) optimize!(model) -@assert is_solved_and_feasible(model) #src +assert_is_solved_and_feasible(model) #src is_solved_and_feasible(model) #- @@ -142,7 +142,7 @@ optimize!(model) # SCS reports that it solved the problem to optimality: -@assert is_solved_and_feasible(model) #src +assert_is_solved_and_feasible(model) #src is_solved_and_feasible(model) # and that the solution for `x[1]` is nearly zero: @@ -199,8 +199,8 @@ optimize!(model) #- -@assert is_solved_and_feasible(model) #src -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) #src +assert_is_solved_and_feasible(model) value(x[1]) # ### Why you shouldn't use a small tolerance @@ -231,7 +231,7 @@ model = Model(HiGHS.Optimizer) set_silent(model) @variable(model, x == 1 + 1e-6, Int) optimize!(model) -@assert is_solved_and_feasible(model) #src +assert_is_solved_and_feasible(model) #src is_solved_and_feasible(model) # HiGHS found an optimal solution, and the value of `x` is: @@ -357,7 +357,7 @@ set_start_value(y, 1e-6) # Now HiGHS will report that the problem is feasible: optimize!(model) -@assert is_solved_and_feasible(model) #src +assert_is_solved_and_feasible(model) #src is_solved_and_feasible(model) # ### Contradictory results are not a bug in the solver diff --git a/docs/src/tutorials/linear/basis.jl b/docs/src/tutorials/linear/basis.jl index 030f64428b7..5c95731c78e 100644 --- a/docs/src/tutorials/linear/basis.jl +++ b/docs/src/tutorials/linear/basis.jl @@ -44,7 +44,7 @@ set_silent(model) @variable(model, x[1:n] >= 0) @constraint(model, A * x == b) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) # This has a solution: @@ -96,7 +96,7 @@ set_silent(model) @constraint(model, c2, 7x + 12y >= 120) @constraint(model, c3, x + y <= 20) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) # A common way to query the basis status of every variable is: diff --git a/docs/src/tutorials/linear/callbacks.jl b/docs/src/tutorials/linear/callbacks.jl index e23193922eb..175fe72540e 100644 --- a/docs/src/tutorials/linear/callbacks.jl +++ b/docs/src/tutorials/linear/callbacks.jl @@ -62,7 +62,7 @@ function example_lazy_constraint() end set_attribute(model, MOI.LazyConstraintCallback(), my_callback_function) optimize!(model) - Test.@test is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) Test.@test lazy_called Test.@test value(x) == 1 Test.@test value(y) == 2 @@ -105,7 +105,7 @@ function example_user_cut_constraint() end set_attribute(model, MOI.UserCutCallback(), my_callback_function) optimize!(model) - Test.@test is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) Test.@test callback_called @show callback_called return @@ -143,7 +143,7 @@ function example_heuristic_solution() end set_attribute(model, MOI.HeuristicCallback(), my_callback_function) optimize!(model) - Test.@test is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) Test.@test callback_called return end diff --git a/docs/src/tutorials/linear/cannery.jl b/docs/src/tutorials/linear/cannery.jl index 13fae00c5a1..744aa5d7829 100644 --- a/docs/src/tutorials/linear/cannery.jl +++ b/docs/src/tutorials/linear/cannery.jl @@ -121,7 +121,7 @@ solution_summary(model) # What's the optimal shipment? 
-Test.@test is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) Test.@test isapprox(objective_value(model), 1_680.0, atol = 1e-6) #src for p in P, m in M println(p, " => ", m, ": ", value(x[p, m])) diff --git a/docs/src/tutorials/linear/constraint_programming.jl b/docs/src/tutorials/linear/constraint_programming.jl index a420186effa..84e92ad411b 100644 --- a/docs/src/tutorials/linear/constraint_programming.jl +++ b/docs/src/tutorials/linear/constraint_programming.jl @@ -29,7 +29,7 @@ set_silent(model) @variable(model, 1 <= x[1:4] <= 4, Int) @constraint(model, x in MOI.AllDifferent(4)) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value.(x) # ## BinPacking @@ -45,7 +45,7 @@ set_silent(model) @variable(model, 1 <= x[1:length(weights)] <= number_of_bins, Int) @constraint(model, x in MOI.BinPacking(capacity, weights)) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value.(x) # Here, the value of `x[i]` is the bin that item `i` was placed into. @@ -61,7 +61,7 @@ set_silent(model) @variable(model, x[1:4], Int) @constraint(model, x in MOI.Circuit(4)) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) # Let's see what tour was found, starting at node number `1`: y = round.(Int, value.(x)) @@ -115,7 +115,7 @@ n = 1 # Let's check that we found a valid solution: optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value.(x) # ## CountBelongs @@ -134,7 +134,7 @@ set_silent(model) set = Set([2, 3]) @constraint(model, [n; x] in MOI.CountBelongs(1 + length(x), set)) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value(n), value.(x) # ## CountDistinct @@ -149,7 +149,7 @@ set_silent(model) @objective(model, Max, sum(x)) @constraint(model, [n; x] in MOI.CountDistinct(1 + length(x))) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value(n), value.(x) # ## CountGreaterThan @@ -169,7 +169,7 @@ set_silent(model) @objective(model, Max, sum(x)) @constraint(model, [n; y; x] in MOI.CountGreaterThan(1 + 1 + length(x))) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value(n), value(y), value.(x) # Here `n` is strictly greater than the count, and there is no limit on how @@ -194,5 +194,5 @@ set_silent(model) @variable(model, x[i = 1:3], Int) @constraint(model, x in MOI.Table(table)) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) value.(x) diff --git a/docs/src/tutorials/linear/diet.jl b/docs/src/tutorials/linear/diet.jl index 413608f942e..5aea71d0717 100644 --- a/docs/src/tutorials/linear/diet.jl +++ b/docs/src/tutorials/linear/diet.jl @@ -145,7 +145,7 @@ print(model) # Let's optimize and take a look at the solution: optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) Test.@test objective_value(model) ≈ 11.8288 atol = 1e-4 #hide solution_summary(model) diff --git a/docs/src/tutorials/linear/facility_location.jl b/docs/src/tutorials/linear/facility_location.jl index 8b87955f072..6df72bdca85 100644 --- a/docs/src/tutorials/linear/facility_location.jl +++ b/docs/src/tutorials/linear/facility_location.jl @@ -130,7 +130,7 @@ set_silent(model) # Solve the uncapacitated facility location problem with HiGHS optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) 
println("Optimal value: ", objective_value(model)) # ### Visualizing the solution @@ -257,7 +257,7 @@ set_silent(model) # Solve the problem optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) println("Optimal value: ", objective_value(model)) # ### Visualizing the solution diff --git a/docs/src/tutorials/linear/factory_schedule.jl b/docs/src/tutorials/linear/factory_schedule.jl index 7e2e96f3def..603d638c253 100644 --- a/docs/src/tutorials/linear/factory_schedule.jl +++ b/docs/src/tutorials/linear/factory_schedule.jl @@ -186,7 +186,7 @@ function solve_factory_scheduling( ) ) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) schedules = Dict{Symbol,Vector{Float64}}( Symbol(f) => value.(production[:, f]) for f in factories ) diff --git a/docs/src/tutorials/linear/finance.jl b/docs/src/tutorials/linear/finance.jl index ae95dc92336..e3738748d80 100644 --- a/docs/src/tutorials/linear/finance.jl +++ b/docs/src/tutorials/linear/finance.jl @@ -92,7 +92,7 @@ end) ) optimize!(financing) -@assert is_solved_and_feasible(financing) +assert_is_solved_and_feasible(financing) objective_value(financing) # ## Combinatorial auctions @@ -137,7 +137,7 @@ for i in 1:6 @constraint(auction, sum(y[j] for j in 1:6 if i in bid_items[j]) <= 1) end optimize!(auction) -@assert is_solved_and_feasible(auction) +assert_is_solved_and_feasible(auction) objective_value(auction) #- diff --git a/docs/src/tutorials/linear/geographic_clustering.jl b/docs/src/tutorials/linear/geographic_clustering.jl index 681f7cf9155..67985e29ee4 100644 --- a/docs/src/tutorials/linear/geographic_clustering.jl +++ b/docs/src/tutorials/linear/geographic_clustering.jl @@ -151,7 +151,7 @@ end # We can then call `optimize!` and review the results. optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) # ### Reviewing the Results diff --git a/docs/src/tutorials/linear/knapsack.jl b/docs/src/tutorials/linear/knapsack.jl index 4ad1370ea13..83585778505 100644 --- a/docs/src/tutorials/linear/knapsack.jl +++ b/docs/src/tutorials/linear/knapsack.jl @@ -96,7 +96,7 @@ print(model) # We can now solve the optimization problem and inspect the results. 
optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) solution_summary(model) # The items chosen are @@ -125,7 +125,7 @@ function solve_knapsack_problem(; @objective(model, Max, profit' * x) @constraint(model, weight' * x <= capacity) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) println("Objective is: ", objective_value(model)) println("Solution is:") for i in 1:n diff --git a/docs/src/tutorials/linear/lp_sensitivity.jl b/docs/src/tutorials/linear/lp_sensitivity.jl index 0bc608cf19a..896a3a311a6 100644 --- a/docs/src/tutorials/linear/lp_sensitivity.jl +++ b/docs/src/tutorials/linear/lp_sensitivity.jl @@ -39,7 +39,7 @@ model = Model(HiGHS.Optimizer) @constraint(model, c2, 7x + 12y >= 120) @constraint(model, c3, x + y <= 20) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) solution_summary(model; verbose = true) # Can you identify: diff --git a/docs/src/tutorials/linear/mip_duality.jl b/docs/src/tutorials/linear/mip_duality.jl index b82dd1ab379..fcf00b8bad7 100644 --- a/docs/src/tutorials/linear/mip_duality.jl +++ b/docs/src/tutorials/linear/mip_duality.jl @@ -58,7 +58,7 @@ print(model) # If we optimize this model, we obtain a [`dual_status`](@ref) of [`NO_SOLUTION`](@ref): optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) dual_status(model) # This is because HiGHS cannot compute the duals of a mixed-integer program. We @@ -74,7 +74,7 @@ print(model) # dual: optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) dual_status(model) # and a marginal price of electricity of \$100/MWh: @@ -96,7 +96,7 @@ print(model) # the [`fix_discrete_variables`](@ref) function: optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) dual_status(model) #- @@ -116,7 +116,7 @@ print(model) #- optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) dual_status(model) #- diff --git a/docs/src/tutorials/linear/multi.jl b/docs/src/tutorials/linear/multi.jl index 3916ac2cb34..85d94511345 100644 --- a/docs/src/tutorials/linear/multi.jl +++ b/docs/src/tutorials/linear/multi.jl @@ -177,7 +177,7 @@ end # Finally, we can optimize the model: optimize!(model) -Test.@test is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) Test.@test objective_value(model) == 225_700.0 #src solution_summary(model) diff --git a/docs/src/tutorials/linear/multi_commodity_network.jl b/docs/src/tutorials/linear/multi_commodity_network.jl index 124d1e9af66..f60c74d84f8 100644 --- a/docs/src/tutorials/linear/multi_commodity_network.jl +++ b/docs/src/tutorials/linear/multi_commodity_network.jl @@ -201,7 +201,7 @@ df = DataFrames.leftjoin( # Finally, we can optimize the model: optimize!(model) -Test.@test is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) solution_summary(model) # update the solution in the DataFrames: diff --git a/docs/src/tutorials/linear/multi_objective_examples.jl b/docs/src/tutorials/linear/multi_objective_examples.jl index cbca784b18e..94dc63fb65b 100644 --- a/docs/src/tutorials/linear/multi_objective_examples.jl +++ b/docs/src/tutorials/linear/multi_objective_examples.jl @@ -37,7 +37,7 @@ solution_summary(model) #- for i in 1:result_count(model) - @assert is_solved_and_feasible(model; result = i) + assert_is_solved_and_feasible(model; result = i) print(i, ": z = ", round.(Int, objective_value(model; 
result = i)), " | ") println("x = ", value.([x1, x2]; result = i)) end @@ -66,7 +66,7 @@ solution_summary(model) #- for i in 1:result_count(model) - @assert is_solved_and_feasible(model; result = i) + assert_is_solved_and_feasible(model; result = i) print(i, ": z = ", round.(Int, objective_value(model; result = i)), " | ") println("x = ", round.(Int, value.(x; result = i))) end @@ -111,7 +111,7 @@ solution_summary(model) #- for i in 1:result_count(model) - @assert is_solved_and_feasible(model; result = i) + assert_is_solved_and_feasible(model; result = i) print(i, ": z = ", round.(Int, objective_value(model; result = i)), " | ") X = round.(Int, value.(x; result = i)) print("Path:") diff --git a/docs/src/tutorials/linear/multi_objective_knapsack.jl b/docs/src/tutorials/linear/multi_objective_knapsack.jl index a95b98497cf..ac763798b1b 100644 --- a/docs/src/tutorials/linear/multi_objective_knapsack.jl +++ b/docs/src/tutorials/linear/multi_objective_knapsack.jl @@ -142,7 +142,7 @@ solution_summary(model; result = 5) #- -@assert is_solved_and_feasible(model; result = 5) +assert_is_solved_and_feasible(model; result = 5) #- diff --git a/docs/src/tutorials/linear/multiple_solutions.jl b/docs/src/tutorials/linear/multiple_solutions.jl index b55051bed2c..9a390f68959 100644 --- a/docs/src/tutorials/linear/multiple_solutions.jl +++ b/docs/src/tutorials/linear/multiple_solutions.jl @@ -93,7 +93,7 @@ x_digits_upper = [x_digits[i, j] for j in 1:n for i in 1:j] set_optimizer(model, Gurobi.Optimizer) optimize!(model) -Test.@test is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) Test.@test result_count(model) == 1 solution_summary(model) @@ -120,7 +120,7 @@ set_attribute(model, "PoolSolutions", 100) # We can then call `optimize!` and view the results. 
optimize!(model) -Test.@test is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) solution_summary(model) # Now Gurobi has found 20 solutions: diff --git a/docs/src/tutorials/linear/n-queens.jl b/docs/src/tutorials/linear/n-queens.jl index c75ff6d824c..10c89668cd2 100644 --- a/docs/src/tutorials/linear/n-queens.jl +++ b/docs/src/tutorials/linear/n-queens.jl @@ -66,7 +66,7 @@ end # a feasible solution: optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) # We can now review the solution that our model found: diff --git a/docs/src/tutorials/linear/network_flows.jl b/docs/src/tutorials/linear/network_flows.jl index 89c3e98391f..9c24b0575c4 100644 --- a/docs/src/tutorials/linear/network_flows.jl +++ b/docs/src/tutorials/linear/network_flows.jl @@ -79,7 +79,7 @@ set_silent(shortest_path) @constraint(shortest_path, [i = 1:n], sum(x[i, :]) - sum(x[:, i]) == b[i],) @objective(shortest_path, Min, sum(G .* x)) optimize!(shortest_path) -@assert is_solved_and_feasible(shortest_path) +assert_is_solved_and_feasible(shortest_path) objective_value(shortest_path) #- value.(x) @@ -124,7 +124,7 @@ set_silent(assignment) @constraint(assignment, [j = 1:n], sum(y[j, :]) == 1) @objective(assignment, Max, sum(G .* y)) optimize!(assignment) -@assert is_solved_and_feasible(assignment) +assert_is_solved_and_feasible(assignment) objective_value(assignment) #- value.(y) @@ -165,7 +165,7 @@ max_flow = Model(HiGHS.Optimizer) @constraint(max_flow, [i = 1:n; i != 1 && i != 8], sum(f[i, :]) == sum(f[:, i])) @objective(max_flow, Max, sum(f[1, :])) optimize!(max_flow) -@assert is_solved_and_feasible(max_flow) +assert_is_solved_and_feasible(max_flow) objective_value(max_flow) #- value.(f) diff --git a/docs/src/tutorials/linear/piecewise_linear.jl b/docs/src/tutorials/linear/piecewise_linear.jl index 6ad6fff89ee..456700b6d2d 100644 --- a/docs/src/tutorials/linear/piecewise_linear.jl +++ b/docs/src/tutorials/linear/piecewise_linear.jl @@ -52,7 +52,7 @@ function outer_approximate_x_squared(x̄) @objective(model, Min, y) @constraint(model, x == x̄) # <-- a trivial constraint just for testing. optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) return value(y) end @@ -103,7 +103,7 @@ function outer_approximate_log(x̄) @objective(model, Max, y) @constraint(model, x == x̄) # <-- a trivial constraint just for testing. optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) return value(y) end @@ -169,7 +169,7 @@ function inner_approximate_x_squared(x̄) @objective(model, Min, y) @constraint(model, x == x̄) # <-- a trivial constraint just for testing. optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) return value(y) end @@ -212,7 +212,7 @@ function inner_approximate_log(x̄) @objective(model, Max, y) @constraint(model, x == x̄) # <-- a trivial constraint just for testing. optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) return value(y) end @@ -266,7 +266,7 @@ function piecewise_linear_sin(x̄) end) @constraint(model, x == x̄) # <-- a trivial constraint just for testing. 
optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) return value(y) end diff --git a/docs/src/tutorials/linear/sudoku.jl b/docs/src/tutorials/linear/sudoku.jl index b13b6d2389f..a6b8b6024a9 100644 --- a/docs/src/tutorials/linear/sudoku.jl +++ b/docs/src/tutorials/linear/sudoku.jl @@ -134,7 +134,7 @@ end # solve problem optimize!(sudoku) -@assert is_solved_and_feasible(sudoku) +assert_is_solved_and_feasible(sudoku) # Extract the values of x x_val = value.(x); @@ -203,7 +203,7 @@ for i in 1:9, j in 1:9 end optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) # Display the solution diff --git a/docs/src/tutorials/linear/transp.jl b/docs/src/tutorials/linear/transp.jl index a62810ca3e8..bf189eb36d3 100644 --- a/docs/src/tutorials/linear/transp.jl +++ b/docs/src/tutorials/linear/transp.jl @@ -120,7 +120,7 @@ function solve_transportation_problem(data::Containers.DenseAxisArray) @constraint(model, [o in O], sum(x[o, :]) <= data[o, "SUPPLY"]) @constraint(model, [d in D], sum(x[:, d]) == data["DEMAND", d]) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) ## Pretty print the solution in the format of the input print(" ", join(lpad.(D, 7, ' '))) for o in O diff --git a/docs/src/tutorials/nonlinear/classifiers.jl b/docs/src/tutorials/nonlinear/classifiers.jl index c0761adc4fc..d65b1c5c979 100644 --- a/docs/src/tutorials/nonlinear/classifiers.jl +++ b/docs/src/tutorials/nonlinear/classifiers.jl @@ -127,7 +127,7 @@ function solve_SVM_classifier(P::Matrix, labels::Vector; C::Float64 = C_0) D = LinearAlgebra.Diagonal(labels) @constraint(model, D * (P * w .- g) .+ y .>= 1) optimize!(model) - Test.@test is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) slack = extrema(value.(y)) println("Minimum slack: ", slack[1], "\nMaximum slack: ", slack[2]) classifier(x) = line(x; w = value.(w), g = value(g)) @@ -233,7 +233,7 @@ function solve_dual_SVM_classifier(P::Matrix, labels::Vector; C::Float64 = C_0) @objective(model, Min, 1 / 2 * u' * D * P * P' * D * u - sum(u)) @constraint(model, con, sum(D * u) == 0) optimize!(model) - Test.@test is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) w = P' * D * value.(u) g = dual(con) classifier(x) = line(x; w = w, g = g) @@ -320,7 +320,7 @@ function solve_kernel_SVM_classifier( con = @constraint(model, sum(D * u) == 0) @objective(model, Min, 1 / 2 * u' * D * K * D * u - sum(u)) optimize!(model) - Test.@test is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) u_sol, g_sol = value.(u), dual(con) function classifier(v::Vector) return sum( diff --git a/docs/src/tutorials/nonlinear/complementarity.jl b/docs/src/tutorials/nonlinear/complementarity.jl index 1b712819fa1..6dce97c2153 100644 --- a/docs/src/tutorials/nonlinear/complementarity.jl +++ b/docs/src/tutorials/nonlinear/complementarity.jl @@ -50,7 +50,7 @@ set_silent(model) @variable(model, 0 <= x[1:4] <= 10, start = 0) @constraint(model, M * x + q ⟂ x) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) Test.@test value.(x) ≈ [2.8, 0.0, 0.8, 1.2] #src value.(x) @@ -71,7 +71,7 @@ set_silent(model) @constraint(model, w + 2x - 2y + 4z - 6 ⟂ z) @constraint(model, w - x + 2y - 2z - 2 ⟂ y) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) Test.@test value.([w, x, y, z]) ≈ [2.8, 0.0, 0.8, 1.2] #src value.([w, x, y, z]) @@ -107,7 +107,7 @@ set_silent(model) end ) 
 optimize!(model)
-@assert is_solved_and_feasible(model)
+assert_is_solved_and_feasible(model)
 Test.@test isapprox(value(p["new-york"]), 0.225; atol = 1e-3) #src
 value.(p)
 
@@ -143,7 +143,7 @@ set_silent(model)
     end
 )
 optimize!(model)
-@assert is_solved_and_feasible(model)
+assert_is_solved_and_feasible(model)
 Test.@test isapprox(value(C_G), 0.996; atol = 1e-3) #src
 value(K)
 
@@ -196,7 +196,7 @@ set_silent(model)
 ## Production does not exceed capacity
 @constraint(model, [ω = 1:5], x - Y[ω] ⟂ μ[ω])
 optimize!(model)
-@assert is_solved_and_feasible(model)
+assert_is_solved_and_feasible(model)
 solution_summary(model)
 
 # An equilibrium solution is to build 389 MW:
diff --git a/docs/src/tutorials/nonlinear/nested_problems.jl b/docs/src/tutorials/nonlinear/nested_problems.jl
index b9a437d7d7c..5751af8e56e 100644
--- a/docs/src/tutorials/nonlinear/nested_problems.jl
+++ b/docs/src/tutorials/nonlinear/nested_problems.jl
@@ -90,7 +90,7 @@ function solve_lower_level(x...)
     )
     @constraint(model, (y[1] - 10)^2 + (y[2] - 10)^2 <= 25)
     optimize!(model)
-    @assert is_solved_and_feasible(model)
+    assert_is_solved_and_feasible(model)
     return objective_value(model), value.(y)
 end
 
@@ -153,7 +153,7 @@ model = Model(Ipopt.Optimizer)
 @operator(model, op_V, 2, V, ∇V, ∇²V)
 @objective(model, Min, x[1]^2 + x[2]^2 + op_V(x[1], x[2]))
 optimize!(model)
-@assert is_solved_and_feasible(model)
+assert_is_solved_and_feasible(model)
 solution_summary(model)
 
 # The optimal objective value is:
@@ -233,7 +233,7 @@ cache = Cache(Float64[], NaN, Float64[])
 )
 @objective(model, Min, x[1]^2 + x[2]^2 + op_cached_f(x[1], x[2]))
 optimize!(model)
-@assert is_solved_and_feasible(model)
+assert_is_solved_and_feasible(model)
 solution_summary(model)
 
-# an we can check we get the same objective value:
+# and we can check we get the same objective value:
diff --git a/docs/src/tutorials/nonlinear/operator_ad.jl b/docs/src/tutorials/nonlinear/operator_ad.jl
index 9abc2a9be89..1dbac1fa82d 100644
--- a/docs/src/tutorials/nonlinear/operator_ad.jl
+++ b/docs/src/tutorials/nonlinear/operator_ad.jl
@@ -105,7 +105,7 @@ function analytic_rosenbrock()
     @operator(model, op_rosenbrock, 2, f, analytic_∇f, analytic_∇²f)
     @objective(model, Min, op_rosenbrock(x[1], x[2]))
     optimize!(model)
-    Test.@test is_solved_and_feasible(model)
+    assert_is_solved_and_feasible(model)
     return value.(x)
 end
 
@@ -202,7 +202,7 @@ function fdiff_rosenbrock()
     @operator(model, op_rosenbrock, 2, f, fdiff_derivatives(f)...)
     @objective(model, Min, op_rosenbrock(x[1], x[2]))
     optimize!(model)
-    Test.@test is_solved_and_feasible(model)
+    assert_is_solved_and_feasible(model)
     return value.(x)
 end
 
@@ -320,7 +320,7 @@ function enzyme_rosenbrock()
     @operator(model, op_rosenbrock, 2, f, enzyme_derivatives(f)...)
     @objective(model, Min, op_rosenbrock(x[1], x[2]))
     optimize!(model)
-    Test.@test is_solved_and_feasible(model)
+    assert_is_solved_and_feasible(model)
     return value.(x)
 end
 
@@ -429,7 +429,7 @@ function di_rosenbrock(; backend)
     @operator(model, op_rosenbrock, 2, f, di_derivatives(f; backend)...)
@objective(model, Min, op_rosenbrock(x[1], x[2])) optimize!(model) - Test.@test is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) return value.(x) end diff --git a/docs/src/tutorials/nonlinear/portfolio.jl b/docs/src/tutorials/nonlinear/portfolio.jl index 4e2786ae850..11043342e7c 100644 --- a/docs/src/tutorials/nonlinear/portfolio.jl +++ b/docs/src/tutorials/nonlinear/portfolio.jl @@ -158,7 +158,7 @@ set_silent(model) @constraint(model, sum(x) <= 1000) @constraint(model, r' * x >= 50) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) solution_summary(model) # The optimal allocation of our assets is: diff --git a/docs/src/tutorials/nonlinear/querying_hessians.jl b/docs/src/tutorials/nonlinear/querying_hessians.jl index c0a98aa1b7e..153fd42c3c6 100644 --- a/docs/src/tutorials/nonlinear/querying_hessians.jl +++ b/docs/src/tutorials/nonlinear/querying_hessians.jl @@ -73,7 +73,7 @@ set_silent(model) @constraint(model, g_2, (x[1] + x[2])^2 <= 2) @objective(model, Min, (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) # ## The analytic solution diff --git a/docs/src/tutorials/nonlinear/rocket_control.jl b/docs/src/tutorials/nonlinear/rocket_control.jl index b1f460235c4..a5dce0ce916 100644 --- a/docs/src/tutorials/nonlinear/rocket_control.jl +++ b/docs/src/tutorials/nonlinear/rocket_control.jl @@ -129,7 +129,7 @@ ddt(x::Vector, t::Int) = (x[t] - x[t-1]) / Δt # Now we optimize the model and check that we found a solution: optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) solution_summary(model) # Finally, we plot the solution: diff --git a/docs/src/tutorials/nonlinear/simple_examples.jl b/docs/src/tutorials/nonlinear/simple_examples.jl index 76361455700..3238e30387f 100644 --- a/docs/src/tutorials/nonlinear/simple_examples.jl +++ b/docs/src/tutorials/nonlinear/simple_examples.jl @@ -28,7 +28,7 @@ function example_rosenbrock() @variable(model, y) @objective(model, Min, (1 - x)^2 + 100 * (y - x^2)^2) optimize!(model) - Test.@test is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) Test.@test objective_value(model) ≈ 0.0 atol = 1e-10 Test.@test value(x) ≈ 1.0 Test.@test value(y) ≈ 1.0 @@ -89,7 +89,7 @@ function example_clnlbeam() primal_status = $(primal_status(model)) objective_value = $(objective_value(model)) """) - Test.@test is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) Test.@test objective_value(model) ≈ 350.0 #src return end @@ -117,7 +117,7 @@ function example_mle() sum((data[i] - μ)^2 for i in 1:n) / (2 * σ^2) ) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) println("μ = ", value(μ)) println("mean(data) = ", Statistics.mean(data)) println("σ^2 = ", value(σ)^2) @@ -128,7 +128,7 @@ function example_mle() ## You can even do constrained MLE! 
@constraint(model, μ == σ^2) optimize!(model) - @assert is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) Test.@test value(μ) ≈ value(σ)^2 println() println("With constraint μ == σ^2:") @@ -156,7 +156,7 @@ function example_qcp() @constraint(model, x * x + y * y - z * z <= 0) @constraint(model, x * x - y * z <= 0) optimize!(model) - Test.@test is_solved_and_feasible(model) + assert_is_solved_and_feasible(model) print(model) println("Objective value: ", objective_value(model)) println("x = ", value(x)) diff --git a/docs/src/tutorials/nonlinear/space_shuttle_reentry_trajectory.jl b/docs/src/tutorials/nonlinear/space_shuttle_reentry_trajectory.jl index 2e6adde4679..11f785ca1a7 100644 --- a/docs/src/tutorials/nonlinear/space_shuttle_reentry_trajectory.jl +++ b/docs/src/tutorials/nonlinear/space_shuttle_reentry_trajectory.jl @@ -310,7 +310,7 @@ end set_silent(model) # Hide solver's verbose output optimize!(model) # Solve for the control and state -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) ## Show final cross-range of the solution println( diff --git a/docs/src/tutorials/nonlinear/tips_and_tricks.jl b/docs/src/tutorials/nonlinear/tips_and_tricks.jl index e59aaf9bd72..eaa9b32f331 100644 --- a/docs/src/tutorials/nonlinear/tips_and_tricks.jl +++ b/docs/src/tutorials/nonlinear/tips_and_tricks.jl @@ -56,7 +56,7 @@ set_silent(model) @constraint(model, op_foo_2(x[1], x[2]) <= 2) function_calls = 0 optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) Test.@test objective_value(model) ≈ √3 atol = 1e-4 Test.@test value.(x) ≈ [1.0, 1.0] atol = 1e-4 println("Naive approach: function calls = $(function_calls)") @@ -127,7 +127,7 @@ set_silent(model) @constraint(model, op_foo_2(x[1], x[2]) <= 2) function_calls = 0 optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) Test.@test objective_value(model) ≈ √3 atol = 1e-4 Test.@test value.(x) ≈ [1.0, 1.0] atol = 1e-4 println("Memoized approach: function_calls = $(function_calls)") diff --git a/docs/src/tutorials/nonlinear/user_defined_hessians.jl b/docs/src/tutorials/nonlinear/user_defined_hessians.jl index dc2339468c0..753029b1172 100644 --- a/docs/src/tutorials/nonlinear/user_defined_hessians.jl +++ b/docs/src/tutorials/nonlinear/user_defined_hessians.jl @@ -74,5 +74,5 @@ model = Model(Ipopt.Optimizer) @operator(model, op_rosenbrock, 2, rosenbrock, ∇rosenbrock, ∇²rosenbrock) @objective(model, Min, op_rosenbrock(x[1], x[2])) optimize!(model) -@assert is_solved_and_feasible(model) +assert_is_solved_and_feasible(model) solution_summary(model; verbose = true) diff --git a/docs/src/tutorials/transitioning/transitioning_from_matlab.jl b/docs/src/tutorials/transitioning/transitioning_from_matlab.jl index 642a331aa84..ca3c1ff7fb2 100755 --- a/docs/src/tutorials/transitioning/transitioning_from_matlab.jl +++ b/docs/src/tutorials/transitioning/transitioning_from_matlab.jl @@ -375,12 +375,9 @@ function robustness_jump(d) set_optimizer(model, Clarabel.Optimizer) set_attribute(model, "verbose", true) optimize!(model) - if is_solved_and_feasible(model) - WT = dual(PPT) - return value(λ), real(LinearAlgebra.dot(WT, rhoT)) - else - return "Something went wrong: $(raw_status(model))" - end + assert_is_solved_and_feasible(model) + WT = dual(PPT) + return value(λ), real(LinearAlgebra.dot(WT, rhoT)) end robustness_jump(3) diff --git a/src/optimizer_interface.jl b/src/optimizer_interface.jl index 93717e65d4c..0e6f3514fdd 100644 --- 
a/src/optimizer_interface.jl
+++ b/src/optimizer_interface.jl
@@ -812,11 +812,20 @@ end
     result::Int = 1,
 )
 
-Return `true` if the model has a feasible primal solution associated with result
-index `result` and the [`termination_status`](@ref) is [`OPTIMAL`](@ref) (the
-solver found a global optimum) or [`LOCALLY_SOLVED`](@ref) (the solver found a
-local optimum, which may also be the global optimum, but the solver could not
-prove so).
+Return `true` if:
+
+ * the [`termination_status`](@ref) is one of:
+   * [`OPTIMAL`](@ref) (the solver found a global optimum)
+   * [`LOCALLY_SOLVED`](@ref) (the solver found a local optimum, which may also
+     be the global optimum, but the solver could not prove so).
+ * the [`primal_status`](@ref) of the result index `result` is `FEASIBLE_POINT`.
+
+This function is conservative, in that it returns `false` for situations like
+the solver terminating with a feasible solution due to a time limit.
+
+See also: [`assert_is_solved_and_feasible`](@ref).
+
+## Keyword arguments
 
 If `allow_local = false`, then this function returns `true` only if the
 [`termination_status`](@ref) is [`OPTIMAL`](@ref).
@@ -826,7 +835,8 @@ be [`ALMOST_OPTIMAL`](@ref) or [`ALMOST_LOCALLY_SOLVED`](@ref) (if `allow_local`
 and the [`primal_status`](@ref) and [`dual_status`](@ref) may additionally be
 [`NEARLY_FEASIBLE_POINT`](@ref).
 
-If `dual`, additionally check that an optimal dual solution is available.
+If `dual`, additionally use [`dual_status`](@ref) to check that a dual feasible
+point is available.
 
 If this function returns `false`, use [`termination_status`](@ref),
 [`result_count`](@ref), [`primal_status`](@ref) and [`dual_status`](@ref) to
@@ -871,6 +881,58 @@ function is_solved_and_feasible(
     return ret
 end
 
+"""
+    assert_is_solved_and_feasible(model::GenericModel; kwargs...)
+
+A function that calls [`is_solved_and_feasible`](@ref) and, if the return is
+`false`, errors with an informative error message describing the state of the
+solver.
+
+## Keyword arguments
+
+See [`is_solved_and_feasible`](@ref) for a description of all keyword arguments.
+
+## Example
+
+```jldoctest
+julia> import Ipopt
+
+julia> model = Model(Ipopt.Optimizer);
+
+julia> is_solved_and_feasible(model)
+false
+
+julia> assert_is_solved_and_feasible(model)
+ERROR: The model was not solved correctly. Here is a summary of the solution to help you debug why this happened:
+
+* Solver : Ipopt
+
+* Status
+  Result count       : 0
+  Termination status : OPTIMIZE_NOT_CALLED
+  Message from the solver:
+  "optimize not called"
+
+* Candidate solution (result #1)
+  Primal status      : NO_SOLUTION
+  Dual status        : NO_SOLUTION
+
+* Work counters
+
+Stacktrace:
+[...]
+```
+"""
+function assert_is_solved_and_feasible(model::GenericModel; kwargs...)
+    if is_solved_and_feasible(model; kwargs...)
+        error(
+            "The model was not solved correctly. 
Here is a summary of the " * + "solution to help you debug why this happened:\n\n" * + string(solution_summary(model)), + ) + end + return +end + """ solve_time(model::GenericModel) From 0e9faa09a57775313eff8842808cf0b0ae9faa18 Mon Sep 17 00:00:00 2001 From: Oscar Dowson Date: Sat, 25 Jan 2025 14:07:39 +1300 Subject: [PATCH 2/7] Update src/optimizer_interface.jl --- src/optimizer_interface.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/optimizer_interface.jl b/src/optimizer_interface.jl index 0e6f3514fdd..95daa898e77 100644 --- a/src/optimizer_interface.jl +++ b/src/optimizer_interface.jl @@ -923,7 +923,7 @@ Stacktrace: ``` """ function assert_is_solved_and_feasible(model::GenericModel; kwargs...) - if is_solved_and_feasible(model; kwargs...) + if !is_solved_and_feasible(model; kwargs...) error( "The model was not solved correctly. Here is a summary of the " * "solution to help you debug why this happened:\n\n" * From d4ae9e02a4471d50ac2e1b86242df635c716315f Mon Sep 17 00:00:00 2001 From: Oscar Dowson Date: Sun, 26 Jan 2025 12:32:00 +1300 Subject: [PATCH 3/7] Update src/optimizer_interface.jl Co-authored-by: Miles Lubin --- src/optimizer_interface.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/optimizer_interface.jl b/src/optimizer_interface.jl index 95daa898e77..c8a51c06abf 100644 --- a/src/optimizer_interface.jl +++ b/src/optimizer_interface.jl @@ -926,7 +926,7 @@ function assert_is_solved_and_feasible(model::GenericModel; kwargs...) if !is_solved_and_feasible(model; kwargs...) error( "The model was not solved correctly. Here is a summary of the " * - "solution to help you debug why this happened:\n\n" * + "solution to help debug why this happened:\n\n" * string(solution_summary(model)), ) end From 98f0a4009174a5f4f14954280476dca5a0c4b100 Mon Sep 17 00:00:00 2001 From: odow Date: Mon, 27 Jan 2025 10:04:37 +1300 Subject: [PATCH 4/7] Update --- src/optimizer_interface.jl | 2 +- test/test_model.jl | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/src/optimizer_interface.jl b/src/optimizer_interface.jl index c8a51c06abf..e9959cea795 100644 --- a/src/optimizer_interface.jl +++ b/src/optimizer_interface.jl @@ -902,7 +902,7 @@ julia> is_solved_and_feasible(model) false julia> assert_is_solved_and_feasible(model) -ERROR: The model was not solved correctly. Here is a summary of the solution to help you debug why this happened: +ERROR: The model was not solved correctly. 
Here is a summary of the solution to help debug why this happened: * Solver : Ipopt diff --git a/test/test_model.jl b/test/test_model.jl index 2f6009acf3f..31802b0dcc0 100644 --- a/test/test_model.jl +++ b/test/test_model.jl @@ -1314,6 +1314,20 @@ function test_is_solved_and_feasible() return end +function test_assert_is_solved_and_feasible() + mock = MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}()) + model = direct_model(mock) + MOI.set(mock, MOI.TerminationStatus(), MOI.OPTIMAL) + MOI.set(mock, MOI.PrimalStatus(), MOI.FEASIBLE_POINT) + MOI.set(mock, MOI.DualStatus(), MOI.NO_SOLUTION) + @test assert_is_solved_and_feasible(model) === nothing + @test_throws( + ErrorException, + assert_is_solved_and_feasible(model; dual = true), + ) + return +end + function test_set_abstract_string() abstract_string = split("foo.bar", ".")[1] model = Model() do From 2138189ae689eaaa900552bcad232ca8201ae3cd Mon Sep 17 00:00:00 2001 From: odow Date: Mon, 27 Jan 2025 10:08:53 +1300 Subject: [PATCH 5/7] Update --- docs/src/tutorials/linear/multi_objective_knapsack.jl | 2 +- docs/src/tutorials/nonlinear/portfolio.jl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/src/tutorials/linear/multi_objective_knapsack.jl b/docs/src/tutorials/linear/multi_objective_knapsack.jl index ac763798b1b..b69b8fc753c 100644 --- a/docs/src/tutorials/linear/multi_objective_knapsack.jl +++ b/docs/src/tutorials/linear/multi_objective_knapsack.jl @@ -121,7 +121,7 @@ set_attribute(model, MOA.Algorithm(), MOA.EpsilonConstraint()) # Let's solve the problem and see the solution optimize!(model) -@assert termination_status(model) == OPTIMAL +assert_is_solved_and_feasible(model) solution_summary(model) # There are 9 solutions available. We can also use [`result_count`](@ref) to see diff --git a/docs/src/tutorials/nonlinear/portfolio.jl b/docs/src/tutorials/nonlinear/portfolio.jl index 11043342e7c..e4be8141eba 100644 --- a/docs/src/tutorials/nonlinear/portfolio.jl +++ b/docs/src/tutorials/nonlinear/portfolio.jl @@ -210,7 +210,7 @@ set_optimizer_attribute(model, MOA.SolutionLimit(), 50) ## a single objective sense `Min`, and negate any `Max` objectives: @objective(model, Min, [variance, -expected_return]) optimize!(model) -@assert termination_status(model) == OPTIMAL +assert_is_solved_and_feasible(model) solution_summary(model) # The algorithm found 50 different solutions. 
Let's plot them to see how they From ce687400ae3080591d1d6d580329fdef41f245aa Mon Sep 17 00:00:00 2001 From: odow Date: Mon, 27 Jan 2025 10:27:00 +1300 Subject: [PATCH 6/7] Update --- test/test_model.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_model.jl b/test/test_model.jl index 31802b0dcc0..51b188e0dc3 100644 --- a/test/test_model.jl +++ b/test/test_model.jl @@ -1320,6 +1320,7 @@ function test_assert_is_solved_and_feasible() MOI.set(mock, MOI.TerminationStatus(), MOI.OPTIMAL) MOI.set(mock, MOI.PrimalStatus(), MOI.FEASIBLE_POINT) MOI.set(mock, MOI.DualStatus(), MOI.NO_SOLUTION) + MOI.set(mock, MOI.RawStatusString(), "failed") @test assert_is_solved_and_feasible(model) === nothing @test_throws( ErrorException, From 0ad1cc9548f5b94ffe71e6ce4a1edc45f9a89d13 Mon Sep 17 00:00:00 2001 From: odow Date: Mon, 27 Jan 2025 11:16:57 +1300 Subject: [PATCH 7/7] Update --- src/optimizer_interface.jl | 10 +++++++--- test/test_model.jl | 40 +++++++++++++++++++++++++++++++++++++- 2 files changed, 46 insertions(+), 4 deletions(-) diff --git a/src/optimizer_interface.jl b/src/optimizer_interface.jl index e9959cea795..18fc1816b23 100644 --- a/src/optimizer_interface.jl +++ b/src/optimizer_interface.jl @@ -922,12 +922,16 @@ Stacktrace: [...] ``` """ -function assert_is_solved_and_feasible(model::GenericModel; kwargs...) - if !is_solved_and_feasible(model; kwargs...) +function assert_is_solved_and_feasible( + model::GenericModel; + result::Int = 1, + kwargs..., +) + if !is_solved_and_feasible(model; result, kwargs...) error( "The model was not solved correctly. Here is a summary of the " * "solution to help debug why this happened:\n\n" * - string(solution_summary(model)), + string(solution_summary(model; result)), ) end return diff --git a/test/test_model.jl b/test/test_model.jl index 51b188e0dc3..74e5d7046cd 100644 --- a/test/test_model.jl +++ b/test/test_model.jl @@ -1323,9 +1323,47 @@ function test_assert_is_solved_and_feasible() MOI.set(mock, MOI.RawStatusString(), "failed") @test assert_is_solved_and_feasible(model) === nothing @test_throws( - ErrorException, + ErrorException( + """ + The model was not solved correctly. Here is a summary of the solution to help debug why this happened: + + * Solver : Mock + + * Status + Result count : 1 + Termination status : OPTIMAL + Message from the solver: + "failed" + + * Candidate solution (result #1) + Primal status : FEASIBLE_POINT + Dual status : NO_SOLUTION + Objective value : 0.00000e+00 + Dual objective value : 0.00000e+00 + + * Work counters + """, + ), assert_is_solved_and_feasible(model; dual = true), ) + @test_throws( + ErrorException( + """ + The model was not solved correctly. Here is a summary of the solution to help debug why this happened: + + * Solver : Mock + + * Status + Result count : 1 + Termination status : OPTIMAL + + * Candidate solution (result #2) + Primal status : NO_SOLUTION + Dual status : NO_SOLUTION + """, + ), + assert_is_solved_and_feasible(model; dual = true, result = 2), + ) return end
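To make the new function concrete beyond the doctest above, here is a minimal usage sketch. The toy model and the choice of HiGHS are illustrative assumptions, not part of this patch series; any JuMP-compatible solver that returns duals behaves the same way.

```julia
using JuMP
import HiGHS  # assumption: any solver that supports duals works here

model = Model(HiGHS.Optimizer)
set_silent(model)
@variable(model, x >= 0)
@variable(model, y >= 0)
@constraint(model, c, x + 2y <= 3)
@objective(model, Max, x + y)
optimize!(model)
# On success this is a no-op that returns `nothing`; on failure it errors with
# the `solution_summary` of the model. `dual = true` additionally requires a
# feasible dual solution, mirroring `is_solved_and_feasible(; dual = true)`.
assert_is_solved_and_feasible(model; dual = true)
value(x), value(y), dual(c)
```

Unlike `@assert is_solved_and_feasible(model)`, the failure mode is a descriptive `ErrorException` rather than a bare `AssertionError`, which is what motivates the mechanical replacement across the tutorials in this series.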
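The updated docstring describes `is_solved_and_feasible` as conservative: a solver that stops at a time limit with a feasible incumbent still yields `false`. A sketch of the pattern for accepting such incumbents manually, assuming some expensive `model` and an arbitrary limit of 60 seconds:

```julia
set_time_limit_sec(model, 60.0)
optimize!(model)
if !is_solved_and_feasible(model)
    if termination_status(model) == TIME_LIMIT &&
       primal_status(model) == FEASIBLE_POINT
        # The solve stopped early, but the incumbent is feasible and usable.
        @info "accepting incumbent found before the time limit"
    else
        # Otherwise, error with the full solution summary.
        assert_is_solved_and_feasible(model)
    end
end
```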
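The final commit threads a `result` keyword through to both the feasibility check and the `solution_summary` printed on failure. A sketch of why that matters, assuming a solver that returns several results (for example, the `MOA` models updated earlier in this series) and a registered variable `x`:

```julia
optimize!(model)
for i in 1:result_count(model)
    # Both the assertion and, on failure, the printed summary now refer to
    # result `i` rather than always to result 1.
    assert_is_solved_and_feasible(model; result = i)
    println("solution $i: ", value.(model[:x]; result = i))
end
```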