Merged
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -17,7 +17,7 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - version: '1.6'
+          - version: 'lts'
             os: ubuntu-latest
             arch: x64
           - version: '1'
67 changes: 34 additions & 33 deletions src/NonLinearProgram/NonLinearProgram.jl
@@ -552,45 +552,46 @@ function DiffOpt.reverse_differentiate!(model::Model; tol = 1e-6)
     # Compute Jacobian
     Δs, df_dp = _compute_sensitivity(model; tol = tol)
     Δp = if !iszero(model.input_cache.dobj)
-        model.input_cache.dobj * df_dp
+        df_dp'model.input_cache.dobj
     else
-        num_primal = length(cache.primal_vars)
-        # Fetch primal sensitivities
-        Δx = zeros(num_primal)
-        for (i, var_idx) in enumerate(cache.primal_vars)
-            if haskey(model.input_cache.dx, var_idx)
-                Δx[i] = model.input_cache.dx[var_idx]
-            end
+        zeros(length(cache.params))
     end
+    num_primal = length(cache.primal_vars)
+    # Fetch primal sensitivities
+    Δx = zeros(num_primal)
+    for (i, var_idx) in enumerate(cache.primal_vars)
+        if haskey(model.input_cache.dx, var_idx)
+            Δx[i] = model.input_cache.dx[var_idx]
+        end
-        # Fetch dual sensitivities
-        num_constraints = length(cache.cons)
-        num_up = length(cache.has_up)
-        num_low = length(cache.has_low)
-        Δdual = zeros(num_constraints + num_up + num_low)
-        for (i, ci) in enumerate(cache.cons)
-            idx = form.nlp_index_2_constraint[ci]
-            if haskey(model.input_cache.dy, idx)
-                Δdual[i] = model.input_cache.dy[idx]
-            end
     end
+    # Fetch dual sensitivities
+    num_constraints = length(cache.cons)
+    num_up = length(cache.has_up)
+    num_low = length(cache.has_low)
+    Δdual = zeros(num_constraints + num_up + num_low)
+    for (i, ci) in enumerate(cache.cons)
+        idx = form.nlp_index_2_constraint[ci]
+        if haskey(model.input_cache.dy, idx)
+            Δdual[i] = model.input_cache.dy[idx]
+        end
-        for (i, var_idx) in enumerate(cache.primal_vars[cache.has_low])
-            idx = form.constraint_lower_bounds[var_idx.value]
-            if haskey(model.input_cache.dy, idx)
-                Δdual[num_constraints+i] = model.input_cache.dy[idx]
-            end
     end
+    for (i, var_idx) in enumerate(cache.primal_vars[cache.has_low])
+        idx = form.constraint_lower_bounds[var_idx.value]
+        if haskey(model.input_cache.dy, idx)
+            Δdual[num_constraints+i] = model.input_cache.dy[idx]
+        end
-        for (i, var_idx) in enumerate(cache.primal_vars[cache.has_up])
-            idx = form.constraint_upper_bounds[var_idx.value]
-            if haskey(model.input_cache.dy, idx)
-                Δdual[num_constraints+num_low+i] = model.input_cache.dy[idx]
-            end
     end
+    for (i, var_idx) in enumerate(cache.primal_vars[cache.has_up])
+        idx = form.constraint_upper_bounds[var_idx.value]
+        if haskey(model.input_cache.dy, idx)
+            Δdual[num_constraints+num_low+i] = model.input_cache.dy[idx]
+        end
-        # Extract Parameter sensitivities
-        Δw = zeros(size(Δs, 1))
-        Δw[1:num_primal] = Δx
-        Δw[cache.index_duals] = Δdual
-        Δp = Δs' * Δw
     end
+    # Extract Parameter sensitivities
+    Δw = zeros(size(Δs, 1))
+    Δw[1:num_primal] = Δx
+    Δw[cache.index_duals] = Δdual
+    Δp += Δs' * Δw

     Δp_dict = Dict{MOI.ConstraintIndex,Float64}(
         form.var2ci[var_idx] => Δp[form.var2param[var_idx].value]
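For context, the rewritten block above accumulates two vector-Jacobian products instead of treating the objective seed and the solution seeds as mutually exclusive. A small self-contained sketch of that accumulation (shapes and values are illustrative only; in the package, Δs and df_dp come from _compute_sensitivity):

using LinearAlgebra

n_w, n_p = 5, 3       # stacked primal/dual entries and number of parameters
Δs = randn(n_w, n_p)  # sensitivity of (primal, duals) with respect to the parameters
df_dp = randn(n_p)    # sensitivity of the objective with respect to the parameters
dobj = 0.5            # reverse seed on the objective value
Δw = randn(n_w)       # reverse seeds on primal/dual values (Δx and Δdual stacked)

Δp = df_dp .* dobj    # objective contribution (df_dp' * dobj above)
Δp += Δs' * Δw        # solution contribution, added on top
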
12 changes: 12 additions & 0 deletions src/diff_opt.jl
@@ -31,6 +31,7 @@ Base.@kwdef mutable struct InputCache
         MOIDD.DoubleDict{MOI.VectorAffineFunction{Float64}}() # also includes G for QPs
     objective::Union{Nothing,MOI.AbstractScalarFunction} = nothing
     factorization::Union{Nothing,Function} = nothing
+    allow_objective_and_solution_input::Bool = false
 end

 function Base.empty!(cache::InputCache)
@@ -122,6 +123,8 @@ MOI.set(model, DiffOpt.NonLinearKKTJacobianFactorization(), factorization)
 """
 struct NonLinearKKTJacobianFactorization <: MOI.AbstractModelAttribute end

+struct AllowObjectiveAndSolutionInput <: MOI.AbstractModelAttribute end
+
 """
     ForwardConstraintFunction <: MOI.AbstractConstraintAttribute

@@ -440,6 +443,15 @@ function MOI.set(
     return
 end

+function MOI.set(
+    model::AbstractModel,
+    ::AllowObjectiveAndSolutionInput,
+    allow::Bool,
+)
+    model.input_cache.allow_objective_and_solution_input = allow
+    return
+end
+
 function MOI.set(
     model::AbstractModel,
     ::ReverseVariablePrimal,
8 changes: 8 additions & 0 deletions src/jump_moi_overloads.jl
@@ -29,6 +29,14 @@ function MOI.set(
     return MOI.set(JuMP.backend(model), attr, factorization)
 end

+function MOI.set(
+    model::JuMP.Model,
+    attr::AllowObjectiveAndSolutionInput,
+    allow::Bool,
+)
+    return MOI.set(JuMP.backend(model), attr, allow)
+end
+
 function MOI.set(
     model::JuMP.Model,
     attr::ForwardObjectiveFunction,
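For reference, a sketch of the JuMP-level entry point this overload enables (illustrative; it assumes a differentiable model built with DiffOpt.diff_optimizer over some NLP solver, with Ipopt used here only as an example backend):

using JuMP, DiffOpt, Ipopt

model = Model(() -> DiffOpt.diff_optimizer(Ipopt.Optimizer))
# The overload above forwards the attribute to the DiffOpt backend:
MOI.set(model, DiffOpt.AllowObjectiveAndSolutionInput(), true)
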
30 changes: 27 additions & 3 deletions src/moi_wrapper.jl
@@ -557,16 +557,22 @@ function reverse_differentiate!(model::Optimizer)
     end
     if !iszero(model.input_cache.dobj) &&
        (!isempty(model.input_cache.dx) || !isempty(model.input_cache.dy))
-        error(
-            "Cannot compute the reverse differentiation with both solution sensitivities and objective sensitivities.",
-        )
+        if !MOI.get(model, AllowObjectiveAndSolutionInput())
+            @warn "Computing reverse differentiation with both solution sensitivities and objective sensitivities. " *
+                  "Set `DiffOpt.AllowObjectiveAndSolutionInput()` to `true` to silence this warning."
+        end
     end
     diff = _diff(model)
     MOI.set(
         diff,
         NonLinearKKTJacobianFactorization(),
         model.input_cache.factorization,
     )
+    MOI.set(
+        diff,
+        AllowObjectiveAndSolutionInput(),
+        model.input_cache.allow_objective_and_solution_input,
+    )
     for (vi, value) in model.input_cache.dx
         MOI.set(diff, ReverseVariablePrimal(), model.index_map[vi], value)
     end
@@ -673,6 +679,11 @@ function forward_differentiate!(model::Optimizer)
         NonLinearKKTJacobianFactorization(),
         model.input_cache.factorization,
     )
+    MOI.set(
+        diff,
+        AllowObjectiveAndSolutionInput(),
+        model.input_cache.allow_objective_and_solution_input,
+    )
     T = Float64
     list = MOI.get(
         model,
@@ -1125,6 +1136,10 @@ function MOI.supports(
     return true
 end

+function MOI.supports(::Optimizer, ::AllowObjectiveAndSolutionInput, ::Bool)
+    return true
+end
+
 function MOI.set(
     model::Optimizer,
     ::NonLinearKKTJacobianFactorization,
@@ -1134,10 +1149,19 @@ function MOI.set(
     return
 end

+function MOI.set(model::Optimizer, ::AllowObjectiveAndSolutionInput, allow)
+    model.input_cache.allow_objective_and_solution_input = allow
+    return
+end
+
 function MOI.get(model::Optimizer, ::NonLinearKKTJacobianFactorization)
     return model.input_cache.factorization
 end

+function MOI.get(model::Optimizer, ::AllowObjectiveAndSolutionInput)
+    return model.input_cache.allow_objective_and_solution_input
+end
+
 function MOI.set(model::Optimizer, attr::MOI.AbstractOptimizerAttribute, value)
     return MOI.set(model.optimizer, attr, value)
 end
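
A behavior sketch of the relaxed check above (illustration only; `model`, `x`, and the reverse seeds are assumed to be set up as in the test file that follows):

# With the attribute at its default `false`, seeding both the objective and a
# primal value now warns instead of throwing:
MOI.set(model, DiffOpt.ReverseObjectiveSensitivity(), 0.5)
MOI.set(model, DiffOpt.ReverseVariablePrimal(), x, 1.0)
DiffOpt.reverse_differentiate!(model)   # emits the @warn shown above

# Opting in silences the warning and both contributions are accumulated:
MOI.set(model, DiffOpt.AllowObjectiveAndSolutionInput(), true)
DiffOpt.reverse_differentiate!(model)   # no warning
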
40 changes: 32 additions & 8 deletions test/nlp_program.jl
@@ -644,14 +644,15 @@ function test_ObjectiveSensitivity_model1()
     set_silent(model)

     # Parameters
-    @variable(model, p ∈ MOI.Parameter(1.5))
+    p_val = 1.5
+    @variable(model, p ∈ MOI.Parameter(p_val))

     # Variables
     @variable(model, x)

     # Constraints
     @constraint(model, x * sin(p) == 1)
-    @objective(model, Min, sum(x))
+    @objective(model, Min, 2 * x)

     optimize!(model)
     @assert is_solved_and_feasible(model)
@@ -665,19 +666,42 @@

     # Test Objective Sensitivity wrt parameters
     df_dp = MOI.get(model, DiffOpt.ForwardObjectiveSensitivity())
-    @test isapprox(df_dp, -0.0071092; atol = 1e-4)
+    df = -2cos(p_val) / sin(p_val)^2
+    @test isapprox(df_dp, df * Δp; atol = 1e-4)

     # Clean up
     DiffOpt.empty_input_sensitivities!(model)

-    # Set Too Many Sensitivities
+    # Test both obj and solution inputs
+    Δf = 0.5
+    MOI.set(model, DiffOpt.ReverseObjectiveSensitivity(), Δf)
+    MOI.set(model, DiffOpt.ReverseVariablePrimal(), x, Δp)

-    MOI.set(model, DiffOpt.ReverseVariablePrimal(), x, 1.0)
+    msg = "Computing reverse differentiation with both solution sensitivities and objective sensitivities. Set `DiffOpt.AllowObjectiveAndSolutionInput()` to `true` to silence this warning."
+    @test_logs (:warn, msg) DiffOpt.reverse_differentiate!(model)
+    MOI.set(model, DiffOpt.AllowObjectiveAndSolutionInput(), true)
+    @test_nowarn DiffOpt.reverse_differentiate!(model)

-    # Compute derivatives
-    @test_throws ErrorException DiffOpt.reverse_differentiate!(model)
+    dp_combined =
+        MOI.get(model, DiffOpt.ReverseConstraintSet(), ParameterRef(p)).value

+    ε = 1e-6
+    df_dp_fdpos = begin
+        set_parameter_value(p, p_val + ε)
+        optimize!(model)
+        Δf * objective_value(model) + Δp * value(x)
+    end
+    df_dp_fdneg = begin
+        set_parameter_value(p, p_val - ε)
+        optimize!(model)
+        Δf * objective_value(model) + Δp * value(x)
+    end
+    df_dp_fd = (df_dp_fdpos - df_dp_fdneg) / (2ε)
+
+    @test isapprox(df_dp_fd, dp_combined)

+    set_parameter_value(p, p_val)
+    optimize!(model)

     DiffOpt.empty_input_sensitivities!(model)

@@ -691,7 +715,7 @@ function test_ObjectiveSensitivity_model1()
     # Test Objective Sensitivity wrt parameters
     dp = MOI.get(model, DiffOpt.ReverseConstraintSet(), ParameterRef(p)).value

-    @test isapprox(dp, -0.0355464; atol = 1e-4)
+    @test isapprox(dp, df * Δf; atol = 1e-4)
 end

 function test_ObjectiveSensitivity_model2()
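A worked check of the closed-form derivative the updated test leans on (illustration only; it mirrors the model above, where x*sin(p) == 1 and the objective is 2x, so x(p) = 1/sin(p) and f(p) = 2/sin(p)):

# df/dp = -2cos(p)/sin(p)^2 is the `df` compared against in the @test lines.
p_val = 1.5
df = -2cos(p_val) / sin(p_val)^2
ε = 1e-6
fd = (2 / sin(p_val + ε) - 2 / sin(p_val - ε)) / (2ε)   # central difference of f(p)
@assert isapprox(df, fd; atol = 1e-6)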