
Update jac_lin_coord methods #501

Merged · 24 commits · Jul 27, 2025
6 changes: 3 additions & 3 deletions docs/src/guidelines.md
@@ -68,11 +68,11 @@ The following functions should be defined:
- `cons_nln!(nlp, x, c)`
- `jac_lin_structure!(nlp, jrows, jcols)`
- `jac_nln_structure!(nlp, jrows, jcols)`
- `jac_lin_coord!(nlp, x, jvals)`
- `jac_lin_coord!(nlp, jvals)`
- `jac_nln_coord!(nlp, x, jvals)`
- `jprod_lin!(nlp, x, v, Jv)`
- `jprod_lin!(nlp, v, Jv)`
- `jprod_nln!(nlp, x, v, Jv)`
- `jtprod_lin!(nlp, x, v, Jtv)`
- `jtprod_lin!(nlp, v, Jtv)`
- `jtprod_nln!(nlp, x, v, Jtv)`
- `hess_coord!(nlp, x, y, hvals; obj_weight=1)`
- `hprod!(nlp, x, y, v, Hv; obj_weight=1)`
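To make the new `x`-free signatures concrete, here is a minimal sketch (not part of this diff) of how a model with constant linear constraints might implement two of them. The type `MyLinearModel` and its sparse field `A` are assumptions for illustration only:

```julia
using LinearAlgebra, SparseArrays
using NLPModels

# Sketch only: a model storing its constant linear Jacobian as a SparseMatrixCSC `A`.
# Assumes jac_lin_structure! returns the same column-major ordering as findnz(nlp.A).
function NLPModels.jac_lin_coord!(nlp::MyLinearModel, jvals::AbstractVector)
  increment!(nlp, :neval_jac_lin)
  jvals .= nonzeros(nlp.A)  # no current point is needed: the Jacobian is constant
  return jvals
end

function NLPModels.jprod_lin!(nlp::MyLinearModel, v::AbstractVector, Jv::AbstractVector)
  increment!(nlp, :neval_jprod_lin)
  mul!(Jv, nlp.A, v)
  return Jv
end
```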
93 changes: 53 additions & 40 deletions src/nlp/api.jl
@@ -1,3 +1,5 @@
using Base: @deprecate

export obj, grad, grad!, objgrad, objgrad!, objcons, objcons!
export cons, cons!, cons_lin, cons_lin!, cons_nln, cons_nln!
export jth_con, jth_congrad, jth_congrad!, jth_sparse_congrad
@@ -266,10 +268,10 @@ function jac_coord!(nlp::AbstractNLPModel, x::AbstractVector, vals::AbstractVect
increment!(nlp, :neval_jac)
if nlp.meta.nlin > 0
if nlp.meta.nnln == 0
jac_lin_coord!(nlp, x, vals)
jac_lin_coord!(nlp, vals)
else
lin_ind = 1:(nlp.meta.lin_nnzj)
jac_lin_coord!(nlp, x, view(vals, lin_ind))
jac_lin_coord!(nlp, view(vals, lin_ind))
end
end
if nlp.meta.nnln > 0
@@ -307,36 +309,40 @@ function jac(nlp::AbstractNLPModel, x::AbstractVector)
end

"""
vals = jac_lin_coord!(nlp, x, vals)
vals = jac_lin_coord!(nlp, vals)

Evaluate ``J(x)``, the linear constraints Jacobian at `x` in sparse coordinate format,
Evaluate the linear constraints Jacobian in sparse coordinate format,
overwriting `vals`.
"""
function jac_lin_coord! end

@deprecate jac_lin_coord!(nlp::AbstractNLPModel, x::AbstractVector, vals::AbstractVector) jac_lin_coord!(nlp, vals)

"""
vals = jac_lin_coord(nlp, x)
vals = jac_lin_coord(nlp)

Evaluate ``J(x)``, the linear constraints Jacobian at `x` in sparse coordinate format.
Evaluate the linear constraints Jacobian in sparse coordinate format.
"""
function jac_lin_coord(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S}
@lencheck nlp.meta.nvar x
function jac_lin_coord(nlp::AbstractNLPModel{T, S}) where {T, S}
vals = S(undef, nlp.meta.lin_nnzj)
return jac_lin_coord!(nlp, x, vals)
return jac_lin_coord!(nlp, vals)
end

@deprecate jac_lin_coord(nlp::AbstractNLPModel, x::AbstractVector) jac_lin_coord(nlp)

"""
Jx = jac_lin(nlp, x)
Jx = jac_lin(nlp)

Evaluate ``J(x)``, the linear constraints Jacobian at `x` as a sparse matrix.
Evaluate the linear constraints Jacobian as a sparse matrix.
"""
function jac_lin(nlp::AbstractNLPModel, x::AbstractVector)
@lencheck nlp.meta.nvar x
function jac_lin(nlp::AbstractNLPModel)
rows, cols = jac_lin_structure(nlp)
vals = jac_lin_coord(nlp, x)
vals = jac_lin_coord(nlp)
sparse(rows, cols, vals, nlp.meta.nlin, nlp.meta.nvar)
end

@deprecate jac_lin(nlp::AbstractNLPModel, x::AbstractVector) jac_lin(nlp)

"""
vals = jac_nln_coord!(nlp, x, vals)

@@ -390,9 +396,9 @@ function jprod!(nlp::AbstractNLPModel, x::AbstractVector, v::AbstractVector, Jv:
increment!(nlp, :neval_jprod)
if nlp.meta.nlin > 0
if nlp.meta.nnln == 0
jprod_lin!(nlp, x, v, Jv)
jprod_lin!(nlp, v, Jv)
else
jprod_lin!(nlp, x, v, view(Jv, nlp.meta.lin))
jprod_lin!(nlp, v, view(Jv, nlp.meta.lin))
end
end
if nlp.meta.nnln > 0
@@ -427,23 +433,26 @@ function jprod!(
end

"""
Jv = jprod_lin(nlp, x, v)
Jv = jprod_lin(nlp, v)

Evaluate ``J(x)v``, the linear Jacobian-vector product at `x`.
"""
function jprod_lin(nlp::AbstractNLPModel{T, S}, x::AbstractVector, v::AbstractVector) where {T, S}
@lencheck nlp.meta.nvar x v
function jprod_lin(nlp::AbstractNLPModel{T, S}, v::AbstractVector) where {T, S}
@lencheck nlp.meta.nvar v
Jv = S(undef, nlp.meta.nlin)
return jprod_lin!(nlp, x, v, Jv)
return jprod_lin!(nlp, v, Jv)
end

"""
Jv = jprod_lin!(nlp, x, v, Jv)
Jv = jprod_lin!(nlp, v, Jv)

Evaluate ``J(x)v``, the linear Jacobian-vector product at `x` in place.
"""
function jprod_lin! end

@deprecate jprod_lin(nlp::AbstractNLPModel, x::AbstractVector, v::AbstractVector) jprod_lin(nlp, v)
@deprecate jprod_lin!(nlp::AbstractNLPModel, x::AbstractVector, v::AbstractVector, Jv::AbstractVector) jprod_lin!(nlp, v, Jv)

"""
Jv = jprod_lin!(nlp, rows, cols, vals, v, Jv)

@@ -527,18 +536,18 @@ function jtprod!(nlp::AbstractNLPModel, x::AbstractVector, v::AbstractVector, Jt
@lencheck nlp.meta.ncon v
increment!(nlp, :neval_jtprod)
if nlp.meta.nnln == 0
(nlp.meta.nlin > 0) && jtprod_lin!(nlp, x, v, Jtv)
(nlp.meta.nlin > 0) && jtprod_lin!(nlp, v, Jtv)
elseif nlp.meta.nlin == 0
(nlp.meta.nnln > 0) && jtprod_nln!(nlp, x, v, Jtv)
elseif nlp.meta.nlin >= nlp.meta.nnln
jtprod_lin!(nlp, x, view(v, nlp.meta.lin), Jtv)
jtprod_lin!(nlp, view(v, nlp.meta.lin), Jtv)
if nlp.meta.nnln > 0
Jtv .+= jtprod_nln(nlp, x, view(v, nlp.meta.nln))
end
else
jtprod_nln!(nlp, x, view(v, nlp.meta.nln), Jtv)
if nlp.meta.nlin > 0
Jtv .+= jtprod_lin(nlp, x, view(v, nlp.meta.lin))
Jtv .+= jtprod_lin(nlp, view(v, nlp.meta.lin))
end
end
return Jtv
@@ -566,24 +575,26 @@ function jtprod!(
end

"""
Jtv = jtprod_lin(nlp, x, v)
Jtv = jtprod_lin(nlp, v)

Evaluate ``J(x)^Tv``, the linear transposed-Jacobian-vector product at `x`.
"""
function jtprod_lin(nlp::AbstractNLPModel{T, S}, x::AbstractVector, v::AbstractVector) where {T, S}
@lencheck nlp.meta.nvar x
function jtprod_lin(nlp::AbstractNLPModel{T, S}, v::AbstractVector) where {T, S}
@lencheck nlp.meta.nlin v
Jtv = S(undef, nlp.meta.nvar)
return jtprod_lin!(nlp, x, v, Jtv)
return jtprod_lin!(nlp, v, Jtv)
end

"""
Jtv = jtprod_lin!(nlp, x, v, Jtv)
Jtv = jtprod_lin!(nlp, v, Jtv)

Evaluate ``J(x)^Tv``, the linear transposed-Jacobian-vector product at `x` in place.
"""
function jtprod_lin! end

@deprecate jtprod_lin(nlp::AbstractNLPModel, x::AbstractVector, v::AbstractVector) jtprod_lin(nlp, v)
@deprecate jtprod_lin!(nlp::AbstractNLPModel, x::AbstractVector, v::AbstractVector, Jtv::AbstractVector) jtprod_lin!(nlp, v, Jtv)

"""
Jtv = jtprod_lin!(nlp, rows, cols, vals, v, Jtv)

@@ -736,37 +747,37 @@ function jac_op!(
end

"""
J = jac_lin_op(nlp, x)
J = jac_lin_op(nlp)

Return the linear Jacobian at `x` as a linear operator.
Return the linear Jacobian as a linear operator.
The resulting object may be used as if it were a matrix, e.g., `J * v` or
`J' * v`.
"""
function jac_lin_op(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S}
@lencheck nlp.meta.nvar x
function jac_lin_op(nlp::AbstractNLPModel{T, S}) where {T, S}
Jv = S(undef, nlp.meta.nlin)
Jtv = S(undef, nlp.meta.nvar)
return jac_lin_op!(nlp, x, Jv, Jtv)
return jac_lin_op!(nlp, Jv, Jtv)
end

@deprecate jac_lin_op(nlp::AbstractNLPModel, x::AbstractVector) jac_lin_op(nlp)

"""
J = jac_lin_op!(nlp, x, Jv, Jtv)
J = jac_lin_op!(nlp, Jv, Jtv)

Return the linear Jacobian at `x` as a linear operator.
Return the linear Jacobian as a linear operator.
The resulting object may be used as if it were a matrix, e.g., `J * v` or
`J' * v`. The values `Jv` and `Jtv` are used as preallocated storage for the
operations.
"""
function jac_lin_op!(
nlp::AbstractNLPModel{T, S},
x::AbstractVector{T},
Jv::AbstractVector,
Jtv::AbstractVector,
) where {T, S}
@lencheck nlp.meta.nvar x Jtv
@lencheck nlp.meta.nlin Jv
@lencheck nlp.meta.nvar Jtv
prod! = @closure (res, v, α, β) -> begin # res = α * J * v + β * res
jprod_lin!(nlp, x, v, Jv)
jprod_lin!(nlp, v, Jv)
if β == 0
res .= α .* Jv
else
@@ -775,7 +786,7 @@ function jac_lin_op!(
return res
end
ctprod! = @closure (res, v, α, β) -> begin
jtprod_lin!(nlp, x, v, Jtv)
jtprod_lin!(nlp, v, Jtv)
if β == 0
res .= α .* Jtv
else
@@ -786,6 +797,8 @@
return LinearOperator{T}(nlp.meta.nlin, nlp.meta.nvar, false, false, prod!, ctprod!, ctprod!)
end

@deprecate jac_lin_op!(nlp::AbstractNLPModel, x::AbstractVector, Jv::AbstractVector, Jtv::AbstractVector) jac_lin_op!(nlp, Jv, Jtv)

"""
J = jac_lin_op!(nlp, rows, cols, vals, Jv, Jtv)

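Seen from the caller's side, the effect of this file is that every `*_lin` Jacobian utility drops the `x` argument, while the old signatures remain callable through `@deprecate`. A usage sketch, assuming `nlp` is an `AbstractNLPModel` with `nlp.meta.nlin > 0` and `v`, `w`, `x` are vectors of matching lengths:

```julia
vals = jac_lin_coord(nlp)  # coordinate values of the constant linear Jacobian
A    = jac_lin(nlp)        # the same Jacobian assembled as a sparse matrix
Jv   = jprod_lin(nlp, v)   # A * v,  with length(v) == nlp.meta.nvar
Jtv  = jtprod_lin(nlp, w)  # A' * w, with length(w) == nlp.meta.nlin
Jop  = jac_lin_op(nlp)     # lazy LinearOperator; supports Jop * v and Jop' * w

# Deprecated but still working: forwards to the x-free method and emits a warning.
jac_lin(nlp, x)
```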
14 changes: 7 additions & 7 deletions test/nlp/api.jl
@@ -30,13 +30,13 @@
@test cons_lin(nlp, x) == c(x)[1:1]
@test jac(nlp, x) ≈ J(x)
@test jac_nln(nlp, x) ≈ J(x)[2:2, :]
@test jac_lin(nlp, x) ≈ J(x)[1:1, :]
@test jac_lin(nlp) ≈ J(x)[1:1, :]
@test jprod(nlp, x, v) ≈ J(x) * v
@test jprod_nln(nlp, x, v) ≈ J(x)[2:2, :] * v
@test jprod_lin(nlp, x, v) ≈ J(x)[1:1, :] * v
@test jprod_lin(nlp, v) ≈ J(x)[1:1, :] * v
@test jtprod(nlp, x, w) ≈ J(x)' * w
@test jtprod_nln(nlp, x, w[2:2]) ≈ J(x)[2:2, :]' * w[2:2]
@test jtprod_lin(nlp, x, w[1:1]) ≈ J(x)[1:1, :]' * w[1:1]
@test jtprod_lin(nlp, w[1:1]) ≈ J(x)[1:1, :]' * w[1:1]
@test hess(nlp, x, y) ≈ tril(H(x, y))
@test hprod(nlp, x, y, v) ≈ H(x, y) * v

@@ -56,12 +56,12 @@
@test jprod!(nlp, jac_structure(nlp)..., jac_coord(nlp, x), v, Jv) ≈ J(x) * v
@test jprod_nln!(nlp, jac_nln_structure(nlp)..., jac_nln_coord(nlp, x), v, Jv[2:2]) ≈
J(x)[2:2, :] * v
@test jprod_lin!(nlp, jac_lin_structure(nlp)..., jac_lin_coord(nlp, x), v, Jv[1:1]) ≈
@test jprod_lin!(nlp, jac_lin_structure(nlp)..., jac_lin_coord(nlp), v, Jv[1:1]) ≈
J(x)[1:1, :] * v
@test jtprod!(nlp, jac_structure(nlp)..., jac_coord(nlp, x), w, Jtw) ≈ J(x)' * w
@test jtprod_nln!(nlp, jac_nln_structure(nlp)..., jac_nln_coord(nlp, x), w[2:2], Jtw) ≈
J(x)[2:2, :]' * w[2:2]
@test jtprod_lin!(nlp, jac_lin_structure(nlp)..., jac_lin_coord(nlp, x), w[1:1], Jtw) ≈
@test jtprod_lin!(nlp, jac_lin_structure(nlp)..., jac_lin_coord(nlp), w[1:1], Jtw) ≈
J(x)[1:1, :]' * w[1:1]
Jop = jac_op!(nlp, x, Jv, Jtw)
@test Jop * v ≈ J(x) * v
@@ -91,14 +91,14 @@
@test mul!(w[2:2], Jop, v, 1.0, -1.0) ≈ res
res = J(x)[2:2, :]' * w[2:2] - v
@test mul!(v, Jop', w[2:2], 1.0, -1.0) ≈ res
Jop = jac_lin_op!(nlp, x, Jv[1:1], Jtw)
Jop = jac_lin_op!(nlp, Jv[1:1], Jtw)
@test Jop * v ≈ J(x)[1:1, :] * v
@test Jop' * w[1:1] ≈ Jtw
res = J(x)[1:1, :] * v - w[1:1]
@test mul!(w[1:1], Jop, v, 1.0, -1.0) ≈ res
res = J(x)[1:1, :]' * w[1:1] - v
@test mul!(v, Jop', w[1:1], 1.0, -1.0) ≈ res
Jop = jac_lin_op!(nlp, jac_lin_structure(nlp)..., jac_lin_coord(nlp, x), Jv[1:1], Jtw)
Jop = jac_lin_op!(nlp, jac_lin_structure(nlp)..., jac_lin_coord(nlp), Jv[1:1], Jtw)
@test Jop * v ≈ J(x)[1:1, :] * v
@test Jop' * w[1:1] ≈ Jtw
res = J(x)[1:1, :] * v - w[1:1]
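The block above also exercises the in-place operator constructor with preallocated buffers. The pattern looks roughly as follows; the buffer names are hypothetical and `nlp`, `v`, `w` are assumed to be set up as in these tests:

```julia
using LinearAlgebra  # for mul!

Jv  = zeros(nlp.meta.nlin)       # work buffer for products J * v
Jtw = zeros(nlp.meta.nvar)       # work buffer for products J' * w
Jop = jac_lin_op!(nlp, Jv, Jtw)  # the operator reuses these buffers internally

out  = similar(Jv);  mul!(out, Jop, v)    # in-place J * v
outT = similar(Jtw); mul!(outT, Jop', w)  # in-place J' * w
```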
6 changes: 3 additions & 3 deletions test/nlp/dummy-model.jl
@@ -19,15 +19,15 @@ end
@test_throws(MethodError, jth_congrad(model, [0.0], 1))
@test_throws(MethodError, jth_sparse_congrad(model, [0.0], 1))
@test_throws(MethodError, jth_congrad!(model, [0.0], 1, [2.0]))
@test_throws(MethodError, jprod_lin!(model, [0.0], [1.0], [2.0]))
@test_throws(MethodError, jtprod_lin!(model, [0.0], [1.0], [2.0]))
@test_throws(MethodError, jprod_lin!(model, [0.0], [2.0]))
@test_throws(MethodError, jtprod_lin!(model, [0.0], [2.0]))
@test_throws(MethodError, jprod_nln!(model, [0.0], [1.0], [2.0]))
@test_throws(MethodError, jtprod_nln!(model, [0.0], [1.0], [2.0]))
@test_throws(MethodError, jth_hess_coord!(model, [0.0], 1))
@test_throws(MethodError, jth_hprod!(model, [0.0], [1.0], 2, [3.0]))
@test_throws(MethodError, ghjvprod!(model, [0.0], [1.0], [2.0], [3.0]))
@assert isa(hess_op(model, [0.0]), LinearOperator)
@assert isa(jac_op(model, [0.0]), LinearOperator)
@assert isa(jac_lin_op(model, [0.0]), LinearOperator)
@assert isa(jac_lin_op(model), LinearOperator)
@assert isa(jac_nln_op(model, [0.0]), LinearOperator)
end
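One subtlety these assertions rely on: `jac_lin_op(model)` succeeds even though `jprod_lin!` and `jtprod_lin!` are not implemented for the dummy model, because the operator only allocates buffers and wraps closures; the `MethodError` would surface only when the operator is applied. A hedged sketch, assuming `model` is the dummy model instance from this file:

```julia
op = jac_lin_op(model)        # fine: no Jacobian product is evaluated yet
# op * ones(model.meta.nvar)  # would throw MethodError, since jprod_lin! is undefined here
```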
25 changes: 8 additions & 17 deletions test/nlp/simple-model.jl
@@ -139,8 +139,8 @@ function NLPModels.jac_nln_coord!(nlp::SimpleNLPModel, x::AbstractVector, vals::
return vals
end

function NLPModels.jac_lin_coord!(nlp::SimpleNLPModel, x::AbstractVector, vals::AbstractVector)
@lencheck 2 x vals
function NLPModels.jac_lin_coord!(nlp::SimpleNLPModel, vals::AbstractVector)
@lencheck 2 vals
increment!(nlp, :neval_jac_lin)
vals .= [1, -2]
return vals
@@ -159,16 +159,12 @@ function NLPModels.jprod_nln!(
return Jv
end

function NLPModels.jprod_lin!(
nlp::SimpleNLPModel,
x::AbstractVector,
v::AbstractVector,
Jv::AbstractVector,
)
@lencheck 2 x v

function NLPModels.jprod_lin!(nlp::SimpleNLPModel, v::AbstractVector, Jv::AbstractVector)
@lencheck 2 v
@lencheck 1 Jv
increment!(nlp, :neval_jprod_lin)
Jv .= [v[1] - 2 * v[2]]
Jv[1] = v[1] - 2 * v[2]
return Jv
end

@@ -185,13 +181,8 @@ function NLPModels.jtprod_nln!(
return Jtv
end

function NLPModels.jtprod_lin!(
nlp::SimpleNLPModel,
x::AbstractVector,
v::AbstractVector,
Jtv::AbstractVector,
)
@lencheck 2 x Jtv
function NLPModels.jtprod_lin!(nlp::SimpleNLPModel, v::AbstractVector, Jtv::AbstractVector)
@lencheck 2 Jtv
@lencheck 1 v
increment!(nlp, :neval_jtprod_lin)
Jtv .= [v[1]; -2 * v[1]]
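As a quick consistency check (not part of the PR), the constant linear Jacobian of `SimpleNLPModel` is `[1 -2]`, so the x-free methods defined above can be exercised directly. The zero-argument constructor below is an assumption about this test model:

```julia
nlp = SimpleNLPModel()      # assumed constructor; see the test file for the real one
jac_lin_coord(nlp)          # ≈ [1.0, -2.0]
jprod_lin(nlp, [3.0, 1.0])  # ≈ [1.0]   (= 3 - 2 * 1)
jtprod_lin(nlp, [2.0])      # ≈ [2.0, -4.0]
```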