Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,12 @@ version = "2.11.0"

[deps]
FastClosures = "9aa1b823-49e4-5ca5-8b0f-3971ec8bab6a"
GenericLinearAlgebra = "14197337-ba66-59df-a3e3-ca00e7dcff7a"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
Requires = "ae029012-a4dd-5104-9daa-d747884805df"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
TSVD = "9449cd9e-2762-5aa3-a617-5413e99d722e"
TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"

[weakdeps]
Expand All @@ -31,13 +33,15 @@ AMDGPU = "2"
CUDA = "5"
ChainRulesCore = "1"
FastClosures = "0.2, 0.3"
GenericLinearAlgebra = "0.3.18"
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can you expand a bit on why we need these new deps and should we have them by default and not in package extension?

JLArrays = "0.1, 0.2"
LDLFactorizations = "0.9, 0.10"
LinearAlgebra = "1"
Metal = "1.1"
Printf = "1"
Requires = "1"
SparseArrays = "1"
TSVD = "0.4.4"
TimerOutputs = "^0.5"
julia = "^1.6.0"

Expand Down
122 changes: 122 additions & 0 deletions src/utilities.jl
Original file line number Diff line number Diff line change
@@ -1,6 +1,11 @@
export check_ctranspose,
check_hermitian, check_positive_definite, normest, solve_shifted_system!, ldiv!
import LinearAlgebra.ldiv!
import LinearAlgebra: opnorm
using GenericLinearAlgebra
using TSVD

export opnorm

"""
normest(S) estimates the matrix 2-norm of S.
Expand Down Expand Up @@ -287,3 +292,120 @@ function ldiv!(
solve_shifted_system!(x, B, b, T(0.0))
return x
end

"""
opnorm(B; kwargs...)

Compute the operator 2-norm (largest singular value) of a matrix or linear operator `B`.
This method dispatches to efficient algorithms depending on the type and size of `B`:
for small dense matrices, it uses direct LAPACK routines; for larger or structured operators,
it uses iterative methods (ARPACK or TSVD) to estimate the norm efficiently.

# Arguments
- `B`: A matrix or linear operator.
- `kwargs...`: Optional keyword arguments passed to the underlying norm estimation routines.

# Returns
- The estimated operator 2-norm of `B` (largest singular value or eigenvalue in absolute value).
"""

function LinearAlgebra.opnorm(B; kwargs...)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same question as before. Do we really want to override the default opnorm from LinearAlgebra?
If someone uses this function in its code and add LinearOperators.jl it will mess everything up.

I think it's fine to import function to extand them, but here you override a default behavior.

I would suggest finding a better name and document the similarity with opnorm

_opnorm(B, eltype(B); kwargs...)
end

# Specialized method for the four element types ARPACK supports
# (Float32, Float64, ComplexF32, ComplexF64): square operators go through an
# eigenvalue-based estimate, rectangular ones through an SVD-based estimate.
function _opnorm(
  B,
  ::Type{T};
  kwargs...,
) where {T <: Union{Float32, Float64, ComplexF32, ComplexF64}}
  nrow, ncol = size(B)
  if nrow == ncol
    return opnorm_eig(B; kwargs...)
  else
    return opnorm_svd(B; kwargs...)
  end
end

# Generic fallback for element types ARPACK does not handle (e.g. BigFloat):
# estimate the largest singular value via a truncated SVD (TSVD.jl).
function _opnorm(B, ::Type{T}; kwargs...) where {T}
  U, singular_values, V = tsvd(B)
  # tsvd returns singular values in decreasing order; the first is the 2-norm.
  # The `true` flag mirrors the (value, converged) convention of the other
  # _opnorm methods.
  return singular_values[1], true
end

"""
    opnorm_eig(B; max_attempts = 3, dense_threshold = 5)

Estimate the operator 2-norm of a square matrix or operator `B` as the largest
eigenvalue in absolute value.

Small operators (`size(B, 1) ≤ dense_threshold`) are handled by a direct dense
eigendecomposition; larger ones use the iterative ARPACK solver `eigs`,
retrying with a larger Krylov subspace (`ncv`) when convergence fails.

# Arguments
- `B`: a square matrix or linear operator.

# Keywords
- `max_attempts::Int = 3`: maximum number of iterative attempts.
- `dense_threshold::Int = 5`: size at or below which the dense path is used.

# Returns
- `(λ, converged)`: the norm estimate and whether the estimate converged
  (always `true` on the dense path).
"""
function opnorm_eig(B; max_attempts::Int = 3, dense_threshold::Int = 5)
  n = size(B, 1)
  # 1) Tiny operators: direct dense eigendecomposition (always converges).
  if n ≤ dense_threshold
    return maximum(abs, eigen(Matrix(B)).values), true
  end

  # 2) Iterative ARPACK: one eigenvalue, with a generous Krylov subspace.
  nev = 1
  ncv = max(20, 2 * nev + 1)
  attempt, λ, have_eig = 0, zero(eltype(B)), false

  while !(have_eig || attempt >= max_attempts)
    attempt += 1
    try
      # Estimate the eigenvalue of largest magnitude.
      d, nconv, niter, nmult, resid =
        eigs(B; nev = nev, ncv = ncv, which = :LM, ritzvec = false, check = 1)

      # Converged when exactly the one requested eigenvalue was found.
      have_eig = nconv == 1
      if have_eig
        λ = abs(d[1]) # largest eigenvalue in absolute value
        break
      else
        # Enlarge the Krylov subspace for the next attempt (capped at n).
        ncv = min(2 * ncv, n)
      end
    catch e
      if occursin("XYAUPD_Exception", string(e)) && ncv < n
        # Enlarge the subspace first so the warning reports the value that
        # will actually be used on the retry.
        ncv = min(2 * ncv, n)
        @warn "Arpack error: $e. Increasing NCV to $ncv and retrying."
      else
        rethrow() # unrelated error: preserve the original backtrace
      end
    end
  end

  return λ, have_eig
end

"""
    opnorm_svd(J; max_attempts = 3, dense_threshold = 5)

Estimate the operator 2-norm of a (possibly rectangular) matrix or operator
`J` as its largest singular value.

Small operators (`min(size(J)...) ≤ dense_threshold`) are handled by a direct
dense SVD; larger ones use the iterative ARPACK solver `svds`, retrying with a
larger subspace (`ncv`) when convergence fails.

# Arguments
- `J`: a matrix or linear operator.

# Keywords
- `max_attempts::Int = 3`: maximum number of iterative attempts.
- `dense_threshold::Int = 5`: smaller-dimension size at or below which the
  dense path is used.

# Returns
- `(σ, converged)`: the norm estimate and whether the estimate converged
  (always `true` on the dense path).
"""
function opnorm_svd(J; max_attempts::Int = 3, dense_threshold::Int = 5)
  m, n = size(J)
  # 1) Tiny operators: direct dense SVD (always converges).
  if min(m, n) ≤ dense_threshold
    return maximum(svd(Matrix(J)).S), true
  end

  # 2) Iterative ARPACK SVD.
  nsv, ncv = 1, 10
  attempt, σ, have_svd = 0, zero(eltype(J)), false
  ncv_cap = min(m, n) # ncv must not exceed the smaller dimension

  while !(have_svd || attempt >= max_attempts)
    attempt += 1
    try
      # Estimate the largest singular value.
      s, nconv, niter, nmult, resid =
        svds(J; nsv = nsv, ncv = ncv, ritzvec = false, check = 1)

      # Converged when at least one singular value was found.
      have_svd = nconv >= 1
      if have_svd
        σ = maximum(s.S) # largest singular value
        break
      else
        # Enlarge the subspace for the next attempt (capped at ncv_cap).
        ncv = min(2 * ncv, ncv_cap)
      end
    catch e
      if occursin("XYAUPD_Exception", string(e)) && ncv < ncv_cap
        # Enlarge the subspace first so the warning reports the value that
        # will actually be used on the retry.
        ncv = min(2 * ncv, ncv_cap)
        @warn "Arpack error: $e. Increasing NCV to $ncv and retrying."
      else
        rethrow() # unrelated error: preserve the original backtrace
      end
    end
  end

  return σ, have_svd
end
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You can use the formatter to avoid this

1 change: 1 addition & 0 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ include("test_kron.jl")
include("test_callable.jl")
include("test_deprecated.jl")
include("test_normest.jl")
include("test_opnorm.jl")
include("test_diag.jl")
include("test_chainrules.jl")
include("test_solve_shifted_system.jl")
Expand Down
33 changes: 33 additions & 0 deletions test/test_opnorm.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# NOTE(review): the testset is wrapped in a function and invoked immediately to
# match the surrounding test files; the reviewer suggested inlining it instead.
function test_opnorm()
  @testset "opnorm and TSVD tests (LinearOperators)" begin
    # 1) Square Float64: eigenvalue-based estimate (dense LAPACK or ARPACK).
    square = [2.0 0.0; 0.0 -1.0]
    op_square = LinearOperator(square)
    nrm_sq, ok_sq = opnorm(op_square)
    @test ok_sq == true
    @test isapprox(nrm_sq, 2.0; rtol = 1e-12)

    # 2) Rectangular Float64: SVD-based estimate (dense LAPACK or ARPACK).
    rect = [3.0 0.0 0.0; 0.0 1.0 0.0]
    op_rect = LinearOperator(rect)
    nrm_rect, ok_rect = opnorm(op_rect)
    @test ok_rect == true
    @test isapprox(nrm_rect, 3.0; rtol = 1e-12)

    # 3) Square BigFloat: generic TSVD fallback.
    square_big = Matrix{BigFloat}([2.0 0.0; 0.0 -1.0])
    op_square_big = LinearOperator(square_big)
    nrm_sq_big, ok_sq_big = opnorm(op_square_big)
    @test ok_sq_big == true
    @test isapprox(nrm_sq_big, BigFloat(2); rtol = 1e-12)

    # 4) Rectangular BigFloat: rectangular TSVD fallback.
    rect_big = Matrix{BigFloat}([3.0 0.0 0.0; 0.0 1.0 0.0])
    op_rect_big = LinearOperator(rect_big)
    nrm_rect_big, ok_rect_big = opnorm(op_rect_big)
    @test ok_rect_big == true
    @test isapprox(nrm_rect_big, BigFloat(3); rtol = 1e-12)
  end
end

test_opnorm()
Loading