diff --git a/Project.toml b/Project.toml index 5b19138..03ab6a0 100644 --- a/Project.toml +++ b/Project.toml @@ -23,6 +23,7 @@ ManualNLPModels = "0.2" MLDatasets = "^0.7.4" NLPModels = "0.16, 0.17, 0.18, 0.19, 0.20, 0.21" NLPModelsModifiers = "0.7.3" +NLPModelsTest = "0.10.7" Noise = "0.2" Requires = "1" julia = "^1.6.0" @@ -33,6 +34,7 @@ DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" LinearOperators = "5c8ed15e-5a4c-59e4-a42b-c7e8811fb125" MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458" NLPModelsModifiers = "e01155f1-5c6f-4375-a9d8-616dd036575f" +NLPModelsTest = "7998695d-6960-4d3a-85c4-e1bceb8cd856" ProximalOperators = "a725b495-10eb-56fe-b38b-717eba820537" QuadraticModels = "f468eda6-eac5-11e8-05a5-ff9e497bcd19" ShiftedProximalOperators = "d4fd37fa-580c-4e43-9b30-361c21aae263" @@ -45,6 +47,7 @@ test = [ "LinearOperators", "MLDatasets", "NLPModelsModifiers", + "NLPModelsTest", "ProximalOperators", "QuadraticModels", "ShiftedProximalOperators", diff --git a/src/RegularizedProblems.jl b/src/RegularizedProblems.jl index bd69108..0104bf2 100644 --- a/src/RegularizedProblems.jl +++ b/src/RegularizedProblems.jl @@ -9,7 +9,6 @@ include("utils.jl") include("types.jl") include("bpdn_model.jl") include("lrcomp_model.jl") -include("matrand_model.jl") include("group_lasso_model.jl") include("nnmf.jl") @@ -24,6 +23,7 @@ function __init__() include("testset_group_lasso.jl") end @require ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a" begin + include("matrand_model.jl") @require DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" begin include("fh_model.jl") @require ProximalOperators = "a725b495-10eb-56fe-b38b-717eba820537" begin diff --git a/src/matrand_model.jl b/src/matrand_model.jl index 13500c1..02f87af 100644 --- a/src/matrand_model.jl +++ b/src/matrand_model.jl @@ -1,4 +1,4 @@ -export random_matrix_completion_model, MIT_matrix_completion_model +export mat_rand, random_matrix_completion_model, random_matrix_completion_eq_model, 
MIT_matrix_completion_model function mat_rand(m::Int, n::Int, r::Int, sr::Float64, va::Float64, vb::Float64, c::Float64) xl = rand(Uniform(-0.1, 0.3), m, r) @@ -6,10 +6,10 @@ function mat_rand(m::Int, n::Int, r::Int, sr::Float64, va::Float64, vb::Float64, xs = xl * xr' Ω = findall(<(sr), rand(m, n)) B = xs[Ω] - B = (1 - c) * add_gauss(B, va, 0; clip = true) + c * add_gauss(B, vb, 0; clip = true) + B = (1 - c) * add_gauss(B, va, 0; clip = false) + c * add_gauss(B, vb, 0; clip = false) ω = zeros(Int64, size(Ω, 1)) # Vectorize Omega for i = 1:size(Ω, 1) - ω[i] = Ω[i][1] + size(Ω, 2) * (Ω[i][2] - 1) + ω[i] = Ω[i][1] + n * (Ω[i][2] - 1) end return xs, B, ω end @@ -118,3 +118,172 @@ function MIT_matrix_completion_model() X, B, ω = perturb(I, 0.8, 0.8) matrix_completion_model(X, B, ω) end + +""" + model, sol = random_matrix_completion_eq_model(; kwargs...) + +Return an instance of an `NLPModel` representing the equality-constrained +matrix completion problem in forward mode, i.e., the linear constraint + +`` X_{ij} = A_{ij} \\ \\forall i,j \\in E, `` + +where `` E \\subseteq \\mathbb{R}^m \\times \\mathbb{R}^n ``. +Alternatively, one can also represent the equality-constrained +matrix completion problem in backward mode, i.e., the optimization problem + +`` \\min_{\\sigma, U, V} 0 \\quad \\text{s.t.} \\ U^T U = I, \\ V^T V = I, \\ \\sum_k \\sigma_k u_k^i v_k^j = A_{ij}.`` + +where `` \\sigma \\in \\mathbb{R}^l ``, `` U \\in \\mathbb{R}^m \\times \\mathbb{R}^l `` and +`` V \\in \\mathbb{R}^l \\times \\mathbb{R}^n `` with `` l \\coloneqq \\min\\{m, n\\} ``. 
+ +## Keyword Arguments + +* `m :: Int = 100`: the number of rows of X and A; +* `n :: Int = 100`: the number of columns of X and A; +* `r :: Int = 5`: the desired rank of A; +* `sr :: Float64 = 0.8`: a threshold between 0 and 1 used to determine the set of pixels retained by the operator P; +* `mode :: Symbol = :forward`: either `:forward` or `:backward`, represents which form of the equality constrained matrix completion problem is to be represented (see above). + +## Return Value + +An instance of an `NLPModel` representing the +equality constrained matrix completion problem, and the exact solution. +""" +function random_matrix_completion_eq_model(; + m::Int = 100, + n::Int = 100, + r::Int = 5, + sr::Float64 = 0.8, + mode::Symbol = :forward +) + xs, B, ω = mat_rand(m, n, r, sr, 0.0, 0.0, 0.0) + return random_matrix_completion_eq_model(xs, B, ω, mode = mode) +end + +function random_matrix_completion_eq_model(xs, B, ω; mode = :forward) + nlp, x0 = mode == :forward ? random_matrix_completion_eq_model_forward(xs, B, ω) : + random_matrix_completion_eq_model_backward(xs, B, ω) +end + +function random_matrix_completion_eq_model_forward(xs, B, ω) + + m, n = size(xs) + xs = collect(vec(xs)) + + # Constrained API + function cons!(cx, x) + @views cx .= x[ω] .- B + end + + function jprod!(jv, x, v) + @views jv .= v[ω] + end + + function jtprod!(jtv, x, v) + jtv .= 0 + @views jtv[ω] .= v + end + + function hprod!(hv, x, y, v; obj_weight = one(T)) + return hv .= 0 + end + + function hess_coord!(vals, x, y; obj_weight = one(T)) + return zeros(Float64, 0) + end + + rows_jac = collect(1:length(ω)) + cols_jac = ω + + function jac_coord!(vals, x) + vals .= 1 + end + + # Unconstrained API + function obj(x) + return 0.0 + end + + function grad!(g, x) + g .= 0 + end + + function hprod!(hv, x, v; obj_weight = 1.0) + return hv .= 0 + end + + function hess_coord!(vals, x; obj_weight = 1.0) + return zeros(Float64, 0) + end + + nlp = NLPModel( + zero(xs), + obj, + grad = grad!, + hprod = 
hprod!, + hess_coord = (zeros(Float64, 0), zeros(Float64, 0), hess_coord!), + cons = (cons!, zero(B), zero(B)), + jprod = jprod!, + jtprod = jtprod!, + jac_coord = (rows_jac, cols_jac, jac_coord!) + ) + + return nlp, xs + +end + +function random_matrix_completion_eq_model_backward(xs, B, ω) + m, n = size(xs) + l = min(m, n) + xs_svd = svd(xs) + xs = collect(vcat(vec(xs_svd.U), xs_svd.S, vec(xs_svd.Vt))) + + # Constrained API + function c!(cx, x) + U = @views reshape(x[1:m*l], m, l) + σ = @views x[m*l + 1 : m*l + l] + V = @views reshape(x[m*l + l + 1:end], n, l) + + n_entries_l = Int(l*(l+1)/2) + + @inbounds for i = 1:l + @inbounds for j = 1:i + cx_idx = Int(j + i*(i-1)/2) + if i == j + @views cx[cx_idx] = dot(U[:, i], U[:, j]) - 1 + @views cx[n_entries_l + cx_idx] = dot(V[i, :], V[j, :]) - 1 + else + @views cx[cx_idx] = dot(U[:, i], U[:, j]) + @views cx[n_entries_l + cx_idx] = dot(V[i, :], V[j, :]) + end + end + end + + offset = 2*n_entries_l + @inbounds for i in eachindex(ω) + i_idx = mod(ω[i], n) + i_idx = i_idx == 0 ? 
n : i_idx + j_idx = Int((ω[i] - i_idx)/n) + 1 + cx[offset + i] = - B[i] + @inbounds for j = 1:l + cx[offset + i] += σ[j]*U[i_idx, j]*V[j, j_idx] + end + end + end + + + # Unconstrained API + function obj(x) + return 0.0 + end + x0 = zeros(Float64, m*l + n*l + l) + ncon = l*(l+1) + length(B) + return ADNLPModels.ADNLPModel!( + obj, + x0, + c!, + zeros(ncon), + zeros(ncon) + ), xs +end + diff --git a/test/runtests.jl b/test/runtests.jl index 9137b9f..425c77e 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,5 +1,6 @@ using LinearAlgebra, Test -using ADNLPModels, DifferentialEquations, LinearOperators, ManualNLPModels, MLDatasets, NLPModels, NLPModelsModifiers, QuadraticModels +using ADNLPModels, DifferentialEquations, LinearOperators, ManualNLPModels, MLDatasets, NLPModels, NLPModelsModifiers +using NLPModelsTest, QuadraticModels using RegularizedProblems function test_well_defined(model, nls_model, sol) @@ -59,6 +60,7 @@ end @test all(nls_model.meta.uvar .== Inf) end +""" @testset "FH" begin model, nls_model, sol = fh_model() test_objectives(model, nls_model) @@ -72,6 +74,7 @@ end @test nls_model.nls_meta.nequ == 202 @test all(nls_model.meta.x0 .== 1) end +""" @testset "low-rank completion" begin model, nls_model, A = lrcomp_model() @@ -91,6 +94,24 @@ end @test all(0 .<= model.meta.x0 .<= 1) @test nls_model.nls_meta.nequ == 10000 @test all(0 .<= nls_model.meta.x0 .<= 1) + + # Equality-constrained random matrix completion model in forward mode + + xs, B, ω = mat_rand(100, 100, 5, 0.8, 0.0, 0.0, 0.0) + model, sol = random_matrix_completion_eq_model(xs, B, ω, mode = :forward) + + # Test basics + @test model.meta.nvar == 100*100 + @test model.meta.ncon > 0 + @test model.meta.ncon < 100*100 + @test cons(model, sol) .≈ 0 + + # Construct ADModel to compare with ManualNLPModels model + f(x) = zero(eltype(x)) + c(x) = x[ω] - B + ad_model = ADNLPModel(f, model.meta.x0, c, zero(B), zero(B)) + consistent_nlps([model, ad_model], exclude = [hess, hess_coord, jth_hess, 
jth_hess_coord, jth_hprod, ghjvprod, jth_hprod], test_derivative = false) + end @testset "MIT" begin