From fdb9c6bc9565ac45e995234586a25dda03cfc7c7 Mon Sep 17 00:00:00 2001 From: michielstock Date: Thu, 30 Oct 2025 12:30:58 +0100 Subject: [PATCH 1/6] :tada: Fourier Holographically Reduced Representations --- src/HyperdimensionalComputing.jl | 4 +- src/inference.jl | 13 +++--- src/operations.jl | 76 ++++++++++++++++++++++---------- src/types.jl | 46 ++++++++++++++----- test/operations.jl | 17 ++++++- test/types.jl | 11 +++++ 6 files changed, 125 insertions(+), 42 deletions(-) diff --git a/src/HyperdimensionalComputing.jl b/src/HyperdimensionalComputing.jl index 681c28a..7ef4d7d 100644 --- a/src/HyperdimensionalComputing.jl +++ b/src/HyperdimensionalComputing.jl @@ -9,13 +9,15 @@ export AbstractHV, GradedBipolarHV, RealHV, GradedHV, - TernaryHV + TernaryHV, + FHRR include("representations.jl") include("operations.jl") export bundle, bind, + unbind, shift!, shift, ρ, diff --git a/src/inference.jl b/src/inference.jl index 2cd50d6..7df38e1 100644 --- a/src/inference.jl +++ b/src/inference.jl @@ -20,6 +20,7 @@ similarity(u::GradedBipolarHV, v::GradedBipolarHV) = sim_cos(u, v) similarity(u::RealHV, v::RealHV) = sim_cos(u, v) similarity(u::BinaryHV, v::BinaryHV) = sim_jacc(u, v) similarity(u::GradedHV, v::GradedHV) = sim_jacc(u, v) +similarity(u::FHRR, v::FHRR) = real(dot(u.v, v.v)) / length(u) """ similarity(u::AbstractVector, v::AbstractVector; method::Symbol) @@ -80,9 +81,9 @@ Alias for `similarity`. See `similarity` for the main documentation. nearest_neighbor(u::AbstractHV, collection; kwargs...) = maximum( - (similarity(u, xi; kwargs...), i, xi) + (similarity(u, xi; kwargs...), i, xi) for (i, xi) in enumerate(collection) -) + ) nearest_neighbor(u::AbstractHV, collection::Dict; kwargs...) = maximum((similarity(u, xi; kwargs...), k, xi) for (k, xi) in collection) @@ -103,15 +104,15 @@ list of `(τ, i)`. function nearest_neighbor(u::AbstractHV, collection, k::Int; kwargs...) sims = [ (similarity(u, xi; kwargs...), i) - for (i, xi) in enumerate(collection) + for (i, xi) in enumerate(collection) ] - return partialsort!(sims, 1:k, rev = true) + return partialsort!(sims, 1:k, rev=true) end function nearest_neighbor(u::AbstractHV, collection::Dict, k::Int; kwargs...) 
sims = [ (similarity(u, xi; kwargs...), i) - for (i, xi) in collection + for (i, xi) in collection ] - return partialsort!(sims, 1:k, rev = true) + return partialsort!(sims, 1:k, rev=true) end diff --git a/src/operations.jl b/src/operations.jl index 62b2b1d..3e66be7 100644 --- a/src/operations.jl +++ b/src/operations.jl @@ -58,7 +58,7 @@ end # computes `r[i] = f(x[i], y[i+offset])` # assumes postive offset (for now) -@inline function offsetcombine!(r, f, x, y, offset = 0) +@inline function offsetcombine!(r, f, x, y, offset=0) @assert length(r) == length(x) == length(y) n = length(r) if offset == 0 @@ -73,7 +73,7 @@ end return r end -@inline function offsetcombine(f, x::V, y::V, offset = 0) where {V <: AbstractVecOrMat} +@inline function offsetcombine(f, x::V, y::V, offset=0) where {V<:AbstractVecOrMat} @assert length(x) == length(y) r = similar(x) n = length(r) @@ -89,11 +89,11 @@ end return r end -# AGGREGATION -# ----------- +# BUNDLE +# ------ # binary and bipolar: use majority -function bundle(hvr::Union{BinaryHV, BipolarHV}, hdvs, r) +function bundle(hvr::Union{BinaryHV,BipolarHV}, hdvs, r) m = length(hdvs) for hv in hdvs r .+= hv.v @@ -108,9 +108,9 @@ end # ternary: just add them, no normalization by default function bundle( - ::TernaryHV, hdvs, r; - normalize = false - ) + ::TernaryHV, hdvs, r; + normalize=false +) for hv in hdvs r .+= hv.v end @@ -143,51 +143,76 @@ function bundle(::GradedBipolarHV, hdvs, r) return GradedBipolarHV(r) end +function bundle(::FHRR, hdvs, r) + for hv in hdvs + r .+= hv.v + end + r ./= abs.(r) + return FHRR(r) +end + function bundle(hdvs; kwargs...) hv = first(hdvs) r = empty_vector(hv) return bundle(hv, hdvs, r, kwargs...) end -Base.:+(hv1::HV, hv2::HV) where {HV <: AbstractHV} = bundle((hv1, hv2)) +Base.:+(hv1::HV, hv2::HV) where {HV<:AbstractHV} = bundle((hv1, hv2)) # BINDING # ------- - +Base.bind(hv1::HV, hv2::HV) where {HV<:AbstractHV} = HV(hv1.v .* hv2.v) # default Base.bind(hv1::BinaryHV, hv2::BinaryHV) = BinaryHV(hv1.v .⊻ hv2.v) Base.bind(hv1::BipolarHV, hv2::BipolarHV) = BipolarHV(hv1.v .⊻ hv2.v) Base.bind(hv1::TernaryHV, hv2::TernaryHV) = TernaryHV(hv1.v .* hv2.v) Base.bind(hv1::RealHV, hv2::RealHV) = RealHV(hv1.v .* hv2.v) Base.bind(hv1::GradedHV, hv2::GradedHV) = GradedHV(fuzzy_xor.(hv1.v, hv2.v)) Base.bind(hv1::GradedBipolarHV, hv2::GradedBipolarHV) = GradedBipolarHV(fuzzy_xor_bipol.(hv1.v, hv2.v)) -Base.:*(hv1::HV, hv2::HV) where {HV <: AbstractHV} = bind(hv1, hv2) -Base.bind(hvs::AbstractVector{HV}) where {HV <: AbstractHV} = prod(hvs) +Base.bind(hv1::FHRR, hv2::FHRR) = FHRR(hv1.v .* hv2.v) +Base.:*(hv1::HV, hv2::HV) where {HV<:AbstractHV} = bind(hv1, hv2) +Base.bind(hvs::AbstractVector{HV}) where {HV<:AbstractHV} = prod(hvs) + + +""" + unbind(hv1::HV, hv2::HV) + +Unbinds `hv2` from `hv1`. For many types of hypervectors, the binding operator is +idempotent, i.e., `u * v * v == u`. + +Aliases with `\`. 
+""" +unbind(hv1::HV, hv2::HV) where {HV<:AbstractHV} = bind(hv1, hv2) + +unbind(hv1::RealHV, hv2::RealHV) where {HV<:AbstractHV} = RealHV(hv1.v ./ hv2.v) +unbind(hv1::FHRR, hv2::FHRR) where {HV<:AbstractHV} = FHRR(hv1.v ./ hv2.v) + +Base.:/(hv1::HV, hv2::HV) where {HV<:AbstractHV} = unbind(hv1, hv2) # SHIFTING # -------- -shift!(hv::AbstractHV, k = 1) = circshift!(hv.v, k) +shift!(hv::AbstractHV, k=1) = circshift!(hv.v, k) -function shift(hv::AbstractHV, k = 1) +function shift(hv::AbstractHV, k=1) r = similar(hv) r.v .= circshift(hv.v, k) return r end -function shift!(hv::V, k = 1) where {V <: Union{BinaryHV, BipolarHV}} +function shift!(hv::V, k=1) where {V<:Union{BinaryHV,BipolarHV}} v = similar(hv.v) # empty bitvector hv.v .= circshift!(v, hv.v, k) return hv end -function shift(hv::V, k = 1) where {V <: Union{BinaryHV, BipolarHV}} +function shift(hv::V, k=1) where {V<:Union{BinaryHV,BipolarHV}} v = similar(hv.v) # empty bitvector return V(circshift!(v, hv.v, k)) end -ρ(hv::AbstractHV, k = 1) = shift(hv, k) -ρ!(hv::AbstractHV, k = 1) = shift!(hv, k) +ρ(hv::AbstractHV, k=1) = shift(hv, k) +ρ!(hv::AbstractHV, k=1) = shift!(hv, k) # COMPARISON @@ -206,7 +231,7 @@ One can specify either: - `atol=N/100` number of matches more than due to chance needed for being assumed similar - `ptol=0.01` threshold for seeing that many matches due to chance """ -function Base.isapprox(u::T, v::T; atol = length(u) / 100, ptol = 0.01) where {T <: Union{BinaryHV, BipolarHV}} +function Base.isapprox(u::T, v::T; atol=length(u) / 100, ptol=0.01) where {T<:Union{BinaryHV,BipolarHV}} @assert length(u) == length(v) "Vectors have to be of equal length" N = length(u) missmatches = sum(ui != vi for (ui, vi) in zip(u, v)) @@ -226,7 +251,7 @@ One can specify either: - `ptol=1e-10` threshold for seeing that many matches due to chance - `N_bootstap=200` number of samples for bootstrapping """ -function Base.isapprox(u::T, v::T; ptol = 1.0e-10, N_bootstrap = 500) where {T <: AbstractHV} +function Base.isapprox(u::T, v::T; ptol=1.0e-10, N_bootstrap=500) where {T<:AbstractHV} @assert length(u) == length(v) "Vectors have to be of equal length" N = length(u) # bootstrap to find the zero distr @@ -262,17 +287,17 @@ function randbv(n::Int, I) end -function perturbate!(::Type{HVByteVec}, hv::HV, I, dist = eldist(hv)) where {HV <: AbstractHV} +function perturbate!(::Type{HVByteVec}, hv::HV, I, dist=eldist(hv)) where {HV<:AbstractHV} hv.v[I] .= rand(dist, length(I)) return hv end -function perturbate!(::Type{HVByteVec}, hv::HV, M::BitVector, dist = eldist(hv)) where {HV <: AbstractHV} +function perturbate!(::Type{HVByteVec}, hv::HV, M::BitVector, dist=eldist(hv)) where {HV<:AbstractHV} hv.v[M] .= rand(dist, sum(M)) return hv end -function perturbate!(::Type{HVByteVec}, hv::HV, p::Number, args...) where {HV <: AbstractHV} +function perturbate!(::Type{HVByteVec}, hv::HV, p::Number, args...) where {HV<:AbstractHV} return perturbate!(hv, randbv(length(hv), p), args...) end @@ -286,3 +311,8 @@ end perturbate!(hv, args...) = perturbate!(vectype(hv), hv, args...) perturbate(hv::AbstractHV, args...; kwargs...) = perturbate!(copy(hv), args...; kwargs...) 
+ +# OTHER +# ----- + +Base.:^(hv::FHRR, x::Number) = FHRR(hv.v .^ x) \ No newline at end of file diff --git a/src/types.jl b/src/types.jl index 9944974..9ed8249 100644 --- a/src/types.jl +++ b/src/types.jl @@ -27,11 +27,11 @@ abstract type AbstractHV{T} <: AbstractVector{T} end Base.sum(hv::AbstractHV) = sum(hv.v) Base.size(hv::AbstractHV) = size(hv.v) Base.getindex(hv::AbstractHV, i) = hv.v[i] -Base.similar(hv::T) where {T <: AbstractHV} = T(length(hv)) +Base.similar(hv::T) where {T<:AbstractHV} = T(length(hv)) LinearAlgebra.norm(hv::AbstractHV) = norm(hv.v) LinearAlgebra.normalize!(hv::AbstractHV) = hv Base.hash(hv::AbstractHV) = hash(hv.v) -Base.copy(hv::HV) where {HV <: AbstractHV} = HV(copy(hv.v)) +Base.copy(hv::HV) where {HV<:AbstractHV} = HV(copy(hv.v)) # Gives an empty Vector (filled with neutral elelment) that # the `hv::AbstractHV` type uses. @@ -51,7 +51,7 @@ struct BipolarHV <: AbstractHV{Int} BipolarHV(v::BitVector) = new(v) end -BipolarHV(n::Integer = 10_000) = BipolarHV(bitrand(n)) +BipolarHV(n::Integer=10_000) = BipolarHV(bitrand(n)) BipolarHV(v::AbstractVector) = BipolarHV(v .> 0) Base.getindex(hv::BipolarHV, i) = hv.v[i] ? 1 : -1 @@ -70,7 +70,7 @@ struct TernaryHV <: AbstractHV{Int} v::Vector{Int} end -TernaryHV(n::Int = 10_000) = TernaryHV(rand((-1, 1), n)) +TernaryHV(n::Int=10_000) = TernaryHV(rand((-1, 1), n)) function LinearAlgebra.normalize!(hv::TernaryHV) clamp!(hv.v, -1, 1) @@ -89,7 +89,7 @@ struct BinaryHV <: AbstractHV{Bool} v::BitVector end -BinaryHV(n::Integer = 10_000) = BinaryHV(bitrand(n)) +BinaryHV(n::Integer=10_000) = BinaryHV(bitrand(n)) BinaryHV(v::AbstractVector{Bool}) = BinaryHV(BitVector(v)) # needed for aggregation @@ -100,11 +100,11 @@ eldist(::Type{BinaryHV}) = Bernoulli(0.5) # `RealHV` contain real numbers, drawn from a distribution # -------- -struct RealHV{T <: Real} <: AbstractHV{T} +struct RealHV{T<:Real} <: AbstractHV{T} v::Vector{T} end -RealHV(n::Integer = 10_000, distr::Distribution = eldist(RealHV)) = RealHV(rand(distr, n)) +RealHV(n::Integer=10_000, distr::Distribution=eldist(RealHV)) = RealHV(rand(distr, n)) Base.similar(hv::RealHV) = RealHV(length(hv), eldist(RealHV)) @@ -120,12 +120,12 @@ eldist(::Type{<:RealHV}) = Normal() # GradedHV are vectors in $[0, 1]^n$, allowing for graded relations. 
# ---------------- -struct GradedHV{T <: Real} <: AbstractHV{T} +struct GradedHV{T<:Real} <: AbstractHV{T} v::Vector{T} #GradedHV(v::AbstractVector{T}) where {T<:Real} = new{T}(clamp!(v,0,1)) end -function GradedHV(n::Int = 10_000, distr = eldist(GradedHV)) +function GradedHV(n::Int=10_000, distr=eldist(GradedHV)) @assert 0 ≤ minimum(distr) < maximum(distr) ≤ 1 "Provide `distr` with support in [0,1]" return GradedHV(rand(distr, n)) end @@ -149,12 +149,12 @@ end # --------------- -struct GradedBipolarHV{T <: Real} <: AbstractHV{T} +struct GradedBipolarHV{T<:Real} <: AbstractHV{T} v::Vector{T} #GradedBipolarHV(v::AbstractVector{T}) where {T<:Real} = new{T}(clamp!(v,-1,1)) end -function GradedBipolarHV(n::Int = 10_000, distr::Distribution = eldist(GradedBipolarHV)) +function GradedBipolarHV(n::Int=10_000, distr::Distribution=eldist(GradedBipolarHV)) @assert -1 ≤ minimum(distr) < maximum(distr) ≤ 1 "Provide `distr` with support in [-1,1]" return GradedBipolarHV(rand(distr, n)) end @@ -167,6 +167,30 @@ eldist(::Type{<:GradedBipolarHV}) = 2eldist(GradedHV) - 1 Base.similar(hv::GradedBipolarHV) = GradedBipolarHV(length(hv)) LinearAlgebra.normalize!(hv::GradedBipolarHV) = clamp!(hv.v, -1, 1) +# Fourier Holographically Reduced Represenetations +# ------------------------------------------------ + +struct FHRR{T<:Complex} <: AbstractHV{T} + v::Vector{T} +end + +#Base.eltype(::FHRR{T}) where {T} = Complex{T} + +FHRR(n::Int=10_000) = FHRR(exp.(2π * im .* rand(n))) +FHRR(T::Type, n::Int=10_000) = FHRR(exp.(2π * im .* rand(T, n))) + +Base.similar(hv::FHRR{<:Complex{R}}) where {R} = FHRR(exp.(2π * im .* rand(R, length(hv)))) + +""" + LinearAlgebra.normalize!(hv::FHRR) + +A Fourier Holographically Reduced Represenetation is normalized by +setting the norm of each complex element to 1. 
+""" +function LinearAlgebra.normalize!(hv::FHRR) + hv.v ./= abs.(hv.v) + return hv +end # TRAITS # ------ diff --git a/test/operations.jl b/test/operations.jl index 8a54648..c215c51 100644 --- a/test/operations.jl +++ b/test/operations.jl @@ -52,7 +52,7 @@ using LinearAlgebra, Random end # currently not yet a good way of evaluating these - HV <: Union{TernaryHV, GradedHV, GradedBipolarHV, RealHV} && continue + HV <: Union{TernaryHV,GradedHV,GradedBipolarHV,RealHV} && continue @testset "similarity $HV" begin N = 10_000 @@ -69,4 +69,19 @@ using LinearAlgebra, Random end end end + @testset "FHRR" begin + hv1 = FHRR(n) + hv2 = FHRR(n) + + @test bundle([hv1, hv2]) isa FHRR + @test hv1 + hv2 isa FHRR + @test bind([hv1, hv2]) isa FHRR + @test norm(bind([hv1, hv2])) ≈ sqrt(n) + + @test shift(hv1, 2) isa FHRR + + @test similarity(hv1, hv2) < 0.5 + @test similarity(hv2, hv2) ≈ 1 + end + end diff --git a/test/types.jl b/test/types.jl index f5f493f..7fe095e 100644 --- a/test/types.jl +++ b/test/types.jl @@ -97,4 +97,15 @@ using Distributions, LinearAlgebra @test norm(hdv) ≈ norm(hdv.v) normalize!(hdv) end + + @testset "FHRR" begin + hdv = FHRR(n) + @test length(hdv) == n + @test eltype(hdv) <: Complex + @test hdv[2] isa Complex + + @test sum(hdv) ≈ sum(hdv.v) + @test norm(hdv) ≈ norm(hdv.v) + + end end From 5ade5a37086e0811e38045208284af4c91c3f52f Mon Sep 17 00:00:00 2001 From: michielstock Date: Thu, 30 Oct 2025 14:44:01 +0100 Subject: [PATCH 2/6] :workinprogress: levels FHRR --- src/encoding.jl | 55 ++++++++++++++++++++++++++++++++-------------- src/types.jl | 3 --- test/operations.jl | 2 ++ 3 files changed, 41 insertions(+), 19 deletions(-) diff --git a/src/encoding.jl b/src/encoding.jl index a7d4b26..5681afd 100644 --- a/src/encoding.jl +++ b/src/encoding.jl @@ -54,7 +54,7 @@ where `V` is the hypervector collection, `m` is the size of the hypervector coll - [`multibind`](@ref): Multibind encoding, binding-variant of this encoder """ -function multiset(vs::AbstractVector{<:T})::T where {T <: AbstractHV} +function multiset(vs::AbstractVector{<:T})::T where {T<:AbstractHV} return bundle(vs) end @@ -301,7 +301,7 @@ and `\\oplus` are the binding and bundling operations. - [Torchhd documentation](https://torchhd.readthedocs.io/en/stable/generated/torchhd.hash_table.html) """ -function hashtable(keys::T, values::T) where {T <: AbstractVector{<:AbstractHV}} +function hashtable(keys::T, values::T) where {T<:AbstractVector{<:AbstractHV}} @assert length(keys) == length(values) "Number of keys and values aren't equal" return bundle(map(prod, zip(keys, values))) end @@ -369,7 +369,7 @@ and binding operations. - [Torchhd documentation](https://torchhd.readthedocs.io/en/stable/generated/torchhd.cross_product.html) """ -function crossproduct(U::T, V::T) where {T <: AbstractVector{<:AbstractHV}} +function crossproduct(U::T, V::T) where {T<:AbstractVector{<:AbstractHV}} # TODO: This should be bundled without normalizing return bind(multiset(U), multiset(V)) end @@ -439,11 +439,11 @@ and shift operations. 
- [Torchhd documentation](https://torchhd.readthedocs.io/en/stable/generated/torchhd.ngrams.html) """ -function ngrams(vs::AbstractVector{<:AbstractHV}, n::Int = 3) +function ngrams(vs::AbstractVector{<:AbstractHV}, n::Int=3) l = length(vs) p = l - n + 1 @assert 1 <= n <= length(vs) "`n` must be 1 ≤ n ≤ $l" - return bundle([bind([shift(vs[i + j], j) for j in 0:(n - 1)]) for i in 1:p]) + return bundle([bind([shift(vs[i+j], j) for j in 0:(n-1)]) for i in 1:p]) end """ @@ -485,7 +485,7 @@ hypervector collection, `i` is the position of the entry in the collection, and - [Torchhd documentation](https://torchhd.readthedocs.io/en/stable/generated/torchhd.graph.html) """ -function graph(source::T, target::T; directed::Bool = false) where {T <: AbstractVector{<:AbstractHV}} +function graph(source::T, target::T; directed::Bool=false) where {T<:AbstractVector{<:AbstractHV}} @assert length(source) == length(target) "`source` and `target` must be the same length" return hashtable(source, shift.(target, convert(Int, directed))) end @@ -493,25 +493,25 @@ end """ level(v::HV, n::Int) where {HV <: AbstractHV} - level(HV::Type{<:AbstractHV}, n::Int; dims::Int = 10_000) + level(HV::Type{<:AbstractHV}, m::Int; n::Int = 10_000) Creates a set of level correlated hypervectors, where the first and last hypervectors are quasi-orthogonal. # Arguments - `v::HV`: Base hypervector -- `n::Int`: Number of levels (alternatively, provide a vector to be encoded) +- `m::Int`: Number of levels (alternatively, provide a vector to be encoded) """ -function level(v::HV, n::Int) where {HV <: AbstractHV} +function level(v::HV, m::Int) where {HV<:AbstractHV} hvs = [v] - p = 2 / n - while length(hvs) < n + p = 2 / m + while length(hvs) < m u = last(hvs) push!(hvs, perturbate(u, p)) end return hvs end -level(HV::Type{<:AbstractHV}, n::Int; dims::Int = 10_000) = level(HV(dims), n) +level(HV::Type{<:AbstractHV}, m::Int; n::Int=10_000) = level(HV(n), m) level(HVv, vals::AbstractVector) = level(HVv, length(vals)) level(HVv, vals::UnitRange) = level(HVv, length(vals)) @@ -538,7 +538,7 @@ encoder = encodelevel(hvlevels, numvalues) encoder(pi/3) # hypervector that best represents this numerical value ``` """ -function encodelevel(hvlevels::AbstractVector{<:AbstractHV}, numvalues; testbound = false) +function encodelevel(hvlevels::AbstractVector{<:AbstractHV}, numvalues; testbound=false) @assert length(hvlevels) == length(numvalues) "HV levels do not match numerical values" # construct the encoder function encoder(x::Number) @@ -554,9 +554,9 @@ end See `encodelevel`, same but provide lower (`a`) and upper (`b`) limit of the interval to be encoded. 
""" -encodelevel(hvlevels::AbstractVector{<:AbstractHV}, a::Number, b::Number; testbound = false) = encodelevel(hvlevels, range(a, b, length(hvlevels)); testbound) +encodelevel(hvlevels::AbstractVector{<:AbstractHV}, a::Number, b::Number; testbound=false) = encodelevel(hvlevels, range(a, b, length(hvlevels)); testbound) -encodelevel(HV, numvalues; testbound = false) = encodelevel(level(HV, length(numvalues)), numvalues; testbound) +encodelevel(HV, numvalues; testbound=false) = encodelevel(level(HV, length(numvalues)), numvalues; testbound) """ @@ -591,7 +591,7 @@ end decodelevel(hvlevels::AbstractVector{<:AbstractHV}, a::Number, b::Number) = decodelevel(hvlevels, range(a, b, length(hvlevels))) -decodelevel(HV, numvalues; testbound = false) = decodelevel(level(HV, length(numvalues)), numvalues) +decodelevel(HV, numvalues; testbound=false) = decodelevel(level(HV, length(numvalues)), numvalues; testbound) """ convertlevel(hvlevels, numvals..., kwargs...) @@ -600,3 +600,26 @@ Creates the `encoder` and `decoder` for a level incoding in one step. See `encod and `decodelevel` for their respective documentations. """ convertlevel(hvlevels, numvals...; kwargs...) = encodelevel(hvlevels, numvals...; kwargs...), decodelevel(hvlevels, numvals..., kwargs...) + + +# levels using FHRR + +function level(v::FHRR, m::Int; periodic=false) + u = periodic ? 2π : π + return [FHRR(v.v .* exp(θ * im)) for θ in range(0, u, m)] +end + + +function encodelevel(v::FHRR, numvalues; periodic=false) + a, b = extrema(numvalues) + r = b - a + u = periodic ? 2π : π + return x -> FHRR(v.v * exp((x - a) / r * u * im)) +end + +function decodelevel(v::FHRR, numvalues; periodic=false) + a, b = extrema(numvalues) + r = b - a + u = periodic ? 2π : π + return u -> imag(mean(log.(u.v ./ v.v) * r / u)) + a +end \ No newline at end of file diff --git a/src/types.jl b/src/types.jl index 9ed8249..6696fab 100644 --- a/src/types.jl +++ b/src/types.jl @@ -18,12 +18,10 @@ Every hypervector HV has the following basic functionality TODO: - [ ] SparseHV - [ ] support for different types -- [ ] complex HDC =# abstract type AbstractHV{T} <: AbstractVector{T} end -#Base.collect(hv::AbstractHV) = hv.v Base.sum(hv::AbstractHV) = sum(hv.v) Base.size(hv::AbstractHV) = size(hv.v) Base.getindex(hv::AbstractHV, i) = hv.v[i] @@ -106,7 +104,6 @@ end RealHV(n::Integer=10_000, distr::Distribution=eldist(RealHV)) = RealHV(rand(distr, n)) - Base.similar(hv::RealHV) = RealHV(length(hv), eldist(RealHV)) function normalize!(hv::RealHV) diff --git a/test/operations.jl b/test/operations.jl index c215c51..9ee0506 100644 --- a/test/operations.jl +++ b/test/operations.jl @@ -82,6 +82,8 @@ using LinearAlgebra, Random @test similarity(hv1, hv2) < 0.5 @test similarity(hv2, hv2) ≈ 1 + + @test norm(hv1^3) ≈ sqrt(n) end end From e85eb89f7b99a2a42f3977aeac8e43e416576c48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Vigil-V=C3=A1squez?= Date: Thu, 30 Oct 2025 20:45:41 +0100 Subject: [PATCH 3/6] style: format with Runic Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- src/encoding.jl | 32 ++++++++++++++++---------------- src/inference.jl | 12 ++++++------ src/operations.jl | 22 +++++++++++----------- 3 files changed, 33 insertions(+), 33 deletions(-) diff --git a/src/encoding.jl b/src/encoding.jl index 5681afd..4e04754 100644 --- a/src/encoding.jl +++ b/src/encoding.jl @@ -54,7 +54,7 @@ where `V` is the hypervector collection, `m` is the size of the hypervector coll - [`multibind`](@ref): Multibind encoding, binding-variant 
of this encoder """ -function multiset(vs::AbstractVector{<:T})::T where {T<:AbstractHV} +function multiset(vs::AbstractVector{<:T})::T where {T <: AbstractHV} return bundle(vs) end @@ -301,7 +301,7 @@ and `\\oplus` are the binding and bundling operations. - [Torchhd documentation](https://torchhd.readthedocs.io/en/stable/generated/torchhd.hash_table.html) """ -function hashtable(keys::T, values::T) where {T<:AbstractVector{<:AbstractHV}} +function hashtable(keys::T, values::T) where {T <: AbstractVector{<:AbstractHV}} @assert length(keys) == length(values) "Number of keys and values aren't equal" return bundle(map(prod, zip(keys, values))) end @@ -369,7 +369,7 @@ and binding operations. - [Torchhd documentation](https://torchhd.readthedocs.io/en/stable/generated/torchhd.cross_product.html) """ -function crossproduct(U::T, V::T) where {T<:AbstractVector{<:AbstractHV}} +function crossproduct(U::T, V::T) where {T <: AbstractVector{<:AbstractHV}} # TODO: This should be bundled without normalizing return bind(multiset(U), multiset(V)) end @@ -439,11 +439,11 @@ and shift operations. - [Torchhd documentation](https://torchhd.readthedocs.io/en/stable/generated/torchhd.ngrams.html) """ -function ngrams(vs::AbstractVector{<:AbstractHV}, n::Int=3) +function ngrams(vs::AbstractVector{<:AbstractHV}, n::Int = 3) l = length(vs) p = l - n + 1 @assert 1 <= n <= length(vs) "`n` must be 1 ≤ n ≤ $l" - return bundle([bind([shift(vs[i+j], j) for j in 0:(n-1)]) for i in 1:p]) + return bundle([bind([shift(vs[i + j], j) for j in 0:(n - 1)]) for i in 1:p]) end """ @@ -485,7 +485,7 @@ hypervector collection, `i` is the position of the entry in the collection, and - [Torchhd documentation](https://torchhd.readthedocs.io/en/stable/generated/torchhd.graph.html) """ -function graph(source::T, target::T; directed::Bool=false) where {T<:AbstractVector{<:AbstractHV}} +function graph(source::T, target::T; directed::Bool = false) where {T <: AbstractVector{<:AbstractHV}} @assert length(source) == length(target) "`source` and `target` must be the same length" return hashtable(source, shift.(target, convert(Int, directed))) end @@ -501,7 +501,7 @@ Creates a set of level correlated hypervectors, where the first and last hyperve - `v::HV`: Base hypervector - `m::Int`: Number of levels (alternatively, provide a vector to be encoded) """ -function level(v::HV, m::Int) where {HV<:AbstractHV} +function level(v::HV, m::Int) where {HV <: AbstractHV} hvs = [v] p = 2 / m while length(hvs) < m @@ -511,7 +511,7 @@ function level(v::HV, m::Int) where {HV<:AbstractHV} return hvs end -level(HV::Type{<:AbstractHV}, m::Int; n::Int=10_000) = level(HV(n), m) +level(HV::Type{<:AbstractHV}, m::Int; n::Int = 10_000) = level(HV(n), m) level(HVv, vals::AbstractVector) = level(HVv, length(vals)) level(HVv, vals::UnitRange) = level(HVv, length(vals)) @@ -538,7 +538,7 @@ encoder = encodelevel(hvlevels, numvalues) encoder(pi/3) # hypervector that best represents this numerical value ``` """ -function encodelevel(hvlevels::AbstractVector{<:AbstractHV}, numvalues; testbound=false) +function encodelevel(hvlevels::AbstractVector{<:AbstractHV}, numvalues; testbound = false) @assert length(hvlevels) == length(numvalues) "HV levels do not match numerical values" # construct the encoder function encoder(x::Number) @@ -554,9 +554,9 @@ end See `encodelevel`, same but provide lower (`a`) and upper (`b`) limit of the interval to be encoded. 
""" -encodelevel(hvlevels::AbstractVector{<:AbstractHV}, a::Number, b::Number; testbound=false) = encodelevel(hvlevels, range(a, b, length(hvlevels)); testbound) +encodelevel(hvlevels::AbstractVector{<:AbstractHV}, a::Number, b::Number; testbound = false) = encodelevel(hvlevels, range(a, b, length(hvlevels)); testbound) -encodelevel(HV, numvalues; testbound=false) = encodelevel(level(HV, length(numvalues)), numvalues; testbound) +encodelevel(HV, numvalues; testbound = false) = encodelevel(level(HV, length(numvalues)), numvalues; testbound) """ @@ -591,7 +591,7 @@ end decodelevel(hvlevels::AbstractVector{<:AbstractHV}, a::Number, b::Number) = decodelevel(hvlevels, range(a, b, length(hvlevels))) -decodelevel(HV, numvalues; testbound=false) = decodelevel(level(HV, length(numvalues)), numvalues; testbound) +decodelevel(HV, numvalues; testbound = false) = decodelevel(level(HV, length(numvalues)), numvalues; testbound) """ convertlevel(hvlevels, numvals..., kwargs...) @@ -604,22 +604,22 @@ convertlevel(hvlevels, numvals...; kwargs...) = encodelevel(hvlevels, numvals... # levels using FHRR -function level(v::FHRR, m::Int; periodic=false) +function level(v::FHRR, m::Int; periodic = false) u = periodic ? 2π : π return [FHRR(v.v .* exp(θ * im)) for θ in range(0, u, m)] end -function encodelevel(v::FHRR, numvalues; periodic=false) +function encodelevel(v::FHRR, numvalues; periodic = false) a, b = extrema(numvalues) r = b - a u = periodic ? 2π : π return x -> FHRR(v.v * exp((x - a) / r * u * im)) end -function decodelevel(v::FHRR, numvalues; periodic=false) +function decodelevel(v::FHRR, numvalues; periodic = false) a, b = extrema(numvalues) r = b - a u = periodic ? 2π : π return u -> imag(mean(log.(u.v ./ v.v) * r / u)) + a -end \ No newline at end of file +end diff --git a/src/inference.jl b/src/inference.jl index 7df38e1..625a2b5 100644 --- a/src/inference.jl +++ b/src/inference.jl @@ -81,9 +81,9 @@ Alias for `similarity`. See `similarity` for the main documentation. nearest_neighbor(u::AbstractHV, collection; kwargs...) = maximum( - (similarity(u, xi; kwargs...), i, xi) + (similarity(u, xi; kwargs...), i, xi) for (i, xi) in enumerate(collection) - ) +) nearest_neighbor(u::AbstractHV, collection::Dict; kwargs...) = maximum((similarity(u, xi; kwargs...), k, xi) for (k, xi) in collection) @@ -104,15 +104,15 @@ list of `(τ, i)`. function nearest_neighbor(u::AbstractHV, collection, k::Int; kwargs...) sims = [ (similarity(u, xi; kwargs...), i) - for (i, xi) in enumerate(collection) + for (i, xi) in enumerate(collection) ] - return partialsort!(sims, 1:k, rev=true) + return partialsort!(sims, 1:k, rev = true) end function nearest_neighbor(u::AbstractHV, collection::Dict, k::Int; kwargs...) 
sims = [ (similarity(u, xi; kwargs...), i) - for (i, xi) in collection + for (i, xi) in collection ] - return partialsort!(sims, 1:k, rev=true) + return partialsort!(sims, 1:k, rev = true) end diff --git a/src/operations.jl b/src/operations.jl index 3e66be7..6b64233 100644 --- a/src/operations.jl +++ b/src/operations.jl @@ -58,7 +58,7 @@ end # computes `r[i] = f(x[i], y[i+offset])` # assumes postive offset (for now) -@inline function offsetcombine!(r, f, x, y, offset=0) +@inline function offsetcombine!(r, f, x, y, offset = 0) @assert length(r) == length(x) == length(y) n = length(r) if offset == 0 @@ -73,7 +73,7 @@ end return r end -@inline function offsetcombine(f, x::V, y::V, offset=0) where {V<:AbstractVecOrMat} +@inline function offsetcombine(f, x::V, y::V, offset = 0) where {V <: AbstractVecOrMat} @assert length(x) == length(y) r = similar(x) n = length(r) @@ -93,7 +93,7 @@ end # ------ # binary and bipolar: use majority -function bundle(hvr::Union{BinaryHV,BipolarHV}, hdvs, r) +function bundle(hvr::Union{BinaryHV, BipolarHV}, hdvs, r) m = length(hdvs) for hv in hdvs r .+= hv.v @@ -108,9 +108,9 @@ end # ternary: just add them, no normalization by default function bundle( - ::TernaryHV, hdvs, r; - normalize=false -) + ::TernaryHV, hdvs, r; + normalize = false + ) for hv in hdvs r .+= hv.v end @@ -157,11 +157,11 @@ function bundle(hdvs; kwargs...) return bundle(hv, hdvs, r, kwargs...) end -Base.:+(hv1::HV, hv2::HV) where {HV<:AbstractHV} = bundle((hv1, hv2)) +Base.:+(hv1::HV, hv2::HV) where {HV <: AbstractHV} = bundle((hv1, hv2)) # BINDING # ------- -Base.bind(hv1::HV, hv2::HV) where {HV<:AbstractHV} = HV(hv1.v .* hv2.v) # default +Base.bind(hv1::HV, hv2::HV) where {HV <: AbstractHV} = HV(hv1.v .* hv2.v) # default Base.bind(hv1::BinaryHV, hv2::BinaryHV) = BinaryHV(hv1.v .⊻ hv2.v) Base.bind(hv1::BipolarHV, hv2::BipolarHV) = BipolarHV(hv1.v .⊻ hv2.v) Base.bind(hv1::TernaryHV, hv2::TernaryHV) = TernaryHV(hv1.v .* hv2.v) @@ -169,8 +169,8 @@ Base.bind(hv1::RealHV, hv2::RealHV) = RealHV(hv1.v .* hv2.v) Base.bind(hv1::GradedHV, hv2::GradedHV) = GradedHV(fuzzy_xor.(hv1.v, hv2.v)) Base.bind(hv1::GradedBipolarHV, hv2::GradedBipolarHV) = GradedBipolarHV(fuzzy_xor_bipol.(hv1.v, hv2.v)) Base.bind(hv1::FHRR, hv2::FHRR) = FHRR(hv1.v .* hv2.v) -Base.:*(hv1::HV, hv2::HV) where {HV<:AbstractHV} = bind(hv1, hv2) -Base.bind(hvs::AbstractVector{HV}) where {HV<:AbstractHV} = prod(hvs) +Base.:*(hv1::HV, hv2::HV) where {HV <: AbstractHV} = bind(hv1, hv2) +Base.bind(hvs::AbstractVector{HV}) where {HV <: AbstractHV} = prod(hvs) """ @@ -181,7 +181,7 @@ idempotent, i.e., `u * v * v == u`. Aliases with `\`. 
""" -unbind(hv1::HV, hv2::HV) where {HV<:AbstractHV} = bind(hv1, hv2) +unbind(hv1::HV, hv2::HV) where {HV <: AbstractHV} = bind(hv1, hv2) unbind(hv1::RealHV, hv2::RealHV) where {HV<:AbstractHV} = RealHV(hv1.v ./ hv2.v) unbind(hv1::FHRR, hv2::FHRR) where {HV<:AbstractHV} = FHRR(hv1.v ./ hv2.v) From 3ce46413ef6e74b7c1c8d01bfe8358a2d66e1497 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Vigil-V=C3=A1squez?= Date: Thu, 30 Oct 2025 20:56:00 +0100 Subject: [PATCH 4/6] style: format with Runic Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- src/operations.jl | 30 +++++++++++++++--------------- src/types.jl | 28 ++++++++++++++-------------- test/operations.jl | 2 +- 3 files changed, 30 insertions(+), 30 deletions(-) diff --git a/src/operations.jl b/src/operations.jl index 6b64233..747950f 100644 --- a/src/operations.jl +++ b/src/operations.jl @@ -183,36 +183,36 @@ Aliases with `\`. """ unbind(hv1::HV, hv2::HV) where {HV <: AbstractHV} = bind(hv1, hv2) -unbind(hv1::RealHV, hv2::RealHV) where {HV<:AbstractHV} = RealHV(hv1.v ./ hv2.v) -unbind(hv1::FHRR, hv2::FHRR) where {HV<:AbstractHV} = FHRR(hv1.v ./ hv2.v) +unbind(hv1::RealHV, hv2::RealHV) where {HV <: AbstractHV} = RealHV(hv1.v ./ hv2.v) +unbind(hv1::FHRR, hv2::FHRR) where {HV <: AbstractHV} = FHRR(hv1.v ./ hv2.v) -Base.:/(hv1::HV, hv2::HV) where {HV<:AbstractHV} = unbind(hv1, hv2) +Base.:/(hv1::HV, hv2::HV) where {HV <: AbstractHV} = unbind(hv1, hv2) # SHIFTING # -------- -shift!(hv::AbstractHV, k=1) = circshift!(hv.v, k) +shift!(hv::AbstractHV, k = 1) = circshift!(hv.v, k) -function shift(hv::AbstractHV, k=1) +function shift(hv::AbstractHV, k = 1) r = similar(hv) r.v .= circshift(hv.v, k) return r end -function shift!(hv::V, k=1) where {V<:Union{BinaryHV,BipolarHV}} +function shift!(hv::V, k = 1) where {V <: Union{BinaryHV, BipolarHV}} v = similar(hv.v) # empty bitvector hv.v .= circshift!(v, hv.v, k) return hv end -function shift(hv::V, k=1) where {V<:Union{BinaryHV,BipolarHV}} +function shift(hv::V, k = 1) where {V <: Union{BinaryHV, BipolarHV}} v = similar(hv.v) # empty bitvector return V(circshift!(v, hv.v, k)) end -ρ(hv::AbstractHV, k=1) = shift(hv, k) -ρ!(hv::AbstractHV, k=1) = shift!(hv, k) +ρ(hv::AbstractHV, k = 1) = shift(hv, k) +ρ!(hv::AbstractHV, k = 1) = shift!(hv, k) # COMPARISON @@ -231,7 +231,7 @@ One can specify either: - `atol=N/100` number of matches more than due to chance needed for being assumed similar - `ptol=0.01` threshold for seeing that many matches due to chance """ -function Base.isapprox(u::T, v::T; atol=length(u) / 100, ptol=0.01) where {T<:Union{BinaryHV,BipolarHV}} +function Base.isapprox(u::T, v::T; atol = length(u) / 100, ptol = 0.01) where {T <: Union{BinaryHV, BipolarHV}} @assert length(u) == length(v) "Vectors have to be of equal length" N = length(u) missmatches = sum(ui != vi for (ui, vi) in zip(u, v)) @@ -251,7 +251,7 @@ One can specify either: - `ptol=1e-10` threshold for seeing that many matches due to chance - `N_bootstap=200` number of samples for bootstrapping """ -function Base.isapprox(u::T, v::T; ptol=1.0e-10, N_bootstrap=500) where {T<:AbstractHV} +function Base.isapprox(u::T, v::T; ptol = 1.0e-10, N_bootstrap = 500) where {T <: AbstractHV} @assert length(u) == length(v) "Vectors have to be of equal length" N = length(u) # bootstrap to find the zero distr @@ -287,17 +287,17 @@ function randbv(n::Int, I) end -function perturbate!(::Type{HVByteVec}, hv::HV, I, dist=eldist(hv)) where {HV<:AbstractHV} +function perturbate!(::Type{HVByteVec}, hv::HV, 
I, dist = eldist(hv)) where {HV <: AbstractHV} hv.v[I] .= rand(dist, length(I)) return hv end -function perturbate!(::Type{HVByteVec}, hv::HV, M::BitVector, dist=eldist(hv)) where {HV<:AbstractHV} +function perturbate!(::Type{HVByteVec}, hv::HV, M::BitVector, dist = eldist(hv)) where {HV <: AbstractHV} hv.v[M] .= rand(dist, sum(M)) return hv end -function perturbate!(::Type{HVByteVec}, hv::HV, p::Number, args...) where {HV<:AbstractHV} +function perturbate!(::Type{HVByteVec}, hv::HV, p::Number, args...) where {HV <: AbstractHV} return perturbate!(hv, randbv(length(hv), p), args...) end @@ -315,4 +315,4 @@ perturbate(hv::AbstractHV, args...; kwargs...) = perturbate!(copy(hv), args...; # OTHER # ----- -Base.:^(hv::FHRR, x::Number) = FHRR(hv.v .^ x) \ No newline at end of file +Base.:^(hv::FHRR, x::Number) = FHRR(hv.v .^ x) diff --git a/src/types.jl b/src/types.jl index 6696fab..f864b2a 100644 --- a/src/types.jl +++ b/src/types.jl @@ -25,11 +25,11 @@ abstract type AbstractHV{T} <: AbstractVector{T} end Base.sum(hv::AbstractHV) = sum(hv.v) Base.size(hv::AbstractHV) = size(hv.v) Base.getindex(hv::AbstractHV, i) = hv.v[i] -Base.similar(hv::T) where {T<:AbstractHV} = T(length(hv)) +Base.similar(hv::T) where {T <: AbstractHV} = T(length(hv)) LinearAlgebra.norm(hv::AbstractHV) = norm(hv.v) LinearAlgebra.normalize!(hv::AbstractHV) = hv Base.hash(hv::AbstractHV) = hash(hv.v) -Base.copy(hv::HV) where {HV<:AbstractHV} = HV(copy(hv.v)) +Base.copy(hv::HV) where {HV <: AbstractHV} = HV(copy(hv.v)) # Gives an empty Vector (filled with neutral elelment) that # the `hv::AbstractHV` type uses. @@ -49,7 +49,7 @@ struct BipolarHV <: AbstractHV{Int} BipolarHV(v::BitVector) = new(v) end -BipolarHV(n::Integer=10_000) = BipolarHV(bitrand(n)) +BipolarHV(n::Integer = 10_000) = BipolarHV(bitrand(n)) BipolarHV(v::AbstractVector) = BipolarHV(v .> 0) Base.getindex(hv::BipolarHV, i) = hv.v[i] ? 1 : -1 @@ -68,7 +68,7 @@ struct TernaryHV <: AbstractHV{Int} v::Vector{Int} end -TernaryHV(n::Int=10_000) = TernaryHV(rand((-1, 1), n)) +TernaryHV(n::Int = 10_000) = TernaryHV(rand((-1, 1), n)) function LinearAlgebra.normalize!(hv::TernaryHV) clamp!(hv.v, -1, 1) @@ -87,7 +87,7 @@ struct BinaryHV <: AbstractHV{Bool} v::BitVector end -BinaryHV(n::Integer=10_000) = BinaryHV(bitrand(n)) +BinaryHV(n::Integer = 10_000) = BinaryHV(bitrand(n)) BinaryHV(v::AbstractVector{Bool}) = BinaryHV(BitVector(v)) # needed for aggregation @@ -98,11 +98,11 @@ eldist(::Type{BinaryHV}) = Bernoulli(0.5) # `RealHV` contain real numbers, drawn from a distribution # -------- -struct RealHV{T<:Real} <: AbstractHV{T} +struct RealHV{T <: Real} <: AbstractHV{T} v::Vector{T} end -RealHV(n::Integer=10_000, distr::Distribution=eldist(RealHV)) = RealHV(rand(distr, n)) +RealHV(n::Integer = 10_000, distr::Distribution = eldist(RealHV)) = RealHV(rand(distr, n)) Base.similar(hv::RealHV) = RealHV(length(hv), eldist(RealHV)) @@ -117,12 +117,12 @@ eldist(::Type{<:RealHV}) = Normal() # GradedHV are vectors in $[0, 1]^n$, allowing for graded relations. 
# ---------------- -struct GradedHV{T<:Real} <: AbstractHV{T} +struct GradedHV{T <: Real} <: AbstractHV{T} v::Vector{T} #GradedHV(v::AbstractVector{T}) where {T<:Real} = new{T}(clamp!(v,0,1)) end -function GradedHV(n::Int=10_000, distr=eldist(GradedHV)) +function GradedHV(n::Int = 10_000, distr = eldist(GradedHV)) @assert 0 ≤ minimum(distr) < maximum(distr) ≤ 1 "Provide `distr` with support in [0,1]" return GradedHV(rand(distr, n)) end @@ -146,12 +146,12 @@ end # --------------- -struct GradedBipolarHV{T<:Real} <: AbstractHV{T} +struct GradedBipolarHV{T <: Real} <: AbstractHV{T} v::Vector{T} #GradedBipolarHV(v::AbstractVector{T}) where {T<:Real} = new{T}(clamp!(v,-1,1)) end -function GradedBipolarHV(n::Int=10_000, distr::Distribution=eldist(GradedBipolarHV)) +function GradedBipolarHV(n::Int = 10_000, distr::Distribution = eldist(GradedBipolarHV)) @assert -1 ≤ minimum(distr) < maximum(distr) ≤ 1 "Provide `distr` with support in [-1,1]" return GradedBipolarHV(rand(distr, n)) end @@ -167,14 +167,14 @@ LinearAlgebra.normalize!(hv::GradedBipolarHV) = clamp!(hv.v, -1, 1) # Fourier Holographically Reduced Represenetations # ------------------------------------------------ -struct FHRR{T<:Complex} <: AbstractHV{T} +struct FHRR{T <: Complex} <: AbstractHV{T} v::Vector{T} end #Base.eltype(::FHRR{T}) where {T} = Complex{T} -FHRR(n::Int=10_000) = FHRR(exp.(2π * im .* rand(n))) -FHRR(T::Type, n::Int=10_000) = FHRR(exp.(2π * im .* rand(T, n))) +FHRR(n::Int = 10_000) = FHRR(exp.(2π * im .* rand(n))) +FHRR(T::Type, n::Int = 10_000) = FHRR(exp.(2π * im .* rand(T, n))) Base.similar(hv::FHRR{<:Complex{R}}) where {R} = FHRR(exp.(2π * im .* rand(R, length(hv)))) diff --git a/test/operations.jl b/test/operations.jl index 9ee0506..7a5dc06 100644 --- a/test/operations.jl +++ b/test/operations.jl @@ -52,7 +52,7 @@ using LinearAlgebra, Random end # currently not yet a good way of evaluating these - HV <: Union{TernaryHV,GradedHV,GradedBipolarHV,RealHV} && continue + HV <: Union{TernaryHV, GradedHV, GradedBipolarHV, RealHV} && continue @testset "similarity $HV" begin N = 10_000 From f0428e6870c5278ddf990b320531c803ca05c359 Mon Sep 17 00:00:00 2001 From: michielstock Date: Thu, 6 Nov 2025 11:40:29 +0100 Subject: [PATCH 5/6] :wrench: better numerical encoding with FHRR --- src/encoding.jl | 52 +++++++++++++++++++++++++----------------------- test/encoding.jl | 35 +++++++++++++++++++++++++------- 2 files changed, 55 insertions(+), 32 deletions(-) diff --git a/src/encoding.jl b/src/encoding.jl index 4e04754..3dfe4d3 100644 --- a/src/encoding.jl +++ b/src/encoding.jl @@ -54,7 +54,7 @@ where `V` is the hypervector collection, `m` is the size of the hypervector coll - [`multibind`](@ref): Multibind encoding, binding-variant of this encoder """ -function multiset(vs::AbstractVector{<:T})::T where {T <: AbstractHV} +function multiset(vs::AbstractVector{<:T})::T where {T<:AbstractHV} return bundle(vs) end @@ -301,7 +301,7 @@ and `\\oplus` are the binding and bundling operations. - [Torchhd documentation](https://torchhd.readthedocs.io/en/stable/generated/torchhd.hash_table.html) """ -function hashtable(keys::T, values::T) where {T <: AbstractVector{<:AbstractHV}} +function hashtable(keys::T, values::T) where {T<:AbstractVector{<:AbstractHV}} @assert length(keys) == length(values) "Number of keys and values aren't equal" return bundle(map(prod, zip(keys, values))) end @@ -369,7 +369,7 @@ and binding operations. 
- [Torchhd documentation](https://torchhd.readthedocs.io/en/stable/generated/torchhd.cross_product.html) """ -function crossproduct(U::T, V::T) where {T <: AbstractVector{<:AbstractHV}} +function crossproduct(U::T, V::T) where {T<:AbstractVector{<:AbstractHV}} # TODO: This should be bundled without normalizing return bind(multiset(U), multiset(V)) end @@ -439,11 +439,11 @@ and shift operations. - [Torchhd documentation](https://torchhd.readthedocs.io/en/stable/generated/torchhd.ngrams.html) """ -function ngrams(vs::AbstractVector{<:AbstractHV}, n::Int = 3) +function ngrams(vs::AbstractVector{<:AbstractHV}, n::Int=3) l = length(vs) p = l - n + 1 @assert 1 <= n <= length(vs) "`n` must be 1 ≤ n ≤ $l" - return bundle([bind([shift(vs[i + j], j) for j in 0:(n - 1)]) for i in 1:p]) + return bundle([bind([shift(vs[i+j], j) for j in 0:(n-1)]) for i in 1:p]) end """ @@ -485,7 +485,7 @@ hypervector collection, `i` is the position of the entry in the collection, and - [Torchhd documentation](https://torchhd.readthedocs.io/en/stable/generated/torchhd.graph.html) """ -function graph(source::T, target::T; directed::Bool = false) where {T <: AbstractVector{<:AbstractHV}} +function graph(source::T, target::T; directed::Bool=false) where {T<:AbstractVector{<:AbstractHV}} @assert length(source) == length(target) "`source` and `target` must be the same length" return hashtable(source, shift.(target, convert(Int, directed))) end @@ -501,7 +501,7 @@ Creates a set of level correlated hypervectors, where the first and last hyperve - `v::HV`: Base hypervector - `m::Int`: Number of levels (alternatively, provide a vector to be encoded) """ -function level(v::HV, m::Int) where {HV <: AbstractHV} +function level(v::HV, m::Int) where {HV<:AbstractHV} hvs = [v] p = 2 / m while length(hvs) < m @@ -511,7 +511,7 @@ function level(v::HV, m::Int) where {HV <: AbstractHV} return hvs end -level(HV::Type{<:AbstractHV}, m::Int; n::Int = 10_000) = level(HV(n), m) +level(HV::Type{<:AbstractHV}, m::Int; n::Int=10_000) = level(HV(n), m) level(HVv, vals::AbstractVector) = level(HVv, length(vals)) level(HVv, vals::UnitRange) = level(HVv, length(vals)) @@ -538,7 +538,7 @@ encoder = encodelevel(hvlevels, numvalues) encoder(pi/3) # hypervector that best represents this numerical value ``` """ -function encodelevel(hvlevels::AbstractVector{<:AbstractHV}, numvalues; testbound = false) +function encodelevel(hvlevels::AbstractVector{<:AbstractHV}, numvalues; testbound=false) @assert length(hvlevels) == length(numvalues) "HV levels do not match numerical values" # construct the encoder function encoder(x::Number) @@ -554,9 +554,9 @@ end See `encodelevel`, same but provide lower (`a`) and upper (`b`) limit of the interval to be encoded. 
""" -encodelevel(hvlevels::AbstractVector{<:AbstractHV}, a::Number, b::Number; testbound = false) = encodelevel(hvlevels, range(a, b, length(hvlevels)); testbound) +encodelevel(hvlevels::AbstractVector{<:AbstractHV}, a::Number, b::Number; testbound=false) = encodelevel(hvlevels, range(a, b, length(hvlevels)); testbound) -encodelevel(HV, numvalues; testbound = false) = encodelevel(level(HV, length(numvalues)), numvalues; testbound) +encodelevel(HV, numvalues; testbound=false) = encodelevel(level(HV, length(numvalues)), numvalues; testbound) """ @@ -591,35 +591,37 @@ end decodelevel(hvlevels::AbstractVector{<:AbstractHV}, a::Number, b::Number) = decodelevel(hvlevels, range(a, b, length(hvlevels))) -decodelevel(HV, numvalues; testbound = false) = decodelevel(level(HV, length(numvalues)), numvalues; testbound) +decodelevel(HV, numvalues; testbound=false) = decodelevel(level(HV, length(numvalues)), numvalues; testbound) """ convertlevel(hvlevels, numvals..., kwargs...) + convertlevel(HV::AbstractHV, numvals..., kwargs...) Creates the `encoder` and `decoder` for a level incoding in one step. See `encodelevel` and `decodelevel` for their respective documentations. """ convertlevel(hvlevels, numvals...; kwargs...) = encodelevel(hvlevels, numvals...; kwargs...), decodelevel(hvlevels, numvals..., kwargs...) +convertlevel(hv::AbstractHV, numvals...; kwargs...) = encodelevel(hv, numvals...; kwargs...), decodelevel(hv, numvals..., kwargs...) + # levels using FHRR -function level(v::FHRR, m::Int; periodic = false) - u = periodic ? 2π : π - return [FHRR(v.v .* exp(θ * im)) for θ in range(0, u, m)] +function level(v::FHRR, m::Int; β=1 / m) + return [v^(x * β) for x in 1:m] end +function level(v::FHRR, vals::Union{AbstractVector{<:Number},UnitRange}; β=1 / (maximum(vals) - minimum(vals))) + return [v^(x * β) for x in vals] +end -function encodelevel(v::FHRR, numvalues; periodic = false) - a, b = extrema(numvalues) - r = b - a - u = periodic ? 2π : π - return x -> FHRR(v.v * exp((x - a) / r * u * im)) +function encodelevel(v::FHRR, vals=(0, 1); β=1 / (maximum(vals) - minimum(vals))) + return x -> v^(β * x) end -function decodelevel(v::FHRR, numvalues; periodic = false) - a, b = extrema(numvalues) - r = b - a - u = periodic ? 2π : π - return u -> imag(mean(log.(u.v ./ v.v) * r / u)) + a +function decodelevel(v::FHRR, vals=(0, 1); β=1 / (maximum(vals) - minimum(vals))) + return u -> @.(real(log(u.v) / log(v.v) / β)) |> mean end + + +convertlevel(v::FHRR, vals=(0, 1); kwargs...) = encodelevel(v, vals; kwargs...), decodelevel(v, vals; kwargs...) 
\ No newline at end of file diff --git a/test/encoding.jl b/test/encoding.jl index c4f76f1..e1d5ddc 100644 --- a/test/encoding.jl +++ b/test/encoding.jl @@ -1,12 +1,12 @@ @testset "encoding" begin hvs = BinaryHV.( [ - [1, 0, 0, 0, 0], - [1, 1, 0, 0, 0], - [1, 1, 1, 0, 0], - [1, 1, 1, 1, 0], - [1, 1, 1, 1, 1], - ] + [1, 0, 0, 0, 0], + [1, 1, 0, 0, 0], + [1, 1, 1, 0, 0], + [1, 1, 1, 1, 0], + [1, 1, 1, 1, 1], + ] ) @testset "multiset" begin @@ -46,7 +46,7 @@ s = [1, 3, 4, 2, 5] t = [3, 4, 2, 1, 4] @test graph(hvs[s], hvs[t]) == Bool.([0, 0, 0, 0, 0]) - @test graph(hvs[s], hvs[t]; directed = true) == Bool.([1, 0, 0, 1, 0]) + @test graph(hvs[s], hvs[t]; directed=true) == Bool.([1, 0, 0, 1, 0]) @test_throws AssertionError graph(hvs[s], hvs[[1, 2, 3]]) end @@ -63,4 +63,25 @@ x = decoder(hv) @test 1 ≤ x ≤ 2 end + + @testset "FHRR numbers" begin + + v = FHRR() + + numvals = 0:0.1:10 + + encoder, decoder = convertlevel(v, numvals) + + x, y, z = 2, 5, 10 + + hx, hy, hz = encoder.((x, y, z)) + + @test hx isa FHRR + @test similarity(hx, hy) > similarity(hx, hz) + + @test decoder(hx) < decoder(hy) < decoder(hz) + end end + + + From a639417ee07faf6275069c0a288ca04dec4e2687 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Vigil-V=C3=A1squez?= Date: Thu, 6 Nov 2025 12:04:57 +0100 Subject: [PATCH 6/6] style: format with Runic Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- src/encoding.jl | 34 +++++++++++++++++----------------- test/encoding.jl | 17 +++++++---------- 2 files changed, 24 insertions(+), 27 deletions(-) diff --git a/src/encoding.jl b/src/encoding.jl index 3dfe4d3..bbcec69 100644 --- a/src/encoding.jl +++ b/src/encoding.jl @@ -54,7 +54,7 @@ where `V` is the hypervector collection, `m` is the size of the hypervector coll - [`multibind`](@ref): Multibind encoding, binding-variant of this encoder """ -function multiset(vs::AbstractVector{<:T})::T where {T<:AbstractHV} +function multiset(vs::AbstractVector{<:T})::T where {T <: AbstractHV} return bundle(vs) end @@ -301,7 +301,7 @@ and `\\oplus` are the binding and bundling operations. - [Torchhd documentation](https://torchhd.readthedocs.io/en/stable/generated/torchhd.hash_table.html) """ -function hashtable(keys::T, values::T) where {T<:AbstractVector{<:AbstractHV}} +function hashtable(keys::T, values::T) where {T <: AbstractVector{<:AbstractHV}} @assert length(keys) == length(values) "Number of keys and values aren't equal" return bundle(map(prod, zip(keys, values))) end @@ -369,7 +369,7 @@ and binding operations. - [Torchhd documentation](https://torchhd.readthedocs.io/en/stable/generated/torchhd.cross_product.html) """ -function crossproduct(U::T, V::T) where {T<:AbstractVector{<:AbstractHV}} +function crossproduct(U::T, V::T) where {T <: AbstractVector{<:AbstractHV}} # TODO: This should be bundled without normalizing return bind(multiset(U), multiset(V)) end @@ -439,11 +439,11 @@ and shift operations. 
- [Torchhd documentation](https://torchhd.readthedocs.io/en/stable/generated/torchhd.ngrams.html) """ -function ngrams(vs::AbstractVector{<:AbstractHV}, n::Int=3) +function ngrams(vs::AbstractVector{<:AbstractHV}, n::Int = 3) l = length(vs) p = l - n + 1 @assert 1 <= n <= length(vs) "`n` must be 1 ≤ n ≤ $l" - return bundle([bind([shift(vs[i+j], j) for j in 0:(n-1)]) for i in 1:p]) + return bundle([bind([shift(vs[i + j], j) for j in 0:(n - 1)]) for i in 1:p]) end """ @@ -485,7 +485,7 @@ hypervector collection, `i` is the position of the entry in the collection, and - [Torchhd documentation](https://torchhd.readthedocs.io/en/stable/generated/torchhd.graph.html) """ -function graph(source::T, target::T; directed::Bool=false) where {T<:AbstractVector{<:AbstractHV}} +function graph(source::T, target::T; directed::Bool = false) where {T <: AbstractVector{<:AbstractHV}} @assert length(source) == length(target) "`source` and `target` must be the same length" return hashtable(source, shift.(target, convert(Int, directed))) end @@ -501,7 +501,7 @@ Creates a set of level correlated hypervectors, where the first and last hyperve - `v::HV`: Base hypervector - `m::Int`: Number of levels (alternatively, provide a vector to be encoded) """ -function level(v::HV, m::Int) where {HV<:AbstractHV} +function level(v::HV, m::Int) where {HV <: AbstractHV} hvs = [v] p = 2 / m while length(hvs) < m @@ -511,7 +511,7 @@ function level(v::HV, m::Int) where {HV<:AbstractHV} return hvs end -level(HV::Type{<:AbstractHV}, m::Int; n::Int=10_000) = level(HV(n), m) +level(HV::Type{<:AbstractHV}, m::Int; n::Int = 10_000) = level(HV(n), m) level(HVv, vals::AbstractVector) = level(HVv, length(vals)) level(HVv, vals::UnitRange) = level(HVv, length(vals)) @@ -538,7 +538,7 @@ encoder = encodelevel(hvlevels, numvalues) encoder(pi/3) # hypervector that best represents this numerical value ``` """ -function encodelevel(hvlevels::AbstractVector{<:AbstractHV}, numvalues; testbound=false) +function encodelevel(hvlevels::AbstractVector{<:AbstractHV}, numvalues; testbound = false) @assert length(hvlevels) == length(numvalues) "HV levels do not match numerical values" # construct the encoder function encoder(x::Number) @@ -554,9 +554,9 @@ end See `encodelevel`, same but provide lower (`a`) and upper (`b`) limit of the interval to be encoded. """ -encodelevel(hvlevels::AbstractVector{<:AbstractHV}, a::Number, b::Number; testbound=false) = encodelevel(hvlevels, range(a, b, length(hvlevels)); testbound) +encodelevel(hvlevels::AbstractVector{<:AbstractHV}, a::Number, b::Number; testbound = false) = encodelevel(hvlevels, range(a, b, length(hvlevels)); testbound) -encodelevel(HV, numvalues; testbound=false) = encodelevel(level(HV, length(numvalues)), numvalues; testbound) +encodelevel(HV, numvalues; testbound = false) = encodelevel(level(HV, length(numvalues)), numvalues; testbound) """ @@ -591,7 +591,7 @@ end decodelevel(hvlevels::AbstractVector{<:AbstractHV}, a::Number, b::Number) = decodelevel(hvlevels, range(a, b, length(hvlevels))) -decodelevel(HV, numvalues; testbound=false) = decodelevel(level(HV, length(numvalues)), numvalues; testbound) +decodelevel(HV, numvalues; testbound = false) = decodelevel(level(HV, length(numvalues)), numvalues; testbound) """ convertlevel(hvlevels, numvals..., kwargs...) @@ -607,21 +607,21 @@ convertlevel(hv::AbstractHV, numvals...; kwargs...) = encodelevel(hv, numvals... 
# levels using FHRR -function level(v::FHRR, m::Int; β=1 / m) +function level(v::FHRR, m::Int; β = 1 / m) return [v^(x * β) for x in 1:m] end -function level(v::FHRR, vals::Union{AbstractVector{<:Number},UnitRange}; β=1 / (maximum(vals) - minimum(vals))) +function level(v::FHRR, vals::Union{AbstractVector{<:Number}, UnitRange}; β = 1 / (maximum(vals) - minimum(vals))) return [v^(x * β) for x in vals] end -function encodelevel(v::FHRR, vals=(0, 1); β=1 / (maximum(vals) - minimum(vals))) +function encodelevel(v::FHRR, vals = (0, 1); β = 1 / (maximum(vals) - minimum(vals))) return x -> v^(β * x) end -function decodelevel(v::FHRR, vals=(0, 1); β=1 / (maximum(vals) - minimum(vals))) +function decodelevel(v::FHRR, vals = (0, 1); β = 1 / (maximum(vals) - minimum(vals))) return u -> @.(real(log(u.v) / log(v.v) / β)) |> mean end -convertlevel(v::FHRR, vals=(0, 1); kwargs...) = encodelevel(v, vals; kwargs...), decodelevel(v, vals; kwargs...) \ No newline at end of file +convertlevel(v::FHRR, vals = (0, 1); kwargs...) = encodelevel(v, vals; kwargs...), decodelevel(v, vals; kwargs...) diff --git a/test/encoding.jl b/test/encoding.jl index e1d5ddc..7a2af2d 100644 --- a/test/encoding.jl +++ b/test/encoding.jl @@ -1,12 +1,12 @@ @testset "encoding" begin hvs = BinaryHV.( [ - [1, 0, 0, 0, 0], - [1, 1, 0, 0, 0], - [1, 1, 1, 0, 0], - [1, 1, 1, 1, 0], - [1, 1, 1, 1, 1], - ] + [1, 0, 0, 0, 0], + [1, 1, 0, 0, 0], + [1, 1, 1, 0, 0], + [1, 1, 1, 1, 0], + [1, 1, 1, 1, 1], + ] ) @testset "multiset" begin @@ -46,7 +46,7 @@ s = [1, 3, 4, 2, 5] t = [3, 4, 2, 1, 4] @test graph(hvs[s], hvs[t]) == Bool.([0, 0, 0, 0, 0]) - @test graph(hvs[s], hvs[t]; directed=true) == Bool.([1, 0, 0, 1, 0]) + @test graph(hvs[s], hvs[t]; directed = true) == Bool.([1, 0, 0, 1, 0]) @test_throws AssertionError graph(hvs[s], hvs[[1, 2, 3]]) end @@ -82,6 +82,3 @@ @test decoder(hx) < decoder(hy) < decoder(hz) end end - - -
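To close, a short sketch of the fractional-power level encoding that patches 5 and 6 introduce for `FHRR` (it mirrors the new `test/encoding.jl` test; the variable names are illustrative). The encoder maps a number `x` in the value range to `base^(β * x)` with `β = 1 / (maximum(vals) - minimum(vals))`, and the decoder averages `real(log(u.v) / log(base.v)) / β` over the elements, so nearby numbers land on similar hypervectors and decoding preserves their ordering.

```julia
using HyperdimensionalComputing

base = FHRR()                              # random unit-magnitude complex hypervector
encoder, decoder = convertlevel(base, 0:0.1:10)

hx, hy, hz = encoder.((2, 5, 10))          # e.g. hx = base^(2 / 10)

similarity(hx, hy) > similarity(hx, hz)    # closer values give more similar vectors
decoder(hx) < decoder(hy) < decoder(hz)    # decoding preserves the ordering
```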