Skip to content
This repository was archived by the owner on Dec 17, 2025. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ else()
endif()


set(EIGEN3_REQUIRED_VERSION "3.4.0")
set(EIGEN3_REQUIRED_VERSION "3.4.1")
find_package (Eigen3 ${EIGEN3_REQUIRED_VERSION} QUIET NO_MODULE)
if(NOT Eigen3_FOUND)
message(STATUS "Eigen3 not found in system, fetching from GitHub...")
Expand Down
4 changes: 3 additions & 1 deletion backend/cxx/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -36,11 +36,13 @@ if(SPARSEIR_DEBUG)
endif()

# Find Eigen3
set(EIGEN3_REQUIRED_VERSION "3.4.0")
set(EIGEN3_REQUIRED_VERSION "3.4.1")
find_package(Eigen3 ${EIGEN3_REQUIRED_VERSION} QUIET NO_MODULE)
if(NOT Eigen3_FOUND)
message(STATUS "Eigen3 not found, fetching from GitLab...")
include(FetchContent)
# Disable Eigen3's own tests to avoid running them in CI
set(EIGEN_BUILD_TESTING OFF CACHE BOOL "Build Eigen3 tests" FORCE)
FetchContent_Declare(Eigen3
GIT_REPOSITORY https://gitlab.com/libeigen/eigen.git
GIT_TAG ${EIGEN3_REQUIRED_VERSION}
Expand Down
108 changes: 105 additions & 3 deletions backend/cxx/include/sparseir/basis.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ class FiniteTempBasis : public AbstractBasis<S> {
std::shared_ptr<PiecewiseLegendreFTVector<S>> uhat;
std::shared_ptr<PiecewiseLegendreFTVector<S>> uhat_full;

std::function<double(double, double)> weight_func; // weight function especially for bosonic basis
std::function<double(double)> inv_weight_func; // inverse weight function (numerically stable), omega-only

template <typename K, typename = typename std::enable_if<is_concrete_kernel<K>::value>::type>
FiniteTempBasis(double beta, double omega_max, double epsilon,
Expand All @@ -90,7 +90,11 @@ class FiniteTempBasis : public AbstractBasis<S> {
this->beta = beta;
this->lambda = kernel.lambda_;
this->sve_result = std::make_shared<SVEResult>(sve_result);
this->weight_func = kernel.template weight_func<double>(S());
// Convert kernel's 2-arg inv_weight_func to omega-only function
auto kernel_inv_weight_func = kernel.template inv_weight_func<double>(S());
this->inv_weight_func = [kernel_inv_weight_func, beta](double omega) -> double {
return kernel_inv_weight_func(beta, omega);
};

double wmax = this->lambda / beta;

Expand Down Expand Up @@ -163,6 +167,93 @@ class FiniteTempBasis : public AbstractBasis<S> {
std::make_shared<PiecewiseLegendreFTVector<S>>(uhat_polyvec);
}

// Constructor with SVE result and inv_weight_func (no kernel required).
//
// Builds a finite-temperature IR basis directly from a precomputed SVE
// result, bypassing kernel construction. The weight handling uses an
// omega-only *inverse* weight function (numerically stable form), matching
// the `inv_weight_func` member this class stores.
//
// Parameters:
//   beta            - inverse temperature; must be > 0.
//   omega_max       - frequency cutoff; must be >= 0 and satisfy
//                     beta * omega_max == lambda (within 1e-10).
//   epsilon         - truncation accuracy passed to sve_result.part().
//   lambda          - dimensionless cutoff Lambda = beta * omega_max.
//   ypower          - exponent applied to wmax when scaling singular values.
//   conv_radius     - convergence radius forwarded to the Fourier-transformed
//                     basis functions (uhat).
//   sve_result      - precomputed singular value expansion; s must be non-empty.
//   inv_weight_func - omega-only inverse weight function stored as-is.
//   max_size        - optional cap on the number of singular values kept
//                     (-1 = no cap), forwarded to sve_result.part().
//
// Throws:
//   std::runtime_error - empty sve_result.s, or beta*omega_max != lambda.
//   std::domain_error  - beta <= 0 or omega_max < 0.
FiniteTempBasis(double beta, double omega_max, double epsilon,
                double lambda, int ypower, double conv_radius,
                SVEResult sve_result,
                std::function<double(double)> inv_weight_func,
                int max_size = -1)
{
    // --- Argument validation -------------------------------------------
    if (sve_result.s.size() == 0) {
        throw std::runtime_error("SVE result sve_result.s is empty");
    }
    if (beta <= 0.0) {
        throw std::domain_error(
            "Inverse temperature beta must be positive");
    }
    if (omega_max < 0.0) {
        throw std::domain_error(
            "Frequency cutoff omega_max must be non-negative");
    }
    // lambda is redundant given beta and omega_max; enforce consistency.
    if (std::fabs(beta * omega_max - lambda) > 1e-10) {
        throw std::runtime_error("Product of beta and omega_max must be "
                                 "equal to lambda");
    }
    this->beta = beta;
    this->lambda = lambda;
    this->sve_result = std::make_shared<SVEResult>(sve_result);
    this->inv_weight_func = inv_weight_func;

    double wmax = this->lambda / beta;

    // Truncate the SVE to the requested accuracy / size.
    auto part_result = sve_result.part(epsilon, max_size);
    PiecewiseLegendrePolyVector u_ = std::get<0>(part_result);
    Eigen::VectorXd s_ = std::get<1>(part_result);
    PiecewiseLegendrePolyVector v_ = std::get<2>(part_result);
    double sve_result_s0 = sve_result.s(0);

    // Accuracy estimate: first discarded singular value relative to s(0);
    // if nothing was discarded, fall back to the last kept singular value.
    if (sve_result.s.size() > s_.size()) {
        this->accuracy = sve_result.s(s_.size()) / sve_result_s0;
    } else {
        this->accuracy = sve_result.s(s_.size() - 1) / sve_result_s0;
    }

    // Knots of the dimensionless polynomials (on [-1, 1] in x, y).
    auto u_knots_ = u_.polyvec[0].knots;
    auto v_knots_ = v_.polyvec[0].knots;

    // Map dimensionless variables to physical ones:
    //   x in [-1, 1] -> tau in [0, beta];  y in [-1, 1] -> omega in [-wmax, wmax].
    Eigen::VectorXd u_knots = (beta / 2) * (u_knots_.array() + 1);
    Eigen::VectorXd v_knots = wmax * v_knots_;

    // Segment widths rescale with the same change of variables.
    Eigen::VectorXd deltax4u = (beta / 2) * u_.get_delta_x();
    Eigen::VectorXd deltax4v = wmax * v_.get_delta_x();
    // Collect per-polynomial parity flags so they survive the rescaling.
    std::vector<int> u_symm_vec;
    for (std::size_t i = 0; i < u_.size(); ++i) {
        u_symm_vec.push_back(u_.polyvec[i].get_symm());
    }
    std::vector<int> v_symm_vec;
    for (std::size_t i = 0; i < v_.size(); ++i) {
        v_symm_vec.push_back(v_.polyvec[i].get_symm());
    }

    Eigen::VectorXi u_symm =
        Eigen::Map<Eigen::VectorXi>(u_symm_vec.data(), u_symm_vec.size());
    Eigen::VectorXi v_symm =
        Eigen::Map<Eigen::VectorXi>(v_symm_vec.data(), v_symm_vec.size());

    // u lives on the tau axis and is wrapped as a beta-(anti)periodic function;
    // v lives on the real-frequency axis and stays a plain polynomial vector.
    this->u = std::make_shared<PeriodicFunctions<S, PiecewiseLegendrePolyVector>>(
        std::make_shared<PiecewiseLegendrePolyVector>(u_, u_knots, deltax4u, u_symm), beta);
    this->v = std::make_shared<PiecewiseLegendrePolyVector>(
        v_, v_knots, deltax4v, v_symm);
    // Singular values pick up the Jacobians of both changes of variables;
    // ypower encodes an extra wmax power (kernel-dependent convention).
    this->s =
        (std::sqrt(beta * wmax / 2) * std::pow(wmax, ypower)) *
        s_;

    // Fourier transform of the FULL (untruncated) u set; sqrt(beta) is the
    // tau-axis normalization factor.
    Eigen::Tensor<double, 3> udata3d = sve_result.u->get_data();
    PiecewiseLegendrePolyVector uhat_base_full =
        PiecewiseLegendrePolyVector(sqrt(beta) * udata3d, *sve_result.u);
    S statistics = S();

    this->uhat_full = std::make_shared<PiecewiseLegendreFTVector<S>>(
        uhat_base_full, statistics, conv_radius);

    // Truncated view of uhat_full, restricted to the kept singular values.
    std::vector<PiecewiseLegendreFT<S>> uhat_polyvec;
    for (int i = 0; i < this->s.size(); ++i) {
        uhat_polyvec.push_back(this->uhat_full->operator[](i));
    }
    this->uhat =
        std::make_shared<PiecewiseLegendreFTVector<S>>(uhat_polyvec);
}

FiniteTempBasis(double beta, double omega_max, double epsilon,
int max_size = -1)
{
Expand Down Expand Up @@ -332,10 +423,21 @@ inline void fence_matsubara_sampling(std::vector<MatsubaraFreq<S>> &wn,

for (const auto &wn_outer : outer_frequencies) {
int outer_val = wn_outer.n;
// In SparseIR.jl-v1, ωn_diff is always created as BosonicFreq
// This ensures diff_val is always even (valid for Bosonic)
int diff_val = 2 * static_cast<int>(std::round(0.025 * outer_val));
int wn_diff = MatsubaraFreq<S>(diff_val).n;

// Handle edge case: if diff_val is 0, set it to 2 (minimum even value for Bosonic)
if (diff_val == 0) {
diff_val = 2;
}

// Get the n value from BosonicFreq (same as diff_val since it's even)
int wn_diff = MatsubaraFreq<Bosonic>(diff_val).n;

if (wn.size() >= 20) {
// For Fermionic: wn_outer.n is odd, wn_diff is even, so wn_outer.n ± wn_diff is odd (valid)
// For Bosonic: wn_outer.n is even, wn_diff is even, so wn_outer.n ± wn_diff is even (valid)
wn.push_back(
MatsubaraFreq<S>(wn_outer.n - sign(wn_outer) * wn_diff));
}
Expand Down
65 changes: 33 additions & 32 deletions backend/cxx/include/sparseir/dlr.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
#include <algorithm>

#include "sparseir/funcs.hpp"
#include "sparseir/basis.hpp"

namespace sparseir {

Expand All @@ -16,24 +17,24 @@ class MatsubaraPoles {
double beta;
Eigen::VectorXd poles;
double wmax;
std::vector<double> weights;
std::function<double(double, double)> weight_func;
std::vector<double> inv_weights;
std::function<double(double)> inv_weight_func;

MatsubaraPoles(double beta, const Eigen::VectorXd &poles, double wmax, std::function<double(double, double)> weight_func)
: beta(beta), poles(poles), wmax(wmax), weights(poles.size(), 1.0), weight_func(weight_func)
MatsubaraPoles(double beta, const Eigen::VectorXd &poles, double wmax, std::function<double(double)> inv_weight_func)
: beta(beta), poles(poles), wmax(wmax), inv_weights(poles.size(), 1.0), inv_weight_func(inv_weight_func)
{
if (!weight_func) {
throw std::runtime_error("weight_func is nullptr in MatsubaraPoles constructor");
if (!inv_weight_func) {
throw std::runtime_error("inv_weight_func is nullptr in MatsubaraPoles constructor");
}
for (Eigen::Index i = 0; i < poles.size(); ++i) {
weights[i] = weight_func(beta, poles(i));
inv_weights[i] = inv_weight_func(poles(i));
}
}

template <typename T = Statistics>
MatsubaraPoles(double beta, const Eigen::VectorXd &poles, double wmax,
typename std::enable_if<std::is_same<T, Fermionic>::value>::type* = nullptr)
: beta(beta), poles(poles), wmax(wmax), weights(poles.size(), 1.0)
: beta(beta), poles(poles), wmax(wmax), inv_weights(poles.size(), 1.0)
{
}

Expand Down Expand Up @@ -63,8 +64,8 @@ class MatsubaraPoles {
Eigen::VectorXcd result(poles.size());
for (Eigen::Index i = 0; i < poles.size(); ++i) {
// double y = poles(i) / wmax;
double weight = weights[i];
result(i) = 1.0 / ((n.valueim(beta) - poles(i)) * weight);
double inv_weight = inv_weights[i];
result(i) = inv_weight / (n.valueim(beta) - poles(i));
}
return result;
}
Expand Down Expand Up @@ -97,16 +98,16 @@ class MatsubaraPoles {

// Prepare data for the new MatsubaraPoles
Eigen::VectorXd new_poles(indices.size());
std::vector<double> new_weights(indices.size());
std::vector<double> new_inv_weights(indices.size());

// Copy selected poles and weights
// Copy selected poles and inv_weights
for (size_t i = 0; i < indices.size(); ++i) {
new_poles(i) = poles(indices[i]);
new_weights[i] = weights[indices[i]];
new_inv_weights[i] = inv_weights[indices[i]];
}

// Create new MatsubaraPoles with selected poles and the same weight_func
return std::make_shared<MatsubaraPoles>(beta, new_poles, wmax, weight_func);
// Create new MatsubaraPoles with selected poles and the same inv_weight_func
return std::make_shared<MatsubaraPoles>(beta, new_poles, wmax, inv_weight_func);
}
};

Expand All @@ -117,16 +118,16 @@ class DLRBasisFunctions {
double beta;
Eigen::VectorXd poles;
double wmax;
Eigen::VectorXd weights;
Eigen::VectorXd inv_weights; // inverse weights (numerically stable)

DLRBasisFunctions(double beta, const Eigen::VectorXd &poles, double wmax, const Eigen::VectorXd &weights)
: beta(beta), poles(poles), wmax(wmax), weights(weights)
DLRBasisFunctions(double beta, const Eigen::VectorXd &poles, double wmax, const Eigen::VectorXd &inv_weights)
: beta(beta), poles(poles), wmax(wmax), inv_weights(inv_weights)
{
}

template <typename T = S>
DLRBasisFunctions(double beta, const Eigen::VectorXd &poles, double wmax, typename std::enable_if<std::is_same<T, Fermionic>::value>::type* = nullptr)
: beta(beta), poles(poles), wmax(wmax), weights(Eigen::VectorXd::Ones(poles.size()))
: beta(beta), poles(poles), wmax(wmax), inv_weights(Eigen::VectorXd::Ones(poles.size()))
{
}

Expand All @@ -140,7 +141,7 @@ class DLRBasisFunctions {
auto kernel = LogisticKernel(beta * wmax); // k(x, y)
double x = 2 * tau / beta - 1.0;
double y = poles(i) / wmax;
result(i) = - kernel.compute(x, y) / weights(i);
result(i) = - kernel.compute(x, y) * inv_weights(i);
}
return result;
}
Expand All @@ -156,7 +157,7 @@ class DLRBasisFunctions {
double x = 2.0 * tau / beta - 1.0;
double y = poles(i) / wmax;
double k_tau_omega = kernel.compute(x, y);
result(i) = - k_tau_omega / (y * weights(i));
result(i) = - k_tau_omega * inv_weights(i) / y;
}
return result;
}
Expand All @@ -175,7 +176,7 @@ class DLRBasisFunctions {
std::size_t size() const { return poles.size(); }

DLRBasisFunctions slice(size_t i) const {
return DLRBasisFunctions(beta, poles.segment(i, 1), wmax, weights.segment(i, 1));
return DLRBasisFunctions(beta, poles.segment(i, 1), wmax, inv_weights.segment(i, 1));
}

// Slice method to extract multiple functions by indices
Expand All @@ -185,16 +186,16 @@ class DLRBasisFunctions {

// Prepare data for the new DLRBasisFunctions
Eigen::VectorXd new_poles(indices.size());
Eigen::VectorXd new_weights(indices.size());
Eigen::VectorXd new_inv_weights(indices.size());

// Copy selected poles and weights
// Copy selected poles and inv_weights
for (size_t i = 0; i < indices.size(); ++i) {
new_poles(i) = poles(indices[i]);
new_weights(i) = weights(indices[i]);
new_inv_weights(i) = inv_weights(indices[i]);
}

// Create new DLRBasisFunctions with selected poles
return std::make_shared<DLRBasisFunctions>(beta, new_poles, wmax, new_weights);
return std::make_shared<DLRBasisFunctions>(beta, new_poles, wmax, new_inv_weights);
}

int nroots() const {
Expand Down Expand Up @@ -256,7 +257,7 @@ class DiscreteLehmannRepresentation : public AbstractBasis<S> {
std::shared_ptr<MatsubaraPoles<S>> uhat;
Eigen::MatrixXd fitmat;
sparseir::JacobiSVD<Eigen::MatrixXd> matrix;
std::function<double(double, double)> weight_func;
std::function<double(double)> inv_weight_func;
Eigen::VectorXd _ir_default_tau_sampling_points;


Expand All @@ -268,14 +269,14 @@ class DiscreteLehmannRepresentation : public AbstractBasis<S> {
wmax(b.get_wmax()),
accuracy(b.get_accuracy()),
u(nullptr),
uhat(std::make_shared<MatsubaraPoles<S>>(b.get_beta(), poles, b.get_wmax(), b.weight_func)),
uhat(std::make_shared<MatsubaraPoles<S>>(b.get_beta(), poles, b.get_wmax(), b.inv_weight_func)),
_ir_default_tau_sampling_points(b.default_tau_sampling_points())
{
Eigen::VectorXd weights(poles.size());
Eigen::VectorXd inv_weights(poles.size());
for (int i = 0; i < poles.size(); ++i) {
weights(i) = b.weight_func(beta, poles[i]);
inv_weights(i) = b.inv_weight_func(poles[i]);
}
auto base_u_funcs = std::make_shared<DLRBasisFunctions<S>>(b.get_beta(), poles, b.get_wmax(), weights);
auto base_u_funcs = std::make_shared<DLRBasisFunctions<S>>(b.get_beta(), poles, b.get_wmax(), inv_weights);
this->u = std::make_shared<DLRTauFuncsType<S>>(base_u_funcs, b.get_beta());

// Fitting matrix from IR
Expand All @@ -289,7 +290,7 @@ class DiscreteLehmannRepresentation : public AbstractBasis<S> {

matrix.compute(fitmat, Eigen::ComputeThinU | Eigen::ComputeThinV);

weight_func = b.weight_func;
inv_weight_func = b.inv_weight_func;
wmax = b.get_wmax();
}

Expand Down
Loading
Loading