diff --git a/CMakeLists.txt b/CMakeLists.txt index bab5a8ca..48ed306b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -78,7 +78,7 @@ else() endif() -set(EIGEN3_REQUIRED_VERSION "3.4.0") +set(EIGEN3_REQUIRED_VERSION "3.4.1") find_package (Eigen3 ${EIGEN3_REQUIRED_VERSION} QUIET NO_MODULE) if(NOT Eigen3_FOUND) message(STATUS "Eigen3 not found in system, fetching from GitHub...") diff --git a/backend/cxx/CMakeLists.txt b/backend/cxx/CMakeLists.txt index e0df5674..b28b72f3 100644 --- a/backend/cxx/CMakeLists.txt +++ b/backend/cxx/CMakeLists.txt @@ -36,11 +36,13 @@ if(SPARSEIR_DEBUG) endif() # Find Eigen3 -set(EIGEN3_REQUIRED_VERSION "3.4.0") +set(EIGEN3_REQUIRED_VERSION "3.4.1") find_package(Eigen3 ${EIGEN3_REQUIRED_VERSION} QUIET NO_MODULE) if(NOT Eigen3_FOUND) message(STATUS "Eigen3 not found, fetching from GitLab...") include(FetchContent) + # Disable Eigen3's own tests to avoid running them in CI + set(EIGEN_BUILD_TESTING OFF CACHE BOOL "Build Eigen3 tests" FORCE) FetchContent_Declare(Eigen3 GIT_REPOSITORY https://gitlab.com/libeigen/eigen.git GIT_TAG ${EIGEN3_REQUIRED_VERSION} diff --git a/backend/cxx/include/sparseir/basis.hpp b/backend/cxx/include/sparseir/basis.hpp index ed522cd9..742e6895 100644 --- a/backend/cxx/include/sparseir/basis.hpp +++ b/backend/cxx/include/sparseir/basis.hpp @@ -65,7 +65,7 @@ class FiniteTempBasis : public AbstractBasis { std::shared_ptr> uhat; std::shared_ptr> uhat_full; - std::function weight_func; // weight function especially for bosonic basis + std::function inv_weight_func; // inverse weight function (numerically stable), omega-only template ::value>::type> FiniteTempBasis(double beta, double omega_max, double epsilon, @@ -90,7 +90,11 @@ class FiniteTempBasis : public AbstractBasis { this->beta = beta; this->lambda = kernel.lambda_; this->sve_result = std::make_shared(sve_result); - this->weight_func = kernel.template weight_func(S()); + // Convert kernel's 2-arg inv_weight_func to omega-only function + auto 
kernel_inv_weight_func = kernel.template inv_weight_func(S()); + this->inv_weight_func = [kernel_inv_weight_func, beta](double omega) -> double { + return kernel_inv_weight_func(beta, omega); + }; double wmax = this->lambda / beta; @@ -163,6 +167,93 @@ class FiniteTempBasis : public AbstractBasis { std::make_shared>(uhat_polyvec); } + // Constructor with SVE result and inv_weight_func (no kernel required) + FiniteTempBasis(double beta, double omega_max, double epsilon, + double lambda, int ypower, double conv_radius, + SVEResult sve_result, + std::function inv_weight_func, + int max_size = -1) + { + if (sve_result.s.size() == 0) { + throw std::runtime_error("SVE result sve_result.s is empty"); + } + if (beta <= 0.0) { + throw std::domain_error( + "Inverse temperature beta must be positive"); + } + if (omega_max < 0.0) { + throw std::domain_error( + "Frequency cutoff omega_max must be non-negative"); + } + if (std::fabs(beta * omega_max - lambda) > 1e-10) { + throw std::runtime_error("Product of beta and omega_max must be " + "equal to lambda"); + } + this->beta = beta; + this->lambda = lambda; + this->sve_result = std::make_shared(sve_result); + this->inv_weight_func = inv_weight_func; + + double wmax = this->lambda / beta; + + auto part_result = sve_result.part(epsilon, max_size); + PiecewiseLegendrePolyVector u_ = std::get<0>(part_result); + Eigen::VectorXd s_ = std::get<1>(part_result); + PiecewiseLegendrePolyVector v_ = std::get<2>(part_result); + double sve_result_s0 = sve_result.s(0); + + if (sve_result.s.size() > s_.size()) { + this->accuracy = sve_result.s(s_.size()) / sve_result_s0; + } else { + this->accuracy = sve_result.s(s_.size() - 1) / sve_result_s0; + } + + auto u_knots_ = u_.polyvec[0].knots; + auto v_knots_ = v_.polyvec[0].knots; + + Eigen::VectorXd u_knots = (beta / 2) * (u_knots_.array() + 1); + Eigen::VectorXd v_knots = wmax * v_knots_; + + Eigen::VectorXd deltax4u = (beta / 2) * u_.get_delta_x(); + Eigen::VectorXd deltax4v = wmax * 
v_.get_delta_x(); + std::vector u_symm_vec; + for (std::size_t i = 0; i < u_.size(); ++i) { + u_symm_vec.push_back(u_.polyvec[i].get_symm()); + } + std::vector v_symm_vec; + for (std::size_t i = 0; i < v_.size(); ++i) { + v_symm_vec.push_back(v_.polyvec[i].get_symm()); + } + + Eigen::VectorXi u_symm = + Eigen::Map(u_symm_vec.data(), u_symm_vec.size()); + Eigen::VectorXi v_symm = + Eigen::Map(v_symm_vec.data(), v_symm_vec.size()); + + this->u = std::make_shared>( + std::make_shared(u_, u_knots, deltax4u, u_symm), beta); + this->v = std::make_shared( + v_, v_knots, deltax4v, v_symm); + this->s = + (std::sqrt(beta * wmax / 2) * std::pow(wmax, ypower)) * + s_; + + Eigen::Tensor udata3d = sve_result.u->get_data(); + PiecewiseLegendrePolyVector uhat_base_full = + PiecewiseLegendrePolyVector(sqrt(beta) * udata3d, *sve_result.u); + S statistics = S(); + + this->uhat_full = std::make_shared>( + uhat_base_full, statistics, conv_radius); + + std::vector> uhat_polyvec; + for (int i = 0; i < this->s.size(); ++i) { + uhat_polyvec.push_back(this->uhat_full->operator[](i)); + } + this->uhat = + std::make_shared>(uhat_polyvec); + } + FiniteTempBasis(double beta, double omega_max, double epsilon, int max_size = -1) { @@ -332,10 +423,21 @@ inline void fence_matsubara_sampling(std::vector> &wn, for (const auto &wn_outer : outer_frequencies) { int outer_val = wn_outer.n; + // In SparseIR.jl-v1, ωn_diff is always created as BosonicFreq + // This ensures diff_val is always even (valid for Bosonic) int diff_val = 2 * static_cast(std::round(0.025 * outer_val)); - int wn_diff = MatsubaraFreq(diff_val).n; + + // Handle edge case: if diff_val is 0, set it to 2 (minimum even value for Bosonic) + if (diff_val == 0) { + diff_val = 2; + } + + // Get the n value from BosonicFreq (same as diff_val since it's even) + int wn_diff = MatsubaraFreq(diff_val).n; if (wn.size() >= 20) { + // For Fermionic: wn_outer.n is odd, wn_diff is even, so wn_outer.n ± wn_diff is odd (valid) + // For Bosonic: 
wn_outer.n is even, wn_diff is even, so wn_outer.n ± wn_diff is even (valid) wn.push_back( MatsubaraFreq(wn_outer.n - sign(wn_outer) * wn_diff)); } diff --git a/backend/cxx/include/sparseir/dlr.hpp b/backend/cxx/include/sparseir/dlr.hpp index 50437657..04482671 100644 --- a/backend/cxx/include/sparseir/dlr.hpp +++ b/backend/cxx/include/sparseir/dlr.hpp @@ -7,6 +7,7 @@ #include #include "sparseir/funcs.hpp" +#include "sparseir/basis.hpp" namespace sparseir { @@ -16,24 +17,24 @@ class MatsubaraPoles { double beta; Eigen::VectorXd poles; double wmax; - std::vector weights; - std::function weight_func; + std::vector inv_weights; + std::function inv_weight_func; - MatsubaraPoles(double beta, const Eigen::VectorXd &poles, double wmax, std::function weight_func) - : beta(beta), poles(poles), wmax(wmax), weights(poles.size(), 1.0), weight_func(weight_func) + MatsubaraPoles(double beta, const Eigen::VectorXd &poles, double wmax, std::function inv_weight_func) + : beta(beta), poles(poles), wmax(wmax), inv_weights(poles.size(), 1.0), inv_weight_func(inv_weight_func) { - if (!weight_func) { - throw std::runtime_error("weight_func is nullptr in MatsubaraPoles constructor"); + if (!inv_weight_func) { + throw std::runtime_error("inv_weight_func is nullptr in MatsubaraPoles constructor"); } for (Eigen::Index i = 0; i < poles.size(); ++i) { - weights[i] = weight_func(beta, poles(i)); + inv_weights[i] = inv_weight_func(poles(i)); } } template MatsubaraPoles(double beta, const Eigen::VectorXd &poles, double wmax, typename std::enable_if::value>::type* = nullptr) - : beta(beta), poles(poles), wmax(wmax), weights(poles.size(), 1.0) + : beta(beta), poles(poles), wmax(wmax), inv_weights(poles.size(), 1.0) { } @@ -63,8 +64,8 @@ class MatsubaraPoles { Eigen::VectorXcd result(poles.size()); for (Eigen::Index i = 0; i < poles.size(); ++i) { // double y = poles(i) / wmax; - double weight = weights[i]; - result(i) = 1.0 / ((n.valueim(beta) - poles(i)) * weight); + double inv_weight = 
inv_weights[i]; + result(i) = inv_weight / (n.valueim(beta) - poles(i)); } return result; } @@ -97,16 +98,16 @@ class MatsubaraPoles { // Prepare data for the new MatsubaraPoles Eigen::VectorXd new_poles(indices.size()); - std::vector new_weights(indices.size()); + std::vector new_inv_weights(indices.size()); - // Copy selected poles and weights + // Copy selected poles and inv_weights for (size_t i = 0; i < indices.size(); ++i) { new_poles(i) = poles(indices[i]); - new_weights[i] = weights[indices[i]]; + new_inv_weights[i] = inv_weights[indices[i]]; } - // Create new MatsubaraPoles with selected poles and the same weight_func - return std::make_shared(beta, new_poles, wmax, weight_func); + // Create new MatsubaraPoles with selected poles and the same inv_weight_func + return std::make_shared(beta, new_poles, wmax, inv_weight_func); } }; @@ -117,16 +118,16 @@ class DLRBasisFunctions { double beta; Eigen::VectorXd poles; double wmax; - Eigen::VectorXd weights; + Eigen::VectorXd inv_weights; // inverse weights (numerically stable) - DLRBasisFunctions(double beta, const Eigen::VectorXd &poles, double wmax, const Eigen::VectorXd &weights) - : beta(beta), poles(poles), wmax(wmax), weights(weights) + DLRBasisFunctions(double beta, const Eigen::VectorXd &poles, double wmax, const Eigen::VectorXd &inv_weights) + : beta(beta), poles(poles), wmax(wmax), inv_weights(inv_weights) { } template DLRBasisFunctions(double beta, const Eigen::VectorXd &poles, double wmax, typename std::enable_if::value>::type* = nullptr) - : beta(beta), poles(poles), wmax(wmax), weights(Eigen::VectorXd::Ones(poles.size())) + : beta(beta), poles(poles), wmax(wmax), inv_weights(Eigen::VectorXd::Ones(poles.size())) { } @@ -140,7 +141,7 @@ class DLRBasisFunctions { auto kernel = LogisticKernel(beta * wmax); // k(x, y) double x = 2 * tau / beta - 1.0; double y = poles(i) / wmax; - result(i) = - kernel.compute(x, y) / weights(i); + result(i) = - kernel.compute(x, y) * inv_weights(i); } return result; } @@ 
-156,7 +157,7 @@ class DLRBasisFunctions { double x = 2.0 * tau / beta - 1.0; double y = poles(i) / wmax; double k_tau_omega = kernel.compute(x, y); - result(i) = - k_tau_omega / (y * weights(i)); + result(i) = - k_tau_omega * inv_weights(i) / y; } return result; } @@ -175,7 +176,7 @@ class DLRBasisFunctions { std::size_t size() const { return poles.size(); } DLRBasisFunctions slice(size_t i) const { - return DLRBasisFunctions(beta, poles.segment(i, 1), wmax, weights.segment(i, 1)); + return DLRBasisFunctions(beta, poles.segment(i, 1), wmax, inv_weights.segment(i, 1)); } // Slice method to extract multiple functions by indices @@ -185,16 +186,16 @@ class DLRBasisFunctions { // Prepare data for the new DLRBasisFunctions Eigen::VectorXd new_poles(indices.size()); - Eigen::VectorXd new_weights(indices.size()); + Eigen::VectorXd new_inv_weights(indices.size()); - // Copy selected poles and weights + // Copy selected poles and inv_weights for (size_t i = 0; i < indices.size(); ++i) { new_poles(i) = poles(indices[i]); - new_weights(i) = weights(indices[i]); + new_inv_weights(i) = inv_weights(indices[i]); } // Create new DLRBasisFunctions with selected poles - return std::make_shared(beta, new_poles, wmax, new_weights); + return std::make_shared(beta, new_poles, wmax, new_inv_weights); } int nroots() const { @@ -256,7 +257,7 @@ class DiscreteLehmannRepresentation : public AbstractBasis { std::shared_ptr> uhat; Eigen::MatrixXd fitmat; sparseir::JacobiSVD matrix; - std::function weight_func; + std::function inv_weight_func; Eigen::VectorXd _ir_default_tau_sampling_points; @@ -268,14 +269,14 @@ class DiscreteLehmannRepresentation : public AbstractBasis { wmax(b.get_wmax()), accuracy(b.get_accuracy()), u(nullptr), - uhat(std::make_shared>(b.get_beta(), poles, b.get_wmax(), b.weight_func)), + uhat(std::make_shared>(b.get_beta(), poles, b.get_wmax(), b.inv_weight_func)), _ir_default_tau_sampling_points(b.default_tau_sampling_points()) { - Eigen::VectorXd weights(poles.size()); 
+ Eigen::VectorXd inv_weights(poles.size()); for (int i = 0; i < poles.size(); ++i) { - weights(i) = b.weight_func(beta, poles[i]); + inv_weights(i) = b.inv_weight_func(poles[i]); } - auto base_u_funcs = std::make_shared>(b.get_beta(), poles, b.get_wmax(), weights); + auto base_u_funcs = std::make_shared>(b.get_beta(), poles, b.get_wmax(), inv_weights); this->u = std::make_shared>(base_u_funcs, b.get_beta()); // Fitting matrix from IR @@ -289,7 +290,7 @@ class DiscreteLehmannRepresentation : public AbstractBasis { matrix.compute(fitmat, Eigen::ComputeThinU | Eigen::ComputeThinV); - weight_func = b.weight_func; + inv_weight_func = b.inv_weight_func; wmax = b.get_wmax(); } diff --git a/backend/cxx/include/sparseir/kernel.hpp b/backend/cxx/include/sparseir/kernel.hpp index 2cc74866..22eb5e7f 100644 --- a/backend/cxx/include/sparseir/kernel.hpp +++ b/backend/cxx/include/sparseir/kernel.hpp @@ -51,17 +51,21 @@ class AbstractKernel { } /** - * @brief Return the weight function for given statistics. + * @brief Return the inverse weight function for given statistics. + * + * This is numerically more stable than weight_func, avoiding division by zero. + * For fermions: inv_weight = 1.0 (since weight = 1.0) + * For bosons: inv_weight = weight^{-1}, computed in a stable way. * * @param statistics 'F' for fermions or 'B' for bosons. 
*/ template - std::function weight_func(Fermionic) const { + std::function inv_weight_func(Fermionic) const { return [](T beta, T omega) { (void)beta; (void)omega; return 1.0; }; } template - std::function weight_func(Bosonic) const { + std::function inv_weight_func(Bosonic) const { return [](T beta, T omega) { (void)beta; (void)omega; return 1.0; }; } @@ -325,16 +329,18 @@ class LogisticKernel : public AbstractKernel { double conv_radius() const override { return 40.0 * this->lambda_; } template - std::function weight_func(Fermionic) const + std::function inv_weight_func(Fermionic) const { - return [](T beta , T omega) { (void)beta; (void)omega; return 1.0; }; + return [](T beta, T omega) { (void)beta; (void)omega; return 1.0; }; } template - std::function weight_func(Bosonic) const + std::function inv_weight_func(Bosonic) const { using std::tanh; - return [](T beta, T omega) { return 1.0 / tanh(0.5 * beta * omega); }; + // inv_weight = tanh(0.5 * beta * omega) is numerically stable + // This avoids division by zero when tanh(0.5 * beta * omega) approaches zero + return [](T beta, T omega) { return tanh(0.5 * beta * omega); }; } private: @@ -512,7 +518,7 @@ class RegularizedBoseKernel : public AbstractKernel { double conv_radius() const override { return 40.0 * this->lambda_; } template - std::function weight_func(Fermionic) const + std::function inv_weight_func(Fermionic) const { std::cerr << "RegularizedBoseKernel does not support fermionic functions" @@ -522,12 +528,12 @@ class RegularizedBoseKernel : public AbstractKernel { } template - std::function weight_func(Bosonic) const + std::function inv_weight_func(Bosonic) const { - using std::tanh; + // inv_weight = omega is numerically stable (no division) return [](T beta, T omega) { (void)beta; // Silence unused parameter warning - return static_cast(1.0) / omega; + return omega; }; } diff --git a/backend/cxx/include/sparseir/sparseir.h b/backend/cxx/include/sparseir/sparseir.h index 77a61d27..ddd9c88c 100644 
--- a/backend/cxx/include/sparseir/sparseir.h +++ b/backend/cxx/include/sparseir/sparseir.h @@ -145,6 +145,182 @@ spir_kernel *spir_reg_bose_kernel_new(double lambda, int *status); int spir_kernel_domain(const spir_kernel *k, double *xmin, double *xmax, double *ymin, double *ymax); +/** + * @brief Get x-segments for SVE discretization hints from a kernel. + * + * Retrieves the x-segments (discretization points) for singular value expansion + * of the specified kernel. This function is useful for custom kernels that wrap + * standard kernels (like LogisticKernel or RegularizedBoseKernel) and need to + * delegate SVE hints to the wrapped kernel. + * + * @param k Pointer to the kernel object. + * @param epsilon Accuracy target for the basis. + * @param segments Pointer to store segments array. If NULL, only n_segments is set. + * @param n_segments [IN/OUT] Input: ignored when segments is NULL. Output: number of segments. + * @return An integer status code: + * - 0 (SPIR_COMPUTATION_SUCCESS) on success + * - A non-zero error code on failure + * + * @note The function should be called twice: + * 1. First call with segments=NULL: set n_segments to the required array size. + * 2. Second call with segments allocated: fill segments[0..n_segments-1] with values. + */ +int spir_kernel_get_sve_hints_segments_x(const spir_kernel *k, double epsilon, + double *segments, int *n_segments); + +/** + * @brief Get y-segments for SVE discretization hints from a kernel. + * + * Retrieves the y-segments (discretization points) for singular value expansion + * of the specified kernel. This function is useful for custom kernels that wrap + * standard kernels (like LogisticKernel or RegularizedBoseKernel) and need to + * delegate SVE hints to the wrapped kernel. + * + * @param k Pointer to the kernel object. + * @param epsilon Accuracy target for the basis. + * @param segments Pointer to store segments array. If NULL, only n_segments is set. 
+ * @param n_segments [IN/OUT] Input: ignored when segments is NULL. Output: number of segments. + * @return An integer status code: + * - 0 (SPIR_COMPUTATION_SUCCESS) on success + * - A non-zero error code on failure + * + * @note The function should be called twice: + * 1. First call with segments=NULL: set n_segments to the required array size. + * 2. Second call with segments allocated: fill segments[0..n_segments-1] with values. + */ +int spir_kernel_get_sve_hints_segments_y(const spir_kernel *k, double epsilon, + double *segments, int *n_segments); + +/** + * @brief Get the number of singular values hint from a kernel. + * + * Retrieves the suggested number of singular values for singular value expansion + * of the specified kernel. This function is useful for custom kernels that wrap + * standard kernels (like LogisticKernel or RegularizedBoseKernel) and need to + * delegate SVE hints to the wrapped kernel. + * + * @param k Pointer to the kernel object. + * @param epsilon Accuracy target for the basis. + * @param nsvals Pointer to store the number of singular values. + * @return An integer status code: + * - 0 (SPIR_COMPUTATION_SUCCESS) on success + * - A non-zero error code on failure + */ +int spir_kernel_get_sve_hints_nsvals(const spir_kernel *k, double epsilon, int *nsvals); + +/** + * @brief Get the number of Gauss points hint from a kernel. + * + * Retrieves the suggested number of Gauss points for numerical integration + * in singular value expansion of the specified kernel. This function is useful + * for custom kernels that wrap standard kernels (like LogisticKernel or + * RegularizedBoseKernel) and need to delegate SVE hints to the wrapped kernel. + * + * @param k Pointer to the kernel object. + * @param epsilon Accuracy target for the basis. + * @param ngauss Pointer to store the number of Gauss points. 
+ * @return An integer status code: + * - 0 (SPIR_COMPUTATION_SUCCESS) on success + * - A non-zero error code on failure + */ +int spir_kernel_get_sve_hints_ngauss(const spir_kernel *k, double epsilon, int *ngauss); + +/** + * @brief Choose the working type (Twork) based on epsilon value. + * + * This function determines the appropriate working precision type based on the + * target accuracy epsilon. It follows the same logic as SPIR_TWORK_AUTO: + * - Returns SPIR_TWORK_FLOAT64X2 if epsilon < 1e-8 or epsilon is NaN + * - Returns SPIR_TWORK_FLOAT64 otherwise + * + * @param epsilon Target accuracy (must be non-negative, or NaN for auto-selection) + * @return Working type constant: + * - SPIR_TWORK_FLOAT64 (0): Use double precision (64-bit) + * - SPIR_TWORK_FLOAT64X2 (1): Use extended precision (128-bit) + */ +int spir_choose_working_type(double epsilon); + +/** + * @brief Create a spir_funcs object from piecewise Legendre polynomial coefficients. + * + * Constructs a continuous function object from segments and Legendre polynomial + * expansion coefficients. The coefficients are organized per segment, with each + * segment containing nfuncs coefficients (degrees 0 to nfuncs-1). + * + * @param segments Array of segment boundaries (n_segments+1 elements). Must be + * monotonically increasing. + * @param n_segments Number of segments (must be >= 1). + * @param coeffs Array of Legendre coefficients. Layout: contiguous per segment, + * coefficients for segment i are stored at indices [i*nfuncs, (i+1)*nfuncs). + * Each segment has nfuncs coefficients for Legendre degrees 0 to nfuncs-1. + * @param nfuncs Number of basis functions per segment (Legendre polynomial degrees 0 to nfuncs-1). + * @param order Order parameter (currently unused, reserved for future use). + * @param status Pointer to store the status code. + * @return Pointer to the newly created funcs object, or NULL if creation fails. 
+ * + * @note The function creates a single piecewise Legendre polynomial function. + * To create multiple functions, call this function multiple times. + */ +spir_funcs* spir_funcs_from_piecewise_legendre( + const double* segments, int n_segments, + const double* coeffs, int nfuncs, int order, + int* status); + +/** + * @brief Compute piecewise Gauss-Legendre quadrature rule (double precision). + * + * Generates a piecewise Gauss-Legendre quadrature rule with n points per segment. + * The rule is concatenated across all segments, with points and weights properly + * scaled for each segment interval. + * + * @param n Number of Gauss points per segment (must be >= 1). + * @param segments Array of segment boundaries (n_segments + 1 elements). + * Must be monotonically increasing. + * @param n_segments Number of segments (must be >= 1). + * @param x Output array for Gauss points (size n * n_segments). Must be pre-allocated. + * @param w Output array for Gauss weights (size n * n_segments). Must be pre-allocated. + * @param status Pointer to store the status code. + * @return Status code: + * - SPIR_COMPUTATION_SUCCESS (0) on success + * - Non-zero error code on failure + */ +int spir_gauss_legendre_rule_piecewise_double( + int n, + const double* segments, int n_segments, + double* x, double* w, + int* status); + +/** + * @brief Compute piecewise Gauss-Legendre quadrature rule (DDouble precision). + * + * Generates a piecewise Gauss-Legendre quadrature rule with n points per segment, + * computed using extended precision (DDouble). Returns high and low parts separately + * for maximum precision. + * + * @param n Number of Gauss points per segment (must be >= 1). + * @param segments Array of segment boundaries (n_segments + 1 elements). + * Must be monotonically increasing. + * @param n_segments Number of segments (must be >= 1). + * @param x_high Output array for high part of Gauss points (size n * n_segments). + * Must be pre-allocated. 
+ * @param x_low Output array for low part of Gauss points (size n * n_segments). + * Must be pre-allocated. + * @param w_high Output array for high part of Gauss weights (size n * n_segments). + * Must be pre-allocated. + * @param w_low Output array for low part of Gauss weights (size n * n_segments). + * Must be pre-allocated. + * @param status Pointer to store the status code. + * @return Status code: + * - SPIR_COMPUTATION_SUCCESS (0) on success + * - Non-zero error code on failure + */ +int spir_gauss_legendre_rule_piecewise_ddouble( + int n, + const double* segments, int n_segments, + double* x_high, double* x_low, + double* w_high, double* w_low, + int* status); + /** * @brief Perform truncated singular value expansion (SVE) of a kernel. * @@ -170,8 +346,6 @@ int spir_kernel_domain(const spir_kernel *k, double *xmin, double *xmax, * @param epsilon Accuracy target for the basis. Determines: * - The relative magnitude for truncation of singular values * - The accuracy of computed singular values and vectors - * @param cutoff Cutoff value for singular values. Set to -1 to use default value, i.e., 2 * √ε, - * where ε is the machine epsilon of the working type. * @param lmax Maximum number of Legendre polynomials to use * @param n_gauss Number of Gauss points for numerical integration * @param Twork Working data type for computations (sve). Must be one of: @@ -187,15 +361,17 @@ int spir_kernel_domain(const spir_kernel *k, double *xmin, double *xmax, * adjusted to meet accuracy requirements based on epsilon * - If epsilon is below √ε (where ε is machine epsilon), a warning is * issued and higher precision arithmetic is used if possible. + * - The cutoff is automatically set to 2 * ε (where ε is the machine epsilon + * of the working type) to allow for later truncation with part(). 
* * @note The returned object must be freed using spir_release_sve_result when no * longer needed * @see spir_release_sve_result + * @see spir_sve_result_truncate */ spir_sve_result *spir_sve_result_new( const spir_kernel *k, double epsilon, - double cutoff, int lmax, int n_gauss, int Twork, @@ -250,6 +426,69 @@ spir_sve_result* spir_sve_result_truncate(const spir_sve_result *sve, double eps */ int spir_sve_result_get_svals(const spir_sve_result *sve, double *svals); +/** + * @brief Create a SVE result from a discretized kernel matrix. + * + * This function performs singular value expansion (SVE) on a discretized kernel + * matrix K. The matrix K should already be in the appropriate form (no weight + * application needed). The function supports both double and DDouble precision + * based on whether K_low is provided. + * + * @param K_high High part of the kernel matrix (required, size: nx * ny) + * @param K_low Low part of the kernel matrix (optional, nullptr for double precision) + * @param nx Number of rows in the matrix + * @param ny Number of columns in the matrix + * @param order Memory layout (SPIR_ORDER_ROW_MAJOR or SPIR_ORDER_COLUMN_MAJOR) + * @param segments_x X-direction segments (size: n_segments_x + 1) + * @param n_segments_x Number of segments in x direction + * @param segments_y Y-direction segments (size: n_segments_y + 1) + * @param n_segments_y Number of segments in y direction + * @param n_gauss Number of Gauss points per segment + * @param epsilon Target accuracy + * @param status Pointer to store status code + * @return Pointer to SVE result on success, nullptr on failure + */ +spir_sve_result* spir_sve_result_from_matrix( + const double* K_high, const double* K_low, + int nx, int ny, int order, + const double* segments_x, int n_segments_x, + const double* segments_y, int n_segments_y, + int n_gauss, double epsilon, + int* status); + +/** + * @brief Create a SVE result from centrosymmetric discretized kernel matrices. 
+ * + * This function performs singular value expansion (SVE) on centrosymmetric + * discretized kernel matrices K_even and K_odd. The matrices should already be + * in the appropriate form (no weight application needed). The function supports + * both double and DDouble precision based on whether K_low is provided. + * + * @param K_even_high High part of the even kernel matrix (required, size: nx * ny) + * @param K_even_low Low part of the even kernel matrix (optional, nullptr for double precision) + * @param K_odd_high High part of the odd kernel matrix (required, size: nx * ny) + * @param K_odd_low Low part of the odd kernel matrix (optional, nullptr for double precision) + * @param nx Number of rows in each matrix + * @param ny Number of columns in each matrix + * @param order Memory layout (SPIR_ORDER_ROW_MAJOR or SPIR_ORDER_COLUMN_MAJOR) + * @param segments_x X-direction segments (size: n_segments_x + 1) + * @param n_segments_x Number of segments in x direction + * @param segments_y Y-direction segments (size: n_segments_y + 1) + * @param n_segments_y Number of segments in y direction + * @param n_gauss Number of Gauss points per segment + * @param epsilon Target accuracy + * @param status Pointer to store status code + * @return Pointer to SVE result on success, nullptr on failure + */ +spir_sve_result* spir_sve_result_from_matrix_centrosymmetric( + const double* K_even_high, const double* K_even_low, + const double* K_odd_high, const double* K_odd_low, + int nx, int ny, int order, + const double* segments_x, int n_segments_x, + const double* segments_y, int n_segments_y, + int n_gauss, double epsilon, + int* status); + /** * @brief Gets the number of functions in a functions object. * @@ -445,6 +684,34 @@ spir_basis *spir_basis_new(int statistics, double beta, double omega_max, int max_size, int *status); +/** + * @brief Create a basis from SVE result and inv_weight_func. 
+ * + * This function creates a finite temperature basis from an SVE result and + * an inv_weight_func represented as spir_funcs. The inv_weight_func is + * evaluated as a function of omega (frequency), with beta as a fixed parameter. + * + * @param statistics Statistics type (SPIR_STATISTICS_FERMIONIC or SPIR_STATISTICS_BOSONIC) + * @param beta Inverse temperature + * @param omega_max Maximum frequency + * @param epsilon Target accuracy + * @param lambda Kernel parameter (beta * omega_max) + * @param ypower Power with which y coordinate scales (typically 0 or 1) + * @param conv_radius Convergence radius for Matsubara basis asymptotic model + * @param sve_result SVE result + * @param inv_weight_funcs spir_funcs representing inv_weight_func(omega) (omega-only function) + * @param max_size Maximum number of basis functions (-1 for no limit) + * @param status Pointer to store status code + * @return Pointer to basis on success, nullptr on failure + */ +spir_basis* spir_basis_new_from_sve_and_inv_weight( + int statistics, double beta, double omega_max, double epsilon, + double lambda, int ypower, double conv_radius, + const spir_sve_result *sve, + const spir_funcs *inv_weight_funcs, + int max_size, + int *status); + /** * @brief Gets the size (number of basis functions) of a finite temperature * basis. @@ -564,6 +831,29 @@ spir_funcs *spir_basis_get_v(const spir_basis *b, int *status); */ spir_funcs *spir_basis_get_uhat(const spir_basis *b, int *status); +/** + * @brief Gets the full (untruncated) Matsubara-frequency basis functions. + * + * This function returns an object representing all basis functions + * in the Matsubara-frequency domain, including those beyond the truncation + * threshold. Unlike `spir_basis_get_uhat`, which returns only the truncated + * basis functions (up to `basis.size()`), this function returns all basis + * functions from the SVE result (up to `sve_result.s.size()`). 
+ * + * @param b Pointer to the finite temperature basis object (must be an IR basis) + * @param status Pointer to store the status code + * @return Pointer to the basis functions object, or NULL if creation fails + * + * @note The returned object must be freed using spir_funcs_release + * when no longer needed + * @note This function is only available for IR basis objects (not DLR) + * @note uhat_full.size() >= uhat.size() is always true + * @note The first uhat.size() functions in uhat_full are identical to uhat + * @see spir_basis_get_uhat + * @see spir_funcs_release + */ +spir_funcs *spir_basis_get_uhat_full(const spir_basis *b, int *status); + /** * @brief Gets the number of default tau sampling points for an IR basis. * @@ -751,13 +1041,42 @@ int spir_basis_get_n_default_matsus_ext(const spir_basis *b, bool positive_only, * * @param b Pointer to a finite temperature basis object (must be an IR basis) * @param positive_only If true, only positive frequencies are used - * @param n_points Number of requested sampling points. - * @param points Pre-allocated array to store the sampling points. The size of the array must be at least n_points. - * @param n_points_returned Number of sampling points returned. + * @param mitigate If true, enable mitigation (fencing) to improve conditioning by adding oversampling points + * @param n_points Number of requested sampling points (default: basis_size). When positive_only=true, this represents the total number of frequencies (both positive and negative), and the returned number of points will be approximately n_points/2 (positive frequencies only). + * @param points Pre-allocated array to store the sampling points. The size of the array must be sufficient for the returned points (may exceed n_points if mitigate is true). + * @param n_points_returned Pointer to store the number of sampling points returned (may exceed n_points if mitigate is true, or approximately n_points/2 when positive_only=true). 
+ * @return An integer status code: + * - 0 (SPIR_COMPUTATION_SUCCESS) on success + * - A non-zero error code on failure + * + * @note This function is only available for IR basis objects + * @note When mitigate is true, the returned number of points may exceed n_points due to fencing + */ +int spir_basis_get_default_matsus_ext(const spir_basis *b, bool positive_only, bool mitigate, int n_points, int64_t *points, int *n_points_returned); + +/** + * @brief Gets the default Matsubara sampling points from a Matsubara-space spir_funcs. + * + * This function computes default sampling points in Matsubara frequencies (iωn) from + * a spir_funcs object that represents Matsubara-space basis functions (e.g., uhat or uhat_full). + * The statistics type (Fermionic/Bosonic) is automatically detected from the spir_funcs object type. + * + * @param uhat Pointer to a spir_funcs object representing Matsubara-space basis functions + * @param L Number of requested sampling points + * @param positive_only If true, only positive frequencies are used + * @param mitigate If true, enable mitigation (fencing) to improve conditioning by adding oversampling points + * @param points Pre-allocated array to store the sampling points. The size of the array must be sufficient for the returned points (may exceed L if mitigate is true). + * @param n_points_returned Pointer to store the number of sampling points returned (may exceed L if mitigate is true, or approximately L/2 when positive_only=true). 
* @return An integer status code: * - 0 (SPIR_COMPUTATION_SUCCESS) on success + * - A non-zero error code on failure + * + * @note This function is only available for spir_funcs objects representing Matsubara-space basis functions + * @note The statistics type is automatically detected from the spir_funcs object type + * @note The default sampling points are chosen to provide near-optimal conditioning + * @see spir_basis_get_default_matsus_ext */ -int spir_basis_get_default_matsus_ext(const spir_basis *b, bool positive_only, int n_points, int64_t *points, int *n_points_returned); +int spir_uhat_get_default_matsus(const spir_funcs *uhat, int L, bool positive_only, bool mitigate, int64_t *points, int *n_points_returned); /** * @brief Creates a new Discrete Lehmann Representation (DLR) basis. diff --git a/backend/cxx/src/cinterface.cpp b/backend/cxx/src/cinterface.cpp index 1fa0d3cd..cd2a7e97 100644 --- a/backend/cxx/src/cinterface.cpp +++ b/backend/cxx/src/cinterface.cpp @@ -5,6 +5,7 @@ #include #include #include +#include inline void DEBUG_LOG(const std::string& msg) { const char* env = std::getenv("SPARSEIR_DEBUG"); @@ -105,7 +106,367 @@ int spir_kernel_domain(const spir_kernel *k, double *xmin, double *xmax, DEBUG_LOG("Exception in spir_kernel_domain: " + std::string(e.what())); return SPIR_INTERNAL_ERROR; } catch (...) 
{ - DEBUG_LOG("Unknown exception in spir_kernel_domain"); + return SPIR_INTERNAL_ERROR; + } +} + +int spir_kernel_get_sve_hints_segments_x(const spir_kernel *k, double epsilon, + double *segments, int *n_segments) +{ + try { + if (!n_segments) { + DEBUG_LOG("n_segments is nullptr"); + return SPIR_INVALID_ARGUMENT; + } + + std::shared_ptr impl = get_impl_kernel(k); + if (!impl) { + DEBUG_LOG("Failed to get kernel implementation"); + return SPIR_GET_IMPL_FAILED; + } + + auto hints = sparseir::sve_hints(impl, epsilon); + auto segs = hints->segments_x(); + + if (segments == nullptr) { + // First call: return the number of segments + *n_segments = static_cast(segs.size()); + return SPIR_COMPUTATION_SUCCESS; + } + + // Second call: copy segments to output array + if (*n_segments < static_cast(segs.size())) { + DEBUG_LOG("segments array is too small"); + return SPIR_INVALID_ARGUMENT; + } + + for (size_t i = 0; i < segs.size(); ++i) { + segments[i] = static_cast(segs[i]); + } + *n_segments = static_cast(segs.size()); + return SPIR_COMPUTATION_SUCCESS; + } catch (const std::exception &e) { + DEBUG_LOG("Exception in spir_kernel_get_sve_hints_segments_x: " + std::string(e.what())); + return SPIR_INTERNAL_ERROR; + } catch (...) 
{ + DEBUG_LOG("Unknown exception in spir_kernel_get_sve_hints_segments_x"); + return SPIR_INTERNAL_ERROR; + } +} + +int spir_kernel_get_sve_hints_segments_y(const spir_kernel *k, double epsilon, + double *segments, int *n_segments) +{ + try { + if (!n_segments) { + DEBUG_LOG("n_segments is nullptr"); + return SPIR_INVALID_ARGUMENT; + } + + std::shared_ptr impl = get_impl_kernel(k); + if (!impl) { + DEBUG_LOG("Failed to get kernel implementation"); + return SPIR_GET_IMPL_FAILED; + } + + auto hints = sparseir::sve_hints(impl, epsilon); + auto segs = hints->segments_y(); + + if (segments == nullptr) { + // First call: return the number of segments + *n_segments = static_cast(segs.size()); + return SPIR_COMPUTATION_SUCCESS; + } + + // Second call: copy segments to output array + if (*n_segments < static_cast(segs.size())) { + DEBUG_LOG("segments array is too small"); + return SPIR_INVALID_ARGUMENT; + } + + for (size_t i = 0; i < segs.size(); ++i) { + segments[i] = static_cast(segs[i]); + } + *n_segments = static_cast(segs.size()); + return SPIR_COMPUTATION_SUCCESS; + } catch (const std::exception &e) { + DEBUG_LOG("Exception in spir_kernel_get_sve_hints_segments_y: " + std::string(e.what())); + return SPIR_INTERNAL_ERROR; + } catch (...) { + DEBUG_LOG("Unknown exception in spir_kernel_get_sve_hints_segments_y"); + return SPIR_INTERNAL_ERROR; + } +} + +int spir_kernel_get_sve_hints_nsvals(const spir_kernel *k, double epsilon, int *nsvals) +{ + try { + if (!nsvals) { + DEBUG_LOG("nsvals is nullptr"); + return SPIR_INVALID_ARGUMENT; + } + + std::shared_ptr impl = get_impl_kernel(k); + if (!impl) { + DEBUG_LOG("Failed to get kernel implementation"); + return SPIR_GET_IMPL_FAILED; + } + + auto hints = sparseir::sve_hints(impl, epsilon); + *nsvals = hints->nsvals(); + return SPIR_COMPUTATION_SUCCESS; + } catch (const std::exception &e) { + DEBUG_LOG("Exception in spir_kernel_get_sve_hints_nsvals: " + std::string(e.what())); + return SPIR_INTERNAL_ERROR; + } catch (...) 
{ + DEBUG_LOG("Unknown exception in spir_kernel_get_sve_hints_nsvals"); + return SPIR_INTERNAL_ERROR; + } +} + +int spir_kernel_get_sve_hints_ngauss(const spir_kernel *k, double epsilon, int *ngauss) +{ + try { + if (!ngauss) { + DEBUG_LOG("ngauss is nullptr"); + return SPIR_INVALID_ARGUMENT; + } + + std::shared_ptr impl = get_impl_kernel(k); + if (!impl) { + DEBUG_LOG("Failed to get kernel implementation"); + return SPIR_GET_IMPL_FAILED; + } + + auto hints = sparseir::sve_hints(impl, epsilon); + *ngauss = hints->ngauss(); + return SPIR_COMPUTATION_SUCCESS; + } catch (const std::exception &e) { + DEBUG_LOG("Exception in spir_kernel_get_sve_hints_ngauss: " + std::string(e.what())); + return SPIR_INTERNAL_ERROR; + } catch (...) { + DEBUG_LOG("Unknown exception in spir_kernel_get_sve_hints_ngauss"); + return SPIR_INTERNAL_ERROR; + } +} + +int spir_choose_working_type(double epsilon) +{ + try { + if (std::isnan(epsilon) || epsilon < 1e-8) { + return SPIR_TWORK_FLOAT64X2; + } else { + return SPIR_TWORK_FLOAT64; + } + } catch (const std::exception &e) { + DEBUG_LOG("Exception in spir_choose_working_type: " + std::string(e.what())); + return SPIR_TWORK_FLOAT64; // Default to FLOAT64 on error + } catch (...) 
{ + DEBUG_LOG("Unknown exception in spir_choose_working_type"); + return SPIR_TWORK_FLOAT64; // Default to FLOAT64 on error + } +} + +spir_funcs* spir_funcs_from_piecewise_legendre( + const double* segments, int n_segments, + const double* coeffs, int nfuncs, int order, + int* status) +{ + try { + if (!segments || !coeffs || !status) { + if (status) *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + + if (n_segments < 1) { + DEBUG_LOG("n_segments must be >= 1"); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + + if (nfuncs < 1) { + DEBUG_LOG("nfuncs must be >= 1"); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + + // Create knots vector from segments + Eigen::VectorXd knots(n_segments + 1); + for (int i = 0; i <= n_segments; ++i) { + knots(i) = segments[i]; + } + + // Verify segments are monotonically increasing + for (int i = 1; i <= n_segments; ++i) { + if (knots(i) <= knots(i-1)) { + DEBUG_LOG("segments must be monotonically increasing"); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + } + + // Create coefficient matrix: data is (nfuncs, n_segments) + // Each column represents one segment's coefficients + Eigen::MatrixXd data(nfuncs, n_segments); + for (int seg = 0; seg < n_segments; ++seg) { + for (int deg = 0; deg < nfuncs; ++deg) { + // Layout: coeffs[seg * nfuncs + deg] + data(deg, seg) = coeffs[seg * nfuncs + deg]; + } + } + + // Create PiecewiseLegendrePoly (l=-1 means not specified) + sparseir::PiecewiseLegendrePoly poly(data, knots, -1); + + // Create PiecewiseLegendrePolyVector (single function) + std::vector polyvec = {poly}; + auto polyvec_ptr = std::make_shared(polyvec); + + // Wrap in PiecewiseLegendrePolyFunctions + auto funcs_impl = std::make_shared(polyvec_ptr); + + // Create spir_funcs + *status = SPIR_COMPUTATION_SUCCESS; + return create_funcs(_safe_static_pointer_cast(funcs_impl)); + } catch (const std::exception &e) { + DEBUG_LOG("Exception in spir_funcs_from_piecewise_legendre: " + 
std::string(e.what())); + if (status) *status = SPIR_INTERNAL_ERROR; + return nullptr; + } catch (...) { + DEBUG_LOG("Unknown exception in spir_funcs_from_piecewise_legendre"); + if (status) *status = SPIR_INTERNAL_ERROR; + return nullptr; + } +} + +int spir_gauss_legendre_rule_piecewise_double( + int n, + const double* segments, int n_segments, + double* x, double* w, + int* status) +{ + try { + if (!segments || !x || !w || !status) { + if (status) *status = SPIR_INVALID_ARGUMENT; + return SPIR_INVALID_ARGUMENT; + } + + if (n < 1) { + DEBUG_LOG("n must be >= 1"); + *status = SPIR_INVALID_ARGUMENT; + return SPIR_INVALID_ARGUMENT; + } + + if (n_segments < 1) { + DEBUG_LOG("n_segments must be >= 1"); + *status = SPIR_INVALID_ARGUMENT; + return SPIR_INVALID_ARGUMENT; + } + + // Convert segments to vector + std::vector segs_vec(segments, segments + n_segments + 1); + + // Verify segments are monotonically increasing + for (int i = 1; i <= n_segments; ++i) { + if (segs_vec[i] <= segs_vec[i-1]) { + DEBUG_LOG("segments must be monotonically increasing"); + *status = SPIR_INVALID_ARGUMENT; + return SPIR_INVALID_ARGUMENT; + } + } + + // Generate base rule with DDouble precision, then convert to double + auto rule_dd = sparseir::legendre(n); + auto rule = sparseir::convert_rule(rule_dd); + + // Create piecewise rule + auto piecewise_rule = rule.piecewise(segs_vec); + + // Copy to output arrays + for (int i = 0; i < piecewise_rule.x.size(); ++i) { + x[i] = piecewise_rule.x(i); + w[i] = piecewise_rule.w(i); + } + + *status = SPIR_COMPUTATION_SUCCESS; + return SPIR_COMPUTATION_SUCCESS; + } catch (const std::exception &e) { + DEBUG_LOG("Exception in spir_gauss_legendre_rule_piecewise_double: " + std::string(e.what())); + if (status) *status = SPIR_INTERNAL_ERROR; + return SPIR_INTERNAL_ERROR; + } catch (...) 
{ + DEBUG_LOG("Unknown exception in spir_gauss_legendre_rule_piecewise_double"); + if (status) *status = SPIR_INTERNAL_ERROR; + return SPIR_INTERNAL_ERROR; + } +} + +int spir_gauss_legendre_rule_piecewise_ddouble( + int n, + const double* segments, int n_segments, + double* x_high, double* x_low, + double* w_high, double* w_low, + int* status) +{ + try { + if (!segments || !x_high || !x_low || !w_high || !w_low || !status) { + if (status) *status = SPIR_INVALID_ARGUMENT; + return SPIR_INVALID_ARGUMENT; + } + + if (n < 1) { + DEBUG_LOG("n must be >= 1"); + *status = SPIR_INVALID_ARGUMENT; + return SPIR_INVALID_ARGUMENT; + } + + if (n_segments < 1) { + DEBUG_LOG("n_segments must be >= 1"); + *status = SPIR_INVALID_ARGUMENT; + return SPIR_INVALID_ARGUMENT; + } + + // Convert segments to vector + std::vector segs_vec(segments, segments + n_segments + 1); + + // Verify segments are monotonically increasing + for (int i = 1; i <= n_segments; ++i) { + if (segs_vec[i] <= segs_vec[i-1]) { + DEBUG_LOG("segments must be monotonically increasing"); + *status = SPIR_INVALID_ARGUMENT; + return SPIR_INVALID_ARGUMENT; + } + } + + // Generate base rule with DDouble precision + auto rule_dd = sparseir::legendre(n); + + // Convert segments to DDouble + std::vector segs_dd(segs_vec.size()); + for (size_t i = 0; i < segs_vec.size(); ++i) { + segs_dd[i] = xprec::DDouble(segs_vec[i]); + } + + // Create piecewise rule + auto piecewise_rule = rule_dd.piecewise(segs_dd); + + // Extract high and low parts + for (int i = 0; i < piecewise_rule.x.size(); ++i) { + x_high[i] = piecewise_rule.x(i).hi(); + x_low[i] = piecewise_rule.x(i).lo(); + w_high[i] = piecewise_rule.w(i).hi(); + w_low[i] = piecewise_rule.w(i).lo(); + } + + *status = SPIR_COMPUTATION_SUCCESS; + return SPIR_COMPUTATION_SUCCESS; + } catch (const std::exception &e) { + DEBUG_LOG("Exception in spir_gauss_legendre_rule_piecewise_ddouble: " + std::string(e.what())); + if (status) *status = SPIR_INTERNAL_ERROR; + return 
SPIR_INTERNAL_ERROR; + } catch (...) { + DEBUG_LOG("Unknown exception in spir_gauss_legendre_rule_piecewise_ddouble"); + if (status) *status = SPIR_INTERNAL_ERROR; return SPIR_INTERNAL_ERROR; } } @@ -113,7 +474,6 @@ int spir_kernel_domain(const spir_kernel *k, double *xmin, double *xmax, spir_sve_result* spir_sve_result_new( const spir_kernel *k, double epsilon, - double cutoff, int lmax, int n_gauss, int Twork, @@ -121,58 +481,920 @@ spir_sve_result* spir_sve_result_new( ) { try { - std::shared_ptr impl = get_impl_kernel(k); - if (!impl) { - DEBUG_LOG("Failed to get a sve result implementation"); + std::shared_ptr impl = get_impl_kernel(k); + if (!impl) { + DEBUG_LOG("Failed to get a sve result implementation"); + *status = SPIR_GET_IMPL_FAILED; + return nullptr; + } + + // Set cutoff to NaN to use default value (2 * eps) internally + double cutoff = std::numeric_limits::quiet_NaN(); + + if (lmax < 0) { + lmax = std::numeric_limits::max(); + } + + sparseir::TworkType Twork_type; + if (Twork == SPIR_TWORK_FLOAT64) { + Twork_type = sparseir::TworkType::FLOAT64; + } else if (Twork == SPIR_TWORK_FLOAT64X2) { + Twork_type = sparseir::TworkType::FLOAT64X2; + } else if (Twork == SPIR_TWORK_AUTO) { + Twork_type = sparseir::TworkType::AUTO; + } else { + DEBUG_LOG("Error: Invalid Twork"); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + + std::shared_ptr sve_result; + + if (auto logistic = std::dynamic_pointer_cast(impl)) { + sve_result = std::make_shared(sparseir::compute_sve( + *logistic, epsilon, cutoff, lmax, n_gauss, Twork_type + )); + } else if (auto bose = std::dynamic_pointer_cast(impl)) { + sve_result = std::make_shared(sparseir::compute_sve( + *bose, epsilon, cutoff, lmax, n_gauss, Twork_type + )); + } else { + DEBUG_LOG("Unknown kernel type"); + *status = SPIR_INTERNAL_ERROR; + return nullptr; + } + + *status = SPIR_COMPUTATION_SUCCESS; + return create_sve_result(sve_result); + } catch (const std::exception &e) { + DEBUG_LOG("Exception in 
spir_sve_result_new: " + std::string(e.what())); + *status = SPIR_INTERNAL_ERROR; + return nullptr; + } catch (...) { + DEBUG_LOG("Unknown exception in spir_sve_result_new"); + *status = SPIR_INTERNAL_ERROR; + return nullptr; + } +} + +spir_sve_result* spir_sve_result_from_matrix( + const double* K_high, const double* K_low, + int nx, int ny, int order, + const double* segments_x, int n_segments_x, + const double* segments_y, int n_segments_y, + int n_gauss, double epsilon, + int* status) +{ + try { + // Input validation + if (!K_high || !segments_x || !segments_y || !status) { + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + + if (nx < 1 || ny < 1 || n_segments_x < 1 || n_segments_y < 1 || n_gauss < 1) { + DEBUG_LOG("Invalid dimensions or parameters"); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + + // Verify segments are monotonically increasing + for (int i = 1; i <= n_segments_x; ++i) { + if (segments_x[i] <= segments_x[i-1]) { + DEBUG_LOG("segments_x must be monotonically increasing"); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + } + for (int i = 1; i <= n_segments_y; ++i) { + if (segments_y[i] <= segments_y[i-1]) { + DEBUG_LOG("segments_y must be monotonically increasing"); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + } + + // Determine if using DDouble precision + bool use_ddouble = (K_low != nullptr); + + // Convert segments to vectors + std::vector segs_x_vec(segments_x, segments_x + n_segments_x + 1); + std::vector segs_y_vec(segments_y, segments_y + n_segments_y + 1); + + // Reconstruct Gauss rules + auto rule_base_dd = sparseir::legendre(n_gauss); + + if (use_ddouble) { + // DDouble precision + using T = xprec::DDouble; + + // Convert segments to DDouble + std::vector segs_x_dd(segs_x_vec.size()); + std::vector segs_y_dd(segs_y_vec.size()); + for (size_t i = 0; i < segs_x_vec.size(); ++i) { + segs_x_dd[i] = T(segs_x_vec[i]); + } + for (size_t i = 0; i < segs_y_vec.size(); ++i) { + segs_y_dd[i] = 
T(segs_y_vec[i]); + } + + auto rule = sparseir::convert_rule(rule_base_dd); + auto gauss_x = rule.piecewise(segs_x_dd); + auto gauss_y = rule.piecewise(segs_y_dd); + + // Convert input matrix K to Eigen::MatrixX + Eigen::MatrixX K(nx, ny); + if (order == SPIR_ORDER_ROW_MAJOR) { + for (int i = 0; i < nx; ++i) { + for (int j = 0; j < ny; ++j) { + int idx = i * ny + j; + K(i, j) = xprec::DDouble(K_high[idx], K_low[idx]); + } + } + } else { // COLUMN_MAJOR + for (int j = 0; j < ny; ++j) { + for (int i = 0; i < nx; ++i) { + int idx = j * nx + i; + K(i, j) = xprec::DDouble(K_high[idx], K_low[idx]); + } + } + } + + // Compute SVD + auto svd_result = sparseir::compute_svd(K); + auto u = std::get<0>(svd_result); + auto s_ = std::get<1>(svd_result); + auto v = std::get<2>(svd_result); + + // Postprocess similar to SamplingSVE::postprocess + Eigen::VectorXd s = s_.template cast(); + Eigen::VectorX gauss_x_w = Eigen::VectorX::Map(gauss_x.w.data(), gauss_x.w.size()); + Eigen::VectorX gauss_y_w = Eigen::VectorX::Map(gauss_y.w.data(), gauss_y.w.size()); + + // Normalize u and v by weights + Eigen::MatrixX u_x_ = u; + for (int i = 0; i < u_x_.rows(); ++i) { + for (int j = 0; j < u_x_.cols(); ++j) { + u_x_(i, j) = u(i, j) / sparseir::sqrt_impl(gauss_x_w[i]); + } + } + + Eigen::MatrixX v_y_ = v; + for (int i = 0; i < v_y_.rows(); ++i) { + for (int j = 0; j < v_y_.cols(); ++j) { + v_y_(i, j) = v(i, j) / sparseir::sqrt_impl(gauss_y_w[i]); + } + } + + // Reshape to Tensor + Eigen::Tensor u_x(n_gauss, n_segments_x, s.size()); + Eigen::Tensor v_y(n_gauss, n_segments_y, s.size()); + + for (int i = 0; i < u_x.dimension(0); ++i) { + for (int j = 0; j < u_x.dimension(1); ++j) { + for (int k = 0; k < u_x.dimension(2); ++k) { + u_x(i, j, k) = u_x_(j * n_gauss + i, k); + } + } + } + + for (int i = 0; i < v_y.dimension(0); ++i) { + for (int j = 0; j < v_y.dimension(1); ++j) { + for (int k = 0; k < v_y.dimension(2); ++k) { + v_y(i, j, k) = v_y_(j * n_gauss + i, k); + } + } + } + + // Apply 
Legendre collocation + Eigen::MatrixX cmat = sparseir::legendre_collocation(rule); + Eigen::Tensor u_data(cmat.rows(), n_segments_x, s.size()); + Eigen::Tensor v_data(cmat.rows(), n_segments_y, s.size()); + + for (int j = 0; j < u_data.dimension(1); ++j) { + for (int k = 0; k < u_data.dimension(2); ++k) { + for (int i = 0; i < u_data.dimension(0); ++i) { + u_data(i, j, k) = T(0); + for (int l = 0; l < cmat.cols(); ++l) { + u_data(i, j, k) += cmat(i, l) * u_x(l, j, k); + } + } + } + } + + for (int j = 0; j < v_data.dimension(1); ++j) { + for (int k = 0; k < v_data.dimension(2); ++k) { + for (int i = 0; i < v_data.dimension(0); ++i) { + v_data(i, j, k) = T(0); + for (int l = 0; l < cmat.cols(); ++l) { + v_data(i, j, k) += cmat(i, l) * v_y(l, j, k); + } + } + } + } + + // Apply segment scaling + auto dsegs_x = sparseir::diff(segs_x_dd); + auto dsegs_y = sparseir::diff(segs_y_dd); + + for (int j = 0; j < u_data.dimension(1); ++j) { + for (int i = 0; i < u_data.dimension(0); ++i) { + for (int k = 0; k < u_data.dimension(2); ++k) { + u_data(i, j, k) *= sparseir::sqrt_impl(T(0.5) * dsegs_x[j]); + } + } + } + + for (int j = 0; j < v_data.dimension(1); ++j) { + for (int i = 0; i < v_data.dimension(0); ++i) { + for (int k = 0; k < v_data.dimension(2); ++k) { + v_data(i, j, k) *= sparseir::sqrt_impl(T(0.5) * dsegs_y[j]); + } + } + } + + // Convert to PiecewiseLegendrePoly + std::vector polyvec_u; + std::vector polyvec_v; + Eigen::VectorXd knots_x = Eigen::Map(segs_x_vec.data(), segs_x_vec.size()); + Eigen::VectorXd knots_y = Eigen::Map(segs_y_vec.data(), segs_y_vec.size()); + + for (int i = 0; i < u_data.dimension(2); ++i) { + Eigen::MatrixXd slice_double(u_data.dimension(0), u_data.dimension(1)); + for (int j = 0; j < u_data.dimension(0); ++j) { + for (int k = 0; k < u_data.dimension(1); ++k) { + slice_double(j, k) = static_cast(u_data(j, k, i)); + } + } + polyvec_u.push_back( + sparseir::PiecewiseLegendrePoly(slice_double, knots_x, i, sparseir::diff(knots_x))); + } + + for 
(int i = 0; i < v_data.dimension(2); ++i) { + Eigen::MatrixXd slice_double(v_data.dimension(0), v_data.dimension(1)); + for (int j = 0; j < v_data.dimension(0); ++j) { + for (int k = 0; k < v_data.dimension(1); ++k) { + slice_double(j, k) = static_cast(v_data(j, k, i)); + } + } + polyvec_v.push_back( + sparseir::PiecewiseLegendrePoly(slice_double, knots_y, i, sparseir::diff(knots_y))); + } + + sparseir::PiecewiseLegendrePolyVector ulx(polyvec_u); + sparseir::PiecewiseLegendrePolyVector vly(polyvec_v); + sparseir::canonicalize(ulx, vly); + + auto sve_result = std::make_shared(ulx, s, vly, epsilon); + *status = SPIR_COMPUTATION_SUCCESS; + return create_sve_result(sve_result); + + } else { + // Double precision + using T = double; + + auto rule = sparseir::convert_rule(rule_base_dd); + auto gauss_x = rule.piecewise(segs_x_vec); + auto gauss_y = rule.piecewise(segs_y_vec); + + // Convert input matrix K to Eigen::MatrixXd + Eigen::MatrixXd K(nx, ny); + if (order == SPIR_ORDER_ROW_MAJOR) { + for (int i = 0; i < nx; ++i) { + for (int j = 0; j < ny; ++j) { + K(i, j) = K_high[i * ny + j]; + } + } + } else { // COLUMN_MAJOR + for (int j = 0; j < ny; ++j) { + for (int i = 0; i < nx; ++i) { + K(i, j) = K_high[j * nx + i]; + } + } + } + + // Compute SVD + auto svd_result = sparseir::compute_svd(K); + auto u = std::get<0>(svd_result); + auto s_ = std::get<1>(svd_result); + auto v = std::get<2>(svd_result); + + // Postprocess similar to SamplingSVE::postprocess + Eigen::VectorXd s = s_; + Eigen::VectorXd gauss_x_w = Eigen::Map(gauss_x.w.data(), gauss_x.w.size()); + Eigen::VectorXd gauss_y_w = Eigen::Map(gauss_y.w.data(), gauss_y.w.size()); + + // Normalize u and v by weights + Eigen::MatrixXd u_x_ = u; + for (int i = 0; i < u_x_.rows(); ++i) { + for (int j = 0; j < u_x_.cols(); ++j) { + u_x_(i, j) = u(i, j) / std::sqrt(gauss_x_w[i]); + } + } + + Eigen::MatrixXd v_y_ = v; + for (int i = 0; i < v_y_.rows(); ++i) { + for (int j = 0; j < v_y_.cols(); ++j) { + v_y_(i, j) = v(i, j) / 
std::sqrt(gauss_y_w[i]); + } + } + + // Reshape to Tensor + Eigen::Tensor u_x(n_gauss, n_segments_x, s.size()); + Eigen::Tensor v_y(n_gauss, n_segments_y, s.size()); + + for (int i = 0; i < u_x.dimension(0); ++i) { + for (int j = 0; j < u_x.dimension(1); ++j) { + for (int k = 0; k < u_x.dimension(2); ++k) { + u_x(i, j, k) = u_x_(j * n_gauss + i, k); + } + } + } + + for (int i = 0; i < v_y.dimension(0); ++i) { + for (int j = 0; j < v_y.dimension(1); ++j) { + for (int k = 0; k < v_y.dimension(2); ++k) { + v_y(i, j, k) = v_y_(j * n_gauss + i, k); + } + } + } + + // Apply Legendre collocation + Eigen::MatrixXd cmat = sparseir::legendre_collocation(rule); + Eigen::Tensor u_data(cmat.rows(), n_segments_x, s.size()); + Eigen::Tensor v_data(cmat.rows(), n_segments_y, s.size()); + + for (int j = 0; j < u_data.dimension(1); ++j) { + for (int k = 0; k < u_data.dimension(2); ++k) { + for (int i = 0; i < u_data.dimension(0); ++i) { + u_data(i, j, k) = 0.0; + for (int l = 0; l < cmat.cols(); ++l) { + u_data(i, j, k) += cmat(i, l) * u_x(l, j, k); + } + } + } + } + + for (int j = 0; j < v_data.dimension(1); ++j) { + for (int k = 0; k < v_data.dimension(2); ++k) { + for (int i = 0; i < v_data.dimension(0); ++i) { + v_data(i, j, k) = 0.0; + for (int l = 0; l < cmat.cols(); ++l) { + v_data(i, j, k) += cmat(i, l) * v_y(l, j, k); + } + } + } + } + + // Apply segment scaling + std::vector dsegs_x(n_segments_x); + std::vector dsegs_y(n_segments_y); + for (int i = 0; i < n_segments_x; ++i) { + dsegs_x[i] = segs_x_vec[i+1] - segs_x_vec[i]; + } + for (int i = 0; i < n_segments_y; ++i) { + dsegs_y[i] = segs_y_vec[i+1] - segs_y_vec[i]; + } + + for (int j = 0; j < u_data.dimension(1); ++j) { + for (int i = 0; i < u_data.dimension(0); ++i) { + for (int k = 0; k < u_data.dimension(2); ++k) { + u_data(i, j, k) *= std::sqrt(0.5 * dsegs_x[j]); + } + } + } + + for (int j = 0; j < v_data.dimension(1); ++j) { + for (int i = 0; i < v_data.dimension(0); ++i) { + for (int k = 0; k < v_data.dimension(2); 
++k) { + v_data(i, j, k) *= std::sqrt(0.5 * dsegs_y[j]); + } + } + } + + // Convert to PiecewiseLegendrePoly + std::vector polyvec_u; + std::vector polyvec_v; + Eigen::VectorXd knots_x = Eigen::Map(segs_x_vec.data(), segs_x_vec.size()); + Eigen::VectorXd knots_y = Eigen::Map(segs_y_vec.data(), segs_y_vec.size()); + + for (int i = 0; i < u_data.dimension(2); ++i) { + Eigen::MatrixXd slice_double(u_data.dimension(0), u_data.dimension(1)); + for (int j = 0; j < u_data.dimension(0); ++j) { + for (int k = 0; k < u_data.dimension(1); ++k) { + slice_double(j, k) = u_data(j, k, i); + } + } + polyvec_u.push_back( + sparseir::PiecewiseLegendrePoly(slice_double, knots_x, i, sparseir::diff(knots_x))); + } + + for (int i = 0; i < v_data.dimension(2); ++i) { + Eigen::MatrixXd slice_double(v_data.dimension(0), v_data.dimension(1)); + for (int j = 0; j < v_data.dimension(0); ++j) { + for (int k = 0; k < v_data.dimension(1); ++k) { + slice_double(j, k) = v_data(j, k, i); + } + } + polyvec_v.push_back( + sparseir::PiecewiseLegendrePoly(slice_double, knots_y, i, sparseir::diff(knots_y))); + } + + sparseir::PiecewiseLegendrePolyVector ulx(polyvec_u); + sparseir::PiecewiseLegendrePolyVector vly(polyvec_v); + sparseir::canonicalize(ulx, vly); + + auto sve_result = std::make_shared(ulx, s, vly, epsilon); + *status = SPIR_COMPUTATION_SUCCESS; + return create_sve_result(sve_result); + } + } catch (const std::exception &e) { + DEBUG_LOG("Exception in spir_sve_result_from_matrix: " + std::string(e.what())); + *status = SPIR_INTERNAL_ERROR; + return nullptr; + } catch (...) 
{ + DEBUG_LOG("Unknown exception in spir_sve_result_from_matrix"); + *status = SPIR_INTERNAL_ERROR; + return nullptr; + } +} + +spir_sve_result* spir_sve_result_from_matrix_centrosymmetric( + const double* K_even_high, const double* K_even_low, + const double* K_odd_high, const double* K_odd_low, + int nx, int ny, int order, + const double* segments_x, int n_segments_x, + const double* segments_y, int n_segments_y, + int n_gauss, double epsilon, + int* status) +{ + try { + // Input validation + if (!K_even_high || !K_odd_high || !segments_x || !segments_y || !status) { + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + + // Validate segments for centrosymmetric kernels: + // segments_x and segments_y must start at 0 and end at positive values + // This is required because even/odd matrices are computed on [0, xmax] x [0, ymax] + const double eps_tol = 1e-12; + + // Check segments_x + if (n_segments_x < 1) { + DEBUG_LOG("Invalid n_segments_x: must be at least 1, got " + std::to_string(n_segments_x)); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + if (std::abs(segments_x[0]) > eps_tol) { + DEBUG_LOG("segments_x must start at 0, but got " + std::to_string(segments_x[0])); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + if (segments_x[n_segments_x] <= 0.0) { + DEBUG_LOG("segments_x must end at a positive value, but got " + std::to_string(segments_x[n_segments_x])); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + + // Check segments_y + if (n_segments_y < 1) { + DEBUG_LOG("Invalid n_segments_y: must be at least 1, got " + std::to_string(n_segments_y)); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + if (std::abs(segments_y[0]) > eps_tol) { + DEBUG_LOG("segments_y must start at 0, but got " + std::to_string(segments_y[0])); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + if (segments_y[n_segments_y] <= 0.0) { + DEBUG_LOG("segments_y must end at a positive value, but got " + 
std::to_string(segments_y[n_segments_y])); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + + // Verify segments are monotonically increasing (additional check) + for (int i = 1; i <= n_segments_x; ++i) { + if (segments_x[i] <= segments_x[i-1]) { + DEBUG_LOG("segments_x must be monotonically increasing, but segments_x[" + + std::to_string(i-1) + "]=" + std::to_string(segments_x[i-1]) + + " >= segments_x[" + std::to_string(i) + "]=" + std::to_string(segments_x[i])); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + } + for (int i = 1; i <= n_segments_y; ++i) { + if (segments_y[i] <= segments_y[i-1]) { + DEBUG_LOG("segments_y must be monotonically increasing, but segments_y[" + + std::to_string(i-1) + "]=" + std::to_string(segments_y[i-1]) + + " >= segments_y[" + std::to_string(i) + "]=" + std::to_string(segments_y[i])); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + } + + // Debug: Check matrix sizes and non-zero elements + DEBUG_LOG("Input matrices: nx=" + std::to_string(nx) + ", ny=" + std::to_string(ny)); + DEBUG_LOG("n_segments_x=" + std::to_string(n_segments_x) + ", n_segments_y=" + std::to_string(n_segments_y)); + DEBUG_LOG("n_gauss=" + std::to_string(n_gauss)); + if (segments_x && n_segments_x > 0) { + DEBUG_LOG("segments_x[0]=" + std::to_string(segments_x[0]) + + ", segments_x[" + std::to_string(n_segments_x) + "]=" + + std::to_string(segments_x[n_segments_x])); + } + if (segments_y && n_segments_y > 0) { + DEBUG_LOG("segments_y[0]=" + std::to_string(segments_y[0]) + + ", segments_y[" + std::to_string(n_segments_y) + "]=" + + std::to_string(segments_y[n_segments_y])); + } + + // Check if matrices are non-empty + if (nx <= 0 || ny <= 0) { + DEBUG_LOG("Error: Empty matrices: nx=" + std::to_string(nx) + ", ny=" + std::to_string(ny)); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + + // Compute SVE for even and odd matrices separately + int status_even, status_odd; + DEBUG_LOG("Computing SVE for even matrix..."); + auto 
sve_even = spir_sve_result_from_matrix( + K_even_high, K_even_low, nx, ny, order, + segments_x, n_segments_x, segments_y, n_segments_y, + n_gauss, epsilon, &status_even); + + if (status_even != SPIR_COMPUTATION_SUCCESS || !sve_even) { + DEBUG_LOG("Error: Failed to compute SVE for even matrix: status=" + std::to_string(status_even)); + *status = status_even; + return nullptr; + } + DEBUG_LOG("Successfully computed SVE for even matrix"); + + DEBUG_LOG("Computing SVE for odd matrix..."); + auto sve_odd = spir_sve_result_from_matrix( + K_odd_high, K_odd_low, nx, ny, order, + segments_x, n_segments_x, segments_y, n_segments_y, + n_gauss, epsilon, &status_odd); + + if (status_odd != SPIR_COMPUTATION_SUCCESS || !sve_odd) { + DEBUG_LOG("Error: Failed to compute SVE for odd matrix: status=" + std::to_string(status_odd)); + spir_sve_result_release(sve_even); + *status = status_odd; + return nullptr; + } + DEBUG_LOG("Successfully computed SVE for odd matrix"); + + // Get implementations + auto sve_even_impl = get_impl_sve_result(sve_even); + auto sve_odd_impl = get_impl_sve_result(sve_odd); + + if (!sve_even_impl || !sve_odd_impl) { + DEBUG_LOG("Error: Failed to get SVE result implementations"); + spir_sve_result_release(sve_even); + spir_sve_result_release(sve_odd); *status = SPIR_GET_IMPL_FAILED; return nullptr; } - - if (cutoff < 0) { - cutoff = std::numeric_limits::quiet_NaN(); + + // Debug: Check sizes of even and odd results + DEBUG_LOG("Even SVE result: s.size()=" + std::to_string(sve_even_impl->s.size()) + + ", u->size()=" + std::to_string(sve_even_impl->u->size()) + + ", v->size()=" + std::to_string(sve_even_impl->v->size())); + DEBUG_LOG("Odd SVE result: s.size()=" + std::to_string(sve_odd_impl->s.size()) + + ", u->size()=" + std::to_string(sve_odd_impl->u->size()) + + ", v->size()=" + std::to_string(sve_odd_impl->v->size())); + + // Check if even and odd results are non-empty + if (sve_even_impl->s.size() == 0 || sve_even_impl->u->size() == 0 || 
sve_even_impl->v->size() == 0) { + DEBUG_LOG("Error: Even SVE result is empty"); + spir_sve_result_release(sve_even); + spir_sve_result_release(sve_odd); + *status = SPIR_INTERNAL_ERROR; + return nullptr; } - - if (lmax < 0) { - lmax = std::numeric_limits::max(); + if (sve_odd_impl->s.size() == 0 || sve_odd_impl->u->size() == 0 || sve_odd_impl->v->size() == 0) { + DEBUG_LOG("Error: Odd SVE result is empty"); + spir_sve_result_release(sve_even); + spir_sve_result_release(sve_odd); + *status = SPIR_INTERNAL_ERROR; + return nullptr; + } + + // Merge even and odd results similar to CentrosymmSVE::postprocess + std::vector u_merged; + u_merged.reserve(sve_even_impl->u->size() + sve_odd_impl->u->size()); + u_merged.insert(u_merged.end(), sve_even_impl->u->begin(), sve_even_impl->u->end()); + u_merged.insert(u_merged.end(), sve_odd_impl->u->begin(), sve_odd_impl->u->end()); + + Eigen::VectorXd s_merged(sve_even_impl->s.size() + sve_odd_impl->s.size()); + s_merged << sve_even_impl->s, sve_odd_impl->s; + + std::vector v_merged; + v_merged.reserve(sve_even_impl->v->size() + sve_odd_impl->v->size()); + v_merged.insert(v_merged.end(), sve_even_impl->v->begin(), sve_even_impl->v->end()); + v_merged.insert(v_merged.end(), sve_odd_impl->v->begin(), sve_odd_impl->v->end()); + + sparseir::PiecewiseLegendrePolyVector _u_complete(u_merged); + sparseir::PiecewiseLegendrePolyVector _v_complete(v_merged); + + Eigen::VectorXi sign_even = Eigen::VectorXi::Ones(sve_even_impl->s.size()); + Eigen::VectorXi sign_odd = -Eigen::VectorXi::Ones(sve_odd_impl->s.size()); + Eigen::VectorXi signs = Eigen::VectorXi::Zero(s_merged.size()); + signs << sign_even, sign_odd; + + // Sort by singular values (descending) + std::vector sorted_indices = sparseir::sortperm_rev(s_merged); + + std::vector u_sorted(sorted_indices.size()); + std::vector v_sorted(sorted_indices.size()); + Eigen::VectorXi signs_sorted(sorted_indices.size()); + Eigen::VectorXd s_sorted(sorted_indices.size()); + + for (size_t i = 0; i < 
sorted_indices.size(); ++i) { + u_sorted[i] = u_merged[sorted_indices[i]]; + v_sorted[i] = v_merged[sorted_indices[i]]; + s_sorted[i] = s_merged[sorted_indices[i]]; + signs_sorted[i] = signs[sorted_indices[i]]; + } + + // Convert segments to vectors + std::vector segs_x_vec(segments_x, segments_x + n_segments_x + 1); + std::vector segs_y_vec(segments_y, segments_y + n_segments_y + 1); + Eigen::VectorXd segs_x = Eigen::Map(segs_x_vec.data(), segs_x_vec.size()); + Eigen::VectorXd segs_y = Eigen::Map(segs_y_vec.data(), segs_y_vec.size()); + + // Build complete domain polynomials + std::vector u_complete_vec; + std::vector v_complete_vec; + + // Check if we have any singular values + if (u_sorted.empty()) { + spir_sve_result_release(sve_even); + spir_sve_result_release(sve_odd); + *status = SPIR_INTERNAL_ERROR; + DEBUG_LOG("No singular values found in merged SVE result"); + return nullptr; + } + + Eigen::VectorXd poly_flip_x(u_sorted[0].data.rows()); + for (int i = 0; i < u_sorted[0].data.rows(); ++i) { + poly_flip_x(i) = (i % 2 == 0) ? 
1.0 : -1.0; + } + + DEBUG_LOG("Processing " + std::to_string(u_sorted.size()) + " sorted singular values"); + + for (size_t i = 0; i < u_sorted.size(); ++i) { + try { + DEBUG_LOG("Processing singular value " + std::to_string(i) + "/" + std::to_string(u_sorted.size())); + Eigen::MatrixXd u_pos_data = u_sorted[i].data / std::sqrt(2); + Eigen::MatrixXd v_pos_data = v_sorted[i].data / std::sqrt(2); + + DEBUG_LOG("u_pos_data size: " + std::to_string(u_pos_data.rows()) + "x" + std::to_string(u_pos_data.cols())); + DEBUG_LOG("v_pos_data size: " + std::to_string(v_pos_data.rows()) + "x" + std::to_string(v_pos_data.cols())); + + // Check dimensions + if (u_pos_data.cols() == 0 || v_pos_data.cols() == 0) { + DEBUG_LOG("Zero columns in u_pos_data or v_pos_data at i=" + std::to_string(i)); + spir_sve_result_release(sve_even); + spir_sve_result_release(sve_odd); + *status = SPIR_INTERNAL_ERROR; + return nullptr; + } + + Eigen::MatrixXd u_neg_data = u_pos_data.rowwise().reverse(); + u_neg_data = u_neg_data.array().colwise() * (poly_flip_x * signs_sorted[i]).array(); + Eigen::MatrixXd v_neg_data = v_pos_data.rowwise().reverse(); + v_neg_data = v_neg_data.array().colwise() * (poly_flip_x * signs_sorted[i]).array(); + + DEBUG_LOG("u_neg_data size: " + std::to_string(u_neg_data.rows()) + "x" + std::to_string(u_neg_data.cols())); + DEBUG_LOG("v_neg_data size: " + std::to_string(v_neg_data.rows()) + "x" + std::to_string(v_neg_data.cols())); + + // The merged data should have the same number of columns as the full domain segments + // u_pos_data already covers the full domain, so we shouldn't double the columns + // Instead, we need to properly extend to negative side + // Check if segments are symmetric + DEBUG_LOG("segs_x.size()=" + std::to_string(segs_x.size()) + ", u_pos_data.cols()=" + std::to_string(u_pos_data.cols())); + if (segs_x.size() != u_pos_data.cols() + 1) { + DEBUG_LOG("Segment size mismatch: segs_x.size()=" + std::to_string(segs_x.size()) + + ", u_pos_data.cols()=" + 
std::to_string(u_pos_data.cols())); + // Try to construct symmetric segments from positive half + // For now, use the existing data structure + Eigen::MatrixXd u_data = Eigen::MatrixXd::Zero( + u_pos_data.rows(), u_neg_data.cols() + u_pos_data.cols()); + u_data.leftCols(u_neg_data.cols()) = u_neg_data; + u_data.rightCols(u_pos_data.cols()) = u_pos_data; + Eigen::MatrixXd v_data = Eigen::MatrixXd::Zero( + v_pos_data.rows(), v_neg_data.cols() + v_pos_data.cols()); + v_data.leftCols(v_neg_data.cols()) = v_neg_data; + v_data.rightCols(v_pos_data.cols()) = v_pos_data; + + // Construct symmetric segments + Eigen::VectorXd segs_x_symmetric = Eigen::VectorXd::Zero(u_data.cols() + 1); + int half = u_pos_data.cols(); + segs_x_symmetric.head(half) = -segs_x.tail(half).reverse(); + segs_x_symmetric.tail(half + 1) = segs_x.tail(half + 1); + + Eigen::VectorXd segs_y_symmetric = Eigen::VectorXd::Zero(v_data.cols() + 1); + half = v_pos_data.cols(); + segs_y_symmetric.head(half) = -segs_y.tail(half).reverse(); + segs_y_symmetric.tail(half + 1) = segs_y.tail(half + 1); + + Eigen::VectorXd segs_x_diff = segs_x_symmetric.tail(segs_x_symmetric.size() - 1) - segs_x_symmetric.head(segs_x_symmetric.size() - 1); + Eigen::VectorXd segs_y_diff = segs_y_symmetric.tail(segs_y_symmetric.size() - 1) - segs_y_symmetric.head(segs_y_symmetric.size() - 1); + + u_complete_vec.push_back( + sparseir::PiecewiseLegendrePoly(u_data, segs_x_symmetric, static_cast(i), segs_x_diff, signs_sorted[i])); + v_complete_vec.push_back( + sparseir::PiecewiseLegendrePoly(v_data, segs_y_symmetric, static_cast(i), segs_y_diff, signs_sorted[i])); + } else { + // Segments already match, but we need to extend to full domain [-xmax, xmax] + // Current segments are [0, xmax], need to construct full domain + DEBUG_LOG("Segments match, extending to full domain"); + DEBUG_LOG("Current segs_x: [" + std::to_string(segs_x(0)) + ", ..., " + std::to_string(segs_x(segs_x.size()-1)) + "]"); + + // Construct symmetric segments for full 
domain + int half = u_pos_data.cols(); + Eigen::VectorXd segs_x_full = Eigen::VectorXd::Zero(2 * half + 1); + segs_x_full.head(half) = -segs_x.tail(half).reverse(); + segs_x_full(half) = 0.0; + segs_x_full.tail(half) = segs_x.tail(half); + + int half_y = v_pos_data.cols(); + Eigen::VectorXd segs_y_full = Eigen::VectorXd::Zero(2 * half_y + 1); + segs_y_full.head(half_y) = -segs_y.tail(half_y).reverse(); + segs_y_full(half_y) = 0.0; + segs_y_full.tail(half_y) = segs_y.tail(half_y); + + // Combine u_neg and u_pos data + Eigen::MatrixXd u_data = Eigen::MatrixXd::Zero( + u_pos_data.rows(), u_neg_data.cols() + u_pos_data.cols()); + u_data.leftCols(u_neg_data.cols()) = u_neg_data; + u_data.rightCols(u_pos_data.cols()) = u_pos_data; + Eigen::MatrixXd v_data = Eigen::MatrixXd::Zero( + v_pos_data.rows(), v_neg_data.cols() + v_pos_data.cols()); + v_data.leftCols(v_neg_data.cols()) = v_neg_data; + v_data.rightCols(v_pos_data.cols()) = v_pos_data; + + DEBUG_LOG("u_data size: " + std::to_string(u_data.rows()) + "x" + std::to_string(u_data.cols()) + + ", segs_x_full size: " + std::to_string(segs_x_full.size())); + DEBUG_LOG("v_data size: " + std::to_string(v_data.rows()) + "x" + std::to_string(v_data.cols()) + + ", segs_y_full size: " + std::to_string(segs_y_full.size())); + + Eigen::VectorXd segs_x_diff = segs_x_full.tail(segs_x_full.size() - 1) - segs_x_full.head(segs_x_full.size() - 1); + Eigen::VectorXd segs_y_diff = segs_y_full.tail(segs_y_full.size() - 1) - segs_y_full.head(segs_y_full.size() - 1); + + DEBUG_LOG("Creating PiecewiseLegendrePoly..."); + u_complete_vec.push_back( + sparseir::PiecewiseLegendrePoly(u_data, segs_x_full, static_cast(i), segs_x_diff, signs_sorted[i])); + v_complete_vec.push_back( + sparseir::PiecewiseLegendrePoly(v_data, segs_y_full, static_cast(i), segs_y_diff, signs_sorted[i])); + DEBUG_LOG("Successfully created PiecewiseLegendrePoly for singular value " + std::to_string(i)); + } + DEBUG_LOG("Successfully processed singular value " + 
std::to_string(i)); + } catch (const std::exception &e) { + DEBUG_LOG("Exception while processing singular value " + std::to_string(i) + ": " + std::string(e.what())); + std::cerr << "[ERROR] Exception while processing singular value " << i << ": " << e.what() << std::endl; + spir_sve_result_release(sve_even); + spir_sve_result_release(sve_odd); + *status = SPIR_INTERNAL_ERROR; + return nullptr; + } } + + sparseir::PiecewiseLegendrePolyVector u_complete(u_complete_vec); + sparseir::PiecewiseLegendrePolyVector v_complete(v_complete_vec); + + auto sve_result = std::make_shared(u_complete, s_sorted, v_complete, epsilon); + + // Clean up temporary results + spir_sve_result_release(sve_even); + spir_sve_result_release(sve_odd); + + *status = SPIR_COMPUTATION_SUCCESS; + return create_sve_result(sve_result); + } catch (const std::exception &e) { + std::string error_msg = "Exception in spir_sve_result_from_matrix_centrosymmetric: " + std::string(e.what()); + DEBUG_LOG(error_msg); + std::cerr << "[ERROR] " << error_msg << std::endl; + *status = SPIR_INTERNAL_ERROR; + return nullptr; + } catch (const std::string &e) { + std::string error_msg = "String exception in spir_sve_result_from_matrix_centrosymmetric: " + e; + DEBUG_LOG(error_msg); + std::cerr << "[ERROR] " << error_msg << std::endl; + *status = SPIR_INTERNAL_ERROR; + return nullptr; + } catch (...) 
{ + std::string error_msg = "Unknown exception in spir_sve_result_from_matrix_centrosymmetric"; + DEBUG_LOG(error_msg); + std::cerr << "[ERROR] " << error_msg << std::endl; + *status = SPIR_INTERNAL_ERROR; + return nullptr; + } +} - sparseir::TworkType Twork_type; - if (Twork == SPIR_TWORK_FLOAT64) { - Twork_type = sparseir::TworkType::FLOAT64; - } else if (Twork == SPIR_TWORK_FLOAT64X2) { - Twork_type = sparseir::TworkType::FLOAT64X2; - } else if (Twork == SPIR_TWORK_AUTO) { - Twork_type = sparseir::TworkType::AUTO; - } else { - DEBUG_LOG("Error: Invalid Twork"); +spir_basis* spir_basis_new_from_sve_and_inv_weight( + int statistics, double beta, double omega_max, double epsilon, + double lambda, int ypower, double conv_radius, + const spir_sve_result *sve, + const spir_funcs *inv_weight_funcs, + int max_size, + int *status) +{ + DEBUG_LOG("spir_basis_new_from_sve_and_inv_weight called"); + DEBUG_LOG(" statistics=" + std::to_string(statistics) + + ", beta=" + std::to_string(beta) + + ", omega_max=" + std::to_string(omega_max) + + ", epsilon=" + std::to_string(epsilon) + + ", lambda=" + std::to_string(lambda) + + ", ypower=" + std::to_string(ypower) + + ", conv_radius=" + std::to_string(conv_radius) + + ", max_size=" + std::to_string(max_size)); + DEBUG_LOG(" sve=" + (sve ? std::to_string(reinterpret_cast(sve)) : std::string("null"))); + DEBUG_LOG(" inv_weight_funcs=" + (inv_weight_funcs ? std::to_string(reinterpret_cast(inv_weight_funcs)) : std::string("null"))); + DEBUG_LOG(" status=" + (status ? 
std::to_string(reinterpret_cast(status)) : std::string("null"))); + + try { + // Input validation + if (!sve || !inv_weight_funcs || !status) { + DEBUG_LOG("Input validation failed: null pointer"); *status = SPIR_INVALID_ARGUMENT; return nullptr; } - - std::shared_ptr sve_result; - - if (auto logistic = std::dynamic_pointer_cast(impl)) { - sve_result = std::make_shared(sparseir::compute_sve( - *logistic, epsilon, cutoff, lmax, n_gauss, Twork_type - )); - } else if (auto bose = std::dynamic_pointer_cast(impl)) { - sve_result = std::make_shared(sparseir::compute_sve( - *bose, epsilon, cutoff, lmax, n_gauss, Twork_type - )); + + if (beta <= 0.0 || omega_max < 0.0) { + DEBUG_LOG("Invalid beta or omega_max: beta=" + std::to_string(beta) + ", omega_max=" + std::to_string(omega_max)); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + + // Get SVE result implementation + DEBUG_LOG("Getting SVE result implementation"); + auto sve_impl = get_impl_sve_result(sve); + if (!sve_impl) { + DEBUG_LOG("Failed to get SVE result implementation"); + *status = SPIR_GET_IMPL_FAILED; + return nullptr; + } + DEBUG_LOG("SVE result implementation obtained successfully"); + + // Create inv_weight_func from spir_funcs + // inv_weight_funcs represents inv_weight_func(omega) for fixed beta + // Create omega-only function that evaluates spir_funcs + DEBUG_LOG("Creating inv_weight_func from spir_funcs"); + std::function inv_weight_func = + [inv_weight_funcs](double omega) -> double { + int funcs_size; + int eval_status = spir_funcs_get_size(inv_weight_funcs, &funcs_size); + if (eval_status != SPIR_COMPUTATION_SUCCESS || funcs_size < 1) { + return 1.0; // Default to 1.0 on error + } + + // Evaluate at omega (treating omega as x coordinate) + std::vector values(funcs_size); + eval_status = spir_funcs_eval(inv_weight_funcs, omega, values.data()); + if (eval_status != SPIR_COMPUTATION_SUCCESS) { + return 1.0; // Default to 1.0 on error + } + + // Return the first function value (assuming 
single function) + return values[0]; + }; + + // Create basis using new constructor + DEBUG_LOG("Creating FiniteTempBasis with statistics=" + std::to_string(statistics)); + spir_basis* result = nullptr; + if (statistics == SPIR_STATISTICS_FERMIONIC) { + using FiniteTempBasisType = sparseir::FiniteTempBasis; + auto impl = std::make_shared( + beta, omega_max, epsilon, lambda, ypower, conv_radius, + *sve_impl, inv_weight_func, max_size); + result = create_basis( + std::make_shared<_IRBasis>(impl)); } else { - DEBUG_LOG("Unknown kernel type"); + using FiniteTempBasisType = sparseir::FiniteTempBasis; + auto impl = std::make_shared( + beta, omega_max, epsilon, lambda, ypower, conv_radius, + *sve_impl, inv_weight_func, max_size); + result = create_basis( + std::make_shared<_IRBasis>(impl)); + } + + if (result) { + *status = SPIR_COMPUTATION_SUCCESS; + return result; + } else { + DEBUG_LOG("Failed to create basis: create_basis returned nullptr"); *status = SPIR_INTERNAL_ERROR; return nullptr; } - - *status = SPIR_COMPUTATION_SUCCESS; - return create_sve_result(sve_result); } catch (const std::exception &e) { - DEBUG_LOG("Exception in spir_sve_result_new: " + std::string(e.what())); + DEBUG_LOG("Exception in spir_basis_new_from_sve_and_inv_weight: " + std::string(e.what())); *status = SPIR_INTERNAL_ERROR; return nullptr; } catch (...) 
{ - DEBUG_LOG("Unknown exception in spir_sve_result_new"); + DEBUG_LOG("Unknown exception in spir_basis_new_from_sve_and_inv_weight"); *status = SPIR_INTERNAL_ERROR; return nullptr; } @@ -836,6 +2058,52 @@ spir_funcs* spir_basis_get_uhat(const spir_basis *b, int *status) } } +spir_funcs* spir_basis_get_uhat_full(const spir_basis *b, int *status) +{ + if (!b) { + DEBUG_LOG("Error in spir_basis_get_uhat_full: invalid pointer b"); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + if (!status) { + DEBUG_LOG("Error in spir_basis_get_uhat_full: invalid pointer status"); + *status = SPIR_INVALID_ARGUMENT; + return nullptr; + } + + auto impl = get_impl_basis(b); + if (!impl) { + *status = SPIR_GET_IMPL_FAILED; + return nullptr; + } + + try { + if (impl->get_statistics() == SPIR_STATISTICS_FERMIONIC) { + spir_funcs *uhat_full = nullptr; + int ret = _spir_basis_get_uhat_full(b, &uhat_full); + if (ret != SPIR_COMPUTATION_SUCCESS) { + *status = ret; + return nullptr; + } + *status = SPIR_COMPUTATION_SUCCESS; + return uhat_full; + } else { + spir_funcs *uhat_full = nullptr; + int ret = _spir_basis_get_uhat_full(b, &uhat_full); + if (ret != SPIR_COMPUTATION_SUCCESS) { + *status = ret; + return nullptr; + } + *status = SPIR_COMPUTATION_SUCCESS; + return uhat_full; + } + } catch (const std::exception &e) { + DEBUG_LOG("Exception in spir_basis_get_uhat_full: " + std::string(e.what())); + *status = SPIR_GET_IMPL_FAILED; + return nullptr; + } +} + // TODO: USE THIS int spir_sampling_get_npoints(const spir_sampling *s, int *num_points) @@ -1047,27 +2315,69 @@ int spir_basis_get_default_taus_ext( Eigen::VectorXd tau_points; if (impl->get_statistics() == SPIR_STATISTICS_FERMIONIC) { auto ir_basis = _safe_static_pointer_cast<_IRBasis>(impl); - tau_points = sparseir::default_sampling_points( + // Use default_tau_sampling_points() method which handles augmentation correctly + // But we need to request n_points, not size() + int sz = ir_basis->get_impl()->size(); + Eigen::VectorXd 
x = sparseir::default_sampling_points( *(ir_basis->get_impl()->sve_result->u), n_points ); - *n_points_returned = tau_points.size(); + + // Process like default_tau_sampling_points() but with n_points + std::vector unique_x; + if (x.size() % 2 == 0) { + for (auto i = 0; i < x.size() / 2; ++i) { + unique_x.push_back(x(i)); + } + } else { + for (auto i = 0; i < x.size() / 2; ++i) { + unique_x.push_back(x(i)); + } + auto x_new = 0.5 * (unique_x.back() + 0.5); + unique_x.push_back(x_new); + } + + tau_points = Eigen::VectorXd(2 * unique_x.size()); + double beta = impl->get_beta(); + for (auto i = 0; i < unique_x.size(); ++i) { + tau_points(i) = (beta / 2.0) * (unique_x[i] + 1.0); + tau_points(unique_x.size() + i) = -tau_points(i); + } + std::sort(tau_points.data(), tau_points.data() + tau_points.size()); + *n_points_returned = std::min(n_points, static_cast(tau_points.size())); } else { auto ir_basis = _safe_static_pointer_cast<_IRBasis>(impl); - std::cout << "debug " << ir_basis->get_impl()->sve_result->u->size() << std::endl; - tau_points = sparseir::default_sampling_points( + // Use default_tau_sampling_points() method which handles augmentation correctly + // But we need to request n_points, not size() + int sz = ir_basis->get_impl()->size(); + Eigen::VectorXd x = sparseir::default_sampling_points( *(ir_basis->get_impl()->sve_result->u), n_points ); - *n_points_returned = tau_points.size(); - } + + // Process like default_tau_sampling_points() but with n_points + std::vector unique_x; + if (x.size() % 2 == 0) { + for (auto i = 0; i < x.size() / 2; ++i) { + unique_x.push_back(x(i)); + } + } else { + for (auto i = 0; i < x.size() / 2; ++i) { + unique_x.push_back(x(i)); + } + auto x_new = 0.5 * (unique_x.back() + 0.5); + unique_x.push_back(x_new); + } - // Copy the requested number of points - // rescale the points to the original domain - for (int i = 0; i < *n_points_returned; ++i) { - tau_points(i) = (tau_points(i) + 1) / 2 * beta; - if (tau_points(i) > 0.5 * beta) 
{ - tau_points(i) -= beta; + tau_points = Eigen::VectorXd(2 * unique_x.size()); + double beta = impl->get_beta(); + for (auto i = 0; i < unique_x.size(); ++i) { + tau_points(i) = (beta / 2.0) * (unique_x[i] + 1.0); + tau_points(unique_x.size() + i) = -tau_points(i); } + std::sort(tau_points.data(), tau_points.data() + tau_points.size()); + *n_points_returned = std::min(n_points, static_cast(tau_points.size())); } + + // Copy the requested number of points (or available points if less) std::copy(tau_points.data(), tau_points.data() + *n_points_returned, points); return SPIR_COMPUTATION_SUCCESS; } catch (const std::exception &e) { @@ -1083,6 +2393,7 @@ int spir_basis_get_n_default_matsus(const spir_basis *b, bool positive_only, int auto impl = get_impl_basis(b); if (!impl) { + DEBUG_LOG("Error: Failed to get basis implementation"); return SPIR_GET_IMPL_FAILED; } @@ -1104,6 +2415,7 @@ int spir_basis_get_n_default_matsus(const spir_basis *b, bool positive_only, int return SPIR_COMPUTATION_SUCCESS; } } catch (const std::exception &e) { + DEBUG_LOG("Error in spir_basis_get_n_default_matsus: " + std::string(e.what())); return SPIR_GET_IMPL_FAILED; } } @@ -1174,37 +2486,50 @@ int spir_basis_get_n_default_matsus_ext(const spir_basis *b, bool positive_only, } } -int spir_basis_get_default_matsus_ext(const spir_basis *b, bool positive_only, int L, int64_t *points, int *n_points_returned) +int spir_basis_get_default_matsus_ext(const spir_basis *b, bool positive_only, bool mitigate, int n_points, int64_t *points, int *n_points_returned) { - if (!b || !points) { + if (!b || !points || !n_points_returned) { + DEBUG_LOG("Error: Invalid arguments in spir_basis_get_default_matsus_ext"); return SPIR_INVALID_ARGUMENT; } auto impl = get_impl_basis(b); if (!impl) { + DEBUG_LOG("Error: Failed to get basis implementation in spir_basis_get_default_matsus_ext"); return SPIR_GET_IMPL_FAILED; } if (!is_ir_basis(b)) { - DEBUG_LOG("Error: The basis is not an IR basis"); + DEBUG_LOG("Error: The 
basis is not an IR basis in spir_basis_get_default_matsus_ext"); return SPIR_INVALID_ARGUMENT; } try { + DEBUG_LOG("spir_basis_get_default_matsus_ext: statistics=" + std::to_string(impl->get_statistics()) + ", n_points=" + std::to_string(n_points) + ", positive_only=" + std::to_string(positive_only) + ", mitigate=" + std::to_string(mitigate)); if (impl->get_statistics() == SPIR_STATISTICS_FERMIONIC) { + DEBUG_LOG("spir_basis_get_default_matsus_ext: casting to Fermionic"); auto ir_basis = _safe_static_pointer_cast<_IRBasis>(impl); - auto matsubara_points = ir_basis->default_matsubara_sampling_points_ext(L, positive_only); + DEBUG_LOG("spir_basis_get_default_matsus_ext: calling default_matsubara_sampling_points_ext"); + auto matsubara_points = ir_basis->default_matsubara_sampling_points_ext(n_points, positive_only, mitigate); + DEBUG_LOG("spir_basis_get_default_matsus_ext: got " + std::to_string(matsubara_points.size()) + " points"); *n_points_returned = matsubara_points.size(); std::copy(matsubara_points.begin(), matsubara_points.end(), points); return SPIR_COMPUTATION_SUCCESS; } else { + DEBUG_LOG("spir_basis_get_default_matsus_ext: casting to Bosonic"); auto ir_basis = _safe_static_pointer_cast<_IRBasis>(impl); - auto matsubara_points = ir_basis->default_matsubara_sampling_points_ext(L, positive_only); + DEBUG_LOG("spir_basis_get_default_matsus_ext: calling default_matsubara_sampling_points_ext"); + auto matsubara_points = ir_basis->default_matsubara_sampling_points_ext(n_points, positive_only, mitigate); + DEBUG_LOG("spir_basis_get_default_matsus_ext: got " + std::to_string(matsubara_points.size()) + " points"); *n_points_returned = matsubara_points.size(); std::copy(matsubara_points.begin(), matsubara_points.end(), points); return SPIR_COMPUTATION_SUCCESS; } } catch (const std::exception &e) { + DEBUG_LOG("Exception in spir_basis_get_default_matsus_ext: " + std::string(e.what())); + return SPIR_GET_IMPL_FAILED; + } catch (...) 
{ + DEBUG_LOG("Unknown exception in spir_basis_get_default_matsus_ext"); return SPIR_GET_IMPL_FAILED; } } @@ -1227,6 +2552,74 @@ int spir_basis_get_stats(const spir_basis *b, } } +int spir_uhat_get_default_matsus(const spir_funcs *uhat, int L, bool positive_only, bool mitigate, int64_t *points, int *n_points_returned) +{ + if (!uhat || !points || !n_points_returned) { + return SPIR_INVALID_ARGUMENT; + } + + auto impl = get_impl_funcs(uhat); + if (!impl) { + return SPIR_GET_IMPL_FAILED; + } + + // Check if this is a MatsubaraBasisFunctions (not continuous functions) + if (impl->is_continuous_funcs()) { + DEBUG_LOG("Error: uhat must be a MatsubaraBasisFunctions (PiecewiseLegendreFTVector)"); + return SPIR_INVALID_ARGUMENT; + } + + try { + // Cast to AbstractMatsubaraFunctions + auto matsubara_impl = std::dynamic_pointer_cast(impl); + if (!matsubara_impl) { + DEBUG_LOG("Error: uhat is not a MatsubaraBasisFunctions"); + return SPIR_INVALID_ARGUMENT; + } + + // Try to cast to MatsubaraBasisFunctions> + auto fermionic_uhat = std::dynamic_pointer_cast>>(matsubara_impl); + if (fermionic_uhat) { + auto uhat_impl = fermionic_uhat->get_impl(); + bool fence = mitigate; + std::vector> matsubara_points = + sparseir::default_matsubara_sampling_points_impl(*uhat_impl, L, fence, positive_only); + *n_points_returned = matsubara_points.size(); + std::transform( + matsubara_points.begin(), matsubara_points.end(), points, + [](const sparseir::MatsubaraFreq &freq) { + return static_cast(freq.get_n()); + }); + return SPIR_COMPUTATION_SUCCESS; + } + + // Try to cast to MatsubaraBasisFunctions> + auto bosonic_uhat = std::dynamic_pointer_cast>>(matsubara_impl); + if (bosonic_uhat) { + auto uhat_impl = bosonic_uhat->get_impl(); + bool fence = mitigate; + std::vector> matsubara_points = + sparseir::default_matsubara_sampling_points_impl(*uhat_impl, L, fence, positive_only); + *n_points_returned = matsubara_points.size(); + std::transform( + matsubara_points.begin(), matsubara_points.end(), 
points, + [](const sparseir::MatsubaraFreq &freq) { + return static_cast(freq.get_n()); + }); + return SPIR_COMPUTATION_SUCCESS; + } + + DEBUG_LOG("Error: uhat is not a PiecewiseLegendreFTVector (Fermionic or Bosonic)"); + return SPIR_INVALID_ARGUMENT; + } catch (const std::exception &e) { + DEBUG_LOG("Exception in spir_uhat_get_default_matsus: " + std::string(e.what())); + return SPIR_INTERNAL_ERROR; + } catch (...) { + DEBUG_LOG("Unknown exception in spir_uhat_get_default_matsus"); + return SPIR_INTERNAL_ERROR; + } +} + int spir_funcs_eval(const spir_funcs *funcs, double x, double *out) { if (!out) { diff --git a/backend/cxx/src/cinterface_impl/helper_funcs.hpp b/backend/cxx/src/cinterface_impl/helper_funcs.hpp index 8db1bfef..72d74e38 100644 --- a/backend/cxx/src/cinterface_impl/helper_funcs.hpp +++ b/backend/cxx/src/cinterface_impl/helper_funcs.hpp @@ -470,6 +470,25 @@ int _spir_basis_get_uhat(const spir_basis *b, spir_funcs **uhat) } } +template +int _spir_basis_get_uhat_full(const spir_basis *b, spir_funcs **uhat_full) +{ + try { + auto impl = get_impl_basis(b); + if (!impl) { + return SPIR_GET_IMPL_FAILED; + } + if (!is_ir_basis(b)) { + // DLR basis does not have uhat_full + return SPIR_NOT_SUPPORTED; + } + *uhat_full = create_funcs(impl->get_uhat_full()); + return SPIR_COMPUTATION_SUCCESS; + } catch (const std::exception &e) { + return SPIR_GET_IMPL_FAILED; + } +} + template spir_basis* _spir_basis_new( diff --git a/backend/cxx/src/cinterface_impl/helper_types.hpp b/backend/cxx/src/cinterface_impl/helper_types.hpp index 2c29ceb1..b117fd16 100644 --- a/backend/cxx/src/cinterface_impl/helper_types.hpp +++ b/backend/cxx/src/cinterface_impl/helper_types.hpp @@ -1,6 +1,16 @@ #include "sparseir/sparseir.hpp" #include "_util.hpp" #include +#include +#include + +// DEBUG_LOG macro for helper_types.hpp +inline void DEBUG_LOG_HELPER(const std::string& msg) { + const char* env = std::getenv("SPARSEIR_DEBUG"); + if (env && std::string(env) == "1") { + std::cerr << 
"[DEBUG] " << msg << std::endl; + } +} class _AbstractFuncs { public: @@ -38,6 +48,11 @@ class MatsubaraBasisFunctions : public AbstractMatsubaraFunctions { { return _safe_dynamic_pointer_cast<_AbstractFuncs>(std::make_shared>(impl->slice(indices))); } + + // Getter for the underlying implementation + std::shared_ptr get_impl() const { + return impl; + } }; // Abstract class for functions of a single variable (in the imaginary time @@ -240,6 +255,7 @@ class AbstractFiniteTempBasis { virtual std::shared_ptr get_u() const = 0; virtual std::shared_ptr get_v() const = 0; virtual std::shared_ptr get_uhat() const = 0; + virtual std::shared_ptr get_uhat_full() const = 0; }; @@ -294,6 +310,14 @@ class _IRBasis : public AbstractFiniteTempBasis { sparseir::PiecewiseLegendreFTVector>>(impl->uhat)); } + virtual std::shared_ptr + get_uhat_full() const override + { + return std::static_pointer_cast( + std::make_shared>>(impl->uhat_full)); + } + std::shared_ptr> get_impl() const { return impl; @@ -310,29 +334,42 @@ class _IRBasis : public AbstractFiniteTempBasis { { bool fence = false; int L = size(); - - std::vector> matsubara_points = impl->default_matsubara_sampling_points(L, fence, positive_only); - std::vector points(matsubara_points.size()); - std::transform( - matsubara_points.begin(), matsubara_points.end(), points.begin(), - [](const sparseir::MatsubaraFreq &freq) { - return static_cast(freq.get_n()); - }); - return points; + + try { + std::vector> matsubara_points = impl->default_matsubara_sampling_points(L, fence, positive_only); + std::vector points(matsubara_points.size()); + std::transform( + matsubara_points.begin(), matsubara_points.end(), points.begin(), + [](const sparseir::MatsubaraFreq &freq) { + return static_cast(freq.get_n()); + }); + return points; + } catch (const std::exception &e) { + throw; + } } - std::vector default_matsubara_sampling_points_ext(int n_points, bool positive_only) const + std::vector default_matsubara_sampling_points_ext(int n_points, 
bool positive_only, bool mitigate = false) const { - bool fence = false; - - std::vector> matsubara_points = impl->default_matsubara_sampling_points(n_points, fence, positive_only); - std::vector points(matsubara_points.size()); - std::transform( - matsubara_points.begin(), matsubara_points.end(), points.begin(), - [](const sparseir::MatsubaraFreq &freq) { - return static_cast(freq.get_n()); - }); - return points; + bool fence = mitigate; + + try { + DEBUG_LOG_HELPER("[DEBUG _IRBasis::default_matsubara_sampling_points_ext] n_points=" + std::to_string(n_points) + ", positive_only=" + std::to_string(positive_only) + ", mitigate=" + std::to_string(mitigate) + ", fence=" + std::to_string(fence)); + DEBUG_LOG_HELPER("[DEBUG _IRBasis::default_matsubara_sampling_points_ext] calling impl->default_matsubara_sampling_points"); + std::vector> matsubara_points = impl->default_matsubara_sampling_points(n_points, fence, positive_only); + DEBUG_LOG_HELPER("[DEBUG _IRBasis::default_matsubara_sampling_points_ext] got " + std::to_string(matsubara_points.size()) + " points"); + std::vector points(matsubara_points.size()); + std::transform( + matsubara_points.begin(), matsubara_points.end(), points.begin(), + [](const sparseir::MatsubaraFreq &freq) { + return static_cast(freq.get_n()); + }); + DEBUG_LOG_HELPER("[DEBUG _IRBasis::default_matsubara_sampling_points_ext] returning " + std::to_string(points.size()) + " points"); + return points; + } catch (const std::exception &e) { + DEBUG_LOG_HELPER("[DEBUG _IRBasis::default_matsubara_sampling_points_ext] Exception: " + std::string(e.what())); + throw; + } } std::vector default_omega_sampling_points() const @@ -412,6 +449,13 @@ class DLRAdapter: public AbstractDLR { impl->uhat)); } + virtual std::shared_ptr + get_uhat_full() const override + { + // DLR basis does not have uhat_full + throw std::runtime_error("get_uhat_full is not supported for DLR basis"); + } + virtual std::vector get_poles() const override { Eigen::VectorXd eigen_poles 
= impl->poles; diff --git a/backend/cxx/test/_utils.hpp b/backend/cxx/test/_utils.hpp index 3c05deea..abf5f01f 100644 --- a/backend/cxx/test/_utils.hpp +++ b/backend/cxx/test/_utils.hpp @@ -22,11 +22,11 @@ inline spir_basis *_spir_basis_new(int32_t statistics, double beta, } // Create a pre-computed SVE result - double cutoff = -1.0; + // cutoff parameter is removed; it's automatically set to NaN internally to use default (2*eps) int lmax = -1; int n_gauss = -1; int Twork = SPIR_TWORK_AUTO; - sve = spir_sve_result_new(kernel, epsilon, cutoff, lmax, n_gauss, Twork, &status_); + sve = spir_sve_result_new(kernel, epsilon, lmax, n_gauss, Twork, &status_); if (status_ != SPIR_COMPUTATION_SUCCESS || sve == nullptr) { *status = status_; spir_kernel_release(kernel); diff --git a/backend/cxx/test/cinterface_core.cxx b/backend/cxx/test/cinterface_core.cxx index 914b71af..29cfe1b9 100644 --- a/backend/cxx/test/cinterface_core.cxx +++ b/backend/cxx/test/cinterface_core.cxx @@ -158,11 +158,11 @@ void test_finite_temp_basis_constructor_with_sve() REQUIRE(kernel != nullptr); int sve_status; - double cutoff = -1.0; + // cutoff parameter is removed; it's automatically set to NaN internally to use default (2*eps) int lmax = -1; int n_gauss = -1; int Twork = SPIR_TWORK_AUTO; - spir_sve_result *sve_result = spir_sve_result_new(kernel, epsilon, cutoff, lmax, n_gauss, Twork, &sve_status); + spir_sve_result *sve_result = spir_sve_result_new(kernel, epsilon, lmax, n_gauss, Twork, &sve_status); REQUIRE(sve_status == SPIR_COMPUTATION_SUCCESS); REQUIRE(sve_result != nullptr); @@ -222,11 +222,11 @@ TEST_CASE("FiniteTempBasis", "[cinterface]") REQUIRE(kernel != nullptr); int sve_status; - double cutoff = -1.0; + // cutoff parameter is removed; it's automatically set to NaN internally to use default (2*eps) int lmax = -1; int n_gauss = -1; int Twork = SPIR_TWORK_AUTO; - spir_sve_result *sve_result = spir_sve_result_new(kernel, epsilon, cutoff, lmax, n_gauss, Twork, &sve_status); + 
spir_sve_result *sve_result = spir_sve_result_new(kernel, epsilon, lmax, n_gauss, Twork, &sve_status); REQUIRE(sve_status == SPIR_COMPUTATION_SUCCESS); REQUIRE(sve_result != nullptr); diff --git a/backend/cxx/test/dlr.cxx b/backend/cxx/test/dlr.cxx index 22a33233..48a42bf2 100644 --- a/backend/cxx/test/dlr.cxx +++ b/backend/cxx/test/dlr.cxx @@ -250,7 +250,12 @@ TEST_CASE("DLR Tests", "[dlr]") sparseir::LogisticKernel kernel(beta * wmax); - sparseir::MatsubaraPoles mbp(beta, poles, wmax, kernel.template weight_func(sparseir::Bosonic{})); + // Convert kernel's 2-arg inv_weight_func to omega-only function + auto kernel_inv_weight_func = kernel.template inv_weight_func(sparseir::Bosonic{}); + std::function inv_weight_func = [kernel_inv_weight_func, beta](double omega) -> double { + return kernel_inv_weight_func(beta, omega); + }; + sparseir::MatsubaraPoles mbp(beta, poles, wmax, inv_weight_func); // Choose 100 random even integers between -234 and 13898 std::vector n; std::mt19937 gen(42); diff --git a/backend/cxx/test/kernel.cxx b/backend/cxx/test/kernel.cxx index b84e3736..d19d31ec 100644 --- a/backend/cxx/test/kernel.cxx +++ b/backend/cxx/test/kernel.cxx @@ -351,25 +351,43 @@ TEST_CASE("Kernel Accuracy Tests", "[kernel]") } } -TEST_CASE("weight_func", "[kernel]") +TEST_CASE("inv_weight_func", "[kernel]") { double lambda = 100.; double beta = 10.0; double omega_max = lambda / beta; + // LogisticKernel - Fermionic { sparseir::LogisticKernel K(lambda); - auto wf = K.weight_func(sparseir::Bosonic()); + auto inv_wf = K.inv_weight_func(sparseir::Fermionic()); double omega = 0.1 * omega_max; - REQUIRE(wf(beta, omega) == 1.0 / tanh(0.5 * beta * omega)); + REQUIRE(inv_wf(beta, omega) == 1.0); } - // RegularizedBoseKernel + // LogisticKernel - Bosonic + { + sparseir::LogisticKernel K(lambda); + auto inv_wf = K.inv_weight_func(sparseir::Bosonic()); + double omega = 0.1 * omega_max; + using std::tanh; + REQUIRE(inv_wf(beta, omega) == tanh(0.5 * beta * omega)); + } + + // 
RegularizedBoseKernel - Bosonic { sparseir::RegularizedBoseKernel K(lambda); - auto wf = K.weight_func(sparseir::Bosonic()); + auto inv_wf = K.inv_weight_func(sparseir::Bosonic()); double omega = 0.1 * omega_max; - REQUIRE(wf(beta, omega) == 1.0 / omega); + REQUIRE(inv_wf(beta, omega) == omega); + } + + // Test numerical stability: inv_weight_func should handle omega=0 better + { + sparseir::RegularizedBoseKernel K(lambda); + auto inv_wf = K.inv_weight_func(sparseir::Bosonic()); + double omega = 0.0; + REQUIRE(inv_wf(beta, omega) == 0.0); // No division by zero } } diff --git a/capi_benchmark/run_with_cxx_blas.sh b/capi_benchmark/run_with_cxx_blas.sh index 2eab0565..adc65fd0 100755 --- a/capi_benchmark/run_with_cxx_blas.sh +++ b/capi_benchmark/run_with_cxx_blas.sh @@ -17,6 +17,7 @@ echo -e "${GREEN}=== Building C++ backend with BLAS ===${NC}" BUILD_DIR="work_cxx_blas" INSTALL_DIR="$BUILD_DIR/install" +rm -rf "$BUILD_DIR/build_backend" mkdir -p "$BUILD_DIR/build_backend" mkdir -p "$INSTALL_DIR" diff --git a/capi_benchmark/run_with_cxx_wo_blas.sh b/capi_benchmark/run_with_cxx_wo_blas.sh index 344f82df..cc7d8765 100755 --- a/capi_benchmark/run_with_cxx_wo_blas.sh +++ b/capi_benchmark/run_with_cxx_wo_blas.sh @@ -17,6 +17,7 @@ echo -e "${GREEN}=== Building C++ backend with BLAS ===${NC}" BUILD_DIR="work_cxx_blas" INSTALL_DIR="$BUILD_DIR/install" +rm -rf "$BUILD_DIR/build_backend" mkdir -p "$BUILD_DIR/build_backend" mkdir -p "$INSTALL_DIR" diff --git a/capi_sample/run_sample.sh b/capi_sample/run_sample.sh index 0aba5327..f64d1f08 100755 --- a/capi_sample/run_sample.sh +++ b/capi_sample/run_sample.sh @@ -22,7 +22,11 @@ mkdir -p "$INSTALL_DIR" # Step 1: Build C++ backend with BLAS echo -e "${YELLOW}Building C++ backend with BLAS support...${NC}" -cd "$BUILD_DIR/build_backend" +cd "$BUILD_DIR" +# Clean build directory to remove old CMake cache with incorrect SDK paths +rm -rf build_backend +mkdir -p build_backend +cd build_backend cmake ../../../backend/cxx \ 
-DCMAKE_BUILD_TYPE=Release \ diff --git a/capi_sample/sample1.c b/capi_sample/sample1.c index c64092e9..5633162a 100644 --- a/capi_sample/sample1.c +++ b/capi_sample/sample1.c @@ -22,11 +22,11 @@ assert(status == SPIR_COMPUTATION_SUCCESS); assert(kernel != NULL); // Create a pre-computed SVE result -double cutoff = -1.0; +// cutoff parameter is removed; it's automatically set to NaN internally to use default (2*eps) int lmax = -1; int n_gauss = -1; int Twork = SPIR_TWORK_FLOAT64X2; -spir_sve_result* sve = spir_sve_result_new(kernel, epsilon, cutoff, lmax, n_gauss, Twork, &status); +spir_sve_result* sve = spir_sve_result_new(kernel, epsilon, lmax, n_gauss, Twork, &status); assert(status == SPIR_COMPUTATION_SUCCESS); assert(sve != NULL); diff --git a/capi_sample/sample2.c b/capi_sample/sample2.c index 7fe4b4af..55e354b2 100644 --- a/capi_sample/sample2.c +++ b/capi_sample/sample2.c @@ -22,11 +22,11 @@ assert(status == SPIR_COMPUTATION_SUCCESS); assert(logistic_kernel != NULL); // Create a pre-computed SVE result -double cutoff = -1.0; +// cutoff parameter is removed; it's automatically set to NaN internally to use default (2*eps) int lmax = -1; int n_gauss = -1; int Twork = SPIR_TWORK_FLOAT64X2; -spir_sve_result* sve_logistic = spir_sve_result_new(logistic_kernel, epsilon, cutoff, lmax, n_gauss, Twork, &status); +spir_sve_result* sve_logistic = spir_sve_result_new(logistic_kernel, epsilon, lmax, n_gauss, Twork, &status); assert(status == SPIR_COMPUTATION_SUCCESS); assert(sve_logistic != NULL); @@ -45,7 +45,7 @@ spir_kernel* regularized_kernel = spir_reg_bose_kernel_new(beta * omega_max, &st assert(status == SPIR_COMPUTATION_SUCCESS); assert(regularized_kernel != NULL); -spir_sve_result* sve_regularized = spir_sve_result_new(regularized_kernel, epsilon, cutoff, lmax, n_gauss, Twork, &status); +spir_sve_result* sve_regularized = spir_sve_result_new(regularized_kernel, epsilon, lmax, n_gauss, Twork, &status); assert(status == SPIR_COMPUTATION_SUCCESS); 
assert(sve_regularized != NULL); diff --git a/capi_sample/sample3.c b/capi_sample/sample3.c index f8a683a2..85b20031 100644 --- a/capi_sample/sample3.c +++ b/capi_sample/sample3.c @@ -21,11 +21,11 @@ assert(status == SPIR_COMPUTATION_SUCCESS); assert(kernel != NULL); // Create a pre-computed SVE result -double cutoff = -1.0; +// cutoff parameter is removed; it's automatically set to NaN internally to use default (2*eps) int lmax = -1; int n_gauss = -1; int Twork = SPIR_TWORK_FLOAT64X2; -spir_sve_result* sve_logistic = spir_sve_result_new(kernel, epsilon, cutoff, lmax, n_gauss, Twork, &status); +spir_sve_result* sve_logistic = spir_sve_result_new(kernel, epsilon, lmax, n_gauss, Twork, &status); assert(status == SPIR_COMPUTATION_SUCCESS); assert(sve_logistic != NULL); diff --git a/capi_test/CMakeLists.txt b/capi_test/CMakeLists.txt index 42c6c604..ab304cd7 100644 --- a/capi_test/CMakeLists.txt +++ b/capi_test/CMakeLists.txt @@ -8,13 +8,13 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON) find_package(SparseIR REQUIRED) # Find Eigen3 -find_package(Eigen3 3.4.0 QUIET NO_MODULE) +find_package(Eigen3 3.4.1 QUIET NO_MODULE) if(NOT Eigen3_FOUND) message(STATUS "Eigen3 not found in system, fetching from GitHub...") include(FetchContent) FetchContent_Declare(Eigen3 GIT_REPOSITORY https://gitlab.com/libeigen/eigen.git - GIT_TAG 3.4.0 + GIT_TAG 3.4.1 ) FetchContent_MakeAvailable(Eigen3) endif() @@ -62,6 +62,26 @@ if(CMAKE_CXX_COMPILER_ID MATCHES "Intel") target_compile_options(cinterface_integration PRIVATE -fp-model=precise) endif() +# C-API core test +add_executable(cinterface_core cinterface_core.cxx) + +target_include_directories(cinterface_core PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR} + ${EIGEN3_INCLUDE_DIR} +) + +target_link_libraries(cinterface_core PRIVATE + Catch2::Catch2WithMain + SparseIR::sparseir + Eigen3::Eigen + Threads::Threads +) + +# Intel Compiler: disable fastmath optimizations that break xprec's extended precision +if(CMAKE_CXX_COMPILER_ID MATCHES "Intel") + 
target_compile_options(cinterface_core PRIVATE -fp-model=precise) +endif() + # Optional BLAS linking # Check if ILP64 BLAS is requested (via environment variable or CMake option) if(DEFINED ENV{SPARSEIR_USE_BLAS_ILP64} AND "$ENV{SPARSEIR_USE_BLAS_ILP64}") @@ -75,8 +95,10 @@ endif() find_package(BLAS QUIET) if(BLAS_FOUND) target_link_libraries(cinterface_integration PRIVATE ${BLAS_LIBRARIES}) + target_link_libraries(cinterface_core PRIVATE ${BLAS_LIBRARIES}) endif() # Enable testing enable_testing() add_test(NAME cinterface_integration COMMAND cinterface_integration -d yes) +add_test(NAME cinterface_core COMMAND cinterface_core -d yes) diff --git a/capi_test/_utils.hpp b/capi_test/_utils.hpp index 205d431e..9df27e7d 100644 --- a/capi_test/_utils.hpp +++ b/capi_test/_utils.hpp @@ -22,11 +22,11 @@ inline spir_basis *_spir_basis_new(int32_t statistics, double beta, } // Create a pre-computed SVE result - double cutoff = -1.0; + // cutoff parameter is removed; it's automatically set to NaN internally to use default (2*eps) int lmax = -1; int n_gauss = -1; int Twork = SPIR_TWORK_AUTO; - sve = spir_sve_result_new(kernel, epsilon, cutoff, lmax, n_gauss, Twork, &status_); + sve = spir_sve_result_new(kernel, epsilon, lmax, n_gauss, Twork, &status_); if (status_ != SPIR_COMPUTATION_SUCCESS || sve == nullptr) { *status = status_; spir_kernel_release(kernel); diff --git a/capi_test/cinterface_core.cxx b/capi_test/cinterface_core.cxx new file mode 100644 index 00000000..2b3d7297 --- /dev/null +++ b/capi_test/cinterface_core.cxx @@ -0,0 +1,787 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include // C interface + +using Catch::Approx; + +TEST_CASE("Test spir_kernel_get_sve_hints_nsvals", "[cinterface]") +{ + double lambda = 10.0; + double epsilon = 1e-8; + + int status; + spir_kernel* kernel = spir_logistic_kernel_new(lambda, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(kernel != nullptr); + + int nsvals; + 
status = spir_kernel_get_sve_hints_nsvals(kernel, epsilon, &nsvals); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(nsvals > 0); + + // For lambda=10, log10(10)=1, so nsvals should be around (25+1)*1 = 26 + // But actual formula may vary, so just check it's reasonable + REQUIRE(nsvals >= 10); + REQUIRE(nsvals <= 1000); + + spir_kernel_release(kernel); +} + +TEST_CASE("Test spir_kernel_get_sve_hints_ngauss", "[cinterface]") +{ + double lambda = 10.0; + double epsilon_coarse = 1e-6; + double epsilon_fine = 1e-10; + + int status; + spir_kernel* kernel = spir_logistic_kernel_new(lambda, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(kernel != nullptr); + + int ngauss_coarse; + status = spir_kernel_get_sve_hints_ngauss(kernel, epsilon_coarse, &ngauss_coarse); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(ngauss_coarse > 0); + + int ngauss_fine; + status = spir_kernel_get_sve_hints_ngauss(kernel, epsilon_fine, &ngauss_fine); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(ngauss_fine > 0); + + // For epsilon >= 1e-8, ngauss should be 10 + // For epsilon < 1e-8, ngauss should be 16 + REQUIRE(ngauss_coarse == 10); + REQUIRE(ngauss_fine == 16); + + spir_kernel_release(kernel); +} + +TEST_CASE("Test spir_kernel_get_sve_hints_segments_x", "[cinterface]") +{ + double lambda = 10.0; + double epsilon = 1e-8; + + int status; + spir_kernel* kernel = spir_logistic_kernel_new(lambda, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(kernel != nullptr); + + // First call: get the number of segments + int n_segments = 0; + status = spir_kernel_get_sve_hints_segments_x(kernel, epsilon, nullptr, &n_segments); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(n_segments > 0); + + // Second call: get the actual segments + std::vector segments(n_segments); + int n_segments_out = n_segments; + status = spir_kernel_get_sve_hints_segments_x(kernel, epsilon, segments.data(), &n_segments_out); + REQUIRE(status == 
SPIR_COMPUTATION_SUCCESS); + REQUIRE(n_segments_out == n_segments); + + // Verify segments are valid + REQUIRE(segments.size() == static_cast(n_segments)); + REQUIRE(segments[0] == Approx(-1.0).margin(1e-10)); + REQUIRE(segments[n_segments - 1] == Approx(1.0).margin(1e-10)); + + // Verify segments are in ascending order + for (int i = 1; i < n_segments; ++i) { + REQUIRE(segments[i] > segments[i-1]); + } + + spir_kernel_release(kernel); +} + +TEST_CASE("Test spir_kernel_get_sve_hints_segments_y", "[cinterface]") +{ + double lambda = 10.0; + double epsilon = 1e-8; + + int status; + spir_kernel* kernel = spir_logistic_kernel_new(lambda, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(kernel != nullptr); + + // First call: get the number of segments + int n_segments = 0; + status = spir_kernel_get_sve_hints_segments_y(kernel, epsilon, nullptr, &n_segments); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(n_segments > 0); + + // Second call: get the actual segments + std::vector segments(n_segments); + int n_segments_out = n_segments; + status = spir_kernel_get_sve_hints_segments_y(kernel, epsilon, segments.data(), &n_segments_out); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(n_segments_out == n_segments); + + // Verify segments are valid + REQUIRE(segments.size() == static_cast(n_segments)); + REQUIRE(segments[0] == Approx(-1.0).margin(1e-10)); + REQUIRE(segments[n_segments - 1] == Approx(1.0).margin(1e-10)); + + // Verify segments are in ascending order + for (int i = 1; i < n_segments; ++i) { + REQUIRE(segments[i] > segments[i-1]); + } + + spir_kernel_release(kernel); +} + +TEST_CASE("Test spir_kernel_get_sve_hints with RegularizedBoseKernel", "[cinterface]") +{ + double lambda = 10.0; + double epsilon = 1e-8; + + int status; + spir_kernel* kernel = spir_reg_bose_kernel_new(lambda, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(kernel != nullptr); + + // Test nsvals + int nsvals; + status = 
spir_kernel_get_sve_hints_nsvals(kernel, epsilon, &nsvals); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(nsvals > 0); + + // Test ngauss + int ngauss; + status = spir_kernel_get_sve_hints_ngauss(kernel, epsilon, &ngauss); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(ngauss > 0); + + // Test segments_x + int n_segments_x = 0; + status = spir_kernel_get_sve_hints_segments_x(kernel, epsilon, nullptr, &n_segments_x); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(n_segments_x > 0); + + // Test segments_y + int n_segments_y = 0; + status = spir_kernel_get_sve_hints_segments_y(kernel, epsilon, nullptr, &n_segments_y); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(n_segments_y > 0); + + spir_kernel_release(kernel); +} + +TEST_CASE("Test spir_kernel_get_sve_hints error handling", "[cinterface]") +{ + double lambda = 10.0; + double epsilon = 1e-8; + + int status; + spir_kernel* kernel = spir_logistic_kernel_new(lambda, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(kernel != nullptr); + + // Test with nullptr kernel + int nsvals; + status = spir_kernel_get_sve_hints_nsvals(nullptr, epsilon, &nsvals); + REQUIRE(status != SPIR_COMPUTATION_SUCCESS); + + // Test with nullptr output parameter + status = spir_kernel_get_sve_hints_nsvals(kernel, epsilon, nullptr); + REQUIRE(status != SPIR_COMPUTATION_SUCCESS); + + spir_kernel_release(kernel); +} + +TEST_CASE("Test spir_choose_working_type", "[cinterface]") +{ + // Test with epsilon >= 1e-8 -> should return FLOAT64 + { + int twork = spir_choose_working_type(1e-6); + REQUIRE(twork == SPIR_TWORK_FLOAT64); + } + + { + int twork = spir_choose_working_type(1e-8); + REQUIRE(twork == SPIR_TWORK_FLOAT64); + } + + // Test with epsilon < 1e-8 -> should return FLOAT64X2 + { + int twork = spir_choose_working_type(1e-10); + REQUIRE(twork == SPIR_TWORK_FLOAT64X2); + } + + { + int twork = spir_choose_working_type(1e-15); + REQUIRE(twork == SPIR_TWORK_FLOAT64X2); + } + + // Test with 
NaN -> should return FLOAT64X2 + { + int twork = spir_choose_working_type(std::numeric_limits::quiet_NaN()); + REQUIRE(twork == SPIR_TWORK_FLOAT64X2); + } + + // Test boundary case: epsilon = 1e-8 exactly + { + int twork = spir_choose_working_type(1e-8); + REQUIRE(twork == SPIR_TWORK_FLOAT64); + } + + // Test boundary case: epsilon just below 1e-8 + { + int twork = spir_choose_working_type(0.99e-8); + REQUIRE(twork == SPIR_TWORK_FLOAT64X2); + } +} + +TEST_CASE("Test spir_funcs_from_piecewise_legendre", "[cinterface]") +{ + // Create a simple piecewise polynomial: constant function = 1.0 on [-1, 1] + // Single segment, nfuncs=1 (only degree 0 Legendre polynomial) + { + int n_segments = 1; + double segments[2] = {-1.0, 1.0}; + double coeffs[1] = {1.0}; // Only constant term + int nfuncs = 1; + int order = 0; + + int status; + spir_funcs* funcs = spir_funcs_from_piecewise_legendre( + segments, n_segments, coeffs, nfuncs, order, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(funcs != nullptr); + + // Test evaluation at x=0 (should be normalized, so value depends on normalization) + int size; + status = spir_funcs_get_size(funcs, &size); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(size == 1); + + // Evaluate at x=0 + double x = 0.0; + double values[1]; + status = spir_funcs_eval(funcs, x, values); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + // Value should be approximately 1.0 * normalization factor + REQUIRE(values[0] == Approx(1.0).margin(1e-10)); + + spir_funcs_release(funcs); + } + + // Create a linear function: f(x) = x on [-1, 1] + // Single segment, nfuncs=2 (degrees 0 and 1) + { + int n_segments = 1; + double segments[2] = {-1.0, 1.0}; + // Legendre expansion: P0(x) = 1, P1(x) = x + // For f(x) = x, we need coefficient 0 for P0 and 1 for P1 + // But normalization affects the actual values + double coeffs[2] = {0.0, 1.0}; // Constant=0, linear=1 + int nfuncs = 2; + int order = 0; + + int status; + spir_funcs* funcs = 
spir_funcs_from_piecewise_legendre( + segments, n_segments, coeffs, nfuncs, order, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(funcs != nullptr); + + // Evaluate at x=0.5 + double x = 0.5; + double values[2]; + status = spir_funcs_eval(funcs, x, values); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + // Value should be approximately 0.5 (with normalization) + // Note: PiecewiseLegendrePoly applies normalization, so the actual value may differ + // We just check that evaluation succeeds and returns a reasonable value + REQUIRE(std::abs(values[0]) < 10.0); // Reasonable bound + + spir_funcs_release(funcs); + } + + // Test error handling: invalid arguments + { + int status; + spir_funcs* funcs = spir_funcs_from_piecewise_legendre( + nullptr, 1, nullptr, 1, 0, &status); + REQUIRE(status != SPIR_COMPUTATION_SUCCESS); + REQUIRE(funcs == nullptr); + } + + // Test error handling: n_segments < 1 + { + double segments[2] = {-1.0, 1.0}; + double coeffs[1] = {1.0}; + int status; + spir_funcs* funcs = spir_funcs_from_piecewise_legendre( + segments, 0, coeffs, 1, 0, &status); + REQUIRE(status != SPIR_COMPUTATION_SUCCESS); + REQUIRE(funcs == nullptr); + } + + // Test error handling: non-monotonic segments + { + double segments[2] = {1.0, -1.0}; // Wrong order + double coeffs[1] = {1.0}; + int status; + spir_funcs* funcs = spir_funcs_from_piecewise_legendre( + segments, 1, coeffs, 1, 0, &status); + REQUIRE(status != SPIR_COMPUTATION_SUCCESS); + REQUIRE(funcs == nullptr); + } +} + +TEST_CASE("Test spir_gauss_legendre_rule_piecewise_double", "[cinterface]") +{ + // Test with single segment [-1, 1] + { + int n = 5; + double segments[2] = {-1.0, 1.0}; + int n_segments = 1; + double x[5], w[5]; + int status; + + status = spir_gauss_legendre_rule_piecewise_double( + n, segments, n_segments, x, w, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + // Verify we got n points + // Points should be in [-1, 1] and sorted + REQUIRE(x[0] >= -1.0); + 
REQUIRE(x[n-1] <= 1.0); + for (int i = 1; i < n; ++i) { + REQUIRE(x[i] > x[i-1]); + } + + // Weights should be positive + for (int i = 0; i < n; ++i) { + REQUIRE(w[i] > 0.0); + } + + // Weight sum should be approximately 1.0 (integral over [-1, 1] is 2, but normalized) + // Actually, for Gauss-Legendre on [-1, 1], sum of weights should be 2.0 + double weight_sum = 0.0; + for (int i = 0; i < n; ++i) { + weight_sum += w[i]; + } + REQUIRE(weight_sum == Approx(2.0).margin(1e-10)); // Integral over [-1, 1] is 2 + } + + // Test with two segments [-1, 0, 1] + { + int n = 3; + double segments[3] = {-1.0, 0.0, 1.0}; + int n_segments = 2; + double x[6], w[6]; // n * n_segments + int status; + + status = spir_gauss_legendre_rule_piecewise_double( + n, segments, n_segments, x, w, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + // Verify we got n * n_segments points + // Points should be sorted across segments + REQUIRE(x[0] >= -1.0); + REQUIRE(x[5] <= 1.0); + for (int i = 1; i < 6; ++i) { + REQUIRE(x[i] > x[i-1]); + } + + // Weights should be positive + for (int i = 0; i < 6; ++i) { + REQUIRE(w[i] > 0.0); + } + + // Weight sum should be approximately 2.0 (integral over [-1, 1]) + double weight_sum = 0.0; + for (int i = 0; i < 6; ++i) { + weight_sum += w[i]; + } + REQUIRE(weight_sum == Approx(2.0).margin(1e-10)); + } + + // Test error handling + { + int status; + status = spir_gauss_legendre_rule_piecewise_double( + 5, nullptr, 1, nullptr, nullptr, &status); + REQUIRE(status != SPIR_COMPUTATION_SUCCESS); + } + + { + double segments[2] = {-1.0, 1.0}; + double x[5], w[5]; + int status; + status = spir_gauss_legendre_rule_piecewise_double( + 0, segments, 1, x, w, &status); + REQUIRE(status != SPIR_COMPUTATION_SUCCESS); + } + + { + double segments[2] = {1.0, -1.0}; // Wrong order + double x[5], w[5]; + int status; + status = spir_gauss_legendre_rule_piecewise_double( + 5, segments, 1, x, w, &status); + REQUIRE(status != SPIR_COMPUTATION_SUCCESS); + } +} + 
+TEST_CASE("Test spir_gauss_legendre_rule_piecewise_ddouble", "[cinterface]") +{ + // Test with single segment [-1, 1] + { + int n = 5; + double segments[2] = {-1.0, 1.0}; + int n_segments = 1; + double x_high[5], x_low[5], w_high[5], w_low[5]; + int status; + + status = spir_gauss_legendre_rule_piecewise_ddouble( + n, segments, n_segments, x_high, x_low, w_high, w_low, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + // Verify we got n points + // Points should be in [-1, 1] and sorted + REQUIRE(x_high[0] >= -1.0); + REQUIRE(x_high[n-1] <= 1.0); + for (int i = 1; i < n; ++i) { + // Reconstruct DDouble and compare + double x_val = x_high[i] + x_low[i]; + double x_prev = x_high[i-1] + x_low[i-1]; + REQUIRE(x_val > x_prev); + } + + // Weights should be positive + double weight_sum = 0.0; + for (int i = 0; i < n; ++i) { + double w_val = w_high[i] + w_low[i]; + REQUIRE(w_val > 0.0); + weight_sum += w_val; + } + // Weight sum should be approximately 2.0 (integral over [-1, 1]) + REQUIRE(weight_sum == Approx(2.0).margin(1e-10)); + } + + // Test with two segments [-1, 0, 1] + { + int n = 3; + double segments[3] = {-1.0, 0.0, 1.0}; + int n_segments = 2; + double x_high[6], x_low[6], w_high[6], w_low[6]; + int status; + + status = spir_gauss_legendre_rule_piecewise_ddouble( + n, segments, n_segments, x_high, x_low, w_high, w_low, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + // Verify points are sorted + for (int i = 1; i < 6; ++i) { + double x_val = x_high[i] + x_low[i]; + double x_prev = x_high[i-1] + x_low[i-1]; + REQUIRE(x_val > x_prev); + } + + // Weight sum should be approximately 2.0 (integral over [-1, 1]) + double weight_sum = 0.0; + for (int i = 0; i < 6; ++i) { + double w_val = w_high[i] + w_low[i]; + weight_sum += w_val; + } + REQUIRE(weight_sum == Approx(2.0).margin(1e-10)); + } + + // Test error handling + { + int status; + status = spir_gauss_legendre_rule_piecewise_ddouble( + 5, nullptr, 1, nullptr, nullptr, nullptr, nullptr, 
&status); + REQUIRE(status != SPIR_COMPUTATION_SUCCESS); + } +} + +TEST_CASE("Test spir_sve_result_from_matrix", "[cinterface]") +{ + double lambda = 10.0; + double epsilon = 1e-8; + + // Create kernel and get SVE hints + int status; + spir_kernel* kernel = spir_logistic_kernel_new(lambda, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(kernel != nullptr); + + // Get SVE hints + int n_gauss; + status = spir_kernel_get_sve_hints_ngauss(kernel, epsilon, &n_gauss); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + int n_segments_x = 0; + status = spir_kernel_get_sve_hints_segments_x(kernel, epsilon, nullptr, &n_segments_x); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + int n_segments_y = 0; + status = spir_kernel_get_sve_hints_segments_y(kernel, epsilon, nullptr, &n_segments_y); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + // Get segments + std::vector segments_x(n_segments_x + 1); + int n_segments_x_out = n_segments_x + 1; + status = spir_kernel_get_sve_hints_segments_x(kernel, epsilon, segments_x.data(), &n_segments_x_out); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + std::vector segments_y(n_segments_y + 1); + int n_segments_y_out = n_segments_y + 1; + status = spir_kernel_get_sve_hints_segments_y(kernel, epsilon, segments_y.data(), &n_segments_y_out); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + // Get Gauss points and weights + // Note: n_segments_x and n_segments_y are the number of boundary points (n_segments + 1), + // but spir_gauss_legendre_rule_piecewise_double expects the number of segments (n_segments) + int nx = n_gauss * (n_segments_x - 1); // n_segments_x - 1 is the number of segments + int ny = n_gauss * (n_segments_y - 1); // n_segments_y - 1 is the number of segments + std::vector x(nx), w_x(nx); + std::vector y(ny), w_y(ny); + + int status_gauss; + status_gauss = spir_gauss_legendre_rule_piecewise_double( + n_gauss, segments_x.data(), n_segments_x - 1, x.data(), w_x.data(), &status_gauss); // n_segments_x 
- 1 is the number of segments + REQUIRE(status_gauss == SPIR_COMPUTATION_SUCCESS); + + status_gauss = spir_gauss_legendre_rule_piecewise_double( + n_gauss, segments_y.data(), n_segments_y - 1, y.data(), w_y.data(), &status_gauss); // n_segments_y - 1 is the number of segments + REQUIRE(status_gauss == SPIR_COMPUTATION_SUCCESS); + + // Create a simple test kernel matrix + // Note: In practice, this would be computed from the actual kernel + std::vector K_high(nx * ny); + for (int i = 0; i < nx; ++i) { + for (int j = 0; j < ny; ++j) { + // Simple test: scaled identity-like matrix + double k_val = (i == j) ? std::sqrt(w_x[i] * w_y[j]) : 0.0; + K_high[i * ny + j] = k_val; + } + } + + // Create SVE result from matrix (row major) + spir_sve_result* sve_from_matrix = spir_sve_result_from_matrix( + K_high.data(), nullptr, nx, ny, SPIR_ORDER_ROW_MAJOR, + segments_x.data(), n_segments_x, + segments_y.data(), n_segments_y, + n_gauss, epsilon, &status); + + // Note: The test matrix is very simple, so the SVE result may not be meaningful + // But we can at least verify the function doesn't crash and returns a valid result + if (status == SPIR_COMPUTATION_SUCCESS && sve_from_matrix != nullptr) { + int sve_size; + status = spir_sve_result_get_size(sve_from_matrix, &sve_size); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + spir_sve_result_release(sve_from_matrix); + } + + // Test error handling + { + spir_sve_result* sve_err = spir_sve_result_from_matrix( + nullptr, nullptr, nx, ny, SPIR_ORDER_ROW_MAJOR, + segments_x.data(), n_segments_x, + segments_y.data(), n_segments_y, + n_gauss, epsilon, &status); + REQUIRE(status != SPIR_COMPUTATION_SUCCESS); + REQUIRE(sve_err == nullptr); + } + + // Test column major order + { + std::vector K_col(nx * ny); + for (int j = 0; j < ny; ++j) { + for (int i = 0; i < nx; ++i) { + K_col[j * nx + i] = (i == j) ? 
std::sqrt(w_x[i] * w_y[j]) : 0.0; + } + } + + spir_sve_result* sve_col = spir_sve_result_from_matrix( + K_col.data(), nullptr, nx, ny, SPIR_ORDER_COLUMN_MAJOR, + segments_x.data(), n_segments_x, + segments_y.data(), n_segments_y, + n_gauss, epsilon, &status); + + if (status == SPIR_COMPUTATION_SUCCESS && sve_col != nullptr) { + spir_sve_result_release(sve_col); + } + } + + spir_kernel_release(kernel); +} + +TEST_CASE("Test spir_basis_new_from_sve_and_inv_weight", "[cinterface]") +{ + double lambda = 10.0; + double beta = 1.0; + double omega_max = lambda / beta; + double epsilon = 1e-8; + + // Create kernel and SVE result + int status; + spir_kernel* kernel = spir_logistic_kernel_new(lambda, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(kernel != nullptr); + + // cutoff parameter is removed; it's automatically set to NaN internally to use default (2*eps) + spir_sve_result* sve = spir_sve_result_new( + kernel, epsilon, -1, -1, SPIR_TWORK_AUTO, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(sve != nullptr); + + // Create inv_weight_func as spir_funcs + // For LogisticKernel with Fermionic statistics, inv_weight_func(omega) = 1.0 + // We'll create a simple constant function + int n_segments = 1; + double segments[2] = {-omega_max, omega_max}; // Full omega range + double coeffs[1] = {1.0}; // Constant function = 1.0 + int nfuncs = 1; + int order = 0; + + spir_funcs* inv_weight_funcs = spir_funcs_from_piecewise_legendre( + segments, n_segments, coeffs, nfuncs, order, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(inv_weight_funcs != nullptr); + + // Create basis using new function + // For LogisticKernel: ypower=0, conv_radius=1.0 (typical values) + spir_basis* basis = spir_basis_new_from_sve_and_inv_weight( + SPIR_STATISTICS_FERMIONIC, beta, omega_max, epsilon, + lambda, 0, 1.0, // ypower=0, conv_radius=1.0 + sve, inv_weight_funcs, -1, &status); + + if (status == SPIR_COMPUTATION_SUCCESS && basis != nullptr) 
{ + int basis_size; + status = spir_basis_get_size(basis, &basis_size); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(basis_size > 0); + + spir_basis_release(basis); + } + + // Test error handling + { + spir_basis* basis_err = spir_basis_new_from_sve_and_inv_weight( + SPIR_STATISTICS_FERMIONIC, beta, omega_max, epsilon, + lambda, 0, 1.0, + nullptr, inv_weight_funcs, -1, &status); + REQUIRE(status != SPIR_COMPUTATION_SUCCESS); + REQUIRE(basis_err == nullptr); + } + + { + spir_basis* basis_err = spir_basis_new_from_sve_and_inv_weight( + SPIR_STATISTICS_FERMIONIC, beta, omega_max, epsilon, + lambda, 0, 1.0, + sve, nullptr, -1, &status); + REQUIRE(status != SPIR_COMPUTATION_SUCCESS); + REQUIRE(basis_err == nullptr); + } + + spir_funcs_release(inv_weight_funcs); + spir_sve_result_release(sve); + spir_kernel_release(kernel); +} + +TEST_CASE("Test spir_basis_get_default_matsus_ext with fence", "[cinterface]") +{ + double beta = 10.0; + double wmax = 10.0; + double epsilon = 1e-8; + + int status; + spir_kernel* kernel = spir_logistic_kernel_new(beta * wmax, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + // cutoff parameter is removed; it's automatically set to NaN internally to use default (2*eps) + spir_sve_result* sve = spir_sve_result_new(kernel, epsilon, -1, -1, SPIR_TWORK_AUTO, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + spir_basis* basis = spir_basis_new(SPIR_STATISTICS_FERMIONIC, beta, wmax, epsilon, kernel, sve, -1, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + int basis_size; + status = spir_basis_get_size(basis, &basis_size); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + // Test without mitigation (fence = false) + { + bool positive_only = false; + bool mitigate = false; + int n_points_requested = basis_size; + + int n_points_returned = 0; + std::vector<int64_t> points(n_points_requested + 10); + + status = spir_basis_get_default_matsus_ext( + basis, positive_only, mitigate, n_points_requested, points.data(), 
&n_points_returned); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(n_points_returned >= n_points_requested); + REQUIRE(n_points_returned <= n_points_requested + 10); + + // Verify points are valid fermionic frequencies (odd integers) + for (int i = 0; i < n_points_returned; ++i) { + REQUIRE(llabs(points[i]) % 2 == 1); + } + } + + // Test with mitigation (fence = true) + { + bool positive_only = false; + bool mitigate = true; + int n_points_requested = basis_size; + + int n_points_returned = 0; + std::vector<int64_t> points(n_points_requested + 20); // Extra space for fence points + + status = spir_basis_get_default_matsus_ext( + basis, positive_only, mitigate, n_points_requested, points.data(), &n_points_returned); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(n_points_returned >= n_points_requested); // May exceed when mitigate is true + + // Verify points are valid fermionic frequencies (odd integers) + for (int i = 0; i < n_points_returned; ++i) { + REQUIRE(llabs(points[i]) % 2 == 1); + } + + // When mitigate is true and basis_size >= 20, we should have more points + // Note: This may not always be true due to rounding, so we just check it's >= requested + if (basis_size >= 20 && n_points_returned > n_points_requested) { + // Extra points due to fencing were added + } + } + + // Test positive_only = true with mitigation + { + bool positive_only = true; + bool mitigate = true; + int n_points_requested = basis_size; + + int n_points_returned = 0; + std::vector<int64_t> points(n_points_requested + 20); + + status = spir_basis_get_default_matsus_ext( + basis, positive_only, mitigate, n_points_requested, points.data(), &n_points_returned); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(n_points_returned > 0); + REQUIRE(n_points_returned <= basis_size + 10); // May have some extra points due to fencing + + // Verify all points are positive and odd + for (int i = 0; i < n_points_returned; ++i) { + REQUIRE(points[i] > 0); + REQUIRE(points[i] % 2 == 1); + } 
+ } + + spir_basis_release(basis); + spir_sve_result_release(sve); + spir_kernel_release(kernel); +} diff --git a/capi_test/cinterface_integration.cxx b/capi_test/cinterface_integration.cxx index 177e15a3..962718c0 100644 --- a/capi_test/cinterface_integration.cxx +++ b/capi_test/cinterface_integration.cxx @@ -397,7 +397,8 @@ void integration_test(double beta, double wmax, double epsilon, } int status; - spir_sve_result* sve = spir_sve_result_new(kernel, epsilon, -1.0, -1, -1, SPIR_TWORK_AUTO, &status); + // cutoff parameter is removed; it's automatically set to NaN internally to use default (2*eps) + spir_sve_result* sve = spir_sve_result_new(kernel, epsilon, -1, -1, SPIR_TWORK_AUTO, &status); REQUIRE(status == SPIR_COMPUTATION_SUCCESS); REQUIRE(sve != nullptr); diff --git a/capi_test/test_with_cxx_backend.sh b/capi_test/test_with_cxx_backend.sh index ebe30c09..37abc352 100755 --- a/capi_test/test_with_cxx_backend.sh +++ b/capi_test/test_with_cxx_backend.sh @@ -23,11 +23,12 @@ echo -e "${GREEN}=== Testing capi_test with C++ backend ===${NC}" # Step 1: Build and install backend/cxx (without tests) echo -e "${YELLOW}Step 1: Building backend/cxx...${NC}" +rm -rf "$WORK_DIR/build_backend" mkdir -p "$WORK_DIR/build_backend" cd "$WORK_DIR/build_backend" cmake "$BACKEND_DIR" \ - -DCMAKE_BUILD_TYPE=Debug \ + -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_INSTALL_PREFIX="$INSTALL_DIR" \ -DSPARSEIR_BUILD_TESTING=OFF \ ${SPARSEIR_USE_BLAS_ILP64:+-DSPARSEIR_USE_BLAS_ILP64=ON} @@ -44,8 +45,11 @@ cd "$SCRIPT_DIR" mkdir -p "$WORK_DIR/build_test" cd "$WORK_DIR/build_test" +# Set SDK path explicitly to avoid CMake auto-detection issues +#export SDKROOT=$(xcrun --show-sdk-path) + cmake "$SCRIPT_DIR" \ - -DCMAKE_BUILD_TYPE=Debug \ + -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_PREFIX_PATH="$INSTALL_DIR" echo -e "${YELLOW}Building capi_test...${NC}" diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index bb294e3c..1173e72c 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ 
-48,7 +48,7 @@ if(SPARSEIR_USE_EXTERN_FBLAS_PTR) message(STATUS "External FBLAS pointer support enabled") endif() -set(EIGEN3_REQUIRED_VERSION "3.4.0") +set(EIGEN3_REQUIRED_VERSION "3.4.1") find_package (Eigen3 ${EIGEN3_REQUIRED_VERSION} QUIET NO_MODULE) if(NOT Eigen3_FOUND) message(STATUS "Eigen3 not found in system, fetching from GitHub...")