From 42b41f46e1f52749048f74636917d6866a5ed403 Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 09:04:17 +0900 Subject: [PATCH 01/21] WIP: Restructure libsparseir to move src/include/test to backend/cxx - Created backend/cxx directory - Moved src/, include/, and test/ to backend/cxx/ - Updated CMakeLists.txt paths for new directory structure - Updated test/CMakeLists.txt paths (cpp and fortran) - Build tests successful --- CMakeLists.txt | 32 +++++++++---------- .../cxx/include}/sparseir/augment.hpp | 0 .../cxx/include}/sparseir/basis.hpp | 0 .../cxx/include}/sparseir/basis_set.hpp | 0 .../cxx/include}/sparseir/dlr.hpp | 0 .../cxx/include}/sparseir/freq.hpp | 0 .../cxx/include}/sparseir/funcs.hpp | 0 .../cxx/include}/sparseir/gauss.hpp | 0 .../cxx/include}/sparseir/gemm.hpp | 0 .../include}/sparseir/impl/kernel_impl.ipp | 0 .../include}/sparseir/impl/linalg_impl.ipp | 0 .../cxx/include}/sparseir/impl/poly_impl.ipp | 0 .../cxx/include}/sparseir/impl/root_impl.ipp | 0 .../include}/sparseir/impl/sampling_impl.ipp | 0 .../include}/sparseir/impl/specfuncs_impl.ipp | 0 .../cxx/include}/sparseir/impl/svd_impl.ipp | 0 .../cxx/include}/sparseir/impl/sve_impl.ipp | 0 .../cxx/include}/sparseir/jacobi_svd.hpp | 0 .../cxx/include}/sparseir/kernel.hpp | 0 .../cxx/include}/sparseir/linalg.hpp | 0 .../cxx/include}/sparseir/poly.hpp | 0 .../cxx/include}/sparseir/root.hpp | 0 .../cxx/include}/sparseir/sampling.hpp | 0 .../cxx/include}/sparseir/sparseir-fwd.hpp | 0 .../cxx/include}/sparseir/sparseir.h | 0 .../cxx/include}/sparseir/sparseir.hpp | 0 .../cxx/include}/sparseir/specfuncs.hpp | 0 .../cxx/include}/sparseir/spir_status.h | 0 .../cxx/include}/sparseir/svd.hpp | 0 .../cxx/include}/sparseir/sve.hpp | 0 .../cxx/include}/sparseir/utils.hpp | 0 .../cxx/include}/sparseir/version.h | 0 {src => backend/cxx/src}/cinterface.cpp | 0 .../cxx/src}/cinterface_impl/_util.hpp | 0 .../cxx/src}/cinterface_impl/helper_funcs.hpp | 0 
.../cxx/src}/cinterface_impl/helper_types.hpp | 0 .../cxx/src}/cinterface_impl/opaque_types.hpp | 0 {src => backend/cxx/src}/gemm.cpp | 0 {src => backend/cxx/src}/kernel.cpp | 0 {src => backend/cxx/src}/linalg.cpp | 0 {src => backend/cxx/src}/poly.cpp | 0 {src => backend/cxx/src}/root.cpp | 0 {src => backend/cxx/src}/specfuncs.cpp | 0 {src => backend/cxx/src}/svd.cpp | 0 {src => backend/cxx/src}/sve.cpp | 0 {src => backend/cxx/src}/utils.cpp | 0 {test => backend/cxx/test}/CMakeLists.txt | 0 {test => backend/cxx/test}/cpp/CMakeLists.txt | 2 +- {test => backend/cxx/test}/cpp/_utils.hpp | 0 {test => backend/cxx/test}/cpp/augment.cxx | 0 {test => backend/cxx/test}/cpp/basis.cxx | 0 .../cxx/test}/cpp/cinterface_core.cxx | 0 .../cxx/test}/cpp/cinterface_dlr.cxx | 0 .../cxx/test}/cpp/cinterface_integration.cxx | 0 {test => backend/cxx/test}/cpp/dlr.cxx | 0 {test => backend/cxx/test}/cpp/freq.cxx | 0 {test => backend/cxx/test}/cpp/gauss.cxx | 0 {test => backend/cxx/test}/cpp/gemm.cxx | 0 {test => backend/cxx/test}/cpp/kernel.cxx | 0 {test => backend/cxx/test}/cpp/linalg.cxx | 0 {test => backend/cxx/test}/cpp/poly.cxx | 0 {test => backend/cxx/test}/cpp/root.cxx | 0 {test => backend/cxx/test}/cpp/sampling.cxx | 0 {test => backend/cxx/test}/cpp/specfuncs.cxx | 0 {test => backend/cxx/test}/cpp/svd.cxx | 0 {test => backend/cxx/test}/cpp/sve.cxx | 0 {test => backend/cxx/test}/cpp/sve_cache.hpp | 0 {test => backend/cxx/test}/cpp/utils.cxx | 0 .../cxx/test}/fortran/CMakeLists.txt | 2 +- .../test}/fortran/generate_fortran_test.py | 0 {test => backend/cxx/test}/fortran/input.in | 0 .../cxx/test}/fortran/test_ext.f90 | 0 .../cxx/test}/fortran/test_integration.f90 | 0 73 files changed, 18 insertions(+), 18 deletions(-) rename {include => backend/cxx/include}/sparseir/augment.hpp (100%) rename {include => backend/cxx/include}/sparseir/basis.hpp (100%) rename {include => backend/cxx/include}/sparseir/basis_set.hpp (100%) rename {include => backend/cxx/include}/sparseir/dlr.hpp (100%) 
rename {include => backend/cxx/include}/sparseir/freq.hpp (100%) rename {include => backend/cxx/include}/sparseir/funcs.hpp (100%) rename {include => backend/cxx/include}/sparseir/gauss.hpp (100%) rename {include => backend/cxx/include}/sparseir/gemm.hpp (100%) rename {include => backend/cxx/include}/sparseir/impl/kernel_impl.ipp (100%) rename {include => backend/cxx/include}/sparseir/impl/linalg_impl.ipp (100%) rename {include => backend/cxx/include}/sparseir/impl/poly_impl.ipp (100%) rename {include => backend/cxx/include}/sparseir/impl/root_impl.ipp (100%) rename {include => backend/cxx/include}/sparseir/impl/sampling_impl.ipp (100%) rename {include => backend/cxx/include}/sparseir/impl/specfuncs_impl.ipp (100%) rename {include => backend/cxx/include}/sparseir/impl/svd_impl.ipp (100%) rename {include => backend/cxx/include}/sparseir/impl/sve_impl.ipp (100%) rename {include => backend/cxx/include}/sparseir/jacobi_svd.hpp (100%) rename {include => backend/cxx/include}/sparseir/kernel.hpp (100%) rename {include => backend/cxx/include}/sparseir/linalg.hpp (100%) rename {include => backend/cxx/include}/sparseir/poly.hpp (100%) rename {include => backend/cxx/include}/sparseir/root.hpp (100%) rename {include => backend/cxx/include}/sparseir/sampling.hpp (100%) rename {include => backend/cxx/include}/sparseir/sparseir-fwd.hpp (100%) rename {include => backend/cxx/include}/sparseir/sparseir.h (100%) rename {include => backend/cxx/include}/sparseir/sparseir.hpp (100%) rename {include => backend/cxx/include}/sparseir/specfuncs.hpp (100%) rename {include => backend/cxx/include}/sparseir/spir_status.h (100%) rename {include => backend/cxx/include}/sparseir/svd.hpp (100%) rename {include => backend/cxx/include}/sparseir/sve.hpp (100%) rename {include => backend/cxx/include}/sparseir/utils.hpp (100%) rename {include => backend/cxx/include}/sparseir/version.h (100%) rename {src => backend/cxx/src}/cinterface.cpp (100%) rename {src => backend/cxx/src}/cinterface_impl/_util.hpp 
(100%) rename {src => backend/cxx/src}/cinterface_impl/helper_funcs.hpp (100%) rename {src => backend/cxx/src}/cinterface_impl/helper_types.hpp (100%) rename {src => backend/cxx/src}/cinterface_impl/opaque_types.hpp (100%) rename {src => backend/cxx/src}/gemm.cpp (100%) rename {src => backend/cxx/src}/kernel.cpp (100%) rename {src => backend/cxx/src}/linalg.cpp (100%) rename {src => backend/cxx/src}/poly.cpp (100%) rename {src => backend/cxx/src}/root.cpp (100%) rename {src => backend/cxx/src}/specfuncs.cpp (100%) rename {src => backend/cxx/src}/svd.cpp (100%) rename {src => backend/cxx/src}/sve.cpp (100%) rename {src => backend/cxx/src}/utils.cpp (100%) rename {test => backend/cxx/test}/CMakeLists.txt (100%) rename {test => backend/cxx/test}/cpp/CMakeLists.txt (96%) rename {test => backend/cxx/test}/cpp/_utils.hpp (100%) rename {test => backend/cxx/test}/cpp/augment.cxx (100%) rename {test => backend/cxx/test}/cpp/basis.cxx (100%) rename {test => backend/cxx/test}/cpp/cinterface_core.cxx (100%) rename {test => backend/cxx/test}/cpp/cinterface_dlr.cxx (100%) rename {test => backend/cxx/test}/cpp/cinterface_integration.cxx (100%) rename {test => backend/cxx/test}/cpp/dlr.cxx (100%) rename {test => backend/cxx/test}/cpp/freq.cxx (100%) rename {test => backend/cxx/test}/cpp/gauss.cxx (100%) rename {test => backend/cxx/test}/cpp/gemm.cxx (100%) rename {test => backend/cxx/test}/cpp/kernel.cxx (100%) rename {test => backend/cxx/test}/cpp/linalg.cxx (100%) rename {test => backend/cxx/test}/cpp/poly.cxx (100%) rename {test => backend/cxx/test}/cpp/root.cxx (100%) rename {test => backend/cxx/test}/cpp/sampling.cxx (100%) rename {test => backend/cxx/test}/cpp/specfuncs.cxx (100%) rename {test => backend/cxx/test}/cpp/svd.cxx (100%) rename {test => backend/cxx/test}/cpp/sve.cxx (100%) rename {test => backend/cxx/test}/cpp/sve_cache.hpp (100%) rename {test => backend/cxx/test}/cpp/utils.cxx (100%) rename {test => backend/cxx/test}/fortran/CMakeLists.txt (92%) rename {test => 
backend/cxx/test}/fortran/generate_fortran_test.py (100%) rename {test => backend/cxx/test}/fortran/input.in (100%) rename {test => backend/cxx/test}/fortran/test_ext.f90 (100%) rename {test => backend/cxx/test}/fortran/test_integration.f90 (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2e9cf3ac..c6d31e10 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,7 +16,7 @@ include(GNUInstallDirs) if (NOT SPARSEIR_VERSION) include("VersionFromHeader") version_from_header(SPARSEIR_VERSION - HEADER "include/sparseir/version.h" + HEADER "backend/cxx/include/sparseir/version.h" MACROS SPARSEIR_VERSION_MAJOR SPARSEIR_VERSION_MINOR SPARSEIR_VERSION_PATCH ) message(STATUS "Extracted package version: ${SPARSEIR_VERSION}") @@ -156,16 +156,16 @@ endif() set(CMAKE_INSTALL_DEFAULT_COMPONENT_NAME sparseir) add_library(sparseir SHARED - src/utils.cpp - src/linalg.cpp - src/root.cpp - src/specfuncs.cpp - src/svd.cpp - src/sve.cpp - src/poly.cpp - src/kernel.cpp - src/gemm.cpp - src/cinterface.cpp + backend/cxx/src/utils.cpp + backend/cxx/src/linalg.cpp + backend/cxx/src/root.cpp + backend/cxx/src/specfuncs.cpp + backend/cxx/src/svd.cpp + backend/cxx/src/sve.cpp + backend/cxx/src/poly.cpp + backend/cxx/src/kernel.cpp + backend/cxx/src/gemm.cpp + backend/cxx/src/cinterface.cpp ) if(NOT MSVC) target_compile_options(sparseir PRIVATE -Wall -Wextra -pedantic) @@ -173,7 +173,7 @@ endif() target_include_directories(sparseir PUBLIC - $ + $ $ PRIVATE $ @@ -236,7 +236,7 @@ if (SPARSEIR_BUILD_TESTING) FetchContent_MakeAvailable(Catch2) # Add test directory - add_subdirectory("test") + add_subdirectory("backend/cxx/test") endif() # ------------------------------------- @@ -253,9 +253,9 @@ install(TARGETS sparseir # Install only necessary header files (C API) install(FILES - include/sparseir/sparseir.h - include/sparseir/version.h - include/sparseir/spir_status.h + backend/cxx/include/sparseir/sparseir.h + backend/cxx/include/sparseir/version.h + 
backend/cxx/include/sparseir/spir_status.h DESTINATION "${SPARSEIR_INSTALL_INCLUDEDIR}/sparseir" COMPONENT sparseir ) diff --git a/include/sparseir/augment.hpp b/backend/cxx/include/sparseir/augment.hpp similarity index 100% rename from include/sparseir/augment.hpp rename to backend/cxx/include/sparseir/augment.hpp diff --git a/include/sparseir/basis.hpp b/backend/cxx/include/sparseir/basis.hpp similarity index 100% rename from include/sparseir/basis.hpp rename to backend/cxx/include/sparseir/basis.hpp diff --git a/include/sparseir/basis_set.hpp b/backend/cxx/include/sparseir/basis_set.hpp similarity index 100% rename from include/sparseir/basis_set.hpp rename to backend/cxx/include/sparseir/basis_set.hpp diff --git a/include/sparseir/dlr.hpp b/backend/cxx/include/sparseir/dlr.hpp similarity index 100% rename from include/sparseir/dlr.hpp rename to backend/cxx/include/sparseir/dlr.hpp diff --git a/include/sparseir/freq.hpp b/backend/cxx/include/sparseir/freq.hpp similarity index 100% rename from include/sparseir/freq.hpp rename to backend/cxx/include/sparseir/freq.hpp diff --git a/include/sparseir/funcs.hpp b/backend/cxx/include/sparseir/funcs.hpp similarity index 100% rename from include/sparseir/funcs.hpp rename to backend/cxx/include/sparseir/funcs.hpp diff --git a/include/sparseir/gauss.hpp b/backend/cxx/include/sparseir/gauss.hpp similarity index 100% rename from include/sparseir/gauss.hpp rename to backend/cxx/include/sparseir/gauss.hpp diff --git a/include/sparseir/gemm.hpp b/backend/cxx/include/sparseir/gemm.hpp similarity index 100% rename from include/sparseir/gemm.hpp rename to backend/cxx/include/sparseir/gemm.hpp diff --git a/include/sparseir/impl/kernel_impl.ipp b/backend/cxx/include/sparseir/impl/kernel_impl.ipp similarity index 100% rename from include/sparseir/impl/kernel_impl.ipp rename to backend/cxx/include/sparseir/impl/kernel_impl.ipp diff --git a/include/sparseir/impl/linalg_impl.ipp b/backend/cxx/include/sparseir/impl/linalg_impl.ipp 
similarity index 100% rename from include/sparseir/impl/linalg_impl.ipp rename to backend/cxx/include/sparseir/impl/linalg_impl.ipp diff --git a/include/sparseir/impl/poly_impl.ipp b/backend/cxx/include/sparseir/impl/poly_impl.ipp similarity index 100% rename from include/sparseir/impl/poly_impl.ipp rename to backend/cxx/include/sparseir/impl/poly_impl.ipp diff --git a/include/sparseir/impl/root_impl.ipp b/backend/cxx/include/sparseir/impl/root_impl.ipp similarity index 100% rename from include/sparseir/impl/root_impl.ipp rename to backend/cxx/include/sparseir/impl/root_impl.ipp diff --git a/include/sparseir/impl/sampling_impl.ipp b/backend/cxx/include/sparseir/impl/sampling_impl.ipp similarity index 100% rename from include/sparseir/impl/sampling_impl.ipp rename to backend/cxx/include/sparseir/impl/sampling_impl.ipp diff --git a/include/sparseir/impl/specfuncs_impl.ipp b/backend/cxx/include/sparseir/impl/specfuncs_impl.ipp similarity index 100% rename from include/sparseir/impl/specfuncs_impl.ipp rename to backend/cxx/include/sparseir/impl/specfuncs_impl.ipp diff --git a/include/sparseir/impl/svd_impl.ipp b/backend/cxx/include/sparseir/impl/svd_impl.ipp similarity index 100% rename from include/sparseir/impl/svd_impl.ipp rename to backend/cxx/include/sparseir/impl/svd_impl.ipp diff --git a/include/sparseir/impl/sve_impl.ipp b/backend/cxx/include/sparseir/impl/sve_impl.ipp similarity index 100% rename from include/sparseir/impl/sve_impl.ipp rename to backend/cxx/include/sparseir/impl/sve_impl.ipp diff --git a/include/sparseir/jacobi_svd.hpp b/backend/cxx/include/sparseir/jacobi_svd.hpp similarity index 100% rename from include/sparseir/jacobi_svd.hpp rename to backend/cxx/include/sparseir/jacobi_svd.hpp diff --git a/include/sparseir/kernel.hpp b/backend/cxx/include/sparseir/kernel.hpp similarity index 100% rename from include/sparseir/kernel.hpp rename to backend/cxx/include/sparseir/kernel.hpp diff --git a/include/sparseir/linalg.hpp 
b/backend/cxx/include/sparseir/linalg.hpp similarity index 100% rename from include/sparseir/linalg.hpp rename to backend/cxx/include/sparseir/linalg.hpp diff --git a/include/sparseir/poly.hpp b/backend/cxx/include/sparseir/poly.hpp similarity index 100% rename from include/sparseir/poly.hpp rename to backend/cxx/include/sparseir/poly.hpp diff --git a/include/sparseir/root.hpp b/backend/cxx/include/sparseir/root.hpp similarity index 100% rename from include/sparseir/root.hpp rename to backend/cxx/include/sparseir/root.hpp diff --git a/include/sparseir/sampling.hpp b/backend/cxx/include/sparseir/sampling.hpp similarity index 100% rename from include/sparseir/sampling.hpp rename to backend/cxx/include/sparseir/sampling.hpp diff --git a/include/sparseir/sparseir-fwd.hpp b/backend/cxx/include/sparseir/sparseir-fwd.hpp similarity index 100% rename from include/sparseir/sparseir-fwd.hpp rename to backend/cxx/include/sparseir/sparseir-fwd.hpp diff --git a/include/sparseir/sparseir.h b/backend/cxx/include/sparseir/sparseir.h similarity index 100% rename from include/sparseir/sparseir.h rename to backend/cxx/include/sparseir/sparseir.h diff --git a/include/sparseir/sparseir.hpp b/backend/cxx/include/sparseir/sparseir.hpp similarity index 100% rename from include/sparseir/sparseir.hpp rename to backend/cxx/include/sparseir/sparseir.hpp diff --git a/include/sparseir/specfuncs.hpp b/backend/cxx/include/sparseir/specfuncs.hpp similarity index 100% rename from include/sparseir/specfuncs.hpp rename to backend/cxx/include/sparseir/specfuncs.hpp diff --git a/include/sparseir/spir_status.h b/backend/cxx/include/sparseir/spir_status.h similarity index 100% rename from include/sparseir/spir_status.h rename to backend/cxx/include/sparseir/spir_status.h diff --git a/include/sparseir/svd.hpp b/backend/cxx/include/sparseir/svd.hpp similarity index 100% rename from include/sparseir/svd.hpp rename to backend/cxx/include/sparseir/svd.hpp diff --git a/include/sparseir/sve.hpp 
b/backend/cxx/include/sparseir/sve.hpp similarity index 100% rename from include/sparseir/sve.hpp rename to backend/cxx/include/sparseir/sve.hpp diff --git a/include/sparseir/utils.hpp b/backend/cxx/include/sparseir/utils.hpp similarity index 100% rename from include/sparseir/utils.hpp rename to backend/cxx/include/sparseir/utils.hpp diff --git a/include/sparseir/version.h b/backend/cxx/include/sparseir/version.h similarity index 100% rename from include/sparseir/version.h rename to backend/cxx/include/sparseir/version.h diff --git a/src/cinterface.cpp b/backend/cxx/src/cinterface.cpp similarity index 100% rename from src/cinterface.cpp rename to backend/cxx/src/cinterface.cpp diff --git a/src/cinterface_impl/_util.hpp b/backend/cxx/src/cinterface_impl/_util.hpp similarity index 100% rename from src/cinterface_impl/_util.hpp rename to backend/cxx/src/cinterface_impl/_util.hpp diff --git a/src/cinterface_impl/helper_funcs.hpp b/backend/cxx/src/cinterface_impl/helper_funcs.hpp similarity index 100% rename from src/cinterface_impl/helper_funcs.hpp rename to backend/cxx/src/cinterface_impl/helper_funcs.hpp diff --git a/src/cinterface_impl/helper_types.hpp b/backend/cxx/src/cinterface_impl/helper_types.hpp similarity index 100% rename from src/cinterface_impl/helper_types.hpp rename to backend/cxx/src/cinterface_impl/helper_types.hpp diff --git a/src/cinterface_impl/opaque_types.hpp b/backend/cxx/src/cinterface_impl/opaque_types.hpp similarity index 100% rename from src/cinterface_impl/opaque_types.hpp rename to backend/cxx/src/cinterface_impl/opaque_types.hpp diff --git a/src/gemm.cpp b/backend/cxx/src/gemm.cpp similarity index 100% rename from src/gemm.cpp rename to backend/cxx/src/gemm.cpp diff --git a/src/kernel.cpp b/backend/cxx/src/kernel.cpp similarity index 100% rename from src/kernel.cpp rename to backend/cxx/src/kernel.cpp diff --git a/src/linalg.cpp b/backend/cxx/src/linalg.cpp similarity index 100% rename from src/linalg.cpp rename to 
backend/cxx/src/linalg.cpp diff --git a/src/poly.cpp b/backend/cxx/src/poly.cpp similarity index 100% rename from src/poly.cpp rename to backend/cxx/src/poly.cpp diff --git a/src/root.cpp b/backend/cxx/src/root.cpp similarity index 100% rename from src/root.cpp rename to backend/cxx/src/root.cpp diff --git a/src/specfuncs.cpp b/backend/cxx/src/specfuncs.cpp similarity index 100% rename from src/specfuncs.cpp rename to backend/cxx/src/specfuncs.cpp diff --git a/src/svd.cpp b/backend/cxx/src/svd.cpp similarity index 100% rename from src/svd.cpp rename to backend/cxx/src/svd.cpp diff --git a/src/sve.cpp b/backend/cxx/src/sve.cpp similarity index 100% rename from src/sve.cpp rename to backend/cxx/src/sve.cpp diff --git a/src/utils.cpp b/backend/cxx/src/utils.cpp similarity index 100% rename from src/utils.cpp rename to backend/cxx/src/utils.cpp diff --git a/test/CMakeLists.txt b/backend/cxx/test/CMakeLists.txt similarity index 100% rename from test/CMakeLists.txt rename to backend/cxx/test/CMakeLists.txt diff --git a/test/cpp/CMakeLists.txt b/backend/cxx/test/cpp/CMakeLists.txt similarity index 96% rename from test/cpp/CMakeLists.txt rename to backend/cxx/test/cpp/CMakeLists.txt index 35263eb1..45b22fd6 100644 --- a/test/cpp/CMakeLists.txt +++ b/backend/cxx/test/cpp/CMakeLists.txt @@ -3,7 +3,7 @@ set(CMAKE_CXX_STANDARD 11) # Set include paths for tests set(TEST_INCLUDE_DIRS ${EIGEN3_INCLUDE_DIR} - ${CMAKE_CURRENT_SOURCE_DIR}/../../include + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../backend/cxx/include ${xprec_SOURCE_DIR}/include ) diff --git a/test/cpp/_utils.hpp b/backend/cxx/test/cpp/_utils.hpp similarity index 100% rename from test/cpp/_utils.hpp rename to backend/cxx/test/cpp/_utils.hpp diff --git a/test/cpp/augment.cxx b/backend/cxx/test/cpp/augment.cxx similarity index 100% rename from test/cpp/augment.cxx rename to backend/cxx/test/cpp/augment.cxx diff --git a/test/cpp/basis.cxx b/backend/cxx/test/cpp/basis.cxx similarity index 100% rename from test/cpp/basis.cxx 
rename to backend/cxx/test/cpp/basis.cxx diff --git a/test/cpp/cinterface_core.cxx b/backend/cxx/test/cpp/cinterface_core.cxx similarity index 100% rename from test/cpp/cinterface_core.cxx rename to backend/cxx/test/cpp/cinterface_core.cxx diff --git a/test/cpp/cinterface_dlr.cxx b/backend/cxx/test/cpp/cinterface_dlr.cxx similarity index 100% rename from test/cpp/cinterface_dlr.cxx rename to backend/cxx/test/cpp/cinterface_dlr.cxx diff --git a/test/cpp/cinterface_integration.cxx b/backend/cxx/test/cpp/cinterface_integration.cxx similarity index 100% rename from test/cpp/cinterface_integration.cxx rename to backend/cxx/test/cpp/cinterface_integration.cxx diff --git a/test/cpp/dlr.cxx b/backend/cxx/test/cpp/dlr.cxx similarity index 100% rename from test/cpp/dlr.cxx rename to backend/cxx/test/cpp/dlr.cxx diff --git a/test/cpp/freq.cxx b/backend/cxx/test/cpp/freq.cxx similarity index 100% rename from test/cpp/freq.cxx rename to backend/cxx/test/cpp/freq.cxx diff --git a/test/cpp/gauss.cxx b/backend/cxx/test/cpp/gauss.cxx similarity index 100% rename from test/cpp/gauss.cxx rename to backend/cxx/test/cpp/gauss.cxx diff --git a/test/cpp/gemm.cxx b/backend/cxx/test/cpp/gemm.cxx similarity index 100% rename from test/cpp/gemm.cxx rename to backend/cxx/test/cpp/gemm.cxx diff --git a/test/cpp/kernel.cxx b/backend/cxx/test/cpp/kernel.cxx similarity index 100% rename from test/cpp/kernel.cxx rename to backend/cxx/test/cpp/kernel.cxx diff --git a/test/cpp/linalg.cxx b/backend/cxx/test/cpp/linalg.cxx similarity index 100% rename from test/cpp/linalg.cxx rename to backend/cxx/test/cpp/linalg.cxx diff --git a/test/cpp/poly.cxx b/backend/cxx/test/cpp/poly.cxx similarity index 100% rename from test/cpp/poly.cxx rename to backend/cxx/test/cpp/poly.cxx diff --git a/test/cpp/root.cxx b/backend/cxx/test/cpp/root.cxx similarity index 100% rename from test/cpp/root.cxx rename to backend/cxx/test/cpp/root.cxx diff --git a/test/cpp/sampling.cxx b/backend/cxx/test/cpp/sampling.cxx similarity 
index 100% rename from test/cpp/sampling.cxx rename to backend/cxx/test/cpp/sampling.cxx diff --git a/test/cpp/specfuncs.cxx b/backend/cxx/test/cpp/specfuncs.cxx similarity index 100% rename from test/cpp/specfuncs.cxx rename to backend/cxx/test/cpp/specfuncs.cxx diff --git a/test/cpp/svd.cxx b/backend/cxx/test/cpp/svd.cxx similarity index 100% rename from test/cpp/svd.cxx rename to backend/cxx/test/cpp/svd.cxx diff --git a/test/cpp/sve.cxx b/backend/cxx/test/cpp/sve.cxx similarity index 100% rename from test/cpp/sve.cxx rename to backend/cxx/test/cpp/sve.cxx diff --git a/test/cpp/sve_cache.hpp b/backend/cxx/test/cpp/sve_cache.hpp similarity index 100% rename from test/cpp/sve_cache.hpp rename to backend/cxx/test/cpp/sve_cache.hpp diff --git a/test/cpp/utils.cxx b/backend/cxx/test/cpp/utils.cxx similarity index 100% rename from test/cpp/utils.cxx rename to backend/cxx/test/cpp/utils.cxx diff --git a/test/fortran/CMakeLists.txt b/backend/cxx/test/fortran/CMakeLists.txt similarity index 92% rename from test/fortran/CMakeLists.txt rename to backend/cxx/test/fortran/CMakeLists.txt index 12e67202..c1227491 100644 --- a/test/fortran/CMakeLists.txt +++ b/backend/cxx/test/fortran/CMakeLists.txt @@ -15,7 +15,7 @@ get_target_property(SPARSEIR_FORTRAN_MODDIR sparseir_fortran Fortran_MODULE_DIRE foreach(TEST ${TEST_FILES}) target_include_directories(${TEST} PRIVATE ${SPARSEIR_FORTRAN_MODDIR} - ${CMAKE_CURRENT_SOURCE_DIR}/../../fortran + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../fortran ) target_link_libraries(${TEST} PRIVATE sparseir_fortran sparseir) diff --git a/test/fortran/generate_fortran_test.py b/backend/cxx/test/fortran/generate_fortran_test.py similarity index 100% rename from test/fortran/generate_fortran_test.py rename to backend/cxx/test/fortran/generate_fortran_test.py diff --git a/test/fortran/input.in b/backend/cxx/test/fortran/input.in similarity index 100% rename from test/fortran/input.in rename to backend/cxx/test/fortran/input.in diff --git 
a/test/fortran/test_ext.f90 b/backend/cxx/test/fortran/test_ext.f90 similarity index 100% rename from test/fortran/test_ext.f90 rename to backend/cxx/test/fortran/test_ext.f90 diff --git a/test/fortran/test_integration.f90 b/backend/cxx/test/fortran/test_integration.f90 similarity index 100% rename from test/fortran/test_integration.f90 rename to backend/cxx/test/fortran/test_integration.f90 From 8827fe83c5ddb60963c5d202beddf46037b78321 Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 09:11:42 +0900 Subject: [PATCH 02/21] WIP: Move Fortran tests from backend/cxx/test/fortran to fortran/test - Created fortran/test/ directory - Moved Fortran test files from backend/cxx/test/fortran to fortran/test/ - Updated CMakeLists.txt to add fortran/test as subdirectory - Updated backend/cxx/test/CMakeLists.txt to remove fortran reference - Fixed fortran/test/CMakeLists.txt relative paths - All tests passing (20/20: 18 C++ + 2 Fortran) --- CMakeLists.txt | 5 +++++ backend/cxx/test/CMakeLists.txt | 4 ---- {backend/cxx/test/fortran => fortran/test}/CMakeLists.txt | 2 +- .../test/fortran => fortran/test}/generate_fortran_test.py | 0 {backend/cxx/test/fortran => fortran/test}/input.in | 0 {backend/cxx/test/fortran => fortran/test}/test_ext.f90 | 0 .../cxx/test/fortran => fortran/test}/test_integration.f90 | 0 7 files changed, 6 insertions(+), 5 deletions(-) rename {backend/cxx/test/fortran => fortran/test}/CMakeLists.txt (92%) rename {backend/cxx/test/fortran => fortran/test}/generate_fortran_test.py (100%) rename {backend/cxx/test/fortran => fortran/test}/input.in (100%) rename {backend/cxx/test/fortran => fortran/test}/test_ext.f90 (100%) rename {backend/cxx/test/fortran => fortran/test}/test_integration.f90 (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index c6d31e10..1d707759 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -237,6 +237,11 @@ if (SPARSEIR_BUILD_TESTING) # Add test directory add_subdirectory("backend/cxx/test") + + # Add Fortran test 
directory if Fortran is enabled + if(SPARSEIR_BUILD_FORTRAN) + add_subdirectory("fortran/test") + endif() endif() # ------------------------------------- diff --git a/backend/cxx/test/CMakeLists.txt b/backend/cxx/test/CMakeLists.txt index c76e0e45..dc0ad1f7 100644 --- a/backend/cxx/test/CMakeLists.txt +++ b/backend/cxx/test/CMakeLists.txt @@ -1,6 +1,2 @@ add_subdirectory(cpp) -if (SPARSEIR_BUILD_FORTRAN) - add_subdirectory(fortran) -endif() - diff --git a/backend/cxx/test/fortran/CMakeLists.txt b/fortran/test/CMakeLists.txt similarity index 92% rename from backend/cxx/test/fortran/CMakeLists.txt rename to fortran/test/CMakeLists.txt index c1227491..ffdf7939 100644 --- a/backend/cxx/test/fortran/CMakeLists.txt +++ b/fortran/test/CMakeLists.txt @@ -15,7 +15,7 @@ get_target_property(SPARSEIR_FORTRAN_MODDIR sparseir_fortran Fortran_MODULE_DIRE foreach(TEST ${TEST_FILES}) target_include_directories(${TEST} PRIVATE ${SPARSEIR_FORTRAN_MODDIR} - ${CMAKE_CURRENT_SOURCE_DIR}/../../../../fortran + ${CMAKE_CURRENT_SOURCE_DIR}/.. 
) target_link_libraries(${TEST} PRIVATE sparseir_fortran sparseir) diff --git a/backend/cxx/test/fortran/generate_fortran_test.py b/fortran/test/generate_fortran_test.py similarity index 100% rename from backend/cxx/test/fortran/generate_fortran_test.py rename to fortran/test/generate_fortran_test.py diff --git a/backend/cxx/test/fortran/input.in b/fortran/test/input.in similarity index 100% rename from backend/cxx/test/fortran/input.in rename to fortran/test/input.in diff --git a/backend/cxx/test/fortran/test_ext.f90 b/fortran/test/test_ext.f90 similarity index 100% rename from backend/cxx/test/fortran/test_ext.f90 rename to fortran/test/test_ext.f90 diff --git a/backend/cxx/test/fortran/test_integration.f90 b/fortran/test/test_integration.f90 similarity index 100% rename from backend/cxx/test/fortran/test_integration.f90 rename to fortran/test/test_integration.f90 From 11d30e70f04fad32c7807a863b81258f2843557d Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 09:12:59 +0900 Subject: [PATCH 03/21] WIP: Flatten backend/cxx/test/cpp to backend/cxx/test - Moved all test files from backend/cxx/test/cpp/ to backend/cxx/test/ - Updated CMakeLists.txt include paths - Flattened directory structure for simplicity --- backend/cxx/test/CMakeLists.txt | 59 ++++++++++++++++++- backend/cxx/test/{cpp => }/_utils.hpp | 0 backend/cxx/test/{cpp => }/augment.cxx | 0 backend/cxx/test/{cpp => }/basis.cxx | 0 .../cxx/test/{cpp => }/cinterface_core.cxx | 0 backend/cxx/test/{cpp => }/cinterface_dlr.cxx | 0 .../test/{cpp => }/cinterface_integration.cxx | 0 backend/cxx/test/cpp/CMakeLists.txt | 59 ------------------- backend/cxx/test/{cpp => }/dlr.cxx | 0 backend/cxx/test/{cpp => }/freq.cxx | 0 backend/cxx/test/{cpp => }/gauss.cxx | 0 backend/cxx/test/{cpp => }/gemm.cxx | 0 backend/cxx/test/{cpp => }/kernel.cxx | 0 backend/cxx/test/{cpp => }/linalg.cxx | 0 backend/cxx/test/{cpp => }/poly.cxx | 0 backend/cxx/test/{cpp => }/root.cxx | 0 backend/cxx/test/{cpp => 
}/sampling.cxx | 0 backend/cxx/test/{cpp => }/specfuncs.cxx | 0 backend/cxx/test/{cpp => }/svd.cxx | 0 backend/cxx/test/{cpp => }/sve.cxx | 0 backend/cxx/test/{cpp => }/sve_cache.hpp | 0 backend/cxx/test/{cpp => }/utils.cxx | 0 22 files changed, 58 insertions(+), 60 deletions(-) rename backend/cxx/test/{cpp => }/_utils.hpp (100%) rename backend/cxx/test/{cpp => }/augment.cxx (100%) rename backend/cxx/test/{cpp => }/basis.cxx (100%) rename backend/cxx/test/{cpp => }/cinterface_core.cxx (100%) rename backend/cxx/test/{cpp => }/cinterface_dlr.cxx (100%) rename backend/cxx/test/{cpp => }/cinterface_integration.cxx (100%) delete mode 100644 backend/cxx/test/cpp/CMakeLists.txt rename backend/cxx/test/{cpp => }/dlr.cxx (100%) rename backend/cxx/test/{cpp => }/freq.cxx (100%) rename backend/cxx/test/{cpp => }/gauss.cxx (100%) rename backend/cxx/test/{cpp => }/gemm.cxx (100%) rename backend/cxx/test/{cpp => }/kernel.cxx (100%) rename backend/cxx/test/{cpp => }/linalg.cxx (100%) rename backend/cxx/test/{cpp => }/poly.cxx (100%) rename backend/cxx/test/{cpp => }/root.cxx (100%) rename backend/cxx/test/{cpp => }/sampling.cxx (100%) rename backend/cxx/test/{cpp => }/specfuncs.cxx (100%) rename backend/cxx/test/{cpp => }/svd.cxx (100%) rename backend/cxx/test/{cpp => }/sve.cxx (100%) rename backend/cxx/test/{cpp => }/sve_cache.hpp (100%) rename backend/cxx/test/{cpp => }/utils.cxx (100%) diff --git a/backend/cxx/test/CMakeLists.txt b/backend/cxx/test/CMakeLists.txt index dc0ad1f7..35263eb1 100644 --- a/backend/cxx/test/CMakeLists.txt +++ b/backend/cxx/test/CMakeLists.txt @@ -1,2 +1,59 @@ -add_subdirectory(cpp) +set(CMAKE_CXX_STANDARD 11) +# Set include paths for tests +set(TEST_INCLUDE_DIRS + ${EIGEN3_INCLUDE_DIR} + ${CMAKE_CURRENT_SOURCE_DIR}/../../include + ${xprec_SOURCE_DIR}/include +) + +# List of all test files +file(GLOB TEST_FILES "*.cxx") + +# Create separate test executable for each .cxx file +foreach(TEST_FILE ${TEST_FILES}) + # Get the base name without extension + 
get_filename_component(TEST_NAME ${TEST_FILE} NAME_WE) + + # Create executable for this test + add_executable(${TEST_NAME} ${TEST_FILE}) + + # Link libraries + target_link_libraries(${TEST_NAME} PRIVATE Catch2::Catch2WithMain) + target_link_libraries(${TEST_NAME} PRIVATE SparseIR::sparseir) + target_link_libraries(${TEST_NAME} PRIVATE Eigen3::Eigen Threads::Threads) + + # Add BLAS/LAPACK linking for tests if enabled + if(SPARSEIR_USE_BLAS AND BLAS_FOUND) + target_link_libraries(${TEST_NAME} PRIVATE ${BLAS_LIBRARIES}) + endif() + + if(SPARSEIR_USE_LAPACKE AND LAPACK_FOUND) + target_link_libraries(${TEST_NAME} PRIVATE ${LAPACK_LIBRARIES}) + endif() + + # macOS-specific configuration for tests + if(APPLE AND ACCELERATE_FRAMEWORK) + target_link_libraries(${TEST_NAME} PRIVATE ${ACCELERATE_FRAMEWORK}) + endif() + + # Add include paths + if(DEFINED TEST_INCLUDE_DIRS) + target_include_directories(${TEST_NAME} PRIVATE ${TEST_INCLUDE_DIRS}) + endif() + + # Also explicitly include Eigen3 and XPrec include directories + if(DEFINED EIGEN3_INCLUDE_DIR) + target_include_directories(${TEST_NAME} PRIVATE ${EIGEN3_INCLUDE_DIR}) + endif() + + if(TARGET XPrec::xprec) + get_target_property(XPREC_INCLUDE_DIRS XPrec::xprec INTERFACE_INCLUDE_DIRECTORIES) + if(XPREC_INCLUDE_DIRS) + target_include_directories(${TEST_NAME} PRIVATE ${XPREC_INCLUDE_DIRS}) + endif() + endif() + + # Add test + add_test(NAME ${TEST_NAME} COMMAND ${TEST_NAME} -d yes) +endforeach() \ No newline at end of file diff --git a/backend/cxx/test/cpp/_utils.hpp b/backend/cxx/test/_utils.hpp similarity index 100% rename from backend/cxx/test/cpp/_utils.hpp rename to backend/cxx/test/_utils.hpp diff --git a/backend/cxx/test/cpp/augment.cxx b/backend/cxx/test/augment.cxx similarity index 100% rename from backend/cxx/test/cpp/augment.cxx rename to backend/cxx/test/augment.cxx diff --git a/backend/cxx/test/cpp/basis.cxx b/backend/cxx/test/basis.cxx similarity index 100% rename from backend/cxx/test/cpp/basis.cxx rename to 
backend/cxx/test/basis.cxx diff --git a/backend/cxx/test/cpp/cinterface_core.cxx b/backend/cxx/test/cinterface_core.cxx similarity index 100% rename from backend/cxx/test/cpp/cinterface_core.cxx rename to backend/cxx/test/cinterface_core.cxx diff --git a/backend/cxx/test/cpp/cinterface_dlr.cxx b/backend/cxx/test/cinterface_dlr.cxx similarity index 100% rename from backend/cxx/test/cpp/cinterface_dlr.cxx rename to backend/cxx/test/cinterface_dlr.cxx diff --git a/backend/cxx/test/cpp/cinterface_integration.cxx b/backend/cxx/test/cinterface_integration.cxx similarity index 100% rename from backend/cxx/test/cpp/cinterface_integration.cxx rename to backend/cxx/test/cinterface_integration.cxx diff --git a/backend/cxx/test/cpp/CMakeLists.txt b/backend/cxx/test/cpp/CMakeLists.txt deleted file mode 100644 index 45b22fd6..00000000 --- a/backend/cxx/test/cpp/CMakeLists.txt +++ /dev/null @@ -1,59 +0,0 @@ -set(CMAKE_CXX_STANDARD 11) - -# Set include paths for tests -set(TEST_INCLUDE_DIRS - ${EIGEN3_INCLUDE_DIR} - ${CMAKE_CURRENT_SOURCE_DIR}/../../../../backend/cxx/include - ${xprec_SOURCE_DIR}/include -) - -# List of all test files -file(GLOB TEST_FILES "*.cxx") - -# Create separate test executable for each .cxx file -foreach(TEST_FILE ${TEST_FILES}) - # Get the base name without extension - get_filename_component(TEST_NAME ${TEST_FILE} NAME_WE) - - # Create executable for this test - add_executable(${TEST_NAME} ${TEST_FILE}) - - # Link libraries - target_link_libraries(${TEST_NAME} PRIVATE Catch2::Catch2WithMain) - target_link_libraries(${TEST_NAME} PRIVATE SparseIR::sparseir) - target_link_libraries(${TEST_NAME} PRIVATE Eigen3::Eigen Threads::Threads) - - # Add BLAS/LAPACK linking for tests if enabled - if(SPARSEIR_USE_BLAS AND BLAS_FOUND) - target_link_libraries(${TEST_NAME} PRIVATE ${BLAS_LIBRARIES}) - endif() - - if(SPARSEIR_USE_LAPACKE AND LAPACK_FOUND) - target_link_libraries(${TEST_NAME} PRIVATE ${LAPACK_LIBRARIES}) - endif() - - # macOS-specific configuration for tests 
- if(APPLE AND ACCELERATE_FRAMEWORK) - target_link_libraries(${TEST_NAME} PRIVATE ${ACCELERATE_FRAMEWORK}) - endif() - - # Add include paths - if(DEFINED TEST_INCLUDE_DIRS) - target_include_directories(${TEST_NAME} PRIVATE ${TEST_INCLUDE_DIRS}) - endif() - - # Also explicitly include Eigen3 and XPrec include directories - if(DEFINED EIGEN3_INCLUDE_DIR) - target_include_directories(${TEST_NAME} PRIVATE ${EIGEN3_INCLUDE_DIR}) - endif() - - if(TARGET XPrec::xprec) - get_target_property(XPREC_INCLUDE_DIRS XPrec::xprec INTERFACE_INCLUDE_DIRECTORIES) - if(XPREC_INCLUDE_DIRS) - target_include_directories(${TEST_NAME} PRIVATE ${XPREC_INCLUDE_DIRS}) - endif() - endif() - - # Add test - add_test(NAME ${TEST_NAME} COMMAND ${TEST_NAME} -d yes) -endforeach() \ No newline at end of file diff --git a/backend/cxx/test/cpp/dlr.cxx b/backend/cxx/test/dlr.cxx similarity index 100% rename from backend/cxx/test/cpp/dlr.cxx rename to backend/cxx/test/dlr.cxx diff --git a/backend/cxx/test/cpp/freq.cxx b/backend/cxx/test/freq.cxx similarity index 100% rename from backend/cxx/test/cpp/freq.cxx rename to backend/cxx/test/freq.cxx diff --git a/backend/cxx/test/cpp/gauss.cxx b/backend/cxx/test/gauss.cxx similarity index 100% rename from backend/cxx/test/cpp/gauss.cxx rename to backend/cxx/test/gauss.cxx diff --git a/backend/cxx/test/cpp/gemm.cxx b/backend/cxx/test/gemm.cxx similarity index 100% rename from backend/cxx/test/cpp/gemm.cxx rename to backend/cxx/test/gemm.cxx diff --git a/backend/cxx/test/cpp/kernel.cxx b/backend/cxx/test/kernel.cxx similarity index 100% rename from backend/cxx/test/cpp/kernel.cxx rename to backend/cxx/test/kernel.cxx diff --git a/backend/cxx/test/cpp/linalg.cxx b/backend/cxx/test/linalg.cxx similarity index 100% rename from backend/cxx/test/cpp/linalg.cxx rename to backend/cxx/test/linalg.cxx diff --git a/backend/cxx/test/cpp/poly.cxx b/backend/cxx/test/poly.cxx similarity index 100% rename from backend/cxx/test/cpp/poly.cxx rename to backend/cxx/test/poly.cxx 
diff --git a/backend/cxx/test/cpp/root.cxx b/backend/cxx/test/root.cxx similarity index 100% rename from backend/cxx/test/cpp/root.cxx rename to backend/cxx/test/root.cxx diff --git a/backend/cxx/test/cpp/sampling.cxx b/backend/cxx/test/sampling.cxx similarity index 100% rename from backend/cxx/test/cpp/sampling.cxx rename to backend/cxx/test/sampling.cxx diff --git a/backend/cxx/test/cpp/specfuncs.cxx b/backend/cxx/test/specfuncs.cxx similarity index 100% rename from backend/cxx/test/cpp/specfuncs.cxx rename to backend/cxx/test/specfuncs.cxx diff --git a/backend/cxx/test/cpp/svd.cxx b/backend/cxx/test/svd.cxx similarity index 100% rename from backend/cxx/test/cpp/svd.cxx rename to backend/cxx/test/svd.cxx diff --git a/backend/cxx/test/cpp/sve.cxx b/backend/cxx/test/sve.cxx similarity index 100% rename from backend/cxx/test/cpp/sve.cxx rename to backend/cxx/test/sve.cxx diff --git a/backend/cxx/test/cpp/sve_cache.hpp b/backend/cxx/test/sve_cache.hpp similarity index 100% rename from backend/cxx/test/cpp/sve_cache.hpp rename to backend/cxx/test/sve_cache.hpp diff --git a/backend/cxx/test/cpp/utils.cxx b/backend/cxx/test/utils.cxx similarity index 100% rename from backend/cxx/test/cpp/utils.cxx rename to backend/cxx/test/utils.cxx From 3f1f25b8dafb44949111f51476c317c25cbcef2c Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 09:20:49 +0900 Subject: [PATCH 04/21] WIP: Move cinterface_integration.cxx to backend/test and remove C++ API dependencies - Created backend/test/ directory for C-API only tests - Moved cinterface_integration.cxx from backend/cxx/test to backend/test - Removed sparseir.hpp dependency by: - Replacing sparseir::movedim() with local movedim_local() implementation - Removing template parameters for sparseir::Bosonic/Fermionic, sparseir::LogisticKernel - Making kernel and stat runtime parameters instead of template parameters - Added backend/test/CMakeLists.txt for C-API test configuration - Updated main CMakeLists.txt to add 
backend/test subdirectory --- CMakeLists.txt | 3 +- backend/test/CMakeLists.txt | 29 ++++ backend/test/_utils.hpp | 139 ++++++++++++++++++ .../{cxx => }/test/cinterface_integration.cxx | 115 +++++++++------ 4 files changed, 237 insertions(+), 49 deletions(-) create mode 100644 backend/test/CMakeLists.txt create mode 100644 backend/test/_utils.hpp rename backend/{cxx => }/test/cinterface_integration.cxx (91%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1d707759..52fb81dc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -235,8 +235,9 @@ if (SPARSEIR_BUILD_TESTING) ) FetchContent_MakeAvailable(Catch2) - # Add test directory + # Add test directories add_subdirectory("backend/cxx/test") + add_subdirectory("backend/test") # Add Fortran test directory if Fortran is enabled if(SPARSEIR_BUILD_FORTRAN) diff --git a/backend/test/CMakeLists.txt b/backend/test/CMakeLists.txt new file mode 100644 index 00000000..70380c91 --- /dev/null +++ b/backend/test/CMakeLists.txt @@ -0,0 +1,29 @@ +set(CMAKE_CXX_STANDARD 11) + +# C-API integration test +add_executable(cinterface_integration cinterface_integration.cxx) + +target_include_directories(cinterface_integration PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR}/../cxx/include + ${CMAKE_CURRENT_SOURCE_DIR}/../cxx/test + ${EIGEN3_INCLUDE_DIR} + ${xprec_SOURCE_DIR}/include +) + +target_link_libraries(cinterface_integration PRIVATE + Catch2::Catch2WithMain + SparseIR::sparseir + Eigen3::Eigen + Threads::Threads +) + +if(SPARSEIR_USE_BLAS AND BLAS_FOUND) + target_link_libraries(cinterface_integration PRIVATE ${BLAS_LIBRARIES}) +endif() + +if(APPLE AND ACCELERATE_FRAMEWORK) + target_link_libraries(cinterface_integration PRIVATE ${ACCELERATE_FRAMEWORK}) +endif() + +add_test(NAME cinterface_integration COMMAND cinterface_integration -d yes) + diff --git a/backend/test/_utils.hpp b/backend/test/_utils.hpp new file mode 100644 index 00000000..3c05deea --- /dev/null +++ b/backend/test/_utils.hpp @@ -0,0 +1,139 @@ +#pragma once + +#include + 
+#include // C interface + +inline spir_basis *_spir_basis_new(int32_t statistics, double beta, + double omega_max, double epsilon, + int32_t *status) +{ + int32_t status_; + spir_kernel *kernel = nullptr; + spir_sve_result *sve = nullptr; + spir_basis *basis = nullptr; + + try { + // Create a logistic kernel + kernel = spir_logistic_kernel_new(beta * omega_max, &status_); + if (status_ != SPIR_COMPUTATION_SUCCESS || kernel == nullptr) { + *status = status_; + return nullptr; + } + + // Create a pre-computed SVE result + double cutoff = -1.0; + int lmax = -1; + int n_gauss = -1; + int Twork = SPIR_TWORK_AUTO; + sve = spir_sve_result_new(kernel, epsilon, cutoff, lmax, n_gauss, Twork, &status_); + if (status_ != SPIR_COMPUTATION_SUCCESS || sve == nullptr) { + *status = status_; + spir_kernel_release(kernel); + return nullptr; + } + + int sve_size; + status_ = spir_sve_result_get_size(sve, &sve_size); + if (status_ != SPIR_COMPUTATION_SUCCESS) { + *status = status_; + spir_sve_result_release(sve); + spir_kernel_release(kernel); + return nullptr; + } + REQUIRE(sve_size > 0); + + // Create a fermionic finite temperature basis with pre-computed SVE result + int max_size = -1; + basis = spir_basis_new( + statistics, beta, omega_max, epsilon, kernel, sve, max_size, &status_); + if (status_ != SPIR_COMPUTATION_SUCCESS || basis == nullptr) { + *status = status_; + spir_sve_result_release(sve); + spir_kernel_release(kernel); + return nullptr; + } + + int basis_size; + status_ = spir_basis_get_size(basis, &basis_size); + if (status_ != SPIR_COMPUTATION_SUCCESS) { + *status = status_; + spir_basis_release(basis); + spir_sve_result_release(sve); + spir_kernel_release(kernel); + } + + std::vector svals(sve_size); + int svals_status = spir_sve_result_get_svals(sve, svals.data()); + REQUIRE(svals_status == SPIR_COMPUTATION_SUCCESS); + + REQUIRE(basis_size <= sve_size); + if (sve_size > basis_size) { + REQUIRE(svals[basis_size] / svals[0] <= epsilon); + } + + // Success case - clean 
up intermediate objects + spir_sve_result_release(sve); + spir_kernel_release(kernel); + *status = SPIR_COMPUTATION_SUCCESS; + return basis; + + } catch (...) { + // Clean up in case of exception + if (basis) spir_basis_release(basis); + if (sve) spir_sve_result_release(sve); + if (kernel) spir_kernel_release(kernel); + *status = SPIR_INTERNAL_ERROR; + return nullptr; + } +} + + +template +sparseir::PiecewiseLegendrePoly _singleout_poly_from_irtaufuncs(const sparseir::IRTauFuncsType &funcs, std::size_t idx) { + sparseir::IRTauFuncsType funcs_slice = funcs[idx]; + sparseir::PiecewiseLegendrePolyVector u0 = funcs_slice.get_obj(); + return u0[0]; +} + + +//template +//sparseir::IRBasisType _singleout_from_irtaufuncs(const sparseir::IRTauFuncsType &funcs, std::size_t idx) { + //sparseir::IRTauFuncsType funcs_slice = funcs[idx]; +//} + + + +template +bool compare_tensors_with_relative_error(const Eigen::Tensor &a, + const Eigen::Tensor &b, + double tol) +{ + // Convert to double tensor for absolute values + Eigen::Tensor diff(a.dimensions()); + Eigen::Tensor ref(a.dimensions()); + + // Compute absolute values element-wise + for (Eigen::Index i = 0; i < a.size(); ++i) { + diff.data()[i] = std::abs(a.data()[i] - b.data()[i]); + ref.data()[i] = std::abs(a.data()[i]); + } + + // Map tensors to matrices and use maxCoeff + Eigen::Map> diff_vec( + diff.data(), diff.size()); + Eigen::Map> ref_vec( + ref.data(), ref.size()); + + double max_diff = diff_vec.maxCoeff(); + double max_ref = ref_vec.maxCoeff(); + + // debug + if (max_diff > tol * max_ref) { + std::cout << "max_diff: " << max_diff << std::endl; + std::cout << "max_ref: " << max_ref << std::endl; + std::cout << "tol " << tol << std::endl; + } + + return max_diff <= tol * max_ref; +} diff --git a/backend/cxx/test/cinterface_integration.cxx b/backend/test/cinterface_integration.cxx similarity index 91% rename from backend/cxx/test/cinterface_integration.cxx rename to backend/test/cinterface_integration.cxx index 
e8a50098..177e15a3 100644 --- a/backend/cxx/test/cinterface_integration.cxx +++ b/backend/test/cinterface_integration.cxx @@ -14,22 +14,52 @@ #include #include -#include // C++ interface +// C++ interface removed for C-API test #include // C interface #include "_utils.hpp" -using Catch::Approx; +// Helper function for movedim (replacing sparseir::movedim) +template +Eigen::array getperm_local(int src, int dst) +{ + Eigen::array perm; + if (src == dst) { + for (int i = 0; i < N; ++i) { + perm[i] = i; + } + return perm; + } + + int pos = 0; + for (int i = 0; i < N; ++i) { + if (i == dst) { + perm[i] = src; + } else { + // Skip src position + if (pos == src) + ++pos; + perm[i] = pos; + ++pos; + } + } + return perm; +} -template -int get_stat() +template +Eigen::Tensor movedim_local(const Eigen::Tensor &arr, int src, int dst) { - if (std::is_same::value) { - return SPIR_STATISTICS_FERMIONIC; - } else { - return SPIR_STATISTICS_BOSONIC; + if (src == dst) { + return arr; } + auto perm = getperm_local(src, dst); + return arr.shuffle(perm); } +using Catch::Approx; + +// Remove template, use runtime parameter +// get_stat function is now replaced with direct parameter usage + // int get_order(Eigen::StorageOptions order) //{ // if (order == Eigen::ColMajor) { @@ -160,7 +190,7 @@ _transform_coefficients(const Eigen::Tensor &coeffs, // Move target dimension to the first position Eigen::Tensor coeffs_targetdim0 = - sparseir::movedim(coeffs, target_dim, 0); + movedim_local(coeffs, target_dim, 0); // Calculate the size of extra dimensions Eigen::Index extra_size = 1; @@ -188,7 +218,7 @@ _transform_coefficients(const Eigen::Tensor &coeffs, result_mat = basis_eval * coeffs_mat.template cast(); // Move dimensions back to original order - return sparseir::movedim(result, 0, target_dim); + return movedim_local(result, 0, target_dim); } template @@ -216,19 +246,15 @@ _evaluate_giw(const Eigen::Tensor &coeffs, return result; } -template -spir_kernel* _kernel_new(double lambda); - 
-template <> -spir_kernel* _kernel_new(double lambda) +// C-API function for kernel creation +spir_kernel* _kernel_new_logistic(double lambda) { int status; spir_kernel* kernel = spir_logistic_kernel_new(lambda, &status); return kernel; } -template <> -spir_kernel* _kernel_new(double lambda) +spir_kernel* _kernel_new_reg_bose(double lambda) { int status; spir_kernel* kernel = spir_reg_bose_kernel_new(lambda, &status); @@ -350,10 +376,11 @@ struct tensor_converter { /* T: double or std::complex, scalar type of coeffs */ -template +template void integration_test(double beta, double wmax, double epsilon, const std::vector &extra_dims, int target_dim, - const int order, double tol, bool positive_only) + const int order, int stat, spir_kernel* kernel, + double tol, bool positive_only) { // positive_only is not supported for complex numbers REQUIRE (!(std::is_same>::value && positive_only)); @@ -369,11 +396,7 @@ void integration_test(double beta, double wmax, double epsilon, REQUIRE(order == SPIR_ORDER_ROW_MAJOR); } - auto stat = get_stat(); int status; - - // IR basis - spir_kernel* kernel = _kernel_new(beta * wmax); spir_sve_result* sve = spir_sve_result_new(kernel, epsilon, -1.0, -1, -1, SPIR_TWORK_AUTO, &status); REQUIRE(status == SPIR_COMPUTATION_SUCCESS); REQUIRE(sve != nullptr); @@ -492,7 +515,7 @@ void integration_test(double beta, double wmax, double epsilon, // Move the axis for the poles from the first to the target dimension Eigen::Tensor coeffs = - sparseir::movedim(coeffs_targetdim0, 0, target_dim); + movedim_local(coeffs_targetdim0, 0, target_dim); // Convert DLR coefficients to IR coefficients Eigen::Tensor g_IR( @@ -674,19 +697,21 @@ TEST_CASE("Integration Test", "[cinterface]") { double tol = 10 * epsilon; + // Create kernel once for all tests + spir_kernel* kernel = _kernel_new_logistic(beta * wmax); + int stat = SPIR_STATISTICS_BOSONIC; + for (bool positive_only : {false, true}) { std::cout << "positive_only = " << positive_only << std::endl; { 
std::vector extra_dims = {}; std::cout << "Integration test for bosonic LogisticKernel" << std::endl; - integration_test(beta, wmax, epsilon, extra_dims, 0, - SPIR_ORDER_COLUMN_MAJOR, tol, positive_only); + integration_test(beta, wmax, epsilon, extra_dims, 0, + SPIR_ORDER_COLUMN_MAJOR, stat, kernel, tol, positive_only); if (!positive_only) { - integration_test, sparseir::Bosonic, sparseir::LogisticKernel, 1, - Eigen::ColMajor>(beta, wmax, epsilon, extra_dims, 0, - SPIR_ORDER_COLUMN_MAJOR, tol, positive_only); + integration_test, 1, Eigen::ColMajor>(beta, wmax, epsilon, extra_dims, 0, + SPIR_ORDER_COLUMN_MAJOR, stat, kernel, tol, positive_only); } } @@ -694,13 +719,11 @@ TEST_CASE("Integration Test", "[cinterface]") { int target_dim = 0; std::vector extra_dims = {}; std::cout << "Integration test for bosonic LogisticKernel, ColMajor, target_dim = " << target_dim << std::endl; - integration_test(beta, wmax, epsilon, extra_dims, target_dim, - SPIR_ORDER_COLUMN_MAJOR, tol, positive_only); + integration_test(beta, wmax, epsilon, extra_dims, target_dim, + SPIR_ORDER_COLUMN_MAJOR, stat, kernel, tol, positive_only); if (!positive_only) { - integration_test, sparseir::Bosonic, sparseir::LogisticKernel, 1, - Eigen::ColMajor>(beta, wmax, epsilon, extra_dims, target_dim, - SPIR_ORDER_COLUMN_MAJOR, tol, positive_only); + integration_test, 1, Eigen::ColMajor>(beta, wmax, epsilon, extra_dims, target_dim, + SPIR_ORDER_COLUMN_MAJOR, stat, kernel, tol, positive_only); } } @@ -708,13 +731,11 @@ TEST_CASE("Integration Test", "[cinterface]") { int target_dim = 0; std::vector extra_dims = {}; std::cout << "Integration test for bosonic LogisticKernel, RowMajor, target_dim = " << target_dim << std::endl; - integration_test(beta, wmax, epsilon, extra_dims, target_dim, - SPIR_ORDER_ROW_MAJOR, tol, positive_only); + integration_test(beta, wmax, epsilon, extra_dims, target_dim, + SPIR_ORDER_ROW_MAJOR, stat, kernel, tol, positive_only); if (!positive_only) { - integration_test, 
sparseir::Bosonic, sparseir::LogisticKernel, 1, - Eigen::RowMajor>(beta, wmax, epsilon, extra_dims, target_dim, - SPIR_ORDER_ROW_MAJOR, tol, positive_only); + integration_test, 1, Eigen::RowMajor>(beta, wmax, epsilon, extra_dims, target_dim, + SPIR_ORDER_ROW_MAJOR, stat, kernel, tol, positive_only); } } @@ -722,17 +743,15 @@ TEST_CASE("Integration Test", "[cinterface]") { for (int target_dim = 0; target_dim < 4; ++target_dim) { std::vector extra_dims = {2,3,4}; std::cout << "Integration test for bosonic LogisticKernel, ColMajor, target_dim = " << target_dim << std::endl; - integration_test(beta, wmax, epsilon, extra_dims, target_dim, - SPIR_ORDER_COLUMN_MAJOR, tol, positive_only); + integration_test(beta, wmax, epsilon, extra_dims, target_dim, + SPIR_ORDER_COLUMN_MAJOR, stat, kernel, tol, positive_only); } for (int target_dim = 0; target_dim < 4; ++target_dim) { std::vector extra_dims = {2,3,4}; std::cout << "Integration test for bosonic LogisticKernel, RowMajor, target_dim = " << target_dim << std::endl; - integration_test(beta, wmax, epsilon, extra_dims, target_dim, - SPIR_ORDER_ROW_MAJOR, tol, positive_only); + integration_test(beta, wmax, epsilon, extra_dims, target_dim, + SPIR_ORDER_ROW_MAJOR, stat, kernel, tol, positive_only); } } } From 10dcf02232c66da78167535307ccc31fc0c09c62 Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 09:22:45 +0900 Subject: [PATCH 05/21] Rename backend/test to backend/capi_test for C-API tests - Renamed backend/test to backend/capi_test for better clarity - Updated CMakeLists.txt to use backend/capi_test - Directory contains C-API only tests (no C++ dependencies) --- CMakeLists.txt | 2 +- backend/capi_test/CMakeLists.txt | 29 + backend/capi_test/_utils.hpp | 139 ++++ backend/capi_test/cinterface_integration.cxx | 757 +++++++++++++++++++ 4 files changed, 926 insertions(+), 1 deletion(-) create mode 100644 backend/capi_test/CMakeLists.txt create mode 100644 backend/capi_test/_utils.hpp create mode 100644 
backend/capi_test/cinterface_integration.cxx diff --git a/CMakeLists.txt b/CMakeLists.txt index 52fb81dc..e89ec56d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -237,7 +237,7 @@ if (SPARSEIR_BUILD_TESTING) # Add test directories add_subdirectory("backend/cxx/test") - add_subdirectory("backend/test") + add_subdirectory("backend/capi_test") # Add Fortran test directory if Fortran is enabled if(SPARSEIR_BUILD_FORTRAN) diff --git a/backend/capi_test/CMakeLists.txt b/backend/capi_test/CMakeLists.txt new file mode 100644 index 00000000..70380c91 --- /dev/null +++ b/backend/capi_test/CMakeLists.txt @@ -0,0 +1,29 @@ +set(CMAKE_CXX_STANDARD 11) + +# C-API integration test +add_executable(cinterface_integration cinterface_integration.cxx) + +target_include_directories(cinterface_integration PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR}/../cxx/include + ${CMAKE_CURRENT_SOURCE_DIR}/../cxx/test + ${EIGEN3_INCLUDE_DIR} + ${xprec_SOURCE_DIR}/include +) + +target_link_libraries(cinterface_integration PRIVATE + Catch2::Catch2WithMain + SparseIR::sparseir + Eigen3::Eigen + Threads::Threads +) + +if(SPARSEIR_USE_BLAS AND BLAS_FOUND) + target_link_libraries(cinterface_integration PRIVATE ${BLAS_LIBRARIES}) +endif() + +if(APPLE AND ACCELERATE_FRAMEWORK) + target_link_libraries(cinterface_integration PRIVATE ${ACCELERATE_FRAMEWORK}) +endif() + +add_test(NAME cinterface_integration COMMAND cinterface_integration -d yes) + diff --git a/backend/capi_test/_utils.hpp b/backend/capi_test/_utils.hpp new file mode 100644 index 00000000..3c05deea --- /dev/null +++ b/backend/capi_test/_utils.hpp @@ -0,0 +1,139 @@ +#pragma once + +#include + +#include // C interface + +inline spir_basis *_spir_basis_new(int32_t statistics, double beta, + double omega_max, double epsilon, + int32_t *status) +{ + int32_t status_; + spir_kernel *kernel = nullptr; + spir_sve_result *sve = nullptr; + spir_basis *basis = nullptr; + + try { + // Create a logistic kernel + kernel = spir_logistic_kernel_new(beta * omega_max, 
&status_); + if (status_ != SPIR_COMPUTATION_SUCCESS || kernel == nullptr) { + *status = status_; + return nullptr; + } + + // Create a pre-computed SVE result + double cutoff = -1.0; + int lmax = -1; + int n_gauss = -1; + int Twork = SPIR_TWORK_AUTO; + sve = spir_sve_result_new(kernel, epsilon, cutoff, lmax, n_gauss, Twork, &status_); + if (status_ != SPIR_COMPUTATION_SUCCESS || sve == nullptr) { + *status = status_; + spir_kernel_release(kernel); + return nullptr; + } + + int sve_size; + status_ = spir_sve_result_get_size(sve, &sve_size); + if (status_ != SPIR_COMPUTATION_SUCCESS) { + *status = status_; + spir_sve_result_release(sve); + spir_kernel_release(kernel); + return nullptr; + } + REQUIRE(sve_size > 0); + + // Create a fermionic finite temperature basis with pre-computed SVE result + int max_size = -1; + basis = spir_basis_new( + statistics, beta, omega_max, epsilon, kernel, sve, max_size, &status_); + if (status_ != SPIR_COMPUTATION_SUCCESS || basis == nullptr) { + *status = status_; + spir_sve_result_release(sve); + spir_kernel_release(kernel); + return nullptr; + } + + int basis_size; + status_ = spir_basis_get_size(basis, &basis_size); + if (status_ != SPIR_COMPUTATION_SUCCESS) { + *status = status_; + spir_basis_release(basis); + spir_sve_result_release(sve); + spir_kernel_release(kernel); + } + + std::vector svals(sve_size); + int svals_status = spir_sve_result_get_svals(sve, svals.data()); + REQUIRE(svals_status == SPIR_COMPUTATION_SUCCESS); + + REQUIRE(basis_size <= sve_size); + if (sve_size > basis_size) { + REQUIRE(svals[basis_size] / svals[0] <= epsilon); + } + + // Success case - clean up intermediate objects + spir_sve_result_release(sve); + spir_kernel_release(kernel); + *status = SPIR_COMPUTATION_SUCCESS; + return basis; + + } catch (...) 
{ + // Clean up in case of exception + if (basis) spir_basis_release(basis); + if (sve) spir_sve_result_release(sve); + if (kernel) spir_kernel_release(kernel); + *status = SPIR_INTERNAL_ERROR; + return nullptr; + } +} + + +template +sparseir::PiecewiseLegendrePoly _singleout_poly_from_irtaufuncs(const sparseir::IRTauFuncsType &funcs, std::size_t idx) { + sparseir::IRTauFuncsType funcs_slice = funcs[idx]; + sparseir::PiecewiseLegendrePolyVector u0 = funcs_slice.get_obj(); + return u0[0]; +} + + +//template +//sparseir::IRBasisType _singleout_from_irtaufuncs(const sparseir::IRTauFuncsType &funcs, std::size_t idx) { + //sparseir::IRTauFuncsType funcs_slice = funcs[idx]; +//} + + + +template +bool compare_tensors_with_relative_error(const Eigen::Tensor &a, + const Eigen::Tensor &b, + double tol) +{ + // Convert to double tensor for absolute values + Eigen::Tensor diff(a.dimensions()); + Eigen::Tensor ref(a.dimensions()); + + // Compute absolute values element-wise + for (Eigen::Index i = 0; i < a.size(); ++i) { + diff.data()[i] = std::abs(a.data()[i] - b.data()[i]); + ref.data()[i] = std::abs(a.data()[i]); + } + + // Map tensors to matrices and use maxCoeff + Eigen::Map> diff_vec( + diff.data(), diff.size()); + Eigen::Map> ref_vec( + ref.data(), ref.size()); + + double max_diff = diff_vec.maxCoeff(); + double max_ref = ref_vec.maxCoeff(); + + // debug + if (max_diff > tol * max_ref) { + std::cout << "max_diff: " << max_diff << std::endl; + std::cout << "max_ref: " << max_ref << std::endl; + std::cout << "tol " << tol << std::endl; + } + + return max_diff <= tol * max_ref; +} diff --git a/backend/capi_test/cinterface_integration.cxx b/backend/capi_test/cinterface_integration.cxx new file mode 100644 index 00000000..177e15a3 --- /dev/null +++ b/backend/capi_test/cinterface_integration.cxx @@ -0,0 +1,757 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +// C++ interface 
removed for C-API test +#include // C interface +#include "_utils.hpp" + +// Helper function for movedim (replacing sparseir::movedim) +template +Eigen::array getperm_local(int src, int dst) +{ + Eigen::array perm; + if (src == dst) { + for (int i = 0; i < N; ++i) { + perm[i] = i; + } + return perm; + } + + int pos = 0; + for (int i = 0; i < N; ++i) { + if (i == dst) { + perm[i] = src; + } else { + // Skip src position + if (pos == src) + ++pos; + perm[i] = pos; + ++pos; + } + } + return perm; +} + +template +Eigen::Tensor movedim_local(const Eigen::Tensor &arr, int src, int dst) +{ + if (src == dst) { + return arr; + } + auto perm = getperm_local(src, dst); + return arr.shuffle(perm); +} + +using Catch::Approx; + +// Remove template, use runtime parameter +// get_stat function is now replaced with direct parameter usage + +// int get_order(Eigen::StorageOptions order) +//{ +// if (order == Eigen::ColMajor) { +// return SPIR_ORDER_COLUMN_MAJOR; +//} else { +// return SPIR_ORDER_ROW_MAJOR; +//} +//} + +template +std::array _get_dims(int target_dim_size, + const std::vector &extra_dims, + int target_dim) +{ + std::array dims; + dims[target_dim] = static_cast(target_dim_size); + int pos = 0; + for (int i = 0; i < ndim; ++i) { + if (i == target_dim) { + continue; + } + dims[i] = static_cast(extra_dims[pos]); + ++pos; + } + return dims; +} + +TEST_CASE("Test _get_dims", "[cinterface]") +{ + std::vector extra_dims = {2,3,4}; + { + int target_dim = 0; + auto dims = _get_dims<4>(100, extra_dims, target_dim); + REQUIRE(dims[0] == 100); + REQUIRE(dims[1] == 2); + REQUIRE(dims[2] == 3); + REQUIRE(dims[3] == 4); + } + { + int target_dim = 1; + auto dims = _get_dims<4>(100, extra_dims, target_dim); + REQUIRE(dims[0] == 2); + REQUIRE(dims[1] == 100); + REQUIRE(dims[2] == 3); + REQUIRE(dims[3] == 4); + } + { + int target_dim = 2; + auto dims = _get_dims<4>(100, extra_dims, target_dim); + REQUIRE(dims[0] == 2); + REQUIRE(dims[1] == 3); + REQUIRE(dims[2] == 100); + REQUIRE(dims[3] 
== 4); + } + { + int target_dim = 3; + auto dims = _get_dims<4>(100, extra_dims, target_dim); + REQUIRE(dims[0] == 2); + REQUIRE(dims[1] == 3); + REQUIRE(dims[2] == 4); + REQUIRE(dims[3] == 100); + } +} + +// Helper function to evaluate basis functions at multiple points +template +Eigen::Matrix +_evaluate_basis_functions(const spir_funcs *u, const Eigen::VectorXd &x_values) +{ + int status; + int funcs_size; + status = spir_funcs_get_size(u, &funcs_size); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(funcs_size > 0); + + Eigen::Matrix u_eval_mat( + x_values.size(), funcs_size); + for (Eigen::Index i = 0; i < x_values.size(); ++i) { + Eigen::VectorXd u_eval(funcs_size); + REQUIRE(u_eval.data() != nullptr); + status = spir_funcs_eval(u, x_values(i), u_eval.data()); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + u_eval_mat.row(i) = u_eval.transpose().cast(); + } + return u_eval_mat; +} + +// Helper function to evaluate Matsubara basis functions at multiple frequencies +template +Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic, ORDER> +_evaluate_matsubara_basis_functions(const spir_funcs *uhat, + const Eigen::Vector &matsubara_indices) +{ + int status; + int funcs_size; + status = spir_funcs_get_size(uhat, &funcs_size); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + // Allocate output matrix with shape (nfreqs, nfuncs) + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic, ORDER> + uhat_eval_mat(matsubara_indices.size(), funcs_size); + + // Create a non-const copy of the Matsubara indices + std::vector freq_indices(matsubara_indices.data(), + matsubara_indices.data() + + matsubara_indices.size()); + + // Evaluate all frequencies at once + status = spir_funcs_batch_eval_matsu( + uhat, + ORDER == Eigen::ColMajor ? 
SPIR_ORDER_COLUMN_MAJOR + : SPIR_ORDER_ROW_MAJOR, + matsubara_indices.size(), freq_indices.data(), + reinterpret_cast(uhat_eval_mat.data())); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + return uhat_eval_mat; +} + +// Helper function to perform the tensor transformation +template +Eigen::Tensor +_transform_coefficients(const Eigen::Tensor &coeffs, + const BasisEvalType &basis_eval, int target_dim) +{ + using PromotedType = decltype(T() * typename BasisEvalType::Scalar()); + + // Move target dimension to the first position + Eigen::Tensor coeffs_targetdim0 = + movedim_local(coeffs, target_dim, 0); + + // Calculate the size of extra dimensions + Eigen::Index extra_size = 1; + for (int i = 1; i < ndim; ++i) { + extra_size *= coeffs_targetdim0.dimension(i); + } + + // Create result tensor with correct dimensions + std::array dims; + dims[0] = basis_eval.rows(); + for (int i = 1; i < ndim; ++i) { + dims[i] = coeffs_targetdim0.dimension(i); + } + Eigen::Tensor result(dims); + + // Map tensors to matrices for multiplication + Eigen::Map> + coeffs_mat(coeffs_targetdim0.data(), coeffs_targetdim0.dimension(0), + extra_size); + Eigen::Map> + result_mat(result.data(), basis_eval.rows(), extra_size); + + // Perform matrix multiplication with proper type promotion + result_mat = basis_eval * coeffs_mat.template cast(); + + // Move dimensions back to original order + return movedim_local(result, 0, target_dim); +} + +template +Eigen::Tensor +_evaluate_gtau(const Eigen::Tensor &coeffs, const spir_funcs *u, + int target_dim, const Eigen::VectorXd &x_values) +{ + auto u_eval_mat = _evaluate_basis_functions(u, x_values); + return _transform_coefficients(coeffs, u_eval_mat, + target_dim); +} + +template +Eigen::Tensor, ndim, ORDER> +_evaluate_giw(const Eigen::Tensor &coeffs, + const spir_funcs *uhat, int target_dim, + const Eigen::Vector &matsubara_indices) +{ + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic, ORDER> uhat_eval_mat = + _evaluate_matsubara_basis_functions(uhat, 
matsubara_indices); + + Eigen::Tensor, ndim, ORDER> result = + _transform_coefficients(coeffs, uhat_eval_mat, + target_dim); + return result; +} + +// C-API function for kernel creation +spir_kernel* _kernel_new_logistic(double lambda) +{ + int status; + spir_kernel* kernel = spir_logistic_kernel_new(lambda, &status); + return kernel; +} + +spir_kernel* _kernel_new_reg_bose(double lambda) +{ + int status; + spir_kernel* kernel = spir_reg_bose_kernel_new(lambda, &status); + return kernel; +} + +// Add these template functions before the integration_test function +template +T generate_random_coeff(double random_value_real, double random_value_imag, double pole) { + // Silence unused parameter warning + (void)random_value_imag; + return (2.0 * random_value_real - 1.0) * std::sqrt(std::abs(pole)); +} + +template <> +std::complex generate_random_coeff>(double random_value_real, double random_value_imag, double pole) { + return std::complex( + (2.0 * random_value_real - 1.0) * std::sqrt(std::abs(pole)), + (2.0 * random_value_imag - 1.0) * std::sqrt(std::abs(pole)) + ); +} + +// Add these template functions before the integration_test function +template +int dlr_to_IR(spir_basis* dlr, int order, int ndim, + const int* dims, int target_dim, + const T* coeffs, T* g_IR) { + if (std::is_same::value) { + return spir_dlr2ir_dd(dlr, order, ndim, dims, target_dim, + reinterpret_cast(coeffs), + reinterpret_cast(g_IR)); + } else if (std::is_same>::value) { + return spir_dlr2ir_zz(dlr, order, ndim, dims, target_dim, + reinterpret_cast(coeffs), + reinterpret_cast(g_IR)); + } + return SPIR_INVALID_ARGUMENT; +} + + +template +int dlr_from_IR(spir_basis* dlr, int order, int ndim, + const int* dims, int target_dim, + const T* coeffs, T* g_IR) { + if (std::is_same::value) { + return spir_ir2dlr_dd(dlr, order, ndim, dims, target_dim, + reinterpret_cast(coeffs), + reinterpret_cast(g_IR)); + } else if (std::is_same>::value) { + return spir_ir2dlr_zz(dlr, order, ndim, dims, target_dim, + 
reinterpret_cast(coeffs), + reinterpret_cast(g_IR)); + } + return SPIR_INVALID_ARGUMENT; +} + +template +int _tau_sampling_evaluate(spir_sampling* sampling, int order, int ndim, + const int* dims, int target_dim, + const T* gIR, T* gtau) { + if (std::is_same::value) { + return spir_sampling_eval_dd(sampling, order, ndim, dims, target_dim, + reinterpret_cast(gIR), + reinterpret_cast(gtau)); + } else if (std::is_same>::value) { + return spir_sampling_eval_zz(sampling, order, ndim, dims, target_dim, + reinterpret_cast(gIR), + reinterpret_cast(gtau)); + } + return SPIR_INVALID_ARGUMENT; +} + +template +int _tau_sampling_fit(spir_sampling* sampling, int order, int ndim, + const int* dims, int target_dim, + const T* gtau, T* gIR) { + if (std::is_same::value) { + return spir_sampling_fit_dd(sampling, order, ndim, dims, target_dim, + reinterpret_cast(gtau), + reinterpret_cast(gIR)); + } else if (std::is_same>::value) { + return spir_sampling_fit_zz(sampling, order, ndim, dims, target_dim, + reinterpret_cast(gtau), + reinterpret_cast(gIR)); + } + return SPIR_INVALID_ARGUMENT; +} + +template +int _matsubara_sampling_evaluate(spir_sampling* sampling, int order, int ndim, + const int* dims, int target_dim, + const T* gIR, std::complex* giw) { + if (std::is_same::value) { + return spir_sampling_eval_dz(sampling, order, ndim, dims, target_dim, + reinterpret_cast(gIR), + reinterpret_cast(giw)); + } else if (std::is_same>::value) { + return spir_sampling_eval_zz(sampling, order, ndim, dims, target_dim, + reinterpret_cast(gIR), + reinterpret_cast(giw)); + } + return SPIR_INVALID_ARGUMENT; +} + +template +struct tensor_converter { + static void convert(const Eigen::Tensor, ndim, ORDER>& src, Eigen::Tensor& dst) { + dst = src.template cast(); + } +}; + +template +struct tensor_converter { + static void convert(const Eigen::Tensor, ndim, ORDER>& src, Eigen::Tensor& dst) { + dst = src.real(); + } +}; + +/* +T: double or std::complex, scalar type of coeffs +*/ +template +void 
integration_test(double beta, double wmax, double epsilon, + const std::vector &extra_dims, int target_dim, + const int order, int stat, spir_kernel* kernel, + double tol, bool positive_only) +{ + // positive_only is not supported for complex numbers + REQUIRE (!(std::is_same>::value && positive_only)); + + if (ndim != 1 + extra_dims.size()) { + std::cerr << "ndim must be 1 + extra_dims.size()" << std::endl; + } + + // Verify that the template parameter matches the runtime parameter + if (ORDER == Eigen::ColMajor) { + REQUIRE(order == SPIR_ORDER_COLUMN_MAJOR); + } else { + REQUIRE(order == SPIR_ORDER_ROW_MAJOR); + } + + int status; + spir_sve_result* sve = spir_sve_result_new(kernel, epsilon, -1.0, -1, -1, SPIR_TWORK_AUTO, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(sve != nullptr); + + spir_basis *basis = _spir_basis_new(stat, beta, wmax, epsilon, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(basis != nullptr); + + int basis_size; + status = spir_basis_get_size(basis, &basis_size); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + // Tau Sampling + std::cout << "Tau sampling" << std::endl; + int num_tau_points; + status = spir_basis_get_n_default_taus(basis, &num_tau_points); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(num_tau_points > 0); + + Eigen::VectorXd tau_points_org(num_tau_points); + status = spir_basis_get_default_taus(basis, tau_points_org.data()); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + spir_sampling *tau_sampling = spir_tau_sampling_new(basis, num_tau_points, tau_points_org.data(), &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(tau_sampling != nullptr); + + status = spir_sampling_get_npoints(tau_sampling, &num_tau_points); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + Eigen::VectorXd tau_points(num_tau_points); + status = spir_sampling_get_taus(tau_sampling, tau_points.data()); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(num_tau_points >= 
basis_size); + // compare tau_points and tau_points_org + for (int i = 0; i < num_tau_points; i++) { + REQUIRE(tau_points(i) == Approx(tau_points_org(i))); + } + + // Matsubara Sampling + std::cout << "Matsubara sampling" << std::endl; + int num_matsubara_points_org; + status = spir_basis_get_n_default_matsus(basis, positive_only, &num_matsubara_points_org); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(num_matsubara_points_org > 0); + Eigen::Vector matsubara_points_org(num_matsubara_points_org); + status = spir_basis_get_default_matsus(basis, positive_only, matsubara_points_org.data()); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + spir_sampling *matsubara_sampling = spir_matsu_sampling_new(basis, positive_only, num_matsubara_points_org, matsubara_points_org.data(), &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(matsubara_sampling != nullptr); + if (positive_only) { + REQUIRE(num_matsubara_points_org >= basis_size / 2); + } else { + REQUIRE(num_matsubara_points_org >= basis_size); + } + + int num_matsubara_points; + status = + spir_sampling_get_npoints(matsubara_sampling, &num_matsubara_points); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + Eigen::Vector matsubara_points( + num_matsubara_points); + status = spir_sampling_get_matsus(matsubara_sampling, + matsubara_points.data()); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + // compare matsubara_points and matsubara_points_org + for (int i = 0; i < num_matsubara_points; i++) { + REQUIRE(matsubara_points(i) == matsubara_points_org(i)); + } + + + // DLR + std::cout << "DLR" << std::endl; + spir_basis *dlr = spir_dlr_new(basis, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(dlr != nullptr); + int npoles; + status = spir_dlr_get_npoles(dlr, &npoles); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(npoles >= basis_size); + Eigen::VectorXd poles(npoles); + status = spir_dlr_get_poles(dlr, poles.data()); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + 
// Calculate total size of extra dimensions + Eigen::Index extra_size = std::accumulate( + extra_dims.begin(), extra_dims.end(), 1, std::multiplies<>()); + // Generate random DLR coefficients + Eigen::Tensor coeffs_targetdim0( + _get_dims(npoles, extra_dims, 0)); + std::mt19937 gen(982743); + std::uniform_real_distribution<> dis(0.0, 1.0); + { + Eigen::TensorMap> coeffs_2d( + coeffs_targetdim0.data(), npoles, extra_size); + for (Eigen::Index i = 0; i < npoles; ++i) { + for (Eigen::Index j = 0; j < extra_size; ++j) { + coeffs_2d(i, j) = generate_random_coeff(dis(gen), dis(gen), poles(i)); + } + } + } + + // DLR sampling objects + spir_sampling *tau_sampling_dlr = spir_tau_sampling_new(dlr, num_tau_points, tau_points_org.data(), &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(tau_sampling_dlr != nullptr); + + spir_funcs *dlr_uhat2 = spir_basis_get_uhat(dlr, &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(dlr_uhat2 != nullptr); + REQUIRE(spir_funcs_is_assigned(dlr_uhat2)); + + spir_sampling *matsubara_sampling_dlr = spir_matsu_sampling_new(dlr, positive_only, num_matsubara_points_org, matsubara_points_org.data(), &status); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(matsubara_sampling_dlr != nullptr); + + + // Move the axis for the poles from the first to the target dimension + Eigen::Tensor coeffs = + movedim_local(coeffs_targetdim0, 0, target_dim); + + // Convert DLR coefficients to IR coefficients + Eigen::Tensor g_IR( + _get_dims(basis_size, extra_dims, target_dim)); + status = dlr_to_IR(dlr, order, ndim, + _get_dims(npoles, extra_dims, target_dim).data(), target_dim, + coeffs.data(), g_IR.data()); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + // Convert IR coefficients to DLR coefficients + Eigen::Tensor g_DLR_reconst( + _get_dims(npoles, extra_dims, target_dim)); + status = dlr_from_IR(dlr, order, ndim, + _get_dims(npoles, extra_dims, target_dim).data(), target_dim, + g_IR.data(), g_DLR_reconst.data()); + 
REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + // From DLR to IR + Eigen::Tensor g_dlr( + _get_dims(basis_size, extra_dims, target_dim)); + status = dlr_from_IR(dlr, order, ndim, + _get_dims(basis_size, extra_dims, target_dim).data(), target_dim, + g_IR.data(), g_dlr.data()); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + // DLR basis functions + int dlr_u_status; + spir_funcs *dlr_u = spir_basis_get_u(dlr, &dlr_u_status); + REQUIRE(dlr_u_status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(dlr_u != nullptr); + + int dlr_uhat_status; + spir_funcs *dlr_uhat = spir_basis_get_uhat(dlr, &dlr_uhat_status); + REQUIRE(dlr_uhat_status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(dlr_uhat != nullptr); + + // IR basis functions + int ir_u_status; + spir_funcs *ir_u = spir_basis_get_u(basis, &ir_u_status); + REQUIRE(ir_u_status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(ir_u != nullptr); + + int ir_uhat_status; + spir_funcs *ir_uhat = spir_basis_get_uhat(basis, &ir_uhat_status); + REQUIRE(ir_uhat_status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(ir_uhat != nullptr); + + // Compare the Greens function at all tau points between IR and DLR + Eigen::Tensor gtau_from_IR = + _evaluate_gtau(g_IR, ir_u, target_dim, tau_points); + Eigen::Tensor gtau_from_DLR = + _evaluate_gtau(coeffs, dlr_u, target_dim, + tau_points); + Eigen::Tensor gtau_from_DLR_reconst = + _evaluate_gtau(g_DLR_reconst, dlr_u, target_dim, + tau_points); + REQUIRE(compare_tensors_with_relative_error( + gtau_from_IR, gtau_from_DLR, tol)); + REQUIRE(compare_tensors_with_relative_error( + gtau_from_IR, gtau_from_DLR_reconst, tol)); + + // Use sampling to evaluate the Greens function at all tau points between IR and DLR + if (std::is_same::value) { + status = spir_sampling_eval_dd( + tau_sampling_dlr, order, ndim, + _get_dims(npoles, extra_dims, target_dim).data(), + target_dim, + reinterpret_cast(coeffs.data()), + reinterpret_cast(gtau_from_DLR.data())); + } else if (std::is_same>::value) { + status = spir_sampling_eval_zz( + 
tau_sampling_dlr, order, ndim, + _get_dims(npoles, extra_dims, target_dim).data(), + target_dim, + reinterpret_cast(coeffs.data()), + reinterpret_cast(gtau_from_DLR.data())); + } + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + REQUIRE(compare_tensors_with_relative_error( + gtau_from_IR, gtau_from_DLR, tol)); + + + // Compare the Greens function at all Matsubara frequencies between IR and + // DLR + Eigen::Tensor, ndim, ORDER> giw_from_IR = + _evaluate_giw(g_IR, ir_uhat, target_dim, + matsubara_points); + Eigen::Tensor, ndim, ORDER> giw_from_DLR = + _evaluate_giw(coeffs, dlr_uhat, target_dim, + matsubara_points); + REQUIRE( + compare_tensors_with_relative_error, ndim, ORDER>( + giw_from_IR, giw_from_DLR, tol)); + + // Use sampling to evaluate the Greens function at all Matsubara frequencies between IR and DLR + { + if (std::is_same::value) { + status = spir_sampling_eval_dz( + matsubara_sampling_dlr, order, ndim, + _get_dims(npoles, extra_dims, target_dim).data(), + target_dim, + reinterpret_cast(coeffs.data()), + reinterpret_cast(giw_from_DLR.data())); + } else if (std::is_same>::value) { + status = spir_sampling_eval_zz( + matsubara_sampling_dlr, order, ndim, + _get_dims(npoles, extra_dims, target_dim).data(), + target_dim, + reinterpret_cast(coeffs.data()), + reinterpret_cast(giw_from_DLR.data())); + } + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + REQUIRE(compare_tensors_with_relative_error, ndim, ORDER>(giw_from_IR, giw_from_DLR, tol)); + } + + auto dims_matsubara = + _get_dims(num_matsubara_points, extra_dims, target_dim); + auto dims_IR = _get_dims(basis_size, extra_dims, target_dim); + auto dims_tau = + _get_dims(num_tau_points, extra_dims, target_dim); + + Eigen::Tensor gIR( + _get_dims(basis_size, extra_dims, target_dim)); + Eigen::Tensor gIR2( + _get_dims(basis_size, extra_dims, target_dim)); + Eigen::Tensor gtau( + _get_dims(num_tau_points, extra_dims, target_dim)); + Eigen::Tensor, ndim, ORDER> giw_reconst( + _get_dims(num_matsubara_points, 
extra_dims, + target_dim)); + + // Matsubara -> IR + { + Eigen::Tensor, ndim, ORDER> + gIR_work( _get_dims(basis_size, extra_dims, target_dim)); + status = spir_sampling_fit_zz( + matsubara_sampling, order, ndim, dims_matsubara.data(), target_dim, + reinterpret_cast(giw_from_DLR.data()), + reinterpret_cast(gIR_work.data())); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + tensor_converter::convert(gIR_work, gIR); + } + + // IR -> tau + status = _tau_sampling_evaluate( + tau_sampling, order, ndim, dims_IR.data(), target_dim, + gIR.data(), gtau.data()); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + // tau -> IR + status = _tau_sampling_fit( + tau_sampling, order, ndim, dims_tau.data(), target_dim, + gtau.data(), gIR2.data()); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + // IR -> Matsubara + status = _matsubara_sampling_evaluate( + matsubara_sampling, order, ndim, dims_IR.data(), target_dim, + gIR2.data(), giw_reconst.data()); + REQUIRE(status == SPIR_COMPUTATION_SUCCESS); + + Eigen::Tensor, ndim, ORDER> giw_from_IR_reconst = + _evaluate_giw(gIR2, ir_uhat, target_dim, matsubara_points); + REQUIRE(compare_tensors_with_relative_error, ndim, ORDER>(giw_from_DLR, giw_from_IR_reconst, tol)); + + spir_basis_release(basis); + spir_basis_release(dlr); + spir_funcs_release(dlr_u); + spir_funcs_release(ir_u); + spir_sampling_release(tau_sampling); +} + +TEST_CASE("Integration Test", "[cinterface]") { + double beta = 1e+3; + double wmax = 2.0; + double epsilon = 1e-6; + + double tol = 10 * epsilon; + + + // Create kernel once for all tests + spir_kernel* kernel = _kernel_new_logistic(beta * wmax); + int stat = SPIR_STATISTICS_BOSONIC; + + for (bool positive_only : {false, true}) { + std::cout << "positive_only = " << positive_only << std::endl; + { + std::vector extra_dims = {}; + std::cout << "Integration test for bosonic LogisticKernel" << std::endl; + integration_test(beta, wmax, epsilon, extra_dims, 0, + SPIR_ORDER_COLUMN_MAJOR, stat, kernel, tol, positive_only); 
+ + if (!positive_only) { + integration_test, 1, Eigen::ColMajor>(beta, wmax, epsilon, extra_dims, 0, + SPIR_ORDER_COLUMN_MAJOR, stat, kernel, tol, positive_only); + } + } + + { + int target_dim = 0; + std::vector extra_dims = {}; + std::cout << "Integration test for bosonic LogisticKernel, ColMajor, target_dim = " << target_dim << std::endl; + integration_test(beta, wmax, epsilon, extra_dims, target_dim, + SPIR_ORDER_COLUMN_MAJOR, stat, kernel, tol, positive_only); + if (!positive_only) { + integration_test, 1, Eigen::ColMajor>(beta, wmax, epsilon, extra_dims, target_dim, + SPIR_ORDER_COLUMN_MAJOR, stat, kernel, tol, positive_only); + } + } + + { + int target_dim = 0; + std::vector extra_dims = {}; + std::cout << "Integration test for bosonic LogisticKernel, RowMajor, target_dim = " << target_dim << std::endl; + integration_test(beta, wmax, epsilon, extra_dims, target_dim, + SPIR_ORDER_ROW_MAJOR, stat, kernel, tol, positive_only); + if (!positive_only) { + integration_test, 1, Eigen::RowMajor>(beta, wmax, epsilon, extra_dims, target_dim, + SPIR_ORDER_ROW_MAJOR, stat, kernel, tol, positive_only); + } + } + + // extra dims = {2,3,4} + for (int target_dim = 0; target_dim < 4; ++target_dim) { + std::vector extra_dims = {2,3,4}; + std::cout << "Integration test for bosonic LogisticKernel, ColMajor, target_dim = " << target_dim << std::endl; + integration_test(beta, wmax, epsilon, extra_dims, target_dim, + SPIR_ORDER_COLUMN_MAJOR, stat, kernel, tol, positive_only); + } + + for (int target_dim = 0; target_dim < 4; ++target_dim) { + std::vector extra_dims = {2,3,4}; + std::cout << "Integration test for bosonic LogisticKernel, RowMajor, target_dim = " << target_dim << std::endl; + integration_test(beta, wmax, epsilon, extra_dims, target_dim, + SPIR_ORDER_ROW_MAJOR, stat, kernel, tol, positive_only); + } + } +} From 62ced595c3b22622cf0613a499d72fb1b7b21825 Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 10:05:21 +0900 Subject: [PATCH 06/21] refactor: 
modularize CMake structure with independent components - Create independent backend/cxx/CMakeLists.txt for C++ library - Standalone build system with dependency resolution - Can be built and installed independently - Add build_capi_with_tests.sh script - Create independent fortran/CMakeLists.txt - Uses find_package(SparseIR) for C library dependency - Standalone Fortran bindings build - Move backend/capi_test to capi_test at top level - Update CMakeLists.txt to be fully independent - Add test_with_cxx_backend.sh to test against installed backend - Remove unused C++ API dependencies from _utils.hpp - Rename benchmark to capi_benchmark for clarity - Remove old build scripts and backup to back/ directory Each component (backend/cxx, fortran, capi_test) can now be built independently using find_package(SparseIR) to locate dependencies. --- _build_fortran.sh => back/_build_fortran.sh | 0 build_capi.sh => back/build_capi.sh | 0 back/build_capi_with_asan.sh | 30 + back/build_optimized.sh | 32 + .../build_with_tests.sh | 0 backend/capi_test/CMakeLists.txt | 29 - backend/cxx/CMakeLists.txt | 194 +++++ backend/cxx/build_capi_with_tests.sh | 37 + backend/test/CMakeLists.txt | 29 - backend/test/_utils.hpp | 139 ---- backend/test/cinterface_integration.cxx | 757 ------------------ {benchmark => capi_benchmark}/CMakeLists.txt | 0 {benchmark => capi_benchmark}/README.md | 0 {benchmark => capi_benchmark}/benchmark1.c | 0 capi_benchmark/result-blas | 33 + capi_benchmark/result-eigen-internal | 40 + capi_benchmark/run.sh | 3 + capi_benchmark/tmp | 33 + capi_test/CMakeLists.txt | 76 ++ {backend/capi_test => capi_test}/_utils.hpp | 15 - .../cinterface_integration.cxx | 0 capi_test/test_with_cxx_backend.sh | 58 ++ fortran/CMakeLists.txt | 49 +- 23 files changed, 570 insertions(+), 984 deletions(-) rename _build_fortran.sh => back/_build_fortran.sh (100%) rename build_capi.sh => back/build_capi.sh (100%) create mode 100644 back/build_capi_with_asan.sh create mode 100644 
back/build_optimized.sh rename build_with_tests.sh => back/build_with_tests.sh (100%) delete mode 100644 backend/capi_test/CMakeLists.txt create mode 100644 backend/cxx/CMakeLists.txt create mode 100755 backend/cxx/build_capi_with_tests.sh delete mode 100644 backend/test/CMakeLists.txt delete mode 100644 backend/test/_utils.hpp delete mode 100644 backend/test/cinterface_integration.cxx rename {benchmark => capi_benchmark}/CMakeLists.txt (100%) rename {benchmark => capi_benchmark}/README.md (100%) rename {benchmark => capi_benchmark}/benchmark1.c (100%) create mode 100644 capi_benchmark/result-blas create mode 100644 capi_benchmark/result-eigen-internal create mode 100644 capi_benchmark/run.sh create mode 100644 capi_benchmark/tmp create mode 100644 capi_test/CMakeLists.txt rename {backend/capi_test => capi_test}/_utils.hpp (89%) rename {backend/capi_test => capi_test}/cinterface_integration.cxx (100%) create mode 100755 capi_test/test_with_cxx_backend.sh diff --git a/_build_fortran.sh b/back/_build_fortran.sh similarity index 100% rename from _build_fortran.sh rename to back/_build_fortran.sh diff --git a/build_capi.sh b/back/build_capi.sh similarity index 100% rename from build_capi.sh rename to back/build_capi.sh diff --git a/back/build_capi_with_asan.sh b/back/build_capi_with_asan.sh new file mode 100644 index 00000000..9c3905c4 --- /dev/null +++ b/back/build_capi_with_asan.sh @@ -0,0 +1,30 @@ +#!/bin/bash +set -e + +# Get script directory +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd "$SCRIPT_DIR" + +# Create build directory +mkdir -p build +cd build + +# Configure with tests enabled +cmake .. 
\ + -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX:-$HOME/opt/libsparseir} \ + -DCMAKE_BUILD_TYPE=Debug \ + -DCMAKE_C_FLAGS="-w -fsanitize=address -static-libasan" \ + -DCMAKE_CXX_FLAGS="-w -fsanitize=address -static-libasan" \ + -DCMAKE_EXE_LINKER_FLAGS="-static-libasan" \ + -DSPARSEIR_BUILD_FORTRAN=OFF \ + -DSPARSEIR_BUILD_TESTING=OFF \ + -DSPARSEIR_USE_BLAS=ON + +# Build (including tests) +cmake --build . --config Release -j 4 + +# Run tests +ctest --output-on-failure + +echo "SparseIR was built with tests successfully." +echo "You can install it using: cd build && cmake --install ." diff --git a/back/build_optimized.sh b/back/build_optimized.sh new file mode 100644 index 00000000..cce0cedc --- /dev/null +++ b/back/build_optimized.sh @@ -0,0 +1,32 @@ +#!/bin/bash +set -e + +# Get script directory +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd "$SCRIPT_DIR" + +# Clean build directory to ensure clean state +rm -rf build +mkdir -p build +cd build + +# Configure with maximum optimization for benchmarking +cmake .. \ + -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX:-$HOME/opt/libsparseir} \ + -DSPARSEIR_BUILD_FORTRAN=ON \ + -DSPARSEIR_BUILD_TESTING=OFF \ + -DSPARSEIR_USE_BLAS=OFF \ + -DBUILD_TESTING=OFF \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CXX_FLAGS="-O3 -DNDEBUG -g0 -march=native" \ + -DCMAKE_EXE_LINKER_FLAGS="-flto" \ + -DCMAKE_VERBOSE_MAKEFILE=ON + +# Build with maximum parallelization +cmake --build . --config Release -- -j $(nproc) + +# Install +cmake --install . + +echo "Optimized SparseIR library has been built and installed successfully." +echo "Debug symbols removed, maximum optimization enabled." 
diff --git a/build_with_tests.sh b/back/build_with_tests.sh similarity index 100% rename from build_with_tests.sh rename to back/build_with_tests.sh diff --git a/backend/capi_test/CMakeLists.txt b/backend/capi_test/CMakeLists.txt deleted file mode 100644 index 70380c91..00000000 --- a/backend/capi_test/CMakeLists.txt +++ /dev/null @@ -1,29 +0,0 @@ -set(CMAKE_CXX_STANDARD 11) - -# C-API integration test -add_executable(cinterface_integration cinterface_integration.cxx) - -target_include_directories(cinterface_integration PRIVATE - ${CMAKE_CURRENT_SOURCE_DIR}/../cxx/include - ${CMAKE_CURRENT_SOURCE_DIR}/../cxx/test - ${EIGEN3_INCLUDE_DIR} - ${xprec_SOURCE_DIR}/include -) - -target_link_libraries(cinterface_integration PRIVATE - Catch2::Catch2WithMain - SparseIR::sparseir - Eigen3::Eigen - Threads::Threads -) - -if(SPARSEIR_USE_BLAS AND BLAS_FOUND) - target_link_libraries(cinterface_integration PRIVATE ${BLAS_LIBRARIES}) -endif() - -if(APPLE AND ACCELERATE_FRAMEWORK) - target_link_libraries(cinterface_integration PRIVATE ${ACCELERATE_FRAMEWORK}) -endif() - -add_test(NAME cinterface_integration COMMAND cinterface_integration -d yes) - diff --git a/backend/cxx/CMakeLists.txt b/backend/cxx/CMakeLists.txt new file mode 100644 index 00000000..5d2675a6 --- /dev/null +++ b/backend/cxx/CMakeLists.txt @@ -0,0 +1,194 @@ +cmake_minimum_required(VERSION 3.10) + +# Extract version from header +include(GNUInstallDirs) + +set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../../cmake/modules") +include("VersionFromHeader") +version_from_header(SPARSEIR_VERSION + HEADER "include/sparseir/version.h" + MACROS SPARSEIR_VERSION_MAJOR SPARSEIR_VERSION_MINOR SPARSEIR_VERSION_PATCH +) + +project(SparseIR + VERSION ${SPARSEIR_VERSION} + LANGUAGES CXX + DESCRIPTION "SparseIR C++ library" +) + +set(CMAKE_CXX_STANDARD 11) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +# Options +option(SPARSEIR_USE_BLAS "Enable BLAS support" OFF) +option(SPARSEIR_USE_ILP64 "Enable ILP64 BLAS interface" OFF) 
+option(SPARSEIR_USE_LAPACKE "Enable LAPACKE support" OFF) +option(SPARSEIR_BUILD_TESTING "Enable testing" OFF) +option(SPARSEIR_DEBUG "Enable debug logging" OFF) + +if(SPARSEIR_DEBUG) + add_definitions(-DDEBUG_SPIR) +endif() + +# Find Eigen3 +set(EIGEN3_REQUIRED_VERSION "3.4.0") +find_package(Eigen3 ${EIGEN3_REQUIRED_VERSION} QUIET NO_MODULE) +if(NOT Eigen3_FOUND) + message(STATUS "Eigen3 not found, fetching from GitLab...") + include(FetchContent) + FetchContent_Declare(Eigen3 + GIT_REPOSITORY https://gitlab.com/libeigen/eigen.git + GIT_TAG ${EIGEN3_REQUIRED_VERSION} + ) + FetchContent_MakeAvailable(Eigen3) +endif() + +# Find Threads +find_package(Threads REQUIRED) + +# Fetch XPrec +include(FetchContent) +FetchContent_Declare(XPrec + GIT_REPOSITORY https://github.com/tuwien-cms/libxprec + GIT_TAG v0.7.0 +) +FetchContent_GetProperties(XPrec) +if(NOT xprec_POPULATED) + FetchContent_Populate(XPrec) +endif() + +# BLAS/LAPACK configuration +if(SPARSEIR_USE_BLAS) + add_compile_definitions(SPARSEIR_USE_BLAS) + if(SPARSEIR_USE_ILP64) + add_compile_definitions(SPARSEIR_USE_ILP64) + # Try to find ILP64 BLAS + if(NOT BLAS_LIBRARIES) + find_library(BLAS_LIBRARIES NAMES openblas64) + endif() + else() + find_package(BLAS REQUIRED) + endif() +endif() + +# C++ Library Build +add_library(sparseir SHARED + src/utils.cpp + src/linalg.cpp + src/root.cpp + src/specfuncs.cpp + src/svd.cpp + src/sve.cpp + src/poly.cpp + src/kernel.cpp + src/gemm.cpp + src/cinterface.cpp +) + +if(NOT MSVC) + target_compile_options(sparseir PRIVATE -Wall -Wextra -pedantic) +endif() + +target_include_directories(sparseir + PUBLIC + $ + $ + PRIVATE + $ +) + +set_target_properties(sparseir PROPERTIES + VERSION ${PROJECT_VERSION} + SOVERSION ${PROJECT_VERSION_MAJOR} + CXX_STANDARD 11 + CXX_STANDARD_REQUIRED ON +) + +target_link_libraries(sparseir PRIVATE Eigen3::Eigen Threads::Threads) + +# BLAS/LAPACK linking +if(SPARSEIR_USE_BLAS) + if(BLAS_LIBRARIES OR BLAS_FOUND) + target_link_libraries(sparseir PRIVATE 
${BLAS_LIBRARIES}) + message(STATUS "Linked BLAS libraries: ${BLAS_LIBRARIES}") + else() + message(FATAL_ERROR "BLAS libraries not found") + endif() +else() + message(STATUS "BLAS linking disabled") +endif() + +if(SPARSEIR_USE_LAPACKE AND LAPACK_FOUND) + target_link_libraries(sparseir PRIVATE ${LAPACK_LIBRARIES}) +endif() + +# macOS Accelerate framework +if(APPLE) + find_library(ACCELERATE_FRAMEWORK Accelerate) + if(ACCELERATE_FRAMEWORK) + target_link_libraries(sparseir PRIVATE ${ACCELERATE_FRAMEWORK}) + endif() +endif() + +# Library convention alias +add_library(SparseIR::sparseir ALIAS sparseir) + +# Testing (if enabled) +if(SPARSEIR_BUILD_TESTING) + enable_testing() + + # Fetch Catch2 + FetchContent_Declare( + Catch2 + GIT_REPOSITORY https://github.com/catchorg/Catch2.git + GIT_TAG v3.4.0 + ) + FetchContent_MakeAvailable(Catch2) + + add_subdirectory(test) +endif() + +# Installation +install(TARGETS sparseir + EXPORT sparseirTargets + LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}" + ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" + RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}" + COMPONENT sparseir +) + +# Install C API headers only +install(FILES + include/sparseir/sparseir.h + include/sparseir/version.h + include/sparseir/spir_status.h + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/sparseir" + COMPONENT sparseir +) + +# CMake package config +include(CMakePackageConfigHelpers) +write_basic_package_version_file( + "${CMAKE_CURRENT_BINARY_DIR}/SparseIRConfigVersion.cmake" + VERSION ${PROJECT_VERSION} + COMPATIBILITY SameMajorVersion +) + +# Create SparseIRConfig.cmake +configure_package_config_file( + "${CMAKE_CURRENT_SOURCE_DIR}/../../cmake/sparseirConfig.cmake.in" + "${CMAKE_CURRENT_BINARY_DIR}/SparseIRConfig.cmake" + INSTALL_DESTINATION "${CMAKE_INSTALL_DATADIR}/cmake/SparseIR" +) + +install(EXPORT sparseirTargets + FILE SparseIRTargets.cmake + NAMESPACE SparseIR:: + DESTINATION "${CMAKE_INSTALL_DATADIR}/cmake/SparseIR" +) + +install(FILES + 
"${CMAKE_CURRENT_BINARY_DIR}/SparseIRConfig.cmake" + "${CMAKE_CURRENT_BINARY_DIR}/SparseIRConfigVersion.cmake" + DESTINATION "${CMAKE_INSTALL_DATADIR}/cmake/SparseIR" +) diff --git a/backend/cxx/build_capi_with_tests.sh b/backend/cxx/build_capi_with_tests.sh new file mode 100755 index 00000000..81860d87 --- /dev/null +++ b/backend/cxx/build_capi_with_tests.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Build backend/cxx and run C++ tests +# Build directory: build + +set -e # Exit on error + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +echo -e "${GREEN}=== Building backend/cxx with tests ===${NC}" + +# Create build directory +BUILD_DIR="build" +mkdir -p "$BUILD_DIR" + +# Configure CMake +echo -e "${YELLOW}Configuring CMake...${NC}" +cd "$BUILD_DIR" +cmake .. \ + -DCMAKE_BUILD_TYPE=Release \ + -DSPARSEIR_BUILD_TESTING=ON \ + -DSPARSEIR_USE_BLAS=ON + +# Build +echo -e "${YELLOW}Building...${NC}" +cmake --build . -j$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) + +# Run tests +echo -e "${YELLOW}Running C++ tests...${NC}" +ctest --output-on-failure --verbose + +echo -e "${GREEN}=== Build and test completed successfully ===${NC}" + diff --git a/backend/test/CMakeLists.txt b/backend/test/CMakeLists.txt deleted file mode 100644 index 70380c91..00000000 --- a/backend/test/CMakeLists.txt +++ /dev/null @@ -1,29 +0,0 @@ -set(CMAKE_CXX_STANDARD 11) - -# C-API integration test -add_executable(cinterface_integration cinterface_integration.cxx) - -target_include_directories(cinterface_integration PRIVATE - ${CMAKE_CURRENT_SOURCE_DIR}/../cxx/include - ${CMAKE_CURRENT_SOURCE_DIR}/../cxx/test - ${EIGEN3_INCLUDE_DIR} - ${xprec_SOURCE_DIR}/include -) - -target_link_libraries(cinterface_integration PRIVATE - Catch2::Catch2WithMain - SparseIR::sparseir - Eigen3::Eigen - Threads::Threads -) - -if(SPARSEIR_USE_BLAS AND BLAS_FOUND) - target_link_libraries(cinterface_integration PRIVATE ${BLAS_LIBRARIES}) -endif() - 
-if(APPLE AND ACCELERATE_FRAMEWORK) - target_link_libraries(cinterface_integration PRIVATE ${ACCELERATE_FRAMEWORK}) -endif() - -add_test(NAME cinterface_integration COMMAND cinterface_integration -d yes) - diff --git a/backend/test/_utils.hpp b/backend/test/_utils.hpp deleted file mode 100644 index 3c05deea..00000000 --- a/backend/test/_utils.hpp +++ /dev/null @@ -1,139 +0,0 @@ -#pragma once - -#include - -#include // C interface - -inline spir_basis *_spir_basis_new(int32_t statistics, double beta, - double omega_max, double epsilon, - int32_t *status) -{ - int32_t status_; - spir_kernel *kernel = nullptr; - spir_sve_result *sve = nullptr; - spir_basis *basis = nullptr; - - try { - // Create a logistic kernel - kernel = spir_logistic_kernel_new(beta * omega_max, &status_); - if (status_ != SPIR_COMPUTATION_SUCCESS || kernel == nullptr) { - *status = status_; - return nullptr; - } - - // Create a pre-computed SVE result - double cutoff = -1.0; - int lmax = -1; - int n_gauss = -1; - int Twork = SPIR_TWORK_AUTO; - sve = spir_sve_result_new(kernel, epsilon, cutoff, lmax, n_gauss, Twork, &status_); - if (status_ != SPIR_COMPUTATION_SUCCESS || sve == nullptr) { - *status = status_; - spir_kernel_release(kernel); - return nullptr; - } - - int sve_size; - status_ = spir_sve_result_get_size(sve, &sve_size); - if (status_ != SPIR_COMPUTATION_SUCCESS) { - *status = status_; - spir_sve_result_release(sve); - spir_kernel_release(kernel); - return nullptr; - } - REQUIRE(sve_size > 0); - - // Create a fermionic finite temperature basis with pre-computed SVE result - int max_size = -1; - basis = spir_basis_new( - statistics, beta, omega_max, epsilon, kernel, sve, max_size, &status_); - if (status_ != SPIR_COMPUTATION_SUCCESS || basis == nullptr) { - *status = status_; - spir_sve_result_release(sve); - spir_kernel_release(kernel); - return nullptr; - } - - int basis_size; - status_ = spir_basis_get_size(basis, &basis_size); - if (status_ != SPIR_COMPUTATION_SUCCESS) { - *status = 
status_; - spir_basis_release(basis); - spir_sve_result_release(sve); - spir_kernel_release(kernel); - } - - std::vector svals(sve_size); - int svals_status = spir_sve_result_get_svals(sve, svals.data()); - REQUIRE(svals_status == SPIR_COMPUTATION_SUCCESS); - - REQUIRE(basis_size <= sve_size); - if (sve_size > basis_size) { - REQUIRE(svals[basis_size] / svals[0] <= epsilon); - } - - // Success case - clean up intermediate objects - spir_sve_result_release(sve); - spir_kernel_release(kernel); - *status = SPIR_COMPUTATION_SUCCESS; - return basis; - - } catch (...) { - // Clean up in case of exception - if (basis) spir_basis_release(basis); - if (sve) spir_sve_result_release(sve); - if (kernel) spir_kernel_release(kernel); - *status = SPIR_INTERNAL_ERROR; - return nullptr; - } -} - - -template -sparseir::PiecewiseLegendrePoly _singleout_poly_from_irtaufuncs(const sparseir::IRTauFuncsType &funcs, std::size_t idx) { - sparseir::IRTauFuncsType funcs_slice = funcs[idx]; - sparseir::PiecewiseLegendrePolyVector u0 = funcs_slice.get_obj(); - return u0[0]; -} - - -//template -//sparseir::IRBasisType _singleout_from_irtaufuncs(const sparseir::IRTauFuncsType &funcs, std::size_t idx) { - //sparseir::IRTauFuncsType funcs_slice = funcs[idx]; -//} - - - -template -bool compare_tensors_with_relative_error(const Eigen::Tensor &a, - const Eigen::Tensor &b, - double tol) -{ - // Convert to double tensor for absolute values - Eigen::Tensor diff(a.dimensions()); - Eigen::Tensor ref(a.dimensions()); - - // Compute absolute values element-wise - for (Eigen::Index i = 0; i < a.size(); ++i) { - diff.data()[i] = std::abs(a.data()[i] - b.data()[i]); - ref.data()[i] = std::abs(a.data()[i]); - } - - // Map tensors to matrices and use maxCoeff - Eigen::Map> diff_vec( - diff.data(), diff.size()); - Eigen::Map> ref_vec( - ref.data(), ref.size()); - - double max_diff = diff_vec.maxCoeff(); - double max_ref = ref_vec.maxCoeff(); - - // debug - if (max_diff > tol * max_ref) { - std::cout << "max_diff: 
" << max_diff << std::endl; - std::cout << "max_ref: " << max_ref << std::endl; - std::cout << "tol " << tol << std::endl; - } - - return max_diff <= tol * max_ref; -} diff --git a/backend/test/cinterface_integration.cxx b/backend/test/cinterface_integration.cxx deleted file mode 100644 index 177e15a3..00000000 --- a/backend/test/cinterface_integration.cxx +++ /dev/null @@ -1,757 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -// C++ interface removed for C-API test -#include // C interface -#include "_utils.hpp" - -// Helper function for movedim (replacing sparseir::movedim) -template -Eigen::array getperm_local(int src, int dst) -{ - Eigen::array perm; - if (src == dst) { - for (int i = 0; i < N; ++i) { - perm[i] = i; - } - return perm; - } - - int pos = 0; - for (int i = 0; i < N; ++i) { - if (i == dst) { - perm[i] = src; - } else { - // Skip src position - if (pos == src) - ++pos; - perm[i] = pos; - ++pos; - } - } - return perm; -} - -template -Eigen::Tensor movedim_local(const Eigen::Tensor &arr, int src, int dst) -{ - if (src == dst) { - return arr; - } - auto perm = getperm_local(src, dst); - return arr.shuffle(perm); -} - -using Catch::Approx; - -// Remove template, use runtime parameter -// get_stat function is now replaced with direct parameter usage - -// int get_order(Eigen::StorageOptions order) -//{ -// if (order == Eigen::ColMajor) { -// return SPIR_ORDER_COLUMN_MAJOR; -//} else { -// return SPIR_ORDER_ROW_MAJOR; -//} -//} - -template -std::array _get_dims(int target_dim_size, - const std::vector &extra_dims, - int target_dim) -{ - std::array dims; - dims[target_dim] = static_cast(target_dim_size); - int pos = 0; - for (int i = 0; i < ndim; ++i) { - if (i == target_dim) { - continue; - } - dims[i] = static_cast(extra_dims[pos]); - ++pos; - } - return dims; -} - -TEST_CASE("Test _get_dims", "[cinterface]") -{ - std::vector extra_dims = {2,3,4}; - { 
- int target_dim = 0; - auto dims = _get_dims<4>(100, extra_dims, target_dim); - REQUIRE(dims[0] == 100); - REQUIRE(dims[1] == 2); - REQUIRE(dims[2] == 3); - REQUIRE(dims[3] == 4); - } - { - int target_dim = 1; - auto dims = _get_dims<4>(100, extra_dims, target_dim); - REQUIRE(dims[0] == 2); - REQUIRE(dims[1] == 100); - REQUIRE(dims[2] == 3); - REQUIRE(dims[3] == 4); - } - { - int target_dim = 2; - auto dims = _get_dims<4>(100, extra_dims, target_dim); - REQUIRE(dims[0] == 2); - REQUIRE(dims[1] == 3); - REQUIRE(dims[2] == 100); - REQUIRE(dims[3] == 4); - } - { - int target_dim = 3; - auto dims = _get_dims<4>(100, extra_dims, target_dim); - REQUIRE(dims[0] == 2); - REQUIRE(dims[1] == 3); - REQUIRE(dims[2] == 4); - REQUIRE(dims[3] == 100); - } -} - -// Helper function to evaluate basis functions at multiple points -template -Eigen::Matrix -_evaluate_basis_functions(const spir_funcs *u, const Eigen::VectorXd &x_values) -{ - int status; - int funcs_size; - status = spir_funcs_get_size(u, &funcs_size); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(funcs_size > 0); - - Eigen::Matrix u_eval_mat( - x_values.size(), funcs_size); - for (Eigen::Index i = 0; i < x_values.size(); ++i) { - Eigen::VectorXd u_eval(funcs_size); - REQUIRE(u_eval.data() != nullptr); - status = spir_funcs_eval(u, x_values(i), u_eval.data()); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - u_eval_mat.row(i) = u_eval.transpose().cast(); - } - return u_eval_mat; -} - -// Helper function to evaluate Matsubara basis functions at multiple frequencies -template -Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic, ORDER> -_evaluate_matsubara_basis_functions(const spir_funcs *uhat, - const Eigen::Vector &matsubara_indices) -{ - int status; - int funcs_size; - status = spir_funcs_get_size(uhat, &funcs_size); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - - // Allocate output matrix with shape (nfreqs, nfuncs) - Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic, ORDER> - 
uhat_eval_mat(matsubara_indices.size(), funcs_size); - - // Create a non-const copy of the Matsubara indices - std::vector freq_indices(matsubara_indices.data(), - matsubara_indices.data() + - matsubara_indices.size()); - - // Evaluate all frequencies at once - status = spir_funcs_batch_eval_matsu( - uhat, - ORDER == Eigen::ColMajor ? SPIR_ORDER_COLUMN_MAJOR - : SPIR_ORDER_ROW_MAJOR, - matsubara_indices.size(), freq_indices.data(), - reinterpret_cast(uhat_eval_mat.data())); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - - return uhat_eval_mat; -} - -// Helper function to perform the tensor transformation -template -Eigen::Tensor -_transform_coefficients(const Eigen::Tensor &coeffs, - const BasisEvalType &basis_eval, int target_dim) -{ - using PromotedType = decltype(T() * typename BasisEvalType::Scalar()); - - // Move target dimension to the first position - Eigen::Tensor coeffs_targetdim0 = - movedim_local(coeffs, target_dim, 0); - - // Calculate the size of extra dimensions - Eigen::Index extra_size = 1; - for (int i = 1; i < ndim; ++i) { - extra_size *= coeffs_targetdim0.dimension(i); - } - - // Create result tensor with correct dimensions - std::array dims; - dims[0] = basis_eval.rows(); - for (int i = 1; i < ndim; ++i) { - dims[i] = coeffs_targetdim0.dimension(i); - } - Eigen::Tensor result(dims); - - // Map tensors to matrices for multiplication - Eigen::Map> - coeffs_mat(coeffs_targetdim0.data(), coeffs_targetdim0.dimension(0), - extra_size); - Eigen::Map> - result_mat(result.data(), basis_eval.rows(), extra_size); - - // Perform matrix multiplication with proper type promotion - result_mat = basis_eval * coeffs_mat.template cast(); - - // Move dimensions back to original order - return movedim_local(result, 0, target_dim); -} - -template -Eigen::Tensor -_evaluate_gtau(const Eigen::Tensor &coeffs, const spir_funcs *u, - int target_dim, const Eigen::VectorXd &x_values) -{ - auto u_eval_mat = _evaluate_basis_functions(u, x_values); - return 
_transform_coefficients(coeffs, u_eval_mat, - target_dim); -} - -template -Eigen::Tensor, ndim, ORDER> -_evaluate_giw(const Eigen::Tensor &coeffs, - const spir_funcs *uhat, int target_dim, - const Eigen::Vector &matsubara_indices) -{ - Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic, ORDER> uhat_eval_mat = - _evaluate_matsubara_basis_functions(uhat, matsubara_indices); - - Eigen::Tensor, ndim, ORDER> result = - _transform_coefficients(coeffs, uhat_eval_mat, - target_dim); - return result; -} - -// C-API function for kernel creation -spir_kernel* _kernel_new_logistic(double lambda) -{ - int status; - spir_kernel* kernel = spir_logistic_kernel_new(lambda, &status); - return kernel; -} - -spir_kernel* _kernel_new_reg_bose(double lambda) -{ - int status; - spir_kernel* kernel = spir_reg_bose_kernel_new(lambda, &status); - return kernel; -} - -// Add these template functions before the integration_test function -template -T generate_random_coeff(double random_value_real, double random_value_imag, double pole) { - // Silence unused parameter warning - (void)random_value_imag; - return (2.0 * random_value_real - 1.0) * std::sqrt(std::abs(pole)); -} - -template <> -std::complex generate_random_coeff>(double random_value_real, double random_value_imag, double pole) { - return std::complex( - (2.0 * random_value_real - 1.0) * std::sqrt(std::abs(pole)), - (2.0 * random_value_imag - 1.0) * std::sqrt(std::abs(pole)) - ); -} - -// Add these template functions before the integration_test function -template -int dlr_to_IR(spir_basis* dlr, int order, int ndim, - const int* dims, int target_dim, - const T* coeffs, T* g_IR) { - if (std::is_same::value) { - return spir_dlr2ir_dd(dlr, order, ndim, dims, target_dim, - reinterpret_cast(coeffs), - reinterpret_cast(g_IR)); - } else if (std::is_same>::value) { - return spir_dlr2ir_zz(dlr, order, ndim, dims, target_dim, - reinterpret_cast(coeffs), - reinterpret_cast(g_IR)); - } - return SPIR_INVALID_ARGUMENT; -} - - -template -int 
dlr_from_IR(spir_basis* dlr, int order, int ndim, - const int* dims, int target_dim, - const T* coeffs, T* g_IR) { - if (std::is_same::value) { - return spir_ir2dlr_dd(dlr, order, ndim, dims, target_dim, - reinterpret_cast(coeffs), - reinterpret_cast(g_IR)); - } else if (std::is_same>::value) { - return spir_ir2dlr_zz(dlr, order, ndim, dims, target_dim, - reinterpret_cast(coeffs), - reinterpret_cast(g_IR)); - } - return SPIR_INVALID_ARGUMENT; -} - -template -int _tau_sampling_evaluate(spir_sampling* sampling, int order, int ndim, - const int* dims, int target_dim, - const T* gIR, T* gtau) { - if (std::is_same::value) { - return spir_sampling_eval_dd(sampling, order, ndim, dims, target_dim, - reinterpret_cast(gIR), - reinterpret_cast(gtau)); - } else if (std::is_same>::value) { - return spir_sampling_eval_zz(sampling, order, ndim, dims, target_dim, - reinterpret_cast(gIR), - reinterpret_cast(gtau)); - } - return SPIR_INVALID_ARGUMENT; -} - -template -int _tau_sampling_fit(spir_sampling* sampling, int order, int ndim, - const int* dims, int target_dim, - const T* gtau, T* gIR) { - if (std::is_same::value) { - return spir_sampling_fit_dd(sampling, order, ndim, dims, target_dim, - reinterpret_cast(gtau), - reinterpret_cast(gIR)); - } else if (std::is_same>::value) { - return spir_sampling_fit_zz(sampling, order, ndim, dims, target_dim, - reinterpret_cast(gtau), - reinterpret_cast(gIR)); - } - return SPIR_INVALID_ARGUMENT; -} - -template -int _matsubara_sampling_evaluate(spir_sampling* sampling, int order, int ndim, - const int* dims, int target_dim, - const T* gIR, std::complex* giw) { - if (std::is_same::value) { - return spir_sampling_eval_dz(sampling, order, ndim, dims, target_dim, - reinterpret_cast(gIR), - reinterpret_cast(giw)); - } else if (std::is_same>::value) { - return spir_sampling_eval_zz(sampling, order, ndim, dims, target_dim, - reinterpret_cast(gIR), - reinterpret_cast(giw)); - } - return SPIR_INVALID_ARGUMENT; -} - -template -struct tensor_converter { 
- static void convert(const Eigen::Tensor, ndim, ORDER>& src, Eigen::Tensor& dst) { - dst = src.template cast(); - } -}; - -template -struct tensor_converter { - static void convert(const Eigen::Tensor, ndim, ORDER>& src, Eigen::Tensor& dst) { - dst = src.real(); - } -}; - -/* -T: double or std::complex, scalar type of coeffs -*/ -template -void integration_test(double beta, double wmax, double epsilon, - const std::vector &extra_dims, int target_dim, - const int order, int stat, spir_kernel* kernel, - double tol, bool positive_only) -{ - // positive_only is not supported for complex numbers - REQUIRE (!(std::is_same>::value && positive_only)); - - if (ndim != 1 + extra_dims.size()) { - std::cerr << "ndim must be 1 + extra_dims.size()" << std::endl; - } - - // Verify that the template parameter matches the runtime parameter - if (ORDER == Eigen::ColMajor) { - REQUIRE(order == SPIR_ORDER_COLUMN_MAJOR); - } else { - REQUIRE(order == SPIR_ORDER_ROW_MAJOR); - } - - int status; - spir_sve_result* sve = spir_sve_result_new(kernel, epsilon, -1.0, -1, -1, SPIR_TWORK_AUTO, &status); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(sve != nullptr); - - spir_basis *basis = _spir_basis_new(stat, beta, wmax, epsilon, &status); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(basis != nullptr); - - int basis_size; - status = spir_basis_get_size(basis, &basis_size); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - - // Tau Sampling - std::cout << "Tau sampling" << std::endl; - int num_tau_points; - status = spir_basis_get_n_default_taus(basis, &num_tau_points); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(num_tau_points > 0); - - Eigen::VectorXd tau_points_org(num_tau_points); - status = spir_basis_get_default_taus(basis, tau_points_org.data()); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - - spir_sampling *tau_sampling = spir_tau_sampling_new(basis, num_tau_points, tau_points_org.data(), &status); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - 
REQUIRE(tau_sampling != nullptr); - - status = spir_sampling_get_npoints(tau_sampling, &num_tau_points); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - Eigen::VectorXd tau_points(num_tau_points); - status = spir_sampling_get_taus(tau_sampling, tau_points.data()); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(num_tau_points >= basis_size); - // compare tau_points and tau_points_org - for (int i = 0; i < num_tau_points; i++) { - REQUIRE(tau_points(i) == Approx(tau_points_org(i))); - } - - // Matsubara Sampling - std::cout << "Matsubara sampling" << std::endl; - int num_matsubara_points_org; - status = spir_basis_get_n_default_matsus(basis, positive_only, &num_matsubara_points_org); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(num_matsubara_points_org > 0); - Eigen::Vector matsubara_points_org(num_matsubara_points_org); - status = spir_basis_get_default_matsus(basis, positive_only, matsubara_points_org.data()); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - spir_sampling *matsubara_sampling = spir_matsu_sampling_new(basis, positive_only, num_matsubara_points_org, matsubara_points_org.data(), &status); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(matsubara_sampling != nullptr); - if (positive_only) { - REQUIRE(num_matsubara_points_org >= basis_size / 2); - } else { - REQUIRE(num_matsubara_points_org >= basis_size); - } - - int num_matsubara_points; - status = - spir_sampling_get_npoints(matsubara_sampling, &num_matsubara_points); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - Eigen::Vector matsubara_points( - num_matsubara_points); - status = spir_sampling_get_matsus(matsubara_sampling, - matsubara_points.data()); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - // compare matsubara_points and matsubara_points_org - for (int i = 0; i < num_matsubara_points; i++) { - REQUIRE(matsubara_points(i) == matsubara_points_org(i)); - } - - - // DLR - std::cout << "DLR" << std::endl; - spir_basis *dlr = spir_dlr_new(basis, &status); - 
REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(dlr != nullptr); - int npoles; - status = spir_dlr_get_npoles(dlr, &npoles); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(npoles >= basis_size); - Eigen::VectorXd poles(npoles); - status = spir_dlr_get_poles(dlr, poles.data()); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - // Calculate total size of extra dimensions - Eigen::Index extra_size = std::accumulate( - extra_dims.begin(), extra_dims.end(), 1, std::multiplies<>()); - // Generate random DLR coefficients - Eigen::Tensor coeffs_targetdim0( - _get_dims(npoles, extra_dims, 0)); - std::mt19937 gen(982743); - std::uniform_real_distribution<> dis(0.0, 1.0); - { - Eigen::TensorMap> coeffs_2d( - coeffs_targetdim0.data(), npoles, extra_size); - for (Eigen::Index i = 0; i < npoles; ++i) { - for (Eigen::Index j = 0; j < extra_size; ++j) { - coeffs_2d(i, j) = generate_random_coeff(dis(gen), dis(gen), poles(i)); - } - } - } - - // DLR sampling objects - spir_sampling *tau_sampling_dlr = spir_tau_sampling_new(dlr, num_tau_points, tau_points_org.data(), &status); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(tau_sampling_dlr != nullptr); - - spir_funcs *dlr_uhat2 = spir_basis_get_uhat(dlr, &status); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(dlr_uhat2 != nullptr); - REQUIRE(spir_funcs_is_assigned(dlr_uhat2)); - - spir_sampling *matsubara_sampling_dlr = spir_matsu_sampling_new(dlr, positive_only, num_matsubara_points_org, matsubara_points_org.data(), &status); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(matsubara_sampling_dlr != nullptr); - - - // Move the axis for the poles from the first to the target dimension - Eigen::Tensor coeffs = - movedim_local(coeffs_targetdim0, 0, target_dim); - - // Convert DLR coefficients to IR coefficients - Eigen::Tensor g_IR( - _get_dims(basis_size, extra_dims, target_dim)); - status = dlr_to_IR(dlr, order, ndim, - _get_dims(npoles, extra_dims, target_dim).data(), target_dim, - 
coeffs.data(), g_IR.data()); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - - // Convert IR coefficients to DLR coefficients - Eigen::Tensor g_DLR_reconst( - _get_dims(npoles, extra_dims, target_dim)); - status = dlr_from_IR(dlr, order, ndim, - _get_dims(npoles, extra_dims, target_dim).data(), target_dim, - g_IR.data(), g_DLR_reconst.data()); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - - // From DLR to IR - Eigen::Tensor g_dlr( - _get_dims(basis_size, extra_dims, target_dim)); - status = dlr_from_IR(dlr, order, ndim, - _get_dims(basis_size, extra_dims, target_dim).data(), target_dim, - g_IR.data(), g_dlr.data()); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - - // DLR basis functions - int dlr_u_status; - spir_funcs *dlr_u = spir_basis_get_u(dlr, &dlr_u_status); - REQUIRE(dlr_u_status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(dlr_u != nullptr); - - int dlr_uhat_status; - spir_funcs *dlr_uhat = spir_basis_get_uhat(dlr, &dlr_uhat_status); - REQUIRE(dlr_uhat_status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(dlr_uhat != nullptr); - - // IR basis functions - int ir_u_status; - spir_funcs *ir_u = spir_basis_get_u(basis, &ir_u_status); - REQUIRE(ir_u_status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(ir_u != nullptr); - - int ir_uhat_status; - spir_funcs *ir_uhat = spir_basis_get_uhat(basis, &ir_uhat_status); - REQUIRE(ir_uhat_status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(ir_uhat != nullptr); - - // Compare the Greens function at all tau points between IR and DLR - Eigen::Tensor gtau_from_IR = - _evaluate_gtau(g_IR, ir_u, target_dim, tau_points); - Eigen::Tensor gtau_from_DLR = - _evaluate_gtau(coeffs, dlr_u, target_dim, - tau_points); - Eigen::Tensor gtau_from_DLR_reconst = - _evaluate_gtau(g_DLR_reconst, dlr_u, target_dim, - tau_points); - REQUIRE(compare_tensors_with_relative_error( - gtau_from_IR, gtau_from_DLR, tol)); - REQUIRE(compare_tensors_with_relative_error( - gtau_from_IR, gtau_from_DLR_reconst, tol)); - - // Use sampling to evaluate the Greens function at 
all tau points between IR and DLR - if (std::is_same::value) { - status = spir_sampling_eval_dd( - tau_sampling_dlr, order, ndim, - _get_dims(npoles, extra_dims, target_dim).data(), - target_dim, - reinterpret_cast(coeffs.data()), - reinterpret_cast(gtau_from_DLR.data())); - } else if (std::is_same>::value) { - status = spir_sampling_eval_zz( - tau_sampling_dlr, order, ndim, - _get_dims(npoles, extra_dims, target_dim).data(), - target_dim, - reinterpret_cast(coeffs.data()), - reinterpret_cast(gtau_from_DLR.data())); - } - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - - REQUIRE(compare_tensors_with_relative_error( - gtau_from_IR, gtau_from_DLR, tol)); - - - // Compare the Greens function at all Matsubara frequencies between IR and - // DLR - Eigen::Tensor, ndim, ORDER> giw_from_IR = - _evaluate_giw(g_IR, ir_uhat, target_dim, - matsubara_points); - Eigen::Tensor, ndim, ORDER> giw_from_DLR = - _evaluate_giw(coeffs, dlr_uhat, target_dim, - matsubara_points); - REQUIRE( - compare_tensors_with_relative_error, ndim, ORDER>( - giw_from_IR, giw_from_DLR, tol)); - - // Use sampling to evaluate the Greens function at all Matsubara frequencies between IR and DLR - { - if (std::is_same::value) { - status = spir_sampling_eval_dz( - matsubara_sampling_dlr, order, ndim, - _get_dims(npoles, extra_dims, target_dim).data(), - target_dim, - reinterpret_cast(coeffs.data()), - reinterpret_cast(giw_from_DLR.data())); - } else if (std::is_same>::value) { - status = spir_sampling_eval_zz( - matsubara_sampling_dlr, order, ndim, - _get_dims(npoles, extra_dims, target_dim).data(), - target_dim, - reinterpret_cast(coeffs.data()), - reinterpret_cast(giw_from_DLR.data())); - } - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - REQUIRE(compare_tensors_with_relative_error, ndim, ORDER>(giw_from_IR, giw_from_DLR, tol)); - } - - auto dims_matsubara = - _get_dims(num_matsubara_points, extra_dims, target_dim); - auto dims_IR = _get_dims(basis_size, extra_dims, target_dim); - auto dims_tau = - 
_get_dims(num_tau_points, extra_dims, target_dim); - - Eigen::Tensor gIR( - _get_dims(basis_size, extra_dims, target_dim)); - Eigen::Tensor gIR2( - _get_dims(basis_size, extra_dims, target_dim)); - Eigen::Tensor gtau( - _get_dims(num_tau_points, extra_dims, target_dim)); - Eigen::Tensor, ndim, ORDER> giw_reconst( - _get_dims(num_matsubara_points, extra_dims, - target_dim)); - - // Matsubara -> IR - { - Eigen::Tensor, ndim, ORDER> - gIR_work( _get_dims(basis_size, extra_dims, target_dim)); - status = spir_sampling_fit_zz( - matsubara_sampling, order, ndim, dims_matsubara.data(), target_dim, - reinterpret_cast(giw_from_DLR.data()), - reinterpret_cast(gIR_work.data())); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - tensor_converter::convert(gIR_work, gIR); - } - - // IR -> tau - status = _tau_sampling_evaluate( - tau_sampling, order, ndim, dims_IR.data(), target_dim, - gIR.data(), gtau.data()); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - - // tau -> IR - status = _tau_sampling_fit( - tau_sampling, order, ndim, dims_tau.data(), target_dim, - gtau.data(), gIR2.data()); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - - // IR -> Matsubara - status = _matsubara_sampling_evaluate( - matsubara_sampling, order, ndim, dims_IR.data(), target_dim, - gIR2.data(), giw_reconst.data()); - REQUIRE(status == SPIR_COMPUTATION_SUCCESS); - - Eigen::Tensor, ndim, ORDER> giw_from_IR_reconst = - _evaluate_giw(gIR2, ir_uhat, target_dim, matsubara_points); - REQUIRE(compare_tensors_with_relative_error, ndim, ORDER>(giw_from_DLR, giw_from_IR_reconst, tol)); - - spir_basis_release(basis); - spir_basis_release(dlr); - spir_funcs_release(dlr_u); - spir_funcs_release(ir_u); - spir_sampling_release(tau_sampling); -} - -TEST_CASE("Integration Test", "[cinterface]") { - double beta = 1e+3; - double wmax = 2.0; - double epsilon = 1e-6; - - double tol = 10 * epsilon; - - - // Create kernel once for all tests - spir_kernel* kernel = _kernel_new_logistic(beta * wmax); - int stat = 
SPIR_STATISTICS_BOSONIC; - - for (bool positive_only : {false, true}) { - std::cout << "positive_only = " << positive_only << std::endl; - { - std::vector extra_dims = {}; - std::cout << "Integration test for bosonic LogisticKernel" << std::endl; - integration_test(beta, wmax, epsilon, extra_dims, 0, - SPIR_ORDER_COLUMN_MAJOR, stat, kernel, tol, positive_only); - - if (!positive_only) { - integration_test, 1, Eigen::ColMajor>(beta, wmax, epsilon, extra_dims, 0, - SPIR_ORDER_COLUMN_MAJOR, stat, kernel, tol, positive_only); - } - } - - { - int target_dim = 0; - std::vector extra_dims = {}; - std::cout << "Integration test for bosonic LogisticKernel, ColMajor, target_dim = " << target_dim << std::endl; - integration_test(beta, wmax, epsilon, extra_dims, target_dim, - SPIR_ORDER_COLUMN_MAJOR, stat, kernel, tol, positive_only); - if (!positive_only) { - integration_test, 1, Eigen::ColMajor>(beta, wmax, epsilon, extra_dims, target_dim, - SPIR_ORDER_COLUMN_MAJOR, stat, kernel, tol, positive_only); - } - } - - { - int target_dim = 0; - std::vector extra_dims = {}; - std::cout << "Integration test for bosonic LogisticKernel, RowMajor, target_dim = " << target_dim << std::endl; - integration_test(beta, wmax, epsilon, extra_dims, target_dim, - SPIR_ORDER_ROW_MAJOR, stat, kernel, tol, positive_only); - if (!positive_only) { - integration_test, 1, Eigen::RowMajor>(beta, wmax, epsilon, extra_dims, target_dim, - SPIR_ORDER_ROW_MAJOR, stat, kernel, tol, positive_only); - } - } - - // extra dims = {2,3,4} - for (int target_dim = 0; target_dim < 4; ++target_dim) { - std::vector extra_dims = {2,3,4}; - std::cout << "Integration test for bosonic LogisticKernel, ColMajor, target_dim = " << target_dim << std::endl; - integration_test(beta, wmax, epsilon, extra_dims, target_dim, - SPIR_ORDER_COLUMN_MAJOR, stat, kernel, tol, positive_only); - } - - for (int target_dim = 0; target_dim < 4; ++target_dim) { - std::vector extra_dims = {2,3,4}; - std::cout << "Integration test for bosonic 
LogisticKernel, RowMajor, target_dim = " << target_dim << std::endl; - integration_test(beta, wmax, epsilon, extra_dims, target_dim, - SPIR_ORDER_ROW_MAJOR, stat, kernel, tol, positive_only); - } - } -} diff --git a/benchmark/CMakeLists.txt b/capi_benchmark/CMakeLists.txt similarity index 100% rename from benchmark/CMakeLists.txt rename to capi_benchmark/CMakeLists.txt diff --git a/benchmark/README.md b/capi_benchmark/README.md similarity index 100% rename from benchmark/README.md rename to capi_benchmark/README.md diff --git a/benchmark/benchmark1.c b/capi_benchmark/benchmark1.c similarity index 100% rename from benchmark/benchmark1.c rename to capi_benchmark/benchmark1.c diff --git a/capi_benchmark/result-blas b/capi_benchmark/result-blas new file mode 100644 index 00000000..003768af --- /dev/null +++ b/capi_benchmark/result-blas @@ -0,0 +1,33 @@ +Benchmark (positive only = false) +beta: 100000.000000 +omega_max: 1.000000 +epsilon: 0.000000 +Extra size: 1000 +Number of runs: 10000 +Kernel creation : 0.037000 ms +SVE computation : 8738.458000 ms +n_basis: 95 +n_tau: 95 +n_matsubara: 96 +fit_zz (Matsubara) : 47126.428000 ms +eval_zz (Matsubara) : 5542.738000 ms +eval_dz (Matsubara) : 10316.618000 ms +fit_zz (Tau) : 41147.279000 ms +eval_zz (Tau) : 6352.628000 ms + +Benchmark (positive only = true) +beta: 100000.000000 +omega_max: 1.000000 +epsilon: 0.000000 +Extra size: 1000 +Number of runs: 10000 +Kernel creation : 0.001000 ms +SVE computation : 8621.689000 ms +n_basis: 95 +n_tau: 95 +n_matsubara: 48 +fit_zz (Matsubara) : 7846.872000 ms +eval_zz (Matsubara) : 3648.960000 ms +eval_dz (Matsubara) : 5139.793000 ms +fit_zz (Tau) : 38652.935000 ms +eval_zz (Tau) : 6444.498000 ms diff --git a/capi_benchmark/result-eigen-internal b/capi_benchmark/result-eigen-internal new file mode 100644 index 00000000..576cef0e --- /dev/null +++ b/capi_benchmark/result-eigen-internal @@ -0,0 +1,40 @@ +Running all tests... +---------------------------------------- +Running benchmark1... 
+Benchmark (positive only = false) +beta: 100000.000000 +omega_max: 1.000000 +epsilon: 0.000000 +Extra size: 1000 +Number of runs: 10000 +Kernel creation : 0.034000 ms +SVE computation : 8783.172000 ms +n_basis: 95 +n_tau: 95 +n_matsubara: 96 +fit_zz (Matsubara) : 60689.285000 ms +eval_zz (Matsubara) : 29683.097000 ms +eval_dz (Matsubara) : 10119.379000 ms +fit_zz (Tau) : 29547.235000 ms +eval_zz (Tau) : 28734.945000 ms + +Benchmark (positive only = true) +beta: 100000.000000 +omega_max: 1.000000 +epsilon: 0.000000 +Extra size: 1000 +Number of runs: 10000 +Kernel creation : 0.001000 ms +SVE computation : 8597.571000 ms +n_basis: 95 +n_tau: 95 +n_matsubara: 48 +fit_zz (Matsubara) : 15342.874000 ms +eval_zz (Matsubara) : 14869.731000 ms +eval_dz (Matsubara) : 5389.729000 ms +fit_zz (Tau) : 30142.329000 ms +eval_zz (Tau) : 29850.478000 ms + +---------------------------------------- +All tests completed! +[100%] Built target test diff --git a/capi_benchmark/run.sh b/capi_benchmark/run.sh new file mode 100644 index 00000000..1efe4ef1 --- /dev/null +++ b/capi_benchmark/run.sh @@ -0,0 +1,3 @@ +export SparseIR_DIR=$HOME/opt/libsparseir/share/cmake +cmake -S . -B ./build +cmake --build build --target test diff --git a/capi_benchmark/tmp b/capi_benchmark/tmp new file mode 100644 index 00000000..8821810d --- /dev/null +++ b/capi_benchmark/tmp @@ -0,0 +1,33 @@ +---------------------------------------- +Running benchmark1... 
+Benchmark (positive only = false) +beta: 100000.000000 +omega_max: 1.000000 +epsilon: 0.000000 +Extra size: 1000 +Number of runs: 10000 +Kernel creation : 0.003000 ms +SVE computation : 8369.650000 ms +n_basis: 95 +n_tau: 95 +n_matsubara: 96 +fit_zz (Matsubara) : 53336.191000 ms +eval_zz (Matsubara) : 32080.965000 ms +fit_zz (Tau) : 34555.816000 ms +eval_zz (Tau) : 5998.359000 ms + +Benchmark (positive only = true) +beta: 100000.000000 +omega_max: 1.000000 +epsilon: 0.000000 +Extra size: 1000 +Number of runs: 10000 +Kernel creation : 0.002000 ms +SVE computation : 8551.958000 ms +n_basis: 95 +n_tau: 95 +n_matsubara: 48 +fit_zz (Matsubara) : 11195.550000 ms +eval_zz (Matsubara) : 16474.556000 ms +fit_zz (Tau) : 31909.953000 ms +eval_zz (Tau) : 6620.095000 ms diff --git a/capi_test/CMakeLists.txt b/capi_test/CMakeLists.txt new file mode 100644 index 00000000..a944741b --- /dev/null +++ b/capi_test/CMakeLists.txt @@ -0,0 +1,76 @@ +cmake_minimum_required(VERSION 3.15) +project(SparseIR_CAPI_Tests LANGUAGES CXX) + +set(CMAKE_CXX_STANDARD 11) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +# Find dependencies +find_package(SparseIR REQUIRED) + +# Find Eigen3 +find_package(Eigen3 3.4.0 QUIET NO_MODULE) +if(NOT Eigen3_FOUND) + message(STATUS "Eigen3 not found in system, fetching from GitHub...") + include(FetchContent) + FetchContent_Declare(Eigen3 + GIT_REPOSITORY https://gitlab.com/libeigen/eigen.git + GIT_TAG 3.4.0 + ) + FetchContent_MakeAvailable(Eigen3) +endif() + +# Find Threads +find_package(Threads REQUIRED) + +# Find or fetch Catch2 +include(FetchContent) +FetchContent_Declare( + Catch2 + GIT_REPOSITORY https://github.com/catchorg/Catch2.git + GIT_TAG v3.4.0 +) +FetchContent_MakeAvailable(Catch2) + +# Fetch XPrec for test utilities +FetchContent_Declare(XPrec + GIT_REPOSITORY https://github.com/tuwien-cms/libxprec + GIT_TAG v0.7.0 +) +FetchContent_GetProperties(XPrec) +if(NOT xprec_POPULATED) + FetchContent_Populate(XPrec) +endif() + +# C-API integration test 
+add_executable(cinterface_integration cinterface_integration.cxx) + +target_include_directories(cinterface_integration PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR} + ${EIGEN3_INCLUDE_DIR} + ${xprec_SOURCE_DIR}/include +) + +target_link_libraries(cinterface_integration PRIVATE + Catch2::Catch2WithMain + SparseIR::sparseir + Eigen3::Eigen + Threads::Threads +) + +# Optional BLAS linking +find_package(BLAS QUIET) +if(BLAS_FOUND) + target_link_libraries(cinterface_integration PRIVATE ${BLAS_LIBRARIES}) +endif() + +# macOS Accelerate framework +if(APPLE) + find_library(ACCELERATE_FRAMEWORK Accelerate) + if(ACCELERATE_FRAMEWORK) + target_link_libraries(cinterface_integration PRIVATE ${ACCELERATE_FRAMEWORK}) + endif() +endif() + +# Enable testing +enable_testing() +add_test(NAME cinterface_integration COMMAND cinterface_integration -d yes) diff --git a/backend/capi_test/_utils.hpp b/capi_test/_utils.hpp similarity index 89% rename from backend/capi_test/_utils.hpp rename to capi_test/_utils.hpp index 3c05deea..205d431e 100644 --- a/backend/capi_test/_utils.hpp +++ b/capi_test/_utils.hpp @@ -89,21 +89,6 @@ inline spir_basis *_spir_basis_new(int32_t statistics, double beta, } -template -sparseir::PiecewiseLegendrePoly _singleout_poly_from_irtaufuncs(const sparseir::IRTauFuncsType &funcs, std::size_t idx) { - sparseir::IRTauFuncsType funcs_slice = funcs[idx]; - sparseir::PiecewiseLegendrePolyVector u0 = funcs_slice.get_obj(); - return u0[0]; -} - - -//template -//sparseir::IRBasisType _singleout_from_irtaufuncs(const sparseir::IRTauFuncsType &funcs, std::size_t idx) { - //sparseir::IRTauFuncsType funcs_slice = funcs[idx]; -//} - - - template bool compare_tensors_with_relative_error(const Eigen::Tensor &a, const Eigen::Tensor &b, diff --git a/backend/capi_test/cinterface_integration.cxx b/capi_test/cinterface_integration.cxx similarity index 100% rename from backend/capi_test/cinterface_integration.cxx rename to capi_test/cinterface_integration.cxx diff --git 
a/capi_test/test_with_cxx_backend.sh b/capi_test/test_with_cxx_backend.sh new file mode 100755 index 00000000..9bb6b9ed --- /dev/null +++ b/capi_test/test_with_cxx_backend.sh @@ -0,0 +1,58 @@ +#!/bin/bash + +# Build backend/cxx, install it, then build and run capi_test against it +# Directory structure: +# work_cxx/build_backend - Build directory for backend/cxx +# work_cxx/install_backend - Install directory for backend/cxx +# work_cxx/build_test - Build directory for capi_test + +set -e # Exit on error + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +WORK_DIR="$SCRIPT_DIR/work_cxx" +BACKEND_DIR="$SCRIPT_DIR/../backend/cxx" +INSTALL_DIR="$WORK_DIR/install_backend" + +echo -e "${GREEN}=== Testing capi_test with C++ backend ===${NC}" + +# Step 1: Build and install backend/cxx (without tests) +echo -e "${YELLOW}Step 1: Building backend/cxx...${NC}" +mkdir -p "$WORK_DIR/build_backend" +cd "$WORK_DIR/build_backend" + +cmake "$BACKEND_DIR" \ + -DCMAKE_BUILD_TYPE=Debug \ + -DCMAKE_INSTALL_PREFIX="$INSTALL_DIR" \ + -DSPARSEIR_BUILD_TESTING=OFF \ + -DSPARSEIR_USE_BLAS=OFF + +echo -e "${YELLOW}Building backend/cxx...${NC}" +cmake --build . -j$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) + +echo -e "${YELLOW}Installing backend/cxx to $INSTALL_DIR...${NC}" +cmake --install . + +# Step 2: Build and test capi_test +echo -e "${YELLOW}Step 2: Building capi_test...${NC}" +cd "$SCRIPT_DIR" +mkdir -p "$WORK_DIR/build_test" +cd "$WORK_DIR/build_test" + +cmake "$SCRIPT_DIR" \ + -DCMAKE_BUILD_TYPE=Debug \ + -DCMAKE_PREFIX_PATH="$INSTALL_DIR" + +echo -e "${YELLOW}Building capi_test...${NC}" +cmake --build . 
-j$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) + +echo -e "${YELLOW}Running capi_test...${NC}" +ctest --output-on-failure --verbose + +echo -e "${GREEN}=== All tests completed successfully ===${NC}" + diff --git a/fortran/CMakeLists.txt b/fortran/CMakeLists.txt index d6b35b8a..ab91df54 100644 --- a/fortran/CMakeLists.txt +++ b/fortran/CMakeLists.txt @@ -1,4 +1,8 @@ -# Fortran bindings for SparseIR +cmake_minimum_required(VERSION 3.10) +project(SparseIR_Fortran LANGUAGES Fortran) + +# Find dependencies +find_package(SparseIR REQUIRED) # Enable Fortran preprocessor set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -cpp") @@ -35,32 +39,47 @@ target_include_directories(sparseir_fortran INTERFACE set_target_properties(sparseir_fortran PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR} - #Fortran_MODULE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/fortran POSITION_INDEPENDENT_CODE ON ) -target_link_libraries(sparseir_fortran PUBLIC sparseir) +target_link_libraries(sparseir_fortran PUBLIC SparseIR::sparseir) # Add alias add_library(SparseIR::sparseir_fortran ALIAS sparseir_fortran) +# Testing (if enabled via option) +option(SPARSEIR_BUILD_TESTING "Enable creation of SparseIR tests" OFF) +if(SPARSEIR_BUILD_TESTING) + enable_testing() + add_subdirectory(test) +endif() + # Install Fortran library install(TARGETS sparseir_fortran - EXPORT sparseirTargets - LIBRARY DESTINATION "${SPARSEIR_INSTALL_LIBDIR}" - ARCHIVE DESTINATION "${SPARSEIR_INSTALL_LIBDIR}" - RUNTIME DESTINATION "${SPARSEIR_INSTALL_BINDIR}" - COMPONENT sparseir + EXPORT sparseirFortranTargets + LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}" + ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" + RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}" + COMPONENT sparseir_fortran ) # Install Fortran module files -message(STATUS "SPARSEIR_INSTALL_LIBDIR: ${SPARSEIR_INSTALL_LIBDIR}") -message(STATUS "SPARSEIR_INSTALL_BINDIR: ${SPARSEIR_INSTALL_BINDIR}") -message(STATUS "SPARSEIR_INSTALL_INCLUDEDIR: 
${SPARSEIR_INSTALL_INCLUDEDIR}") -message(STATUS "CMAKE_INSTALL_INCLUDEDIR: ${CMAKE_INSTALL_INCLUDEDIR}") -message(STATUS "Installing Fortran module files to ${SPARSEIR_INSTALL_INCLUDEDIR}/sparseir") install( DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/fortran/ - DESTINATION "${SPARSEIR_INSTALL_INCLUDEDIR}/sparseir" + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/sparseir" FILES_MATCHING PATTERN "*.mod" -) \ No newline at end of file +) + +# CMake package config for Fortran bindings +include(CMakePackageConfigHelpers) +write_basic_package_version_file( + "${CMAKE_CURRENT_BINARY_DIR}/SparseIRFortranConfigVersion.cmake" + VERSION ${PROJECT_VERSION} + COMPATIBILITY SameMajorVersion +) + +install(EXPORT sparseirFortranTargets + FILE SparseIRFortranTargets.cmake + NAMESPACE SparseIR:: + DESTINATION "${CMAKE_INSTALL_DATADIR}/cmake/SparseIRFortran" +) From 150863dd13ecc46c331fe879bc22046568316617 Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 10:15:24 +0900 Subject: [PATCH 07/21] ci: add simplified GitHub Actions workflow for C++ backend testing - Move old workflows to back/workflows/ for backup - Create new test_cxx_backend.yml workflow - Uses shell scripts for simplicity - Tests on Ubuntu and macOS - BLAS enabled by default - Supports manual trigger (workflow_dispatch) - Triggers on push to main and 459-restructure-directories branches - Update capi_test/test_with_cxx_backend.sh to enable BLAS Workflow steps: 1. Build C++ backend with tests and run them 2. Build and install C++ backend (no tests) 3. 
Build and run capi_test against installed backend --- .github/workflows/test_cxx_backend.yml | 38 +++++++++++++++++++ .../workflows/CI_C_API_Python.yml | 0 .../workflows/CI_PublishTestPyPI.yml | 0 .../workflows/CI_cmake_intel.yml | 0 .../workflows/CI_cmake_no_blas.yml | 0 .../workflows/CI_cmake_with_blas.yml | 0 .../workflows/CI_cmake_with_blas_ilp64.yml | 0 {.github => back}/workflows/CreateTag.yml | 0 {.github => back}/workflows/PublishPyPI.yml | 0 .../workflows/VersionConsistencyCheck.yml | 0 {.github => back}/workflows/conda.yml | 0 capi_test/test_with_cxx_backend.sh | 2 +- 12 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/test_cxx_backend.yml rename {.github => back}/workflows/CI_C_API_Python.yml (100%) rename {.github => back}/workflows/CI_PublishTestPyPI.yml (100%) rename {.github => back}/workflows/CI_cmake_intel.yml (100%) rename {.github => back}/workflows/CI_cmake_no_blas.yml (100%) rename {.github => back}/workflows/CI_cmake_with_blas.yml (100%) rename {.github => back}/workflows/CI_cmake_with_blas_ilp64.yml (100%) rename {.github => back}/workflows/CreateTag.yml (100%) rename {.github => back}/workflows/PublishPyPI.yml (100%) rename {.github => back}/workflows/VersionConsistencyCheck.yml (100%) rename {.github => back}/workflows/conda.yml (100%) diff --git a/.github/workflows/test_cxx_backend.yml b/.github/workflows/test_cxx_backend.yml new file mode 100644 index 00000000..106620c3 --- /dev/null +++ b/.github/workflows/test_cxx_backend.yml @@ -0,0 +1,38 @@ +name: Test C++ Backend + +on: + push: + branches: [ "main", "459-restructure-directories" ] + pull_request: + branches: [ "main" ] + workflow_dispatch: + +jobs: + test-cxx-backend: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, macos-latest] + + steps: + - uses: actions/checkout@v5 + + - name: Install dependencies (Ubuntu) + if: runner.os == 'Linux' + run: | + sudo apt-get update + sudo apt-get install -y cmake libopenblas-dev 
libeigen3-dev + + - name: Install dependencies (macOS) + if: runner.os == 'macOS' + run: | + brew install cmake eigen openblas + + - name: Build and test C++ backend + working-directory: libsparseir/backend/cxx + run: ./build_capi_with_tests.sh + + - name: Build and test capi_test against backend + working-directory: libsparseir/capi_test + run: ./test_with_cxx_backend.sh + diff --git a/.github/workflows/CI_C_API_Python.yml b/back/workflows/CI_C_API_Python.yml similarity index 100% rename from .github/workflows/CI_C_API_Python.yml rename to back/workflows/CI_C_API_Python.yml diff --git a/.github/workflows/CI_PublishTestPyPI.yml b/back/workflows/CI_PublishTestPyPI.yml similarity index 100% rename from .github/workflows/CI_PublishTestPyPI.yml rename to back/workflows/CI_PublishTestPyPI.yml diff --git a/.github/workflows/CI_cmake_intel.yml b/back/workflows/CI_cmake_intel.yml similarity index 100% rename from .github/workflows/CI_cmake_intel.yml rename to back/workflows/CI_cmake_intel.yml diff --git a/.github/workflows/CI_cmake_no_blas.yml b/back/workflows/CI_cmake_no_blas.yml similarity index 100% rename from .github/workflows/CI_cmake_no_blas.yml rename to back/workflows/CI_cmake_no_blas.yml diff --git a/.github/workflows/CI_cmake_with_blas.yml b/back/workflows/CI_cmake_with_blas.yml similarity index 100% rename from .github/workflows/CI_cmake_with_blas.yml rename to back/workflows/CI_cmake_with_blas.yml diff --git a/.github/workflows/CI_cmake_with_blas_ilp64.yml b/back/workflows/CI_cmake_with_blas_ilp64.yml similarity index 100% rename from .github/workflows/CI_cmake_with_blas_ilp64.yml rename to back/workflows/CI_cmake_with_blas_ilp64.yml diff --git a/.github/workflows/CreateTag.yml b/back/workflows/CreateTag.yml similarity index 100% rename from .github/workflows/CreateTag.yml rename to back/workflows/CreateTag.yml diff --git a/.github/workflows/PublishPyPI.yml b/back/workflows/PublishPyPI.yml similarity index 100% rename from .github/workflows/PublishPyPI.yml 
rename to back/workflows/PublishPyPI.yml diff --git a/.github/workflows/VersionConsistencyCheck.yml b/back/workflows/VersionConsistencyCheck.yml similarity index 100% rename from .github/workflows/VersionConsistencyCheck.yml rename to back/workflows/VersionConsistencyCheck.yml diff --git a/.github/workflows/conda.yml b/back/workflows/conda.yml similarity index 100% rename from .github/workflows/conda.yml rename to back/workflows/conda.yml diff --git a/capi_test/test_with_cxx_backend.sh b/capi_test/test_with_cxx_backend.sh index 9bb6b9ed..5488ba39 100755 --- a/capi_test/test_with_cxx_backend.sh +++ b/capi_test/test_with_cxx_backend.sh @@ -30,7 +30,7 @@ cmake "$BACKEND_DIR" \ -DCMAKE_BUILD_TYPE=Debug \ -DCMAKE_INSTALL_PREFIX="$INSTALL_DIR" \ -DSPARSEIR_BUILD_TESTING=OFF \ - -DSPARSEIR_USE_BLAS=OFF + -DSPARSEIR_USE_BLAS=ON echo -e "${YELLOW}Building backend/cxx...${NC}" cmake --build . -j$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) From 66448e88639aa4befd5f7c2403b7ae9284208273 Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 10:16:30 +0900 Subject: [PATCH 08/21] fix: correct working-directory paths in GitHub Actions workflow Remove 'libsparseir/' prefix from working-directory paths. The repository root is already at libsparseir/ after checkout. 
--- .github/workflows/test_cxx_backend.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test_cxx_backend.yml b/.github/workflows/test_cxx_backend.yml index 106620c3..c581673a 100644 --- a/.github/workflows/test_cxx_backend.yml +++ b/.github/workflows/test_cxx_backend.yml @@ -29,10 +29,10 @@ jobs: brew install cmake eigen openblas - name: Build and test C++ backend - working-directory: libsparseir/backend/cxx + working-directory: backend/cxx run: ./build_capi_with_tests.sh - name: Build and test capi_test against backend - working-directory: libsparseir/capi_test + working-directory: capi_test run: ./test_with_cxx_backend.sh From 29bd3f243143efb5a27ad309f85858641a13bc3a Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 10:37:41 +0900 Subject: [PATCH 09/21] feat: add test scripts and workflows for Fortran bindings - Add fortran/test_with_cxx_backend.sh - Builds C++ backend and installs to work_cxx/install_backend - Builds Fortran bindings against installed backend - Runs Fortran tests with library paths set - Add .github/workflows/test_fortran.yml - Tests with GCC/gfortran on Ubuntu - Tests with Intel oneAPI compilers on Ubuntu - Uses shell scripts for simplicity - Update fortran/CMakeLists.txt - Find SparseIR package before project() to get version - Make independent build system - Update fortran/test/CMakeLists.txt - Use SparseIR::sparseir target instead of plain sparseir Note: Python uses its own independent build system (setup_build.py + CMakeLists.txt) that copies source files and builds internally --- .github/workflows/test_fortran.yml | 58 +++++++++++++++++++++++++++ fortran/CMakeLists.txt | 9 ++++- fortran/test/CMakeLists.txt | 2 +- fortran/test_with_cxx_backend.sh | 63 ++++++++++++++++++++++++++++++ 4 files changed, 129 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/test_fortran.yml create mode 100755 fortran/test_with_cxx_backend.sh diff --git 
a/.github/workflows/test_fortran.yml b/.github/workflows/test_fortran.yml new file mode 100644 index 00000000..68d2fbd9 --- /dev/null +++ b/.github/workflows/test_fortran.yml @@ -0,0 +1,58 @@ +name: Test Fortran Bindings + +on: + push: + branches: [ "main", "459-restructure-directories" ] + pull_request: + branches: [ "main" ] + workflow_dispatch: + +jobs: + test-fortran-gcc: + runs-on: ubuntu-latest + name: Fortran with GCC/gfortran + + steps: + - uses: actions/checkout@v5 + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get install -y cmake gfortran libopenblas-dev libeigen3-dev + + - name: Build and test Fortran bindings with GCC + working-directory: fortran + run: ./test_with_cxx_backend.sh + + test-fortran-intel: + runs-on: ubuntu-latest + name: Fortran with Intel compilers + + steps: + - uses: actions/checkout@v5 + + - name: Install Intel oneAPI + run: | + # Download Intel oneAPI GPG public key + wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | sudo tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null + + # Add Intel oneAPI repository + echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list + + # Update and install Intel oneAPI compilers and MKL + sudo apt-get update + sudo apt-get install -y intel-oneapi-compiler-dpcpp-cpp intel-oneapi-compiler-fortran intel-oneapi-mkl-devel + + - name: Install dependencies + run: | + sudo apt-get install -y cmake libeigen3-dev + + - name: Build and test Fortran bindings with Intel + working-directory: fortran + run: | + source /opt/intel/oneapi/setvars.sh + export CC=icx + export CXX=icpx + export FC=ifx + ./test_with_cxx_backend.sh + diff --git a/fortran/CMakeLists.txt b/fortran/CMakeLists.txt index ab91df54..5547496a 100644 --- a/fortran/CMakeLists.txt +++ b/fortran/CMakeLists.txt @@ -1,9 +1,14 @@ cmake_minimum_required(VERSION 
3.10) -project(SparseIR_Fortran LANGUAGES Fortran) -# Find dependencies +# Find dependencies first to get version find_package(SparseIR REQUIRED) +# Set project with version from SparseIR +project(SparseIR_Fortran + VERSION ${SparseIR_VERSION} + LANGUAGES Fortran +) + # Enable Fortran preprocessor set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -cpp") diff --git a/fortran/test/CMakeLists.txt b/fortran/test/CMakeLists.txt index ffdf7939..007e3797 100644 --- a/fortran/test/CMakeLists.txt +++ b/fortran/test/CMakeLists.txt @@ -18,7 +18,7 @@ foreach(TEST ${TEST_FILES}) ${CMAKE_CURRENT_SOURCE_DIR}/.. ) - target_link_libraries(${TEST} PRIVATE sparseir_fortran sparseir) + target_link_libraries(${TEST} PRIVATE sparseir_fortran SparseIR::sparseir) set_target_properties(${TEST} PROPERTIES Fortran_MODULE_DIRECTORY ${SPARSEIR_FORTRAN_MODDIR} diff --git a/fortran/test_with_cxx_backend.sh b/fortran/test_with_cxx_backend.sh new file mode 100755 index 00000000..563f4602 --- /dev/null +++ b/fortran/test_with_cxx_backend.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +# Build backend/cxx, install it, then build and run Fortran tests against it +# Directory structure: +# work_cxx/build_backend - Build directory for backend/cxx +# work_cxx/install_backend - Install directory for backend/cxx +# work_cxx/build_fortran - Build directory for Fortran bindings +# work_cxx/build_test - Build directory for Fortran tests + +set -e # Exit on error + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +WORK_DIR="$SCRIPT_DIR/work_cxx" +BACKEND_DIR="$SCRIPT_DIR/../backend/cxx" +INSTALL_DIR="$WORK_DIR/install_backend" + +echo -e "${GREEN}=== Testing Fortran with C++ backend ===${NC}" + +# Step 1: Build and install backend/cxx (without tests) +echo -e "${YELLOW}Step 1: Building backend/cxx...${NC}" +mkdir -p "$WORK_DIR/build_backend" +cd "$WORK_DIR/build_backend" + +cmake "$BACKEND_DIR" \ + 
-DCMAKE_BUILD_TYPE=Debug \ + -DCMAKE_INSTALL_PREFIX="$INSTALL_DIR" \ + -DSPARSEIR_BUILD_TESTING=OFF \ + -DSPARSEIR_USE_BLAS=ON + +echo -e "${YELLOW}Building backend/cxx...${NC}" +cmake --build . -j$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) + +echo -e "${YELLOW}Installing backend/cxx to $INSTALL_DIR...${NC}" +cmake --install . + +# Step 2: Build Fortran bindings +echo -e "${YELLOW}Step 2: Building Fortran bindings...${NC}" +cd "$SCRIPT_DIR" +mkdir -p "$WORK_DIR/build_fortran" +cd "$WORK_DIR/build_fortran" + +cmake "$SCRIPT_DIR" \ + -DCMAKE_BUILD_TYPE=Debug \ + -DCMAKE_PREFIX_PATH="$INSTALL_DIR" \ + -DSPARSEIR_BUILD_TESTING=ON + +echo -e "${YELLOW}Building Fortran bindings...${NC}" +cmake --build . -j$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) + +echo -e "${YELLOW}Running Fortran tests...${NC}" +# Set library paths for test execution +export DYLD_LIBRARY_PATH="$INSTALL_DIR/lib:$DYLD_LIBRARY_PATH" +export LD_LIBRARY_PATH="$INSTALL_DIR/lib:$LD_LIBRARY_PATH" +ctest --output-on-failure --verbose + +echo -e "${GREEN}=== All tests completed successfully ===${NC}" + From 7bfac66c7f2a3c4f73d8182c16e86cfd370dcc46 Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 10:53:49 +0900 Subject: [PATCH 10/21] feat: add Python bindings test workflow - Add .github/workflows/test_python.yml - Tests on Ubuntu and macOS - Python versions: 3.10, 3.11, 3.12 - Uses astral-sh/setup-uv@v7 for uv management - Enables BLAS support via SPARSEIR_USE_BLAS=1 - Update python/setup_build.py - Copy files from backend/cxx instead of top-level - Do not copy CMakeLists.txt (Python has its own) - Do not copy fortran directory - Update .gitignore - Add work_cxx to ignore build artifacts --- .github/workflows/test_python.yml | 70 +++++++++++++++++++++++++++++++ .gitignore | 1 + python/setup_build.py | 23 +++++----- 3 files changed, 82 insertions(+), 12 deletions(-) create mode 100644 .github/workflows/test_python.yml diff --git 
a/.github/workflows/test_python.yml b/.github/workflows/test_python.yml new file mode 100644 index 00000000..aaacaf71 --- /dev/null +++ b/.github/workflows/test_python.yml @@ -0,0 +1,70 @@ +name: Test Python Bindings + +on: + push: + branches: + - main + - develop + - 459-restructure-directories + pull_request: + branches: + - main + - develop + workflow_dispatch: + +jobs: + test-python: + name: Test Python on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macos-latest] + python-version: ['3.10', '3.11', '3.12'] + + steps: + - name: Checkout code + uses: actions/checkout@v5 + + - name: Install system dependencies (Ubuntu) + if: runner.os == 'Linux' + run: | + sudo apt-get update + sudo apt-get install -y \ + libeigen3-dev \ + libopenblas-dev \ + libblas-dev \ + liblapack-dev + + - name: Install system dependencies (macOS) + if: runner.os == 'macOS' + run: | + brew install eigen openblas + + - name: Install uv and set the python version + uses: astral-sh/setup-uv@v7 + with: + version: "0.8.15" + python-version: ${{ matrix.python-version }} + + - name: Enable caching + uses: astral-sh/setup-uv@v7 + with: + enable-cache: true + + - name: Setup build environment + working-directory: python + run: python3 setup_build.py + + - name: Run uv sync + working-directory: python + run: uv sync + env: + SPARSEIR_USE_BLAS: 1 + + - name: Run tests + working-directory: python + run: uv run pytest tests/ -v + env: + SPARSEIR_USE_BLAS: 1 + diff --git a/.gitignore b/.gitignore index 52fac89e..03291f14 100644 --- a/.gitignore +++ b/.gitignore @@ -33,6 +33,7 @@ # CMake build +work_cxx .DS_Store .vscode/settings.json diff --git a/python/setup_build.py b/python/setup_build.py index d26041ae..93e6ba12 100644 --- a/python/setup_build.py +++ b/python/setup_build.py @@ -14,21 +14,20 @@ def setup_build_environment(): parent_dir = os.path.dirname(current_dir) # Files and directories to copy + # Note: CMakeLists.txt is NOT copied - 
Python has its own CMakeLists.txt items_to_copy = [ - 'CMakeLists.txt', - 'LICENSE', - 'include', - 'src', - 'fortran', - 'cmake' + ('LICENSE', 'LICENSE'), + ('backend/cxx/include', 'include'), + ('backend/cxx/src', 'src'), + ('cmake', 'cmake') ] print(f"Setting up build environment in: {current_dir}") print(f"Copying from parent directory: {parent_dir}") - for item in items_to_copy: - src_path = os.path.join(parent_dir, item) - dst_path = os.path.join(current_dir, item) + for src_item, dst_item in items_to_copy: + src_path = os.path.join(parent_dir, src_item) + dst_path = os.path.join(current_dir, dst_item) if os.path.exists(src_path): # Always remove existing and copy fresh @@ -40,12 +39,12 @@ def setup_build_environment(): if os.path.isdir(src_path): shutil.copytree(src_path, dst_path) - print(f"Copied directory: {item}") + print(f"Copied directory: {src_item} -> {dst_item}") else: shutil.copy2(src_path, dst_path) - print(f"Copied file: {item}") + print(f"Copied file: {src_item} -> {dst_item}") else: - print(f"Warning: {item} not found in parent directory") + print(f"Warning: {src_item} not found in parent directory") if __name__ == "__main__": setup_build_environment() From 9f64fa80dde19f0a5c767c13f6b2995e28d4bb49 Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 11:28:51 +0900 Subject: [PATCH 11/21] fix: disable fastmath for Intel Compiler to preserve xprec precision - Add -fp-model=precise flag for Intel C++ compiler in backend/cxx - Add -fp-model=precise flag for Intel C++ compiler in capi_test - Add comment in fortran/CMakeLists.txt explaining the fix This prevents Intel Compiler from applying aggressive floating-point optimizations that break xprec's compensated arithmetic operations. The fix is needed because Intel icx/icpx enable unsafe math optimizations by default, which reassociate operations and break compensated addition/subtraction used in xprec's extended precision arithmetic. 
--- backend/cxx/CMakeLists.txt | 5 +++++ capi_test/CMakeLists.txt | 5 +++++ fortran/CMakeLists.txt | 4 +++- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/backend/cxx/CMakeLists.txt b/backend/cxx/CMakeLists.txt index 5d2675a6..02d8eae8 100644 --- a/backend/cxx/CMakeLists.txt +++ b/backend/cxx/CMakeLists.txt @@ -89,6 +89,11 @@ if(NOT MSVC) target_compile_options(sparseir PRIVATE -Wall -Wextra -pedantic) endif() +# Intel Compiler: disable fastmath optimizations that break xprec's extended precision +if(CMAKE_CXX_COMPILER_ID MATCHES "Intel") + target_compile_options(sparseir PRIVATE -fp-model=precise) +endif() + target_include_directories(sparseir PUBLIC $ diff --git a/capi_test/CMakeLists.txt b/capi_test/CMakeLists.txt index a944741b..59af36d3 100644 --- a/capi_test/CMakeLists.txt +++ b/capi_test/CMakeLists.txt @@ -57,6 +57,11 @@ target_link_libraries(cinterface_integration PRIVATE Threads::Threads ) +# Intel Compiler: disable fastmath optimizations that break xprec's extended precision +if(CMAKE_CXX_COMPILER_ID MATCHES "Intel") + target_compile_options(cinterface_integration PRIVATE -fp-model=precise) +endif() + # Optional BLAS linking find_package(BLAS QUIET) if(BLAS_FOUND) diff --git a/fortran/CMakeLists.txt b/fortran/CMakeLists.txt index 5547496a..0c8d2af6 100644 --- a/fortran/CMakeLists.txt +++ b/fortran/CMakeLists.txt @@ -23,10 +23,12 @@ add_library(sparseir_fortran SHARED ${TYPE_FILES} ) -# Add compiler-specific flags for heap arrays +# Add compiler-specific flags for heap arrays and fastmath if(CMAKE_Fortran_COMPILER_ID MATCHES "Intel") # Intel Fortran: Put all arrays on heap to avoid stack overflow #target_compile_options(sparseir_fortran PRIVATE -heap-arrays 0) + # Intel C++ uses -fp-model=precise to disable fastmath for xprec compatibility + # This is handled in the C++ backend CMakeLists.txt endif() # Add include directory From 6f7696fc42b8b412128b0dbaf03f13d925637d2b Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 
11:37:40 +0900 Subject: [PATCH 12/21] restore: workflows and update for restructured directory - Restore CI_PublishTestPyPI.yml, conda.yml, PublishPyPI.yml, and VersionConsistencyCheck.yml from back/workflows/ to .github/workflows/ - Update VersionConsistencyCheck.yml paths to use backend/cxx/include/sparseir/version.h - Update update_version.py to use backend/cxx/include/sparseir/version.h - conda-recipe/meta.yaml stays at ../../include/sparseir/version.h since setup_build.py copies it there These workflows support publishing Python wheels and conda packages. --- .github/workflows/CI_PublishTestPyPI.yml | 87 ++++++++++++ .github/workflows/PublishPyPI.yml | 132 ++++++++++++++++++ .github/workflows/VersionConsistencyCheck.yml | 55 ++++++++ .github/workflows/conda.yml | 40 ++++++ python/conda-recipe/meta.yaml | 6 +- update_version.py | 10 +- 6 files changed, 322 insertions(+), 8 deletions(-) create mode 100644 .github/workflows/CI_PublishTestPyPI.yml create mode 100644 .github/workflows/PublishPyPI.yml create mode 100644 .github/workflows/VersionConsistencyCheck.yml create mode 100644 .github/workflows/conda.yml diff --git a/.github/workflows/CI_PublishTestPyPI.yml b/.github/workflows/CI_PublishTestPyPI.yml new file mode 100644 index 00000000..059a7cd2 --- /dev/null +++ b/.github/workflows/CI_PublishTestPyPI.yml @@ -0,0 +1,87 @@ +name: CI Python Build & Publish to TestPyPI (Trusted) + +on: + push: + tags: ["v*"] + workflow_dispatch: + +jobs: + build-wheels: + runs-on: ${{ matrix.os }} + strategy: + matrix: + include: + # Linux builds + - os: ubuntu-latest + cibw_archs: "auto" + deployment_target: "" + openblas_path: "" + py_tag: "cp310" + # macOS Intel builds + - os: macos-13 # Intel Mac + cibw_archs: "x86_64" + deployment_target: "13.0" + openblas_path: "/usr/local/opt/openblas" + py_tag: "cp310" + # macOS Apple Silicon builds + - os: macos-latest # Apple Silicon Mac + cibw_archs: "arm64" + deployment_target: "15.0" + openblas_path: "/opt/homebrew/opt/openblas" + 
py_tag: "cp310" + steps: + - uses: actions/checkout@v5 + - name: remove python/.gitignore + shell: bash + run: | + rm -f python/.gitignore + - name: prepare build + shell: bash + run: | + cd python + python prepare_build.py + - uses: pypa/cibuildwheel@v3.2.1 + env: + CIBW_DEPENDENCY_VERSIONS: "latest" + CIBW_BUILD_VERBOSITY: 1 + CIBW_MANYLINUX_X86_64_IMAGE: "manylinux_2_28" + CIBW_BUILD: "${{ matrix.py_tag }}-*" + CIBW_ENVIRONMENT_MACOS: "MACOSX_DEPLOYMENT_TARGET=${{ matrix.deployment_target || '11.0' }} SPARSEIR_USE_BLAS=1" + CIBW_ARCHS: ${{ matrix.cibw_archs }} + CIBW_SKIP: "*-manylinux_i686 *-musllinux_i686" + # Install OpenBLAS using micromamba (conda-forge) - separate for manylinux and musllinux + CIBW_BEFORE_ALL_MANYLINUX: "curl -Ls https://micro.mamba.pm/api/micromamba/linux-64/latest | tar -xvj bin/micromamba && ./bin/micromamba create -y -p /opt/openblas && ./bin/micromamba install -y -c conda-forge openblas -p /opt/openblas --no-deps" + CIBW_BEFORE_ALL_MUSLLINUX: "apk add --no-cache bzip2 && curl -Ls https://micro.mamba.pm/api/micromamba/linux-64/latest | tar -xvj bin/micromamba && ./bin/micromamba create -y -p /opt/openblas && ./bin/micromamba install -y -c conda-forge openblas -p /opt/openblas --no-deps" + CIBW_BEFORE_ALL_MACOS: "brew install openblas" + # Set environment variables to help CMake find OpenBLAS + CIBW_ENVIRONMENT_MANYLINUX: "SPARSEIR_USE_BLAS=1" + CIBW_ENVIRONMENT_MUSLLINUX: "SPARSEIR_USE_BLAS=1" + with: + package-dir: ./python + output-dir: dist + - uses: actions/upload-artifact@v5 + with: + name: wheels-${{ matrix.os }}-${{ matrix.cibw_archs }}-${{ matrix.py_tag }} + path: dist/* + + publish-testpypi: + needs: build-wheels + runs-on: ubuntu-latest + environment: + name: testpypi + permissions: + id-token: write # ← Trusted Publishing に必須 + contents: read + steps: + - uses: actions/download-artifact@v6 + with: + path: dist + merge-multiple: true + - name: Publish to TestPyPI (Trusted) + uses: pypa/gh-action-pypi-publish@release/v1 + 
with: + packages-dir: dist + repository-url: https://test.pypi.org/legacy/ + verbose: true + skip-existing: true + attestations: false # Disable attestations due to Sigstore service issues \ No newline at end of file diff --git a/.github/workflows/PublishPyPI.yml b/.github/workflows/PublishPyPI.yml new file mode 100644 index 00000000..6f4b2bf6 --- /dev/null +++ b/.github/workflows/PublishPyPI.yml @@ -0,0 +1,132 @@ +name: Build & Publish + +on: + push: + tags: ["v*"] + workflow_dispatch: + +jobs: + build-wheels: + runs-on: ${{ matrix.os }} + strategy: + matrix: + include: + # Linux builds + - os: ubuntu-latest + cibw_archs: "auto" + deployment_target: "" + openblas_path: "" + py_tag: "cp310" + - os: ubuntu-latest + cibw_archs: "auto" + deployment_target: "" + openblas_path: "" + py_tag: "cp311" + - os: ubuntu-latest + cibw_archs: "auto" + deployment_target: "" + openblas_path: "" + py_tag: "cp312" + - os: ubuntu-latest + cibw_archs: "auto" + deployment_target: "" + openblas_path: "" + py_tag: "cp313" + # macOS Intel builds + - os: macos-13 # Intel Mac + cibw_archs: "x86_64" + deployment_target: "13.0" + openblas_path: "/usr/local/opt/openblas" + py_tag: "cp310" + - os: macos-13 # Intel Mac + cibw_archs: "x86_64" + deployment_target: "13.0" + openblas_path: "/usr/local/opt/openblas" + py_tag: "cp311" + - os: macos-13 # Intel Mac + cibw_archs: "x86_64" + deployment_target: "13.0" + openblas_path: "/usr/local/opt/openblas" + py_tag: "cp312" + - os: macos-13 # Intel Mac + cibw_archs: "x86_64" + deployment_target: "13.0" + openblas_path: "/usr/local/opt/openblas" + py_tag: "cp313" + # macOS Apple Silicon builds + - os: macos-latest # Apple Silicon Mac + cibw_archs: "arm64" + deployment_target: "15.0" + openblas_path: "/opt/homebrew/opt/openblas" + py_tag: "cp310" + - os: macos-latest # Apple Silicon Mac + cibw_archs: "arm64" + deployment_target: "15.0" + openblas_path: "/opt/homebrew/opt/openblas" + py_tag: "cp311" + - os: macos-latest # Apple Silicon Mac + cibw_archs: 
"arm64" + deployment_target: "15.0" + openblas_path: "/opt/homebrew/opt/openblas" + py_tag: "cp312" + - os: macos-latest # Apple Silicon Mac + cibw_archs: "arm64" + deployment_target: "15.0" + openblas_path: "/opt/homebrew/opt/openblas" + py_tag: "cp313" + steps: + - uses: actions/checkout@v5 + - name: remove python/.gitignore + shell: bash + run: | + rm -f python/.gitignore + - name: prepare build + shell: bash + run: | + cd python + python prepare_build.py + - uses: pypa/cibuildwheel@v3.2.1 + env: + CIBW_DEPENDENCY_VERSIONS: "latest" + CIBW_BUILD_VERBOSITY: 1 + CIBW_MANYLINUX_X86_64_IMAGE: "manylinux_2_28" + CIBW_BUILD: "${{ matrix.py_tag }}-*" + CIBW_ENVIRONMENT_MACOS: "MACOSX_DEPLOYMENT_TARGET=${{ matrix.deployment_target || '11.0' }} SPARSEIR_USE_BLAS=1" + CIBW_ARCHS: ${{ matrix.cibw_archs }} + CIBW_SKIP: "*-manylinux_i686 *-musllinux_i686" + # Install OpenBLAS using micromamba (conda-forge) - separate for manylinux and musllinux + CIBW_BEFORE_ALL_MANYLINUX: "curl -Ls https://micro.mamba.pm/api/micromamba/linux-64/latest | tar -xvj bin/micromamba && ./bin/micromamba create -y -p /opt/openblas && ./bin/micromamba install -y -c conda-forge openblas -p /opt/openblas --no-deps" + CIBW_BEFORE_ALL_MUSLLINUX: "apk add --no-cache bzip2 && curl -Ls https://micro.mamba.pm/api/micromamba/linux-64/latest | tar -xvj bin/micromamba && ./bin/micromamba create -y -p /opt/openblas && ./bin/micromamba install -y -c conda-forge openblas -p /opt/openblas --no-deps" + CIBW_BEFORE_ALL_MACOS: "brew install openblas" + # Set environment variables to help CMake find OpenBLAS + CIBW_ENVIRONMENT_MANYLINUX: "SPARSEIR_USE_BLAS=1" + CIBW_ENVIRONMENT_MUSLLINUX: "SPARSEIR_USE_BLAS=1" + with: + package-dir: ./python + output-dir: dist + - uses: actions/upload-artifact@v5 + with: + name: wheels-${{ matrix.os }}-${{ matrix.cibw_archs }}-${{ matrix.py_tag }} + path: dist/* + + publish-pypi: + needs: build-wheels + runs-on: ubuntu-latest + environment: + name: pypi + permissions: + id-token: 
write # Required for Trusted Publishing + contents: read + steps: + - uses: actions/download-artifact@v6 + with: + path: dist + merge-multiple: true + - name: Publish to PyPI (Trusted) + uses: pypa/gh-action-pypi-publish@release/v1 + with: + packages-dir: dist + repository-url: https://upload.pypi.org/legacy/ + verbose: true + skip-existing: true + attestations: false # Disable attestations due to Sigstore service issues \ No newline at end of file diff --git a/.github/workflows/VersionConsistencyCheck.yml b/.github/workflows/VersionConsistencyCheck.yml new file mode 100644 index 00000000..627ef958 --- /dev/null +++ b/.github/workflows/VersionConsistencyCheck.yml @@ -0,0 +1,55 @@ +name: Version Consistency Check + +on: + pull_request: + branches: [main] + paths: + - 'backend/cxx/include/sparseir/version.h' + - 'python/pyproject.toml' + - 'update_version.py' + push: + branches: [main] + paths: + - 'backend/cxx/include/sparseir/version.h' + - 'python/pyproject.toml' + - 'update_version.py' + +jobs: + check-version-consistency: + runs-on: ubuntu-latest + name: Check Version Consistency + + steps: + - name: Checkout code + uses: actions/checkout@v5 + + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: '3.11' + + - name: Check version consistency + run: | + echo "🔍 Checking version consistency across components..." + echo "==================================================" + echo + + python update_version.py + exit_code=$? + + echo + if [ $exit_code -eq 0 ]; then + echo "✅ All versions are consistent and in sync!" + echo " Ready for release process." + else + echo "❌ Version inconsistency detected!" + echo + echo "💡 To fix this issue:" + echo " 1. Run: python update_version.py " + echo " 2. Commit the changes: git add -A && git commit -m 'Fix version consistency'" + echo " 3. 
Push the changes to update this PR" + echo + echo "Example:" + echo " python update_version.py 0.4.3" + exit 1 + fi \ No newline at end of file diff --git a/.github/workflows/conda.yml b/.github/workflows/conda.yml new file mode 100644 index 00000000..3cdc7b67 --- /dev/null +++ b/.github/workflows/conda.yml @@ -0,0 +1,40 @@ +name: Build and upload conda packages + +# Triggered when a new tag starting with "v" is pushed +on: + push: + tags: + - 'v*' + workflow_dispatch: + +jobs: + build: + runs-on: ${{ matrix.runner_label }} + strategy: + matrix: + include: + - name: ubuntu-x64 + runner_label: ubuntu-latest + - name: macos-arm64 + runner_label: macos-latest + + steps: + - uses: actions/checkout@v5 + - uses: conda-incubator/setup-miniconda@v3 + with: + auto-update-conda: true + - name: Conda info + shell: bash -el {0} + run: conda info + - name: Install dependencies + run: | + conda install conda-build anaconda-client -y + + - name: Build and upload + working-directory: python + env: + ANACONDA_API_TOKEN: ${{secrets.ANACONDA_TOKEN}} + run: | + python3 --version + conda config --set anaconda_upload yes + conda build conda-recipe --user SpM-lab --output-folder conda-bld diff --git a/python/conda-recipe/meta.yaml b/python/conda-recipe/meta.yaml index e68122bf..29ade09d 100644 --- a/python/conda-recipe/meta.yaml +++ b/python/conda-recipe/meta.yaml @@ -1,7 +1,7 @@ {% set name = "pylibsparseir" %} -{% set version_major = load_file_regex(load_file="../../include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_MAJOR (\\d+)", from_recipe_dir=True).group(1) %} -{% set version_minor = load_file_regex(load_file="../../include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_MINOR (\\d+)", from_recipe_dir=True).group(1) %} -{% set version_patch = load_file_regex(load_file="../../include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_PATCH (\\d+)", from_recipe_dir=True).group(1) %} +{% set version_major = 
load_file_regex(load_file="../../backend/cxx/include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_MAJOR (\\d+)", from_recipe_dir=True).group(1) %} +{% set version_minor = load_file_regex(load_file="../../backend/cxx/include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_MINOR (\\d+)", from_recipe_dir=True).group(1) %} +{% set version_patch = load_file_regex(load_file="../../backend/cxx/include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_PATCH (\\d+)", from_recipe_dir=True).group(1) %} {% set version = version_major + "." + version_minor + "." + version_patch %} package: diff --git a/update_version.py b/update_version.py index f47e8ed4..f91fd6e8 100644 --- a/update_version.py +++ b/update_version.py @@ -17,8 +17,8 @@ from pathlib import Path def update_cpp_version(version_parts, repo_root): - """Update C++ version in include/sparseir/version.h""" - version_h_path = repo_root / "include" / "sparseir" / "version.h" + """Update C++ version in backend/cxx/include/sparseir/version.h""" + version_h_path = repo_root / "backend" / "cxx" / "include" / "sparseir" / "version.h" if not version_h_path.exists(): print(f"Error: {version_h_path} not found") @@ -103,7 +103,7 @@ def parse_version(version_string): def get_cpp_version(repo_root): """Get current C++ version from version.h""" - version_h_path = repo_root / "include" / "sparseir" / "version.h" + version_h_path = repo_root / "backend" / "cxx" / "include" / "sparseir" / "version.h" if not version_h_path.exists(): return None @@ -157,7 +157,7 @@ def check_version_consistency(repo_root): print() if not cpp_version: - print("❌ Could not read C++ version from include/sparseir/version.h") + print("❌ Could not read C++ version from backend/cxx/include/sparseir/version.h") return False if not python_version: @@ -234,7 +234,7 @@ def main(): print(f"✅ Successfully updated all versions to {version_string}") print() print("Updated files:") - print(" - include/sparseir/version.h (C++ 
library)") + print(" - backend/cxx/include/sparseir/version.h (C++ library)") print(" - python/pyproject.toml (Python package)") print(" - Conda recipe will automatically use version.h") print() From b84efa94496b9b1cf653cddc7ab4c0aa0c23f6a8 Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 11:46:14 +0900 Subject: [PATCH 13/21] fix: macOS RPATH issues for Fortran tests - Add RPATH properties to sparseir_fortran and test executables - Update test_with_cxx_backend.sh to fix install_name on macOS - Set BUILD_WITH_INSTALL_RPATH and INSTALL_RPATH_USE_LINK_PATH This ensures test executables can find libsparseir.0.dylib at runtime on macOS by embedding the correct library paths. --- fortran/CMakeLists.txt | 2 ++ fortran/test/CMakeLists.txt | 3 +++ fortran/test_with_cxx_backend.sh | 18 +++++++++++++++++- 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/fortran/CMakeLists.txt b/fortran/CMakeLists.txt index 0c8d2af6..23c08622 100644 --- a/fortran/CMakeLists.txt +++ b/fortran/CMakeLists.txt @@ -47,6 +47,8 @@ set_target_properties(sparseir_fortran PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR} POSITION_INDEPENDENT_CODE ON + BUILD_WITH_INSTALL_RPATH ON + INSTALL_RPATH_USE_LINK_PATH ON ) target_link_libraries(sparseir_fortran PUBLIC SparseIR::sparseir) diff --git a/fortran/test/CMakeLists.txt b/fortran/test/CMakeLists.txt index 007e3797..3fa600f3 100644 --- a/fortran/test/CMakeLists.txt +++ b/fortran/test/CMakeLists.txt @@ -22,6 +22,9 @@ foreach(TEST ${TEST_FILES}) set_target_properties(${TEST} PROPERTIES Fortran_MODULE_DIRECTORY ${SPARSEIR_FORTRAN_MODDIR} + BUILD_WITH_INSTALL_RPATH ON + INSTALL_RPATH_USE_LINK_PATH ON + INSTALL_RPATH "$;$" ) add_test(NAME fortran_${TEST} diff --git a/fortran/test_with_cxx_backend.sh b/fortran/test_with_cxx_backend.sh index 563f4602..71de7ed0 100755 --- a/fortran/test_with_cxx_backend.sh +++ b/fortran/test_with_cxx_backend.sh @@ -28,7 +28,7 @@ mkdir -p "$WORK_DIR/build_backend" cd 
"$WORK_DIR/build_backend" cmake "$BACKEND_DIR" \ - -DCMAKE_BUILD_TYPE=Debug \ + -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_INSTALL_PREFIX="$INSTALL_DIR" \ -DSPARSEIR_BUILD_TESTING=OFF \ -DSPARSEIR_USE_BLAS=ON @@ -57,6 +57,22 @@ echo -e "${YELLOW}Running Fortran tests...${NC}" # Set library paths for test execution export DYLD_LIBRARY_PATH="$INSTALL_DIR/lib:$DYLD_LIBRARY_PATH" export LD_LIBRARY_PATH="$INSTALL_DIR/lib:$LD_LIBRARY_PATH" + +# macOS: Update install_name for Fortran library to find C++ backend +if [[ "$OSTYPE" == "darwin"* ]]; then + echo -e "${YELLOW}Updating install_name for macOS...${NC}" + # Update install_name for Fortran library to point to installed location + install_name_tool -id "$INSTALL_DIR/lib/libsparseir_fortran.0.dylib" \ + "$WORK_DIR/build_fortran/libsparseir_fortran.0.dylib" 2>/dev/null || true + + # Update rpath for test executables + for test_bin in "$WORK_DIR/build_fortran/test/"*.exe "$WORK_DIR/build_fortran/test/test_"* 2>/dev/null; do + if [ -f "$test_bin" ] && file "$test_bin" | grep -q "Mach-O"; then + install_name_tool -add_rpath "$INSTALL_DIR/lib" "$test_bin" 2>/dev/null || true + fi + done +fi + ctest --output-on-failure --verbose echo -e "${GREEN}=== All tests completed successfully ===${NC}" From 15a3e6591fed54f0322119178f477020ea454c26 Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 11:47:15 +0900 Subject: [PATCH 14/21] fix: shell syntax error in test_with_cxx_backend.sh - Move 2>/dev/null to the end of for loop instead of in loop condition - This fixes 'syntax error near unexpected token 2' --- fortran/test_with_cxx_backend.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fortran/test_with_cxx_backend.sh b/fortran/test_with_cxx_backend.sh index 71de7ed0..2d0540d4 100755 --- a/fortran/test_with_cxx_backend.sh +++ b/fortran/test_with_cxx_backend.sh @@ -66,11 +66,11 @@ if [[ "$OSTYPE" == "darwin"* ]]; then "$WORK_DIR/build_fortran/libsparseir_fortran.0.dylib" 2>/dev/null || true # 
Update rpath for test executables - for test_bin in "$WORK_DIR/build_fortran/test/"*.exe "$WORK_DIR/build_fortran/test/test_"* 2>/dev/null; do + for test_bin in "$WORK_DIR/build_fortran/test/"*.exe "$WORK_DIR/build_fortran/test/test_"*; do if [ -f "$test_bin" ] && file "$test_bin" | grep -q "Mach-O"; then install_name_tool -add_rpath "$INSTALL_DIR/lib" "$test_bin" 2>/dev/null || true fi - done + done 2>/dev/null || true fi ctest --output-on-failure --verbose From d009dbfeb91b57b0ceae4df3587ff962b148a7b0 Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 11:48:54 +0900 Subject: [PATCH 15/21] fix: remove develop branch from workflow triggers and revert meta.yaml path --- .github/workflows/test_python.yml | 2 -- python/conda-recipe/meta.yaml | 6 +++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test_python.yml b/.github/workflows/test_python.yml index aaacaf71..d3ca1b11 100644 --- a/.github/workflows/test_python.yml +++ b/.github/workflows/test_python.yml @@ -4,12 +4,10 @@ on: push: branches: - main - - develop - 459-restructure-directories pull_request: branches: - main - - develop workflow_dispatch: jobs: diff --git a/python/conda-recipe/meta.yaml b/python/conda-recipe/meta.yaml index 29ade09d..e68122bf 100644 --- a/python/conda-recipe/meta.yaml +++ b/python/conda-recipe/meta.yaml @@ -1,7 +1,7 @@ {% set name = "pylibsparseir" %} -{% set version_major = load_file_regex(load_file="../../backend/cxx/include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_MAJOR (\\d+)", from_recipe_dir=True).group(1) %} -{% set version_minor = load_file_regex(load_file="../../backend/cxx/include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_MINOR (\\d+)", from_recipe_dir=True).group(1) %} -{% set version_patch = load_file_regex(load_file="../../backend/cxx/include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_PATCH (\\d+)", from_recipe_dir=True).group(1) %} +{% set version_major = 
load_file_regex(load_file="../../include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_MAJOR (\\d+)", from_recipe_dir=True).group(1) %} +{% set version_minor = load_file_regex(load_file="../../include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_MINOR (\\d+)", from_recipe_dir=True).group(1) %} +{% set version_patch = load_file_regex(load_file="../../include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_PATCH (\\d+)", from_recipe_dir=True).group(1) %} {% set version = version_major + "." + version_minor + "." + version_patch %} package: From 30e7724b7a0435a7fc32ec09d97e7e3adebb0df7 Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 13:18:32 +0900 Subject: [PATCH 16/21] fix: remove unnecessary array copies and add proper error codes - Remove arr_c and res_c temporary arrays from generated code - Use direct pointer passing with c_loc() for better performance - Fix test to use g_ir instead of g_dlr for Matsubara evaluation - Change all 'stop' to 'stop 1' to return proper error codes for CI - Update generate_evaluate_matsubara.py, generate_evaluate_tau.py, and generate_fit_tau.py --- fortran/evaluate_matsubara_impl.inc | 196 +++++++------------------ fortran/evaluate_tau_impl.inc | 182 ++++++----------------- fortran/fit_tau_impl.inc | 182 +++++++---------------- fortran/generate_evaluate_matsubara.py | 14 +- fortran/generate_evaluate_tau.py | 13 +- fortran/generate_fit_tau.py | 13 +- fortran/test/test_ext.f90 | 18 +-- fortran/test/test_integration.f90 | 56 +++---- 8 files changed, 202 insertions(+), 472 deletions(-) diff --git a/fortran/evaluate_matsubara_impl.inc b/fortran/evaluate_matsubara_impl.inc index f5236d07..be5ae827 100644 --- a/fortran/evaluate_matsubara_impl.inc +++ b/fortran/evaluate_matsubara_impl.inc @@ -7,11 +7,9 @@ SUBROUTINE evaluate_matsubara_zz_1d(obj, statistics, target_dim, arr, res) INTEGER, INTENT(IN) :: statistics INTEGER, INTENT(IN) :: target_dim - COMPLEX(KIND=DP), INTENT(IN) 
:: arr (:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:) + COMPLEX(KIND=DP), INTENT(IN), TARGET :: arr (:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(1), output_dims_c(1) input_dims_c = SHAPE(arr) @@ -38,15 +36,13 @@ SUBROUTINE evaluate_matsubara_zz_1d(obj, statistics, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) SELECT CASE (statistics) CASE (SPIR_STATISTICS_FERMIONIC) status_c = c_spir_sampling_eval_zz(obj%matsu_f_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE (SPIR_STATISTICS_BOSONIC) status_c = c_spir_sampling_eval_zz(obj%matsu_b_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE DEFAULT CALL errore('evaluate_matsubara_zz_1d', 'Invalid statistics', 1) RETURN @@ -54,8 +50,6 @@ SUBROUTINE evaluate_matsubara_zz_1d(obj, statistics, target_dim, arr, res) IF (status_c /= 0) THEN CALL errore('evaluate_matsubara_zz_1d', 'Error evaluating on Matsubara frequencies', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_matsubara_zz_2d(obj, statistics, target_dim, arr, res) @@ -63,11 +57,9 @@ SUBROUTINE evaluate_matsubara_zz_2d(obj, statistics, target_dim, arr, res) INTEGER, INTENT(IN) :: statistics INTEGER, INTENT(IN) :: target_dim - COMPLEX(KIND=DP), INTENT(IN) :: arr (:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:) + COMPLEX(KIND=DP), INTENT(IN), TARGET :: 
arr (:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(2), output_dims_c(2) input_dims_c = SHAPE(arr) @@ -94,15 +86,13 @@ SUBROUTINE evaluate_matsubara_zz_2d(obj, statistics, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) SELECT CASE (statistics) CASE (SPIR_STATISTICS_FERMIONIC) status_c = c_spir_sampling_eval_zz(obj%matsu_f_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE (SPIR_STATISTICS_BOSONIC) status_c = c_spir_sampling_eval_zz(obj%matsu_b_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE DEFAULT CALL errore('evaluate_matsubara_zz_2d', 'Invalid statistics', 1) RETURN @@ -110,8 +100,6 @@ SUBROUTINE evaluate_matsubara_zz_2d(obj, statistics, target_dim, arr, res) IF (status_c /= 0) THEN CALL errore('evaluate_matsubara_zz_2d', 'Error evaluating on Matsubara frequencies', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_matsubara_zz_3d(obj, statistics, target_dim, arr, res) @@ -119,11 +107,9 @@ SUBROUTINE evaluate_matsubara_zz_3d(obj, statistics, target_dim, arr, res) INTEGER, INTENT(IN) :: statistics INTEGER, INTENT(IN) :: target_dim - COMPLEX(KIND=DP), INTENT(IN) :: arr (:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:) + COMPLEX(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:) - COMPLEX(KIND=c_double), 
ALLOCATABLE, TARGET :: res_c(:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(3), output_dims_c(3) input_dims_c = SHAPE(arr) @@ -150,15 +136,13 @@ SUBROUTINE evaluate_matsubara_zz_3d(obj, statistics, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) SELECT CASE (statistics) CASE (SPIR_STATISTICS_FERMIONIC) status_c = c_spir_sampling_eval_zz(obj%matsu_f_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE (SPIR_STATISTICS_BOSONIC) status_c = c_spir_sampling_eval_zz(obj%matsu_b_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE DEFAULT CALL errore('evaluate_matsubara_zz_3d', 'Invalid statistics', 1) RETURN @@ -166,8 +150,6 @@ SUBROUTINE evaluate_matsubara_zz_3d(obj, statistics, target_dim, arr, res) IF (status_c /= 0) THEN CALL errore('evaluate_matsubara_zz_3d', 'Error evaluating on Matsubara frequencies', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_matsubara_zz_4d(obj, statistics, target_dim, arr, res) @@ -175,11 +157,9 @@ SUBROUTINE evaluate_matsubara_zz_4d(obj, statistics, target_dim, arr, res) INTEGER, INTENT(IN) :: statistics INTEGER, INTENT(IN) :: target_dim - COMPLEX(KIND=DP), INTENT(IN) :: arr (:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:) + COMPLEX(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET 
:: res (:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(4), output_dims_c(4) input_dims_c = SHAPE(arr) @@ -206,15 +186,13 @@ SUBROUTINE evaluate_matsubara_zz_4d(obj, statistics, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) SELECT CASE (statistics) CASE (SPIR_STATISTICS_FERMIONIC) status_c = c_spir_sampling_eval_zz(obj%matsu_f_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE (SPIR_STATISTICS_BOSONIC) status_c = c_spir_sampling_eval_zz(obj%matsu_b_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE DEFAULT CALL errore('evaluate_matsubara_zz_4d', 'Invalid statistics', 1) RETURN @@ -222,8 +200,6 @@ SUBROUTINE evaluate_matsubara_zz_4d(obj, statistics, target_dim, arr, res) IF (status_c /= 0) THEN CALL errore('evaluate_matsubara_zz_4d', 'Error evaluating on Matsubara frequencies', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_matsubara_zz_5d(obj, statistics, target_dim, arr, res) @@ -231,11 +207,9 @@ SUBROUTINE evaluate_matsubara_zz_5d(obj, statistics, target_dim, arr, res) INTEGER, INTENT(IN) :: statistics INTEGER, INTENT(IN) :: target_dim - COMPLEX(KIND=DP), INTENT(IN) :: arr (:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, 
status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(5), output_dims_c(5) input_dims_c = SHAPE(arr) @@ -262,15 +236,13 @@ SUBROUTINE evaluate_matsubara_zz_5d(obj, statistics, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) SELECT CASE (statistics) CASE (SPIR_STATISTICS_FERMIONIC) status_c = c_spir_sampling_eval_zz(obj%matsu_f_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE (SPIR_STATISTICS_BOSONIC) status_c = c_spir_sampling_eval_zz(obj%matsu_b_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE DEFAULT CALL errore('evaluate_matsubara_zz_5d', 'Invalid statistics', 1) RETURN @@ -278,8 +250,6 @@ SUBROUTINE evaluate_matsubara_zz_5d(obj, statistics, target_dim, arr, res) IF (status_c /= 0) THEN CALL errore('evaluate_matsubara_zz_5d', 'Error evaluating on Matsubara frequencies', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_matsubara_zz_6d(obj, statistics, target_dim, arr, res) @@ -287,11 +257,9 @@ SUBROUTINE evaluate_matsubara_zz_6d(obj, statistics, target_dim, arr, res) INTEGER, INTENT(IN) :: statistics INTEGER, INTENT(IN) :: target_dim - COMPLEX(KIND=DP), INTENT(IN) :: arr (:,:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:,:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: 
input_dims_c(6), output_dims_c(6) input_dims_c = SHAPE(arr) @@ -318,15 +286,13 @@ SUBROUTINE evaluate_matsubara_zz_6d(obj, statistics, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) SELECT CASE (statistics) CASE (SPIR_STATISTICS_FERMIONIC) status_c = c_spir_sampling_eval_zz(obj%matsu_f_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE (SPIR_STATISTICS_BOSONIC) status_c = c_spir_sampling_eval_zz(obj%matsu_b_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE DEFAULT CALL errore('evaluate_matsubara_zz_6d', 'Invalid statistics', 1) RETURN @@ -334,8 +300,6 @@ SUBROUTINE evaluate_matsubara_zz_6d(obj, statistics, target_dim, arr, res) IF (status_c /= 0) THEN CALL errore('evaluate_matsubara_zz_6d', 'Error evaluating on Matsubara frequencies', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_matsubara_zz_7d(obj, statistics, target_dim, arr, res) @@ -343,11 +307,9 @@ SUBROUTINE evaluate_matsubara_zz_7d(obj, statistics, target_dim, arr, res) INTEGER, INTENT(IN) :: statistics INTEGER, INTENT(IN) :: target_dim - COMPLEX(KIND=DP), INTENT(IN) :: arr (:,:,:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:,:,:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(7), output_dims_c(7) 
input_dims_c = SHAPE(arr) @@ -374,15 +336,13 @@ SUBROUTINE evaluate_matsubara_zz_7d(obj, statistics, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) SELECT CASE (statistics) CASE (SPIR_STATISTICS_FERMIONIC) status_c = c_spir_sampling_eval_zz(obj%matsu_f_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE (SPIR_STATISTICS_BOSONIC) status_c = c_spir_sampling_eval_zz(obj%matsu_b_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE DEFAULT CALL errore('evaluate_matsubara_zz_7d', 'Invalid statistics', 1) RETURN @@ -390,8 +350,6 @@ SUBROUTINE evaluate_matsubara_zz_7d(obj, statistics, target_dim, arr, res) IF (status_c /= 0) THEN CALL errore('evaluate_matsubara_zz_7d', 'Error evaluating on Matsubara frequencies', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_matsubara_dz_1d(obj, statistics, target_dim, arr, res) @@ -399,11 +357,9 @@ SUBROUTINE evaluate_matsubara_dz_1d(obj, statistics, target_dim, arr, res) INTEGER, INTENT(IN) :: statistics INTEGER, INTENT(IN) :: target_dim - REAL(KIND=DP), INTENT(IN) :: arr (:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:) + REAL(KIND=DP), INTENT(IN), TARGET :: arr (:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(1), output_dims_c(1) input_dims_c = SHAPE(arr) @@ -430,15 +386,13 @@ SUBROUTINE evaluate_matsubara_dz_1d(obj, statistics, target_dim, arr, 
res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) SELECT CASE (statistics) CASE (SPIR_STATISTICS_FERMIONIC) status_c = c_spir_sampling_eval_dz(obj%matsu_f_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE (SPIR_STATISTICS_BOSONIC) status_c = c_spir_sampling_eval_dz(obj%matsu_b_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE DEFAULT CALL errore('evaluate_matsubara_dz_1d', 'Invalid statistics', 1) RETURN @@ -446,8 +400,6 @@ SUBROUTINE evaluate_matsubara_dz_1d(obj, statistics, target_dim, arr, res) IF (status_c /= 0) THEN CALL errore('evaluate_matsubara_dz_1d', 'Error evaluating on Matsubara frequencies', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_matsubara_dz_2d(obj, statistics, target_dim, arr, res) @@ -455,11 +407,9 @@ SUBROUTINE evaluate_matsubara_dz_2d(obj, statistics, target_dim, arr, res) INTEGER, INTENT(IN) :: statistics INTEGER, INTENT(IN) :: target_dim - REAL(KIND=DP), INTENT(IN) :: arr (:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:) + REAL(KIND=DP), INTENT(IN), TARGET :: arr (:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(2), output_dims_c(2) input_dims_c = SHAPE(arr) @@ -486,15 +436,13 @@ SUBROUTINE evaluate_matsubara_dz_2d(obj, statistics, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) 
ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) SELECT CASE (statistics) CASE (SPIR_STATISTICS_FERMIONIC) status_c = c_spir_sampling_eval_dz(obj%matsu_f_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE (SPIR_STATISTICS_BOSONIC) status_c = c_spir_sampling_eval_dz(obj%matsu_b_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE DEFAULT CALL errore('evaluate_matsubara_dz_2d', 'Invalid statistics', 1) RETURN @@ -502,8 +450,6 @@ SUBROUTINE evaluate_matsubara_dz_2d(obj, statistics, target_dim, arr, res) IF (status_c /= 0) THEN CALL errore('evaluate_matsubara_dz_2d', 'Error evaluating on Matsubara frequencies', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_matsubara_dz_3d(obj, statistics, target_dim, arr, res) @@ -511,11 +457,9 @@ SUBROUTINE evaluate_matsubara_dz_3d(obj, statistics, target_dim, arr, res) INTEGER, INTENT(IN) :: statistics INTEGER, INTENT(IN) :: target_dim - REAL(KIND=DP), INTENT(IN) :: arr (:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:) + REAL(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(3), output_dims_c(3) input_dims_c = SHAPE(arr) @@ -542,15 +486,13 @@ SUBROUTINE evaluate_matsubara_dz_3d(obj, statistics, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) 
SELECT CASE (statistics) CASE (SPIR_STATISTICS_FERMIONIC) status_c = c_spir_sampling_eval_dz(obj%matsu_f_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE (SPIR_STATISTICS_BOSONIC) status_c = c_spir_sampling_eval_dz(obj%matsu_b_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE DEFAULT CALL errore('evaluate_matsubara_dz_3d', 'Invalid statistics', 1) RETURN @@ -558,8 +500,6 @@ SUBROUTINE evaluate_matsubara_dz_3d(obj, statistics, target_dim, arr, res) IF (status_c /= 0) THEN CALL errore('evaluate_matsubara_dz_3d', 'Error evaluating on Matsubara frequencies', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_matsubara_dz_4d(obj, statistics, target_dim, arr, res) @@ -567,11 +507,9 @@ SUBROUTINE evaluate_matsubara_dz_4d(obj, statistics, target_dim, arr, res) INTEGER, INTENT(IN) :: statistics INTEGER, INTENT(IN) :: target_dim - REAL(KIND=DP), INTENT(IN) :: arr (:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:) + REAL(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(4), output_dims_c(4) input_dims_c = SHAPE(arr) @@ -598,15 +536,13 @@ SUBROUTINE evaluate_matsubara_dz_4d(obj, statistics, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) SELECT CASE (statistics) CASE (SPIR_STATISTICS_FERMIONIC) status_c = 
c_spir_sampling_eval_dz(obj%matsu_f_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE (SPIR_STATISTICS_BOSONIC) status_c = c_spir_sampling_eval_dz(obj%matsu_b_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE DEFAULT CALL errore('evaluate_matsubara_dz_4d', 'Invalid statistics', 1) RETURN @@ -614,8 +550,6 @@ SUBROUTINE evaluate_matsubara_dz_4d(obj, statistics, target_dim, arr, res) IF (status_c /= 0) THEN CALL errore('evaluate_matsubara_dz_4d', 'Error evaluating on Matsubara frequencies', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_matsubara_dz_5d(obj, statistics, target_dim, arr, res) @@ -623,11 +557,9 @@ SUBROUTINE evaluate_matsubara_dz_5d(obj, statistics, target_dim, arr, res) INTEGER, INTENT(IN) :: statistics INTEGER, INTENT(IN) :: target_dim - REAL(KIND=DP), INTENT(IN) :: arr (:,:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:) + REAL(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(5), output_dims_c(5) input_dims_c = SHAPE(arr) @@ -654,15 +586,13 @@ SUBROUTINE evaluate_matsubara_dz_5d(obj, statistics, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) SELECT CASE (statistics) CASE (SPIR_STATISTICS_FERMIONIC) status_c = c_spir_sampling_eval_dz(obj%matsu_f_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, 
& - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE (SPIR_STATISTICS_BOSONIC) status_c = c_spir_sampling_eval_dz(obj%matsu_b_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE DEFAULT CALL errore('evaluate_matsubara_dz_5d', 'Invalid statistics', 1) RETURN @@ -670,8 +600,6 @@ SUBROUTINE evaluate_matsubara_dz_5d(obj, statistics, target_dim, arr, res) IF (status_c /= 0) THEN CALL errore('evaluate_matsubara_dz_5d', 'Error evaluating on Matsubara frequencies', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_matsubara_dz_6d(obj, statistics, target_dim, arr, res) @@ -679,11 +607,9 @@ SUBROUTINE evaluate_matsubara_dz_6d(obj, statistics, target_dim, arr, res) INTEGER, INTENT(IN) :: statistics INTEGER, INTENT(IN) :: target_dim - REAL(KIND=DP), INTENT(IN) :: arr (:,:,:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:,:) + REAL(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:,:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(6), output_dims_c(6) input_dims_c = SHAPE(arr) @@ -710,15 +636,13 @@ SUBROUTINE evaluate_matsubara_dz_6d(obj, statistics, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) SELECT CASE (statistics) CASE (SPIR_STATISTICS_FERMIONIC) status_c = c_spir_sampling_eval_dz(obj%matsu_f_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, 
c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE (SPIR_STATISTICS_BOSONIC) status_c = c_spir_sampling_eval_dz(obj%matsu_b_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE DEFAULT CALL errore('evaluate_matsubara_dz_6d', 'Invalid statistics', 1) RETURN @@ -726,8 +650,6 @@ SUBROUTINE evaluate_matsubara_dz_6d(obj, statistics, target_dim, arr, res) IF (status_c /= 0) THEN CALL errore('evaluate_matsubara_dz_6d', 'Error evaluating on Matsubara frequencies', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_matsubara_dz_7d(obj, statistics, target_dim, arr, res) @@ -735,11 +657,9 @@ SUBROUTINE evaluate_matsubara_dz_7d(obj, statistics, target_dim, arr, res) INTEGER, INTENT(IN) :: statistics INTEGER, INTENT(IN) :: target_dim - REAL(KIND=DP), INTENT(IN) :: arr (:,:,:,:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:,:,:) + REAL(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:,:,:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(7), output_dims_c(7) input_dims_c = SHAPE(arr) @@ -766,15 +686,13 @@ SUBROUTINE evaluate_matsubara_dz_7d(obj, statistics, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) SELECT CASE (statistics) CASE (SPIR_STATISTICS_FERMIONIC) status_c = c_spir_sampling_eval_dz(obj%matsu_f_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, 
c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE (SPIR_STATISTICS_BOSONIC) status_c = c_spir_sampling_eval_dz(obj%matsu_b_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE DEFAULT CALL errore('evaluate_matsubara_dz_7d', 'Invalid statistics', 1) RETURN @@ -782,6 +700,4 @@ SUBROUTINE evaluate_matsubara_dz_7d(obj, statistics, target_dim, arr, res) IF (status_c /= 0) THEN CALL errore('evaluate_matsubara_dz_7d', 'Error evaluating on Matsubara frequencies', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE diff --git a/fortran/evaluate_tau_impl.inc b/fortran/evaluate_tau_impl.inc index 8594dc95..5fedc74f 100644 --- a/fortran/evaluate_tau_impl.inc +++ b/fortran/evaluate_tau_impl.inc @@ -5,10 +5,8 @@ SUBROUTINE evaluate_tau_zz_1d(obj, target_dim, arr, res) TYPE(IR), INTENT(IN) :: obj INTEGER, INTENT(IN) :: target_dim - COMPLEX(KIND=DP), INTENT(IN) :: arr (:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:) + COMPLEX(KIND=DP), INTENT(IN), TARGET :: arr (:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(1), output_dims_c(1) @@ -30,26 +28,19 @@ SUBROUTINE evaluate_tau_zz_1d(obj, target_dim, arr, res) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - arr_c = arr - ALLOCATE(res_c, MOLD=res) status_c = c_spir_sampling_eval_zz(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('evaluate_tau_zz_1d', 'Error evaluating on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END 
SUBROUTINE SUBROUTINE evaluate_tau_zz_2d(obj, target_dim, arr, res) TYPE(IR), INTENT(IN) :: obj INTEGER, INTENT(IN) :: target_dim - COMPLEX(KIND=DP), INTENT(IN) :: arr (:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:) + COMPLEX(KIND=DP), INTENT(IN), TARGET :: arr (:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(2), output_dims_c(2) @@ -71,26 +62,19 @@ SUBROUTINE evaluate_tau_zz_2d(obj, target_dim, arr, res) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - arr_c = arr - ALLOCATE(res_c, MOLD=res) status_c = c_spir_sampling_eval_zz(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('evaluate_tau_zz_2d', 'Error evaluating on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_tau_zz_3d(obj, target_dim, arr, res) TYPE(IR), INTENT(IN) :: obj INTEGER, INTENT(IN) :: target_dim - COMPLEX(KIND=DP), INTENT(IN) :: arr (:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:) + COMPLEX(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(3), output_dims_c(3) @@ -112,26 +96,19 @@ SUBROUTINE evaluate_tau_zz_3d(obj, target_dim, arr, res) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - arr_c = arr - ALLOCATE(res_c, MOLD=res) status_c = c_spir_sampling_eval_zz(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), 
target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('evaluate_tau_zz_3d', 'Error evaluating on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_tau_zz_4d(obj, target_dim, arr, res) TYPE(IR), INTENT(IN) :: obj INTEGER, INTENT(IN) :: target_dim - COMPLEX(KIND=DP), INTENT(IN) :: arr (:,:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:) + COMPLEX(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(4), output_dims_c(4) @@ -153,26 +130,19 @@ SUBROUTINE evaluate_tau_zz_4d(obj, target_dim, arr, res) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - arr_c = arr - ALLOCATE(res_c, MOLD=res) status_c = c_spir_sampling_eval_zz(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('evaluate_tau_zz_4d', 'Error evaluating on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_tau_zz_5d(obj, target_dim, arr, res) TYPE(IR), INTENT(IN) :: obj INTEGER, INTENT(IN) :: target_dim - COMPLEX(KIND=DP), INTENT(IN) :: arr (:,:,:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: 
input_dims_c(5), output_dims_c(5) @@ -194,26 +164,19 @@ SUBROUTINE evaluate_tau_zz_5d(obj, target_dim, arr, res) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - arr_c = arr - ALLOCATE(res_c, MOLD=res) status_c = c_spir_sampling_eval_zz(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('evaluate_tau_zz_5d', 'Error evaluating on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_tau_zz_6d(obj, target_dim, arr, res) TYPE(IR), INTENT(IN) :: obj INTEGER, INTENT(IN) :: target_dim - COMPLEX(KIND=DP), INTENT(IN) :: arr (:,:,:,:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(6), output_dims_c(6) @@ -235,26 +198,19 @@ SUBROUTINE evaluate_tau_zz_6d(obj, target_dim, arr, res) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - arr_c = arr - ALLOCATE(res_c, MOLD=res) status_c = c_spir_sampling_eval_zz(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('evaluate_tau_zz_6d', 'Error evaluating on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_tau_zz_7d(obj, target_dim, arr, res) TYPE(IR), INTENT(IN) :: obj INTEGER, INTENT(IN) :: target_dim - COMPLEX(KIND=DP), INTENT(IN) :: arr (:,:,:,:,:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) 
:: res (:,:,:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(7), output_dims_c(7) @@ -276,26 +232,19 @@ SUBROUTINE evaluate_tau_zz_7d(obj, target_dim, arr, res) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - arr_c = arr - ALLOCATE(res_c, MOLD=res) status_c = c_spir_sampling_eval_zz(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('evaluate_tau_zz_7d', 'Error evaluating on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_tau_dd_1d(obj, target_dim, arr, res) TYPE(IR), INTENT(IN) :: obj INTEGER, INTENT(IN) :: target_dim - REAL(KIND=DP), INTENT(IN) :: arr (:) - REAL(KIND=DP), INTENT(OUT) :: res (:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:) + REAL(KIND=DP), INTENT(IN), TARGET :: arr (:) + REAL(KIND=DP), INTENT(OUT), TARGET :: res (:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(1), output_dims_c(1) @@ -317,26 +266,19 @@ SUBROUTINE evaluate_tau_dd_1d(obj, target_dim, arr, res) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - arr_c = arr - ALLOCATE(res_c, MOLD=res) status_c = c_spir_sampling_eval_dd(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('evaluate_tau_dd_1d', 'Error evaluating on tau 
sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_tau_dd_2d(obj, target_dim, arr, res) TYPE(IR), INTENT(IN) :: obj INTEGER, INTENT(IN) :: target_dim - REAL(KIND=DP), INTENT(IN) :: arr (:,:) - REAL(KIND=DP), INTENT(OUT) :: res (:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:) + REAL(KIND=DP), INTENT(IN), TARGET :: arr (:,:) + REAL(KIND=DP), INTENT(OUT), TARGET :: res (:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(2), output_dims_c(2) @@ -358,26 +300,19 @@ SUBROUTINE evaluate_tau_dd_2d(obj, target_dim, arr, res) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - arr_c = arr - ALLOCATE(res_c, MOLD=res) status_c = c_spir_sampling_eval_dd(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('evaluate_tau_dd_2d', 'Error evaluating on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_tau_dd_3d(obj, target_dim, arr, res) TYPE(IR), INTENT(IN) :: obj INTEGER, INTENT(IN) :: target_dim - REAL(KIND=DP), INTENT(IN) :: arr (:,:,:) - REAL(KIND=DP), INTENT(OUT) :: res (:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:) + REAL(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:) + REAL(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(3), output_dims_c(3) @@ -399,26 +334,19 @@ SUBROUTINE evaluate_tau_dd_3d(obj, target_dim, arr, res) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - arr_c = arr - ALLOCATE(res_c, MOLD=res) status_c = c_spir_sampling_eval_dd(obj%tau_smpl_ptr, 
SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('evaluate_tau_dd_3d', 'Error evaluating on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_tau_dd_4d(obj, target_dim, arr, res) TYPE(IR), INTENT(IN) :: obj INTEGER, INTENT(IN) :: target_dim - REAL(KIND=DP), INTENT(IN) :: arr (:,:,:,:) - REAL(KIND=DP), INTENT(OUT) :: res (:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:) + REAL(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:,:) + REAL(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(4), output_dims_c(4) @@ -440,26 +368,19 @@ SUBROUTINE evaluate_tau_dd_4d(obj, target_dim, arr, res) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - arr_c = arr - ALLOCATE(res_c, MOLD=res) status_c = c_spir_sampling_eval_dd(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('evaluate_tau_dd_4d', 'Error evaluating on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_tau_dd_5d(obj, target_dim, arr, res) TYPE(IR), INTENT(IN) :: obj INTEGER, INTENT(IN) :: target_dim - REAL(KIND=DP), INTENT(IN) :: arr (:,:,:,:,:) - REAL(KIND=DP), INTENT(OUT) :: res (:,:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:) + REAL(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:,:,:) + REAL(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c 
INTEGER(KIND=c_int), TARGET :: input_dims_c(5), output_dims_c(5) @@ -481,26 +402,19 @@ SUBROUTINE evaluate_tau_dd_5d(obj, target_dim, arr, res) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - arr_c = arr - ALLOCATE(res_c, MOLD=res) status_c = c_spir_sampling_eval_dd(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('evaluate_tau_dd_5d', 'Error evaluating on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_tau_dd_6d(obj, target_dim, arr, res) TYPE(IR), INTENT(IN) :: obj INTEGER, INTENT(IN) :: target_dim - REAL(KIND=DP), INTENT(IN) :: arr (:,:,:,:,:,:) - REAL(KIND=DP), INTENT(OUT) :: res (:,:,:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:,:) + REAL(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:,:,:,:) + REAL(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(6), output_dims_c(6) @@ -522,26 +436,19 @@ SUBROUTINE evaluate_tau_dd_6d(obj, target_dim, arr, res) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - arr_c = arr - ALLOCATE(res_c, MOLD=res) status_c = c_spir_sampling_eval_dd(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('evaluate_tau_dd_6d', 'Error evaluating on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE evaluate_tau_dd_7d(obj, target_dim, arr, res) TYPE(IR), INTENT(IN) :: obj INTEGER, INTENT(IN) :: target_dim - REAL(KIND=DP), INTENT(IN) :: arr (:,:,:,:,:,:,:) - REAL(KIND=DP), 
INTENT(OUT) :: res (:,:,:,:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:,:,:) + REAL(KIND=DP), INTENT(IN), TARGET :: arr (:,:,:,:,:,:,:) + REAL(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(7), output_dims_c(7) @@ -563,15 +470,10 @@ SUBROUTINE evaluate_tau_dd_7d(obj, target_dim, arr, res) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - arr_c = arr - ALLOCATE(res_c, MOLD=res) status_c = c_spir_sampling_eval_dd(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('evaluate_tau_dd_7d', 'Error evaluating on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE diff --git a/fortran/fit_tau_impl.inc b/fortran/fit_tau_impl.inc index 484318ca..713a44a1 100644 --- a/fortran/fit_tau_impl.inc +++ b/fortran/fit_tau_impl.inc @@ -5,10 +5,8 @@ SUBROUTINE fit_tau_dd_1d(obj, target_dim, arr, res) TYPE(IR), intent(in) :: obj INTEGER, intent(in) :: target_dim - REAL(KIND=DP), intent(in) :: arr (:) - REAL(KIND=DP), INTENT(OUT) :: res (:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:) + REAL(KIND=DP), intent(in), TARGET :: arr (:) + REAL(KIND=DP), INTENT(OUT), TARGET :: res (:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(1), output_dims_c(1) @@ -29,24 +27,19 @@ SUBROUTINE fit_tau_dd_1d(obj, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) + status_c = c_spir_sampling_fit_dd(obj%tau_smpl_ptr, 
SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('fit_tau_dd_1d', 'Error fitting on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE fit_tau_dd_2d(obj, target_dim, arr, res) TYPE(IR), intent(in) :: obj INTEGER, intent(in) :: target_dim - REAL(KIND=DP), intent(in) :: arr (:,:) - REAL(KIND=DP), INTENT(OUT) :: res (:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:) + REAL(KIND=DP), intent(in), TARGET :: arr (:,:) + REAL(KIND=DP), INTENT(OUT), TARGET :: res (:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(2), output_dims_c(2) @@ -67,24 +60,19 @@ SUBROUTINE fit_tau_dd_2d(obj, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) + status_c = c_spir_sampling_fit_dd(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('fit_tau_dd_2d', 'Error fitting on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE fit_tau_dd_3d(obj, target_dim, arr, res) TYPE(IR), intent(in) :: obj INTEGER, intent(in) :: target_dim - REAL(KIND=DP), intent(in) :: arr (:,:,:) - REAL(KIND=DP), INTENT(OUT) :: res (:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:) + REAL(KIND=DP), intent(in), TARGET :: arr (:,:,:) + REAL(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c 
INTEGER(KIND=c_int), TARGET :: input_dims_c(3), output_dims_c(3) @@ -105,24 +93,19 @@ SUBROUTINE fit_tau_dd_3d(obj, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) + status_c = c_spir_sampling_fit_dd(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('fit_tau_dd_3d', 'Error fitting on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE fit_tau_dd_4d(obj, target_dim, arr, res) TYPE(IR), intent(in) :: obj INTEGER, intent(in) :: target_dim - REAL(KIND=DP), intent(in) :: arr (:,:,:,:) - REAL(KIND=DP), INTENT(OUT) :: res (:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:) + REAL(KIND=DP), intent(in), TARGET :: arr (:,:,:,:) + REAL(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(4), output_dims_c(4) @@ -143,24 +126,19 @@ SUBROUTINE fit_tau_dd_4d(obj, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) + status_c = c_spir_sampling_fit_dd(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('fit_tau_dd_4d', 'Error fitting on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE fit_tau_dd_5d(obj, target_dim, arr, res) TYPE(IR), intent(in) :: obj 
INTEGER, intent(in) :: target_dim - REAL(KIND=DP), intent(in) :: arr (:,:,:,:,:) - REAL(KIND=DP), INTENT(OUT) :: res (:,:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:) + REAL(KIND=DP), intent(in), TARGET :: arr (:,:,:,:,:) + REAL(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(5), output_dims_c(5) @@ -181,24 +159,19 @@ SUBROUTINE fit_tau_dd_5d(obj, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) + status_c = c_spir_sampling_fit_dd(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('fit_tau_dd_5d', 'Error fitting on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE fit_tau_dd_6d(obj, target_dim, arr, res) TYPE(IR), intent(in) :: obj INTEGER, intent(in) :: target_dim - REAL(KIND=DP), intent(in) :: arr (:,:,:,:,:,:) - REAL(KIND=DP), INTENT(OUT) :: res (:,:,:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:,:) + REAL(KIND=DP), intent(in), TARGET :: arr (:,:,:,:,:,:) + REAL(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(6), output_dims_c(6) @@ -219,24 +192,19 @@ SUBROUTINE fit_tau_dd_6d(obj, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) + status_c = 
c_spir_sampling_fit_dd(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('fit_tau_dd_6d', 'Error fitting on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE fit_tau_dd_7d(obj, target_dim, arr, res) TYPE(IR), intent(in) :: obj INTEGER, intent(in) :: target_dim - REAL(KIND=DP), intent(in) :: arr (:,:,:,:,:,:,:) - REAL(KIND=DP), INTENT(OUT) :: res (:,:,:,:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:,:,:) - REAL(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:,:,:) + REAL(KIND=DP), intent(in), TARGET :: arr (:,:,:,:,:,:,:) + REAL(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(7), output_dims_c(7) @@ -257,24 +225,19 @@ SUBROUTINE fit_tau_dd_7d(obj, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) + status_c = c_spir_sampling_fit_dd(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('fit_tau_dd_7d', 'Error fitting on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE fit_tau_zz_1d(obj, target_dim, arr, res) TYPE(IR), intent(in) :: obj INTEGER, intent(in) :: target_dim - COMPLEX(KIND=DP), intent(in) :: arr (:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:) + COMPLEX(KIND=DP), intent(in), TARGET :: arr (:) + COMPLEX(KIND=DP), 
INTENT(OUT), TARGET :: res (:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(1), output_dims_c(1) @@ -295,24 +258,19 @@ SUBROUTINE fit_tau_zz_1d(obj, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) + status_c = c_spir_sampling_fit_zz(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('fit_tau_zz_1d', 'Error fitting on tau sampling points', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE fit_tau_zz_2d(obj, target_dim, arr, res) TYPE(IR), intent(in) :: obj INTEGER, intent(in) :: target_dim - COMPLEX(KIND=DP), intent(in) :: arr (:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:) + COMPLEX(KIND=DP), intent(in), TARGET :: arr (:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(2), output_dims_c(2) @@ -333,24 +291,19 @@ SUBROUTINE fit_tau_zz_2d(obj, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) + status_c = c_spir_sampling_fit_zz(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('fit_tau_zz_2d', 'Error fitting on tau sampling points', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - 
DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE fit_tau_zz_3d(obj, target_dim, arr, res) TYPE(IR), intent(in) :: obj INTEGER, intent(in) :: target_dim - COMPLEX(KIND=DP), intent(in) :: arr (:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:) + COMPLEX(KIND=DP), intent(in), TARGET :: arr (:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(3), output_dims_c(3) @@ -371,24 +324,19 @@ SUBROUTINE fit_tau_zz_3d(obj, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) + status_c = c_spir_sampling_fit_zz(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('fit_tau_zz_3d', 'Error fitting on tau sampling points', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE fit_tau_zz_4d(obj, target_dim, arr, res) TYPE(IR), intent(in) :: obj INTEGER, intent(in) :: target_dim - COMPLEX(KIND=DP), intent(in) :: arr (:,:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:) + COMPLEX(KIND=DP), intent(in), TARGET :: arr (:,:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(4), output_dims_c(4) @@ -409,24 +357,19 @@ SUBROUTINE fit_tau_zz_4d(obj, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET 
dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) + status_c = c_spir_sampling_fit_zz(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('fit_tau_zz_4d', 'Error fitting on tau sampling points', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE fit_tau_zz_5d(obj, target_dim, arr, res) TYPE(IR), intent(in) :: obj INTEGER, intent(in) :: target_dim - COMPLEX(KIND=DP), intent(in) :: arr (:,:,:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:) + COMPLEX(KIND=DP), intent(in), TARGET :: arr (:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(5), output_dims_c(5) @@ -447,24 +390,19 @@ SUBROUTINE fit_tau_zz_5d(obj, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) + status_c = c_spir_sampling_fit_zz(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('fit_tau_zz_5d', 'Error fitting on tau sampling points', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE fit_tau_zz_6d(obj, target_dim, arr, res) TYPE(IR), intent(in) :: obj INTEGER, intent(in) :: target_dim - COMPLEX(KIND=DP), intent(in) :: arr (:,:,:,:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:,:,:,:) - 
COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:,:) + COMPLEX(KIND=DP), intent(in), TARGET :: arr (:,:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(6), output_dims_c(6) @@ -485,24 +423,19 @@ SUBROUTINE fit_tau_zz_6d(obj, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) + status_c = c_spir_sampling_fit_zz(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('fit_tau_zz_6d', 'Error fitting on tau sampling points', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE SUBROUTINE fit_tau_zz_7d(obj, target_dim, arr, res) TYPE(IR), intent(in) :: obj INTEGER, intent(in) :: target_dim - COMPLEX(KIND=DP), intent(in) :: arr (:,:,:,:,:,:,:) - COMPLEX(KIND=DP), INTENT(OUT) :: res (:,:,:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: arr_c(:,:,:,:,:,:,:) - COMPLEX(KIND=c_double), ALLOCATABLE, TARGET :: res_c(:,:,:,:,:,:,:) + COMPLEX(KIND=DP), intent(in), TARGET :: arr (:,:,:,:,:,:,:) + COMPLEX(KIND=DP), INTENT(OUT), TARGET :: res (:,:,:,:,:,:,:) INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c INTEGER(KIND=c_int), TARGET :: input_dims_c(7), output_dims_c(7) @@ -523,13 +456,10 @@ SUBROUTINE fit_tau_zz_7d(obj, target_dim, arr, res) 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) + status_c = c_spir_sampling_fit_zz(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - 
ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('fit_tau_zz_7d', 'Error fitting on tau sampling points', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE diff --git a/fortran/generate_evaluate_matsubara.py b/fortran/generate_evaluate_matsubara.py index ed9ec1ef..1f071b9a 100644 --- a/fortran/generate_evaluate_matsubara.py +++ b/fortran/generate_evaluate_matsubara.py @@ -66,11 +66,9 @@ def generate_evaluate_matsubara_function(ndim, input_type, output_type): INTEGER, INTENT(IN) :: statistics INTEGER, INTENT(IN) :: target_dim - {input_fortran_type}, INTENT(IN) :: arr {shape_str} - {input_c_type}, ALLOCATABLE, TARGET :: arr_c{shape_str} + {input_fortran_type}, INTENT(IN), TARGET :: arr {shape_str} - {output_fortran_type}, INTENT(OUT) :: res {shape_str} - {output_c_type}, ALLOCATABLE, TARGET :: res_c{shape_str} + {output_fortran_type}, INTENT(OUT), TARGET :: res {shape_str} INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c {dim_array_decl} input_dims_c = SHAPE(arr) @@ -97,15 +95,13 @@ def generate_evaluate_matsubara_function(ndim, input_type, output_type): 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) SELECT CASE (statistics) CASE (SPIR_STATISTICS_FERMIONIC) status_c = {c_function}(obj%matsu_f_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE (SPIR_STATISTICS_BOSONIC) status_c = {c_function}(obj%matsu_b_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) CASE DEFAULT CALL 
errore('evaluate_matsubara_{func_suffix}_{ndim}d', 'Invalid statistics', 1) RETURN @@ -113,8 +109,6 @@ def generate_evaluate_matsubara_function(ndim, input_type, output_type): IF (status_c /= 0) THEN CALL errore('evaluate_matsubara_{func_suffix}_{ndim}d', 'Error evaluating on Matsubara frequencies', status_c) ENDIF - res = CMPLX(res_c, KIND=DP) - DEALLOCATE(arr_c, res_c) END SUBROUTINE""" return function diff --git a/fortran/generate_evaluate_tau.py b/fortran/generate_evaluate_tau.py index 4ec2a672..09c6fc62 100644 --- a/fortran/generate_evaluate_tau.py +++ b/fortran/generate_evaluate_tau.py @@ -64,10 +64,8 @@ def generate_evaluate_tau_function(ndim, input_type, output_type): function = f"""SUBROUTINE evaluate_tau_{func_suffix}_{ndim}d(obj, target_dim, arr, res) TYPE(IR), INTENT(IN) :: obj INTEGER, INTENT(IN) :: target_dim - {input_fortran_type}, INTENT(IN) :: arr {shape_str} - {output_fortran_type}, INTENT(OUT) :: res {shape_str} - {input_c_type}, ALLOCATABLE, TARGET :: arr_c{shape_str} - {output_c_type}, ALLOCATABLE, TARGET :: res_c{shape_str} + {input_fortran_type}, INTENT(IN), TARGET :: arr {shape_str} + {output_fortran_type}, INTENT(OUT), TARGET :: res {shape_str} INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c {dim_array_decl} @@ -89,16 +87,11 @@ def generate_evaluate_tau_function(ndim, input_type, output_type): ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - arr_c = arr - ALLOCATE(res_c, MOLD=res) status_c = {c_function}(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('evaluate_tau_{func_suffix}_{ndim}d', 'Error evaluating on tau sampling points', status_c) ENDIF - res = res_c - DEALLOCATE(arr_c, res_c) END SUBROUTINE""" diff --git a/fortran/generate_fit_tau.py b/fortran/generate_fit_tau.py index 46279ba8..11140ce0 100644 --- a/fortran/generate_fit_tau.py +++ 
b/fortran/generate_fit_tau.py @@ -70,10 +70,8 @@ def generate_fit_tau_function(ndim, input_type, output_type): function = f"""SUBROUTINE fit_tau_{func_suffix}_{ndim}d(obj, target_dim, arr, res) TYPE(IR), intent(in) :: obj INTEGER, intent(in) :: target_dim - {input_fortran_type}, intent(in) :: arr {shape_str} - {output_fortran_type}, INTENT(OUT) :: res {shape_str} - {input_c_type}, ALLOCATABLE, TARGET :: arr_c{shape_str} - {output_c_type}, ALLOCATABLE, TARGET :: res_c{shape_str} + {input_fortran_type}, intent(in), TARGET :: arr {shape_str} + {output_fortran_type}, INTENT(OUT), TARGET :: res {shape_str} INTEGER(KIND=c_int) :: ndim_c, target_dim_c, status_c {dim_array_decl} @@ -94,15 +92,12 @@ def generate_fit_tau_function(ndim, input_type, output_type): 'Output dimensions are not the same as the input dimensions except for the TARGET dimension', 1) ENDIF target_dim_c = target_dim - 1 - ALLOCATE(arr_c, MOLD=arr) - ALLOCATE(res_c, MOLD=res) + status_c = {c_function}(obj%tau_smpl_ptr, SPIR_ORDER_COLUMN_MAJOR, & - ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr_c), c_loc(res_c)) + ndim_c, c_loc(input_dims_c), target_dim_c, c_loc(arr), c_loc(res)) IF (status_c /= 0) THEN CALL errore('fit_tau_{func_suffix}_{ndim}d', 'Error fitting on tau sampling points', status_c) ENDIF - {res_assignment} - DEALLOCATE(arr_c, res_c) END SUBROUTINE""" return function diff --git a/fortran/test/test_ext.f90 b/fortran/test/test_ext.f90 index aff9c8ce..b0c07a47 100644 --- a/fortran/test/test_ext.f90 +++ b/fortran/test/test_ext.f90 @@ -81,7 +81,7 @@ subroutine test_case_target_dim_1(obj, statistics, case_name) nfreq = obj%nfreq_b else print *, "Error: Invalid statistics" - stop +stop 1 end if ! Allocate arrays @@ -112,7 +112,7 @@ subroutine test_case_target_dim_1(obj, statistics, case_name) call dlr2ir(obj, target_dim, g_dlr, g_ir) ! 
Evaluate Green's function at Matsubara frequencies from IR - call evaluate_matsubara(obj, statistics, target_dim, g_dlr, giw) + call evaluate_matsubara(obj, statistics, target_dim, g_ir, giw) ! Convert Matsubara frequencies back to IR call fit_matsubara(obj, statistics, target_dim, giw, g_ir2_z) @@ -120,7 +120,7 @@ subroutine test_case_target_dim_1(obj, statistics, case_name) ! Compare IR coefficients (using real part of g_ir2_z) if (.not. compare_with_relative_error_d(coeffs, real(g_ir2_z), 10.0_DP * obj%eps)) then print *, "Error: IR coefficients do not match after transformation cycle" - stop +stop 1 end if ! Evaluate Green's function at tau points @@ -139,7 +139,7 @@ subroutine test_case_target_dim_1(obj, statistics, case_name) if (abs(imag_tmp(1) - gtau_z(1, 1)) / max(abs(imag_tmp(1)), abs(gtau_z(1, 1))) & > 10.0_DP * obj%eps) then print *, "Error: Tau evaluation does not match direct calculation" - stop +stop 1 end if deallocate(u_tau) deallocate(imag_tmp) @@ -153,7 +153,7 @@ subroutine test_case_target_dim_1(obj, statistics, case_name) ! Compare the original and reconstructed Matsubara frequencies if (.not. compare_with_relative_error_z(giw, giw_reconst, 10.0_DP * obj%eps)) then print *, "Error: Matsubara frequencies do not match after transformation cycle" - stop +stop 1 end if ! Deallocate arrays @@ -192,7 +192,7 @@ subroutine test_case_target_dim_2(obj, statistics, case_name) nfreq = obj%nfreq_b else print *, "Error: Invalid statistics" - stop +stop 1 end if ! Allocate arrays with correct dimension order for target_dim=2 @@ -223,7 +223,7 @@ subroutine test_case_target_dim_2(obj, statistics, case_name) call dlr2ir(obj, target_dim, g_dlr, g_ir) ! Evaluate Green's function at Matsubara frequencies from IR - call evaluate_matsubara(obj, statistics, target_dim, g_dlr, giw) + call evaluate_matsubara(obj, statistics, target_dim, g_ir, giw) ! 
Convert Matsubara frequencies back to IR call fit_matsubara(obj, statistics, target_dim, giw, g_ir2_z) @@ -231,7 +231,7 @@ subroutine test_case_target_dim_2(obj, statistics, case_name) ! Compare IR coefficients (using real part of g_ir2_z) if (.not. compare_with_relative_error_d(coeffs, real(g_ir2_z), 10.0_DP * obj%eps)) then print *, "Error: IR coefficients do not match after transformation cycle" - stop +stop 1 end if ! Evaluate Green's function at tau points @@ -246,7 +246,7 @@ subroutine test_case_target_dim_2(obj, statistics, case_name) ! Compare the original and reconstructed Matsubara frequencies if (.not. compare_with_relative_error_z(giw, giw_reconst, 10.0_DP * obj%eps)) then print *, "Error: Matsubara frequencies do not match after transformation cycle" - stop +stop 1 end if ! Deallocate arrays diff --git a/fortran/test/test_integration.f90 b/fortran/test/test_integration.f90 index 8c8a6953..58f01643 100644 --- a/fortran/test/test_integration.f90 +++ b/fortran/test/test_integration.f90 @@ -52,17 +52,17 @@ subroutine test_case(statistics, case_name) k_ptr = c_spir_logistic_kernel_new(lambda, c_loc(status)) if (status /= 0) then print *, "Error creating kernel" - stop +stop 1 end if if (.not. c_associated(k_ptr)) then print *, "Error: kernel is not assigned" - stop +stop 1 end if status = c_spir_kernel_domain(k_ptr, c_loc(xmin), c_loc(xmax), c_loc(ymin), c_loc(ymax)) if (status /= 0) then print *, "Error: kernel domain is not assigned" - stop +stop 1 end if print *, "Kernel domain =", xmin, xmax, ymin, ymax @@ -75,11 +75,11 @@ subroutine test_case(statistics, case_name) sve_ptr = c_spir_sve_result_new(k_ptr, epsilon, -1.0_c_double, -1_c_int, -1_c_int, SPIR_TWORK_AUTO, c_loc(status)) if (status /= 0) then print *, "Error creating SVE result" - stop +stop 1 end if if (.not. c_associated(sve_ptr)) then print *, "Error: SVE result is not assigned" - stop +stop 1 end if ! 
Get the size of the SVE result @@ -87,7 +87,7 @@ subroutine test_case(statistics, case_name) status = c_spir_sve_result_get_size(sve_ptr, c_loc(sve_size)) if (status /= 0) then print *, "Error getting SVE result size" - stop +stop 1 end if ! Get the singular values of the SVE result @@ -97,7 +97,7 @@ subroutine test_case(statistics, case_name) status = c_spir_sve_result_get_svals(sve_ptr, c_loc(svals)) if (status /= 0) then print *, "Error getting SVE result singular values" - stop +stop 1 end if print *, "SVE result singular values =", svals @@ -107,11 +107,11 @@ subroutine test_case(statistics, case_name) basis_ptr = c_spir_basis_new(statistics, beta, omega_max, epsilon, k_ptr, sve_ptr, max_size, c_loc(status)) if (status /= 0) then print *, "Error creating finite temperature basis" - stop +stop 1 end if if (.not. c_associated(basis_ptr)) then print *, "Error: basis is not assigned" - stop +stop 1 end if ! Get the size of the basis @@ -119,7 +119,7 @@ subroutine test_case(statistics, case_name) status = c_spir_basis_get_size(basis_ptr, c_loc(basis_size)) if (status /= 0) then print *, "Error getting basis size" - stop +stop 1 end if print *, "Basis size =", basis_size @@ -128,21 +128,21 @@ subroutine test_case(statistics, case_name) status = c_spir_basis_get_n_default_taus(basis_ptr, c_loc(ntaus)) if (status /= 0) then print *, "Error getting number of tau points" - stop +stop 1 end if print *, "Number of tau points =", ntaus allocate(taus(ntaus)) status = c_spir_basis_get_default_taus(basis_ptr, c_loc(taus)) if (status /= 0) then print *, "Error getting tau points" - stop +stop 1 end if print *, "Tau =", taus tau_sampling_ptr = c_spir_tau_sampling_new( & basis_ptr, ntaus, c_loc(taus), c_loc(status)) if (status /= 0) then print *, "Error sampling tau" - stop +stop 1 end if ! 
Matsubara sampling @@ -150,21 +150,21 @@ subroutine test_case(statistics, case_name) status = c_spir_basis_get_n_default_matsus(basis_ptr, positive_only, c_loc(nmatsus)) if (status /= 0) then print *, "Error getting number of matsubara points" - stop +stop 1 end if print *, "Number of matsubara points =", nmatsus allocate(matsus(nmatsus)) status = c_spir_basis_get_default_matsus(basis_ptr, positive_only, c_loc(matsus)) if (status /= 0) then print *, "Error getting matsubara points" - stop +stop 1 end if print *, "Matsubara =", matsus matsu_sampling_ptr = c_spir_matsu_sampling_new( & basis_ptr, positive_only, nmatsus, c_loc(matsus), c_loc(status)) if (status /= 0) then print *, "Error sampling matsubara" - stop +stop 1 end if ! Create a new DLR @@ -175,11 +175,11 @@ subroutine test_case(statistics, case_name) print *, "dlr_ptr is associated:", c_associated(dlr_ptr) if (status /= 0) then print *, "Error creating DLR" - stop +stop 1 end if if (.not. c_associated(dlr_ptr)) then print *, "Error: DLR is not assigned" - stop +stop 1 end if ! Get the number of poles @@ -190,7 +190,7 @@ subroutine test_case(statistics, case_name) print *, "After get_npoles - npoles:", npoles if (status /= 0) then print *, "Error getting number of poles" - stop +stop 1 end if ! Get the poles @@ -203,7 +203,7 @@ subroutine test_case(statistics, case_name) print *, "After get_poles - status:", status if (status /= 0) then print *, "Error getting poles" - stop +stop 1 end if ! Test 2D operations @@ -264,7 +264,7 @@ subroutine test_2d_operations(dlr_ptr, basis_ptr, tau_sampling_ptr, matsu_sampli c_loc(coeffs), c_loc(g_ir)) if (status /= 0) then print *, "Error converting DLR to IR" - stop +stop 1 end if ! 
Evaluate Green's function at Matsubara frequencies from IR @@ -274,7 +274,7 @@ subroutine test_2d_operations(dlr_ptr, basis_ptr, tau_sampling_ptr, matsu_sampli c_loc(g_ir), c_loc(giw)) if (status /= 0) then print *, "Error evaluating Green's function at Matsubara frequencies" - stop +stop 1 end if ! Convert Matsubara frequencies back to IR @@ -283,13 +283,13 @@ subroutine test_2d_operations(dlr_ptr, basis_ptr, tau_sampling_ptr, matsu_sampli c_loc(giw), c_loc(g_ir2_z)) if (status /= 0) then print *, "Error converting Matsubara frequencies back to IR" - stop +stop 1 end if ! Compare IR coefficients (using real part of g_ir2_z) if (.not. compare_with_relative_error_d(g_ir, real(g_ir2_z), tol)) then print *, "Error: IR coefficients do not match after transformation cycle" - stop +stop 1 end if ! Evaluate Green's function at tau points @@ -298,7 +298,7 @@ subroutine test_2d_operations(dlr_ptr, basis_ptr, tau_sampling_ptr, matsu_sampli c_loc(g_ir2_z), c_loc(gtau_z)) if (status /= 0) then print *, "Error evaluating Green's function at tau points" - stop +stop 1 end if ! Convert tau points back to IR @@ -307,7 +307,7 @@ subroutine test_2d_operations(dlr_ptr, basis_ptr, tau_sampling_ptr, matsu_sampli c_loc(gtau_z), c_loc(g_ir2_z)) if (status /= 0) then print *, "Error converting tau points back to IR" - stop +stop 1 end if ! Evaluate Green's function at Matsubara frequencies again @@ -316,13 +316,13 @@ subroutine test_2d_operations(dlr_ptr, basis_ptr, tau_sampling_ptr, matsu_sampli c_loc(g_ir2_z), c_loc(giw_reconst)) if (status /= 0) then print *, "Error evaluating Green's function at Matsubara frequencies again" - stop +stop 1 end if ! Compare the original and reconstructed Matsubara frequencies if (.not. compare_with_relative_error_z(giw, giw_reconst, tol)) then print *, "Error: Matsubara frequencies do not match after transformation cycle" - stop +stop 1 end if ! 
Deallocate arrays From 0d59329f5b290453e68c0e36fb58c0e3ed8c4c9a Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 13:20:41 +0900 Subject: [PATCH 17/21] fix: use correct comparison for IR coefficients - Replace compare_with_relative_error_d(coeffs, real(g_ir2_z)) with compare_with_relative_error_z(g_ir, g_ir2_z) - This compares the correct complex IR coefficients instead of incorrect types --- fortran/test/test_ext.f90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fortran/test/test_ext.f90 b/fortran/test/test_ext.f90 index b0c07a47..8443e111 100644 --- a/fortran/test/test_ext.f90 +++ b/fortran/test/test_ext.f90 @@ -118,7 +118,7 @@ subroutine test_case_target_dim_1(obj, statistics, case_name) call fit_matsubara(obj, statistics, target_dim, giw, g_ir2_z) ! Compare IR coefficients (using real part of g_ir2_z) - if (.not. compare_with_relative_error_d(coeffs, real(g_ir2_z), 10.0_DP * obj%eps)) then + if (.not. compare_with_relative_error_z(g_ir, g_ir2_z, 10.0_DP * obj%eps)) then print *, "Error: IR coefficients do not match after transformation cycle" stop 1 end if @@ -229,7 +229,7 @@ subroutine test_case_target_dim_2(obj, statistics, case_name) call fit_matsubara(obj, statistics, target_dim, giw, g_ir2_z) ! Compare IR coefficients (using real part of g_ir2_z) - if (.not. compare_with_relative_error_d(coeffs, real(g_ir2_z), 10.0_DP * obj%eps)) then + if (.not. 
compare_with_relative_error_z(g_ir, g_ir2_z, 10.0_DP * obj%eps)) then print *, "Error: IR coefficients do not match after transformation cycle" stop 1 end if From 9ed5905fa859e2e4b91f1f8ab8dcb7e5a87900bb Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 13:21:48 +0900 Subject: [PATCH 18/21] fix: install Fortran library before running tests - Add cmake --install step for Fortran bindings before running tests - This ensures libsparseir_fortran.0.dylib is available at expected location --- fortran/test_with_cxx_backend.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fortran/test_with_cxx_backend.sh b/fortran/test_with_cxx_backend.sh index 2d0540d4..2a6bff64 100755 --- a/fortran/test_with_cxx_backend.sh +++ b/fortran/test_with_cxx_backend.sh @@ -53,6 +53,9 @@ cmake "$SCRIPT_DIR" \ echo -e "${YELLOW}Building Fortran bindings...${NC}" cmake --build . -j$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) +echo -e "${YELLOW}Installing Fortran bindings...${NC}" +cmake --install . 
--prefix "$INSTALL_DIR" + echo -e "${YELLOW}Running Fortran tests...${NC}" # Set library paths for test execution export DYLD_LIBRARY_PATH="$INSTALL_DIR/lib:$DYLD_LIBRARY_PATH" From 5749891c060b982f4745f2095cd9664ebcbdef08 Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 13:23:07 +0900 Subject: [PATCH 19/21] fix: install Fortran module files correctly - Change from DIRECTORY install to FILES install for module files - Use OPTIONAL flag to avoid errors if modules don't exist --- fortran/CMakeLists.txt | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fortran/CMakeLists.txt b/fortran/CMakeLists.txt index 23c08622..e4b60cdf 100644 --- a/fortran/CMakeLists.txt +++ b/fortran/CMakeLists.txt @@ -74,9 +74,11 @@ install(TARGETS sparseir_fortran # Install Fortran module files install( - DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/fortran/ + FILES + ${CMAKE_Fortran_MODULE_DIRECTORY}/sparseir.mod + ${CMAKE_Fortran_MODULE_DIRECTORY}/sparseir_ext.mod DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/sparseir" - FILES_MATCHING PATTERN "*.mod" + OPTIONAL ) # CMake package config for Fortran bindings From b82c6b6bc884c1012a804995c6549ad133cb07a7 Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 13:25:40 +0900 Subject: [PATCH 20/21] fix: add GNUInstallDirs for proper install paths - Include GNUInstallDirs to set CMAKE_INSTALL_INCLUDEDIR correctly - This fixes the '/sparseir' path issue when installing modules --- fortran/CMakeLists.txt | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/fortran/CMakeLists.txt b/fortran/CMakeLists.txt index e4b60cdf..0492ab01 100644 --- a/fortran/CMakeLists.txt +++ b/fortran/CMakeLists.txt @@ -1,5 +1,8 @@ cmake_minimum_required(VERSION 3.10) +# Include GNUInstallDirs for standard install paths +include(GNUInstallDirs) + # Find dependencies first to get version find_package(SparseIR REQUIRED) @@ -74,11 +77,9 @@ install(TARGETS sparseir_fortran # Install Fortran module files 
install( - FILES - ${CMAKE_Fortran_MODULE_DIRECTORY}/sparseir.mod - ${CMAKE_Fortran_MODULE_DIRECTORY}/sparseir_ext.mod + DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/ DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/sparseir" - OPTIONAL + FILES_MATCHING PATTERN "*.mod" ) # CMake package config for Fortran bindings From 0fc77081f0ad48c1e65e41ecd775d2ab03ea07aa Mon Sep 17 00:00:00 2001 From: Hiroshi Shinaoka Date: Tue, 28 Oct 2025 14:08:46 +0900 Subject: [PATCH 21/21] fix: correct conda recipe paths - use ../../backend for proper relative path from python/conda-recipe/ --- python/conda-recipe/meta.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/conda-recipe/meta.yaml b/python/conda-recipe/meta.yaml index e68122bf..29ade09d 100644 --- a/python/conda-recipe/meta.yaml +++ b/python/conda-recipe/meta.yaml @@ -1,7 +1,7 @@ {% set name = "pylibsparseir" %} -{% set version_major = load_file_regex(load_file="../../include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_MAJOR (\\d+)", from_recipe_dir=True).group(1) %} -{% set version_minor = load_file_regex(load_file="../../include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_MINOR (\\d+)", from_recipe_dir=True).group(1) %} -{% set version_patch = load_file_regex(load_file="../../include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_PATCH (\\d+)", from_recipe_dir=True).group(1) %} +{% set version_major = load_file_regex(load_file="../../backend/cxx/include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_MAJOR (\\d+)", from_recipe_dir=True).group(1) %} +{% set version_minor = load_file_regex(load_file="../../backend/cxx/include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_MINOR (\\d+)", from_recipe_dir=True).group(1) %} +{% set version_patch = load_file_regex(load_file="../../backend/cxx/include/sparseir/version.h", regex_pattern="#define SPARSEIR_VERSION_PATCH (\\d+)", from_recipe_dir=True).group(1) %} {% set version = 
version_major + "." + version_minor + "." + version_patch %} package: