Merged

53 commits
26ee276  claude says that all tests are passing and it is done (jtramm, Feb 9, 2026)
8e70d52  claude fixes submodule issue? (jtramm, Feb 9, 2026)
cc52ab0  added tensor.h (jtramm, Feb 9, 2026)
f570c8c  claude added views to the tensor.h (jtramm, Feb 9, 2026)
f1b0014  Switched back to simpler tensor.h implementation with fewer advanced/… (jtramm, Feb 10, 2026)
c626028  simplified weight windows xtensor usage (jtramm, Feb 10, 2026)
5fc19ce  removed dead code (jtramm, Feb 10, 2026)
347a6fe  Added better comments and code cleanup (jtramm, Feb 10, 2026)
430a7c3  Cleaning up the tensor implementation to group data/methods/construct… (jtramm, Feb 10, 2026)
4e43192  removed dead code (jtramm, Feb 10, 2026)
7fdfb8d  refactored to simpler (non-xtensor-like) interface. (jtramm, Feb 11, 2026)
5d81f7a  Refactor from std::size_t to size_t, and from openmc::vector to vector (jtramm, Feb 11, 2026)
933296f  code cleanup - making variable names and comments match new API (jtramm, Feb 11, 2026)
7c96034  Removal of development jargon from code comments (jtramm, Feb 11, 2026)
d638322  greatly simplified the sum function (jtramm, Feb 11, 2026)
d27d4f1  Made several functions rank agnostic rather than assuming a max rank (jtramm, Feb 11, 2026)
0871ab4  fixed some issues with includes (jtramm, Feb 11, 2026)
e9249df  Making the naming of classes more clear: Tensor, StaticTensor2D, and … (jtramm, Feb 11, 2026)
25a22bf  Replaced View1D with a true n-dimensional View class. (jtramm, Feb 11, 2026)
866a022  knocked off TODO item that was due to a long term bug in xtensor (jtramm, Feb 11, 2026)
8cf5d3a  cleanup of terminology (jtramm, Feb 11, 2026)
2e7ce3b  made more use of the view where it makes sense (jtramm, Feb 11, 2026)
649dc31  made more use of view, and added a few small needed functions to impr… (jtramm, Feb 11, 2026)
64e2cd0  renamed slice_at to select to make it more clear what it is actually … (jtramm, Feb 11, 2026)
987d583  Made explicit view variable declarations rather than using auto. (jtramm, Feb 12, 2026)
5983579  removed several Tensor constructors to simplify things (jtramm, Feb 12, 2026)
8b3e22a  Use T in view class instead of converting to value_type (jtramm, Feb 12, 2026)
64e347a  more view simplification (jtramm, Feb 12, 2026)
52370cb  more view simplification (jtramm, Feb 12, 2026)
5c3cb83  Simplification of view iterators (jtramm, Feb 12, 2026)
9bb639a  tensor.h cleanup (jtramm, Feb 12, 2026)
b14afa9  add some docstrings to more tensor functions (jtramm, Feb 12, 2026)
470bf02  tensor comment cleanup (jtramm, Feb 12, 2026)
9835b1f  removed a few unneeded functions (jtramm, Feb 12, 2026)
0ffbcff  removed xtensor mention in docs, and added ctest unit tests for tenso… (jtramm, Feb 12, 2026)
c1d5b9d  cleanup and code review incorporation (jtramm, Feb 12, 2026)
255ea57  removed 2D-wrappers row and col. (jtramm, Feb 12, 2026)
631a2e0  merged with develop (jtramm, Feb 12, 2026)
6f3a73e  ran clang format (jtramm, Feb 12, 2026)
052ec17  reverted unneeded comment mod (jtramm, Feb 12, 2026)
07c4143  fixed MPI error (jtramm, Feb 12, 2026)
49a3474  refactored to allow for ranges. Much more rank agnostic now (jtramm, Feb 12, 2026)
2cf4c08  ran clang format again (jtramm, Feb 12, 2026)
f7783c5  LLVM 15 clang format (jtramm, Feb 13, 2026)
312851d  fixed header include (jtramm, Feb 13, 2026)
728dafe  fixed header issue (jtramm, Feb 13, 2026)
0a7a804  fix cassert issue (previously transitively included by xtensor) (jtramm, Feb 13, 2026)
7160afe  made tensor::ones function, and changed range API to follow standard … (jtramm, Feb 16, 2026)
f665829  ran clang-format 15 (Feb 16, 2026)
cd54c70  added front and back helper functions (jtramm, Feb 16, 2026)
7343ac8  Merge branch 'develop' into no_xtensor_claude (jtramm, Feb 16, 2026)
e85a781  Moved front/back methods from view to tensor. (jtramm, Feb 16, 2026)
11e869a  update unit tests (jtramm, Feb 16, 2026)
6 changes: 0 additions & 6 deletions .gitmodules
@@ -1,12 +1,6 @@
 [submodule "vendor/pugixml"]
 	path = vendor/pugixml
 	url = https://github.com/zeux/pugixml.git
-[submodule "vendor/xtensor"]
-	path = vendor/xtensor
-	url = https://github.com/xtensor-stack/xtensor.git
-[submodule "vendor/xtl"]
-	path = vendor/xtl
-	url = https://github.com/xtensor-stack/xtl.git
 [submodule "vendor/fmt"]
 	path = vendor/fmt
 	url = https://github.com/fmtlib/fmt.git
19 changes: 1 addition & 18 deletions CMakeLists.txt
@@ -266,23 +266,6 @@ else()
   endif()
 endif()
 
-#===============================================================================
-# xtensor header-only library
-#===============================================================================
-
-if(OPENMC_FORCE_VENDORED_LIBS)
-  add_subdirectory(vendor/xtl)
-  set(xtl_DIR ${CMAKE_CURRENT_BINARY_DIR}/vendor/xtl)
-  add_subdirectory(vendor/xtensor)
-else()
-  find_package_write_status(xtensor)
-  if (NOT xtensor_FOUND)
-    add_subdirectory(vendor/xtl)
-    set(xtl_DIR ${CMAKE_CURRENT_BINARY_DIR}/vendor/xtl)
-    add_subdirectory(vendor/xtensor)
-  endif()
-endif()
-
 #===============================================================================
 # Catch2 library
 #===============================================================================
@@ -498,7 +481,7 @@ endif()
 # target_link_libraries treats any arguments starting with - but not -l as
 # linker flags. Thus, we can pass both linker flags and libraries together.
 target_link_libraries(libopenmc ${ldflags} ${HDF5_LIBRARIES} ${HDF5_HL_LIBRARIES}
-  xtensor fmt::fmt ${CMAKE_DL_LIBS})
+  fmt::fmt ${CMAKE_DL_LIBS})
 
 if(TARGET pugixml::pugixml)
   target_link_libraries(libopenmc pugixml::pugixml)
2 changes: 0 additions & 2 deletions cmake/OpenMCConfig.cmake.in
@@ -5,8 +5,6 @@ get_filename_component(_OPENMC_PREFIX "${OpenMC_CMAKE_DIR}/../../.." ABSOLUTE)
 
 find_package(fmt CONFIG REQUIRED HINTS ${_OPENMC_PREFIX})
 find_package(pugixml CONFIG REQUIRED HINTS ${_OPENMC_PREFIX})
-find_package(xtl CONFIG REQUIRED HINTS ${_OPENMC_PREFIX})
-find_package(xtensor CONFIG REQUIRED HINTS ${_OPENMC_PREFIX})
 if(@OPENMC_USE_DAGMC@)
   find_package(DAGMC REQUIRED HINTS @DAGMC_DIR@)
 endif()
2 changes: 1 addition & 1 deletion docs/source/quickinstall.rst
@@ -119,7 +119,7 @@ packages should be installed, for example in Homebrew via:
 
 .. code-block:: sh
 
-    brew install llvm cmake xtensor hdf5 python libomp libpng
+    brew install llvm cmake hdf5 python libomp libpng
 
 The compiler provided by the above LLVM package should be used in place of the
 one provisioned by XCode, which does not support the multithreading library used
12 changes: 6 additions & 6 deletions include/openmc/bremsstrahlung.h
@@ -3,7 +3,7 @@
 
 #include "openmc/particle.h"
 
-#include "xtensor/xtensor.hpp"
+#include "openmc/tensor.h"
 
 namespace openmc {
 
@@ -14,9 +14,9 @@ namespace openmc {
 class BremsstrahlungData {
 public:
   // Data
-  xt::xtensor<double, 2> pdf;   //!< Bremsstrahlung energy PDF
-  xt::xtensor<double, 2> cdf;   //!< Bremsstrahlung energy CDF
-  xt::xtensor<double, 1> yield; //!< Photon yield
+  tensor::Tensor<double> pdf;   //!< Bremsstrahlung energy PDF
+  tensor::Tensor<double> cdf;   //!< Bremsstrahlung energy CDF
+  tensor::Tensor<double> yield; //!< Photon yield
 };
 
 class Bremsstrahlung {
@@ -32,9 +32,9 @@
 
 namespace data {
 
-extern xt::xtensor<double, 1>
+extern tensor::Tensor<double>
   ttb_e_grid; //! energy T of incident electron in [eV]
-extern xt::xtensor<double, 1>
+extern tensor::Tensor<double>
   ttb_k_grid; //! reduced energy W/T of emitted photon
 
 } // namespace data
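The pattern in this and the following headers is the same: xt::xtensor<T, N>, which fixes the rank N as a template parameter, becomes tensor::Tensor<T>, which carries its rank at runtime in a shape vector. The new tensor.h itself is not shown in this excerpt, so the following is only a minimal sketch of the surface the diffs actually exercise (resize taking a shape vector, shape(), data(), and value_type); the row-major flat storage and anything beyond that surface are assumptions:

```cpp
// Hypothetical minimal sketch of tensor::Tensor as implied by this PR's
// usage. Only resize/shape/data/value_type appear in the diffs; the storage
// layout shown here is an assumption, not the actual implementation.
#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

namespace tensor {

template<typename T>
class Tensor {
public:
  using value_type = T;

  Tensor() = default;

  //! Reallocate storage to hold the given shape, e.g. {n_rows, n_cols}
  void resize(const std::vector<std::size_t>& shape)
  {
    shape_ = shape;
    std::size_t size = std::accumulate(
      shape.begin(), shape.end(), std::size_t {1}, std::multiplies<> {});
    data_.resize(size);
  }

  //! Extent along each dimension; rank is shape().size()
  const std::vector<std::size_t>& shape() const { return shape_; }

  //! Raw pointer for bulk I/O, as used by read_attr/read_dataset_lowlevel
  T* data() { return data_.data(); }
  const T* data() const { return data_.data(); }

private:
  std::vector<std::size_t> shape_; //!< Runtime shape vector
  std::vector<T> data_;            //!< Flat element storage (assumed row-major)
};

} // namespace tensor
```

Per the commit log, the real tensor.h additionally provides an n-dimensional View class, select, front/back helpers, and a tensor::ones function, none of which appear in this excerpt.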
8 changes: 4 additions & 4 deletions include/openmc/distribution_energy.h
@@ -5,7 +5,7 @@
 #define OPENMC_DISTRIBUTION_ENERGY_H
 
 #include "hdf5.h"
-#include "xtensor/xtensor.hpp"
+#include "openmc/tensor.h"
 
 #include "openmc/constants.h"
 #include "openmc/endf.h"
@@ -86,9 +86,9 @@ class ContinuousTabular : public EnergyDistribution {
   struct CTTable {
     Interpolation interpolation; //!< Interpolation law
     int n_discrete;              //!< Number of of discrete energies
-    xt::xtensor<double, 1> e_out; //!< Outgoing energies in [eV]
-    xt::xtensor<double, 1> p;     //!< Probability density
-    xt::xtensor<double, 1> c;     //!< Cumulative distribution
+    tensor::Tensor<double> e_out; //!< Outgoing energies in [eV]
+    tensor::Tensor<double> p;     //!< Probability density
+    tensor::Tensor<double> c;     //!< Cumulative distribution
   };
 
   int n_region_; //!< Number of inteprolation regions
4 changes: 2 additions & 2 deletions include/openmc/eigenvalue.h
@@ -6,7 +6,7 @@
 
 #include <cstdint> // for int64_t
 
-#include "xtensor/xtensor.hpp"
+#include "openmc/tensor.h"
 #include <hdf5.h>
 
 #include "openmc/array.h"
@@ -24,7 +24,7 @@ namespace simulation {
 extern double keff_generation; //!< Single-generation k on each processor
 extern array<double, 2> k_sum; //!< Used to reduce sum and sum_sq
 extern vector<double> entropy; //!< Shannon entropy at each generation
-extern xt::xtensor<double, 1> source_frac; //!< Source fraction for UFS
+extern tensor::Tensor<double> source_frac; //!< Source fraction for UFS
 
 } // namespace simulation
 
108 changes: 34 additions & 74 deletions include/openmc/hdf5_interface.h
@@ -11,8 +11,7 @@
 
 #include "hdf5.h"
 #include "hdf5_hl.h"
-#include "xtensor/xadapt.hpp"
-#include "xtensor/xarray.hpp"
+#include "openmc/tensor.h"
 
 #include "openmc/array.h"
 #include "openmc/error.h"
@@ -166,24 +165,19 @@ void read_attribute(hid_t obj_id, const char* name, vector<T>& vec)
   read_attr(obj_id, name, H5TypeMap<T>::type_id, vec.data());
 }
 
-// Generic array version
+// Tensor version
 template<typename T>
-void read_attribute(hid_t obj_id, const char* name, xt::xarray<T>& arr)
+void read_attribute(hid_t obj_id, const char* name, tensor::Tensor<T>& tensor)
 {
-  // Get shape of attribute array
+  // Get shape of attribute
   auto shape = attribute_shape(obj_id, name);
 
-  // Allocate new array to read data into
-  std::size_t size = 1;
-  for (const auto x : shape)
-    size *= x;
-  vector<T> buffer(size);
+  // Resize tensor and read data directly
+  vector<size_t> tshape(shape.begin(), shape.end());
+  tensor.resize(tshape);
 
   // Read data from attribute
-  read_attr(obj_id, name, H5TypeMap<T>::type_id, buffer.data());
-
-  // Adapt array into xarray
-  arr = xt::adapt(buffer, shape);
+  read_attr(obj_id, name, H5TypeMap<T>::type_id, tensor.data());
 }
 
 // overload for std::string
@@ -290,61 +284,32 @@ void read_dataset(
 }
 
 template<typename T>
-void read_dataset(hid_t dset, xt::xarray<T>& arr, bool indep = false)
+void read_dataset(hid_t dset, tensor::Tensor<T>& tensor, bool indep = false)
 {
   // Get shape of dataset
   vector<hsize_t> shape = object_shape(dset);
 
-  // Allocate space in the array to read data into
-  std::size_t size = 1;
-  for (const auto x : shape)
-    size *= x;
-  arr.resize(shape);
+  // Resize tensor and read data directly
+  vector<size_t> tshape(shape.begin(), shape.end());
+  tensor.resize(tshape);
 
-  // Read data from attribute
+  // Read data from dataset
   read_dataset_lowlevel(
-    dset, nullptr, H5TypeMap<T>::type_id, H5S_ALL, indep, arr.data());
+    dset, nullptr, H5TypeMap<T>::type_id, H5S_ALL, indep, tensor.data());
 }
 
 template<>
 void read_dataset(
-  hid_t dset, xt::xarray<std::complex<double>>& arr, bool indep);
+  hid_t dset, tensor::Tensor<std::complex<double>>& tensor, bool indep);
 
 template<typename T>
 void read_dataset(
-  hid_t obj_id, const char* name, xt::xarray<T>& arr, bool indep = false)
-{
-  // Open dataset and read array
-  hid_t dset = open_dataset(obj_id, name);
-  read_dataset(dset, arr, indep);
-  close_dataset(dset);
-}
-
-template<typename T, std::size_t N>
-void read_dataset(
-  hid_t obj_id, const char* name, xt::xtensor<T, N>& arr, bool indep = false)
+  hid_t obj_id, const char* name, tensor::Tensor<T>& tensor, bool indep = false)
 {
-  // Open dataset and read array
+  // Open dataset and read tensor
   hid_t dset = open_dataset(obj_id, name);
-
-  // Get shape of dataset
-  vector<hsize_t> hsize_t_shape = object_shape(dset);
-
-  // cast from hsize_t to size_t
-  vector<size_t> shape(hsize_t_shape.size());
-  for (int i = 0; i < shape.size(); i++) {
-    shape[i] = static_cast<size_t>(hsize_t_shape[i]);
-  }
-
-  // Allocate new xarray to read data into
-  xt::xarray<T> xarr(shape);
-
-  // Read data from the dataset
-  read_dataset(obj_id, name, xarr);
-
-  // Copy into xtensor
-  arr = xarr;
+  read_dataset(dset, tensor, indep);
+  close_dataset(dset);
 }
 
 // overload for Position
 inline void read_dataset(
 ...
   r.z = x[2];
 }
 
-template<typename T, std::size_t N>
+template<typename T>
 inline void read_dataset_as_shape(
-  hid_t obj_id, const char* name, xt::xtensor<T, N>& arr, bool indep = false)
+  hid_t obj_id, const char* name, tensor::Tensor<T>& tensor, bool indep = false)
 {
   hid_t dset = open_dataset(obj_id, name);
 
-  // Allocate new array to read data into
-  std::size_t size = 1;
-  for (const auto x : arr.shape())
-    size *= x;
-  vector<T> buffer(size);
-
-  // Read data from attribute
+  // Read data directly into pre-shaped tensor
   read_dataset_lowlevel(
-    dset, nullptr, H5TypeMap<T>::type_id, H5S_ALL, indep, buffer.data());
-
-  // Adapt into xarray
-  arr = xt::adapt(buffer, arr.shape());
+    dset, nullptr, H5TypeMap<T>::type_id, H5S_ALL, indep, tensor.data());
 
   close_dataset(dset);
 }
 
-template<typename T, std::size_t N>
-inline void read_nd_vector(hid_t obj_id, const char* name,
-  xt::xtensor<T, N>& result, bool must_have = false)
+template<typename T>
+inline void read_nd_tensor(hid_t obj_id, const char* name,
  tensor::Tensor<T>& result, bool must_have = false)
 {
   if (object_exists(obj_id, name)) {
     read_dataset_as_shape(obj_id, name, result, true);
@@ -496,12 +452,16 @@ inline void write_dataset(
     false, buffer.data());
 }
 
-// Template for xarray, xtensor, etc.
-template<typename D>
-inline void write_dataset(
-  hid_t obj_id, const char* name, const xt::xcontainer<D>& arr)
+// Template for Tensor and StaticTensor2D. A SFINAE guard is used here to
+// prevent this template from matching vector/string types that have their own
+// overloads above. A generic Container parameter avoids duplicating the body
+// for both Tensor<T> and StaticTensor2D<T,R,C>.
+template<typename Container,
+  typename =
+    std::enable_if_t<tensor::is_tensor<std::decay_t<Container>>::value>>
+inline void write_dataset(hid_t obj_id, const char* name, const Container& arr)
 {
-  using T = typename D::value_type;
+  using T = typename std::decay_t<Container>::value_type;
   auto s = arr.shape();
   vector<hsize_t> dims {s.cbegin(), s.cend()};
   write_dataset_lowlevel(obj_id, dims.size(), dims.data(), name,
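The rewritten write_dataset overload above references a tensor::is_tensor trait that is defined in tensor.h rather than in this diff. A plausible sketch, assuming a conventional true/false type trait with partial specializations for the two container templates named in the comment (the non-type parameter kinds of StaticTensor2D are a guess):

```cpp
// Hypothetical sketch of tensor::is_tensor as assumed by the SFINAE guard.
// The trait is not part of this diff; StaticTensor2D<T, R, C> with
// compile-time rows/columns is inferred from the comment, not confirmed.
#include <cstddef>
#include <type_traits>

namespace tensor {

template<typename T>
class Tensor; // forward declaration; trait matching needs no definition

template<typename T, std::size_t R, std::size_t C>
class StaticTensor2D; // forward declaration, parameter kinds assumed

// Primary template: any type is "not a tensor" by default
template<typename T>
struct is_tensor : std::false_type {};

// Specializations for the two container templates named in the comment
template<typename T>
struct is_tensor<Tensor<T>> : std::true_type {};

template<typename T, std::size_t R, std::size_t C>
struct is_tensor<StaticTensor2D<T, R, C>> : std::true_type {};

} // namespace tensor
```

With this guard in place, a call such as write_dataset(group, "flux", my_tensor) resolves to the tensor overload, while write_dataset(group, "name", some_string) still picks the string overload, because enable_if_t removes the template from overload resolution whenever is_tensor yields false.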
4 changes: 2 additions & 2 deletions include/openmc/material.h
@@ -5,8 +5,8 @@
 #include <unordered_map>
 
 #include "openmc/span.h"
+#include "openmc/tensor.h"
 #include "pugixml.hpp"
-#include "xtensor/xtensor.hpp"
 #include <hdf5.h>
 
 #include "openmc/bremsstrahlung.h"
@@ -189,7 +189,7 @@ class Material {
   vector<int> nuclide_;      //!< Indices in nuclides vector
   vector<int> element_;      //!< Indices in elements vector
   NCrystalMat ncrystal_mat_; //!< NCrystal material object
-  xt::xtensor<double, 1> atom_density_; //!< Nuclide atom density in [atom/b-cm]
+  tensor::Tensor<double> atom_density_; //!< Nuclide atom density in [atom/b-cm]
   double density_;        //!< Total atom density in [atom/b-cm]
   double density_gpcc_;   //!< Total atom density in [g/cm^3]
   double charge_density_; //!< Total charge density in [e/b-cm]
16 changes: 8 additions & 8 deletions include/openmc/mesh.h
@@ -8,8 +8,8 @@
 #include <unordered_map>
 
 #include "hdf5.h"
+#include "openmc/tensor.h"
 #include "pugixml.hpp"
-#include "xtensor/xtensor.hpp"
 
 #include "openmc/bounding_box.h"
 #include "openmc/error.h"
@@ -284,8 +284,8 @@ class Mesh {
   virtual Position upper_right() const = 0;
 
   // Data members
-  xt::xtensor<double, 1> lower_left_;  //!< Lower-left coordinates of mesh
-  xt::xtensor<double, 1> upper_right_; //!< Upper-right coordinates of mesh
+  tensor::Tensor<double> lower_left_;  //!< Lower-left coordinates of mesh
+  tensor::Tensor<double> upper_right_; //!< Upper-right coordinates of mesh
   int id_ {-1};            //!< Mesh ID
   std::string name_;       //!< User-specified name
   int n_dimension_ {-1};   //!< Number of dimensions
@@ -348,7 +348,7 @@ class StructuredMesh : public Mesh {
   //! \param[in] Pointer to bank sites
   //! \param[in] Number of bank sites
   //! \param[out] Whether any bank sites are outside the mesh
-  xt::xtensor<double, 1> count_sites(
+  tensor::Tensor<double> count_sites(
     const SourceSite* bank, int64_t length, bool* outside) const;
 
   //! Get bin given mesh indices
@@ -419,8 +419,8 @@ class StructuredMesh : public Mesh {
   //! Get a label for the mesh bin
   std::string bin_label(int bin) const override;
 
-  //! Get shape as xt::xtensor
-  xt::xtensor<int, 1> get_x_shape() const;
+  //! Get mesh dimensions as a tensor
+  tensor::Tensor<int> get_shape_tensor() const;
 
   double volume(int bin) const override
   {
@@ -515,7 +515,7 @@ class RegularMesh : public StructuredMesh {
   //! \param[in] bank Array of bank sites
   //! \param[out] Whether any bank sites are outside the mesh
   //! \return Array indicating number of sites in each mesh/energy bin
-  xt::xtensor<double, 1> count_sites(
+  tensor::Tensor<double> count_sites(
     const SourceSite* bank, int64_t length, bool* outside) const;
 
   //! Return the volume for a given mesh index
@@ -526,7 +526,7 @@ class RegularMesh : public StructuredMesh {
   // Data members
   double volume_frac_;    //!< Volume fraction of each mesh element
   double element_volume_; //!< Volume of each mesh element
-  xt::xtensor<double, 1> width_; //!< Width of each mesh element
+  tensor::Tensor<double> width_; //!< Width of each mesh element
 };
 
 class RectilinearMesh : public StructuredMesh {