diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 00000000..18fdbbff
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,10 @@
+# This file allows setting automatically reviewers for pull requests.
+# Each line is a file pattern followed by one or more owners.
+# The last match takes precedence over previous ones.
+# Do not edit unless specifically mandated to do so.
+
+# Global/fallback and technical modifications.
+* @maarten-ic @prasad-sawantdesai @olivhoenen
+
+# Modifications to CODEOWNERS and action workflows
+.github/ @SimonPinches @olivhoenen
diff --git a/README.md b/README.md
index 35c1ae46..42d8d486 100644
--- a/README.md
+++ b/README.md
@@ -8,13 +8,20 @@ Data Model.
## Install
-Install steps are described in the documentation generated from `/docs/source/installing.rst`.
+Simply install IMAS-Python with ``pip``:
+```bash
+pip install imas-python
+```
+or with optional dependencies for netCDF and xarray support:
+```bash
+pip install imas-python[netcdf,xarray]
+```
-Documentation is autogenerated from the source using [Sphinx](http://sphinx-doc.org/)
+More details are described in the documentation generated from `/docs/source/installing.rst`.
+The documentation is autogenerated from the source using [Sphinx](http://sphinx-doc.org/)
and can be found at the [readthedocs](https://imas-python.readthedocs.io/en/latest/)
-The documentation can be manually generated by installing sphinx and running:
-
+To generate the documentation yourself, install the ``docs`` optional dependencies and do:
```bash
make -C docs html
```
diff --git a/ci/build_dd_zip.sh b/ci/build_dd_zip.sh
deleted file mode 100755
index 1b95bc4b..00000000
--- a/ci/build_dd_zip.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-# Bamboo CI script to build IDSDef.zip
-# Note: this script should be run from the root of the git repository
-
-# Debuggging:
-if [[ "$(uname -n)" == *"bamboo"* ]]; then
- set -e -o pipefail
-fi
-echo "Loading modules..."
-
-# Set up environment such that module files can be loaded
-source /etc/profile.d/modules.sh
-module purge
-# Modules are supplied as arguments in the CI job:
-if [ -z "$@" ]; then
- module load Python
-else
- module load $@
-fi
-
-# Debuggging:
-echo "Done loading modules"
-
-# Build the DD zip
-rm -rf venv # Environment should be clean, but remove directory to be sure
-python -m venv venv
-source venv/bin/activate
-pip install gitpython saxonche packaging
-python imas/dd_helpers.py
-deactivate
diff --git a/ci/build_docs_and_dist.sh b/ci/build_docs_and_dist.sh
deleted file mode 100755
index f0084b8a..00000000
--- a/ci/build_docs_and_dist.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash
-# Bamboo CI script to install imas Python module and run all tests
-# Note: this script should be run from the root of the git repository
-
-# Debuggging:
-if [[ "$(uname -n)" == *"bamboo"* ]]; then
- set -e -o pipefail
-fi
-echo "Loading modules:" $@
-
-# Set up environment such that module files can be loaded
-source /etc/profile.d/modules.sh
-module purge
-# Modules are supplied as arguments in the CI job:
-module load $@
-
-# Debuggging:
-echo "Done loading modules"
-
-# Set up the testing venv
-rm -rf venv # Environment should be clean, but remove directory to be sure
-python -m venv venv
-source venv/bin/activate
-
-# Create sdist and wheel
-pip install --upgrade pip setuptools wheel build
-rm -rf dist
-python -m build .
-
-# Install imas Python module and documentation dependencies from the just-built wheel
-pip install "`readlink -f dist/*.whl`[docs,netcdf]"
-
-# Debugging:
-pip freeze
-
-# Enable sphinx options:
-# - `-W`: turn warnings into errors
-# - `-n`: nit-picky mode, warn about all missing references
-# - `--keep-going`: with -W, keep going when getting warnings
-export SPHINXOPTS='-W -n --keep-going'
-
-# Run sphinx to create the documentation
-make -C docs clean html
-
-deactivate
diff --git a/ci/linting.sh b/ci/linting.sh
deleted file mode 100755
index d9164777..00000000
--- a/ci/linting.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-# Bamboo CI script for linting
-# Note: this script should be run from the root of the git repository
-
-# Debuggging:
-if [[ "$(uname -n)" == *"bamboo"* ]]; then
- set -e -o pipefail
-fi
-echo "Loading modules..."
-
-# Set up environment such that module files can be loaded
-source /etc/profile.d/modules.sh
-module purge
-# Modules are supplied as arguments in the CI job:
-if [ -z "$@" ]; then
- module load Python
-else
- module load $@
-fi
-
-# Debuggging:
-echo "Done loading modules"
-
-# Create a venv
-rm -rf venv
-python -m venv venv
-. venv/bin/activate
-
-# Install and run linters
-pip install --upgrade 'black >=24,<25' flake8
-
-black --check imas
-flake8 imas
-
-deactivate
\ No newline at end of file
diff --git a/ci/run_benchmark.sh b/ci/run_benchmark.sh
deleted file mode 100755
index ae24ce2d..00000000
--- a/ci/run_benchmark.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/bin/bash
-# Bamboo CI script to install imas Python module and run all tests
-# Note: this script should be run from the root of the git repository
-
-# Debuggging:
-
-echo "Loading modules:" $@
-BENCHMARKS_DIR=$(realpath "$PWD/imas_benchmarks")
-if [[ "$(uname -n)" == *"bamboo"* ]]; then
- set -e -o pipefail
- # create
- BENCHMARKS_DIR=$(realpath "/mnt/bamboo_deploy/imas/benchmarks/")
-fi
-
-# Set up environment such that module files can be loaded
-source /etc/profile.d/modules.sh
-module purge
-# Modules are supplied as arguments in the CI job:
-# IMAS-AL-Python/5.2.1-intel-2023b-DD-3.41.0 Saxon-HE/12.4-Java-21
-if [ -z "$@" ]; then
- module load IMAS-AL-Core
-else
- module load $@
-fi
-
-
-
-# Debuggging:
-echo "Done loading modules"
-
-# Export current PYTHONPATH so ASV benchmarks can import imas
-export ASV_PYTHONPATH="$PYTHONPATH"
-
-# Set up the testing venv
-rm -rf venv # Environment should be clean, but remove directory to be sure
-python -m venv venv
-source venv/bin/activate
-
-# Install asv and imas
-pip install --upgrade pip setuptools wheel
-pip install virtualenv .[test]
-
-# Generate MDS+ models cache
-python -c 'import imas.backends.imas_core.mdsplus_model; print(imas.backends.imas_core.mdsplus_model.mdsplus_model_dir(imas.IDSFactory()))'
-
-# Copy previous results (if any)
-mkdir -p "$BENCHMARKS_DIR/results"
-mkdir -p .asv
-cp -rf "$BENCHMARKS_DIR/results" .asv/
-
-# Ensure numpy won't do multi-threading
-export OPENBLAS_NUM_THREADS=1
-export MKL_NUM_THREADS=1
-export OMP_NUM_THREADS=1
-
-# Ensure there is a machine configuration
-asv machine --yes
-
-# Run ASV for the current commit, develop and main
-asv run --skip-existing-successful HEAD^!
-asv run --skip-existing-successful develop^!
-asv run --skip-existing-successful main^!
-
-# Compare results
-if [ `git rev-parse --abbrev-ref HEAD` == develop ]
-then
- asv compare main develop --machine $(hostname) || echo "asv compare failed"
-else
- asv compare develop HEAD --machine $(hostname) || echo "asv compare failed"
-fi
-
-# Publish results
-asv publish
-
-# And persistently store them
-cp -rf .asv/{results,html} "$BENCHMARKS_DIR"
-
-
-
diff --git a/ci/run_pytest.sh b/ci/run_pytest.sh
deleted file mode 100755
index 4af184dc..00000000
--- a/ci/run_pytest.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-# Bamboo CI script to install imas Python module and run all tests
-# Note: this script should be run from the root of the git repository
-
-# Debuggging:
-if [[ "$(uname -n)" == *"bamboo"* ]]; then
- set -e -o pipefail
-fi
-echo "Loading modules:" $@
-
-# Set up environment such that module files can be loaded
-source /etc/profile.d/modules.sh
-module purge
-# Modules are supplied as arguments in the CI job:
-if [ -z "$@" ]; then
- module load IMAS-AL-Core Java MDSplus
-else
- module load $@
-fi
-
-# Debuggging:
-echo "Done loading modules"
-
-# Set up the testing venv
-rm -rf venv # Environment should be clean, but remove directory to be sure
-python -m venv venv
-source venv/bin/activate
-
-# Install imas and test dependencies
-pip install --upgrade pip setuptools wheel
-pip install .[h5py,netcdf,test]
-
-# Debugging:
-pip freeze
-
-# Run pytest
-# Clean artifacts created by pytest
-rm -f junit.xml
-rm -rf htmlcov
-
-# setups local directory to not to full /tmp directory with pytest temporary files
-# mkdir -p ~/tmp
-# export PYTEST_DEBUG_TEMPROOT=~/tmp
-python -m pytest -n=auto --cov=imas --cov-report=term-missing --cov-report=html --junit-xml=junit.xml
-
-
diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index ae995b0e..f99e24d2 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -3,6 +3,21 @@
Changelog
=========
+What's new in IMAS-Python 2.0.1
+-------------------------------
+
+Improvements
+''''''''''''
+
+- improve DD3-->DD4 conversion (sign conversion to dodpsi_like)
+- improve conversion of pulse_schedule IDS >= 3.39.0
+- numpy 2 compatibility
+- improve UDA data fetch
+- improve documentation
+- new dependency on `imas-data-dictionaries package <https://pypi.org/project/imas-data-dictionaries/>`__ (remove internal build via saxonche, except for the optional MDSplus models)
+- full compatibility of tests with netCDF<1.7 (no complex numbers)
+
+
What's new in IMAS-Python 2.0.0
-------------------------------
diff --git a/docs/source/courses/advanced/dd_versions.rst b/docs/source/courses/advanced/dd_versions.rst
index 3f7f19fa..ab87097e 100644
--- a/docs/source/courses/advanced/dd_versions.rst
+++ b/docs/source/courses/advanced/dd_versions.rst
@@ -60,7 +60,7 @@ Bundled Data Dictionary definitions
IMAS-Python comes bundled [#DDdefs]_ with many versions of the Data Dictionary definitions.
You can find out which versions are available by calling
-:py:meth:`imas.dd_zip.dd_xml_versions`.
+``imas.dd_zip.dd_xml_versions``.
Converting an IDS between Data Dictionary versions
@@ -290,6 +290,6 @@ build, you can use them like you normally would.
.. rubric:: Footnotes
-.. [#DDdefs] To be more precise, the Data Dictionary definitions are generated when the
- IMAS-Python package is created. See :ref:`this reference
` for more
- details.
+.. [#DDdefs] To be more precise, the Data Dictionary definitions are provided by the
+   `IMAS Data Dictionaries <https://pypi.org/project/imas-data-dictionaries/>`__
+   package.
diff --git a/docs/source/courses/basic/analyze.rst b/docs/source/courses/basic/analyze.rst
index d1ae1434..21a7c68b 100644
--- a/docs/source/courses/basic/analyze.rst
+++ b/docs/source/courses/basic/analyze.rst
@@ -246,3 +246,20 @@ Exercise 5
A plot of :math:`T_e` vs :math:`t`.
.. seealso:: :ref:`Lazy loading`
+
+
+Explore the DBEntry and occurrences
+'''''''''''''''''''''''''''''''''''
+
+You may not know a priori which types of IDSs are available within an IMAS database entry.
+It can also happen that several IDS objects of the same type are stored within
+this entry, in that case each IDS is stored as a separate `occurrence`
+(occurrences are identified with an integer value, 0 being the default).
+
+In IMAS-Python, the function :meth:`~imas.db_entry.DBEntry.list_all_occurrences()` will
+help you find which occurrences are available in a given database entry and for a given
+IDS type.
+
+The following snippet shows how to list the available IDSs in a given database entry:
+
+.. literalinclude:: imas_snippets/explore_data_entry.py
diff --git a/docs/source/courses/basic/explore.rst b/docs/source/courses/basic/explore.rst
index e3395eda..348d9ab4 100644
--- a/docs/source/courses/basic/explore.rst
+++ b/docs/source/courses/basic/explore.rst
@@ -7,8 +7,8 @@ In this part of the training, we will learn how to use Python to explore data
saved in IDSs.
-Explore which IDSs are available
---------------------------------
+Explore which IDS structures are available
+------------------------------------------
Most codes will touch multiple IDSs inside a single IMAS data entry. For example
a heating code using a magnetic equilibrium from the ``equilibrium`` IDS with a
diff --git a/docs/source/courses/basic/imas_snippets/explore_data_entry.py b/docs/source/courses/basic/imas_snippets/explore_data_entry.py
new file mode 100644
index 00000000..2ec02698
--- /dev/null
+++ b/docs/source/courses/basic/imas_snippets/explore_data_entry.py
@@ -0,0 +1,11 @@
+import imas
+
+# Open input data entry
+entry = imas.DBEntry("imas:hdf5?path=<...>", "r")
+
+# Print the list of available IDSs with their occurrence
+for idsname in imas.IDSFactory().ids_names():
+ for occ in entry.list_all_occurrences(idsname):
+ print(idsname, occ)
+
+entry.close()
diff --git a/docs/source/imas_architecture.rst b/docs/source/imas_architecture.rst
index b1764bed..182d2a0c 100644
--- a/docs/source/imas_architecture.rst
+++ b/docs/source/imas_architecture.rst
@@ -72,11 +72,7 @@ Data Dictionary building and loading
The following submodules are responsible for building the Data Dictionary and loading DD
definitions at runtime.
-- :py:mod:`imas.dd_helpers` handles building the ``IDSDef.zip`` file, containing all
- versions of the Data Dictionary since ``3.22.0``.
-
- :py:mod:`imas.dd_zip` handles loading the Data Dictionary definitions at run time.
- These definitions can be loaded from an ``IDSDef.zip`` or from a custom XML file.
.. _imas_architecture/IDS_nodes:
diff --git a/docs/source/installing.rst b/docs/source/installing.rst
index 0f2129ca..800a42ff 100644
--- a/docs/source/installing.rst
+++ b/docs/source/installing.rst
@@ -13,6 +13,29 @@ To get started, you can install it from `pypi.org `__).
+ This migration constructs a common time base per subgroup, and interpolates
+ the dynamic quantities within the group to the new time base. Resampling
+ uses `previous neighbour` interpolation for integer quantities, and linear
+ interpolation otherwise. See also:
+ https://github.com/iterorganization/IMAS-Python/issues/21.
.. _`DD background`:
@@ -197,21 +207,14 @@ Automated tests have been provided that check the loading of all of the DD
versions tagged in the data-dictionary git repository.
-Extending the DD set
-''''''''''''''''''''
+Data Dictionary definitions
+'''''''''''''''''''''''''''
-Use the command ``python setup.py build_DD`` to build a new ``IDSDef.zip``. This
-fetches all tags from the data dictionary git repository and builds the ``IDSDef.zip``.
+The Data Dictionary definitions used by IMAS-Python are provided by the `IMAS Data
+Dictionaries <https://pypi.org/project/imas-data-dictionaries/>`__ package.
+Please update this package if you need a more recent version of the data dictionary. For
+example, using ``pip``:
-IMAS-Python searches for an ``IDSDef.zip`` in the following locations:
+.. code-block:: bash
-1. The environment variable ``$IMAS_DDZIP`` (path to a zip file)
-2. The file ``./IDSDef.zip`` in the current working directory
-3. In the local configuration folder: ``~/.config/imas/IDSDef.zip``, or
- ``$XDG_CONFIG_DIR/imas/IDSDef.zip`` (if the environment variable
- ``$XDG_CONFIG_DIR`` is set)
-4. The zipfile bundled with the IMAS-Python installation: ``assets/IDSDef.zip``
-
-All paths are searched in order when loading the definitions of a specific data
-dictionary version: the first zip file that contains the definitions of the requested
-version is used.
+ pip install --upgrade imas-data-dictionaries
diff --git a/imas/__init__.py b/imas/__init__.py
index 0ed10404..58a66994 100644
--- a/imas/__init__.py
+++ b/imas/__init__.py
@@ -20,7 +20,6 @@
# Load the IMAS-Python IMAS AL/DD core
from . import (
db_entry,
- dd_helpers,
dd_zip,
util,
)
diff --git a/imas/backends/imas_core/al_context.py b/imas/backends/imas_core/al_context.py
index 3341121b..1685e384 100644
--- a/imas/backends/imas_core/al_context.py
+++ b/imas/backends/imas_core/al_context.py
@@ -1,7 +1,6 @@
# This file is part of IMAS-Python.
# You should have received the IMAS-Python LICENSE file with this project.
-"""Object-oriented interface to the IMAS lowlevel.
-"""
+"""Object-oriented interface to the IMAS lowlevel."""
import logging
import weakref
@@ -61,17 +60,21 @@ def __enter__(self) -> "ALContext":
def __exit__(self, exc_type, exc_value, traceback) -> None:
ll_interface.end_action(self.ctx)
- def global_action(self, path: str, rwmode: int) -> "ALContext":
+ def global_action(self, path: str, rwmode: int, datapath: str = "") -> "ALContext":
"""Begin a new global action for use in a ``with`` context.
Args:
path: access layer path for this global action: ``[/]``
rwmode: read-only or read-write operation mode: ``READ_OP``/``WRITE_OP``
+ datapath: used by UDA backend to fetch only part of the data.
Returns:
The created context.
"""
- status, ctx = ll_interface.begin_global_action(self.ctx, path, rwmode)
+ args = [self.ctx, path, rwmode]
+ if datapath: # AL4 compatibility: datapath arg was added in AL5
+ args.append(datapath)
+ status, ctx = ll_interface.begin_global_action(*args)
if status != 0:
raise LowlevelError("global_action", status)
return ALContext(ctx)
diff --git a/imas/backends/imas_core/db_entry_al.py b/imas/backends/imas_core/db_entry_al.py
index 52d82fe6..b3240ebd 100644
--- a/imas/backends/imas_core/db_entry_al.py
+++ b/imas/backends/imas_core/db_entry_al.py
@@ -257,7 +257,8 @@ def get(
if occurrence != 0:
ll_path += f"/{occurrence}"
- with self._db_ctx.global_action(ll_path, READ_OP) as read_ctx:
+ datapath = "ids_properties" if self.backend == "uda" else ""
+ with self._db_ctx.global_action(ll_path, READ_OP, datapath) as read_ctx:
time_mode_path = "ids_properties/homogeneous_time"
time_mode = read_ctx.read_data(time_mode_path, "", INTEGER_DATA, 0)
# This is already checked by read_dd_version, but ensure:
@@ -314,7 +315,8 @@ def read_dd_version(self, ids_name: str, occurrence: int) -> str:
if occurrence != 0:
ll_path += f"/{occurrence}"
- with self._db_ctx.global_action(ll_path, READ_OP) as read_ctx:
+ datapath = "ids_properties" if self.backend == "uda" else ""
+ with self._db_ctx.global_action(ll_path, READ_OP, datapath) as read_ctx:
time_mode_path = "ids_properties/homogeneous_time"
time_mode = read_ctx.read_data(time_mode_path, "", INTEGER_DATA, 0)
dd_version_path = "ids_properties/version_put/data_dictionary"
diff --git a/imas/backends/imas_core/mdsplus_model.py b/imas/backends/imas_core/mdsplus_model.py
index 48864346..3c91cefb 100644
--- a/imas/backends/imas_core/mdsplus_model.py
+++ b/imas/backends/imas_core/mdsplus_model.py
@@ -12,7 +12,6 @@
import time
import uuid
from pathlib import Path
-from saxonche import PySaxonProcessor
from subprocess import CalledProcessError, check_output
from zlib import crc32
@@ -244,11 +243,20 @@ def transform_with_xslt(xslt_processor, source, xslfile, output_file):
def create_model_ids_xml(cache_dir_path, fname, version):
"""Use Saxon/C to compile an ids.xml suitable for creating an MDSplus model."""
+ try:
+ import saxonche
+ except ImportError:
+ raise RuntimeError(
+ "Building mdsplus models requires the 'saxonche' python package. "
+ "Please install this package (for example with 'pip install saxonche') "
+ "and try again."
+ )
+
try:
with as_file(files("imas") / "assets" / "IDSDef2MDSpreTree.xsl") as xslfile:
output_file = Path(cache_dir_path) / "ids.xml"
- with PySaxonProcessor(license=False) as proc:
+ with saxonche.PySaxonProcessor(license=False) as proc:
xslt_processor = proc.new_xslt30_processor()
xdm_ddgit = proc.make_string_value(str(version) or fname)
xslt_processor.set_parameter("DD_GIT_DESCRIBE", xdm_ddgit)
diff --git a/imas/dd_helpers.py b/imas/dd_helpers.py
deleted file mode 100644
index 446a9991..00000000
--- a/imas/dd_helpers.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# This file is part of IMAS-Python.
-# You should have received the IMAS-Python LICENSE file with this project.
-"""Helper functions to build IDSDef.xml"""
-
-import logging
-import os
-import shutil
-from pathlib import Path
-from typing import Tuple
-from zipfile import ZIP_DEFLATED, ZipFile
-
-from packaging.version import Version as V
-from saxonche import PySaxonProcessor
-
-logger = logging.getLogger(__name__)
-
-_idsdef_zip_relpath = Path("imas/assets/IDSDef.zip")
-_build_dir = Path("build")
-
-
-def prepare_data_dictionaries():
- """Build IMAS IDSDef.xml files for each tagged version in the DD repository
- 1. Use saxonche for transformations
- 2. Clone the DD repository (ask for user/pass unless ssh key access is available)
- 3. Generate IDSDef.xml and rename to IDSDef_${version}.xml
- 4. Zip all these IDSDefs together and include in wheel
- """
- from git import Repo
-
- repo: Repo = get_data_dictionary_repo()
- if repo:
- newest_version_and_tag = (V("0"), None)
- for tag in repo.tags:
- version_and_tag = (V(str(tag)), tag)
- if V(str(tag)) > V("3.21.1"):
- newest_version_and_tag = max(newest_version_and_tag, version_and_tag)
- logger.debug("Building data dictionary version %s", tag)
- build_data_dictionary(repo, tag)
-
- logger.info("Creating zip file of DD versions")
-
- if _idsdef_zip_relpath.is_file():
- logger.warning("Overwriting '%s'", _idsdef_zip_relpath)
-
- with ZipFile(
- _idsdef_zip_relpath,
- mode="w", # this needs w, since zip can have multiple same entries
- compression=ZIP_DEFLATED,
- ) as dd_zip:
- for filename in _build_dir.glob("[0-9]*.xml"):
- arcname = Path("data-dictionary").joinpath(*filename.parts[1:])
- dd_zip.write(filename, arcname=arcname)
- # Include identifiers from latest tag in zip file
- repo.git.checkout(newest_version_and_tag[1], force=True)
- # DD layout <= 4.0.0
- for filename in Path("data-dictionary").glob("*/*identifier.xml"):
- arcname = Path("identifiers").joinpath(*filename.parts[1:])
- dd_zip.write(filename, arcname=arcname)
- # DD layout > 4.0.0
- for filename in Path("data-dictionary").glob("schemas/*/*identifier.xml"):
- arcname = Path("identifiers").joinpath(*filename.parts[2:])
- dd_zip.write(filename, arcname=arcname)
-
-
-def get_data_dictionary_repo() -> Tuple[bool, bool]:
- try:
- import git # Import git here, the user might not have it!
- except ModuleNotFoundError:
- raise RuntimeError(
- "Could not find 'git' module, try 'pip install gitpython'. \
- Will not build Data Dictionaries!"
- )
-
- # We need the actual source code (for now) so grab it from ITER
- dd_repo_path = "data-dictionary"
-
- if "DD_DIRECTORY" in os.environ:
- logger.info("Found DD_DIRECTORY, copying")
- try:
- shutil.copytree(os.environ["DD_DIRECTORY"], dd_repo_path)
- except FileExistsError:
- pass
- else:
- logger.info("Trying to pull data dictionary git repo from ITER")
-
- # Set up a bare repo and fetch the data-dictionary repository in it
- os.makedirs(dd_repo_path, exist_ok=True)
- try:
- repo = git.Repo(dd_repo_path)
- except git.exc.InvalidGitRepositoryError:
- repo = git.Repo.init(dd_repo_path)
- logger.info("Set up local git repository {!s}".format(repo))
-
- try:
- origin = repo.remote()
- except ValueError:
- dd_repo_url = "https://github.com/iterorganization/imas-data-dictionary.git"
- origin = repo.create_remote("origin", url=dd_repo_url)
- logger.info("Set up remote '{!s}' linking to '{!s}'".format(origin, origin.url))
-
- try:
- origin.fetch(tags=True)
- except git.exc.GitCommandError as ee:
- logger.warning(
- "Could not fetch tags from %s. Git reports:\n %s." "\nTrying to continue",
- list(origin.urls),
- ee,
- )
- else:
- logger.info("Remote tags fetched")
- return repo
-
-
-def _run_xsl_transformation(
- xsd_file: Path, xsl_file: Path, tag: str, output_file: Path
-) -> None:
- """
- This function performs an XSL transformation using Saxon-HE (saxonche)
- with the provided XSD file, XSL file, tag, and output file.
-
- Args:
- xsd_file (Path): XML Schema Definition (XSD) file
- xsl_file (Path): The `xsl_file` parameter
- tag (str): tag name to provide to 'DD_GIT_DESCRIBE' parameter
- output_file (Path): The `output_file` parameter for resulting xml
- """
- with PySaxonProcessor(license=False) as proc:
- logger.debug("Initializing Saxon Processor")
- xsltproc = proc.new_xslt30_processor()
- xdm_ddgit = proc.make_string_value(tag)
- xsltproc.set_parameter("DD_GIT_DESCRIBE", xdm_ddgit)
- xsltproc.transform_to_file(
- source_file=str(xsd_file),
- stylesheet_file=str(xsl_file),
- output_file=str(output_file),
- )
-
-
-def build_data_dictionary(repo, tag: str, rebuild=False) -> None:
- """Build a single version of the data dictionary given by the tag argument
- if the IDS does not already exist.
-
- In the data-dictionary repository sometimes IDSDef.xml is stored
- directly, in which case we do not call make.
-
- Args:
- repo: Repository object containing the DD source code
- tag: The DD version tag that will be build
- rebuild: If true, overwrites existing pre-build tagged DD version
- """
- _build_dir.mkdir(exist_ok=True)
- result_xml = _build_dir / f"{tag}.xml"
-
- if result_xml.exists() and not rebuild:
- logger.debug(f"XML for tag '{tag}' already exists, skipping")
- return
-
- repo.git.checkout(tag, force=True)
-
- # Perform the XSL transformation with saxonche
- dd_xsd = Path("data-dictionary/dd_data_dictionary.xml.xsd")
- dd_xsl = Path("data-dictionary/dd_data_dictionary.xml.xsl")
- _run_xsl_transformation(dd_xsd, dd_xsl, tag.name, result_xml)
-
-
-if __name__ == "__main__":
- prepare_data_dictionaries()
diff --git a/imas/dd_zip.py b/imas/dd_zip.py
index 2d62224a..e4cce369 100644
--- a/imas/dd_zip.py
+++ b/imas/dd_zip.py
@@ -1,103 +1,27 @@
# This file is part of IMAS-Python.
# You should have received the IMAS-Python LICENSE file with this project.
-""" Extract DD versions from a zip file.
+"""Extract DD versions from the imas-data-dictionaries distribution."""
-The zip file contains files as
-* `data-dictionary/3.30.0.xml`
-* `data-dictionary/3.29.0.xml`
-
-multiple paths are checked. See `ZIPFILE_LOCATIONS`.
-First the environment variable IMAS_DDZIP is checked.
-If that exists and points to a file we will attempt to open it.
-Then, IDSDef.zip is searched in site-packages, the current folder,
-in .config/imas/ (`$$XDG_CONFIG_HOME`) and in
-the assets/ folder within the IMAS-Python package.
-
-1. `$$IMAS_DDZIP`
-2. The virtual environment
-3. USER_BASE`imas/IDSDef.zip`
-4. All `site-packages/imas/IDSDef.zip`
-5. `./IDSDef.zip`
-6. `~/.config/imas/IDSDef.zip`
-7. `__file__/../../imas/assets/IDSDef.zip`
-
-All files are checked, i.e. if your .config/imas/IDSDef.zip is outdated
-the IMAS-Python-packaged version will be used.
-
-The `assets/IDSDef.zip` provided with the package can be updated
-with the `python setup.py build_DD` command, which is also performed on install
-if you have access to the ITER data-dictionary git repo.
-Reinstalling imas thus also will give you access to the latest DD versions.
-"""
import logging
import os
-import re
import xml.etree.ElementTree as ET
-from contextlib import contextmanager, nullcontext
from functools import lru_cache
from pathlib import Path
-from typing import Dict, Iterator, List, Tuple, Union
-from zipfile import ZipFile
-
-try:
- from importlib.resources import as_file, files
-
- try:
- from importlib.resources.abc import Traversable
- except ModuleNotFoundError: # Python 3.9/3.10 support
- from importlib.abc import Traversable
-
-except ImportError: # Python 3.8 support
- from importlib_resources import as_file, files
- from importlib_resources.abc import Traversable
-from packaging.version import InvalidVersion, Version
+# These methods in imas_data_dictionaries used to be defined here. We import them here
+# for backwards compatibility:
+from imas_data_dictionaries import dd_identifiers # noqa: F401
+from imas_data_dictionaries import get_dd_xml_crc # noqa: F401
+from imas_data_dictionaries import get_identifier_xml # noqa: F401
+from imas_data_dictionaries import dd_xml_versions, get_dd_xml, parse_dd_version
+from packaging.version import InvalidVersion
import imas
-from imas.exception import UnknownDDVersion
+from imas.exception import UnknownDDVersion # noqa: F401
logger = logging.getLogger(__name__)
-def _get_xdg_config_dir():
- """
- Return the XDG config directory, according to the XDG base directory spec:
-
- https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
- """
- return os.environ.get("XDG_CONFIG_HOME") or str(Path.home() / ".config")
-
-
-def _generate_zipfile_locations() -> Iterator[Union[Path, Traversable]]:
- """Build a list of potential data dictionary locations.
- We start with the path (if any) of the IMAS_DDZIP env var.
- Then we look for IDSDef.zip in the current folder, in the
- default XDG config dir (~/.config/imas/IDSDef.zip) and
- finally in the assets distributed with this package.
- """
- zip_name = "IDSDef.zip"
-
- environ = os.environ.get("IMAS_DDZIP")
- if environ:
- yield Path(environ).resolve()
-
- yield Path(zip_name).resolve()
- yield Path(_get_xdg_config_dir()).resolve() / "imas" / zip_name
- yield files(imas) / "assets" / zip_name
-
-
-def parse_dd_version(version: str) -> Version:
- try:
- return Version(version)
- except InvalidVersion:
- # This is probably a dev build of the DD, of which the version is obtained with
- # `git describe` in the format X.Y.Z--g with X.Y.Z the previous
- # released version: try again after converting the first dash to a + and treat
- # it like a `local` version specifier, which is recognized as newer.
- # https://packaging.python.org/en/latest/specifications/version-specifiers/
- return Version(version.replace("-", "+", 1))
-
-
# Expected use case is one, maximum two DD versions
# Cache is bigger than that: in pytest we currently use the following DD versions:
# - 3.22.0
@@ -112,7 +36,6 @@ def parse_dd_version(version: str) -> Version:
# - IDS_minimal_struct_array.xml
# - IDS_minimal_types.xml
_DD_CACHE_SIZE = 8
-ZIPFILE_LOCATIONS = list(_generate_zipfile_locations())
def dd_etree(version=None, xml_path=None):
@@ -168,117 +91,6 @@ def _load_etree(version, xml_path):
return tree
-@contextmanager
-def _open_zipfile(path: Union[Path, Traversable]) -> Iterator[ZipFile]:
- """Open a zipfile, given a Path or Traversable."""
- if isinstance(path, Path):
- ctx = nullcontext(path)
- else:
- ctx = as_file(path)
- with ctx as file:
- with ZipFile(file) as zipfile:
- yield zipfile
-
-
-@lru_cache
-def _read_dd_versions() -> Dict[str, Tuple[Union[Path, Traversable], str]]:
- """Traverse all possible DD zip files and return a map of known versions.
-
- Returns:
- version_map: version -> (zipfile path, filename)
- """
- versions = {}
- xml_re = re.compile(r"^data-dictionary/([0-9.]+)\.xml$")
- for path in ZIPFILE_LOCATIONS:
- if not path.is_file():
- continue
- with _open_zipfile(path) as zipfile:
- for fname in zipfile.namelist():
- match = xml_re.match(fname)
- if match:
- version = match.group(1)
- if version not in versions:
- versions[version] = (path, fname)
- if not versions:
- raise RuntimeError(
- "Could not find any data dictionary definitions. "
- f"Looked in: {', '.join(map(repr, ZIPFILE_LOCATIONS))}."
- )
- return versions
-
-
-@lru_cache
-def _read_identifiers() -> Dict[str, Tuple[Union[Path, Traversable], str]]:
- """Traverse all possible DD zip files and return a map of known identifiers.
-
- Returns:
- identifier_map: identifier -> (zipfile path, filename)
- """
- identifiers = {}
- xml_re = re.compile(r"^identifiers/\w+/(\w+_identifier).xml$")
- for path in ZIPFILE_LOCATIONS:
- if not path.is_file():
- continue
- with _open_zipfile(path) as zipfile:
- for fname in zipfile.namelist():
- match = xml_re.match(fname)
- if match:
- identifier_name = match.group(1)
- if identifier_name not in identifiers:
- identifiers[identifier_name] = (path, fname)
- return identifiers
-
-
-@lru_cache
-def dd_xml_versions() -> List[str]:
- """Parse IDSDef.zip to find version numbers available"""
-
- def sort_key(version):
- try:
- return parse_dd_version(version)
- except InvalidVersion:
- # Don't fail when a malformatted version is present in the DD zip
- logger.error(
- f"Could not convert DD XML version {version} to a Version.", exc_info=1
- )
- return Version(0)
-
- return sorted(_read_dd_versions(), key=sort_key)
-
-
-@lru_cache
-def dd_identifiers() -> List[str]:
- """Parse IDSDef.zip to find available identifiers"""
-
- return sorted(_read_identifiers())
-
-
-def get_dd_xml(version):
- """Read XML file for the given data dictionary version."""
- dd_versions = dd_xml_versions()
- if version not in dd_versions:
- raise UnknownDDVersion(version, dd_versions)
- path, fname = _read_dd_versions()[version]
- with _open_zipfile(path) as zipfile:
- return zipfile.read(fname)
-
-
-def get_dd_xml_crc(version):
- """Given a version string, return its CRC checksum"""
- # Note, by this time get_dd_xml is already called, so we don't need to check if the
- # version is known
- path, fname = _read_dd_versions()[version]
- with _open_zipfile(path) as zipfile:
- return zipfile.getinfo(fname).CRC
-
-
-def get_identifier_xml(identifier_name):
- """Get identifier XML for the given identifier name"""
- path, fname = _read_identifiers()[identifier_name]
- with _open_zipfile(path) as zipfile:
- return zipfile.read(fname)
-
-
def print_supported_version_warning(version):
try:
if parse_dd_version(version) < imas.OLDEST_SUPPORTED_VERSION:
diff --git a/imas/exception.py b/imas/exception.py
index 513c2caa..737680c2 100644
--- a/imas/exception.py
+++ b/imas/exception.py
@@ -1,11 +1,14 @@
# This file is part of IMAS-Python.
# You should have received the IMAS-Python LICENSE file with this project.
-"""Exception classes used in IMAS-Python.
-"""
+"""Exception classes used in IMAS-Python."""
import difflib
import logging
-from typing import TYPE_CHECKING, List
+from typing import TYPE_CHECKING
+
+# This exception from imas_data_dictionaries used to be defined here. We import it here
+# for backwards compatibility:
+from imas_data_dictionaries import UnknownDDVersion # noqa: F401
from imas.backends.imas_core import imas_interface as _imas_interface
@@ -23,20 +26,6 @@
ALException = None
-class UnknownDDVersion(ValueError):
- """Error raised when an unknown DD version is specified."""
-
- def __init__(self, version: str, available: List[str], note: str = "") -> None:
- close_matches = difflib.get_close_matches(version, available, n=1)
- if close_matches:
- suggestions = f"Did you mean {close_matches[0]!r}?"
- else:
- suggestions = f"Available versions are {', '.join(reversed(available))}"
- super().__init__(
- f"Data dictionary version {version!r} cannot be found. {suggestions}{note}"
- )
-
-
class IDSNameError(ValueError):
"""Error raised by DBEntry.get(_slice) when providing an invalid IDS name."""
diff --git a/imas/ids_convert.py b/imas/ids_convert.py
index a52db521..c4e752e0 100644
--- a/imas/ids_convert.py
+++ b/imas/ids_convert.py
@@ -1,36 +1,35 @@
# This file is part of IMAS-Python.
# You should have received the IMAS-Python LICENSE file with this project.
-"""Functionality for converting IDSToplevels between DD versions.
-"""
+"""Functionality for converting IDSToplevels between DD versions."""
import copy
import datetime
import logging
-from functools import lru_cache
+from functools import lru_cache, partial
from pathlib import Path
from typing import Callable, Dict, Iterator, Optional, Set, Tuple
from xml.etree.ElementTree import Element, ElementTree
import numpy
from packaging.version import InvalidVersion, Version
+from scipy.interpolate import interp1d
import imas
from imas.dd_zip import parse_dd_version
from imas.ids_base import IDSBase
from imas.ids_data_type import IDSDataType
+from imas.ids_defs import IDS_TIME_MODE_HETEROGENEOUS
from imas.ids_factory import IDSFactory
from imas.ids_path import IDSPath
-from imas.ids_primitive import (
- IDSNumeric0D,
- IDSNumericArray,
- IDSPrimitive,
- IDSString0D,
-)
+from imas.ids_primitive import IDSNumeric0D, IDSNumericArray, IDSPrimitive, IDSString0D
from imas.ids_struct_array import IDSStructArray
from imas.ids_structure import IDSStructure
from imas.ids_toplevel import IDSToplevel
logger = logging.getLogger(__name__)
+# Store for which paths we already emitted a warning that the target could not be found
+# to prevent polluting the output with lots of repeated items.
+_missing_paths_warning = set()
def iter_parents(path: str) -> Iterator[str]:
@@ -334,12 +333,13 @@ def add_rename(old_path: str, new_path: str):
# Additional conversion rules for DDv3 to DDv4
if self.version_old.major == 3 and new_version and new_version.major == 4:
# Postprocessing for COCOS definition change:
- xpath_query = ".//field[@cocos_label_transformation='psi_like']"
- for old_item in old.iterfind(xpath_query):
- old_path = old_item.get("path")
- new_path = self.old_to_new.path.get(old_path, old_path)
- self.new_to_old.post_process[new_path] = _cocos_change
- self.old_to_new.post_process[old_path] = _cocos_change
+ for psi_like in ["psi_like", "dodpsi_like"]:
+ xpath_query = f".//field[@cocos_label_transformation='{psi_like}']"
+ for old_item in old.iterfind(xpath_query):
+ old_path = old_item.get("path")
+ new_path = self.old_to_new.path.get(old_path, old_path)
+ self.new_to_old.post_process[new_path] = _cocos_change
+ self.old_to_new.post_process[old_path] = _cocos_change
# Definition change for pf_active circuit/connections
if self.ids_name == "pf_active":
path = "circuit/connections"
@@ -474,32 +474,48 @@ def convert_ids(
raise RuntimeError(
f"There is no IDS with name {ids_name} in DD version {version}."
)
- target_ids = factory.new(ids_name)
- else:
- target_ids = target
+ target = factory.new(ids_name)
source_version = parse_dd_version(toplevel._version)
- target_version = parse_dd_version(target_ids._version)
+ target_version = parse_dd_version(target._version)
logger.info(
"Starting conversion of IDS %s from version %s to version %s.",
ids_name,
source_version,
target_version,
)
+ global _missing_paths_warning
+ _missing_paths_warning = set() # clear for which paths we emitted a warning
- source_is_new = source_version > target_version
source_tree = toplevel._parent._etree
- target_tree = target_ids._parent._etree
- if source_is_new:
+ target_tree = target._parent._etree
+ if source_version > target_version:
version_map = _DDVersionMap(ids_name, target_tree, source_tree, target_version)
+ rename_map = version_map.new_to_old
else:
version_map = _DDVersionMap(ids_name, source_tree, target_tree, source_version)
+ rename_map = version_map.old_to_new
+
+ # Special case for DD3to4 pulse_schedule conversion
+ if (
+ toplevel.metadata.name == "pulse_schedule"
+ and toplevel.ids_properties.homogeneous_time == IDS_TIME_MODE_HETEROGENEOUS
+ and source_version < Version("3.40.0")
+ and target_version >= Version("3.40.0")
+ ):
+ try:
+ # Suppress "'.../time' does not exist in the target IDS." log messages.
+ logger.addFilter(_pulse_schedule_3to4_logfilter)
+ _pulse_schedule_3to4(toplevel, target, deepcopy, rename_map)
+ finally:
+ logger.removeFilter(_pulse_schedule_3to4_logfilter)
+ else:
+ _copy_structure(toplevel, target, deepcopy, rename_map)
- _copy_structure(toplevel, target_ids, deepcopy, source_is_new, version_map)
logger.info("Conversion of IDS %s finished.", ids_name)
if provenance_origin_uri:
- _add_provenance_entry(target_ids, toplevel._version, provenance_origin_uri)
- return target_ids
+ _add_provenance_entry(target, toplevel._version, provenance_origin_uri)
+ return target
def _add_provenance_entry(
@@ -541,12 +557,50 @@ def _add_provenance_entry(
node.sources.append(source_txt) # sources is a STR_1D (=list of strings)
+def _get_target_item(
+ item: IDSBase, target: IDSStructure, rename_map: NBCPathMap
+) -> Optional[IDSBase]:
+ """Find and return the corresponding target item if it exists.
+
+ This method follows NBC renames (as stored in the rename map). It returns None if
+ there is no corresponding target item in the target structure.
+ """
+ path = item.metadata.path_string
+
+ # Follow NBC renames:
+ if path in rename_map:
+ if rename_map.path[path] is None:
+ if path not in rename_map.ignore_missing_paths:
+ # Only warn the first time that we encounter this path:
+ if path not in _missing_paths_warning:
+ if path in rename_map.type_change:
+ msg = "Element %r changed type in the target IDS."
+ else:
+ msg = "Element %r does not exist in the target IDS."
+ logger.warning(msg + " Data is not copied.", path)
+ _missing_paths_warning.add(path)
+ return None
+ else:
+ return IDSPath(rename_map.path[path]).goto(target)
+
+ # No NBC renames:
+ try:
+ return target[item.metadata.name]
+ except AttributeError:
+ # In exceptional cases the item does not exist in the target. Example:
+ # neutron_diagnostic IDS between DD 3.40.1 and 3.41.0 has renamed
+ # synthetic_signals/fusion_power -> fusion_power. The synthetic_signals
+ # structure no longer exists but we need to descend into it to get the
+ # total_neutron_flux.
+ return target
+
+
def _copy_structure(
source: IDSStructure,
target: IDSStructure,
deepcopy: bool,
- source_is_new: bool,
- version_map: DDVersionMap,
+ rename_map: NBCPathMap,
+ callback: Optional[Callable] = None,
):
"""Recursively copy data, following NBC renames.
@@ -557,31 +611,14 @@ def _copy_structure(
source_is_new: True iff the DD version of the source is newer than that of the
target.
version_map: Version map containing NBC renames.
+ callback: Optional callback that is called for every copied node.
"""
- rename_map = version_map.new_to_old if source_is_new else version_map.old_to_new
for item in source.iter_nonempty_():
path = item.metadata.path_string
- if path in rename_map:
- if rename_map.path[path] is None:
- if path not in rename_map.ignore_missing_paths:
- if path in rename_map.type_change:
- msg = "Element %r changed type in the target IDS."
- else:
- msg = "Element %r does not exist in the target IDS."
- logger.warning(msg + " Data is not copied.", path)
- continue
- else:
- target_item = IDSPath(rename_map.path[path]).goto(target)
- else:
- try:
- target_item = target[item.metadata.name]
- except AttributeError:
- # In exceptional cases the item does not exist in the target. Example:
- # neutron_diagnostic IDS between DD 3.40.1 and 3.41.0. has renamed
- # synthetic_signals/fusion_power -> fusion_power. The synthetic_signals
- # structure no longer exists but we need to descend into it to get the
- # total_neutron_flux.
- target_item = target
+ target_item = _get_target_item(item, target, rename_map)
+ if target_item is None:
+ continue
+
if path in rename_map.type_change:
# Handle type change
new_items = rename_map.type_change[path](item, target_item)
@@ -594,21 +631,17 @@ def _copy_structure(
size = len(item)
target_item.resize(size)
for i in range(size):
- _copy_structure(
- item[i], target_item[i], deepcopy, source_is_new, version_map
- )
+ _copy_structure(item[i], target_item[i], deepcopy, rename_map, callback)
elif isinstance(item, IDSStructure):
- _copy_structure(item, target_item, deepcopy, source_is_new, version_map)
+ _copy_structure(item, target_item, deepcopy, rename_map, callback)
else:
- if deepcopy:
- # No nested types are used as data, so a shallow copy is sufficient
- target_item.value = copy.copy(item.value)
- else:
- target_item.value = item.value
+ target_item.value = copy.copy(item.value) if deepcopy else item.value
# Post-process the node:
if path in rename_map.post_process:
rename_map.post_process[path](target_item)
+ if callback is not None:
+ callback(item, target_item)
########################################################################################
@@ -919,3 +952,82 @@ def _ids_properties_source(source: IDSString0D, provenance: IDSStructure) -> Non
provenance.node.resize(1)
provenance.node[0].reference.resize(1)
provenance.node[0].reference[0].name = source.value
+
+
+def _pulse_schedule_3to4(
+ source: IDSStructure,
+ target: IDSStructure,
+ deepcopy: bool,
+ rename_map: NBCPathMap,
+):
+ """Recursively copy data, following NBC renames, and converting time bases for the
+ pulse_schedule IDS.
+
+ Args:
+ source: Source structure.
+ target: Target structure.
+ deepcopy: See :func:`convert_ids`.
+ rename_map: Map containing NBC renames.
+ """
+ # All prerequisites are checked before calling this function:
+ # - source and target are pulse_schedule IDSs
+ # - source has DD version < 3.40.0
+ # - target has DD version >= 3.40.0 (see the version guard in convert_ids)
+ # - IDS is using heterogeneous time
+
+ for item in source.iter_nonempty_():
+ name = item.metadata.name
+ target_item = _get_target_item(item, target, rename_map)
+ if target_item is None:
+ continue
+
+ # Special cases for non-dynamic stuff
+ if name in ["ids_properties", "code"]:
+ _copy_structure(item, target_item, deepcopy, rename_map)
+ elif name == "time":
+ target_item.value = item.value if not deepcopy else copy.copy(item.value)
+ elif name == "event":
+ size = len(item)
+ target_item.resize(size)
+ for i in range(size):
+ _copy_structure(item[i], target_item[i], deepcopy, rename_map)
+ else:
+ # Find all time bases
+ time_bases = [
+ node.value
+ for node in imas.util.tree_iter(item)
+ if node.metadata.name == "time"
+ ]
+ # Construct the common time base
+ timebase = numpy.unique(numpy.concatenate(time_bases)) if time_bases else []
+ target_item.time = timebase
+ # Do the conversion
+ callback = partial(_pulse_schedule_resample_callback, timebase)
+ _copy_structure(item, target_item, deepcopy, rename_map, callback)
+
+
+def _pulse_schedule_3to4_logfilter(logrecord: logging.LogRecord) -> bool:
+ """Suppress "'.../time' does not exist in the target IDS." log messages."""
+ return not (logrecord.args and str(logrecord.args[0]).endswith("/time"))
+
+
+def _pulse_schedule_resample_callback(timebase, item: IDSBase, target_item: IDSBase):
+ """Callback from _copy_structure to resample dynamic data on the new timebase"""
+ if item.metadata.ndim == 1 and item.metadata.coordinates[0].is_time_coordinate:
+ # Interpolate 1D dynamic quantities to the common time base
+ time = item.coordinates[0]
+ if len(item) != len(time):
+ raise ValueError(
+ f"Array {item} has a different size than its time base {time}."
+ )
+ is_integer = item.metadata.data_type is IDSDataType.INT
+ value = interp1d(
+ time.value,
+ item.value,
+ "previous" if is_integer else "linear",
+ copy=False,
+ bounds_error=False,
+ fill_value=(item[0], item[-1]),
+ assume_sorted=True,
+ )(timebase)
+ target_item.value = value.astype(numpy.int32) if is_integer else value
diff --git a/imas/test/test_dd_helpers.py b/imas/test/test_dd_helpers.py
deleted file mode 100644
index 07d1d2b0..00000000
--- a/imas/test/test_dd_helpers.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from pathlib import Path
-import shutil
-import pytest
-import os
-import zipfile
-
-from imas.dd_helpers import prepare_data_dictionaries, _idsdef_zip_relpath, _build_dir
-
-_idsdef_unzipped_relpath = Path("idsdef_unzipped")
-
-
-@pytest.mark.skip(reason="skipping IDSDef.zip generation")
-def test_prepare_data_dictionaries():
- prepare_data_dictionaries()
- assert os.path.exists(
- _idsdef_zip_relpath
- ), f"IDSDef.zip file does not exist at path: {_idsdef_zip_relpath}"
-
- expected_xml_files = [
- _build_dir / "3.40.0.xml",
- _build_dir / "3.41.0.xml",
- _build_dir / "3.42.0.xml",
- _build_dir / "4.0.0.xml",
- ]
-
- for xml_file in expected_xml_files:
- assert os.path.exists(xml_file), f"{xml_file} does not exist"
-
- with zipfile.ZipFile(_idsdef_zip_relpath, "r") as zip_ref:
- zip_ref.extractall(_idsdef_unzipped_relpath)
-
- expected_ids_directories = [
- _idsdef_unzipped_relpath / "data-dictionary" / "3.40.0.xml",
- _idsdef_unzipped_relpath / "data-dictionary" / "3.41.0.xml",
- _idsdef_unzipped_relpath / "data-dictionary" / "3.42.0.xml",
- _idsdef_unzipped_relpath / "data-dictionary" / "4.0.0.xml",
- _idsdef_unzipped_relpath
- / "identifiers"
- / "core_sources"
- / "core_source_identifier.xml",
- _idsdef_unzipped_relpath
- / "identifiers"
- / "equilibrium"
- / "equilibrium_profiles_2d_identifier.xml",
- ]
-
- for file_path in expected_ids_directories:
- assert os.path.exists(
- file_path
- ), f"Expected_ids_directories {file_path} does not exist"
-
- if _build_dir.exists():
- shutil.rmtree(_idsdef_unzipped_relpath)
diff --git a/imas/test/test_ids_convert.py b/imas/test/test_ids_convert.py
index 750c44e4..f2b9b7f7 100644
--- a/imas/test/test_ids_convert.py
+++ b/imas/test/test_ids_convert.py
@@ -7,6 +7,7 @@
from unittest.mock import MagicMock
import numpy
+from numpy import array_equal
import pytest
from imas import identifiers
@@ -27,7 +28,7 @@
from imas.ids_factory import IDSFactory
from imas.ids_struct_array import IDSStructArray
from imas.ids_structure import IDSStructure
-from imas.test.test_helpers import compare_children, open_dbentry
+from imas.test.test_helpers import compare_children, fill_consistent, open_dbentry
UTC = timezone.utc
@@ -287,22 +288,22 @@ def test_3to4_repeat_children_first_point_conditional(dd4factory):
for i in range(2):
outline_inner = wall4.description_2d[0].vessel.unit[i].annular.outline_inner
if i == 0: # open outline, first point not repeated:
- assert numpy.array_equal(outline_inner.r, [1.0, 2.0, 3.0])
- assert numpy.array_equal(outline_inner.z, [-1.0, -2.0, -3.0])
+ assert array_equal(outline_inner.r, [1.0, 2.0, 3.0])
+ assert array_equal(outline_inner.z, [-1.0, -2.0, -3.0])
else: # closed outline, first point repeated:
- assert numpy.array_equal(outline_inner.r, [1.0, 2.0, 3.0, 1.0])
- assert numpy.array_equal(outline_inner.z, [-1.0, -2.0, -3.0, -1.0])
+ assert array_equal(outline_inner.r, [1.0, 2.0, 3.0, 1.0])
+ assert array_equal(outline_inner.z, [-1.0, -2.0, -3.0, -1.0])
# Test conversion for case 2:
assert len(wall4.description_2d[0].limiter.unit) == 2
for i in range(2):
unit = wall4.description_2d[0].limiter.unit[i]
if i == 0: # open outline, first point not repeated:
- assert numpy.array_equal(unit.outline.r, [1.0, 2.0, 3.0])
- assert numpy.array_equal(unit.outline.z, [-1.0, -2.0, -3.0])
+ assert array_equal(unit.outline.r, [1.0, 2.0, 3.0])
+ assert array_equal(unit.outline.z, [-1.0, -2.0, -3.0])
else: # closed outline, first point repeated:
- assert numpy.array_equal(unit.outline.r, [1.0, 2.0, 3.0, 1.0])
- assert numpy.array_equal(unit.outline.z, [-1.0, -2.0, -3.0, -1.0])
+ assert array_equal(unit.outline.r, [1.0, 2.0, 3.0, 1.0])
+ assert array_equal(unit.outline.z, [-1.0, -2.0, -3.0, -1.0])
# Test conversion for case 3:
assert len(wall4.description_2d[0].mobile.unit) == 2
@@ -310,11 +311,11 @@ def test_3to4_repeat_children_first_point_conditional(dd4factory):
unit = wall4.description_2d[0].mobile.unit[i]
for j in range(3):
if i == 0: # open outline, first point not repeated:
- assert numpy.array_equal(unit.outline[j].r, [1.0, 2.0, 3.0])
- assert numpy.array_equal(unit.outline[j].z, [-1.0, -2.0, -3.0])
+ assert array_equal(unit.outline[j].r, [1.0, 2.0, 3.0])
+ assert array_equal(unit.outline[j].z, [-1.0, -2.0, -3.0])
else: # closed outline, first point repeated:
- assert numpy.array_equal(unit.outline[j].r, [1.0, 2.0, 3.0, 1.0])
- assert numpy.array_equal(unit.outline[j].z, [-1.0, -2.0, -3.0, -1.0])
+ assert array_equal(unit.outline[j].r, [1.0, 2.0, 3.0, 1.0])
+ assert array_equal(unit.outline[j].z, [-1.0, -2.0, -3.0, -1.0])
assert unit.outline[j].time == pytest.approx(j / 5)
# Test conversion for case 4:
@@ -322,9 +323,9 @@ def test_3to4_repeat_children_first_point_conditional(dd4factory):
for i in range(2):
thickness = wall4.description_2d[1].vessel.unit[i].annular.thickness
if i == 0: # open outline, there was one value too many, drop the last one
- assert numpy.array_equal(thickness, [1, 0.9])
+ assert array_equal(thickness, [1, 0.9])
else: # closed outline, thickness values kept
- assert numpy.array_equal(thickness, [1, 0.9, 0.9])
+ assert array_equal(thickness, [1, 0.9, 0.9])
# Test conversion back
wall3 = convert_ids(wall4, "3.39.0")
@@ -340,8 +341,8 @@ def test_3to4_repeat_children_first_point(dd4factory):
iron_core4 = convert_ids(iron_core, None, factory=dd4factory)
geometry = iron_core4.segment[0].geometry
- assert numpy.array_equal(geometry.outline.r, [1.0, 2.0, 3.0, 1.0])
- assert numpy.array_equal(geometry.outline.z, [-1.0, -2.0, -3.0, -1.0])
+ assert array_equal(geometry.outline.r, [1.0, 2.0, 3.0, 1.0])
+ assert array_equal(geometry.outline.z, [-1.0, -2.0, -3.0, -1.0])
iron_core3 = convert_ids(iron_core4, "3.39.0")
compare_children(iron_core, iron_core3)
@@ -356,11 +357,11 @@ def test_3to4_cocos_change(dd4factory):
cp.profiles_1d[0].grid.psi = numpy.linspace(10, 20, 11)
cp4 = convert_ids(cp, None, factory=dd4factory)
- assert numpy.array_equal(
+ assert array_equal(
cp4.profiles_1d[0].grid.rho_tor_norm,
cp.profiles_1d[0].grid.rho_tor_norm,
)
- assert numpy.array_equal(
+ assert array_equal(
cp4.profiles_1d[0].grid.psi,
-cp.profiles_1d[0].grid.psi,
)
@@ -368,6 +369,26 @@ def test_3to4_cocos_change(dd4factory):
cp3 = convert_ids(cp4, "3.39.0")
compare_children(cp, cp3)
+ eq = IDSFactory("3.39.0").equilibrium()
+ eq.ids_properties.homogeneous_time = IDS_TIME_MODE_HOMOGENEOUS
+ eq.time = [1.0]
+ eq.time_slice.resize(1)
+ eq.time_slice[0].profiles_1d.psi = numpy.linspace(0, 1, 11)
+ eq.time_slice[0].profiles_1d.dpressure_dpsi = numpy.linspace(1, 2, 11)
+
+ eq4 = convert_ids(eq, None, factory=dd4factory)
+ assert array_equal(
+ eq4.time_slice[0].profiles_1d.psi,
+ -eq.time_slice[0].profiles_1d.psi,
+ )
+ assert array_equal(
+ eq4.time_slice[0].profiles_1d.dpressure_dpsi,
+ -eq.time_slice[0].profiles_1d.dpressure_dpsi,
+ )
+
+ eq3 = convert_ids(eq4, "3.39.0")
+ compare_children(eq, eq3)
+
def test_3to4_circuit_connections(dd4factory, caplog):
pfa = IDSFactory("3.39.0").pf_active()
@@ -380,7 +401,7 @@ def test_3to4_circuit_connections(dd4factory, caplog):
]
pfa4 = convert_ids(pfa, None, factory=dd4factory)
- assert numpy.array_equal(
+ assert array_equal(
pfa4.circuit[0].connections, [[-1, 0, 1], [0, 1, -1], [1, -1, 0]]
)
@@ -397,7 +418,7 @@ def test_3to4_circuit_connections(dd4factory, caplog):
with caplog.at_level(logging.ERROR):
pfa4 = convert_ids(pfa, None, factory=dd4factory)
# Incorrect shape, data is not converted:
- assert numpy.array_equal(pfa.circuit[0].connections, pfa4.circuit[0].connections)
+ assert array_equal(pfa.circuit[0].connections, pfa4.circuit[0].connections)
# Check that a message with ERROR severity was logged
assert len(caplog.record_tuples) == 1
assert caplog.record_tuples[0][1] == logging.ERROR
@@ -410,7 +431,53 @@ def test_3to4_cocos_magnetics_workaround(dd4factory):
mag.flux_loop[0].flux.data = [1.0, 2.0]
mag4 = convert_ids(mag, None, factory=dd4factory)
- assert numpy.array_equal(mag4.flux_loop[0].flux.data, [-1.0, -2.0])
+ assert array_equal(mag4.flux_loop[0].flux.data, [-1.0, -2.0])
mag3 = convert_ids(mag4, "3.39.0")
compare_children(mag, mag3)
+
+
+def test_3to4_pulse_schedule():
+ ps = IDSFactory("3.39.0").pulse_schedule()
+ ps.ids_properties.homogeneous_time = IDS_TIME_MODE_HETEROGENEOUS
+
+ ps.ec.launcher.resize(3)
+ ps.ec.launcher[0].power.reference.data = [1.0, 2.0, 3.0]
+ ps.ec.launcher[0].power.reference.time = [1.0, 2.0, 3.0]
+ ps.ec.launcher[1].power.reference.data = [0.0, 2.0, 5.0]
+ ps.ec.launcher[1].power.reference.time = [0.0, 2.0, 5.0]
+ ps.ec.launcher[2].power.reference.data = [1.0, 1.5]
+ ps.ec.launcher[2].power.reference.time = [1.0, 1.5]
+
+ ps.ec.mode.data = [1, 2, 5]
+ ps.ec.mode.time = [1.0, 2.0, 5.0]
+
+ ps4 = convert_ids(ps, "4.0.0")
+ assert array_equal(ps4.ec.time, [0.0, 1.0, 1.5, 2.0, 3.0, 5.0])
+ item = "power_launched/reference"
+ assert array_equal(ps4.ec.beam[0][item], [1.0, 1.0, 1.5, 2.0, 3.0, 3.0])
+ assert array_equal(ps4.ec.beam[1][item], [0.0, 1.0, 1.5, 2.0, 3.0, 5.0])
+ assert array_equal(ps4.ec.beam[2][item], [1.0, 1.0, 1.5, 1.5, 1.5, 1.5])
+ assert array_equal(ps4.ec.mode, [1, 1, 1, 2, 2, 5])
+
+
+def test_3to4_pulse_schedule_exceptions():
+ ps = IDSFactory("3.39.0").pulse_schedule()
+ ps.ids_properties.homogeneous_time = IDS_TIME_MODE_HETEROGENEOUS
+
+ ps.ec.launcher.resize(3)
+ ps.ec.launcher[0].power.reference.data = [1.0, 2.0, 3.0]
+ with pytest.raises(ValueError): # missing time base
+ convert_ids(ps, "4.0.0")
+
+ ps.ec.launcher[0].power.reference.time = [1.0, 2.0]
+ with pytest.raises(ValueError): # incorrect size of time base
+ convert_ids(ps, "4.0.0")
+
+
+def test_3to4_pulse_schedule_fuzz():
+ ps = IDSFactory("3.39.0").pulse_schedule()
+ ps.ids_properties.homogeneous_time = IDS_TIME_MODE_HETEROGENEOUS
+
+ fill_consistent(ps)
+ convert_ids(ps, "4.0.0")
diff --git a/imas/test/test_lazy_loading.py b/imas/test/test_lazy_loading.py
index 9023a795..ff241016 100644
--- a/imas/test/test_lazy_loading.py
+++ b/imas/test/test_lazy_loading.py
@@ -78,7 +78,7 @@ def test_lazy_loading_distributions_random_netcdf(tmp_path):
def run_lazy_loading_distributions_random(dbentry):
ids = IDSFactory().new("distributions")
- fill_consistent(ids)
+ fill_consistent(ids, skip_complex=True)
dbentry.put(ids)
def iterate(structure):
diff --git a/pyproject.toml b/pyproject.toml
index 56e6dc1b..066e0ea9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,7 +1,7 @@
[build-system]
# Minimum requirements for the build system to execute.
# Keep this on a single line for the grep magic of build scripts to work
-requires = ["setuptools>=61", "wheel", "numpy", "gitpython", "saxonche","packaging", "tomli;python_version<'3.11'", "setuptools_scm>8"]
+requires = ["setuptools>=61", "wheel", "numpy", "packaging", "tomli;python_version<'3.11'", "setuptools_scm>8"]
build-backend = "setuptools.build_meta"
@@ -56,16 +56,14 @@ classifiers = [
]
dynamic = ["version"]
dependencies = [
- # FIXME: numpy 2.0 compatibility
- "numpy>=1.15.4,<2",
+ "numpy>=1.15.4",
"rich",
"scipy",
"click",
"importlib_resources;python_version<'3.9'",
"packaging",
"xxhash >= 2",
- "saxonche",
- "gitpython"
+ "imas_data_dictionaries",
]
[project.optional-dependencies]
@@ -92,6 +90,9 @@ h5py = [
xarray = [
"xarray",
]
+saxonche = [
+ "saxonche",
+]
test = [
"pytest>=5.4.1",
"pytest-cov>=0.6",
@@ -101,15 +102,14 @@ test = [
"asv == 0.6.1",
# virtualenv is a dependency of asv
"virtualenv",
- # Pint and xarray are used in training snippets
+ # Pint is used in training snippets
"pint",
# Optional dependencies
# TODO add imas-core when it is available on pypi
- "imas-python[netcdf,h5py,xarray]",
+ "imas-python[netcdf,h5py,xarray,saxonche]",
]
[project.scripts]
-build_DD = "imas.dd_helpers:prepare_data_dictionaries"
imas = "imas.command.cli:cli"
[project.urls]
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 486b56d6..00000000
--- a/setup.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# pylint: disable=wrong-import-position
-# This file is part of IMAS-Python.
-# You should have received the IMAS-Python LICENSE file with this project.
-"""
-Packaging settings. Inspired by a minimal setup.py file, the Pandas cython build
-and the access-layer setup template.
-
-The installable IMAS-Python package tries to follow in the following order:
-- The style guide for Python code [PEP8](https://www.python.org/dev/peps/pep-0008/)
-- The [PyPA guide on packaging projects](
- https://packaging.python.org/guides/distributing-packages-using-setuptools/#distributing-packages)
-- The [PyPA tool recommendations](
- https://packaging.python.org/guides/tool-recommendations/), specifically:
- * Installing: [pip](https://pip.pypa.io/en/stable/)
- * Environment management: [venv](https://docs.python.org/3/library/venv.html)
- * Dependency management: [pip-tools](https://github.com/jazzband/pip-tools)
- * Packaging source distributions: [setuptools](https://setuptools.readthedocs.io/)
- * Packaging built distributions: [wheels](https://pythonwheels.com/)
-
-On the ITER cluster we handle the environment by using the `IMAS` module load.
-So instead, we install packages to the `USER_SITE` there, and do not use
-`pip`s `build-isolation`. See [IMAS-584](https://jira.iter.org/browse/IMAS-584)
-"""
-import importlib
-import importlib.util
-import site
-import traceback
-# Allow importing local files, see https://snarky.ca/what-the-heck-is-pyproject-toml/
-import sys
-import warnings
-# Import other stdlib packages
-from pathlib import Path
-
-# Use setuptools to build packages. Advised to import setuptools before distutils
-import setuptools
-from packaging.version import Version as V
-from setuptools import __version__ as setuptools_version
-from setuptools import setup
-from setuptools.command.build_ext import build_ext
-from setuptools.command.build_py import build_py
-from setuptools.command.sdist import sdist
-
-try:
- from wheel.bdist_wheel import bdist_wheel
-except ImportError:
- bdist_wheel = None
-
-# Ensure the current folder is on the import path:
-sys.path.append(str(Path(__file__).parent.resolve()))
-
-cannonical_python_command = "module load Python/3.8.6-GCCcore-10.2.0"
-
-if sys.version_info < (3, 7):
- sys.exit(
- "Sorry, Python < 3.7 is not supported. Use a different"
- f" python e.g. '{cannonical_python_command}'"
- )
-if sys.version_info < (3, 8):
- warnings.warn("Python < 3.8 support on best-effort basis", FutureWarning)
-
-
-# Check setuptools version before continuing for legacy builds
-# Version 61 is required for pyproject.toml support
-if V(setuptools_version) < V("61"):
- raise RuntimeError(
- "Setuptools version outdated. Found"
- f" {V(setuptools_version)} need at least {V('61')}"
- )
-
-# Workaround for https://github.com/pypa/pip/issues/7953
-# Cannot install into user site directory with editable source
-site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
-
-
-# We need to know where we are for many things
-this_file = Path(__file__)
-this_dir = this_file.parent.resolve()
-
-# Start: Load dd_helpers
-dd_helpers_file = this_dir / "imas/dd_helpers.py"
-assert dd_helpers_file.is_file()
-spec = importlib.util.spec_from_file_location("dd_helpers", dd_helpers_file)
-module = importlib.util.module_from_spec(spec)
-spec.loader.exec_module(module)
-sys.modules["imas.dd_helpers"] = module
-from imas.dd_helpers import prepare_data_dictionaries # noqa
-
-# End: Load dd_helpers
-
-
-# Define building of the Data Dictionary as custom build step
-class BuildDDCommand(setuptools.Command):
- """A custom command to build the data dictionaries."""
-
- description = "build IDSDef.zip"
- user_options = []
-
- def initialize_options(self):
- pass
-
- def finalize_options(self):
- pass
-
- def run(self):
- """Prepare DDs if they can be git pulled"""
- prepare_data_dictionaries()
-
-
-# Inject prepare_data_dictionaries() into the setuptool's build steps. So far it covers
-# all installation cases:
-# - `pip install -e .`` (from git clone)
-# - `python -m build``
-# - Source tarball from git-archive. Note: version only picked up when doing git-archive
-# from a tagged release,
-# `git archive HEAD -v -o imas.tar.gz && pip install imas.tar.gz`
-cmd_class = {}
-build_overrides = {"build_ext": build_ext, "build_py": build_py, "sdist": sdist}
-if bdist_wheel:
- build_overrides["bdist_wheel"] = bdist_wheel
-for name, cls in build_overrides.items():
-
- class build_DD_before(cls):
- """Build DD before executing original distutils command"""
-
- def run(self):
- try:
- prepare_data_dictionaries()
- except Exception:
- traceback.print_exc()
- print("Failed to build DD during setup, continuing without.")
- super().run()
-
- cmd_class[name] = build_DD_before
-
-
-if __name__ == "__main__":
- setup(
- zip_safe=False, # https://mypy.readthedocs.io/en/latest/installed_packages.html
- cmdclass={"build_DD": BuildDDCommand, **cmd_class}
- )
\ No newline at end of file