diff --git a/examples/ivp_1D_simulation/README.md b/examples/ivp_1D_simulation/README.md new file mode 100644 index 00000000..cf6be017 --- /dev/null +++ b/examples/ivp_1D_simulation/README.md @@ -0,0 +1,7 @@ +Simulations In One Dimension Example + + [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/waltsims/k-wave-python/blob/HEAD/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb) + +This example provides a simple demonstration of the simulation and detection of the pressure field generated by an initial pressure distribution within a one-dimensional heterogeneous propagation medium. + +To read more, visit the [original example page](http://www.k-wave.org/documentation/example_ivp_1D_simulation.php). \ No newline at end of file diff --git a/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb new file mode 100644 index 00000000..37569b11 --- /dev/null +++ b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb @@ -0,0 +1,214 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "RQYRmOfG27FV" + }, + "source": [ + "First, install the latest version of the package." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "5bq8_I2z29l6" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!pip install k-wave-python" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7lktiAv5XtsS" + }, + "source": [ + "Now import the dependencies and the parts of the library that are required." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "VCcwZsQ2ASoj" + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", + "from kwave.data import Vector\n", + "from kwave.kgrid import kWaveGrid\n", + "from kwave.kmedium import kWaveMedium\n", + "from kwave.ksensor import kSensor\n", + "from kwave.ksource import kSource\n", + "from kwave.kspaceFirstOrder1D import kspace_first_order_1D\n", + "from kwave.options.simulation_execution_options import SimulationExecutionOptions\n", + "from kwave.options.simulation_options import SimulationOptions" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xUqsDgayASoj" + }, + "source": [ + "Set the parameters used to build the grid" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "FwbGsg_gASok" + }, + "outputs": [], + "source": [ + "# create the computational grid\n", + "Nx: int = 512 # number of grid points in the x (row) direction\n", + "dx: float = 0.05e-3 # grid point spacing in the x direction [m]\n", + "\n", + "grid_size_points = Vector([Nx, ])\n", + "grid_spacing_meters = Vector([dx, ])\n", + "\n", + "# create the k-space grid\n", + "kgrid = kWaveGrid(grid_size_points, grid_spacing_meters)\n", + "\n", + "# define the properties of the propagation medium\n", + "sound_speed = 1500.0 * np.ones((Nx, 1)) # [m/s]\n", + "sound_speed[:np.round(Nx / 3).astype(int) - 1] = 2000.0 # [m/s]\n", + "density = 1000.0 * np.ones((Nx, 1)) # [kg/m^3]\n", + "density[np.round(4 * Nx / 5).astype(int) - 1:] = 1500.0 # [kg/m^3]\n", + "medium = kWaveMedium(sound_speed=sound_speed, density=density)\n", + "\n", + "# set the simulation time to capture the reflections\n", + "c_max = np.max(medium.sound_speed.flatten()) # [m/s]\n", + "t_end = 2.5 * kgrid.x_size / c_max # [s]\n", + "\n", + "# define the time array\n", + "kgrid.makeTime(c_max, t_end=t_end)" + ] + }, + { + "cell_type": "markdown", +
"metadata": { + "id": "misFfiLaASok" + }, + "source": [ + "Build source and sensor" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "maEowzD6ASok" + }, + "outputs": [], + "source": [ + "# Create the source object\n", + "source = kSource()\n", + "\n", + "# create initial pressure distribution using a smoothly shaped sinusoid\n", + "x_pos: int = 280 # [grid points]\n", + "width: int = 100 # [grid points]\n", + "height: int = 1 # [au]\n", + "p0 = np.linspace(0.0, 2.0 * np.pi, width + 1)\n", + "\n", + "part1 = np.zeros(x_pos).astype(float)\n", + "part2 = (height / 2.0) * np.sin(p0 - np.pi / 2.0) + (height / 2.0)\n", + "part3 = np.zeros(Nx - x_pos - width - 1).astype(float)\n", + "source.p0 = np.concatenate([part1, part2, part3])\n", + "\n", + "# create a Cartesian sensor mask recording the pressure\n", + "sensor = kSensor()\n", + "sensor.record = [\"p\"]\n", + "\n", + "# this hack is needed to ensure that the sensor is in [1,2] dimensions\n", + "mask = np.array([-10e-3, 10e-3]) # [m]\n", + "mask = mask[:, np.newaxis].T\n", + "sensor.mask = mask" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZSmPpm6X3fm5" + }, + "source": [ + "Set options and run simulation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "BiFT0YEE3fyE" + }, + "outputs": [], + "source": [ + "# define the simulation options\n", + "simulation_options = SimulationOptions(data_cast=\"off\", save_to_disk=False)\n", + "execution_options = SimulationExecutionOptions(is_gpu_simulation=True)\n", + "\n", + "# run the simulation\n", + "sensor_data = kspace_first_order_1D(kgrid, source, sensor, medium, simulation_options=simulation_options, execution_options=execution_options)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "a4WRzAZ_ASol" + }, + "source": [ + "Visualisations" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "s4ZO890kASol" + }, + "outputs": [], + "source": [ + "# plot the recorded time signals\n", + "_, ax1 = plt.subplots()\n", + "ax1.plot(sensor_data['p'][0, :], 'b-')\n", + "ax1.plot(sensor_data['p'][1, :], 'r-')\n", + "ax1.grid(True)\n", + "ax1.set_ylim(-0.1, 0.7)\n", + "ax1.set_ylabel('Pressure')\n", + "ax1.set_xlabel('Time Step')\n", + "ax1.legend(['Sensor Position 1', 'Sensor Position 2'])\n", + "plt.show()" + ] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/examples/ivp_1D_simulation/ivp_1D_simulation.py b/examples/ivp_1D_simulation/ivp_1D_simulation.py new file mode 100644 index 00000000..33f0401b --- /dev/null +++ b/examples/ivp_1D_simulation/ivp_1D_simulation.py @@ -0,0 +1,94 @@ +""" +Simulations In One Dimension Example + +This example provides a simple demonstration of using k-Wave for the +simulation and detection of the pressure field generated by an initial +pressure distribution within a one-dimensional heterogeneous propagation +medium. It builds on the Homogeneous Propagation Medium and Heterogeneous +Propagation Medium examples. 
+""" + +import matplotlib.pyplot as plt +import numpy as np + +from kwave.data import Vector +from kwave.kgrid import kWaveGrid +from kwave.kmedium import kWaveMedium +from kwave.ksensor import kSensor +from kwave.ksource import kSource +from kwave.kspaceFirstOrder1D import kspace_first_order_1D +from kwave.options.simulation_execution_options import SimulationExecutionOptions +from kwave.options.simulation_options import SimulationOptions + +# ========================================================================= +# SIMULATION +# ========================================================================= + +# create the computational grid +Nx: int = 512 # number of grid points in the x (row) direction +dx: float = 0.05e-3 # grid point spacing in the x direction [m] + +grid_size_points = Vector([Nx, ]) +grid_spacing_meters = Vector([dx, ]) + +# create the k-space grid +kgrid = kWaveGrid(grid_size_points, grid_spacing_meters) + +# define the properties of the propagation medium +sound_speed = 1500.0 * np.ones((Nx, 1)) # [m/s] +sound_speed[:np.round(Nx / 3).astype(int) - 1] = 2000.0 # [m/s] +density = 1000.0 * np.ones((Nx, 1)) # [kg/m^3] +density[np.round(4 * Nx / 5).astype(int) - 1:] = 1500.0 # [kg/m^3] +medium = kWaveMedium(sound_speed=sound_speed, density=density) + +# Create the source object +source = kSource() + +# create initial pressure distribution using a smoothly shaped sinusoid +x_pos: int = 280 # [grid points] +width: int = 100 # [grid points] +height: int = 1 # [au] +p0 = np.linspace(0.0, 2.0 * np.pi, width + 1) + +part1 = np.zeros(x_pos).astype(float) +part2 = (height / 2.0) * np.sin(p0 - np.pi / 2.0) + (height / 2.0) +part3 = np.zeros(Nx - x_pos - width - 1).astype(float) +source.p0 = np.concatenate([part1, part2, part3]) + +# create a Cartesian sensor mask recording the pressure +sensor = kSensor() +sensor.record = ["p"] + +# this hack is needed to ensure that the sensor is in [1,2] dimensions +mask = np.array([-10e-3, 10e-3]) # [m] +mask = mask[:, np.newaxis].T +sensor.mask = mask + +# set the simulation time to capture the reflections +c_max = np.max(medium.sound_speed.flatten()) # [m/s] +t_end = 2.5 * kgrid.x_size / c_max # [s] + +# define the time array +kgrid.makeTime(c_max, t_end=t_end) + +# define the simulation options +simulation_options = SimulationOptions(data_cast="off", save_to_disk=False) +execution_options = SimulationExecutionOptions(is_gpu_simulation=True) + +# run the simulation +sensor_data = kspace_first_order_1D(kgrid, source, sensor, medium, simulation_options=simulation_options, execution_options=execution_options) + +# ========================================================================= +# VISUALISATION +# ========================================================================= + +# plot the recorded time signals +_, ax1 = plt.subplots() +ax1.plot(sensor_data['p'][0, :], 'b-') +ax1.plot(sensor_data['p'][1, :], 'r-') +ax1.grid(True) +ax1.set_ylim(-0.1, 0.7) +ax1.set_ylabel('Pressure') +ax1.set_xlabel('Time Step') +ax1.legend(['Sensor Position 1', 'Sensor Position 2']) +plt.show() diff --git a/kwave/kWaveSimulation.py b/kwave/kWaveSimulation.py index 8cd31e03..1f0a201b 100644 --- a/kwave/kWaveSimulation.py +++ b/kwave/kWaveSimulation.py @@ -1,3 +1,4 @@ +import copy import logging import warnings from dataclasses import dataclass @@ -13,6 +14,7 @@ from kwave.ktransducer import NotATransducer from kwave.kWaveSimulation_helper import ( create_absorption_variables, + create_storage_variables, display_simulation_params, expand_grid_matrices, 
scale_source_terms_func, @@ -72,13 +74,13 @@ def __init__( self.sensor = kSensor(mask=np.ones((Nx, Ny, max(1, Nz))), record=["p_final"]) # set time reversal flag - self.userarg_time_rev = True + #self.time_rev = None self.record = Recorder() self.record.p = False else: # set time reversal flag - self.userarg_time_rev = False + # self.time_rev = False #: Whether sensor.mask should be re-ordered. #: True if sensor.mask is Cartesian with nearest neighbour interpolation which is calculated using a binary mask @@ -142,6 +144,14 @@ def __init__( self.c0 = None #: Alias to medium.sound_speed self.index_data_type = None + @property + def is_nonlinear(self): + """ + Returns: + Whether the simulation is nonlinear (True if the medium is nonlinear). + """ + return self.medium.is_nonlinear() + @property def equation_of_state(self): """ @@ -154,7 +164,7 @@ def equation_of_state(self): else: return "absorbing" else: - return "loseless" + return "lossless" @property def use_sensor(self): @@ -200,7 +210,7 @@ def nonuniform_grid(self) def time_rev(self) -> bool: if self.sensor and not isinstance(self.sensor, NotATransducer): return not self.options.simulation_type.is_elastic_simulation() and self.sensor.time_reversal_boundary_data is not None - return self.userarg_time_rev + return False # the userarg flag was removed above; time reversal is handled by the TimeReversal class @property @deprecated(version="0.4.1", reason="Use TimeReversal class instead") @@ -246,7 +256,6 @@ def source_p0(self): # initial pressure """ Returns: Whether initial pressure source is present (default=False) - """ flag = False # default if not isinstance(self.source, NotATransducer) and self.source.p0 is not None: @@ -254,22 +263,22 @@ def source_p0(self): # initial pressure flag = True return flag + @property def source_p0_elastic(self): # initial pressure in the elastic code """ Returns: Whether initial pressure source is present in the elastic code (default=False) - """ # Not clear where this flag is set return False + @property def source_p(self): """ Returns: Whether time-varying pressure source is present (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.p is not None: @@ -278,12 +287,12 @@ def source_p(self): flag = len(self.source.p[0]) return flag + @property - def source_p_labelled(self): # time-varying pressure with labelled source mask + def source_p_labelled(self) -> bool: # time-varying pressure with labelled source mask """ Returns: - True/False if labelled/binary source mask, respectively.
- + True/False if labelled/binary source mask, respectively (default=False) """ flag = False if not isinstance(self.source, NotATransducer) and self.source.p is not None: @@ -292,12 +301,12 @@ def source_p_labelled(self): # time-varying pressure with labelled source mask flag = not (p_unique.size <= 2 and p_unique.sum() == 1) return flag + @property - def source_ux(self) -> bool: + def source_ux(self): """ Returns: Whether time-varying particle velocity source is used in X-direction - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.ux is not None: @@ -306,12 +315,12 @@ def source_ux(self) -> bool: flag = len(self.source.ux[0]) return flag + @property - def source_uy(self) -> bool: + def source_uy(self): """ Returns: Whether time-varying particle velocity source is used in Y-direction - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.uy is not None: @@ -320,12 +329,12 @@ def source_uy(self) -> bool: flag = len(self.source.uy[0]) return flag + @property - def source_uz(self) -> bool: + def source_uz(self): """ Returns: Whether time-varying particle velocity source is used in Z-direction - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.uz is not None: @@ -334,12 +343,12 @@ def source_uz(self) -> bool: flag = len(self.source.uz[0]) return flag + @property def source_u_labelled(self): """ Returns: Whether time-varying velocity source with labelled source mask is present (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.u_mask is not None: @@ -353,6 +362,7 @@ def source_u_labelled(self): flag = True return flag + @property def source_sxx(self): """ @@ -364,72 +374,72 @@ def source_sxx(self): flag = len(self.source.sxx[0]) return flag + @property def source_syy(self): """ Returns: Whether time-varying stress source in Y->Y direction is present (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.syy is not None: flag = len(self.source.syy[0]) return flag + @property def source_szz(self): """ Returns: Whether time-varying stress source in Z->Z direction is present (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.szz is not None: flag = len(self.source.szz[0]) return flag + @property def source_sxy(self): """ Returns: Whether time-varying stress source in X->Y direction is present (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.sxy is not None: flag = len(self.source.sxy[0]) return flag + @property def source_sxz(self): """ Returns: Whether time-varying stress source in X->Z direction is present (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.sxz is not None: flag = len(self.source.sxz[0]) return flag + @property def source_syz(self): """ Returns: Whether time-varying stress source in Y->Z direction is present (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.syz is not None: flag = len(self.source.syz[0]) return flag + @property def source_s_labelled(self): """ Returns: Whether time-varying stress source with labelled source mask is present (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.s_mask is not None: @@ -443,6 +453,7 @@ def source_s_labelled(self): flag = True return flag + @property def use_w_source_correction_p(self): """ @@ -455,6 +466,7 @@ def 
use_w_source_correction_p(self): flag = True return flag + @property def use_w_source_correction_u(self): """ @@ -468,6 +480,7 @@ def use_w_source_correction_u(self): flag = True return flag + def input_checking(self, calling_func_name) -> None: """ Check the input fields for correctness and validity @@ -515,6 +528,80 @@ def input_checking(self, calling_func_name) -> None: self.create_absorption_vars() self.assign_pseudonyms(self.medium, self.kgrid) self.scale_source_terms(opt.scale_source_terms) + + # move all this inside create_storage_variables? + # a copy of record is passed through and used to update the storage variables + if is_elastic_code: + record_old = copy.deepcopy(self.record) + if not self.blank_sensor: + sensor_x = self.sensor.mask[0, :] + else: + sensor_x = None + + values = dotdict({"sensor_x": sensor_x, + "sensor_mask_index": self.sensor_mask_index, + "record": record_old, + "sensor_data_buffer_size": self.s_source_pos_index}) + + if self.record.u_split_field: + self.record_u_split_field = self.record.u_split_field + + flags = dotdict({"use_sensor": self.use_sensor, + "blank_sensor": self.blank_sensor, + "binary_sensor_mask": self.binary_sensor_mask, + "record_u_split_field": self.record.u_split_field, + "time_rev": None, + "reorder_data": self.reorder_data, + "transducer_receive_elevation_focus": self.transducer_receive_elevation_focus, + "axisymmetric": opt.simulation_type.is_axisymmetric(), + "transducer_sensor": self.transducer_sensor, + "use_cuboid_corners": self.cuboid_corners}) + + # this creates the storage variables by determining the spatial locations of the data specified in self.record + flags, self.record, self.sensor_data, self.num_recorded_time_points = create_storage_variables(self.kgrid, + self.sensor, + opt, + values, + flags, + self.record) + else: + record_old = copy.deepcopy(self.record) + if not self.blank_sensor: + if k_dim == 1: + # this has been declared in check_sensor + sensor_x = self.sensor_x + else: + sensor_x = self.sensor.mask[0, :] + else: + sensor_x = None + + values = dotdict({"sensor_x": sensor_x, + "sensor_mask_index": self.sensor_mask_index, + "record": record_old, + "sensor_data_buffer_size": self.s_source_pos_index}) + + if self.record.u_split_field: + self.record_u_split_field = self.record.u_split_field + + flags = dotdict({"use_sensor": self.use_sensor, + "blank_sensor": self.blank_sensor, + "binary_sensor_mask": self.binary_sensor_mask, + "record_u_split_field": self.record.u_split_field, + "time_rev": None, + "reorder_data": self.reorder_data, + "transducer_receive_elevation_focus": self.transducer_receive_elevation_focus, + "axisymmetric": opt.simulation_type.is_axisymmetric(), + "transducer_sensor": self.transducer_sensor, + "use_cuboid_corners": self.cuboid_corners}) + + # this creates the storage variables by determining the spatial locations of the data specified in self.record + flags, self.record, self.sensor_data, self.num_recorded_time_points = create_storage_variables(self.kgrid, + self.sensor, + opt, + values, + flags, + self.record) + # create perfectly matched layer indices self.create_pml_indices( kgrid_dim=self.kgrid.dim, kgrid_N=Vector(self.kgrid.N), @@ -523,6 +610,7 @@ def input_checking(self, calling_func_name) -> None: is_axisymmetric=opt.simulation_type.is_axisymmetric(), ) + @staticmethod def check_calling_func_name_and_dim(calling_func_name, kgrid_dim) -> None: """ @@ -544,6 +632,7 @@ def check_calling_func_name_and_dim(calling_func_name, kgrid_dim) -> None: elif calling_func_name in ["kspaceFirstOrder3D", "pstdElastic3D", "kspaceElastic3D"]: assert kgrid_dim == 3, f"kgrid has the wrong dimensionality for {calling_func_name}." + @staticmethod def print_start_status(is_elastic_code: bool) -> None: """ @@ -561,6 +650,7 @@ def print_start_status(is_elastic_code: bool) -> None: logging.log(logging.INFO, "Running k-Wave simulation...") logging.log(logging.INFO, f" start time: {get_date_string()}") + def set_index_data_type(self) -> None: """ Pre-calculate the data type needed to store the matrix indices given the @@ -572,6 +662,7 @@ def set_index_data_type(self) -> None: total_grid_points = self.kgrid.total_grid_points self.index_data_type = get_smallest_possible_type(total_grid_points, "uint", default="double") + @staticmethod def check_medium(medium, kgrid_k, simulation_type: SimulationType) -> bool: """ @@ -604,6 +695,7 @@ def check_medium(medium, kgrid_k, simulation_type: SimulationType) -> bool: medium.check_fields(kgrid_k.shape) return user_medium_density_input + def check_sensor(self, kgrid_dim) -> None: """ Check the Sensor properties for correctness and validity @@ -657,9 +749,10 @@ def check_sensor(self, kgrid_dim) -> None: # given, the time history of the acoustic pressure is recorded by # default if self.sensor.record is not None: + # check for time reversal data - if self.time_rev: - logging.log(logging.WARN, "sensor.record is not used for time reversal reconstructions") + # if self.time_rev: + # logging.log(logging.WARN, "sensor.record is not used for time reversal reconstructions") # check the input is a cell array assert isinstance(self.sensor.record, list), 'sensor.record must be given as a list, e.g.
["p", "u"]' @@ -762,12 +855,11 @@ def check_sensor(self, kgrid_dim) -> None: # align sensor data as a column vector to be the # same as kgrid.x_vec so that calls to interp1 # return data in the correct dimension - self.sensor_x = np.reshape((self.sensor.mask, (-1, 1))) + self.sensor_x = np.reshape(self.sensor.mask, (-1, 1)) # add sensor_x to the record structure for use with # the _extractSensorData subfunction self.record.sensor_x = self.sensor_x - "record.sensor_x = sensor_x;" elif kgrid_dim == 2: self.sensor_x = self.sensor.mask[0, :] @@ -788,19 +880,19 @@ def check_sensor(self, kgrid_dim) -> None: # if in time reversal mode, reorder the p0 input data in # the order of the binary sensor_mask - if self.time_rev: - raise NotImplementedError - """ - # append the reordering data - new_col_pos = length(sensor.time_reversal_boundary_data(1, :)) + 1; - sensor.time_reversal_boundary_data(:, new_col_pos) = order_index; - - # reorder p0 based on the order_index - sensor.time_reversal_boundary_data = sort_rows(sensor.time_reversal_boundary_data, new_col_pos); - - # remove the reordering data - sensor.time_reversal_boundary_data = sensor.time_reversal_boundary_data(:, 1:new_col_pos - 1); - """ + # if self.time_rev: + # raise NotImplementedError + # """ + # # append the reordering data + # new_col_pos = length(sensor.time_reversal_boundary_data(1, :)) + 1; + # sensor.time_reversal_boundary_data(:, new_col_pos) = order_index; + + # # reorder p0 based on the order_index + # sensor.time_reversal_boundary_data = sort_rows(sensor.time_reversal_boundary_data, new_col_pos); + + # # remove the reordering data + # sensor.time_reversal_boundary_data = sensor.time_reversal_boundary_data(:, 1:new_col_pos - 1); + # """ else: # set transducer sensor flag self.transducer_sensor = True @@ -819,6 +911,7 @@ def check_sensor(self, kgrid_dim) -> None: if kgrid_dim == 2 and self.use_sensor and self.compute_directivity and self.time_rev: logging.log(logging.WARN, "sensor directivity fields are not used for time reversal.") + def check_source(self, k_dim, k_Nt) -> None: """ Check the source properties for correctness and validity @@ -1002,6 +1095,7 @@ def check_source(self, k_dim, k_Nt) -> None: # clean up unused variables del active_elements_mask + def check_kgrid_time(self) -> None: """ Check time-related kWaveGrid inputs @@ -1013,8 +1107,8 @@ def check_kgrid_time(self) -> None: # check kgrid for t_array existence, and create if not defined if isinstance(self.kgrid.t_array, str) and self.kgrid.t_array == "auto": # check for time reversal mode - if self.time_rev: - raise ValueError("kgrid.t_array (Nt and dt) must be defined explicitly in time reversal mode.") + # if self.time_rev: + # raise ValueError("kgrid.t_array (Nt and dt) must be defined explicitly in time reversal mode.") # check for time varying sources if (not self.source_p0_elastic) and ( @@ -1044,6 +1138,7 @@ def check_kgrid_time(self) -> None: if self.kgrid.dt > dt_stability_limit: logging.log(logging.WARN, " time step may be too large for a stable simulation.") + @staticmethod def select_precision(opt: SimulationOptions): """ @@ -1077,6 +1172,7 @@ def select_precision(opt: SimulationOptions): raise ValueError("'Unknown ''DataCast'' option'") return precision + def check_input_combinations(self, opt: SimulationOptions, user_medium_density_input: bool, k_dim, pml_size, kgrid_N) -> None: """ Check the input combinations for correctness and validity @@ -1178,14 +1274,15 @@ def check_input_combinations(self, opt: SimulationOptions, user_medium_density_i raise 
NotImplementedError(f"diary({self.LOG_NAME}.txt');") # update command line status - if self.time_rev: - logging.log(logging.INFO, " time reversal mode") + # if self.time_rev: + # logging.log(logging.INFO, " time reversal mode") # cleanup unused variables for k in list(self.__dict__.keys()): if k.endswith("_DEF"): delattr(self, k) + def smooth_and_enlarge(self, source, k_dim, kgrid_N, opt: SimulationOptions) -> None: """ Smooth and enlarge grids @@ -1308,6 +1405,7 @@ def smooth_and_enlarge(self, source, k_dim, kgrid_N, opt: SimulationOptions) -> logging.log(logging.INFO, "smoothing density distribution...") self.medium.density = smooth(self.medium.density) + def create_sensor_variables(self) -> None: """ Create the sensor related variables @@ -1365,6 +1463,7 @@ def create_sensor_variables(self) -> None: # set the sensor mask index variable to be empty self.sensor_mask_index = [] + def create_absorption_vars(self) -> None: """ Create absorption variables for the fluid code based on @@ -1378,6 +1477,7 @@ def create_absorption_vars(self) -> None: self.kgrid, self.medium, self.equation_of_state ) + def assign_pseudonyms(self, medium: kWaveMedium, kgrid: kWaveGrid) -> None: """ Shorten commonly used field names (these act only as pointers provided that the values aren't modified) @@ -1394,6 +1494,7 @@ def assign_pseudonyms(self, medium: kWaveMedium, kgrid: kWaveGrid) -> None: self.rho0 = medium.density self.c0 = medium.sound_speed + def scale_source_terms(self, is_scale_source_terms) -> None: """ Scale the source terms based on the expanded and smoothed values of the medium parameters @@ -1451,6 +1552,7 @@ def scale_source_terms(self, is_scale_source_terms) -> None: ), ) + def create_pml_indices(self, kgrid_dim, kgrid_N: Vector, pml_size, pml_inside, is_axisymmetric): """ Define index variables to remove the PML from the display if the optional @@ -1491,10 +1593,12 @@ def create_pml_indices(self, kgrid_dim, kgrid_N: Vector, pml_size, pml_inside, i # self.record = Recorder() self.record.set_index_variables(self.kgrid, pml_size, pml_inside, is_axisymmetric) + @deprecated(version="0.4.1", reason="Use TimeReversal class instead") def check_time_reversal(self) -> bool: return self.time_rev + def _is_binary_sensor_mask(self, kgrid_dim: int) -> bool: """ Check if the sensor mask is a binary grid matching the kgrid dimensions. @@ -1533,6 +1637,7 @@ def _is_binary_sensor_mask(self, kgrid_dim: int) -> bool: return mask_shape == expected_shape + def _is_cuboid_corners_mask(self, kgrid_dim: int) -> bool: """ Check if the sensor mask is defined as cuboid corners. 
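Aside: both branches of `input_checking` above marshal the same `values`/`flags` dictionaries before handing off to the new helper. Below is a minimal, self-contained sketch of that call pattern for a 1D grid with a two-point binary sensor mask. It is illustrative only: it mirrors the dictionary keys and call signature from the diff, and assumes the default `SimulationOptions` and `kSensor` fields the helper reads (`cartesian_interp`, `stream_to_disk`, `use_sg`, the PML sizes, `record_start_index`) carry sensible defaults.

```python
from copy import deepcopy

import numpy as np

from kwave.data import Vector
from kwave.kgrid import kWaveGrid
from kwave.ksensor import kSensor
from kwave.kWaveSimulation_helper import create_storage_variables
from kwave.options.simulation_options import SimulationOptions
from kwave.recorder import Recorder
from kwave.utils.dotdictionary import dotdict

# a small 1D grid and a two-point binary sensor mask
kgrid = kWaveGrid(Vector([128, ]), Vector([0.1e-3, ]))
kgrid.makeTime(1500.0, t_end=2e-5)

mask = np.zeros((128, 1), dtype=bool)
mask[[32, 96]] = True
sensor = kSensor(mask=mask, record=["p"])

record = Recorder()
record.p = True  # record the pressure time history

# MATLAB-style (column-major) linear indices of the sensor points
mask_index = np.flatnonzero(mask.flatten(order="F"))

# marshal the inputs the same way input_checking does above
values = dotdict({"sensor_x": None,
                  "sensor_mask_index": mask_index,
                  "record": deepcopy(record),
                  "sensor_data_buffer_size": None})
flags = dotdict({"use_sensor": True, "blank_sensor": False, "binary_sensor_mask": True,
                 "record_u_split_field": False, "time_rev": None, "reorder_data": False,
                 "transducer_receive_elevation_focus": False, "axisymmetric": False,
                 "transducer_sensor": False, "use_cuboid_corners": False})

# the helper decides which storage arrays are needed and preallocates them
flags, record, sensor_data, num_time_points = create_storage_variables(
    kgrid, sensor, SimulationOptions(), values, flags, record)
print(np.shape(sensor_data.p))  # (number of sensor points, number of recorded time points)
```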
diff --git a/kwave/kWaveSimulation_helper/__init__.py b/kwave/kWaveSimulation_helper/__init__.py index a2eb6e82..34577614 100644 --- a/kwave/kWaveSimulation_helper/__init__.py +++ b/kwave/kWaveSimulation_helper/__init__.py @@ -1,7 +1,13 @@ from kwave.kWaveSimulation_helper.create_absorption_variables import create_absorption_variables +from kwave.kWaveSimulation_helper.create_storage_variables import create_storage_variables from kwave.kWaveSimulation_helper.display_simulation_params import display_simulation_params from kwave.kWaveSimulation_helper.expand_grid_matrices import expand_grid_matrices +from kwave.kWaveSimulation_helper.extract_sensor_data import extract_sensor_data from kwave.kWaveSimulation_helper.retract_transducer_grid_size import retract_transducer_grid_size from kwave.kWaveSimulation_helper.save_to_disk_func import save_to_disk_func from kwave.kWaveSimulation_helper.scale_source_terms_func import scale_source_terms_func from kwave.kWaveSimulation_helper.set_sound_speed_ref import set_sound_speed_ref + +__all__ = ["create_absorption_variables", "create_storage_variables", "display_simulation_params", "expand_grid_matrices", + "extract_sensor_data", "retract_transducer_grid_size", "save_to_disk_func", + "scale_source_terms_func", "set_sound_speed_ref"] \ No newline at end of file diff --git a/kwave/kWaveSimulation_helper/create_absorption_variables.py b/kwave/kWaveSimulation_helper/create_absorption_variables.py index 56ae2d38..9434f827 100644 --- a/kwave/kWaveSimulation_helper/create_absorption_variables.py +++ b/kwave/kWaveSimulation_helper/create_absorption_variables.py @@ -14,6 +14,8 @@ def create_absorption_variables(kgrid: kWaveGrid, medium: kWaveMedium, equation_ return create_absorbing_medium_variables(kgrid.k, medium) elif equation_of_state == "stokes": return create_stokes_medium_variables(medium) + elif equation_of_state == "lossless": + return None, None, None, None else: raise NotImplementedError diff --git a/kwave/kWaveSimulation_helper/create_storage_variables.py b/kwave/kWaveSimulation_helper/create_storage_variables.py new file mode 100644 index 00000000..85511019 --- /dev/null +++ b/kwave/kWaveSimulation_helper/create_storage_variables.py @@ -0,0 +1,593 @@ +from copy import deepcopy +from typing import List, Union + +import numpy as np +from numpy.fft import ifftshift +from scipy.spatial import Delaunay + +from kwave.data import Vector +from kwave.kgrid import kWaveGrid +from kwave.options.simulation_options import SimulationOptions +from kwave.recorder import Recorder +from kwave.utils.dotdictionary import dotdict + + +def gridDataFast2D(x, y, xi, yi): + """ + Delaunay triangulation in 2D + """ + x = np.ravel(x) + y = np.ravel(y) + xi = np.ravel(xi) + yi = np.ravel(yi) + + points = np.squeeze(np.dstack((x, y))) + interpolation_points = np.squeeze(np.dstack((xi, yi))) + + tri = Delaunay(points) + + indices = tri.find_simplex(interpolation_points) + + bc = tri.transform[indices, :2].dot(np.transpose(tri.points[indices, :] - tri.transform[indices, 2])) + + return tri.points[indices, :], bc + + +def gridDataFast3D(x, y, z, xi, yi, zi): + """ + Delaunay triangulation in 3D + """ + x = np.ravel(x) + y = np.ravel(y) + z = np.ravel(z) + xi = np.ravel(xi) + yi = np.ravel(yi) + zi = np.ravel(zi) + + grid_points = np.squeeze(np.dstack((x, y, z))) + interpolation_points = np.squeeze(np.dstack((xi, yi, zi))) + + tri = Delaunay(grid_points) + + simplex_indices = tri.find_simplex(interpolation_points) + + # barycentric coordinates + bc = tri.transform[simplex_indices, :2].dot(np.transpose(tri.points[simplex_indices, :] - tri.transform[simplex_indices, 2])) + + return tri.points[simplex_indices, :], bc + + +class OutputSensor(object): + """ + Class which holds information about which spatial locations are used to save data + """ + flags = None + x_shift_neg = None + p = None + + +def create_storage_variables(kgrid: kWaveGrid, sensor, opt: SimulationOptions, + values: dotdict, flags: dotdict, record: Recorder): + """ + Create the storage variables for the sensor data + """ + + # ========================================================================= + # PREPARE DATA MASKS AND STORAGE VARIABLES + # ========================================================================= + + sensor_data = OutputSensor() + + flags = set_flags(flags, values.sensor_x, sensor.mask, opt.cartesian_interp) + + # preallocate output variables - the time reversal flag is, at present, always None; if it is + # ever set, return early without allocating storage (time reversal is not yet implemented here) + if flags.time_rev is not None: + return flags + + num_sensor_points = get_num_of_sensor_points(flags.blank_sensor, + flags.binary_sensor_mask, + kgrid.k, + values.sensor_mask_index, + values.sensor_x) + + num_recorded_time_points, _ = \ + get_num_recorded_time_points(kgrid.dim, kgrid.Nt, opt.stream_to_disk, sensor.record_start_index) + + record = create_shift_operators(record, values.record, kgrid, opt.use_sg) + + create_normalized_wavenumber_vectors(record, kgrid, flags.record_u_split_field) + + pml_size = [opt.pml_x_size, opt.pml_y_size, opt.pml_z_size] + pml_size = Vector(pml_size[:kgrid.dim]) + all_vars_size = calculate_all_vars_size(kgrid, opt.pml_inside, pml_size) + + sensor_data = create_sensor_variables(values.record, kgrid, num_sensor_points, num_recorded_time_points, + all_vars_size, values.sensor_mask_index, flags.use_cuboid_corners) + + create_transducer_buffer(flags.transducer_sensor, values.transducer_receive_elevation_focus, sensor, + num_sensor_points, num_recorded_time_points, values.sensor_data_buffer_size, + flags, sensor_data) + + record = compute_triangulation_points(flags, kgrid, record, sensor.mask) + + return flags, record, sensor_data, num_recorded_time_points + + +def set_flags(flags: dotdict, sensor_x, sensor_mask, is_cartesian_interp): + """ + check sensor mask based on the Cartesian interpolation setting + """ + + if not flags.binary_sensor_mask and is_cartesian_interp == 'nearest': + + # extract the data using the binary sensor mask created in + # input_checking, but switch on Cartesian reorder flag so that the + # final data is returned in the correct order (not in time + # reversal mode). + # Again, as time reversal is not yet implemented, time_rev is always None here, so this is just setting reorder_data to True + flags.binary_sensor_mask = True + if flags.time_rev is None: + flags.reorder_data = True + + # check if any duplicate points have been discarded in the + # conversion from a Cartesian to binary mask + num_discarded_points = len(sensor_x) - sensor_mask.sum() + if num_discarded_points != 0: + print(f' WARNING: {num_discarded_points} duplicated sensor points discarded (nearest neighbour interpolation)') + + return flags + + +def get_num_of_sensor_points(is_blank_sensor, is_binary_sensor_mask, kgrid_k, sensor_mask_index, sensor_x): + """ + Returns the number of sensor points for a given set of sensor parameters. + + Args: + is_blank_sensor (bool): Whether the sensor is blank or not.
+ is_binary_sensor_mask (bool): Whether the sensor mask is binary or not. + kgrid_k (ndarray): An array of k-values for the k-Wave grid. + sensor_mask_index (list): List of sensor mask indices. + sensor_x (list): List of sensor x-coordinates. + + Returns: + int: The number of sensor points. + """ + if is_blank_sensor: + num_sensor_points = kgrid_k.size + elif is_binary_sensor_mask: + num_sensor_points = len(sensor_mask_index) + else: + num_sensor_points = len(sensor_x) + return num_sensor_points + + +def get_num_recorded_time_points(kgrid_dim, Nt, stream_to_disk, record_start_index): + """ + calculate the number of time points that are stored + - if streaming data to disk, reduce to the size of the + sensor_data matrix based on the value of self.options.stream_to_disk + - if a user input for sensor.record_start_index is given, reduce + the size of the sensor_data matrix based on the value given + Args: + kgrid_dim: number of spatial dimensions of the simulation + Nt: total number of time steps + stream_to_disk: number of time steps to buffer when streaming 3D data to disk (falsy to disable) + record_start_index: time index at which recording starts + + Returns: + tuple of (num_recorded_time_points, stream_data_index) + + """ + if kgrid_dim == 3 and stream_to_disk: + + # set the number of points + num_recorded_time_points = stream_to_disk + + # initialise the file index variable + stream_data_index = 1 + + else: + num_recorded_time_points = Nt - record_start_index + 1 + stream_data_index = None # only used when streaming to disk + + return num_recorded_time_points, stream_data_index + + +def create_shift_operators(record: Recorder, record_old: Recorder, kgrid: kWaveGrid, is_use_sg: bool): + """ + create shift operators used for calculating the components of the + particle velocity field on the non-staggered grids (these are used + for both binary and Cartesian sensor masks) + """ + + if (record_old.u_non_staggered or record_old.u_split_field or record_old.I or record_old.I_avg): + if is_use_sg: + if kgrid.dim == 1: + record.x_shift_neg = ifftshift(np.exp(-1j * kgrid.k_vec.x * kgrid.dx / 2)) + elif kgrid.dim == 2: + record.x_shift_neg = ifftshift(np.exp(-1j * kgrid.k_vec.x * kgrid.dx / 2)) + record.y_shift_neg = ifftshift(np.exp(-1j * kgrid.k_vec.y * kgrid.dy / 2)).T + elif kgrid.dim == 3: + record.x_shift_neg = ifftshift(np.exp(-1j * kgrid.k_vec.x * kgrid.dx / 2)) + record.y_shift_neg = ifftshift(np.exp(-1j * kgrid.k_vec.y * kgrid.dy / 2)) + record.z_shift_neg = ifftshift(np.exp(-1j * kgrid.k_vec.z * kgrid.dz / 2)) + + record.x_shift_neg = np.expand_dims(record.x_shift_neg, axis=-1) + + record.y_shift_neg = np.expand_dims(record.y_shift_neg, axis=0) + + record.z_shift_neg = np.squeeze(record.z_shift_neg) + record.z_shift_neg = np.expand_dims(record.z_shift_neg, axis=0) + record.z_shift_neg = np.expand_dims(record.z_shift_neg, axis=0) + + else: + if kgrid.dim == 1: + record.x_shift_neg = 1 + elif kgrid.dim == 2: + record.x_shift_neg = 1 + record.y_shift_neg = 1 + elif kgrid.dim == 3: + record.x_shift_neg = 1 + record.y_shift_neg = 1 + record.z_shift_neg = 1 + return record + + +def create_normalized_wavenumber_vectors(record: Recorder, kgrid: kWaveGrid, is_record_u_split_field): + """ + create normalised wavenumber vectors for k-space dyadics used to + split the particle velocity into compressional and shear components + """ + if not is_record_u_split_field: + return record + + # x-dimension + record.kx_norm = kgrid.kx / kgrid.k + record.kx_norm[kgrid.k == 0] = 0 + record.kx_norm = ifftshift(record.kx_norm) + + # y-dimension + if kgrid.dim >= 2: + record.ky_norm = kgrid.ky / kgrid.k + record.ky_norm[kgrid.k == 0] = 0 + record.ky_norm = ifftshift(record.ky_norm) + + # z-dimension + if kgrid.dim == 3: + record.kz_norm = kgrid.kz / kgrid.k + record.kz_norm[kgrid.k == 0] = 0 + record.kz_norm = ifftshift(record.kz_norm) + + return record + + +def create_sensor_variables(record_old: Recorder, kgrid, num_sensor_points, num_recorded_time_points, + all_vars_size, sensor_mask_index, use_cuboid_corners) -> Union[dotdict, List[dotdict]]: + """ + create storage and scaling variables - all variables are saved as fields of + a container called sensor_data. If cuboid corners are used this is a list, else a dictionary-like container + """ + + if use_cuboid_corners: + + # as a list + sensor_data = [] + + # get number of dotdicts in the list for each set of cuboid corners + n_cuboids: int = np.shape(record_old.cuboid_corners_list)[1] + + # for each set of cuboid corners + for cuboid_index in np.arange(n_cuboids, dtype=int): + + # add an entry to the list + sensor_data.append(dotdict()) + + # get size of cuboid for indexing regions of computational grid + if kgrid.dim == 1: + cuboid_size_x = [record_old.cuboid_corners_list[1, cuboid_index] - record_old.cuboid_corners_list[0, cuboid_index] + 1, 1] + elif kgrid.dim == 2: + cuboid_size_x = [record_old.cuboid_corners_list[2, cuboid_index] - record_old.cuboid_corners_list[0, cuboid_index] + 1, + record_old.cuboid_corners_list[3, cuboid_index] - record_old.cuboid_corners_list[1, cuboid_index] + 1] + elif kgrid.dim == 3: + cuboid_size_x = [record_old.cuboid_corners_list[3, cuboid_index] - record_old.cuboid_corners_list[0, cuboid_index] + 1, + record_old.cuboid_corners_list[4, cuboid_index] - record_old.cuboid_corners_list[1, cuboid_index] + 1, + record_old.cuboid_corners_list[5, cuboid_index] - record_old.cuboid_corners_list[2, cuboid_index] + 1] + + cuboid_size_xt = deepcopy(cuboid_size_x) + cuboid_size_xt.append(num_recorded_time_points) + + # time history of the acoustic pressure + if record_old.p or record_old.I or record_old.I_avg: + sensor_data[cuboid_index].p = np.zeros(cuboid_size_xt) + + # maximum pressure + if record_old.p_max: + sensor_data[cuboid_index].p_max = np.zeros(cuboid_size_x) + + # minimum pressure + if record_old.p_min: + sensor_data[cuboid_index].p_min = np.zeros(cuboid_size_x) + + # rms pressure + if record_old.p_rms: + sensor_data[cuboid_index].p_rms = np.zeros(cuboid_size_x) + + # time history of the acoustic particle velocity + if record_old.u: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data[cuboid_index].ux = np.zeros(cuboid_size_xt) + elif kgrid.dim == 2: + sensor_data[cuboid_index].ux = np.zeros(cuboid_size_xt) + sensor_data[cuboid_index].uy = np.zeros(cuboid_size_xt) + elif kgrid.dim == 3: + sensor_data[cuboid_index].ux = np.zeros(cuboid_size_xt) + sensor_data[cuboid_index].uy = np.zeros(cuboid_size_xt) + sensor_data[cuboid_index].uz = np.zeros(cuboid_size_xt) + + # store the time history of the particle velocity on the non-staggered grid + if record_old.u_non_staggered or record_old.I or record_old.I_avg: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data[cuboid_index].ux_non_staggered = np.zeros(cuboid_size_xt) + elif kgrid.dim == 2: + sensor_data[cuboid_index].ux_non_staggered = np.zeros(cuboid_size_xt) + sensor_data[cuboid_index].uy_non_staggered = np.zeros(cuboid_size_xt) + elif kgrid.dim == 3: + sensor_data[cuboid_index].ux_non_staggered = np.zeros(cuboid_size_xt) + sensor_data[cuboid_index].uy_non_staggered = np.zeros(cuboid_size_xt) + sensor_data[cuboid_index].uz_non_staggered = np.zeros(cuboid_size_xt) + + # time history of the acoustic
particle velocity split into compressional and shear components + if record_old.u_split_field: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 2: + sensor_data[cuboid_index].ux_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data[cuboid_index].ux_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data[cuboid_index].uy_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data[cuboid_index].uy_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + if kgrid.dim == 3: + sensor_data[cuboid_index].ux_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data[cuboid_index].ux_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data[cuboid_index].uy_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data[cuboid_index].uy_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data[cuboid_index].uz_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data[cuboid_index].uz_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + + else: + + # allocate empty sensor + sensor_data = dotdict() + + # if only p is being stored (i.e., if no user input is given for + # sensor.record), then sensor_data.p is copied to sensor_data at the + # end of the simulation + + # time history of the acoustic pressure + if record_old.p or record_old.I or record_old.I_avg: + sensor_data.p = np.zeros([num_sensor_points, num_recorded_time_points]) + + # maximum pressure + if record_old.p_max: + sensor_data.p_max = np.zeros([num_sensor_points,]) + + # minimum pressure + if record_old.p_min: + sensor_data.p_min = np.zeros([num_sensor_points,]) + + # rms pressure + if record_old.p_rms: + sensor_data.p_rms = np.zeros([num_sensor_points,]) + + # maximum pressure over all grid points + if record_old.p_max_all: + sensor_data.p_max_all = np.zeros(all_vars_size) + + # minimum pressure over all grid points + if record_old.p_min_all: + sensor_data.p_min_all = np.zeros(all_vars_size) + + # time history of the acoustic particle velocity + if record_old.u: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data.ux = np.zeros([num_sensor_points, num_recorded_time_points]) + elif kgrid.dim == 2: + sensor_data.ux = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uy = np.zeros([num_sensor_points, num_recorded_time_points]) + elif kgrid.dim == 3: + sensor_data.ux = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uy = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uz = np.zeros([num_sensor_points, num_recorded_time_points]) + + # maximum particle velocity + if record_old.u_max: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data.ux_max = np.zeros([num_sensor_points,]) + if kgrid.dim == 2: + sensor_data.ux_max = np.zeros([num_sensor_points,]) + sensor_data.uy_max = np.zeros([num_sensor_points,]) + if kgrid.dim == 3: + sensor_data.ux_max = np.zeros([num_sensor_points,]) + sensor_data.uy_max = np.zeros([num_sensor_points,]) + sensor_data.uz_max = np.zeros([num_sensor_points,]) + + # minimum particle velocity + if record_old.u_min: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data.ux_min = 
np.zeros([num_sensor_points,]) + if kgrid.dim == 2: + sensor_data.ux_min = np.zeros([num_sensor_points,]) + sensor_data.uy_min = np.zeros([num_sensor_points,]) + if kgrid.dim == 3: + sensor_data.ux_min = np.zeros([num_sensor_points,]) + sensor_data.uy_min = np.zeros([num_sensor_points,]) + sensor_data.uz_min = np.zeros([num_sensor_points,]) + + # rms particle velocity + if record_old.u_rms: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data.ux_rms = np.zeros([num_sensor_points,]) + if kgrid.dim == 2: + sensor_data.ux_rms = np.zeros([num_sensor_points,]) + sensor_data.uy_rms = np.zeros([num_sensor_points,]) + if kgrid.dim == 3: + sensor_data.ux_rms = np.zeros([num_sensor_points,]) + sensor_data.uy_rms = np.zeros([num_sensor_points,]) + sensor_data.uz_rms = np.zeros([num_sensor_points,]) + + # maximum particle velocity over all grid points + if record_old.u_max_all: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data.ux_max_all = np.zeros(all_vars_size) + if kgrid.dim == 2: + sensor_data.ux_max_all = np.zeros(all_vars_size) + sensor_data.uy_max_all = np.zeros(all_vars_size) + if kgrid.dim == 3: + sensor_data.ux_max_all = np.zeros(all_vars_size) + sensor_data.uy_max_all = np.zeros(all_vars_size) + sensor_data.uz_max_all = np.zeros(all_vars_size) + + # minimum particle velocity over all grid points + if record_old.u_min_all: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data.ux_min_all = np.zeros(all_vars_size) + if kgrid.dim == 2: + sensor_data.ux_min_all = np.zeros(all_vars_size) + sensor_data.uy_min_all = np.zeros(all_vars_size) + if kgrid.dim == 3: + sensor_data.ux_min_all = np.zeros(all_vars_size) + sensor_data.uy_min_all = np.zeros(all_vars_size) + sensor_data.uz_min_all = np.zeros(all_vars_size) + + # time history of the acoustic particle velocity on the non-staggered grid points + if record_old.u_non_staggered or record_old.I or record_old.I_avg: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data.ux_non_staggered = np.zeros([num_sensor_points, num_recorded_time_points]) + if kgrid.dim == 2: + sensor_data.ux_non_staggered = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uy_non_staggered = np.zeros([num_sensor_points, num_recorded_time_points]) + if kgrid.dim == 3: + sensor_data.ux_non_staggered = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uy_non_staggered = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uz_non_staggered = np.zeros([num_sensor_points, num_recorded_time_points]) + + # time history of the acoustic particle velocity split into compressional and shear components + if record_old.u_split_field: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 2: + sensor_data.ux_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.ux_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uy_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uy_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + if kgrid.dim == 3: + sensor_data.ux_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.ux_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + 
sensor_data.uy_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uy_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uz_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uz_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + + # if use_cuboid_corners: + # info = "using cuboid_corners (create storage variables)," + str(len(sensor_data)) + ", " + str(np.shape(sensor_data[0].p)) + # else: + # info = "binary_mask (create storage variables), ", np.shape(sensor_data.p) + + return sensor_data + + +def create_transducer_buffer(is_transducer_sensor, is_transducer_receive_elevation_focus, sensor, + num_sensor_points, num_recorded_time_points, sensor_data_buffer_size, + flags, sensor_data): + # object of the kWaveTransducer class is being used as a sensor + + if is_transducer_sensor: + if is_transducer_receive_elevation_focus: + + # if there is elevation focusing, a buffer is + # needed to store a short time history at each + # sensor point before averaging + # ??? + sensor_data_buffer_size = sensor.elevation_beamforming_delays.max() + 1 + if sensor_data_buffer_size > 1: + sensor_data_buffer = np.zeros([num_sensor_points, sensor_data_buffer_size]) # noqa: F841 + else: + del sensor_data_buffer_size + flags.transducer_receive_elevation_focus = False + + # the grid points can be summed on the fly and so the + # sensor is the size of the number of active elements + sensor_data.transducer = np.zeros([int(sensor.number_active_elements), num_recorded_time_points]) + else: + pass + + +def compute_triangulation_points(flags, kgrid, record, mask): + """ + precompute the triangulation points if a Cartesian sensor mask is used + with linear interpolation (tri and bc are the Delaunay TRIangulation and + Barycentric Coordinates) + """ + + if not flags.binary_sensor_mask: + + if kgrid.dim == 1: + + # assign pseudonym for Cartesian grid points in 1D (this is later used for data casting) + record.grid_x = kgrid.x_vec + + else: + + if kgrid.dim == 1: + # align sensor data as a column vector to be the same as kgrid.x_vec + # so that calls to interp return data in the correct dimension + sensor_x = np.reshape(mask, (-1, 1)) + elif kgrid.dim == 2: + sensor_x = mask[0, :] + sensor_y = mask[1, :] + elif kgrid.dim == 3: + sensor_x = mask[0, :] + sensor_y = mask[1, :] + sensor_z = mask[2, :] + + # update command line status + print(' calculating Delaunay triangulation...') + + # compute triangulation + if kgrid.dim == 2: + if flags.axisymmetric: + record.tri, record.bc = gridDataFast2D(kgrid.x, kgrid.y - kgrid.y_vec.min(), sensor_x, sensor_y) + else: + record.tri, record.bc = gridDataFast2D(kgrid.x, kgrid.y, sensor_x, sensor_y) + elif kgrid.dim == 3: + record.tri, record.bc = gridDataFast3D(kgrid.x, kgrid.y, kgrid.z, sensor_x, sensor_y, sensor_z) + + print("done") + + return record + + +def calculate_all_vars_size(kgrid, is_pml_inside, pml_size): + """ + calculate the size of the _all and _final output variables - if the + PML is set to be outside the grid, these will be the same size as the + user input, rather than the expanded grid + """ + if is_pml_inside: + all_vars_size = kgrid.k.shape + else: + if kgrid.dim == 1: + all_vars_size = [kgrid.Nx - 2 * pml_size.x, 1] + elif kgrid.dim == 2: + all_vars_size = [kgrid.Nx - 2 * pml_size.x, kgrid.Ny - 2 * pml_size.y] + elif kgrid.dim == 3: + all_vars_size = [kgrid.Nx - 2 * pml_size.x, kgrid.Ny - 2 * pml_size.y, kgrid.Nz - 2 * pml_size.z] + else: + raise
NotImplementedError + return all_vars_size \ No newline at end of file diff --git a/kwave/kWaveSimulation_helper/extract_sensor_data.py b/kwave/kWaveSimulation_helper/extract_sensor_data.py new file mode 100644 index 00000000..37733b62 --- /dev/null +++ b/kwave/kWaveSimulation_helper/extract_sensor_data.py @@ -0,0 +1,715 @@ +import numpy as np + +try: + import cupy as cp +except ImportError: + cp = None + +def extract_sensor_data(dim: int, xp, sensor_data, file_index, sensor_mask_index, + flags, record, p, ux_sgx, uy_sgy=None, uz_sgz=None): + """ + extract_sensor_data Sample field variables at the sensor locations. + """ + + # ========================================================================= + # GRID STAGGERING + # ========================================================================= + + # shift the components of the velocity field onto the non-staggered + # grid if required for output + if (flags.record_u_non_staggered or flags.record_I or flags.record_I_avg): + if (dim == 1): + ux_shifted = np.real(np.fft.ifft(record.x_shift_neg * np.fft.fft(ux_sgx, axis=0), axis=0)) + elif (dim == 2): + ux_shifted = np.real(np.fft.ifft(record.x_shift_neg * np.fft.fft(ux_sgx, axis=0), axis=0)) + uy_shifted = np.real(np.fft.ifft(record.y_shift_neg * np.fft.fft(uy_sgy, axis=1), axis=1)) + elif (dim == 3): + ux_shifted = np.real(np.fft.ifft(record.x_shift_neg * np.fft.fft(ux_sgx, axis=0), axis=0)) + uy_shifted = np.real(np.fft.ifft(record.y_shift_neg * np.fft.fft(uy_sgy, axis=1), axis=1)) + uz_shifted = np.real(np.fft.ifft(record.z_shift_neg * np.fft.fft(uz_sgz, axis=2), axis=2)) + else: + raise RuntimeError("Wrong dimensions") + + # ========================================================================= + # BINARY SENSOR MASK + # ========================================================================= + + if flags.binary_sensor_mask and not flags.use_cuboid_corners: + + # store the time history of the acoustic pressure + if (flags.record_p or flags.record_I or flags.record_I_avg): + if not flags.compute_directivity: + sensor_data.p[:, file_index] = np.squeeze(p[np.unravel_index(sensor_mask_index, np.shape(p), order='F')]) + else: + raise NotImplementedError('directivity not used at the moment') + + # store the maximum acoustic pressure + if flags.record_p_max: + if file_index == 0: + sensor_data.p_max = p[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(p), order='F')] + else: + sensor_data.p_max = np.maximum(sensor_data.p_max, + p[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(p), order='F')]) + + # store the minimum acoustic pressure + if flags.record_p_min: + if file_index == 0: + sensor_data.p_min = p[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(p), order='F')] + else: + sensor_data.p_min = np.minimum(sensor_data.p_min, + p[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(p), order='F')]) + + # store the rms acoustic pressure + if flags.record_p_rms: + if file_index == 0: + sensor_data.p_rms = p[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(p), order='F')]**2 + else: + sensor_data.p_rms = np.sqrt((sensor_data.p_rms**2 * file_index + + p[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(p), order='F')]**2) / (file_index + 1) ) + + # store the time history of the particle velocity on the staggered grid + if flags.record_u: + if (dim == 1): + sensor_data.ux[:, file_index] = ux_sgx[sensor_mask_index] + elif (dim == 2): + sensor_data.ux[:, file_index] = ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')] + 
sensor_data.uy[:, file_index] = uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')] + elif (dim == 3): + sensor_data.ux[:, file_index] = ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(ux_sgx), order='F')] + sensor_data.uy[:, file_index] = uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(uy_sgy), order='F')] + sensor_data.uz[:, file_index] = uz_sgz[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(uz_sgz), order='F')] + else: + raise RuntimeError("Wrong dimensions") + + # store the time history of the particle velocity + if flags.record_u_non_staggered or flags.record_I or flags.record_I_avg: + if (dim == 1): + sensor_data.ux_non_staggered[:, file_index] = ux_shifted[sensor_mask_index] + elif (dim == 2): + sensor_data.ux_non_staggered[:, file_index] = ux_shifted[np.unravel_index(np.squeeze(sensor_mask_index), ux_shifted.shape, order='F')] + sensor_data.uy_non_staggered[:, file_index] = uy_shifted[np.unravel_index(np.squeeze(sensor_mask_index), uy_shifted.shape, order='F')] + elif (dim == 3): + sensor_data.ux_non_staggered[:, file_index] = ux_shifted[np.unravel_index(np.squeeze(sensor_mask_index), ux_shifted.shape, order='F')] + sensor_data.uy_non_staggered[:, file_index] = uy_shifted[np.unravel_index(np.squeeze(sensor_mask_index), uy_shifted.shape, order='F')] + sensor_data.uz_non_staggered[:, file_index] = uz_shifted[np.unravel_index(np.squeeze(sensor_mask_index), uz_shifted.shape, order='F')] + else: + raise RuntimeError("Wrong dimensions") + + # store the split components of the particle velocity + if flags.record_u_split_field: + if (dim == 2): + + # compute forward FFTs + ux_k = record.x_shift_neg * np.fft.fftn(ux_sgx) + uy_k = record.y_shift_neg * np.fft.fftn(uy_sgy) + + # ux compressional + split_field = np.real(np.fft.ifftn(record.kx_norm**2 * ux_k + record.kx_norm * record.ky_norm * uy_k)) + sensor_data.ux_split_p[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # ux shear + split_field = np.real(np.fft.ifftn((1.0 - record.kx_norm**2) * ux_k - record.kx_norm * record.ky_norm * uy_k)) + sensor_data.ux_split_s[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # uy compressional + split_field = np.real(np.fft.ifftn(record.ky_norm * record.kx_norm * ux_k + record.ky_norm **2 * uy_k)) + sensor_data.uy_split_p[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # uy shear + split_field = np.real(np.fft.ifftn(-record.ky_norm * record.kx_norm * ux_k + (1.0 - record.ky_norm**2) * uy_k)) + sensor_data.uy_split_s[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + elif (dim == 3): + + # compute forward FFTs + ux_k = np.multiply(record.x_shift_neg, np.fft.fftn(ux_sgx), order='F') + uy_k = np.multiply(record.y_shift_neg, np.fft.fftn(uy_sgy), order='F') + uz_k = np.multiply(record.z_shift_neg, np.fft.fftn(uz_sgz), order='F') + + # ux compressional + split_field = np.real(np.fft.ifftn(record.kx_norm**2 * ux_k + + record.kx_norm * record.ky_norm * uy_k + + record.kx_norm * record.kz_norm * uz_k)) + sensor_data.ux_split_p[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # ux shear + split_field = np.real(np.fft.ifftn((1.0 - record.kx_norm**2) * ux_k - + record.kx_norm * record.ky_norm * uy_k - + record.kx_norm * record.kz_norm * 
uz_k)) + sensor_data.ux_split_s[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # uy compressional + split_field = np.real(np.fft.ifftn(record.ky_norm * record.kx_norm * ux_k + + record.ky_norm**2 * uy_k + + record.ky_norm * record.kz_norm * uz_k)) + sensor_data.uy_split_p[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # uy shear + split_field = np.real(np.fft.ifftn(- record.ky_norm * record.kx_norm * ux_k + + (1.0 - record.ky_norm**2) * uy_k - + record.ky_norm * record.kz_norm * uz_k)) + sensor_data.uy_split_s[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # uz compressional + split_field = np.real(np.fft.ifftn(record.kz_norm * record.kx_norm * ux_k + + record.kz_norm * record.ky_norm * uy_k + + record.kz_norm**2 * uz_k)) + sensor_data.uz_split_p[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # uz shear + split_field = np.real(np.fft.ifftn( -record.kz_norm * record.kx_norm * ux_k - + record.kz_norm * record.ky_norm * uy_k + + (1.0 - record.kz_norm**2) * uz_k)) + sensor_data.uz_split_s[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + else: + raise RuntimeError("Wrong dimensions") + + # store the maximum particle velocity + if flags.record_u_max: + if file_index == 0: + if (dim == 1): + sensor_data.ux_max = ux_sgx[sensor_mask_index] + elif (dim == 2): + sensor_data.ux_max = ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')] + sensor_data.uy_max = uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')] + elif (dim == 3): + sensor_data.ux_max = ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')] + sensor_data.uy_max = uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')] + sensor_data.uz_max = uz_sgz[np.unravel_index(np.squeeze(sensor_mask_index), uz_sgz.shape, order='F')] + else: + raise RuntimeError("Wrong dimensions") + else: + if (dim == 1): + sensor_data.ux_max = np.maximum(sensor_data.ux_max, ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')]) + elif (dim == 2): + sensor_data.ux_max = np.maximum(sensor_data.ux_max, ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')]) + sensor_data.uy_max = np.maximum(sensor_data.uy_max, uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')]) + elif (dim == 3): + sensor_data.ux_max = np.maximum(sensor_data.ux_max, ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')]) + sensor_data.uy_max = np.maximum(sensor_data.uy_max, uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')]) + sensor_data.uz_max = np.maximum(sensor_data.uz_max, uz_sgz[np.unravel_index(np.squeeze(sensor_mask_index), uz_sgz.shape, order='F')]) + else: + raise RuntimeError("Wrong dimensions") + + # store the minimum particle velocity + if flags.record_u_min: + if file_index == 0: + if (dim == 1): + sensor_data.ux_min = ux_sgx[sensor_mask_index] + elif (dim == 2): + sensor_data.ux_min = ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')] + sensor_data.uy_min = uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')] + elif (dim == 3): + sensor_data.ux_min = 
ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')]
+                sensor_data.uy_min = uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')]
+                sensor_data.uz_min = uz_sgz[np.unravel_index(np.squeeze(sensor_mask_index), uz_sgz.shape, order='F')]
+            else:
+                raise RuntimeError("Wrong dimensions")
+        else:
+            if (dim == 1):
+                sensor_data.ux_min = np.minimum(sensor_data.ux_min, ux_sgx[sensor_mask_index])
+            elif (dim == 2):
+                sensor_data.ux_min = np.minimum(sensor_data.ux_min, ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')])
+                sensor_data.uy_min = np.minimum(sensor_data.uy_min, uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')])
+            elif (dim == 3):
+                sensor_data.ux_min = np.minimum(sensor_data.ux_min, ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')])
+                sensor_data.uy_min = np.minimum(sensor_data.uy_min, uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')])
+                sensor_data.uz_min = np.minimum(sensor_data.uz_min, uz_sgz[np.unravel_index(np.squeeze(sensor_mask_index), uz_sgz.shape, order='F')])
+            else:
+                raise RuntimeError("Wrong dimensions")
+
+    # store the rms particle velocity
+    if flags.record_u_rms:
+        if (dim == 1):
+            sensor_data.ux_rms = np.sqrt((sensor_data.ux_rms**2 * (file_index - 0) +
+                                          ux_sgx[sensor_mask_index]**2) / (file_index + 1))
+        elif (dim == 2):
+            sensor_data.ux_rms = np.sqrt((sensor_data.ux_rms**2 * (file_index - 0) +
+                                          ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')]**2) / (file_index + 1))
+            sensor_data.uy_rms = np.sqrt((sensor_data.uy_rms**2 * (file_index - 0) +
+                                          uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')]**2) / (file_index + 1))
+        elif (dim == 3):
+            sensor_data.ux_rms = np.sqrt((sensor_data.ux_rms**2 * (file_index - 0) +
+                                          ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')]**2) / (file_index + 1))
+            sensor_data.uy_rms = np.sqrt((sensor_data.uy_rms**2 * (file_index - 0) +
+                                          uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')]**2) / (file_index + 1))
+            sensor_data.uz_rms = np.sqrt((sensor_data.uz_rms**2 * (file_index - 0) +
+                                          uz_sgz[np.unravel_index(np.squeeze(sensor_mask_index), uz_sgz.shape, order='F')]**2) / (file_index + 1))
+
+    # =========================================================================
+    # CUBOID CORNERS SENSOR MASK
+    # =========================================================================
+
+    elif flags.use_cuboid_corners:
+
+        n_cuboids: int = np.shape(record.cuboid_corners_list)[1]
+
+        # s_start: int = 0
+
+        # for each cuboid
+        for cuboid_index in np.arange(n_cuboids):
+
+            # get the size of the cuboid for indexing regions of the computational grid
+            # (the corner indices are inclusive, so add one to each extent, as in the 3D case)
+            if dim == 1:
+                cuboid_size_x = [record.cuboid_corners_list[1, cuboid_index] + 1 - record.cuboid_corners_list[0, cuboid_index], 1]
+            elif dim == 2:
+                cuboid_size_x = [record.cuboid_corners_list[2, cuboid_index] + 1 - record.cuboid_corners_list[0, cuboid_index],
+                                 record.cuboid_corners_list[3, cuboid_index] + 1 - record.cuboid_corners_list[1, cuboid_index]]
+            elif dim == 3:
+                cuboid_size_x = [record.cuboid_corners_list[3, cuboid_index] + 1 - record.cuboid_corners_list[0, cuboid_index],
+                                 record.cuboid_corners_list[4, cuboid_index] + 1 - record.cuboid_corners_list[1, cuboid_index],
+                                 record.cuboid_corners_list[5, cuboid_index] + 1 - record.cuboid_corners_list[2, cuboid_index]]
+
+            # cuboid_num_points: int = np.prod(cuboid_size_x)
+
+            # s_end: int = s_start + cuboid_num_points
+
+            # sensor_mask_sub_index = sensor_mask_index[s_start:s_end]
+
+            # s_start = s_end
+
+            # note: the index construction below assumes dim == 3
+            x_indices = np.arange(record.cuboid_corners_list[0, cuboid_index], record.cuboid_corners_list[3, cuboid_index] + 1, dtype=int)
+            y_indices = np.arange(record.cuboid_corners_list[1, cuboid_index], record.cuboid_corners_list[4, cuboid_index] + 1, dtype=int)
+            z_indices = np.arange(record.cuboid_corners_list[2, cuboid_index], record.cuboid_corners_list[5, cuboid_index] + 1, dtype=int)
+
+            # create a meshgrid covering the cuboid
+            xx, yy, zz = np.meshgrid(x_indices, y_indices, z_indices, indexing='ij')
+
+            # combine into a list of indices
+            cuboid_indices = np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T
+
+            result = p[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x)
+
+            # store the time history of the acoustic pressure
+            if (flags.record_p or flags.record_I or flags.record_I_avg):
+                if not flags.compute_directivity:
+                    # use the indices to index into p
+                    sensor_data[cuboid_index].p[..., file_index] = result
+                else:
+                    raise NotImplementedError('directivity not implemented at the moment')
+
+            # store the maximum acoustic pressure
+            if flags.record_p_max:
+                if file_index == 0:
+                    sensor_data[cuboid_index].p_max = result
+                else:
+                    sensor_data[cuboid_index].p_max = np.maximum(sensor_data[cuboid_index].p_max, result)
+
+            # store the minimum acoustic pressure
+            if flags.record_p_min:
+                if file_index == 0:
+                    sensor_data[cuboid_index].p_min = result
+                else:
+                    sensor_data[cuboid_index].p_min = np.minimum(sensor_data[cuboid_index].p_min, result)
+
+            # store the rms acoustic pressure
+            if flags.record_p_rms:
+                if file_index == 0:
+                    # the rms of a single sample is its magnitude
+                    sensor_data[cuboid_index].p_rms = np.abs(result)
+                else:
+                    sensor_data[cuboid_index].p_rms = np.sqrt((sensor_data[cuboid_index].p_rms**2 * file_index + result**2) / (file_index + 1))
+
+            # store the time history of the particle velocity on the staggered grid
+            if flags.record_u:
+                if (dim == 1):
+                    sensor_data[cuboid_index].ux[..., file_index] = ux_sgx[cuboid_indices]
+                elif (dim == 2):
+                    sensor_data[cuboid_index].ux[..., file_index] = ux_sgx[cuboid_indices]
+                    sensor_data[cuboid_index].uy[..., file_index] = uy_sgy[cuboid_indices]
+                elif (dim == 3):
+                    sensor_data[cuboid_index].ux[..., file_index] = ux_sgx[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x)
+                    sensor_data[cuboid_index].uy[..., file_index] = uy_sgy[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x)
+                    sensor_data[cuboid_index].uz[..., file_index] = uz_sgz[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x)
+                else:
+                    raise RuntimeError("Wrong dimensions")
+
+            # store the time history of the particle velocity
+            if flags.record_u_non_staggered or flags.record_I or flags.record_I_avg:
+                if (dim == 1):
+                    sensor_data[cuboid_index].ux_non_staggered[..., file_index] = ux_shifted[cuboid_indices]
+                elif (dim == 2):
+                    sensor_data[cuboid_index].ux_non_staggered[..., file_index] = ux_shifted[cuboid_indices]
+                    sensor_data[cuboid_index].uy_non_staggered[..., file_index] = uy_shifted[cuboid_indices]
+                elif (dim == 3):
+                    sensor_data[cuboid_index].ux_non_staggered[..., file_index] = ux_shifted[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x)
+                    sensor_data[cuboid_index].uy_non_staggered[..., file_index] = uy_shifted[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x)
+                    sensor_data[cuboid_index].uz_non_staggered[..., file_index] = uz_shifted[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x)
+                else:
+                    raise RuntimeError("Wrong dimensions")
+
+            # store the split components of the particle velocity
+            if flags.record_u_split_field:
+                if (dim == 2):
+
+                    # compute forward FFTs
+                    ux_k = record.x_shift_neg * np.fft.fftn(ux_sgx)
+                    uy_k = record.y_shift_neg * np.fft.fftn(uy_sgy)
+
+                    # ux compressional
+                    split_field = np.real(np.fft.ifftn(record.kx_norm**2 * ux_k + record.kx_norm * record.ky_norm * uy_k))
+                    sensor_data[cuboid_index].ux_split_p[..., file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')]
+
+                    # ux shear
+                    split_field = np.real(np.fft.ifftn((1.0 - record.kx_norm**2) * ux_k - record.kx_norm * record.ky_norm * uy_k))
+                    sensor_data[cuboid_index].ux_split_s[..., file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')]
+
+                    # uy compressional
+                    split_field = np.real(np.fft.ifftn(record.ky_norm * record.kx_norm * ux_k + record.ky_norm**2 * uy_k))
+                    sensor_data[cuboid_index].uy_split_p[..., file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')]
+
+                    # uy shear
+                    split_field = np.real(np.fft.ifftn(-record.ky_norm * record.kx_norm * ux_k + (1.0 - record.ky_norm**2) * uy_k))
+                    sensor_data[cuboid_index].uy_split_s[..., file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')]
+
+                elif (dim == 3):
+
+                    # compute forward FFTs
+                    ux_k = np.multiply(record.x_shift_neg, np.fft.fftn(ux_sgx), order='F')
+                    uy_k = np.multiply(record.y_shift_neg, np.fft.fftn(uy_sgy), order='F')
+                    uz_k = np.multiply(record.z_shift_neg, np.fft.fftn(uz_sgz), order='F')
+
+                    # ux compressional
+                    split_field = np.real(np.fft.ifftn(record.kx_norm**2 * ux_k +
+                                                       record.kx_norm * record.ky_norm * uy_k +
+                                                       record.kx_norm * record.kz_norm * uz_k))
+                    sensor_data[cuboid_index].ux_split_p[..., file_index] = split_field[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x)
+
+                    # ux shear
+                    split_field = np.real(np.fft.ifftn((1.0 - record.kx_norm**2) * ux_k -
+                                                       record.kx_norm * record.ky_norm * uy_k -
+                                                       record.kx_norm * record.kz_norm * uz_k))
+                    sensor_data[cuboid_index].ux_split_s[..., file_index] = split_field[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x)
+
+                    # uy compressional
+                    split_field = np.real(np.fft.ifftn(record.ky_norm * record.kx_norm * ux_k +
+                                                       record.ky_norm**2 * uy_k +
+                                                       record.ky_norm * record.kz_norm * uz_k))
+                    sensor_data[cuboid_index].uy_split_p[..., file_index] = split_field[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x)
+
+                    # uy shear
+                    split_field = np.real(np.fft.ifftn(-record.ky_norm * record.kx_norm * ux_k +
+                                                       (1.0 - record.ky_norm**2) * uy_k -
+                                                       record.ky_norm * record.kz_norm * uz_k))
+                    sensor_data[cuboid_index].uy_split_s[..., file_index] = split_field[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x)
+
+                    # uz compressional
+                    split_field = np.real(np.fft.ifftn(record.kz_norm * record.kx_norm * ux_k +
+                                                       record.kz_norm * record.ky_norm * uy_k +
+                                                       record.kz_norm**2 * uz_k))
+                    sensor_data[cuboid_index].uz_split_p[..., file_index] = split_field[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x)
+
+                    # uz shear
+                    split_field = np.real(np.fft.ifftn(-record.kz_norm * record.kx_norm * ux_k -
+                                                       record.kz_norm * record.ky_norm * uy_k +
+                                                       (1.0 - record.kz_norm**2) * uz_k))
+                    sensor_data[cuboid_index].uz_split_s[..., file_index] = split_field[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x)
+                else:
+                    raise RuntimeError("Wrong dimensions")
+
+    # =========================================================================
+    # CARTESIAN SENSOR MASK
+    # =========================================================================
+
+    # extract data from specified Cartesian coordinates using interpolation
+    # (record.tri and record.bc are the Delaunay triangulation and barycentric
+    # coordinates returned by gridDataFast3D)
+    else:
+
+        # store the time history of the acoustic pressure
+        if flags.record_p or flags.record_I or flags.record_I_avg:
+            if dim == 1:
+                sensor_data.p[:, file_index] = xp.interp(xp.squeeze(record.sensor_x), xp.squeeze(record.grid_x), xp.real(p))
+            else:
+                sensor_data.p[:, file_index] = np.sum(p[record.tri] * record.bc, axis=1)
+
+        # store the maximum acoustic pressure
+        # (note: np.interp expects (x_query, x_known, f_known), so the sensor
+        # positions come first and the field values last)
+        if flags.record_p_max:
+            if dim == 1:
+                if file_index == 0:
+                    sensor_data.p_max = np.interp(record.sensor_x, record.grid_x, p)
+                else:
+                    sensor_data.p_max = np.maximum(sensor_data.p_max, np.interp(record.sensor_x, record.grid_x, p))
+            else:
+                if file_index == 0:
+                    sensor_data.p_max = np.sum(p[record.tri] * record.bc, axis=1)
+                else:
+                    sensor_data.p_max = np.maximum(sensor_data.p_max, np.sum(p[record.tri] * record.bc, axis=1))
+
+        # store the minimum acoustic pressure
+        if flags.record_p_min:
+            if dim == 1:
+                if file_index == 0:
+                    sensor_data.p_min = np.interp(record.sensor_x, record.grid_x, p)
+                else:
+                    sensor_data.p_min = np.minimum(sensor_data.p_min, np.interp(record.sensor_x, record.grid_x, p))
+            else:
+                if file_index == 0:
+                    sensor_data.p_min = np.sum(p[record.tri] * record.bc, axis=1)
+                else:
+                    sensor_data.p_min = np.minimum(sensor_data.p_min, np.sum(p[record.tri] * record.bc, axis=1))
+
+        # store the rms acoustic pressure
+        if flags.record_p_rms:
+            if dim == 1:
+                sensor_data.p_rms = np.sqrt((sensor_data.p_rms**2 * (file_index - 0) + (np.interp(record.sensor_x, record.grid_x, p))**2) / (file_index + 1))
+            else:
+                sensor_data.p_rms[:] = np.sqrt((sensor_data.p_rms[:]**2 * (file_index - 0) +
+                                                (np.sum(p[record.tri] * record.bc, axis=1))**2) / (file_index + 1))
+
+        # store the time history of the particle velocity on the staggered grid
+        if flags.record_u:
+            if (dim == 1):
+                sensor_data.ux[:, file_index] = np.interp(record.sensor_x, record.grid_x, ux_sgx)
+            elif (dim == 2):
+                sensor_data.ux[:, file_index] = np.sum(ux_sgx[record.tri] * record.bc, axis=1)
+                sensor_data.uy[:, file_index] = np.sum(uy_sgy[record.tri] * record.bc, axis=1)
+            elif (dim == 3):
+                sensor_data.ux[:, file_index] = np.sum(ux_sgx[record.tri] * record.bc, axis=1)
+                sensor_data.uy[:, file_index] = np.sum(uy_sgy[record.tri] * record.bc, axis=1)
+                sensor_data.uz[:, file_index] = np.sum(uz_sgz[record.tri] * record.bc, axis=1)
+            else:
+                raise RuntimeError("Wrong dimensions")
+
+        # store the time history of the particle velocity
+        if flags.record_u_non_staggered or flags.record_I or flags.record_I_avg:
+            if (dim == 1):
+                sensor_data.ux_non_staggered[:, file_index] = np.interp(record.sensor_x, record.grid_x, ux_shifted)
+            elif (dim == 2):
+                sensor_data.ux_non_staggered[:, file_index] = np.sum(ux_shifted[record.tri] * record.bc, axis=1)
+                sensor_data.uy_non_staggered[:, file_index] = np.sum(uy_shifted[record.tri] * record.bc, axis=1)
+            elif (dim == 3):
+                sensor_data.ux_non_staggered[:, file_index] = np.sum(ux_shifted[record.tri] * record.bc, axis=1)
+                sensor_data.uy_non_staggered[:, file_index] = np.sum(uy_shifted[record.tri] * record.bc, axis=1)
+                sensor_data.uz_non_staggered[:, file_index] = np.sum(uz_shifted[record.tri] * record.bc, axis=1)
+            else:
+                raise RuntimeError("Wrong dimensions")
+
+        # store the maximum particle velocity
+        if flags.record_u_max:
+            if file_index == 0:
+                if (dim == 1):
+                    sensor_data.ux_max = np.interp(record.sensor_x, record.grid_x, ux_sgx)
+                elif (dim == 2):
+                    sensor_data.ux_max = np.sum(ux_sgx[record.tri] * record.bc, axis=1)
+                    sensor_data.uy_max = np.sum(uy_sgy[record.tri] * record.bc, axis=1)
+                elif (dim == 3):
+                    sensor_data.ux_max = np.sum(ux_sgx[record.tri] * record.bc, axis=1)
+                    sensor_data.uy_max = np.sum(uy_sgy[record.tri] * record.bc, axis=1)
+                    sensor_data.uz_max = np.sum(uz_sgz[record.tri] * record.bc, axis=1)
+                else:
+                    raise RuntimeError("Wrong dimensions")
+            else:
+                if (dim == 1):
+                    sensor_data.ux_max = np.maximum(sensor_data.ux_max, np.interp(record.sensor_x, record.grid_x, ux_sgx))
+                elif (dim == 2):
+                    sensor_data.ux_max = np.maximum(sensor_data.ux_max, np.sum(ux_sgx[record.tri] * record.bc, axis=1))
+                    sensor_data.uy_max = np.maximum(sensor_data.uy_max, np.sum(uy_sgy[record.tri] * record.bc, axis=1))
+                elif (dim == 3):
+                    sensor_data.ux_max = np.maximum(sensor_data.ux_max, np.sum(ux_sgx[record.tri] * record.bc, axis=1))
+                    sensor_data.uy_max = np.maximum(sensor_data.uy_max, np.sum(uy_sgy[record.tri] * record.bc, axis=1))
+                    sensor_data.uz_max = np.maximum(sensor_data.uz_max, np.sum(uz_sgz[record.tri] * record.bc, axis=1))
+                else:
+                    raise RuntimeError("Wrong dimensions")
+
+        # store the minimum particle velocity
+        if flags.record_u_min:
+            if file_index == 0:
+                if (dim == 1):
+                    sensor_data.ux_min = np.interp(record.sensor_x, record.grid_x, ux_sgx)
+                elif (dim == 2):
+                    sensor_data.ux_min = np.sum(ux_sgx[record.tri] * record.bc, axis=1)
+                    sensor_data.uy_min = np.sum(uy_sgy[record.tri] * record.bc, axis=1)
+                elif (dim == 3):
+                    sensor_data.ux_min = np.sum(ux_sgx[record.tri] * record.bc, axis=1)
+                    sensor_data.uy_min = np.sum(uy_sgy[record.tri] * record.bc, axis=1)
+                    sensor_data.uz_min = np.sum(uz_sgz[record.tri] * record.bc, axis=1)
+                else:
+                    raise RuntimeError("Wrong dimensions")
+            else:
+                if (dim == 1):
+                    sensor_data.ux_min = np.minimum(sensor_data.ux_min, np.interp(record.sensor_x, record.grid_x, ux_sgx))
+                elif (dim == 2):
+                    sensor_data.ux_min = np.minimum(sensor_data.ux_min, np.sum(ux_sgx[record.tri] * record.bc, axis=1))
+                    sensor_data.uy_min = np.minimum(sensor_data.uy_min, np.sum(uy_sgy[record.tri] * record.bc, axis=1))
+                elif (dim == 3):
+                    sensor_data.ux_min = np.minimum(sensor_data.ux_min, np.sum(ux_sgx[record.tri] * record.bc, axis=1))
+                    sensor_data.uy_min = np.minimum(sensor_data.uy_min, np.sum(uy_sgy[record.tri] * record.bc, axis=1))
+                    sensor_data.uz_min = np.minimum(sensor_data.uz_min, np.sum(uz_sgz[record.tri] * record.bc, axis=1))
+                else:
+                    raise RuntimeError("Wrong dimensions")
+
+        # store the rms particle velocity
+        if flags.record_u_rms:
+            if (dim == 1):
+                sensor_data.ux_rms = np.sqrt((sensor_data.ux_rms**2 * (file_index - 0) + (np.interp(record.sensor_x, record.grid_x, ux_sgx))**2) / (file_index + 1))
+            elif (dim == 2):
+                sensor_data.ux_rms[:] = np.sqrt((sensor_data.ux_rms[:]**2 * (file_index - 0) + (np.sum(ux_sgx[record.tri] * record.bc, axis=1))**2) / (file_index + 1))
+                sensor_data.uy_rms[:] = np.sqrt((sensor_data.uy_rms[:]**2 * (file_index - 0) + (np.sum(uy_sgy[record.tri] * record.bc, axis=1))**2) / (file_index + 1))
+            elif (dim == 3):
+                sensor_data.ux_rms[:] =
np.sqrt((sensor_data.ux_rms[:]**2 * (file_index - 0) + (np.sum(ux_sgx[record.tri] * record.bc, axis=1))**2) / (file_index +1)) + sensor_data.uy_rms[:] = np.sqrt((sensor_data.uy_rms[:]**2 * (file_index - 0) + (np.sum(uy_sgy[record.tri] * record.bc, axis=1))**2) / (file_index +1)) + sensor_data.uz_rms[:] = np.sqrt((sensor_data.uz_rms[:]**2 * (file_index - 0) + (np.sum(uz_sgz[record.tri] * record.bc, axis=1))**2) / (file_index +1)) + else: + raise RuntimeError("Wrong dimensions") + + # ========================================================================= + # RECORDED VARIABLES OVER ENTIRE GRID + # ========================================================================= + + # store the maximum acoustic pressure over all the grid elements + if flags.record_p_max_all and (not flags.use_cuboid_corners): + if (dim == 1): + if file_index == 0: + sensor_data.p_max_all = p[record.x1_inside:record.x2_inside] + else: + sensor_data.p_max_all = np.maximum(sensor_data.p_max_all, p[record.x1_inside:record.x2_inside]) + + elif (dim == 2): + if file_index == 0: + sensor_data.p_max_all = p[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside] + else: + sensor_data.p_max_all = np.maximum(sensor_data.p_max_all, p[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside]) + + elif (dim == 3): + if file_index == 0: + sensor_data.p_max_all = p[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside] + else: + sensor_data.p_max_all = np.maximum(sensor_data.p_max_all, + p[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside]) + else: + raise RuntimeError("Wrong dimensions") + + # store the minimum acoustic pressure over all the grid elements + if flags.record_p_min_all and (not flags.use_cuboid_corners): + if (dim == 1): + if file_index == 0: + sensor_data.p_min_all = p[record.x1_inside:record.x2_inside] + else: + sensor_data.p_min_all = np.minimum(sensor_data.p_min_all, p[record.x1_inside:record.x2_inside]) + + elif (dim == 2): + if file_index == 0: + sensor_data.p_min_all = p[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside] + else: + sensor_data.p_min_all = np.minimum(sensor_data.p_min_all, p[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside]) + + elif (dim == 3): + if file_index == 0: + sensor_data.p_min_all = p[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside] + else: + sensor_data.p_min_all = np.minimum(sensor_data.p_min_all, + p[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside]) + else: + raise RuntimeError("Wrong dimensions") + + # store the maximum particle velocity over all the grid elements + if flags.record_u_max_all and (not flags.use_cuboid_corners): + if (dim == 1): + if file_index == 0: + sensor_data.ux_max_all = ux_sgx[record.x1_inside:record.x2_inside] + else: + sensor_data.ux_max_all = np.maximum(sensor_data.ux_max_all, ux_sgx[record.x1_inside:record.x2_inside]) + + elif (dim == 2): + if file_index == 0: + sensor_data.ux_max_all = ux_sgx[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside] + sensor_data.uy_max_all = uy_sgy[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside] + else: + sensor_data.ux_max_all = np.maximum(sensor_data.ux_max_all, + ux_sgx[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside]) + sensor_data.uy_max_all = 
np.maximum(sensor_data.uy_max_all, + uy_sgy[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside]) + + elif (dim == 3): + if file_index == 0: + sensor_data.ux_max_all = ux_sgx[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside] + sensor_data.uy_max_all = uy_sgy[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside] + sensor_data.uz_max_all = uz_sgz[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside] + else: + sensor_data.ux_max_all = np.maximum(sensor_data.ux_max_all, + ux_sgx[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside]) + sensor_data.uy_max_all = np.maximum(sensor_data.uy_max_all, + uy_sgy[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside]) + sensor_data.uz_max_all = np.maximum(sensor_data.uz_max_all, + uz_sgz[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside]) + + else: + raise RuntimeError("Wrong dimensions") + + # store the minimum particle velocity over all the grid elements + if flags.record_u_min_all and (not flags.use_cuboid_corners): + if (dim == 1): + if file_index == 0: + sensor_data.ux_min_all = ux_sgx[record.x1_inside:record.x2_inside] + else: + sensor_data.ux_min_all = np.minimum(sensor_data.ux_min_all, ux_sgx[record.x1_inside:record.x2_inside]) + + elif (dim == 2): + if file_index == 0: + sensor_data.ux_min_all = ux_sgx[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside] + sensor_data.uy_min_all = uy_sgy[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside] + else: + sensor_data.ux_min_all = np.minimum(sensor_data.ux_min_all, + ux_sgx[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside]) + sensor_data.uy_min_all = np.minimum(sensor_data.uy_min_all, + uy_sgy[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside]) + + elif (dim == 3): + if file_index == 0: + sensor_data.ux_min_all = ux_sgx[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside] + sensor_data.uy_min_all = uy_sgy[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside] + sensor_data.uz_min_all = uz_sgz[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside] + else: + sensor_data.ux_min_all = np.minimum(sensor_data.ux_min_all, + ux_sgx[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside]) + sensor_data.uy_min_all = np.minimum(sensor_data.uy_min_all, + uy_sgy[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside]) + sensor_data.uz_min_all = np.minimum(sensor_data.uz_min_all, + uz_sgz[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside]) + else: + raise RuntimeError("Wrong dimensions") + + return sensor_data \ No newline at end of file diff --git a/kwave/ksource.py b/kwave/ksource.py index 95fd958e..bd6014bf 100644 --- a/kwave/ksource.py +++ b/kwave/ksource.py @@ -76,7 +76,8 @@ def validate(self, kgrid: kWaveGrid) -> None: None """ if self.p0 is not None: - if self.p0.shape != kgrid.k.shape: + if self.p0.shape != np.squeeze(kgrid.k).shape: + # throw an error if p0 is not the 
correct size raise ValueError("source.p0 must be the same size as the computational grid.") diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py new file mode 100644 index 00000000..b5a0106d --- /dev/null +++ b/kwave/kspaceFirstOrder1D.py @@ -0,0 +1,715 @@ +import warnings +from typing import Union + +try: + import cupy as cp +except ImportError: + cp = None + +import importlib + +import numpy as np +import scipy.fft +from scipy.interpolate import interp1d +from tqdm import tqdm + +from kwave.kgrid import kWaveGrid +from kwave.kmedium import kWaveMedium +from kwave.ksensor import kSensor +from kwave.ksource import kSource +from kwave.ktransducer import NotATransducer +from kwave.kWaveSimulation import kWaveSimulation +from kwave.kWaveSimulation_helper import extract_sensor_data +from kwave.options.simulation_execution_options import SimulationExecutionOptions +from kwave.options.simulation_options import SimulationOptions +from kwave.utils.data import scale_time +from kwave.utils.dotdictionary import dotdict +from kwave.utils.math import sinc +from kwave.utils.pml import get_pml +from kwave.utils.tictoc import TicToc + + +def get_array_module(verbose: bool = False): + """Get the appropriate array module (numpy or cupy) based on GPU + availability - if cupy is available and a GPU is present, cupy is used.""" + # check if cupy is installed + cupy_spec = importlib.util.find_spec("cupy") + if verbose: + print(cupy_spec) + + if cupy_spec is None: + # if cupy not installed, use numpy + return np, False + + # Try to import cupy and check for a GPU + try: + import cupy as cp + # this returns the number of visible GPUs + ngpus = cp.cuda.runtime.getDeviceCount() + if ngpus > 0: + return cp, True + except (ImportError, RuntimeError, AttributeError): + pass + + # Fallback (cupy but no gpu) to numpy + return np, False + + +def to_gpu(obj, verbose: bool = False): + """Convert all numpy arrays in the given object to cupy arrays.""" + if cp is None: + raise RuntimeError("CuPy is not available; cannot convert to GPU arrays.") + for name, val in vars(obj).items(): + if isinstance(val, np.ndarray): + if verbose: + print(f"Converting {name} to cupy array") + setattr(obj, name, cp.asarray(val)) + return obj + + +def dotdict_to_gpu(obj, verbose: bool = False): + """Convert all numpy arrays in the given dotdict to cupy arrays.""" + if cp is None: + raise RuntimeError("CuPy is not available; cannot convert to GPU arrays.") + for name, val in obj.items(): + if isinstance(val, np.ndarray): + if verbose: + print(f"Converting {name} to cupy array") + obj[name] = cp.asarray(val) + return obj + + +def dotdict_to_cpu(obj, verbose: bool = False): + """Convert all cupy arrays in the given dotdict to numpy arrays.""" + # case in which gpu is present, but cupy is not used + if cp is None: + return obj + for name, val in obj.items(): + if isinstance(val, cp.ndarray): + if verbose: + print(f"Converting {name} to numpy array") + obj[name] = val.get() + return obj + + +def kspace_first_order_1D(kgrid: kWaveGrid, + source: kSource, + sensor: Union[NotATransducer, kSensor], + medium: kWaveMedium, + simulation_options: SimulationOptions, + execution_options: SimulationExecutionOptions): + + """ + 1D time-domain simulation of wave propagation. + + DESCRIPTION: + kspaceFirstOrder1D simulates the time-domain propagation of + compressional waves through a one-dimensional homogeneous or + heterogeneous acoustic medium given four input structures: kgrid, + medium, source, and sensor. 
The computation is based on a first-order + k-space model which accounts for power law absorption and a + heterogeneous sound speed and density. If medium.BonA is specified, + cumulative nonlinear effects are also modelled. At each time-step + (defined by kgrid.dt and kgrid.Nt or kgrid.t_array), the acoustic + field parameters at the positions defined by sensor.mask are recorded + and stored. If kgrid.t_array is set to 'auto', this array is + automatically generated using the makeTime method of the kWaveGrid + class. An anisotropic absorbing boundary layer called a perfectly + matched layer (PML) is implemented to prevent waves that leave one + side of the domain being reintroduced from the opposite side (a + consequence of using the FFT to compute the spatial derivatives in + the wave equation). This allows infinite domain simulations to be + computed using small computational grids. + + For a homogeneous medium the formulation is exact and the time-steps + are only limited by the effectiveness of the perfectly matched layer. + For a heterogeneous medium, the solution represents a leap-frog + pseudospectral method with a k-space correction that improves the + accuracy of computing the temporal derivatives. This allows larger + time-steps to be taken for the same level of accuracy compared to + conventional pseudospectral time-domain methods. The computational + grids are staggered both spatially and temporally. + + An initial pressure distribution can be specified by assigning a + matrix (the same size as the computational grid) of arbitrary numeric + values to source.p0. A time varying pressure source can similarly be + specified by assigning a binary matrix (i.e., a matrix of 1's and 0's + with the same dimensions as the computational grid) to source.p_mask + where the 1's represent the grid points that form part of the source. + The time varying input signals are then assigned to source.p. This + can be a single time series (in which case it is applied to all + source elements), or a matrix of time series following the source + elements using MATLAB's standard column-wise linear matrix index + ordering. A time varying velocity source can be specified in an + analogous fashion, where the source location is specified by + source.u_mask, and the time varying input velocity is assigned to + source.ux. + + The field values are returned as arrays of time series at the sensor + locations defined by sensor.mask. This can be defined in three + different ways. (1) As a binary matrix (i.e., a matrix of 1's and 0's + with the same dimensions as the computational grid) representing the + grid points within the computational grid that will collect the data. + (2) As the grid coordinates of two opposing ends of a line in the + form [x1; x2]. This is equivalent to using a binary sensor mask + covering the same region, however, the output is indexed differently + as discussed below. (3) As a series of Cartesian coordinates within + the grid which specify the location of the pressure values stored at + each time step. If the Cartesian coordinates don't exactly match the + coordinates of a grid point, the output values are calculated via + interpolation. The Cartesian points must be given as a 1 by N matrix + corresponding to the x positions, where the Cartesian origin is + assumed to be in the center of the grid. If no output is required, + the sensor input can be replaced with an empty array []. 
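+
+    For illustration, a minimal sketch of the mask conventions described
+    above (a sketch only: the array shapes are assumptions inferred from
+    this description rather than a guaranteed interface):
+
+        import numpy as np
+        from kwave.ksensor import kSensor
+
+        sensor = kSensor()
+        sensor.record = ["p"]
+
+        # (1) binary mask: record at two of the Nx grid points
+        Nx = 512
+        binary_mask = np.zeros((1, Nx), dtype=bool)
+        binary_mask[0, [100, 400]] = True
+        sensor.mask = binary_mask
+
+        # (3) Cartesian mask: two x positions [m] measured from the grid
+        # centre; off-grid points are recovered by interpolation
+        sensor.mask = np.array([[-10e-3, 10e-3]])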
+
+    If sensor.mask is given as a set of Cartesian coordinates, the
+    computed sensor_data is returned in the same order. If sensor.mask is
+    given as a binary matrix, sensor_data is returned using MATLAB's
+    standard column-wise linear matrix index ordering. In both cases, the
+    recorded data is indexed as sensor_data(sensor_point_index,
+    time_index). For a binary sensor mask, the field values at a
+    particular time can be restored to the sensor positions within the
+    computational grid using unmaskSensorData. If sensor.mask is given as
+    a list of opposing ends of a line, the recorded data is indexed as
+    sensor_data(line_index).p(x_index, time_index), where x_index
+    corresponds to the grid index within the line, and line_index
+    corresponds to the number of lines if more than one is specified.
+
+    By default, the recorded acoustic pressure field is passed directly
+    to the output sensor_data. However, other acoustic parameters can
+    also be recorded by setting sensor.record to a cell array of the form
+    {'p', 'u', 'p_max', ...}. For example, both the particle velocity and
+    the acoustic pressure can be returned by setting sensor.record =
+    {'p', 'u'}. If sensor.record is given, the output sensor_data is
+    returned as a structure with the different outputs appended as
+    structure fields. For example, if sensor.record = {'p', 'p_final',
+    'p_max', 'u'}, the output would contain fields sensor_data.p,
+    sensor_data.p_final, sensor_data.p_max, and sensor_data.ux. Most of
+    the output parameters are recorded at the given sensor positions and
+    are indexed as sensor_data.field(sensor_point_index, time_index) or
+    sensor_data(line_index).field(x_index, time_index) if using a sensor
+    mask defined as opposing ends of a line. The exceptions are the
+    averaged quantities ('p_max', 'p_rms', 'u_max', 'u_rms', 'I_avg'),
+    the 'all' quantities ('p_max_all', 'p_min_all', 'u_max_all',
+    'u_min_all'), and the final quantities ('p_final', 'u_final'). The
+    averaged quantities are indexed as
+    sensor_data.p_max(sensor_point_index) or
+    sensor_data(line_index).p_max(x_index) if using line ends, while the
+    final and 'all' quantities are returned over the entire grid and are
+    always indexed as sensor_data.p_final(nx), regardless of the type of
+    sensor mask.
+
+    kspaceFirstOrder1D may also be used for time reversal image
+    reconstruction by assigning the time varying pressure recorded over
+    an arbitrary sensor surface to the input field
+    sensor.time_reversal_boundary_data. This data is then enforced in
+    time reversed order as a time varying Dirichlet boundary condition
+    over the sensor surface given by sensor.mask. The boundary data must
+    be indexed as sensor.time_reversal_boundary_data(sensor_point_index,
+    time_index). If sensor.mask is given as a set of Cartesian
+    coordinates, the boundary data must be given in the same order. An
+    equivalent binary sensor mask (computed using nearest neighbour
+    interpolation) is then used to place the pressure values into the
+    computational grid at each time step. If sensor.mask is given as a
+    binary matrix of sensor points, the boundary data must be ordered
+    using MATLAB's standard column-wise linear matrix indexing. If no
+    additional inputs are required, the source input can be replaced with
+    an empty array [].
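+
+    As an illustrative sketch of the time reversal pattern described
+    above (forward_data is hypothetical shorthand for the sensor_data
+    returned by an earlier forward simulation, not a name defined by
+    this module):
+
+        # enforce the recorded pressure, in time reversed order, as a
+        # Dirichlet boundary condition over the same sensor mask
+        sensor.time_reversal_boundary_data = forward_data["p"]  # (num_sensor_points, Nt)
+
+        # remove the forward-simulation sources so the field is driven
+        # only by the reversed boundary data
+        source.p0 = None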
+
+    Acoustic attenuation compensation can also be included during time
+    reversal image reconstruction by assigning the absorption parameters
+    medium.alpha_coeff and medium.alpha_power and reversing the sign of
+    the absorption term by setting medium.alpha_sign = [-1, 1]. This
+    forces the propagating waves to grow according to the absorption
+    parameters instead of decay. The reconstruction should then be
+    regularised by assigning a filter to medium.alpha_filter (this can be
+    created using getAlphaFilter).
+
+    Note: To run a simple photoacoustic image reconstruction example
+    using time reversal (that commits the 'inverse crime' of using the
+    same numerical parameters and model for data simulation and image
+    reconstruction), the sensor_data returned from a k-Wave simulation
+    can be passed directly to sensor.time_reversal_boundary_data with the
+    input fields source.p0 and source.p removed or set to zero.
+
+    USAGE:
+        sensor_data = kspace_first_order_1D(kgrid, source, sensor, medium,
+                                            simulation_options=simulation_options,
+                                            execution_options=execution_options)
+    """
+
+    # =========================================================================
+    # CHECK INPUT STRUCTURES AND OPTIONAL INPUTS
+    # =========================================================================
+
+    # start the timer and store the start time
+    timer = TicToc()
+    timer.tic()
+
+    if execution_options.is_gpu_simulation:
+        xp, using_gpu = get_array_module()
+        if not using_gpu:
+            warnings.warn("GPU simulation requested but no GPU available. Falling back to CPU (NumPy).")
+        else:
+            print("Using cupy (GPU)")
+    else:
+        xp = np
+        using_gpu = False
+
+    # run script to check inputs and create the required arrays
+    k_sim = kWaveSimulation(kgrid=kgrid, source=source, sensor=sensor, medium=medium,
+                            simulation_options=simulation_options)
+
+    # this will create the sensor_data dotdict
+    k_sim.input_checking("kspaceFirstOrder1D")
+
+    # TODO: Move to input checking
+    # if the user passed a 1D time series, promote it to shape (1, Nt)
+    if getattr(k_sim.source, "ux", False) is not False:
+        # only promote if it has fewer than 2 dims
+        if k_sim.source.ux.ndim < 2:
+            warnings.warn("source.ux was passed as a 1D time series; promoting it to shape (1, Nt).")
+            k_sim.source.ux = np.atleast_2d(k_sim.source.ux)
+
+    if getattr(k_sim.source, "p", False) is not False:
+        if k_sim.source.p.ndim < 2:
+            warnings.warn("source.p was passed as a 1D time series; promoting it to shape (1, Nt).")
+            k_sim.source.p = np.atleast_2d(k_sim.source.p)
+
+    # =========================================================================
+    # DEFINE CAST TYPE
+    # =========================================================================
+
+    # aliases from kWaveSimulation class
+    sensor_data = k_sim.sensor_data
+    options = k_sim.options
+    record = k_sim.record
+
+    # define the extract_options dictionary
+    if k_sim.use_sensor:
+        # run sub-function to extract the required data
+        extract_options = dotdict({'record_u_non_staggered': k_sim.record.u_non_staggered,
+                                   'record_u_split_field': k_sim.record.u_split_field,
+                                   'record_I': k_sim.record.I,
+                                   'record_I_avg': k_sim.record.I_avg,
+                                   'binary_sensor_mask': k_sim.binary_sensor_mask,
+                                   'record_p': k_sim.record.p,
+                                   'record_p_max': k_sim.record.p_max,
+                                   'record_p_min': k_sim.record.p_min,
+                                   'record_p_rms': k_sim.record.p_rms,
+                                   'record_p_max_all': k_sim.record.p_max_all,
+                                   'record_p_min_all': k_sim.record.p_min_all,
+                                   'record_u': k_sim.record.u,
+                                   'record_u_max': k_sim.record.u_max,
'record_u_min': k_sim.record.u_min, + 'record_u_rms': k_sim.record.u_rms, + 'record_u_max_all': k_sim.record.u_max_all, + 'record_u_min_all': k_sim.record.u_min_all, + 'compute_directivity': False}) + + # ========================================================================= + # CALCULATE MEDIUM PROPERTIES ON STAGGERED GRID + # ========================================================================= + + # interpolate the values of the density at the staggered grid locations + # where sgx = (x + dx/2) + rho0 = np.squeeze(k_sim.rho0) + m_rho0: int = rho0.ndim + + if (m_rho0 > 0 and options.use_sg): + + points = np.squeeze(k_sim.kgrid.x_vec) + + # rho0 is heterogeneous and staggered grids are used + f = interp1d(points, rho0, bounds_error=False, fill_value=np.nan) + rho0_sgx = f(points + k_sim.kgrid.dx / 2.0) + + # set values outside of the interpolation range to original values + rho0_sgx[np.isnan(rho0_sgx)] = rho0[np.isnan(rho0_sgx)] + + else: + # rho0 is homogeneous or staggered grids are not used + rho0_sgx = k_sim.rho0 + + # invert rho0 so it doesn't have to be done each time step + rho0_sgx_inv = 1.0 / rho0_sgx + + # clear unused variables + del rho0_sgx + + # ========================================================================= + # PREPARE DERIVATIVE AND PML OPERATORS + # ========================================================================= + + # get the regular PML operators based on the reference sound speed and PML settings + Nx = k_sim.kgrid.Nx + dx = k_sim.kgrid.dx + dt = k_sim.kgrid.dt + Nt = k_sim.kgrid.Nt + + pml_x_alpha = options.pml_x_alpha + pml_x_size = options.pml_x_size + c_ref = k_sim.c_ref + + kx_vec = np.squeeze(k_sim.kgrid.k_vec[0]) + + c0 = np.asarray(medium.sound_speed) + + # get the PML operators based on the reference sound speed and PML settings + pml_x = get_pml(Nx, dx, dt, c_ref, pml_x_size, pml_x_alpha, False, 1) + pml_x_sgx = get_pml(Nx, dx, dt, c_ref, pml_x_size, pml_x_alpha, True, 1) + + pml_x = np.squeeze(pml_x) + pml_x_sgx = np.squeeze(pml_x_sgx) + + # define the k-space derivative operator + ddx_k = scipy.fft.ifftshift(1j * kx_vec) + + # define the staggered grid shift operators (the option options.use_sg exists for debugging) + if options.use_sg: + ddx_k_shift_pos = scipy.fft.ifftshift( np.exp( 1j * kx_vec * dx / 2.0)) + ddx_k_shift_neg = scipy.fft.ifftshift( np.exp(-1j * kx_vec * dx / 2.0)) + else: + ddx_k_shift_pos = 1.0 + ddx_k_shift_neg = 1.0 + + # create k-space operator (the option options.use_kspace exists for debugging) + if options.use_kspace: + kappa = scipy.fft.ifftshift(sinc(c_ref * kgrid.k * dt / 2.0)) + kappa = np.squeeze(kappa) + source_kappa = 1.0 + if (hasattr(k_sim, 'source_p') and getattr(k_sim.source, 'p_mode', None) == 'additive') or \ + (hasattr(k_sim, 'source_ux') and getattr(k_sim.source, 'u_mode', None) == 'additive'): + source_kappa = scipy.fft.ifftshift(np.cos (c_ref * kgrid.k * dt / 2.0)) + else: + kappa = 1.0 + source_kappa = 1.0 + + + # ========================================================================= + # CREATE INDEX VARIABLES + # ========================================================================= + + # setup the time index variable + index_start: int = 0 + index_step: int = 1 + index_end: int = Nt + + # These should be zero indexed + if hasattr(k_sim, 's_source_pos_index') and k_sim.s_source_pos_index is not None: + k_sim.s_source_pos_index = np.squeeze(k_sim.s_source_pos_index) - int(1) + + if hasattr(k_sim, 'u_source_pos_index') and k_sim.u_source_pos_index is not None: + 
k_sim.u_source_pos_index = np.squeeze(k_sim.u_source_pos_index) - int(1) + + if hasattr(k_sim, 'p_source_pos_index') and k_sim.p_source_pos_index is not None: + k_sim.p_source_pos_index = np.squeeze(k_sim.p_source_pos_index) - int(1) + + if hasattr(k_sim, 's_source_sig_index') and k_sim.s_source_sig_index is not None: + k_sim.s_source_sig_index = np.squeeze(k_sim.s_source_sig_index) - int(1) + + if hasattr(k_sim, 'u_source_sig_index') and k_sim.u_source_sig_index is not None: + k_sim.u_source_sig_index = np.squeeze(k_sim.u_source_sig_index) - int(1) + + if hasattr(k_sim, 'p_source_sig_index') and k_sim.p_source_sig_index is not None: + k_sim.p_source_sig_index = np.squeeze(k_sim.p_source_sig_index) - int(1) + + # make the sensor record index zero indexed + record_start_index = k_sim.sensor.record_start_index - int(1) + + # ========================================================================= + # PREPARE VISUALISATIONS + # ========================================================================= + + # # pre-compute suitable axes scaling factor + # if options.plot_layout or options.plot_sim + # [x_sc, scale, prefix] = scaleSI(max(kgrid.x)); ##ok + # end + + # # run subscript to plot the simulation layout if 'PlotLayout' is set to true + # if options.plot_layout + # kspaceFirstOrder_plotLayout; + # end + + # # initialise the figure used for animation if 'PlotSim' is set to 'true' + # if options.plot_sim + # kspaceFirstOrder_initialiseFigureWindow; + # end + + # # initialise movie parameters if 'RecordMovie' is set to 'true' + # if options.record_movie + # kspaceFirstOrder_initialiseMovieParameters; + # end + + # ========================================================================= + # DATA CASTING + # ========================================================================= + + # preallocate the loop variables using the castZeros anonymous function + # (this creates a matrix of zeros in the data type specified by data_cast) + if not (options.data_cast == 'off'): + my_type = xp.single + my_complex_type = xp.complex64 + else: + my_type = xp.double + my_complex_type = xp.complex128 + + grid_shape = (Nx, ) + kdim = xp.asarray(kgrid.dim) + + # preallocate the loop variables + p = xp.zeros(grid_shape, dtype=my_type) + rhox = xp.zeros(grid_shape, dtype=my_type) + ux_sgx = xp.zeros(grid_shape, dtype=my_type) + p_k = xp.zeros(grid_shape, dtype=my_type) + + c0 = xp.asarray(xp.squeeze(medium.sound_speed), dtype=my_type) + rho0 = xp.asarray(rho0, dtype=my_type) + if k_sim.medium.BonA is not None: + BonA = xp.asarray(xp.squeeze(k_sim.medium.BonA), dtype=my_type) + rho0_sgx_inv = xp.asarray(rho0_sgx_inv, dtype=my_type) + + if k_sim.equation_of_state == 'lossless': + pass + elif k_sim.equation_of_state == 'absorbing': + medium.absorb_tau = xp.asarray(xp.squeeze(k_sim.medium.absorb_tau), dtype=my_type) + medium.absorb_eta = xp.asarray(xp.squeeze(k_sim.medium.absorb_eta), dtype=my_type) + medium.absorb_nabla1 = xp.asarray(xp.squeeze(k_sim.medium.absorb_nabla1), dtype=my_type) + medium.absorb_nabla2 = xp.asarray(xp.squeeze(k_sim.medium.absorb_nabla2), dtype=my_type) + elif k_sim.equation_of_state == 'stokes': + medium.absorb_tau = xp.asarray(xp.squeeze(k_sim.medium.absorb_tau), dtype=my_type) + else: + raise ValueError("Unknown equation_of_state: " + str(k_sim.equation_of_state)) + + pml_x = xp.asarray(pml_x, dtype=my_complex_type) + pml_x_sgx = xp.asarray(pml_x_sgx, dtype=my_type) + + ddx_k_shift_pos = xp.asarray(ddx_k_shift_pos, dtype=my_complex_type) + ddx_k_shift_neg = xp.asarray(ddx_k_shift_neg, 
dtype=my_complex_type) + kappa = xp.asarray(kappa, dtype=my_complex_type) + ddx_k = xp.asarray(ddx_k, dtype=my_complex_type) + + dt = xp.asarray(dt, dtype=my_type) + dx = xp.asarray(dx, dtype=my_type) + + sensor_mask_index = xp.asarray(k_sim.sensor_mask_index, dtype=int) + + if k_sim.source.p0 is not None: + p0 = xp.asarray(k_sim.source.p0) + + if using_gpu: + record = to_gpu(record) + sensor_data = dotdict_to_gpu(sensor_data) + + if k_sim.source_ux is not False: + source_mat = xp.zeros((Nx,), dtype=my_type) + if k_sim.source_p is not False: + source_mat = xp.zeros((Nx,), dtype=my_type) + + if k_sim.nonuniform_grid and not options.use_finite_difference: + dxudxn = xp.asarray(k_sim.kgrid.dxudxn, dtype=my_type) + dxudxn_sgx = xp.asarray(k_sim.kgrid.dxudxn_sgx, dtype=my_type) + + # ========================================================================= + # LOOP THROUGH TIME STEPS + # ========================================================================= + + # update command line status + t0 = timer.toc() + t0_scale = scale_time(t0) + print('\tprecomputation completed in', t0_scale) + print('\tstarting time loop...') + + # start time loop + for t_index in tqdm(range(index_start, index_end, index_step)): + + # calculate ux at the next time step using dp/dx at the current time step + if not k_sim.nonuniform_grid and not options.use_finite_difference: + + # calculate gradient using the k-space method on a regular grid + ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - + dt * rho0_sgx_inv * xp.real(xp.fft.ifft(ddx_k * ddx_k_shift_pos * kappa * p_k))) + + elif options.use_finite_difference: + match options.use_finite_difference: + case 2: + # calculate gradient using second-order accurate finite + # difference scheme (including half step forward) + dpdx = (xp.append(p[1:], 0.0) - p) / dx + ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - dt * rho0_sgx_inv * dpdx ) + + case 4: + # calculate gradient using fourth-order accurate finite + # difference scheme (including half step forward) + dpdx = (xp.insert(p[:-1], 0, 0) - 27.0 * p + 27 * xp.append(p[1:], 0.0) - xp.append(p[2:], [0, 0])) / (24.0 * dx) + ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - dt * rho0_sgx_inv * dpdx ) + + else: + # calculate gradient using the k-space method on a non-uniform grid + # via the mapped pseudospectral method + ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - + dt * rho0_sgx_inv * dxudxn_sgx * xp.real(xp.fft.ifft(ddx_k * ddx_k_shift_pos * kappa * p_k)) ) + + + # add in the velocity source term + if (k_sim.source_ux is not False and t_index < k_sim.source.ux.shape[1]): + match k_sim.source.u_mode: + case 'dirichlet': + # enforce the source values as a dirichlet boundary condition + ux_sgx[k_sim.u_source_pos_index] = k_sim.source.ux[k_sim.u_source_sig_index, t_index] + case 'additive': + # extract the source values into a matrix + source_mat.fill(0) + source_mat[k_sim.u_source_pos_index] = k_sim.source.ux[k_sim.u_source_sig_index, t_index] + # apply the k-space correction + source_mat = xp.real(xp.fft.ifft(source_kappa * xp.fft.fft(source_mat))) + # add the source values to the existing field values including the k-space correction + ux_sgx = ux_sgx + source_mat + case 'additive-no-correction': + # add the source values to the existing field values + ux_sgx[k_sim.u_source_pos_index] = ux_sgx[k_sim.u_source_pos_index] + k_sim.source.ux[k_sim.u_source_sig_index, t_index] + + + # calculate du/dx at the next time step + if not k_sim.nonuniform_grid and not options.use_finite_difference: + # calculate gradient using the k-space method on a 
regular grid
+            duxdx = xp.real(xp.fft.ifftn(ddx_k * ddx_k_shift_neg * kappa * xp.fft.fftn(ux_sgx, axes=(0,)), axes=(0,)))
+
+        elif options.use_finite_difference:
+            match options.use_finite_difference:
+                case 2:
+                    # calculate gradient using second-order accurate finite
+                    # difference scheme (including half step backward);
+                    # the backward shift prepends a zero so duxdx[i] = (u[i] - u[i-1]) / dx
+                    duxdx = (ux_sgx - xp.append(0, ux_sgx[:-1])) / dx
+
+                case 4:
+                    # calculate gradient using fourth-order accurate finite
+                    # difference scheme (including half step backward)
+                    duxdx = (xp.append([0, 0], ux_sgx[:-2]) - 27.0 * xp.append(0, ux_sgx[:-1]) + 27.0 * ux_sgx - xp.append(ux_sgx[1:], 0)) / (24.0 * dx)
+
+        else:
+            # calculate gradients using a non-uniform grid via the mapped
+            # pseudospectral method
+            duxdx = dxudxn * xp.real(xp.fft.ifftn(ddx_k * ddx_k_shift_neg * kappa * xp.fft.fftn(ux_sgx, axes=(0,)), axes=(0,)))
+
+        # calculate rhox at the next time step
+        if not k_sim.is_nonlinear:
+            # use linearised mass conservation equation
+            rhox = pml_x * (pml_x * rhox - dt * rho0 * duxdx)
+        else:
+            # use nonlinear mass conservation equation (explicit calculation)
+            rhox = pml_x * (pml_x * rhox - dt * (2.0 * rhox + rho0) * duxdx)
+
+        # add in the pre-scaled pressure source term as a mass source
+        if (k_sim.source_p is not False and t_index < k_sim.source.p.shape[1]):
+            match k_sim.source.p_mode:
+                case 'dirichlet':
+                    # enforce the source values as a dirichlet boundary condition
+                    rhox[k_sim.p_source_pos_index] = k_sim.source.p[k_sim.p_source_sig_index, t_index]
+                case 'additive':
+                    # extract the source values into a matrix
+                    source_mat.fill(0)
+                    source_mat[k_sim.p_source_pos_index] = k_sim.source.p[k_sim.p_source_sig_index, t_index]
+                    # apply the k-space correction
+                    source_mat = xp.real(xp.fft.ifft(source_kappa * xp.fft.fft(source_mat)))
+                    # add the source values to the existing field values
+                    # including the k-space correction
+                    rhox = rhox + source_mat
+                case 'additive-no-correction':
+                    # add the source values to the existing field values
+                    rhox[k_sim.p_source_pos_index] = rhox[k_sim.p_source_pos_index] + k_sim.source.p[k_sim.p_source_sig_index, t_index]
+
+        # equation of state
+        if not k_sim.is_nonlinear:
+            match k_sim.equation_of_state:
+                case 'lossless':
+                    # calculate p using a linear adiabatic equation of state
+                    p = xp.squeeze(c0**2) * xp.squeeze(rhox)
+
+                case 'absorbing':
+                    # calculate p using a linear absorbing equation of state
+                    p = xp.squeeze(c0**2 * (rhox +
+                                            medium.absorb_tau * xp.real(xp.fft.ifftn(medium.absorb_nabla1 * xp.fft.fftn(rho0 * duxdx, axes=(0,)), axes=(0,))) -
+                                            medium.absorb_eta * xp.real(xp.fft.ifftn(medium.absorb_nabla2 * xp.fft.fftn(rhox, axes=(0,)), axes=(0,)))))
+
+                case 'stokes':
+                    # calculate p using a linear absorbing equation of state
+                    # assuming alpha_power = 2
+                    p = c0**2 * (rhox + medium.absorb_tau * rho0 * duxdx)
+
+        else:
+            match k_sim.equation_of_state:
+                case 'lossless':
+                    # calculate p using a nonlinear adiabatic equation of state
+                    p = c0**2 * (rhox + BonA * rhox**2 / (2.0 * rho0))
+
+                case 'absorbing':
+                    # calculate p using a nonlinear absorbing equation of state
+                    p = c0**2 * (rhox +
+                                 medium.absorb_tau * xp.real(xp.fft.ifftn(medium.absorb_nabla1 * xp.fft.fftn(rho0 * duxdx, axes=(0,)), axes=(0,))) -
+                                 medium.absorb_eta * xp.real(xp.fft.ifftn(medium.absorb_nabla2 * xp.fft.fftn(rhox, axes=(0,)), axes=(0,))) +
+                                 medium.BonA * rhox**2 / (2.0 * rho0))
+
+                case 'stokes':
+                    # calculate p using a nonlinear absorbing equation of state
+                    # assuming alpha_power = 2
+                    p = c0**2 * (rhox +
+                                 medium.absorb_tau * rho0 * duxdx +
+                                 BonA * rhox**2 / (2.0 * rho0))
+
+        # enforce initial conditions if k_sim.source.p0 is defined instead of time varying sources
+        if t_index == 0 and k_sim.source.p0 is not None:
+
+            p0 = xp.squeeze(p0)
+
+            # add the initial pressure to rho as a mass source
+            p = p0
+            rhox = p0 / xp.squeeze(c0)**2
+
+            # compute u(t = t1 - dt/2) based on u(dt/2) = -u(-dt/2) which forces u(t = t1) = 0
+            if not options.use_finite_difference:
+
+                # calculate gradient using the k-space method on a regular grid
+                ux_sgx = dt * rho0_sgx_inv * xp.real(xp.fft.ifftn(ddx_k * ddx_k_shift_pos * kappa * xp.fft.fftn(p, axes=(0,)), axes=(0,))) / 2.0
+                p_k = xp.fft.fftn(p, axes=(0,))
+                p_k = xp.squeeze(p_k)
+
+            else:
+                match options.use_finite_difference:
+                    case 2:
+                        # calculate gradient using second-order accurate finite difference scheme (including half step forward)
+                        dpdx = (xp.append(p[1:], 0.0) - p) / dx
+                        ux_sgx = dt * rho0_sgx_inv * dpdx / 2.0
+
+                    case 4:
+                        # calculate gradient using fourth-order accurate finite difference scheme (including half step forward)
+                        dpdx = (xp.insert(p[:-1], 0, 0) - 27.0 * p + 27.0 * xp.append(p[1:], 0.0) - xp.append(p[2:], [0, 0])) / (24.0 * dx)
+                        ux_sgx = dt * rho0_sgx_inv * dpdx / 2.0
+
+        else:
+            # precompute fft of p here so p can be modified for visualisation
+            p_k = xp.fft.fftn(p, axes=(0,))
+
+        # extract required sensor data from the pressure and particle velocity
+        # fields if the number of time steps elapsed is greater than record_start_index
+        if k_sim.use_sensor and (t_index >= record_start_index):
+
+            # update index for data storage
+            file_index: int = t_index - record_start_index
+
+            # run sub-function to extract the required data from the acoustic variables
+            sensor_data = extract_sensor_data(kdim, xp, sensor_data, file_index, sensor_mask_index,
+                                              extract_options, record, p, ux_sgx)
+
+    if using_gpu:
+        sensor_data = dotdict_to_cpu(sensor_data)
+
+    return sensor_data
diff --git a/kwave/utils/filters.py b/kwave/utils/filters.py
index 2cdb1ba3..16dba7a4 100644
--- a/kwave/utils/filters.py
+++ b/kwave/utils/filters.py
@@ -588,6 +588,10 @@ def smooth(a: np.ndarray, restore_max: Optional[bool] = False, window_type: Opti
     win, _ = get_win(grid_size, type_=window_type, rotation=DEF_USE_ROTATION, symmetric=window_symmetry)
     win = np.abs(win)
 
+    if win.ndim == 2 and a.ndim == 1:
+        # if there is a mismatch between the 1D grid and the window, make sure win has the same shape as the grid
+        win = np.squeeze(win)
+
     # rotate window if input mat is (1, N)
     if a.shape[0] == 1:  # is row?
diff --git a/kwave/utils/signals.py b/kwave/utils/signals.py
index 3ad280f1..a7aaffdd 100644
--- a/kwave/utils/signals.py
+++ b/kwave/utils/signals.py
@@ -56,7 +56,7 @@ def add_noise(signal: np.ndarray, snr: float, mode="rms"):
 @typechecker
 def get_win(
-    N: Union[int, np.ndarray, Tuple[int, int], Tuple[int, int, int], List[Int[kt.ScalarLike, ""]]],
+    N: Union[int, np.ndarray, Tuple[int], Tuple[int, int], Tuple[int, int, int], List[Int[kt.ScalarLike, ""]]],
     # TODO: replace and refactor for scipy.signal.get_window
     # https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.get_window.html#scipy.signal.get_window
     type_: str,  # TODO change this to enum in the future
@@ -142,13 +142,17 @@ def cosine_series(n: int, N: int, coeffs: List[float]) -> np.ndarray:
     # if a square window is required, replace grid sizes with smallest size and
     # store a copy of the original size
-    if square and (N.size != 1):
+    if square and (np.asarray(N).size != 1):
         N_orig = np.copy(N)
         L = min(N)
         N[:] = L
 
     # create the window
-    if N.size == 1:
+    if np.asarray(N).size == 1:
+
+        # cast to a plain int, covering ints, single-value tuples, and size-1 arrays
+        N = int(np.asarray(N).item())
+
         # TODO: what should this behaviour be if N is a list of ints? make windows of multiple lengths?
         n = np.arange(0, N)
@@ -303,7 +307,7 @@ def cosine_series(n: int, N: int, coeffs: List[float]) -> np.ndarray:
     # enlarge the window if required
-    if square and (N.size != 1):
+    if square and (np.asarray(N).size != 1):
         L = N[0]
         win_sq = win
         win = np.zeros(N_orig)
diff --git a/pyproject.toml b/pyproject.toml
index a1d29f3f..217e5cef 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -32,7 +32,8 @@ dependencies = [
     "matplotlib==3.10.3",
     "beartype==0.22.4",
     "jaxtyping==0.3.2",
-    "deprecated>=1.2.14"
+    "deprecated>=1.2.14",
+    "tqdm==4.67.1"
 ]
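The Tuple[int] overload added to get_win above means a 1-D window can be requested with a one-element tuple as well as a plain int. A quick sketch of the intended equivalence; 'Hanning' is just one example of the MATLAB-style window names get_win accepts:

import numpy as np
from kwave.utils.signals import get_win

win_a, cg_a = get_win(64, 'Hanning')     # scalar size
win_b, cg_b = get_win((64,), 'Hanning')  # single-element tuple, same window

assert np.allclose(np.squeeze(win_a), np.squeeze(win_b))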
diff --git a/tests/matlab_test_data_collectors/python_testers/getWin_test.py b/tests/matlab_test_data_collectors/python_testers/getWin_test.py
index 21072f60..4396727d 100644
--- a/tests/matlab_test_data_collectors/python_testers/getWin_test.py
+++ b/tests/matlab_test_data_collectors/python_testers/getWin_test.py
@@ -1,3 +1,4 @@
+import logging
 import os
 from pathlib import Path
@@ -12,7 +13,7 @@ def test_get_win():
     test_data_file = os.path.join(Path(__file__).parent, "collectedValues/getWin.mat")
     reader = TestRecordReader(test_data_file)
     for i in range(len(reader)):
-        # logging.log(logging.INFO, "i: => %d", i)
+        logging.log(logging.INFO, "i: => %d", i)
         N = reader.expected_value_of("N")
         input_args = reader.expected_value_of("input_args")
@@ -30,16 +31,18 @@ def test_get_win():
         N = np.squeeze(N)
 
-        # logging.log(logging.INFO, N, type_, param, rotation, symmetric, square, win)
+        logging.log(logging.INFO, "N=%s, type_=%s, param=%s, rotation=%s, symmetric=%s, square=%s",
+                    N, type_, param, rotation, symmetric, square)
 
-        win_py, cg_py = get_win(N, type_, param=param, rotation=rotation, symmetric=symmetric, square=square)
+        if (np.isscalar(N) and N > 1) or (isinstance(N, np.ndarray) and (N > 1).all()):
+            win_py, cg_py = get_win(N, type_, param=param, rotation=rotation, symmetric=symmetric, square=square)
 
-        cg = reader.expected_value_of("cg")
-        win = reader.expected_value_of("win")
-        win_py = np.squeeze(win_py)
-        assert np.shape(win_py) == np.shape(win)
-        assert np.allclose(win_py, win, equal_nan=True)
-        assert np.allclose(cg_py, cg, equal_nan=True)
+            cg = reader.expected_value_of("cg")
+            win = reader.expected_value_of("win")
+            win_py = np.squeeze(win_py)
+            assert np.shape(win_py) == np.shape(win)
+            assert np.allclose(win_py, win, equal_nan=True)
+            assert np.allclose(cg_py, cg, equal_nan=True)
         reader.increment()
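Finally, a standalone numpy sketch (not part of the patch) checking the staggered-grid finite-difference stencils mirrored from the case 2 and case 4 branches of the 1-D solver above; both approximate dp/dx on the forward half-step grid, and the fourth-order stencil is written with the same sign convention as the second-order one:

import numpy as np

Nx, dx = 64, 0.05
x = np.arange(Nx) * dx
p = np.sin(x)

# second-order forward-staggered difference, as in the case 2 branch
dpdx2 = (np.append(p[1:], 0.0) - p) / dx

# fourth-order forward-staggered difference, as in the case 4 branch
dpdx4 = (-np.append(p[2:], [0.0, 0.0]) + 27.0 * np.append(p[1:], 0.0)
         - 27.0 * p + np.append(0.0, p[:-1])) / (24.0 * dx)

exact = np.cos(x + dx / 2.0)   # analytic derivative on the staggered grid
interior = slice(2, Nx - 2)    # ignore the zero-padded boundary points
assert np.allclose(dpdx2[interior], exact[interior], atol=1e-3)
assert np.allclose(dpdx4[interior], exact[interior], atol=1e-6)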