Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion httomolibgpu/misc/rescale.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@

from httomolibgpu.misc.utils import (
__check_variable_type,
__check_if_data_3D_array,
__check_if_data_correct_type,
)

Expand Down
82 changes: 71 additions & 11 deletions httomolibgpu/prep/normalize.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
"""Modules for raw projection data normalization"""

from httomolibgpu import cupywrapper
import numpy as np

cp = cupywrapper.cp
cupy_run = cupywrapper.cupy_run
Expand All @@ -32,8 +33,8 @@
else:
mean = Mock()

from typing import Union
from numpy import float32
from typing import Union, Optional
from numpy import float32, int64
from httomolibgpu.misc.utils import (
__check_variable_type,
__check_if_data_3D_array,
Expand All @@ -49,7 +50,8 @@ def dark_flat_field_correction(
darks: cp.ndarray,
flats_multiplier: Union[float, int] = 1.0,
darks_multiplier: Union[float, int] = 1.0,
cutoff: Union[float, int] = 10.0,
upper_bound: Optional[Union[float, int]] = None,
lower_bound: Optional[Union[float, int]] = None,
) -> cp.ndarray:
"""
Perform dark/flat field correction of raw projection data.
Expand All @@ -66,8 +68,10 @@ def dark_flat_field_correction(
A multiplier to apply to flats, can work as an intensity compensation constant.
darks_multiplier: float,int
A multiplier to apply to darks, can work as an intensity compensation constant.
cutoff : float,int
Permitted maximum value for the normalised data.
upper_bound : Optional[Union[float, int]]
All values above the upper bound are set to the provided value. Default None.
lower_bound : Optional[Union[float, int]]
All values below the lower bound are set to the provided value. Default None.

Returns
-------
Expand All @@ -92,10 +96,21 @@ def dark_flat_field_correction(
__check_variable_type(
darks_multiplier, [int, float], "darks_multiplier", [], methods_name
)
__check_variable_type(cutoff, [int, float], "cutoff", [], methods_name)
###################################
__check_variable_type(
upper_bound, [int, float, type(None)], "upper_bound", [], methods_name
)
__check_variable_type(
lower_bound, [int, float, type(None)], "lower_bound", [], methods_name
)

_check_valid_input_flats_darks(flats, darks)
###################################

data_elements_num = int(np.prod(data.shape))
if upper_bound is None:
upper_bound = 1e12
if lower_bound is None:
lower_bound = -1e12

dark0 = cp.empty(darks.shape[1:], dtype=float32)
flat0 = cp.empty(flats.shape[1:], dtype=float32)
Expand All @@ -114,12 +129,12 @@ def dark_flat_field_correction(
}
float v = (float(data) - float(darks))/denom;
"""
kernel += "if (v > cutoff) v = cutoff;\n"
kernel += "if (v < -cutoff) v = cutoff;\n"
kernel += "if (v > upper_bound) v = upper_bound;\n"
kernel += "if (v <= lower_bound) v = lower_bound;\n"
kernel += "out = v;\n"

normalisation_kernel = cp.ElementwiseKernel(
"T data, U flats, U darks, raw float32 cutoff",
"T data, U flats, U darks, raw float32 upper_bound, raw float32 lower_bound",
"float32 out",
kernel,
kernel_name,
Expand All @@ -128,7 +143,52 @@ def dark_flat_field_correction(
no_return=True,
)

normalisation_kernel(data, flat0, dark0, float32(cutoff), out)
count_greater_kernel = cp.ReductionKernel(
in_params="T data, raw float32 upper_bound",
out_params="int32 out",
map_expr="data >= upper_bound ? 1 : 0", # map each element → 1 or 0
reduce_expr="a + b", # sum them
post_map_expr="out = a", # final result
identity="0",
name="count_greater",
)

count_smaller_kernel = cp.ReductionKernel(
in_params="T data, raw float32 lower_bound",
out_params="int32 out",
map_expr="data <= lower_bound ? 1 : 0", # map each element → 1 or 0
reduce_expr="a + b", # sum them
post_map_expr="out = a", # final result
identity="0",
name="count_smaller",
)

normalisation_kernel(data, flat0, dark0, upper_bound, lower_bound, out)

# Count the amount of clipping and raise warnings if required
clipped_percentage_warning = (
50.0 # warning if more clipped values than given percentage
)

clipped_total_up = int(count_greater_kernel(out, float32(upper_bound)))
clipped_up_percent = clipped_total_up / data_elements_num * 100

if clipped_up_percent >= clipped_percentage_warning:
print(
"Warning! The output data of 'dark_flat_field_correction' method contains \033[31m{}\033[0m percent of 'upper_bound' clipped data.".format(
int(clipped_up_percent)
)
)

clipped_total_lower = int(count_smaller_kernel(out, float32(lower_bound)))
clipped_down_percent = clipped_total_lower / data_elements_num * 100

if clipped_down_percent >= clipped_percentage_warning:
print(
"Warning! The output data of 'dark_flat_field_correction' method contains \033[31m{}\033[0m percent of 'lower_bound' clipped data.".format(
int(clipped_down_percent)
)
)

return out

Expand Down
60 changes: 60 additions & 0 deletions tests/test_prep/test_normalize.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,66 @@ def test_dark_flat_field_correction_1D_raises(data, flats, darks, ensure_clean_m
dark_flat_field_correction(data, _data_1d, darks)


def test_dark_flat_field_upper_bound(data, flats, darks, ensure_clean_memory):
    # --- dark/flat field correction with only an upper clipping bound ---#
    corrected_gpu = dark_flat_field_correction(
        cp.copy(data),
        flats,
        darks,
        flats_multiplier=1,
        darks_multiplier=1,
        upper_bound=0.8,
        lower_bound=None,
    )
    result = cp.asnumpy(corrected_gpu)

    # output must stay float32 and C-contiguous, and match reference statistics
    assert result.dtype == np.float32
    assert result.flags.c_contiguous
    assert_allclose(np.mean(result), 0.6904512, rtol=1e-06)
    assert_allclose(np.mean(result, axis=(1, 2)).sum(), 124.28119, rtol=1e-06)


def test_dark_flat_field_lower_bound(data, flats, darks, ensure_clean_memory):
    # --- dark/flat field correction with only a lower clipping bound ---#
    corrected_gpu = dark_flat_field_correction(
        cp.copy(data),
        flats,
        darks,
        flats_multiplier=1,
        darks_multiplier=1,
        upper_bound=None,
        lower_bound=0.1,
    )
    result = cp.asnumpy(corrected_gpu)

    # output must stay float32 and C-contiguous, and match reference statistics
    assert result.dtype == np.float32
    assert result.flags.c_contiguous
    assert_allclose(np.mean(result), 0.8285064, rtol=1e-06)
    assert_allclose(np.mean(result, axis=(1, 2)).sum(), 149.13116, rtol=1e-06)


def test_dark_flat_field_upper_and_lower_bound(data, flats, darks, ensure_clean_memory):
    # --- dark/flat field correction with both clipping bounds applied ---#
    corrected_gpu = dark_flat_field_correction(
        cp.copy(data),
        flats,
        darks,
        flats_multiplier=1,
        darks_multiplier=1,
        upper_bound=0.7,
        lower_bound=0.2,
    )
    result = cp.asnumpy(corrected_gpu)

    # output must stay float32 and C-contiguous, and match reference statistics
    assert result.dtype == np.float32
    assert result.flags.c_contiguous
    assert_allclose(np.mean(result), 0.6218485, rtol=1e-06)
    assert_allclose(np.mean(result, axis=(1, 2)).sum(), 111.93274, rtol=1e-06)


def test_dark_flat_field_minus_log_correction(data, flats, darks, ensure_clean_memory):
# --- testing dark_flat_field_correction with minus_log ---#
data_normalize = dark_flat_field_correction(cp.copy(data), flats, darks)
Expand Down
10 changes: 5 additions & 5 deletions tests/test_prep/test_stripe.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@

def test_remove_stripe_ti_on_data(data, flats, darks):
# --- testing the CuPy implementation from TomoCupy ---#
data_norm = dark_flat_field_correction(cp.copy(data), flats, darks, cutoff=10)
data_norm = dark_flat_field_correction(cp.copy(data), flats, darks)
data_norm = minus_log(data_norm)

data_after_stripe_removal = cp.asnumpy(remove_stripe_ti(data_norm))
Expand Down Expand Up @@ -54,7 +54,7 @@ def free_postprocess(

def test_remove_stripe_fw_on_data(data, flats, darks):
# --- testing the CuPy implementation from TomoCupy ---#
data_norm = dark_flat_field_correction(data, flats, darks, cutoff=10)
data_norm = dark_flat_field_correction(data, flats, darks)
data_norm = minus_log(data_norm)

data_after_stripe_removal = remove_stripe_fw(
Expand Down Expand Up @@ -183,7 +183,7 @@ def test_remove_stripe_ti_dims_change(angles, det_y, det_x):

def test_stripe_removal_sorting_cupy(data, flats, darks):
# --- testing the CuPy port of TomoPy's implementation ---#
data_norm = dark_flat_field_correction(cp.copy(data), flats, darks, cutoff=10)
data_norm = dark_flat_field_correction(cp.copy(data), flats, darks)
data_norm = minus_log(data_norm)

corrected_data = cp.asnumpy(remove_stripe_based_sorting(data_norm))
Expand All @@ -199,7 +199,7 @@ def test_stripe_removal_sorting_cupy(data, flats, darks):


def test_stripe_raven_cupy(data, flats, darks):
data_norm = dark_flat_field_correction(cp.copy(data), flats, darks, cutoff=10)
data_norm = dark_flat_field_correction(cp.copy(data), flats, darks)
data_norm = minus_log(data_norm)

data_after_raven_gpu = cp.asnumpy(raven_filter(data_norm))
Expand Down Expand Up @@ -289,7 +289,7 @@ def test_raven_filter_performance(ensure_clean_memory):

def test_remove_all_stripe_on_data(data, flats, darks):
# --- testing the CuPy implementation from TomoCupy ---#
data_norm = dark_flat_field_correction(cp.copy(data), flats, darks, cutoff=10)
data_norm = dark_flat_field_correction(cp.copy(data), flats, darks)
data_norm = minus_log(data_norm)

data_after_stripe_removal = cp.asnumpy(remove_all_stripe(data_norm))
Expand Down
Loading
Loading