From eb3133fd13c0d35e4dcd902d6d96bb5253bf2fa9 Mon Sep 17 00:00:00 2001 From: TFLM-bot Date: Tue, 13 May 2025 14:03:00 +0000 Subject: [PATCH] Sync from upstream TF. --- tensorflow/lite/kernels/internal/reference/floor.h | 8 +++++--- tensorflow/lite/kernels/internal/reference/logistic.h | 9 +++++---- tensorflow/lite/kernels/internal/reference/round.h | 7 ++++--- tensorflow/lite/kernels/internal/reference/tanh.h | 9 ++++----- 4 files changed, 18 insertions(+), 15 deletions(-) diff --git a/tensorflow/lite/kernels/internal/reference/floor.h b/tensorflow/lite/kernels/internal/reference/floor.h index 0693fd42987..3ef844b5249 100644 --- a/tensorflow/lite/kernels/internal/reference/floor.h +++ b/tensorflow/lite/kernels/internal/reference/floor.h @@ -23,13 +23,15 @@ namespace tflite { namespace reference_ops { -inline void Floor(const RuntimeShape& input_shape, const float* input_data, - const RuntimeShape& output_shape, float* output_data) { +template <typename T> +inline void Floor(const RuntimeShape& input_shape, const T* input_data, + const RuntimeShape& output_shape, T* output_data) { const int flat_size = MatchingFlatSize(input_shape, output_shape); for (int i = 0; i < flat_size; i++) { int offset = i; - output_data[offset] = std::floor(input_data[offset]); + output_data[offset] = + static_cast<T>(std::floor(static_cast<float>(input_data[offset]))); } } diff --git a/tensorflow/lite/kernels/internal/reference/logistic.h b/tensorflow/lite/kernels/internal/reference/logistic.h index 64b7133bec6..8a621869f1a 100644 --- a/tensorflow/lite/kernels/internal/reference/logistic.h +++ b/tensorflow/lite/kernels/internal/reference/logistic.h @@ -27,8 +27,9 @@ limitations under the License. 
namespace tflite { namespace reference_ops { -inline void Logistic(const RuntimeShape& input_shape, const float* input_data, - const RuntimeShape& output_shape, float* output_data) { +template <typename T> +inline void Logistic(const RuntimeShape& input_shape, const T* input_data, + const RuntimeShape& output_shape, T* output_data) { const float cutoff_upper = 16.619047164916992188f; const float cutoff_lower = -9.f; @@ -43,7 +44,7 @@ inline void Logistic(const RuntimeShape& input_shape, const float* input_data, // optimized kernels. (check the definition of scalar_logistic_op) for (int i = 0; i < flat_size; i++) { - float val = input_data[i]; + T val = input_data[i]; float result; if (val > cutoff_upper) { result = 1.0f; @@ -52,7 +53,7 @@ inline void Logistic(const RuntimeShape& input_shape, const float* input_data, } else { result = 1.f / (1.f + std::exp(-val)); } - output_data[i] = result; + output_data[i] = static_cast<T>(result); } } diff --git a/tensorflow/lite/kernels/internal/reference/round.h b/tensorflow/lite/kernels/internal/reference/round.h index 9bd8f3f2b23..9f26a00d17e 100644 --- a/tensorflow/lite/kernels/internal/reference/round.h +++ b/tensorflow/lite/kernels/internal/reference/round.h @@ -34,15 +34,16 @@ inline float RoundToNearest(float value) { } } -inline void Round(const RuntimeShape& input_shape, const float* input_data, - const RuntimeShape& output_shape, float* output_data) { +template <typename Scalar> +inline void Round(const RuntimeShape& input_shape, const Scalar* input_data, + const RuntimeShape& output_shape, Scalar* output_data) { const int flat_size = MatchingFlatSize(input_shape, output_shape); for (int i = 0; i < flat_size; ++i) { // Note that this implementation matches that of tensorFlow tf.round // and corresponds to the bankers rounding method. // cfenv (for fesetround) is not yet supported universally on Android, so // using a work around. 
- output_data[i] = RoundToNearest(input_data[i]); + output_data[i] = static_cast<Scalar>(RoundToNearest(input_data[i])); } } diff --git a/tensorflow/lite/kernels/internal/reference/tanh.h b/tensorflow/lite/kernels/internal/reference/tanh.h index 3a05c474dd3..d8a14d9b541 100644 --- a/tensorflow/lite/kernels/internal/reference/tanh.h +++ b/tensorflow/lite/kernels/internal/reference/tanh.h @@ -26,14 +26,13 @@ limitations under the License. namespace tflite { namespace reference_ops { -inline void Tanh(const RuntimeShape& input_shape, const float* input_data, - const RuntimeShape& output_shape, float* output_data) { +template <typename T> +inline void Tanh(const RuntimeShape& input_shape, const T* input_data, + const RuntimeShape& output_shape, T* output_data) { const int flat_size = MatchingFlatSize(input_shape, output_shape); for (int i = 0; i < flat_size; i++) { - float val = input_data[i]; - float result = std::tanh(val); - output_data[i] = result; + output_data[i] = static_cast<T>(std::tanh(input_data[i])); } }