From 0dfb9879602e9a03d42861b2976abbe6004d7dad Mon Sep 17 00:00:00 2001 From: Baris Demir Date: Wed, 13 May 2026 13:58:04 +0100 Subject: [PATCH] Arm backend: Stabilize MobileNetV3 fp16 TOSA test This patch fixes a flaky MobileNetV3 fp16 TOSA test caused by unseeded random input data and a tolerance that was slightly too tight for observed fp16 backend differences. The test input is now generated with a local fixed torch.Generator so the model comparison is deterministic across runs. The fp16 absolute tolerance is relaxed from 5e-2 to 6e-2, covering the observed maximum absolute error while keeping the check narrow. The fix was verified by running the targeted MobileNetV3 fp16 TOSA pytest case. Signed-off-by: Baris Demir Change-Id: Ie89950adc108fd2d93763e75b28c8668f7435ab8 --- backends/arm/test/models/test_mobilenet_v3_arm.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/backends/arm/test/models/test_mobilenet_v3_arm.py b/backends/arm/test/models/test_mobilenet_v3_arm.py index eccdc839e62..da9f99010b1 100644 --- a/backends/arm/test/models/test_mobilenet_v3_arm.py +++ b/backends/arm/test/models/test_mobilenet_v3_arm.py @@ -47,14 +47,17 @@ def test_mv3_tosa_FP(): @pytest.mark.slow def test_mv3_tosa_FP_fp16(): - inputs_fp16 = tuple(t.to(torch.float16) for t in model_inputs) + input_tensor_fp16 = torch.rand( + 1, 3, 232, 232, generator=torch.Generator().manual_seed(0) + ) + inputs_fp16 = (normalize(input_tensor_fp16).to(torch.float16),) pipeline = TosaPipelineFP[input_t]( mv3_fp16, inputs_fp16, aten_op=[], exir_op=[], use_to_edge_transform_and_lower=True, - atol=5e-2, + atol=6e-2, ) pipeline.run()