Skip to content

Commit eb338a3

Browse files
Merge pull request #407 from puneetmatharu/patch-failing-pytorch-tests
Remove PyTorch float16 testing when the bf16 fpmath mode is enabled
2 parents 10ab0ad + bfd6e27 commit eb338a3

2 files changed

Lines changed: 7 additions & 4 deletions

File tree

ML-Frameworks/pytorch-aarch64/examples/run_unit_tests.sh

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -18,13 +18,15 @@
1818
# *******************************************************************************
1919

2020
if [[ "${ONEDNN_DEFAULT_FPMATH_MODE:-}" == "BF16" ]]; then
21-
OMP_NUM_THREADS=16 python -m unittest pytorch/test/test_mkldnn.py -k lower_precision -k bf16 -k bfloat16 -k float16
21+
# Run bfloat16 tests but carefully ignore float16
22+
OMP_NUM_THREADS=16 python3 -m pytest -q pytorch/test/test_mkldnn.py -k 'bfloat16 or ((lower_precision or bf16) and not float16)'
2223
else
23-
OMP_NUM_THREADS=16 python -m unittest pytorch/test/test_mkldnn.py
24+
OMP_NUM_THREADS=16 python3 -m pytest -q pytorch/test/test_mkldnn.py
2425
fi
2526

2627
if [[ "${ONEDNN_DEFAULT_FPMATH_MODE:-}" == "BF16" ]]; then
27-
OMP_NUM_THREADS=16 python -m unittest pytorch/test/test_transformers.py -k bfloat16 -k float16
28+
# Run bfloat16 tests but carefully ignore float16
29+
OMP_NUM_THREADS=16 python3 -m pytest -q pytorch/test/test_transformers.py -k 'bfloat16 or ((lower_precision or bf16) and not float16)'
2830
else
29-
OMP_NUM_THREADS=16 python -m unittest pytorch/test/test_transformers.py
31+
OMP_NUM_THREADS=16 python3 -m pytest -q pytorch/test/test_transformers.py
3032
fi

ML-Frameworks/pytorch-aarch64/requirements.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@ pyaml~=24.9.0
2222
python-dateutil~=2.9.0.post0
2323
pytz==2024.2
2424
PyYAML~=6.0.2
25+
pytest~=8.4.2
2526
regex==2024.9.11
2627
requests~=2.32.3
2728
safetensors~=0.4.5

0 commit comments

Comments (0)