ci: fix xpu skip condition for test_model_parallel_beam_search (#35742)

`return unittest.skip()` used in `test_model_parallel_beam_search` in the
skip condition for XPU did not actually mark the test as skipped when
running under pytest:
* 148 passed, 1 skipped

Other tests use `self.skipTest()`. Reusing this approach and moving the
condition outside the loop (since it does not depend on the loop variable)
skips the test correctly for XPU:
* 148 skipped

Secondly, `device_map="auto"` is now implemented for XPU for IPEX>=2.5 and
torch>=2.6, so we can now enable these tests on XPU with new enough
IPEX/torch versions.

Fixes: 1ea3ad1ae ("[tests] use `torch_device` instead of `auto` for model testing (#29531)")

Signed-off-by: Dmitry Rogozhkin <dmitry.v.rogozhkin@intel.com>
This commit is contained in:
Dmitry Rogozhkin 2025-01-17 07:47:27 -08:00 committed by GitHub
parent 8ad6bd0f1b
commit 7d4b3ddde4
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 9 additions and 4 deletions

View file

@@ -865,7 +865,7 @@ def is_ninja_available():
return True
def is_ipex_available():
def is_ipex_available(min_version: str = ""):
def get_major_and_minor_from_version(full_version):
return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor)
@@ -880,6 +880,8 @@ def is_ipex_available():
f" but PyTorch {_torch_version} is found. Please switch to the matching version and run again."
)
return False
if min_version:
return version.parse(_ipex_version) >= version.parse(min_version)
return True

View file

@@ -24,6 +24,7 @@ import warnings
import numpy as np
import pytest
from packaging import version
from parameterized import parameterized
from transformers import AutoConfig, is_torch_available, pipeline
@@ -44,6 +45,7 @@ from transformers.testing_utils import (
slow,
torch_device,
)
from transformers.utils import is_ipex_available
from ..test_modeling_common import floats_tensor, ids_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
@@ -675,10 +677,11 @@ class GenerationTesterMixin:
@require_torch_multi_accelerator
@pytest.mark.generate
def test_model_parallel_beam_search(self):
for model_class in self.all_generative_model_classes:
if "xpu" in torch_device:
return unittest.skip(reason="device_map='auto' does not work with XPU devices")
if "xpu" in torch_device:
if not (is_ipex_available("2.5") or version.parse(torch.__version__) >= version.parse("2.6")):
self.skipTest(reason="device_map='auto' does not work with XPU devices")
for model_class in self.all_generative_model_classes:
if model_class._no_split_modules is None:
continue