[Past CI] 🔥 Leave Past CI failures in the past 🔥 (#20861)

* torch.jit._state
* Fix past CI
* Fix for perceiver
* Fix REALM
* Fix for Bloom
* Fix for SwinModel
* Fix for TrajectoryTransformerModel
* Fix for test_wav2vec2_with_lm
* make style

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>

commit 5fa0b17c3d (parent e35bc46af6)
11 changed files with 53 additions and 6 deletions
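Most of the changes below follow a single gating pattern: compute a torch-version flag once in `transformers.pytorch_utils`, warn in the modeling code when the running torch is too old, and skip the affected tests. A minimal standalone sketch of that pattern (it mirrors the diff below but is not itself a file from the PR; `torch` and `packaging` are assumed to be installed):

```python
# Sketch of the version-gating pattern applied throughout this PR.
import logging

import torch
from packaging import version

logger = logging.getLogger(__name__)

# Same flag the PR adds to transformers.pytorch_utils.
parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version)
is_torch_less_than_1_9 = parsed_torch_version_base < version.parse("1.9.0")

# Same import-time warning the PR adds to modeling_mctct.py.
if is_torch_less_than_1_9:
    logger.warning(
        f"You are using torch=={torch.__version__}, but torch>=1.9.0 is required to use MCTCTModel."
        " Please upgrade torch."
    )
```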
```diff
@@ -31,6 +31,11 @@ performance for many languages that also transfers well to LibriSpeech.*

 This model was contributed by [cwkeam](https://huggingface.co/cwkeam). The original code can be found [here](https://github.com/flashlight/wav2letter/tree/main/recipes/mling_pl).

+Tips:
+
+- The PyTorch version of this model is only available in torch 1.9 and higher.
+
+
 ## MCTCTConfig

 [[autodoc]] MCTCTConfig
```
```diff
@@ -33,12 +33,19 @@ from ...modeling_utils import (
     find_pruneable_heads_and_indices,
     prune_linear_layer,
 )
+from ...pytorch_utils import is_torch_less_than_1_9
 from ...utils import logging
 from .configuration_mctct import MCTCTConfig


 logger = logging.get_logger(__name__)

+if is_torch_less_than_1_9:
+    logger.warning(
+        f"You are using torch=={torch.__version__}, but torch>=1.9.0 is required to use MCTCTModel. Please upgrade"
+        " torch."
+    )
+
 _HIDDEN_STATES_START_POSITION = 1

 _CONFIG_FOR_DOC = "MCTCTConfig"
```
```diff
@@ -3203,9 +3203,9 @@ class PerceiverImagePreprocessor(AbstractPreprocessor):
         if self.prep_type != "patches":
             # move channels to last dimension, as the _build_network_inputs method below expects this
             if inputs.ndim == 4:
-                inputs = torch.permute(inputs, (0, 2, 3, 1))
+                inputs = inputs.permute(0, 2, 3, 1)
             elif inputs.ndim == 5:
-                inputs = torch.permute(inputs, (0, 1, 3, 4, 2))
+                inputs = inputs.permute(0, 1, 3, 4, 2)
             else:
                 raise ValueError("Unsupported data format for conv1x1.")

```
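The Perceiver change above swaps the functional call `torch.permute(inputs, dims)` for the tensor method `inputs.permute(*dims)`; the method form has been available far longer, while the top-level function is missing from the older torch releases the Past CI runs against. A small illustrative check of the equivalence (not part of the PR):

```python
import torch

x = torch.randn(2, 3, 4, 5)         # e.g. (batch, channels, height, width)
method = x.permute(0, 2, 3, 1)      # channels moved to the last dimension
if hasattr(torch, "permute"):       # the functional form only exists on newer torch
    assert torch.equal(method, torch.permute(x, (0, 2, 3, 1)))
print(method.shape)                 # torch.Size([2, 4, 5, 3])
```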
```diff
@@ -1361,7 +1361,7 @@ class RealmScorer(RealmPreTrainedModel):
         # [batch_size, num_candidates, retriever_proj_size]
         candidate_score = candidate_score.view(-1, self.config.num_candidates, self.config.retriever_proj_size)
         # [batch_size, num_candidates]
-        relevance_score = torch.einsum("BD,BND->BN", query_score, candidate_score)
+        relevance_score = torch.einsum("bd,bnd->bn", query_score, candidate_score)

         if not return_dict:
             return relevance_score, query_score, candidate_score
```
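The REALM change only lowercases the einsum subscripts, since older torch releases do not accept uppercase letters in einsum equations; the contraction itself is unchanged: each query vector is dotted against every candidate vector. A standalone check with illustrative shapes (not REALM's actual configuration values):

```python
import torch

batch_size, num_candidates, proj_size = 2, 4, 8                         # illustrative sizes only
query_score = torch.randn(batch_size, proj_size)                        # [batch, dim]
candidate_score = torch.randn(batch_size, num_candidates, proj_size)    # [batch, candidates, dim]

relevance_score = torch.einsum("bd,bnd->bn", query_score, candidate_score)
manual = (query_score[:, None, :] * candidate_score).sum(-1)            # same contraction, spelled out
assert torch.allclose(relevance_score, manual, atol=1e-6)
print(relevance_score.shape)  # torch.Size([2, 4])
```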
```diff
@@ -28,6 +28,7 @@ logger = logging.get_logger(__name__)
 parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version)

 is_torch_less_than_1_8 = parsed_torch_version_base < version.parse("1.8.0")
+is_torch_less_than_1_9 = parsed_torch_version_base < version.parse("1.9.0")
 is_torch_greater_or_equal_than_1_10 = parsed_torch_version_base >= version.parse("1.10")
 is_torch_less_than_1_11 = parsed_torch_version_base < version.parse("1.11")

```
```diff
@@ -37,7 +37,10 @@ if is_torch_available():
         BloomModel,
         BloomTokenizerFast,
     )
-    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_10
+    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_10, is_torch_less_than_1_9
+else:
+    is_torch_greater_or_equal_than_1_10 = False
+    is_torch_less_than_1_9 = True


 @require_torch
@@ -502,7 +505,7 @@ class BloomEmbeddingTest(unittest.TestCase):
         self.path_bigscience_model = "bigscience/bigscience-small-testing"

     @unittest.skipIf(
-        not is_torch_available() or not is_torch_greater_or_equal_than_1_10,
+        not is_torch_greater_or_equal_than_1_10,
         "Test failed with torch < 1.10 (`LayerNormKernelImpl` not implemented for `BFloat16`)",
     )
     @require_torch
@@ -737,6 +740,9 @@ class BloomEmbeddingTest(unittest.TestCase):
             self.assertAlmostEqual(EMBEDDINGS_DS_AFTER_LN[key][idx], output_dict_norm[key][idx], places=1)

     @require_torch
+    @unittest.skipIf(
+        is_torch_less_than_1_9, reason="Test failed with torch < 1.9 (`min_cuda` not implemented for `BFloat16`)"
+    )
     def test_hidden_states_transformers(self):
         cuda_available = torch.cuda.is_available()
         model = BloomModel.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype="auto").to(
```
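The test-side edits above (and the analogous ones in the test files below) pair the import of `is_torch_less_than_1_9` with a fallback assignment in the `else:` branch of `if is_torch_available():`. The fallback is needed because `unittest.skipIf` arguments are evaluated when the module is imported, so the name must exist even in an environment without torch. An illustrative, self-contained version of the pattern (not a file from the PR):

```python
import unittest

from transformers import is_torch_available

if is_torch_available():
    from transformers.pytorch_utils import is_torch_less_than_1_9
else:
    # Harmless placeholder: torch-requiring tests are skipped anyway, but the name
    # must exist for the decorator below to be evaluated at import time.
    is_torch_less_than_1_9 = True


class ExampleVersionGatedTest(unittest.TestCase):
    @unittest.skipIf(is_torch_less_than_1_9, "needs torch >= 1.9")
    def test_something(self):
        self.assertTrue(True)
```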
```diff
@@ -31,6 +31,9 @@ if is_torch_available():
     import torch

     from transformers import MCTCTForCTC, MCTCTModel, MCTCTProcessor
+    from transformers.pytorch_utils import is_torch_less_than_1_9
+else:
+    is_torch_less_than_1_9 = True


 class MCTCTModelTester:
@@ -261,6 +264,7 @@ class MCTCTModelTester:


 @require_torch
+@unittest.skipIf(is_torch_less_than_1_9, "MCTCT is only available in torch v1.9+")
 class MCTCTModelTest(ModelTesterMixin, unittest.TestCase):
     all_model_classes = (MCTCTForCTC, MCTCTModel) if is_torch_available() else ()
     test_pruning = False
```
```diff
@@ -32,6 +32,9 @@ if is_torch_available():

     from transformers import SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel
     from transformers.models.swin.modeling_swin import SWIN_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.pytorch_utils import is_torch_less_than_1_9
+else:
+    is_torch_less_than_1_9 = True

 if is_vision_available():
     from PIL import Image
@@ -253,6 +256,10 @@ class SwinModelTest(ModelTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model(*config_and_inputs)

+    @unittest.skipIf(is_torch_less_than_1_9, reason="This test fails for SwinModel when torch < 1.9")
+    def test_training_gradient_checkpointing(self):
+        super().test_training_gradient_checkpointing()
+
     def test_backbone(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_backbone(*config_and_inputs)
```
```diff
@@ -35,6 +35,9 @@ if is_torch_available():
     from transformers.models.trajectory_transformer.modeling_trajectory_transformer import (
         TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
     )
+    from transformers.pytorch_utils import is_torch_less_than_1_9
+else:
+    is_torch_less_than_1_9 = True


 class TrajectoryTransformerModelTester:
@@ -195,6 +198,7 @@ class TrajectoryTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, un
         ).loss
         loss.backward()

+    @unittest.skipIf(is_torch_less_than_1_9, reason="This test fails for TrajectoryTransformerModel when torch < 1.9")
     def test_training_gradient_checkpointing(self):
         if not self.model_tester.is_training:
             return
```
```diff
@@ -70,6 +70,9 @@ if is_torch_available():
         _compute_mask_indices,
         _sample_negative_indices,
     )
+    from transformers.pytorch_utils import is_torch_less_than_1_9
+else:
+    is_torch_less_than_1_9 = True


 if is_torchaudio_available():
@@ -1640,6 +1643,10 @@ class Wav2Vec2ModelIntegrationTest(unittest.TestCase):

     @require_pyctcdecode
     @require_torchaudio
+    @unittest.skipIf(
+        is_torch_less_than_1_9,
+        reason="`torchaudio.functional.resample` needs torchaudio >= 0.9 which requires torch >= 1.9",
+    )
     def test_wav2vec2_with_lm(self):
         ds = load_dataset("common_voice", "es", split="test", streaming=True)
         sample = next(iter(ds))
@@ -1664,6 +1671,10 @@ class Wav2Vec2ModelIntegrationTest(unittest.TestCase):

     @require_pyctcdecode
     @require_torchaudio
+    @unittest.skipIf(
+        is_torch_less_than_1_9,
+        reason="`torchaudio.functional.resample` needs torchaudio >= 0.9 which requires torch >= 1.9",
+    )
     def test_wav2vec2_with_lm_pool(self):
         ds = load_dataset("common_voice", "es", split="test", streaming=True)
         sample = next(iter(ds))
```
```diff
@@ -696,7 +696,9 @@ class ModelTesterMixin:

         torch._C._jit_clear_class_registry()
         torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore()
-        torch.jit._state._clear_class_state()
+        # torch 1.8 has no `_clear_class_state` in `torch.jit._state`
+        if hasattr(torch.jit._state, "_clear_class_state"):
+            torch.jit._state._clear_class_state()

     def _create_and_check_torchscript(self, config, inputs_dict):
         if not self.test_torchscript:
```