Mirror of https://github.com/saymrwulf/transformers.git, last synced 2026-05-14 20:58:08 +00:00.
Make (TF) CI faster (test only a subset of model classes) (#24592)
* fix
* fix
* fix

---------

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
This commit is contained in:
parent
78a2b19fc8
commit
3441ad7d43
2 changed files with 8 additions and 8 deletions
|
|
@@ -341,7 +341,7 @@ class TFModelTesterMixin:
|
|||
|
||||
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
|
||||
|
||||
for model_class in self.all_model_classes:
|
||||
for model_class in self.all_model_classes[:2]:
|
||||
model = model_class(config)
|
||||
model.build()
|
||||
|
||||
|
|
@@ -689,7 +689,7 @@ class TFModelTesterMixin:
|
|||
def test_compile_tf_model(self):
|
||||
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
|
||||
|
||||
for model_class in self.all_model_classes:
|
||||
for model_class in self.all_model_classes[:2]:
|
||||
# Prepare our model
|
||||
model = model_class(config)
|
||||
# These are maximally general inputs for the model, with multiple None dimensions
|
||||
|
|
|
|||
|
|
@@ -111,7 +111,7 @@ class TFCoreModelTesterMixin:
|
|||
@slow
|
||||
def test_graph_mode(self):
|
||||
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
|
||||
for model_class in self.all_model_classes:
|
||||
for model_class in self.all_model_classes[:2]:
|
||||
inputs = self._prepare_for_class(inputs_dict, model_class)
|
||||
model = model_class(config)
|
||||
|
||||
|
|
@@ -125,7 +125,7 @@ class TFCoreModelTesterMixin:
|
|||
@slow
|
||||
def test_xla_mode(self):
|
||||
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
|
||||
for model_class in self.all_model_classes:
|
||||
for model_class in self.all_model_classes[:2]:
|
||||
inputs = self._prepare_for_class(inputs_dict, model_class)
|
||||
model = model_class(config)
|
||||
|
||||
|
|
@@ -140,7 +140,7 @@ class TFCoreModelTesterMixin:
|
|||
def test_xla_fit(self):
|
||||
# This is a copy of the test_keras_fit method, but we use XLA compilation instead of eager
|
||||
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
|
||||
for model_class in self.all_model_classes:
|
||||
for model_class in self.all_model_classes[:2]:
|
||||
model = model_class(config)
|
||||
if getattr(model, "hf_compute_loss", None):
|
||||
# Test that model correctly compute the loss with kwargs
|
||||
|
|
@@ -214,7 +214,7 @@ class TFCoreModelTesterMixin:
|
|||
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
|
||||
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
|
||||
|
||||
for model_class in self.all_model_classes:
|
||||
for model_class in self.all_model_classes[:2]:
|
||||
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
|
||||
model = model_class(config)
|
||||
model.build()
|
||||
|
|
@@ -269,7 +269,7 @@ class TFCoreModelTesterMixin:
|
|||
# try/finally block to ensure subsequent tests run in float32
|
||||
try:
|
||||
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
|
||||
for model_class in self.all_model_classes:
|
||||
for model_class in self.all_model_classes[:2]:
|
||||
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
|
||||
model = model_class(config)
|
||||
outputs = model(class_inputs_dict)
|
||||
|
|
@@ -352,7 +352,7 @@ class TFCoreModelTesterMixin:
|
|||
def test_graph_mode_with_inputs_embeds(self):
|
||||
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
|
||||
|
||||
for model_class in self.all_model_classes:
|
||||
for model_class in self.all_model_classes[:2]:
|
||||
model = model_class(config)
|
||||
|
||||
inputs = copy.deepcopy(inputs_dict)
|
||||
|
|
|
|||
Loading…
Reference in a new issue