diff --git a/tests/generation/test_framework_agnostic.py b/tests/generation/test_framework_agnostic.py
index 85a58bdf2..f4f13dd8d 100644
--- a/tests/generation/test_framework_agnostic.py
+++ b/tests/generation/test_framework_agnostic.py
@@ -111,7 +111,7 @@ class GenerationIntegrationTestsMixin:
 
         article = """Justin Timberlake."""
         gpt2_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
-        gpt2_model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2", use_safetensors=is_pt)
+        gpt2_model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         input_ids = gpt2_tokenizer(article, return_tensors=return_tensors).input_ids
         if is_pt:
             gpt2_model = gpt2_model.to(torch_device)
@@ -582,7 +582,7 @@ class GenerationIntegrationTestsMixin:
         tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         text = """Hello, my dog is cute and"""
         tokens = tokenizer(text, return_tensors=return_tensors)
-        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2", use_safetensors=is_pt)
+        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         if is_pt:
             model = model.to(torch_device)
             tokens = tokens.to(torch_device)
@@ -611,7 +611,7 @@ class GenerationIntegrationTestsMixin:
         tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         text = """Hello, my dog is cute and"""
         tokens = tokenizer(text, return_tensors=return_tensors)
-        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2", use_safetensors=is_pt)
+        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         if is_pt:
             model = model.to(torch_device)
             tokens = tokens.to(torch_device)
@@ -638,7 +638,7 @@ class GenerationIntegrationTestsMixin:
         tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         text = """Hello, my dog is cute and"""
         tokens = tokenizer(text, return_tensors=return_tensors)
-        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2", use_safetensors=is_pt)
+        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         if is_pt:
             model = model.to(torch_device)
             tokens = tokens.to(torch_device)
diff --git a/tests/generation/test_tf_utils.py b/tests/generation/test_tf_utils.py
index 73eba05ec..f40ceebef 100644
--- a/tests/generation/test_tf_utils.py
+++ b/tests/generation/test_tf_utils.py
@@ -194,7 +194,7 @@ class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTests
         tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         text = """Hello, my dog is cute and"""
         tokens = tokenizer(text, return_tensors="tf")
-        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2", use_safetensors=False)
+        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         eos_token_id = 638
 
         # forces the generation to happen on CPU, to avoid GPU-related quirks
diff --git a/tests/pipelines/test_pipelines_text_generation.py b/tests/pipelines/test_pipelines_text_generation.py
index 318526b85..ada04c7db 100644
--- a/tests/pipelines/test_pipelines_text_generation.py
+++ b/tests/pipelines/test_pipelines_text_generation.py
@@ -268,7 +268,6 @@ class TextGenerationPipelineTests(unittest.TestCase):
         text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
         return text_generator, ["This is a test", "Another test"]
 
-    @require_torch  # See https://github.com/huggingface/transformers/issues/30117
     def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
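
Note for reviewers: the dropped `use_safetensors=is_pt` / `use_safetensors=False` pins presumably rely on the parameter's documented default of `None`, under which `from_pretrained` auto-detects the weight format available on the checkpoint instead of having the format pinned per framework. A minimal sketch of that behavior (assuming, as these tests do, that the tiny checkpoint ships weights loadable by the caller's framework):

```python
from transformers import AutoModelForCausalLM

# With use_safetensors left at its default (None), from_pretrained picks
# safetensors weights when the checkpoint provides them and safetensors is
# installed, otherwise falls back to the framework-native weight files.
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
```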