diff --git a/docs/source/en/main_classes/quantization.md b/docs/source/en/main_classes/quantization.md
index ad618f97e..36ef2eefa 100644
--- a/docs/source/en/main_classes/quantization.md
+++ b/docs/source/en/main_classes/quantization.md
@@ -20,7 +20,7 @@ rendered properly in your Markdown viewer.
 
 🤗 Transformers has integrated `optimum` API to perform GPTQ quantization on language models. You can load and quantize your model in 8, 4, 3 or even 2 bits without a big drop of performance and faster inference speed! This is supported by most GPU hardwares.
 
-To learn more about the the quantization model, check out:
+To learn more about the quantization model, check out:
 - the [GPTQ](https://arxiv.org/pdf/2210.17323.pdf) paper
 - the `optimum` [guide](https://huggingface.co/docs/optimum/llm_quantization/usage_guides/quantization) on GPTQ quantization
 - the [`AutoGPTQ`](https://github.com/PanQiWei/AutoGPTQ) library used as the backend
diff --git a/docs/source/en/preprocessing.md b/docs/source/en/preprocessing.md
index c90c6c2a2..5927c22e6 100644
--- a/docs/source/en/preprocessing.md
+++ b/docs/source/en/preprocessing.md
@@ -306,7 +306,7 @@ Create a function to preprocess the dataset so the audio samples are the same le
 ...     return inputs
 ```
 
-Apply the `preprocess_function` to the the first few examples in the dataset:
+Apply the `preprocess_function` to the first few examples in the dataset:
 
 ```py
 >>> processed_dataset = preprocess_function(dataset[:5])
diff --git a/src/transformers/generation/configuration_utils.py b/src/transformers/generation/configuration_utils.py
index 3bd85568d..002d7d75e 100644
--- a/src/transformers/generation/configuration_utils.py
+++ b/src/transformers/generation/configuration_utils.py
@@ -315,7 +315,7 @@ class GenerationConfig(PushToHubMixin):
         # Wild card
         self.generation_kwargs = kwargs.pop("generation_kwargs", {})
 
-        # The remaining attributes do not parametrize `.generate()`, but are informative and/or used by the the hub
+        # The remaining attributes do not parametrize `.generate()`, but are informative and/or used by the hub
         # interface.
         self._from_model_config = kwargs.pop("_from_model_config", False)
         self._commit_hash = kwargs.pop("_commit_hash", None)
diff --git a/src/transformers/models/deit/modeling_tf_deit.py b/src/transformers/models/deit/modeling_tf_deit.py
index efd25788b..a8ba5c9a8 100644
--- a/src/transformers/models/deit/modeling_tf_deit.py
+++ b/src/transformers/models/deit/modeling_tf_deit.py
@@ -787,7 +787,7 @@ class TFDeiTForMaskedImageModeling(TFDeiTPreTrainedModel):
         # Reconstruct pixel values
         reconstructed_pixel_values = self.decoder(sequence_output, training=training)
         # TF 2.0 image layers can't use NCHW format when running on CPU, so intermediate layers use NHWC,
-        # including the The decoder. We transpose to compute the loss against the pixel values
+        # including the decoder. We transpose to compute the loss against the pixel values
         # (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width)
         reconstructed_pixel_values = tf.transpose(reconstructed_pixel_values, (0, 3, 1, 2))
 
diff --git a/src/transformers/tools/evaluate_agent.py b/src/transformers/tools/evaluate_agent.py
index 47d1d4330..7d5cddf1c 100644
--- a/src/transformers/tools/evaluate_agent.py
+++ b/src/transformers/tools/evaluate_agent.py
@@ -200,7 +200,7 @@ EVALUATION_TASKS = [
         task=[
             "Provide me the summary of the `text`, then read it to me before transcribing it and translating it in French.",
             "Summarize `text`, read it out loud then transcribe the audio and translate it in French.",
-            "Read me a summary of the the `text` out loud. Transcribe this and translate it in French.",
+            "Read me a summary of the `text` out loud. Transcribe this and translate it in French.",
         ],
         inputs=["text"],
         answer="translator(transcriber(text_reader(summarizer(text))), src_lang='English', tgt_lang='French')",
diff --git a/src/transformers/utils/peft_utils.py b/src/transformers/utils/peft_utils.py
index 0e20db8ea..7830acd0b 100644
--- a/src/transformers/utils/peft_utils.py
+++ b/src/transformers/utils/peft_utils.py
@@ -39,7 +39,7 @@ def find_adapter_config_file(
     _commit_hash: Optional[str] = None,
 ) -> Optional[str]:
     r"""
-    Simply checks if the model stored on the Hub or locally is an adapter model or not, return the path the the adapter
+    Simply checks if the model stored on the Hub or locally is an adapter model or not, return the path of the adapter
     config file if it is, None otherwise.
 
     Args:
diff --git a/tests/quantization/gptq/test_gptq.py b/tests/quantization/gptq/test_gptq.py
index d715bd56c..913983657 100644
--- a/tests/quantization/gptq/test_gptq.py
+++ b/tests/quantization/gptq/test_gptq.py
@@ -178,7 +178,7 @@ class GPTQTest(unittest.TestCase):
 
     def test_generate_quality(self):
         """
-        Simple test to check the quality of the model by comapring the the generated tokens with the expected tokens
+        Simple test to check the quality of the model by comparing the generated tokens with the expected tokens
         """
         if self.device_map is None:
             self.check_inference_correctness(self.quantized_model.to(0))
@@ -290,7 +290,7 @@ class GPTQTestActOrderExllama(unittest.TestCase):
 
     def test_generate_quality(self):
         """
-        Simple test to check the quality of the model by comapring the the generated tokens with the expected tokens
+        Simple test to check the quality of the model by comparing the generated tokens with the expected tokens
         """
         self.check_inference_correctness(self.quantized_model)
 
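For context, the `quantization.md` passage touched above describes loading and quantizing a model in 8, 4, 3, or 2 bits through the `optimum` GPTQ integration. A minimal sketch of that workflow, assuming `optimum` and `auto-gptq` are installed; the model id is a small placeholder chosen for illustration:

```python
# Illustrative GPTQ quantization sketch; the model id is a placeholder.
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_id = "facebook/opt-125m"
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Quantize to 4 bits, calibrating on the "c4" dataset as in the optimum
# GPTQ guide linked from the docs; bits could also be 8, 3, or 2.
gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", quantization_config=gptq_config
)

# The quantized model generates as usual.
inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0]))
```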