diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml
index d87906159..435b482df 100644
--- a/docs/source/en/_toctree.yml
+++ b/docs/source/en/_toctree.yml
@@ -396,6 +396,8 @@
       title: ESM
     - local: model_doc/falcon
       title: Falcon
+    - local: model_doc/falcon3
+      title: Falcon3
     - local: model_doc/falcon_mamba
       title: FalconMamba
     - local: model_doc/fastspeech2_conformer
diff --git a/docs/source/en/index.md b/docs/source/en/index.md
index a40bb8254..3bd1c286d 100644
--- a/docs/source/en/index.md
+++ b/docs/source/en/index.md
@@ -141,6 +141,7 @@ Flax), PyTorch, and/or TensorFlow.
 | [ESM](model_doc/esm) | ✅ | ✅ | ❌ |
 | [FairSeq Machine-Translation](model_doc/fsmt) | ✅ | ❌ | ❌ |
 | [Falcon](model_doc/falcon) | ✅ | ❌ | ❌ |
+| [Falcon3](model_doc/falcon3) | ✅ | ❌ | ✅ |
 | [FalconMamba](model_doc/falcon_mamba) | ✅ | ❌ | ❌ |
 | [FastSpeech2Conformer](model_doc/fastspeech2_conformer) | ✅ | ❌ | ❌ |
 | [FLAN-T5](model_doc/flan-t5) | ✅ | ✅ | ✅ |
diff --git a/docs/source/en/model_doc/falcon3.md b/docs/source/en/model_doc/falcon3.md
new file mode 100644
index 000000000..813533dd7
--- /dev/null
+++ b/docs/source/en/model_doc/falcon3.md
@@ -0,0 +1,29 @@
+<!--Copyright 2024 the HuggingFace Team. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
+-->
+
+# Falcon3
+
+## Overview
+
+Falcon3 represents a natural evolution from previous releases, with a focus on expanding the models' science, math, and code capabilities. This iteration includes five base models: Falcon3-1B-Base, Falcon3-3B-Base, Falcon3-Mamba-7B-Base, Falcon3-7B-Base, and Falcon3-10B-Base. In developing these models, we incorporated several key innovations aimed at improving the models' performance while reducing training costs:
+
+- One pre-training: We conducted a single large-scale pretraining run on the 7B model, using 2048 H100 GPUs and 14 trillion tokens of web, code, STEM, and curated high-quality multilingual data.
+- Depth up-scaling for improved reasoning: Building on recent studies of the effects of model depth, we upscaled the 7B model to a 10B-parameter model by duplicating the redundant layers and continuing pre-training on 2 trillion tokens of high-quality data. This yielded Falcon3-10B-Base, which achieves state-of-the-art zero-shot and few-shot performance among models under 13B parameters.
+- Knowledge distillation for better tiny models: To provide compact and efficient alternatives, we developed Falcon3-1B-Base and Falcon3-3B-Base by applying pruning and knowledge distillation techniques to less than 100 billion tokens of curated high-quality data, thereby redefining pre-training efficiency.
+
+## Resources
+- [Blog post](https://huggingface.co/blog/falcon3)
+- [Models on the Hugging Face Hub](https://huggingface.co/collections/tiiuae/falcon3-67605ae03578be86e4e87026)
diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py
index 1fb7464f4..d7d8281c2 100644
--- a/src/transformers/models/auto/configuration_auto.py
+++ b/src/transformers/models/auto/configuration_auto.py
@@ -415,6 +415,7 @@ MODEL_NAMES_MAPPING = OrderedDict(
         ("ernie_m", "ErnieM"),
         ("esm", "ESM"),
         ("falcon", "Falcon"),
+        ("falcon3", "Falcon3"),
         ("falcon_mamba", "FalconMamba"),
         ("fastspeech2_conformer", "FastSpeech2Conformer"),
         ("flan-t5", "FLAN-T5"),
diff --git a/utils/check_table.py b/utils/check_table.py
index 4a392a58f..957bfd5af 100644
--- a/utils/check_table.py
+++ b/utils/check_table.py
@@ -157,6 +157,7 @@ MODEL_NAMES_WITH_SAME_CONFIG = {
     "LayoutXLM": "LayoutLMv2",
     "Llama2": "LLaMA",
     "Llama3": "LLaMA",
+    "Falcon3": "LLaMA",
     "MADLAD-400": "T5",
     "MatCha": "Pix2Struct",
     "mBART-50": "mBART",
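
Because the `check_table.py` hunk maps `Falcon3` to the LLaMA configuration, this change ships no new modeling code: Falcon3 checkpoints load through the existing auto classes. A minimal usage sketch of what this enables, assuming the `tiiuae/Falcon3-7B-Base` checkpoint id from the collection linked in the new doc page (the id is not part of this diff):

```python
# Minimal sketch: Falcon3 shares its configuration with LLaMA
# (see the check_table.py mapping above), so the standard auto classes apply.
# The checkpoint id below is assumed from the tiiuae collection, not this diff.
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "tiiuae/Falcon3-7B-Base"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto")

# Generate a short continuation to confirm the checkpoint resolves and runs.
inputs = tokenizer(
    "Falcon3 models excel at science, math, and code because",
    return_tensors="pt",
).to(model.device)
outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```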