From d71fa9f618263130281501e6cf978d80e5850ef8 Mon Sep 17 00:00:00 2001
From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com>
Date: Tue, 14 Nov 2023 10:32:57 +0100
Subject: [PATCH] [`Peft`] `modules_to_save` support for peft integration
 (#27466)

* `modules_to_save` support for peft integration

* Update docs/source/en/peft.md

Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>

* slightly elaborate test

---------

Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>
---
 docs/source/en/peft.md                        | 22 ++++++++++-
 src/transformers/integrations/peft.py         |  6 ++-
 .../peft_integration/test_peft_integration.py | 38 +++++++++++++++++++
 3 files changed, 63 insertions(+), 3 deletions(-)

diff --git a/docs/source/en/peft.md b/docs/source/en/peft.md
index 302b614e5..d86a36e62 100644
--- a/docs/source/en/peft.md
+++ b/docs/source/en/peft.md
@@ -98,7 +98,7 @@ You can use [`~peft.PeftModel.add_adapter`] to add a new adapter to a model with
 
 ```py
 from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer
-from peft import PeftConfig
+from peft import LoraConfig
 
 model_id = "facebook/opt-350m"
 model = AutoModelForCausalLM.from_pretrained(model_id)
@@ -208,6 +208,26 @@ model.save_pretrained(save_dir)
 model = AutoModelForCausalLM.from_pretrained(save_dir)
 ```
 
+## Add additional trainable layers to a PEFT adapter
+
+You can also fine-tune additional trainable layers on top of a model with an adapter attached by passing `modules_to_save` in your PEFT config. For example, if you want to also fine-tune the `lm_head` on top of a model with a LoRA adapter:
+
+```py
+from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer
+from peft import LoraConfig
+
+model_id = "facebook/opt-350m"
+model = AutoModelForCausalLM.from_pretrained(model_id)
+
+lora_config = LoraConfig(
+    target_modules=["q_proj", "k_proj"],
+    modules_to_save=["lm_head"],
+)
+
+model.add_adapter(lora_config)
+```
+
+
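
For anyone who wants to try the documented behavior locally, here is a minimal, illustrative sketch (not the test added by this PR) of the intent behind `modules_to_save`: after `add_adapter`, both the injected LoRA weights and the saved copy of `lm_head` should require gradients.

```py
from transformers import AutoModelForCausalLM
from peft import LoraConfig

model_id = "facebook/opt-350m"
model = AutoModelForCausalLM.from_pretrained(model_id)

lora_config = LoraConfig(
    target_modules=["q_proj", "k_proj"],
    modules_to_save=["lm_head"],
)
model.add_adapter(lora_config)

# Parameters that remain trainable after the adapter is attached.
trainable = [name for name, param in model.named_parameters() if param.requires_grad]

# The LoRA weights injected into q_proj / k_proj should be trainable ...
assert any("lora" in name for name in trainable)
# ... and, because of modules_to_save=["lm_head"], so should the lm_head copy.
assert any("lm_head" in name for name in trainable)
```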