From 12dfd99007dd803b3dfab5fe2ec66260b8007a23 Mon Sep 17 00:00:00 2001 From: Mohamed Mekkouri <93391238+MekkCyber@users.noreply.github.com> Date: Wed, 15 Jan 2025 14:25:44 +0100 Subject: [PATCH] Fix: Nemotron Processor in GGUF conversion (#35708) * fixing nemotron processor * make style --- src/transformers/modeling_gguf_pytorch_utils.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/transformers/modeling_gguf_pytorch_utils.py b/src/transformers/modeling_gguf_pytorch_utils.py index 9b20c1b61..0da06a1f5 100644 --- a/src/transformers/modeling_gguf_pytorch_utils.py +++ b/src/transformers/modeling_gguf_pytorch_utils.py @@ -221,6 +221,17 @@ class MambaTensorProcessor(TensorProcessor): return GGUFTensor(weights, name, {}) +class NemotronTensorProcessor(TensorProcessor): + def __init__(self, config=None): + super().__init__(config=config) + + # ref : https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L4666 + def process(self, weights, name, **kwargs): + if "norm.weight" in name: + weights = weights - 1 + return GGUFTensor(weights, name, {}) + + class Gemma2TensorProcessor(TensorProcessor): def __init__(self, config=None): super().__init__(config=config) @@ -241,6 +252,7 @@ TENSOR_PROCESSORS = { "t5encoder": T5TensorProcessor, "gpt2": GPT2TensorProcessor, "mamba": MambaTensorProcessor, + "nemotron": NemotronTensorProcessor, "gemma2": Gemma2TensorProcessor, }