Fix: Nemotron Processor in GGUF conversion (#35708)

* fixing nemotron processor

* make style
This commit is contained in:
Mohamed Mekkouri 2025-01-15 14:25:44 +01:00 committed by GitHub
parent 387663e571
commit 12dfd99007
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

@ -221,6 +221,17 @@ class MambaTensorProcessor(TensorProcessor):
return GGUFTensor(weights, name, {})
class NemotronTensorProcessor(TensorProcessor):
    """Tensor post-processing for Nemotron checkpoints during GGUF conversion.

    Norm weights are shifted by -1 to undo the offset applied on the
    llama.cpp export side (presumably stored as `weight + 1` there — see ref).
    ref: https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L4666
    """

    def __init__(self, config=None):
        super().__init__(config=config)

    def process(self, weights, name, **kwargs):
        # Only normalization weights carry the +1 offset; everything else
        # passes through untouched.
        adjusted = weights - 1 if "norm.weight" in name else weights
        return GGUFTensor(adjusted, name, {})
class Gemma2TensorProcessor(TensorProcessor):
def __init__(self, config=None):
super().__init__(config=config)
@ -241,6 +252,7 @@ TENSOR_PROCESSORS = {
"t5encoder": T5TensorProcessor,
"gpt2": GPT2TensorProcessor,
"mamba": MambaTensorProcessor,
"nemotron": NemotronTensorProcessor,
"gemma2": Gemma2TensorProcessor,
}