mirror of
https://github.com/saymrwulf/transformers.git
synced 2026-05-15 21:01:19 +00:00
fix load_weight_prefix (#15101)
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
This commit is contained in:
parent
71dccd0774
commit
f5d98da29e
2 changed files with 2 additions and 2 deletions
|
|
@@ -155,7 +155,7 @@ class TFEncoderDecoderModel(TFPreTrainedModel):
|
|||
"""
|
||||
config_class = EncoderDecoderConfig
|
||||
base_model_prefix = "encoder_decoder"
|
||||
load_weight_prefix = "tf_encoder_decoder_model_1"
|
||||
load_weight_prefix = "tf_encoder_decoder_model"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
|
|
|
|||
|
|
@@ -958,7 +958,7 @@ class TFEncoderDecoderModelSaveLoadTests(unittest.TestCase):
|
|||
|
||||
@slow
|
||||
def test_encoder_decoder_from_pretrained(self):
|
||||
load_weight_prefix = "tf_encoder_decoder_model_1"
|
||||
load_weight_prefix = TFEncoderDecoderModel.load_weight_prefix
|
||||
|
||||
config = self.get_encoder_decoder_config()
|
||||
encoder_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
|
||||
|
|
|
|||
Loading…
Reference in a new issue