diff --git a/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py
index fb5c2ab72..63ce2c87a 100644
--- a/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py
+++ b/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py
@@ -155,7 +155,7 @@ class TFEncoderDecoderModel(TFPreTrainedModel):
     """
     config_class = EncoderDecoderConfig
     base_model_prefix = "encoder_decoder"
-    load_weight_prefix = "tf_encoder_decoder_model_1"
+    load_weight_prefix = "tf_encoder_decoder_model"

     def __init__(
         self,
diff --git a/tests/test_modeling_tf_encoder_decoder.py b/tests/test_modeling_tf_encoder_decoder.py
index 96f2b8155..d5b0a9dd6 100644
--- a/tests/test_modeling_tf_encoder_decoder.py
+++ b/tests/test_modeling_tf_encoder_decoder.py
@@ -958,7 +958,7 @@ class TFEncoderDecoderModelSaveLoadTests(unittest.TestCase):

     @slow
     def test_encoder_decoder_from_pretrained(self):
-        load_weight_prefix = "tf_encoder_decoder_model_1"
+        load_weight_prefix = TFEncoderDecoderModel.load_weight_prefix

         config = self.get_encoder_decoder_config()
         encoder_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")