From 7c31d05b59a9dce24b8ddc4b2bb8c8cf6bb5fd77 Mon Sep 17 00:00:00 2001 From: Shaopeng Fu Date: Sat, 3 Aug 2024 19:24:11 +0300 Subject: [PATCH] fix: (issue #32124) Exception raised when running `transformers/examples/flax/language-modeling/t5_tokenizer_model.py`. (#32157) fix: Exception raised when running `transformers/examples/flax/language-modeling/t5_tokenizer_model.py`. --- examples/flax/language-modeling/t5_tokenizer_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/flax/language-modeling/t5_tokenizer_model.py b/examples/flax/language-modeling/t5_tokenizer_model.py index b55c2c95d..a2be4afc9 100755 --- a/examples/flax/language-modeling/t5_tokenizer_model.py +++ b/examples/flax/language-modeling/t5_tokenizer_model.py @@ -47,14 +47,14 @@ class SentencePieceUnigramTokenizer(BaseTokenizer): tokenizer.pre_tokenizer = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace( - replacement=replacement, add_prefix_space="always" if add_prefix_space else "never" + replacement=replacement, prepend_scheme="always" if add_prefix_space else "never" ), pre_tokenizers.Digits(individual_digits=True), pre_tokenizers.Punctuation(), ] ) tokenizer.decoder = decoders.Metaspace( - replacement=replacement, add_prefix_space="always" if add_prefix_space else "never" + replacement=replacement, prepend_scheme="always" if add_prefix_space else "never" ) tokenizer.post_processor = TemplateProcessing(