# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import json
import os
import unittest

from transformers.file_utils import cached_property
from transformers.testing_utils import slow
from transformers.tokenization_fsmt import VOCAB_FILES_NAMES, FSMTTokenizer

from .test_tokenization_common import TokenizerTesterMixin


class FSMTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = FSMTTokenizer

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
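        # The merges mirror the fastBPE "codes" format ("token_a token_b count");
        # as far as I can tell the trailing count is ignored by the tokenizer,
        # which only uses the pair order, so the numbers here are illustrative.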

        self.langs = ["en", "ru"]
        config = {
            "langs": self.langs,
            "src_vocab_size": 10,
            "tgt_vocab_size": 20,
        }

        self.src_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["src_vocab_file"])
        self.tgt_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["tgt_vocab_file"])
        config_file = os.path.join(self.tmpdirname, "tokenizer_config.json")
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.src_vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.tgt_vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
        with open(config_file, "w") as fp:
            fp.write(json.dumps(config))
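
        # Together these four files - the src/tgt vocab JSONs, the merges file,
        # and a tokenizer_config.json carrying "langs" - are the on-disk layout
        # FSMTTokenizer expects when loading from a local directory.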

    @cached_property
    def tokenizer_ru_en(self):
        return FSMTTokenizer.from_pretrained("facebook/wmt19-ru-en")

    @cached_property
    def tokenizer_en_ru(self):
        return FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")

    def test_full_tokenizer(self):
        """ Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt """
        tokenizer = FSMTTokenizer(self.langs, self.src_vocab_file, self.tgt_vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
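        # expected ids follow the setUp vocab order: "low" -> 14, "er</w>" -> 15, "<unk>" -> 20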
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_ru_en

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [2]
        assert encoded_pair == text + [2] + text_2 + [2]
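        # 2 should be the id of </s> in the wmt19 vocabs: FSMT appends only an
        # EOS marker (no BOS/CLS prefix) for both single sequences and pairs.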

    @slow
    def test_match_encode_decode(self):
        tokenizer_enc = self.tokenizer_en_ru
        tokenizer_dec = self.tokenizer_ru_en

        targets = [
            [
                "Here's a little song I wrote. Don't worry, be happy.",
                [2470, 39, 11, 2349, 7222, 70, 5979, 7, 8450, 1050, 13160, 5, 26, 6445, 7, 2],
            ],
            ["This is it. No more. I'm done!", [132, 21, 37, 7, 1434, 86, 7, 70, 6476, 1305, 427, 2]],
        ]

        # if data needs to be recreated or added, run:
        # import torch
        # model = torch.hub.load("pytorch/fairseq", "transformer.wmt19.en-ru", checkpoint_file="model4.pt", tokenizer="moses", bpe="fastbpe")
        # for src_text, _ in targets: print(f"""[\n"{src_text}",\n {model.encode(src_text).tolist()}\n],""")
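
        # The expected ids above come straight from the original fairseq model,
        # so this loop checks encode() parity with upstream, then that the
        # reverse-direction tokenizer decodes the ids back to the source text.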

        for src_text, tgt_input_ids in targets:
            input_ids = tokenizer_enc.encode(src_text, return_tensors="pt")[0].tolist()
            self.assertListEqual(input_ids, tgt_input_ids)

            # and decode backward, using the reversed languages model
            decoded_text = tokenizer_dec.decode(input_ids, skip_special_tokens=True)
            self.assertEqual(decoded_text, src_text)

    @unittest.skip("FSMTConfig.__init__ requires non-optional args")
    def test_torch_encode_plus_sent_to_model(self):
        pass

    @unittest.skip("FSMTConfig.__init__ requires non-optional args")
    def test_np_encode_plus_sent_to_model(self):
        pass
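
# A minimal local-run sketch (assuming this file lives at the repo's usual
# tests/test_tokenization_fsmt.py path):
#
#     python -m pytest tests/test_tokenization_fsmt.py -k full_tokenizer
#
# The @slow tests additionally need RUN_SLOW=1 in the environment plus network
# access to fetch the facebook/wmt19-* tokenizer files.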