From c5f671904076ce6526bcc58ff81135bb46cf67e0 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Tue, 19 Jan 2021 09:40:15 -0500 Subject: [PATCH] Fix imports in conversion scripts (#9674) --- ...nvert_albert_original_tf_checkpoint_to_pytorch.py | 4 ++-- ...rt_bart_original_pytorch_checkpoint_to_pytorch.py | 12 +++++++++--- ...onvert_bert_original_tf2_checkpoint_to_pytorch.py | 4 ++-- ...convert_bert_original_tf_checkpoint_to_pytorch.py | 4 ++-- ...convert_bert_pytorch_checkpoint_to_original_tf.py | 2 +- ...nderbot_original_pytorch_checkpoint_to_pytorch.py | 4 ++-- ...ialogpt_original_pytorch_checkpoint_to_pytorch.py | 2 +- .../convert_dpr_original_checkpoint_to_pytorch.py | 3 +-- ...vert_electra_original_tf_checkpoint_to_pytorch.py | 4 ++-- ...rt_fsmt_original_pytorch_checkpoint_to_pytorch.py | 9 +++++---- src/transformers/models/funnel/__init__.py | 1 + ...nvert_funnel_original_tf_checkpoint_to_pytorch.py | 6 +++--- ...convert_gpt2_original_tf_checkpoint_to_pytorch.py | 6 +++--- ...ngformer_original_pytorch_lightning_to_pytorch.py | 2 +- ...nvert_lxmert_original_tf_checkpoint_to_pytorch.py | 6 +++--- .../marian/convert_marian_tatoeba_to_pytorch.py | 2 +- .../models/marian/convert_marian_to_pytorch.py | 4 ++-- .../convert_mbart_original_checkpoint_to_pytorch.py | 5 ++--- ...t_mobilebert_original_tf_checkpoint_to_pytorch.py | 4 ++-- .../models/pegasus/convert_pegasus_tf_to_pytorch.py | 4 ++-- ...phetnet_original_pytorch_checkpoint_to_pytorch.py | 4 ++-- .../convert_reformer_trax_checkpoint_to_pytorch.py | 4 ++-- ...roberta_original_pytorch_checkpoint_to_pytorch.py | 12 +++++++++--- .../convert_t5_original_tf_checkpoint_to_pytorch.py | 4 ++-- ...onvert_tapas_original_tf_checkpoint_to_pytorch.py | 4 ++-- ...t_transfo_xl_original_tf_checkpoint_to_pytorch.py | 10 +++++----- ...ert_xlm_original_pytorch_checkpoint_to_pytorch.py | 6 +++--- ...onvert_xlnet_original_tf_checkpoint_to_pytorch.py | 6 +++--- 28 files changed, 75 
insertions(+), 63 deletions(-) diff --git a/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py index 7c5e04cc1..10c018170 100644 --- a/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py +++ b/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py @@ -19,8 +19,8 @@ import argparse import torch -from ...utils import logging -from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert +from transformers import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert +from transformers.utils import logging logging.set_verbosity_info() diff --git a/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py index 61c36702a..8978b8b2e 100644 --- a/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py @@ -23,9 +23,15 @@ import fairseq import torch from packaging import version -from ...utils import logging -from . 
import BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer -from .modeling_bart import _make_linear_from_emb +from transformers import ( + BartConfig, + BartForConditionalGeneration, + BartForSequenceClassification, + BartModel, + BartTokenizer, +) +from transformers.models.bart.modeling_bart import _make_linear_from_emb +from transformers.utils import logging FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"] diff --git a/src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py b/src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py index f343fec27..c780c0f83 100644 --- a/src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py +++ b/src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py @@ -28,8 +28,8 @@ import re import tensorflow as tf import torch -from ...utils import logging -from . import BertConfig, BertModel +from transformers import BertConfig, BertModel +from transformers.utils import logging logging.set_verbosity_info() diff --git a/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py index 049d50de9..d1cb69a2e 100755 --- a/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py +++ b/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py @@ -19,8 +19,8 @@ import argparse import torch -from ...utils import logging -from . 
import BertConfig, BertForPreTraining, load_tf_weights_in_bert +from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert +from transformers.utils import logging logging.set_verbosity_info() diff --git a/src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py b/src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py index 25aeb7762..07685f645 100644 --- a/src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py +++ b/src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py @@ -22,7 +22,7 @@ import numpy as np import tensorflow as tf import torch -from . import BertModel +from transformers import BertModel def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str): diff --git a/src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py index 7958da85d..d31cf67c1 100644 --- a/src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py @@ -18,8 +18,8 @@ import argparse import torch -from ...models.bart import BartConfig, BartForConditionalGeneration -from ...utils import logging +from transformers import BartConfig, BartForConditionalGeneration +from transformers.utils import logging logging.set_verbosity_info() diff --git a/src/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py index 00d9aa335..f588a2fde 100644 --- a/src/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py @@ -17,7 +17,7 @@ import os 
import torch -from ...file_utils import WEIGHTS_NAME +from transformers.file_utils import WEIGHTS_NAME DIALOGPT_MODELS = ["small", "medium", "large"] diff --git a/src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py b/src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py index a9cd4c1dc..cc10ac002 100644 --- a/src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py +++ b/src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py @@ -19,8 +19,7 @@ from pathlib import Path import torch from torch.serialization import default_restore_location -from ...models.bert import BertConfig -from . import DPRConfig, DPRContextEncoder, DPRQuestionEncoder, DPRReader +from transformers import BertConfig, DPRConfig, DPRContextEncoder, DPRQuestionEncoder, DPRReader CheckpointState = collections.namedtuple( diff --git a/src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py index ffe980c6a..9cbfcf665 100644 --- a/src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py +++ b/src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py @@ -19,8 +19,8 @@ import argparse import torch -from ...utils import logging -from . 
import ElectraConfig, ElectraForMaskedLM, ElectraForPreTraining, load_tf_weights_in_electra +from transformers import ElectraConfig, ElectraForMaskedLM, ElectraForPreTraining, load_tf_weights_in_electra +from transformers.utils import logging logging.set_verbosity_info() diff --git a/src/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py index 922f0b6d0..513597979 100755 --- a/src/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py @@ -31,10 +31,11 @@ import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary -from ...file_utils import WEIGHTS_NAME -from ...tokenization_utils_base import TOKENIZER_CONFIG_FILE -from ...utils import logging -from . import VOCAB_FILES_NAMES, FSMTConfig, FSMTForConditionalGeneration +from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES +from transformers import FSMTConfig, FSMTForConditionalGeneration +from transformers.file_utils import WEIGHTS_NAME +from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE +from transformers.utils import logging logging.set_verbosity_warning() diff --git a/src/transformers/models/funnel/__init__.py b/src/transformers/models/funnel/__init__.py index d692d2d74..363df7e55 100644 --- a/src/transformers/models/funnel/__init__.py +++ b/src/transformers/models/funnel/__init__.py @@ -23,6 +23,7 @@ from ...file_utils import _BaseLazyModule, is_tf_available, is_tokenizers_availa _import_structure = { "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"], + "convert_funnel_original_tf_checkpoint_to_pytorch": [], "tokenization_funnel": ["FunnelTokenizer"], } diff --git a/src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py 
b/src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py index 03f461658..daff28305 100755 --- a/src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py +++ b/src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py @@ -16,14 +16,14 @@ import argparse -import logging import torch -from . import FunnelConfig, FunnelForPreTraining, load_tf_weights_in_funnel +from transformers import FunnelConfig, FunnelForPreTraining, load_tf_weights_in_funnel +from transformers.utils import logging -logging.basicConfig(level=logging.INFO) +logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path): diff --git a/src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py index 5f85d0e18..e5f8be189 100755 --- a/src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py +++ b/src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py @@ -19,9 +19,9 @@ import argparse import torch -from ...file_utils import CONFIG_NAME, WEIGHTS_NAME -from ...utils import logging -from . 
import GPT2Config, GPT2Model, load_tf_weights_in_gpt2 +from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2 +from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME +from transformers.utils import logging logging.set_verbosity_info() diff --git a/src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py b/src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py index b47974631..6c310a5fa 100644 --- a/src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py +++ b/src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py @@ -20,7 +20,7 @@ import argparse import pytorch_lightning as pl import torch -from . import LongformerForQuestionAnswering, LongformerModel +from transformers import LongformerForQuestionAnswering, LongformerModel class LightningModel(pl.LightningModule): diff --git a/src/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py index 4034e7202..3b81362b2 100755 --- a/src/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py +++ b/src/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py @@ -16,14 +16,14 @@ import argparse -import logging import torch -from . 
import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert +from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert +from transformers.utils import logging -logging.basicConfig(level=logging.INFO) +logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path): diff --git a/src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py b/src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py index 7144fc1b2..0ab653e9a 100644 --- a/src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py +++ b/src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py @@ -17,7 +17,7 @@ import os from pathlib import Path from typing import List, Tuple -from .convert_marian_to_pytorch import ( +from transformers.models.marian.convert_marian_to_pytorch import ( FRONT_MATTER_TEMPLATE, _parse_readme, convert_all_sentencepiece_models, diff --git a/src/transformers/models/marian/convert_marian_to_pytorch.py b/src/transformers/models/marian/convert_marian_to_pytorch.py index 5c47c76d8..a7faef942 100644 --- a/src/transformers/models/marian/convert_marian_to_pytorch.py +++ b/src/transformers/models/marian/convert_marian_to_pytorch.py @@ -26,8 +26,8 @@ import numpy as np import torch from tqdm import tqdm -from ...hf_api import HfApi -from . 
import MarianConfig, MarianMTModel, MarianTokenizer +from transformers import MarianConfig, MarianMTModel, MarianTokenizer +from transformers.hf_api import HfApi def remove_suffix(text: str, suffix: str): diff --git a/src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py b/src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py index 146a34fd3..cb4c7c9d7 100644 --- a/src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py +++ b/src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py @@ -16,9 +16,8 @@ import argparse import torch -from ..bart import BartForConditionalGeneration -from ..bart.convert_bart_original_pytorch_checkpoint_to_pytorch import remove_ignore_keys_ -from . import MBartConfig +from transformers import BartForConditionalGeneration, MBartConfig +from transformers.models.bart.convert_bart_original_pytorch_checkpoint_to_pytorch import remove_ignore_keys_ def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path, hf_config_path="facebook/mbart-large-en-ro"): diff --git a/src/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py index 5ba8bc493..ce5396a93 100644 --- a/src/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py +++ b/src/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py @@ -16,8 +16,8 @@ import argparse import torch -from ...utils import logging -from . 
import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert +from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert +from transformers.utils import logging logging.set_verbosity_info() diff --git a/src/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py b/src/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py index c6043f216..9254a0ba9 100644 --- a/src/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py +++ b/src/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py @@ -22,8 +22,8 @@ import tensorflow as tf import torch from tqdm import tqdm -from . import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer -from .configuration_pegasus import DEFAULTS, task_specific_params +from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer +from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params PATTERNS = [ diff --git a/src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py index 1f4082cbe..cbd8c4995 100644 --- a/src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py @@ -19,6 +19,8 @@ import argparse import torch +from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging + # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( @@ -28,8 +30,6 @@ from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) -from . 
import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging - logger = logging.get_logger(__name__) logging.set_verbosity_info() diff --git a/src/transformers/models/reformer/convert_reformer_trax_checkpoint_to_pytorch.py b/src/transformers/models/reformer/convert_reformer_trax_checkpoint_to_pytorch.py index 23adf7e74..ec58e2f91 100755 --- a/src/transformers/models/reformer/convert_reformer_trax_checkpoint_to_pytorch.py +++ b/src/transformers/models/reformer/convert_reformer_trax_checkpoint_to_pytorch.py @@ -21,8 +21,8 @@ import pickle import numpy as np import torch -from ...utils import logging -from . import ReformerConfig, ReformerModelWithLMHead +from transformers import ReformerConfig, ReformerModelWithLMHead +from transformers.utils import logging logging.set_verbosity_info() diff --git a/src/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py index 68c853107..e4d95354f 100644 --- a/src/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py @@ -24,9 +24,15 @@ from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version -from ...models.bert.modeling_bert import BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput -from ...utils import logging -from .modeling_roberta import RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification +from transformers import RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification +from transformers.models.bert.modeling_bert import ( + BertIntermediate, + BertLayer, + BertOutput, + BertSelfAttention, + BertSelfOutput, +) +from transformers.utils import logging if version.parse(fairseq.__version__) < 
version.parse("0.9.0"): diff --git a/src/transformers/models/t5/convert_t5_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/t5/convert_t5_original_tf_checkpoint_to_pytorch.py index 5a702eab9..e38680df8 100755 --- a/src/transformers/models/t5/convert_t5_original_tf_checkpoint_to_pytorch.py +++ b/src/transformers/models/t5/convert_t5_original_tf_checkpoint_to_pytorch.py @@ -17,8 +17,8 @@ import argparse -from ...utils import logging -from . import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5 +from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5 +from transformers.utils import logging logging.set_verbosity_info() diff --git a/src/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py index 6033dc863..63beedea3 100644 --- a/src/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py +++ b/src/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py @@ -17,8 +17,7 @@ import argparse -from ...utils import logging -from . import ( +from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, @@ -27,6 +26,7 @@ from . import ( TapasTokenizer, load_tf_weights_in_tapas, ) +from transformers.utils import logging logging.set_verbosity_info() diff --git a/src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py index ea43e10aa..26355455f 100755 --- a/src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py +++ b/src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py @@ -22,11 +22,11 @@ import sys import torch -from ...file_utils import CONFIG_NAME, WEIGHTS_NAME -from ...utils import logging -from . 
import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl -from . import tokenization_transfo_xl as data_utils -from .tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES +from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl +from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME +from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils +from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES +from transformers.utils import logging logging.set_verbosity_info() diff --git a/src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py index 8e50b5983..82e5e24d3 100755 --- a/src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py @@ -21,9 +21,9 @@ import json import numpy import torch -from ...file_utils import CONFIG_NAME, WEIGHTS_NAME -from ...utils import logging -from .tokenization_xlm import VOCAB_FILES_NAMES +from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME +from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES +from transformers.utils import logging logging.set_verbosity_info() diff --git a/src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py index a23267d70..0426b35c7 100755 --- a/src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py +++ b/src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py @@ -20,15 +20,15 @@ import os import torch -from ...file_utils import CONFIG_NAME, WEIGHTS_NAME -from ...utils import logging -from . 
import ( +from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) +from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME +from transformers.utils import logging GLUE_TASKS_NUM_LABELS = {