Mirror of https://github.com/saymrwulf/transformers.git (synced 2026-05-14 20:58:08 +00:00)
Remove unused GPTModelTester.
It isn't imported anywhere.
This commit is contained in:
parent 345c23a60f
commit daf8bebcdd

1 changed file with 1 addition and 191 deletions
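Before a removal like this, one can confirm that nothing in the repository still references the symbol. A minimal sketch of such a check in Python; the helper name and the repository-root default "." are illustrative assumptions, not part of this commit:

from pathlib import Path

def find_references(symbol, root="."):
    # Collect every Python file under `root` whose text mentions `symbol`.
    hits = []
    for path in Path(root).rglob("*.py"):
        if symbol in path.read_text(encoding="utf-8", errors="ignore"):
            hits.append(str(path))
    return hits

# Before this commit, the only hit should be the test module that defines the
# class (GPTModelTester isn't imported anywhere); after it, there should be none.
print(find_references("GPTModelTester"))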
@@ -27,7 +27,7 @@ import uuid
 
 from transformers import is_torch_available
 
-from .utils import CACHE_DIR, require_torch, slow, torch_device
+from .utils import require_torch, slow, torch_device
 
 
 if is_torch_available():
@@ -612,196 +612,6 @@ class ModelTesterMixin:
             outputs = model(**inputs_dict)
 
 
-class GPTModelTester(ModelTesterMixin):
-    def __init__(
-        self,
-        parent,
-        batch_size=13,
-        seq_length=7,
-        is_training=True,
-        use_position_ids=True,
-        use_token_type_ids=True,
-        use_labels=True,
-        vocab_size=99,
-        n_positions=33,
-        hidden_size=32,
-        num_hidden_layers=5,
-        num_attention_heads=4,
-        n_choices=3,
-        type_sequence_label_size=2,
-        initializer_range=0.02,
-        num_labels=3,
-        scope=None,
-        config_class=None,
-        base_model_class=None,
-        lm_head_model_class=None,
-        double_head_model_class=None,
-    ):
-        self.parent = parent
-        self.batch_size = batch_size
-        self.seq_length = seq_length
-        self.is_training = is_training
-        self.use_position_ids = use_position_ids
-        self.use_token_type_ids = use_token_type_ids
-        self.use_labels = use_labels
-        self.vocab_size = vocab_size
-        self.n_positions = n_positions
-        self.hidden_size = hidden_size
-        self.num_hidden_layers = num_hidden_layers
-        self.num_attention_heads = num_attention_heads
-        self.n_choices = n_choices
-        self.type_sequence_label_size = type_sequence_label_size
-        self.initializer_range = initializer_range
-        self.num_labels = num_labels
-        self.scope = scope
-        self.config_class = config_class
-        self.base_model_class = base_model_class
-        self.lm_head_model_class = lm_head_model_class
-        self.double_head_model_class = double_head_model_class
-        self.all_model_classes = (base_model_class, lm_head_model_class, double_head_model_class)
-
-    def prepare_config_and_inputs(self):
-        total_num_tokens = self.vocab_size
-        input_ids = ids_tensor([self.batch_size, self.n_choices, self.seq_length], total_num_tokens)
-
-        position_ids = None
-        if self.use_position_ids:
-            position_ids = ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.n_positions)
-
-        token_type_ids = None
-        if self.use_token_type_ids:
-            total_voc = self.vocab_size
-            token_type_ids = ids_tensor([self.batch_size, self.n_choices, self.seq_length], total_voc)
-
-        mc_labels = None
-        lm_labels = None
-        mc_token_ids = None
-        if self.use_labels:
-            mc_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
-            lm_labels = ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.num_labels)
-            mc_token_ids = ids_tensor([self.batch_size, self.n_choices], self.seq_length)
-
-        config = self.config_class(
-            vocab_size=self.vocab_size,
-            n_positions=self.n_positions,
-            n_embd=self.hidden_size,
-            n_layer=self.num_hidden_layers,
-            n_head=self.num_attention_heads,
-            initializer_range=self.initializer_range,
-        )
-
-        return (config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids)
-
-    def create_and_check_base_model(
-        self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids
-    ):
-        model = self.base_model_class(config)
-        model.to(torch_device)
-        model.eval()
-
-        with torch.no_grad():
-            outputs = model(input_ids, position_ids, token_type_ids)
-            outputs = model(input_ids, position_ids)
-            outputs = model(input_ids)
-
-        hidden_state = outputs[0]
-        self.parent.assertListEqual(
-            list(hidden_state.size()), [self.batch_size, self.n_choices, self.seq_length, self.hidden_size]
-        )
-
-    def create_and_check_lm_head(
-        self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids
-    ):
-        model = self.lm_head_model_class(config)
-        model.to(torch_device)
-        model.eval()
-        with torch.no_grad():
-            outputs = model(input_ids, position_ids, token_type_ids, lm_labels)
-            loss, lm_logits = outputs[:2]
-
-        total_voc = self.vocab_size
-        self.parent.assertListEqual(
-            list(lm_logits.size()), [self.batch_size, self.n_choices, self.seq_length, total_voc]
-        )
-        self.parent.assertListEqual(list(loss.size()), [])
-
-    def create_and_check_presents(
-        self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids
-    ):
-        for model_class in self.all_model_classes:
-            model = model_class(config)
-            model.to(torch_device)
-            model.eval()
-            with torch.no_grad():
-                outputs = model(input_ids)
-            presents = outputs[-1]
-            self.parent.assertEqual(self.num_hidden_layers, len(presents))
-            self.parent.assertListEqual(
-                list(presents[0].size()),
-                [
-                    2,
-                    self.batch_size * self.n_choices,
-                    self.num_attention_heads,
-                    self.seq_length,
-                    self.hidden_size // self.num_attention_heads,
-                ],
-            )
-
-    def create_and_check_double_heads(
-        self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids
-    ):
-        model = self.double_head_model_class(config)
-        model.to(torch_device)
-        model.eval()
-        with torch.no_grad():
-            outputs = model(
-                input_ids,
-                mc_token_ids,
-                lm_labels=lm_labels,
-                mc_labels=mc_labels,
-                token_type_ids=token_type_ids,
-                position_ids=position_ids,
-            )
-        lm_loss, mc_loss, lm_logits, mc_logits = outputs[:4]
-        loss = [lm_loss, mc_loss]
-
-        total_voc = self.vocab_size
-        self.parent.assertListEqual(
-            list(lm_logits.size()), [self.batch_size, self.n_choices, self.seq_length, total_voc]
-        )
-        self.parent.assertListEqual(list(mc_logits.size()), [self.batch_size, self.n_choices])
-        self.parent.assertListEqual([list(l.size()) for l in loss], [[], []])
-
-    def create_and_check_model_from_pretrained(self):
-        for model_name in list(self.base_model_class.pretrained_model_archive_map.keys())[:1]:
-            model = self.base_model_class.from_pretrained(model_name, cache_dir=CACHE_DIR)
-            self.parent.assertIsNotNone(model)
-
-    def prepare_config_and_inputs_for_common(self):
-        config_and_inputs = self.prepare_config_and_inputs()
-        (config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids) = config_and_inputs
-        inputs_dict = {"input_ids": input_ids}
-        return config, inputs_dict
-
-    def run_common_tests(self, test_presents=False):
-        config_and_inputs = self.prepare_config_and_inputs()
-        self.create_and_check_base_model(*config_and_inputs)
-
-        config_and_inputs = self.prepare_config_and_inputs()
-        self.create_and_check_lm_head(*config_and_inputs)
-
-        config_and_inputs = self.prepare_config_and_inputs()
-        self.create_and_check_double_heads(*config_and_inputs)
-
-        if test_presents:
-            config_and_inputs = self.prepare_config_and_inputs()
-            self.create_and_check_presents(*config_and_inputs)
-
-    @slow
-    def run_slow_tests(self):
-        self.create_and_check_model_from_pretrained()
-
-
 class ConfigTester(object):
     def __init__(self, parent, config_class=None, **kwargs):
         self.parent = parent
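The retained context line `outputs = model(**inputs_dict)` shows the contract the shared ModelTesterMixin drives: each concrete tester supplies a `prepare_config_and_inputs_for_common` hook returning a config and a kwargs dict, which the mixin feeds to every class in `self.all_model_classes`. A minimal sketch of that contract; `MyConfig` is a hypothetical stand-in, not a class from this repository:

class MyModelTester(ModelTesterMixin):
    def prepare_config_and_inputs_for_common(self):
        # MyConfig is a hypothetical placeholder for a real model config class.
        config = MyConfig(vocab_size=99, hidden_size=32)
        # ids_tensor builds a random integer tensor of the given shape, as
        # used throughout these tests.
        input_ids = ids_tensor([13, 7], config.vocab_size)
        inputs_dict = {"input_ids": input_ids}
        # The mixin's common tests instantiate each model class and call
        # model(**inputs_dict), exactly as in the retained context line above.
        return config, inputs_dict

GPTModelTester implemented this same hook, but since no test ever imported or instantiated it, deleting the class whole is safe.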