mirror of
https://github.com/saymrwulf/transformers.git
synced 2026-05-14 20:58:08 +00:00
Enable pytest live log and show warning logs on GitHub Actions CI runs (#35912)
* fix

* remove

* fix

---------

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
This commit is contained in:
parent
48a309d0d2
commit
3897f2caf8
4 changed files with 13 additions and 6 deletions
|
|
@@ -52,3 +52,5 @@ markers = [
|
|||
"bitsandbytes: select (or deselect with `not`) bitsandbytes integration tests",
|
||||
"generate: marks tests that use the GenerationTesterMixin"
|
||||
]
|
||||
log_cli = 1
|
||||
log_cli_level = "WARNING"
|
||||
|
|
@@ -785,8 +785,7 @@ class GenerationConfig(PushToHubMixin):
|
|||
for arg_name in ("cache_implementation", "cache_config", "return_legacy_cache"):
|
||||
if getattr(self, arg_name) is not None:
|
||||
logger.warning_once(
|
||||
no_cache_warning.format(cache_arg=arg_name, cache_arg_value=getattr(self, arg_name)),
|
||||
UserWarning,
|
||||
no_cache_warning.format(cache_arg=arg_name, cache_arg_value=getattr(self, arg_name))
|
||||
)
|
||||
|
||||
# 6. check watermarking arguments
|
||||
|
|
|
|||
|
|
@@ -101,7 +101,8 @@ def _configure_library_root_logger() -> None:
|
|||
formatter = logging.Formatter("[%(levelname)s|%(pathname)s:%(lineno)s] %(asctime)s >> %(message)s")
|
||||
_default_handler.setFormatter(formatter)
|
||||
|
||||
library_root_logger.propagate = False
|
||||
is_ci = os.getenv("CI") is not None and os.getenv("CI").upper() in {"1", "ON", "YES", "TRUE"}
|
||||
library_root_logger.propagate = True if is_ci else False
|
||||
|
||||
|
||||
def _reset_library_root_logger() -> None:
|
||||
|
|
|
|||
|
|
@@ -16,6 +16,7 @@
|
|||
import unittest
|
||||
from queue import Empty
|
||||
from threading import Thread
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
|
|
@@ -27,6 +28,7 @@ from transformers import (
|
|||
is_torch_available,
|
||||
)
|
||||
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
|
||||
from transformers.utils.logging import _get_library_root_logger
|
||||
|
||||
from ..test_modeling_common import ids_tensor
|
||||
|
||||
|
|
@@ -102,9 +104,12 @@ class StreamerTester(unittest.TestCase):
|
|||
model.config.eos_token_id = -1
|
||||
|
||||
input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
|
||||
with CaptureStdout() as cs:
|
||||
streamer = TextStreamer(tokenizer, skip_special_tokens=True)
|
||||
model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
|
||||
|
||||
root = _get_library_root_logger()
|
||||
with patch.object(root, "propagate", False):
|
||||
with CaptureStdout() as cs:
|
||||
streamer = TextStreamer(tokenizer, skip_special_tokens=True)
|
||||
model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
|
||||
|
||||
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
|
||||
# re-tokenized, must only contain one token
|
||||
|
|
|
|||
Loading…
Reference in a new issue