Enable pytest live log and show warning logs on GitHub Actions CI runs (#35912)

* fix

* remove

* fix

---------

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
This commit is contained in:
Yih-Dar 2025-02-10 13:36:20 +01:00 committed by GitHub
parent 48a309d0d2
commit 3897f2caf8
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 13 additions and 6 deletions

View file

@@ -52,3 +52,5 @@ markers = [
"bitsandbytes: select (or deselect with `not`) bitsandbytes integration tests",
"generate: marks tests that use the GenerationTesterMixin"
]
log_cli = 1
log_cli_level = "WARNING"

View file

@@ -785,8 +785,7 @@ class GenerationConfig(PushToHubMixin):
for arg_name in ("cache_implementation", "cache_config", "return_legacy_cache"):
if getattr(self, arg_name) is not None:
logger.warning_once(
no_cache_warning.format(cache_arg=arg_name, cache_arg_value=getattr(self, arg_name)),
UserWarning,
no_cache_warning.format(cache_arg=arg_name, cache_arg_value=getattr(self, arg_name))
)
# 6. check watermarking arguments

View file

@@ -101,7 +101,8 @@ def _configure_library_root_logger() -> None:
formatter = logging.Formatter("[%(levelname)s|%(pathname)s:%(lineno)s] %(asctime)s >> %(message)s")
_default_handler.setFormatter(formatter)
library_root_logger.propagate = False
is_ci = os.getenv("CI") is not None and os.getenv("CI").upper() in {"1", "ON", "YES", "TRUE"}
library_root_logger.propagate = True if is_ci else False
def _reset_library_root_logger() -> None:

View file

@@ -16,6 +16,7 @@
import unittest
from queue import Empty
from threading import Thread
from unittest.mock import patch
import pytest
@@ -27,6 +28,7 @@ from transformers import (
is_torch_available,
)
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from transformers.utils.logging import _get_library_root_logger
from ..test_modeling_common import ids_tensor
@@ -102,6 +104,9 @@ class StreamerTester(unittest.TestCase):
model.config.eos_token_id = -1
input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
root = _get_library_root_logger()
with patch.object(root, "propagate", False):
with CaptureStdout() as cs:
streamer = TextStreamer(tokenizer, skip_special_tokens=True)
model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)