From b460c3089367f3fadd40aa2cb3808ee370aa61e1 Mon Sep 17 00:00:00 2001
From: isdanni
Date: Fri, 13 Oct 2023 16:33:03 +0000
Subject: [PATCH] [BE] Enable Ruff's Flake8 PYI042 (#111114)

Enable [snake-case-type-alias (PYI042)](https://docs.astral.sh/ruff/rules/snake-case-type-alias/)

Link: #110950
Pull Request resolved: https://github.com/pytorch/pytorch/pull/111114
Approved by: https://github.com/albanD
---
 pyproject.toml              | 1 -
 torch/optim/adadelta.pyi    | 4 ++--
 torch/optim/adagrad.pyi     | 4 ++--
 torch/optim/adam.py         | 4 ++--
 torch/optim/adam.pyi        | 4 ++--
 torch/optim/adamax.pyi      | 4 ++--
 torch/optim/adamw.py        | 4 ++--
 torch/optim/adamw.pyi       | 4 ++--
 torch/optim/asgd.pyi        | 4 ++--
 torch/optim/lbfgs.pyi       | 4 ++--
 torch/optim/nadam.pyi       | 4 ++--
 torch/optim/optimizer.py    | 4 ++--
 torch/optim/radam.pyi       | 4 ++--
 torch/optim/rmsprop.pyi     | 4 ++--
 torch/optim/rprop.pyi       | 4 ++--
 torch/optim/sgd.pyi         | 4 ++--
 torch/optim/sparse_adam.pyi | 4 ++--
 17 files changed, 32 insertions(+), 33 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 8fad0c87bda..674b20d3025 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -52,7 +52,6 @@ ignore = [
     "PYI034",
     "PYI036",
     "PYI041",
-    "PYI042",
     "PYI045",
     "PYI056",
     "SIM102", "SIM103", "SIM112",  # flake8-simplify code styles
diff --git a/torch/optim/adadelta.pyi b/torch/optim/adadelta.pyi
index 342bf4a338d..0f475331c16 100644
--- a/torch/optim/adadelta.pyi
+++ b/torch/optim/adadelta.pyi
@@ -1,9 +1,9 @@
-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class Adadelta(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         rho: float = ...,
         eps: float = ...,
diff --git a/torch/optim/adagrad.pyi b/torch/optim/adagrad.pyi
index 5dc2be2f54c..4557ece1417 100644
--- a/torch/optim/adagrad.pyi
+++ b/torch/optim/adagrad.pyi
@@ -1,9 +1,9 @@
-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class Adagrad(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         lr_decay: float = ...,
         weight_decay: float = ...,
diff --git a/torch/optim/adam.py b/torch/optim/adam.py
index ce735ebd161..818b6ddc086 100644
--- a/torch/optim/adam.py
+++ b/torch/optim/adam.py
@@ -2,7 +2,7 @@ from typing import List, Optional, Union, Tuple

 import torch
 from torch import Tensor
-from .optimizer import (Optimizer, params_t, _use_grad_for_differentiable, _get_value,
+from .optimizer import (Optimizer, ParamsT, _use_grad_for_differentiable, _get_value,
                         _stack_if_compiling, _dispatch_sqrt, _default_to_fused_or_foreach,
                         _capturable_doc, _differentiable_doc, _foreach_doc, _fused_doc,
                         _maximize_doc)
@@ -13,7 +13,7 @@ __all__ = ['Adam', 'adam']

 class Adam(Optimizer):
     def __init__(self,
-                 params: params_t,
+                 params: ParamsT,
                  lr: Union[float, Tensor] = 1e-3,
                  betas: Tuple[float, float] = (0.9, 0.999),
                  eps: float = 1e-8,
diff --git a/torch/optim/adam.pyi b/torch/optim/adam.pyi
index 1edfac2479f..aef8ed69a9c 100644
--- a/torch/optim/adam.pyi
+++ b/torch/optim/adam.pyi
@@ -2,12 +2,12 @@ from typing import Optional, Tuple, Union

 from torch import Tensor

-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class Adam(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: Union[float, Tensor] = 1e-3,
         betas: Tuple[float, float] = (0.9, 0.999),
         eps: float = 1e-8,
diff --git a/torch/optim/adamax.pyi b/torch/optim/adamax.pyi
index ae46ca2b737..d38cfaefe38 100644
--- a/torch/optim/adamax.pyi
+++ b/torch/optim/adamax.pyi
@@ -1,11 +1,11 @@
 from typing import Tuple

-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class Adamax(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         betas: Tuple[float, float] = ...,
         eps: float = ...,
diff --git a/torch/optim/adamw.py b/torch/optim/adamw.py
index 0a834c2bbc0..cc280dd0126 100644
--- a/torch/optim/adamw.py
+++ b/torch/optim/adamw.py
@@ -2,7 +2,7 @@ import torch
 from torch import Tensor
 from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _dispatch_sqrt,
                         _stack_if_compiling, _capturable_doc, _differentiable_doc, _foreach_doc,
-                        _fused_doc, _maximize_doc, _default_to_fused_or_foreach, params_t)
+                        _fused_doc, _maximize_doc, _default_to_fused_or_foreach, ParamsT)
 from typing import List, Optional, Tuple, Union
 from torch.utils._foreach_utils import _get_fused_kernels_supported_devices

@@ -12,7 +12,7 @@ __all__ = ["AdamW", "adamw"]
 class AdamW(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: Union[float, Tensor] = 1e-3,
         betas: Tuple[float, float] = (0.9, 0.999),
         eps: float = 1e-8,
diff --git a/torch/optim/adamw.pyi b/torch/optim/adamw.pyi
index 5b7ce3428f1..17c35ebec8a 100644
--- a/torch/optim/adamw.pyi
+++ b/torch/optim/adamw.pyi
@@ -2,12 +2,12 @@ from typing import Optional, Tuple, Union

 from torch import Tensor

-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class AdamW(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: Union[float, Tensor] = 1e-3,
         betas: Tuple[float, float] = (0.9, 0.999),
         eps: float = 1e-8,
diff --git a/torch/optim/asgd.pyi b/torch/optim/asgd.pyi
index c7d947e944a..634b0d162ce 100644
--- a/torch/optim/asgd.pyi
+++ b/torch/optim/asgd.pyi
@@ -1,9 +1,9 @@
-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class ASGD(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         lambd: float = ...,
         alpha: float = ...,
diff --git a/torch/optim/lbfgs.pyi b/torch/optim/lbfgs.pyi
index 3401867e22a..c7c0ac06088 100644
--- a/torch/optim/lbfgs.pyi
+++ b/torch/optim/lbfgs.pyi
@@ -1,11 +1,11 @@
 from typing import Optional

-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class LBFGS(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         max_iter: int = ...,
         max_eval: Optional[int] = ...,
diff --git a/torch/optim/nadam.pyi b/torch/optim/nadam.pyi
index 6efb9ae19d6..f62e188b3d7 100644
--- a/torch/optim/nadam.pyi
+++ b/torch/optim/nadam.pyi
@@ -1,11 +1,11 @@
 from typing import Tuple

-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class NAdam(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         betas: Tuple[float, float] = ...,
         eps: float = ...,
diff --git a/torch/optim/optimizer.py b/torch/optim/optimizer.py
index 9115fcf5c4d..ffeb6598f5b 100644
--- a/torch/optim/optimizer.py
+++ b/torch/optim/optimizer.py
@@ -204,7 +204,7 @@ def register_optimizer_step_post_hook(hook: GlobalOptimizerPostHook) -> Removabl
     _global_optimizer_post_hooks[handle.id] = hook
     return handle

-params_t: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]]
+ParamsT: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]]

 _P = ParamSpec("_P")
 R = TypeVar("R")
@@ -236,7 +236,7 @@ class Optimizer:
     _optimizer_load_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]'
     _optimizer_load_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]'

-    def __init__(self, params: params_t, defaults: Dict[str, Any]) -> None:
+    def __init__(self, params: ParamsT, defaults: Dict[str, Any]) -> None:
         torch._C._log_api_usage_once("python.optimizer")
         self.defaults = defaults
         self._optimizer_step_pre_hooks = OrderedDict()
diff --git a/torch/optim/radam.pyi b/torch/optim/radam.pyi
index 19750eab018..b001376b05e 100644
--- a/torch/optim/radam.pyi
+++ b/torch/optim/radam.pyi
@@ -1,11 +1,11 @@
 from typing import Tuple

-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class RAdam(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         betas: Tuple[float, float] = ...,
         eps: float = ...,
diff --git a/torch/optim/rmsprop.pyi b/torch/optim/rmsprop.pyi
index 168d0bd142a..f206d542dce 100644
--- a/torch/optim/rmsprop.pyi
+++ b/torch/optim/rmsprop.pyi
@@ -1,9 +1,9 @@
-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class RMSprop(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         alpha: float = ...,
         eps: float = ...,
diff --git a/torch/optim/rprop.pyi b/torch/optim/rprop.pyi
index 17c55b68e09..fd0c6ba2091 100644
--- a/torch/optim/rprop.pyi
+++ b/torch/optim/rprop.pyi
@@ -1,11 +1,11 @@
 from typing import Tuple

-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class Rprop(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         etas: Tuple[float, float] = ...,
         step_sizes: Tuple[float, float] = ...,
diff --git a/torch/optim/sgd.pyi b/torch/optim/sgd.pyi
index 47dcb42690d..48721a434bb 100644
--- a/torch/optim/sgd.pyi
+++ b/torch/optim/sgd.pyi
@@ -1,9 +1,9 @@
-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class SGD(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float,
         momentum: float = ...,
         dampening: float = ...,
diff --git a/torch/optim/sparse_adam.pyi b/torch/optim/sparse_adam.pyi
index e7d6dc5c7cb..a84001d590b 100644
--- a/torch/optim/sparse_adam.pyi
+++ b/torch/optim/sparse_adam.pyi
@@ -1,11 +1,11 @@
 from typing import Tuple

-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class SparseAdam(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         betas: Tuple[float, float] = ...,
         eps: float = ...,
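
For context, a minimal sketch of the rename this patch applies: PYI042 (snake-case-type-alias) flags snake_case names bound with `TypeAlias`, so the alias in `torch/optim/optimizer.py` moves from `params_t` to the CamelCase `ParamsT`, and every optimizer stub swaps its import accordingly. The `build_adam` helper below is hypothetical and only illustrates how the alias is used in a signature; it is not part of the patch.

```python
from typing import Any, Dict, Iterable, Union

import torch
from typing_extensions import TypeAlias

# Old spelling: a snake_case type alias, the pattern PYI042 flags.
# params_t: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]]

# New spelling adopted by this patch: CamelCase, which satisfies PYI042.
ParamsT: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]]

def build_adam(params: ParamsT, lr: float = 1e-3) -> torch.optim.Adam:
    # Hypothetical helper: annotate parameter collections with the alias,
    # the same way the optimizer constructors in this patch do.
    return torch.optim.Adam(params, lr=lr)
```

Because only the alias name changes, each downstream `.pyi` file needs just the two-line substitution shown above (the import and the `params:` annotation); runtime behavior is unaffected.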