[BE][Ez]: Enable B019 - flags memory leaks through LRU cache on method (#127686)

Flags potential memory leaks caused by `functools.lru_cache` on methods and will hopefully make future contributors rethink this pattern, which keeps cached instances alive and can leak memory. Adds `# noqa: B019` to the violations we currently have (these should be fixed later).
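For context (not part of this diff), a minimal sketch of why B019 flags this pattern, using a hypothetical `Widget` class: `functools.lru_cache` on an instance method stores every bound call, including `self`, in a cache attached to the class, so the instances can never be garbage collected.

    import functools

    class Widget:
        def __init__(self, data):
            self.data = tuple(data)

        # B019: the cache lives on the class and each entry holds a strong
        # reference to `self`, so Widget instances are kept alive for the
        # lifetime of the interpreter.
        @functools.lru_cache(None)  # noqa: B019
        def scaled_sum(self, x):
            return sum(self.data) * x

    # One leak-free alternative: cache a module-level function keyed only by
    # hashable arguments and call it from the method.
    @functools.lru_cache(None)
    def _scaled_sum(data, x):
        return sum(data) * x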

Pull Request resolved: https://github.com/pytorch/pytorch/pull/127686
Approved by: https://github.com/c-p-i-o
Author: Aaron Gokaslan, 2024-06-01 17:19:24 +00:00; committed by PyTorch MergeBot
parent 22f392ba40
commit edffb28d39
6 changed files with 6 additions and 7 deletions


@@ -48,7 +48,6 @@ ignore = [
     # these ignores are from flake8-bugbear; please fix!
     "B007", "B008", "B017",
     "B018",  # Useless expression
-    "B019",
     "B023",
     "B028",  # No explicit `stacklevel` keyword argument found
     "E402",


@@ -258,7 +258,7 @@ class LocalCache(CacheBase):
 class PersistentCache(CacheBase):
-    @functools.lru_cache(None)
+    @functools.lru_cache(None)  # noqa: B019
     def get_global_cache(self):
         global_cache_path = self.get_global_cache_path()
         if global_cache_path is None or not global_cache_path.is_file():
@@ -1292,7 +1292,7 @@ cdll.LoadLibrary("__lib_path__")
     def __hash__(self) -> int:
         return hash(str(self))

-    @functools.lru_cache(None)
+    @functools.lru_cache(None)  # noqa: B019
     def __bool__(self) -> bool:
         if config.cpp.vec_isa_ok is not None:
             return config.cpp.vec_isa_ok


@@ -1533,7 +1533,7 @@ class CppWrapperCpu(WrapperCodeGen):
         else:
             return LAYOUT_TO_ATEN[layout]

-    @functools.lru_cache(None)
+    @functools.lru_cache(None)  # noqa: B019
     def codegen_int_array_var(
         self,
         int_array: str,


@@ -76,7 +76,7 @@ class CppWrapperCuda(CppWrapperCpu):
         self.prefix.writeline("\n")
         return super().generate(is_inference)

-    @functools.lru_cache(None)
+    @functools.lru_cache(None)  # noqa: B019
     def generate_load_kernel_once(
         self,
         name: str,


@@ -772,7 +772,7 @@ class ExternKernelChoice:
     def call_name(self):
         return f"extern_kernels.{self.name}"

-    @functools.lru_cache(None)
+    @functools.lru_cache(None)  # noqa: B019
     def hash_key(self):
         fn = self.to_callable()
         parts = [


@@ -88,7 +88,7 @@ class ShardingPropagator:
         if schema_info is not None:
             self.op_to_schema_info[op_overload] = schema_info

-    @lru_cache
+    @lru_cache  # noqa: B019
     def _propagate_tensor_meta(
         self, op_schema: OpSchema
     ) -> Union[None, TensorMeta, Sequence[Optional[TensorMeta]]]: