import functools
import logging
import os
import sys
import tempfile
from typing import Any, Dict

import torch

log = logging.getLogger(__name__)

# this arbitrary-looking assortment of functionality is provided here
# to have a central place for overrideable behavior. The motivating
# use is the FB build environment, where this source file is replaced
# by an equivalent.
if torch._running_with_deploy():
    # __file__ is meaningless in the context of frozen torch used in torch deploy.
    # setting empty torch_parent should allow below functions to operate without crashing,
    # but it's unclear if there is a valid use case for them in the context of deploy.
    torch_parent = ""
else:
    if os.path.basename(os.path.dirname(__file__)) == "shared":
        torch_parent = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    else:
        torch_parent = os.path.dirname(os.path.dirname(__file__))


def get_file_path(*path_components: str) -> str:
    return os.path.join(torch_parent, *path_components)
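
# Usage sketch (illustrative only; the exact layout depends on the install):
#
#   get_file_path("torch", "bin", "torch_shm_manager")
#   -> os.path.join(torch_parent, "torch", "bin", "torch_shm_manager")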
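

# Like get_file_path, but without the torch_parent prefix; presumably kept
# separate so internal builds can override the two resolution behaviors
# independently.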
def get_file_path_2(*path_components: str) -> str:
    return os.path.join(*path_components)


def get_writable_path(path: str) -> str:
    if os.access(path, os.W_OK):
        return path
    return tempfile.mkdtemp(suffix=os.path.basename(path))
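
# Usage sketch (hypothetical paths): a read-only directory falls back to a
# fresh temp dir carrying the same basename, e.g.
#
#   get_writable_path("/opt/conda/lib/python3.10/site-packages/torch/test")
#   -> "/tmp/tmpa1b2c3test"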


def prepare_multiprocessing_environment(path: str) -> None:
    pass


def resolve_library_path(path: str) -> str:
    return os.path.realpath(path)
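

# Called when an operator is missing an abstract impl (a.k.a. fake/meta impl),
# e.g. during fake-tensor tracing. If the module that should register it is
# already imported, suggesting an import would not help, hence the shorter
# message in the first branch.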
def throw_abstract_impl_not_imported_error(opname, module, context):
    if module in sys.modules:
        raise NotImplementedError(
            f"{opname}: We could not find the abstract impl for this operator. "
        )
    else:
        raise NotImplementedError(
            f"{opname}: We could not find the abstract impl for this operator. "
            f"The operator specified that you may need to import the '{module}' "
            f"Python module to load the abstract impl. {context}"
        )


# Meta only, see
# https://www.internalfb.com/intern/wiki/ML_Workflow_Observability/User_Guides/Adding_instrumentation_to_your_code/
#
# This will cause an event to get logged to Scuba via the signposts API. You
# can view samples of these events at https://fburl.com/scuba/workflow_signpost/zh9wmpqs
# We log to the "torch" subsystem, along with the category and name you provide
# here. Each of the arguments translates into a Scuba column. We're still
# figuring out local conventions in PyTorch, but category should be something
# like "dynamo" or "inductor", and name should be a specific string describing
# what kind of event happened.
#
# Killswitch is at
# https://www.internalfb.com/intern/justknobs/?name=pytorch%2Fsignpost#event
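#
# Example (illustrative values):
#
#   signpost_event("dynamo", "compile_frame", {"co_name": "forward", "rank": 0})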
def signpost_event(category: str, name: str, parameters: Dict[str, Any]):
    log.info("%s %s: %r", category, name, parameters)
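

# Record a structured compilation event. In OSS this just logs; internal builds
# replace it to capture per-frame metrics such as backend_compile time,
# entire_frame_compile time, cache size, and guard/graph node counts.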
def log_compilation_event(metrics):
    log.info("%s", metrics)


def upload_graph(graph):
    pass


def set_pytorch_distributed_envs_from_justknobs():
    pass


def log_export_usage(**kwargs):
    pass


def justknobs_check(name: str) -> bool:
    """
    This function can be used to killswitch functionality in FB prod,
    where you can toggle this value to False in JK without having to
    do a code push. In OSS, we always have everything turned on all
    the time, because downstream users can simply choose to not update
    PyTorch. (If more fine-grained enable/disable is needed, we could
    potentially have a map we look the name up in to toggle behavior. But
    the point is that it's all tied to source code in OSS, since there's
    no live server to query.)

    This is the bare minimum functionality I needed to implement some
    killswitches. We have a more detailed plan at
    https://docs.google.com/document/d/1Ukerh9_42SeGh89J-tGtecpHBPwGlkQ043pddkKb3PU/edit
    In particular, in some circumstances it may be necessary to read in
    a knob once at process start, and then use it consistently for the
    rest of the process. Future functionality will codify these patterns
    into a better high level API.

    WARNING: Do NOT call this function at module import time; JK is not
    fork safe and you will break anyone who forks the process and then
    hits JK again.
    """
    return True
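

# Usage sketch (hypothetical knob name; in OSS this branch is always taken):
#
#   if justknobs_check("pytorch/compiler:some_feature"):
#       ...  # feature-gated path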
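

# Max SM clock rate of the first visible GPU, cached for the life of the
# process. Relies on triton's nvsmi helper, which shells out to nvidia-smi, so
# it assumes triton is installed and an NVIDIA GPU is present.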
@functools.lru_cache(None)
def max_clock_rate():
    from triton.testing import nvsmi

    return nvsmi(["clocks.max.sm"])[0]


TEST_MASTER_ADDR = "127.0.0.1"
TEST_MASTER_PORT = 29500
# USE_GLOBAL_DEPS controls whether __init__.py tries to load
# libtorch_global_deps, see Note [Global dependencies]
USE_GLOBAL_DEPS = True
# USE_RTLD_GLOBAL_WITH_LIBTORCH controls whether __init__.py tries to load
# _C.so with RTLD_GLOBAL during the call to dlopen.
USE_RTLD_GLOBAL_WITH_LIBTORCH = False