From 18dacf7e793ec3ca3fb5c3d1e85485068e004d6e Mon Sep 17 00:00:00 2001 From: Jack Khuu Date: Mon, 26 Jun 2023 13:06:28 -0700 Subject: [PATCH] [Specialized Kernel] Update yaml syntax to use kernel instead of dispatch (#104070) Based on this [code search](https://fburl.com/code/gjcnw8ly) (*.yaml with `dispatch: CPU:`), update all files found to use ``` kernels: - arg_meta: None kernel_name: ``` instead of ``` dispatch: CPU: ``` --- ## Code changes: - `fbcode/executorch/codegen/tools/gen_oplist.py` - Strip ET specific fields prior to calling parse_native_yaml_struct --- ## Files edited that are not `*functions.yaml` or `custom_ops.yaml` - fbcode/executorch/kernels/optimized/optimized.yaml - fbcode/executorch/kernels/quantized/quantized.yaml - fbcode/executorch/kernels/test/custom_kernel_example/my_functions.yaml --- ## Found Files that were not edited **Dispatched to more than just CPU** - fbcode/caffe2/aten/src/ATen/native/native_functions.yaml - xplat/caffe2/aten/src/ATen/native/native_functions.yaml - xros/third-party/caffe2/caffe2/aten/src/ATen/native/native_functions.yaml **Grouped ops.yaml path** - fbcode/on_device_ai/Assistant/Jarvis/min_runtime/operators/ops.yaml --- **Design Doc:** https://docs.google.com/document/d/1gq4Wz2R6verKJ2EFseLyPdAF0wqomnCrVDDJpRkYsRw/edit?kh_source=GDOCS#heading=h.8raqyft9y50 Differential Revision: [D46952067](https://our.internmc.facebook.com/intern/diff/D46952067/) **NOTE FOR REVIEWERS**: This PR has internal Meta-specific changes or comments, please review them on [Phabricator](https://our.internmc.facebook.com/intern/diff/D46952067/)! 
Pull Request resolved: https://github.com/pytorch/pytorch/pull/104070 Approved by: https://github.com/larryliu0820 --- test/edge/custom_ops.yaml | 5 +++-- torchgen/executorch/parse.py | 13 ++++++++++--- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/test/edge/custom_ops.yaml b/test/edge/custom_ops.yaml index b85fd12bd32..2ff2db88f97 100644 --- a/test/edge/custom_ops.yaml +++ b/test/edge/custom_ops.yaml @@ -1,3 +1,4 @@ - func: custom::add_3.out(Tensor a, Tensor b, Tensor c, *, Tensor(a!) out) -> Tensor(a!) - dispatch: - CPU: custom::add_3_out + kernels: + - arg_meta: null + kernel_name: custom::add_3_out diff --git a/torchgen/executorch/parse.py b/torchgen/executorch/parse.py index b7ae5b3b6df..f6f30b4554a 100644 --- a/torchgen/executorch/parse.py +++ b/torchgen/executorch/parse.py @@ -130,9 +130,7 @@ def parse_et_yaml( et_kernel = extract_kernel_fields(es) # Remove ET specific fields from entries for BC compatibility - for entry in es: - for field in ET_FIELDS: - entry.pop(field, None) + strip_et_fields(es) native_yaml = parse_native_yaml( path, @@ -142,3 +140,12 @@ def parse_et_yaml( loaded_yaml=es, ) return native_yaml.native_functions, et_kernel + + +def strip_et_fields(es: object) -> None: + """Given a loaded yaml representing a list of operators, + remove ET specific fields from every entry for BC compatibility + """ + for entry in es: # type: ignore[attr-defined] + for field in ET_FIELDS: + entry.pop(field, None)