pytorch/test
Masaki Kozuki 82bb06334d Update python binding for in-place foreach to return List[Tensor] (#121405)
Fixes #104817.
Takes over #118622.

```c++
// _foreach_atan_
// Python binding for torch._foreach_atan_: applies atan in place to every
// tensor in the `self` TensorList, then returns the (mutated) input list so
// the Python-level return type is List[Tensor] rather than None.
static PyObject * THPVariable__foreach_atan_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "_foreach_atan_(TensorList self)",
  }, /*traceable=*/false);

  ParsedArgs<1> parsed_args;
  auto _r = parser.parse(nullptr, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    // Defer to __torch_function__ overrides before touching the tensors.
    return handle_torch_function(_r, nullptr, args, kwargs, THPVariableFunctionsModule, "torch");
  }
  // aten::_foreach_atan_(Tensor(a!)[] self) -> ()
  auto dispatch__foreach_atan_ = [](at::TensorList self) -> void {
    pybind11::gil_scoped_release no_gil;  // release the GIL while the kernel runs
    at::_foreach_atan_(self);
  };
  dispatch__foreach_atan_(_r.tensorlist(0));
  // Hand back a new reference to the original `self` argument.
  // (Removed an unreachable `Py_RETURN_NONE;` that followed this return,
  // along with a stale commented-out lambda signature.)
  PyObject* self_tensorlist = _r.args[0];
  Py_INCREF(self_tensorlist);
  return self_tensorlist;
  END_HANDLE_TH_ERRORS
}
...
// _foreach_div_
// Python binding for the in-place torch._foreach_div_ overloads (ScalarList,
// Tensor, TensorList, Scalar divisors). Each overload divides the tensors in
// `self` in place and returns the input list, so Python sees List[Tensor].
static PyObject * THPVariable__foreach_div_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "_foreach_div_(TensorList self, ScalarList scalars)",
    "_foreach_div_(TensorList self, Tensor other)",
    "_foreach_div_(TensorList self, TensorList other)",
    "_foreach_div_(TensorList self, Scalar scalar)",
  }, /*traceable=*/false);

  ParsedArgs<2> parsed_args;
  auto _r = parser.parse(nullptr, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    // Defer to __torch_function__ overrides before dispatching.
    return handle_torch_function(_r, nullptr, args, kwargs, THPVariableFunctionsModule, "torch");
  }
  // Shared epilogue: return a new reference to the TensorList passed as `self`.
  auto return_self = [&_r]() -> PyObject* {
    PyObject* self_tensorlist = _r.args[0];
    Py_INCREF(self_tensorlist);
    return self_tensorlist;
  };
  switch (_r.idx) {
    case 0: {
      // aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
      auto dispatch_scalarlist = [](at::TensorList self, at::ArrayRef<at::Scalar> scalars) -> void {
        pybind11::gil_scoped_release no_gil;
        at::_foreach_div_(self, scalars);
      };
      dispatch_scalarlist(_r.tensorlist(0), _r.scalarlist(1));
      return return_self();
    }
    case 1: {
      // aten::_foreach_div_.Tensor(Tensor(a!)[] self, Tensor other) -> ()
      auto dispatch_tensor = [](at::TensorList self, const at::Tensor & other) -> void {
        pybind11::gil_scoped_release no_gil;
        at::_foreach_div_(self, other);
      };
      dispatch_tensor(_r.tensorlist(0), _r.tensor(1));
      return return_self();
    }
    case 2: {
      // aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()
      auto dispatch_list = [](at::TensorList self, at::TensorList other) -> void {
        pybind11::gil_scoped_release no_gil;
        at::_foreach_div_(self, other);
      };
      dispatch_list(_r.tensorlist(0), _r.tensorlist(1));
      return return_self();
    }
    case 3: {
      // aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
      auto dispatch_scalar = [](at::TensorList self, const at::Scalar & scalar) -> void {
        pybind11::gil_scoped_release no_gil;
        at::_foreach_div_(self, scalar);
      };
      dispatch_scalar(_r.tensorlist(0), _r.scalar(1));
      return return_self();
    }
  }
  // Unreachable in practice (the parser guarantees idx is 0..3), but keeps
  // the function well-formed for the compiler.
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/121405
Approved by: https://github.com/soulitzer
2024-03-08 21:00:01 +00:00
..
ao/sparsity
autograd
backends/xeon
benchmark_utils
bottleneck_test
cpp [C10D] Fix pointToPoint op Flight Recording (#120270) 2024-02-29 01:03:31 +00:00
cpp_api_parity
cpp_extensions Revert "Change ATEN generator argument type to const std::optional<Generator>& (#120076)" 2024-03-08 20:01:03 +00:00
custom_backend
custom_operator Better error messages for impl_abstract_pystub (#120959) 2024-03-04 15:24:36 +00:00
distributed Disable GroupRegistry's thread isolation by default (#121457) 2024-03-08 19:31:24 +00:00
distributions
dynamo Improve Dynamo support for torch function and class methods in general (#121365) 2024-03-08 20:03:49 +00:00
dynamo_expected_failures Improve Dynamo support for torch function and class methods in general (#121365) 2024-03-08 20:03:49 +00:00
dynamo_skips Only profiling when it's enabled. (#121404) 2024-03-08 19:23:14 +00:00
edge
error_messages
expect Batch Norm Consolidation (#116092) 2024-03-08 15:07:15 +00:00
export suggested fixes for congruences (#121418) 2024-03-08 07:19:51 +00:00
forward_backward_compatibility
functorch [AOTDispatch] Return mutated inputs directly when keeping mutations (#120514) 2024-03-08 16:33:26 +00:00
fx Revert "[fx] Preserve Fx graph node order in partitioner across runs (#115621)" 2024-03-08 19:50:57 +00:00
higher_order_ops [hoo] Add with_effects to handle side effectful ops (#120296) 2024-03-05 08:58:32 +00:00
inductor [compiled autograd] support custom ops backed by c++ autograd::Function (#120681) 2024-03-08 20:43:29 +00:00
jit Only profiling when it's enabled. (#121404) 2024-03-08 19:23:14 +00:00
jit_hooks
lazy
mobile
nn Add complex support to parametrizations.spectral_norm (#121452) 2024-03-08 19:17:20 +00:00
onnx Batch Norm Consolidation (#116092) 2024-03-08 15:07:15 +00:00
onnx_caffe2
optim
package
profiler [profiler] Add execution_trace_observer as an optional argument to profiler (#119912) 2024-03-07 01:30:26 +00:00
quantization Ban passing in free function into capture_pre_autograd_graph (#120817) 2024-03-01 19:38:58 +00:00
scripts
test_img
torch_np
typing Fix torch.return_types init signature (#119284) 2024-02-23 21:52:34 +00:00
_test_bazel.py
allowlist_for_publicAPI.json
conftest.py
create_dummy_torchscript_model.py
delete.py
HowToWriteTestsUsingFileCheck.md
linear.py
load_torchscript_model.py
minioptest_failures_dict.json
mkl_verbose.py
mkldnn_verbose.py
pytest_shard_custom.py
run_doctests.sh
run_test.py CI sanity check test for env vars (#120519) 2024-03-08 20:28:50 +00:00
simulate_nccl_errors.py
test_ao_sparsity.py
test_autocast.py
test_autograd.py Deprecate torch.autograd.function.traceable, is_traceable (#121413) 2024-03-08 18:41:07 +00:00
test_autograd_fallback.py
test_binary_ufuncs.py
test_bundled_images.py
test_bundled_inputs.py
test_ci_sanity_check_fail.py CI sanity check test for env vars (#120519) 2024-03-08 20:28:50 +00:00
test_comparison_utils.py
test_compile_benchmark_util.py
test_complex.py
test_content_store.py
test_cpp_api_parity.py
test_cpp_extensions_aot.py
test_cpp_extensions_jit.py
test_cpp_extensions_open_device_registration.py Modify StorageImplCreateHelper (#118459) 2024-03-07 06:26:55 +00:00
test_cuda.py Add ASGD capturable API for forloop (#121264) 2024-03-08 00:00:30 +00:00
test_cuda_expandable_segments.py
test_cuda_multigpu.py [PyTorch][CUDA Caching Allocator] Export sync-stream-and-free-HBM counter in memory_stats for performance debugging (#120050) 2024-02-27 04:34:53 +00:00
test_cuda_nvml_based_avail.py
test_cuda_primary_ctx.py
test_cuda_sanitizer.py
test_cuda_trace.py
test_custom_ops.py [compiled autograd] support custom ops backed by c++ autograd::Function (#120681) 2024-03-08 20:43:29 +00:00
test_dataloader.py
test_datapipe.py
test_decomp.py
test_deploy.py
test_determination.py
test_dispatch.py
test_dlpack.py
test_dynamic_shapes.py Introduce EphemeralSource for symbols that should be simplified out (#120948) 2024-03-06 02:30:52 +00:00
test_expanded_weights.py
test_fake_tensor.py Change FakeTensor serialization to consider only an _active_ FakeTensor mode (#120848) 2024-03-01 02:37:21 +00:00
test_flop_counter.py
test_foreach.py Update python binding for in-place foreach to return List[Tensor] (#121405) 2024-03-08 21:00:01 +00:00
test_function_schema.py
test_functional_autograd_benchmark.py
test_functional_optim.py
test_functionalization.py
test_functionalization_of_rng_ops.py
test_futures.py
test_fx.py
test_fx_experimental.py
test_fx_passes.py
test_fx_reinplace_pass.py
test_hub.py
test_import_stats.py
test_indexing.py
test_itt.py
test_jit.py support bool as Scalar Type in TorchScript (#113835) 2024-03-01 04:20:15 +00:00
test_jit_autocast.py
test_jit_disabled.py
test_jit_fuser.py
test_jit_fuser_legacy.py
test_jit_fuser_te.py Batch Norm Consolidation (#116092) 2024-03-08 15:07:15 +00:00
test_jit_legacy.py
test_jit_llga_fuser.py
test_jit_profiling.py
test_jit_simple.py
test_jit_string.py
test_jiterator.py
test_kernel_launch_checks.py
test_legacy_vmap.py
test_license.py
test_linalg.py add int8 packed gemm support on CPU device (#118056) 2024-03-07 08:41:43 +00:00
test_logging.py
test_masked.py
test_maskedtensor.py
test_matmul_cuda.py [ROCm] enable scaled_gemm (#117822) 2024-02-29 10:20:48 +00:00
test_meta.py Batch Norm Consolidation (#116092) 2024-03-08 15:07:15 +00:00
test_metal.py
test_mkl_verbose.py
test_mkldnn.py
test_mkldnn_fusion.py
test_mkldnn_verbose.py
test_mobile_optimizer.py
test_model_dump.py
test_model_exports_to_core_aten.py
test_modules.py Add exhaustive module and optimizer tests for torch.load(state_dict, weights_only=True) (#121049) 2024-03-05 14:27:50 +00:00
test_monitor.py
test_mps.py [BE] [MPS] Fix out resize logic in torch.where (#121476) 2024-03-08 18:59:37 +00:00
test_multiprocessing.py
test_multiprocessing_spawn.py
test_namedtensor.py Add the bound check for flatten with out_dim (#120894) 2024-03-02 03:56:55 +00:00
test_namedtuple_return_api.py
test_native_functions.py
test_native_mha.py
test_nestedtensor.py Subclass view fake-ification via reified ViewFuncs (#118405) 2024-03-07 19:56:16 +00:00
test_nn.py Bugfix consume_prefix_in_state_dict_if_present function to keep the order of the state_dict (#117464) 2024-03-07 04:00:49 +00:00
test_nnapi.py
test_numba_integration.py
test_numpy_interop.py Fixed a memory leak when calling from_numpy on a numpy array with an … (#121156) 2024-03-06 19:37:38 +00:00
test_openmp.py
test_ops.py [Autograd] Improve error for leaf tensors as out argument to fallback (#121089) 2024-03-05 21:13:27 +00:00
test_ops_fwd_gradients.py
test_ops_gradients.py
test_ops_jit.py
test_optim.py [BE] NO MORE discrepancy between forloop foreach capturable YAY (#121269) 2024-03-08 00:00:30 +00:00
test_out_dtype_op.py [export][reland] Disable exported_program.__call__ (#120019) 2024-03-05 05:29:46 +00:00
test_overrides.py
test_package.py
test_per_overload_api.py
test_prims.py
test_proxy_tensor.py Batch Norm Consolidation (#116092) 2024-03-08 15:07:15 +00:00
test_pruning_op.py
test_public_bindings.py
test_python_dispatch.py
test_pytree.py [pytree][reland] Require pytree serialized_type_name (#120636) 2024-02-27 06:53:33 +00:00
test_quantization.py
test_reductions.py Fix permuted sum precision issue for lower precision on CPU (#108559) 2024-03-06 01:01:35 +00:00
test_scatter_gather_ops.py
test_schema_check.py Fix ouput typos (#120870) 2024-02-29 08:29:14 +00:00
test_segment_reductions.py
test_serialization.py add complex32 to v3_dtypes (#120388) 2024-02-28 02:32:29 +00:00
test_set_default_mobile_cpu_allocator.py
test_shape_ops.py
test_show_pickle.py
test_sort_and_select.py
test_sparse.py Add sparse compressed fake tensor support (#120920) 2024-03-04 14:38:45 +00:00
test_sparse_csr.py
test_sparse_semi_structured.py Skip semi-structured-sparse on windows (#120807) 2024-02-29 21:48:52 +00:00
test_spectral_ops.py
test_stateless.py
test_static_runtime.py [PyTorch] Split StaticModule out of test_static_runtime (#121028) 2024-03-05 23:14:07 +00:00
test_subclass.py
test_sympy_utils.py
test_tensor_creation_ops.py
test_tensorboard.py [tensorboard] Handle bfloat16 type in add_histogram (#120087) 2024-03-05 00:27:21 +00:00
test_tensorexpr.py
test_tensorexpr_pybind.py
test_testing.py
test_throughput_benchmark.py
test_torch.py Avoid COW materialization in at::parallel_for/parallel_reduce (#120455) 2024-03-01 05:05:28 +00:00
test_transformers.py Fixes issue_119785 (#121048) 2024-03-05 20:19:02 +00:00
test_type_hints.py
test_type_info.py
test_type_promotion.py
test_typing.py
test_unary_ufuncs.py add decomposition for frexp (#119217) 2024-02-23 21:52:42 +00:00
test_utils.py Revert "Increased compile time max GPUs to 512. Switched to int16_t DeviceIndex. (#119639)" 2024-02-28 18:57:09 +00:00
test_view_ops.py
test_vulkan.py
test_weak.py
test_xnnpack_integration.py
test_xpu.py [2/2] Intel GPU Runtime Upstreaming for Generator (#118613) 2024-02-28 05:28:11 +00:00