mirror of
https://github.com/saymrwulf/pytorch.git
synced 2026-05-14 20:57:59 +00:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/76637 The previous naming convention `default_affine_fixed_qparams_observer` and `default_symmetric_fixed_qparams_observer` were uninformative, and users had to read the definition in order to understand what these observers are. The new naming convention reveals information about the range of the observers. The analogous changes were also made for `default_symmetric_fixed_qparams_fake_quant` and `default_affine_fixed_qparams_fake_quant`. Test Plan: ``` python test/test_quantization.py ``` Differential Revision: D36054169 Reviewed By: vkuzo Pulled By: dzdang fbshipit-source-id: 215f7786a4b7abda7327f17cc61735697ec5cca9 (cherry picked from commit 21a4e6eda4467c8adca7fd534a506a14e975f9cf)
3581 lines
70 KiB
JSON
{
|
|
"torch.amp.autocast_mode": [
|
|
"Any",
|
|
"Optional"
|
|
],
|
|
"torch.ao.nn.sparse.quantized.dynamic.linear": [
|
|
"LinearBlockSparsePattern",
|
|
"Optional",
|
|
"hide_packed_params_repr"
|
|
],
|
|
"torch.ao.nn.sparse.quantized.linear": [
|
|
"Optional",
|
|
"hide_packed_params_repr"
|
|
],
|
|
"torch.ao.quantization": [
|
|
"ABC",
|
|
"ABCMeta",
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"List",
|
|
"Module",
|
|
"Optional",
|
|
"OrderedDict",
|
|
"Pattern",
|
|
"QConfigAny",
|
|
"Set",
|
|
"Tuple",
|
|
"Type",
|
|
"Union",
|
|
"abstractmethod",
|
|
"namedtuple",
|
|
"partial",
|
|
"type_before_parametrizations",
|
|
"wrap_cpp_module"
|
|
],
|
|
"torch.ao.quantization.fake_quantize": [
|
|
"ABC",
|
|
"Any",
|
|
"FixedQParamsObserver",
|
|
"HistogramObserver",
|
|
"Module",
|
|
"MovingAverageMinMaxObserver",
|
|
"MovingAveragePerChannelMinMaxObserver",
|
|
"Tuple",
|
|
"abstractmethod",
|
|
"default_fixed_qparams_range_0to1_fake_quant",
|
|
"default_fixed_qparams_range_0to1_observer",
|
|
"default_affine_fixed_qparams_fake_quant",
|
|
"default_affine_fixed_qparams_observer",
|
|
"default_dynamic_fake_quant",
|
|
"default_embedding_fake_quant",
|
|
"default_embedding_fake_quant_4bit",
|
|
"default_fake_quant",
|
|
"default_fused_act_fake_quant",
|
|
"default_fused_per_channel_wt_fake_quant",
|
|
"default_fused_wt_fake_quant",
|
|
"default_histogram_fake_quant",
|
|
"default_per_channel_weight_fake_quant",
|
|
"default_fixed_qparams_range_neg1to1_fake_quant",
|
|
"default_fixed_qparams_range_neg1to1_observer",
|
|
"default_symmetric_fixed_qparams_fake_quant",
|
|
"default_symmetric_fixed_qparams_observer",
|
|
"default_weight_fake_quant",
|
|
"fused_per_channel_wt_fake_quant_range_neg_127_to_127",
|
|
"fused_wt_fake_quant_range_neg_127_to_127"
|
|
],
|
|
"torch.ao.quantization.fuse_modules": [
|
|
"List",
|
|
"Optional",
|
|
"fuse_conv_bn",
|
|
"fuse_conv_bn_relu",
|
|
"get_fuser_method",
|
|
"type_before_parametrizations"
|
|
],
|
|
"torch.ao.quantization.fuser_method_mappings": [
|
|
"Callable",
|
|
"Dict",
|
|
"MatchAllNode",
|
|
"Optional",
|
|
"Pattern",
|
|
"Tuple",
|
|
"Type",
|
|
"Union",
|
|
"get_combined_dict"
|
|
],
|
|
"torch.ao.quantization.backend_config.native": [
|
|
"Any",
|
|
"Dict",
|
|
"FixedQParamsFakeQuantize",
|
|
"List",
|
|
"ObservationType",
|
|
"default_fixed_qparams_range_0to1_observer",
|
|
"default_fixed_qparams_range_neg1to1_observer",
|
|
"default_affine_fixed_qparams_observer",
|
|
"default_symmetric_fixed_qparams_observer",
|
|
"fuse_conv_bn",
|
|
"fuse_conv_bn_relu",
|
|
"fuse_convtranspose_bn",
|
|
"fuse_linear_bn",
|
|
"namedtuple",
|
|
"reverse2",
|
|
"reverse3",
|
|
"reverse_sequential_wrapper2"
|
|
],
|
|
"torch.ao.quantization.backend_config.observation_type": [
|
|
"Enum"
|
|
],
|
|
"torch.ao.quantization.backend_config.tensorrt": [
|
|
"ObservationType",
|
|
"reverse_sequential_wrapper2"
|
|
],
|
|
"torch.ao.quantization.quantization_types": [
|
|
"Any",
|
|
"Node",
|
|
"NodePattern",
|
|
"Pattern",
|
|
"QuantizerCls",
|
|
"Tuple",
|
|
"Union"
|
|
],
|
|
"torch.ao.quantization.fx.convert": [
|
|
"Any",
|
|
"Argument",
|
|
"Callable",
|
|
"Dict",
|
|
"Graph",
|
|
"GraphModule",
|
|
"List",
|
|
"Node",
|
|
"Optional",
|
|
"QConfigAny",
|
|
"QuantizedGraphModule",
|
|
"Set",
|
|
"Tuple",
|
|
"activation_is_statically_quantized",
|
|
"collect_producer_nodes",
|
|
"compare_prepare_convert_qconfig_dict",
|
|
"convert_dict_to_ordered_dict",
|
|
"convert_eq_obs",
|
|
"create_getattr_from_value",
|
|
"generate_qconfig_map",
|
|
"get_custom_module_class_keys",
|
|
"get_fused_module_classes",
|
|
"get_native_backend_config_dict",
|
|
"get_pattern_to_dtype_configs",
|
|
"get_qat_module_classes",
|
|
"get_qparam_dict",
|
|
"get_quantize_node_info",
|
|
"get_root_module_to_quantized_reference_module",
|
|
"get_swapped_custom_module_class",
|
|
"graph_module_from_producer_nodes",
|
|
"is_activation_post_process",
|
|
"is_observed_module",
|
|
"is_observed_standalone_module",
|
|
"is_qconfig_supported_by_dtype_configs",
|
|
"lower_to_fbgemm",
|
|
"qconfig_equals",
|
|
"update_obs_for_equalization",
|
|
"update_qconfig_for_fusion",
|
|
"update_qconfig_for_qat",
|
|
"weight_is_quantized"
|
|
],
|
|
"torch.ao.quantization.fx.fuse": [
|
|
"ABC",
|
|
"Any",
|
|
"Callable",
|
|
"DefaultFuseHandler",
|
|
"Dict",
|
|
"FuseHandler",
|
|
"FusedGraphModule",
|
|
"Graph",
|
|
"GraphModule",
|
|
"List",
|
|
"MatchAllNode",
|
|
"Node",
|
|
"NodePattern",
|
|
"Optional",
|
|
"Pattern",
|
|
"Tuple",
|
|
"Union",
|
|
"abstractmethod",
|
|
"get_fuser_method_mapping",
|
|
"get_fuser_method_new",
|
|
"get_fusion_pattern_to_extra_inputs_getter",
|
|
"get_fusion_pattern_to_fuse_handler_cls",
|
|
"get_fusion_pattern_to_root_node_getter",
|
|
"get_native_backend_config_dict",
|
|
"is_match",
|
|
"map_arg",
|
|
"sorted_patterns_dict"
|
|
],
|
|
"torch.ao.quantization.fx.fusion_patterns": [
|
|
"ABC",
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"Graph",
|
|
"List",
|
|
"MatchAllNode",
|
|
"Node",
|
|
"NodePattern",
|
|
"Optional",
|
|
"Pattern",
|
|
"Union",
|
|
"abstractmethod",
|
|
"get_fuser_method_new"
|
|
],
|
|
"torch.ao.quantization.fx.graph_module": [
|
|
"Any",
|
|
"Dict",
|
|
"Graph",
|
|
"GraphModule",
|
|
"Set",
|
|
"Union"
|
|
],
|
|
"torch.ao.quantization.fx.lower_to_fbgemm": [
|
|
"Dict",
|
|
"QConfigAny",
|
|
"QuantizedGraphModule",
|
|
"Tuple"
|
|
],
|
|
"torch.ao.quantization.fx.match_utils": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"Graph",
|
|
"List",
|
|
"MatchAllNode",
|
|
"MatchResult",
|
|
"Node",
|
|
"Optional",
|
|
"Pattern",
|
|
"QConfigAny",
|
|
"QuantizeHandler",
|
|
"Set",
|
|
"Tuple",
|
|
"is_observed_standalone_module"
|
|
],
|
|
"torch.ao.quantization.fx.pattern_utils": [
|
|
"Any",
|
|
"Dict",
|
|
"FixedQParamsFakeQuantize",
|
|
"List",
|
|
"MatchResult",
|
|
"Node",
|
|
"ObserverBase",
|
|
"Optional",
|
|
"OrderedDict",
|
|
"Pattern",
|
|
"QConfigAny",
|
|
"QuantizeHandler",
|
|
"Tuple"
|
|
],
|
|
"torch.ao.quantization.fx.prepare": [
|
|
"Any",
|
|
"Argument",
|
|
"Callable",
|
|
"Dict",
|
|
"Graph",
|
|
"GraphModule",
|
|
"List",
|
|
"MatchResult",
|
|
"Node",
|
|
"NodePattern",
|
|
"ObservedGraphModule",
|
|
"ObservedStandaloneGraphModule",
|
|
"ObserverBase",
|
|
"Optional",
|
|
"Pattern",
|
|
"QConfigAny",
|
|
"QuantizeHandler",
|
|
"Set",
|
|
"Tuple",
|
|
"Union",
|
|
"activation_is_int8_quantized",
|
|
"activation_is_statically_quantized",
|
|
"all_node_args_have_no_tensors",
|
|
"assert_and_get_unique_device",
|
|
"convert",
|
|
"convert_dict_to_ordered_dict",
|
|
"defaultdict",
|
|
"find_matches",
|
|
"generate_qconfig_map",
|
|
"get_custom_module_class_keys",
|
|
"get_flattened_qconfig_dict",
|
|
"get_fusion_pattern_to_root_node_getter",
|
|
"get_module_to_qat_module",
|
|
"get_native_backend_config_dict",
|
|
"get_new_attr_name_with_prefix",
|
|
"get_non_observable_arg_indexes_and_types",
|
|
"get_pattern_to_dtype_configs",
|
|
"get_pattern_to_input_type_to_index",
|
|
"get_pattern_to_quantize_handlers",
|
|
"get_qconfig_dtypes",
|
|
"get_standalone_module_configs",
|
|
"get_swapped_custom_module_class",
|
|
"is_activation_post_process",
|
|
"is_equalization_observer",
|
|
"is_reuse_input_qconfig",
|
|
"node_supports_equalization",
|
|
"propagate_qconfig_",
|
|
"sorted_patterns_dict",
|
|
"update_qconfig_for_fusion",
|
|
"update_qconfig_for_qat"
|
|
],
|
|
"torch.ao.quantization.fx.qconfig_utils": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"Graph",
|
|
"GraphModule",
|
|
"List",
|
|
"Optional",
|
|
"QConfig",
|
|
"QConfigAny",
|
|
"Set",
|
|
"Tuple",
|
|
"add_module_to_qconfig_obs_ctr",
|
|
"defaultdict",
|
|
"get_object_type_qconfig",
|
|
"get_qconfig_dtypes",
|
|
"is_activation_post_process",
|
|
"maybe_adjust_qconfig_for_module_type_or_name",
|
|
"qconfig_equals"
|
|
],
|
|
"torch.ao.quantization.fx.quantization_patterns": [
|
|
"ABC",
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"Node",
|
|
"NodePattern",
|
|
"Optional",
|
|
"Pattern",
|
|
"all_node_args_have_no_tensors"
|
|
],
|
|
"torch.ao.quantization.fx.quantization_types": [
|
|
"Any",
|
|
"Node",
|
|
"NodePattern",
|
|
"Pattern",
|
|
"QuantizerCls",
|
|
"Tuple",
|
|
"Union"
|
|
],
|
|
"torch.ao.quantization.fx.utils": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"Graph",
|
|
"GraphModule",
|
|
"List",
|
|
"Node",
|
|
"Optional",
|
|
"Set",
|
|
"Tuple",
|
|
"Type",
|
|
"Union",
|
|
"is_activation_post_process",
|
|
"is_per_channel",
|
|
"is_per_tensor",
|
|
"map_arg",
|
|
"namedtuple"
|
|
],
|
|
"torch.ao.quantization.fx.backend_config_utils": [
|
|
"Any",
|
|
"Callable",
|
|
"DefaultFuseHandler",
|
|
"Dict",
|
|
"NodePattern",
|
|
"ObservationType",
|
|
"Optional",
|
|
"Pattern",
|
|
"QuantizeHandler",
|
|
"QuantizerCls",
|
|
"activation_dtype",
|
|
"get_combined_dict",
|
|
"get_default_quant_patterns",
|
|
"get_native_backend_config_dict",
|
|
"sorted_patterns_dict",
|
|
"get_quantize_handler_cls"
|
|
],
|
|
"torch.ao.quantization.observer": [
|
|
"ABC",
|
|
"ABCMeta",
|
|
"Any",
|
|
"Dict",
|
|
"List",
|
|
"Optional",
|
|
"OrderedDict",
|
|
"Tuple",
|
|
"Union",
|
|
"abstractmethod",
|
|
"calculate_qmin_qmax",
|
|
"check_min_max_valid",
|
|
"partial"
|
|
],
|
|
"torch.ao.quantization.qconfig": [
|
|
"Any",
|
|
"FakeQuantize",
|
|
"FakeQuantizeBase",
|
|
"FusedMovingAvgObsFakeQuantize",
|
|
"HistogramObserver",
|
|
"MovingAverageMinMaxObserver",
|
|
"NoopObserver",
|
|
"Optional",
|
|
"PlaceholderObserver",
|
|
"QConfigAny",
|
|
"ReuseInputObserver",
|
|
"default_debug_observer",
|
|
"default_dynamic_fake_quant",
|
|
"default_dynamic_quant_observer",
|
|
"default_embedding_fake_quant",
|
|
"default_embedding_fake_quant_4bit",
|
|
"default_fake_quant",
|
|
"default_float_qparams_observer",
|
|
"default_float_qparams_observer_4bit",
|
|
"default_fused_act_fake_quant",
|
|
"default_fused_per_channel_wt_fake_quant",
|
|
"default_fused_wt_fake_quant",
|
|
"default_observer",
|
|
"default_per_channel_weight_fake_quant",
|
|
"default_per_channel_weight_observer",
|
|
"default_placeholder_observer",
|
|
"default_reuse_input_observer",
|
|
"default_weight_fake_quant",
|
|
"default_weight_observer",
|
|
"fused_per_channel_wt_fake_quant_range_neg_127_to_127",
|
|
"fused_wt_fake_quant_range_neg_127_to_127",
|
|
"namedtuple",
|
|
"per_channel_weight_observer_range_neg_127_to_127",
|
|
"weight_observer_range_neg_127_to_127"
|
|
],
|
|
"torch.ao.quantization.qconfig_dict_utils": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"OrderedDict",
|
|
"QConfigAny",
|
|
"Union",
|
|
"get_combined_dict",
|
|
"get_default_qat_module_mappings"
|
|
],
|
|
"torch.ao.quantization.quantization_mappings": [
|
|
"Any",
|
|
"Callable",
|
|
"DeQuantStub",
|
|
"Dict",
|
|
"Optional",
|
|
"QuantStub",
|
|
"Set",
|
|
"Union",
|
|
"default_fixed_qparams_range_0to1_fake_quant",
|
|
"default_fixed_qparams_range_neg1to1_fake_quant",
|
|
"default_affine_fixed_qparams_fake_quant",
|
|
"default_symmetric_fixed_qparams_fake_quant",
|
|
"get_combined_dict",
|
|
"type_before_parametrizations"
|
|
],
|
|
"torch.ao.quantization.quantize": [
|
|
"DeQuantStub",
|
|
"QuantWrapper",
|
|
"activation_is_memoryless",
|
|
"add_module_to_qconfig_obs_ctr",
|
|
"get_default_dynamic_quant_module_mappings",
|
|
"get_default_qat_module_mappings",
|
|
"get_default_qconfig_propagation_list",
|
|
"get_default_static_quant_module_mappings",
|
|
"get_default_static_quant_reference_module_mappings",
|
|
"get_qparam_dict",
|
|
"has_no_children_ignoring_parametrizations",
|
|
"no_observer_set",
|
|
"type_before_parametrizations"
|
|
],
|
|
"torch.ao.quantization.quantize_jit": [
|
|
"QConfig",
|
|
"QuantType",
|
|
"wrap_cpp_module"
|
|
],
|
|
"torch.ao.quantization.utils": [
|
|
"Any",
|
|
"Callable",
|
|
"Pattern",
|
|
"QuantType",
|
|
"Tuple",
|
|
"Union",
|
|
"is_parametrized",
|
|
"quant_type_to_str"
|
|
],
|
|
"torch.ao.sparsity.experimental.pruner.base_pruner": [
|
|
"ActivationReconstruction",
|
|
"BaseSparsifier",
|
|
"BiasHook",
|
|
"ModuleDict",
|
|
"ModuleList",
|
|
"PruningParametrization",
|
|
"ZeroesParametrization",
|
|
"fqn_to_module",
|
|
"module_to_fqn"
|
|
],
|
|
"torch.ao.sparsity.experimental.pruner.parametrization": [
|
|
"Any",
|
|
"List"
|
|
],
|
|
"torch.ao.sparsity.scheduler.base_scheduler": [
|
|
"BaseSparsifier",
|
|
"wraps"
|
|
],
|
|
"torch.ao.sparsity.scheduler.lambda_scheduler": [
|
|
"BaseScheduler"
|
|
],
|
|
"torch.ao.sparsity.sparsifier.base_sparsifier": [
|
|
"Dict",
|
|
"FakeSparsity",
|
|
"Optional",
|
|
"Tuple",
|
|
"defaultdict",
|
|
"fqn_to_module",
|
|
"module_to_fqn"
|
|
],
|
|
"torch.ao.sparsity.sparsifier.weight_norm_sparsifier": [
|
|
"BaseSparsifier",
|
|
"Tuple",
|
|
"reduce"
|
|
],
|
|
"torch.autograd": [
|
|
"NestedIOFunction",
|
|
"detect_anomaly",
|
|
"enable_grad",
|
|
"grad",
|
|
"gradcheck",
|
|
"gradgradcheck",
|
|
"inference_mode",
|
|
"no_grad",
|
|
"set_detect_anomaly",
|
|
"set_grad_enabled",
|
|
"variable"
|
|
],
|
|
"torch.autograd.anomaly_mode": [
|
|
"Any"
|
|
],
|
|
"torch.autograd.forward_ad": [
|
|
"Any",
|
|
"namedtuple"
|
|
],
|
|
"torch.autograd.function": [
|
|
"Any",
|
|
"List",
|
|
"Optional",
|
|
"OrderedDict",
|
|
"with_metaclass"
|
|
],
|
|
"torch.autograd.functional": [
|
|
"List",
|
|
"Tuple"
|
|
],
|
|
"torch.autograd.gradcheck": [
|
|
"Callable",
|
|
"Dict",
|
|
"Iterable",
|
|
"List",
|
|
"Optional",
|
|
"Tuple",
|
|
"Union",
|
|
"is_tensor_like",
|
|
"product",
|
|
"vmap"
|
|
],
|
|
"torch.autograd.graph": [
|
|
"Any",
|
|
"Callable"
|
|
],
|
|
"torch.autograd.profiler": [
|
|
"Any",
|
|
"ContextDecorator",
|
|
"DeviceType",
|
|
"Dict",
|
|
"Future",
|
|
"List",
|
|
"Optional",
|
|
"ProfilerActivity",
|
|
"ProfilerConfig",
|
|
"ProfilerState",
|
|
"kineto_available",
|
|
"warn"
|
|
],
|
|
"torch.autograd.profiler_legacy": [
|
|
"DeviceType",
|
|
"EventList",
|
|
"FunctionEvent",
|
|
"ProfilerConfig",
|
|
"ProfilerState",
|
|
"warn"
|
|
],
|
|
"torch.autograd.profiler_util": [
|
|
"DeviceType",
|
|
"Dict",
|
|
"List",
|
|
"Optional",
|
|
"Tuple",
|
|
"attrgetter",
|
|
"defaultdict",
|
|
"namedtuple"
|
|
],
|
|
"torch.autograd.variable": [
|
|
"ImperativeEngine",
|
|
"with_metaclass"
|
|
],
|
|
"torch.backends": [
|
|
"contextmanager"
|
|
],
|
|
"torch.backends.cuda": [
|
|
"Union"
|
|
],
|
|
"torch.cpu.amp.autocast_mode": [
|
|
"Any"
|
|
],
|
|
"torch.cuda": [
|
|
"Any",
|
|
"Device",
|
|
"Dict",
|
|
"List",
|
|
"Optional",
|
|
"Tuple",
|
|
"Union",
|
|
"classproperty"
|
|
],
|
|
"torch.cuda.amp.autocast_mode": [
|
|
"Any"
|
|
],
|
|
"torch.cuda.amp.common": [
|
|
"find_spec"
|
|
],
|
|
"torch.cuda.amp.grad_scaler": [
|
|
"Any",
|
|
"Dict",
|
|
"Enum",
|
|
"List",
|
|
"Optional",
|
|
"Tuple",
|
|
"amp_definitely_not_available",
|
|
"defaultdict"
|
|
],
|
|
"torch.cuda.nccl": [
|
|
"init_rank",
|
|
"is_available",
|
|
"unique_id",
|
|
"version"
|
|
],
|
|
"torch.cuda.profiler": [
|
|
"check_error",
|
|
"cudart"
|
|
],
|
|
"torch.distributed": [
|
|
"AllToAllOptions",
|
|
"AllreduceCoalescedOptions",
|
|
"AllreduceOptions",
|
|
"BarrierOptions",
|
|
"BroadcastOptions",
|
|
"BuiltinCommHookType",
|
|
"Callable",
|
|
"DebugLevel",
|
|
"Dict",
|
|
"Enum",
|
|
"FileStore",
|
|
"GatherOptions",
|
|
"GradBucket",
|
|
"HashStore",
|
|
"Logger",
|
|
"Optional",
|
|
"PrefixStore",
|
|
"ProcessGroup",
|
|
"ProcessGroupGloo",
|
|
"ReduceOp",
|
|
"ReduceOptions",
|
|
"ReduceScatterOptions",
|
|
"Reducer",
|
|
"ScatterOptions",
|
|
"Store",
|
|
"TCPStore",
|
|
"Tuple",
|
|
"Union",
|
|
"get_debug_level",
|
|
"set_debug_level",
|
|
"set_debug_level_from_env",
|
|
"timedelta",
|
|
"ProcessGroupMPI",
|
|
"ProcessGroupNCCL"
|
|
],
|
|
"torch.distributed.algorithms.ddp_comm_hooks": [
|
|
"DistributedDataParallel",
|
|
"Enum",
|
|
"partial"
|
|
],
|
|
"torch.distributed.algorithms.ddp_comm_hooks.debugging_hooks": [
|
|
"Any",
|
|
"GradBucket"
|
|
],
|
|
"torch.distributed.algorithms.ddp_comm_hooks.default_hooks": [
|
|
"Any",
|
|
"Callable"
|
|
],
|
|
"torch.distributed.algorithms.ddp_comm_hooks.optimizer_overlap_hooks": [
|
|
"Any",
|
|
"Callable"
|
|
],
|
|
"torch.distributed.algorithms.join": [
|
|
"ABC",
|
|
"Any",
|
|
"List",
|
|
"NamedTuple",
|
|
"Optional",
|
|
"TracebackType",
|
|
"Type",
|
|
"abstractmethod"
|
|
],
|
|
"torch.distributed.algorithms.model_averaging.averagers": [
|
|
"ABC",
|
|
"Dict",
|
|
"Iterable",
|
|
"Union",
|
|
"abstractmethod"
|
|
],
|
|
"torch.distributed.algorithms.model_averaging.utils": [
|
|
"Dict",
|
|
"Iterable",
|
|
"Iterator",
|
|
"ProcessGroup",
|
|
"Union",
|
|
"group"
|
|
],
|
|
"torch.distributed.autograd": [
|
|
"DistAutogradContext",
|
|
"backward",
|
|
"get_gradients"
|
|
],
|
|
"torch.distributed.distributed_c10d": [
|
|
"AllToAllOptions",
|
|
"AllreduceCoalescedOptions",
|
|
"AllreduceOptions",
|
|
"BarrierOptions",
|
|
"BroadcastOptions",
|
|
"Callable",
|
|
"DebugLevel",
|
|
"Dict",
|
|
"GatherOptions",
|
|
"Optional",
|
|
"PrefixStore",
|
|
"ProcessGroup",
|
|
"ProcessGroupGloo",
|
|
"ReduceOp",
|
|
"ReduceOptions",
|
|
"ReduceScatterOptions",
|
|
"ScatterOptions",
|
|
"Store",
|
|
"Tuple",
|
|
"Union",
|
|
"get_debug_level",
|
|
"register_rendezvous_handler",
|
|
"rendezvous",
|
|
"timedelta",
|
|
"ProcessGroupMPI",
|
|
"ProcessGroupNCCL"
|
|
],
|
|
"torch.distributed.elastic.agent.server.api": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"Enum",
|
|
"Event",
|
|
"EventSource",
|
|
"List",
|
|
"Optional",
|
|
"ProcessFailure",
|
|
"SignalException",
|
|
"Std",
|
|
"Store",
|
|
"Tuple",
|
|
"Union",
|
|
"closing",
|
|
"dataclass",
|
|
"field",
|
|
"get_logger",
|
|
"prof",
|
|
"put_metric",
|
|
"record"
|
|
],
|
|
"torch.distributed.elastic.events": [
|
|
"Dict",
|
|
"Enum",
|
|
"EventMetadataValue",
|
|
"Optional"
|
|
],
|
|
"torch.distributed.elastic.events.api": [
|
|
"Dict",
|
|
"Enum",
|
|
"EventMetadataValue",
|
|
"Optional",
|
|
"Union",
|
|
"asdict",
|
|
"dataclass",
|
|
"field"
|
|
],
|
|
"torch.distributed.elastic.events.handlers": [
|
|
"Dict"
|
|
],
|
|
"torch.distributed.elastic.metrics": [
|
|
"Optional"
|
|
],
|
|
"torch.distributed.elastic.metrics.api": [
|
|
"Dict",
|
|
"Optional",
|
|
"namedtuple",
|
|
"wraps"
|
|
],
|
|
"torch.distributed.elastic.multiprocessing": [
|
|
"Callable",
|
|
"Dict",
|
|
"Tuple",
|
|
"Union",
|
|
"get_logger"
|
|
],
|
|
"torch.distributed.elastic.multiprocessing.api": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"FrameType",
|
|
"IntFlag",
|
|
"Optional",
|
|
"ProcessFailure",
|
|
"Set",
|
|
"TailLog",
|
|
"Tuple",
|
|
"Union",
|
|
"dataclass",
|
|
"field",
|
|
"nullcontext",
|
|
"record",
|
|
"redirect_stderr",
|
|
"redirect_stdout"
|
|
],
|
|
"torch.distributed.elastic.multiprocessing.errors": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"GlobalRank",
|
|
"JSON",
|
|
"List",
|
|
"Optional",
|
|
"Template",
|
|
"Tuple",
|
|
"TypeVar",
|
|
"dataclass",
|
|
"datetime",
|
|
"field",
|
|
"get_logger",
|
|
"wraps"
|
|
],
|
|
"torch.distributed.elastic.multiprocessing.errors.error_handler": [
|
|
"Optional"
|
|
],
|
|
"torch.distributed.elastic.multiprocessing.errors.handlers": [
|
|
"ErrorHandler"
|
|
],
|
|
"torch.distributed.elastic.multiprocessing.redirects": [
|
|
"contextmanager",
|
|
"partial",
|
|
"redirect_stderr",
|
|
"redirect_stdout"
|
|
],
|
|
"torch.distributed.elastic.multiprocessing.tail_log": [
|
|
"Dict",
|
|
"Event",
|
|
"Future",
|
|
"List",
|
|
"TextIO",
|
|
"ThreadPoolExecutor"
|
|
],
|
|
"torch.distributed.elastic.rendezvous": [
|
|
"RendezvousHandlerCreator"
|
|
],
|
|
"torch.distributed.elastic.rendezvous.api": [
|
|
"ABC",
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"Optional",
|
|
"RendezvousHandlerCreator",
|
|
"Store",
|
|
"Tuple",
|
|
"abstractmethod"
|
|
],
|
|
"torch.distributed.elastic.rendezvous.dynamic_rendezvous": [
|
|
"ABC",
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"Enum",
|
|
"List",
|
|
"NodeState",
|
|
"Optional",
|
|
"PrefixStore",
|
|
"RendezvousClosedError",
|
|
"RendezvousError",
|
|
"RendezvousHandler",
|
|
"RendezvousParameters",
|
|
"RendezvousStateError",
|
|
"RendezvousTimeoutError",
|
|
"Set",
|
|
"Store",
|
|
"Token",
|
|
"Tuple",
|
|
"abstractmethod",
|
|
"cast",
|
|
"construct_and_record_rdzv_event",
|
|
"dataclass",
|
|
"datetime",
|
|
"timedelta"
|
|
],
|
|
"torch.distributed.elastic.rendezvous.registry": [
|
|
"RendezvousHandler",
|
|
"RendezvousParameters",
|
|
"create_handler"
|
|
],
|
|
"torch.distributed.elastic.rendezvous.utils": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"Event",
|
|
"Optional",
|
|
"Thread",
|
|
"Tuple",
|
|
"Union",
|
|
"timedelta"
|
|
],
|
|
"torch.distributed.elastic.timer.api": [
|
|
"Any",
|
|
"Dict",
|
|
"List",
|
|
"Optional",
|
|
"Set",
|
|
"contextmanager",
|
|
"getframeinfo",
|
|
"stack"
|
|
],
|
|
"torch.distributed.elastic.timer.local_timer": [
|
|
"Any",
|
|
"Dict",
|
|
"Empty",
|
|
"List",
|
|
"RequestQueue",
|
|
"Set",
|
|
"TimerClient",
|
|
"TimerRequest",
|
|
"TimerServer",
|
|
"Tuple"
|
|
],
|
|
"torch.distributed.elastic.utils.api": [
|
|
"Any",
|
|
"List",
|
|
"Template"
|
|
],
|
|
"torch.distributed.elastic.utils.data.elastic_distributed_sampler": [
|
|
"DistributedSampler"
|
|
],
|
|
"torch.distributed.elastic.utils.logging": [
|
|
"Optional",
|
|
"get_log_level"
|
|
],
|
|
"torch.distributed.elastic.utils.store": [
|
|
"List",
|
|
"timedelta"
|
|
],
|
|
"torch.distributed.fsdp.flatten_params_wrapper": [
|
|
"Any",
|
|
"Dict",
|
|
"Generator",
|
|
"Iterator",
|
|
"List",
|
|
"NamedTuple",
|
|
"Optional",
|
|
"ParamOffset",
|
|
"Sequence",
|
|
"SharedParamInfo",
|
|
"Tensor",
|
|
"Tuple",
|
|
"Union",
|
|
"accumulate"
|
|
],
|
|
"torch.distributed.fsdp.fully_sharded_data_parallel": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"Enum",
|
|
"FlatParameter",
|
|
"FlattenParamsWrapper",
|
|
"Generator",
|
|
"Iterable",
|
|
"Iterator",
|
|
"List",
|
|
"Mapping",
|
|
"NamedTuple",
|
|
"Optional",
|
|
"Parameter",
|
|
"ProcessGroup",
|
|
"Set",
|
|
"Shard",
|
|
"ShardedTensor",
|
|
"Tuple",
|
|
"Union",
|
|
"Variable",
|
|
"auto",
|
|
"cast",
|
|
"contextmanager",
|
|
"dataclass",
|
|
"init_from_local_shards"
|
|
],
|
|
"torch.distributed.fsdp.utils": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"List",
|
|
"OrderedDict",
|
|
"Set",
|
|
"Tuple",
|
|
"Union"
|
|
],
|
|
"torch.distributed.fsdp.wrap": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"Generator",
|
|
"Optional",
|
|
"Set",
|
|
"Tuple",
|
|
"Type",
|
|
"cast"
|
|
],
|
|
"torch.distributed.launcher.api": [
|
|
"Any",
|
|
"Callable",
|
|
"ChildFailedError",
|
|
"Dict",
|
|
"List",
|
|
"LocalElasticAgent",
|
|
"Optional",
|
|
"RendezvousParameters",
|
|
"SignalException",
|
|
"Std",
|
|
"Tuple",
|
|
"Union",
|
|
"WorkerSpec",
|
|
"dataclass",
|
|
"field",
|
|
"get_logger",
|
|
"parse_rendezvous_endpoint"
|
|
],
|
|
"torch.distributed.nn": [
|
|
"Function",
|
|
"ReduceOp",
|
|
"group"
|
|
],
|
|
"torch.distributed.nn.api.remote_module": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"Iterator",
|
|
"List",
|
|
"Mapping",
|
|
"Module",
|
|
"Optional",
|
|
"Parameter",
|
|
"RemovableHandle",
|
|
"Set",
|
|
"Tensor",
|
|
"Tuple",
|
|
"Type",
|
|
"TypeVar",
|
|
"Union",
|
|
"device",
|
|
"dtype"
|
|
],
|
|
"torch.distributed.nn.functional": [
|
|
"Function",
|
|
"ReduceOp",
|
|
"group"
|
|
],
|
|
"torch.distributed.nn.jit.instantiator": [
|
|
"Optional",
|
|
"get_remote_module_template"
|
|
],
|
|
"torch.distributed.optim.functional_adadelta": [
|
|
"Dict",
|
|
"List",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.distributed.optim.functional_adagrad": [
|
|
"Dict",
|
|
"List",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.distributed.optim.functional_adam": [
|
|
"Dict",
|
|
"List",
|
|
"Optional",
|
|
"Tensor",
|
|
"Tuple"
|
|
],
|
|
"torch.distributed.optim.functional_adamax": [
|
|
"Dict",
|
|
"List",
|
|
"Optional",
|
|
"Tensor",
|
|
"Tuple"
|
|
],
|
|
"torch.distributed.optim.functional_adamw": [
|
|
"Dict",
|
|
"List",
|
|
"Optional",
|
|
"Tensor",
|
|
"Tuple"
|
|
],
|
|
"torch.distributed.optim.functional_rmsprop": [
|
|
"Dict",
|
|
"List",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.distributed.optim.functional_rprop": [
|
|
"Dict",
|
|
"List",
|
|
"Optional",
|
|
"Tensor",
|
|
"Tuple"
|
|
],
|
|
"torch.distributed.optim.functional_sgd": [
|
|
"Dict",
|
|
"List",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.distributed.optim.optimizer": [
|
|
"List",
|
|
"Lock",
|
|
"Optional",
|
|
"RRef",
|
|
"Tensor",
|
|
"defaultdict"
|
|
],
|
|
"torch.distributed.optim.utils": [
|
|
"Type"
|
|
],
|
|
"torch.distributed.pipeline.sync.checkpoint": [
|
|
"Checkpoint",
|
|
"Checkpointing",
|
|
"Context",
|
|
"Function",
|
|
"Recompute",
|
|
"ThreadLocal",
|
|
"checkpoint",
|
|
"enable_checkpointing",
|
|
"enable_recomputing",
|
|
"restore_rng_states",
|
|
"save_rng_states"
|
|
],
|
|
"torch.distributed.pipeline.sync.copy": [
|
|
"Context",
|
|
"Copy",
|
|
"Wait"
|
|
],
|
|
"torch.distributed.pipeline.sync.dependency": [
|
|
"Fork",
|
|
"Join",
|
|
"fork",
|
|
"join"
|
|
],
|
|
"torch.distributed.pipeline.sync.microbatch": [
|
|
"Batch",
|
|
"NoChunk",
|
|
"check",
|
|
"gather",
|
|
"scatter"
|
|
],
|
|
"torch.distributed.pipeline.sync.phony": [
|
|
"get_phony"
|
|
],
|
|
"torch.distributed.pipeline.sync.pipe": [
|
|
"BalanceError",
|
|
"PipeSequential",
|
|
"Pipeline",
|
|
"WithDevice"
|
|
],
|
|
"torch.distributed.pipeline.sync.pipeline": [
|
|
"Pipeline"
|
|
],
|
|
"torch.distributed.pipeline.sync.skip.layout": [
|
|
"SkipLayout",
|
|
"inspect_skip_layout"
|
|
],
|
|
"torch.distributed.pipeline.sync.skip.portal": [
|
|
"Context",
|
|
"Portal",
|
|
"PortalBlue",
|
|
"PortalCopy",
|
|
"PortalOrange"
|
|
],
|
|
"torch.distributed.pipeline.sync.skip.skippable": [
|
|
"Skippable"
|
|
],
|
|
"torch.distributed.pipeline.sync.skip.tracker": [
|
|
"SkipTracker",
|
|
"SkipTrackerThroughPotals",
|
|
"ThreadLocal",
|
|
"current_skip_tracker",
|
|
"use_skip_tracker"
|
|
],
|
|
"torch.distributed.pipeline.sync.stream": [
|
|
"CPUStreamType",
|
|
"as_cuda",
|
|
"current_stream",
|
|
"default_stream",
|
|
"get_device",
|
|
"is_cuda",
|
|
"new_stream",
|
|
"record_stream",
|
|
"use_device",
|
|
"use_stream",
|
|
"wait_stream"
|
|
],
|
|
"torch.distributed.pipeline.sync.worker": [
|
|
"Task",
|
|
"create_workers",
|
|
"spawn_workers",
|
|
"worker"
|
|
],
|
|
"torch.distributed.remote_device": [
|
|
"Optional",
|
|
"Union"
|
|
],
|
|
"torch.distributed.rendezvous": [
|
|
"Dict",
|
|
"FileStore",
|
|
"Iterable",
|
|
"Optional",
|
|
"PrefixStore",
|
|
"Store",
|
|
"TCPStore",
|
|
"Tuple",
|
|
"Union",
|
|
"cast",
|
|
"timedelta",
|
|
"urlparse",
|
|
"urlunparse"
|
|
],
|
|
"torch.distributed.rpc": [
|
|
"Any",
|
|
"Dict",
|
|
"Future",
|
|
"Generator",
|
|
"Generic",
|
|
"GenericWithOneTypeVar",
|
|
"PyRRef",
|
|
"RemoteProfilerManager",
|
|
"RpcAgent",
|
|
"RpcBackendOptions",
|
|
"Set",
|
|
"Store",
|
|
"TensorPipeAgent",
|
|
"Tuple",
|
|
"TypeVar",
|
|
"WorkerInfo",
|
|
"enable_gil_profiling",
|
|
"get_rpc_timeout",
|
|
"method",
|
|
"timedelta",
|
|
"urlparse"
|
|
],
|
|
"torch.distributed.rpc.api": [
|
|
"Any",
|
|
"Dict",
|
|
"Future",
|
|
"Generic",
|
|
"GenericWithOneTypeVar",
|
|
"PyRRef",
|
|
"PythonUDF",
|
|
"RPCExecMode",
|
|
"RemoteProfilerManager",
|
|
"Set",
|
|
"TypeVar",
|
|
"WorkerInfo",
|
|
"get_rpc_timeout",
|
|
"method"
|
|
],
|
|
"torch.distributed.rpc.backend_registry": [
|
|
"Dict",
|
|
"List",
|
|
"Set",
|
|
"Tuple"
|
|
],
|
|
"torch.distributed.rpc.constants": [
|
|
"timedelta"
|
|
],
|
|
"torch.distributed.rpc.internal": [
|
|
"Enum"
|
|
],
|
|
"torch.distributed.rpc.options": [
|
|
"DeviceType",
|
|
"Dict",
|
|
"List",
|
|
"Optional",
|
|
"Union"
|
|
],
|
|
"torch.distributed.rpc.server_process_global_profiler": [
|
|
"profile"
|
|
],
|
|
"torch.distributions.bernoulli": [
|
|
"ExponentialFamily",
|
|
"Number",
|
|
"binary_cross_entropy_with_logits",
|
|
"broadcast_all",
|
|
"lazy_property",
|
|
"logits_to_probs",
|
|
"probs_to_logits"
|
|
],
|
|
"torch.distributions.beta": [
|
|
"Dirichlet",
|
|
"ExponentialFamily",
|
|
"Number",
|
|
"Real",
|
|
"broadcast_all"
|
|
],
|
|
"torch.distributions.binomial": [
|
|
"Distribution",
|
|
"broadcast_all",
|
|
"lazy_property",
|
|
"logits_to_probs",
|
|
"probs_to_logits"
|
|
],
|
|
"torch.distributions.categorical": [
|
|
"Distribution",
|
|
"lazy_property",
|
|
"logits_to_probs",
|
|
"probs_to_logits"
|
|
],
|
|
"torch.distributions.cauchy": [
|
|
"Distribution",
|
|
"Number",
|
|
"broadcast_all"
|
|
],
|
|
"torch.distributions.chi2": [
|
|
"Gamma"
|
|
],
|
|
"torch.distributions.continuous_bernoulli": [
|
|
"ExponentialFamily",
|
|
"Number",
|
|
"binary_cross_entropy_with_logits",
|
|
"broadcast_all",
|
|
"clamp_probs",
|
|
"lazy_property",
|
|
"logits_to_probs",
|
|
"probs_to_logits"
|
|
],
|
|
"torch.distributions.dirichlet": [
|
|
"ExponentialFamily",
|
|
"Function",
|
|
"once_differentiable"
|
|
],
|
|
"torch.distributions.distribution": [
|
|
"Any",
|
|
"Dict",
|
|
"Optional",
|
|
"lazy_property"
|
|
],
|
|
"torch.distributions.exp_family": [
|
|
"Distribution"
|
|
],
|
|
"torch.distributions.exponential": [
|
|
"ExponentialFamily",
|
|
"Number",
|
|
"broadcast_all"
|
|
],
|
|
"torch.distributions.fishersnedecor": [
|
|
"Distribution",
|
|
"Gamma",
|
|
"Number",
|
|
"broadcast_all"
|
|
],
|
|
"torch.distributions.gamma": [
|
|
"ExponentialFamily",
|
|
"Number",
|
|
"broadcast_all"
|
|
],
|
|
"torch.distributions.geometric": [
|
|
"Distribution",
|
|
"Number",
|
|
"binary_cross_entropy_with_logits",
|
|
"broadcast_all",
|
|
"lazy_property",
|
|
"logits_to_probs",
|
|
"probs_to_logits"
|
|
],
|
|
"torch.distributions.gumbel": [
|
|
"AffineTransform",
|
|
"ExpTransform",
|
|
"Number",
|
|
"TransformedDistribution",
|
|
"Uniform",
|
|
"broadcast_all"
|
|
],
|
|
"torch.distributions.half_cauchy": [
|
|
"AbsTransform",
|
|
"Cauchy",
|
|
"TransformedDistribution"
|
|
],
|
|
"torch.distributions.half_normal": [
|
|
"AbsTransform",
|
|
"Normal",
|
|
"TransformedDistribution"
|
|
],
|
|
"torch.distributions.independent": [
|
|
"Dict",
|
|
"Distribution"
|
|
],
|
|
"torch.distributions.kl": [
|
|
"Bernoulli",
|
|
"Beta",
|
|
"Binomial",
|
|
"Callable",
|
|
"Categorical",
|
|
"Cauchy",
|
|
"ContinuousBernoulli",
|
|
"Dict",
|
|
"Dirichlet",
|
|
"Distribution",
|
|
"Exponential",
|
|
"ExponentialFamily",
|
|
"Gamma",
|
|
"Geometric",
|
|
"Gumbel",
|
|
"HalfNormal",
|
|
"Independent",
|
|
"Laplace",
|
|
"LowRankMultivariateNormal",
|
|
"MultivariateNormal",
|
|
"Normal",
|
|
"OneHotCategorical",
|
|
"Pareto",
|
|
"Poisson",
|
|
"TransformedDistribution",
|
|
"Tuple",
|
|
"Type",
|
|
"Uniform",
|
|
"total_ordering"
|
|
],
|
|
"torch.distributions.kumaraswamy": [
|
|
"AffineTransform",
|
|
"PowerTransform",
|
|
"TransformedDistribution",
|
|
"Uniform",
|
|
"broadcast_all"
|
|
],
|
|
"torch.distributions.laplace": [
|
|
"Distribution",
|
|
"Number",
|
|
"broadcast_all"
|
|
],
|
|
"torch.distributions.lkj_cholesky": [
|
|
"Beta",
|
|
"Distribution",
|
|
"broadcast_all"
|
|
],
|
|
"torch.distributions.log_normal": [
|
|
"ExpTransform",
|
|
"Normal",
|
|
"TransformedDistribution"
|
|
],
|
|
"torch.distributions.logistic_normal": [
|
|
"Normal",
|
|
"StickBreakingTransform",
|
|
"TransformedDistribution"
|
|
],
|
|
"torch.distributions.lowrank_multivariate_normal": [
|
|
"Distribution",
|
|
"lazy_property"
|
|
],
|
|
"torch.distributions.mixture_same_family": [
|
|
"Categorical",
|
|
"Dict",
|
|
"Distribution"
|
|
],
|
|
"torch.distributions.multinomial": [
|
|
"Binomial",
|
|
"Categorical",
|
|
"Distribution",
|
|
"broadcast_all"
|
|
],
|
|
"torch.distributions.multivariate_normal": [
|
|
"Distribution",
|
|
"lazy_property"
|
|
],
|
|
"torch.distributions.negative_binomial": [
|
|
"Distribution",
|
|
"broadcast_all",
|
|
"lazy_property",
|
|
"logits_to_probs",
|
|
"probs_to_logits"
|
|
],
|
|
"torch.distributions.normal": [
|
|
"ExponentialFamily",
|
|
"Number",
|
|
"Real",
|
|
"broadcast_all"
|
|
],
|
|
"torch.distributions.one_hot_categorical": [
|
|
"Categorical",
|
|
"Distribution"
|
|
],
|
|
"torch.distributions.pareto": [
|
|
"AffineTransform",
|
|
"ExpTransform",
|
|
"Exponential",
|
|
"TransformedDistribution",
|
|
"broadcast_all"
|
|
],
|
|
"torch.distributions.poisson": [
|
|
"ExponentialFamily",
|
|
"Number",
|
|
"broadcast_all"
|
|
],
|
|
"torch.distributions.relaxed_bernoulli": [
|
|
"Distribution",
|
|
"Number",
|
|
"SigmoidTransform",
|
|
"TransformedDistribution",
|
|
"broadcast_all",
|
|
"clamp_probs",
|
|
"lazy_property",
|
|
"logits_to_probs",
|
|
"probs_to_logits"
|
|
],
|
|
"torch.distributions.relaxed_categorical": [
|
|
"Categorical",
|
|
"Distribution",
|
|
"ExpTransform",
|
|
"TransformedDistribution",
|
|
"broadcast_all",
|
|
"clamp_probs"
|
|
],
|
|
"torch.distributions.studentT": [
|
|
"Chi2",
|
|
"Distribution",
|
|
"broadcast_all"
|
|
],
|
|
"torch.distributions.transformed_distribution": [
|
|
"ComposeTransform",
|
|
"Dict",
|
|
"Distribution",
|
|
"Independent",
|
|
"Transform"
|
|
],
|
|
"torch.distributions.uniform": [
|
|
"Distribution",
|
|
"Number",
|
|
"broadcast_all"
|
|
],
|
|
"torch.distributions.utils": [
|
|
"Any",
|
|
"Dict",
|
|
"Number",
|
|
"is_tensor_like",
|
|
"update_wrapper"
|
|
],
|
|
"torch.distributions.von_mises": [
|
|
"Distribution",
|
|
"broadcast_all",
|
|
"lazy_property"
|
|
],
|
|
"torch.distributions.weibull": [
|
|
"AffineTransform",
|
|
"Exponential",
|
|
"PowerTransform",
|
|
"TransformedDistribution",
|
|
"broadcast_all"
|
|
],
|
|
"torch.distributions.wishart": [
|
|
"ExponentialFamily",
|
|
"Number",
|
|
"Union",
|
|
"lazy_property"
|
|
],
|
|
"torch.fft": [
|
|
"Tensor",
|
|
"fft",
|
|
"fft2",
|
|
"fftfreq",
|
|
"fftn",
|
|
"fftshift",
|
|
"hfft",
|
|
"ifft",
|
|
"ifft2",
|
|
"ifftn",
|
|
"ifftshift",
|
|
"ihfft",
|
|
"irfft",
|
|
"irfft2",
|
|
"irfftn",
|
|
"rfft",
|
|
"rfft2",
|
|
"rfftfreq",
|
|
"rfftn"
|
|
],
|
|
"torch.functional": [
|
|
"istft",
|
|
"pca_lowrank",
|
|
"svd_lowrank"
|
|
],
|
|
"torch.futures": [
|
|
"Callable",
|
|
"Future",
|
|
"Generic",
|
|
"List",
|
|
"Optional",
|
|
"Type",
|
|
"TypeVar",
|
|
"Union",
|
|
"cast"
|
|
],
|
|
"torch.fx": [
|
|
"ProxyableClassMeta",
|
|
"Tracer",
|
|
"symbolic_trace",
|
|
"wrap"
|
|
],
|
|
"torch.fx.experimental.unification.core": [
|
|
"Iterator",
|
|
"assoc",
|
|
"dispatch",
|
|
"isvar",
|
|
"partial",
|
|
"unify",
|
|
"walk"
|
|
],
|
|
"torch.fx.experimental.unification.dispatch": [
|
|
"dispatch",
|
|
"partial"
|
|
],
|
|
"torch.fx.experimental.unification.more": [
|
|
"dispatch",
|
|
"reify",
|
|
"unify"
|
|
],
|
|
"torch.fx.experimental.unification.multipledispatch.conflict": [
|
|
"groupby",
|
|
"isvariadic"
|
|
],
|
|
"torch.fx.experimental.unification.multipledispatch.core": [
|
|
"Dispatcher",
|
|
"MethodDispatcher"
|
|
],
|
|
"torch.fx.experimental.unification.multipledispatch.dispatcher": [
|
|
"AmbiguityWarning",
|
|
"Variadic",
|
|
"ambiguities",
|
|
"expand_tuples",
|
|
"isvariadic",
|
|
"ordering",
|
|
"super_signature",
|
|
"warn"
|
|
],
|
|
"torch.fx.experimental.unification.multipledispatch.utils": [
|
|
"OrderedDict"
|
|
],
|
|
"torch.fx.experimental.unification.multipledispatch.variadic": [
|
|
"typename"
|
|
],
|
|
"torch.fx.experimental.unification.unification_tools": [
|
|
"first",
|
|
"getter",
|
|
"groupby"
|
|
],
|
|
"torch.fx.experimental.unification.variable": [
|
|
"contextmanager",
|
|
"dispatch",
|
|
"hashable",
|
|
"isvar"
|
|
],
|
|
"torch.fx.graph": [
|
|
"Any",
|
|
"Argument",
|
|
"Callable",
|
|
"Dict",
|
|
"FrozenSet",
|
|
"List",
|
|
"NamedTuple",
|
|
"Node",
|
|
"Optional",
|
|
"Set",
|
|
"Target",
|
|
"TransformCodeFunc",
|
|
"Tuple",
|
|
"Type",
|
|
"compatibility",
|
|
"contextmanager",
|
|
"dataclass",
|
|
"map_arg"
|
|
],
|
|
"torch.fx.graph_module": [
|
|
"Any",
|
|
"Dict",
|
|
"Graph",
|
|
"Importer",
|
|
"List",
|
|
"Optional",
|
|
"PackageExporter",
|
|
"PackageImporter",
|
|
"Path",
|
|
"PythonCode",
|
|
"Set",
|
|
"Type",
|
|
"Union",
|
|
"compatibility"
|
|
],
|
|
"torch.fx.immutable_collections": [
|
|
"Any",
|
|
"Context",
|
|
"Dict",
|
|
"List",
|
|
"Tuple",
|
|
"compatibility"
|
|
],
|
|
"torch.fx.interpreter": [
|
|
"Any",
|
|
"Argument",
|
|
"Dict",
|
|
"Graph",
|
|
"GraphModule",
|
|
"Iterator",
|
|
"List",
|
|
"Node",
|
|
"Optional",
|
|
"Proxy",
|
|
"Target",
|
|
"Tracer",
|
|
"Tuple",
|
|
"Union",
|
|
"compatibility",
|
|
"map_aggregate",
|
|
"map_arg"
|
|
],
|
|
"torch.fx.node": [
|
|
"Any",
|
|
"ArgsKwargsPair",
|
|
"Argument",
|
|
"BaseArgumentTypes",
|
|
"Callable",
|
|
"Dict",
|
|
"List",
|
|
"Optional",
|
|
"Set",
|
|
"Target",
|
|
"Tuple",
|
|
"Union",
|
|
"compatibility",
|
|
"immutable_dict",
|
|
"immutable_list",
|
|
"normalize_function",
|
|
"normalize_module"
|
|
],
|
|
"torch.fx.operator_schemas": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"List",
|
|
"NamedTuple",
|
|
"OpOverload",
|
|
"OpOverloadPacket",
|
|
"Optional",
|
|
"Tuple",
|
|
"cast",
|
|
"compatibility"
|
|
],
|
|
"torch.fx.passes.graph_drawer": [
|
|
"Any",
|
|
"Dict",
|
|
"TensorMetadata",
|
|
"chain",
|
|
"compatibility"
|
|
],
|
|
"torch.fx.passes.graph_manipulation": [
|
|
"Any",
|
|
"Argument",
|
|
"Dict",
|
|
"Graph",
|
|
"GraphModule",
|
|
"List",
|
|
"NamedTuple",
|
|
"Node",
|
|
"Optional",
|
|
"ShapeProp",
|
|
"Target",
|
|
"Tuple",
|
|
"compatibility",
|
|
"lift_lowering_attrs_to_nodes",
|
|
"map_aggregate",
|
|
"map_arg"
|
|
],
|
|
"torch.fx.passes.net_min_base": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"FxNetAccFusionsFinder",
|
|
"Names",
|
|
"NodeList",
|
|
"NodeSet",
|
|
"Optional",
|
|
"ShapeProp",
|
|
"TensorOrTensors",
|
|
"Tensors",
|
|
"Tuple",
|
|
"compatibility",
|
|
"dataclass",
|
|
"map_arg",
|
|
"split_by_tags"
|
|
],
|
|
"torch.fx.passes.operator_support": [
|
|
"IsNodeSupported",
|
|
"SupportDict",
|
|
"SupportedArgumentDTypes",
|
|
"TargetTypeName",
|
|
"TensorMetadata",
|
|
"compatibility",
|
|
"get_node_target"
|
|
],
|
|
"torch.fx.passes.param_fetch": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"GraphModule",
|
|
"List",
|
|
"Tuple",
|
|
"Type",
|
|
"compatibility"
|
|
],
|
|
"torch.fx.passes.shape_prop": [
|
|
"Any",
|
|
"Dict",
|
|
"NamedTuple",
|
|
"Node",
|
|
"Optional",
|
|
"Tuple",
|
|
"compatibility",
|
|
"map_aggregate"
|
|
],
|
|
"torch.fx.passes.split_module": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"GraphModule",
|
|
"List",
|
|
"Optional",
|
|
"compatibility"
|
|
],
|
|
"torch.fx.passes.split_utils": [
|
|
"Dict",
|
|
"List",
|
|
"NodeList",
|
|
"NodeSet",
|
|
"Optional",
|
|
"compatibility",
|
|
"dataclass",
|
|
"field",
|
|
"map_arg"
|
|
],
|
|
"torch.fx.passes.splitter_base": [
|
|
"Any",
|
|
"Dict",
|
|
"FxGraphDrawer",
|
|
"FxNetAccFusionsFinder",
|
|
"Iterable",
|
|
"List",
|
|
"NamedTuple",
|
|
"NodeList",
|
|
"NodeSet",
|
|
"OperatorSupportBase",
|
|
"Optional",
|
|
"Sequence",
|
|
"ShapeProp",
|
|
"Tensors",
|
|
"Tuple",
|
|
"compatibility",
|
|
"dataclass",
|
|
"defaultdict",
|
|
"get_node_target",
|
|
"get_size_of_node",
|
|
"is_node_output_tensor",
|
|
"map_arg",
|
|
"split_by_tags"
|
|
],
|
|
"torch.fx.passes.tools_common": [
|
|
"Any",
|
|
"Dict",
|
|
"List",
|
|
"Mapping",
|
|
"Names",
|
|
"NodeList",
|
|
"NodeSet",
|
|
"Set",
|
|
"TensorOrTensors",
|
|
"Tensors",
|
|
"Tuple",
|
|
"Union",
|
|
"compatibility",
|
|
"dataclass"
|
|
],
|
|
"torch.fx.proxy": [
|
|
"Any",
|
|
"Argument",
|
|
"Callable",
|
|
"Dict",
|
|
"Graph",
|
|
"Iterable",
|
|
"Iterator",
|
|
"Node",
|
|
"Optional",
|
|
"Target",
|
|
"Tuple",
|
|
"check_for_mutable_operation",
|
|
"compatibility",
|
|
"map_aggregate"
|
|
],
|
|
"torch.fx.subgraph_rewriter": [
|
|
"Callable",
|
|
"Dict",
|
|
"Graph",
|
|
"GraphModule",
|
|
"List",
|
|
"NamedTuple",
|
|
"Node",
|
|
"Optional",
|
|
"Set",
|
|
"compatibility",
|
|
"symbolic_trace"
|
|
],
|
|
"torch.hub": [
|
|
"HTTPError",
|
|
"Path",
|
|
"Request",
|
|
"tqdm",
|
|
"urlopen",
|
|
"urlparse"
|
|
],
|
|
"torch.jit": [
|
|
"Attribute",
|
|
"Final",
|
|
"Iterator",
|
|
"ONNXTracedModule",
|
|
"RecursiveScriptClass",
|
|
"RecursiveScriptModule",
|
|
"ScriptModule",
|
|
"ScriptWarning",
|
|
"TopLevelTracedModule",
|
|
"TracedModule",
|
|
"TracerWarning",
|
|
"TracingCheckError",
|
|
"contextmanager",
|
|
"export",
|
|
"fork",
|
|
"freeze",
|
|
"fuser",
|
|
"ignore",
|
|
"interface",
|
|
"is_scripting",
|
|
"is_tracing",
|
|
"jit_module_from_flatbuffer",
|
|
"last_executed_optimized_graph",
|
|
"load",
|
|
"optimize_for_inference",
|
|
"optimized_execution",
|
|
"run_frozen_optimizations",
|
|
"save",
|
|
"save_jit_module_to_flatbuffer",
|
|
"script",
|
|
"script_method",
|
|
"set_fusion_strategy",
|
|
"set_module",
|
|
"trace",
|
|
"trace_module",
|
|
"unused",
|
|
"wait"
|
|
],
|
|
"torch.jit.annotations": [
|
|
"Any",
|
|
"AnyType",
|
|
"ComplexType",
|
|
"Dict",
|
|
"DictType",
|
|
"EvalEnv",
|
|
"FloatType",
|
|
"IntType",
|
|
"List",
|
|
"ListType",
|
|
"StringType",
|
|
"TensorType",
|
|
"Tuple",
|
|
"TupleType",
|
|
"get_enum_value_type",
|
|
"is_dict",
|
|
"is_function_or_method",
|
|
"is_list",
|
|
"is_optional",
|
|
"is_tensor",
|
|
"is_tuple",
|
|
"is_union",
|
|
"is_vararg"
|
|
],
|
|
"torch.jit.frontend": [
|
|
"Apply",
|
|
"Assert",
|
|
"Assign",
|
|
"Attribute",
|
|
"AugAssign",
|
|
"BinOp",
|
|
"Break",
|
|
"ClassDef",
|
|
"Const",
|
|
"Continue",
|
|
"Decl",
|
|
"Def",
|
|
"Delete",
|
|
"DictComp",
|
|
"DictLiteral",
|
|
"Dots",
|
|
"EmptyTypeAnnotation",
|
|
"ExprStmt",
|
|
"FalseLiteral",
|
|
"For",
|
|
"FunctionModifiers",
|
|
"Ident",
|
|
"If",
|
|
"List",
|
|
"ListComp",
|
|
"ListLiteral",
|
|
"NoneLiteral",
|
|
"Param",
|
|
"Pass",
|
|
"Property",
|
|
"Raise",
|
|
"Return",
|
|
"Select",
|
|
"SliceExpr",
|
|
"Starred",
|
|
"Stmt",
|
|
"StringLiteral",
|
|
"Subscript",
|
|
"TernaryIf",
|
|
"TrueLiteral",
|
|
"Tuple",
|
|
"TupleLiteral",
|
|
"UnaryOp",
|
|
"Var",
|
|
"While",
|
|
"With",
|
|
"WithItem",
|
|
"dedent",
|
|
"get_qualified_name",
|
|
"get_source_lines_and_file",
|
|
"is_static_fn",
|
|
"make_source_context",
|
|
"namedtuple",
|
|
"parse_def",
|
|
"should_drop",
|
|
"monkeytype_trace"
|
|
],
|
|
"torch.linalg": [
|
|
"LinAlgError",
|
|
"Tensor",
|
|
"cholesky",
|
|
"cholesky_ex",
|
|
"cond",
|
|
"cross",
|
|
"det",
|
|
"diagonal",
|
|
"eig",
|
|
"eigh",
|
|
"eigvals",
|
|
"eigvalsh",
|
|
"householder_product",
|
|
"inv",
|
|
"inv_ex",
|
|
"ldl_factor",
|
|
"ldl_factor_ex",
|
|
"ldl_solve",
|
|
"lstsq",
|
|
"lu_factor",
|
|
"lu_factor_ex",
|
|
"matmul",
|
|
"matrix_exp",
|
|
"matrix_norm",
|
|
"matrix_power",
|
|
"matrix_rank",
|
|
"multi_dot",
|
|
"norm",
|
|
"pinv",
|
|
"qr",
|
|
"slogdet",
|
|
"solve",
|
|
"solve_triangular",
|
|
"svd",
|
|
"svdvals",
|
|
"tensorinv",
|
|
"tensorsolve",
|
|
"vector_norm"
|
|
],
|
|
"torch.multiprocessing": [
|
|
"Array",
|
|
"AuthenticationError",
|
|
"Barrier",
|
|
"BoundedSemaphore",
|
|
"BufferTooShort",
|
|
"Condition",
|
|
"Event",
|
|
"JoinableQueue",
|
|
"Lock",
|
|
"Manager",
|
|
"Pipe",
|
|
"Pool",
|
|
"Process",
|
|
"ProcessContext",
|
|
"ProcessError",
|
|
"ProcessExitedException",
|
|
"ProcessRaisedException",
|
|
"Queue",
|
|
"RLock",
|
|
"RawArray",
|
|
"RawValue",
|
|
"Semaphore",
|
|
"SimpleQueue",
|
|
"SpawnContext",
|
|
"TimeoutError",
|
|
"Value",
|
|
"active_children",
|
|
"allow_connection_pickling",
|
|
"cpu_count",
|
|
"current_process",
|
|
"freeze_support",
|
|
"get_all_start_methods",
|
|
"get_context",
|
|
"get_logger",
|
|
"get_start_method",
|
|
"init_reductions",
|
|
"log_to_stderr",
|
|
"set_executable",
|
|
"set_forkserver_preload",
|
|
"set_start_method",
|
|
"spawn",
|
|
"start_processes",
|
|
"parent_process"
|
|
],
|
|
"torch.multiprocessing.reductions": [
|
|
"ForkingPickler",
|
|
"Union",
|
|
"check_serializing_named_tensor",
|
|
"register_after_fork"
|
|
],
|
|
"torch.multiprocessing.spawn": [
  "Optional"
],
"torch.nn.common_types": [
|
|
"Optional",
|
|
"Tensor",
|
|
"Tuple",
|
|
"TypeVar",
|
|
"Union"
|
|
],
|
|
"torch.nn.functional": [
|
|
"Callable",
|
|
"DType",
|
|
"List",
|
|
"Optional",
|
|
"Tensor",
|
|
"Tuple",
|
|
"Union",
|
|
"adaptive_avg_pool1d",
|
|
"avg_pool1d",
|
|
"avg_pool2d",
|
|
"avg_pool3d",
|
|
"bilinear",
|
|
"boolean_dispatch",
|
|
"celu_",
|
|
"channel_shuffle",
|
|
"conv1d",
|
|
"conv2d",
|
|
"conv3d",
|
|
"conv_tbc",
|
|
"conv_transpose1d",
|
|
"conv_transpose2d",
|
|
"conv_transpose3d",
|
|
"cosine_similarity",
|
|
"elu_",
|
|
"gelu",
|
|
"handle_torch_function",
|
|
"hardshrink",
|
|
"hardtanh_",
|
|
"has_torch_function",
|
|
"has_torch_function_unary",
|
|
"has_torch_function_variadic",
|
|
"leaky_relu_",
|
|
"linear",
|
|
"logsigmoid",
|
|
"native_channel_shuffle",
|
|
"one_hot",
|
|
"pairwise_distance",
|
|
"pdist",
|
|
"pixel_shuffle",
|
|
"pixel_unshuffle",
|
|
"prelu",
|
|
"relu_",
|
|
"rrelu_",
|
|
"selu_",
|
|
"softplus",
|
|
"softshrink",
|
|
"threshold_"
|
|
],
|
|
"torch.nn.init": [
|
|
"Tensor"
|
|
],
|
|
"torch.nn.intrinsic.modules": [
|
|
"_FusedModule"
|
|
],
|
|
"torch.nn.intrinsic.modules.fused": [
|
|
"BatchNorm1d",
|
|
"BatchNorm2d",
|
|
"BatchNorm3d",
|
|
"Conv1d",
|
|
"Conv2d",
|
|
"Conv3d",
|
|
"Linear",
|
|
"ReLU",
|
|
"type_before_parametrizations"
|
|
],
|
|
"torch.nn.intrinsic.qat.modules.conv_fused": [
|
|
"Parameter",
|
|
"TypeVar",
|
|
"fuse_conv_bn_weights"
|
|
],
|
|
"torch.nn.intrinsic.qat.modules.linear_fused": [
|
|
"Parameter",
|
|
"fuse_linear_bn_weights"
|
|
],
|
|
"torch.nn.intrinsic.quantized.modules.conv_relu": [
|
|
"fuse_conv_bn_weights"
|
|
],
|
|
"torch.nn.modules.activation": [
|
|
"Module",
|
|
"NonDynamicallyQuantizableLinear",
|
|
"Optional",
|
|
"Parameter",
|
|
"Tensor",
|
|
"Tuple",
|
|
"constant_",
|
|
"xavier_normal_",
|
|
"xavier_uniform_"
|
|
],
|
|
"torch.nn.modules.adaptive": [
|
|
"Linear",
|
|
"List",
|
|
"Module",
|
|
"ModuleList",
|
|
"Sequence",
|
|
"Sequential",
|
|
"Tensor",
|
|
"log_softmax",
|
|
"namedtuple"
|
|
],
|
|
"torch.nn.modules.batchnorm": [
|
|
"Any",
|
|
"LazyModuleMixin",
|
|
"Module",
|
|
"Optional",
|
|
"Parameter",
|
|
"Tensor",
|
|
"UninitializedBuffer",
|
|
"UninitializedParameter",
|
|
"sync_batch_norm"
|
|
],
|
|
"torch.nn.modules.channelshuffle": [
|
|
"Module",
|
|
"Tensor"
|
|
],
|
|
"torch.nn.modules.container": [
|
|
"Any",
|
|
"Dict",
|
|
"Iterable",
|
|
"Iterator",
|
|
"Mapping",
|
|
"Module",
|
|
"Optional",
|
|
"OrderedDict",
|
|
"Parameter",
|
|
"Tuple",
|
|
"TypeVar",
|
|
"Union",
|
|
"chain",
|
|
"islice",
|
|
"overload"
|
|
],
|
|
"torch.nn.modules.conv": [
|
|
"LazyModuleMixin",
|
|
"List",
|
|
"Module",
|
|
"Optional",
|
|
"Parameter",
|
|
"Tensor",
|
|
"Tuple",
|
|
"UninitializedParameter",
|
|
"Union"
|
|
],
|
|
"torch.nn.modules.distance": [
|
|
"Module",
|
|
"Tensor"
|
|
],
|
|
"torch.nn.modules.dropout": [
|
|
"Module",
|
|
"Tensor"
|
|
],
|
|
"torch.nn.modules.flatten": [
|
|
"Module",
|
|
"Tensor",
|
|
"Tuple",
|
|
"Union"
|
|
],
|
|
"torch.nn.modules.fold": [
|
|
"Module",
|
|
"Tensor"
|
|
],
|
|
"torch.nn.modules.instancenorm": [
|
|
"Tensor"
|
|
],
|
|
"torch.nn.modules.lazy": [
|
|
"Protocol",
|
|
"is_lazy"
|
|
],
|
|
"torch.nn.modules.linear": [
|
|
"LazyModuleMixin",
|
|
"Module",
|
|
"Parameter",
|
|
"Tensor",
|
|
"UninitializedParameter"
|
|
],
|
|
"torch.nn.modules.loss": [
|
|
"Callable",
|
|
"Module",
|
|
"Optional",
|
|
"PairwiseDistance",
|
|
"Tensor"
|
|
],
|
|
"torch.nn.modules.module": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"Iterator",
|
|
"List",
|
|
"Mapping",
|
|
"Optional",
|
|
"OrderedDict",
|
|
"Parameter",
|
|
"RemovableHandle",
|
|
"Set",
|
|
"Tensor",
|
|
"Tuple",
|
|
"TypeVar",
|
|
"Union",
|
|
"device",
|
|
"dtype",
|
|
"namedtuple",
|
|
"overload"
|
|
],
|
|
"torch.nn.modules.normalization": [
|
|
"List",
|
|
"Module",
|
|
"Parameter",
|
|
"Size",
|
|
"Tensor",
|
|
"Tuple",
|
|
"Union"
|
|
],
|
|
"torch.nn.modules.padding": [
|
|
"Module",
|
|
"Sequence",
|
|
"Tensor",
|
|
"Tuple"
|
|
],
|
|
"torch.nn.modules.pixelshuffle": [
|
|
"Module",
|
|
"Tensor"
|
|
],
|
|
"torch.nn.modules.pooling": [
|
|
"List",
|
|
"Module",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.nn.modules.rnn": [
|
|
"List",
|
|
"Module",
|
|
"Optional",
|
|
"PackedSequence",
|
|
"Parameter",
|
|
"Tensor",
|
|
"Tuple",
|
|
"overload"
|
|
],
|
|
"torch.nn.modules.sparse": [
|
|
"Module",
|
|
"Optional",
|
|
"Parameter",
|
|
"Tensor"
|
|
],
|
|
"torch.nn.modules.transformer": [
|
|
"Any",
|
|
"Callable",
|
|
"Dropout",
|
|
"LayerNorm",
|
|
"Linear",
|
|
"Module",
|
|
"ModuleList",
|
|
"MultiheadAttention",
|
|
"Optional",
|
|
"Tensor",
|
|
"Union",
|
|
"xavier_uniform_"
|
|
],
|
|
"torch.nn.modules.upsampling": [
|
|
"Module",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.nn.modules.utils": [
|
|
"Any",
|
|
"Dict",
|
|
"List",
|
|
"repeat"
|
|
],
|
|
"torch.nn.parallel": [
|
|
"DistributedDataParallelCPU"
|
|
],
|
|
"torch.nn.parallel.comm": [
|
|
"List"
|
|
],
|
|
"torch.nn.parallel.data_parallel": [
|
|
"Module",
|
|
"chain",
|
|
"gather",
|
|
"parallel_apply",
|
|
"replicate",
|
|
"scatter_kwargs"
|
|
],
|
|
"torch.nn.parallel.distributed": [
|
|
"Any",
|
|
"Callable",
|
|
"Enum",
|
|
"Function",
|
|
"Join",
|
|
"JoinHook",
|
|
"Joinable",
|
|
"Module",
|
|
"RRef",
|
|
"ReduceOp",
|
|
"Type",
|
|
"Variable",
|
|
"auto",
|
|
"contextmanager",
|
|
"dataclass",
|
|
"gather",
|
|
"is_namedtuple",
|
|
"scatter_kwargs",
|
|
"tree_flatten",
|
|
"tree_unflatten"
|
|
],
|
|
"torch.nn.parallel.parallel_apply": [
|
|
"ExceptionWrapper",
|
|
"autocast"
|
|
],
|
|
"torch.nn.parallel.replicate": [
|
|
"OrderedDict"
|
|
],
|
|
"torch.nn.parallel.scatter_gather": [
|
|
"Gather",
|
|
"Scatter"
|
|
],
|
|
"torch.nn.parameter": [
|
|
"OrderedDict"
|
|
],
|
|
"torch.nn.qat.dynamic.modules.linear": [
|
|
"activation_is_memoryless"
|
|
],
|
|
"torch.nn.qat.modules.conv": [
|
|
"Tuple",
|
|
"TypeVar",
|
|
"Union"
|
|
],
|
|
"torch.nn.qat.modules.embedding_ops": [
|
|
"Tensor"
|
|
],
|
|
"torch.nn.qat.modules.linear": [
|
|
"LinearReLU",
|
|
"is_parametrized",
|
|
"transfer_parametrizations_and_params",
|
|
"type_before_parametrizations"
|
|
],
|
|
"torch.nn.quantizable.modules.activation": [
|
|
"Optional",
|
|
"Tensor",
|
|
"Tuple"
|
|
],
|
|
"torch.nn.quantizable.modules.rnn": [
|
|
"Optional",
|
|
"Tensor",
|
|
"Tuple"
|
|
],
|
|
"torch.nn.quantized": [
|
|
"MaxPool2d"
|
|
],
|
|
"torch.nn.quantized.dynamic.modules.conv": [
|
|
"Tensor"
|
|
],
|
|
"torch.nn.quantized.dynamic.modules.rnn": [
|
|
"Dict",
|
|
"List",
|
|
"Optional",
|
|
"PackedSequence",
|
|
"Tensor",
|
|
"Tuple",
|
|
"Union"
|
|
],
|
|
"torch.nn.quantized.functional": [
|
|
"List",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.nn.quantized.modules": [
|
|
"MaxPool2d",
|
|
"_ConvNd"
|
|
],
|
|
"torch.nn.quantized.modules.batchnorm": [
|
|
"Tensor"
|
|
],
|
|
"torch.nn.quantized.modules.conv": [
|
|
"List",
|
|
"Optional",
|
|
"TypeVar",
|
|
"WeightedQuantizedModule",
|
|
"fuse_conv_bn_weights"
|
|
],
|
|
"torch.nn.quantized.modules.embedding_ops": [
|
|
"List",
|
|
"Optional",
|
|
"Tensor",
|
|
"hide_packed_params_repr"
|
|
],
|
|
"torch.nn.quantized.modules.functional_modules": [
|
|
"List",
|
|
"Tensor"
|
|
],
|
|
"torch.nn.quantized.modules.linear": [
|
|
"Iterable",
|
|
"Optional",
|
|
"WeightedQuantizedModule",
|
|
"fuse_linear_bn_weights",
|
|
"hide_packed_params_repr",
|
|
"type_before_parametrizations"
|
|
],
|
|
"torch.nn.quantized.modules.utils": [
|
|
"repeat"
|
|
],
|
|
"torch.nn.utils.clip_grad": [
|
|
"Iterable",
|
|
"Union"
|
|
],
|
|
"torch.nn.utils.convert_parameters": [
|
|
"Iterable",
|
|
"Optional"
|
|
],
|
|
"torch.nn.utils.parametrizations": [
|
|
"Enum",
|
|
"Module",
|
|
"Optional",
|
|
"Tensor",
|
|
"auto"
|
|
],
|
|
"torch.nn.utils.parametrize": [
|
|
"Dict",
|
|
"Module",
|
|
"ModuleDict",
|
|
"ModuleList",
|
|
"Optional",
|
|
"Parameter",
|
|
"Sequence",
|
|
"Tensor",
|
|
"Tuple",
|
|
"Union",
|
|
"contextmanager"
|
|
],
|
|
"torch.nn.utils.rnn": [
|
|
"Iterable",
|
|
"List",
|
|
"Optional",
|
|
"Tensor",
|
|
"Tuple",
|
|
"Union",
|
|
"namedtuple"
|
|
],
|
|
"torch.nn.utils.spectral_norm": [
|
|
"Any",
|
|
"Module",
|
|
"Optional",
|
|
"TypeVar",
|
|
"normalize"
|
|
],
|
|
"torch.nn.utils.weight_norm": [
|
|
"Any",
|
|
"Module",
|
|
"Parameter",
|
|
"TypeVar",
|
|
"UninitializedParameter",
|
|
"norm_except_dim"
|
|
],
|
|
"torch.onnx": [
|
|
"Dict",
|
|
"OperatorExportTypes",
|
|
"Optional",
|
|
"TensorProtoDataType",
|
|
"TrainingMode"
|
|
],
|
|
"torch.optim.adadelta": [
|
|
"List",
|
|
"Optimizer",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.optim.adagrad": [
|
|
"List",
|
|
"Optimizer",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.optim.adam": [
|
|
"List",
|
|
"Optimizer",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.optim.adamax": [
|
|
"List",
|
|
"Optimizer",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.optim.adamw": [
|
|
"List",
|
|
"Optimizer",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.optim.asgd": [
|
|
"List",
|
|
"Optimizer",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.optim.lbfgs": [
|
|
"Optimizer",
|
|
"reduce"
|
|
],
|
|
"torch.optim.lr_scheduler": [
|
|
"Counter",
|
|
"Optimizer",
|
|
"bisect_right",
|
|
"wraps"
|
|
],
|
|
"torch.optim.nadam": [
|
|
"List",
|
|
"Optimizer",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.optim.optimizer": [
|
|
"chain",
|
|
"deepcopy",
|
|
"defaultdict"
|
|
],
|
|
"torch.optim.radam": [
|
|
"List",
|
|
"Optimizer",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.optim.rmsprop": [
|
|
"List",
|
|
"Optimizer",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.optim.rprop": [
|
|
"List",
|
|
"Optimizer",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.optim.sgd": [
|
|
"List",
|
|
"Optimizer",
|
|
"Optional",
|
|
"Tensor"
|
|
],
|
|
"torch.optim.sparse_adam": [
|
|
"Optimizer"
|
|
],
|
|
"torch.optim.swa_utils": [
|
|
"Module",
|
|
"deepcopy"
|
|
],
|
|
"torch.overrides": [
|
|
"BaseTorchFunctionMode",
|
|
"TorchFunctionMode",
|
|
"TorchFunctionModeMeta",
|
|
"enable_torch_function_mode",
|
|
"get_default_nowrap_functions",
|
|
"has_torch_function",
|
|
"push_torch_function_mode"
|
|
],
|
|
"torch.package.analyze.find_first_use_of_broken_modules": [
|
|
"Dict",
|
|
"List",
|
|
"PackagingError"
|
|
],
|
|
"torch.package.analyze.is_from_package": [
|
|
"Any",
|
|
"ModuleType",
|
|
"is_mangled"
|
|
],
|
|
"torch.package.analyze.trace_dependencies": [
|
|
"Any",
|
|
"Callable",
|
|
"Iterable",
|
|
"List",
|
|
"Tuple"
|
|
],
|
|
"torch.package.file_structure_representation": [
|
|
"Dict",
|
|
"GlobGroup",
|
|
"GlobPattern",
|
|
"List"
|
|
],
|
|
"torch.package.find_file_dependencies": [
|
|
"List",
|
|
"Optional",
|
|
"Tuple"
|
|
],
|
|
"torch.package.glob_group": [
|
|
"GlobPattern",
|
|
"Iterable",
|
|
"Union"
|
|
],
|
|
"torch.package.importer": [
|
|
"ABC",
|
|
"Any",
|
|
"Dict",
|
|
"List",
|
|
"ModuleType",
|
|
"Optional",
|
|
"Tuple",
|
|
"abstractmethod",
|
|
"demangle",
|
|
"get_mangle_prefix",
|
|
"is_mangled"
|
|
],
|
|
"torch.package.package_exporter": [
|
|
"ActionHook",
|
|
"Any",
|
|
"BinaryIO",
|
|
"Callable",
|
|
"DefaultDict",
|
|
"DiGraph",
|
|
"Dict",
|
|
"Enum",
|
|
"GlobGroup",
|
|
"GlobPattern",
|
|
"Importer",
|
|
"List",
|
|
"Optional",
|
|
"OrderedDict",
|
|
"OrderedImporter",
|
|
"Path",
|
|
"RemovableHandle",
|
|
"Sequence",
|
|
"Set",
|
|
"Storage",
|
|
"Union",
|
|
"cast",
|
|
"create_pickler",
|
|
"dataclass",
|
|
"defaultdict",
|
|
"demangle",
|
|
"find_files_source_depends_on",
|
|
"is_mangled",
|
|
"is_stdlib_module",
|
|
"location_tag",
|
|
"normalize_storage_type"
|
|
],
|
|
"torch.package.package_importer": [
|
|
"Any",
|
|
"BinaryIO",
|
|
"Callable",
|
|
"Dict",
|
|
"Directory",
|
|
"DirectoryReader",
|
|
"GlobPattern",
|
|
"Importer",
|
|
"List",
|
|
"Optional",
|
|
"PackageMangler",
|
|
"PackageUnpickler",
|
|
"Path",
|
|
"Union",
|
|
"WeakValueDictionary",
|
|
"cast",
|
|
"contextmanager",
|
|
"demangle"
|
|
],
|
|
"torch.profiler": [
|
|
"DeviceType",
|
|
"ProfilerActivity",
|
|
"kineto_available",
|
|
"record_function"
|
|
],
|
|
"torch.profiler.profiler": [
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"Enum",
|
|
"Iterable",
|
|
"List",
|
|
"Optional",
|
|
"ProfilerActivity",
|
|
"Tuple",
|
|
"kineto_available",
|
|
"partial",
|
|
"warn"
|
|
],
|
|
"torch.quantization": [
|
|
"ABC",
|
|
"DeQuantStub",
|
|
"FakeQuantize",
|
|
"FakeQuantizeBase",
|
|
"FixedQParamsFakeQuantize",
|
|
"FusedMovingAvgObsFakeQuantize",
|
|
"HistogramObserver",
|
|
"MinMaxObserver",
|
|
"MovingAverageMinMaxObserver",
|
|
"MovingAveragePerChannelMinMaxObserver",
|
|
"NoopObserver",
|
|
"ObserverBase",
|
|
"PerChannelMinMaxObserver",
|
|
"PlaceholderObserver",
|
|
"QConfig",
|
|
"QConfigAny",
|
|
"QConfigDynamic",
|
|
"QuantStub",
|
|
"QuantType",
|
|
"QuantWrapper",
|
|
"RecordingObserver",
|
|
"add_module_to_qconfig_obs_ctr",
|
|
"add_observer_",
|
|
"add_quant_dequant",
|
|
"assert_valid_qconfig",
|
|
"convert",
|
|
"convert_dynamic_jit",
|
|
"convert_jit",
|
|
"default_fixed_qparams_range_0to1_fake_quant",
|
|
"default_affine_fixed_qparams_fake_quant",
|
|
"default_debug_observer",
|
|
"default_dynamic_quant_observer",
|
|
"default_fake_quant",
|
|
"default_float_qparams_observer",
|
|
"default_fused_act_fake_quant",
|
|
"default_fused_per_channel_wt_fake_quant",
|
|
"default_fused_wt_fake_quant",
|
|
"default_histogram_fake_quant",
|
|
"default_histogram_observer",
|
|
"default_observer",
|
|
"default_per_channel_weight_fake_quant",
|
|
"default_per_channel_weight_observer",
|
|
"default_placeholder_observer",
|
|
"default_fixed_qparams_range_neg1to1_fake_quant",
|
|
"default_symmetric_fixed_qparams_fake_quant",
|
|
"default_weight_fake_quant",
|
|
"default_weight_observer",
|
|
"disable_fake_quant",
|
|
"disable_observer",
|
|
"enable_fake_quant",
|
|
"enable_observer",
|
|
"fuse_conv_bn",
|
|
"fuse_conv_bn_jit",
|
|
"fuse_conv_bn_relu",
|
|
"fuse_linear_bn",
|
|
"fuse_modules",
|
|
"get_default_compare_output_module_list",
|
|
"get_default_dynamic_quant_module_mappings",
|
|
"get_default_float_to_quantized_operator_mappings",
|
|
"get_default_qat_module_mappings",
|
|
"get_default_qat_qconfig",
|
|
"get_default_qconfig",
|
|
"get_default_qconfig_propagation_list",
|
|
"get_default_static_quant_module_mappings",
|
|
"get_dynamic_quant_module_class",
|
|
"get_fuser_method",
|
|
"get_observer_dict",
|
|
"get_observer_state_dict",
|
|
"get_quantized_operator",
|
|
"get_static_quant_module_class",
|
|
"get_unique_devices_",
|
|
"is_activation_post_process",
|
|
"load_observer_state_dict",
|
|
"no_observer_set",
|
|
"prepare",
|
|
"prepare_dynamic_jit",
|
|
"prepare_jit",
|
|
"prepare_qat",
|
|
"propagate_qconfig_",
|
|
"qconfig_equals",
|
|
"quant_type_to_str",
|
|
"quantize",
|
|
"quantize_dynamic",
|
|
"quantize_dynamic_jit",
|
|
"quantize_jit",
|
|
"quantize_qat",
|
|
"register_activation_post_process_hook",
|
|
"script_qconfig",
|
|
"script_qconfig_dict",
|
|
"swap_module"
|
|
],
|
|
"torch.quantization.fake_quantize": [
  "FakeQuantize",
  "FakeQuantizeBase",
  "FixedQParamsFakeQuantize",
  "FusedMovingAvgObsFakeQuantize",
  "default_fixed_qparams_range_0to1_fake_quant",
  "default_affine_fixed_qparams_fake_quant",
  "default_fake_quant",
  "default_fused_act_fake_quant",
  "default_fused_per_channel_wt_fake_quant",
  "default_fused_wt_fake_quant",
  "default_histogram_fake_quant",
  "default_per_channel_weight_fake_quant",
  "default_fixed_qparams_range_neg1to1_fake_quant",
  "default_symmetric_fixed_qparams_fake_quant",
  "default_weight_fake_quant",
  "disable_fake_quant",
  "disable_observer",
  "enable_fake_quant",
  "enable_observer"
],
"torch.quantization.fuse_modules": [
|
|
"fuse_conv_bn",
|
|
"fuse_conv_bn_relu",
|
|
"fuse_known_modules",
|
|
"fuse_modules",
|
|
"get_fuser_method"
|
|
],
|
|
"torch.quantization.fuser_method_mappings": [
|
|
"fuse_conv_bn",
|
|
"fuse_conv_bn_relu",
|
|
"fuse_linear_bn",
|
|
"get_fuser_method"
|
|
],
|
|
"torch.quantization.observer": [
|
|
"ABC",
|
|
"HistogramObserver",
|
|
"MinMaxObserver",
|
|
"MovingAverageMinMaxObserver",
|
|
"MovingAveragePerChannelMinMaxObserver",
|
|
"NoopObserver",
|
|
"ObserverBase",
|
|
"PerChannelMinMaxObserver",
|
|
"PlaceholderObserver",
|
|
"RecordingObserver",
|
|
"default_debug_observer",
|
|
"default_dynamic_quant_observer",
|
|
"default_float_qparams_observer",
|
|
"default_histogram_observer",
|
|
"default_observer",
|
|
"default_per_channel_weight_observer",
|
|
"default_placeholder_observer",
|
|
"default_weight_observer",
|
|
"get_observer_state_dict",
|
|
"load_observer_state_dict"
|
|
],
|
|
"torch.quantization.qconfig": [
|
|
"QConfig",
|
|
"QConfigAny",
|
|
"QConfigDynamic",
|
|
"add_module_to_qconfig_obs_ctr",
|
|
"assert_valid_qconfig",
|
|
"get_default_qat_qconfig",
|
|
"get_default_qconfig",
|
|
"qconfig_equals"
|
|
],
|
|
"torch.quantization.quant_type": [
|
|
"QuantType",
|
|
"quant_type_to_str"
|
|
],
|
|
"torch.quantization.quantization_mappings": [
|
|
"get_default_compare_output_module_list",
|
|
"get_default_dynamic_quant_module_mappings",
|
|
"get_default_float_to_quantized_operator_mappings",
|
|
"get_default_qat_module_mappings",
|
|
"get_default_qconfig_propagation_list",
|
|
"get_default_static_quant_module_mappings",
|
|
"get_dynamic_quant_module_class",
|
|
"get_quantized_operator",
|
|
"get_static_quant_module_class",
|
|
"no_observer_set"
|
|
],
|
|
"torch.quantization.quantize": [
|
|
"add_observer_",
|
|
"add_quant_dequant",
|
|
"convert",
|
|
"get_observer_dict",
|
|
"get_unique_devices_",
|
|
    "is_activation_post_process",
    "prepare",
    "prepare_qat",
    "propagate_qconfig_",
    "quantize",
    "quantize_dynamic",
    "quantize_qat",
    "register_activation_post_process_hook",
    "swap_module"
  ],
  "torch.quantization.quantize_jit": [
    "convert_dynamic_jit",
    "convert_jit",
    "fuse_conv_bn_jit",
    "prepare_dynamic_jit",
    "prepare_jit",
    "quantize_dynamic_jit",
    "quantize_jit",
    "script_qconfig",
    "script_qconfig_dict"
  ],
  "torch.quantization.stubs": [
    "DeQuantStub",
    "QuantStub",
    "QuantWrapper"
  ],
  "torch.quasirandom": [
    "Optional"
  ],
  "torch.random": [
    "Generator"
  ],
  "torch.return_types": [
    "_det_lu_based_helper",
    "_fake_quantize_per_tensor_affine_cachemask_tensor_qparams",
    "_fused_moving_avg_obs_fq_helper",
    "_linalg_svd",
    "_linalg_svd_out",
    "_lu_with_info",
    "_unpack_dual",
    "attr",
    "pytree_register_structseq"
  ],
  "torch.serialization": [
    "Any",
    "BinaryIO",
    "Dict",
    "IO",
    "Optional",
    "Storage",
    "Tuple",
    "Type",
    "Union",
    "cast",
    "closing",
    "contextmanager",
    "get_source_lines_and_file"
  ],
  "torch.sparse": [
    "BFloat16Tensor",
    "ByteTensor",
    "CharTensor",
    "DoubleTensor",
    "FloatTensor",
    "HalfTensor",
    "IntTensor",
    "LongTensor",
    "ShortTensor",
    "_csr_to_block_csr",
    "addmm",
    "log_softmax",
    "mm",
    "softmax"
  ],
  "torch.special": [
    "digamma",
    "entr",
    "erf",
    "erfc",
    "erfcx",
    "erfinv",
    "exp2",
    "expit",
    "expm1",
    "gammainc",
    "gammaincc",
    "gammaln",
    "i0",
    "i0e",
    "i1",
    "i1e",
    "log1p",
    "log_ndtr",
    "log_softmax",
    "logit",
    "logsumexp",
    "multigammaln",
    "ndtr",
    "ndtri",
    "polygamma",
    "psi",
    "round",
    "sinc",
    "softmax",
    "xlog1py",
    "xlogy",
    "zeta"
  ],
  "torch.storage": [
    "Any",
    "Storage",
    "Type",
    "TypeVar",
    "Union",
    "cast",
    "lru_cache"
  ],
  "torch.testing": [
    "FileCheck",
    "all_types",
    "all_types_and",
    "all_types_and_complex",
    "all_types_and_complex_and",
    "all_types_and_half",
    "assert_allclose",
    "assert_close",
    "complex_types",
    "double_types",
    "empty_types",
    "floating_and_complex_types",
    "floating_and_complex_types_and",
    "floating_types",
    "floating_types_and",
    "floating_types_and_half",
    "get_all_complex_dtypes",
    "get_all_device_types",
    "get_all_dtypes",
    "get_all_fp_dtypes",
    "get_all_int_dtypes",
    "get_all_math_dtypes",
    "integral_types",
    "integral_types_and",
    "make_non_contiguous",
    "make_tensor",
    "rand",
    "randn"
  ],
  "torch.torch_version": [
    "Any",
    "Iterable"
  ],
  "torch.types": [
    "Any",
    "Device",
    "List",
    "Number",
    "Sequence",
    "Tuple",
    "Union"
  ],
  "torch.utils": [
    "disable_minidumps",
    "enable_minidumps",
    "enable_minidumps_on_exceptions"
  ],
  "torch.utils.benchmark.utils.common": [
    "_make_temp_dir",
    "ordered_unique",
    "select_unit",
    "set_torch_threads",
    "trim_sigfig",
    "unit_to_english"
  ],
  "torch.utils.benchmark.utils.compare": [
    "Colorize",
    "Table",
    "optional_min"
  ],
  "torch.utils.benchmark.utils.cpp_jit": [
    "Any",
    "CallgrindModuleType",
    "List",
    "Optional",
    "TimeitModuleType"
  ],
  "torch.utils.benchmark.utils.fuzzer": [
    "dtype_size",
    "prod"
  ],
  "torch.utils.benchmark.utils.sparse_fuzzer": [
    "FuzzedTensor",
    "Number",
    "Optional",
    "Tuple",
    "Union"
  ],
  "torch.utils.benchmark.utils.timer": [
    "CPPTimer",
    "timer"
  ],
  "torch.utils.benchmark.utils.valgrind_wrapper.timer_interface": [
    "GlobalsBridge",
    "Serialization",
    "wrapper_singleton"
  ],
  "torch.utils.cpp_extension": [
    "ExtensionVersioner",
    "FileBaton",
    "GeneratedFileCleaner",
    "List",
    "Optional",
    "TorchVersion",
    "Tuple",
    "Union",
    "build_ext",
    "get_hip_file_path"
  ],
  "torch.utils.data": [
    "_DatasetKind",
    "argument_validation",
    "default_collate",
    "default_convert",
    "functional_datapipe",
    "get_worker_info",
    "guaranteed_datapipes_determinism",
    "non_deterministic",
    "runtime_validation",
    "runtime_validation_disabled"
  ],
  "torch.utils.data.dataloader": [
    "default_collate",
    "default_convert",
    "get_worker_info"
  ],
  "torch.utils.data.datapipes.dataframe": [
    "DFIterDataPipe"
  ],
  "torch.utils.dlpack": [
    "Any",
    "to_dlpack"
  ],
  "torch.utils.hipify.hipify_python": [
    "Dict",
    "HipifyFinalResult",
    "HipifyResult",
    "Iterable",
    "Iterator",
    "List",
    "Mapping",
    "Optional"
  ],
  "torch.utils.hooks": [
    "Any",
    "OrderedDict"
  ],
  "torch.utils.show_pickle": [
    "Any",
    "BinaryIO",
    "IO",
    "Union"
  ],
  "torch.utils.tensorboard.summary": [
    "HistogramProto",
    "Optional",
    "PrCurvePluginData",
    "Summary",
    "SummaryMetadata",
    "TensorProto",
    "TensorShapeProto",
    "TextPluginData",
    "convert_to_HWC",
    "make_np",
    "range"
  ],
  "torch.utils.tensorboard.writer": [
    "Event",
    "EventFileWriter",
    "ProjectorConfig",
    "SessionLog",
    "audio",
    "custom_scalars",
    "figure_to_image",
    "get_embedding_info",
    "graph",
    "histogram",
    "histogram_raw",
    "hparams",
    "image",
    "image_boxes",
    "load_onnx_graph",
    "make_mat",
    "make_np",
    "make_sprite",
    "make_tsv",
    "mesh",
    "pr_curve",
    "pr_curve_raw",
    "scalar",
    "text",
    "video",
    "write_pbtxt"
  ],
  "torch": [
    "BFloat16Storage",
    "BFloat16Tensor",
    "ComplexDoubleStorage",
    "ComplexFloatStorage",
    "DisableTorchFunction",
    "Generator",
    "HalfStorage",
    "HalfTensor",
    "QInt32Storage",
    "QInt8Storage",
    "QUInt2x4Storage",
    "QUInt4x2Storage",
    "QUInt8Storage",
    "Storage",
    "_TypedStorage",
    "_adaptive_avg_pool2d",
    "_adaptive_avg_pool3d",
    "_add_batch_dim",
    "_add_relu",
    "_add_relu_",
    "_addmm_activation",
    "_aminmax",
    "_amp_foreach_non_finite_check_and_unscale_",
    "_amp_update_scale_",
    "_assert_async",
    "_batch_norm_impl_index",
    "_cast_Byte",
    "_cast_Char",
    "_cast_Double",
    "_cast_Float",
    "_cast_Half",
    "_cast_Int",
    "_cast_Long",
    "_cast_Short",
    "_choose_qparams_per_tensor",
    "_coalesce",
    "_compute_linear_combination",
    "_conj",
    "_conj_copy",
    "_conj_physical",
    "_convert_indices_from_coo_to_csr",
    "_convert_indices_from_csr_to_coo",
    "_convolution",
    "_convolution_mode",
    "_copy_from",
    "_copy_from_and_resize",
    "_ctc_loss",
    "_cudnn_ctc_loss",
    "_cudnn_init_dropout_state",
    "_cudnn_rnn",
    "_cudnn_rnn_flatten_weight",
    "_cufft_clear_plan_cache",
    "_cufft_get_plan_cache_max_size",
    "_cufft_get_plan_cache_size",
    "_cufft_set_plan_cache_max_size",
    "_cummax_helper",
    "_cummin_helper",
    "_debug_has_internal_overlap",
    "_det_lu_based_helper",
    "_det_lu_based_helper_backward_helper",
    "_dim_arange",
    "_dirichlet_grad",
    "_disable_functionalization",
    "_efficientzerotensor",
    "_embedding_bag",
    "_embedding_bag_forward_only",
    "_empty_affine_quantized",
    "_empty_per_channel_affine_quantized",
    "_enable_functionalization",
    "_euclidean_dist",
    "_fake_quantize_learnable_per_channel_affine",
    "_fake_quantize_learnable_per_tensor_affine",
    "_fake_quantize_per_tensor_affine_cachemask_tensor_qparams",
    "_fft_c2c",
    "_fft_c2r",
    "_fft_r2c",
    "_foreach_abs",
    "_foreach_abs_",
    "_foreach_acos",
    "_foreach_acos_",
    "_foreach_add",
    "_foreach_add_",
    "_foreach_addcdiv",
    "_foreach_addcdiv_",
    "_foreach_addcmul",
    "_foreach_addcmul_",
    "_foreach_asin",
    "_foreach_asin_",
    "_foreach_atan",
    "_foreach_atan_",
    "_foreach_ceil",
    "_foreach_ceil_",
    "_foreach_cos",
    "_foreach_cos_",
    "_foreach_cosh",
    "_foreach_cosh_",
    "_foreach_div",
    "_foreach_div_",
    "_foreach_erf",
    "_foreach_erf_",
    "_foreach_erfc",
    "_foreach_erfc_",
    "_foreach_exp",
    "_foreach_exp_",
    "_foreach_expm1",
    "_foreach_expm1_",
    "_foreach_floor",
    "_foreach_floor_",
    "_foreach_frac",
    "_foreach_frac_",
    "_foreach_lgamma",
    "_foreach_lgamma_",
    "_foreach_log",
    "_foreach_log10",
    "_foreach_log10_",
    "_foreach_log1p",
    "_foreach_log1p_",
    "_foreach_log2",
    "_foreach_log2_",
    "_foreach_log_",
    "_foreach_maximum",
    "_foreach_minimum",
    "_foreach_mul",
    "_foreach_mul_",
    "_foreach_neg",
    "_foreach_neg_",
    "_foreach_norm",
    "_foreach_reciprocal",
    "_foreach_reciprocal_",
    "_foreach_round",
    "_foreach_round_",
    "_foreach_sigmoid",
    "_foreach_sigmoid_",
    "_foreach_sin",
    "_foreach_sin_",
    "_foreach_sinh",
    "_foreach_sinh_",
    "_foreach_sqrt",
    "_foreach_sqrt_",
    "_foreach_sub",
    "_foreach_sub_",
    "_foreach_tan",
    "_foreach_tan_",
    "_foreach_tanh",
    "_foreach_tanh_",
    "_foreach_trunc",
    "_foreach_trunc_",
    "_foreach_zero_",
    "_from_functional_tensor",
    "_fused_dropout",
    "_fused_moving_avg_obs_fq_helper",
    "_fw_primal_copy",
    "_grid_sampler_2d_cpu_fallback",
    "_has_compatible_shallow_copy_type",
    "_histogramdd_bin_edges",
    "_histogramdd_from_bin_cts",
    "_histogramdd_from_bin_tensors",
    "_index_put_impl_",
    "_indices_copy",
    "_is_functional_tensor",
    "_is_zerotensor",
    "_linalg_check_errors",
    "_linalg_inv_out_helper_",
    "_linalg_qr_helper",
    "_linalg_svd",
    "_log_softmax",
    "_log_softmax_backward_data",
    "_logcumsumexp",
    "_lu_with_info",
    "_make_dual",
    "_make_dual_copy",
    "_make_per_channel_quantized_tensor",
    "_make_per_tensor_quantized_tensor",
    "_masked_scale",
    "_masked_softmax",
    "_mkldnn_reshape",
    "_mkldnn_transpose",
    "_mkldnn_transpose_",
    "_neg_view",
    "_neg_view_copy",
    "_nested_from_padded",
    "_nested_from_padded_and_nested_example",
    "_nnpack_available",
    "_nnpack_spatial_convolution",
    "_pack_padded_sequence",
    "_pad_packed_sequence",
    "_pin_memory",
    "_remove_batch_dim",
    "_reshape_alias_copy",
    "_reshape_from_tensor",
    "_rowwise_prune",
    "_sample_dirichlet",
    "_saturate_weight_to_fp16",
    "_shape_as_tensor",
    "_sobol_engine_draw",
    "_sobol_engine_ff_",
    "_sobol_engine_initialize_state_",
    "_sobol_engine_scramble_",
    "_softmax",
    "_softmax_backward_data",
    "_sparse_broadcast_to",
    "_sparse_broadcast_to_copy",
    "_sparse_coo_tensor_unsafe",
    "_sparse_csr_prod",
    "_sparse_csr_sum",
    "_sparse_csr_tensor_unsafe",
    "_sparse_log_softmax_backward_data",
    "_sparse_mask_helper",
    "_sparse_softmax_backward_data",
    "_sparse_sparse_matmul",
    "_sparse_sum",
    "_stack",
    "_standard_gamma",
    "_standard_gamma_grad",
    "_sync",
    "_test_serialization_subcmul",
    "_to_cpu",
    "_to_functional_tensor",
    "_torch_cuda_cu_linker_symbol_op",
    "_trilinear",
    "_unique",
    "_unique2",
    "_unpack_dual",
    "_use_cudnn_ctc_loss",
    "_use_cudnn_rnn_flatten_weight",
    "_validate_sparse_compressed_tensor_args",
    "_validate_sparse_coo_tensor_args",
    "_validate_sparse_csr_tensor_args",
    "_values_copy",
    "_weight_norm",
    "_weight_norm_cuda_interface",
    "autocast",
    "broadcast_shapes",
    "candidate",
    "compiled_with_cxx11_abi",
    "from_dlpack",
    "lobpcg",
    "lu",
    "obj",
    "set_default_dtype",
    "set_grad_enabled",
    "set_printoptions",
    "unique"
  ]
}