codegen/native_functions.yaml in torch-rb-0.7.0 vs codegen/native_functions.yaml in torch-rb-0.8.0

- old
+ new
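
For orientation before the hunks: most of this diff tracks a schema migration in the vendored PyTorch operator registry. The legacy `use_c10_dispatcher: hacky_wrapper_for_legacy_signatures` key is dropped; the `DefaultBackend` and `Math` dispatch keys are renamed to `CompositeExplicitAutograd` and `CompositeImplicitAutograd`; `manual_kernel_registration` gives way to `manual_cpp_binding` on the Variable methods; `device_check: NoCheck` annotations appear on TensorIterator-based ops; and many unary ops migrate to structured kernels, where the functional and in-place entries delegate to a structured `.out` entry. A condensed sketch of that structured-kernel pattern, copied from the acos entries further down:

- func: acos(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: acos.out

- func: acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: acos_out

Only the out-variant names real kernels here; acos and acos_ pick up its implementation through the delegate.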

@@ -37,32 +37,31 @@
- func: _cast_Half(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# Computes the gradient of current tensor w.r.t. graph leaves.
- func: _backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
-  manual_kernel_registration: True
+  manual_cpp_binding: True
  variants: method

# DEPRECATED. Sets the tensor data held by this `Variable` to be the same as
# `new_data`. It requires that `new_data` and `Variable` have compatible tensor
# type, by checking `_has_compatible_shallow_copy_type(this, new_data)`.
#
# This function is deprecated because it doesn't really make sense in a world
# where Variables *are* Tensors (as opposed to them containing tensors, which
# is what the previous interpretation was.)
- func: set_data(Tensor(a!) self, Tensor new_data) -> ()
-  manual_kernel_registration: True
+  manual_cpp_binding: True
  variants: method

- func: data(Tensor self) -> Tensor
-  manual_kernel_registration: True
+  manual_cpp_binding: True
  variants: method

# True if this `Variable` is a leaf and thus does not have a `grad_fn`.
- func: is_leaf(Tensor self) -> bool
-  manual_kernel_registration: True
+  manual_cpp_binding: True
  variants: method

# Returns the output index of this variable from the forward operation that
# produced it. Conversely, it returns the input index of the gradient `Node` to
# which this `Variable` is connected (because in the gradient computation,

@@ -72,30 +71,30 @@
# assert y0.output_nr == 0
# assert y1.output_nr == 1
# assert y2.output_nr == 2
#
- func: output_nr(Tensor self) -> int
-  manual_kernel_registration: True
+  manual_cpp_binding: True
  variants: method

- func: _version(Tensor self) -> int
-  manual_kernel_registration: True
+  manual_cpp_binding: True
  variants: method

- func: requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
-  manual_kernel_registration: True
+  manual_cpp_binding: True
  variants: method

# Enables .grad attribute for non-leaf Tensors.
- func: retain_grad(Tensor(a!) self) -> ()
-  manual_kernel_registration: True
+  manual_cpp_binding: True
  variants: method

- func: _fw_primal(Tensor(a) self, int level) -> Tensor(a)
  variants: method
  dispatch:
-    DefaultBackend: _fw_primal
+    CompositeExplicitAutograd: _fw_primal

- func: _make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
  variants: function

- func: _unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)

@@ -116,39 +115,49 @@
- func: align_as(Tensor self, Tensor other) -> Tensor
  variants: method

- func: align_tensors(Tensor[] tensors) -> Tensor[]

+# Not assert because it's a keyword; not Assert because FX already
+# took that syntax
+# TODO: need to specify this is side-effectful somehow
+- func: _assert_async(Tensor self) -> ()
+  dispatch:
+    CPU: _assert_async_cpu
+    CUDA: _assert_async_cuda
+
- func: refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
  variants: method

- func: _use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool
+  device_check: NoCheck # Tensor arguments allowed to be on different devices, see also _cudnn_ctc_loss
  dispatch:
    CUDA: _use_cudnn_ctc_loss

- func: _cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
+  device_check: NoCheck # log_probs is expected to be on CUDA while targets is expected to be on CPU
  dispatch:
    CUDA: _cudnn_ctc_loss

- func: _use_cudnn_rnn_flatten_weight() -> bool

- func: _cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, int input_size, int mode, int hidden_size, int proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
  dispatch:
    CUDA: _cudnn_rnn_flatten_weight

- func: _cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, int hidden_size, int proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  # rnn_tanh may or may not redispatch to _cudnn_rnn based on algorithm and build. Thus it might hit dispatch or kernel device check.
+  # Disable dispatch time device check for consistent behavior.
+  device_check: NoCheck
  dispatch:
    CUDA: _cudnn_rnn

- func: _cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CUDA: _cudnn_rnn_backward

- func: _cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CUDA: _cudnn_init_dropout_state

- func: _debug_has_internal_overlap(Tensor self) -> int
  variants: function

@@ -190,21 +199,23 @@
- func: feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor

- func: feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
- func: abs(Tensor self) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
-    DefaultBackend: abs
+    CompositeExplicitAutograd: abs

- func: abs_(Tensor(a!) self) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
-    DefaultBackend: abs_
+    CompositeExplicitAutograd: abs_

- func: abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: abs_out

# Note [Adding an alias]
# To add an alias do the following:

@@ -225,31 +236,35 @@
# torch/_tensor_docs.py and docs/source/tensors.rst if adding a method,
# or module-specific doc bindings (like torch/linalg/__init__.py) if
# adding an alias in a namespace.)
# 5) Update torch/overrides.py consistent with the original function.
# 6) Update the alias_map in torch/csrc/jit/passes/normalize_ops.cpp.
-# 7) Add entries to test/test_op_aliases.py's "alias_infos"
+# 7) Add aliases argument to existing OpInfo/UnaryUfuncInfo or create new OpInfo/UnaryUfuncInfo entry
+# in op_db list in torch/testing/_internal/common_methods_invocations.py
#
# See torch.absolute, an alias for torch.abs, as an example.

# Absolute, alias for abs
- func: absolute(Tensor self) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method

- func: absolute_(Tensor(a!) self) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
  variants: method

- func: absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator

- func: angle(Tensor self) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
-    DefaultBackend: angle
+    CPU, CUDA: angle

- func: angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: angle_out

- func: view_as_real(Tensor(a) self) -> Tensor(a)
  variants: function

@@ -262,54 +277,58 @@
    CPU, CUDA: view_as_complex

- func: sgn(Tensor self) -> Tensor
  variants: function, method
  dispatch:
-    DefaultBackend: sgn
+    CompositeExplicitAutograd: sgn

- func: sgn_(Tensor(a!) self) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  variants: method
  dispatch:
-    DefaultBackend: sgn_
+    CompositeExplicitAutograd: sgn_

- func: sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CPU, CUDA: sgn_out

- func: real(Tensor(a) self) -> Tensor(a)
+  device_check: NoCheck # TensorIterator
  variants: function

- func: imag(Tensor(a) self) -> Tensor(a)
+  device_check: NoCheck # TensorIterator
  variants: function

- func: conj(Tensor(a) self) -> Tensor(a)
+  device_check: NoCheck # TensorIterator
  variants: function, method

- func: conj.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: conj_out
+    SparseCPU, SparseCUDA: conj_out_sparse

- func: _conj(Tensor self) -> Tensor
  variants: function
  dispatch:
-    DefaultBackend: _conj
+    CompositeExplicitAutograd: _conj

- func: acos(Tensor self) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method
-  dispatch:
-    DefaultBackend: acos
+  structured_delegate: acos.out

- func: acos_(Tensor(a!) self) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
  variants: function, method
-  dispatch:
-    DefaultBackend: acos_
+  structured_delegate: acos.out

- func: acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
+  structured: True
+  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: acos_out

# arccos, alias of acos
- func: arccos(Tensor self) -> Tensor

@@ -317,40 +336,45 @@
- func: arccos_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method

- func: arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor

- func: adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor

# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)

- func: add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+  device_check: NoCheck # TensorIterator
  structured_delegate: add.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: add_sparse
+    SparseCsrCPU: add_sparse_csr
    MkldnnCPU: mkldnn_add

- func: add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: add.out
  dispatch:
    SparseCPU, SparseCUDA: add_sparse_
+    SparseCsrCPU: add_sparse_csr_
    MkldnnCPU: mkldnn_add_

- func: add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: add_out
    SparseCPU: add_out_sparse_cpu
    SparseCUDA: add_out_sparse_cuda
+    SparseCsrCPU: add_out_sparse_csr_cpu
    MkldnnCPU: mkldnn_add_out

- func: _add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
  variants: function
  dispatch:

@@ -360,120 +384,113 @@
  variants: function
  dispatch:
    CPU: add_relu_

- func: _add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  variants: function
  dispatch:
    CPU: add_relu_out

# For C++ only, until we have conversion from C++ numbers to Tensor
- func: add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
-    DefaultBackend: add
+    CompositeExplicitAutograd: add

- func: add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
-    DefaultBackend: add_
+    CompositeExplicitAutograd: add_

- func: addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+  structured_delegate: addmv.out
  variants: function, method
-  dispatch:
-    CPU, CUDA: addmv

- func: addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
+  structured_delegate: addmv.out
  variants: function, method
-  dispatch:
-    CPU, CUDA: addmv_

- func: addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  structured: True
  dispatch:
-    CPU, CUDA: addmv_out
+    CPU: addmv_out_cpu
+    CUDA: addmv_out_cuda

-- func: _addmv_impl_(Tensor(a!) self, Tensor self2, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
-  dispatch:
-    CPU: addmv_impl_cpu
-    CUDA: addmv_impl_cuda
-

- func: addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  variants: function, method
  dispatch:
    CPU, CUDA: addr
-    Math: math_addr
+    CompositeImplicitAutograd: math_addr

- func: addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
  variants: method
  dispatch:
-    DefaultBackend: addr_
+    CompositeExplicitAutograd: addr_

- func: addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CPU, CUDA: addr_out
-    Math: math_addr_out
+    CompositeImplicitAutograd: math_addr_out

- func: affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor
  variants: function
  dispatch:
-    DefaultBackend: affine_grid_generator
+    CompositeExplicitAutograd: affine_grid_generator

- func: affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor
  variants: function

- func: all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: all

- func: all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: all_out

- func: all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method

- func: all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator

- func: allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
  variants: function, method

- func: any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: any

- func: any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: any_out

- func: any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method

- func: any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator

- func: arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: arange.start_step(Scalar start, Scalar end, Scalar step, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CPU: arange_cpu_out
    CUDA: arange_cuda_out

# This function is a temporary hack to allow tracing of arange like constructs with dynamic

@@ -482,41 +499,40 @@
# preserve tracing. Get rid of this when arange can directly take tensors for bounds
# (so that it can be traced directly).
- func: _dim_arange(Tensor like, int dim) -> Tensor

- func: argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: argmax

- func: argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CPU, CUDA: argmax_out

- func: argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: argmin

- func: argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CPU, CUDA: argmin_out

- func: acosh(Tensor self) -> Tensor
  variants: function, method
-  dispatch:
-    DefaultBackend: acosh
+  structured_delegate: acosh.out

- func: acosh_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
-  dispatch:
-    DefaultBackend: acosh_
+  structured_delegate: acosh.out

- func: acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  structured: True
+  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: acosh_out

# arccosh, alias for acosh
- func: arccosh(Tensor self) -> Tensor

@@ -524,24 +540,22 @@
- func: arccosh_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method

- func: arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: asinh(Tensor self) -> Tensor
  variants: function, method
-  dispatch:
-    DefaultBackend: asinh
+  structured_delegate: asinh.out

- func: asinh_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
-  dispatch:
-    DefaultBackend: asinh_
+  structured_delegate: asinh.out

- func: asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  structured: True
+  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: asinh_out

# arcsinh, alias for asinh
- func: arcsinh(Tensor self) -> Tensor

@@ -549,24 +563,22 @@
- func: arcsinh_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method

- func: arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: atanh(Tensor self) -> Tensor
+  structured_delegate: atanh.out
  variants: function, method
-  dispatch:
-    DefaultBackend: atanh

- func: atanh_(Tensor(a!) self) -> Tensor(a!)
+  structured_delegate: atanh.out
  variants: function, method
-  dispatch:
-    DefaultBackend: atanh_

- func: atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  structured: True
+  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: atanh_out

# arctanh, alias for atanh
- func: arctanh(Tensor self) -> Tensor

@@ -574,40 +586,45 @@
- func: arctanh_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method

- func: arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: as_strided(Tensor(a) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a)
  variants: function, method
  dispatch:
-    CPU, CUDA: as_strided_tensorimpl
+    CPU, CUDA, Meta: as_strided_tensorimpl
    QuantizedCPU, QuantizedCUDA: as_strided_qtensorimpl
+  device_check: NoCheck
  device_guard: False

- func: as_strided_(Tensor(a!) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  use_const_ref_for_mutable_tensors: True
  variants: function, method
+  device_check: NoCheck
  device_guard: False
  dispatch:
-    DefaultBackend: as_strided_
+    CompositeExplicitAutograd: as_strided_

- func: asin(Tensor self) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method
+  structured_delegate: asin.out
  dispatch:
-    CPU, CUDA: asin
    SparseCPU, SparseCUDA: asin_sparse

- func: asin_(Tensor(a!) self) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
  variants: function, method
+  structured_delegate: asin.out
  dispatch:
-    CPU, CUDA: asin_
    SparseCPU, SparseCUDA: asin_sparse_

- func: asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
+  structured: True
+  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: asin_out
    SparseCPU, SparseCUDA: asin_out_sparse

# arcsin, alias of asin

@@ -616,24 +633,25 @@
- func: arcsin_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method

- func: arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: atan(Tensor self) -> Tensor
+  device_check: NoCheck # TensorIterator
+  structured_delegate: atan.out
  variants: function, method
-  dispatch:
-    DefaultBackend: atan

- func: atan_(Tensor(a!) self) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
+  structured_delegate: atan.out
  variants: function, method
-  dispatch:
-    DefaultBackend: atan_

- func: atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
+  structured: True
+  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: atan_out

# arctan, alias of atan
- func: arctan(Tensor self) -> Tensor

@@ -641,11 +659,10 @@
- func: arctan_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method

- func: arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: atleast_1d(Tensor self) -> Tensor
  variants: function

- func: atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]

@@ -676,200 +693,211 @@
- func: _baddbmm_mkl_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
  variants: function

- func: baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  variants: function
  dispatch:
    CPU: baddbmm_out_cpu
    CUDA: baddbmm_out_cuda

- func: bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor

- func: quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    QuantizedCPU: quantized_batch_norm

- func: _batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: _batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

# Sample bernoulli with values in `self` as probability.
- func: bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
-    DefaultBackend: bernoulli
+    CompositeExplicitAutograd: bernoulli

- func: bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CPU, CUDA: bernoulli_out

- func: bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU, CUDA: bernoulli_

- func: bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU, CUDA: bernoulli_

# This out-of-place version isn't used explicitly, but needed by jit.
# There is no default valid on `p` here because it would introduce ambiguity
# with `bernoulli(Tensor self, *, Generator? generator=None)` declaration.
- func: bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method

- func: bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias) -> Tensor
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
  python_module: nn
  variants: function
  dispatch:
    CPU: binary_cross_entropy_cpu
    CUDA: binary_cross_entropy_cuda

- func: binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
  python_module: nn
  variants: function
  dispatch:
    CPU: binary_cross_entropy_out_cpu
    CUDA: binary_cross_entropy_out_cuda

- func: binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  python_module: nn
  variants: function
  dispatch:
    CPU: binary_cross_entropy_backward_cpu
    CUDA: binary_cross_entropy_backward_cuda

- func: binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  python_module: nn
  variants: function
  dispatch:
    CPU: binary_cross_entropy_backward_out_cpu
    CUDA: binary_cross_entropy_backward_out_cuda

- func: binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
-    DefaultBackend: binary_cross_entropy_with_logits
+    CompositeExplicitAutograd: binary_cross_entropy_with_logits

- func: binary_cross_entropy_with_logits_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  variants: function

- func: bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  variants: function, method
  dispatch:
    CPU: _bincount_cpu
    CUDA: _bincount_cuda

- func: bitwise_not(Tensor self) -> Tensor
+  device_check: NoCheck # TensorIterator
+  structured_delegate: bitwise_not.out
  variants: function, method

- func: bitwise_not_(Tensor(a!) self) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
+  structured_delegate: bitwise_not.out
  variants: method

- func: bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
+  structured: True
+  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: bitwise_not_out

+- func: copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
+  structured: True
+  structured_inherits: TensorIteratorBase
+  dispatch:
+    CPU, CUDA: copysign_out
+
- func: copysign.Tensor(Tensor self, Tensor other) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method
-  dispatch:
-    CPU, CUDA: copysign
+  structured_delegate: copysign.out

- func: copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
  variants: method
-  dispatch:
-    CPU, CUDA: copysign_
+  structured_delegate: copysign.out

-- func: copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
-  dispatch:
-    CPU, CUDA: copysign_out
-
- func: copysign.Scalar(Tensor self, Scalar other) -> Tensor
  variants: function, method
  dispatch:
-    CPU, CUDA: copysign
+    CompositeExplicitAutograd: copysign

- func: copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method
  dispatch:
-    CPU, CUDA: copysign_
+    CompositeExplicitAutograd: copysign_

+- func: copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+  dispatch:
+    CompositeExplicitAutograd: copysign_out
+
- func: logical_not(Tensor self) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method

- func: logical_not_(Tensor(a!) self) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
  variants: method

- func: logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: logical_not_out

- func: logical_xor(Tensor self, Tensor other) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method

- func: logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
  variants: method

- func: logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: logical_xor_out

- func: logical_and(Tensor self, Tensor other) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method

- func: logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
  variants: method

- func: logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: logical_and_out

- func: logical_or(Tensor self, Tensor other) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method

- func: logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
  variants: method

- func: logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: logical_or_out

- func: blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: bmm(Tensor self, Tensor mat2) -> Tensor
  variants: function, method
  dispatch:
    CPU: bmm_cpu

@@ -881,73 +909,80 @@
  variants: function
  dispatch:
    SparseCUDA: _bmm_sparse_cuda

- func: bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  variants: function
  dispatch:
    CPU: bmm_out_cpu
    CUDA: bmm_out_cuda
    SparseCPU: bmm_out_sparse_cpu
    SparseCUDA: bmm_out_sparse_cuda

- func: _bmm.out(Tensor self, Tensor mat2, *, bool deterministic=False, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  variants: function
  dispatch:
    SparseCUDA: _bmm_out_sparse_cuda

- func: broadcast_tensors(Tensor[] tensors) -> Tensor[]
+  device_check: NoCheck
  device_guard: False

- func: broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
  variants: function, method
-  dispatch:
-    Math: broadcast_to

- func: cat(Tensor[] tensors, int dim=0) -> Tensor
  dispatch:
-    DefaultBackend: cat
+    CompositeExplicitAutograd: cat

- func: cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
-    DefaultBackend: cat_out
+    CompositeExplicitAutograd: cat_out

- func: cat.names(Tensor[] tensors, Dimname dim) -> Tensor

- func: cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: block_diag(Tensor[] tensors) -> Tensor
  variants: function

- func: ceil(Tensor self) -> Tensor
+  device_check: NoCheck # TensorIterator
+  structured_delegate: ceil.out
  variants: function, method
  dispatch:
-    DefaultBackend: ceil
+    CompositeExplicitAutograd: ceil

- func: ceil_(Tensor(a!) self) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
+  structured_delegate: ceil.out
  variants: function, method
  dispatch:
-    DefaultBackend: ceil_
+    CompositeExplicitAutograd: ceil_

- func: ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
+  structured: True
+  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: ceil_out

+# alias for torch.linalg.multi_dot
- func: chain_matmul(Tensor[] matrices) -> Tensor
  variants: function

+# alias for torch.linalg.multi_dot
+- func: chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)
+
- func: unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
  variants: function, method
+  device_check: NoCheck
  device_guard: False

- func: chunk(Tensor(a) self, int chunks, int dim=0) -> Tensor(a)[]
  variants: function, method
+  device_check: NoCheck
  device_guard: False

- func: tensor_split.sections(Tensor(a) self, int sections, int dim=0) -> Tensor(a)[]
  variants: function, method

@@ -956,183 +991,248 @@
- func: tensor_split.tensor_indices_or_sections(Tensor(a) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
  variants: function, method

- func: clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method
+  cpp_no_default_args: ['min']
  dispatch:
    CPU, CUDA: clamp
    QuantizedCPU: clamp_quantized_cpu

+- func: clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
+  variants: function, method
+  dispatch:
+    CPU, CUDA: clamp
+
- func: clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
  variants: function, method
+  cpp_no_default_args: ['min']
  dispatch:
-    DefaultBackend: clamp_
+    CompositeExplicitAutograd: clamp_

+- func: clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
+  variants: function, method
+  dispatch:
+    CompositeExplicitAutograd: clamp_
+
- func: clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
+  cpp_no_default_args: ['min']
  dispatch:
    CPU, CUDA: clamp_out

+- func: clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
+  dispatch:
+    CPU, CUDA: clamp_out
+
- func: clamp_max(Tensor self, Scalar max) -> Tensor
+  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
-    DefaultBackend: clamp_max
+    CompositeExplicitAutograd: clamp_max

+- func: clamp_max.Tensor(Tensor self, Tensor max) -> Tensor
+  variants: function, method
+  dispatch:
+    CompositeExplicitAutograd: clamp_max
+
- func: clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
-    DefaultBackend: clamp_max_
+    CompositeExplicitAutograd: clamp_max_

+- func: clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)
+  variants: function, method
+  dispatch:
+    CompositeExplicitAutograd: clamp_max_
+
- func: clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
-  use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: clamp_max_out

+- func: clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch: + CPU, CUDA: clamp_max_out + - func: clamp_min(Tensor self, Scalar min) -> Tensor + device_check: NoCheck # TensorIterator variants: function, method dispatch: - DefaultBackend: clamp_min + CompositeExplicitAutograd: clamp_min +- func: clamp_min.Tensor(Tensor self, Tensor min) -> Tensor + variants: function, method + dispatch: + CompositeExplicitAutograd: clamp_min + - func: clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: function, method dispatch: - DefaultBackend: clamp_min_ + CompositeExplicitAutograd: clamp_min_ +- func: clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!) + variants: function, method + dispatch: + CompositeExplicitAutograd: clamp_min_ + - func: clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: clamp_min_out +- func: clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + CPU, CUDA: clamp_min_out + # clip is an alias for clamp - func: clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor + cpp_no_default_args: ['min'] variants: function, method +- func: clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor + variants: function, method + - func: clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + cpp_no_default_args: ['min'] variants: function, method +- func: clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) + variants: function, method + - func: clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + cpp_no_default_args: ['min'] +- func: clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) + - func: cudnn_is_acceptable(Tensor self) -> bool + device_check: NoCheck device_guard: False - func: complex(Tensor real, Tensor imag) -> Tensor variants: function dispatch: - DefaultBackend: complex + CompositeExplicitAutograd: complex - func: complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CPU, CUDA: complex_out - func: polar(Tensor abs, Tensor angle) -> Tensor variants: function dispatch: - DefaultBackend: polar + CompositeExplicitAutograd: polar - func: polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CPU, CUDA: polar_out - func: constant_pad_nd(Tensor self, int[] pad, Scalar value=0) -> Tensor variants: function dispatch: - DefaultBackend: constant_pad_nd + CompositeExplicitAutograd: constant_pad_nd - func: contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a) variants: method manual_cpp_binding: True - func: convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: convolution_overrideable(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: - DefaultBackend: convolution_overrideable + CompositeExplicitAutograd: convolution_overrideable - func: convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) dispatch: - DefaultBackend: convolution_backward_overrideable + CompositeExplicitAutograd: convolution_backward_overrideable - func: _convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: _convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures +- func: _convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor + - func: _convolution_nogroup(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: _convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, bool[3] output_mask) -> (Tensor, Tensor, Tensor) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures +- func: conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor + cpp_no_default_args: ['bias', 'stride', 'padding'] + +- func: conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor + cpp_no_default_args: ['bias', 'stride', 'padding'] + +- func: conv3d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor + cpp_no_default_args: ['bias', 'stride', 'padding'] + - func: conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor dispatch: - DefaultBackend: conv_tbc + CompositeExplicitAutograd: conv_tbc - func: conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor) # NB: we inherit the goofy argument order from PyTorch torch.nn.functional - func: conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) variants: method + device_check: NoCheck device_guard: False dispatch: - DefaultBackend: copy_ + MkldnnCPU: copy_mkldnn_ + CompositeExplicitAutograd: copy_ - func: _copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor dispatch: {} - func: cos(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator variants: function, method - dispatch: - DefaultBackend: cos + structured_delegate: cos.out - func: cos_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: function, method - dispatch: - DefaultBackend: cos_ + structured_delegate: cos.out - func: cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: cos_out - func: cosh(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator variants: function, method - dispatch: - DefaultBackend: cosh + structured_delegate: cosh.out - func: cosh_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: function, method - dispatch: - DefaultBackend: cosh_ + structured_delegate: cosh.out - func: cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: cosh_out - func: cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor @@ -1142,11 +1242,11 @@ CPU, CUDA: count_nonzero - func: count_nonzero(Tensor self, int? dim=None) -> Tensor variants: function, method dispatch: - DefaultBackend: count_nonzero + CompositeExplicitAutograd: count_nonzero - func: cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid dispatch: CUDA: cudnn_affine_grid_generator_forward @@ -1154,22 +1254,19 @@ - func: cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta dispatch: CUDA: cudnn_affine_grid_generator_backward - func: cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CUDA: cudnn_batch_norm # NB: You can only use this if you used cudnn_batch_norm training=True - func: cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CUDA: cudnn_batch_norm_backward - func: cudnn_convolution.deprecated(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CUDA: cudnn_convolution_deprecated - func: cudnn_convolution.deprecated2(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor dispatch: @@ -1190,11 +1287,10 @@ - func: cudnn_convolution_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor dispatch: CUDA: cudnn_convolution_backward_weight - func: cudnn_convolution_transpose.deprecated(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CUDA: cudnn_convolution_transpose_deprecated - func: cudnn_convolution_transpose.deprecated2(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor dispatch: @@ -1216,120 +1312,136 @@ - func: cudnn_convolution_transpose_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor dispatch: CUDA: cudnn_convolution_transpose_backward_weight +- func: cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor + dispatch: + CUDA: cudnn_convolution_relu + +- func: cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor + dispatch: + CUDA: cudnn_convolution_add_relu + # NB: input is special cased in a way I don't quite understand - func: cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output dispatch: CUDA: cudnn_grid_sampler_forward - func: cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid) dispatch: CUDA: cudnn_grid_sampler_backward - func: cummax(Tensor self, int dim) -> (Tensor values, Tensor indices) + device_check: NoCheck # TensorIterator variants: function, method dispatch: - DefaultBackend: cummax + CompositeExplicitAutograd: cummax - func: cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: - DefaultBackend: cummax_out + CompositeExplicitAutograd: cummax_out - func: cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) + device_check: NoCheck # TensorIterator variants: function, method - func: cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator - func: _cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> () - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures variants: function dispatch: CPU: cummax_helper_cpu CUDA: cummax_helper_cuda - func: cummin(Tensor self, int dim) -> (Tensor values, Tensor indices) + device_check: NoCheck # TensorIterator variants: function, method dispatch: - DefaultBackend: cummin + CompositeExplicitAutograd: cummin - func: cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: - DefaultBackend: cummin_out + CompositeExplicitAutograd: cummin_out - func: cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) + device_check: NoCheck # TensorIterator variants: function, method - func: cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator - func: _cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> () - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures variants: function dispatch: CPU: cummin_helper_cpu CUDA: cummin_helper_cuda - func: cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor variants: function + device_check: NoCheck device_guard: False - func: cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor + device_check: NoCheck # TensorIterator variants: function, method dispatch: - DefaultBackend: cumprod + CompositeExplicitAutograd: cumprod - func: cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!) variants: method dispatch: - DefaultBackend: cumprod_ + CompositeExplicitAutograd: cumprod_ - func: cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: - DefaultBackend: cumprod_out + CompositeExplicitAutograd: cumprod_out - func: cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor + device_check: NoCheck # TensorIterator variants: function, method - func: cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!) variants: method - func: cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator -- func: cumprod_backward(Tensor grad, Tensor input, int dim) -> Tensor +- func: cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor variants: function + device_check: NoCheck device_guard: False - func: cumsum(Tensor self, int dim, *, ScalarType? 
dtype=None) -> Tensor + device_check: NoCheck # TensorIterator variants: function, method dispatch: - DefaultBackend: cumsum + CompositeExplicitAutograd: cumsum - func: cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!) variants: method dispatch: - DefaultBackend: cumsum_ + CompositeExplicitAutograd: cumsum_ - func: cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: - DefaultBackend: cumsum_out + CompositeExplicitAutograd: cumsum_out - func: cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor + device_check: NoCheck # TensorIterator variants: function, method - func: cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!) variants: method - func: cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator - func: ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor # convenience function that converts to intlists for you - func: ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor @@ -1351,157 +1463,191 @@ variants: function, method - func: diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a) variants: function, method dispatch: - DefaultBackend: diagonal + CompositeExplicitAutograd: diagonal - func: diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a) variants: function, method - func: diagonal_backward(Tensor grad, int[] input_sizes, int offset, int dim1, int dim2) -> Tensor variants: function + device_check: NoCheck device_guard: False - func: fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!) variants: method - func: diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor variants: function, method - dispatch: - Math: diff - func: diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!) variants: function - dispatch: - Math: diff_out +- func: gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[] + variants: function + +- func: gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[] + variants: function + +- func: gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[] + variants: function + +- func: gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[] + variants: function + +- func: gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[] + variants: function + +- func: gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? 
- func: div.Tensor(Tensor self, Tensor other) -> Tensor
+ device_check: NoCheck # TensorIterator
variants: function, method
+ structured_delegate: div.out
dispatch:
- CPU, CUDA: div
SparseCPU, SparseCUDA: div_sparse

- func: div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
variants: method
+ structured_delegate: div.out
dispatch:
- CPU, CUDA: div_
SparseCPU, SparseCUDA: div_sparse_

- func: div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: div_out
SparseCPU, SparseCUDA: div_out_sparse_zerodim

-- func: div.Tensor_mode(Tensor self, Tensor other, *, str rounding_mode) -> Tensor
+- func: div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
+ device_check: NoCheck # TensorIterator
variants: function, method
+ structured_delegate: div.out_mode
dispatch:
- CPU, CUDA: div
+ SparseCPU, SparseCUDA: div_sparse

-- func: div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str rounding_mode) -> Tensor(a!)
+- func: div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
variants: method
+ structured_delegate: div.out_mode
dispatch:
- CPU, CUDA: div_
+ SparseCPU, SparseCUDA: div_sparse_

-- func: div.out_mode(Tensor self, Tensor other, *, str rounding_mode, Tensor(a!) out) -> Tensor(a!)
+- func: div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
- CPU, CUDA: div_out
+ CPU, CUDA: div_out_mode
+ SparseCPU, SparseCUDA: div_out_sparse_zerodim

# For C++ only, until we have conversion from C++ numbers to Tensor
- func: div.Scalar(Tensor self, Scalar other) -> Tensor
+ device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
- DefaultBackend: div
+ CompositeExplicitAutograd: div

- func: div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
variants: method
dispatch:
- DefaultBackend: div_
+ CompositeExplicitAutograd: div_

-- func: div.Scalar_mode(Tensor self, Scalar other, *, str rounding_mode) -> Tensor
+- func: div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
variants: function, method
dispatch:
- DefaultBackend: div
+ CompositeExplicitAutograd: div

-- func: div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str rounding_mode) -> Tensor(a!)
+- func: div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
variants: method
dispatch:
- DefaultBackend: div_
+ CompositeExplicitAutograd: div_
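# [Editor's note] rounding_mode becomes an optional string (str?) in this
# release, so None can be passed for plain true division. Illustrative
# Python, not part of the YAML:
#   torch.div(a, b)                         # true division
#   torch.div(a, b, rounding_mode='trunc')  # round towards zero
#   torch.div(a, b, rounding_mode='floor')  # round towards -inf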
# divide, alias for div
- func: divide.Tensor(Tensor self, Tensor other) -> Tensor
variants: function, method

- func: divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
variants: method

- func: divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: divide.Scalar(Tensor self, Scalar other) -> Tensor
variants: function, method

- func: divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
variants: method

-- func: divide.Tensor_mode(Tensor self, Tensor other, *, str rounding_mode) -> Tensor
+- func: divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
variants: function, method

-- func: divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str rounding_mode) -> Tensor(a!)
+- func: divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
variants: method

-- func: divide.out_mode(Tensor self, Tensor other, *, str rounding_mode, Tensor(a!) out) -> Tensor(a!)
+- func: divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)

-- func: divide.Scalar_mode(Tensor self, Scalar other, *, str rounding_mode) -> Tensor
+- func: divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
variants: function, method

-- func: divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str rounding_mode) -> Tensor(a!)
+- func: divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
variants: method

# true_divide, an alias for div
- func: true_divide.Tensor(Tensor self, Tensor other) -> Tensor
+ device_check: NoCheck # TensorIterator
variants: function, method

- func: true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
variants: method

- func: true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator

- func: true_divide.Scalar(Tensor self, Scalar other) -> Tensor
+ device_check: NoCheck # TensorIterator
variants: function, method

- func: true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
variants: method

- func: dot(Tensor self, Tensor tensor) -> Tensor
variants: function, method
dispatch:
CPU: dot
CUDA: dot_cuda

- func: dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
- DefaultBackend: dot_out
+ CompositeExplicitAutograd: dot_out

- func: vdot(Tensor self, Tensor other) -> Tensor
variants: function, method
dispatch:
CPU: vdot
CUDA: vdot_cuda

- func: vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
- DefaultBackend: vdot_out
+ CompositeExplicitAutograd: vdot_out

- func: einsum(str equation, Tensor[] tensors) -> Tensor

- func: embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
dispatch:
- DefaultBackend: embedding
+ CompositeExplicitAutograd: embedding

- func: embedding_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor

- func: embedding_dense_backward(Tensor grad_output, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor
dispatch:

@@ -1523,224 +1669,220 @@
# The above `embedding_bag` wrapper is created to achieve this, e.g.,
# applying indices = indices.contiguous().
# The backward functions apply a check that these input tensors are contiguous.
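# [Editor's note] A minimal Python sketch of the contiguity requirement the
# comment above describes (illustrative only):
#   indices = indices.contiguous()
#   offsets = offsets.contiguous()
#   out = torch.nn.functional.embedding_bag(indices, weight, offsets)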
-- func: _embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+- func: _embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
dispatch:
CPU: _embedding_bag_forward_only_cpu
CUDA: _embedding_bag_forward_only_cuda

- func: _rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)

# row_stack is the alias of vstack
- func: row_stack(Tensor[] tensors) -> Tensor
- dispatch:
- Math: row_stack

- func: row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
- dispatch:
- Math: row_stack_out

- func: embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

-- func: _embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+# To keep backward and forward compatibility, and to avoid ambiguity with the
+# original signature above, scale_grad_by_freq, mode, sparse,
+# per_sample_weights, and include_last_offset parameters do not have default
+# values. Once the original signature is removed, default values can be added.
+- func: embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)
+
+- func: _embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
dispatch:
CPU: _embedding_bag_cpu
CUDA: _embedding_bag_cuda

-- func: _embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+- func: _embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
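# [Editor's note] The padding_idx parameter threaded through the entries
# above mirrors the new option on torch.nn.EmbeddingBag: entries equal to
# padding_idx are excluded from the bag's reduction. Illustrative Python,
# not part of the YAML:
#   bag = torch.nn.EmbeddingBag(10, 3, padding_idx=0)
#   out = bag(torch.tensor([[0, 2, 5]]))  # index 0 does not contribute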
-- func: _embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+- func: _embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor

-- func: _embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+- func: _embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
dispatch:
CPU: _embedding_bag_dense_backward_cpu
CUDA: _embedding_bag_dense_backward_cuda

-- func: _embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode) -> Tensor
+- func: _embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor
dispatch:
CPU: _embedding_bag_per_sample_weights_backward_cpu
CUDA: _embedding_bag_per_sample_weights_backward_cuda

-- func: empty_meta(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
-
- func: empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck
device_guard: False

- func: empty.memory_format(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
dispatch:
CPU: empty_cpu
CUDA: empty_cuda
+ Meta: empty_meta
MkldnnCPU: empty_mkldnn
SparseCPU, SparseCUDA: empty_sparse

- func: new_empty(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
variants: method

- func: new_empty_strided(Tensor self, int[] size, int[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
variants: method

- func: new_full(Tensor self, int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
variants: method

- func: new_zeros(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
variants: method

# other overrides are to provide a more helpful error message that dtype is required
- func: _empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU: empty_affine_quantized_other_backends_stub
QuantizedCPU, QuantizedCUDA: empty_affine_quantized

# it's a factory function receiving a tensor argument, thus overriding explicitly
# other overrides are to provide a more helpful error message that dtype is required
- func: _empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
category_override: factory
dispatch:
CPU: empty_per_channel_affine_quantized_other_backends_stub
QuantizedCPU, QuantizedCUDA: empty_per_channel_affine_quantized

- func: resize_(Tensor(a!) self, int[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
+ use_const_ref_for_mutable_tensors: True
variants: method
+ device_check: NoCheck
device_guard: False
dispatch:
- CPU: resize_
+ CPU, Meta: resize_
CUDA: resize_cuda_
QuantizedCPU: quantized_resize_cpu_
- Meta: resize_meta_

- func: empty_quantized(int[] size, Tensor qtensor) -> Tensor
variants: function
dispatch:
QuantizedCPU, QuantizedCUDA: empty_quantized

- func: empty.out(int[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck
device_guard: False

- func: empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck
device_guard: False

- func: empty_strided(int[] size, int[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CPU: empty_strided_cpu
CUDA: empty_strided_cuda
+ Meta: empty_strided_meta

- func: erf(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: erf.out
variants: function, method
- dispatch:
- DefaultBackend: erf

- func: erf_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: erf.out
variants: function, method
- dispatch:
- DefaultBackend: erf_

- func: erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: erf_out

- func: erfc(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: erfc.out
variants: function, method
- dispatch:
- DefaultBackend: erfc

- func: erfc_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: erfc.out
variants: function, method
- dispatch:
- DefaultBackend: erfc_

- func: erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: erfc_out

- func: exp(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: exp.out
variants: function, method
- dispatch:
- DefaultBackend: exp

- func: exp_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: exp.out
variants: function, method
- dispatch:
- DefaultBackend: exp_

- func: exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: exp_out
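# [Editor's note] Many entries in this release migrate to "structured
# kernels": only the .out overload carries a kernel (structured: True, with
# shape/dtype checks inherited from TensorIteratorBase), while the functional
# and inplace overloads delegate to it via structured_delegate. A sketch of
# the pattern, using the erf entries above:
#   - func: erf(Tensor self) -> Tensor
#     structured_delegate: erf.out   # no kernel of its own
#   - func: erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
#     structured: True
#     structured_inherits: TensorIteratorBase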
- func: exp2(Tensor self) -> Tensor
+ structured_delegate: exp2.out
variants: function, method
- dispatch:
- DefaultBackend: exp2

- func: exp2_(Tensor(a!) self) -> Tensor(a!)
+ structured_delegate: exp2.out
variants: function, method
- dispatch:
- DefaultBackend: exp2_

- func: exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: exp2_out

- func: expm1(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: expm1.out
variants: function, method
- dispatch:
- DefaultBackend: expm1

- func: expm1_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: expm1.out
variants: function, method
- dispatch:
- DefaultBackend: expm1_

- func: expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: expm1_out

- func: expand(Tensor(a) self, int[] size, *, bool implicit=False) -> Tensor(a)
variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
+ device_check: NoCheck
device_guard: False
dispatch:
- DefaultBackend: expand
+ CompositeExplicitAutograd: expand

- func: expand_as(Tensor(a) self, Tensor other) -> Tensor(a)
variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
+ device_check: NoCheck
device_guard: False

- func: eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: eye.out(int n, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU: eye_out_cpu
CUDA: eye_out_cuda

- func: eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU: eye_out_cpu
CUDA: eye_out_cuda

- func: flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)

@@ -1760,111 +1902,129 @@
- func: unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a)
variants: method

- func: fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
- DefaultBackend: fill_
+ CPU, CUDA, QuantizedCPU, QuantizedCUDA: fill_
+ Meta: fill_meta_

- func: fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
- DefaultBackend: fill_
+ CPU, CUDA, QuantizedCPU, QuantizedCUDA: fill_
+ Meta: fill_meta_

- func: floor(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: floor.out
variants: function, method
dispatch:
- DefaultBackend: floor
+ CompositeExplicitAutograd: floor

- func: floor_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: floor.out
variants: function, method
dispatch:
- DefaultBackend: floor_
+ CompositeExplicitAutograd: floor_

- func: floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: floor_out

- func: floor_divide(Tensor self, Tensor other) -> Tensor
+ device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: floor_divide
SparseCPU, SparseCUDA: floor_divide_sparse

- func: floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
variants: method
dispatch:
CPU, CUDA: floor_divide_
SparseCPU, SparseCUDA: floor_divide_sparse_

- func: floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: floor_divide_out
SparseCPU, SparseCUDA: floor_divide_out_sparse_zerodim

- func: floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
+ device_check: NoCheck # TensorIterator
variants: function, method

- func: floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
variants: method

- func: frac(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: frac.out
variants: function, method
- dispatch:
- DefaultBackend: frac

- func: frac_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: frac.out
variants: function, method
- dispatch:
- DefaultBackend: frac_

- func: frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: frac_out

- func: full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck
device_guard: False

- func: full(int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: full.out(int[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU: from_file

- func: gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: gcd_out

- func: gcd(Tensor self, Tensor other) -> Tensor
+ structured_delegate: gcd.out
variants: function, method

- func: gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+ structured_delegate: gcd.out
variants: function, method
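# [Editor's note] gcd/lcm operate elementwise on integer tensors; a quick
# illustrative Python check (not part of the YAML):
#   a, b = torch.tensor([4, 6]), torch.tensor([6, 4])
#   torch.gcd(a, b)  # tensor([2, 2])
#   torch.lcm(a, b)  # tensor([12, 12])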
- func: lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: lcm_out

- func: lcm(Tensor self, Tensor other) -> Tensor
+ structured_delegate: lcm.out
variants: function, method

- func: lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+ structured_delegate: lcm.out
variants: function, method

# NOTE [ grid_sampler Native Functions ]
# `grid_sampler` does all the shape checking and then dispatches to one of
# `cudnn_grid_sampler`, `grid_sampler_2d`, or `grid_sampler_3d`, each of which

@@ -1893,11 +2053,11 @@
CUDA: grid_sampler_2d_backward_cuda

# See NOTE [ grid_sample CPU fallback ]
- func: _grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
dispatch:
- DefaultBackend: _grid_sampler_2d_cpu_fallback
+ CompositeExplicitAutograd: _grid_sampler_2d_cpu_fallback

- func: _grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)

- func: grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
dispatch:

@@ -1908,49 +2068,37 @@
dispatch:
CPU: grid_sampler_3d_backward_cpu
CUDA: grid_sampler_3d_backward_cuda

- func: hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor
- func: group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: native_group_norm(Tensor input, Tensor? weight, Tensor? bias, int N, int C, int HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU, CUDA: native_group_norm
- Math: math_group_norm
+ CompositeImplicitAutograd: math_group_norm

- func: native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, int N, int C, int HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU, CUDA: native_group_norm_backward

# Real to complex forward FFT
- func: _fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor

@@ -1958,11 +2106,10 @@
dispatch:
CPU: _fft_r2c_mkl
CUDA: _fft_r2c_cufft

- func: _fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
variants: function
dispatch:
CPU: _fft_r2c_mkl_out
CUDA: _fft_r2c_cufft_out

@@ -1972,11 +2119,10 @@
dispatch:
CPU: _fft_c2r_mkl
CUDA: _fft_c2r_cufft

- func: _fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
variants: function
dispatch:
CPU: _fft_c2r_mkl_out
CUDA: _fft_c2r_cufft_out

@@ -1986,11 +2132,10 @@
dispatch:
CPU: _fft_c2c_mkl
CUDA: _fft_c2c_cufft

- func: _fft_c2c.out(Tensor self, int[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
variants: function
dispatch:
CPU: _fft_c2c_mkl_out
CUDA: _fft_c2c_cufft_out

@@ -2001,10 +2146,11 @@
- func: _cufft_set_plan_cache_max_size(int device_index, int max_size) -> ()

- func: _cufft_clear_plan_cache(int device_index) -> ()

- func: index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
+ device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: index
QuantizedCPU: quantized_index

# NB: This function is special-cased in tools/autograd/gen_variable_type.py

@@ -2013,11 +2159,11 @@
# - Tensor Tensor::index(std::initializer_list<TensorIndex> indices)

- func: index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
variants: method
dispatch:
- DefaultBackend: index_copy_
+ CompositeExplicitAutograd: index_copy_

- func: index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
variants: function, method

- func: index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)

@@ -2025,40 +2171,41 @@
- func: index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
variants: function, method
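# [Editor's note] Illustrative Python for index_copy_ declared above (not
# part of the YAML): rows of source are written into self at the positions
# given by index along dim:
#   x = torch.zeros(3, 2)
#   src = torch.arange(4.).reshape(2, 2)
#   x.index_copy_(0, torch.tensor([2, 0]), src)  # src[0] -> x[2], src[1] -> x[0]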
- func: index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
+ device_check: NoCheck # delegate to _index_put_impl_, which leverages TensorIterator
variants: function, method
dispatch:
- DefaultBackend: index_put_
+ CompositeExplicitAutograd: index_put_

# NB: The following functions are declared in aten/src/ATen/templates/TensorBody.h and defined in aten/src/ATen/TensorIndexing.cpp:
# - Tensor & Tensor::index_put_(ArrayRef<TensorIndex> indices, Tensor const & rhs)
# - Tensor & Tensor::index_put_(ArrayRef<TensorIndex> indices, Scalar v)
# - Tensor & Tensor::index_put_(std::initializer_list<TensorIndex> indices, Tensor const & rhs)
# - Tensor & Tensor::index_put_(std::initializer_list<TensorIndex> indices, Scalar v)

- func: index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
+ device_check: NoCheck # delegate to _index_put_impl_ after clone, which leverages TensorIterator
variants: function, method

- func: _index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
variants: function
dispatch:
CPU, CUDA: _index_put_impl_

- func: instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
variants: function

- func: inverse(Tensor self) -> Tensor
variants: function, method
dispatch:
- DefaultBackend: inverse
+ CompositeExplicitAutograd: inverse

- func: inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
- DefaultBackend: inverse_out
+ CompositeExplicitAutograd: inverse_out

- func: _inverse_helper(Tensor self) -> Tensor
variants: function
dispatch:
CPU: _inverse_helper_cpu

@@ -2067,131 +2214,125 @@
- func: isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
variants: function, method

- func: isnan(Tensor self) -> Tensor
variants: function, method
+ device_check: NoCheck
device_guard: False
dispatch:
CPU, CUDA: isnan
SparseCPU, SparseCUDA: isnan_sparse

- func: is_distributed(Tensor self) -> bool
variants: function, method
+ device_check: NoCheck
device_guard: False

- func: is_floating_point(Tensor self) -> bool
variants: function, method
+ device_check: NoCheck
device_guard: False
+ manual_cpp_binding: True

- func: is_complex(Tensor self) -> bool
variants: function, method
+ device_check: NoCheck
device_guard: False
+ manual_cpp_binding: True

- func: isreal(Tensor self) -> Tensor
variants: function, method

- func: is_nonzero(Tensor self) -> bool
variants: function, method
+ device_check: NoCheck
device_guard: False

- func: is_same_size(Tensor self, Tensor other) -> bool
variants: function, method
+ device_check: NoCheck
device_guard: False

- func: is_signed(Tensor self) -> bool
variants: function, method
+ device_check: NoCheck
device_guard: False
+ manual_cpp_binding: True

- func: kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
dispatch:
- DefaultBackend: kl_div
+ CompositeExplicitAutograd: kl_div

- func: kl_div_backward(Tensor grad_output, Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
dispatch:
CPU: kl_div_backward_cpu
CUDA: kl_div_backward_cuda

- func: kron(Tensor self, Tensor other) -> Tensor
variants: function, method
- dispatch:
- Math: kron

- func: kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
- dispatch:
- Math: kron_out
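# [Editor's note] torch.kron computes the Kronecker product; inputs of shape
# (m, n) and (p, q) give a result of shape (m*p, n*q). Illustrative Python,
# not part of the YAML:
#   torch.kron(torch.eye(2), torch.ones(2, 2))  # 4x4, ones on the diagonal blocks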
- func: kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
dispatch:
- DefaultBackend: kthvalue
+ CompositeExplicitAutograd: kthvalue

- func: kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU: kthvalue_out_cpu
CUDA: kthvalue_out_cuda

- func: kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method

- func: kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: native_layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU: layer_norm_cpu
CUDA: layer_norm_cuda
- Math: math_native_layer_norm
+ CompositeImplicitAutograd: math_native_layer_norm

- func: native_layer_norm_backward(Tensor grad_out, Tensor input, int[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU: layer_norm_backward_cpu
CUDA: layer_norm_backward_cuda

- func: nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
variants: function, method
dispatch:
- DefaultBackend: nan_to_num
+ CompositeExplicitAutograd: nan_to_num

- func: nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
variants: function, method
dispatch:
- DefaultBackend: nan_to_num_
+ CompositeExplicitAutograd: nan_to_num_

- func: nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
- DefaultBackend: nan_to_num_out
+ CPU, CUDA: nan_to_num_out
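# [Editor's note] nan_to_num replaces NaN and infinities; passing None keeps
# the defaults (0.0 for nan, the dtype's max/min for +/-inf). Illustrative
# Python, not part of the YAML:
#   x = torch.tensor([float('nan'), float('inf')])
#   torch.nan_to_num(x, nan=0.0, posinf=1e6)  # tensor([0.0000e+00, 1.0000e+06])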
- func: linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
python_module: nn

- func: mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
python_module: nn
dispatch:
MkldnnCPU: mkldnn_linear

- func: mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
- MkldnnCPU: mkldnn_linear_backward_input
+ MkldnnCPU: mkldnn_linear_backward_input

- func: mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
MkldnnCPU: mkldnn_linear_backward_weights

- func: mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
MkldnnCPU: mkldnn_linear_backward

- func: fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor

@@ -2214,159 +2355,173 @@
- func: ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
variants: function, method

- func: ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: linspace(Scalar start, Scalar end, int? steps=None, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: linspace.out(Scalar start, Scalar end, int? steps=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU: linspace_cpu_out
CUDA: linspace_cuda_out

- func: log(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: log.out
variants: function, method
- dispatch:
- DefaultBackend: log

- func: log_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: log.out
variants: function, method
- dispatch:
- DefaultBackend: log_

- func: log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: log_out

- func: log10(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: log10.out
variants: function, method
- dispatch:
- DefaultBackend: log10

- func: log10_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: log10.out
variants: function, method
- dispatch:
- DefaultBackend: log10_

- func: log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: log10_out

- func: log1p(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: log1p.out
variants: function, method
dispatch:
- CPU, CUDA: log1p
SparseCPU, SparseCUDA: log1p_sparse

- func: log1p_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: log1p.out
variants: function, method
dispatch:
- CPU, CUDA: log1p_
SparseCPU, SparseCUDA: log1p_sparse_

- func: log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: log1p_out
SparseCPU, SparseCUDA: log1p_out_sparse
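# [Editor's note] log1p computes log(1 + x) accurately for small x, where
# naively evaluating log(1 + x) loses precision to rounding. Illustrative
# Python, not part of the YAML:
#   x = torch.tensor(1e-10, dtype=torch.float64)
#   torch.log1p(x)    # ~1e-10
#   torch.log(1 + x)  # noticeably less accurate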
- func: log2(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: log2.out
variants: function, method
- dispatch:
- DefaultBackend: log2

- func: log2_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: log2.out
variants: function, method
- dispatch:
- DefaultBackend: log2_

- func: log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: log2_out

- func: logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: logaddexp_out

- func: logaddexp(Tensor self, Tensor other) -> Tensor
variants: method, function
+ structured_delegate: logaddexp.out
dispatch:
- DefaultBackend: logaddexp
+ CompositeExplicitAutograd: logaddexp

- func: logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: logaddexp2_out

- func: logaddexp2(Tensor self, Tensor other) -> Tensor
variants: method, function
+ structured_delegate: logaddexp2.out
dispatch:
- DefaultBackend: logaddexp2
+ CompositeExplicitAutograd: logaddexp2

- func: xlogy.Tensor(Tensor self, Tensor other) -> Tensor
+ device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: xlogy

- func: xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor
+ device_check: NoCheck # TensorIterator
variants: function
dispatch:
CPU, CUDA: xlogy

- func: xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
+ device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: xlogy

# xlogy: inplace variant
- func: xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: xlogy_

- func: xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: xlogy_

# xlogy: out variant
- func: xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
variants: function
dispatch:
CPU, CUDA: xlogy_out

- func: xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
variants: function
dispatch:
CPU, CUDA: xlogy_out

- func: xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
variants: function
dispatch:
CPU, CUDA: xlogy_out

- func: logdet(Tensor self) -> Tensor
variants: function, method
dispatch:
- DefaultBackend: logdet
+ CompositeExplicitAutograd: logdet
- func: logspace(Scalar start, Scalar end, int? steps=None, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: logspace.out(Scalar start, Scalar end, int? steps=None, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU: logspace_cpu_out
CUDA: logspace_cuda_out

# log_softmax allows positional dtype, unlike most operators, because kwonly is BC-breaking when loading jit models.

@@ -2390,62 +2545,64 @@
dispatch:
CPU: _logcumsumexp_cpu
CUDA: _logcumsumexp_cuda

- func: _logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU: _logcumsumexp_out_cpu
CUDA: _logcumsumexp_out_cuda

- func: logcumsumexp(Tensor self, int dim) -> Tensor
variants: function, method
dispatch:
- DefaultBackend: logcumsumexp
+ CompositeExplicitAutograd: logcumsumexp

- func: logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
- DefaultBackend: logcumsumexp_out
+ CompositeExplicitAutograd: logcumsumexp_out

- func: logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
variants: function, method

- func: logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
+ device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
- DefaultBackend: logsumexp
+ CompositeExplicitAutograd: logsumexp

- func: logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
dispatch:
- DefaultBackend: logsumexp_out
+ CompositeExplicitAutograd: logsumexp_out

- func: logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
+ device_check: NoCheck # TensorIterator
variants: function, method

- func: logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator

- func: margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor

- func: matmul(Tensor self, Tensor other) -> Tensor
variants: function, method

- func: matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: matrix_rank.tol(Tensor self, float tol, bool symmetric=False) -> Tensor

- func: matrix_rank(Tensor self, bool symmetric=False) -> Tensor

+# Alias to linalg.matrix_power
- func: matrix_power(Tensor self, int n) -> Tensor
variants: function, method

+# Alias to linalg.matrix_power
+- func: matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
+
- func: matrix_exp(Tensor self) -> Tensor
variants: function, method
dispatch:
CPU, CUDA: matrix_exp

@@ -2464,41 +2621,42 @@
- func: _compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor
dispatch:
CPU, CUDA: _compute_linear_combination

- func: _compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU, CUDA: _compute_linear_combination_out

- func: max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+ device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
- DefaultBackend: max
+ CPU, CUDA, QuantizedCPU, QuantizedCUDA: max

- func: max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: max_out

- func: max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+ device_check: NoCheck # TensorIterator
variants: function, method

- func: max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator

- func: value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, int[] sizes, bool keepdim) -> Tensor
variants: function
+ device_check: NoCheck
device_guard: False

- func: amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
variants: function, method
dispatch:
- DefaultBackend: amax
+ CompositeExplicitAutograd: amax

- func: amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU, CUDA: amax_out

# Return: (Tensor output, Tensor indices)
- func: max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)

@@ -2509,14 +2667,22 @@
- func: mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
MkldnnCPU: mkldnn_max_pool2d

+- func: mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
+ dispatch:
+ MkldnnCPU: mkldnn_max_pool2d_backward
+
- func: mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
MkldnnCPU: mkldnn_max_pool3d

+- func: mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
+ dispatch:
+ MkldnnCPU: mkldnn_max_pool3d_backward
+
- func: quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
QuantizedCPU: quantized_max_pool1d

- func: quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor

@@ -2526,130 +2692,126 @@
- func: max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor

# The CPU and GPU dispatch variants are named weirdly here because otherwise there
# are namespacing issues in C++
- func: mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
+ device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: mean_cpu_gpu
QuantizedCPU: mean_quantized_cpu

- func: mean.dim(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+ device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: mean_cpu_gpu
QuantizedCPU: mean_quantized_cpu

- func: mean.out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: mean_out_cpu_gpu
QuantizedCPU: mean_out_quantized_cpu

- func: mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+ device_check: NoCheck # TensorIterator
variants: function, method

- func: mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator

- func: median(Tensor self) -> Tensor
variants: function, method
dispatch:
CPU: median_cpu
CUDA: median_cuda

- func: median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
dispatch:
- DefaultBackend: median
+ CompositeExplicitAutograd: median

- func: median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU: median_out_cpu
CUDA: median_out_cuda

- func: median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method

- func: median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: nanmedian(Tensor self) -> Tensor
variants: function, method
dispatch:
CPU: nanmedian_cpu
CUDA: nanmedian_cuda

- func: nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
dispatch:
- DefaultBackend: nanmedian
+ CompositeExplicitAutograd: nanmedian

- func: nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU: nanmedian_out_cpu
CUDA: nanmedian_out_cuda

- func: nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method

- func: nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
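# [Editor's note] nanmedian ignores NaN values, whereas median propagates
# them (for an even count the lower of the two middle values is returned).
# Illustrative Python, not part of the YAML:
#   x = torch.tensor([1., float('nan'), 3.])
#   torch.median(x)     # tensor(nan)
#   torch.nanmedian(x)  # tensor(1.)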
- func: min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+ device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
- DefaultBackend: min
+ CPU, CUDA, QuantizedCPU, QuantizedCUDA: min

- func: min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: min_out

- func: min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+ device_check: NoCheck # TensorIterator
variants: function, method

- func: min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator

- func: amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
variants: function, method
dispatch:
- DefaultBackend: amin
+ CompositeExplicitAutograd: amin

- func: amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU, CUDA: amin_out

- func: mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
- DefaultBackend: mkldnn_convolution
+ CompositeExplicitAutograd: mkldnn_convolution

- func: mkldnn_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool bias_defined) -> Tensor

- func: mkldnn_convolution_backward_weights(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool bias_defined) -> (Tensor, Tensor)

- func: mkldnn_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
- DefaultBackend: mkldnn_convolution_backward
+ CompositeExplicitAutograd: mkldnn_convolution_backward

- func: miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CUDA: miopen_batch_norm

- func: miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CUDA: miopen_batch_norm_backward

- func: miopen_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CUDA: miopen_convolution

- func: miopen_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
dispatch:

@@ -2666,11 +2828,10 @@
- func: miopen_convolution_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
dispatch:
CUDA: miopen_convolution_backward_weight
- func: miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CUDA: miopen_convolution_transpose

# NB: output_padding not strictly needed here, but it's helpful for the float
# backwards

@@ -2685,11 +2846,10 @@
- func: miopen_convolution_transpose_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
dispatch:
CUDA: miopen_convolution_transpose_backward_weight

- func: miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CUDA: miopen_depthwise_convolution

- func: miopen_depthwise_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
dispatch:

@@ -2702,103 +2862,104 @@
- func: miopen_depthwise_convolution_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
dispatch:
CUDA: miopen_depthwise_convolution_backward_weight

- func: miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CUDA: miopen_rnn

- func: miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CUDA: miopen_rnn_backward

- func: mm(Tensor self, Tensor mat2) -> Tensor
variants: function, method
dispatch:
CPU: mm_cpu
CUDA: mm_cuda
- SparseCPU, SparseCUDA: _sparse_mm
+ SparseCPU, SparseCUDA, SparseCsrCPU: _sparse_mm

- func: mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
dispatch:
CPU: mm_cpu_out
CUDA: mm_out_cuda
SparseCPU, SparseCUDA: _sparse_mm_out
+ SparseCsrCPU: _sparse_csr_mm_out

- func: _sparse_mm(Tensor sparse, Tensor dense) -> Tensor

- func: _sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor
dispatch:
SparseCPU: sparse_sparse_matmul_cpu
SparseCUDA: sparse_sparse_matmul_cuda

-- func: _sparse_matrix_mask_helper(Tensor t, Tensor mask_indices) -> Tensor
+- func: _sparse_mask_helper(Tensor t, Tensor mask_indices) -> Tensor
dispatch:
- SparseCPU: sparse_matrix_mask_helper_cpu
- SparseCUDA: sparse_matrix_mask_helper_cuda
+ SparseCPU: sparse_mask_helper_cpu
+ SparseCUDA: sparse_mask_helper_cuda

- func: mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
dispatch:
CPU, CUDA: mode
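# [Editor's note] torch.mode returns the most frequent value along a
# dimension together with an index where it occurs. Illustrative Python,
# not part of the YAML:
#   values, indices = torch.mode(torch.tensor([1, 2, 2, 3]), dim=0)
#   # values -> tensor(2)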

- func: mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  variants: function, method

- func: mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: mul.Tensor(Tensor self, Tensor other) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: mul.out
  variants: function, method
  dispatch:
-   CPU, CUDA: mul
    SparseCPU, SparseCUDA: mul_sparse
    MkldnnCPU: mkldnn_mul

- func: mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: mul.out
  variants: method
  dispatch:
-   CPU, CUDA: mul_
    SparseCPU, SparseCUDA: mul_sparse_
    MkldnnCPU: mkldnn_mul_

- func: mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: mul_out
    SparseCPU: mul_out_sparse_cpu
    SparseCUDA: mul_out_sparse_cuda
    MkldnnCPU: mkldnn_mul_out

# For C++ only, until we have conversion from C++ numbers to Tensor
- func: mul.Scalar(Tensor self, Scalar other) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
-   DefaultBackend: mul
+   CompositeExplicitAutograd: mul

- func: mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
-   DefaultBackend: mul_
+   CompositeExplicitAutograd: mul_

# multiply, alias for mul
- func: multiply.Tensor(Tensor self, Tensor other) -> Tensor
  variants: function, method

- func: multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method

- func: multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: multiply.Scalar(Tensor self, Scalar other) -> Tensor
  variants: function, method

- func: multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
@@ -2806,114 +2967,107 @@
- func: mv(Tensor self, Tensor vec) -> Tensor
  variants: function, method
  dispatch:
    CPU, CUDA: mv
-   SparseCPU, SparseCUDA: mv_sparse
+   SparseCPU, SparseCUDA, SparseCsrCPU: mv_sparse

- func: mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
-   DefaultBackend: mv_out
+   CompositeExplicitAutograd: mv_out

- func: mvlgamma(Tensor self, int p) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
-   DefaultBackend: mvlgamma
+   CompositeExplicitAutograd: mvlgamma

- func: mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
-   DefaultBackend: mvlgamma_
+   CompositeExplicitAutograd: mvlgamma_

- func: narrow_copy(Tensor self, int dim, int start, int length) -> Tensor
  variants: function, method
  dispatch:
    CPU: narrow_copy_dense_cpu
    SparseCPU, SparseCUDA: narrow_copy_sparse
-   DefaultBackend: narrow_copy_dense
+   CompositeExplicitAutograd: narrow_copy_dense

- func: narrow_copy.out(Tensor self, int dim, int start, int length, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: narrow_copy_dense_cpu_out
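
# The mul family above illustrates the structured-kernel pattern many ops
# adopt in this diff: the out= overload is marked `structured: True` and
# inherits its shape/meta logic from TensorIteratorBase, while the functional
# and in-place overloads declare only `structured_delegate: <op>.out` and
# drop their per-backend CPU/CUDA kernels. A sketch with a hypothetical op
# `my_op` (not an entry from this file):
#
# - func: my_op.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
#   structured: True
#   structured_inherits: TensorIteratorBase
#   dispatch:
#     CPU, CUDA: my_op_out  # the only handwritten kernel
#
# - func: my_op(Tensor self) -> Tensor
#   structured_delegate: my_op.out  # functional form is generated from the out= kernel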

- func: narrow(Tensor(a) self, int dim, int start, int length) -> Tensor(a)
  variants: function, method
+ device_check: NoCheck
  device_guard: False

- func: narrow.Tensor(Tensor(a) self, int dim, Tensor start, int length) -> Tensor(a)
  variants: function, method
+ device_check: NoCheck
  device_guard: False

- func: native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CPU: batch_norm_cpu
    CUDA: batch_norm_cuda
    MkldnnCPU: mkldnn_batch_norm

- func: native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CUDA: batch_norm_cuda_out

- func: batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
  dispatch:
    CUDA: batch_norm_stats_cuda

- func: batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CUDA: batch_norm_elemt_cuda

- func: batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CUDA: batch_norm_elemt_cuda_out

# for backward compatibility
- func: batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CUDA: batch_norm_gather_stats_cuda

- func: batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CUDA: batch_norm_gather_stats_with_counts_cuda

- func: native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CPU: batch_norm_backward_cpu
    CUDA: batch_norm_backward_cuda
+   MkldnnCPU: mkldnn_batch_norm_backward

- func: batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CUDA: batch_norm_backward_reduce_cuda

-- func: batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+- func: batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor
  dispatch:
    CUDA: batch_norm_backward_elemt_cuda

- func: batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CPU: batch_norm_update_stats_cpu
    CUDA: batch_norm_update_stats_cuda

- func: is_vulkan_available() -> bool

- func: _nnpack_available() -> bool

- func: _nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, int[2] padding, int[2] stride=1) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  variants: function
  dispatch:
-   DefaultBackend: _nnpack_spatial_convolution
+   CompositeExplicitAutograd: _nnpack_spatial_convolution

- func: _nnpack_spatial_convolution_backward(Tensor input, Tensor grad_output, Tensor weight, int[2] padding, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
  variants: function

- func: _nnpack_spatial_convolution_backward_input(Tensor input, Tensor grad_output, Tensor weight, int[2] padding) -> Tensor
@@ -2921,29 +3075,26 @@
- func: _nnpack_spatial_convolution_backward_weight(Tensor input, int[] weightsize, Tensor grad_output, int[2] padding) -> Tensor
  variants: function

- func: ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck
  device_guard: False

- func: ones(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: ones.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor

- func: cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor

- func: _euclidean_dist(Tensor x1, Tensor x2) -> Tensor
  dispatch:
-   DefaultBackend: _euclidean_dist
+   CompositeExplicitAutograd: _euclidean_dist

- func: _cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
  dispatch:
    CPU, CUDA: _cdist_forward
@@ -2963,13 +3114,13 @@
- func: cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor
  variants: function

- func: permute(Tensor(a) self, int[] dims) -> Tensor(a)
- variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
+ variants: function, method
  dispatch:
-   DefaultBackend: permute
+   CompositeExplicitAutograd: permute

- func: movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
  variants: function, method

- func: movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
@@ -3014,174 +3165,151 @@
  variants: function

- func: rad2deg(Tensor self) -> Tensor
  variants: function, method
  dispatch:
-   DefaultBackend: rad2deg
+   CompositeExplicitAutograd: rad2deg

- func: rad2deg_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
  dispatch:
-   DefaultBackend: rad2deg_
+   CompositeExplicitAutograd: rad2deg_

- func: rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
-   DefaultBackend: rad2deg_out
+   CompositeExplicitAutograd: rad2deg_out
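
# Note on permute above: the 0.7.0 schema kept it method-only, with a comment
# anticipating a function form ("In the future we could make this a function
# too"); 0.8.0 delivers that by switching to `variants: function, method`, so
# a free-function spelling is generated alongside the Tensor method.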

- func: deg2rad(Tensor self) -> Tensor
  variants: function, method
  dispatch:
-   DefaultBackend: deg2rad
+   CompositeExplicitAutograd: deg2rad

- func: deg2rad_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
  dispatch:
-   DefaultBackend: deg2rad_
+   CompositeExplicitAutograd: deg2rad_

- func: deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
-   DefaultBackend: deg2rad_out
+   CompositeExplicitAutograd: deg2rad_out

- func: scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: rand.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck
  device_guard: False

- func: rand.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck
  device_guard: False

- func: rand(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: rand.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: rand.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: rand.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: randint(int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: randint.generator(int high, int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: randint.low(int low, int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: randint.low_generator(int low, int high, int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: randint.out(int high, int[] size, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: randint.generator_out(int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: randint.low_out(int low, int high, int[] size, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: randint.low_generator_out(int low, int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: randn(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: randn.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: randn.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck
  device_guard: False

- func: randn.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck
  device_guard: False

- func: randn.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: randn.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

-- func: randperm(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+- func: randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

-- func: randperm.generator(int n, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+- func: randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CPU: randperm_out_cpu
    CUDA: randperm_out_cuda

- func: range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
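
# Note on randperm above: the default dtype is now spelled `dtype=long`
# (int64) in the schema itself instead of `dtype=None`, making the integer
# default visible in the signature rather than implied by the kernel.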

- func: range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CPU: range_cpu_out
    CUDA: range_cuda_out

- func: ravel(Tensor(a) self) -> Tensor(a)
  variants: function, method

- func: reciprocal(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: reciprocal.out
  variants: function, method
- dispatch:
-   DefaultBackend: reciprocal

- func: reciprocal_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: reciprocal.out
  variants: function, method
- dispatch:
-   DefaultBackend: reciprocal_

- func: reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: reciprocal_out

- func: neg(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: neg.out
  variants: function, method
  dispatch:
-   DefaultBackend: neg
+   SparseCPU, SparseCUDA: neg_sparse

- func: neg_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: neg.out
  variants: function, method
  dispatch:
-   CPU, CUDA: neg_
    SparseCPU, SparseCUDA: neg_sparse_

- func: neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: neg_out
    SparseCPU, SparseCUDA: neg_out_sparse

# Alias for neg
@@ -3190,16 +3318,15 @@
- func: negative_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method

- func: negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: repeat(Tensor self, int[] repeats) -> Tensor
  variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
  dispatch:
-   DefaultBackend: repeat
+   CompositeExplicitAutograd: repeat

- func: repeat_interleave.Tensor(Tensor repeats) -> Tensor
  variants: function
  dispatch:
    CPU: repeat_interleave_cpu
@@ -3211,55 +3338,70 @@
- func: repeat_interleave.self_int(Tensor self, int repeats, int? dim=None) -> Tensor
  variants: function, method

- func: reshape(Tensor(a) self, int[] shape) -> Tensor(a)
  variants: function, method
+ device_check: NoCheck
  device_guard: False

- func: _mkldnn_reshape(Tensor self, int[] shape) -> Tensor
+ device_check: NoCheck
  device_guard: False
  dispatch:
    MkldnnCPU: mkldnn_reshape

- func: reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)
  variants: method
+ device_check: NoCheck
  device_guard: False

- func: round(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: round.out
  variants: function, method
- dispatch:
-   DefaultBackend: round

- func: round_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: round.out
  variants: function, method
- dispatch:
-   DefaultBackend: round_

- func: round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
  dispatch:
    CPU: round_out
    CUDA: round_out

- func: rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
+ device_check: NoCheck # TensorIterator

- func: rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator

- func: relu(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: relu
    MkldnnCPU: mkldnn_relu
    QuantizedCPU: relu_quantized_cpu

- func: relu_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: relu_
    MkldnnCPU: mkldnn_relu_
    QuantizedCPU: relu_quantized_cpu_

+- func: relu6(Tensor self) -> Tensor
+ python_module: nn
+
+- func: relu6_(Tensor(a!) self) -> Tensor(a!)
+ python_module: nn
+
- func: prelu(Tensor self, Tensor weight) -> Tensor
  variants: function, method
  dispatch:
    CPU: prelu_cpu
    CUDA: prelu_cuda
@@ -3269,12 +3411,14 @@
  dispatch:
    CPU: prelu_backward_cpu
    CUDA: prelu_backward_cuda

- func: gelu(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
+   MkldnnCPU: mkldnn_gelu
    CPU: gelu_cpu
    CUDA: gelu_cuda

- func: gelu_backward(Tensor grad, Tensor self) -> Tensor
  python_module: nn
@@ -3283,100 +3427,143 @@
    CUDA: gelu_backward_cuda

- func: infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor
  variants: function
  python_module: nn
+ device_check: NoCheck
  device_guard: False

- func: hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: hardshrink

- func: hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
  variants: function, method
  dispatch:
    CPU, CUDA: hardshrink_backward

- func: rsqrt(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: rsqrt.out
  variants: function, method
- dispatch:
-   DefaultBackend: rsqrt

- func: rsqrt_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: rsqrt.out
  variants: function, method
- dispatch:
-   DefaultBackend: rsqrt_

- func: rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: rsqrt_out

- func: select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
  variants: function, method
+ device_check: NoCheck
  device_guard: False

- func: select.int(Tensor(a) self, int dim, int index) -> Tensor(a)
  variants: function, method
+ device_check: NoCheck
  device_guard: False
  dispatch:
-   DefaultBackend: select
+   CompositeExplicitAutograd: select

- func: select_backward(Tensor grad, int[] input_sizes, int dim, int index) -> Tensor
  variants: function
+ device_check: NoCheck
  device_guard: False

- func: selu(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator

- func: selu_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator

- func: celu(Tensor self, Scalar alpha=1.0) -> Tensor
+ device_check: NoCheck # TensorIterator
  dispatch:
-   DefaultBackend: celu
+   CompositeExplicitAutograd: celu

- func: celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
  dispatch:
-   DefaultBackend: celu_
+   CompositeExplicitAutograd: celu_
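
# relu6 above is new in 0.8.0 and carries `python_module: nn`, the marker
# this file uses for ops bound under the nn module (the
# torch.nn.functional-level namespace) instead of the top-level torch
# namespace; gelu, silu, and mish below use the same marker.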

- func: silu(Tensor self) -> Tensor
+ structured_delegate: silu.out
  python_module: nn
  dispatch:
-   DefaultBackend: silu
+   CompositeExplicitAutograd: silu

- func: silu_(Tensor(a!) self) -> Tensor(a!)
+ structured_delegate: silu.out
  python_module: nn
  dispatch:
-   DefaultBackend: silu_
+   CompositeExplicitAutograd: silu_

- func: silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: silu_out

- func: silu_backward(Tensor grad_output, Tensor self) -> Tensor
  python_module: nn
  dispatch:
    CPU, CUDA: silu_backward
-   Math: math_silu_backward
+   CompositeImplicitAutograd: math_silu_backward

+- func: mish(Tensor self) -> Tensor
+ structured_delegate: mish.out
+ python_module: nn
+ dispatch:
+   CompositeExplicitAutograd: mish
+
+- func: mish_(Tensor(a!) self) -> Tensor(a!)
+ structured_delegate: mish.out
+ python_module: nn
+ dispatch:
+   CompositeExplicitAutograd: mish_
+
+- func: mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ structured: True
+ structured_inherits: TensorIteratorBase
+ python_module: nn
+ dispatch:
+   CPU, CUDA: mish_out
+
+- func: mish_backward(Tensor grad_output, Tensor self) -> Tensor
+ python_module: nn
+ dispatch:
+   CPU, CUDA: mish_backward
+   CompositeImplicitAutograd: math_mish_backward
+
- func: sigmoid(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: sigmoid.out
  variants: function, method
  dispatch:
-   CPU, CUDA: sigmoid
    QuantizedCPU: sigmoid_quantized_cpu
    MkldnnCPU: mkldnn_sigmoid

- func: sigmoid_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: sigmoid.out
  variants: function, method
  dispatch:
-   CPU, CUDA: sigmoid_
    MkldnnCPU: mkldnn_sigmoid_

- func: sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sigmoid_out

- func: logit(Tensor self, float? eps=None) -> Tensor
  variants: function, method
@@ -3387,56 +3574,58 @@
  variants: function, method
  dispatch:
    CPU, CUDA: logit_

- func: logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CPU, CUDA: logit_out

- func: sin(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: sin.out
  variants: function, method
- dispatch:
-   DefaultBackend: sin

- func: sin_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: sin.out
  variants: function, method
- dispatch:
-   DefaultBackend: sin_

- func: sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sin_out

- func: sinc(Tensor self) -> Tensor
+ structured_delegate: sinc.out
  variants: function, method
- dispatch:
-   DefaultBackend: sinc

- func: sinc_(Tensor(a!) self) -> Tensor(a!)
+ structured_delegate: sinc.out
  variants: function, method
- dispatch:
-   DefaultBackend: sinc_

- func: sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sinc_out

- func: sinh(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: sinh.out
  variants: function, method
- dispatch:
-   DefaultBackend: sinh

- func: sinh_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: sinh.out
  variants: function, method
- dispatch:
-   DefaultBackend: sinh_

- func: sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sinh_out

# Returns a copy of this `Variable` that is detached from its autograd graph.
# This method is OK to call if the `Variable` is a view.
@@ -3450,43 +3639,47 @@
# changing metadata of the detached tensor and expecting the original tensor to also
# be updated.
- func: detach(Tensor(a) self) -> Tensor(a)
  variants: function, method
  dispatch:
-   DefaultBackend: detach
+   CompositeExplicitAutograd: detach

# Like `detach()`, but modifies this `Variable` in-place. This method may
# only be called on non-view `Variable`s. You can use `is_view()` to check
# this. If this `Variable` is a view, throws an `std::runtime_error()`.
- func: detach_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
  dispatch:
-   DefaultBackend: detach_
+   CompositeExplicitAutograd: detach_

- func: size.int(Tensor self, int dim) -> int
  variants: function
+ device_check: NoCheck
  device_guard: False
  manual_cpp_binding: True

- func: size.Dimname(Tensor self, Dimname dim) -> int
  variants: function, method
+ device_check: NoCheck
  device_guard: False

-- func: slice.Tensor(Tensor(a) self, int dim=0, int? start=0, int? end=9223372036854775807, int step=1) -> Tensor(a)
+- func: slice.Tensor(Tensor(a) self, int dim=0, int? start=None, int? end=None, int step=1) -> Tensor(a)
  variants: function, method
+ device_check: NoCheck
  device_guard: False
  dispatch:
-   DefaultBackend: slice
+   CompositeExplicitAutograd: slice

- func: slice_backward(Tensor grad, int[] input_sizes, int dim, int start, int end, int step) -> Tensor
  variants: function
+ device_check: NoCheck
  device_guard: False

- func: slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
  variants: function, method
  dispatch:
-   DefaultBackend: slogdet
+   CompositeExplicitAutograd: slogdet

- func: smm(Tensor self, Tensor mat2) -> Tensor
  variants: function, method

# softmax allows positional dtype, unlike most operators, because kwonly is BC-breaking when loading jit models.
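
# Note on slice.Tensor above: the sentinel defaults `start=0` and
# `end=9223372036854775807` (INT64_MAX) are replaced by `start=None` and
# `end=None`, so "from the beginning" and "to the end" are now expressed
# through the optional type rather than through magic values.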

@@ -3507,150 +3700,176 @@
    CPU: softmax_backward_cpu
    CUDA: softmax_backward_cuda

- func: unsafe_split.Tensor(Tensor self, int split_size, int dim=0) -> Tensor[]
  variants: function, method
+ device_check: NoCheck
  device_guard: False
  dispatch:
-   DefaultBackend: unsafe_split
+   CompositeExplicitAutograd: unsafe_split

- func: split.Tensor(Tensor(a) self, int split_size, int dim=0) -> Tensor(a)[]
  variants: function, method
+ device_check: NoCheck
  device_guard: False
  dispatch:
-   DefaultBackend: split
+   CompositeExplicitAutograd: split

- func: unsafe_split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]
  variants: function, method
+ device_check: NoCheck
  device_guard: False
  dispatch:
-   DefaultBackend: unsafe_split_with_sizes
+   CompositeExplicitAutograd: unsafe_split_with_sizes

- func: split_with_sizes(Tensor(a) self, int[] split_sizes, int dim=0) -> Tensor(a)[]
  variants: function, method
+ device_check: NoCheck
  device_guard: False
  dispatch:
-   DefaultBackend: split_with_sizes
+   CompositeExplicitAutograd: split_with_sizes

+- func: hsplit.int(Tensor(a) self, int sections) -> Tensor(a)[]
+ variants: function, method
+
+- func: hsplit.array(Tensor(a) self, int[] indices) -> Tensor(a)[]
+ variants: function, method
+
+- func: vsplit.int(Tensor(a) self, int sections) -> Tensor(a)[]
+ variants: function, method
+
+- func: vsplit.array(Tensor(a) self, int[] indices) -> Tensor(a)[]
+ variants: function, method
+
+- func: dsplit.int(Tensor(a) self, int sections) -> Tensor(a)[]
+ variants: function, method
+
+- func: dsplit.array(Tensor(a) self, int[] indices) -> Tensor(a)[]
+ variants: function, method
+
- func: squeeze(Tensor(a) self) -> Tensor(a)
  variants: function, method
+ device_check: NoCheck
  device_guard: False
  dispatch:
-   DefaultBackend: squeeze
+   CompositeExplicitAutograd: squeeze

- func: squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
  variants: function, method
+ device_check: NoCheck
  device_guard: False
  dispatch:
-   DefaultBackend: squeeze
+   CompositeExplicitAutograd: squeeze

- func: squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
  variants: function, method
+ device_check: NoCheck
  device_guard: False

- func: squeeze_(Tensor(a!) self) -> Tensor(a!)
  variants: method
+ device_check: NoCheck
  device_guard: False
  dispatch:
-   DefaultBackend: squeeze_
+   CompositeExplicitAutograd: squeeze_

- func: squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)
  variants: method
+ device_check: NoCheck
  device_guard: False
  dispatch:
-   DefaultBackend: squeeze_
+   CompositeExplicitAutograd: squeeze_

- func: squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)
  variants: method
+ device_check: NoCheck
  device_guard: False

- func: sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  variants: function, method

- func: sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CPU: _sspaddmm_out_only_sparse
    CUDA: _sspaddmm_out_only_sparse_cuda
    SparseCPU: _sspaddmm_out_cpu
    SparseCUDA: _sspaddmm_out_cuda

- func: stack(Tensor[] tensors, int dim=0) -> Tensor
  dispatch:
-   DefaultBackend: stack
+   CompositeExplicitAutograd: stack

- func: stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
-   DefaultBackend: stack_out
+   CompositeExplicitAutograd: stack_out
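
# The hsplit/vsplit/dsplit entries above are new NumPy-style split variants:
# the `.int` overloads take a number of equal sections, the `.array`
# overloads take explicit split indices, and neither declares a dispatch
# block, so they are composite ops expressed in terms of existing kernels.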

- func: _stack(Tensor[] tensors, int dim=0) -> Tensor
  dispatch: # match the backends supported by _cat
    CPU: _stack_cpu
-   DefaultBackend: _stack
+   CompositeExplicitAutograd: _stack

- func: _stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
  dispatch: # match the backends supported by _cat_out
    CPU: _stack_out_cpu
-   DefaultBackend: _stack_out
+   CompositeExplicitAutograd: _stack_out

- func: hstack(Tensor[] tensors) -> Tensor

- func: hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: vstack(Tensor[] tensors) -> Tensor

- func: vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: dstack(Tensor[] tensors) -> Tensor

- func: dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

# The signature is designed to be consistent with librosa except that it is
# missing the `pad_mode` and `center` arguments, which are taken care of at
# `torch.functional.py`. They shall be moved here once we have mapping between
# Python strings and C++ Enum in codegen.
- func: stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  variants: function, method

- func: istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  variants: function, method

- func: stride.int(Tensor self, int dim) -> int
  variants: function
+ device_check: NoCheck
  device_guard: False
  manual_cpp_binding: True

- func: stride.Dimname(Tensor self, Dimname dim) -> int
  variants: function, method
+ device_check: NoCheck
  device_guard: False

- func: sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: sum

- func: sum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: sum

- func: sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method

- func: sum.IntList_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: sum_out

- func: sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator

- func: nansum(Tensor self, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
  dispatch:
    CPU, CUDA: nansum
@@ -3659,199 +3878,255 @@
  variants: function, method
  dispatch:
    CPU, CUDA: nansum

- func: nansum.IntList_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  dispatch:
    CPU, CUDA: nansum_out

- func: sum_to_size(Tensor self, int[] size) -> Tensor
  variants: method
+ device_check: NoCheck
  device_guard: False

- func: sqrt(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: sqrt.out
  variants: function, method
  dispatch:
-   CPU, CUDA: sqrt
    SparseCPU, SparseCUDA: sqrt_sparse

- func: sqrt_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: sqrt.out
  variants: function, method
- dispatch:
-   CPU, CUDA: sqrt_

- func: sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sqrt_out
    SparseCPU, SparseCUDA: sqrt_out_sparse

- func: square(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method

- func: square_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
  variants: function, method

+- func: square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+   CPU, CUDA: square_out
+
- func: std(Tensor self, bool unbiased=True) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
- dispatch:
-   CPU, CUDA: std

- func: std.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
+
+- func: std.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> Tensor
+ device_check: NoCheck # TensorIterator
+ variants: function, method
  dispatch:
    CPU, CUDA: std

- func: std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
+ device_check: NoCheck # TensorIterator
  variants: function
- dispatch:
-   CPU, CUDA: std_mean

- func: std_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
+ device_check: NoCheck # TensorIterator
  variants: function
+
+- func: std_mean.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
+ device_check: NoCheck # TensorIterator
+ variants: function
  dispatch:
    CPU, CUDA: std_mean

- func: std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
+ device_check: NoCheck # TensorIterator
  variants: function

+- func: std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
+ device_check: NoCheck # TensorIterator
+ variants: function
+
- func: std.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator

+- func: std.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: std_out

- func: std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method

- func: std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator

+- func: std.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> Tensor
+ device_check: NoCheck # TensorIterator
+ variants: function, method
+
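
# The new `.correction` overloads above generalize the boolean `unbiased`
# flag: the divisor in the variance computation is N - correction, so
# correction=0 matches unbiased=False and correction=1 (Bessel's correction)
# matches unbiased=True, while `int[1]? dim` also accepts None for an
# all-dimensions reduction.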

+- func: std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ variants: function
+
- func: prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: prod

- func: prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: prod

- func: prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: prod_out

- func: prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method

- func: prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator

- func: t(Tensor(a) self) -> Tensor(a)
+ device_check: NoCheck
  device_guard: False
  variants: function, method
  dispatch:
-   DefaultBackend: t
+   CompositeExplicitAutograd: t

- func: t_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck
  device_guard: False
  variants: method
  dispatch:
-   DefaultBackend: t_
+   CompositeExplicitAutograd: t_

- func: tan(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: tan.out
  variants: function, method
- dispatch:
-   DefaultBackend: tan

- func: tan_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: tan.out
  variants: function, method
- dispatch:
-   DefaultBackend: tan_

- func: tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: tan_out

- func: tanh(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: tanh.out
  variants: function, method
  dispatch:
-   CPU, CUDA: tanh
    QuantizedCPU: tanh_quantized_cpu
+   MkldnnCPU: mkldnn_tanh

- func: tanh_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: tanh.out
  variants: function, method
  dispatch:
-   DefaultBackend: tanh_
-
+   MkldnnCPU: mkldnn_tanh_
- func: tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: tanh_out

- func: tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor
  variants: function

- func: tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  variants: function
  dispatch:
    CPU, CUDA: tensordot_out

# TODO: namespace threshold in 'nn'
- func: threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function
+ structured_delegate: threshold.out
  dispatch:
-   CPU: threshold
-   CUDA: threshold_cuda
    QuantizedCPU: threshold_quantized_cpu

- func: threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
  variants: function
- dispatch:
-   CPU: threshold_
-   CUDA: threshold__cuda
+ structured_delegate: threshold.out

- func: threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
  dispatch:
-   CPU: threshold_out
-   CUDA: threshold_out_cuda
+   CPU, CUDA: threshold_out

+- func: threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
+ structured: True
+ structured_inherits: TensorIteratorBase
+ dispatch:
+   CPU, CUDA: threshold_backward_out
+
- func: threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor
  variants: function
+ structured_delegate: threshold_backward.grad_input
  dispatch:
-   CPU: threshold_backward
-   CUDA: threshold_backward_cuda
+   MkldnnCPU: mkldnn_relu_backward

- func: tile(Tensor self, int[] dims) -> Tensor
  variants: function, method

- func: transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
  variants: function, method
+ device_check: NoCheck
  device_guard: False
  dispatch:
-   DefaultBackend: transpose
+   CompositeExplicitAutograd: transpose

- func: transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
  variants: function, method
+ device_check: NoCheck
  device_guard: False

- func: _mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor
+ device_check: NoCheck
  device_guard: False
  dispatch:
    MkldnnCPU: mkldnn_transpose

- func: transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
  variants: method
+ device_check: NoCheck
  device_guard: False
  dispatch:
-   DefaultBackend: transpose_
+   CompositeExplicitAutograd: transpose_

- func: _mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
+ device_check: NoCheck
  device_guard: False
  dispatch:
    MkldnnCPU: mkldnn_transpose_

- func: one_hot(Tensor self, int num_classes=-1) -> Tensor
@@ -3879,34 +4154,40 @@
# default int[] value [0,1] should not add space after comma, since codegen parser uses ', ' to split args
- func: rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
  variants: function, method
  dispatch:
-   DefaultBackend: rot90
+   CompositeExplicitAutograd: rot90

- func: trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor

- func: trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor

- func: _trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
  dispatch:
-   DefaultBackend: _trilinear
+   CompositeExplicitAutograd: _trilinear

- func: triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor

- func: trunc(Tensor self) -> Tensor
+ structured_delegate: trunc.out
+ device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
-   DefaultBackend: trunc
+   CompositeExplicitAutograd: trunc

- func: trunc_(Tensor(a!) self) -> Tensor(a!)
+ structured_delegate: trunc.out
+ device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
-   DefaultBackend: trunc_
+   CompositeExplicitAutograd: trunc_
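
# threshold above shows the structured pattern extended to a backward op:
# the new threshold_backward.grad_input overload is the structured out=
# variant (writing into `grad_input`), the per-device threshold_backward
# kernels collapse into it, and the functional form keeps only a
# `structured_delegate` plus the MkldnnCPU special case.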

- func: trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
+ device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: trunc_out

# Alias for trunc
- func: fix(Tensor self) -> Tensor
@@ -3914,11 +4195,10 @@
- func: fix_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method

- func: fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: type_as(Tensor self, Tensor other) -> Tensor
  variants: method

- func: _has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool
@@ -3958,68 +4238,97 @@
    CPU: _unique2_cpu
    CUDA: _unique2_cuda

- func: _unsafe_view(Tensor self, int[] size) -> Tensor
  dispatch:
-   DefaultBackend: _unsafe_view
+   CompositeExplicitAutograd: _unsafe_view

- func: unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
  variants: function, method
+ device_check: NoCheck
  device_guard: False
  dispatch:
-   DefaultBackend: unsqueeze
+   CompositeExplicitAutograd: unsqueeze

- func: unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)
  variants: method
+ device_check: NoCheck
  device_guard: False
  dispatch:
-   DefaultBackend: unsqueeze_
+   CompositeExplicitAutograd: unsqueeze_

- func: vander(Tensor x, int? N=None, bool increasing=False) -> Tensor

- func: var(Tensor self, bool unbiased=True) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
- dispatch:
-   CPU, CUDA: var

- func: var.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
+
+- func: var.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> Tensor
+ device_check: NoCheck # TensorIterator
+ variants: function, method
  dispatch:
    CPU, CUDA: var

- func: var.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator

+- func: var.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: var_out

- func: var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method

- func: var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator

+- func: var.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> Tensor
+ device_check: NoCheck # TensorIterator
+ variants: function, method
+
+- func: var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ variants: function
+

- func: var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
+ device_check: NoCheck # TensorIterator
  variants: function
- dispatch:
-   CPU, CUDA: var_mean

- func: var_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
+ device_check: NoCheck # TensorIterator
  variants: function
+
+- func: var_mean.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
+ device_check: NoCheck # TensorIterator
+ variants: function
  dispatch:
    CPU, CUDA: var_mean

- func: var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
+ device_check: NoCheck # TensorIterator
  variants: function

+- func: var_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
+ device_check: NoCheck # TensorIterator
+ variants: function
+
- func: view_as(Tensor(a) self, Tensor other) -> Tensor(a)
  variants: method
+ device_check: NoCheck
  device_guard: False

# we define both of these because 'where' does the broadcast and '_s_where' doesn't;
# this allows us to implicitly calculate the broadcast derivative, while only dealing with the
# _s_where derivative.
- func: where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method

- func: where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor
  variants: function
@@ -4028,10 +4337,11 @@
- func: where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor
  variants: function

- func: where(Tensor condition) -> Tensor[]
+ device_check: NoCheck # TensorIterator
  variants: function

- func: _s_where(Tensor condition, Tensor self, Tensor other) -> Tensor
  variants: function
  dispatch:
@@ -4057,21 +4367,18 @@
- func: _weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
  variants: function

- func: zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck
  device_guard: False

- func: zeros(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: zeros.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: _standard_gamma_grad(Tensor self, Tensor output) -> Tensor
  variants: function
  dispatch:
    CPU: _standard_gamma_grad_cpu
@@ -4093,15 +4400,17 @@
  dispatch:
    CPU: _s_dirichlet_cpu
    CUDA: _s_dirichlet_cuda

- func: poisson(Tensor self, Generator? generator=None) -> Tensor
+ device_check: NoCheck # TensorIterator
  dispatch:
    CPU: _s_poisson_cpu
    CUDA: _s_poisson_cuda

- func: binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
+ device_check: NoCheck # TensorIterator
  dispatch:
    CPU: _s_binomial_cpu
    CUDA: _s_binomial_cuda

# When more variants get ported to native, this dispatch will get more
@@ -4120,11 +4429,11 @@
- func: _sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor

- func: _sparse_sum.dim(Tensor self, int[1] dim) -> Tensor
  dispatch:
-   DefaultBackend: _sparse_sum
+   CompositeExplicitAutograd: _sparse_sum

- func: _sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor

- func: _sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor
  dispatch:
@@ -4162,127 +4471,158 @@
  dispatch:
    SparseCPU: log_softmax_backward_sparse_cpu
    SparseCUDA: log_softmax_backward_sparse_cuda

- func: norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
-   DefaultBackend: norm
+   CPU, CUDA, SparseCPU, SparseCUDA: norm

- func: norm.Scalar(Tensor self, Scalar p=2) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
-   DefaultBackend: norm
+   CPU, CUDA, SparseCPU, SparseCUDA: norm

- func: norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
-   DefaultBackend: norm
+   CPU, CUDA, SparseCPU, SparseCUDA: norm

- func: norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
-   DefaultBackend: norm
+   CPU, CUDA, SparseCPU, SparseCUDA: norm

- func: norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: norm_out

- func: norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: norm_out

+# These four redispatch in their implementation, so OK to be CompositeImplicitAutograd
- func: norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method

- func: norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method

- func: norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator

- func: norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator

+- func: frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
+ variants: method, function
+ dispatch:
+   CompositeExplicitAutograd: frexp
+
+- func: frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
+ dispatch:
+   CPU, CUDA: frexp_out
+
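
# frexp above is a new op in 0.8.0: it decomposes self into a mantissa and an
# integer exponent with self = mantissa * 2 ** exponent, mirroring the C
# frexp function; the Tensor_out overload writes both results into
# caller-supplied tensors.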

- func: frobenius_norm(Tensor self) -> Tensor
  variants: function

- func: frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
  variants: function

- func: frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  variants: function

- func: nuclear_norm(Tensor self, bool keepdim=False) -> Tensor
  variants: function

- func: nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  variants: function

- func: nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor
  variants: function

- func: nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
  variants: function

- func: clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
  variants: function, method
  dispatch:
-   CPU, CUDA: clone
+   CompositeExplicitAutograd: clone
    SparseCPU, SparseCUDA: clone_sparse
    MkldnnCPU: mkldnn_clone
    QuantizedCPU, QuantizedCUDA: quantized_clone

+- func: positive(Tensor(a) self) -> Tensor(a)
+ variants: function, method
+
- func: resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
+ use_const_ref_for_mutable_tensors: True
  variants: function, method
  dispatch:
-   DefaultBackend: resize_as_
+   CompositeExplicitAutograd: resize_as_

+- func: resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
+ use_const_ref_for_mutable_tensors: True
+ variants: function
+ dispatch:
+   SparseCPU, SparseCUDA: resize_as_sparse_
+   SparseCsrCPU: resize_as_sparse_csr_
+
- func: zero_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CPU, CUDA: zero_
+   Meta: zero_meta_
    SparseCPU, SparseCUDA: zero_sparse_
    MkldnnCPU: mkldnn_zero_

- func: sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sub_out
    SparseCPU, SparseCUDA: sub_out_sparse

- func: sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
+ structured_delegate: sub.out
  dispatch:
-   CPU, CUDA: sub
    SparseCPU, SparseCUDA: sub_sparse

- func: sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
  variants: method
+ structured_delegate: sub.out
  dispatch:
-   CPU, CUDA: sub_
    SparseCPU, SparseCUDA: sub_sparse_

# For C++ only, until we have conversion from C++ numbers to Tensor
- func: sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+ device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
-   DefaultBackend: sub
+   CompositeExplicitAutograd: sub

- func: sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
-   DefaultBackend: sub_
+   CompositeExplicitAutograd: sub_

# subtract, alias for sub
- func: subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
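
# zero_ above gains a `Meta: zero_meta_` kernel, the first appearance of the
# Meta dispatch key in this section; meta tensors carry shapes and dtypes but
# no storage, letting shape inference run without touching real data.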
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor variants: function, method - func: subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) @@ -4294,59 +4634,66 @@ - func: subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) variants: method - func: rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + device_check: NoCheck # TensorIterator variants: function dispatch: CPU, CUDA: rsub - func: heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + structured: True + structured_inherits: TensorIteratorBase + device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: heaviside_out - func: heaviside(Tensor self, Tensor values) -> Tensor + device_check: NoCheck # TensorIterator variants: function, method + structured_delegate: heaviside.out - func: heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator variants: method + structured_delegate: heaviside.out # For C++ only, until we have conversion from C++ numbers to Tensor - func: rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + device_check: NoCheck # TensorIterator variants: function dispatch: - DefaultBackend: rsub + CompositeExplicitAutograd: rsub # Functionally the same as addmm, but we give it a different derivative formula # that doesn't propagate gradients to non-present entries on sparse. - func: _sparse_addmm(Tensor self, Tensor sparse, Tensor dense, *, Scalar beta=1, Scalar alpha=1) -> Tensor dispatch: - DefaultBackend: _sparse_addmm + CompositeExplicitAutograd: _sparse_addmm - func: addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + structured: True dispatch: - CPU: addmm_cpu_out + CPU: addmm_out_cpu CUDA: addmm_out_cuda SparseCPU: addmm_out_sparse_dense_cpu SparseCUDA: addmm_out_sparse_dense_cuda + SparseCsrCPU: addmm_out_sparse_csr_dense_cpu - func: addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + structured_delegate: addmm.out variants: function, method dispatch: - CPU: addmm_cpu - CUDA: addmm_cuda SparseCPU: addmm_sparse_dense_cpu SparseCUDA: addmm_sparse_dense_cuda + SparseCsrCPU: addmm_sparse_csr_dense_cpu - func: addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + structured_delegate: addmm.out variants: method dispatch: - CPU: addmm_cpu_ - CUDA: addmm__cuda # Warning! For whatever reason, the inplace sparse addmm is NON # broadcasting SparseCPU: s_addmm_sparse_dense_cpu_ SparseCUDA: s_addmm_sparse_dense_cuda_ @@ -4456,24 +4803,24 @@ # For other methods that return outputs that share storage with inputs, i.e., # `indices()` and `_indices()`. We mark their outputs as non-differentiable, so # the view relation is not tracked by autograd, but the version counter is still # shared. In other words, their outputs are non-differentiable views of the # sparse tensor. - # FIXME: would be nicer if TensorOptions was optional based; not adding default arguments for options given # the default would never make sense. + +- func: _sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? 
layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + +- func: _sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + - func: sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: _sparse_coo_tensor_unsafe(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: _validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size) -> () - func: _sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor dispatch: @@ -4482,15 +4829,17 @@ - func: _sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, int[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor dispatch: SparseCPU, SparseCUDA: new_with_dims_and_tensor_sparse - func: sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) + use_const_ref_for_mutable_tensors: True variants: method dispatch: SparseCPU, SparseCUDA: sparse_resize_ - func: sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) + use_const_ref_for_mutable_tensors: True variants: method dispatch: SparseCPU, SparseCUDA: sparse_resize_and_clear_ - func: sparse_mask(Tensor self, Tensor mask) -> Tensor @@ -4500,112 +4849,146 @@ SparseCUDA: sparse_mask_cuda - func: to_dense(Tensor self, ScalarType? dtype=None) -> Tensor variants: method dispatch: - SparseCPU, SparseCUDA: sparse_to_dense + SparseCPU, SparseCUDA, SparseCsrCPU: sparse_to_dense MkldnnCPU: mkldnn_to_dense - func: to_dense_backward(Tensor grad, Tensor input) -> Tensor - func: sparse_dim(Tensor self) -> int variants: method dispatch: SparseCPU, SparseCUDA: sparse_dim_sparse + device_check: NoCheck device_guard: False # legacy method - func: _dimI(Tensor self) -> int variants: method dispatch: SparseCPU, SparseCUDA: sparse_dim_sparse + device_check: NoCheck device_guard: False - func: dense_dim(Tensor self) -> int variants: method dispatch: SparseCPU, SparseCUDA: dense_dim_sparse + device_check: NoCheck device_guard: False # legacy method - func: _dimV(Tensor self) -> int variants: method dispatch: SparseCPU, SparseCUDA: dense_dim_sparse + device_check: NoCheck device_guard: False - func: _nnz(Tensor self) -> int variants: method dispatch: SparseCPU, SparseCUDA: _nnz_sparse + SparseCsrCPU: _nnz_sparse_csr + device_check: NoCheck device_guard: False -- func: coalesce(Tensor self) -> Tensor +# NOTE: [ coalesce autograd ] +# coalesce returns self directly for already coalesced sparse tensors. 
+# This means coalesce cannot have a derivative registered, otherwise it creates +# circular references in the autograd graph (see gh-52874). +# Instead, the derivative is registered on the slow-path "_coalesce" +- func: coalesce(Tensor(a) self) -> Tensor(a) variants: method + +- func: _coalesce(Tensor self) -> Tensor dispatch: - SparseCPU: coalesce_sparse_cpu - SparseCUDA: coalesce_sparse_cuda + SparseCPU: _coalesce_sparse_cpu + SparseCUDA: _coalesce_sparse_cuda - func: is_coalesced(Tensor self) -> bool variants: method dispatch: SparseCPU, SparseCUDA: is_coalesced_sparse + device_check: NoCheck device_guard: False - func: _indices(Tensor(a) self) -> Tensor(a) variants: method dispatch: SparseCPU, SparseCUDA: _indices_sparse + device_check: NoCheck device_guard: False - func: _values(Tensor(a) self) -> Tensor(a) variants: method dispatch: SparseCPU, SparseCUDA: _values_sparse + device_check: NoCheck device_guard: False # This method doesn't do any check but only directly sets the flag. So it can be # a bit unsafe. Similar to _indices and _values, this is useful for implementing # custom sparse operations in Python/C++ extension. - func: _coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!) variants: method dispatch: SparseCPU, SparseCUDA: _coalesced_sparse_ + device_check: NoCheck device_guard: False - func: indices(Tensor(a) self) -> Tensor(a) variants: method dispatch: SparseCPU, SparseCUDA: indices_sparse + device_check: NoCheck device_guard: False - func: values(Tensor(a) self) -> Tensor(a) variants: method dispatch: SparseCPU, SparseCUDA: values_sparse + SparseCsrCPU: values_sparse_csr + device_check: NoCheck device_guard: False +- func: crow_indices(Tensor(a) self) -> Tensor(a) + variants: method + dispatch: + SparseCsrCPU: crow_indices_sparse_csr + device_check: NoCheck + device_guard: False + +- func: col_indices(Tensor(a) self) -> Tensor(a) + variants: method + dispatch: + SparseCsrCPU: col_indices_sparse_csr + device_check: NoCheck + device_guard: False + - func: hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: SparseCPU: hspmm_out_sparse_cpu SparseCUDA: hspmm_out_sparse_cuda - func: hspmm(Tensor mat1, Tensor mat2) -> Tensor dispatch: SparseCPU: hspmm_sparse_cpu SparseCUDA: hspmm_sparse_cuda - func: copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) 
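
The NOTE above explains the new split: `coalesce` returns `self` when the tensor is already coalesced, so its derivative must live on the slow-path `_coalesce`. A small sketch of the observable behavior (Python API, for illustration):

    import torch

    i = torch.tensor([[0, 0, 1], [0, 0, 2]])
    v = torch.tensor([1.0, 2.0, 3.0])
    s = torch.sparse_coo_tensor(i, v, (2, 3))  # duplicate entry at (0, 0)
    c = s.coalesce()                           # duplicates summed: 3.0 at (0, 0)
    assert c.is_coalesced()
    # Coalescing again is a no-op that may hand back the same tensor,
    # which is why a derivative on coalesce itself would be circular.
    assert c.coalesce().is_coalesced()
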
+ device_check: NoCheck # Allows copy into different device variants: function dispatch: SparseCPU, SparseCUDA: copy_sparse_ - func: unbind.int(Tensor(a) self, int dim=0) -> Tensor(a)[] variants: function, method dispatch: - DefaultBackend: unbind + CompositeExplicitAutograd: unbind - func: unbind.Dimname(Tensor(a) self, Dimname dim) -> Tensor(a)[] variants: function, method - func: to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor @@ -4653,11 +5036,12 @@ CPU: quantize_per_channel_cpu - func: dequantize.self(Tensor self) -> Tensor variants: function, method dispatch: - QuantizedCPU, QuantizedCUDA: dequantize_quant + CPU: dequantize_cpu + QuantizedCPU, QuantizedCUDA: dequantize_quantized_cpu - func: dequantize.tensors(Tensor[] tensors) -> Tensor[] variants: function dispatch: QuantizedCPU: dequantize_tensors_quantized_cpu @@ -4686,10 +5070,11 @@ variants: function, method dispatch: QuantizedCPU, QuantizedCUDA: q_per_channel_axis - func: int_repr(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator variants: function, method dispatch: QuantizedCPU: int_repr_quantized_cpu QuantizedCUDA: int_repr_quantized_cuda @@ -4706,13 +5091,12 @@ variants: method dispatch: QuantizedCPU, QuantizedCUDA: qscheme_quant - func: fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor + device_check: NoCheck # TensorIterator variants: function - dispatch: - Math: fake_quantize_per_tensor_affine - func: fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask) variants: function dispatch: CPU, CUDA: fake_quantize_per_tensor_affine_cachemask @@ -4727,13 +5111,12 @@ - func: _fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor) variants: function - func: fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor + device_check: NoCheck # TensorIterator variants: function - dispatch: - Math: fake_quantize_per_channel_affine - func: fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask) variants: function dispatch: CPU, CUDA: fake_quantize_per_channel_affine_cachemask @@ -4760,24 +5143,27 @@ # to(Device) must not exist because all constructors of Device also works for # TensorOptions. Otherwise, an ambiguity error is thrown. # See NOTE [ TensorOptions Constructors ]. - func: to.dtype_layout(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures variants: method + device_check: NoCheck device_guard: False - func: to.device(Tensor self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor variants: method + device_check: NoCheck device_guard: False - func: to.dtype(Tensor self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor variants: method + device_check: NoCheck device_guard: False - func: to.other(Tensor self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? 
memory_format=None) -> Tensor variants: method + device_check: NoCheck device_guard: False - func: meshgrid(Tensor[] tensors) -> Tensor[] - func: cartesian_prod(Tensor[] tensors) -> Tensor @@ -4813,33 +5199,28 @@ CUDA: _local_scalar_dense_cuda variants: function # Fused RNN kernels - func: _thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CUDA: _thnn_fused_lstm_cell_cuda - func: _thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CUDA: _thnn_fused_lstm_cell_backward_cuda - func: _thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: _thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CUDA: _thnn_fused_gru_cell_cuda - func: _thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) dispatch: CUDA: _thnn_fused_gru_cell_backward_cuda - func: _thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures # RNN cells and layers - func: lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) - func: lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor) @@ -4855,20 +5236,16 @@ - func: rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) - func: rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) - func: lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? 
b_hh=None) -> Tensor - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures # Quantized RNN layer registration has been moved to C10 dispatch in `RNN.cpp` # Quantized RNN layers # - func: quantized_lstm(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor) @@ -4895,34 +5272,37 @@ - func: quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor # PackedSequence utilities - func: _pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor) dispatch: - DefaultBackend: _pack_padded_sequence + CompositeExplicitAutograd: _pack_padded_sequence - func: _pack_padded_sequence_backward(Tensor grad, int[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor - func: _pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor) # wrappers for legacy TH methods - func: set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!) variants: method + device_check: NoCheck device_guard: False dispatch: CPU, CUDA: set_ - func: set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!) variants: method + device_check: NoCheck device_guard: False dispatch: CPU: set_storage_cpu_ CUDA: set_storage_cuda_ QuantizedCPU, QuantizedCUDA: set_storage_quantized_ - func: set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!) variants: method + device_check: NoCheck device_guard: False dispatch: CPU, CUDA: set_tensor_ - func: set_(Tensor(a!) self) -> Tensor(a!) @@ -4931,30 +5311,35 @@ CPU: set_cpu_ CUDA: set_cuda_ - func: is_set_to(Tensor self, Tensor tensor) -> bool variants: method + device_check: NoCheck device_guard: False dispatch: CPU, CUDA: is_set_to - func: masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU: masked_fill__cpu CUDA: masked_fill__cuda - func: masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor + device_check: NoCheck # TensorIterator variants: function, method - func: masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU: masked_fill__cpu CUDA: masked_fill__cuda - func: masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor + device_check: NoCheck # TensorIterator variants: function, method - func: masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!) variants: method dispatch: @@ -4964,72 +5349,90 @@ - func: masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor variants: function, method - func: view(Tensor(a) self, int[] size) -> Tensor(a) variants: method + device_check: NoCheck device_guard: False dispatch: - CPU, CUDA, QuantizedCPU, QuantizedCUDA: view + CPU, CUDA, Meta, QuantizedCPU, QuantizedCUDA: view MkldnnCPU: mkldnn_view # Warning: If you want to change the name or overload name of this # operator, you might also want to change the `isBlockListedSchema` # function in `torch/csrc/jit/frontend/schema_matching.cpp`.
# The name and overload name of this operator is hardcoded in that # function in order to workaround a bug: # https://github.com/pytorch/pytorch/issues/47964 - func: view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a) variants: method + device_check: NoCheck device_guard: False dispatch: - DefaultBackend: view_dtype + CompositeExplicitAutograd: view_dtype - func: put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!) variants: method dispatch: - CPU: legacy::cpu::_th_put_ - CUDA: legacy::cuda::_th_put_ + CPU, CUDA: put_ +- func: put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor + variants: function, method + - func: index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) variants: method + +- func: index_add_.alpha(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha) -> Tensor(a!) + variants: method dispatch: CPU: index_add_cpu_ CUDA: index_add_cuda_ - func: index_add(Tensor self, int dim, Tensor index, Tensor source) -> Tensor variants: function, method -- func: index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor +- func: index_add.alpha(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha) -> Tensor variants: function, method +- func: index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor + variants: function, method + - func: index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU: index_fill_ CUDA: index_fill_ - func: index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor + device_check: NoCheck # TensorIterator variants: function, method - func: index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: index_fill_ - func: index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor + device_check: NoCheck # TensorIterator variants: function, method - func: index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method - func: index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method - func: index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor + device_check: NoCheck # TensorIterator variants: function, method - func: index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor + device_check: NoCheck # TensorIterator variants: function, method - func: scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) variants: method dispatch: @@ -5072,172 +5475,201 @@ - func: scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor variants: function, method - func: eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: - DefaultBackend: eq_ + CompositeExplicitAutograd: eq_ - func: eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: - DefaultBackend: eq_ + CompositeExplicitAutograd: eq_ - func: bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
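
Among the entries above, `index_add_` gains an `alpha` overload that scales `source` before accumulating. A sketch, assuming the `alpha` keyword is exposed to Python the way the new overload suggests:

    import torch

    x = torch.zeros(3, 2)
    idx = torch.tensor([0, 2])
    src = torch.ones(2, 2)
    x.index_add_(0, idx, src, alpha=3)  # rows 0 and 2 receive 3 * src
    assert torch.equal(x[0], torch.tensor([3.0, 3.0]))
    assert torch.equal(x[1], torch.tensor([0.0, 0.0]))
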
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator variants: function dispatch: CPU, CUDA: bitwise_and_out - func: bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator variants: function dispatch: CPU, CUDA: bitwise_and_out - func: bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function - func: bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function - func: bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method - func: bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method - func: __and__.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function - func: __and__.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function - func: __iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method - func: __iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method - func: bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator variants: function dispatch: CPU, CUDA: bitwise_or_out - func: bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator variants: function dispatch: CPU, CUDA: bitwise_or_out - func: bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function - func: bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function - func: bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method - func: bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method - func: __or__.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function - func: __or__.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function - func: __ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method - func: __ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method - func: bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator variants: function dispatch: CPU, CUDA: bitwise_xor_out - func: bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator variants: function dispatch: CPU, CUDA: bitwise_xor_out - func: bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function - func: bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function - func: bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method - func: bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method - func: __xor__.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function - func: __xor__.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function - func: __ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method - func: __ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method - func: __lshift__.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: __lshift__ - func: __lshift__.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: __lshift__ - func: __ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: __ilshift__ - func: __ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: __ilshift__ - func: __rshift__.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: __rshift__ - func: __rshift__.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: __rshift__ - func: __irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: __irshift__ - func: __irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: __irshift__ -- func: atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) - variants: method - dispatch: - CPU, CUDA: atan2_ - - func: tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) variants: method dispatch: CPU: tril_cpu_ CUDA: tril_cuda_ @@ -5247,163 +5679,176 @@ dispatch: CPU: triu_cpu_ CUDA: triu_cuda_ - func: digamma_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: digamma.out variants: method - dispatch: - CPU, CUDA: digamma_ -- func: polygamma_(Tensor(a!) self, int n) -> Tensor(a!) - variants: method - - func: renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU: legacy::cpu::_th_renorm_ CUDA: legacy::cuda::_th_renorm_ - func: lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU: lerp_cpu_scalar_ CUDA: lerp_cuda_scalar_ - func: lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!) 
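
For reference, the `lerp` family being annotated here computes `start + weight * (end - start)`; a one-line Python check of that formula (illustrative):

    import torch

    start, end, w = torch.randn(4), torch.randn(4), 0.25
    assert torch.allclose(torch.lerp(start, end, w), start + w * (end - start))
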
+ device_check: NoCheck # TensorIterator variants: method dispatch: CPU: lerp_cpu_tensor_ CUDA: lerp_cuda_tensor_ - func: fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: fmod_ - func: fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: fmod_ - func: remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: remainder_ - func: remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: remainder_ - func: addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) variants: method dispatch: CPU, CUDA: addbmm_ - func: addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CPU, CUDA: addbmm_out - func: addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor variants: method, function dispatch: CPU, CUDA: addbmm - func: addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: - DefaultBackend: addcdiv_ + CompositeExplicitAutograd: addcdiv_ - func: random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: random_ + Meta: random_meta_ - func: random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: random_ + Meta: random_meta_ - func: random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: random_ + Meta: random_meta_ - func: uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: uniform_ + Meta: uniform_meta_ - func: cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: cauchy_ - func: log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: log_normal_ - func: exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: exponential_ - func: geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: geometric_ # wrappers for TH functions - func: diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) 
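
`addcdiv` above keeps its elementwise definition `self + value * tensor1 / tensor2` while moving to `CompositeExplicitAutograd`, and the new `Meta:` keys on `random_`/`uniform_` register shape-only kernels for the meta device. A quick Python check of the addcdiv formula (illustrative):

    import torch

    x, t1, t2 = torch.ones(3), torch.randn(3), torch.rand(3) + 1
    assert torch.allclose(torch.addcdiv(x, t1, t2, value=0.5), x + 0.5 * t1 / t2)
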
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CPU: diag_cpu_out CUDA: diag_cuda_out - func: diag(Tensor self, int diagonal=0) -> Tensor variants: method, function dispatch: - DefaultBackend: diag + CompositeExplicitAutograd: diag - func: diag_backward(Tensor grad, int[] input_sizes, int diagonal) -> Tensor variants: function + device_check: NoCheck device_guard: False - func: cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CPU, CUDA: cross_out - func: cross(Tensor self, Tensor other, int? dim=None) -> Tensor variants: method, function dispatch: CPU, CUDA: cross - func: triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CPU: triu_cpu_out CUDA: triu_cuda_out - func: triu(Tensor self, int diagonal=0) -> Tensor variants: method, function dispatch: - DefaultBackend: triu + CompositeExplicitAutograd: triu - func: tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CPU: tril_cpu_out CUDA: tril_cuda_out - func: tril(Tensor self, int diagonal=0) -> Tensor variants: method, function dispatch: - DefaultBackend: tril + CompositeExplicitAutograd: tril - func: tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor dispatch: CPU: tril_indices_cpu CUDA: tril_indices_cuda @@ -5419,55 +5864,58 @@ CPU: trace_cpu CUDA: trace_cuda - func: trace_backward(Tensor grad, int[] sizes) -> Tensor variants: function + device_check: NoCheck device_guard: False - func: ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: ne_out QuantizedCPU: ne_out_quantized_cpu - func: ne.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: ne QuantizedCPU: ne_quantized_cpu - func: ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: ne_out QuantizedCPU: ne_out_quantized_cpu - func: ne.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: ne QuantizedCPU: ne_quantized_cpu - func: ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: - DefaultBackend: ne_ + CompositeExplicitAutograd: ne_ - func: ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: - DefaultBackend: ne_ + CompositeExplicitAutograd: ne_ # not_equal, alias for torch.ne - func: not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: not_equal.Scalar(Tensor self, Scalar other) -> Tensor variants: method, function - func: not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: not_equal.Tensor(Tensor self, Tensor other) -> Tensor variants: method, function - func: not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) 
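
The `not_equal` entries above carry no dispatch keys because they are pure aliases for `ne`, resolved at the binding layer rather than in the dispatcher; e.g. (illustrative):

    import torch

    a = torch.tensor([1, 2, 3])
    b = torch.tensor([1, 0, 3])
    assert torch.equal(torch.not_equal(a, b), torch.ne(a, b))
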
@@ -5475,76 +5923,80 @@ - func: not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) variants: method - func: eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: eq_out QuantizedCPU: eq_out_quantized_cpu - func: eq.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: eq QuantizedCPU: eq_quantized_cpu - func: eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: eq_out QuantizedCPU: eq_out_quantized_cpu - func: eq.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: eq QuantizedCPU: eq_quantized_cpu - func: ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: ge_out QuantizedCPU: ge_out_quantized_cpu - func: ge.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: ge QuantizedCPU: ge_quantized_cpu - func: ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: ge_out QuantizedCPU: ge_out_quantized_cpu - func: ge.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: ge QuantizedCPU: ge_quantized_cpu - func: ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: - DefaultBackend: ge_ + CompositeExplicitAutograd: ge_ - func: ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: - DefaultBackend: ge_ + CompositeExplicitAutograd: ge_ # greater_equal, alias for torch.ge - func: greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: greater_equal.Scalar(Tensor self, Scalar other) -> Tensor variants: method, function - func: greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: greater_equal.Tensor(Tensor self, Tensor other) -> Tensor variants: method, function - func: greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) @@ -5552,52 +6004,54 @@ - func: greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) variants: method - func: le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: le_out QuantizedCPU: le_out_quantized_cpu - func: le.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: le QuantizedCPU: le_quantized_cpu - func: le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: le_out QuantizedCPU: le_out_quantized_cpu - func: le.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: le QuantizedCPU: le_quantized_cpu - func: le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: - DefaultBackend: le_ + CompositeExplicitAutograd: le_ - func: le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: - DefaultBackend: le_ + CompositeExplicitAutograd: le_ # less_equal, alias for torch.le - func: less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: less_equal.Scalar(Tensor self, Scalar other) -> Tensor variants: method, function - func: less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: less_equal.Tensor(Tensor self, Tensor other) -> Tensor variants: method, function - func: less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) @@ -5605,52 +6059,54 @@ - func: less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) variants: method - func: gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: gt_out QuantizedCPU: gt_out_quantized_cpu - func: gt.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: gt QuantizedCPU: gt_quantized_cpu - func: gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: gt_out QuantizedCPU: gt_out_quantized_cpu - func: gt.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: gt QuantizedCPU: gt_quantized_cpu - func: gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: - DefaultBackend: gt_ + CompositeExplicitAutograd: gt_ - func: gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: - DefaultBackend: gt_ + CompositeExplicitAutograd: gt_ # greater, alias for torch.gt - func: greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: greater.Scalar(Tensor self, Scalar other) -> Tensor variants: method, function - func: greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: greater.Tensor(Tensor self, Tensor other) -> Tensor variants: method, function - func: greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) @@ -5658,52 +6114,54 @@ - func: greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) variants: method - func: lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: lt_out QuantizedCPU: lt_out_quantized_cpu - func: lt.Scalar(Tensor self, Scalar other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: lt QuantizedCPU: lt_quantized_cpu - func: lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: lt_out QuantizedCPU: lt_out_quantized_cpu - func: lt.Tensor(Tensor self, Tensor other) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: lt QuantizedCPU: lt_quantized_cpu - func: lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: - DefaultBackend: lt_ + CompositeExplicitAutograd: lt_ - func: lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: - DefaultBackend: lt_ + CompositeExplicitAutograd: lt_ # less, alias for torch.lt - func: less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: less.Scalar(Tensor self, Scalar other) -> Tensor variants: method, function - func: less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: less.Tensor(Tensor self, Tensor other) -> Tensor variants: method, function - func: less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) @@ -5711,27 +6169,24 @@ - func: less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) variants: method - func: take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: - CPU: take_out_cpu - CUDA: take_out_cuda + CPU, CUDA: take_out - func: take(Tensor self, Tensor index) -> Tensor variants: method, function dispatch: - CPU: take_cpu - CUDA: take_cuda + CPU, CUDA: take -- func: take_backward(Tensor grad, Tensor input, Tensor index) -> Tensor - variants: function - device_guard: False +- func: take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) +- func: take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor + variants: method, function + - func: index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CPU: index_select_out_cpu_ CUDA: index_select_out_cuda - func: index_select(Tensor self, int dim, Tensor index) -> Tensor @@ -5741,21 +6196,20 @@ CUDA: index_select_cuda SparseCPU: index_select_sparse SparseCUDA: index_select_sparse - func: index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor variants: method, function - func: index_select_backward(Tensor grad, int[] self_sizes, int dim, Tensor index) -> Tensor variants: function + device_check: NoCheck device_guard: False - func: masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) 
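
`take_along_dim`, added above, is the counterpart of NumPy's `take_along_axis`; paired with `argsort` it reproduces `sort`, e.g. (Python API, illustrative):

    import torch

    x = torch.randn(2, 5)
    idx = x.argsort(dim=1)
    assert torch.equal(torch.take_along_dim(x, idx, dim=1), x.sort(dim=1).values)
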
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CPU: masked_select_out_cpu CUDA: masked_select_out_cuda - func: masked_select(Tensor self, Tensor mask) -> Tensor @@ -5764,14 +6218,14 @@ CPU: masked_select_cpu CUDA: masked_select_cuda - func: masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor variants: function + device_check: NoCheck device_guard: False - func: nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CPU: legacy::cpu::_th_nonzero_out CUDA: nonzero_out_cuda - func: nonzero(Tensor self) -> Tensor @@ -5782,11 +6236,10 @@ - func: nonzero_numpy(Tensor self) -> Tensor[] variants: method, function - func: gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CPU: gather_out_cpu_cuda CUDA: gather_out_cpu_cuda - func: gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor @@ -5794,47 +6247,52 @@ dispatch: CPU, CUDA: gather - func: gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor variants: function + device_check: NoCheck device_guard: False - func: gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - func: gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor variants: method, function - func: _gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor - func: addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: addcmul_out - func: addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: - DefaultBackend: addcmul + CompositeExplicitAutograd: addcmul - func: addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) + device_check: NoCheck # TensorIterator variants: method dispatch: - DefaultBackend: addcmul_ + CompositeExplicitAutograd: addcmul_ - func: addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: addcdiv_out - func: addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor + device_check: NoCheck # TensorIterator variants: method, function dispatch: - DefaultBackend: addcdiv + CompositeExplicitAutograd: addcdiv +- func: cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor + python_module: nn + - func: lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CPU: legacy::cpu::_th_gels_out CUDA: legacy::cuda::_th_gels_out - func: lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR) @@ -5842,126 +6300,107 @@ dispatch: CPU: legacy::cpu::_th_gels CUDA: legacy::cuda::_th_gels - func: triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) 
M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: - DefaultBackend: triangular_solve_out + CPU, CUDA: triangular_solve_out - func: triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient) variants: method, function dispatch: - DefaultBackend: triangular_solve + CPU, CUDA: triangular_solve -- func: _triangular_solve_helper(Tensor self, Tensor A, bool upper, bool transpose, bool unitriangular) -> (Tensor, Tensor) - variants: function - dispatch: - CPU: _triangular_solve_helper_cpu - CUDA: _triangular_solve_helper_cuda - - func: symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: - DefaultBackend: symeig_out + CompositeExplicitAutograd: symeig_out - func: symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor eigenvalues, Tensor eigenvectors) variants: method, function dispatch: - DefaultBackend: symeig + CompositeExplicitAutograd: symeig - func: _symeig_helper(Tensor self, bool eigenvectors, bool upper) -> (Tensor, Tensor) variants: function dispatch: CPU: _symeig_helper_cpu CUDA: _symeig_helper_cuda - func: eig.e(Tensor self, bool eigenvectors=False, *, Tensor(a!) e, Tensor(b!) v) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: - DefaultBackend: eig_out + CompositeExplicitAutograd: eig_out - func: eig(Tensor self, bool eigenvectors=False) -> (Tensor eigenvalues, Tensor eigenvectors) variants: method, function dispatch: - DefaultBackend: eig + CompositeExplicitAutograd: eig - func: svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - dispatch: - Math: svd_out - func: svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V) variants: method, function - dispatch: - Math: svd - func: _svd_helper(Tensor self, bool some, bool compute_uv) -> (Tensor U, Tensor S, Tensor V) variants: function dispatch: CPU: _svd_helper_cpu CUDA: _svd_helper_cuda # swapaxes, alias for transpose - func: swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a) variants: function, method + device_check: NoCheck device_guard: False - func: swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!) variants: method + device_check: NoCheck device_guard: False # swapdims, alias for transpose - func: swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a) variants: function, method + device_check: NoCheck device_guard: False - func: swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) variants: method + device_check: NoCheck device_guard: False - func: cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) 
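
`swapaxes`/`swapdims` above are NumPy-compatibility aliases for `transpose` and therefore need no dispatch section; a quick Python check (illustrative):

    import torch

    x = torch.randn(2, 3, 4)
    assert torch.equal(torch.swapaxes(x, 0, 2), x.transpose(0, 2))
    assert torch.equal(torch.swapdims(x, 0, 2), x.transpose(0, 2))
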
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: - DefaultBackend: cholesky_out + CPU, CUDA: cholesky_out - func: cholesky(Tensor self, bool upper=False) -> Tensor variants: method, function dispatch: - DefaultBackend: cholesky + CPU, CUDA: cholesky -- func: _cholesky_helper(Tensor self, bool upper) -> Tensor - variants: function - dispatch: - CPU: _cholesky_helper_cpu - CUDA: _cholesky_helper_cuda - - func: cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: - DefaultBackend: cholesky_solve_out + CompositeExplicitAutograd: cholesky_solve_out - func: cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor variants: method, function dispatch: - DefaultBackend: cholesky_solve + CompositeExplicitAutograd: cholesky_solve - func: _cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor variants: function dispatch: CPU: _cholesky_solve_helper_cpu CUDA: _cholesky_solve_helper_cuda - func: solve(Tensor self, Tensor A) -> (Tensor solution, Tensor LU) variants: function, method dispatch: - DefaultBackend: solve + CompositeExplicitAutograd: solve - func: solve.solution(Tensor self, Tensor A, *, Tensor(a!) solution, Tensor(b!) lu) -> (Tensor(a!) solution, Tensor(b!) LU) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: - DefaultBackend: solve_out + CompositeExplicitAutograd: solve_out - func: _solve_helper(Tensor self, Tensor A) -> (Tensor, Tensor) variants: function dispatch: CPU: _solve_helper_cpu @@ -5975,213 +6414,229 @@ - func: cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: cholesky_inverse_out - func: qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures - dispatch: - Math: qr_out - func: qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R) variants: method, function - dispatch: - Math: qr - func: geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: - CPU: legacy::cpu::_th_geqrf_out - CUDA: legacy::cuda::_th_geqrf_out + CPU, CUDA: geqrf_out - func: geqrf(Tensor self) -> (Tensor a, Tensor tau) variants: method, function dispatch: - CPU: legacy::cpu::_th_geqrf - CUDA: legacy::cuda::_th_geqrf + CPU, CUDA: geqrf -- func: orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!) - dispatch: - CPU: orgqr_out - +# orgqr, alias for linalg_householder_product - func: orgqr(Tensor self, Tensor input2) -> Tensor variants: method, function - dispatch: - CPU: orgqr +- func: orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!) + - func: ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!) 
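
`geqrf` above drops its legacy TH kernels, and `orgqr` becomes an alias for the new `linalg_householder_product`. A round-trip sketch (Python API, illustrative; square input for simplicity):

    import torch

    A = torch.randn(4, 4, dtype=torch.float64)
    a, tau = torch.geqrf(A)                 # packed Householder reflectors
    Q = torch.orgqr(a, tau)                 # materialize Q from the packed form
    assert torch.allclose(Q, torch.linalg.householder_product(a, tau))
    assert torch.allclose(Q @ a.triu(), A)  # R is the upper triangle of `a`
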
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: - CPU: legacy::cpu::_th_ormqr_out + CPU, CUDA: ormqr_out - func: ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor variants: method, function dispatch: - CPU: legacy::cpu::_th_ormqr + CPU, CUDA: ormqr - func: _lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor, Tensor, Tensor) variants: function dispatch: - CPU: _lu_with_info_cpu - CUDA: _lu_with_info_cuda + CPU, CUDA: _lu_with_info - func: lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: - DefaultBackend: lu_solve_out + CompositeExplicitAutograd: lu_solve_out - func: lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor variants: method, function dispatch: - DefaultBackend: lu_solve + CompositeExplicitAutograd: lu_solve -- func: _lu_solve_helper(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor +- func: lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U) variants: function dispatch: - CPU: _lu_solve_helper_cpu - CUDA: _lu_solve_helper_cuda + CPU, CUDA: lu_unpack +- func: lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) + variants: function + dispatch: + CPU, CUDA: lu_unpack_out + # TODO: remove dispatch section when porting TH CUDA to ATen - func: multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures dispatch: CPU, CUDA: multinomial_out - func: multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor variants: method, function dispatch: CPU, CUDA: multinomial - func: lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: lgamma_out - func: lgamma_(Tensor(a!) self) -> Tensor(a!) + device_check: NoCheck # TensorIterator + structured_delegate: lgamma.out variants: method - dispatch: - CPU, CUDA: lgamma_ - func: lgamma(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: lgamma.out variants: method, function - dispatch: - CPU, CUDA: lgamma - func: digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: digamma_out - func: digamma(Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: digamma.out variants: method, function - dispatch: - CPU, CUDA: digamma - func: polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) - use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + device_check: NoCheck # TensorIterator + structured: True + structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: polygamma_out - func: polygamma(int n, Tensor self) -> Tensor + device_check: NoCheck # TensorIterator + structured_delegate: polygamma.out variants: method, function + +- func: polygamma_(Tensor(a!) self, int n) -> Tensor(a!) 
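
`lu_unpack`, registered above as a native CPU/CUDA op, reverses the packed output of an LU factorization; a round-trip sketch (illustrative):

    import torch

    A = torch.randn(3, 3, dtype=torch.float64)
    LU, pivots = A.lu()
    P, L, U = torch.lu_unpack(LU, pivots)
    assert torch.allclose(P @ L @ U, A)
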
# TODO: remove dispatch section when porting TH CUDA to ATen
- func: multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 dispatch:
 CPU, CUDA: multinomial_out

- func: multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor
 variants: method, function
 dispatch:
 CPU, CUDA: multinomial

- func: lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
 dispatch:
 CPU, CUDA: lgamma_out

- func: lgamma_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: lgamma.out
 variants: method
- dispatch:
- CPU, CUDA: lgamma_

- func: lgamma(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: lgamma.out
 variants: method, function
- dispatch:
- CPU, CUDA: lgamma

- func: digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
 dispatch:
 CPU, CUDA: digamma_out

- func: digamma(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: digamma.out
 variants: method, function
- dispatch:
- CPU, CUDA: digamma

- func: polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
 dispatch:
 CPU, CUDA: polygamma_out

- func: polygamma(int n, Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: polygamma.out
 variants: method, function
+
+- func: polygamma_(Tensor(a!) self, int n) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ variants: method
 dispatch:
- DefaultBackend: polygamma
+ CompositeExplicitAutograd: polygamma_

- func: erfinv(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: erfinv.out
 variants: method, function
- dispatch:
- CPU, CUDA: erfinv

- func: erfinv_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: erfinv.out
 variants: method
- dispatch:
- CPU, CUDA: erfinv_

- func: erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
 dispatch:
 CPU, CUDA: erfinv_out

- func: i0(Tensor self) -> Tensor
+ structured_delegate: i0.out
 variants: function, method
- dispatch:
- DefaultBackend: i0

- func: i0_(Tensor(a!) self) -> Tensor(a!)
+ structured_delegate: i0.out
 variants: function, method
- dispatch:
- DefaultBackend: i0_

- func: i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
 dispatch:
 CPU, CUDA: i0_out

- func: sign(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: sign.out
 variants: function, method
 dispatch:
- DefaultBackend: sign
+ CompositeExplicitAutograd: sign

- func: sign_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: sign.out
 variants: method
 dispatch:
- DefaultBackend: sign_
+ CompositeExplicitAutograd: sign_

- func: sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
 dispatch:
 CPU, CUDA: sign_out

- func: signbit(Tensor self) -> Tensor
 variants: function, method

- func: signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 dispatch:
 CPU: signbit_out
 CUDA: signbit_out

- func: dist(Tensor self, Tensor other, Scalar p=2) -> Tensor
+ device_check: NoCheck # TensorIterator
 variants: method, function
 dispatch:
- DefaultBackend: dist
+ CompositeExplicitAutograd: dist

- func: atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
 dispatch:
 CPU, CUDA: atan2_out

+- func: atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: atan2.out
+ variants: method
+
- func: atan2(Tensor self, Tensor other) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: atan2.out
 variants: method, function
- dispatch:
- CPU, CUDA: atan2

- func: lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
 dispatch:
 CPU: lerp_cpu_scalar_out
 CUDA: lerp_cuda_scalar_out

- func: lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
 dispatch:
 CPU: lerp_cpu_tensor_out
 CUDA: lerp_cuda_tensor_out

- func: lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor
+ device_check: NoCheck # TensorIterator
 variants: method, function
 dispatch:
 CPU: lerp_cpu_scalar
 CUDA: lerp_cuda_scalar

- func: lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor
+ device_check: NoCheck # TensorIterator
 variants: method, function
 dispatch:
 CPU: lerp_cpu_tensor
 CUDA: lerp_cuda_tensor

- func: histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 dispatch:
 CPU: legacy::cpu::_th_histc_out
 CUDA: _histc_out_cuda

- func: histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor

@@ -6189,113 +6644,115 @@
 dispatch:
 CPU: legacy::cpu::_th_histc
 CUDA: _histc_cuda

- func: fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
 dispatch:
 CPU, CUDA: fmod_out

- func: fmod.Scalar(Tensor self, Scalar other) -> Tensor
+ device_check: NoCheck # TensorIterator
 variants: method, function
 dispatch:
 CPU, CUDA: fmod

- func: fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
 dispatch:
 CPU, CUDA: fmod_out

- func: fmod.Tensor(Tensor self, Tensor other) -> Tensor
+ device_check: NoCheck # TensorIterator
 variants: method, function
 dispatch:
 CPU, CUDA: fmod

- func: hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
 dispatch:
 CPU, CUDA: hypot_out

- func: hypot(Tensor self, Tensor other) -> Tensor
+ structured_delegate: hypot.out
 variants: method, function
- dispatch:
- CPU, CUDA: hypot

- func: hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured_delegate: hypot.out
 variants: method
 dispatch:
- DefaultBackend: hypot_
+ CompositeExplicitAutograd: hypot_

- func: igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
 dispatch:
 CPU, CUDA: igamma_out

- func: igamma(Tensor self, Tensor other) -> Tensor
+ structured_delegate: igamma.out
 variants: method, function
- dispatch:
- CPU, CUDA: igamma

- func: igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured_delegate: igamma.out
 variants: method
- dispatch:
- CPU, CUDA: igamma_

- func: igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
 dispatch:
 CPU, CUDA: igammac_out

- func: igammac(Tensor self, Tensor other) -> Tensor
+ structured_delegate: igammac.out
 variants: method, function
- dispatch:
- CPU, CUDA: igammac

- func: igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+ structured_delegate: igammac.out
 variants: method
- dispatch:
- CPU, CUDA: igammac_

- func: nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
 dispatch:
 CPU, CUDA: nextafter_out

- func: nextafter(Tensor self, Tensor other) -> Tensor
+ structured_delegate: nextafter.out
 variants: method, function
- dispatch:
- CPU, CUDA: nextafter

- func: nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured_delegate: nextafter.out
 variants: method
 dispatch:
- DefaultBackend: nextafter_
+ CompositeExplicitAutograd: nextafter_

- func: remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
 dispatch:
 CPU, CUDA: remainder_out

- func: remainder.Scalar(Tensor self, Scalar other) -> Tensor
+ device_check: NoCheck # TensorIterator
 variants: method, function
 dispatch:
 CPU, CUDA: remainder

- func: remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
 dispatch:
 CPU, CUDA: remainder_out

- func: remainder.Tensor(Tensor self, Tensor other) -> Tensor
+ device_check: NoCheck # TensorIterator
 variants: method, function
 dispatch:
 CPU, CUDA: remainder

- func: min(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
 variants: method, function
 dispatch:
 CPU, CUDA: min
 QuantizedCPU: min_quantized_cpu

@@ -6307,10 +6764,11 @@
- func: fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
 dispatch:
 CPU, CUDA: fmin_out

- func: max(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
 variants: method, function
 dispatch:
 CPU, CUDA: max
 QuantizedCPU: max_quantized_cpu

@@ -6322,141 +6780,183 @@
- func: fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
 dispatch:
 CPU, CUDA: fmax_out

- func: maximum(Tensor self, Tensor other) -> Tensor
+ structured_delegate: maximum.out
+ device_check: NoCheck # TensorIterator
 variants: method, function
- dispatch:
- CPU, CUDA: maximum

- func: maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
+ device_check: NoCheck # TensorIterator
 dispatch:
 CPU, CUDA: maximum_out

# binary max, alias of maximum
# NOTE: max is not an alias for maximum, since there is also unary max
- func: max.other(Tensor self, Tensor other) -> Tensor
+ device_check: NoCheck # TensorIterator
 variants: method, function

- func: max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator

- func: minimum(Tensor self, Tensor other) -> Tensor
+ structured_delegate: minimum.out
+ device_check: NoCheck # TensorIterator
 variants: method, function
- dispatch:
- CPU, CUDA: minimum

- func: minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
+ device_check: NoCheck # TensorIterator
 dispatch:
 CPU, CUDA: minimum_out

# binary min, alias for minimum
# NOTE: min is not an alias for minimum, since there is also unary min
- func: min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator

- func: min.other(Tensor self, Tensor other) -> Tensor
+ device_check: NoCheck # TensorIterator
 variants: method, function

+# The following quantile signatures are DEPRECATED in favor of the new ones with the interpolation kwarg.
- func: quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False) -> Tensor
 variants: method, function

- func: quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False) -> Tensor
 variants: method, function

- func: nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False) -> Tensor
 variants: method, function

- func: nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False) -> Tensor
 variants: method, function

+# To keep backward and forward compatibility, and to avoid ambiguity with the original signatures, dim, keepdim and interpolation
+# parameters are required for now. Once the deprecated signatures are removed they will be made optional.
+- func: quantile.new_scalar_out(Tensor self, float q, int? dim, bool keepdim, *, str interpolation, Tensor(a!) out) -> Tensor(a!)
+
+- func: quantile.new_scalar(Tensor self, float q, int? dim, bool keepdim, *, str interpolation) -> Tensor
+ variants: method, function
+
+- func: quantile.new_out(Tensor self, Tensor q, int? dim, bool keepdim, *, str interpolation, Tensor(a!) out) -> Tensor(a!)
+
+- func: quantile.new(Tensor self, Tensor q, int? dim, bool keepdim, *, str interpolation) -> Tensor
+ variants: method, function
+
+- func: nanquantile.new_scalar_out(Tensor self, float q, int? dim, bool keepdim, *, str interpolation, Tensor(a!) out) -> Tensor(a!)
+
+- func: nanquantile.new_scalar(Tensor self, float q, int? dim, bool keepdim, *, str interpolation) -> Tensor
+ variants: method, function
+
+- func: nanquantile.new_out(Tensor self, Tensor q, int? dim, bool keepdim, *, str interpolation, Tensor(a!) out) -> Tensor(a!)
+
+- func: nanquantile.new(Tensor self, Tensor q, int? dim, bool keepdim, *, str interpolation) -> Tensor
+ variants: method, function
+
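As the added comments note, the new quantile/nanquantile overloads take a required interpolation string alongside dim and keepdim, while the deprecated signatures stay callable unchanged. A rough sketch of the two call shapes, assuming these schemas are surfaced through the usual torch.quantile Python binding (the exact released Python signature is not confirmed by this diff):

    import torch

    x = torch.arange(4, dtype=torch.float32)
    q_old = torch.quantile(x, 0.4)  # deprecated overload, linear interpolation
    # new overload: dim, keepdim and interpolation must all be supplied
    q_new = torch.quantile(x, 0.4, 0, False, interpolation="lower")
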
- func: sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
 dispatch:
 CPU: sort_out_cpu
- CUDA: legacy::cuda::_th_sort_out
+ CUDA: sort_out_cuda

+- func: sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+ dispatch:
+ CPU: sort_out_cpu_stable
+ CUDA: sort_out_stable_cuda
+
- func: sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
+ device_check: NoCheck # TensorIterator
 variants: method, function
 dispatch:
 CPU: sort_cpu
- CUDA: legacy::cuda::_th_sort
+ CUDA: sort_cuda
 QuantizedCPU: sort_quantized_cpu

+- func: sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
+ variants: method, function
+ dispatch:
+ CPU: sort_cpu_stable
+ CUDA: sort_stable_cuda
+ QuantizedCPU: sort_quantized_cpu_stable
+
- func: sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

+- func: sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+
- func: sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
 variants: method, function

+- func: sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
+ variants: method, function
+
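The *_stable sort overloads are likewise new: passing stable=True requests a sort that preserves the relative order of equal elements, which the default overloads do not guarantee. A sketch, again assuming the standard Python exposure of these schemas:

    import torch

    x = torch.tensor([2, 1, 2, 1])
    values, indices = torch.sort(x)               # order of ties unspecified
    values, indices = torch.sort(x, stable=True)  # ties keep input order
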
- func: msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
- dispatch:
- Math: msort_out

- func: msort(Tensor self) -> Tensor
 variants: method, function
- dispatch:
- Math: msort

- func: argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor
+ device_check: NoCheck # TensorIterator
 variants: method, function

- func: argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor
 variants: method, function

- func: topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
 dispatch:
 CPU: topk_out_cpu
- CUDA: legacy::cuda::_th_topk_out
+ CUDA: topk_out_cuda

- func: topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
 variants: method, function
+ structured_delegate: topk.values
 dispatch:
- CPU, CUDA: topk
 QuantizedCPU: topk_quantized_cpu

- func: all(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
 variants: method, function
 dispatch:
 CPU, CUDA: all

- func: any(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
 variants: method, function
 dispatch:
 CPU, CUDA: any
 SparseCPU, SparseCUDA: any_sparse

- func: renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
 dispatch:
 CPU: legacy::cpu::_th_renorm_out
 CUDA: legacy::cuda::_th_renorm_out

- func: renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor
+ device_check: NoCheck # TensorIterator
 variants: method, function
 dispatch:
 CPU: legacy::cpu::_th_renorm
 CUDA: legacy::cuda::_th_renorm

- func: unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)
 variants: method
+ device_check: NoCheck
 device_guard: False
 dispatch:
 CPU, CUDA: unfold
 QuantizedCPU, QuantizedCUDA: unfold

@@ -6471,754 +6971,806 @@
 CPU: cpu_equal
 CUDA: cuda_equal
 QuantizedCPU: equal_quantized_cpu

- func: pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
 dispatch:
- CPU, CUDA: pow_out
+ CPU, CUDA: pow_Tensor_Tensor_out

- func: pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: pow.Tensor_Tensor_out
 variants: method, function
- dispatch:
- CPU, CUDA: pow

- func: pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
 dispatch:
- CPU, CUDA: pow_out
+ CPU, CUDA: pow_Scalar_out

- func: pow.Scalar(Scalar self, Tensor exponent) -> Tensor
- dispatch:
- CPU, CUDA: pow
+ device_check: NoCheck # TensorIterator
+ structured_delegate: pow.Scalar_out

- func: pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
+ structured: True
+ structured_inherits: TensorIteratorBase
 dispatch:
- CPU, CUDA: pow_out
+ CPU, CUDA: pow_Tensor_Scalar_out
 SparseCPU, SparseCUDA: pow_out_sparse_scalar

- func: pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
+ device_check: NoCheck # TensorIterator
+ structured_delegate: pow.Tensor_Scalar_out
 variants: function, method
 dispatch:
- CPU, CUDA: pow
 SparseCPU, SparseCUDA: pow_sparse_scalar

- func: pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: pow.Tensor_Scalar_out
 variants: method
- dispatch:
- CPU, CUDA: pow_

- func: pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ structured_delegate: pow.Tensor_Tensor_out
 variants: method
- dispatch:
- CPU, CUDA: pow_

- func: float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
- dispatch:
- Math: float_power_out

- func: float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
 variants: function, method
- dispatch:
- Math: float_power

- func: float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
- dispatch:
- Math: float_power_out

- func: float_power.Scalar(Scalar self, Tensor exponent) -> Tensor
- dispatch:
- Math: float_power

- func: float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
- dispatch:
- Math: float_power_out

- func: float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
 variants: function, method
- dispatch:
- Math: float_power

- func: float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
 variants: method
- dispatch:
- Math: float_power_

- func: float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
 variants: method
- dispatch:
- Math: float_power_

- func: normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
 variants: method
 dispatch:
 CPU, CUDA: normal_
+ Meta: normal_meta_

- func: normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 dispatch:
 CPU, CUDA: normal_out

- func: normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor
 dispatch:
 CPU, CUDA: normal

- func: normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 dispatch:
 CPU, CUDA: normal_out

- func: normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor
 dispatch:
 CPU, CUDA: normal

- func: normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 dispatch:
 CPU, CUDA: normal_out

- func: normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor
 dispatch:
 CPU, CUDA: normal

- func: normal.float_float(float mean, float std, int[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: normal.float_float_out(float mean, float std, int[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: alias(Tensor(a) self) -> Tensor(a)
 variants: method, function
 dispatch:
- DefaultBackend: alias
+ CompositeExplicitAutograd: alias

- func: _index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
 dispatch:
- CPU: legacy::cpu::_th_index_copy_
- CUDA: legacy::cuda::_th_index_copy_
+ CPU: _index_copy_impl_
+ CUDA: _index_copy_impl_

- func: _cumsum(Tensor self, int dim) -> Tensor
 dispatch:
 CPU: _cumsum_cpu
 CUDA: _cumsum_cuda

- func: _cumsum.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 dispatch:
 CPU: _cumsum_out_cpu
 CUDA: _cumsum_out_cuda

- func: _cumprod(Tensor self, int dim) -> Tensor
 dispatch:
 CPU: _cumprod_cpu
 CUDA: _cumprod_cuda

- func: _cumprod.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 dispatch:
 CPU: _cumprod_out_cpu
 CUDA: _cumprod_out_cuda

-- func: _var(Tensor self, bool unbiased=True) -> Tensor
- dispatch:
- CPU: legacy::cpu::_th_var
-
-- func: _std(Tensor self, bool unbiased=True) -> Tensor
- dispatch:
- CPU: legacy::cpu::_th_std
-
- func: _amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
 variants: function
 dispatch:
 CUDA: _amp_foreach_non_finite_check_and_unscale_cuda_

-- func: _amp_update_scale(Tensor(a!) growth_tracker, Tensor current_scale, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor
+- func: _amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)
 variants: function
 dispatch:
- CUDA: _amp_update_scale_cuda
+ CUDA: _amp_update_scale_cuda_

- func: _cat(Tensor[] tensors, int dim=0) -> Tensor
 dispatch:
 CPU: _cat_cpu
 CUDA: cat_cuda
 QuantizedCPU: cat_quantized_cpu

- func: _cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 dispatch:
 CPU: _cat_out_cpu
 CUDA: cat_out_cuda
 QuantizedCPU: cat_out_quantized_cpu

- func: _foreach_add.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_add_scalar_kernel_slow
 CUDA: foreach_tensor_add_scalar_kernel_cuda

- func: _foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_add_scalar_kernel_slow_
 CUDA: foreach_tensor_add_scalar_kernel_cuda_

- func: _foreach_sub.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_sub_scalar_kernel_slow
 CUDA: foreach_tensor_sub_scalar_kernel_cuda

- func: _foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_sub_scalar_kernel_slow_
 CUDA: foreach_tensor_sub_scalar_kernel_cuda_

- func: _foreach_mul.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_mul_scalar_kernel_slow
 CUDA: foreach_tensor_mul_scalar_kernel_cuda

- func: _foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_mul_scalar_kernel_slow_
 CUDA: foreach_tensor_mul_scalar_kernel_cuda_

- func: _foreach_div.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_div_scalar_kernel_slow
 CUDA: foreach_tensor_div_scalar_kernel_cuda

- func: _foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_div_scalar_kernel_slow_
 CUDA: foreach_tensor_div_scalar_kernel_cuda_

- func: _foreach_add.List(Tensor[] tensors1, Tensor[] tensors2, *, Scalar alpha=1) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_add_list_kernel_slow
 CUDA: foreach_tensor_add_list_kernel_cuda

- func: _foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_add_list_kernel_slow_
 CUDA: foreach_tensor_add_list_kernel_cuda_

- func: _foreach_sub.List(Tensor[] tensors1, Tensor[] tensors2, *, Scalar alpha=1) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_sub_list_kernel_slow
 CUDA: foreach_tensor_sub_list_kernel_cuda

- func: _foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_sub_list_kernel_slow_
 CUDA: foreach_tensor_sub_list_kernel_cuda_

- func: _foreach_mul.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_mul_list_kernel_slow
 CUDA: foreach_tensor_mul_list_kernel_cuda

- func: _foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_mul_list_kernel_slow_
 CUDA: foreach_tensor_mul_list_kernel_cuda_

- func: _foreach_div.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_div_list_kernel_slow
 CUDA: foreach_tensor_div_list_kernel_cuda

- func: _foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_div_list_kernel_slow_
 CUDA: foreach_tensor_div_list_kernel_cuda_

-- func: _foreach_add.ScalarList(Tensor[] tensors, float[] scalars) -> Tensor[]
+- func: _foreach_add.ScalarList(Tensor[] tensors, Scalar[] scalars) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_add_scalarlist_kernel_slow
 CUDA: foreach_tensor_add_scalarlist_kernel_cuda

-- func: _foreach_add_.ScalarList(Tensor(a!)[] self, float[] scalars) -> ()
+- func: _foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_add_scalarlist_kernel_slow_
 CUDA: foreach_tensor_add_scalarlist_kernel_cuda_

-- func: _foreach_sub.ScalarList(Tensor[] tensors, float[] scalars) -> Tensor[]
+- func: _foreach_sub.ScalarList(Tensor[] tensors, Scalar[] scalars) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_sub_scalarlist_kernel_slow
 CUDA: foreach_tensor_sub_scalarlist_kernel_cuda

-- func: _foreach_sub_.ScalarList(Tensor(a!)[] self, float[] scalars) -> ()
+- func: _foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_sub_scalarlist_kernel_slow_
 CUDA: foreach_tensor_sub_scalarlist_kernel_cuda_

-- func: _foreach_div.ScalarList(Tensor[] tensors, float[] scalars) -> Tensor[]
+- func: _foreach_div.ScalarList(Tensor[] tensors, Scalar[] scalars) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_div_scalarlist_kernel_slow
 CUDA: foreach_tensor_div_scalarlist_kernel_cuda

-- func: _foreach_div_.ScalarList(Tensor(a!)[] self, float[] scalars) -> ()
+- func: _foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_div_scalarlist_kernel_slow_
 CUDA: foreach_tensor_div_scalarlist_kernel_cuda_

-- func: _foreach_mul.ScalarList(Tensor[] tensors, float[] scalars) -> Tensor[]
+- func: _foreach_mul.ScalarList(Tensor[] tensors, Scalar[] scalars) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_mul_scalarlist_kernel_slow
 CUDA: foreach_tensor_mul_scalarlist_kernel_cuda

-- func: _foreach_mul_.ScalarList(Tensor(a!)[] self, float[] scalars) -> ()
+- func: _foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_mul_scalarlist_kernel_slow_
 CUDA: foreach_tensor_mul_scalarlist_kernel_cuda_

- func: _foreach_exp(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_exp_slow
 CUDA: foreach_tensor_exp_cuda

- func: _foreach_zero_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_zero_slow_
 CUDA: foreach_tensor_zero_cuda_

- func: _foreach_exp_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_exp_slow_
 CUDA: foreach_tensor_exp_cuda_

- func: _foreach_sqrt(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_sqrt_slow
 CUDA: foreach_tensor_sqrt_cuda

- func: _foreach_sqrt_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_sqrt_slow_
 CUDA: foreach_tensor_sqrt_cuda_

- func: _foreach_abs(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_abs_slow
 CUDA: foreach_tensor_abs_cuda

- func: _foreach_abs_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_abs_slow_
 CUDA: foreach_tensor_abs_cuda_

- func: _foreach_acos(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_acos_slow
 CUDA: foreach_tensor_acos_cuda

- func: _foreach_acos_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_acos_slow_
 CUDA: foreach_tensor_acos_cuda_

- func: _foreach_asin(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_asin_slow
 CUDA: foreach_tensor_asin_cuda

- func: _foreach_asin_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_asin_slow_
 CUDA: foreach_tensor_asin_cuda_

- func: _foreach_atan(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_atan_slow
 CUDA: foreach_tensor_atan_cuda

- func: _foreach_atan_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_atan_slow_
 CUDA: foreach_tensor_atan_cuda_

- func: _foreach_ceil(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_ceil_slow
 CUDA: foreach_tensor_ceil_cuda

- func: _foreach_ceil_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_ceil_slow_
 CUDA: foreach_tensor_ceil_cuda_

- func: _foreach_cos(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_cos_slow
 CUDA: foreach_tensor_cos_cuda

- func: _foreach_cos_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_cos_slow_
 CUDA: foreach_tensor_cos_cuda_

- func: _foreach_cosh(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_cosh_slow
 CUDA: foreach_tensor_cosh_cuda

- func: _foreach_cosh_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_cosh_slow_
 CUDA: foreach_tensor_cosh_cuda_

- func: _foreach_erf(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_erf_slow
 CUDA: foreach_tensor_erf_cuda

- func: _foreach_erf_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_erf_slow_
 CUDA: foreach_tensor_erf_cuda_

- func: _foreach_erfc(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_erfc_slow
 CUDA: foreach_tensor_erfc_cuda

- func: _foreach_erfc_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_erfc_slow_
 CUDA: foreach_tensor_erfc_cuda_

- func: _foreach_expm1(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_expm1_slow
 CUDA: foreach_tensor_expm1_cuda

- func: _foreach_expm1_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_expm1_slow_
 CUDA: foreach_tensor_expm1_cuda_

- func: _foreach_floor(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_floor_slow
 CUDA: foreach_tensor_floor_cuda

- func: _foreach_floor_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_floor_slow_
 CUDA: foreach_tensor_floor_cuda_

- func: _foreach_log(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_log_slow
 CUDA: foreach_tensor_log_cuda

- func: _foreach_log_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_log_slow_
 CUDA: foreach_tensor_log_cuda_

- func: _foreach_log10(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_log10_slow
 CUDA: foreach_tensor_log10_cuda

- func: _foreach_log10_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_log10_slow_
 CUDA: foreach_tensor_log10_cuda_

- func: _foreach_log1p(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_log1p_slow
 CUDA: foreach_tensor_log1p_cuda

- func: _foreach_log1p_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_log1p_slow_
 CUDA: foreach_tensor_log1p_cuda_

- func: _foreach_log2(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_log2_slow
 CUDA: foreach_tensor_log2_cuda

- func: _foreach_log2_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_log2_slow_
 CUDA: foreach_tensor_log2_cuda_

- func: _foreach_neg(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_neg_slow
 CUDA: foreach_tensor_neg_cuda

- func: _foreach_neg_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_neg_slow_
 CUDA: foreach_tensor_neg_cuda_

- func: _foreach_tan(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_tan_slow
 CUDA: foreach_tensor_tan_cuda

- func: _foreach_tan_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_tan_slow_
 CUDA: foreach_tensor_tan_cuda_

- func: _foreach_tanh(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_tanh_slow
 CUDA: foreach_tensor_tanh_cuda

- func: _foreach_tanh_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_tanh_slow_
 CUDA: foreach_tensor_tanh_cuda_

- func: _foreach_sin(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_sin_slow
 CUDA: foreach_tensor_sin_cuda

- func: _foreach_sin_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_sin_slow_
 CUDA: foreach_tensor_sin_cuda_

- func: _foreach_sinh(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_sinh_slow
 CUDA: foreach_tensor_sinh_cuda

- func: _foreach_sinh_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_sinh_slow_
 CUDA: foreach_tensor_sinh_cuda_

- func: _foreach_round(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_round_slow
 CUDA: foreach_tensor_round_cuda

- func: _foreach_round_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_round_slow_
 CUDA: foreach_tensor_round_cuda_

- func: _foreach_lgamma(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_lgamma_slow
 CUDA: foreach_tensor_lgamma_cuda

- func: _foreach_lgamma_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_lgamma_slow_
 CUDA: foreach_tensor_lgamma_cuda_

- func: _foreach_frac(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_frac_slow
 CUDA: foreach_tensor_frac_cuda

- func: _foreach_frac_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_frac_slow_
 CUDA: foreach_tensor_frac_cuda_

- func: _foreach_reciprocal(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_reciprocal_slow
 CUDA: foreach_tensor_reciprocal_cuda

- func: _foreach_reciprocal_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_reciprocal_slow_
 CUDA: foreach_tensor_reciprocal_cuda_

- func: _foreach_sigmoid(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_sigmoid_slow
 CUDA: foreach_tensor_sigmoid_cuda

- func: _foreach_sigmoid_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_sigmoid_slow_
 CUDA: foreach_tensor_sigmoid_cuda_

- func: _foreach_trunc(Tensor[] tensors) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_trunc_slow
 CUDA: foreach_tensor_trunc_cuda

- func: _foreach_trunc_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_trunc_slow_
 CUDA: foreach_tensor_trunc_cuda_

- func: _foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_addcdiv_scalar_slow_
 CUDA: foreach_tensor_addcdiv_scalar_cuda_

- func: _foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_addcmul_scalar_slow_
 CUDA: foreach_tensor_addcmul_scalar_cuda_

-- func: _foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, float[] scalars) -> ()
+- func: _foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_addcdiv_scalarlist_slow_
 CUDA: foreach_tensor_addcdiv_scalarlist_cuda_

-- func: _foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, float[] scalars) -> ()
+- func: _foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_addcmul_scalarlist_slow_
 CUDA: foreach_tensor_addcmul_scalarlist_cuda_

- func: _foreach_addcdiv.Scalar(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_addcdiv_scalar_slow
 CUDA: foreach_tensor_addcdiv_scalar_cuda

- func: _foreach_addcmul.Scalar(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_addcmul_scalar_slow
 CUDA: foreach_tensor_addcmul_scalar_cuda

-- func: _foreach_addcdiv.ScalarList(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, float[] scalars) -> Tensor[]
+- func: _foreach_addcdiv.ScalarList(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_addcdiv_scalarlist_slow
 CUDA: foreach_tensor_addcdiv_scalarlist_cuda

-- func: _foreach_addcmul.ScalarList(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, float[] scalars) -> Tensor[]
+- func: _foreach_addcmul.ScalarList(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_addcmul_scalarlist_slow
 CUDA: foreach_tensor_addcmul_scalarlist_cuda

- func: _foreach_maximum.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_maximum_slow
 CUDA: foreach_tensor_maximum_cuda

- func: _foreach_minimum.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
 variants: function
 dispatch:
 CPU: foreach_tensor_minimum_slow
 CUDA: foreach_tensor_minimum_cuda

-- func: _mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor, Tensor)
- dispatch:
- CPU: legacy::cpu::_th_mode
- CUDA: legacy::cuda::_th_mode
-
-- func: _mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
- dispatch:
- CPU: legacy::cpu::_th_mode_out
- CUDA: legacy::cuda::_th_mode_out
-
- func: bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
 dispatch:
 CPU: bucketize_cpu
 CUDA: bucketize_cuda

- func: bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 dispatch:
 CPU: bucketize_out_cpu
 CUDA: bucketize_out_cuda

- func: bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor

@@ -7230,11 +7782,10 @@
 dispatch:
 CPU: searchsorted_cpu
 CUDA: searchsorted_cuda

- func: searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 dispatch:
 CPU: searchsorted_out_cpu
 CUDA: searchsorted_out_cuda

- func: searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False) -> Tensor

@@ -7243,90 +7794,82 @@
 CUDA: searchsorted_cuda

## NN wrappers

- func: mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU, CUDA: mse_loss_out

- func: mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU, CUDA: mse_loss

- func: mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU, CUDA: mse_loss_backward_out

- func: mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
 python_module: nn
 dispatch:
 CPU, CUDA: mse_loss_backward

- func: l1_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
- DefaultBackend: l1_loss_out
+ CompositeExplicitAutograd: l1_loss_out

- func: l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
 python_module: nn
 dispatch:
- DefaultBackend: l1_loss
+ CompositeExplicitAutograd: l1_loss

- func: l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU, CUDA: l1_loss_backward_out

- func: l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
 python_module: nn
 dispatch:
- DefaultBackend: l1_loss_backward
+ CompositeExplicitAutograd: l1_loss_backward

- func: multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: multi_margin_loss_cpu_out
 CUDA: legacy::cuda::_thnn_multi_margin_loss_forward_out

- func: multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: multi_margin_loss_cpu
 CUDA: legacy::cuda::_thnn_multi_margin_loss_forward

- func: multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: multi_margin_loss_cpu_backward_out
 CUDA: legacy::cuda::_thnn_multi_margin_loss_backward_out

- func: multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: multi_margin_loss_cpu_backward
 CUDA: legacy::cuda::_thnn_multi_margin_loss_backward

- func: multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn

- func: multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
 python_module: nn

- func: multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: multilabel_margin_loss_forward_out_cpu
 CUDA: legacy::cuda::_thnn_multilabel_margin_loss_forward_out

@@ -7335,11 +7878,10 @@
 dispatch:
 CPU: multilabel_margin_loss_forward_cpu
 CUDA: legacy::cuda::_thnn_multilabel_margin_loss_forward

- func: multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: multilabel_margin_loss_backward_cpu_out
 CUDA: legacy::cuda::_thnn_multilabel_margin_loss_backward_out

@@ -7348,150 +7890,162 @@
 dispatch:
 CPU: multilabel_margin_loss_backward_cpu
 CUDA: legacy::cuda::_thnn_multilabel_margin_loss_backward

- func: nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn

+- func: nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor
+ python_module: nn
+
- func: nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn

- func: nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: nll_loss_forward_out_cpu
 CUDA: legacy::cuda::_thnn_nll_loss_forward_out

- func: nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: nll_loss_forward_cpu
 CUDA: legacy::cuda::_thnn_nll_loss_forward

- func: nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: nll_loss_backward_out_cpu
 CUDA: legacy::cuda::_thnn_nll_loss_backward_out

- func: nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: nll_loss_backward_cpu
 CUDA: legacy::cuda::_thnn_nll_loss_backward

- func: nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn

- func: nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn

- func: nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: nll_loss2d_forward_out_cpu
 CUDA: legacy::cuda::_thnn_nll_loss2d_forward_out

- func: nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: nll_loss2d_forward_cpu
 CUDA: legacy::cuda::_thnn_nll_loss2d_forward

- func: nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: nll_loss2d_backward_out_cpu
 CUDA: legacy::cuda::_thnn_nll_loss2d_backward_out

- func: nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: nll_loss2d_backward_cpu
 CUDA: legacy::cuda::_thnn_nll_loss2d_backward

- func: smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
- func: smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU: smooth_l1_loss_out
 CUDA: smooth_l1_loss_out

- func: smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU, CUDA: smooth_l1_loss

- func: smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: smooth_l1_loss_backward_out
 CUDA: smooth_l1_loss_backward_out

- func: smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor
 python_module: nn
 dispatch:
- DefaultBackend: smooth_l1_loss_backward
+ CompositeExplicitAutograd: smooth_l1_loss_backward

+- func: huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
+ python_module: nn
+ dispatch:
+ CPU, CUDA: huber_loss_out
+
+- func: huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor
+ python_module: nn
+ dispatch:
+ CPU, CUDA: huber_loss
+
+- func: huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)
+ python_module: nn
+ dispatch:
+ CPU, CUDA: huber_loss_backward_out
+
+- func: huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor
+ python_module: nn
+ dispatch:
+ CompositeExplicitAutograd: huber_loss_backward
+
- func: soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
- DefaultBackend: soft_margin_loss_out
+ CompositeExplicitAutograd: soft_margin_loss_out

- func: soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
 python_module: nn
 dispatch:
- DefaultBackend: soft_margin_loss
+ CompositeExplicitAutograd: soft_margin_loss

- func: soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
- DefaultBackend: soft_margin_loss_backward_out
+ CompositeExplicitAutograd: soft_margin_loss_backward_out

- func: soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
 python_module: nn
 dispatch:
- DefaultBackend: soft_margin_loss_backward
+ CompositeExplicitAutograd: soft_margin_loss_backward

- func: elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU, CUDA: elu_out

- func: elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
+ structured_delegate: elu.out
+ device_check: NoCheck # TensorIterator
 python_module: nn
- dispatch:
- CPU, CUDA: elu

- func: elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor
 python_module: nn
 dispatch:
 CPU, CUDA: elu_backward
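# The structured: True / structured_delegate pair on elu above follows the
# structured-kernels convention: only the out= overload has a real kernel, and
# the functional and in-place variants are code-generated to call it. A rough
# Python-level sketch (illustrative only, not the actual generated code):
#   def elu(self, alpha=1, scale=1, input_scale=1):
#       out = torch.empty_like(self)
#       return torch.ops.aten.elu.out(self, alpha, scale, input_scale, out=out)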
- func: elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
+ structured_delegate: elu.out
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
- DefaultBackend: elu_
+ CompositeExplicitAutograd: elu_

- func: glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: glu_out
 CUDA: legacy::cuda::_thnn_glu_forward_out

@@ -7500,11 +8054,10 @@
 dispatch:
 CPU: glu
 CUDA: legacy::cuda::_thnn_glu_forward

- func: glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: glu_backward_out
 CUDA: legacy::cuda::_thnn_glu_backward_out

@@ -7513,128 +8066,139 @@
 dispatch:
 CPU: glu_backward
 CUDA: legacy::cuda::_thnn_glu_backward

- func: hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU, CUDA: hardsigmoid_out

- func: hardsigmoid(Tensor self) -> Tensor
+ structured_delegate: hardsigmoid.out
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
- CPU, CUDA: hardsigmoid
 QuantizedCPU: hardsigmoid_quantized_cpu

- func: hardsigmoid_(Tensor(a!) self) -> Tensor(a!)
+ structured_delegate: hardsigmoid.out
+ device_check: NoCheck # TensorIterator
 python_module: nn
- dispatch:
- CPU, CUDA: hardsigmoid_

- func: hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
 python_module: nn
 dispatch:
 CPU, CUDA: hardsigmoid_backward

- func: hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU, CUDA: hardtanh_out
 QuantizedCPU: hardtanh_out_quantized_cpu

- func: hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU, CUDA: hardtanh
 QuantizedCPU: hardtanh_quantized_cpu

- func: hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU, CUDA: hardtanh_backward_out

- func: hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor
 python_module: nn
 dispatch:
 CPU, CUDA: hardtanh_backward

- func: hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU, CUDA: hardtanh_
 QuantizedCPU: hardtanh_quantized_cpu_

- func: hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU, CUDA: hardswish_out

- func: hardswish(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU, CUDA: hardswish

- func: hardswish_(Tensor(a!) self) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU, CUDA: hardswish_

- func: hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
 python_module: nn
 dispatch:
 CPU, CUDA: hardswish_backward

- func: leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU, CUDA: leaky_relu_out
 QuantizedCPU: leaky_relu_out_quantized_cpu

- func: leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor
+ structured_delegate: leaky_relu.out
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
- CPU, CUDA: leaky_relu
 QuantizedCPU: leaky_relu_quantized_cpu

- func: leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor
 python_module: nn
 dispatch:
 CPU, CUDA: leaky_relu_backward

- func: leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)
+ structured_delegate: leaky_relu.out
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
- CPU, CUDA: leaky_relu_
 QuantizedCPU: leaky_relu_quantized_cpu_

- func: log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
 python_module: nn

- func: log_sigmoid(Tensor self) -> Tensor
+ device_check: NoCheck # TensorIterator
 python_module: nn

- func: log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU: log_sigmoid_forward_out_cpu
 CUDA: legacy::cuda::_thnn_log_sigmoid_forward_out

- func: log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU: log_sigmoid_forward_cpu
 CUDA: legacy::cuda::_thnn_log_sigmoid_forward

- func: log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: log_sigmoid_backward_out_cpu
 CUDA: legacy::cuda::_thnn_log_sigmoid_backward_out

@@ -7643,11 +8207,10 @@
 dispatch:
 CPU: log_sigmoid_backward_cpu
 CUDA: legacy::cuda::_thnn_log_sigmoid_backward

- func: rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: rrelu_with_noise_out_cpu
 CUDA: legacy::cuda::_thnn_rrelu_with_noise_forward_out

@@ -7658,64 +8221,65 @@
 CUDA: legacy::cuda::_thnn_rrelu_with_noise_forward

- func: rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor
 python_module: nn
 dispatch:
- DefaultBackend: rrelu_with_noise_backward
+ CompositeExplicitAutograd: rrelu_with_noise_backward

- func: rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
 python_module: nn
 dispatch:
 CPU: rrelu_with_noise_cpu_
 CUDA: legacy::cuda::_thnn_rrelu_with_noise_forward_

- func: softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU, CUDA: softplus_out

- func: softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
+ structured_delegate: softplus.out
+ device_check: NoCheck # TensorIterator
 python_module: nn
- dispatch:
- CPU, CUDA: softplus

- func: softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU, CUDA: softplus_backward_out

- func: softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output) -> Tensor
 python_module: nn
 dispatch:
 CPU, CUDA: softplus_backward

- func: softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+ structured: True
+ structured_inherits: TensorIteratorBase
+ device_check: NoCheck # TensorIterator
 python_module: nn
 dispatch:
 CPU, CUDA: softshrink_out

- func: softshrink(Tensor self, Scalar lambd=0.5) -> Tensor
+ structured_delegate: softshrink.out
+ device_check: NoCheck # TensorIterator
 python_module: nn
- dispatch:
- CPU, CUDA: softshrink

- func: softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU, CUDA: softshrink_backward_out

- func: softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor
 python_module: nn
 dispatch:
 CPU, CUDA: softshrink_backward

- func: adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU, CUDA: adaptive_avg_pool2d_out_cpu
 MkldnnCPU: mkldnn_adaptive_avg_pool2d_out

@@ -7724,10 +8288,14 @@
- func: mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
 dispatch:
 MkldnnCPU: mkldnn_adaptive_avg_pool2d

+- func: mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
+ dispatch:
+ MkldnnCPU: mkldnn_adaptive_avg_pool2d_backward
+
- func: _adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
 dispatch:
 CPU: adaptive_avg_pool2d_cpu
 CUDA: adaptive_avg_pool2d_cuda
 QuantizedCPU: adaptive_avg_pool2d_quantized_cpu

@@ -7737,95 +8305,86 @@
 dispatch:
 CPU: adaptive_avg_pool2d_backward_cpu
 CUDA: adaptive_avg_pool2d_backward_cuda

- func: adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: adaptive_avg_pool3d_out_cpu
 CUDA: adaptive_avg_pool3d_out_cuda
 QuantizedCPU: adaptive_avg_pool3d_out_quantized_cpu

- func: adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor
 python_module: nn
+
+- func: _adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor
 dispatch:
 CPU: adaptive_avg_pool3d_cpu
 CUDA: adaptive_avg_pool3d_cuda
 QuantizedCPU: adaptive_avg_pool3d_quantized_cpu
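# adaptive_avg_pool3d now keeps only its Python binding here, while the new
# underscore-prefixed _adaptive_avg_pool3d carries the per-backend kernels
# (mirroring _adaptive_avg_pool2d above); the public op is expected to forward
# to it, roughly:
#   def adaptive_avg_pool3d(x, output_size):  # sketch, not the real kernel
#       return torch.ops.aten._adaptive_avg_pool3d(x, output_size)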
- func: adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: adaptive_avg_pool3d_backward_out_cpu
 CUDA: adaptive_avg_pool3d_backward_out_cuda

-- func: adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor
+- func: _adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor
 python_module: nn
 dispatch:
 CPU: adaptive_avg_pool3d_backward_cpu
 CUDA: adaptive_avg_pool3d_backward_cuda

# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: adaptive_max_pool2d_out_cpu
 CUDA: adaptive_max_pool2d_out_cuda

# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)
 python_module: nn
- dispatch:
- CPU: adaptive_max_pool2d_cpu
- CUDA: adaptive_max_pool2d_cuda
+ structured_delegate: adaptive_max_pool2d.out

- func: adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: adaptive_max_pool2d_backward_out_cpu
 CUDA: adaptive_max_pool2d_backward_out_cuda

- func: adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
 python_module: nn
- dispatch:
- CPU: adaptive_max_pool2d_backward_cpu
- CUDA: adaptive_max_pool2d_backward_cuda
+ structured_delegate: adaptive_max_pool2d_backward.grad_input

# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: adaptive_max_pool3d_out_cpu
 CUDA: adaptive_max_pool3d_out_cuda

# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)
 python_module: nn
- dispatch:
- CPU: adaptive_max_pool3d_cpu
- CUDA: adaptive_max_pool3d_cuda
+ structured_delegate: adaptive_max_pool3d.out

- func: adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: adaptive_max_pool3d_backward_out_cpu
 CUDA: adaptive_max_pool3d_backward_out_cuda

- func: adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
 python_module: nn
- dispatch:
- CPU: adaptive_max_pool3d_backward_cpu
- CUDA: adaptive_max_pool3d_backward_cuda
+ structured_delegate: adaptive_max_pool3d_backward.grad_input

- func: avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: avg_pool2d_out_cpu
 CUDA: avg_pool2d_out_cuda
 MkldnnCPU: mkldnn_avg_pool2d_out

@@ -7837,24 +8396,24 @@
 CUDA: avg_pool2d_cuda
 MkldnnCPU: mkldnn_avg_pool2d
 QuantizedCPU: avg_pool2d_quantized_cpu

- func: avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: avg_pool2d_backward_out_cpu
 CUDA: avg_pool2d_backward_out_cuda
+ MkldnnCPU: mkldnn_avg_pool2d_backward_out

- func: avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
 python_module: nn
 dispatch:
 CPU: avg_pool2d_backward_cpu
 CUDA: avg_pool2d_backward_cuda
+ MkldnnCPU: mkldnn_avg_pool2d_backward

- func: avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: avg_pool3d_out_cpu
 CUDA: avg_pool3d_out_cuda
 MkldnnCPU: mkldnn_avg_pool3d_out

@@ -7866,39 +8425,37 @@
 CUDA: avg_pool3d_cuda
 MkldnnCPU: mkldnn_avg_pool3d
 QuantizedCPU: avg_pool3d_quantized_cpu

- func: avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: avg_pool3d_backward_out_cpu
 CUDA: avg_pool3d_backward_out_cuda
+ MkldnnCPU: mkldnn_avg_pool3d_backward_out

- func: avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
 python_module: nn
 dispatch:
 CPU: avg_pool3d_backward_cpu
 CUDA: avg_pool3d_backward_cuda
+ MkldnnCPU: mkldnn_avg_pool3d_backward

# Return: (Tensor output, Tensor indices)
- func: fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: fractional_max_pool2d_out_cpu
 CUDA: fractional_max_pool2d_out_cuda

# Return: (Tensor output, Tensor indices)
- func: fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)
 python_module: nn
- dispatch:
- CPU: fractional_max_pool2d_cpu
- CUDA: fractional_max_pool2d_cuda
+ structured_delegate: fractional_max_pool2d.output

- func: fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: fractional_max_pool2d_backward_out_cpu
 CUDA: fractional_max_pool2d_backward_out_cuda

@@ -7908,11 +8465,10 @@
 CPU: fractional_max_pool2d_backward_cpu
 CUDA: fractional_max_pool2d_backward_cuda

# Return: (Tensor output, Tensor indices)
- func: fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: fractional_max_pool3d_out_cpu
 CUDA: fractional_max_pool3d_out_cuda

@@ -7922,11 +8478,10 @@
 dispatch:
 CPU: fractional_max_pool3d_cpu
 CUDA: fractional_max_pool3d_cuda

- func: fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: fractional_max_pool3d_backward_out_cpu
 CUDA: fractional_max_pool3d_backward_out_cuda

@@ -7936,39 +8491,34 @@
 CPU: fractional_max_pool3d_backward_cpu
 CUDA: fractional_max_pool3d_backward_cuda

# Return: (Tensor output, Tensor indices)
- func: max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: max_pool2d_with_indices_out_cpu
 CUDA: max_pool2d_with_indices_out_cuda

# Return: (Tensor output, Tensor indices)
- func: max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
 python_module: nn
- dispatch:
- CPU: max_pool2d_with_indices_cpu
- CUDA: max_pool2d_with_indices_cuda
+ structured_delegate: max_pool2d_with_indices.out

- func: max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: max_pool2d_with_indices_backward_out_cpu
 CUDA: max_pool2d_with_indices_backward_out_cuda

- func: max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor
 python_module: nn
- dispatch:
- CPU: max_pool2d_with_indices_backward_cpu
- CUDA: max_pool2d_with_indices_backward_cuda
+ structured_delegate: max_pool2d_with_indices_backward.grad_input

# Return: (Tensor output, Tensor indices)
- func: max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: max_pool3d_with_indices_out_cpu
 CUDA: max_pool3d_with_indices_out_cuda

@@ -7978,11 +8528,10 @@
 dispatch:
 CPU: max_pool3d_with_indices_cpu
 CUDA: max_pool3d_with_indices_cuda

- func: max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: max_pool3d_with_indices_backward_out_cpu
 CUDA: max_pool3d_with_indices_backward_out_cuda

@@ -7991,11 +8540,10 @@
 dispatch:
 CPU: max_pool3d_with_indices_backward_cpu
 CUDA: max_pool3d_with_indices_backward_cuda

- func: max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: max_unpooling2d_forward_out_cpu
 CUDA: max_unpooling2d_forward_out_cuda

@@ -8004,11 +8552,10 @@
 dispatch:
 CPU: max_unpooling2d_forward_cpu
 CUDA: max_unpooling2d_forward_cuda

- func: max_unpool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: max_unpooling2d_backward_out_cpu
 CUDA: max_unpooling2d_backward_out_cuda

@@ -8017,11 +8564,10 @@
 dispatch:
 CPU: max_unpooling2d_backward_cpu
 CUDA: max_unpooling2d_backward_cuda

- func: max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: max_unpooling3d_forward_out_cpu
 CUDA: max_unpooling3d_forward_out_cuda

@@ -8030,11 +8576,10 @@
 dispatch:
 CPU: max_unpooling3d_forward_cpu
 CUDA: max_unpooling3d_forward_cuda

- func: max_unpool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: max_unpooling3d_backward_out_cpu
 CUDA: max_unpooling3d_backward_out_cuda

@@ -8043,24 +8588,23 @@
 dispatch:
 CPU: max_unpooling3d_backward_cpu
 CUDA: max_unpooling3d_backward_cuda

- func: reflection_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU, QuantizedCPU: reflection_pad1d_out_cpu
 CUDA: reflection_pad1d_out_cuda

- func: reflection_pad1d(Tensor self, int[2] padding) -> Tensor
 python_module: nn
+ structured_delegate: reflection_pad1d.out
 dispatch:
- CPU, QuantizedCPU: reflection_pad1d_cpu
- CUDA: reflection_pad1d_cuda
+ QuantizedCPU: reflection_pad1d_cpu

- func: reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: reflection_pad1d_backward_out_cpu
 CUDA: reflection_pad1d_backward_out_cuda

@@ -8069,11 +8613,10 @@
 dispatch:
 CPU: reflection_pad1d_backward_cpu
 CUDA: reflection_pad1d_backward_cuda

- func: reflection_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU, QuantizedCPU: reflection_pad2d_out_cpu
 CUDA: reflection_pad2d_out_cuda

@@ -8082,11 +8625,10 @@
 dispatch:
 CPU, QuantizedCPU: reflection_pad2d_cpu
 CUDA: reflection_pad2d_cuda

- func: reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: reflection_pad2d_backward_out_cpu
 CUDA: reflection_pad2d_backward_out_cuda

@@ -8095,50 +8637,43 @@
 dispatch:
 CPU: reflection_pad2d_backward_cpu
 CUDA: reflection_pad2d_backward_cuda

- func: replication_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: replication_pad1d_out_cpu
 CUDA: replication_pad1d_out_cuda

- func: replication_pad1d(Tensor self, int[2] padding) -> Tensor
 python_module: nn
- dispatch:
- CPU: replication_pad1d_cpu
- CUDA: replication_pad1d_cuda
+ structured_delegate: replication_pad1d.out

- func: replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: replication_pad1d_backward_out_cpu
 CUDA: replication_pad1d_backward_out_cuda

- func: replication_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor
 python_module: nn
- dispatch:
- CPU: replication_pad1d_backward_cpu
- CUDA: replication_pad1d_backward_cuda
+ structured_delegate: replication_pad1d_backward.grad_input

- func: replication_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: replication_pad2d_out_cpu
 CUDA: replication_pad2d_out_cuda

- func: replication_pad2d(Tensor self, int[4] padding) -> Tensor
 python_module: nn
- dispatch:
- CPU: replication_pad2d_cpu
- CUDA: replication_pad2d_cuda
+ structured_delegate: replication_pad2d.out

- func: replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: replication_pad2d_backward_out_cpu
 CUDA: replication_pad2d_backward_out_cuda

@@ -8147,24 +8682,21 @@
 dispatch:
 CPU: replication_pad2d_backward_cpu
 CUDA: replication_pad2d_backward_cuda

- func: replication_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: replication_pad3d_out_cpu
 CUDA: replication_pad3d_out_cuda

- func: replication_pad3d(Tensor self, int[6] padding) -> Tensor
 python_module: nn
- dispatch:
- CPU: replication_pad3d_cpu
- CUDA: replication_pad3d_cuda
+ structured_delegate: replication_pad3d.out

- func: replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: replication_pad3d_backward_out_cpu
 CUDA: replication_pad3d_backward_out_cuda

@@ -8175,75 +8707,66 @@
 CUDA: replication_pad3d_backward_cuda

- func: upsample_linear1d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
 python_module: nn
 dispatch:
- CPU: upsample_linear1d_cpu
- CUDA: upsample_linear1d_cuda
+ CompositeExplicitAutograd: upsample_linear1d

- func: upsample_linear1d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
 python_module: nn
 dispatch:
- CPU: upsample_linear1d_backward_cpu
- CUDA: upsample_linear1d_backward_cuda
+ CompositeExplicitAutograd: upsample_linear1d_backward

- func: upsample_bilinear2d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
 python_module: nn
 dispatch:
- CPU: upsample_bilinear2d_cpu
- CUDA: upsample_bilinear2d_cuda
- QuantizedCPU: upsample_bilinear2d_quantized_cpu
+ CompositeExplicitAutograd: upsample_bilinear2d

- func: upsample_bilinear2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
 python_module: nn
 dispatch:
- CPU: upsample_bilinear2d_backward_cpu
- CUDA: upsample_bilinear2d_backward_cuda
+ CompositeExplicitAutograd: upsample_bilinear2d_backward

- func: upsample_trilinear3d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
 python_module: nn
 dispatch:
- CPU: upsample_trilinear3d_cpu
- CUDA: upsample_trilinear3d_cuda
+ CompositeExplicitAutograd: upsample_trilinear3d

- func: upsample_trilinear3d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
 python_module: nn
 dispatch:
- CPU: upsample_trilinear3d_backward_cpu
- CUDA: upsample_trilinear3d_backward_cuda
+ CompositeExplicitAutograd: upsample_trilinear3d_backward

- func: upsample_bicubic2d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
 python_module: nn
 dispatch:
- CPU: upsample_bicubic2d_cpu
- CUDA: upsample_bicubic2d_cuda
+ CompositeExplicitAutograd: upsample_bicubic2d

- func: upsample_bicubic2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
 python_module: nn
 dispatch:
- CPU: upsample_bicubic2d_backward_cpu
- CUDA: upsample_bicubic2d_backward_cuda
+ CompositeExplicitAutograd: upsample_bicubic2d_backward

- func: upsample_nearest1d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor
 python_module: nn
 dispatch:
- DefaultBackend: upsample_nearest1d
+ CompositeExplicitAutograd: upsample_nearest1d

- func: upsample_nearest1d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor
 python_module: nn
 dispatch:
- DefaultBackend: upsample_nearest1d_backward
+ CompositeExplicitAutograd: upsample_nearest1d_backward

- func: upsample_nearest2d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor
 python_module: nn
 dispatch:
- DefaultBackend: upsample_nearest2d
+ CompositeExplicitAutograd: upsample_nearest2d

- func: upsample_nearest2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor
 python_module: nn
 dispatch:
- DefaultBackend: upsample_nearest2d_backward
+ CompositeExplicitAutograd: upsample_nearest2d_backward

- func: upsample_nearest3d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor
 python_module: nn
 dispatch:
 CPU: upsample_nearest3d_cpu

@@ -8256,113 +8779,98 @@
 CPU: upsample_nearest3d_backward_cpu
 CUDA: upsample_nearest3d_backward_cuda

# NOTE: all of the non-"vec" upsample overloads are only kept for backward compatibility.
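# Illustration of the split (assuming the torch.nn.functional.interpolate
# frontend): the "vec" overloads above take an optional output_size or
# per-dimension scale_factors, while the legacy overloads below fix both in
# the signature. For example:
#   F.interpolate(x, scale_factor=2.0, mode='linear', align_corners=False)
#   # roughly lowers to upsample_linear1d.vec(x, None, False, [2.0])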
- func: upsample_linear1d.out(Tensor self, int[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: upsample_linear1d_out_cpu
 CUDA: upsample_linear1d_out_cuda

- func: upsample_linear1d(Tensor self, int[1] output_size, bool align_corners, float? scales=None) -> Tensor
 python_module: nn
- dispatch:
- CPU: upsample_linear1d_cpu
- CUDA: upsample_linear1d_cuda
+ structured_delegate: upsample_linear1d.out

- func: upsample_linear1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: upsample_linear1d_backward_out_cpu
 CUDA: upsample_linear1d_backward_out_cuda

- func: upsample_linear1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners, float? scales=None) -> Tensor
 python_module: nn
- dispatch:
- CPU: upsample_linear1d_backward_cpu
- CUDA: upsample_linear1d_backward_cuda
+ structured_delegate: upsample_linear1d_backward.grad_input

- func: upsample_bilinear2d.out(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: upsample_bilinear2d_out_cpu
 CUDA: upsample_bilinear2d_out_cuda

- func: upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
 python_module: nn
+ structured_delegate: upsample_bilinear2d.out
 dispatch:
- CPU: upsample_bilinear2d_cpu
- CUDA: upsample_bilinear2d_cuda
 QuantizedCPU: upsample_bilinear2d_quantized_cpu

- func: upsample_bilinear2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: upsample_bilinear2d_backward_out_cpu
 CUDA: upsample_bilinear2d_backward_out_cuda

- func: upsample_bilinear2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
 python_module: nn
- dispatch:
- CPU: upsample_bilinear2d_backward_cpu
- CUDA: upsample_bilinear2d_backward_cuda
+ structured_delegate: upsample_bilinear2d_backward.grad_input

- func: upsample_bicubic2d.out(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: upsample_bicubic2d_out_cpu
 CUDA: upsample_bicubic2d_out_cuda

- func: upsample_bicubic2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
 python_module: nn
- dispatch:
- CPU: upsample_bicubic2d_cpu
- CUDA: upsample_bicubic2d_cuda
+ structured_delegate: upsample_bicubic2d.out

- func: upsample_bicubic2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: upsample_bicubic2d_backward_out_cpu
 CUDA: upsample_bicubic2d_backward_out_cuda

- func: upsample_bicubic2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
 python_module: nn
- dispatch:
- CPU: upsample_bicubic2d_backward_cpu
- CUDA: upsample_bicubic2d_backward_cuda
+ structured_delegate: upsample_bicubic2d_backward.grad_input

- func: upsample_trilinear3d.out(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: upsample_trilinear3d_out_cpu
 CUDA: upsample_trilinear3d_out_cuda

- func: upsample_trilinear3d(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
 python_module: nn
- dispatch:
- CPU: upsample_trilinear3d_cpu
- CUDA: upsample_trilinear3d_cuda
+ structured_delegate: upsample_trilinear3d.out

- func: upsample_trilinear3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: upsample_trilinear3d_backward_out_cpu
 CUDA: upsample_trilinear3d_backward_out_cuda

- func: upsample_trilinear3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
 python_module: nn
- dispatch:
- CPU: upsample_trilinear3d_backward_cpu
- CUDA: upsample_trilinear3d_backward_cuda
+ structured_delegate: upsample_trilinear3d_backward.grad_input

- func: upsample_nearest1d.out(Tensor self, int[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
 python_module: nn
 structured: True
 dispatch:

@@ -8407,60 +8915,54 @@
- func: upsample_nearest2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
 python_module: nn
 structured_delegate: upsample_nearest2d_backward.grad_input

- func: upsample_nearest3d.out(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: upsample_nearest3d_out_cpu
 CUDA: upsample_nearest3d_out_cuda

- func: upsample_nearest3d(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
 python_module: nn
+ structured_delegate: upsample_nearest3d.out
 dispatch:
- CPU: upsample_nearest3d_cpu
- CUDA: upsample_nearest3d_cuda
 QuantizedCPU: upsample_nearest3d_quantized_cpu

- func: upsample_nearest3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
+ structured: True
 dispatch:
 CPU: upsample_nearest3d_backward_out_cpu
 CUDA: upsample_nearest3d_backward_out_cuda

- func: upsample_nearest3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
 python_module: nn
- dispatch:
- CPU: upsample_nearest3d_backward_cpu
- CUDA: upsample_nearest3d_backward_cuda
+ structured_delegate: upsample_nearest3d_backward.grad_input
- func: sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU, CUDA: sigmoid_backward_out

- func: sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
 python_module: nn
 dispatch:
 CPU, CUDA: sigmoid_backward

- func: logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU, CUDA: logit_backward_out

- func: logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor
 python_module: nn
 dispatch:
 CPU, CUDA: logit_backward

- func: tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU, CUDA: tanh_backward_out

- func: tanh_backward(Tensor grad_output, Tensor output) -> Tensor

@@ -8485,25 +8987,22 @@
# one that is written in the native style: modern C++. Algorithmically,
# these are the same thing, but we give them different prefixes to
# make the operational distinction clear.

- func: slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: slow_conv_transpose2d_out_cpu
 CUDA: slow_conv_transpose2d_out_cuda

- func: slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: slow_conv_transpose2d_cpu
 CUDA: slow_conv_transpose2d_cuda

- func: slow_conv_transpose2d_backward.grad_output(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] output_padding, int[2] dilation, Tensor columns, Tensor ones, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: slow_conv_transpose2d_backward_out_cpu
 CUDA: slow_conv_transpose2d_backward_out_cuda

@@ -8512,25 +9011,22 @@
 dispatch:
 CPU: slow_conv_transpose2d_backward_cpu
 CUDA: slow_conv_transpose2d_backward_cuda

- func: slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: slow_conv_transpose3d_out_cpu
 CUDA: slow_conv_transpose3d_out_cuda

- func: slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: slow_conv_transpose3d_cpu
 CUDA: slow_conv_transpose3d_cuda

- func: slow_conv_transpose3d_backward.grad_output(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] output_padding, int[3] dilation, Tensor finput, Tensor fgrad_input, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: slow_conv_transpose3d_backward_out_cpu
 CUDA: slow_conv_transpose3d_backward_out_cuda

@@ -8539,33 +9035,28 @@
 dispatch:
 CPU: slow_conv_transpose3d_backward_cpu
 CUDA: slow_conv_transpose3d_backward_cuda

- func: thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn

- func: thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn

- func: thnn_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output, Tensor(b!) finput, Tensor(c!) fgrad_input) -> (Tensor(a!), Tensor(b!), Tensor(c!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: slow_conv2d_forward_out_cpu
 CUDA: legacy::cuda::_thnn_conv2d_forward_out

- func: thnn_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> (Tensor output, Tensor finput, Tensor fgrad_input)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: slow_conv2d_forward_cpu
 CUDA: legacy::cuda::_thnn_conv2d_forward

- func: thnn_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, Tensor finput, Tensor fgrad_input, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: slow_conv2d_backward_out_cpu
 CUDA: slow_conv2d_backward_out_cuda

@@ -8574,73 +9065,77 @@
 dispatch:
 CPU: slow_conv2d_backward_cpu
 CUDA: slow_conv2d_backward_cuda

- func: thnn_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn

- func: thnn_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn

- func: thnn_conv_depthwise2d_forward.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CUDA: legacy::cuda::_thnn_conv_depthwise2d_forward_out

- func: thnn_conv_depthwise2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CUDA: legacy::cuda::_thnn_conv_depthwise2d_forward
- func: thnn_conv_depthwise2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) grad_input, Tensor(b!) grad_weight) -> (Tensor(a!), Tensor(b!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CUDA: thnn_conv_depthwise2d_backward_out

- func: thnn_conv_depthwise2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool[2] output_mask) -> (Tensor grad_input, Tensor grad_weight)
 python_module: nn
 dispatch:
 CUDA: thnn_conv_depthwise2d_backward

+- func: conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, int[3] dilation) -> Tensor
+ python_module: nn
+ dispatch:
+ CUDA: conv_depthwise3d_cuda
+
+- func: conv_depthwise3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ python_module: nn
+ dispatch:
+ CUDA: conv_depthwise3d_backward_cuda_out
+
+- func: conv_depthwise3d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
+ python_module: nn
+ dispatch:
+ CUDA: conv_depthwise3d_backward_cuda
+
- func: slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn

- func: slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn

- func: slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, *, Tensor(a!) output, Tensor(b!) finput, Tensor(c!) fgrad_input) -> (Tensor(a!), Tensor(b!), Tensor(c!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: slow_conv3d_forward_out_cpu

- func: slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding) -> (Tensor output, Tensor finput, Tensor fgrad_input)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: slow_conv3d_forward_cpu

- func: slow_conv3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, Tensor finput, Tensor fgrad_input, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: slow_conv3d_backward_out_cpu

- func: slow_conv3d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, Tensor finput, Tensor fgrad_input, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
 python_module: nn
 dispatch:
 CPU: slow_conv3d_backward_cpu

- func: slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: slow_conv_dilated2d_cpu
 CUDA: slow_conv_dilated2d_cuda

@@ -8649,11 +9144,10 @@
 dispatch:
 CPU: slow_conv_dilated2d_backward_cpu
 CUDA: slow_conv_dilated2d_backward_cuda

- func: slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: slow_conv_dilated3d_cpu
 CUDA: slow_conv_dilated3d_cuda

@@ -8662,11 +9156,10 @@
 dispatch:
 CPU: slow_conv_dilated3d_backward_cpu
 CUDA: slow_conv_dilated3d_backward_cuda

- func: col2im.out(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: col2im_out_cpu
 CUDA: col2im_out_cuda

@@ -8675,11 +9168,10 @@
 dispatch:
 CPU: col2im_cpu
 CUDA: col2im_cuda

- func: col2im_backward.grad_input(Tensor grad_output, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: col2im_backward_out_cpu
 CUDA: col2im_backward_out_cuda

@@ -8688,20 +9180,14 @@
 dispatch:
 CPU: col2im_backward_cpu
 CUDA: col2im_backward_cuda

- func: column_stack(Tensor[] tensors) -> Tensor
- dispatch:
- Math: column_stack

- func: column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
- dispatch:
- Math: column_stack_out

- func: im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: im2col_out_cpu
 CUDA: im2col_out_cuda

@@ -8710,11 +9196,10 @@
 dispatch:
 CPU: im2col_cpu
 CUDA: im2col_cuda

- func: im2col_backward.grad_input(Tensor grad_output, int[2] input_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) grad_input) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: nn
 dispatch:
 CPU: im2col_backward_out_cpu
 CUDA: im2col_backward_out_cuda

@@ -8724,14 +9209,16 @@
 CPU: im2col_backward_cpu
 CUDA: im2col_backward_cuda

- func: isfinite(Tensor self) -> Tensor
 variants: function, method
+ device_check: NoCheck
 device_guard: False

- func: isinf(Tensor self) -> Tensor
 variants: function, method
+ device_check: NoCheck
 device_guard: False

- func: record_stream(Tensor(a!) self, Stream s) -> ()
 variants: method
 dispatch:

@@ -8739,19 +9226,17 @@
- func: isposinf(Tensor self) -> Tensor
 variants: function, method

- func: isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 dispatch:
 CPU, CUDA: isposinf_out

- func: isneginf(Tensor self) -> Tensor
 variants: function, method

- func: isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 dispatch:
 CPU, CUDA: isneginf_out

# NOTE [_add_batch_dim and _remove_batch_dim]
# _add_batch_dim and _remove_batch_dim are meant to be used in the implementation

@@ -8762,10 +9247,146 @@
# See NOTE [_add_batch_dim and _remove_batch_dim]
- func: _remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor
 variants: function

+## Functions related to the `torch.special` namespace
+# Note [special namespace binding]
+# Functions in the special python module should have their names start with
+# the "special_" prefix and be bound to the desired Python name in
+# torch/special/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/special.h.
+# The "special_" names should be hidden from the user and not documented.
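# For example (sketch of the binding convention described above):
#   torch.special.entr(t)   # public Python name
# resolves to the native function special_entr declared below, and C++ callers
# use torch::special::entr(t); the "special_" spelling itself stays internal.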
+
+- func: special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ python_module: special
+ variants: function
+ dispatch:
+   CompositeExplicitAutograd: special_xlog1py_out
+
+- func: special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ python_module: special
+ variants: function
+ dispatch:
+   CompositeExplicitAutograd: special_xlog1py_out
+
+- func: special_i0e(Tensor self) -> Tensor
+ python_module: special
+ variants: function
+ structured_delegate: special_i0e.out
+
+- func: special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ python_module: special
+ structured: True
+ structured_inherits: TensorIteratorBase
+ dispatch:
+   CPU, CUDA: special_i0e_out
+
+- func: special_logit(Tensor self, float? eps=None) -> Tensor
+ python_module: special
+ variants: function
+
+- func: special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
+ python_module: special
+
+- func: special_expit(Tensor self) -> Tensor
+ python_module: special
+ variants: function
+
+- func: special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ python_module: special
+ variants: function
+

## Functions related to the fast Fourier transform and the torch.fft namespace
# Note [FFT namespace binding]
# Functions in the fft python module should have their names start with
# "fft_" underscore and be bound to the desired Python name in
# torch/fft/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/fft.h.

@@ -8778,148 +9399,130 @@
- func: fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
 python_module: fft
 variants: function

- func: fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function

- func: fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
 python_module: fft
 variants: function

- func: fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function

- func: fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
 python_module: fft
 variants: function

- func: fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function

- func: fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
 python_module: fft
 variants: function

- func: fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function

- func: fft_hfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
 python_module: fft
 variants: function

- func: fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function

- func: fft_ihfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
 python_module: fft
 variants: function

- func: fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function
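
# A round-trip sketch for the one-dimensional transforms above (editorial
# addition; assuming the PyTorch 1.8-era torch.fft API):
#
#   import torch
#   x = torch.arange(4, dtype=torch.float)
#   X = torch.fft.fft(x)     # complex spectrum, length 4
#   torch.fft.ifft(X).real   # recovers x up to rounding
#   torch.fft.rfft(x)        # one-sided spectrum, length n // 2 + 1 = 3
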
- func: fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
 python_module: fft
 variants: function

- func: fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function

- func: fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
 python_module: fft
 variants: function

- func: fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function

- func: fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
 python_module: fft
 variants: function

- func: fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function

- func: fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
 python_module: fft
 variants: function

- func: fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function

- func: fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
 python_module: fft
 variants: function

- func: fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function

- func: fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
 python_module: fft
 variants: function

- func: fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function

- func: fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
 python_module: fft
 variants: function

- func: fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function

- func: fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
 python_module: fft
 variants: function

- func: fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function

- func: fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function

- func: fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function

- func: fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function

- func: fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: fft
 variants: function
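
# Editorial sketch: fft_fftfreq/fft_rfftfreq produce the sample-frequency grids
# matching the fft/rfft output layouts (assuming the PyTorch 1.8-era API):
#
#   import torch
#   torch.fft.fftfreq(5)   # tensor([ 0.0000,  0.2000,  0.4000, -0.4000, -0.2000])
#   torch.fft.rfftfreq(5)  # tensor([0.0000, 0.2000, 0.4000])
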
- func: fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor
 python_module: fft

@@ -8936,33 +9539,58 @@
# torch/linalg/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/linalg.h.
# The "linalg_" names should be hidden from the user and not documented.
#
# See linalg_det as an example.

-- func: linalg_cholesky(Tensor self) -> Tensor
+# "_ex" stands for experimental
+- func: linalg_cholesky_ex(Tensor self, *, bool check_errors=False) -> (Tensor L, Tensor info)
 python_module: linalg
 variants: function
 dispatch:
- DefaultBackend: linalg_cholesky
+ CPU, CUDA: linalg_cholesky_ex

-- func: linalg_cholesky.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+- func: linalg_cholesky_ex.L(Tensor self, *, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
 python_module: linalg
 variants: function
 dispatch:
- DefaultBackend: linalg_cholesky_out
+ CPU, CUDA: linalg_cholesky_ex_out

-# torch.linalg.det, alias for torch.det
+- func: linalg_cholesky(Tensor self) -> Tensor
+ python_module: linalg
+ variants: function
+
+- func: linalg_cholesky.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ python_module: linalg
+ variants: function
+
- func: linalg_det(Tensor self) -> Tensor
 python_module: linalg
 variants: function
+ dispatch:
+   CompositeExplicitAutograd: linalg_det
+- func: linalg_det.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ python_module: linalg
+ dispatch:
+   CompositeExplicitAutograd: linalg_det_out
+
+# torch.det, alias for torch.linalg.det
- func: det(Tensor self) -> Tensor
 variants: function, method
+
+- func: linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
+ python_module: linalg
+ variants: function
 dispatch:
- DefaultBackend: det
+ CompositeExplicitAutograd: linalg_lstsq
+- func: linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
+ python_module: linalg
+ variants: function
+ dispatch:
+   CPU, CUDA: linalg_lstsq_out
+
- func: linalg_slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
 python_module: linalg
 variants: function
 dispatch:
   CPU, CUDA: linalg_slogdet

@@ -8970,159 +9598,187 @@
- func: linalg_slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
 python_module: linalg
 dispatch:
   CPU, CUDA: linalg_slogdet_out

-- func: _syevd_helper(Tensor self, bool compute_eigenvectors, str uplo) -> (Tensor, Tensor)
+- func: linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)
+ python_module: linalg
 variants: function
 dispatch:
- CPU: _syevd_helper_cpu
- CUDA: _syevd_helper_cuda
+ CPU, CUDA: linalg_eig
+- func: linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+ python_module: linalg
+ dispatch:
+   CPU, CUDA: linalg_eig_out
+
+- func: linalg_eigvals(Tensor self) -> Tensor
+ python_module: linalg
+
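# Editorial sketch: the new *_ex entry points return a LAPACK-style `info`
# tensor instead of raising, unless check_errors=True (assuming the PyTorch
# 1.9-era API):
#
#   import torch
#   L, info = torch.linalg.cholesky_ex(torch.eye(3))       # info == 0: success
#   _, info = torch.linalg.cholesky_ex(torch.zeros(3, 3))  # info > 0, no exception
#   torch.linalg.cholesky_ex(torch.zeros(3, 3), check_errors=True)  # raises
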
+- func: linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ python_module: linalg
+
- func: linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors)
 python_module: linalg
 variants: function
 dispatch:
- DefaultBackend: linalg_eigh
+ CompositeExplicitAutograd: linalg_eigh

- func: linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: linalg
 dispatch:
- DefaultBackend: linalg_eigh_out
+ CompositeExplicitAutograd: linalg_eigh_out

- func: linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor
 python_module: linalg
 variants: function
- dispatch:
- DefaultBackend: linalg_eigvalsh

- func: linalg_eigvalsh.out(Tensor self, str UPLO='L', *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: linalg
+
+- func: linalg_householder_product(Tensor input, Tensor tau) -> Tensor
+ python_module: linalg
+ variants: function
 dispatch:
- DefaultBackend: linalg_eigvalsh_out
+ CPU, CUDA: linalg_householder_product
+- func: linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
+ python_module: linalg
+ dispatch:
+   CPU, CUDA: linalg_householder_product_out
+
- func: _linalg_inv_out_helper_(Tensor(a!) self, Tensor(b!) infos_lu, Tensor(c!) infos_getri) -> Tensor(a!)
 variants: function
 dispatch:
   CPU: _linalg_inv_out_helper_cpu
   CUDA: _linalg_inv_out_helper_cuda

-- func: linalg_inv(Tensor self) -> Tensor
+- func: linalg_inv_ex(Tensor self, *, bool check_errors=False) -> (Tensor inverse, Tensor info)
 python_module: linalg
 variants: function
 dispatch:
- DefaultBackend: linalg_inv
+ CompositeExplicitAutograd: linalg_inv_ex

-- func: linalg_inv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+- func: linalg_inv_ex.inverse(Tensor self, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
 python_module: linalg
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 variants: function
 dispatch:
- DefaultBackend: linalg_inv_out
+ CompositeExplicitAutograd: linalg_inv_ex_out
+- func: linalg_inv(Tensor self) -> Tensor
+ python_module: linalg
+ variants: function
+
+- func: linalg_inv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ python_module: linalg
+ variants: function
+
- func: inner(Tensor self, Tensor other) -> Tensor
 variants: function, method

- func: inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

# torch.outer, alias for torch.ger
- func: outer(Tensor self, Tensor vec2) -> Tensor
 variants: function, method

- func: outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures

- func: ger(Tensor self, Tensor vec2) -> Tensor
 variants: function, method
 dispatch:
- DefaultBackend: ger
+ CompositeExplicitAutograd: ger

- func: ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 dispatch:
- DefaultBackend: ger_out
+ CompositeExplicitAutograd: ger_out

- func: linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
 python_module: linalg
 variants: function

- func: linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
 python_module: linalg
 variants: function
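
# Editorial sketch: linalg_inv_ex follows the same `info` convention, and
# linalg_householder_product reconstructs Q from a geqrf-style factorization
# (assuming the PyTorch 1.9-era API):
#
#   import torch
#   A = torch.tensor([[2.0, 0.0], [0.0, 4.0]])
#   inverse, info = torch.linalg.inv_ex(A)  # info == 0; inverse == diag(0.5, 0.25)
#   a, tau = torch.geqrf(A)
#   Q = torch.linalg.householder_product(a, tau)  # orthogonal factor of A's QR
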
- func: linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: linalg
 variants: function

- func: linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: linalg
 variants: function

-- func: linalg_svd.U(Tensor self, bool full_matrices=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
+- func: linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
 python_module: linalg
+ variants: function
+ dispatch:
+   CPU, CUDA: linalg_vector_norm

-- func: linalg_svd(Tensor self, bool full_matrices=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
+- func: linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
 python_module: linalg
+ dispatch:
+   CPU, CUDA: linalg_vector_norm_out
+
+- func: linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+ python_module: linalg
+
+- func: linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ python_module: linalg
+
+- func: linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+ python_module: linalg
+
+- func: linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ python_module: linalg
+
+- func: linalg_svd.U(Tensor self, bool full_matrices=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
+ python_module: linalg
+
+- func: linalg_svd(Tensor self, bool full_matrices=True) -> (Tensor U, Tensor S, Tensor Vh)
+ python_module: linalg
 variants: function
+- func: linalg_svdvals(Tensor input) -> Tensor
+ python_module: linalg
+ variants: function
+
+- func: linalg_svdvals.out(Tensor input, *, Tensor(a!) out) -> Tensor(a!)
+ python_module: linalg
+ variants: function
+
- func: linalg_cond(Tensor self, Scalar? p=None) -> Tensor
 python_module: linalg
 variants: function
- dispatch:
- Math: linalg_cond

- func: linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: linalg
 variants: function
- dispatch:
- Math: linalg_cond_out

- func: linalg_cond.p_str(Tensor self, str p) -> Tensor
 python_module: linalg
 variants: function
- dispatch:
- Math: linalg_cond
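
# Editorial sketch: linalg_svd now drops compute_uv and returns Vh directly
# (assuming the PyTorch 1.9-era API):
#
#   import torch
#   A = torch.randn(3, 2)
#   U, S, Vh = torch.linalg.svd(A, full_matrices=False)
#   torch.dist(A, U @ torch.diag(S) @ Vh)  # ~0: reconstruction holds
#   torch.linalg.svdvals(A)                # singular values only, equals S
#   torch.linalg.vector_norm(A, ord=2)     # flattens, then takes the 2-norm
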
- func: linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: linalg
 variants: function
- dispatch:
- Math: linalg_cond_out

- func: linalg_pinv(Tensor self, float rcond=1e-15, bool hermitian=False) -> Tensor
 python_module: linalg
 variants: function
- dispatch:
- Math: linalg_pinv

- func: linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor
 python_module: linalg
 variants: function
- dispatch:
- Math: linalg_pinv

- func: linalg_pinv.out(Tensor self, float rcond=1e-15, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
 python_module: linalg
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 variants: function
- dispatch:
- Math: linalg_pinv_out

- func: linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
 python_module: linalg
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 variants: function
- dispatch:
- Math: linalg_pinv_out

- func: _linalg_solve_out_helper_(Tensor(a!) self, Tensor(b!) other, Tensor(c!) infos) -> Tensor(a!)
 variants: function
 dispatch:
   CPU: _linalg_solve_out_helper_cpu

@@ -9130,77 +9786,79 @@
- func: linalg_solve(Tensor input, Tensor other) -> Tensor
 python_module: linalg
 variants: function
 dispatch:
- DefaultBackend: linalg_solve
+ CompositeExplicitAutograd: linalg_solve

- func: linalg_solve.out(Tensor input, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
 python_module: linalg
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 dispatch:
- DefaultBackend: linalg_solve_out
+ CompositeExplicitAutograd: linalg_solve_out

- func: linalg_tensorinv(Tensor self, int ind=2) -> Tensor
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: linalg
 variants: function
- dispatch:
- Math: linalg_tensorinv

- func: linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: linalg
 variants: function
- dispatch:
- Math: linalg_tensorinv_out

- func: linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor
 python_module: linalg
 variants: function
- dispatch:
- Math: linalg_tensorsolve

- func: linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: linalg
 variants: function
- dispatch:
- Math: linalg_tensorsolve_out

- func: linalg_qr(Tensor self, str mode='reduced') -> (Tensor Q, Tensor R)
 python_module: linalg
 variants: function
 dispatch:
- DefaultBackend: linalg_qr
+ CompositeExplicitAutograd: linalg_qr

- func: linalg_qr.out(Tensor self, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: linalg
 variants: function
 dispatch:
- DefaultBackend: linalg_qr_out
+ CompositeExplicitAutograd: linalg_qr_out

- func: _linalg_qr_helper(Tensor self, str mode) -> (Tensor, Tensor)
 variants: function
 dispatch:
- CPU: _linalg_qr_helper_cpu
+ CPU: _linalg_qr_helper_default
   CUDA: _linalg_qr_helper_cuda

+- func: linalg_matrix_power(Tensor self, int n) -> Tensor
+ python_module: linalg
+
+- func: linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
+ python_module: linalg
+
- func: linalg_matrix_rank(Tensor self, float? tol=None, bool hermitian=False) -> Tensor
 python_module: linalg
 variants: function
- dispatch:
- Math: linalg_matrix_rank
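
# Editorial sketch for the new matrix_power entry and its neighbors (assuming
# the PyTorch 1.9-era API):
#
#   import torch
#   A = torch.randn(3, 3)
#   torch.linalg.matrix_power(A, 3)          # A @ A @ A
#   Q, R = torch.linalg.qr(A, mode='reduced')
#   torch.linalg.matrix_rank(torch.eye(3))   # tensor(3)
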
- func: linalg_matrix_rank.out(Tensor self, float? tol=None, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
- use_c10_dispatcher: hacky_wrapper_for_legacy_signatures
 python_module: linalg
 variants: function
- dispatch:
- Math: linalg_matrix_rank_out

+- func: linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor
+ python_module: linalg
+ variants: function
+
+- func: linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+ python_module: linalg
+ variants: function
+
+- func: linalg_multi_dot(Tensor[] tensors) -> Tensor
+ python_module: linalg
+
+- func: linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
+ python_module: linalg
+
## Functions that are only for testing
# It is undocumented and should not be used outside of tests.
- func: _test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor

# Note: this function is only for testing.
@@ -9230,6 +9888,28 @@
 python_module: nn

# Note: this function is only for testing.
- func: _test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor
 cpp_no_default_args: ['a', 'b']
+ python_module: nn
+
+- func: segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor
+ variants: function
+ dispatch:
+   CPU, CUDA: segment_reduce_kernel
+
+- func: segment_reduce_backward(Tensor grad, Tensor output, Tensor data, *, Tensor? lengths=None) -> Tensor
+ variants: function
+ dispatch:
+   CPU, CUDA: segment_reduce_backward_kernel
+
+- func: pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor
+ python_module: nn
+ variants: function
+
+- func: flatten_dense_tensors(Tensor[] tensors) -> Tensor
+ variants: function
+ python_module: nn
+
+- func: unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]
+ variants: function
+ python_module: nn
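
# Editorial sketch: pad_sequence is the schema behind
# torch.nn.utils.rnn.pad_sequence, and linalg_multi_dot chains matmuls with a
# cost-aware association order (assuming the PyTorch 1.9-era API):
#
#   import torch
#   from torch.nn.utils.rnn import pad_sequence
#   seqs = [torch.ones(3), torch.ones(5)]
#   pad_sequence(seqs, batch_first=True, padding_value=0.0).shape  # (2, 5)
#
#   A, B, C = torch.randn(2, 3), torch.randn(3, 4), torch.randn(4, 2)
#   torch.linalg.multi_dot([A, B, C])  # same result as A @ B @ C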