codegen/native_functions.yaml in torch-rb-0.12.2 vs codegen/native_functions.yaml in torch-rb-0.13.0
- old
+ new
@@ -242,18 +242,19 @@
variants: function
dispatch:
CPU: native_dropout_cpu
CUDA: native_dropout_cuda
NestedTensorCPU, NestedTensorCUDA: native_dropout_nested
- tags: nondeterministic_seeded
+ tags: [nondeterministic_seeded, core]
autogen: native_dropout.out
- func: native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor
dispatch:
CPU, NestedTensorCPU, NestedTensorCUDA: native_dropout_backward
CUDA: native_dropout_backward_cuda
autogen: native_dropout_backward.out
+ tags: pointwise
- func: _sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)
- func: _sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)
@@ -294,10 +295,11 @@
variants: function, method
dispatch:
CompositeExplicitAutograd: abs
SparseCPU, SparseCUDA: abs_sparse
SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr
+ tags: [core, pointwise]
- func: abs_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
@@ -310,10 +312,11 @@
dispatch:
CPU, CUDA: abs_out
MPS: abs_out_mps
SparseCPU, SparseCUDA: abs_sparse_out
SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr_out
+ tags: pointwise
# Note [Adding an alias]
# To add an alias do the following:
#
# 1) Copy the original functions native_functions.yaml entry, but replace the
@@ -333,12 +336,12 @@
# 5) Update the alias_map in torch/csrc/jit/passes/normalize_ops.cpp.
# 6) Add aliases argument to existing OpInfo/UnaryUfuncInfo or create new OpInfo/UnaryUfuncInfo entry
# in op_db list in torch/testing/_internal/common_methods_invocations.py
#
# See torch.absolute, an alias for torch.abs, as an example.
-
# Absolute, alias for abs
+
- func: absolute(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: absolute_(Tensor(a!) self) -> Tensor(a!)
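
The note above describes how alias entries are added to this file. As a minimal illustrative sketch (mirroring the absolute/abs entries in the hunk above and the arccos/acos entries further down in this diff, not a change introduced in 0.13.0), an alias pair typically consists of just the schema and variants, with no dispatch section, so it defaults to CompositeImplicitAutograd and decomposes to the original op:

    # sketch of an alias entry pair (cf. arccos as an alias of acos)
    - func: arccos(Tensor self) -> Tensor
      variants: function, method

    - func: arccos_(Tensor(a!) self) -> Tensor(a!)
      variants: function, method
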
@@ -352,16 +355,18 @@
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: angle
SparseCsrCPU, SparseCsrCUDA: angle_sparse_csr
+ tags: pointwise
- func: angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: angle_out
SparseCsrCPU, SparseCsrCUDA: angle_sparse_csr_out
+ tags: pointwise
- func: view_as_real(Tensor(a) self) -> Tensor(a)
variants: function
dispatch:
CPU, CUDA, MPS, Meta: view_as_real
@@ -375,25 +380,28 @@
variants: function, method
structured_delegate: sgn.out
dispatch:
SparseCPU, SparseCUDA: sgn_sparse
SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr
+ tags: pointwise
- func: sgn_(Tensor(a!) self) -> Tensor(a!)
variants: method
structured_delegate: sgn.out
dispatch:
SparseCPU, SparseCUDA: sgn_sparse_
SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr_
+ tags: pointwise
- func: sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sgn_out
SparseCPU, SparseCUDA: sgn_sparse_out
SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr_out
+ tags: pointwise
- func: chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
variants: method
- func: real(Tensor(a) self) -> Tensor(a)
@@ -420,22 +428,25 @@
SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr
autogen: _conj_physical.out
- func: conj_physical(Tensor self) -> Tensor
variants: function, method
+ tags: pointwise
- func: conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: conj_physical_out
SparseCPU, SparseCUDA: conj_physical_out_sparse
SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr_out
+ tags: pointwise
- func: conj_physical_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
dispatch:
CompositeExplicitAutograd: conj_physical_
SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr_
+ tags: pointwise
- func: resolve_conj(Tensor(a) self) -> Tensor(a)
variants: function, method
- func: resolve_neg(Tensor(a) self) -> Tensor(a)
@@ -448,23 +459,26 @@
- func: acos(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: acos.out
+ tags: [core, pointwise]
- func: acos_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: acos.out
+ tags: pointwise
- func: acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: acos_out
MPS: acos_out_mps
+ tags: pointwise
# arccos, alias of acos
- func: arccos(Tensor self) -> Tensor
variants: function, method
@@ -488,20 +502,22 @@
SparseCPU, SparseCUDA: add_sparse
SparseCsrCPU, SparseCsrCUDA: add_sparse_csr
MkldnnCPU: mkldnn_add
ZeroTensor: add_zerotensor
NestedTensorCPU, NestedTensorCUDA: NestedTensor_add_Tensor
+ tags: [core, pointwise]
- func: add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: add.out
dispatch:
SparseCPU, SparseCUDA: add_sparse_
SparseCsrCPU, SparseCsrCUDA: add_sparse_csr_
MkldnnCPU: mkldnn_add_
NestedTensorCPU, NestedTensorCUDA: NestedTensor_add__Tensor
+ tags: pointwise
- func: add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
@@ -513,10 +529,11 @@
SparseCUDA: add_out_sparse_cuda
SparseCsrCPU: add_out_sparse_csr_cpu
SparseCsrCUDA: add_out_sparse_csr_cuda
MkldnnCPU: mkldnn_add_out
MPS: add_out_mps
+ tags: pointwise
- func: _add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
variants: function
dispatch:
CPU: add_relu
@@ -546,17 +563,19 @@
- func: add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: add
+ tags: [core, pointwise]
- func: add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: add_
autogen: add.Scalar_out
+ tags: pointwise
- func: addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
structured_delegate: addmv.out
variants: function, method
@@ -575,20 +594,22 @@
- func: addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
variants: function, method
dispatch:
CPU, CUDA: addr
+ MPS: addr_mps
CompositeExplicitAutograd: math_addr
- func: addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
variants: method
dispatch:
CompositeExplicitAutograd: addr_
- func: addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: addr_out
+ MPS: addr_out_mps
CompositeExplicitAutograd: math_addr_out
- func: affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor
variants: function
dispatch:
@@ -596,10 +617,24 @@
autogen: affine_grid_generator.out
- func: affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor
variants: function
+- func: _is_all_true(Tensor self) -> Tensor
+ variants: function, method
+ dispatch:
+ CompositeExplicitAutograd: _is_all_true
+
+- func: _is_any_true(Tensor self) -> Tensor
+ variants: function, method
+ dispatch:
+ CompositeExplicitAutograd: _is_any_true
+
+# Note: this function is only for testing.
+- func: _test_check_tensor(Tensor self) -> Tensor
+ variants: function
+
- func: all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: all.out
variants: function, method
@@ -663,10 +698,11 @@
- func: arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: arange
cpp_no_default_args: ['step']
+ tags: core
- func: arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: arange_out
@@ -686,10 +722,11 @@
- func: argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
structured_delegate: argmax.out
device_check: NoCheck # TensorIterator
variants: function, method
+ tags: core
- func: argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU, CUDA: argmax_out
@@ -697,33 +734,37 @@
- func: argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
structured_delegate: argmin.out
device_check: NoCheck # TensorIterator
variants: function, method
+ tags: core
- func: argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU, CUDA: argmin_out
MPS: argmin_out_mps
- func: acosh(Tensor self) -> Tensor
variants: function, method
structured_delegate: acosh.out
+ tags: [core, pointwise]
- func: acosh_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
structured_delegate: acosh.out
+ tags: pointwise
- func: acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: acosh_out
MPS: acosh_out_mps
-
+ tags: pointwise
# arccosh, alias for acosh
+
- func: arccosh(Tensor self) -> Tensor
variants: function, method
- func: arccosh_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
@@ -734,26 +775,29 @@
variants: function, method
structured_delegate: asinh.out
dispatch:
SparseCPU, SparseCUDA: asinh_sparse
SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr
+ tags: [core, pointwise]
- func: asinh_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
structured_delegate: asinh.out
dispatch:
SparseCPU, SparseCUDA: asinh_sparse_
SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr_
+ tags: pointwise
- func: asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: asinh_out
MPS: asinh_out_mps
SparseCPU, SparseCUDA: asinh_sparse_out
SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr_out
+ tags: pointwise
# arcsinh, alias for asinh
- func: arcsinh(Tensor self) -> Tensor
variants: function, method
@@ -766,28 +810,31 @@
structured_delegate: atanh.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: atanh_sparse
SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr
+ tags: [core, pointwise]
- func: atanh_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: atanh.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: atanh_sparse_
SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr_
+ tags: pointwise
- func: atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: atanh_out
MPS: atanh_out_mps
SparseCPU, SparseCUDA: atanh_sparse_out
SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr_out
-
+ tags: pointwise
# arctanh, alias for atanh
+
- func: arctanh(Tensor self) -> Tensor
variants: function, method
- func: arctanh_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
@@ -801,45 +848,49 @@
Meta: as_strided_tensorimpl_meta_symint
MPS: as_strided_tensorimpl_mps
QuantizedCPU, QuantizedCUDA: as_strided_qtensorimpl
device_check: NoCheck
device_guard: False
+ tags: core
- func: as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: function, method
device_check: NoCheck
device_guard: False
tags: inplace_view
dispatch:
- CompositeExplicitAutogradNonFunctional: as_strided_
+ CompositeExplicitAutogradNonFunctional: as_strided__symint
- func: asin(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: asin.out
dispatch:
SparseCPU, SparseCUDA: asin_sparse
SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr
+ tags: [core, pointwise]
- func: asin_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: asin.out
dispatch:
SparseCPU, SparseCUDA: asin_sparse_
SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr_
+ tags: pointwise
- func: asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: asin_out
MPS: asin_out_mps
SparseCPU, SparseCUDA: asin_sparse_out
SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr_out
+ tags: pointwise
# arcsin, alias of asin
- func: arcsin(Tensor self) -> Tensor
variants: function, method
@@ -853,28 +904,31 @@
structured_delegate: atan.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: atan_sparse
SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr
+ tags: [core, pointwise]
- func: atan_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: atan.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: atan_sparse_
SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr_
+ tags: pointwise
- func: atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: atan_out
MPS: atan_out_mps
SparseCPU, SparseCUDA: atan_sparse_out
SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr_out
+ tags: pointwise
# arctan, alias of atan
- func: arctan(Tensor self) -> Tensor
variants: function, method
@@ -979,10 +1033,12 @@
# with `bernoulli(Tensor self, *, Generator? generator=None)` declaration.
- func: bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutogradNonFunctional: bernoulli
- func: bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor
- func: binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
device_check: NoCheck # TensorIterator
@@ -1028,132 +1084,152 @@
- func: bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
variants: function, method
dispatch:
CPU: _bincount_cpu
CUDA: _bincount_cuda
+ MPS: _bincount_mps
tags: dynamic_output_shape
autogen: bincount.out
- func: bitwise_not(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: bitwise_not.out
variants: function, method
+ tags: [core, pointwise]
- func: bitwise_not_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: bitwise_not.out
variants: method
+ tags: pointwise
- func: bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: bitwise_not_out
+ tags: pointwise
- func: copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: copysign_out
+ tags: pointwise
- func: copysign.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: copysign.out
+ tags: pointwise
- func: copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: copysign.out
- func: copysign.Scalar(Tensor self, Scalar other) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: copysign
+ tags: pointwise
- func: copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
variants: method
dispatch:
CompositeExplicitAutograd: copysign_
- func: copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: copysign_out
+ tags: pointwise
- func: logical_not(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: logical_not
+ tags: [core, pointwise]
- func: logical_not_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: logical_not_
+ tags: pointwise
- func: logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: logical_not_out
MPS: logical_not_out_mps
+ tags: pointwise
- func: logical_xor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: logical_xor
+ tags: pointwise
- func: logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: logical_xor_
+ tags: pointwise
- func: logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: logical_xor_out
MPS: logical_xor_out_mps
+ tags: pointwise
- func: logical_and(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: logical_and
+ tags: [core, pointwise]
- func: logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: logical_and_
+ tags: pointwise
- func: logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: logical_and_out
MPS: logical_and_out_mps
+ tags: pointwise
- func: logical_or(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: logical_or
+ tags: [core, pointwise]
- func: logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: logical_or_
+ tags: pointwise
- func: logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: logical_or_out
MPS: logical_or_out_mps
+ tags: pointwise
- func: blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: blackman_window
autogen: blackman_window.out
@@ -1167,11 +1243,13 @@
structured_delegate: bmm.out
variants: function, method
dispatch:
SparseCPU: bmm_sparse_cpu
SparseCUDA: bmm_sparse_cuda
- NestedTensorCPU, NestedTensorCUDA: bmm_nested
+ NestedTensorCPU: bmm_nested
+ NestedTensorCUDA: bmm_nested_cuda
+ tags: core
- func: bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
structured: True
variants: function
dispatch:
@@ -1184,12 +1262,14 @@
- func: broadcast_tensors(Tensor[] tensors) -> Tensor[]
device_check: NoCheck
device_guard: False
-- func: broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
+- func: broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
variants: function, method
+ dispatch:
+ CompositeImplicitAutograd: broadcast_to_symint
- func: _sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
variants: function
dispatch:
SparseCPU, SparseCUDA: sparse_broadcast_to
@@ -1197,10 +1277,11 @@
- func: cat(Tensor[] tensors, int dim=0) -> Tensor
structured_delegate: cat.out
dispatch:
SparseCPU, SparseCUDA: cat_sparse
QuantizedCPU: cat_quantized_cpu
+ tags: core
- func: cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
structured: True
precomputed:
- dim -> int dim, int valid, bool all_contiguous, bool all_same_dtype, bool all_same_sizes_and_stride, MemoryFormat memory_format
@@ -1243,28 +1324,31 @@
structured_delegate: ceil.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: ceil_sparse
SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr
+ tags: pointwise
- func: ceil_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: ceil.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: ceil_sparse_
SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr_
+ tags: pointwise
- func: ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: ceil_out
MPS: ceil_out_mps
SparseCPU, SparseCUDA: ceil_sparse_out
SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr_out
+ tags: pointwise
# alias for torch.linalg.multi_dot
- func: chain_matmul(Tensor[] matrices) -> Tensor
variants: function
@@ -1278,16 +1362,23 @@
- func: chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
variants: function, method
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeImplicitAutograd: chunk
+ NestedTensorCPU, NestedTensorCUDA: chunk_nested_tensor
-- func: tensor_split.sections(Tensor(a -> *) self, int sections, int dim=0) -> Tensor(a)[]
+- func: tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
variants: function, method
+ dispatch:
+ CompositeImplicitAutograd: tensor_split_sections_symint
-- func: tensor_split.indices(Tensor(a -> *) self, int[] indices, int dim=0) -> Tensor(a)[]
+- func: tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
variants: function, method
+ dispatch:
+ CompositeImplicitAutograd: tensor_split_indices_symint
- func: tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
variants: function, method
- func: clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
@@ -1295,127 +1386,150 @@
variants: function, method
cpp_no_default_args: ['min']
structured_delegate: clamp.out
dispatch:
QuantizedCPU: clamp_quantized_cpu
+ tags: [core, pointwise]
- func: clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
variants: function, method
structured_delegate: clamp.Tensor_out
+ tags: pointwise
- func: clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
cpp_no_default_args: ['min']
structured_delegate: clamp.out
+ tags: pointwise
- func: clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
variants: function, method
structured_delegate: clamp.Tensor_out
+ tags: pointwise
- func: clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
cpp_no_default_args: ['min']
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: clamp_out
MPS: clamp_out_mps
+ tags: pointwise
- func: clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: clamp_Tensor_out
MPS: clamp_Tensor_out_mps
+ tags: pointwise
- func: clamp_max(Tensor self, Scalar max) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: clamp_max.out
+ tags: pointwise
- func: clamp_max.Tensor(Tensor self, Tensor max) -> Tensor
variants: function, method
structured_delegate: clamp_max.Tensor_out
+ tags: pointwise
- func: clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: clamp_max.out
+ tags: pointwise
- func: clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)
variants: function, method
structured_delegate: clamp_max.Tensor_out
+ tags: pointwise
- func: clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: clamp_max_out
MPS: clamp_max_out_mps
+ tags: pointwise
- func: clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: clamp_max_Tensor_out
MPS: clamp_max_Tensor_out_mps
+ tags: pointwise
- func: clamp_min(Tensor self, Scalar min) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: clamp_min.out
+ tags: pointwise
- func: clamp_min.Tensor(Tensor self, Tensor min) -> Tensor
variants: function, method
structured_delegate: clamp_min.Tensor_out
+ tags: pointwise
- func: clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: clamp_min.out
+ tags: pointwise
- func: clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)
variants: function, method
structured_delegate: clamp_min.Tensor_out
+ tags: pointwise
- func: clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: clamp_min_out
MPS: clamp_min_out_mps
+ tags: pointwise
- func: clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: clamp_min_Tensor_out
MPS: clamp_min_Tensor_out_mps
+ tags: pointwise
# clip is an alias for clamp
- func: clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
cpp_no_default_args: ['min']
variants: function, method
+ tags: pointwise
- func: clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
variants: function, method
+ tags: pointwise
- func: clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
cpp_no_default_args: ['min']
variants: function, method
+ tags: pointwise
- func: clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
variants: function, method
+ tags: pointwise
- func: clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
cpp_no_default_args: ['min']
+ tags: pointwise
- func: clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
- func: cudnn_is_acceptable(Tensor self) -> bool
device_check: NoCheck
@@ -1437,30 +1551,33 @@
- func: polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: polar_out
-- func: constant_pad_nd(Tensor self, int[] pad, Scalar value=0) -> Tensor
+- func: constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: constant_pad_nd
MPS: constant_pad_nd_mps
autogen: constant_pad_nd.out
+ tags: core
- func: contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)
variants: method
manual_cpp_binding: True
-- func: convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor
+- func: convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor
dispatch:
CompositeExplicitAutograd: convolution
autogen: convolution.out
+ tags: core
-- func: convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+- func: convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
CompositeExplicitAutograd, CUDA: convolution_backward
autogen: convolution_backward.out
+ tags: core
- func: convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor
dispatch:
CompositeExplicitAutograd: convolution_overrideable
autogen: convolution_overrideable.out
@@ -1468,20 +1585,20 @@
- func: convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
dispatch:
CompositeExplicitAutograd: convolution_backward_overrideable
autogen: convolution_backward_overrideable.out
-- func: _convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor
+- func: _convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor
dispatch:
CompositeExplicitAutograd: _convolution
autogen: _convolution.out
- func: _convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor
- func: _convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor
-- func: _convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+- func: _convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
- func: conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor
- func: conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor
@@ -1510,20 +1627,23 @@
- func: conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor
- func: copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
variants: function
+ dispatch:
+ CompositeExplicitAutogradNonFunctional: copy
- func: copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
dispatch:
MkldnnCPU: copy_mkldnn_
SparseCPU, SparseCUDA: copy_sparse_wrapper_
CompositeExplicitAutograd: copy_
SparseCsrCPU, SparseCsrCUDA: copy_sparse_compressed_
+ NestedTensorCPU, NestedTensorCUDA: copy_nested_
autogen: copy.out
- func: _copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor
dispatch:
MPS: _copy_from_mps
@@ -1538,41 +1658,47 @@
- func: cos(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: cos.out
+ tags: [core, pointwise]
- func: cos_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: cos.out
+ tags: pointwise
- func: cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: cos_out
MPS: cos_out_mps
+ tags: pointwise
- func: cosh(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: cosh.out
+ tags: [core, pointwise]
- func: cosh_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: cosh.out
+ tags: pointwise
- func: cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: cosh_out
MPS: cosh_out_mps
+ tags: pointwise
- func: cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
- func: count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor
variants: function, method
@@ -1752,10 +1878,11 @@
- func: cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
structured: True
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: cumsum_out
+ MPS: cumsum_out_mps
- func: cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
@@ -1777,14 +1904,17 @@
- func: _ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
dispatch:
CPU: ctc_loss_cpu
CUDA: ctc_loss_gpu
autogen: _ctc_loss.out
+ tags: dynamic_output_shape # the shape of second output is data dependent
- func: _ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
dispatch:
CPU, CUDA: ctc_loss_tensor
+ autogen: _ctc_loss.Tensor_out
+ tags: dynamic_output_shape # the shape of second output is data dependent
- func: _ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
dispatch:
CPU: ctc_loss_backward_cpu
CUDA: ctc_loss_backward_gpu
@@ -1795,11 +1925,11 @@
CPU, CUDA: ctc_loss_backward_tensor
- func: diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
variants: function, method
dispatch:
- CompositeExplicitAutograd: diag_embed
+ CompositeExplicitAutogradNonFunctional: diag_embed
autogen: diag_embed.out
- func: diagflat(Tensor self, int offset=0) -> Tensor
variants: function, method
@@ -1858,74 +1988,86 @@
variants: function, method
structured_delegate: div.out
dispatch:
SparseCPU, SparseCUDA: div_sparse
ZeroTensor: div_zerotensor
+ NestedTensorCPU, NestedTensorCUDA: NestedTensor_div_Tensor
+ tags: [core, pointwise]
- func: div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: div.out
dispatch:
SparseCPU, SparseCUDA: div_sparse_
+ tags: pointwise
- func: div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: div_out
MPS: div_out_mps
SparseCPU, SparseCUDA: div_out_sparse_zerodim
+ tags: pointwise
- func: div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: div.out_mode
dispatch:
SparseCPU, SparseCUDA: div_sparse
+ tags: pointwise
- func: div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: div.out_mode
dispatch:
SparseCPU, SparseCUDA: div_sparse_
+ tags: pointwise
- func: div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: div_out_mode
MPS: div_out_mode_mps
SparseCPU, SparseCUDA: div_out_sparse_zerodim
+ tags: pointwise
# For C++ only, until we have conversion from C++ numbers to Tensor
- func: div.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: div
+ NestedTensorCPU, NestedTensorCUDA: NestedTensor_div_Scalar
+ tags: [core, pointwise]
- func: div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: div_
autogen: div.Scalar_out
+ tags: pointwise
- func: div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: div
+ tags: pointwise
- func: div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
variants: method
dispatch:
CompositeExplicitAutograd: div_
autogen: div.Scalar_mode_out
+ tags: pointwise
# divide, alias for div
- func: divide.Tensor(Tensor self, Tensor other) -> Tensor
variants: function, method
@@ -1956,10 +2098,11 @@
# true_divide, an alias for div
- func: true_divide.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
+ tags: pointwise
- func: true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
@@ -1995,26 +2138,27 @@
dispatch:
CompositeExplicitAutograd: vdot_out
- func: einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor
-- func: embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
+- func: embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
dispatch:
- CompositeExplicitAutograd: embedding
+ CompositeExplicitAutograd: embedding_symint
NestedTensorCPU, NestedTensorCUDA: NestedTensor_embedding
autogen: embedding.out
-- func: embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, int padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor
+- func: embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor
dispatch:
CompositeImplicitAutograd: embedding_backward_symint
-- func: embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor
+- func: embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor
dispatch:
CPU: embedding_dense_backward_cpu
CUDA: embedding_dense_backward_cuda
MPS: embedding_dense_backward_mps
autogen: embedding_dense_backward.out
+ tags: core
- func: embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)
dispatch:
CPU: embedding_renorm_cpu_
CUDA: embedding_renorm_cuda_
@@ -2057,15 +2201,19 @@
dispatch:
CPU: _embedding_bag_cpu
CUDA: _embedding_bag_cuda
autogen: _embedding_bag.out
-- func: _embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
+- func: _embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
+ dispatch:
+ CompositeImplicitAutograd: _embedding_bag_backward_symint
-- func: _embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
+- func: _embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
+ dispatch:
+ CompositeImplicitAutograd: _embedding_bag_sparse_backward_symint
-- func: _embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
+- func: _embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
dispatch:
CPU: _embedding_bag_dense_backward_cpu
CUDA: _embedding_bag_dense_backward_cuda
autogen: _embedding_bag_dense_backward.out
@@ -2152,11 +2300,12 @@
variants: method
device_check: NoCheck
device_guard: False
tags: inplace_view
dispatch:
- CPU, Meta: resize_
+ Meta: resize__symint
+ CPU: resize_
CUDA: resize_cuda_
MPS: resize_mps_
QuantizedCPU: quantized_resize_cpu_
SparseCsrCPU, SparseCsrCUDA: resize_sparse_csr_
autogen: resize, resize.out
@@ -2188,128 +2337,147 @@
dispatch:
CompositeExplicitAutograd: empty_like
QuantizedCPU, QuantizedCUDA: empty_like_quantized
SparseCPU, SparseCUDA, SparseMeta: empty_like_sparse_coo
SparseCsrCPU, SparseCsrCUDA: empty_like_sparse_csr
+ NestedTensorCPU, NestedTensorCUDA: empty_like_nested
autogen: empty_like.out
- func: empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CPU: empty_strided_cpu
CUDA: empty_strided_cuda
MPS: empty_strided_mps
Meta: empty_strided_meta_symint
QuantizedCPU, QuantizedCUDA: empty_strided_unknown_quantized
autogen: empty_strided.out
+ tags: core
- func: erf(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: erf.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: erf_sparse
SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr
+ tags: [core, pointwise]
- func: erf_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: erf.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: erf_sparse_
SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr_
+ tags: pointwise
- func: erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: erf_out
MPS: erf_out_mps
SparseCPU, SparseCUDA: erf_sparse_out
SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr_out
+ tags: pointwise
- func: erfc(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: erfc.out
variants: function, method
+ tags: pointwise
- func: erfc_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: erfc.out
variants: function, method
+ tags: pointwise
- func: erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: erfc_out
+ tags: pointwise
- func: exp(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: exp.out
variants: function, method
+ tags: [core, pointwise]
- func: exp_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: exp.out
variants: function, method
+ tags: pointwise
- func: exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: exp_out
MPS: exp_out_mps
+ tags: pointwise
- func: exp2(Tensor self) -> Tensor
structured_delegate: exp2.out
variants: function, method
+ tags: pointwise
- func: exp2_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: exp2.out
variants: function, method
+ tags: pointwise
- func: exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: exp2_out
MPS: exp2_out_mps
+ tags: pointwise
- func: expm1(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: expm1.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: expm1_sparse
SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr
+ tags: pointwise
- func: expm1_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: expm1.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: expm1_sparse_
SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr_
+ tags: pointwise
- func: expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: expm1_out
+ MPS: expm1_out_mps
SparseCPU, SparseCUDA: expm1_sparse_out
SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr_out
+ tags: pointwise
- func: expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: expand
+ tags: core
- func: expand_as(Tensor(a) self, Tensor other) -> Tensor(a)
variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
device_check: NoCheck
device_guard: False
@@ -2355,10 +2523,11 @@
- func: fill.Scalar(Tensor self, Scalar value) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: fill
+ tags: core
- func: fill.Tensor(Tensor self, Tensor value) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: fill
@@ -2370,66 +2539,74 @@
CPU, CUDA: fill_
MPS: fill_scalar_mps
QuantizedCPU, QuantizedCUDA: fill_quantized_
Meta: fill_meta_
SparseCsrCPU, SparseCsrCUDA: fill_sparse_csr_
+ NestedTensorCPU, NestedTensorCUDA: fill_nested_
autogen: fill.Scalar_out
- func: fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: fill_
MPS: fill_tensor_mps_
QuantizedCPU, QuantizedCUDA: fill_quantized_
Meta: fill_meta_
+ NestedTensorCPU, NestedTensorCUDA: fill_nested_
autogen: fill.Tensor_out
- func: floor(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: floor.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: floor_sparse
SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr
+ tags: [core, pointwise]
- func: floor_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: floor.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: floor_sparse_
SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr_
+ tags: pointwise
- func: floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: floor_out
MPS: floor_out_mps
SparseCPU, SparseCUDA: floor_sparse_out
SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr_out
+ tags: pointwise
- func: floor_divide(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: floor_divide
+ MPS: floor_divide_mps
SparseCPU, SparseCUDA: floor_divide_sparse
- func: floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CPU, CUDA: floor_divide_
+ MPS: floor_divide_mps_
SparseCPU, SparseCUDA: floor_divide_sparse_
- func: floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: floor_divide_out
+ MPS: floor_divide_out_mps
SparseCPU, SparseCUDA: floor_divide_out_sparse_zerodim
- func: floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
@@ -2440,22 +2617,34 @@
- func: frac(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: frac.out
variants: function, method
+ dispatch:
+ SparseCPU, SparseCUDA: frac_sparse
+ SparseCsrCPU, SparseCsrCUDA: frac_sparse_csr
+ tags: pointwise
- func: frac_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: frac.out
variants: function, method
+ dispatch:
+ SparseCPU, SparseCUDA: frac_sparse_
+ SparseCsrCPU, SparseCsrCUDA: frac_sparse_csr_
+ tags: pointwise
- func: frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: frac_out
+ MPS: frac_out_mps
+ SparseCPU, SparseCUDA: frac_sparse_out
+ SparseCsrCPU, SparseCsrCUDA: frac_sparse_csr_out
+ tags: pointwise
- func: full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
device_check: NoCheck
device_guard: False
dispatch:
@@ -2463,10 +2652,11 @@
autogen: full.names_out
- func: full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: full
+ tags: core
- func: full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: full_out
@@ -2485,28 +2675,32 @@
- func: gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: gcd_out
+ tags: pointwise
- func: gcd(Tensor self, Tensor other) -> Tensor
structured_delegate: gcd.out
variants: function, method
+ tags: pointwise
- func: gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: gcd.out
variants: function, method
- func: lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: lcm_out
+ tags: pointwise
- func: lcm(Tensor self, Tensor other) -> Tensor
structured_delegate: lcm.out
variants: function, method
+ tags: pointwise
- func: lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: lcm.out
variants: function, method
@@ -2531,11 +2725,13 @@
- func: grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
dispatch:
CPU, QuantizedCPU: grid_sampler_2d_cpu
CUDA: grid_sampler_2d_cuda
+ MPS: grid_sampler_2d_mps
autogen: grid_sampler_2d.out
+ tags: core
# `grid_sampler_2d_backward` takes in `output_mask` to optimize performance for
# the case where `input` doesn't require gradient. Gradient for `grid` is always
# computed (only `output_mask[0]` is checked by the implementations).
- func: grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
@@ -2619,15 +2815,17 @@
- func: native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
dispatch:
CPU, CUDA: native_group_norm
CompositeExplicitAutograd: math_group_norm
autogen: native_group_norm.out
+ tags: core
- func: native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
CPU, CUDA: native_group_norm_backward
autogen: native_group_norm_backward.out
+ tags: core
# Real to complex forward FFT
- func: _fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor
variants: function
dispatch:
@@ -2652,17 +2850,17 @@
dispatch:
CPU: _fft_c2r_mkl_out
CUDA: _fft_c2r_cufft_out
# Standard complex to complex FFT (forward or backward)
-- func: _fft_c2c(Tensor self, int[] dim, int normalization, bool forward) -> Tensor
+- func: _fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor
variants: function
dispatch:
CPU: _fft_c2c_mkl
CUDA: _fft_c2c_cufft
-- func: _fft_c2c.out(Tensor self, int[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
+- func: _fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
CPU: _fft_c2c_mkl_out
CUDA: _fft_c2c_cufft_out
@@ -2794,10 +2992,11 @@
dispatch:
CPU, CUDA, MPS: isnan
SparseCPU, SparseCUDA: isnan_sparse
SparseCsrCPU, SparseCsrCUDA: isnan_sparse_csr
autogen: isnan.out
+ tags: [core, pointwise]
- func: is_distributed(Tensor self) -> bool
variants: function, method
device_check: NoCheck
device_guard: False
@@ -2877,43 +3076,52 @@
- func: kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
- func: kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
-- func: layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
+- func: layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
+ dispatch:
+ CompositeImplicitAutograd: layer_norm_symint
- func: native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
dispatch:
CPU: layer_norm_cpu
CUDA: layer_norm_cuda
MPS: layer_norm_mps
CompositeExplicitAutograd: math_native_layer_norm
+ NestedTensorCPU, NestedTensorCUDA: nested_layer_norm
autogen: native_layer_norm.out
+ tags: core
- func: native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
CPU: layer_norm_backward_cpu
CUDA: layer_norm_backward_cuda
MPS: layer_norm_backward_mps
autogen: native_layer_norm_backward.out
+ tags: core
- func: nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: nan_to_num
SparseCPU, SparseCUDA: nan_to_num_sparse
+ tags: pointwise
- func: nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
variants: function, method
dispatch:
CompositeExplicitAutograd: nan_to_num_
SparseCPU, SparseCUDA: nan_to_num_sparse_
+ tags: pointwise
- func: nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: nan_to_num_out
+ MPS: nan_to_num_out_mps
SparseCPU, SparseCUDA: nan_to_num_sparse_out
+ tags: pointwise
- func: linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
python_module: nn
dispatch:
CompositeImplicitAutograd: linear
@@ -2971,12 +3179,14 @@
- func: ldexp.Tensor(Tensor self, Tensor other) -> Tensor
variants: function, method
- func: ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
variants: function, method
+ tags: pointwise
- func: ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ tags: pointwise
- func: linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: linspace
@@ -2988,130 +3198,150 @@
- func: log(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: log.out
variants: function, method
+ tags: [core, pointwise]
- func: log_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: log.out
variants: function, method
+ tags: pointwise
- func: log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: log_out
MPS: log_out_mps
+ tags: pointwise
- func: log10(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: log10.out
variants: function, method
+ tags: pointwise
- func: log10_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: log10.out
variants: function, method
+ tags: pointwise
- func: log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: log10_out
MPS: log10_out_mps
+ tags: pointwise
- func: log1p(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: log1p.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: log1p_sparse
SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr
+ tags: pointwise
- func: log1p_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: log1p.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: log1p_sparse_
SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr_
+ tags: pointwise
- func: log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: log1p_out
MPS: log1p_out_mps
SparseCPU, SparseCUDA: log1p_sparse_out
SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr_out
+ tags: pointwise
- func: log2(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: log2.out
variants: function, method
+ tags: pointwise
- func: log2_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: log2.out
variants: function, method
+ tags: pointwise
- func: log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: log2_out
MPS: log2_out_mps
+ tags: pointwise
- func: logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: logaddexp_out
MPS: logaddexp_out_mps
+ tags: pointwise
- func: logaddexp(Tensor self, Tensor other) -> Tensor
variants: method, function
structured_delegate: logaddexp.out
+ tags: pointwise
- func: logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: logaddexp2_out
MPS: logaddexp2_out_mps
+ tags: pointwise
- func: logaddexp2(Tensor self, Tensor other) -> Tensor
variants: method, function
structured_delegate: logaddexp2.out
+ tags: pointwise
- func: xlogy.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: xlogy.OutTensor
variants: function, method
+ tags: pointwise
- func: xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: xlogy
+ tags: pointwise
- func: xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: xlogy
+ tags: pointwise
# xlogy: inplace variant
- func: xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: xlogy.OutTensor
+ tags: pointwise
- func: xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
@@ -3123,22 +3353,25 @@
structured: True
structured_inherits: TensorIteratorBase
variants: function
dispatch:
CPU, CUDA: xlogy_out
+ tags: pointwise
- func: xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: xlogy_out
+ tags: pointwise
- func: xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: xlogy_out
+ tags: pointwise
- func: logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: logspace
@@ -3159,10 +3392,11 @@
- func: log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
variants: function, method
- func: _log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
structured_delegate: _log_softmax.out
+ tags: core
- func: _log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU: log_softmax_cpu_out
@@ -3289,10 +3523,11 @@
device_check: NoCheck # TensorIterator
structured_delegate: max.dim_max
variants: function, method
dispatch:
QuantizedCPU, QuantizedCUDA: qmax
+ tags: core
- func: max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
device_check: NoCheck # TensorIterator
structured: True
precomputed:
@@ -3306,18 +3541,21 @@
variants: function, method
- func: max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
device_check: NoCheck # TensorIterator
-- func: value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, int[] sizes, bool keepdim) -> Tensor
+- func: value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeImplicitAutograd: value_selecting_reduction_backward_symint
- func: amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
variants: function, method
structured_delegate: amax.out
+ tags: core
- func: amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU, CUDA: amax_out
@@ -3327,23 +3565,18 @@
- func: max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
- func: max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
- func: max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
-
-# TODO: Add this function to MPS dispatch key so that we avoid declaring it in
-# native_functions.yaml
-# https://github.com/pytorch/pytorch/issues/77394
-- func: _mps_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
- MPS: _mps_max_pool2d
- autogen: _mps_max_pool2d.out
+ CompositeImplicitAutograd: max_pool2d
+ MPS: mps_max_pool2d
-- func: mps_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
+- func: max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
MPS: mps_max_pool2d_backward
- autogen: mps_max_pool2d_backward.out
+ autogen: max_pool2d_backward.out
- func: mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
MkldnnCPU: mkldnn_max_pool2d
autogen: mkldnn_max_pool2d.out
@@ -3395,10 +3628,11 @@
structured_delegate: mean.out
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
QuantizedCPU: mean_quantized_cpu
+ tags: core
- func: mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
structured: True
device_check: NoCheck # TensorIterator
dispatch:
@@ -3423,10 +3657,11 @@
- func: median(Tensor self) -> Tensor
variants: function, method
dispatch:
CPU: median_cpu
CUDA: median_cuda
+ MPS: median_mps
autogen: median.out
- func: median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
dispatch:
@@ -3434,10 +3669,11 @@
- func: median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
dispatch:
CPU: median_out_cpu
CUDA: median_out_cuda
+ MPS: median_out_mps
- func: median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
- func: median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
@@ -3468,10 +3704,11 @@
device_check: NoCheck # TensorIterator
structured_delegate: min.dim_min
variants: function, method
dispatch:
QuantizedCPU, QuantizedCUDA: qmin
+ tags: core
- func: min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
device_check: NoCheck # TensorIterator
structured: True
precomputed:
@@ -3488,10 +3725,11 @@
device_check: NoCheck # TensorIterator
- func: amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
variants: function, method
structured_delegate: amin.out
+ tags: core
- func: amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU, CUDA: amin_out
@@ -3508,36 +3746,46 @@
- func: mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
MPS: mps_convolution_backward
autogen: mps_convolution_backward.out
-- func: mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor
+- func: mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor
dispatch:
CompositeExplicitAutograd: mkldnn_convolution
autogen: mkldnn_convolution.out
+- func: mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)
+ dispatch:
+ CPU: mkldnn_rnn_layer
+ autogen: mkldnn_rnn_layer.out
+
+- func: mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
+ dispatch:
+ CPU: mkldnn_rnn_layer_backward
+ autogen: mkldnn_rnn_layer_backward.out
+
- func: miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
dispatch:
CUDA: miopen_batch_norm
autogen: miopen_batch_norm.out
- func: miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)
dispatch:
CUDA: miopen_batch_norm_backward
autogen: miopen_batch_norm_backward.out
-- func: miopen_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
+- func: miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
dispatch:
CUDA: miopen_convolution
autogen: miopen_convolution.out
-- func: miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
+- func: miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
dispatch:
CUDA: miopen_convolution_transpose
autogen: miopen_convolution_transpose.out
-- func: miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
+- func: miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
dispatch:
CUDA: miopen_depthwise_convolution
autogen: miopen_depthwise_convolution.out
- func: miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
@@ -3562,10 +3810,11 @@
structured_delegate: mm.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: _sparse_mm
SparseCsrCPU, SparseCsrCUDA: _sparse_csr_mm
+ tags: core
- func: mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU: mm_out_cpu
@@ -3575,22 +3824,19 @@
SparseCsrCPU, SparseCsrCUDA: _sparse_csr_mm_out
- func: _sparse_mm(Tensor sparse, Tensor dense) -> Tensor
python_module: sparse
+- func: _sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor
+ python_module: sparse
+
- func: _sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor
dispatch:
SparseCPU: sparse_sparse_matmul_cpu
SparseCUDA: sparse_sparse_matmul_cuda
autogen: _sparse_sparse_matmul.out
-- func: _sparse_mask_helper(Tensor t, Tensor mask_indices) -> Tensor
- dispatch:
- SparseCPU: sparse_mask_helper_cpu
- SparseCUDA: sparse_mask_helper_cuda
- autogen: _sparse_mask_helper.out
-
- func: mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
dispatch:
CPU, CUDA: mode
@@ -3611,20 +3857,22 @@
SparseCPU, SparseCUDA: mul_sparse
SparseCsrCPU, SparseCsrCUDA: mul_sparse_csr
MkldnnCPU: mkldnn_mul
ZeroTensor: mul_zerotensor
NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul_Tensor
+ tags: [core, pointwise]
- func: mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: mul.out
variants: method
dispatch:
SparseCPU, SparseCUDA: mul_sparse_
SparseCsrCPU, SparseCsrCUDA: mul_sparse_csr_
MkldnnCPU: mkldnn_mul_
NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul__Tensor
+ tags: pointwise
- func: mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
@@ -3633,30 +3881,33 @@
MPS: mul_out_mps
SparseCPU: mul_out_sparse_cpu
SparseCUDA: mul_out_sparse_cuda
SparseCsrCPU, SparseCsrCUDA: mul_out_sparse_csr
MkldnnCPU: mkldnn_mul_out
-
+ tags: pointwise
# For C++ only, until we have conversion from C++ numbers to Tensor
+
- func: mul.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: mul
SparseCsrCPU, SparseCsrCUDA: mul_scalar_sparse_csr
NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul_Scalar
+ tags: [core, pointwise]
- func: mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: mul_
SparseCsrCPU, SparseCsrCUDA: mul__scalar_sparse_csr
NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul__Scalar
autogen: mul.Scalar_out
-
+ tags: pointwise
# multiply, alias for mul
+
- func: multiply.Tensor(Tensor self, Tensor other) -> Tensor
variants: function, method
- func: multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
variants: method
@@ -3680,57 +3931,95 @@
CompositeExplicitAutograd: mv_out
- func: mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: mvlgamma_out
+ tags: pointwise
- func: mvlgamma(Tensor self, int p) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: mvlgamma
+ tags: pointwise
- func: mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: mvlgamma_
+ tags: pointwise
- func: narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
variants: function, method
dispatch:
CPU: narrow_copy_dense_cpu
SparseCPU, SparseCUDA: narrow_copy_sparse
- CompositeExplicitAutogradNonFunctional: narrow_copy_dense
+ CompositeExplicitAutogradNonFunctional: narrow_copy_dense_symint
tags: view_copy
- func: narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: narrow_copy_dense_cpu_out
-- func: narrow(Tensor(a) self, int dim, int start, int length) -> Tensor(a)
+- func: narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeImplicitAutograd: narrow_symint
-- func: narrow.Tensor(Tensor(a) self, int dim, Tensor start, int length) -> Tensor(a)
+- func: narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeImplicitAutograd: narrow_tensor_symint
- func: native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
dispatch:
CPU: batch_norm_cpu
CUDA: batch_norm_cuda
MPS: batch_norm_mps
MkldnnCPU: mkldnn_batch_norm
+ tags: core
- func: native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
dispatch:
CUDA: batch_norm_cuda_out
MPS: batch_norm_mps_out
+ CPU: batch_norm_cpu_out
+# TODO: In 2 weeks, we should make native_batch_norm composite implicit so that this correct schema percolates correctly through our dispatching
+- func: _native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
+ dispatch:
+ CPU: _batch_norm_legit_cpu
+ CUDA: _batch_norm_legit_cuda
+ MPS: _batch_norm_legit_mps
+ MkldnnCPU: _mkldnn_batch_norm_legit
+ autogen: _native_batch_norm_legit_functional
+
+- func: _native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))
+ dispatch:
+ CPU: _batch_norm_legit_cpu_out
+ CUDA: _batch_norm_legit_cuda_out
+ MPS: _batch_norm_legit_mps_out
+
+- func: _native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
+ dispatch:
+ CPU: _batch_norm_legit_no_stats_cpu
+ CUDA: _batch_norm_legit_no_stats_cuda
+ MPS: _batch_norm_legit_no_stats_mps
+ MkldnnCPU: _mkldnn_batch_norm_legit_no_stats
+ tags: core
+
+- func: _native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ dispatch:
+ CPU: _batch_norm_legit_no_stats_cpu_out
+ CUDA: _batch_norm_legit_no_stats_cuda_out
+ MPS: _batch_norm_legit_no_stats_mps_out
+
- func: batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
dispatch:
CUDA: batch_norm_stats_cuda
autogen: batch_norm_stats.out
@@ -3779,11 +4068,11 @@
- func: is_vulkan_available() -> bool
- func: _nnpack_available() -> bool
-- func: _nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, int[2] padding, int[2] stride=1) -> Tensor
+- func: _nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: _nnpack_spatial_convolution
autogen: _nnpack_spatial_convolution.out
@@ -3805,10 +4094,11 @@
- func: ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
dispatch:
# NB: Although this composite mutates on the inside, it is
# non-differentiable so NonFunctional doesn't apply
CompositeExplicitAutograd: ones_like
+ NestedTensorCPU, NestedTensorCUDA: ones_like
autogen: ones_like.out
- func: pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
- func: cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
@@ -3819,10 +4109,11 @@
autogen: _euclidean_dist.out
- func: _cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
dispatch:
CPU, CUDA: _cdist_forward
+ MPS: _cdist_forward_mps
autogen: _cdist_forward.out
- func: _cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
dispatch:
CPU, CUDA: _cdist_backward
@@ -3847,10 +4138,11 @@
variants: function, method
dispatch:
CompositeExplicitAutograd: permute
MPS: permute_mps
SparseCPU, SparseCUDA: permute_sparse_coo
+ tags: core
- func: movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
variants: function, method
- func: movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
@@ -3938,120 +4230,133 @@
- func: rad2deg(Tensor self) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: rad2deg
+ SparseCPU, SparseCUDA: rad2deg_sparse
SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr
- func: rad2deg_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
dispatch:
CompositeExplicitAutograd: rad2deg_
+ SparseCPU, SparseCUDA: rad2deg_sparse_
SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr_
- func: rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: rad2deg_out
+ SparseCPU, SparseCUDA: rad2deg_sparse_out
SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr_out
- func: deg2rad(Tensor self) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: deg2rad
+ SparseCPU, SparseCUDA: deg2rad_sparse
+ SparseCsrCPU, SparseCsrCUDA: deg2rad_sparse_csr
+ tags: pointwise
- func: deg2rad_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
dispatch:
CompositeExplicitAutograd: deg2rad_
+ SparseCPU, SparseCUDA: deg2rad_sparse_
+ SparseCsrCPU, SparseCsrCUDA: deg2rad_sparse_csr_
+ tags: pointwise
- func: deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: deg2rad_out
+ SparseCPU, SparseCUDA: deg2rad_sparse_out
+ SparseCsrCPU, SparseCsrCUDA: deg2rad_sparse_csr_out
+ tags: pointwise
- func: scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: scalar_tensor
autogen: scalar_tensor.out
+ tags: core
-- func: rand.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: rand
autogen: rand.names_out
tags: nondeterministic_seeded
-- func: rand.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
device_check: NoCheck
device_guard: False
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: rand
autogen: rand.generator_with_names_out
-- func: rand(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: rand
-- func: rand.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: rand
-- func: rand.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
+- func: rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: rand_out
-- func: rand.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+- func: rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
- func: rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
# NB: Although this composite mutates on the inside, it is
# non-differentiable so NonFunctional doesn't apply
CompositeExplicitAutograd: rand_like
autogen: rand_like.out
-- func: randint(int high, int[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint
-- func: randint.generator(int high, int[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint
-- func: randint.low(int low, int high, int[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint
-- func: randint.low_generator(int low, int high, int[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint
-- func: randint.out(int high, int[] size, *, Tensor(a!) out) -> Tensor(a!)
+- func: randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint_out
-- func: randint.generator_out(int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+- func: randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint_out
-- func: randint.low_out(int low, int high, int[] size, *, Tensor(a!) out) -> Tensor(a!)
+- func: randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint_out
-- func: randint.low_generator_out(int low, int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+- func: randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint_out
- func: randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
@@ -4068,40 +4373,40 @@
# NB: Although this composite mutates on the inside, it is
# non-differentiable so NonFunctional doesn't apply
CompositeExplicitAutograd: randint_like
autogen: randint_like.low_dtype_out
-- func: randn(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randn
-- func: randn.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randn
-- func: randn.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: randn
autogen: randn.names_out
-- func: randn.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: randn
autogen: randn.generator_with_names_out
-- func: randn.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
+- func: randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
-- func: randn.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+- func: randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
- func: randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
@@ -4128,10 +4433,11 @@
- func: randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CPU: randperm_out_cpu
CUDA: randperm_out_cuda
+ MPS: randperm_out_mps
- func: range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: range
@@ -4145,60 +4451,69 @@
- func: range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, Meta: range_out
CUDA: range_cuda_out
+ MPS: range_mps_out
cpp_no_default_args: ['step']
- func: ravel(Tensor(a) self) -> Tensor(a)
variants: function, method
- func: reciprocal(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: reciprocal.out
variants: function, method
+ tags: [core, pointwise]
- func: reciprocal_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: reciprocal.out
variants: function, method
+ tags: pointwise
- func: reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: reciprocal_out
MPS: reciprocal_out_mps
+ tags: pointwise
- func: neg(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: neg.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: neg_sparse
SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr
+ NestedTensorCPU, NestedTensorCUDA: NestedTensor_neg
+ tags: [core, pointwise]
- func: neg_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: neg.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: neg_sparse_
SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr_
+ NestedTensorCPU, NestedTensorCUDA: NestedTensor_neg_
+ tags: pointwise
- func: neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: neg_out
MPS: neg_out_mps
SparseCPU, SparseCUDA: neg_out_sparse
SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr_out
-
+ tags: pointwise
# Alias for neg
+
- func: negative(Tensor self) -> Tensor
variants: function, method
- func: negative_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
@@ -4209,33 +4524,42 @@
variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
dispatch:
CompositeExplicitAutograd: repeat
MPS: repeat_mps
autogen: repeat.out
+ tags: core
- func: repeat_interleave.Tensor(Tensor repeats, *, int? output_size=None) -> Tensor
variants: function
dispatch:
CPU: repeat_interleave_cpu
CUDA: repeat_interleave_cuda
+ MPS: repeat_interleave_mps
tags: dynamic_output_shape
autogen: repeat_interleave.Tensor_out
- func: repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor
variants: function, method
-- func: repeat_interleave.self_int(Tensor self, int repeats, int? dim=None, *, int? output_size=None) -> Tensor
+- func: repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor
variants: function, method
+ dispatch:
+ CompositeImplicitAutograd: repeat_interleave_symint
- func: reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeImplicitAutograd: reshape_symint
CompositeImplicitAutogradNestedTensor: reshape_nested
+- func: _reshape_copy(Tensor self, SymInt[] size) -> Tensor
+ variants: function
+ dispatch:
+ CompositeExplicitAutograd: _reshape_copy_symint
+
# NOTE [ _reshape_alias ] is meant to be used in the implementation of reshape.
# They are not user-facing, hence the leading underscore. Please don't use it
# anywhere else.
- func: _reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
variants: function, method
@@ -4265,18 +4589,20 @@
structured_delegate: round.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: round_sparse
SparseCsrCPU, SparseCsrCUDA: round_sparse_csr
+ tags: pointwise
- func: round_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: round.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: round_sparse_
SparseCsrCPU, SparseCsrCUDA: round_sparse_csr_
+ tags: pointwise
- func: round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
@@ -4284,28 +4610,32 @@
CPU: round_out
CUDA: round_out
MPS: round_out_mps
SparseCPU, SparseCUDA: round_sparse_out
SparseCsrCPU, SparseCsrCUDA: round_sparse_csr_out
+ tags: pointwise
- func: round.decimals(Tensor self, *, int decimals) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: round.decimals_out
variants: function, method
+ tags: pointwise
- func: round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: round.decimals_out
variants: function, method
+ tags: pointwise
- func: round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU: round_decimals_out
CUDA: round_decimals_out
+ tags: pointwise
- func: rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
device_check: NoCheck # TensorIterator
tags: nondeterministic_seeded
@@ -4321,10 +4651,13 @@
MPS: relu_mps
MkldnnCPU: mkldnn_relu
QuantizedCPU: relu_quantized_cpu
QuantizedCUDA: relu_quantized_cuda
NestedTensorCPU, NestedTensorCUDA: NestedTensor_relu
+ SparseCPU, SparseCUDA: relu_sparse
+ SparseCsrCPU, SparseCsrCUDA: relu_sparse_csr
+ tags: [core, pointwise]
- func: relu_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
@@ -4332,36 +4665,37 @@
MPS: relu_mps_
MkldnnCPU: mkldnn_relu_
QuantizedCPU: relu_quantized_cpu_
QuantizedCUDA: relu_quantized_cuda_
NestedTensorCPU, NestedTensorCUDA: NestedTensor_relu_
+ SparseCPU, SparseCUDA: relu_sparse_
+ SparseCsrCPU, SparseCsrCUDA: relu_sparse_csr_
autogen: relu.out
+ tags: pointwise
- func: relu6(Tensor self) -> Tensor
python_module: nn
- func: relu6_(Tensor(a!) self) -> Tensor(a!)
python_module: nn
- func: prelu(Tensor self, Tensor weight) -> Tensor
variants: function, method
+ autogen: prelu.out
+
+- func: _prelu_kernel(Tensor self, Tensor weight) -> Tensor
dispatch:
+ CPU, CUDA: _prelu_kernel
+ QuantizedCPU: _prelu_kernel_quantized_cpu
MkldnnCPU: mkldnn_prelu
- CPU: prelu_cpu
- CUDA: prelu_cuda
MPS: prelu_mps
- QuantizedCPU: prelu_quantized_cpu
- autogen: prelu.out
-- func: prelu_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)
- variants: function, method
+- func: _prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)
dispatch:
+ CPU, CUDA: _prelu_kernel_backward
MkldnnCPU: mkldnn_prelu_backward
- CPU: prelu_backward_cpu
- CUDA: prelu_backward_cuda
MPS: prelu_backward_mps
- autogen: prelu_backward.out
- func: gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
@@ -4385,10 +4719,11 @@
dispatch:
MkldnnCPU: mkldnn_gelu
QuantizedCPU: gelu_quantized_cpu
QuantizedCUDA: gelu_quantized_cuda
NestedTensorCPU, NestedTensorCUDA: NestedTensor_gelu
+ tags: [core, pointwise]
- func: gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
@@ -4400,10 +4735,12 @@
- func: gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor
structured_delegate: gelu_backward.grad_input
python_module: nn
dispatch:
MkldnnCPU: mkldnn_gelu_backward
+ NestedTensorCPU, NestedTensorCUDA: gelu_backwards_nested
+ tags: pointwise
- func: infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor
variants: function
python_module: nn
device_check: NoCheck
@@ -4433,52 +4770,56 @@
- func: rsqrt(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: rsqrt.out
variants: function, method
+ tags: [core, pointwise]
- func: rsqrt_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: rsqrt.out
variants: function, method
+ tags: pointwise
- func: rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: rsqrt_out
MPS: rsqrt_out_mps
+ tags: pointwise
- func: select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
-- func: select.int(Tensor(a) self, int dim, int index) -> Tensor(a)
+- func: select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
- CompositeExplicitAutograd: select
+ CompositeExplicitAutograd: select_symint
SparseCsrCPU, SparseCsrCUDA: select_sparse_csr
NestedTensorCPU, NestedTensorCUDA: select_nested
+ tags: core
-- func: select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, int index) -> Tensor
+- func: select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
dispatch:
- CompositeExplicitAutogradNonFunctional: select_backward
+ CompositeExplicitAutogradNonFunctional: select_backward_symint
autogen: select_backward.out
-- func: _nested_select_backward(Tensor grad_output, Tensor self, int dim, int index) -> Tensor
+- func: _nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
dispatch:
- NestedTensorCPU, NestedTensorCUDA: _nested_select_backward
+ NestedTensorCPU, NestedTensorCUDA: _nested_select_backward_symint
- func: selu(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
- func: selu_(Tensor(a!) self) -> Tensor(a!)
@@ -4551,95 +4892,109 @@
structured_delegate: sigmoid.out
variants: function, method
dispatch:
QuantizedCPU: sigmoid_quantized_cpu
MkldnnCPU: mkldnn_sigmoid
+ tags: [core, pointwise]
- func: sigmoid_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: sigmoid.out
variants: function, method
dispatch:
MkldnnCPU: mkldnn_sigmoid_
+ tags: pointwise
- func: sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sigmoid_out
MPS: sigmoid_out_mps
+ tags: pointwise
- func: logit(Tensor self, float? eps=None) -> Tensor
variants: function, method
dispatch:
CPU, CUDA: logit
+ tags: pointwise
- func: logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)
variants: function, method
dispatch:
CPU, CUDA: logit_
+ tags: pointwise
- func: logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: logit_out
+ tags: pointwise
- func: sin(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: sin.out
variants: function, method
dispatch:
SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr
SparseCPU, SparseCUDA: sin_sparse
+ tags: [core, pointwise]
- func: sin_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: sin.out
variants: function, method
dispatch:
SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr_
SparseCPU, SparseCUDA: sin_sparse_
+ tags: pointwise
- func: sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sin_out
MPS: sin_out_mps
SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr_out
SparseCPU, SparseCUDA: sin_sparse_out
+ tags: pointwise
- func: sinc(Tensor self) -> Tensor
structured_delegate: sinc.out
variants: function, method
+ tags: pointwise
- func: sinc_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: sinc.out
variants: function, method
+ tags: pointwise
- func: sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sinc_out
+ tags: pointwise
- func: sinh(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: sinh.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: sinh_sparse
SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr
+ tags: [core, pointwise]
- func: sinh_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: sinh.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: sinh_sparse_
SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr_
+ tags: pointwise
- func: sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
@@ -4658,10 +5013,11 @@
# those metadata changes to the detached tensor will not update the original tensor
# anymore, and in the `detach()` function we need to set `allow_tensor_metadata_change_`
# to false to make such changes explicitly illegal, in order to prevent users from
# changing metadata of the detached tensor and expecting the original tensor to also
# be updated.
+ tags: pointwise
- func: detach(Tensor(a) self) -> Tensor(a)
variants: function, method
dispatch:
CompositeExplicitAutograd: detach
NestedTensorCPU, NestedTensorCUDA: detach
@@ -4690,10 +5046,12 @@
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: slice
+ tags: core
+
# NOTE: The implementation of split_with_sizes bypasses the dispatcher to call this; undo
# that if adding specific implementations here!
- func: slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
variants: function
@@ -4708,17 +5066,18 @@
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: slice_scatter
autogen: slice_scatter.out
+ tags: core
-- func: select_scatter(Tensor self, Tensor src, int dim, int index) -> Tensor
+- func: select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
- CompositeExplicitAutograd: select_scatter
+ CompositeExplicitAutograd: select_scatter_symint
autogen: select_scatter.out
- func: diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
variants: function, method
device_check: NoCheck
@@ -4753,10 +5112,11 @@
- func: _softmax(Tensor self, int dim, bool half_to_float) -> Tensor
structured_delegate: _softmax.out
dispatch:
MkldnnCPU: mkldnn_softmax
NestedTensorCPU, NestedTensorCUDA: softmax_nested
+ tags: core
- func: _softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU: softmax_cpu_out
@@ -4773,38 +5133,40 @@
dispatch:
CPU: softmax_backward_cpu_out
CUDA: softmax_backward_cuda_out
MPS: softmax_backward_mps_out
-- func: unsafe_split.Tensor(Tensor self, int split_size, int dim=0) -> Tensor[]
+- func: unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: unsafe_split
autogen: unsafe_split.Tensor_out
-- func: split.Tensor(Tensor(a -> *) self, int split_size, int dim=0) -> Tensor(a)[]
+- func: split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: split
-- func: split.sizes(Tensor(a -> *) self, int[] split_size, int dim=0) -> Tensor(a)[]
+- func: split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
variants: function, method
device_guard: False
+ dispatch:
+ CompositeImplicitAutograd: split_symint
-- func: unsafe_split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]
+- func: unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: unsafe_split_with_sizes
autogen: unsafe_split_with_sizes.out
-- func: split_with_sizes(Tensor(a -> *) self, int[] split_sizes, int dim=0) -> Tensor(a)[]
+- func: split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: split_with_sizes
@@ -4832,24 +5194,38 @@
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: squeeze
QuantizedCPU, QuantizedCUDA: squeeze_quantized
+ NestedTensorCPU, NestedTensorCUDA: squeeze_nested
- func: squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: squeeze
QuantizedCPU, QuantizedCUDA: squeeze_quantized
+ NestedTensorCPU, NestedTensorCUDA: squeeze_dim_nested
+ tags: core
- func: squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
+
+- func: squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)
+ variants: function, method
+ device_check: NoCheck
+ device_guard: False
+ dispatch:
+ CompositeExplicitAutograd: squeeze
+ QuantizedCPU, QuantizedCUDA: squeeze_quantized
+ NestedTensorCPU, NestedTensorCUDA: squeeze_dim_nested
+ tags: core
+
- func: squeeze_(Tensor(a!) self) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
tags: inplace_view
@@ -4862,10 +5238,18 @@
device_guard: False
tags: inplace_view
dispatch:
CompositeExplicitAutograd: squeeze_
+- func: squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)
+ variants: method
+ device_check: NoCheck
+ device_guard: False
+ tags: inplace_view
+ dispatch:
+ CompositeExplicitAutograd: squeeze_
+
- func: squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
tags: inplace_view
@@ -4935,19 +5319,22 @@
- func: sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: sum
+ SparseCPU, SparseCUDA: sum_coo
SparseCsrCPU, SparseCsrCUDA: sum_csr
autogen: sum.out
- func: sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
structured_delegate: sum.IntList_out
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
NestedTensorCPU: NestedTensor_sum_dim_CPU
+ SparseCPU, SparseCUDA: sum_sparse_coo
+ tags: core
- func: sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
@@ -4968,14 +5355,16 @@
- func: nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
variants: function, method
dispatch:
CPU, CUDA: nansum
+ MPS: nansum_mps
- func: nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: nansum_out
+ MPS: nansum_out_mps
- func: sum_to_size(Tensor self, int[] size) -> Tensor
variants: method
device_check: NoCheck
device_guard: False
@@ -4985,99 +5374,113 @@
structured_delegate: sqrt.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: sqrt_sparse
SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr
+ tags: [core, pointwise]
- func: sqrt_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: sqrt.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: sqrt_sparse_
SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr_
+ tags: pointwise
- func: sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sqrt_out
MPS: sqrt_out_mps
SparseCPU, SparseCUDA: sqrt_sparse_out
SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr_out
+ tags: pointwise
- func: square(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
+ tags: pointwise
- func: square_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
+ tags: pointwise
- func: square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ tags: pointwise
- func: std(Tensor self, bool unbiased=True) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
+ cpp_no_default_args: ["unbiased"]
- func: std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
+ cpp_no_default_args: ["unbiased"]
-- func: std.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> Tensor
+- func: std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: std
MPS: std_mps
QuantizedCPU: std_quantized_cpu
- func: std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
+ cpp_no_default_args: ["unbiased"]
- func: std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
+ cpp_no_default_args: ["unbiased"]
-- func: std_mean.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
+- func: std_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CPU, CUDA: std_mean
autogen: std_mean.correction_out
- func: std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
+ cpp_no_default_args: ["unbiased"]
-- func: std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
+- func: std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
- func: std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
+ cpp_no_default_args: ["unbiased"]
-- func: std.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+- func: std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: std_out
QuantizedCPU: std_out_quantized_cpu
- func: std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
+ cpp_no_default_args: ["unbiased"]
- func: std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
+ cpp_no_default_args: ["unbiased"]
-- func: std.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> Tensor
+- func: std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
-- func: std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+- func: std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
- func: prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
device_check: NoCheck # TensorIterator
@@ -5126,57 +5529,65 @@
structured_delegate: tan.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: tan_sparse
SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr
+ tags: pointwise
- func: tan_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: tan.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: tan_sparse_
SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr_
+ tags: pointwise
- func: tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: tan_out
MPS: tan_out_mps
SparseCPU, SparseCUDA: tan_sparse_out
SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr_out
+ tags: pointwise
- func: tanh(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: tanh.out
variants: function, method
dispatch:
QuantizedCPU: tanh_quantized_cpu
MkldnnCPU: mkldnn_tanh
SparseCPU, SparseCUDA: tanh_sparse
SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr
+ NestedTensorCPU, NestedTensorCUDA: NestedTensor_tanh
+ tags: [core, pointwise]
- func: tanh_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: tanh.out
variants: function, method
dispatch:
MkldnnCPU: mkldnn_tanh_
SparseCPU, SparseCUDA: tanh_sparse_
SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr_
+ NestedTensorCPU, NestedTensorCUDA: NestedTensor_tanh_
+ tags: pointwise
- func: tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: tanh_out
MPS: tanh_out_mps
SparseCPU, SparseCUDA: tanh_sparse_out
SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr_out
+ tags: pointwise
- func: tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor
variants: function
- func: tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)
@@ -5209,16 +5620,22 @@
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: threshold_backward_out
MPS: threshold_backward_out_mps
+ SparseCPU, SparseCUDA: threshold_backward_sparse_out
+ SparseCsrCPU, SparseCsrCUDA: threshold_backward_sparse_compressed_out
- func: threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor
variants: function
structured_delegate: threshold_backward.grad_input
dispatch:
MkldnnCPU: mkldnn_relu_backward
+ SparseCPU, SparseCUDA: threshold_backward_sparse
+ SparseCsrCPU, SparseCsrCUDA: threshold_backward_sparse_compressed
+ NestedTensorCPU, NestedTensorCUDA: threshold_backwards_nested
+ tags: pointwise
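# Note [threshold_backward for sparse layouts]
# threshold_backward (the ReLU/threshold gradient) gains Sparse, SparseCsr and
# NestedTensor kernels, so the gradient no longer has to be densified. A rough sketch,
# assuming upstream autograd support for sparse COO leaves (exact layout coverage may
# vary by version):
#
#   >>> import torch
#   >>> i = torch.tensor([[0, 1], [1, 0]])
#   >>> v = torch.tensor([1.5, -2.0], requires_grad=True)
#   >>> s = torch.sparse_coo_tensor(i, v, (2, 2))
#   >>> torch.relu(s).sum().backward()   # backward can stay on the sparse kernels
#   >>> v.grad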
- func: tile(Tensor self, int[] dims) -> Tensor
variants: function, method
- func: transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
@@ -5264,10 +5681,11 @@
variants: function, method
dispatch:
CPU, QuantizedCPU, CUDA, QuantizedCUDA: flip
MPS: flip_mps
autogen: flip.out
+ tags: core
- func: fliplr(Tensor self) -> Tensor
variants: function, method
- func: flipud(Tensor self) -> Tensor
@@ -5373,30 +5791,33 @@
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
SparseCPU, SparseCUDA: trunc_sparse
SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr
+ tags: pointwise
- func: trunc_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: trunc.out
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
SparseCPU, SparseCUDA: trunc_sparse_
SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr_
+ tags: pointwise
- func: trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: trunc_out
MPS: trunc_out_mps
SparseCPU, SparseCUDA: trunc_sparse_out
SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr_out
-
+ tags: pointwise
# Alias for trunc
+
- func: fix(Tensor self) -> Tensor
variants: function, method
- func: fix_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
@@ -5427,18 +5848,20 @@
- func: unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
variants: function
dispatch:
CPU: unique_consecutive_cpu
CUDA: unique_consecutive_cuda
+ MPS: unique_consecutive_mps
tags: dynamic_output_shape
autogen: unique_consecutive.out
- func: unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
variants: function
dispatch:
CPU: unique_dim_consecutive_cpu
CUDA: unique_dim_consecutive_cuda
+ MPS: unique_dim_consecutive_mps
tags: dynamic_output_shape
autogen: unique_dim_consecutive.out
# _unique and _unique_dim are fragile; modifying them can easily cause internal breakage.
# The operator below is a temporary hack for adding return_counts support.
@@ -5447,10 +5870,11 @@
- func: _unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
variants: function
dispatch:
CPU: _unique2_cpu
CUDA: _unique2_cuda
+ MPS: _unique2_mps
tags: dynamic_output_shape
autogen: _unique2.out
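# Note [unique on MPS]
# unique_consecutive, unique_dim_consecutive and _unique2 gain MPS kernels, so the
# public unique APIs can run on Apple-silicon GPUs. A minimal sketch (assumes a build
# with MPS available; these ops keep the dynamic_output_shape tag because the result
# size depends on the data):
#
#   >>> import torch
#   >>> t = torch.tensor([1, 1, 2, 2, 3], device="mps")
#   >>> torch.unique_consecutive(t, return_counts=True)
#   >>> torch.unique(t, return_inverse=True)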
- func: _unsafe_view(Tensor self, SymInt[] size) -> Tensor
dispatch:
@@ -5463,10 +5887,12 @@
device_guard: False
dispatch:
CompositeExplicitAutograd: unsqueeze
SparseCPU, SparseCUDA: unsqueeze_sparse
QuantizedCPU, QuantizedCUDA: unsqueeze_quantized
+ NestedTensorCPU, NestedTensorCUDA: unsqueeze_nested
+ tags: core
- func: unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
@@ -5477,65 +5903,74 @@
- func: vander(Tensor x, int? N=None, bool increasing=False) -> Tensor
- func: var(Tensor self, bool unbiased=True) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
+ cpp_no_default_args: ["unbiased"]
- func: var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
+ tags: core
+ cpp_no_default_args: ["unbiased"]
-- func: var.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> Tensor
+- func: var.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: var
MPS: var_mps
- func: var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
+ cpp_no_default_args: ["unbiased"]
-- func: var.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+- func: var.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: var_out
- func: var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
+ cpp_no_default_args: ["unbiased"]
- func: var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
+ cpp_no_default_args: ["unbiased"]
-- func: var.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> Tensor
+- func: var.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
-- func: var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+- func: var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
- func: var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
+ cpp_no_default_args: ["unbiased"]
- func: var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
+ cpp_no_default_args: ["unbiased"]
-- func: var_mean.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
+- func: var_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CPU, CUDA: var_mean
autogen: var_mean.correction_out
- func: var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
+ cpp_no_default_args: ["unbiased"]
-- func: var_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
+- func: var_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
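# Note [var correction overloads]
# As with std above, the var/var_mean correction overloads now default both dim and
# correction to None, and the boolean overloads carry cpp_no_default_args: ["unbiased"]
# so C++ overload resolution stays unambiguous. A short sketch of the resulting surface
# (assuming the upstream convention that correction=None behaves like correction=1):
#
#   >>> import torch
#   >>> x = torch.randn(3, 4)
#   >>> torch.var(x, correction=0)             # dim is now optional on this overload
#   >>> torch.var_mean(x, dim=1, correction=2) # returns (variance, mean)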
- func: view_as(Tensor(a) self, Tensor other) -> Tensor(a)
variants: method
@@ -5546,10 +5981,11 @@
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: where
MPS: where_mps
+ tags: [core, pointwise]
- func: where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: where_self_out
@@ -5557,11 +5993,11 @@
- func: where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor
variants: function
- func: where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor
- variants: function
+ variants: function, method
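# Note [where with a Scalar other]
# where.ScalarOther picks up a method variant, so the scalar-`other` form can be called
# directly on a tensor instead of only through the torch.where function. A minimal
# sketch of the difference:
#
#   >>> import torch
#   >>> x = torch.randn(3)
#   >>> torch.where(x > 0, x, 0.0)   # function form, unchanged
#   >>> x.where(x > 0, 0.0)          # method form, newly allowed by this schema change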
- func: where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor
variants: function
- func: where(Tensor condition) -> Tensor[]
@@ -5602,10 +6038,11 @@
- func: _efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CPU: _efficientzerotensor
CUDA: _efficientzerotensor_cuda
+ Meta: _efficientzerotensor_meta
autogen: _efficientzerotensor.out
- func: zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: zeros_symint
@@ -5789,10 +6226,11 @@
- func: norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
structured: True
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: norm_dtype_out
+ MPS: norm_dtype_out_mps
- func: norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
structured: True
device_check: NoCheck # TensorIterator
dispatch:
@@ -5816,20 +6254,18 @@
- func: frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
variants: method, function
dispatch:
CompositeExplicitAutograd: frexp
+ tags: pointwise
- func: frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
dispatch:
CPU, CUDA: frexp_out
+ tags: pointwise
# Deprecated (v.1.12)
-- func: frobenius_norm(Tensor self) -> Tensor
- variants: function
-
-# Deprecated (v.1.12)
- func: frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
variants: function
# Deprecated (v.1.12)
- func: frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
@@ -5859,13 +6295,15 @@
SparseCsrCPU, SparseCsrCUDA: clone_sparse_compressed
MkldnnCPU: mkldnn_clone
QuantizedCPU, QuantizedCUDA: quantized_clone
NestedTensorCPU, NestedTensorCUDA: clone_nested
autogen: clone.out
+ tags: core
- func: positive(Tensor(a) self) -> Tensor(a)
variants: function, method
+ tags: pointwise
- func: resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: function, method
dispatch:
@@ -5898,41 +6336,46 @@
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sub_out
MPS: sub_out_mps
SparseCPU, SparseCUDA: sub_out_sparse
+ tags: pointwise
- func: sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: sub.out
dispatch:
SparseCPU, SparseCUDA: sub_sparse
ZeroTensor: sub_zerotensor
+ tags: [core, pointwise]
- func: sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: sub.out
dispatch:
SparseCPU, SparseCUDA: sub_sparse_
-
+ tags: pointwise
# For C++ only, until we have conversion from C++ numbers to Tensor
+
- func: sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: sub
+ tags: [core, pointwise]
- func: sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: sub_
autogen: sub.Scalar_out
-
+ tags: pointwise
# subtract, alias for sub
+
- func: subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- func: subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
variants: function, method
@@ -5957,15 +6400,17 @@
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: heaviside_out
+ tags: pointwise
- func: heaviside(Tensor self, Tensor values) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: heaviside.out
+ tags: pointwise
- func: heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: heaviside.out
@@ -5978,10 +6423,11 @@
CompositeExplicitAutograd: rsub
autogen: rsub.Scalar_out
# Functionally the same as addmm, but we give it a different derivative formula
# that doesn't propagate gradients to non-present entries on sparse.
+ tags: pointwise
- func: _sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
python_module: sparse
dispatch:
CompositeExplicitAutograd: _sparse_addmm
autogen: _sparse_addmm.out
@@ -5996,10 +6442,20 @@
python_module: sparse
dispatch:
SparseCsrCUDA: sparse_sampled_addmm_sparse_csr_cuda
SparseCsrCPU: sparse_sampled_addmm_sparse_csr_cpu
+- func: _sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)
+ python_module: sparse
+ dispatch:
+ SparseCsrCPU: _sparse_mm_reduce_impl_sparse_csr_cpu
+
+- func: _sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)
+ python_module: sparse
+ dispatch:
+ SparseCsrCPU: _sparse_mm_reduce_impl_backward_sparse_csr_cpu
+
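# Note [_sparse_mm_reduce_impl]
# These two new private ops implement a sparse-CSR matrix multiply fused with a row-wise
# reduction (and its backward) on CPU. They presumably back a reduce= option on the
# sparse matmul surface API; the call below is an illustrative guess at that surface,
# not something this schema file itself defines:
#
#   >>> import torch
#   >>> a = torch.randn(4, 6).relu().to_sparse_csr()
#   >>> b = torch.randn(6, 3)
#   >>> torch.sparse.mm(a, b, "mean")   # hypothetical reduce argument, CPU/CSR only here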
- func: addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU: addmm_out_cpu
CUDA: addmm_out_cuda
@@ -6014,10 +6470,11 @@
variants: function, method
dispatch:
SparseCPU: addmm_sparse_dense_cpu
SparseCUDA: addmm_sparse_dense_cuda
SparseCsrCPU, SparseCsrCUDA: addmm_sparse_compressed_dense
+ tags: core
- func: addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
structured_delegate: addmm.out
variants: method
dispatch:
@@ -6172,11 +6629,13 @@
- func: sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-- func: _sparse_coo_tensor_unsafe(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: _sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeImplicitAutograd: _sparse_coo_tensor_unsafe_symint
- func: _validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size) -> ()
- func: _validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()
- func: _validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
@@ -6187,13 +6646,13 @@
- func: _sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
dispatch:
SparseCPU, SparseCUDA, SparseMeta, Meta: new_with_dims_sparse
autogen: _sparse_coo_tensor_with_dims.out
-- func: _sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, int[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+- func: _sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
dispatch:
- SparseCPU, SparseCUDA, SparseMeta, Meta: new_with_dims_and_tensor_sparse
+ SparseCPU, SparseCUDA, SparseMeta, Meta: new_with_dims_and_tensor_sparse_symint
autogen: _sparse_coo_tensor_with_dims_and_tensors.out
- func: sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: method
@@ -6209,12 +6668,11 @@
autogen: sparse_resize_and_clear, sparse_resize_and_clear.out
- func: sparse_mask(Tensor self, Tensor mask) -> Tensor
variants: method
dispatch:
- SparseCPU: sparse_mask_cpu
- SparseCUDA: sparse_mask_cuda
+ SparseCPU, SparseCUDA: sparse_mask
SparseCsrCPU, SparseCsrCUDA: sparse_mask_sparse_csr
autogen: sparse_mask.out
- func: _to_cpu(Tensor[] tensors) -> Tensor[]
variants: function
@@ -6234,10 +6692,11 @@
- func: to_dense_backward(Tensor grad, Tensor input) -> Tensor
- func: sparse_dim(Tensor self) -> int
variants: method
dispatch:
+ CPU, CUDA: sparse_dim_strided
SparseCPU, SparseCUDA, SparseMeta: sparse_dim_sparse
SparseCsrCPU, SparseCsrCUDA: sparse_dim_sparse_csr
device_check: NoCheck
device_guard: False
@@ -6250,10 +6709,11 @@
device_guard: False
- func: dense_dim(Tensor self) -> int
variants: method
dispatch:
+ CPU, CUDA: dense_dim_strided
SparseCPU, SparseCUDA, SparseMeta: dense_dim_sparse
SparseCsrCPU, SparseCsrCUDA: dense_dim_sparse_csr
device_check: NoCheck
device_guard: False
@@ -6289,10 +6749,11 @@
- func: is_coalesced(Tensor self) -> bool
variants: method
dispatch:
SparseCPU, SparseCUDA, SparseMeta: is_coalesced_sparse
+ CompositeExplicitAutograd: is_coalesced_default
device_check: NoCheck
device_guard: False
- func: _indices(Tensor(a) self) -> Tensor(a)
variants: method
@@ -6321,47 +6782,53 @@
- func: indices(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
SparseCPU, SparseCUDA, SparseMeta: indices_sparse
+ CompositeExplicitAutograd: indices_default
device_check: NoCheck
device_guard: False
- func: values(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
SparseCPU, SparseCUDA, SparseMeta: values_sparse
SparseCsrCPU, SparseCsrCUDA: values_sparse_csr
NestedTensorCPU, NestedTensorCUDA: values_nested
+ CompositeExplicitAutograd: values_default
device_check: NoCheck
device_guard: False
- func: crow_indices(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
SparseCsrCPU, SparseCsrCUDA: crow_indices_sparse_csr
+ CompositeExplicitAutograd: crow_indices_default
device_check: NoCheck
device_guard: False
- func: col_indices(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
SparseCsrCPU, SparseCsrCUDA: col_indices_sparse_csr
+ CompositeExplicitAutograd: col_indices_default
device_check: NoCheck
device_guard: False
- func: ccol_indices(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
SparseCsrCPU, SparseCsrCUDA: ccol_indices_sparse_csr
+ CompositeExplicitAutograd: ccol_indices_default
device_check: NoCheck
device_guard: False
- func: row_indices(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
SparseCsrCPU, SparseCsrCUDA: row_indices_sparse_csr
+ CompositeExplicitAutograd: row_indices_default
device_check: NoCheck
device_guard: False
- func: hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
@@ -6392,45 +6859,47 @@
- func: to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse
+ SparseCPU, SparseCUDA: sparse_coo_to_sparse
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse
autogen: to_sparse.sparse_dim_out
-- func: to_sparse(Tensor self) -> Tensor
+- func: to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse
+ SparseCPU, SparseCUDA: sparse_coo_to_sparse
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse
autogen: to_sparse.out
-- func: to_sparse_csr(Tensor self) -> Tensor
+- func: to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse_csr
SparseCPU, SparseCUDA: coo_to_sparse_csr
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_csr
autogen: to_sparse_csr.out
-- func: to_sparse_csc(Tensor self) -> Tensor
+- func: to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse_csc
SparseCPU, SparseCUDA: coo_to_sparse_csc
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_csc
autogen: to_sparse_csc.out
-- func: to_sparse_bsr(Tensor self, int[2] blocksize) -> Tensor
+- func: to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse_bsr
SparseCPU, SparseCUDA: coo_to_sparse_bsr
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_bsr
autogen: to_sparse_bsr.out
-- func: to_sparse_bsc(Tensor self, int[2] blocksize) -> Tensor
+- func: to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse_bsc
SparseCPU, SparseCUDA: coo_to_sparse_bsc
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_bsc
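# Note [to_sparse conversions grow layout/blocksize/dense_dim arguments]
# to_sparse now accepts an optional target layout and blocksize, and the to_sparse_csr /
# csc / bsr / bsc conversions accept an optional dense_dim, so hybrid sparse tensors can
# be produced directly. A minimal sketch of the extended surface (keyword spellings
# follow the schema above; the Ruby bindings are generated from the same signatures):
#
#   >>> import torch
#   >>> d = torch.randn(4, 6)
#   >>> d.to_sparse(layout=torch.sparse_csr)   # pick the target layout explicitly
#   >>> d.to_sparse_bsr((2, 3))                # blocksize as before
#   >>> d.to_sparse_csr(dense_dim=0)           # dense_dim newly accepted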
@@ -6440,11 +6909,11 @@
variants: method
dispatch:
CPU: dense_to_mkldnn
autogen: to_mkldnn.out
-- func: mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1) -> Tensor
+- func: mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None) -> Tensor
variants: function
python_module: nn
dispatch:
MkldnnCPU: mkldnn_reorder_conv2d_weight
autogen: mkldnn_reorder_conv2d_weight.out
@@ -6640,11 +7109,13 @@
- func: _to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: _to_copy
+ NestedTensorCPU, NestedTensorCUDA: _to_copy_nested
autogen: _to_copy.out
+ tags: core
# to(Device) must not exist because all constructors of Device also work for
# TensorOptions. Otherwise, an ambiguity error is thrown.
# See NOTE [ TensorOptions Constructors ].
- func: to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
@@ -6710,16 +7181,16 @@
MPS: _local_scalar_dense_mps
variants: function
# MPS LSTM implementation
-- func: _lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
+- func: _lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
dispatch:
MPS: _lstm_mps
autogen: _lstm_mps.out
-- func: lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])
+- func: lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])
dispatch:
MPS: lstm_mps_backward
autogen: lstm_mps_backward.out
@@ -6808,11 +7279,13 @@
- func: _pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)
dispatch:
CompositeExplicitAutograd: _pack_padded_sequence
autogen: _pack_padded_sequence.out
-- func: _pack_padded_sequence_backward(Tensor grad, int[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
+- func: _pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
+ dispatch:
+ CompositeImplicitAutograd: _pack_padded_sequence_backward_symint
- func: _pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
# wrappers for legacy TH methods
@@ -6881,11 +7354,11 @@
# Like lift, but it clones the input.
- func: lift_fresh_copy(Tensor self) -> Tensor
tags: view_copy
dispatch:
- CompositeExplicitAutograd: lift_fresh_copy
+ CompositeExplicitAutogradNonFunctional: lift_fresh_copy
autogen: lift_fresh_copy.out
- func: is_set_to(Tensor self, Tensor tensor) -> bool
variants: method
device_check: NoCheck
@@ -6907,10 +7380,11 @@
- func: masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: masked_fill
+ tags: pointwise
- func: masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
@@ -6957,10 +7431,11 @@
device_guard: False
dispatch:
ZeroTensor, Meta, CPU, CUDA, QuantizedCPU, QuantizedCUDA, MPS: view
MkldnnCPU: mkldnn_view
NestedTensorCPU, NestedTensorCUDA: view_nested
+ tags: core
# Warning: If you want to change the name or overload name of this
# operator, you might also want to change the `isBlockListedSchema`
# function in `torch/csrc/jit/frontend/schema_catching.cpp`.
# The name and overload name of this operator is hardcoded in that
@@ -6974,11 +7449,11 @@
CompositeExplicitAutograd: view_dtype
- func: put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)
variants: method
dispatch:
- CPU, CUDA, MPS: put_
+ CPU, CUDA: put_
autogen: put.out
- func: put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor
variants: function, method
dispatch:
@@ -7132,10 +7607,11 @@
variants: function, method
- func: scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
structured_delegate: scatter_add.out
variants: function, method
+ tags: core
- func: scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
structured_delegate: scatter_add.out
variants: method
@@ -7150,10 +7626,11 @@
variants: function, method
- func: scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor
structured_delegate: scatter_reduce.two_out
variants: function, method
+ tags: core
- func: scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)
structured_delegate: scatter_reduce.two_out
variants: method
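# Note [scatter_add / scatter_reduce tagged core]
# Both scatter_add and scatter_reduce.two join the core tag set, which appears to mark
# ops belonging to the core ATen opset used by the export/compile stack. Their
# user-facing behaviour is unchanged; a small reminder of the semantics:
#
#   >>> import torch
#   >>> src = torch.ones(2, 5)
#   >>> index = torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]])
#   >>> torch.zeros(3, 5).scatter_add(0, index, src)
#   >>> torch.zeros(3, 5).scatter_reduce(0, index, src, reduce="sum", include_self=True)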
@@ -7178,43 +7655,50 @@
structured: True
structured_inherits: TensorIteratorBase
variants: function
dispatch:
CPU, CUDA: bitwise_and_out
+ tags: pointwise
- func: bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_and_out
+ tags: pointwise
- func: bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CompositeExplicitAutograd: bitwise_and
+ tags: pointwise
- func: bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_and
autogen: bitwise_and.Scalar_Tensor_out
+ tags: pointwise
- func: bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
structured_delegate: bitwise_and.Tensor_out
+ tags: [core, pointwise]
- func: bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
+ tags: pointwise
- func: bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: bitwise_and.Tensor_out
+ tags: pointwise
- func: __and__.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
@@ -7235,41 +7719,48 @@
structured: True
structured_inherits: TensorIteratorBase
variants: function
dispatch:
CPU, CUDA: bitwise_or_out
+ tags: pointwise
- func: bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_or_out
+ tags: pointwise
- func: bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
+ tags: pointwise
- func: bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_or
autogen: bitwise_or.Scalar_Tensor_out
+ tags: pointwise
- func: bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
structured_delegate: bitwise_or.Tensor_out
+ tags: [core, pointwise]
- func: bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
+ tags: pointwise
- func: bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: bitwise_or.Tensor_out
+ tags: pointwise
- func: __or__.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
@@ -7290,137 +7781,161 @@
structured: True
structured_inherits: TensorIteratorBase
variants: function
dispatch:
CPU, CUDA: bitwise_xor_out
+ tags: pointwise
- func: bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_xor_out
+ tags: pointwise
- func: bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
+ tags: pointwise
- func: bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_xor
autogen: bitwise_xor.Scalar_Tensor_out
+ tags: pointwise
- func: bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
structured_delegate: bitwise_xor.Tensor_out
+ tags: [core, pointwise]
- func: bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
+ tags: pointwise
- func: bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: bitwise_xor.Tensor_out
+ tags: pointwise
- func: __xor__.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
+ tags: pointwise
- func: __xor__.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
+ tags: pointwise
- func: __ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
+ tags: pointwise
- func: __ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
+ tags: pointwise
- func: __lshift__.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CPU, CUDA: __lshift__
+ tags: pointwise
- func: __lshift__.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CPU, CUDA: __lshift__
+ tags: pointwise
- func: __ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CPU, CUDA: __ilshift__
autogen: __lshift__.Scalar_out
+ tags: pointwise
- func: __ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CPU, CUDA: __ilshift__
autogen: __lshift__.Tensor_out
+ tags: pointwise
- func: bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: bitwise_left_shift.Tensor_out
+ tags: pointwise
- func: bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: bitwise_left_shift.Tensor_out
+ tags: pointwise
- func: bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: bitwise_left_shift_out
+ tags: pointwise
- func: bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CompositeExplicitAutograd: bitwise_left_shift
+ tags: pointwise
- func: bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: bitwise_left_shift_
+ tags: pointwise
- func: bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_left_shift_out
+ tags: pointwise
- func: bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_left_shift
autogen: bitwise_left_shift.Scalar_Tensor_out
+ tags: pointwise
- func: __rshift__.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CPU, CUDA: __rshift__
+ tags: pointwise
- func: __rshift__.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CPU, CUDA: __rshift__
+ tags: pointwise
- func: __irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
@@ -7436,47 +7951,54 @@
- func: bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: bitwise_right_shift.Tensor_out
+ tags: pointwise
- func: bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: bitwise_right_shift.Tensor_out
+ tags: pointwise
- func: bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: bitwise_right_shift_out
+ tags: pointwise
- func: bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CompositeExplicitAutograd: bitwise_right_shift
+ tags: pointwise
- func: bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: bitwise_right_shift_
+ tags: pointwise
- func: bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_right_shift_out
+ tags: pointwise
- func: bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_right_shift
autogen: bitwise_right_shift.Scalar_Tensor_out
+ tags: pointwise
- func: tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
structured_delegate: tril.out
variants: method
@@ -7486,20 +8008,23 @@
- func: digamma_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: digamma.out
variants: method
+ tags: pointwise
- func: lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: lerp.Scalar_out
+ tags: pointwise
- func: lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: lerp.Tensor_out
+ tags: pointwise
- func: addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
variants: method
dispatch:
CPU, CUDA: addbmm_
@@ -7589,27 +8114,14 @@
# wrappers for TH functions
autogen: geometric, geometric.out
- func: diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU: diag_cpu_out
- CUDA: diag_cuda_out
- MPS: diag_mps_out
- func: diag(Tensor self, int diagonal=0) -> Tensor
variants: method, function
- dispatch:
- CompositeExplicitAutograd: diag
-- func: diag_backward(Tensor grad, SymInt[] input_sizes, int diagonal) -> Tensor
- variants: function
- device_check: NoCheck
- device_guard: False
- dispatch:
- CompositeImplicitAutograd: diag_backward_symint
-
- func: cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
- func: cross(Tensor self, Tensor other, int? dim=None) -> Tensor
variants: method, function
@@ -7650,48 +8162,55 @@
- func: trace(Tensor self) -> Tensor
variants: method, function
dispatch:
CPU: trace_cpu
CUDA: trace_cuda
+ MPS: trace_mps_out
autogen: trace.out
-- func: trace_backward(Tensor grad, int[] sizes) -> Tensor
+- func: trace_backward(Tensor grad, SymInt[] sizes) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeImplicitAutograd: trace_backward_symint
- func: ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: ne_Scalar_out
MPS: ne_scalar_out_mps
QuantizedCPU: ne_out_quantized_cpu
+ tags: pointwise
- func: ne.Scalar(Tensor self, Scalar other) -> Tensor
structured_delegate: ne.Scalar_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: ne_quantized_cpu
+ tags: [core, pointwise]
- func: ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: ne_Tensor_out
MPS: ne_tensor_out_mps
QuantizedCPU: ne_out_quantized_cpu
+ tags: pointwise
- func: ne.Tensor(Tensor self, Tensor other) -> Tensor
structured_delegate: ne.Tensor_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: ne_quantized_cpu
+ tags: [core, pointwise]
- func: ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: ne.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
@@ -7724,65 +8243,73 @@
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: eq_Scalar_out
MPS: eq_scalar_out_mps
QuantizedCPU: eq_out_quantized_cpu
+ tags: pointwise
- func: eq.Scalar(Tensor self, Scalar other) -> Tensor
structured_delegate: eq.Scalar_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: eq_quantized_cpu
+ tags: [core, pointwise]
- func: eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: eq_Tensor_out
MPS: eq_tensor_out_mps
QuantizedCPU: eq_out_quantized_cpu
+ tags: pointwise
- func: eq.Tensor(Tensor self, Tensor other) -> Tensor
structured_delegate: eq.Tensor_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: eq_quantized_cpu
+ tags: [core, pointwise]
- func: ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: ge_Scalar_out
MPS: ge_scalar_out_mps
QuantizedCPU: ge_out_quantized_cpu
+ tags: pointwise
- func: ge.Scalar(Tensor self, Scalar other) -> Tensor
structured_delegate: ge.Scalar_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: ge_quantized_cpu
+ tags: [core, pointwise]
- func: ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: ge_Tensor_out
MPS: ge_tensor_out_mps
QuantizedCPU: ge_out_quantized_cpu
+ tags: pointwise
- func: ge.Tensor(Tensor self, Tensor other) -> Tensor
structured_delegate: ge.Tensor_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: ge_quantized_cpu
+ tags: [core, pointwise]
- func: ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: ge.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
@@ -7815,33 +8342,37 @@
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: le_Scalar_out
MPS: le_scalar_out_mps
QuantizedCPU: le_out_quantized_cpu
+ tags: pointwise
- func: le.Scalar(Tensor self, Scalar other) -> Tensor
structured_delegate: le.Scalar_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: le_quantized_cpu
+ tags: [core, pointwise]
- func: le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: le_Tensor_out
MPS: le_tensor_out_mps
QuantizedCPU: le_out_quantized_cpu
+ tags: pointwise
- func: le.Tensor(Tensor self, Tensor other) -> Tensor
structured_delegate: le.Tensor_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: le_quantized_cpu
+ tags: [core, pointwise]
- func: le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: le.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
@@ -7874,33 +8405,37 @@
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: gt_Scalar_out
MPS: gt_scalar_out_mps
QuantizedCPU: gt_out_quantized_cpu
+ tags: pointwise
- func: gt.Scalar(Tensor self, Scalar other) -> Tensor
structured_delegate: gt.Scalar_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: gt_quantized_cpu
+ tags: [core, pointwise]
- func: gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: gt_Tensor_out
MPS: gt_tensor_out_mps
QuantizedCPU: gt_out_quantized_cpu
+ tags: pointwise
- func: gt.Tensor(Tensor self, Tensor other) -> Tensor
structured_delegate: gt.Tensor_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: gt_quantized_cpu
+ tags: [core, pointwise]
- func: gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: gt.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
@@ -7933,33 +8468,37 @@
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: lt_Scalar_out
MPS: lt_scalar_out_mps
QuantizedCPU: lt_out_quantized_cpu
+ tags: pointwise
- func: lt.Scalar(Tensor self, Scalar other) -> Tensor
structured_delegate: lt.Scalar_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: lt_quantized_cpu
+ tags: [core, pointwise]
- func: lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: lt_Tensor_out
MPS: lt_tensor_out_mps
QuantizedCPU: lt_out_quantized_cpu
+ tags: pointwise
- func: lt.Tensor(Tensor self, Tensor other) -> Tensor
structured_delegate: lt.Tensor_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: lt_quantized_cpu
+ tags: [core, pointwise]
- func: lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: lt.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
@@ -8014,20 +8553,23 @@
CUDA: index_select_cuda
QuantizedCUDA: index_select_quantized_cuda
SparseCPU: index_select_sparse_cpu
SparseCUDA: index_select_sparse_cuda
MPS: index_select_mps
+ tags: core
- func: index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
- func: index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor
variants: method, function
-- func: index_select_backward(Tensor grad, int[] self_sizes, int dim, Tensor index) -> Tensor
+- func: index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeImplicitAutograd: index_select_backward_symint
- func: masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: masked_select_out_cpu
CUDA: masked_select_out_cuda
@@ -8049,18 +8591,20 @@
- func: nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: nonzero_out_cpu
CUDA: nonzero_out_cuda
+ MPS: nonzero_out_mps
tags: dynamic_output_shape
- func: nonzero(Tensor self) -> Tensor
variants: method, function
dispatch:
CPU: nonzero_cpu
CUDA: nonzero_cuda
- tags: dynamic_output_shape
+ MPS: nonzero_mps
+ tags: [dynamic_output_shape, core]
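# Note [nonzero]
# nonzero gains an MPS kernel and is now tagged [dynamic_output_shape, core]; the first
# tag records that the output size depends on the values (which matters for tracing and
# ahead-of-time compilation), the second that it belongs to the core opset. Usage is
# unchanged; a tiny sketch:
#
#   >>> import torch
#   >>> t = torch.tensor([[0.0, 1.5], [2.0, 0.0]])
#   >>> torch.nonzero(t)                 # shape depends on the number of nonzeros
#   >>> torch.nonzero(t, as_tuple=True)  # tuple-of-index-tensors form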
- func: nonzero_numpy(Tensor self) -> Tensor[]
variants: method, function
- func: argwhere(Tensor self) -> Tensor
@@ -8074,10 +8618,11 @@
MPS: gather_out_mps
- func: gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
variants: method, function
structured_delegate: gather.out
+ tags: core
- func: gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
@@ -8094,46 +8639,55 @@
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: addcmul_out
MPS: addcmul_out_mps
+ tags: pointwise
- func: addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
structured_delegate: addcmul.out
device_check: NoCheck # TensorIterator
variants: method, function
+ tags: pointwise
- func: addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
structured_delegate: addcmul.out
device_check: NoCheck # TensorIterator
variants: method
+ tags: pointwise
- func: addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: addcdiv_out
MPS: addcdiv_out_mps
+ tags: pointwise
- func: addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
structured_delegate: addcdiv.out
device_check: NoCheck # TensorIterator
variants: method, function
+ tags: pointwise
- func: addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
structured_delegate: addcdiv.out
device_check: NoCheck # TensorIterator
variants: method
+ tags: pointwise
-- func: cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, float label_smoothing=0.0) -> Tensor
+- func: cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor
python_module: nn
+ dispatch:
+ CompositeImplicitAutograd: cross_entropy_loss_symint
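# Note [cross_entropy_loss and SymInt ignore_index]
# ignore_index becomes a SymInt and the op gains an explicit CompositeImplicitAutograd
# symint kernel, which keeps it traceable with symbolic shapes; the eager call is
# unchanged. A short sketch of the usual surface API (torch.nn.functional.cross_entropy
# forwards to this op in upstream PyTorch):
#
#   >>> import torch
#   >>> import torch.nn.functional as F
#   >>> logits = torch.randn(8, 5)
#   >>> target = torch.tensor([0, 1, 4, -100, 2, 3, -100, 1])
#   >>> F.cross_entropy(logits, target, ignore_index=-100, label_smoothing=0.1)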
- func: triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
structured: True
dispatch:
CPU, CUDA: triangular_solve_out
+ MPS: triangular_solve_mps_out
SparseCsrCPU: triangular_solve_out_sparse_csr_cpu
SparseCsrCUDA: triangular_solve_out_sparse_csr_cuda
- func: triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
structured_delegate: triangular_solve.X
@@ -8145,36 +8699,22 @@
- func: linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
dispatch:
CPU, CUDA: linalg_solve_triangular_out
+ MPS: linalg_solve_triangular_mps_out
- func: linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor
python_module: linalg
variants: function
dispatch:
CPU, CUDA: linalg_solve_triangular
+ MPS: linalg_solve_triangular_mps
- func: linalg_vander(Tensor x, *, int? N=None) -> Tensor
python_module: linalg
-- func: symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
- dispatch:
- CompositeExplicitAutograd: symeig_out
-
-- func: symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor eigenvalues, Tensor eigenvectors)
- variants: method, function
- dispatch:
- CompositeExplicitAutograd: symeig
-
-- func: _symeig_helper(Tensor self, bool eigenvectors, bool upper) -> (Tensor, Tensor)
- variants: function
- dispatch:
- CPU: _symeig_helper_cpu
- CUDA: _symeig_helper_cuda
- autogen: _symeig_helper.out
-
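# Note [symeig removal]
# The long-deprecated symeig entries are dropped from the schema; upstream PyTorch
# directs callers to torch.linalg.eigh / eigvalsh instead. A minimal migration sketch:
#
#   >>> import torch
#   >>> a = torch.randn(4, 4)
#   >>> a = a + a.T                     # eigh expects a symmetric/Hermitian input
#   >>> w, v = torch.linalg.eigh(a)     # replaces torch.symeig(a, eigenvectors=True)
#   >>> w = torch.linalg.eigvalsh(a)    # replaces torch.symeig(a).eigenvalues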
- func: svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
- func: svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
variants: method, function
@@ -8302,131 +8842,151 @@
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: lgamma_out
+ tags: pointwise
- func: lgamma_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: lgamma.out
variants: method
+ tags: pointwise
- func: lgamma(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: lgamma.out
variants: method, function
+ tags: pointwise
- func: digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: digamma_out
+ tags: pointwise
- func: digamma(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: digamma.out
variants: method, function
+ tags: pointwise
- func: polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: polygamma_out
+ tags: pointwise
- func: polygamma(int n, Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: polygamma.out
variants: method, function
+ tags: pointwise
- func: polygamma_(Tensor(a!) self, int n) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: polygamma_
+ tags: pointwise
- func: erfinv(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: erfinv.out
variants: method, function
dispatch:
SparseCPU, SparseCUDA: erfinv_sparse
SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr
+ tags: pointwise
- func: erfinv_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: erfinv.out
variants: method
dispatch:
SparseCPU, SparseCUDA: erfinv_sparse_
SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr_
+ tags: pointwise
- func: erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: erfinv_out
SparseCPU, SparseCUDA: erfinv_sparse_out
SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr_out
+ tags: pointwise
- func: i0(Tensor self) -> Tensor
structured_delegate: i0.out
variants: function, method
+ tags: pointwise
- func: i0_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: i0.out
variants: function, method
+ tags: pointwise
- func: i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: i0_out
+ tags: pointwise
- func: sign(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: sign.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: sign_sparse
SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr
+ tags: [core, pointwise]
- func: sign_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: sign.out
variants: method
dispatch:
SparseCPU, SparseCUDA: sign_sparse_
SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr_
+ tags: pointwise
- func: sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sign_out
MPS: sign_out_mps
SparseCPU, SparseCUDA: sign_sparse_out
SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr_out
+ tags: pointwise
- func: signbit(Tensor self) -> Tensor
variants: function, method
structured_delegate: signbit.out
dispatch:
SparseCPU, SparseCUDA: signbit_sparse
SparseCsrCPU, SparseCsrCUDA: signbit_sparse_csr
+ tags: pointwise
- func: signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU: signbit_out
CUDA: signbit_out
+ MPS: signbit_out_mps
SparseCPU, SparseCUDA: signbit_sparse_out
SparseCsrCPU, SparseCsrCUDA: signbit_sparse_csr_out
+ tags: pointwise
- func: dist(Tensor self, Tensor other, Scalar p=2) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
@@ -8438,22 +8998,25 @@
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: atan2_out
MPS: atan2_mps_out
+ tags: pointwise
- func: atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: atan2.out
variants: method
+ tags: pointwise
- func: atan2(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: atan2.out
variants: method, function
-
+ tags: pointwise
# arctan2, alias of atan2
+
- func: arctan2(Tensor self, Tensor other) -> Tensor
variants: method, function
- func: arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
@@ -8465,27 +9028,31 @@
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: lerp_Scalar
+ tags: pointwise
- func: lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: lerp_Tensor
+ tags: pointwise
- func: lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
structured_delegate: lerp.Scalar_out
+ tags: pointwise
- func: lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
structured_delegate: lerp.Tensor_out
+ tags: pointwise
- func: histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: histogram_histc_cpu_out
CUDA: _histc_out_cuda
@@ -8537,134 +9104,160 @@
- func: fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CompositeExplicitAutograd: fmod_out
+ tags: pointwise
- func: fmod.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CompositeExplicitAutograd: fmod
+ tags: pointwise
- func: fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: fmod_
+ tags: pointwise
- func: fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: fmod_out
+ MPS: fmod_mps_out
+ tags: pointwise
- func: fmod.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: fmod.Tensor_out
variants: method, function
+ tags: [core, pointwise]
-
- func: fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: fmod.Tensor_out
+ tags: pointwise
- func: hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: hypot_out
+ tags: pointwise
- func: hypot(Tensor self, Tensor other) -> Tensor
structured_delegate: hypot.out
variants: method, function
+ tags: pointwise
- func: hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: hypot.out
variants: method
+ tags: pointwise
- func: igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: igamma_out
+ tags: pointwise
- func: igamma(Tensor self, Tensor other) -> Tensor
structured_delegate: igamma.out
variants: method, function
+ tags: pointwise
- func: igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: igamma.out
variants: method
+ tags: pointwise
- func: igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: igammac_out
+ tags: pointwise
- func: igammac(Tensor self, Tensor other) -> Tensor
structured_delegate: igammac.out
variants: method, function
+ tags: pointwise
- func: igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: igammac.out
variants: method
+ tags: pointwise
- func: nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: nextafter_out
+ tags: pointwise
- func: nextafter(Tensor self, Tensor other) -> Tensor
structured_delegate: nextafter.out
variants: method, function
+ tags: pointwise
- func: nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: nextafter.out
variants: method
+ tags: pointwise
- func: remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: remainder_out
+ tags: pointwise
- func: remainder.Scalar(Tensor self, Scalar other) -> Tensor
variants: method, function
dispatch:
CompositeExplicitAutograd: remainder
+ tags: pointwise
- func: remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
variants: method
dispatch:
CompositeExplicitAutograd: remainder_
+ tags: pointwise
- func: remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: remainder_out
+ MPS: remainder_out_mps
+ tags: pointwise
- func: remainder.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: remainder.Tensor_out
variants: method, function
+ tags: [core, pointwise]
- func: remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: remainder.Tensor_out
variants: method
+ tags: pointwise
- func: remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
- CPU, CUDA: remainder
+ CPU, CUDA, MPS: remainder
autogen: remainder.Scalar_Tensor_out
+ tags: pointwise
- func: min(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
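Both fmod and remainder pick up pointwise tags in this hunk (core on the Tensor overloads), plus MPS kernels. The schema alone does not show how the two ops differ, which is only in whose sign the result follows; a minimal sketch using the long-standing at::fmod / at::remainder bindings:

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor a = at::arange(-3, 3);    // int64 tensor: [-3, -2, -1, 0, 1, 2]
  at::Tensor f = at::fmod(a, 2);       // sign follows the dividend: [-1, 0, -1, 0, 1, 0]
  at::Tensor r = at::remainder(a, 2);  // sign follows the divisor:  [ 1, 0,  1, 0, 1, 0]
  std::cout << f << "\n" << r << "\n";
  return 0;
}
```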
@@ -8681,88 +9274,99 @@
- func: fmin(Tensor self, Tensor other) -> Tensor
structured_delegate: fmin.out
device_check: NoCheck # TensorIterator
variants: method, function
+ tags: pointwise
- func: fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: fmin_out
+ tags: pointwise
- func: max(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CPU, CUDA: max
MPS: max_mps
QuantizedCPU: max_quantized_cpu
-# Not to be confused with binary op `max.out`. Commented because of failed CI
-# FIXME: enable this
-#- func: max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-# device_check: NoCheck # TensorIterator
-# dispatch:
-# CompositeExplicitAutograd: max_unary_out
-
- func: fmax(Tensor self, Tensor other) -> Tensor
structured_delegate: fmax.out
device_check: NoCheck # TensorIterator
variants: method, function
+ tags: pointwise
- func: fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: fmax_out
+ tags: pointwise
- func: maximum(Tensor self, Tensor other) -> Tensor
structured_delegate: maximum.out
device_check: NoCheck # TensorIterator
variants: method, function
+ tags: [core, pointwise]
- func: maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: maximum_out
MPS: maximum_out_mps
+ tags: pointwise
# binary max, alias of maximum
# NOTE: max is not an alias for maximum, since there is also unary max
- func: max.other(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
+ tags: pointwise
- func: max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
+ tags: pointwise
+- func: max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck # TensorIterator
+ dispatch:
+ CPU, CUDA: max_unary_out
+ QuantizedCPU: max_quantized_unary_out
+
- func: minimum(Tensor self, Tensor other) -> Tensor
structured_delegate: minimum.out
device_check: NoCheck # TensorIterator
variants: method, function
+ tags: [core, pointwise]
- func: minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: minimum_out
MPS: minimum_out_mps
+ tags: pointwise
# binary min, alias for minimum
# NOTE: min is not an alias for minimum, since there is also unary min
- func: min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
+ tags: pointwise
- func: min.other(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
+ tags: pointwise
- func: quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
variants: method, function
- func: quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
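The NOTEs in this hunk flag that max/min are not aliases of maximum/minimum: the zero-argument forms are full reductions, max.other/min.other are the elementwise binary ops, and 0.13.0 re-enables the previously commented-out max.unary_out overload. A short illustration of the distinction, using only long-standing ATen bindings (at::Tensor::max, at::maximum, at::max, at::minimum):

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::randn({3});
  at::Tensor b = at::randn({3});

  at::Tensor red = a.max();            // unary max: 0-dim reduction over all elements
  at::Tensor ew  = at::maximum(a, b);  // elementwise binary maximum
  at::Tensor ali = at::max(a, b);      // max.other, the binary alias of maximum
  at::Tensor mn  = at::minimum(a, b);  // minimum / min.other behave the same way
  return 0;
}
```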
@@ -8789,10 +9393,11 @@
- func: sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
structured: True
dispatch:
CPU, CUDA: sort_stable_out
+ MPS: sort_stable_out_mps
- func: sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
@@ -8825,11 +9430,11 @@
- func: argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
- CPU, CUDA: argsort_stable
+ CPU, CUDA, MPS: argsort_stable
autogen: argsort.stable_out
- func: argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor
variants: method, function
@@ -8843,10 +9448,11 @@
- func: topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
variants: method, function
structured_delegate: topk.values
dispatch:
QuantizedCPU: topk_quantized_cpu
+ tags: core
- func: all(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: all.all_out
variants: method, function
@@ -8891,21 +9497,21 @@
- func: unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)
variants: method
device_check: NoCheck
device_guard: False
dispatch:
- CPU, CUDA, Meta: unfold
+ CPU, CUDA, Meta, MPS: unfold
QuantizedCPU, QuantizedCUDA: unfold
-- func: unfold_backward(Tensor grad_in, int[] input_sizes, int dim, int size, int step) -> Tensor
+- func: unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
variants: function
dispatch:
CPU, CUDA: unfold_backward
autogen: unfold_backward.out
- func: equal(Tensor self, Tensor other) -> bool
- tags: data_dependent_output
+ tags: [data_dependent_output, pointwise]
variants: method, function
dispatch:
CPU: cpu_equal
CUDA: cuda_equal
MPS: mps_equal
@@ -8916,71 +9522,87 @@
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: pow_Tensor_Tensor_out
MPS: pow_tensor_tensor_out_mps
+ tags: pointwise
- func: pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: pow.Tensor_Tensor_out
variants: method, function
+ tags: [core, pointwise]
- func: pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
dispatch:
CPU, CUDA: pow_Scalar_out
+ tags: pointwise
- func: pow.Scalar(Scalar self, Tensor exponent) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: pow.Scalar_out
+ tags: pointwise
- func: pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: pow_Tensor_Scalar_out
SparseCPU, SparseCUDA: pow_out_sparse_scalar
MPS: pow_tensor_scalar_out_mps
+ tags: pointwise
- func: pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: pow.Tensor_Scalar_out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: pow_sparse_scalar
+ tags: [core, pointwise]
- func: pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: pow.Tensor_Scalar_out
variants: method
+ tags: pointwise
- func: pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: pow.Tensor_Tensor_out
variants: method
+ tags: pointwise
- func: float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
+ tags: pointwise
- func: float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
variants: function, method
+ tags: pointwise
- func: float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
+ tags: pointwise
- func: float_power.Scalar(Scalar self, Tensor exponent) -> Tensor
+ tags: pointwise
- func: float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
+ tags: pointwise
- func: float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
variants: function, method
+ tags: pointwise
- func: float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
variants: method
+ tags: pointwise
- func: float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
variants: method
+ tags: pointwise
- func: normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
tags: nondeterministic_seeded
variants: method
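The pow overloads above gain core/pointwise tags while every float_power overload is tagged pointwise only. The practical difference between the two families is dtype handling: float_power always computes in double (or complex double) precision. A small sketch, assuming the standard at::pow / at::float_power bindings:

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor base = at::arange(1, 4);          // int64: [1, 2, 3]
  at::Tensor p    = at::pow(base, 2);          // stays integral: [1, 4, 9] (int64)
  at::Tensor fp   = at::float_power(base, 2);  // promoted: [1., 4., 9.] in float64
  std::cout << p << "\n" << fp << "\n";
  return 0;
}
```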
@@ -9040,24 +9662,25 @@
CPU, CUDA: normal
MPS: normal_mps
Meta: normal_meta
tags: nondeterministic_seeded
-- func: normal.float_float(float mean, float std, int[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: normal
tags: nondeterministic_seeded
-- func: normal.float_float_out(float mean, float std, int[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+- func: normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: normal_out
tags: nondeterministic_seeded
- func: alias(Tensor(a) self) -> Tensor(a)
variants: method, function
dispatch:
CompositeExplicitAutograd: alias
+ tags: core
- func: _amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
variants: function
dispatch:
CUDA: _amp_foreach_non_finite_check_and_unscale_cuda_
@@ -9140,10 +9763,71 @@
dispatch:
CPU: foreach_tensor_div_scalar_kernel_slow_
CUDA: foreach_tensor_div_scalar_kernel_cuda_
autogen: _foreach_div.Scalar_out
+- func: _foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_min_scalar_kernel_slow
+ CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda
+
+- func: _foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_min_scalar_kernel_slow_
+ CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda_
+ autogen: _foreach_clamp_min.Scalar_out
+
+- func: _foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_max_scalar_kernel_slow
+ CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda
+
+- func: _foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_max_scalar_kernel_slow_
+ CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda_
+ autogen: _foreach_clamp_max.Scalar_out
+
+# foreach_minimum/maximum dispatches to clamp_max/min
+- func: _foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_min_scalar_kernel_slow
+ CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda
+
+- func: _foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_min_scalar_kernel_slow_
+ CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda_
+ autogen: _foreach_maximum.Scalar_out
+
+- func: _foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_max_scalar_kernel_slow
+ CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda
+
+- func: _foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_max_scalar_kernel_slow_
+ CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda_
+ autogen: _foreach_minimum.Scalar_out
+
- func: _foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_add_list_kernel_slow
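0.13.0 also picks up the new _foreach_clamp_min/_foreach_clamp_max entries, and, as the comment in this hunk notes, the Scalar overloads of _foreach_maximum/_foreach_minimum simply dispatch to the clamp kernels. A hedged sketch of the generated calls; the at::_foreach_* signatures here are inferred from the schemas above rather than taken from this repo:

```cpp
#include <ATen/ATen.h>
#include <vector>

// Assumes codegen exposes the schemas above as at::_foreach_clamp_min /
// at::_foreach_maximum Scalar overloads (inferred, not verified against this gem).
int main() {
  std::vector<at::Tensor> params = {at::randn({4}), at::randn({8})};

  // Clamp every tensor in the list against the same scalar bound.
  std::vector<at::Tensor> clamped = at::_foreach_clamp_min(params, /*scalar=*/0.0);

  // Per the schema comment, the Scalar overload of _foreach_maximum routes to the
  // same clamp_min kernel, so this call is equivalent to the one above.
  std::vector<at::Tensor> maxed = at::_foreach_maximum(params, /*scalar=*/0.0);

  // In-place variant mutates the listed tensors directly.
  at::_foreach_clamp_min_(params, 0.0);
  return 0;
}
```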
@@ -9200,10 +9884,72 @@
dispatch:
CPU: foreach_tensor_div_list_kernel_slow_
CUDA: foreach_tensor_div_list_kernel_cuda_
autogen: _foreach_div.List_out
+- func: _foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_min_list_kernel_slow
+ CUDA: foreach_tensor_clamp_min_list_kernel_cuda
+
+- func: _foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_min_list_kernel_slow_
+ CUDA: foreach_tensor_clamp_min_list_kernel_cuda_
+ autogen: _foreach_clamp_min.List_out
+
+- func: _foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_max_list_kernel_slow
+ CUDA: foreach_tensor_clamp_max_list_kernel_cuda
+
+- func: _foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_max_list_kernel_slow_
+ CUDA: foreach_tensor_clamp_max_list_kernel_cuda_
+ autogen: _foreach_clamp_max.List_out
+
+# foreach_minimum/maximum dispatches to clamp_max/min
+- func: _foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_min_list_kernel_slow
+ CUDA: foreach_tensor_clamp_min_list_kernel_cuda
+
+- func: _foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_min_list_kernel_slow_
+ CUDA: foreach_tensor_clamp_min_list_kernel_cuda_
+ autogen: _foreach_maximum.List_out
+
+- func: _foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_max_list_kernel_slow
+ CUDA: foreach_tensor_clamp_max_list_kernel_cuda
+
+- func: _foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_max_list_kernel_slow_
+ CUDA: foreach_tensor_clamp_max_list_kernel_cuda_
+ autogen: _foreach_minimum.List_out
+
+
- func: _foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_add_scalarlist_kernel_slow
@@ -9260,10 +10006,71 @@
dispatch:
CPU: foreach_tensor_mul_scalarlist_kernel_slow_
CUDA: foreach_tensor_mul_scalarlist_kernel_cuda_
autogen: _foreach_mul.ScalarList_out
+- func: _foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow
+ CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda
+
+- func: _foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow_
+ CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda_
+ autogen: _foreach_clamp_min.ScalarList_out
+
+- func: _foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow
+ CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda
+
+- func: _foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow_
+ CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda_
+ autogen: _foreach_clamp_max.ScalarList_out
+
+# foreach_minimum/maximum dispatches to clamp_max/min
+- func: _foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow
+ CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda
+
+- func: _foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow_
+ CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda_
+ autogen: _foreach_maximum.ScalarList_out
+
+- func: _foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow
+ CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda
+
+- func: _foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow_
+ CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda_
+ autogen: _foreach_minimum.ScalarList_out
+
- func: _foreach_exp(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_exp_slow
@@ -9712,18 +10519,34 @@
dispatch:
CPU: foreach_tensor_addcdiv_scalarlist_slow_
CUDA: foreach_tensor_addcdiv_scalarlist_cuda_
autogen: _foreach_addcdiv.ScalarList_out
+- func: _foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_addcdiv_tensor_slow_
+ CUDA: foreach_tensor_addcdiv_tensor_cuda_
+ autogen: _foreach_addcdiv.Tensor_out
+
- func: _foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcmul_scalarlist_slow_
CUDA: foreach_tensor_addcmul_scalarlist_cuda_
autogen: _foreach_addcmul.ScalarList_out
+- func: _foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_addcmul_tensor_slow_
+ CUDA: foreach_tensor_addcmul_tensor_cuda_
+ autogen: _foreach_addcmul.Tensor_out
+
- func: _foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcdiv_scalar_slow
@@ -9741,55 +10564,71 @@
variants: function
dispatch:
CPU: foreach_tensor_addcdiv_scalarlist_slow
CUDA: foreach_tensor_addcdiv_scalarlist_cuda
+- func: _foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_addcdiv_tensor_slow
+ CUDA: foreach_tensor_addcdiv_tensor_cuda
+
- func: _foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcmul_scalarlist_slow
CUDA: foreach_tensor_addcmul_scalarlist_cuda
-- func: _foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
+- func: _foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
- CPU: foreach_tensor_maximum_slow
- CUDA: foreach_tensor_maximum_cuda
+ CPU: foreach_tensor_addcmul_tensor_slow
+ CUDA: foreach_tensor_addcmul_tensor_cuda
-- func: _foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+- func: _foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
- CPU: foreach_tensor_maximum_slow_
- CUDA: foreach_tensor_maximum_cuda_
- autogen: _foreach_maximum.List_out
+ CPU: foreach_tensor_norm_slow
+ CUDA: foreach_tensor_norm_cuda
+ autogen: _foreach_norm.Scalar_out
-- func: _foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]
- device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+- func: _foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
- CPU: foreach_tensor_minimum_slow
- CUDA: foreach_tensor_minimum_cuda
+ CPU: foreach_tensor_ternary_lerp_slow
+ CUDA: foreach_tensor_lerp_ternary_cuda
+ autogen: _foreach_lerp.List_out
-- func: _foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
- device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+- func: _foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
- CPU: foreach_tensor_minimum_slow_
- CUDA: foreach_tensor_minimum_cuda_
- autogen: _foreach_minimum.List_out
+ CPU: foreach_tensor_ternary_lerp_slow_
+ CUDA: foreach_tensor_lerp_ternary_cuda_
+ autogen: _foreach_lerp.List_out
-- func: _foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[]
- device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+- func: _foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
- CPU: foreach_tensor_norm_slow
- CUDA: foreach_tensor_norm_cuda
- autogen: _foreach_norm.Scalar_out
+ CPU: foreach_tensor_lerp_list_kernel_slow
+ CUDA: foreach_tensor_lerp_list_cuda
+ autogen: _foreach_lerp.Scalar_out
+- func: _foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
+ variants: function
+ dispatch:
+ CPU: foreach_tensor_lerp_list_kernel_slow_
+ CUDA: foreach_tensor_lerp_list_cuda_
+ autogen: _foreach_lerp.Scalar_out
+
- func: bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
dispatch:
CPU: bucketize_cpu
CUDA: bucketize_cuda
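This hunk also introduces the _foreach_lerp ops (alongside Tensor-valued scalars for _foreach_addcdiv/_foreach_addcmul) and rewrites the relocated _foreach_maximum/minimum/norm entries. Each lerp output is start + weight * (end - start), applied across the whole tensor list; a sketch under the assumption that codegen exposes these schemas as the usual at::_foreach_lerp / at::_foreach_lerp_ bindings:

```cpp
#include <ATen/ATen.h>
#include <vector>

// Assumes the _foreach_lerp schemas added above surface as at::_foreach_lerp
// (Scalar weight) and at::_foreach_lerp_ (per-tensor weight list).
int main() {
  std::vector<at::Tensor> starts = {at::zeros({3}), at::zeros({5})};
  std::vector<at::Tensor> ends   = {at::ones({3}),  at::ones({5})};

  // Scalar-weight overload: each output is start + weight * (end - start).
  std::vector<at::Tensor> mid = at::_foreach_lerp(starts, ends, /*weight=*/0.5);

  // In-place List overload takes a per-tensor list of weight tensors.
  std::vector<at::Tensor> weights = {at::full({3}, 0.25), at::full({5}, 0.75)};
  at::_foreach_lerp_(starts, ends, weights);
  return 0;
}
```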
@@ -9807,21 +10646,10 @@
- func: searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
dispatch:
CPU: searchsorted_cpu
CUDA: searchsorted_cuda
-# [Note about _torch_cuda_cu_linker_symbol_op and torch_cuda_cu]
-# This is a DUMMY function to force the linking against torch_cuda_cu on Windows.
-# Otherwise, the Windows linker will optimize and not include torch_cuda_cu even when we
-# want it to be included. This is similar to what we do with warp_size for torch_cuda_cpp,
-# described as the solution to this issue: https://github.com/pytorch/pytorch/issues/31611
-# This op should NOT be used or exposed or edited or else Windows builds (with BUILD_SPLIT_CUDA) will break.
-- func: _torch_cuda_cu_linker_symbol_op(Tensor self) -> Tensor
- dispatch:
- CUDA: _torch_cuda_cu_linker_symbol_op_cuda
- autogen: _torch_cuda_cu_linker_symbol_op.out
-
- func: searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: searchsorted_out_cpu
CUDA: searchsorted_out_cuda
@@ -9932,71 +10760,77 @@
python_module: nn
dispatch:
CPU: multilabel_margin_loss_backward_cpu
CUDA: multilabel_margin_loss_backward_cuda
-- func: nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
+- func: nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
-- func: nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor
+- func: nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
python_module: nn
+ dispatch:
+ CompositeImplicitAutograd: nll_loss_nd_symint
-- func: nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor
+- func: nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
python_module: nn
+ dispatch:
+ CompositeImplicitAutograd: nll_loss_symint
-- func: nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
+- func: nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
python_module: nn
structured: True
dispatch:
CPU: nll_loss_forward_out_cpu
CUDA: nll_loss_forward_out_cuda
MPS: nll_loss_forward_out_mps
-- func: nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)
+- func: nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
python_module: nn
structured_delegate: nll_loss_forward.output
-- func: nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: nll_loss_backward_out_cpu
CUDA: nll_loss_backward_out_cuda
MPS: nll_loss_backward_out_mps
-- func: nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor
+- func: nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
python_module: nn
structured_delegate: nll_loss_backward.grad_input
-- func: nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
+- func: nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
-- func: nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor
+- func: nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
python_module: nn
+ dispatch:
+ CompositeImplicitAutograd: nll_loss2d_symint
-- func: nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
+- func: nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
python_module: nn
dispatch:
CPU: nll_loss2d_forward_out_cpu
CUDA: nll_loss2d_forward_out_cuda
MPS: nll_loss2d_forward_out_mps
-- func: nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)
+- func: nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
python_module: nn
dispatch:
CPU: nll_loss2d_forward_cpu
CUDA: nll_loss2d_forward_cuda
MPS: nll_loss2d_forward_mps
-- func: nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: nll_loss2d_backward_out_cpu
CUDA: nll_loss2d_backward_out_cuda
MPS: nll_loss2d_backward_out_mps
-- func: nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor
+- func: nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
python_module: nn
dispatch:
CPU: nll_loss2d_backward_cpu
CUDA: nll_loss2d_backward_cuda
MPS: nll_loss2d_backward_mps
@@ -10183,10 +11017,11 @@
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU, CUDA, MPS: hardtanh
QuantizedCPU: hardtanh_quantized_cpu
+ tags: core
- func: hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU, CUDA: hardtanh_backward_out
@@ -10208,27 +11043,31 @@
- func: hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU, CUDA: hardswish_out
+ MPS: hardswish_out_mps
- func: hardswish(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU, CUDA: hardswish
+ MPS: hardswish_mps
- func: hardswish_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU, CUDA: hardswish_
+ MPS: hardswish_mps_
- func: hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
python_module: nn
dispatch:
CPU, CUDA: hardswish_backward
+ MPS: hardswish_backward_mps
autogen: hardswish_backward.out
- func: leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
@@ -10243,10 +11082,11 @@
structured_delegate: leaky_relu.out
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
QuantizedCPU: leaky_relu_quantized_cpu
+ tags: core
- func: leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
@@ -10408,30 +11248,34 @@
CUDA: adaptive_avg_pool2d_cuda
MPS: adaptive_avg_pool2d_mps
QuantizedCPU: adaptive_avg_pool2d_quantized_cpu
QuantizedCUDA: adaptive_avg_pool2d_quantized_cuda
autogen: _adaptive_avg_pool2d.out
+ tags: core
- func: _adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
python_module: nn
dispatch:
CPU: adaptive_avg_pool2d_backward_cpu
CUDA: adaptive_avg_pool2d_backward_cuda
MPS: adaptive_avg_pool2d_backward_mps
autogen: _adaptive_avg_pool2d_backward.out
+ tags: core
-- func: adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
+- func: adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU: adaptive_avg_pool3d_out_cpu
CUDA: adaptive_avg_pool3d_out_cuda
QuantizedCPU: adaptive_avg_pool3d_out_quantized_cpu
-- func: adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor
+- func: adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
python_module: nn
+ dispatch:
+ CompositeImplicitAutograd: adaptive_avg_pool3d_symint
-- func: _adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor
+- func: _adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
dispatch:
CPU: adaptive_avg_pool3d_cpu
CUDA: adaptive_avg_pool3d_cuda
QuantizedCPU: adaptive_avg_pool3d_quantized_cpu
autogen: _adaptive_avg_pool3d.out
@@ -10516,10 +11360,11 @@
python_module: nn
structured_delegate: avg_pool2d.out
dispatch:
MkldnnCPU: mkldnn_avg_pool2d
QuantizedCPU: avg_pool2d_quantized_cpu
+ tags: core
- func: avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
@@ -10531,10 +11376,11 @@
- func: avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
python_module: nn
structured_delegate: avg_pool2d_backward.grad_input
dispatch:
MkldnnCPU: mkldnn_avg_pool2d_backward
+ tags: core
- func: avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
@@ -10627,10 +11473,11 @@
# Return: (Tensor output, Tensor indices)
- func: max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
python_module: nn
structured_delegate: max_pool2d_with_indices.out
+ tags: core
- func: max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
@@ -10639,10 +11486,11 @@
MPS: max_pool2d_with_indices_backward_out_mps
- func: max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor
python_module: nn
structured_delegate: max_pool2d_with_indices_backward.grad_input
+ tags: core
# Return: (Tensor output, Tensor indices)
- func: max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
python_module: nn
dispatch:
@@ -10653,10 +11501,11 @@
- func: max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
python_module: nn
dispatch:
CPU: max_pool3d_with_indices_cpu
CUDA: max_pool3d_with_indices_cuda
+ tags: core
- func: max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: max_pool3d_with_indices_backward_out_cpu
@@ -10690,323 +11539,233 @@
python_module: nn
dispatch:
CPU: max_unpooling3d_forward_cpu
CUDA: max_unpooling3d_forward_cuda
-- func: reflection_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!)
+- func: reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: reflection_pad1d_out_cpu
QuantizedCPU: reflection_pad1d_out_quantized_cpu
CUDA: reflection_pad1d_out_cuda
MPS: reflection_pad1d_out_mps
-- func: reflection_pad1d(Tensor self, int[2] padding) -> Tensor
+- func: reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor
python_module: nn
structured_delegate: reflection_pad1d.out
-- func: reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: reflection_pad1d_backward_out_cpu
CUDA: reflection_pad1d_backward_out_cuda
MPS: reflection_pad1d_backward_out_mps
-- func: reflection_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor
+- func: reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
python_module: nn
structured_delegate: reflection_pad1d_backward.grad_input
-- func: reflection_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!)
+- func: reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU, QuantizedCPU: reflection_pad2d_out_cpu
CUDA: reflection_pad2d_out_cuda
MPS: reflection_pad2d_out_mps
-- func: reflection_pad2d(Tensor self, int[4] padding) -> Tensor
+- func: reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor
python_module: nn
dispatch:
CPU: reflection_pad2d_cpu
QuantizedCPU: reflection_pad2d_quantized_cpu
CUDA: reflection_pad2d_cuda
MPS: reflection_pad2d_mps
+ tags: core
-- func: reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: reflection_pad2d_backward_out_cpu
CUDA: reflection_pad2d_backward_out_cuda
MPS: reflection_pad2d_backward_out_mps
-- func: reflection_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor
+- func: reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
python_module: nn
dispatch:
CPU: reflection_pad2d_backward_cpu
CUDA: reflection_pad2d_backward_cuda
MPS: reflection_pad2d_backward_mps
-- func: reflection_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!)
+- func: reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: reflection_pad3d_out_cpu
CUDA: reflection_pad3d_out_cuda
MPS: reflection_pad3d_out_mps
-- func: reflection_pad3d(Tensor self, int[6] padding) -> Tensor
+- func: reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor
python_module: nn
structured_delegate: reflection_pad3d.out
-- func: reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: reflection_pad3d_backward_out_cpu
CUDA: reflection_pad3d_backward_out_cuda
MPS: reflection_pad3d_backward_out_mps
-- func: reflection_pad3d_backward(Tensor grad_output, Tensor self, int[6] padding) -> Tensor
+- func: reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
python_module: nn
structured_delegate: reflection_pad3d_backward.grad_input
-- func: replication_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!)
+- func: replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: replication_pad1d_out_cpu
CUDA: replication_pad1d_out_cuda
MPS: replication_pad1d_out_mps
-- func: replication_pad1d(Tensor self, int[2] padding) -> Tensor
+- func: replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor
python_module: nn
structured_delegate: replication_pad1d.out
-- func: replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: replication_pad1d_backward_out_cpu
CUDA: replication_pad1d_backward_out_cuda
MPS: replication_pad1d_backward_out_mps
-- func: replication_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor
+- func: replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
python_module: nn
structured_delegate: replication_pad1d_backward.grad_input
-- func: replication_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!)
+- func: replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: replication_pad2d_out_cpu
CUDA: replication_pad2d_out_cuda
MPS: replication_pad2d_out_mps
-- func: replication_pad2d(Tensor self, int[4] padding) -> Tensor
+- func: replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor
python_module: nn
structured_delegate: replication_pad2d.out
+ tags: core
-- func: replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: replication_pad2d_backward_out_cpu
CUDA: replication_pad2d_backward_out_cuda
MPS: replication_pad2d_backward_out_mps
-- func: replication_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor
+- func: replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
python_module: nn
dispatch:
CPU: replication_pad2d_backward_cpu
CUDA: replication_pad2d_backward_cuda
MPS: replication_pad2d_backward_mps
-- func: replication_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!)
+- func: replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: replication_pad3d_out_cpu
CUDA: replication_pad3d_out_cuda
MPS: replication_pad3d_out_mps
-- func: replication_pad3d(Tensor self, int[6] padding) -> Tensor
+- func: replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
python_module: nn
structured_delegate: replication_pad3d.out
+ tags: core
-- func: replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+
+- func: replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: replication_pad3d_backward_out_cpu
CUDA: replication_pad3d_backward_out_cuda
MPS: replication_pad3d_backward_out_mps
-- func: replication_pad3d_backward(Tensor grad_output, Tensor self, int[6] padding) -> Tensor
+- func: replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
python_module: nn
dispatch:
CPU: replication_pad3d_backward_cpu
CUDA: replication_pad3d_backward_cuda
MPS: replication_pad3d_backward_mps
-- func: _pad_circular(Tensor self, int[] pad) -> Tensor
+- func: _pad_circular(Tensor self, SymInt[] pad) -> Tensor
python_module: nn
+ dispatch:
+ CompositeImplicitAutograd: _pad_circular_symint
-- func: _pad_enum(Tensor self, int[] pad, int mode, float? value=None) -> Tensor
+- func: _pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor
python_module: nn
+ dispatch:
+ CompositeImplicitAutograd: _pad_enum_symint
-- func: pad(Tensor self, int[] pad, str mode="constant", float? value=None) -> Tensor
+- func: pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor
python_module: nn
+ dispatch:
+ CompositeImplicitAutograd: pad_symint
- func: upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
- dispatch:
- CompositeExplicitAutograd: upsample_linear1d
autogen: upsample_linear1d.vec_out
-- func: upsample_linear1d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
- python_module: nn
- dispatch:
- CompositeExplicitAutograd: upsample_linear1d_backward
- autogen: upsample_linear1d_backward.vec_out
-
- func: upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
- dispatch:
- CompositeExplicitAutograd: upsample_bilinear2d
autogen: upsample_bilinear2d.vec_out
+ tags: core
-- func: upsample_bilinear2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
- python_module: nn
- dispatch:
- CompositeExplicitAutograd: upsample_bilinear2d_backward
- autogen: upsample_bilinear2d_backward.vec_out
-
- func: _upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
- dispatch:
- CompositeExplicitAutograd: _upsample_bilinear2d_aa
autogen: _upsample_bilinear2d_aa.vec_out
-- func: _upsample_bilinear2d_aa_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
- python_module: nn
- dispatch:
- CompositeExplicitAutograd: _upsample_bilinear2d_aa_backward
- autogen: _upsample_bilinear2d_aa_backward.vec_out
-
- func: upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
- dispatch:
- CompositeExplicitAutograd: upsample_trilinear3d
autogen: upsample_trilinear3d.vec_out
-- func: upsample_trilinear3d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
- python_module: nn
- dispatch:
- CompositeExplicitAutograd: upsample_trilinear3d_backward
- autogen: upsample_trilinear3d_backward.vec_out
-
- func: upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
- dispatch:
- CompositeExplicitAutograd: upsample_bicubic2d
autogen: upsample_bicubic2d.vec_out
-- func: upsample_bicubic2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
- python_module: nn
- dispatch:
- CompositeExplicitAutograd: upsample_bicubic2d_backward
- autogen: upsample_bicubic2d_backward.vec_out
-
- func: _upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
- dispatch:
- CompositeExplicitAutograd: _upsample_bicubic2d_aa
autogen: _upsample_bicubic2d_aa.vec_out
-- func: _upsample_bicubic2d_aa_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
- python_module: nn
- dispatch:
- CompositeExplicitAutograd: _upsample_bicubic2d_aa_backward
- autogen: _upsample_bicubic2d_aa_backward.vec_out
-
- func: upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
- dispatch:
- CompositeExplicitAutograd: upsample_nearest1d
autogen: upsample_nearest1d.vec_out
- func: _upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
- dispatch:
- CompositeExplicitAutograd: _upsample_nearest_exact1d
autogen: _upsample_nearest_exact1d.vec_out
-- func: upsample_nearest1d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
- python_module: nn
- dispatch:
- CompositeExplicitAutograd: upsample_nearest1d_backward
- autogen: upsample_nearest1d_backward.vec_out
-
-- func: _upsample_nearest_exact1d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
- python_module: nn
- dispatch:
- CompositeExplicitAutograd: _upsample_nearest_exact1d_backward
- autogen: _upsample_nearest_exact1d_backward.vec_out
-
- func: upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
- dispatch:
- CompositeExplicitAutograd: upsample_nearest2d
autogen: upsample_nearest2d.vec_out
+ tags: core
- func: _upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
- dispatch:
- CompositeExplicitAutograd: _upsample_nearest_exact2d
autogen: _upsample_nearest_exact2d.vec_out
-- func: upsample_nearest2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
- python_module: nn
- dispatch:
- CompositeExplicitAutograd: upsample_nearest2d_backward
- autogen: upsample_nearest2d_backward.vec_out
-
-- func: _upsample_nearest_exact2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
- python_module: nn
- dispatch:
- CompositeExplicitAutograd: _upsample_nearest_exact2d_backward
- autogen: _upsample_nearest_exact2d_backward.vec_out
-
- func: upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
- dispatch:
- CPU: upsample_nearest3d_cpu
- CUDA: upsample_nearest3d_cuda
- QuantizedCPU: upsample_nearest3d_quantized_cpu
autogen: upsample_nearest3d.vec_out
- func: _upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
- dispatch:
- CPU: _upsample_nearest_exact3d_cpu
- CUDA: _upsample_nearest_exact3d_cuda
- QuantizedCPU: _upsample_nearest_exact3d_quantized_cpu
autogen: _upsample_nearest_exact3d.vec_out
-- func: upsample_nearest3d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
- python_module: nn
- dispatch:
- CPU: upsample_nearest3d_backward_cpu
- CUDA: upsample_nearest3d_backward_cuda
- autogen: upsample_nearest3d_backward.vec_out
-
-- func: _upsample_nearest_exact3d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
- python_module: nn
- dispatch:
- CPU: _upsample_nearest_exact3d_backward_cpu
- CUDA: _upsample_nearest_exact3d_backward_cuda
- autogen: _upsample_nearest_exact3d_backward.vec_out
-
# NOTE: all of the non-"vec" upsample overloads are only kept for backward compatibility.
- func: upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
@@ -11154,10 +11913,11 @@
python_module: nn
structured: True
dispatch:
CPU: _upsample_nearest_exact1d_out_cpu
CUDA: _upsample_nearest_exact1d_out_cuda
+ MPS: _upsample_nearest_exact1d_out_mps
- func: upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
python_module: nn
structured_delegate: upsample_nearest1d.out
@@ -11169,17 +11929,19 @@
python_module: nn
structured: True
dispatch:
CPU: upsample_nearest1d_backward_out_cpu
CUDA: upsample_nearest1d_backward_out_cuda
+ MPS: upsample_nearest1d_backward_out_mps
- func: _upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_nearest_exact1d_backward_out_cpu
CUDA: _upsample_nearest_exact1d_backward_out_cuda
+ MPS: _upsample_nearest_exact1d_backward_out_mps
- func: upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
python_module: nn
structured_delegate: upsample_nearest1d_backward.grad_input
@@ -11292,33 +12054,38 @@
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sigmoid_backward_out
MPS: sigmoid_backward_out_mps
+ tags: pointwise
- func: sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
python_module: nn
structured_delegate: sigmoid_backward.grad_input
+ tags: pointwise
- func: logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: logit_backward_out
+ tags: pointwise
- func: logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor
python_module: nn
structured_delegate: logit_backward.grad_input
+ tags: pointwise
- func: tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: tanh_backward_out
MPS: tanh_backward_out_mps
+ tags: pointwise
- func: tanh_backward(Tensor grad_output, Tensor output) -> Tensor
python_module: nn
structured_delegate: tanh_backward.grad_input
@@ -11337,29 +12104,30 @@
# to a convolution that is still written in the "legacy" style; that is,
# C code in the THNN/ or THCUNN/ directory. A slow_ convolution is
# one that is written in the native style: modern C++. Algorithmically,
# these are the same thing, but we give them different prefixes to
# make the operational distinction clear.
+ tags: pointwise
-- func: slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
+- func: slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: slow_conv_transpose2d_structured_cpu
CUDA: slow_conv_transpose2d_structured_cuda
-- func: slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1) -> Tensor
+- func: slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor
python_module: nn
structured_delegate: slow_conv_transpose2d.out
-- func: slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
+- func: slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU: slow_conv_transpose3d_out_cpu
CUDA: slow_conv_transpose3d_out_cuda
-- func: slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1) -> Tensor
+- func: slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor
python_module: nn
dispatch:
CPU: slow_conv_transpose3d_cpu
CUDA: slow_conv_transpose3d_cuda
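The "int[...] padding" to "SymInt[...] padding" rewrites in this hunk (and in the conv/slow_conv entries below) do not change the user-facing signatures; SymInt is ATen's symbolic integer, which lets these schemas carry unspecialised sizes through the dynamic-shape tracing machinery. A hedged sketch in plain PyTorch (not torch-rb) of the feature this supports, assuming a torch.compile-capable build:

    import torch
    import torch.nn.functional as F

    def f(x, w):
        # conv_transpose2d can lower to the slow_conv_transpose2d schema on
        # fallback backends; its padding is now a SymInt[2] and can stay symbolic.
        return F.conv_transpose2d(x, w, stride=1, padding=1)

    compiled = torch.compile(f, dynamic=True)   # keep sizes symbolic, do not specialise
    x = torch.randn(1, 4, 8, 8)
    w = torch.randn(4, 2, 3, 3)                 # (in_ch, out_ch, kH, kW) for transpose conv
    print(compiled(x, w).shape)                 # torch.Size([1, 2, 8, 8])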
@@ -11392,51 +12160,51 @@
dispatch:
CPU: slow_conv2d_backward_cpu
CUDA: slow_conv2d_backward_cuda
autogen: _slow_conv2d_backward.output_mask_out
-- func: _conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
+- func: _conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
python_module: nn
dispatch:
CUDA: conv_depthwise2d_cuda_out
-- func: _conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation) -> Tensor
+- func: _conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation) -> Tensor
python_module: nn
dispatch:
CUDA: conv_depthwise2d_cuda
-- func: conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, int[3] dilation) -> Tensor
+- func: conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor
python_module: nn
dispatch:
CUDA: conv_depthwise3d_cuda
autogen: conv_depthwise3d.out
-- func: slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
+- func: slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
-- func: slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0) -> Tensor
+- func: slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor
python_module: nn
-- func: slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, *, Tensor(a!) output) -> Tensor(a!)
+- func: slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
python_module: nn
dispatch:
CPU: slow_conv3d_forward_out_cpu
-- func: slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding) -> Tensor
+- func: slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor
python_module: nn
dispatch:
CPU: slow_conv3d_forward_cpu
-- func: slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor
+- func: slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor
python_module: nn
dispatch:
CPU: slow_conv_dilated2d_cpu
CUDA: slow_conv_dilated2d_cuda
autogen: slow_conv_dilated2d.out
-- func: slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1) -> Tensor
+- func: slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor
python_module: nn
dispatch:
CPU: slow_conv_dilated3d_cpu
CUDA: slow_conv_dilated3d_cuda
autogen: slow_conv_dilated3d.out
@@ -11450,10 +12218,11 @@
- func: col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
python_module: nn
dispatch:
CPU: col2im_cpu
CUDA: col2im_cuda
+ tags: core
- func: column_stack(Tensor[] tensors) -> Tensor
- func: column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
@@ -11482,10 +12251,11 @@
CompositeExplicitAutograd: isinf
SparseCPU, SparseCUDA: isinf_sparse
SparseMeta: isinf_sparse_meta
SparseCsrCPU, SparseCsrCUDA: isinf_sparse_csr
autogen: isinf.out
+ tags: core
- func: record_stream(Tensor(a!) self, Stream s) -> ()
variants: method
dispatch:
CUDA: record_stream_cuda
@@ -11494,33 +12264,37 @@
variants: function, method
structured_delegate: isposinf.out
dispatch:
SparseCPU, SparseCUDA: isposinf_sparse
SparseCsrCPU, SparseCsrCUDA: isposinf_sparse_csr
+ tags: pointwise
- func: isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: isposinf_out
SparseCPU, SparseCUDA: isposinf_sparse_out
SparseCsrCPU, SparseCsrCUDA: isposinf_sparse_csr_out
+ tags: pointwise
- func: isneginf(Tensor self) -> Tensor
variants: function, method
structured_delegate: isneginf.out
dispatch:
SparseCPU, SparseCUDA: isneginf_sparse
SparseCsrCPU, SparseCsrCUDA: isneginf_sparse_csr
+ tags: pointwise
- func: isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: isneginf_out
SparseCPU, SparseCUDA: isneginf_sparse_out
SparseCsrCPU, SparseCsrCUDA: isneginf_sparse_csr_out
+ tags: pointwise
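isposinf and isneginf gain the pointwise tag while keeping their dense and sparse kernels. A quick illustration of the behaviour those dispatch entries describe; the sparse call relies on the SparseCPU/SparseCUDA kernels listed above:

    import torch

    t = torch.tensor([float("inf"), float("-inf"), 1.0, float("nan")])
    print(torch.isposinf(t))   # tensor([ True, False, False, False])
    print(torch.isneginf(t))   # tensor([False,  True, False, False])

    # The sparse dispatch entries mean the same call works on sparse inputs.
    s = t.to_sparse()
    print(torch.isposinf(s).to_dense())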
# NOTE [_add_batch_dim and _remove_batch_dim]
# _add_batch_dim and _remove_batch_dim are meant to be used in the implementation
# of the vmap frontend API (see torch/_vmap_internals.py). They are not
# user-facing, hence the leading underscore. Please don't use them anywhere else.
@@ -11540,44 +12314,50 @@

- func: special_entr(Tensor self) -> Tensor
structured_delegate: special_entr.out
python_module: special
variants: function
+ tags: pointwise
- func: special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: special
variants: function
dispatch:
CPU, CUDA: special_entr_out
+ tags: pointwise
- func: special_ndtri(Tensor self) -> Tensor
structured_delegate: special_ndtri.out
python_module: special
variants: function
+ tags: pointwise
- func: special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: special
variants: function
dispatch:
CPU, CUDA: special_ndtri_out
+ tags: pointwise
- func: special_log_ndtr(Tensor self) -> Tensor
structured_delegate: special_log_ndtr.out
python_module: special
variants: function
+ tags: pointwise
- func: special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: special
variants: function
dispatch:
CPU, CUDA: special_log_ndtr_out
+ tags: pointwise
- func: special_expm1(Tensor self) -> Tensor
python_module: special
variants: function
@@ -11634,17 +12414,19 @@
- func: special_erfcx(Tensor self) -> Tensor
python_module: special
variants: function
structured_delegate: special_erfcx.out
+ tags: pointwise
- func: special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: special_erfcx_out
+ tags: pointwise
- func: special_erfinv(Tensor self) -> Tensor
python_module: special
variants: function
@@ -11662,47 +12444,53 @@
- func: special_xlog1py(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
structured_delegate: special_xlog1py.out
+ tags: pointwise
- func: special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
dispatch:
CompositeExplicitAutograd: special_xlog1py
+ tags: pointwise
- func: special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
dispatch:
CompositeExplicitAutograd: special_xlog1py
+ tags: pointwise
- func: special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
python_module: special
variants: function
dispatch:
CPU, CUDA: special_xlog1py_out
+ tags: pointwise
- func: special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: special
variants: function
dispatch:
CompositeExplicitAutograd: special_xlog1py_out
+ tags: pointwise
- func: special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: special
variants: function
dispatch:
CompositeExplicitAutograd: special_xlog1py_out
+ tags: pointwise
- func: special_xlogy(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
@@ -11735,47 +12523,53 @@
- func: special_zeta(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
structured_delegate: special_zeta.out
+ tags: pointwise
- func: special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
dispatch:
CompositeExplicitAutograd: special_zeta
+ tags: pointwise
- func: special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
dispatch:
CompositeExplicitAutograd: special_zeta
+ tags: pointwise
- func: special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
python_module: special
variants: function
dispatch:
CPU, CUDA: special_zeta_out
+ tags: pointwise
- func: special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: special
variants: function
dispatch:
CompositeExplicitAutograd: special_zeta_out
+ tags: pointwise
- func: special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: special
variants: function
dispatch:
CompositeExplicitAutograd: special_zeta_out
+ tags: pointwise
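special_xlog1py and special_zeta each carry Tensor/Tensor, Scalar/Tensor and Tensor/Scalar overloads, all now tagged pointwise. A short usage sketch; mixing a Python number with a tensor should route to the *_scalar overloads listed above:

    import torch

    x = torch.tensor([0.0, 0.5, 2.0])
    y = torch.tensor([1.0, 2.0, 3.0])

    print(torch.special.xlog1py(x, y))    # x * log1p(y), with the 0 * log1p(.) == 0 convention
    print(torch.special.xlog1py(2.0, y))  # Scalar self -> special_xlog1py.self_scalar
    print(torch.special.zeta(x, 1.0))     # Hurwitz zeta; Scalar other -> special_zeta.other_scalar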
- func: special_i0(Tensor self) -> Tensor
python_module: special
variants: function
@@ -11785,41 +12579,47 @@
- func: special_i0e(Tensor self) -> Tensor
python_module: special
variants: function
structured_delegate: special_i0e.out
+ tags: pointwise
- func: special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: special_i0e_out
+ tags: pointwise
- func: special_i1(Tensor self) -> Tensor
python_module: special
variants: function
structured_delegate: special_i1.out
+ tags: pointwise
- func: special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: special_i1_out
+ tags: pointwise
- func: special_i1e(Tensor self) -> Tensor
python_module: special
variants: function
structured_delegate: special_i1e.out
+ tags: pointwise
- func: special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: special_i1e_out
+ tags: pointwise
- func: special_logit(Tensor self, float? eps=None) -> Tensor
python_module: special
variants: function
@@ -12134,11 +12934,11 @@
- func: linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
structured: True
dispatch:
- CPU, CUDA: linalg_cross_out
+ CPU, CUDA, MPS: linalg_cross_out
# linalg.lu_factor
- func: linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
python_module: linalg
variants: function
@@ -12353,10 +13153,11 @@
- func: linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
python_module: linalg
structured: True
dispatch:
CPU, CUDA: linalg_inv_ex_out
+ MPS: linalg_inv_ex_out_mps
- func: linalg_inv(Tensor A) -> Tensor
python_module: linalg
- func: linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
@@ -12713,462 +13514,362 @@
- func: _fw_primal_copy(Tensor self, int level) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: _fw_primal_copy
tags: view_copy
+ autogen: _fw_primal_copy.out
- func: _make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: _make_dual_copy
tags: view_copy
+ autogen: _make_dual_copy.out
- func: view_as_real_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: view_as_real_copy
tags: view_copy
+ autogen: view_as_real_copy.out
- func: view_as_complex_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: view_as_complex_copy
tags: view_copy
+ autogen: view_as_complex_copy.out
- func: _conj_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: _conj_copy
tags: view_copy
+ autogen: _conj_copy.out
- func: _neg_view_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: _neg_view_copy
tags: view_copy
+ autogen: _neg_view_copy.out
- func: as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutogradNonFunctional: as_strided_copy
+ CompositeExplicitAutogradNonFunctional: as_strided_copy_symint
tags: view_copy
+ autogen: as_strided_copy.out
- func: _sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: _sparse_broadcast_to_copy
tags: view_copy
+ autogen: _sparse_broadcast_to_copy.out
- func: diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: diagonal_copy
tags: view_copy
+ autogen: diagonal_copy.out
- func: expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutogradNonFunctional: expand_copy
+ CompositeExplicitAutogradNonFunctional: expand_copy_symint
tags: view_copy
+ autogen: expand_copy.out
- func: permute_copy(Tensor self, int[] dims) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: permute_copy
tags: view_copy
+ autogen: permute_copy.out
- func: _reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutogradNonFunctional: _reshape_alias_copy
+ CompositeExplicitAutogradNonFunctional: _reshape_alias_copy_symint
tags: view_copy
+ autogen: _reshape_alias_copy.out
-- func: select_copy.int(Tensor self, int dim, int index) -> Tensor
+- func: select_copy.int(Tensor self, int dim, SymInt index) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutogradNonFunctional: select_copy_int
+ CompositeExplicitAutogradNonFunctional: select_copy_symint
+ SparseCsrCPU, SparseCsrCUDA: select_copy_sparse_csr
tags: view_copy
+ autogen: select_copy.int_out
- func: detach_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: detach_copy
tags: view_copy
+ autogen: detach_copy.out
- func: slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutogradNonFunctional: slice_copy_Tensor
+ CompositeExplicitAutogradNonFunctional: slice_copy_Tensor_symint
tags: view_copy
+ autogen: slice_copy.Tensor_out
-- func: split_copy.Tensor(Tensor self, int split_size, int dim=0) -> Tensor[]
+- func: split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
variants: function
dispatch:
- CompositeExplicitAutogradNonFunctional: split_copy_Tensor
+ CompositeExplicitAutogradNonFunctional: split_copy_Tensor_symint
tags: view_copy
-- func: split_with_sizes_copy(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]
+- func: split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
variants: function
dispatch:
- CompositeExplicitAutogradNonFunctional: split_with_sizes_copy
+ CompositeExplicitAutogradNonFunctional: split_with_sizes_copy_symint
tags: view_copy
- func: squeeze_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: squeeze_copy
tags: view_copy
+ autogen: squeeze_copy.out
- func: squeeze_copy.dim(Tensor self, int dim) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: squeeze_copy_dim
tags: view_copy
+ autogen: squeeze_copy.dim_out
+- func: squeeze_copy.dims(Tensor self, int[] dim) -> Tensor
+ variants: function
+ dispatch:
+ CompositeExplicitAutogradNonFunctional: squeeze_copy_dims
+ tags: view_copy
+ autogen: squeeze_copy.dims_out
+
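squeeze_copy.dims is a new overload mirroring squeeze's ability, new in this PyTorch generation, to drop several size-1 dimensions in one call; the *_copy variants are the non-aliasing forms used by functionalization. A hedged sketch, assuming a runtime that exposes the multi-dim squeeze and the *_copy bindings:

    import torch

    t = torch.zeros(1, 3, 1, 4)
    print(t.squeeze((0, 2)).shape)              # torch.Size([3, 4]) via squeeze.dims
    print(torch.squeeze_copy(t, (0, 2)).shape)  # same result, but materialised as a copy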
- func: t_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: t_copy
tags: view_copy
+ autogen: t_copy.out
- func: transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: transpose_copy_int
tags: view_copy
+ autogen: transpose_copy.int_out
- func: unsqueeze_copy(Tensor self, int dim) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: unsqueeze_copy
tags: view_copy
+ autogen: unsqueeze_copy.out
- func: _indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: _indices_copy
tags: view_copy
+ autogen: _indices_copy.out
- func: _values_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: _values_copy
tags: view_copy
+ autogen: _values_copy.out
- func: indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: indices_copy
tags: view_copy
+ autogen: indices_copy.out
- func: values_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: values_copy
tags: view_copy
+ autogen: values_copy.out
- func: crow_indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: crow_indices_copy
tags: view_copy
+ autogen: crow_indices_copy.out
- func: col_indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: col_indices_copy
tags: view_copy
+ autogen: col_indices_copy.out
- func: ccol_indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: ccol_indices_copy
+ CompositeExplicitAutogradNonFunctional: ccol_indices_copy
tags: view_copy
autogen: ccol_indices_copy.out
- func: row_indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: row_indices_copy
+ CompositeExplicitAutogradNonFunctional: row_indices_copy
tags: view_copy
autogen: row_indices_copy.out
- func: unbind_copy.int(Tensor self, int dim=0) -> Tensor[]
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: unbind_copy_int
tags: view_copy
+- func: unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()
+ variants: function
+ dispatch:
+ CompositeExplicitAutograd: unbind_copy_int_out
+
+- func: split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
+ variants: function
+ dispatch:
+ CompositeExplicitAutograd: split_copy_Tensor_out
+
+
+- func: split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
+ variants: function
+ dispatch:
+ CompositeExplicitAutograd: split_with_sizes_copy_out
+
- func: view_copy(Tensor self, SymInt[] size) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: view_copy_symint
tags: view_copy
+ autogen: view_copy.out
- func: view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: view_copy_dtype
tags: view_copy
+ autogen: view_copy.dtype_out
- func: unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: unfold_copy
tags: view_copy
+ autogen: unfold_copy.out
- func: alias_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: alias_copy
tags: view_copy
+ autogen: alias_copy.out
-- func: _fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
+- func: to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor
+ variants: method
dispatch:
- CompositeExplicitAutograd: _fw_primal_copy_out
+ NestedTensorCPU: NestedTensor_to_padded_tensor_generic
+ NestedTensorCUDA: NestedTensor_to_padded_tensor_cuda
+ autogen: to_padded_tensor.out
-
-- func: _make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
+- func: _nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor
dispatch:
- CompositeExplicitAutograd: _make_dual_copy_out
+ NestedTensorCPU: NestedTensor_softmax_dropout
+ NestedTensorCUDA: NestedTensor_softmax_dropout_cuda
-
-- func: view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+# Apparently, putting "forward" in the name will cause Python bindings to be skipped, so "fwd" it is.
+- func: _transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: view_as_real_copy_out
+ CPU, CUDA, NestedTensorCPU, NestedTensorCUDA: transformer_encoder_layer_forward
+ autogen: _transformer_encoder_layer_fwd.out
-
-- func: view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+- func: _native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)
variants: function
dispatch:
- CompositeExplicitAutograd: view_as_complex_copy_out
+ CPU, NestedTensorCPU: native_multi_head_attention_cpu
+ CUDA, NestedTensorCUDA: native_multi_head_attention_cuda
+ autogen: _native_multi_head_attention.out
-
-- func: _conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+- func: scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> Tensor
+ python_module: nn
variants: function
- dispatch:
- CompositeExplicitAutograd: _conj_copy_out
+ autogen: scaled_dot_product_attention.out
-
-- func: _neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+# TODO: THIS NEEDS TO BE REMOVED BUT PEOPLE HAVE TRAINED THEIR MODELS WITH THIS OP BUILTIN
+- func: _scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)
+ python_module: nn
variants: function
- dispatch:
- CompositeExplicitAutograd: _neg_view_copy_out
+ autogen: _scaled_dot_product_attention.out
-
-- func: as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
+# This aten function is kept so that we can test the choice function from Python
+- func: _fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> int
dispatch:
- CompositeExplicitAutograd: as_strided_copy_out_symint
+ Meta: _fused_sdp_choice_meta
+ CPU, NestedTensorCPU: _fused_sdp_choice_cpp
+ CUDA, NestedTensorCUDA: _fused_sdp_choice_cuda
-
-- func: _sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)
+- func: _scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None) -> (Tensor, Tensor)
variants: function
- dispatch:
- CompositeExplicitAutograd: _sparse_broadcast_to_copy_out
-
-- func: diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
+- func: _scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False) -> (Tensor ouput, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, int philox_seed, int philox_offset, Tensor debug_attn_mask)
dispatch:
- CompositeExplicitAutograd: diagonal_copy_out
+ CUDA: _scaled_dot_product_flash_attention_cuda
+ NestedTensorCUDA: _scaled_dot_product_flash_attention_nestedtensor_cuda
-
-- func: expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
+- func: _scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)
variants: function
dispatch:
- CompositeExplicitAutograd: expand_copy_out_symint
+ CUDA: _scaled_dot_product_flash_attention_backward_cuda
-
-- func: permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
+- func: _scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, bool compute_log_sumexp, bool is_causal=False) -> (Tensor, Tensor)
dispatch:
- CompositeExplicitAutograd: permute_copy_out
+ CUDA: _scaled_dot_product_efficient_attention_cuda
+ NestedTensorCUDA: _scaled_dot_product_efficient_attention_nestedtensor_cuda
-
-- func: _reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
+- func: _scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)
dispatch:
- CompositeExplicitAutograd: _reshape_alias_copy_out
+ CUDA: _scaled_dot_product_efficient_attention_backward_cuda
-
-- func: select_copy.int_out(Tensor self, int dim, int index, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
+- func: _chunk_grad_outputs_efficient_attention(Tensor query, Tensor key, Tensor value, bool is_causal=False) -> bool
dispatch:
- CompositeExplicitAutograd: select_copy_int_out
+ CUDA: _chunk_grad_outputs_efficient_attention
-
-- func: detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+- func: _flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, bool return_debug_mask) -> (Tensor output, Tensor softmax_logsumexp, int philox_seed, int philox_offset, Tensor debug_attn_mask)
variants: function
dispatch:
- CompositeExplicitAutograd: detach_copy_out
+ CUDA: _flash_attention_forward
-
-- func: slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
+- func: _flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor, Tensor, Tensor)
variants: function
dispatch:
- CompositeExplicitAutograd: slice_copy_Tensor_out
+ CUDA: _flash_attention_backward
-
-- func: split_copy.Tensor_out(Tensor self, int split_size, int dim=0, *, Tensor(a!)[] out) -> ()
+# Returns output, logsumexp if compute_logsumexp
+- func: _efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, bool compute_log_sumexp=False, bool causal=False) -> (Tensor, Tensor)
variants: function
dispatch:
- CompositeExplicitAutograd: split_copy_Tensor_out
+ CUDA: _efficient_attention_forward
-
-- func: split_with_sizes_copy.out(Tensor self, int[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
+- func: _efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)
variants: function
dispatch:
- CompositeExplicitAutograd: split_with_sizes_copy_out
+ CUDA: _efficient_attention_backward
-
-- func: squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: squeeze_copy_out
-
-
-- func: squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: squeeze_copy_dim_out
-
-
-- func: t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: t_copy_out
-
-
-- func: transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: transpose_copy_int_out
-
-
-- func: unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: unsqueeze_copy_out
-
-
-- func: _indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: _indices_copy_out
-
-
-- func: _values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: _values_copy_out
-
-
-- func: indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: indices_copy_out
-
-
-- func: values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: values_copy_out
-
-
-- func: crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: crow_indices_copy_out
-
-
-- func: col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: col_indices_copy_out
-
-
-- func: unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()
- variants: function
- dispatch:
- CompositeExplicitAutograd: unbind_copy_int_out
-
-
-- func: view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: view_copy_out_symint
-
-
-- func: view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: view_copy_dtype_out
-
-
-- func: unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: unfold_copy_out
-
-
-- func: alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: alias_copy_out
-
-- func: to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
- variants: method
- dispatch:
- NestedTensorCPU: NestedTensor_to_padded_tensor_generic
- NestedTensorCUDA: NestedTensor_to_padded_tensor_cuda
- autogen: to_padded_tensor.out
-
-- func: _nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor
- dispatch:
- NestedTensorCPU: NestedTensor_softmax_dropout
- NestedTensorCUDA: NestedTensor_softmax_dropout_cuda
-
-- func: _nested_tensor_layer_norm(Tensor self, Tensor? weight, Tensor? bias, float eps) -> Tensor
- variants: method
- dispatch:
- NestedTensorCPU, NestedTensorCUDA: NestedTensor_layer_norm
- autogen: _nested_tensor_layer_norm.out
-
-# Apparently, putting "forward" in the name will cause Python bindings to be skipped, so "fwd" it is.
-- func: _transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor
- variants: function
- dispatch:
- CPU, CUDA, NestedTensorCPU, NestedTensorCUDA: transformer_encoder_layer_forward
- autogen: _transformer_encoder_layer_fwd.out
-
-- func: _native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)
- variants: function
- dispatch:
- CPU, CUDA, NestedTensorCPU, NestedTensorCUDA: native_multi_head_attention
- autogen: _native_multi_head_attention.out
-
-- func: _scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)
- python_module: nn
- variants: function
- autogen: _scaled_dot_product_attention.out
-
-# Register the math kernel for cpu
-- func: _scaled_dot_product_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)
- variants: function
- dispatch:
- CUDA: _scaled_dot_product_attention_forward_cuda
- CPU: _scaled_dot_product_attention_forward_math
- NestedTensorCPU, NestedTensorCUDA: _scaled_dot_product_attention_forward_math
- Meta: _scaled_dot_product_attention_forward_math
-
-- func: _scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)
- variants: function
-
- func: _triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor
variants: function
dispatch:
CUDA: triton_scaled_dot_attention
autogen: _triton_scaled_dot_attention.out
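The attention hunk above swaps the experimental _scaled_dot_product_attention plumbing for the public scaled_dot_product_attention op plus explicit flash and memory-efficient backends (_scaled_dot_product_flash_attention*, _scaled_dot_product_efficient_attention*, _flash_attention_*, _efficient_attention_*), with _fused_sdp_choice selecting among them. A minimal sketch of the public entry point in plain PyTorch, assuming a 2.0-era build:

    import torch
    import torch.nn.functional as F

    q = torch.randn(2, 8, 16, 64)   # (batch, heads, seq, head_dim)
    k = torch.randn(2, 8, 16, 64)
    v = torch.randn(2, 8, 16, 64)

    # Backend selection (flash / memory-efficient / math fallback) happens
    # underneath this call; on CPU it uses the math path.
    out = F.scaled_dot_product_attention(q, k, v, attn_mask=None,
                                         dropout_p=0.0, is_causal=True)
    print(out.shape)                # torch.Size([2, 8, 16, 64])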
@@ -13181,24 +13882,21 @@
- func: special_airy_ai(Tensor x) -> Tensor
python_module: special
structured_delegate: special_airy_ai.out
variants: function
+ tags: pointwise
- func: special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_airy_ai_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
-- func: _flash_scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal) -> Tensor
- variants: function
- dispatch:
- CUDA: flash_scaled_dot_product_attention
-
- func: _transformer_decoder_only_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None) -> (Tensor, Tensor, Tensor)
variants: function
dispatch:
CPU, CUDA, NestedTensorCPU, NestedTensorCUDA: transformer_decoder_only_layer_forward
autogen: _transformer_decoder_only_layer_fwd.out
@@ -13211,592 +13909,686 @@
- func: special_bessel_j0(Tensor self) -> Tensor
python_module: special
structured_delegate: special_bessel_j0.out
variants: function
+ tags: pointwise
- func: special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_bessel_j0_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_bessel_j1(Tensor self) -> Tensor
python_module: special
structured_delegate: special_bessel_j1.out
variants: function
+ tags: pointwise
- func: special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_bessel_j1_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_bessel_y0(Tensor self) -> Tensor
python_module: special
structured_delegate: special_bessel_y0.out
variants: function
+ tags: pointwise
- func: special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_bessel_y0_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_bessel_y1(Tensor self) -> Tensor
python_module: special
structured_delegate: special_bessel_y1.out
variants: function
+ tags: pointwise
- func: special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_bessel_y1_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_chebyshev_polynomial_t.out
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_chebyshev_polynomial_t_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_t_out
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_chebyshev_polynomial_u.out
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_chebyshev_polynomial_u_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_u_out
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_chebyshev_polynomial_v.out
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_chebyshev_polynomial_v_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_v_out
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_chebyshev_polynomial_w.out
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_chebyshev_polynomial_w_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_w_out
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_hermite_polynomial_h.out
variants: function
+ tags: pointwise
- func: special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_hermite_polynomial_h_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_hermite_polynomial_h_out
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_hermite_polynomial_he.out
variants: function
+ tags: pointwise
- func: special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_hermite_polynomial_he_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_hermite_polynomial_he_out
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_laguerre_polynomial_l.out
variants: function
+ tags: pointwise
- func: special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_laguerre_polynomial_l_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_laguerre_polynomial_l_out
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_legendre_polynomial_p.out
variants: function
+ tags: pointwise
- func: special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_legendre_polynomial_p_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_legendre_polynomial_p_out
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_modified_bessel_i0(Tensor self) -> Tensor
python_module: special
structured_delegate: special_modified_bessel_i0.out
variants: function
+ tags: pointwise
- func: special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_modified_bessel_i0_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_modified_bessel_i1(Tensor self) -> Tensor
python_module: special
structured_delegate: special_modified_bessel_i1.out
variants: function
+ tags: pointwise
- func: special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_modified_bessel_i1_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_modified_bessel_k0(Tensor self) -> Tensor
python_module: special
structured_delegate: special_modified_bessel_k0.out
variants: function
+ tags: pointwise
- func: special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_modified_bessel_k0_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_modified_bessel_k1(Tensor self) -> Tensor
python_module: special
structured_delegate: special_modified_bessel_k1.out
variants: function
+ tags: pointwise
- func: special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_modified_bessel_k1_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_scaled_modified_bessel_k0(Tensor x) -> Tensor
python_module: special
structured_delegate: special_scaled_modified_bessel_k0.out
variants: function
+ tags: pointwise
- func: special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_scaled_modified_bessel_k0_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_scaled_modified_bessel_k1(Tensor x) -> Tensor
python_module: special
structured_delegate: special_scaled_modified_bessel_k1.out
variants: function
+ tags: pointwise
- func: special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_scaled_modified_bessel_k1_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_shifted_chebyshev_polynomial_t.out
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_shifted_chebyshev_polynomial_t_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_t_out
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_shifted_chebyshev_polynomial_u.out
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_shifted_chebyshev_polynomial_u_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_u_out
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_shifted_chebyshev_polynomial_v.out
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_shifted_chebyshev_polynomial_v_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_v_out
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_shifted_chebyshev_polynomial_w.out
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_shifted_chebyshev_polynomial_w_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_w_out
device_check: NoCheck
python_module: special
variants: function
+ tags: pointwise
- func: special_spherical_bessel_j0(Tensor x) -> Tensor
python_module: special
structured_delegate: special_spherical_bessel_j0.out
variants: function
+ tags: pointwise
- func: special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_spherical_bessel_j0_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
+ tags: pointwise
# Aux function used in the test TestPythonDispatch.test_kwarg_only_and_positional_default
# within test/test_python_dispatch.py
- func: _foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor
dispatch:
@@ -13808,5 +14600,12 @@
# Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now).
variants: function
dispatch:
CUDA: _fused_adam_kernel_cuda_
autogen: _fused_adam, _fused_adam.out
+
+- func: _fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
+ # Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now).
+ variants: function
+ dispatch:
+ CUDA: _fused_adamw_kernel_cuda_
+ autogen: _fused_adamw, _fused_adamw.out
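The closing hunk adds _fused_adamw_, the multi-tensor CUDA kernel that backs AdamW's fused update path, mirroring the _fused_adam_ entry above it. A sketch of the Python-side switch that reaches this kernel, assuming a CUDA build of a 2.0-era PyTorch (fused AdamW requires CUDA floating-point parameters):

    import torch

    model = torch.nn.Linear(10, 10).cuda()
    # fused=True performs the whole AdamW update in one fused kernel
    # (_fused_adamw_) instead of looping over parameters in Python.
    opt = torch.optim.AdamW(model.parameters(), lr=1e-3, fused=True)

    loss = model(torch.randn(4, 10, device="cuda")).sum()
    loss.backward()
    opt.step()
    opt.zero_grad()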