codegen/native_functions.yaml in torch-rb-0.11.2 vs codegen/native_functions.yaml in torch-rb-0.12.0
- old
+ new
@@ -129,10 +129,11 @@
# the number of batch dims for other once we support that use case
- func: _new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: _new_zeros_with_same_feature_meta
+ autogen: _new_zeros_with_same_feature_meta.out
# This function compares the storage numel of self with that of other, where
# storage numel is computed as: `other.storage().nbytes() / other.itemsize()`.
# We create this function for composite compliance purposes. The batching rule
# always returns true because vmapped as_strided does not support accessing
@@ -167,67 +168,92 @@
- func: _assert_async(Tensor self) -> ()
dispatch:
CPU: _assert_async_cpu
CUDA: _assert_async_cuda
+
+- func: _assert_tensor_metadata(Tensor a, int[]? size=None, int[]? stride=None, ScalarType? dtype=None) -> ()
+
- func: refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
variants: method
- func: _use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool
device_check: NoCheck # Tensor arguments allowed to be on different devices, see also _cudnn_ctc_loss
dispatch:
CUDA: _use_cudnn_ctc_loss
+- func: _use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool
+ device_check: NoCheck # Tensor arguments allowed to be on different devices, see also _cudnn_ctc_loss
+ dispatch:
+ CUDA: _use_cudnn_ctc_loss_tensor
+
- func: _cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
device_check: NoCheck # log_probs is expected to be on CUDA while targets is expected to be on CPU
dispatch:
CUDA: _cudnn_ctc_loss
+ autogen: _cudnn_ctc_loss.out
+- func: _cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
+ device_check: NoCheck # log_probs is expected to be on CUDA while targets is expected to be on CPU
+ dispatch:
+ CUDA: _cudnn_ctc_loss_tensor
+
- func: _use_cudnn_rnn_flatten_weight() -> bool
-- func: _cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, int input_size, int mode, int hidden_size, int proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
+- func: _cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
dispatch:
CUDA: _cudnn_rnn_flatten_weight
+ autogen: _cudnn_rnn_flatten_weight.out
-- func: _cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, int hidden_size, int proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
+- func: _cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
# rnn_tanh may or may not redispatch to _cudnn_rnn based on algorithm and build. Thus it might hit dispatch or kernel device check.
# Disable dispatch time device check for consistent behavior.
device_check: NoCheck
dispatch:
CUDA: _cudnn_rnn
+ autogen: _cudnn_rnn.out
-- func: _cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
+- func: _cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
dispatch:
CUDA: _cudnn_rnn_backward
+ autogen: _cudnn_rnn_backward.out
- func: _cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
dispatch:
CUDA: _cudnn_init_dropout_state
+ autogen: _cudnn_init_dropout_state.out
- func: _debug_has_internal_overlap(Tensor self) -> int
variants: function
- func: _fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
variants: function
dispatch:
CUDA: fused_dropout_cuda
+ tags: nondeterministic_seeded
+ autogen: _fused_dropout.out
- func: _masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
variants: function
dispatch:
CUDA: masked_scale_cuda
+ autogen: _masked_scale.out
- func: native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)
variants: function
dispatch:
CPU: native_dropout_cpu
CUDA: native_dropout_cuda
+ NestedTensorCPU, NestedTensorCUDA: native_dropout_nested
+ tags: nondeterministic_seeded
+ autogen: native_dropout.out
- func: native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor
dispatch:
- CPU: native_dropout_backward_cpu
+ CPU, NestedTensorCPU, NestedTensorCUDA: native_dropout_backward
CUDA: native_dropout_backward_cuda
+ autogen: native_dropout_backward.out
- func: _sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)
- func: _sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)
@@ -238,24 +264,32 @@
- func: _reshape_from_tensor(Tensor self, Tensor shape) -> Tensor
- func: _shape_as_tensor(Tensor self) -> Tensor
- func: dropout(Tensor input, float p, bool train) -> Tensor
+ tags: nondeterministic_seeded
- func: dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
+ tags: nondeterministic_seeded
- func: feature_dropout(Tensor input, float p, bool train) -> Tensor
+ tags: nondeterministic_seeded
- func: feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
+ tags: nondeterministic_seeded
- func: alpha_dropout(Tensor input, float p, bool train) -> Tensor
+ tags: nondeterministic_seeded
- func: alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
+ tags: nondeterministic_seeded
- func: feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
+ tags: nondeterministic_seeded
- func: feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
+ tags: nondeterministic_seeded
- func: abs(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
@@ -382,10 +416,11 @@
- func: _conj_physical(Tensor self) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: _conj_physical
SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr
+ autogen: _conj_physical.out
- func: conj_physical(Tensor self) -> Tensor
variants: function, method
- func: conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
@@ -540,26 +575,27 @@
- func: addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
variants: function, method
dispatch:
CPU, CUDA: addr
- CompositeImplicitAutograd: math_addr
+ CompositeExplicitAutograd: math_addr
- func: addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
variants: method
dispatch:
CompositeExplicitAutograd: addr_
- func: addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: addr_out
- CompositeImplicitAutograd: math_addr_out
+ CompositeExplicitAutograd: math_addr_out
- func: affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: affine_grid_generator
+ autogen: affine_grid_generator.out
- func: affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor
variants: function
- func: all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
@@ -583,10 +619,13 @@
- func: all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
variants: function, method
+ tags: data_dependent_output
+ dispatch:
+ CompositeExplicitAutograd: allclose
- func: any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: any.out
variants: function, method
@@ -606,28 +645,39 @@
- func: any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: arange
- func: arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: arange
-# Note [arange.start_step schema]
-# We want `arange.start_step` to be grouped up with `arange.start_out`,
-# But this doesn't happen automatically because the step argument
-# is defaultable for .start_out but not for .start_step.
-# We should probably just make "step" a defaultable param on arange.start,
-# and kill arange.start_step.
-- func: arange.start_step(Scalar start, Scalar end, Scalar step, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+# This operator should be named `arange.start_out` if following the naming convention. However, that
+# name is already taken. Disabled because of CI job failures.
+# FIXME: enable this
+#- func: arange.start_out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
+# dispatch:
+# CompositeExplicitAutograd: arange_start_out
+- func: arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: arange
+ cpp_no_default_args: ['step']
+
- func: arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: arange_out
- func: arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, Meta: arange_out
CUDA: arange_cuda_out
MPS: arange_mps_out
+ cpp_no_default_args: ['step']
# This function is a temporary hack to allow tracing of arange-like constructs with dynamic
# bounds on arange. Normal arange is not traceable because it does not take any tensor inputs;
# if the range you need is based on another tensor, calling this function directly will
# preserve tracing. Get rid of this when arange can directly take tensors for bounds
@@ -652,10 +702,11 @@
- func: argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU, CUDA: argmin_out
+ MPS: argmin_out_mps
- func: acosh(Tensor self) -> Tensor
variants: function, method
structured_delegate: acosh.out
@@ -713,11 +764,10 @@
- func: atanh(Tensor self) -> Tensor
structured_delegate: atanh.out
variants: function, method
dispatch:
- CompositeExplicitAutograd: atanh
SparseCPU, SparseCUDA: atanh_sparse
SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr
- func: atanh_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: atanh.out
@@ -742,27 +792,28 @@
- func: arctanh_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
- func: arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-- func: as_strided(Tensor(a) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a)
+- func: as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
variants: function, method
dispatch:
- ZeroTensor, CPU, CUDA, Meta: as_strided_tensorimpl
+ ZeroTensor, CPU, CUDA: as_strided_tensorimpl
+ Meta: as_strided_tensorimpl_meta_symint
MPS: as_strided_tensorimpl_mps
QuantizedCPU, QuantizedCUDA: as_strided_qtensorimpl
device_check: NoCheck
device_guard: False
-- func: as_strided_(Tensor(a!) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a!)
+- func: as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: function, method
device_check: NoCheck
device_guard: False
tags: inplace_view
dispatch:
- CompositeExplicitAutograd: as_strided_
+ CompositeExplicitAutogradNonFunctional: as_strided_
- func: asin(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: asin.out
@@ -865,18 +916,25 @@
CUDA: baddbmm_out_cuda
MPS: baddbmm_out_mps
SparseCsrCUDA: baddbmm_out_sparse_csr_cuda
- func: bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: bartlett_window
+ autogen: bartlett_window.out
- func: bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: bartlett_window
+ autogen: bartlett_window.periodic_out
- func: batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor
- func: quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor
dispatch:
QuantizedCPU: quantized_batch_norm
+ autogen: quantized_batch_norm.out
- func: _batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)
- func: _batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
@@ -884,29 +942,33 @@
- func: bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: bernoulli
+ tags: nondeterministic_seeded
- func: bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
+ tags: nondeterministic_seeded
dispatch:
CPU, CUDA: bernoulli_out
MPS: bernoulli_out_mps
- func: bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
+ tags: nondeterministic_seeded
dispatch:
CPU, CUDA: bernoulli_
MPS: bernoulli_mps_
- autogen: bernoulli.Tensor_functional, bernoulli.Tensor_out
+ autogen: bernoulli.Tensor, bernoulli.Tensor_out
- func: bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
+ tags: nondeterministic_seeded
dispatch:
CPU, CUDA: bernoulli_
MPS: bernoulli_mps_
autogen: bernoulli.float_out
@@ -916,10 +978,11 @@
# There is no default value for `p` here because it would introduce ambiguity
# with the `bernoulli(Tensor self, *, Generator? generator=None)` declaration.
- func: bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
+ tags: nondeterministic_seeded
- func: bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor
- func: binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
device_check: NoCheck # TensorIterator
@@ -958,19 +1021,19 @@
- func: binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: binary_cross_entropy_with_logits
+ autogen: binary_cross_entropy_with_logits.out
-- func: binary_cross_entropy_with_logits_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
- variants: function
-
- func: bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
variants: function, method
dispatch:
CPU: _bincount_cpu
CUDA: _bincount_cuda
+ tags: dynamic_output_shape
+ autogen: bincount.out
- func: bitwise_not(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: bitwise_not.out
variants: function, method
@@ -1032,10 +1095,11 @@
- func: logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: logical_not_out
+ MPS: logical_not_out_mps
- func: logical_xor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
@@ -1049,10 +1113,11 @@
- func: logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: logical_xor_out
+ MPS: logical_xor_out_mps
- func: logical_and(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
@@ -1066,10 +1131,11 @@
- func: logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: logical_and_out
+ MPS: logical_and_out_mps
- func: logical_or(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
@@ -1083,21 +1149,29 @@
- func: logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: logical_or_out
+ MPS: logical_or_out_mps
- func: blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: blackman_window
+ autogen: blackman_window.out
- func: blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: blackman_window
+ autogen: blackman_window.periodic_out
- func: bmm(Tensor self, Tensor mat2) -> Tensor
structured_delegate: bmm.out
variants: function, method
dispatch:
SparseCPU: bmm_sparse_cpu
SparseCUDA: bmm_sparse_cuda
+ NestedTensorCPU, NestedTensorCUDA: bmm_nested
- func: bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
structured: True
variants: function
dispatch:
@@ -1147,28 +1221,38 @@
- func: concat.names(Tensor[] tensors, Dimname dim) -> Tensor
- func: concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
+# alias for torch.cat
+- func: concatenate(Tensor[] tensors, int dim=0) -> Tensor
+
+- func: concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
+
+- func: concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor
+
+- func: concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
+
- func: block_diag(Tensor[] tensors) -> Tensor
variants: function
+ dispatch:
+ CompositeExplicitAutograd: block_diag
+ autogen: block_diag.out
- func: ceil(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: ceil.out
variants: function, method
dispatch:
- CompositeExplicitAutograd: ceil
SparseCPU, SparseCUDA: ceil_sparse
SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr
- func: ceil_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: ceil.out
variants: function, method
dispatch:
- CompositeExplicitAutograd: ceil_
SparseCPU, SparseCUDA: ceil_sparse_
SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr_
- func: ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
@@ -1221,12 +1305,10 @@
- func: clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
cpp_no_default_args: ['min']
structured_delegate: clamp.out
- dispatch:
- CompositeExplicitAutograd: clamp_
- func: clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
variants: function, method
structured_delegate: clamp.Tensor_out
@@ -1359,34 +1441,41 @@
- func: constant_pad_nd(Tensor self, int[] pad, Scalar value=0) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: constant_pad_nd
+ MPS: constant_pad_nd_mps
+ autogen: constant_pad_nd.out
- func: contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)
variants: method
manual_cpp_binding: True
- func: convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor
dispatch:
CompositeExplicitAutograd: convolution
+ autogen: convolution.out
-- func: convolution_backward(Tensor grad_output, Tensor input, Tensor weight, int[]? bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+- func: convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
CompositeExplicitAutograd, CUDA: convolution_backward
+ autogen: convolution_backward.out
- func: convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor
dispatch:
CompositeExplicitAutograd: convolution_overrideable
+ autogen: convolution_overrideable.out
- func: convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
dispatch:
CompositeExplicitAutograd: convolution_backward_overrideable
+ autogen: convolution_backward_overrideable.out
- func: _convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor
dispatch:
CompositeExplicitAutograd: _convolution
+ autogen: _convolution.out
- func: _convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor
- func: _convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor
@@ -1408,10 +1497,11 @@
cpp_no_default_args: ['bias', 'stride', 'padding']
- func: conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor
dispatch:
CompositeExplicitAutograd: conv_tbc
+ autogen: conv_tbc.out
- func: conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)
# NB: we inherit the goofy argument order from PyTorch torch.nn.functional
- func: conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor
@@ -1435,16 +1525,18 @@
autogen: copy.out
- func: _copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor
dispatch:
MPS: _copy_from_mps
+ autogen: _copy_from.out
# We need this to be able to properly copy from a CPU tensor to an XLA tensor with different sizes.
# See https://github.com/pytorch/xla/issues/2881
- func: _copy_from_and_resize(Tensor self, Tensor dst) -> Tensor
dispatch:
MPS: _copy_from_and_resize_mps
+ autogen: _copy_from_and_resize.out
- func: cos(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: cos.out
@@ -1486,72 +1578,86 @@
variants: function, method
dispatch:
CPU: count_nonzero_cpu
CUDA: count_nonzero_cuda
MPS: count_nonzero_mps
+ autogen: count_nonzero.dim_IntList_out
- func: count_nonzero(Tensor self, int? dim=None) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: count_nonzero
+ autogen: count_nonzero.out
- func: cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor
variants: function, method
- func: corrcoef(Tensor self) -> Tensor
variants: function, method
- func: cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid
dispatch:
CUDA: cudnn_affine_grid_generator_forward
+ autogen: cudnn_affine_grid_generator.out
# TODO: Why do I have to call this grad?!
- func: cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta
dispatch:
CUDA: cudnn_affine_grid_generator_backward
+ autogen: cudnn_affine_grid_generator_backward.out
- func: cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)
dispatch:
CUDA: cudnn_batch_norm
+ autogen: cudnn_batch_norm.out
# NB: You can only use this if you used cudnn_batch_norm with training=True
- func: cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)
dispatch:
CUDA: cudnn_batch_norm_backward
+ autogen: cudnn_batch_norm_backward.out
- func: cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
dispatch:
CUDA: cudnn_convolution
+ autogen: cudnn_convolution.out
- func: cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
dispatch:
CUDA: cudnn_convolution_transpose
+ autogen: cudnn_convolution_transpose.out
- func: _mps_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups) -> Tensor
dispatch:
MPS: _mps_convolution_transpose
+ autogen: _mps_convolution_transpose.out
- func: mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor)
dispatch:
MPS: mps_convolution_transpose_backward
+ autogen: mps_convolution_transpose_backward.out
- func: cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
dispatch:
CUDA: cudnn_convolution_relu
+ autogen: cudnn_convolution_relu.out
- func: cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
dispatch:
CUDA: cudnn_convolution_add_relu
+ autogen: cudnn_convolution_add_relu.out
# NB: input is special cased in a way I don't quite understand
- func: cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output
dispatch:
CUDA: cudnn_grid_sampler_forward
+ autogen: cudnn_grid_sampler.out
- func: cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)
dispatch:
CUDA: cudnn_grid_sampler_backward
+ autogen: cudnn_grid_sampler_backward.out
- func: cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
@@ -1670,20 +1776,31 @@
- func: _ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
dispatch:
CPU: ctc_loss_cpu
CUDA: ctc_loss_gpu
+ autogen: _ctc_loss.out
+- func: _ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
+ dispatch:
+ CPU, CUDA: ctc_loss_tensor
+
- func: _ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
dispatch:
CPU: ctc_loss_backward_cpu
CUDA: ctc_loss_backward_gpu
+ autogen: _ctc_loss_backward.out
+- func: _ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
+ dispatch:
+ CPU, CUDA: ctc_loss_backward_tensor
+
- func: diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: diag_embed
+ autogen: diag_embed.out
- func: diagflat(Tensor self, int offset=0) -> Tensor
variants: function, method
- func: diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
@@ -1696,16 +1813,17 @@
variants: function
- func: diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
variants: function, method
-- func: diagonal_backward(Tensor grad_output, int[] input_sizes, int offset, int dim1, int dim2) -> Tensor
+- func: diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
dispatch:
- CompositeExplicitAutograd: diagonal_backward
+ CompositeExplicitAutograd: diagonal_backward_symint
+ autogen: diagonal_backward.out
- func: fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
variants: method
- func: diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
@@ -1875,30 +1993,34 @@
- func: vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: vdot_out
-- func: einsum(str equation, Tensor[] tensors) -> Tensor
+- func: einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor
- func: embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
dispatch:
CompositeExplicitAutograd: embedding
NestedTensorCPU, NestedTensorCUDA: NestedTensor_embedding
+ autogen: embedding.out
-- func: embedding_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor
+- func: embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, int padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor
+ dispatch:
+ CompositeImplicitAutograd: embedding_backward_symint
-- func: embedding_dense_backward(Tensor grad_output, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor
+- func: embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor
dispatch:
CPU: embedding_dense_backward_cpu
CUDA: embedding_dense_backward_cuda
MPS: embedding_dense_backward_mps
+ autogen: embedding_dense_backward.out
- func: embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)
dispatch:
CPU: embedding_renorm_cpu_
CUDA: embedding_renorm_cuda_
- autogen: embedding_renorm.functional, embedding_renorm.out
+ autogen: embedding_renorm, embedding_renorm.out
- func: embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor
# NOTE [ embedding_bag Native Functions ]
# The `_embedding_bag.*` variants assume that input tensors except for `weight`,
@@ -1912,10 +2034,11 @@
- func: _embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
dispatch:
CPU: _embedding_bag_forward_only_cpu
CUDA: _embedding_bag_forward_only_cuda
+ autogen: _embedding_bag_forward_only.out
- func: _rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)
# row_stack is the alias of vstack
- func: row_stack(Tensor[] tensors) -> Tensor
@@ -1932,124 +2055,153 @@
- func: _embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
dispatch:
CPU: _embedding_bag_cpu
CUDA: _embedding_bag_cuda
+ autogen: _embedding_bag.out
- func: _embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
- func: _embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
- func: _embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
dispatch:
CPU: _embedding_bag_dense_backward_cpu
CUDA: _embedding_bag_dense_backward_cuda
+ autogen: _embedding_bag_dense_backward.out
- func: _embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor
dispatch:
CPU: _embedding_bag_per_sample_weights_backward_cpu
CUDA: _embedding_bag_per_sample_weights_backward_cuda
+ autogen: _embedding_bag_per_sample_weights_backward.out
- func: empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeExplicitAutograd: empty_names
+ autogen: empty.names_out
-- func: empty.memory_format(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+- func: empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
dispatch:
CPU: empty_cpu
CUDA: empty_cuda
MPS: empty_mps
- Meta: empty_meta
+ Meta: empty_meta_symint
MkldnnCPU: empty_mkldnn
- SparseCPU, SparseCUDA: empty_sparse
+ SparseCPU, SparseCUDA, SparseMeta: empty_sparse
SparseCsrCPU, SparseCsrCUDA: empty_sparse_compressed
- QuantizedCPU, QuantizedCUDA: empty_unknown_quantized
+ QuantizedCPU, QuantizedCUDA, QuantizedMeta: empty_unknown_quantized
# We do not make new_empty a composite that calls into new_empty_strided, as the strided version
# is significantly more difficult to implement by different backends
-- func: new_empty(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
variants: method
dispatch:
- CompositeExplicitAutograd: new_empty
+ CompositeExplicitAutograd: new_empty_symint
+ autogen: new_empty.out
-- func: new_empty_strided(Tensor self, int[] size, int[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
variants: method
dispatch:
- CompositeExplicitAutograd: new_empty_strided
+ CompositeExplicitAutogradNonFunctional: new_empty_strided_symint
+ autogen: new_empty_strided.out
-- func: new_full(Tensor self, int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
variants: method
+ dispatch:
+ # NB: Although this composite mutates on the inside, it is
+ # non-differentiable so NonFunctional doesn't apply
+ CompositeExplicitAutograd: new_full
+ autogen: new_full.out
-- func: new_zeros(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
variants: method
+ dispatch:
+ # NB: Although this composite mutates on the inside, it is
+ # non-differentiable so NonFunctional doesn't apply
+ CompositeExplicitAutograd: new_zeros
+ autogen: new_zeros.out
-- func: new_ones(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
variants: method
+ dispatch:
+ # NB: Although this composite mutates on the inside, it is
+ # non-differentiable so NonFunctional doesn't apply
+ CompositeExplicitAutograd: new_ones
+ autogen: new_ones.out
# other overrides are to provide a more helpful error message that dtype is required
- func: _empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
dispatch:
CPU: empty_affine_quantized_other_backends_stub
QuantizedCPU, QuantizedCUDA: empty_affine_quantized
+ autogen: _empty_affine_quantized.out
# it's a factory function receiving a tensor argument, thus overriding explicitly
# other overrides are to provide a more helpful error message that dtype is required
- func: _empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
category_override: factory
dispatch:
CPU: empty_per_channel_affine_quantized_other_backends_stub
QuantizedCPU, QuantizedCUDA: empty_per_channel_affine_quantized
+ autogen: _empty_per_channel_affine_quantized.out
-- func: resize_(Tensor(a!) self, int[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
+- func: resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: method
device_check: NoCheck
device_guard: False
+ tags: inplace_view
dispatch:
CPU, Meta: resize_
CUDA: resize_cuda_
MPS: resize_mps_
QuantizedCPU: quantized_resize_cpu_
SparseCsrCPU, SparseCsrCUDA: resize_sparse_csr_
- autogen: resize.functional, resize.out
+ autogen: resize, resize.out
# This is a utility function to enable users to resize the out tensor while registering kernels for out variants.
# Eventually, we can consider exposing `resize_output` as a public API to ship it with python op registration
# to make it easy to register out variants for ops.
- func: _resize_output_(Tensor(a!) self, int[] size, Device device) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: function
dispatch:
Meta: _resize_output_
- autogen: _resize_output.functional, _resize_output.out
+ autogen: _resize_output, _resize_output.out
- func: empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
category_override: factory
variants: function
dispatch:
QuantizedCPU, QuantizedCUDA: empty_quantized
+ autogen: empty_quantized.out
-- func: empty.out(int[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+- func: empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
device_guard: False
- func: empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: empty_like
QuantizedCPU, QuantizedCUDA: empty_like_quantized
- SparseCPU, SparseCUDA: empty_like_sparse_coo
+ SparseCPU, SparseCUDA, SparseMeta: empty_like_sparse_coo
SparseCsrCPU, SparseCsrCUDA: empty_like_sparse_csr
+ autogen: empty_like.out
-- func: empty_strided(int[] size, int[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CPU: empty_strided_cpu
CUDA: empty_strided_cuda
MPS: empty_strided_mps
- Meta: empty_strided_meta
+ Meta: empty_strided_meta_symint
QuantizedCPU, QuantizedCUDA: empty_strided_unknown_quantized
+ autogen: empty_strided.out
- func: erf(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: erf.out
variants: function, method
@@ -2148,42 +2300,40 @@
dispatch:
CPU, CUDA: expm1_out
SparseCPU, SparseCUDA: expm1_sparse_out
SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr_out
-- func: expand.SymInt(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
+- func: expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
device_check: NoCheck
device_guard: False
dispatch:
- CompositeExplicitAutograd: expand_symint
-
-- func: expand(Tensor(a) self, int[] size, *, bool implicit=False) -> Tensor(a)
- variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
- device_check: NoCheck
- device_guard: False
- dispatch:
CompositeExplicitAutograd: expand
- func: expand_as(Tensor(a) self, Tensor other) -> Tensor(a)
variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
device_check: NoCheck
device_guard: False
+# decomposes to eye.m
- func: eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: eye
- func: eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: eye
- func: eye.out(int n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
- CPU: eye_out_cpu
+ CPU, Meta: eye_out_cpu
CUDA: eye_out_cuda
MPS: eye_out_mps
- func: eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
- CPU: eye_out_cpu
+ CPU, Meta: eye_out_cpu
CUDA: eye_out_cuda
MPS: eye_out_mps
- func: flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)
variants: function, method
@@ -2195,15 +2345,15 @@
variants: function, method
- func: flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)
variants: function, method
-- func: unflatten.int(Tensor(a) self, int dim, int[] sizes, Dimname[]? names=None) -> Tensor(a)
- variants: method
+- func: unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a)
+ variants: function, method
- func: unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a)
- variants: method
+ variants: function, method
- func: fill.Scalar(Tensor self, Scalar value) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: fill
@@ -2237,20 +2387,18 @@
- func: floor(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: floor.out
variants: function, method
dispatch:
- CompositeExplicitAutograd: floor
SparseCPU, SparseCUDA: floor_sparse
SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr
- func: floor_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: floor.out
variants: function, method
dispatch:
- CompositeExplicitAutograd: floor_
SparseCPU, SparseCUDA: floor_sparse_
SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr_
- func: floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
@@ -2308,20 +2456,33 @@
CPU, CUDA: frac_out
- func: full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeExplicitAutograd: full
+ autogen: full.names_out
-- func: full(int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: full
-- func: full.out(int[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+- func: full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: full_out
- func: full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ dispatch:
+ # NB: Although this composite mutates on the inside, it is
+ # non-differentiable so NonFunctional doesn't apply
+ CompositeExplicitAutograd: full_like
+ autogen: full_like.out
- func: from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CPU: from_file
+ autogen: from_file.out
- func: gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
@@ -2370,69 +2531,103 @@
- func: grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
dispatch:
CPU, QuantizedCPU: grid_sampler_2d_cpu
CUDA: grid_sampler_2d_cuda
+ autogen: grid_sampler_2d.out
# `grid_sampler_2d_backward` takes in `output_mask` to optimize performance for
# the case where `input` doesn't require gradient. Gradient for `grid` is always
# computed (only `output_mask[0]` is checked by the implementations).
- func: grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
dispatch:
CPU: grid_sampler_2d_backward_cpu
CUDA: grid_sampler_2d_backward_cuda
+ autogen: grid_sampler_2d_backward.out
# See NOTE [ grid_sample CPU fallback ]
- func: _grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
dispatch:
CompositeExplicitAutograd: _grid_sampler_2d_cpu_fallback
+ autogen: _grid_sampler_2d_cpu_fallback.out
- func: _grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)
- func: grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
dispatch:
CPU: grid_sampler_3d_cpu
CUDA: grid_sampler_3d_cuda
+ autogen: grid_sampler_3d.out
# `grid_sampler_3d_backward` takes in `output_mask` to optimize performance for
# the case where `input` doesn't require gradient. Gradient for `grid` is always
# computed (only `output_mask[0]` is checked by the implementations).
- func: grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
dispatch:
CPU: grid_sampler_3d_backward_cpu
CUDA: grid_sampler_3d_backward_cuda
+ autogen: grid_sampler_3d_backward.out
- func: hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: hann_window
+ autogen: hann_window.out
- func: hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: hann_window
+ autogen: hann_window.periodic_out
- func: hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: hamming_window
+ autogen: hamming_window.out
- func: hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: hamming_window
+ autogen: hamming_window.periodic_out
- func: hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: hamming_window
+ autogen: hamming_window.periodic_alpha_out
- func: hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: hamming_window
+ autogen: hamming_window.periodic_alpha_beta_out
- func: kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: kaiser_window
+ autogen: kaiser_window.out
- func: kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: kaiser_window
+ autogen: kaiser_window.periodic_out
- func: kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: kaiser_window
+ autogen: kaiser_window.beta_out
- func: hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor
- func: group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor
-- func: native_group_norm(Tensor input, Tensor? weight, Tensor? bias, int N, int C, int HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
+- func: native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
dispatch:
CPU, CUDA: native_group_norm
- CompositeImplicitAutograd: math_group_norm
+ CompositeExplicitAutograd: math_group_norm
+ autogen: native_group_norm.out
-- func: native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, int N, int C, int HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+- func: native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
CPU, CUDA: native_group_norm_backward
+ autogen: native_group_norm_backward.out
# Real to complex forward FFT
- func: _fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor
variants: function
dispatch:
@@ -2469,29 +2664,46 @@
variants: function
dispatch:
CPU: _fft_c2c_mkl_out
CUDA: _fft_c2c_cufft_out
+- func: _validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()
+ device_check: NoCheck
+ variants: function
+ dispatch:
+ CPU: _validate_compressed_sparse_indices_cpu
+ CUDA: _validate_compressed_sparse_indices_cuda
+
- func: _cufft_get_plan_cache_size(int device_index) -> int
- func: _cufft_get_plan_cache_max_size(int device_index) -> int
- func: _cufft_set_plan_cache_max_size(int device_index, int max_size) -> ()
- func: _cufft_clear_plan_cache(int device_index) -> ()
- func: index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
device_check: NoCheck # TensorIterator
+ structured_delegate: index.Tensor_out
variants: function, method
dispatch:
- CPU, CUDA: index
QuantizedCPU: quantized_index
+ tags: dynamic_output_shape
# NB: This function is special-cased in tools/autograd/gen_variable_type.py
# NB: The following functions are declared in aten/src/ATen/templates/TensorBody.h and defined in aten/src/ATen/TensorIndexing.cpp:
# - Tensor Tensor::index(ArrayRef<TensorIndex> indices)
# - Tensor Tensor::index(std::initializer_list<TensorIndex> indices)
+- func: index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ structured: True
+ structured_inherits: TensorIteratorBase
+ precomputed:
+ - indices -> DimVector sizes, DimVector strides
+ dispatch:
+ CPU, CUDA, MPS: index_out
+
- func: index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
structured: True
variants: function
precomputed:
- dim -> int dim
@@ -2532,25 +2744,18 @@
- func: _index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
- CPU, CUDA: _index_put_impl_
- autogen: _index_put_impl.functional, _index_put_impl.out
+ CPU, CUDA, MPS: _index_put_impl_
+ QuantizedCPU: _index_put_impl_quantized_cpu_
+ QuantizedCUDA: _index_put_impl_quantized_cuda_
+ autogen: _index_put_impl, _index_put_impl.out
- func: instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
variants: function
-- func: inverse(Tensor self) -> Tensor
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: inverse
-
-- func: inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CompositeExplicitAutograd: inverse_out
-
- func: isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
variants: function, method
- func: isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
variants: function
@@ -2588,10 +2793,11 @@
device_guard: False
dispatch:
CPU, CUDA, MPS: isnan
SparseCPU, SparseCUDA: isnan_sparse
SparseCsrCPU, SparseCsrCUDA: isnan_sparse_csr
+ autogen: isnan.out
- func: is_distributed(Tensor self) -> bool
variants: function, method
device_check: NoCheck
device_guard: False
@@ -2633,10 +2839,13 @@
- func: is_same_size(Tensor self, Tensor other) -> bool
variants: function, method
device_check: NoCheck
device_guard: False
+ dispatch:
+ NestedTensorCPU, NestedTensorCUDA: nested_is_same_size
+ CompositeExplicitAutograd: is_same_size
- func: is_signed(Tensor self) -> bool
variants: function, method
device_check: NoCheck
device_guard: False
@@ -2647,18 +2856,11 @@
device_check: NoCheck
device_guard: False
manual_cpp_binding: True
- func: kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
- dispatch:
- CompositeExplicitAutograd: kl_div
-- func: kl_div_backward(Tensor grad_output, Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
- dispatch:
- CPU: kl_div_backward_cpu
- CUDA: kl_div_backward_cuda
-
- func: kron(Tensor self, Tensor other) -> Tensor
variants: function, method
- func: kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
@@ -2677,22 +2879,24 @@
- func: kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- func: layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
-- func: native_layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
+- func: native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
dispatch:
CPU: layer_norm_cpu
CUDA: layer_norm_cuda
MPS: layer_norm_mps
- CompositeImplicitAutograd: math_native_layer_norm
+ CompositeExplicitAutograd: math_native_layer_norm
+ autogen: native_layer_norm.out
-- func: native_layer_norm_backward(Tensor grad_out, Tensor input, int[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+- func: native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
CPU: layer_norm_backward_cpu
CUDA: layer_norm_backward_cuda
MPS: layer_norm_backward_mps
+ autogen: native_layer_norm_backward.out
- func: nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: nan_to_num
@@ -2709,51 +2913,47 @@
CPU, CUDA: nan_to_num_out
SparseCPU, SparseCUDA: nan_to_num_sparse_out
- func: linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
python_module: nn
+ dispatch:
+ CompositeImplicitAutograd: linear
+ NestedTensorCPU, NestedTensorCUDA: nested_linear
+ MPS: _mps_linear
+- func: linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+ dispatch:
+ NestedTensorCPU, NestedTensorCUDA: nested_linear_backward
+ MPS: mps_linear_backward
+ autogen: linear_backward.out
+
- func: linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
-
-# TODO: Add this function to MPS dispatch key so that we avoid declaring it in
-# native_functions.yaml
-# https://github.com/pytorch/pytorch/issues/77394
-- func: _mps_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor
- python_module: nn
dispatch:
- MPS: _mps_linear
+ CompositeExplicitAutograd: linear_out
- func: mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor
python_module: nn
dispatch:
MkldnnCPU: mkldnn_linear
+ autogen: mkldnn_linear.out
- func: mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor
dispatch:
MkldnnCPU: mkldnn_linear_backward_input
+ autogen: mkldnn_linear_backward_input.out
- func: mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)
dispatch:
MkldnnCPU: mkldnn_linear_backward_weights
+ autogen: mkldnn_linear_backward_weights.out
- func: mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
MkldnnCPU: mkldnn_linear_backward
+ autogen: mkldnn_linear_backward.out
-- func: _mps_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor
- dispatch:
- MPS: _mps_linear_backward_input
-
-- func: _mps_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)
- dispatch:
- MPS: _mps_linear_backward_weights
-
-- func: mps_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
- dispatch:
- MPS: mps_linear_backward
-
- func: fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
- func: fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
- func: fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)
@@ -2775,10 +2975,12 @@
variants: function, method
- func: ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- func: linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: linspace
- func: linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, Meta: linspace_out
CUDA: linspace_cuda_out
@@ -2804,12 +3006,10 @@
- func: log10(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: log10.out
variants: function, method
- dispatch:
- CompositeExplicitAutograd: log10
- func: log10_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: log10.out
variants: function, method
@@ -2874,12 +3074,10 @@
MPS: logaddexp_out_mps
- func: logaddexp(Tensor self, Tensor other) -> Tensor
variants: method, function
structured_delegate: logaddexp.out
- dispatch:
- CompositeExplicitAutograd: logaddexp
- func: logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
@@ -2887,12 +3085,10 @@
MPS: logaddexp2_out_mps
- func: logaddexp2(Tensor self, Tensor other) -> Tensor
variants: method, function
structured_delegate: logaddexp2.out
- dispatch:
- CompositeExplicitAutograd: logaddexp2
- func: xlogy.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: xlogy.OutTensor
variants: function, method
@@ -2940,17 +3136,14 @@
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: xlogy_out
-- func: logdet(Tensor self) -> Tensor
- variants: function, method
+- func: logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
- CompositeExplicitAutograd: logdet
+ CompositeExplicitAutograd: logspace
-- func: logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-
- func: logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, Meta: logspace_out
CUDA: logspace_cuda_out
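`logspace` gains an explicit CompositeExplicitAutograd kernel above; it produces `steps` points spaced evenly on a log scale in the given `base`. For example:

    import torch

    print(torch.logspace(0, 2, steps=3))            # tensor([  1.,  10., 100.])
    print(torch.logspace(0, 3, steps=4, base=2.0))  # tensor([1., 2., 4., 8.])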
@@ -3017,11 +3210,12 @@
CompositeExplicitAutograd: logsumexp
- func: logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
- CompositeExplicitAutograd: logsumexp_out
+ # calls squeeze
+ CompositeExplicitAutogradNonFunctional: logsumexp_out
- func: logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
@@ -3030,17 +3224,24 @@
- func: margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
- func: matmul(Tensor self, Tensor other) -> Tensor
variants: function, method
+ dispatch:
+ CompositeImplicitAutograd: matmul
+ NestedTensorCPU, NestedTensorCUDA: matmul_nested
+- func: matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)
+ dispatch:
+ NestedTensorCPU, NestedTensorCUDA: matmul_backward_nested
+ autogen: matmul_backward.out
+
- func: matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeImplicitAutograd: matmul_out
+ NestedTensorCPU, NestedTensorCUDA: matmul_out_nested
-- func: matrix_rank.tol(Tensor self, float tol, bool symmetric=False) -> Tensor
-
-- func: matrix_rank(Tensor self, bool symmetric=False) -> Tensor
-
# Alias to linalg.matrix_power
- func: matrix_power(Tensor self, int n) -> Tensor
variants: function, method
# Alias to linalg.matrix_power
@@ -3055,15 +3256,17 @@
# DEPRECATED: Use torch.aminmax instead
- func: _aminmax(Tensor self) -> (Tensor, Tensor)
dispatch:
CPU, CUDA: _aminmax_all
+ autogen: _aminmax.out
# DEPRECATED: Use torch.aminmax instead
- func: _aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
dispatch:
CPU, CUDA: _aminmax
+ autogen: _aminmax.dim_out
- func: aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
device_check: NoCheck # TensorIterator
structured_delegate: aminmax.out
variants: function, method
@@ -3116,10 +3319,11 @@
- func: amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU, CUDA: amax_out
+ MPS: amax_out_mps
# Return: (Tensor output, Tensor indices)
- func: max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
- func: max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
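As the comment above says, `max_pool1d_with_indices` returns both the pooled values and the argmax indices; in the Python API this is usually reached through `return_indices=True`. A minimal sketch:

    import torch
    import torch.nn.functional as F

    x = torch.tensor([[[1., 3., 2., 5.]]])                          # (N, C, L)
    out, idx = F.max_pool1d(x, kernel_size=2, return_indices=True)
    print(out)   # tensor([[[3., 5.]]])
    print(idx)   # tensor([[[1, 3]]])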
@@ -3130,39 +3334,47 @@
# native_functions.yaml
# https://github.com/pytorch/pytorch/issues/77394
- func: _mps_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
MPS: _mps_max_pool2d
+ autogen: _mps_max_pool2d.out
- func: mps_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
MPS: mps_max_pool2d_backward
+ autogen: mps_max_pool2d_backward.out
- func: mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
MkldnnCPU: mkldnn_max_pool2d
+ autogen: mkldnn_max_pool2d.out
- func: mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
MkldnnCPU: mkldnn_max_pool2d_backward
+ autogen: mkldnn_max_pool2d_backward.out
- func: mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
MkldnnCPU: mkldnn_max_pool3d
+ autogen: mkldnn_max_pool3d.out
- func: mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
MkldnnCPU: mkldnn_max_pool3d_backward
+ autogen: mkldnn_max_pool3d_backward.out
- func: quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
QuantizedCPU: quantized_max_pool1d
+ autogen: quantized_max_pool1d.out
- func: quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
QuantizedCPU: quantized_max_pool2d
QuantizedCUDA: quantized_max_pool2d_cudnn
+ autogen: quantized_max_pool2d.out
- func: max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
# The CPU and GPU dispatch variants are named weirdly here because otherwise there
# are namespacing issues in C++
@@ -3170,18 +3382,25 @@
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: mean
-- func: mean.dim(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+# By the normal naming convention this should be `mean.out`; however, since we already have `mean.out`, we have to rename this.
+# FIXME: fix CI jobs and re-enable this
+#- func: mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+# device_check: NoCheck # TensorIterator
+# dispatch:
+# CompositeExplicitAutograd: mean_dtype_out
+
+- func: mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
structured_delegate: mean.out
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
QuantizedCPU: mean_quantized_cpu
-- func: mean.out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+- func: mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
structured: True
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: mean_out
MPS: mean_out_mps
@@ -3192,22 +3411,23 @@
variants: function, method
- func: mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
-- func: nanmean(Tensor self, int[1] dim=[], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+- func: nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
device_check: NoCheck # Composite
variants: function, method
-- func: nanmean.out(Tensor self, int[1] dim=[], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+- func: nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # Composite
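`nanmean` and `nanmean.out` now accept an optional `dim` (`None` meaning "reduce over everything"), and NaNs are simply left out of the average. For example:

    import torch

    x = torch.tensor([[1.0, float('nan')],
                      [3.0, 5.0]])
    print(torch.nanmean(x))         # tensor(3.) -- mean of 1, 3, 5
    print(torch.nanmean(x, dim=0))  # tensor([2., 5.])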
- func: median(Tensor self) -> Tensor
variants: function, method
dispatch:
CPU: median_cpu
CUDA: median_cuda
+ autogen: median.out
- func: median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
dispatch:
CompositeExplicitAutograd: median
@@ -3225,10 +3445,11 @@
- func: nanmedian(Tensor self) -> Tensor
variants: function, method
dispatch:
CPU: nanmedian_cpu
CUDA: nanmedian_cuda
+ autogen: nanmedian.out
- func: nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
dispatch:
CompositeExplicitAutograd: nanmedian
@@ -3272,53 +3493,72 @@
- func: amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU, CUDA: amin_out
+ MPS: amin_out_mps
# TODO: Add this function to MPS dispatch key so that we avoid declaring it in
# native_functions.yaml
# https://github.com/pytorch/pytorch/issues/77394
- func: _mps_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor
dispatch:
MPS: _mps_convolution
+ autogen: _mps_convolution.out
- func: mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
MPS: mps_convolution_backward
+ autogen: mps_convolution_backward.out
- func: mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor
dispatch:
CompositeExplicitAutograd: mkldnn_convolution
+ autogen: mkldnn_convolution.out
- func: miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
dispatch:
CUDA: miopen_batch_norm
+ autogen: miopen_batch_norm.out
- func: miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)
dispatch:
CUDA: miopen_batch_norm_backward
+ autogen: miopen_batch_norm_backward.out
- func: miopen_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
dispatch:
CUDA: miopen_convolution
+ autogen: miopen_convolution.out
- func: miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
dispatch:
CUDA: miopen_convolution_transpose
+ autogen: miopen_convolution_transpose.out
- func: miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
dispatch:
CUDA: miopen_depthwise_convolution
+ autogen: miopen_depthwise_convolution.out
+- func: miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
+ dispatch:
+ CUDA: miopen_convolution_relu
+
+- func: miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
+ dispatch:
+ CUDA: miopen_convolution_add_relu
+
- func: miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
dispatch:
CUDA: miopen_rnn
+ autogen: miopen_rnn.out
- func: miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
dispatch:
CUDA: miopen_rnn_backward
+ autogen: miopen_rnn_backward.out
- func: mm(Tensor self, Tensor mat2) -> Tensor
structured_delegate: mm.out
variants: function, method
dispatch:
@@ -3339,15 +3579,17 @@
- func: _sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor
dispatch:
SparseCPU: sparse_sparse_matmul_cpu
SparseCUDA: sparse_sparse_matmul_cuda
+ autogen: _sparse_sparse_matmul.out
- func: _sparse_mask_helper(Tensor t, Tensor mask_indices) -> Tensor
dispatch:
SparseCPU: sparse_mask_helper_cpu
SparseCUDA: sparse_mask_helper_cuda
+ autogen: _sparse_mask_helper.out
- func: mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
dispatch:
CPU, CUDA: mode
@@ -3399,17 +3641,19 @@
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: mul
SparseCsrCPU, SparseCsrCUDA: mul_scalar_sparse_csr
+ NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul_Scalar
- func: mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: mul_
SparseCsrCPU, SparseCsrCUDA: mul__scalar_sparse_csr
+ NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul__Scalar
autogen: mul.Scalar_out
# multiply, alias for mul
- func: multiply.Tensor(Tensor self, Tensor other) -> Tensor
variants: function, method
@@ -3449,25 +3693,20 @@
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: mvlgamma_
-- func: narrow_copy(Tensor self, int dim, int start, int length) -> Tensor
+- func: narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
variants: function, method
dispatch:
CPU: narrow_copy_dense_cpu
SparseCPU, SparseCUDA: narrow_copy_sparse
- CompositeExplicitAutograd: narrow_copy_dense
+ CompositeExplicitAutogradNonFunctional: narrow_copy_dense
tags: view_copy
-- func: narrow_copy.SymInt(Tensor self, int dim, int start, SymInt length) -> Tensor
- variants: function, method
+- func: narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
- CompositeExplicitAutograd: narrow_copy_symint
-
-- func: narrow_copy.out(Tensor self, int dim, int start, int length, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
CPU: narrow_copy_dense_cpu_out
- func: narrow(Tensor(a) self, int dim, int start, int length) -> Tensor(a)
variants: function, method
device_check: NoCheck
@@ -3491,10 +3730,11 @@
MPS: batch_norm_mps_out
- func: batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
dispatch:
CUDA: batch_norm_stats_cuda
+ autogen: batch_norm_stats.out
- func: batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor
dispatch:
CUDA: batch_norm_elemt_cuda
@@ -3504,88 +3744,113 @@
# for backward compatibility
- func: batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)
dispatch:
CUDA: batch_norm_gather_stats_cuda
+ autogen: batch_norm_gather_stats.out
- func: batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)
dispatch:
CUDA: batch_norm_gather_stats_with_counts_cuda
+ autogen: batch_norm_gather_stats_with_counts.out
- func: native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
CPU: batch_norm_backward_cpu
CUDA: batch_norm_backward_cuda
MPS: batch_norm_backward_mps
MkldnnCPU: mkldnn_batch_norm_backward
+ autogen: native_batch_norm_backward.out
- func: batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)
dispatch:
CUDA: batch_norm_backward_reduce_cuda
+ autogen: batch_norm_backward_reduce.out
- func: batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor
dispatch:
CUDA: batch_norm_backward_elemt_cuda
+ autogen: batch_norm_backward_elemt.out
- func: batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor)
dispatch:
CPU: batch_norm_update_stats_cpu
CUDA: batch_norm_update_stats_cuda
+ autogen: batch_norm_update_stats.out
- func: is_vulkan_available() -> bool
- func: _nnpack_available() -> bool
- func: _nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, int[2] padding, int[2] stride=1) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: _nnpack_spatial_convolution
+ autogen: _nnpack_spatial_convolution.out
- func: ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeExplicitAutograd: ones
+ autogen: ones.names_out
-- func: ones(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: ones
-- func: ones.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
+- func: ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: ones_out
- func: ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ dispatch:
+ # NB: Although this composite mutates on the inside, it is
+ # non-differentiable so NonFunctional doesn't apply
+ CompositeExplicitAutograd: ones_like
+ autogen: ones_like.out
- func: pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
- func: cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
- func: _euclidean_dist(Tensor x1, Tensor x2) -> Tensor
dispatch:
CompositeExplicitAutograd: _euclidean_dist
+ autogen: _euclidean_dist.out
- func: _cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
dispatch:
CPU, CUDA: _cdist_forward
+ autogen: _cdist_forward.out
- func: _cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
dispatch:
CPU, CUDA: _cdist_backward
+ autogen: _cdist_backward.out
- func: pdist(Tensor self, float p=2) -> Tensor
- func: _pdist_forward(Tensor self, float p=2) -> Tensor
dispatch:
CPU, CUDA: _pdist_forward
+ autogen: _pdist_forward.out
- func: _pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor
dispatch:
CPU, CUDA: _pdist_backward
+ autogen: _pdist_backward.out
- func: cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor
variants: function
- func: permute(Tensor(a) self, int[] dims) -> Tensor(a)
variants: function, method
dispatch:
CompositeExplicitAutograd: permute
MPS: permute_mps
+ SparseCPU, SparseCUDA: permute_sparse_coo
- func: movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
variants: function, method
- func: movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
@@ -3624,21 +3889,24 @@
variants: function, method
- func: pixel_shuffle(Tensor self, int upscale_factor) -> Tensor
dispatch:
CPU: pixel_shuffle_cpu
- CompositeExplicitAutograd: math_pixel_shuffle
+ CompositeExplicitAutogradNonFunctional: math_pixel_shuffle
+ autogen: pixel_shuffle.out
- func: pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor
dispatch:
CPU: pixel_unshuffle_cpu
- CompositeExplicitAutograd: math_pixel_unshuffle
+ CompositeExplicitAutogradNonFunctional: math_pixel_unshuffle
+ autogen: pixel_unshuffle.out
- func: channel_shuffle(Tensor self, int groups) -> Tensor
dispatch:
CPU: channel_shuffle
QuantizedCPU: channel_shuffle_quantized_cpu
+ autogen: channel_shuffle.out
- func: native_channel_shuffle(Tensor self, int groups) -> Tensor
dispatch:
CPU: channel_shuffle_cpu
CompositeImplicitAutograd: math_channel_shuffle
@@ -3658,10 +3926,11 @@
# Unlike pin_memory, this is guaranteed to give a new non-aliasing tensor
- func: _pin_memory(Tensor self, Device? device=None) -> Tensor
dispatch:
CUDA: _pin_memory_cuda
MPS: _pin_memory_mps
+ autogen: _pin_memory.out
- func: pinverse(Tensor self, float rcond=1e-15) -> Tensor
variants: function, method
- func: poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor
@@ -3697,86 +3966,190 @@
- func: deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: deg2rad_out
- func: scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: scalar_tensor
+ autogen: scalar_tensor.out
- func: rand.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeExplicitAutograd: rand
+ autogen: rand.names_out
+ tags: nondeterministic_seeded
- func: rand.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
device_check: NoCheck
device_guard: False
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: rand
+ autogen: rand.generator_with_names_out
- func: rand(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: rand
- func: rand.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: rand
- func: rand.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: rand_out
- func: rand.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ tags: nondeterministic_seeded
- func: rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ tags: nondeterministic_seeded
+ dispatch:
+ # NB: Although this composite mutates on the inside, it is
+ # non-differentiable so NonFunctional doesn't apply
+ CompositeExplicitAutograd: rand_like
+ autogen: rand_like.out
-- func: randint(int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randint(int high, int[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: randint
-- func: randint.generator(int high, int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randint.generator(int high, int[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: randint
-- func: randint.low(int low, int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randint.low(int low, int high, int[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: randint
-- func: randint.low_generator(int low, int high, int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randint.low_generator(int low, int high, int[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: randint
- func: randint.out(int high, int[] size, *, Tensor(a!) out) -> Tensor(a!)
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: randint_out
- func: randint.generator_out(int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: randint_out
- func: randint.low_out(int low, int high, int[] size, *, Tensor(a!) out) -> Tensor(a!)
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: randint_out
- func: randint.low_generator_out(int low, int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: randint_out
- func: randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ tags: nondeterministic_seeded
+ dispatch:
+ # NB: Although this composite mutates on the inside, it is
+ # non-differentiable so NonFunctional doesn't apply
+ CompositeExplicitAutograd: randint_like
+ autogen: randint_like.out
- func: randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ tags: nondeterministic_seeded
+ dispatch:
+ # NB: Although this composite mutates on the inside, it is
+ # non-differentiable so NonFunctional doesn't apply
+ CompositeExplicitAutograd: randint_like
+ autogen: randint_like.low_dtype_out
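Note the schema change in the `randint` family above: `dtype=None` became `dtype=long`, making the default integer dtype explicit, and every overload is now tagged `nondeterministic_seeded` to mark it as an RNG consumer. A quick check:

    import torch

    print(torch.randint(10, (3,)).dtype)   # torch.int64, the `long` default

    torch.manual_seed(0)
    a = torch.randint(0, 10, (2, 2))
    torch.manual_seed(0)
    b = torch.randint(0, 10, (2, 2))
    print(torch.equal(a, b))               # True: same seed, same draw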
- func: randn(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: randn
- func: randn.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: randn
- func: randn.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ tags: nondeterministic_seeded
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeExplicitAutograd: randn
+ autogen: randn.names_out
- func: randn.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ tags: nondeterministic_seeded
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeExplicitAutograd: randn
+ autogen: randn.generator_with_names_out
- func: randn.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
+ tags: nondeterministic_seeded
- func: randn.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ tags: nondeterministic_seeded
- func: randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ tags: nondeterministic_seeded
+ dispatch:
+ # NB: Although this composite mutates on the inside, it is
+ # non-differentiable so NonFunctional doesn't apply
+ CompositeExplicitAutograd: randn_like
+ autogen: randn_like.out
- func: randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: randperm
- func: randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: randperm
- func: randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: randperm_out
- func: randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ tags: nondeterministic_seeded
dispatch:
CPU: randperm_out_cpu
CUDA: randperm_out_cuda
- func: range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: range
- func: range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: range
+- func: range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: range_out_no_step
+
- func: range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, Meta: range_out
CUDA: range_cuda_out
+ cpp_no_default_args: ['step']
- func: ravel(Tensor(a) self) -> Tensor(a)
variants: function, method
- func: reciprocal(Tensor self) -> Tensor
@@ -3830,37 +4203,43 @@
- func: negative_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
- func: negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-- func: repeat(Tensor self, int[] repeats) -> Tensor
+- func: repeat(Tensor self, SymInt[] repeats) -> Tensor
variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
dispatch:
CompositeExplicitAutograd: repeat
MPS: repeat_mps
+ autogen: repeat.out
- func: repeat_interleave.Tensor(Tensor repeats, *, int? output_size=None) -> Tensor
variants: function
dispatch:
CPU: repeat_interleave_cpu
CUDA: repeat_interleave_cuda
+ tags: dynamic_output_shape
+ autogen: repeat_interleave.Tensor_out
- func: repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor
variants: function, method
- func: repeat_interleave.self_int(Tensor self, int repeats, int? dim=None, *, int? output_size=None) -> Tensor
variants: function, method
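The `repeat_interleave.Tensor` entry above picks up the `dynamic_output_shape` tag because the output length is the sum of the repeat counts, which is only known once the data is read. For example:

    import torch

    x = torch.tensor([10, 20, 30])
    counts = torch.tensor([1, 0, 2])
    print(torch.repeat_interleave(x, counts))   # tensor([10, 30, 30])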
-- func: reshape(Tensor(a) self, int[] shape) -> Tensor(a)
+- func: reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeImplicitAutograd: reshape_symint
+ CompositeImplicitAutogradNestedTensor: reshape_nested
# NOTE [ _reshape_alias ] is meant to be used in the implementation of reshape.
# They are not user-facing, hence the leading underscore. Please don't use it
# anywhere else.
-- func: _reshape_alias(Tensor(a) self, int[] size, int[] stride) -> Tensor(a)
+- func: _reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CPU, CUDA, Meta, QuantizedCPU, QuantizedCUDA, ZeroTensor, MPS: _reshape_alias
@@ -3869,15 +4248,19 @@
- func: _mkldnn_reshape(Tensor self, int[] shape) -> Tensor
device_check: NoCheck
device_guard: False
dispatch:
MkldnnCPU: mkldnn_reshape
+ autogen: _mkldnn_reshape.out
- func: reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)
variants: method
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeImplicitAutograd: reshape_as
+ CompositeImplicitAutogradNestedTensor: reshape_as_nested
- func: round(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: round.out
variants: function, method
@@ -3922,32 +4305,36 @@
CPU: round_decimals_out
CUDA: round_decimals_out
- func: rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
device_check: NoCheck # TensorIterator
+ tags: nondeterministic_seeded
- func: rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
+ tags: nondeterministic_seeded
device_check: NoCheck # TensorIterator
- func: relu(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: relu
MPS: relu_mps
MkldnnCPU: mkldnn_relu
QuantizedCPU: relu_quantized_cpu
+ QuantizedCUDA: relu_quantized_cuda
NestedTensorCPU, NestedTensorCUDA: NestedTensor_relu
- func: relu_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: relu_
MPS: relu_mps_
MkldnnCPU: mkldnn_relu_
QuantizedCPU: relu_quantized_cpu_
+ QuantizedCUDA: relu_quantized_cuda_
NestedTensorCPU, NestedTensorCUDA: NestedTensor_relu_
autogen: relu.out
- func: relu6(Tensor self) -> Tensor
python_module: nn
@@ -3959,17 +4346,22 @@
variants: function, method
dispatch:
MkldnnCPU: mkldnn_prelu
CPU: prelu_cpu
CUDA: prelu_cuda
+ MPS: prelu_mps
+ QuantizedCPU: prelu_quantized_cpu
+ autogen: prelu.out
- func: prelu_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)
variants: function, method
dispatch:
MkldnnCPU: mkldnn_prelu_backward
CPU: prelu_backward_cpu
CUDA: prelu_backward_cuda
+ MPS: prelu_backward_mps
+ autogen: prelu_backward.out
- func: gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
@@ -3991,10 +4383,11 @@
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
MkldnnCPU: mkldnn_gelu
QuantizedCPU: gelu_quantized_cpu
+ QuantizedCUDA: gelu_quantized_cuda
NestedTensorCPU, NestedTensorCUDA: NestedTensor_gelu
- func: gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
@@ -4066,18 +4459,27 @@
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: select
SparseCsrCPU, SparseCsrCUDA: select_sparse_csr
+ NestedTensorCPU, NestedTensorCUDA: select_nested
-- func: select_backward(Tensor grad_output, int[] input_sizes, int dim, int index) -> Tensor
+- func: select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, int index) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
dispatch:
- CompositeExplicitAutograd: select_backward
+ CompositeExplicitAutogradNonFunctional: select_backward
+ autogen: select_backward.out
+- func: _nested_select_backward(Tensor grad_output, Tensor self, int dim, int index) -> Tensor
+ variants: function
+ device_check: NoCheck
+ device_guard: False
+ dispatch:
+ NestedTensorCPU, NestedTensorCUDA: _nested_select_backward
+
- func: selu(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
- func: selu_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
@@ -4094,18 +4496,14 @@
autogen: celu.out
- func: silu(Tensor self) -> Tensor
structured_delegate: silu.out
python_module: nn
- dispatch:
- CompositeExplicitAutograd: silu
- func: silu_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: silu.out
python_module: nn
- dispatch:
- CompositeExplicitAutograd: silu_
- func: silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
@@ -4128,18 +4526,14 @@
CompositeImplicitAutograd: math_silu_backward
- func: mish(Tensor self) -> Tensor
structured_delegate: mish.out
python_module: nn
- dispatch:
- CompositeExplicitAutograd: mish
- func: mish_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: mish.out
python_module: nn
- dispatch:
- CompositeExplicitAutograd: mish_
- func: mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
@@ -4268,10 +4662,11 @@
# be updated.
- func: detach(Tensor(a) self) -> Tensor(a)
variants: function, method
dispatch:
CompositeExplicitAutograd: detach
+ NestedTensorCPU, NestedTensorCUDA: detach
# Like `detach()`, but modifies this `Variable` in-place. This method may
# only be called on non-view `Variable`s. You can use `is_view()` to check
# this. If this `Variable` is a view, throws an `std::runtime_error()`.
- func: detach_(Tensor(a!) self) -> Tensor(a!)
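Per the comment above, `detach_()` is only valid on non-view Variables and throws for views, while `detach()` always works. A small sketch of that contract:

    import torch

    base = torch.randn(4, requires_grad=True)
    view = base[:2]                  # a differentiable view of `base`
    try:
        view.detach_()               # in-place detach on a view: expected to raise
    except RuntimeError as e:
        print("detach_ on a view raised:", e)
    print(view.detach().requires_grad)   # False -- out-of-place detach is fine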
@@ -4289,49 +4684,58 @@
- func: size.Dimname(Tensor self, Dimname dim) -> int
variants: function, method
device_check: NoCheck
device_guard: False
-- func: slice.Tensor(Tensor(a) self, int dim=0, int? start=None, int? end=None, int step=1) -> Tensor(a)
+- func: slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: slice
+# NOTE: The implementation of split_with_sizes bypasses the dispatcher to call this; undo
+# that if adding specific implementations here!
-- func: slice_backward(Tensor grad_output, int[] input_sizes, int dim, int start, int end, int step) -> Tensor
+- func: slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: slice_backward
+ autogen: slice_backward.out
-- func: slice_scatter(Tensor self, Tensor src, int dim=0, int? start=None, int? end=None, int step=1) -> Tensor
+- func: slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: slice_scatter
+ autogen: slice_scatter.out
- func: select_scatter(Tensor self, Tensor src, int dim, int index) -> Tensor
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: select_scatter
+ autogen: select_scatter.out
- func: diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: diagonal_scatter
+ autogen: diagonal_scatter.out
-- func: slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
+- func: as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
variants: function, method
+ device_check: NoCheck
+ device_guard: False
dispatch:
- CompositeExplicitAutograd: slogdet
+ CompositeExplicitAutograd: as_strided_scatter_symint
+ autogen: as_strided_scatter.out
- func: smm(Tensor self, Tensor mat2) -> Tensor
variants: function, method
# softmax allows positional dtype, unlike most operators, because kwonly is BC-breaking when loading jit models.
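The note above is why `dtype` on `softmax` stays positional-or-keyword in the schema rather than keyword-only. A small sketch, assuming the Python binding mirrors the schema (so `dtype` can also be passed positionally):

    import torch

    x = torch.randn(4, dtype=torch.float32)
    y = torch.softmax(x, 0, torch.float64)   # dtype given positionally
    print(y.dtype)                           # torch.float64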
@@ -4348,20 +4752,23 @@
- func: _softmax(Tensor self, int dim, bool half_to_float) -> Tensor
structured_delegate: _softmax.out
dispatch:
MkldnnCPU: mkldnn_softmax
+ NestedTensorCPU, NestedTensorCUDA: softmax_nested
- func: _softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU: softmax_cpu_out
CUDA: softmax_cuda_out
MPS: softmax_mps_out
- func: _softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
structured_delegate: _softmax_backward_data.out
+ dispatch:
+ NestedTensorCPU, NestedTensorCUDA: nested_softmax_backward
- func: _softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)
structured: True
dispatch:
CPU: softmax_backward_cpu_out
@@ -4372,10 +4779,11 @@
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: unsafe_split
+ autogen: unsafe_split.Tensor_out
- func: split.Tensor(Tensor(a -> *) self, int split_size, int dim=0) -> Tensor(a)[]
variants: function, method
device_check: NoCheck
device_guard: False
@@ -4390,10 +4798,11 @@
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: unsafe_split_with_sizes
+ autogen: unsafe_split_with_sizes.out
- func: split_with_sizes(Tensor(a -> *) self, int[] split_sizes, int dim=0) -> Tensor(a)[]
variants: function, method
device_check: NoCheck
device_guard: False
@@ -4527,36 +4936,44 @@
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: sum
SparseCsrCPU, SparseCsrCUDA: sum_csr
+ autogen: sum.out
-- func: sum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+- func: sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
structured_delegate: sum.IntList_out
device_check: NoCheck # TensorIterator
variants: function, method
+ dispatch:
+ NestedTensorCPU: NestedTensor_sum_dim_CPU
- func: sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
-- func: sum.IntList_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+- func: sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
structured: True
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: sum_out
MPS: sum_out_mps
- func: sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
-- func: nansum(Tensor self, int[1] dim=[], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+# TODO: this function will be replaced once nested expand semantics have been settled on
+- func: _nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor
+ dispatch:
+ NestedTensorCPU: _nested_sum_backward_cpu
+
+- func: nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
variants: function, method
dispatch:
CPU, CUDA: nansum
-- func: nansum.out(Tensor self, int[1] dim=[], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+- func: nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: nansum_out
- func: sum_to_size(Tensor self, int[] size) -> Tensor
variants: method
@@ -4601,50 +5018,53 @@
- func: std(Tensor self, bool unbiased=True) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
-- func: std.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
+- func: std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: std.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: std
MPS: std_mps
+ QuantizedCPU: std_quantized_cpu
- func: std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
-- func: std_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
+- func: std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
- func: std_mean.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CPU, CUDA: std_mean
+ autogen: std_mean.correction_out
- func: std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
- func: std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
-- func: std.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+- func: std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: std.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: std_out
+ QuantizedCPU: std_out_quantized_cpu
- func: std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
@@ -4663,10 +5083,11 @@
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: prod
MPS: prod_mps
+ autogen: prod.out
- func: prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
structured_delegate: prod.int_out
device_check: NoCheck # TensorIterator
variants: function, method
@@ -4804,10 +5225,11 @@
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: transpose
+ NestedTensorCPU, NestedTensorCUDA: transpose_nested
- func: transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
@@ -4834,15 +5256,18 @@
autogen: _mkldnn_transpose.out
- func: one_hot(Tensor self, int num_classes=-1) -> Tensor
python_module: nn
variants: function
+ tags: dynamic_output_shape
- func: flip(Tensor self, int[] dims) -> Tensor
variants: function, method
dispatch:
CPU, QuantizedCPU, CUDA, QuantizedCUDA: flip
+ MPS: flip_mps
+ autogen: flip.out
- func: fliplr(Tensor self) -> Tensor
variants: function, method
- func: flipud(Tensor self) -> Tensor
@@ -4851,17 +5276,19 @@
- func: roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor
variants: function, method
dispatch:
CPU: roll_cpu
CUDA: roll_cuda
+ autogen: roll.out
# the default int[] value [0,1] must not contain a space after the comma, since the codegen parser uses ', ' to split args
- func: rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: rot90
+ autogen: rot90.out
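`rot90` rotates in the plane given by `dims`, defaulting to `[0,1]` (written without a space so the codegen parser can split the default, per the comment above). For example:

    import torch

    x = torch.tensor([[1, 2],
                      [3, 4]])
    print(torch.rot90(x))         # k=1, dims=[0,1]: tensor([[2, 4], [1, 3]])
    print(torch.rot90(x, k=-1))   # clockwise:       tensor([[3, 1], [4, 2]])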
- func: trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
- func: trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
@@ -4872,48 +5299,90 @@
# Fused implementation detail for transformers. Adds in-projection bias to QKV and divides Q by sqrt(D/num_heads).
- func: _transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor)
dispatch:
CPU, NestedTensorCPU: transform_bias_rescale_qkv_cpu
CUDA, NestedTensorCUDA: transform_bias_rescale_qkv_cuda
+ autogen: _transform_bias_rescale_qkv.out
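The comment above spells out the fused math: add the in-projection bias to the packed QKV, then divide Q by sqrt(D/num_heads), i.e. by the square root of the head dimension. A rough unfused sketch of the same rescaling (the shapes and the simple last-dim split here are illustrative, not the kernel's internal layout):

    import math
    import torch

    B, T, D, num_heads = 2, 5, 8, 2
    head_dim = D // num_heads
    qkv = torch.randn(B, T, 3 * D)
    qkv_bias = torch.randn(3 * D)

    q, k, v = (qkv + qkv_bias).split(D, dim=-1)   # add bias, split into Q, K, V
    q = q / math.sqrt(head_dim)                   # divide Q by sqrt(D / num_heads)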
-- func: _nested_tensor_from_mask(Tensor t, Tensor mask) -> Tensor
+- func: _nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor
dispatch:
CPU, CUDA: NestedTensor_nested_tensor_from_mask
+ autogen: _nested_tensor_from_mask.out
+- func: _nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool
+ dispatch:
+ CPU, CUDA: NestedTensor_nested_tensor_from_mask_left_aligned
+
- func: _nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor
device_check: NoCheck # cpu_nested_shape_example will always be on CPU
dispatch:
CPU: nested_from_padded_generic
CUDA: nested_from_padded_cuda
+ autogen: _nested_from_padded.out
+# These private functions are temporary. They will be updated/deleted when nested tensors switch to using SymInts for their metadata representation
+- func: _nested_tensor_size(Tensor self) -> Tensor
+ variants: method
+ dispatch:
+ NestedTensorCPU, NestedTensorCUDA: _nested_tensor_size
+ autogen: _nested_tensor_size.out
+
+- func: _nested_tensor_strides(Tensor self) -> Tensor
+ variants: method
+ dispatch:
+ NestedTensorCPU, NestedTensorCUDA: _nested_tensor_strides
+ autogen: _nested_tensor_strides.out
+
+- func: _nested_tensor_offsets(Tensor self) -> int[]
+ variants: method
+ dispatch:
+ NestedTensorCPU, NestedTensorCUDA: _nested_tensor_offsets
+
# _nested_from_padded is not usable from Python, so
# _nested_from_padded_and_nested_example is available for testing.
- func: _nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor
dispatch:
NestedTensorCPU, NestedTensorCUDA: NestedTensor_from_padded_and_nested_example
+ autogen: _nested_from_padded_and_nested_example.out
+# The input arguments' types to this function are temporary. When nested tensors switch to using SymInts for their metadata representation
+# this will need to be updated
+- func: _nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor(a)
+ variants: function
+ device_check: NoCheck
+ dispatch:
+ CPU, CUDA: _nested_view_from_buffer
+
+- func: _nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor
+ variants: function
+ device_check: NoCheck
+ tags: view_copy
+ dispatch:
+ CompositeExplicitAutogradNonFunctional: _nested_view_from_buffer_copy
+ autogen: _nested_view_from_buffer_copy.out
+
- func: _trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
dispatch:
- CompositeExplicitAutograd: _trilinear
+ # calls unsqueeze
+ CompositeExplicitAutogradNonFunctional: _trilinear
+ autogen: _trilinear.out
- func: triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor
- func: trunc(Tensor self) -> Tensor
structured_delegate: trunc.out
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
- CompositeExplicitAutograd: trunc
SparseCPU, SparseCUDA: trunc_sparse
SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr
- func: trunc_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: trunc.out
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
- CompositeExplicitAutograd: trunc_
SparseCPU, SparseCUDA: trunc_sparse_
SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr_
- func: trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
@@ -4943,42 +5412,52 @@
- func: _unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)
variants: function
dispatch:
CPU: _unique_cpu
CUDA: _unique_cuda
+ autogen: _unique.out
- func: unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
variants: function
dispatch:
CPU: unique_dim_cpu
CUDA: unique_dim_cuda
+ tags: dynamic_output_shape
+ autogen: unique_dim.out
- func: unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
variants: function
dispatch:
CPU: unique_consecutive_cpu
CUDA: unique_consecutive_cuda
+ tags: dynamic_output_shape
+ autogen: unique_consecutive.out
- func: unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
variants: function
dispatch:
CPU: unique_dim_consecutive_cpu
CUDA: unique_dim_consecutive_cuda
+ tags: dynamic_output_shape
+ autogen: unique_dim_consecutive.out
# _unique and _unique_dim are fragile; modifying them easily causes internal breakage.
# The operator below is a temporary hack for adding return_counts support.
# Please don't rely on these two operators; they will be removed soon.
- func: _unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
variants: function
dispatch:
CPU: _unique2_cpu
CUDA: _unique2_cuda
+ tags: dynamic_output_shape
+ autogen: _unique2.out
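
The dynamic_output_shape tag added to the unique family marks operators whose output sizes depend on the values in the input, not just its shape, which matters for tracing and shape inference. A minimal sketch using the public torch.unique wrapper (which, to my understanding, ultimately lands on these internal kernels):

    import torch

    # Two inputs with identical shapes can produce results of different sizes:
    # the output length depends on how many distinct values are present.
    a = torch.tensor([1, 1, 2, 3, 3, 3])
    b = torch.tensor([7, 7, 7, 7, 7, 7])
    print(torch.unique(a).shape)  # torch.Size([3])
    print(torch.unique(b).shape)  # torch.Size([1])
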
-- func: _unsafe_view(Tensor self, int[] size) -> Tensor
+- func: _unsafe_view(Tensor self, SymInt[] size) -> Tensor
dispatch:
CompositeExplicitAutograd: _unsafe_view
+ autogen: _unsafe_view.out
- func: unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
@@ -4999,22 +5478,22 @@
- func: var(Tensor self, bool unbiased=True) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
-- func: var.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
+- func: var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: var.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: var
MPS: var_mps
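
For var, the diff relaxes dim to an optional int[1]? on the legacy overloads alongside the existing correction overload. A short sketch of the public call, assuming the Python binding exposes the overload as correction= (as in recent PyTorch releases):

    import torch

    x = torch.randn(4, 5)

    # Assumes torch.var accepts the correction keyword of the var.correction overload.
    # correction=1 reproduces the old unbiased=True behaviour (Bessel's correction);
    # correction=0 gives the biased/population variance.
    v_unbiased = torch.var(x, dim=1, correction=1)
    v_biased = torch.var(x, dim=1, correction=0)
    print(v_unbiased.shape, v_biased.shape)  # torch.Size([4]) torch.Size([4])
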
-- func: var.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+- func: var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: var.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
@@ -5037,19 +5516,20 @@
- func: var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
-- func: var_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
+- func: var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
- func: var_mean.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CPU, CUDA: var_mean
+ autogen: var_mean.correction_out
- func: var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
@@ -5099,106 +5579,138 @@
- func: _weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)
variants: function
dispatch:
CPU: weight_norm_cpu
CUDA: weight_norm_cuda
+ autogen: _weight_norm_interface.out
- func: _weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
variants: function
dispatch:
CPU: weight_norm_backward_cpu
CUDA: weight_norm_backward_cuda
+ autogen: _weight_norm_interface_backward.out
- func: _weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
variants: function
- func: zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeExplicitAutograd: zeros
+ autogen: zeros.names_out
- func: _efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CPU: _efficientzerotensor
CUDA: _efficientzerotensor_cuda
+ autogen: _efficientzerotensor.out
-- func: zeros(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: zeros_symint
-- func: zeros.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
+- func: zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: zeros_out
+ SparseCPU, SparseCUDA, SparseMeta: zeros_sparse_out
- func: zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ dispatch:
+ # NB: Although this composite mutates on the inside, it is
+ # non-differentiable so NonFunctional doesn't apply
+ CompositeExplicitAutograd: zeros_like
+ autogen: zeros_like.out
- func: _standard_gamma_grad(Tensor self, Tensor output) -> Tensor
variants: function
dispatch:
CPU: _standard_gamma_grad_cpu
CUDA: _standard_gamma_grad_cuda
+ autogen: _standard_gamma_grad.out
- func: _standard_gamma(Tensor self, Generator? generator=None) -> Tensor
variants: function
dispatch:
CPU: _s_gamma_cpu
CUDA: _s_gamma_cuda
+ tags: nondeterministic_seeded
+ autogen: _standard_gamma.out
- func: _dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor
dispatch:
CPU: _dirichlet_grad_cpu
CUDA: _dirichlet_grad_cuda
+ autogen: _dirichlet_grad.out
- func: _sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor
+ tags: nondeterministic_seeded
variants: function
dispatch:
CPU: _s_dirichlet_cpu
CUDA: _s_dirichlet_cuda
+ autogen: _sample_dirichlet.out
- func: poisson(Tensor self, Generator? generator=None) -> Tensor
device_check: NoCheck # TensorIterator
dispatch:
CPU: _s_poisson_cpu
CUDA: _s_poisson_cuda
+ tags: nondeterministic_seeded
+ autogen: poisson.out
- func: binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
device_check: NoCheck # TensorIterator
dispatch:
CPU: _s_binomial_cpu
CUDA: _s_binomial_cuda
+ tags: nondeterministic_seeded
+ autogen: binomial.out
# When more variants get ported to native, this dispatch will get more
# complicated
- func: native_norm(Tensor self, Scalar p=2) -> Tensor
dispatch:
SparseCPU, SparseCUDA: norm_sparse
+ autogen: native_norm.out
- func: native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor
dispatch:
SparseCPU, SparseCUDA: norm_sparse
+ autogen: native_norm.ScalarOpt_dim_dtype_out
# TODO: reduce signatures down to one when optional args are available
- func: _sparse_sum(Tensor self) -> Tensor
- func: _sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor
- func: _sparse_sum.dim(Tensor self, int[1] dim) -> Tensor
dispatch:
CompositeExplicitAutograd: _sparse_sum
+ autogen: _sparse_sum.dim_out
- func: _sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor
- func: _sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor
dispatch:
SparseCPU: _sparse_sum_backward_cpu
SparseCUDA: _sparse_sum_backward_cuda
+ autogen: _sparse_sum_backward.out
- func: _sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
dispatch:
SparseCsrCPU: _sparse_csr_sum_cpu
SparseCsrCUDA: _sparse_csr_sum_cuda
+ autogen: _sparse_csr_sum.dim_dtype_out
- func: _sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
dispatch:
SparseCsrCPU: _sparse_csr_prod_cpu
SparseCsrCUDA: _sparse_csr_prod_cuda
+ autogen: _sparse_csr_prod.dim_dtype_out
- func: _sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
python_module: sparse
variants: function
@@ -5209,15 +5721,17 @@
- func: _sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
python_module: sparse
dispatch:
SparseCPU: softmax_sparse_cpu
SparseCUDA: softmax_sparse_cuda
+ autogen: _sparse_softmax.out
- func: _sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
dispatch:
SparseCPU: softmax_backward_sparse_cpu
SparseCUDA: softmax_backward_sparse_cuda
+ autogen: _sparse_softmax_backward_data.out
- func: _sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
python_module: sparse
variants: function
@@ -5228,27 +5742,37 @@
- func: _sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
python_module: sparse
dispatch:
SparseCPU: log_softmax_sparse_cpu
SparseCUDA: log_softmax_sparse_cuda
+ autogen: _sparse_log_softmax.out
- func: _sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
dispatch:
SparseCPU: log_softmax_backward_sparse_cpu
SparseCUDA: log_softmax_backward_sparse_cuda
+ autogen: _sparse_log_softmax_backward_data.out
+- func: _spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor
+ python_module: sparse
+ dispatch:
+ CPU: spdiags
+ autogen: _spdiags.out
+
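The new _spdiags op builds a sparse matrix from rows of diagonals, in the spirit of scipy.sparse.spdiags. A hypothetical usage sketch, assuming the public wrapper torch.sparse.spdiags exists and mirrors this schema:

    import torch

    diagonals = torch.tensor([[1., 2., 3.],
                              [4., 5., 6.]])
    offsets = torch.tensor([0, 1])

    # Assumption: torch.sparse.spdiags(diagonals, offsets, shape, layout=None)
    # mirrors _spdiags. Row i of `diagonals` is placed on the diagonal at offsets[i].
    m = torch.sparse.spdiags(diagonals, offsets, (3, 3))
    print(m.to_dense())
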
- func: norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: norm
+ autogen: norm.ScalarOpt_dtype_out
- func: norm.Scalar(Tensor self, Scalar p=2) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: norm
+ autogen: norm.Scalar_out
- func: norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
structured_delegate: norm.dtype_out
device_check: NoCheck # TensorIterator
variants: function, method
@@ -5333,40 +5857,42 @@
CompositeExplicitAutograd: clone
SparseCPU, SparseCUDA: clone_sparse
SparseCsrCPU, SparseCsrCUDA: clone_sparse_compressed
MkldnnCPU: mkldnn_clone
QuantizedCPU, QuantizedCUDA: quantized_clone
+ NestedTensorCPU, NestedTensorCUDA: clone_nested
+ autogen: clone.out
- func: positive(Tensor(a) self) -> Tensor(a)
variants: function, method
- func: resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: function, method
dispatch:
CompositeExplicitAutograd: resize_as_
- autogen: resize_as.functional, resize_as.out
+ autogen: resize_as, resize_as.out
- func: resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: function, method
dispatch:
SparseCPU, SparseCUDA: resize_as_sparse_
- SparseCsrCPU, SparseCsrCUDA: resize_as_sparse_csr_
- autogen: resize_as_sparse.functional, resize_as_sparse.out
+ SparseCsrCPU, SparseCsrCUDA: resize_as_sparse_compressed_
+ autogen: resize_as_sparse, resize_as_sparse.out
- func: zero_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CPU, CUDA: zero_
MPS: zero_mps_
Meta: zero_meta_
- SparseCPU, SparseCUDA: zero_sparse_
+ SparseCPU, SparseCUDA, SparseMeta: zero_sparse_
SparseCsrCPU, SparseCsrCUDA: zero_sparse_csr_
MkldnnCPU: mkldnn_zero_
- autogen: zero.functional, zero.out
+ autogen: zero, zero.out
- func: sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
@@ -5423,10 +5949,11 @@
- func: rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CPU, CUDA: rsub
+ autogen: rsub.Tensor_out
- func: heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
@@ -5447,17 +5974,19 @@
- func: rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: rsub
+ autogen: rsub.Scalar_out
# Functionally the same as addmm, but we give it a different derivative formula
# that doesn't propagate gradients to non-present entries on sparse.
- func: _sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
python_module: sparse
dispatch:
CompositeExplicitAutograd: _sparse_addmm
+ autogen: _sparse_addmm.out
- func: sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
python_module: sparse
dispatch:
SparseCsrCUDA: sparse_sampled_addmm_out_sparse_csr_cuda
@@ -5635,10 +6164,13 @@
- func: _sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: _sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: _sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: sparse_coo_tensor
+ autogen: sparse_coo_tensor.size_out
- func: sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
@@ -5652,36 +6184,39 @@
- func: _validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
- func: _validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
- func: _sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
dispatch:
- SparseCPU, SparseCUDA: new_with_dims_sparse
+ SparseCPU, SparseCUDA, SparseMeta, Meta: new_with_dims_sparse
+ autogen: _sparse_coo_tensor_with_dims.out
- func: _sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, int[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
dispatch:
- SparseCPU, SparseCUDA: new_with_dims_and_tensor_sparse
+ SparseCPU, SparseCUDA, SparseMeta, Meta: new_with_dims_and_tensor_sparse
+ autogen: _sparse_coo_tensor_with_dims_and_tensors.out
- func: sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: method
dispatch:
- SparseCPU, SparseCUDA: sparse_resize_
- autogen: sparse_resize.functional, sparse_resize.out
+ SparseCPU, SparseCUDA, SparseMeta: sparse_resize_
+ autogen: sparse_resize, sparse_resize.out
- func: sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: method
dispatch:
- SparseCPU, SparseCUDA: sparse_resize_and_clear_
- autogen: sparse_resize_and_clear.functional, sparse_resize_and_clear.out
+ SparseCPU, SparseCUDA, SparseMeta: sparse_resize_and_clear_
+ autogen: sparse_resize_and_clear, sparse_resize_and_clear.out
- func: sparse_mask(Tensor self, Tensor mask) -> Tensor
variants: method
dispatch:
SparseCPU: sparse_mask_cpu
SparseCUDA: sparse_mask_cuda
SparseCsrCPU, SparseCsrCUDA: sparse_mask_sparse_csr
+ autogen: sparse_mask.out
- func: _to_cpu(Tensor[] tensors) -> Tensor[]
variants: function
- func: to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
@@ -5692,17 +6227,19 @@
variants: method
dispatch:
SparseCPU, SparseCUDA: sparse_to_dense
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_dense
MkldnnCPU: mkldnn_to_dense
+ autogen: _to_dense.out
- func: to_dense_backward(Tensor grad, Tensor input) -> Tensor
- func: sparse_dim(Tensor self) -> int
variants: method
dispatch:
- SparseCPU, SparseCUDA: sparse_dim_sparse
+ SparseCPU, SparseCUDA, SparseMeta: sparse_dim_sparse
+ SparseCsrCPU, SparseCsrCUDA: sparse_dim_sparse_csr
device_check: NoCheck
device_guard: False
# legacy method
- func: _dimI(Tensor self) -> int
@@ -5713,26 +6250,27 @@
device_guard: False
- func: dense_dim(Tensor self) -> int
variants: method
dispatch:
- SparseCPU, SparseCUDA: dense_dim_sparse
+ SparseCPU, SparseCUDA, SparseMeta: dense_dim_sparse
+ SparseCsrCPU, SparseCsrCUDA: dense_dim_sparse_csr
device_check: NoCheck
device_guard: False
# legacy method
- func: _dimV(Tensor self) -> int
variants: method
dispatch:
- SparseCPU, SparseCUDA: dense_dim_sparse
+ SparseCPU, SparseCUDA, SparseMeta: dense_dim_sparse
device_check: NoCheck
device_guard: False
- func: _nnz(Tensor self) -> int
variants: method
dispatch:
- SparseCPU, SparseCUDA: _nnz_sparse
+ SparseCPU, SparseCUDA, SparseMeta: _nnz_sparse
SparseCsrCPU, SparseCsrCUDA: _nnz_sparse_csr
device_check: NoCheck
device_guard: False
# NOTE: [ coalesce autograd ]
@@ -5745,55 +6283,57 @@
- func: _coalesce(Tensor self) -> Tensor
dispatch:
SparseCPU: _coalesce_sparse_cpu
SparseCUDA: _coalesce_sparse_cuda
+ autogen: _coalesce.out
- func: is_coalesced(Tensor self) -> bool
variants: method
dispatch:
- SparseCPU, SparseCUDA: is_coalesced_sparse
+ SparseCPU, SparseCUDA, SparseMeta: is_coalesced_sparse
device_check: NoCheck
device_guard: False
- func: _indices(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
- SparseCPU, SparseCUDA: _indices_sparse
+ SparseCPU, SparseCUDA, SparseMeta: _indices_sparse
device_check: NoCheck
device_guard: False
- func: _values(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
- SparseCPU, SparseCUDA: _values_sparse
+ SparseCPU, SparseCUDA, SparseMeta: _values_sparse
device_check: NoCheck
device_guard: False
# This method doesn't do any check but only directly sets the flag. So it can be
# a bit unsafe. Similar to _indices and _values, this is useful for implementing
# custom sparse operations in Python/C++ extension.
- func: _coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)
variants: method
dispatch:
- SparseCPU, SparseCUDA: _coalesced_sparse_
+ SparseCPU, SparseCUDA, SparseMeta: _coalesced_sparse_
device_check: NoCheck
device_guard: False
- autogen: _coalesced.functional, _coalesced.out
+ autogen: _coalesced, _coalesced.out
- func: indices(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
- SparseCPU, SparseCUDA: indices_sparse
+ SparseCPU, SparseCUDA, SparseMeta: indices_sparse
device_check: NoCheck
device_guard: False
- func: values(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
- SparseCPU, SparseCUDA: values_sparse
+ SparseCPU, SparseCUDA, SparseMeta: values_sparse
SparseCsrCPU, SparseCsrCUDA: values_sparse_csr
+ NestedTensorCPU, NestedTensorCUDA: values_nested
device_check: NoCheck
device_guard: False
- func: crow_indices(Tensor(a) self) -> Tensor(a)
variants: method
@@ -5836,115 +6376,132 @@
- func: copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
device_check: NoCheck # Allows copy into different device
variants: function
dispatch:
SparseCPU, SparseCUDA: copy_sparse_
- autogen: copy_sparse_to_sparse.functional, copy_sparse_to_sparse.out
+ autogen: copy_sparse_to_sparse, copy_sparse_to_sparse.out
+# Adding the AutogradNestedTensor key makes this function CompositeImplicit-like for nested tensors
- func: unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]
variants: function, method
dispatch:
CompositeExplicitAutograd: unbind
- NestedTensorCPU, NestedTensorCUDA: NestedTensor_unbind
+ CompositeImplicitAutogradNestedTensor: NestedTensor_unbind
- func: unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]
variants: function, method
- func: to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse
+ autogen: to_sparse.sparse_dim_out
- func: to_sparse(Tensor self) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse
+ autogen: to_sparse.out
- func: to_sparse_csr(Tensor self) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse_csr
SparseCPU, SparseCUDA: coo_to_sparse_csr
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_csr
+ autogen: to_sparse_csr.out
- func: to_sparse_csc(Tensor self) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse_csc
SparseCPU, SparseCUDA: coo_to_sparse_csc
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_csc
+ autogen: to_sparse_csc.out
- func: to_sparse_bsr(Tensor self, int[2] blocksize) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse_bsr
SparseCPU, SparseCUDA: coo_to_sparse_bsr
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_bsr
+ autogen: to_sparse_bsr.out
- func: to_sparse_bsc(Tensor self, int[2] blocksize) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse_bsc
SparseCPU, SparseCUDA: coo_to_sparse_bsc
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_bsc
+ autogen: to_sparse_bsc.out
- func: to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor
variants: method
dispatch:
CPU: dense_to_mkldnn
+ autogen: to_mkldnn.out
- func: mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1) -> Tensor
variants: function
python_module: nn
dispatch:
MkldnnCPU: mkldnn_reorder_conv2d_weight
+ autogen: mkldnn_reorder_conv2d_weight.out
- func: mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor
variants: function
python_module: nn
dispatch:
MkldnnCPU: mkldnn_reorder_conv3d_weight
+ autogen: mkldnn_reorder_conv3d_weight.out
- func: to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor
- func: quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor
variants: function
dispatch:
CPU, CUDA: quantize_per_tensor_dynamic
+ autogen: quantize_per_tensor_dynamic.out
- func: quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor
variants: function
dispatch:
CPU, CUDA: quantize_per_tensor
+ autogen: quantize_per_tensor.out
- func: quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor
variants: function
dispatch:
CPU, CUDA: quantize_per_tensor_tensor_qparams
+ autogen: quantize_per_tensor.tensor_qparams_out
- func: quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]
variants: function
dispatch:
CPU: quantize_per_tensor_list_cpu
+ autogen: quantize_per_tensor.tensors_out
- func: quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor
variants: function
dispatch:
CPU, CUDA: quantize_per_channel
+ autogen: quantize_per_channel.out
- func: dequantize.self(Tensor self) -> Tensor
variants: function, method
dispatch:
CPU, CUDA: dequantize_cpu_or_cuda
QuantizedCPU, QuantizedCUDA: dequantize_quantized
+ autogen: dequantize.self_out
- func: dequantize.tensors(Tensor[] tensors) -> Tensor[]
variants: function
dispatch:
QuantizedCPU: dequantize_tensors_quantized_cpu
+ autogen: dequantize.tensors_out
- func: q_scale(Tensor self) -> float
variants: function, method
dispatch:
QuantizedCPU, QuantizedCUDA: q_scale_quant
@@ -5956,15 +6513,17 @@
- func: q_per_channel_scales(Tensor self) -> Tensor
variants: function, method
dispatch:
QuantizedCPU, QuantizedCUDA: q_per_channel_scales
+ autogen: q_per_channel_scales.out
- func: q_per_channel_zero_points(Tensor self) -> Tensor
variants: function, method
dispatch:
QuantizedCPU, QuantizedCUDA: q_per_channel_zero_points
+ autogen: q_per_channel_zero_points.out
- func: q_per_channel_axis(Tensor self) -> int
variants: function, method
dispatch:
QuantizedCPU, QuantizedCUDA: q_per_channel_axis
@@ -5973,20 +6532,23 @@
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
QuantizedCPU: int_repr_quantized_cpu
QuantizedCUDA: int_repr_quantized_cuda
+ autogen: int_repr.out
- func: _make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor
dispatch:
CPU: make_per_tensor_quantized_tensor_cpu
CUDA: make_per_tensor_quantized_tensor_cuda
+ autogen: _make_per_tensor_quantized_tensor.out
- func: _make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor
dispatch:
CPU: make_per_channel_quantized_tensor_cpu
CUDA: make_per_channel_quantized_tensor_cuda
+ autogen: _make_per_channel_quantized_tensor.out
- func: qscheme(Tensor self) -> QScheme
variants: method
dispatch:
QuantizedCPU, QuantizedCUDA: qscheme_quant
@@ -6001,55 +6563,64 @@
- func: fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
variants: function
dispatch:
CPU, CUDA: fake_quantize_per_tensor_affine_cachemask
+ autogen: fake_quantize_per_tensor_affine_cachemask.out
- func: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
variants: function
dispatch:
CPU, CUDA: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams
+ autogen: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out
- func: fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
variants: function
- func: _fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
variants: function
dispatch:
CPU, CUDA: _fake_quantize_learnable_per_tensor_affine
+ autogen: _fake_quantize_learnable_per_tensor_affine.out
- func: _fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
variants: function
+ dispatch:
+ CPU, CUDA: _fake_quantize_learnable_per_tensor_affine_backward
- func: fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
- func: fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
variants: function
dispatch:
CPU, CUDA: fake_quantize_per_channel_affine_cachemask
+ autogen: fake_quantize_per_channel_affine_cachemask.out
- func: fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
variants: function
- func: _fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
variants: function
dispatch:
CPU, CUDA: _fake_quantize_learnable_per_channel_affine
+ autogen: _fake_quantize_learnable_per_channel_affine.out
- func: _fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
variants: function
+ dispatch:
+ CPU, CUDA: _fake_quantize_learnable_per_channel_affine_backward
- func: fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor
variants: function
- func: _fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)
dispatch:
CPU: fused_moving_avg_obs_fake_quant_cpu
CUDA: fused_moving_avg_obs_fake_quant_cuda
- autogen: _fused_moving_avg_obs_fq_helper.functional, _fused_moving_avg_obs_fq_helper.out
+ autogen: _fused_moving_avg_obs_fq_helper_functional, _fused_moving_avg_obs_fq_helper.out
- func: _choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)
variants: function
- func: _saturate_weight_to_fp16(Tensor weight) -> Tensor
@@ -6069,10 +6640,11 @@
- func: _to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: _to_copy
+ autogen: _to_copy.out
# to(Device) must not exist because all constructors of Device also works for
# TensorOptions. Otherwise, an ambiguity error is thrown.
# See NOTE [ TensorOptions Constructors ].
- func: to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
@@ -6107,10 +6679,11 @@
- func: combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor
variants: function
- func: item(Tensor self) -> Scalar
+ tags: data_dependent_output
variants: method
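
The data_dependent_output tag attached to item (and to _local_scalar_dense below) flags ops whose result is the tensor's data itself, so a tracer cannot reason about the call symbolically. A minimal illustration:

    import torch

    x = torch.tensor([3.5])

    # .item() escapes the tensor world: the returned Python float is the data,
    # not a new tensor, so the output cannot be described by shape alone.
    value = x.item()
    print(type(value), value)  # <class 'float'> 3.5
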
- func: result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType
variants: function
@@ -6128,10 +6701,11 @@
- func: promote_types(ScalarType type1, ScalarType type2) -> ScalarType
variants: function
# NB: Does NOT check precondition that numel == 1
- func: _local_scalar_dense(Tensor self) -> Scalar
+ tags: data_dependent_output
dispatch:
CPU: _local_scalar_dense_cpu
CUDA: _local_scalar_dense_cuda
MPS: _local_scalar_dense_mps
variants: function
@@ -6139,39 +6713,45 @@
# MPS LSTM implementation
- func: _lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
dispatch:
MPS: _lstm_mps
+ autogen: _lstm_mps.out
- func: lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])
dispatch:
MPS: lstm_mps_backward
+ autogen: lstm_mps_backward.out
# Fused RNN kernels
- func: _thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)
dispatch:
CUDA: _thnn_fused_lstm_cell_cuda
+ autogen: _thnn_fused_lstm_cell.out
# NB: The composite version of this function below is a simple wrapper that duplicates some of the outputs
# It is necessary to avoid triggering TensorImpl use count checks in debug mode
# NB: this function is NOT differentiable
- func: _thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)
dispatch:
CUDA: _thnn_fused_lstm_cell_backward_impl_cuda
+ autogen: _thnn_fused_lstm_cell_backward_impl.out
- func: _thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
- func: _thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
- func: _thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)
dispatch:
CUDA: _thnn_fused_gru_cell_cuda
+ autogen: _thnn_fused_gru_cell.out
- func: _thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
dispatch:
CUDA: _thnn_fused_gru_cell_backward_cuda
+ autogen: _thnn_fused_gru_cell_backward.out
- func: _thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
# RNN cells and layers
- func: lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)
@@ -6226,10 +6806,11 @@
# PackedSequence utilities
- func: _pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)
dispatch:
CompositeExplicitAutograd: _pack_padded_sequence
+ autogen: _pack_padded_sequence.out
- func: _pack_padded_sequence_backward(Tensor grad, int[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
- func: _pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
@@ -6239,52 +6820,74 @@
variants: method
device_check: NoCheck
device_guard: False
dispatch:
CPU, CUDA, Meta, MPS: set_
- autogen: set.source_Storage_functional, set.source_Storage_out
+ autogen: set.source_Storage, set.source_Storage_out
-- func: set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!)
+- func: set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
dispatch:
- CPU, Meta: set_storage_cpu_
+ CPU: set_storage_cpu_
+ Meta: set_storage_meta__symint
CUDA: set_storage_cuda_
MPS: set_storage_mps_
QuantizedCPU, QuantizedCUDA: set_storage_quantized_
- autogen: set.source_Storage_storage_offset_functional, set.source_Storage_storage_offset_out
+ autogen: set.source_Storage_storage_offset, set.source_Storage_storage_offset_out
-- func: set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!)
+- func: set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeImplicitAutograd: set__symint
- func: set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
dispatch:
CPU, CUDA, Meta, MPS: set_tensor_
- autogen: set.source_Tensor_functional, set.source_Tensor_out
+ autogen: set.source_Tensor, set.source_Tensor_out
- func: set_(Tensor(a!) self) -> Tensor(a!)
variants: method
dispatch:
CPU: set_cpu_
CUDA: set_cuda_
Meta: set_meta_
MPS: set_mps_
- autogen: set.functional, set.out
+ autogen: set, set.out
+# Not making it CompositeImplicitAutograd because lift
+# should be a primitive w.r.t. functorch
+
+# TODO: this should have a view annotation
+# TODO: shouldn't be a method
- func: lift(Tensor self) -> Tensor
- variants: method
dispatch:
- # Not making it CompositeImplicitAutograd because lift
- # should be a primitive w.r.t. functorch
CompositeExplicitAutograd: lift
+ autogen: lift.out
+# lift_fresh is called with an argument that is guaranteed to be
+# fresh (i.e., newly allocated). This is ONLY called from a
+# torch.tensor call; if you FX trace a lift_fresh, you are obligated
+# to convert this into a lift_fresh_copy (because FX will violate the
+# freshness invariant when tracing).
+- func: lift_fresh(Tensor(a) self) -> Tensor(a)
+ dispatch:
+ CompositeExplicitAutograd: lift_fresh
+
+# Like lift, but it clones the input.
+- func: lift_fresh_copy(Tensor self) -> Tensor
+ tags: view_copy
+ dispatch:
+ CompositeExplicitAutograd: lift_fresh_copy
+ autogen: lift_fresh_copy.out
+
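The lift_fresh / lift_fresh_copy pair is explained in the comments above; the copy variant is the safe form under FX tracing because it clones its input. A small sketch, assuming the op is reachable through torch.ops.aten like other native functions (user code would not normally call it directly):

    import torch

    x = torch.arange(4.)

    # Assumption: torch.ops.aten.lift_fresh_copy is callable here for illustration.
    # Per the schema above it behaves like lift but clones, so the result never
    # aliases the (possibly non-fresh) input.
    y = torch.ops.aten.lift_fresh_copy(x)
    print(y.data_ptr() != x.data_ptr())  # True: storage was copied
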
- func: is_set_to(Tensor self, Tensor tensor) -> bool
variants: method
device_check: NoCheck
device_guard: False
dispatch:
@@ -6294,10 +6897,12 @@
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CPU: masked_fill__cpu
CUDA: masked_fill__cuda
+ QuantizedCPU: masked_fill__quantized_cpu
+ QuantizedCUDA: masked_fill__quantized_cuda
MPS: masked_fill__mps
autogen: masked_fill.Scalar_out
- func: masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
device_check: NoCheck # TensorIterator
@@ -6309,10 +6914,12 @@
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CPU: masked_fill__cpu
CUDA: masked_fill__cuda
+ QuantizedCPU: masked_fill__quantized_cpu
+ QuantizedCUDA: masked_fill__quantized_cuda
MPS: masked_fill__mps
autogen: masked_fill.Tensor_out
- func: masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor
device_check: NoCheck # TensorIterator
@@ -6330,27 +6937,30 @@
- func: masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: masked_scatter
-- func: _masked_softmax(Tensor self, Tensor mask, int? dim=None) -> Tensor
+- func: _masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor
dispatch:
CUDA: masked_softmax_cuda
CPU: masked_softmax_cpu
+ autogen: _masked_softmax.out
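
_masked_softmax gains an optional mask_type argument in this range. The internal kernel is not meant to be called directly; as a rough reference for what a masked softmax computes (under the assumption that True in the mask marks positions to exclude), the same result can be sketched with public ops:

    import torch
    import torch.nn.functional as F

    def masked_softmax_reference(x, mask, dim=-1):
        # Positions where mask is True are excluded: they receive -inf before
        # the softmax and therefore zero probability afterwards.
        return F.softmax(x.masked_fill(mask, float("-inf")), dim=dim)

    scores = torch.randn(2, 4)
    mask = torch.tensor([[False, False, True, True],
                         [False, True, False, True]])
    print(masked_softmax_reference(scores, mask))
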
- func: _masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor
dispatch:
CUDA: masked_softmax_backward_cuda
CPU: masked_softmax_backward_cpu
+ autogen: _masked_softmax_backward.out
-- func: view(Tensor(a) self, int[] size) -> Tensor(a)
+- func: view(Tensor(a) self, SymInt[] size) -> Tensor(a)
variants: method
device_check: NoCheck
device_guard: False
dispatch:
- ZeroTensor, CPU, CUDA, Meta, QuantizedCPU, QuantizedCUDA, MPS: view
+ ZeroTensor, Meta, CPU, CUDA, QuantizedCPU, QuantizedCUDA, MPS: view
MkldnnCPU: mkldnn_view
+ NestedTensorCPU, NestedTensorCUDA: view_nested
# Warning: If you want to change the name or overload name of this
# operator, you might also want to change the `isBlockListedSchema`
# function in `torch/csrc/jit/frontend/schema_catching.cpp`.
# The name and overload name of this operator are hardcoded in that
@@ -6369,19 +6979,22 @@
CPU, CUDA, MPS: put_
autogen: put.out
- func: put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor
variants: function, method
+ dispatch:
+ CompositeExplicitAutograd: put
- func: index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
structured: True
variants: function
precomputed:
- dim -> int dim
dispatch:
CPU: index_add_cpu_out
CUDA: index_add_cuda_out
+ MPS: index_add_mps_out
- func: index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)
structured_delegate: index_add.out
variants: method
@@ -6552,19 +7165,15 @@
- func: eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: eq.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
- dispatch:
- CompositeExplicitAutograd: eq_
- func: eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: eq.Tensor_out
device_check: NoCheck # TensorIterator
variants: method
- dispatch:
- CompositeExplicitAutograd: eq_
- func: bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
@@ -6587,10 +7196,11 @@
- func: bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_and
+ autogen: bitwise_and.Scalar_Tensor_out
- func: bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
structured_delegate: bitwise_and.Tensor_out
@@ -6641,10 +7251,11 @@
- func: bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_or
+ autogen: bitwise_or.Scalar_Tensor_out
- func: bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
structured_delegate: bitwise_or.Tensor_out
@@ -6695,10 +7306,11 @@
- func: bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_xor
+ autogen: bitwise_xor.Scalar_Tensor_out
- func: bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
structured_delegate: bitwise_xor.Tensor_out
@@ -6792,10 +7404,11 @@
- func: bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_left_shift
+ autogen: bitwise_left_shift.Scalar_Tensor_out
- func: __rshift__.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
@@ -6859,10 +7472,11 @@
- func: bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_right_shift
+ autogen: bitwise_right_shift.Scalar_Tensor_out
- func: tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
structured_delegate: tril.out
variants: method
@@ -6903,71 +7517,80 @@
MPS: addbmm_mps
- func: random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
+ tags: nondeterministic_seeded
dispatch:
CPU, CUDA: random_
Meta: random_meta_
MPS: random_mps_
- autogen: random.from_functional, random.from_out
+ autogen: random.from, random.from_out
- func: random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
+ tags: nondeterministic_seeded
variants: method
dispatch:
CPU, CUDA: random_
Meta: random_meta_
MPS: random_mps_
- autogen: random.to_functional, random.to_out
+ autogen: random.to, random.to_out
- func: random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
+ tags: nondeterministic_seeded
variants: method
dispatch:
CPU, CUDA: random_
Meta: random_meta_
- autogen: random.functional, random.out
+ autogen: random, random.out
- func: uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
+ tags: nondeterministic_seeded
variants: method
dispatch:
CPU, CUDA: uniform_
MPS: uniform_mps_
Meta: uniform_meta_
- autogen: uniform.functional, uniform.out
+ autogen: uniform, uniform.out
- func: cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
+ tags: nondeterministic_seeded
dispatch:
CPU, CUDA: cauchy_
- autogen: cauchy.functional, cauchy.out
+ autogen: cauchy, cauchy.out
- func: log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
+ tags: nondeterministic_seeded
variants: method
dispatch:
CPU, CUDA: log_normal_
- autogen: log_normal.functional, log_normal.out
+ autogen: log_normal, log_normal.out
- func: exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
+ tags: nondeterministic_seeded
variants: method
dispatch:
CPU, CUDA: exponential_
- autogen: exponential.functional, exponential.out
+ MPS: exponential_mps_
+ autogen: exponential, exponential.out
- func: geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
+ tags: nondeterministic_seeded
variants: method
dispatch:
CPU, CUDA: geometric_
-# wrappers for TH functions
- autogen: geometric.functional, geometric.out
+ # wrappers for TH functions
+ autogen: geometric, geometric.out
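
Several of the in-place samplers above gain the nondeterministic_seeded tag, marking them as consumers of RNG state. A short sketch of how an explicit generator makes such calls reproducible:

    import torch

    g = torch.Generator().manual_seed(0)
    a = torch.empty(3).uniform_(0, 1, generator=g)

    g.manual_seed(0)                     # rewind the generator
    b = torch.empty(3).uniform_(0, 1, generator=g)
    print(torch.equal(a, b))             # True: same seed, same draws
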
- func: diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: diag_cpu_out
CUDA: diag_cuda_out
@@ -6976,14 +7599,16 @@
- func: diag(Tensor self, int diagonal=0) -> Tensor
variants: method, function
dispatch:
CompositeExplicitAutograd: diag
-- func: diag_backward(Tensor grad, int[] input_sizes, int diagonal) -> Tensor
+- func: diag_backward(Tensor grad, SymInt[] input_sizes, int diagonal) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
+ dispatch:
+ CompositeImplicitAutograd: diag_backward_symint
- func: cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
- func: cross(Tensor self, Tensor other, int? dim=None) -> Tensor
variants: method, function
@@ -7012,21 +7637,24 @@
- func: tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CPU: tril_indices_cpu
CUDA: tril_indices_cuda
+ autogen: tril_indices.out
- func: triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CPU: triu_indices_cpu
CUDA: triu_indices_cuda
+ autogen: triu_indices.out
- func: trace(Tensor self) -> Tensor
variants: method, function
dispatch:
CPU: trace_cpu
CUDA: trace_cuda
+ autogen: trace.out
- func: trace_backward(Tensor grad, int[] sizes) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
@@ -7065,19 +7693,15 @@
- func: ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: ne.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
- dispatch:
- CompositeExplicitAutograd: ne_
- func: ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: ne.Tensor_out
device_check: NoCheck # TensorIterator
variants: method
- dispatch:
- CompositeExplicitAutograd: ne_
# not_equal, alias for torch.ne
- func: not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: not_equal.Scalar(Tensor self, Scalar other) -> Tensor
@@ -7160,19 +7784,15 @@
- func: ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: ge.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
- dispatch:
- CompositeExplicitAutograd: ge_
- func: ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: ge.Tensor_out
device_check: NoCheck # TensorIterator
variants: method
- dispatch:
- CompositeExplicitAutograd: ge_
# greater_equal, alias for torch.ge
- func: greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: greater_equal.Scalar(Tensor self, Scalar other) -> Tensor
@@ -7223,19 +7843,15 @@
- func: le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: le.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
- dispatch:
- CompositeExplicitAutograd: le_
- func: le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: le.Tensor_out
device_check: NoCheck # TensorIterator
variants: method
- dispatch:
- CompositeExplicitAutograd: le_
# less_equal, alias for torch.le
- func: less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: less_equal.Scalar(Tensor self, Scalar other) -> Tensor
@@ -7286,19 +7902,15 @@
- func: gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: gt.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
- dispatch:
- CompositeExplicitAutograd: gt_
- func: gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: gt.Tensor_out
device_check: NoCheck # TensorIterator
variants: method
- dispatch:
- CompositeExplicitAutograd: gt_
# greater, alias for torch.gt
- func: greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: greater.Scalar(Tensor self, Scalar other) -> Tensor
@@ -7349,19 +7961,15 @@
- func: lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: lt.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
- dispatch:
- CompositeExplicitAutograd: lt_
- func: lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: lt.Tensor_out
device_check: NoCheck # TensorIterator
variants: method
- dispatch:
- CompositeExplicitAutograd: lt_
# less, alias for torch.lt
- func: less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: less.Scalar(Tensor self, Scalar other) -> Tensor
@@ -7421,38 +8029,45 @@
- func: masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: masked_select_out_cpu
CUDA: masked_select_out_cuda
+ MPS: masked_select_out_mps
+ tags: dynamic_output_shape
- func: masked_select(Tensor self, Tensor mask) -> Tensor
variants: method, function
dispatch:
CPU: masked_select_cpu
CUDA: masked_select_cuda
+ MPS: masked_select_mps
+ tags: dynamic_output_shape
- func: masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
- func: nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: nonzero_out_cpu
CUDA: nonzero_out_cuda
+ tags: dynamic_output_shape
- func: nonzero(Tensor self) -> Tensor
variants: method, function
dispatch:
CPU: nonzero_cpu
CUDA: nonzero_cuda
+ tags: dynamic_output_shape
- func: nonzero_numpy(Tensor self) -> Tensor[]
variants: method, function
- func: argwhere(Tensor self) -> Tensor
variants: method, function
+ tags: dynamic_output_shape
- func: gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU, CUDA: gather_out
@@ -7511,21 +8126,10 @@
variants: method
- func: cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, float label_smoothing=0.0) -> Tensor
python_module: nn
-- func: lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR)
- dispatch:
- CPU: legacy_lstsq_out
- CUDA: legacy_lstsq_out_cuda
-
-- func: lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)
- variants: method, function
- dispatch:
- CPU: legacy_lstsq
- CUDA: legacy_lstsq_cuda
-
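The deprecated lstsq operator is removed in this range; torch.linalg.lstsq is the documented replacement (note it takes arguments in (A, B) order, unlike the old method). A minimal sketch:

    import torch

    A = torch.randn(5, 3)
    B = torch.randn(5, 2)

    # Old (removed): X, _ = torch.lstsq(B, A)
    result = torch.linalg.lstsq(A, B)
    print(result.solution.shape)  # torch.Size([3, 2])
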
- func: triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
structured: True
dispatch:
CPU, CUDA: triangular_solve_out
SparseCsrCPU: triangular_solve_out_sparse_csr_cpu
@@ -7565,20 +8169,12 @@
- func: _symeig_helper(Tensor self, bool eigenvectors, bool upper) -> (Tensor, Tensor)
variants: function
dispatch:
CPU: _symeig_helper_cpu
CUDA: _symeig_helper_cuda
+ autogen: _symeig_helper.out
-- func: eig.e(Tensor self, bool eigenvectors=False, *, Tensor(a!) e, Tensor(b!) v) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
- dispatch:
- CompositeExplicitAutograd: eig_out
-
-- func: eig(Tensor self, bool eigenvectors=False) -> (Tensor eigenvalues, Tensor eigenvectors)
- variants: method, function
- dispatch:
- CompositeExplicitAutograd: eig
-
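Likewise, the deprecated eig operator disappears here; torch.linalg.eig returns complex eigenvalues and eigenvectors directly instead of the old packed real format. A minimal sketch:

    import torch

    A = torch.randn(4, 4)
    eigenvalues, eigenvectors = torch.linalg.eig(A)
    print(eigenvalues.dtype)    # torch.complex64
    print(eigenvectors.shape)   # torch.Size([4, 4])
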
- func: svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
- func: svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
variants: method, function
@@ -7627,10 +8223,11 @@
- func: _cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor
variants: function
dispatch:
CPU: _cholesky_solve_helper_cpu
CUDA: _cholesky_solve_helper_cuda
+ autogen: _cholesky_solve_helper.out
- func: cholesky_inverse(Tensor self, bool upper=False) -> Tensor
variants: method, function
dispatch:
CPU, CUDA: cholesky_inverse
@@ -7670,17 +8267,13 @@
- func: _lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)
variants: function
- func: lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU, CUDA: lu_solve_out
- func: lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
variants: method, function
- dispatch:
- CPU, CUDA: lu_solve
# lu_unpack
- func: lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)
structured_delegate: lu_unpack.out
variants: function
@@ -7691,17 +8284,21 @@
dispatch:
CPU, CUDA: lu_unpack_out
# TODO: remove dispatch section when porting TH CUDA to ATen
- func: multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+ tags: nondeterministic_seeded
dispatch:
CPU, CUDA: multinomial_out
+ MPS: multinomial_out_mps
- func: multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor
variants: method, function
dispatch:
CPU, CUDA: multinomial
+ MPS: multinomial_mps
+ tags: nondeterministic_seeded
- func: lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
@@ -7790,20 +8387,18 @@
- func: sign(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: sign.out
variants: function, method
dispatch:
- CompositeExplicitAutograd: sign
SparseCPU, SparseCUDA: sign_sparse
SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr
- func: sign_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: sign.out
variants: method
dispatch:
- CompositeExplicitAutograd: sign_
SparseCPU, SparseCUDA: sign_sparse_
SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr_
- func: sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
@@ -7834,10 +8429,11 @@
- func: dist(Tensor self, Tensor other, Scalar p=2) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CompositeExplicitAutograd: dist
+ autogen: dist.out
- func: atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
@@ -7919,18 +8515,21 @@
CPU: histogram_cpu
- func: _histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]
dispatch:
CPU: histogramdd_bin_edges_cpu
+ autogen: _histogramdd_bin_edges.out
- func: _histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor
dispatch:
CPU: histogramdd_cpu
+ autogen: _histogramdd_from_bin_cts.out
- func: _histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor
dispatch:
CPU: histogramdd_cpu
+ autogen: _histogramdd_from_bin_tensors.out
- func: histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
- func: histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
@@ -7982,12 +8581,10 @@
variants: method, function
- func: hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: hypot.out
variants: method
- dispatch:
- CompositeExplicitAutograd: hypot_
- func: igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
@@ -8026,12 +8623,10 @@
variants: method, function
- func: nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: nextafter.out
variants: method
- dispatch:
- CompositeExplicitAutograd: nextafter_
- func: remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: remainder_out
@@ -8065,19 +8660,27 @@
- func: remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CPU, CUDA: remainder
+ autogen: remainder.Scalar_Tensor_out
- func: min(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CPU, CUDA: min
MPS: min_mps
QuantizedCPU: min_quantized_cpu
+# Not to be confused with binary op `min.out`. Commented because of failed CI
+# FIXME: enable this
+#- func: min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+# device_check: NoCheck # TensorIterator
+# dispatch:
+# CompositeExplicitAutograd: min_unary_out
+
- func: fmin(Tensor self, Tensor other) -> Tensor
structured_delegate: fmin.out
device_check: NoCheck # TensorIterator
variants: method, function
@@ -8094,10 +8697,17 @@
dispatch:
CPU, CUDA: max
MPS: max_mps
QuantizedCPU: max_quantized_cpu
+# Not to be confused with binary op `max.out`. Commented because of failed CI
+# FIXME: enable this
+#- func: max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+# device_check: NoCheck # TensorIterator
+# dispatch:
+# CompositeExplicitAutograd: max_unary_out
+
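To illustrate the distinction the two commented-out entries draw (a sketch, not schema text): `min`/`max` with a single argument are unary reductions, while the existing `min.out`/`max.out` family and `minimum`/`maximum` are elementwise binary ops.

    import torch

    x = torch.tensor([[1.0, 5.0], [3.0, 2.0]])
    y = torch.ones_like(x)

    torch.min(x)           # unary reduction -> a single scalar tensor
    torch.minimum(x, y)    # binary elementwise minimum, same shape as x
    torch.maximum(x, y)    # binary elementwise maximum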
- func: fmax(Tensor self, Tensor other) -> Tensor
structured_delegate: fmax.out
device_check: NoCheck # TensorIterator
variants: method, function
@@ -8211,10 +8821,17 @@
- func: argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
+- func: argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor
+ device_check: NoCheck # TensorIterator
+ variants: method, function
+ dispatch:
+ CPU, CUDA: argsort_stable
+ autogen: argsort.stable_out
+
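A quick illustrative call for the new `argsort.stable` overload, assuming a build where the `stable` keyword is exposed (as this schema suggests):

    import torch

    x = torch.tensor([2, 1, 2, 1])
    torch.argsort(x, stable=True)   # ties keep their original relative order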
- func: argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor
variants: method, function
- func: topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
structured: True
@@ -8281,16 +8898,19 @@
- func: unfold_backward(Tensor grad_in, int[] input_sizes, int dim, int size, int step) -> Tensor
variants: function
dispatch:
CPU, CUDA: unfold_backward
+ autogen: unfold_backward.out
- func: equal(Tensor self, Tensor other) -> bool
+ tags: data_dependent_output
variants: method, function
dispatch:
CPU: cpu_equal
CUDA: cuda_equal
+ MPS: mps_equal
QuantizedCPU: equal_quantized_cpu
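The `data_dependent_output` tag reflects that `equal` returns a plain Python bool rather than a tensor, so its value cannot be deferred by tracing. A minimal sketch:

    import torch

    a = torch.arange(3)
    b = torch.arange(3)
    torch.equal(a, b)   # -> True (a Python bool, not a Tensor)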
- func: pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
@@ -8360,89 +8980,111 @@
- func: float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
variants: method
- func: normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
+ tags: nondeterministic_seeded
variants: method
dispatch:
CPU, CUDA: normal_
MPS: normal_mps_
Meta: normal_meta_
SparseCsrCPU, SparseCsrCUDA: normal_sparse_csr_
- autogen: normal.functional, normal.out
+ autogen: normal.out
+# Only used by the functionalization pass.
+# Normally, the codegen would be able to generate a normal() NativeFunction,
+# but we can't due to overload ambiguity with normal.Tensor_float.
+- func: normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor
+ device_check: NoCheck # TensorIterator
+ tags: nondeterministic_seeded
+ dispatch:
+ CompositeExplicitAutograd: normal_functional
+
- func: normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+ tags: nondeterministic_seeded
dispatch:
CPU, CUDA: normal_out
MPS: normal_mps_out
Meta: normal_out_meta
- func: normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor
dispatch:
CPU, CUDA: normal
- #MPS: normal_mps
+ MPS: normal_mps
Meta: normal_meta
+ tags: nondeterministic_seeded
- func: normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: normal_out
Meta: normal_out_meta
MPS: normal_mps_out
+ tags: nondeterministic_seeded
- func: normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor
dispatch:
CPU, CUDA: normal
+ MPS: normal_mps
Meta: normal_meta
- #MPS: normal_mps
+ tags: nondeterministic_seeded
- func: normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: normal_out
Meta: normal_out_meta
MPS: normal_mps_out
+ tags: nondeterministic_seeded
- func: normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor
dispatch:
CPU, CUDA: normal
+ MPS: normal_mps
Meta: normal_meta
- #MPS: normal_mps
+ tags: nondeterministic_seeded
- func: normal.float_float(float mean, float std, int[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ dispatch:
+ CompositeExplicitAutograd: normal
+ tags: nondeterministic_seeded
- func: normal.float_float_out(float mean, float std, int[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: normal_out
+ tags: nondeterministic_seeded
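For orientation, the four `normal` overloads above correspond to these Python calls (a sketch with made-up shapes; the generator argument is what the `nondeterministic_seeded` tag refers to). The separate `normal_functional` entry exists only because, per the comment above, a plain `normal(Tensor self, ...)` schema would clash with `normal.Tensor_float`.

    import torch

    g = torch.Generator().manual_seed(0)

    torch.normal(torch.zeros(3), 1.0, generator=g)             # Tensor_float
    torch.normal(0.0, torch.ones(3), generator=g)              # float_Tensor
    torch.normal(torch.zeros(3), torch.ones(3), generator=g)   # Tensor_Tensor
    torch.normal(0.0, 1.0, size=(3,), generator=g)             # float_float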
- func: alias(Tensor(a) self) -> Tensor(a)
variants: method, function
dispatch:
CompositeExplicitAutograd: alias
- func: _amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
variants: function
dispatch:
CUDA: _amp_foreach_non_finite_check_and_unscale_cuda_
- autogen: _amp_foreach_non_finite_check_and_unscale.functional, _amp_foreach_non_finite_check_and_unscale.out
+ autogen: _amp_foreach_non_finite_check_and_unscale, _amp_foreach_non_finite_check_and_unscale.out
- func: _amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)
variants: function
dispatch:
CUDA: _amp_update_scale_cuda_
- autogen: _amp_update_scale.functional, _amp_update_scale.out
+ autogen: _amp_update_scale, _amp_update_scale.out
-#- func: _cat(Tensor[] tensors, int dim=0) -> Tensor
- #dispatch:
+ #- func: _cat(Tensor[] tensors, int dim=0) -> Tensor
+ #dispatch:
#CPU: _cat_cpu
#CUDA: cat_cuda
#MPS: cat_mps
#QuantizedCPU: cat_quantized_cpu
-#- func: _cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
- #dispatch:
+ #- func: _cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
+ #dispatch:
#CPU: _cat_out_cpu
- #CUDA: cat_out_cuda
- #QuantizedCPU: cat_out_quantized_cpu
+ #CUDA: cat_out_cuda
+ #QuantizedCPU: cat_out_quantized_cpu
-- func: _foreach_add.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[]
+- func: _foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_add_scalar_kernel_slow
CUDA: foreach_tensor_add_scalar_kernel_cuda
@@ -8451,13 +9093,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_add_scalar_kernel_slow_
CUDA: foreach_tensor_add_scalar_kernel_cuda_
- autogen: _foreach_add.Scalar_functional, _foreach_add.Scalar_out
+ autogen: _foreach_add.Scalar_out
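The `_foreach_*` entries apply one op across a whole list of tensors in a single dispatch (used by the multi-tensor optimizer paths). The parameter rename from `tensors` to `self` only affects the schema; usage is unchanged. A hedged sketch via the private API:

    import torch

    params = [torch.zeros(2), torch.zeros(3)]

    shifted = torch._foreach_add(params, 1.0)   # out-of-place: returns a new list of tensors
    torch._foreach_add_(params, 1.0)            # in-place variant mutates each tensor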
-- func: _foreach_sub.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[]
+- func: _foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sub_scalar_kernel_slow
CUDA: foreach_tensor_sub_scalar_kernel_cuda
@@ -8466,13 +9108,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sub_scalar_kernel_slow_
CUDA: foreach_tensor_sub_scalar_kernel_cuda_
- autogen: _foreach_sub.Scalar_functional, _foreach_sub.Scalar_out
+ autogen: _foreach_sub.Scalar_out
-- func: _foreach_mul.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[]
+- func: _foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_mul_scalar_kernel_slow
CUDA: foreach_tensor_mul_scalar_kernel_cuda
@@ -8481,13 +9123,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_mul_scalar_kernel_slow_
CUDA: foreach_tensor_mul_scalar_kernel_cuda_
- autogen: _foreach_mul.Scalar_functional, _foreach_mul.Scalar_out
+ autogen: _foreach_mul.Scalar_out
-- func: _foreach_div.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[]
+- func: _foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_div_scalar_kernel_slow
CUDA: foreach_tensor_div_scalar_kernel_cuda
@@ -8496,13 +9138,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_div_scalar_kernel_slow_
CUDA: foreach_tensor_div_scalar_kernel_cuda_
- autogen: _foreach_div.Scalar_functional, _foreach_div.Scalar_out
+ autogen: _foreach_div.Scalar_out
-- func: _foreach_add.List(Tensor[] tensors1, Tensor[] tensors2, *, Scalar alpha=1) -> Tensor[]
+- func: _foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_add_list_kernel_slow
CUDA: foreach_tensor_add_list_kernel_cuda
@@ -8511,13 +9153,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_add_list_kernel_slow_
CUDA: foreach_tensor_add_list_kernel_cuda_
- autogen: _foreach_add.List_functional, _foreach_add.List_out
+ autogen: _foreach_add.List_out
-- func: _foreach_sub.List(Tensor[] tensors1, Tensor[] tensors2, *, Scalar alpha=1) -> Tensor[]
+- func: _foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sub_list_kernel_slow
CUDA: foreach_tensor_sub_list_kernel_cuda
@@ -8526,13 +9168,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sub_list_kernel_slow_
CUDA: foreach_tensor_sub_list_kernel_cuda_
- autogen: _foreach_sub.List_functional, _foreach_sub.List_out
+ autogen: _foreach_sub.List_out
-- func: _foreach_mul.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[]
+- func: _foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_mul_list_kernel_slow
CUDA: foreach_tensor_mul_list_kernel_cuda
@@ -8541,13 +9183,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_mul_list_kernel_slow_
CUDA: foreach_tensor_mul_list_kernel_cuda_
- autogen: _foreach_mul.List_functional, _foreach_mul.List_out
+ autogen: _foreach_mul.List_out
-- func: _foreach_div.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[]
+- func: _foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_div_list_kernel_slow
CUDA: foreach_tensor_div_list_kernel_cuda
@@ -8556,13 +9198,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_div_list_kernel_slow_
CUDA: foreach_tensor_div_list_kernel_cuda_
- autogen: _foreach_div.List_functional, _foreach_div.List_out
+ autogen: _foreach_div.List_out
-- func: _foreach_add.ScalarList(Tensor[] tensors, Scalar[] scalars) -> Tensor[]
+- func: _foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_add_scalarlist_kernel_slow
CUDA: foreach_tensor_add_scalarlist_kernel_cuda
@@ -8571,13 +9213,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_add_scalarlist_kernel_slow_
CUDA: foreach_tensor_add_scalarlist_kernel_cuda_
- autogen: _foreach_add.ScalarList_functional, _foreach_add.ScalarList_out
+ autogen: _foreach_add.ScalarList_out
-- func: _foreach_sub.ScalarList(Tensor[] tensors, Scalar[] scalars) -> Tensor[]
+- func: _foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sub_scalarlist_kernel_slow
CUDA: foreach_tensor_sub_scalarlist_kernel_cuda
@@ -8586,13 +9228,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sub_scalarlist_kernel_slow_
CUDA: foreach_tensor_sub_scalarlist_kernel_cuda_
- autogen: _foreach_sub.ScalarList_functional, _foreach_sub.ScalarList_out
+ autogen: _foreach_sub.ScalarList_out
-- func: _foreach_div.ScalarList(Tensor[] tensors, Scalar[] scalars) -> Tensor[]
+- func: _foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_div_scalarlist_kernel_slow
CUDA: foreach_tensor_div_scalarlist_kernel_cuda
@@ -8601,13 +9243,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_div_scalarlist_kernel_slow_
CUDA: foreach_tensor_div_scalarlist_kernel_cuda_
- autogen: _foreach_div.ScalarList_functional, _foreach_div.ScalarList_out
+ autogen: _foreach_div.ScalarList_out
-- func: _foreach_mul.ScalarList(Tensor[] tensors, Scalar[] scalars) -> Tensor[]
+- func: _foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_mul_scalarlist_kernel_slow
CUDA: foreach_tensor_mul_scalarlist_kernel_cuda
@@ -8616,13 +9258,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_mul_scalarlist_kernel_slow_
CUDA: foreach_tensor_mul_scalarlist_kernel_cuda_
- autogen: _foreach_mul.ScalarList_functional, _foreach_mul.ScalarList_out
+ autogen: _foreach_mul.ScalarList_out
-- func: _foreach_exp(Tensor[] tensors) -> Tensor[]
+- func: _foreach_exp(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_exp_slow
CUDA: foreach_tensor_exp_cuda
@@ -8631,21 +9273,21 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_zero_slow_
CUDA: foreach_tensor_zero_cuda_
- autogen: _foreach_zero.functional, _foreach_zero.out
+ autogen: _foreach_zero, _foreach_zero.out
- func: _foreach_exp_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_exp_slow_
CUDA: foreach_tensor_exp_cuda_
- autogen: _foreach_exp.functional, _foreach_exp.out
+ autogen: _foreach_exp.out
-- func: _foreach_sqrt(Tensor[] tensors) -> Tensor[]
+- func: _foreach_sqrt(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sqrt_slow
CUDA: foreach_tensor_sqrt_cuda
@@ -8654,13 +9296,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sqrt_slow_
CUDA: foreach_tensor_sqrt_cuda_
- autogen: _foreach_sqrt.functional, _foreach_sqrt.out
+ autogen: _foreach_sqrt.out
-- func: _foreach_abs(Tensor[] tensors) -> Tensor[]
+- func: _foreach_abs(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_abs_slow
CUDA: foreach_tensor_abs_cuda
@@ -8669,13 +9311,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_abs_slow_
CUDA: foreach_tensor_abs_cuda_
- autogen: _foreach_abs.functional, _foreach_abs.out
+ autogen: _foreach_abs.out
-- func: _foreach_acos(Tensor[] tensors) -> Tensor[]
+- func: _foreach_acos(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_acos_slow
CUDA: foreach_tensor_acos_cuda
@@ -8684,13 +9326,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_acos_slow_
CUDA: foreach_tensor_acos_cuda_
- autogen: _foreach_acos.functional, _foreach_acos.out
+ autogen: _foreach_acos.out
-- func: _foreach_asin(Tensor[] tensors) -> Tensor[]
+- func: _foreach_asin(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_asin_slow
CUDA: foreach_tensor_asin_cuda
@@ -8699,13 +9341,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_asin_slow_
CUDA: foreach_tensor_asin_cuda_
- autogen: _foreach_asin.functional, _foreach_asin.out
+ autogen: _foreach_asin.out
-- func: _foreach_atan(Tensor[] tensors) -> Tensor[]
+- func: _foreach_atan(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_atan_slow
CUDA: foreach_tensor_atan_cuda
@@ -8714,13 +9356,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_atan_slow_
CUDA: foreach_tensor_atan_cuda_
- autogen: _foreach_atan.functional, _foreach_atan.out
+ autogen: _foreach_atan.out
-- func: _foreach_ceil(Tensor[] tensors) -> Tensor[]
+- func: _foreach_ceil(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_ceil_slow
CUDA: foreach_tensor_ceil_cuda
@@ -8729,13 +9371,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_ceil_slow_
CUDA: foreach_tensor_ceil_cuda_
- autogen: _foreach_ceil.functional, _foreach_ceil.out
+ autogen: _foreach_ceil.out
-- func: _foreach_cos(Tensor[] tensors) -> Tensor[]
+- func: _foreach_cos(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_cos_slow
CUDA: foreach_tensor_cos_cuda
@@ -8744,13 +9386,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_cos_slow_
CUDA: foreach_tensor_cos_cuda_
- autogen: _foreach_cos.functional, _foreach_cos.out
+ autogen: _foreach_cos.out
-- func: _foreach_cosh(Tensor[] tensors) -> Tensor[]
+- func: _foreach_cosh(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_cosh_slow
CUDA: foreach_tensor_cosh_cuda
@@ -8759,13 +9401,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_cosh_slow_
CUDA: foreach_tensor_cosh_cuda_
- autogen: _foreach_cosh.functional, _foreach_cosh.out
+ autogen: _foreach_cosh.out
-- func: _foreach_erf(Tensor[] tensors) -> Tensor[]
+- func: _foreach_erf(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_erf_slow
CUDA: foreach_tensor_erf_cuda
@@ -8774,13 +9416,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_erf_slow_
CUDA: foreach_tensor_erf_cuda_
- autogen: _foreach_erf.functional, _foreach_erf.out
+ autogen: _foreach_erf.out
-- func: _foreach_erfc(Tensor[] tensors) -> Tensor[]
+- func: _foreach_erfc(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_erfc_slow
CUDA: foreach_tensor_erfc_cuda
@@ -8789,13 +9431,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_erfc_slow_
CUDA: foreach_tensor_erfc_cuda_
- autogen: _foreach_erfc.functional, _foreach_erfc.out
+ autogen: _foreach_erfc.out
-- func: _foreach_expm1(Tensor[] tensors) -> Tensor[]
+- func: _foreach_expm1(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_expm1_slow
CUDA: foreach_tensor_expm1_cuda
@@ -8804,13 +9446,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_expm1_slow_
CUDA: foreach_tensor_expm1_cuda_
- autogen: _foreach_expm1.functional, _foreach_expm1.out
+ autogen: _foreach_expm1.out
-- func: _foreach_floor(Tensor[] tensors) -> Tensor[]
+- func: _foreach_floor(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_floor_slow
CUDA: foreach_tensor_floor_cuda
@@ -8819,13 +9461,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_floor_slow_
CUDA: foreach_tensor_floor_cuda_
- autogen: _foreach_floor.functional, _foreach_floor.out
+ autogen: _foreach_floor.out
-- func: _foreach_log(Tensor[] tensors) -> Tensor[]
+- func: _foreach_log(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_log_slow
CUDA: foreach_tensor_log_cuda
@@ -8834,13 +9476,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_log_slow_
CUDA: foreach_tensor_log_cuda_
- autogen: _foreach_log.functional, _foreach_log.out
+ autogen: _foreach_log.out
-- func: _foreach_log10(Tensor[] tensors) -> Tensor[]
+- func: _foreach_log10(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_log10_slow
CUDA: foreach_tensor_log10_cuda
@@ -8849,13 +9491,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_log10_slow_
CUDA: foreach_tensor_log10_cuda_
- autogen: _foreach_log10.functional, _foreach_log10.out
+ autogen: _foreach_log10.out
-- func: _foreach_log1p(Tensor[] tensors) -> Tensor[]
+- func: _foreach_log1p(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_log1p_slow
CUDA: foreach_tensor_log1p_cuda
@@ -8864,13 +9506,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_log1p_slow_
CUDA: foreach_tensor_log1p_cuda_
- autogen: _foreach_log1p.functional, _foreach_log1p.out
+ autogen: _foreach_log1p.out
-- func: _foreach_log2(Tensor[] tensors) -> Tensor[]
+- func: _foreach_log2(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_log2_slow
CUDA: foreach_tensor_log2_cuda
@@ -8879,13 +9521,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_log2_slow_
CUDA: foreach_tensor_log2_cuda_
- autogen: _foreach_log2.functional, _foreach_log2.out
+ autogen: _foreach_log2.out
-- func: _foreach_neg(Tensor[] tensors) -> Tensor[]
+- func: _foreach_neg(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_neg_slow
CUDA: foreach_tensor_neg_cuda
@@ -8894,13 +9536,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_neg_slow_
CUDA: foreach_tensor_neg_cuda_
- autogen: _foreach_neg.functional, _foreach_neg.out
+ autogen: _foreach_neg.out
-- func: _foreach_tan(Tensor[] tensors) -> Tensor[]
+- func: _foreach_tan(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_tan_slow
CUDA: foreach_tensor_tan_cuda
@@ -8909,13 +9551,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_tan_slow_
CUDA: foreach_tensor_tan_cuda_
- autogen: _foreach_tan.functional, _foreach_tan.out
+ autogen: _foreach_tan.out
-- func: _foreach_tanh(Tensor[] tensors) -> Tensor[]
+- func: _foreach_tanh(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_tanh_slow
CUDA: foreach_tensor_tanh_cuda
@@ -8924,13 +9566,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_tanh_slow_
CUDA: foreach_tensor_tanh_cuda_
- autogen: _foreach_tanh.functional, _foreach_tanh.out
+ autogen: _foreach_tanh.out
-- func: _foreach_sin(Tensor[] tensors) -> Tensor[]
+- func: _foreach_sin(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sin_slow
CUDA: foreach_tensor_sin_cuda
@@ -8939,13 +9581,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sin_slow_
CUDA: foreach_tensor_sin_cuda_
- autogen: _foreach_sin.functional, _foreach_sin.out
+ autogen: _foreach_sin.out
-- func: _foreach_sinh(Tensor[] tensors) -> Tensor[]
+- func: _foreach_sinh(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sinh_slow
CUDA: foreach_tensor_sinh_cuda
@@ -8954,13 +9596,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sinh_slow_
CUDA: foreach_tensor_sinh_cuda_
- autogen: _foreach_sinh.functional, _foreach_sinh.out
+ autogen: _foreach_sinh.out
-- func: _foreach_round(Tensor[] tensors) -> Tensor[]
+- func: _foreach_round(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_round_slow
CUDA: foreach_tensor_round_cuda
@@ -8969,13 +9611,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_round_slow_
CUDA: foreach_tensor_round_cuda_
- autogen: _foreach_round.functional, _foreach_round.out
+ autogen: _foreach_round.out
-- func: _foreach_lgamma(Tensor[] tensors) -> Tensor[]
+- func: _foreach_lgamma(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_lgamma_slow
CUDA: foreach_tensor_lgamma_cuda
@@ -8984,13 +9626,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_lgamma_slow_
CUDA: foreach_tensor_lgamma_cuda_
- autogen: _foreach_lgamma.functional, _foreach_lgamma.out
+ autogen: _foreach_lgamma.out
-- func: _foreach_frac(Tensor[] tensors) -> Tensor[]
+- func: _foreach_frac(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_frac_slow
CUDA: foreach_tensor_frac_cuda
@@ -8999,13 +9641,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_frac_slow_
CUDA: foreach_tensor_frac_cuda_
- autogen: _foreach_frac.functional, _foreach_frac.out
+ autogen: _foreach_frac.out
-- func: _foreach_reciprocal(Tensor[] tensors) -> Tensor[]
+- func: _foreach_reciprocal(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_reciprocal_slow
CUDA: foreach_tensor_reciprocal_cuda
@@ -9014,13 +9656,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_reciprocal_slow_
CUDA: foreach_tensor_reciprocal_cuda_
- autogen: _foreach_reciprocal.functional, _foreach_reciprocal.out
+ autogen: _foreach_reciprocal.out
-- func: _foreach_sigmoid(Tensor[] tensors) -> Tensor[]
+- func: _foreach_sigmoid(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sigmoid_slow
CUDA: foreach_tensor_sigmoid_cuda
@@ -9029,13 +9671,13 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sigmoid_slow_
CUDA: foreach_tensor_sigmoid_cuda_
- autogen: _foreach_sigmoid.functional, _foreach_sigmoid.out
+ autogen: _foreach_sigmoid.out
-- func: _foreach_trunc(Tensor[] tensors) -> Tensor[]
+- func: _foreach_trunc(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_trunc_slow
CUDA: foreach_tensor_trunc_cuda
@@ -9044,92 +9686,109 @@
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_trunc_slow_
CUDA: foreach_tensor_trunc_cuda_
- autogen: _foreach_trunc.functional, _foreach_trunc.out
+ autogen: _foreach_trunc.out
- func: _foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcdiv_scalar_slow_
CUDA: foreach_tensor_addcdiv_scalar_cuda_
- autogen: _foreach_addcdiv.Scalar_functional, _foreach_addcdiv.Scalar_out
+ autogen: _foreach_addcdiv.Scalar_out
- func: _foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcmul_scalar_slow_
CUDA: foreach_tensor_addcmul_scalar_cuda_
- autogen: _foreach_addcmul.Scalar_functional, _foreach_addcmul.Scalar_out
+ autogen: _foreach_addcmul.Scalar_out
- func: _foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcdiv_scalarlist_slow_
CUDA: foreach_tensor_addcdiv_scalarlist_cuda_
- autogen: _foreach_addcdiv.ScalarList_functional, _foreach_addcdiv.ScalarList_out
+ autogen: _foreach_addcdiv.ScalarList_out
- func: _foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcmul_scalarlist_slow_
CUDA: foreach_tensor_addcmul_scalarlist_cuda_
- autogen: _foreach_addcmul.ScalarList_functional, _foreach_addcmul.ScalarList_out
+ autogen: _foreach_addcmul.ScalarList_out
-- func: _foreach_addcdiv.Scalar(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
+- func: _foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcdiv_scalar_slow
CUDA: foreach_tensor_addcdiv_scalar_cuda
-- func: _foreach_addcmul.Scalar(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
+- func: _foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcmul_scalar_slow
CUDA: foreach_tensor_addcmul_scalar_cuda
-- func: _foreach_addcdiv.ScalarList(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
+- func: _foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcdiv_scalarlist_slow
CUDA: foreach_tensor_addcdiv_scalarlist_cuda
-- func: _foreach_addcmul.ScalarList(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
+- func: _foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcmul_scalarlist_slow
CUDA: foreach_tensor_addcmul_scalarlist_cuda
-- func: _foreach_maximum.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[]
+- func: _foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
CPU: foreach_tensor_maximum_slow
CUDA: foreach_tensor_maximum_cuda
-- func: _foreach_minimum.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[]
+- func: _foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
+ CPU: foreach_tensor_maximum_slow_
+ CUDA: foreach_tensor_maximum_cuda_
+ autogen: _foreach_maximum.List_out
+
+- func: _foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
CPU: foreach_tensor_minimum_slow
CUDA: foreach_tensor_minimum_cuda
-- func: _foreach_norm.Scalar(Tensor[] tensors, Scalar ord=2) -> Tensor[]
+- func: _foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
variants: function
dispatch:
+ CPU: foreach_tensor_minimum_slow_
+ CUDA: foreach_tensor_minimum_cuda_
+ autogen: _foreach_minimum.List_out
+
+- func: _foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
CPU: foreach_tensor_norm_slow
CUDA: foreach_tensor_norm_cuda
+ autogen: _foreach_norm.Scalar_out
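The new in-place `_foreach_maximum_`/`_foreach_minimum_` entries and `_foreach_norm` back the foreach optimizer and gradient-clipping paths. A heavily hedged sketch of the private API (exact Python exposure of these names is an assumption):

    import torch

    exp_avg_sq = [torch.ones(2), torch.ones(3)]
    new_vals = [torch.full((2,), 2.0), torch.full((3,), 0.5)]

    torch._foreach_maximum_(exp_avg_sq, new_vals)   # elementwise max, in place (assumed binding)
    torch._foreach_norm(exp_avg_sq)                 # per-tensor 2-norms (assumed binding)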
- func: bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
dispatch:
CPU: bucketize_cpu
CUDA: bucketize_cuda
@@ -9141,10 +9800,11 @@
- func: bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
dispatch:
CPU: bucketize_cpu
CUDA: bucketize_cuda
+ autogen: bucketize.Scalar_out
- func: searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
dispatch:
CPU: searchsorted_cpu
CUDA: searchsorted_cuda
@@ -9156,20 +9816,22 @@
# described as the solution to this issue: https://github.com/pytorch/pytorch/issues/31611
# This op should NOT be used or exposed or edited or else Windows builds (with BUILD_SPLIT_CUDA) will break.
- func: _torch_cuda_cu_linker_symbol_op(Tensor self) -> Tensor
dispatch:
CUDA: _torch_cuda_cu_linker_symbol_op_cuda
+ autogen: _torch_cuda_cu_linker_symbol_op.out
- func: searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: searchsorted_out_cpu
CUDA: searchsorted_out_cuda
- func: searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
dispatch:
CPU: searchsorted_cpu
CUDA: searchsorted_cuda
+ autogen: searchsorted.Scalar_out
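Both ops search a sorted 1-D boundary/sequence tensor; the difference is which argument is treated as the query. A small sketch with made-up values:

    import torch

    boundaries = torch.tensor([1.0, 3.0, 5.0, 7.0])
    values = torch.tensor([0.5, 4.0, 9.0])

    torch.bucketize(values, boundaries)                 # bucket index for each value
    torch.searchsorted(boundaries, values, right=True)  # insertion points into the sorted sequence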
- func: _convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor
structured_delegate: _convert_indices_from_coo_to_csr.out
- func: _convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)
@@ -9213,30 +9875,13 @@
python_module: nn
dispatch:
CPU, CUDA: mse_loss_backward
MPS: mse_loss_backward_mps
-- func: l1_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
- python_module: nn
- dispatch:
- CompositeExplicitAutograd: l1_loss_out
-
- func: l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
python_module: nn
- dispatch:
- CompositeExplicitAutograd: l1_loss
-- func: l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
- python_module: nn
- dispatch:
- CPU, CUDA: l1_loss_backward_out
-
-- func: l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
- python_module: nn
- dispatch:
- CompositeExplicitAutograd: l1_loss_backward
-
- func: multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU: multi_margin_loss_cpu_out
CUDA: multi_margin_loss_cuda_out
@@ -9384,20 +10029,23 @@
- func: huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU, CUDA: huber_loss_out
+ MPS: huber_loss_out_mps
- func: huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor
python_module: nn
dispatch:
CPU, CUDA: huber_loss
+ MPS: huber_loss_mps
- func: huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU, CUDA: huber_loss_backward_out
+ MPS: huber_loss_backward_out_mps
- func: huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: huber_loss_backward
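The MPS kernels added above extend device coverage for the existing Python API; as a reference sketch with random inputs:

    import torch
    import torch.nn.functional as F

    pred, target = torch.randn(4), torch.randn(4)
    loss = F.huber_loss(pred, target, reduction="mean", delta=1.0)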
@@ -9450,19 +10098,18 @@
- func: elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
structured_delegate: elu.out
device_check: NoCheck # TensorIterator
python_module: nn
- dispatch:
- CompositeExplicitAutograd: elu_
- func: glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
dispatch:
CPU, CUDA: glu_out
+ MPS: glu_out_mps
- func: glu(Tensor self, int dim=-1) -> Tensor
structured_delegate: glu.out
device_check: NoCheck # TensorIterator
python_module: nn
@@ -9470,26 +10117,30 @@
- func: glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: glu_backward_cpu_out
CUDA: glu_backward_cuda_out
+ MPS: glu_backward_mps_out
- func: glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
python_module: nn
dispatch:
CPU: glu_backward_cpu
CUDA: glu_backward_cuda
+ MPS: glu_backward_mps
- func: glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor
python_module: nn
dispatch:
CPU, CUDA: glu_jvp
+ autogen: glu_jvp.out
- func: glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor
python_module: nn
dispatch:
CPU, CUDA: glu_backward_jvp
+ autogen: glu_backward_jvp.out
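For context, `glu` splits its input in half along `dim` and gates one half with the sigmoid of the other; the new MPS and JVP entries above serve this same op. A minimal sketch:

    import torch
    import torch.nn.functional as F

    x = torch.randn(2, 6)     # the gated dim must have even size
    out = F.glu(x, dim=-1)    # a * sigmoid(b), result shape (2, 3)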
- func: hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
@@ -9574,10 +10225,11 @@
- func: hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
python_module: nn
dispatch:
CPU, CUDA: hardswish_backward
+ autogen: hardswish_backward.out
- func: leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
@@ -9647,27 +10299,31 @@
CPU: log_sigmoid_backward_cpu
CUDA: log_sigmoid_backward_cuda
- func: rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
+ tags: nondeterministic_seeded
dispatch:
CPU: rrelu_with_noise_out_cpu
CUDA: rrelu_with_noise_out_cuda
- func: rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
python_module: nn
dispatch:
CPU: rrelu_with_noise_cpu
CUDA: rrelu_with_noise_cuda
+ tags: nondeterministic_seeded
- func: rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: rrelu_with_noise_backward
+ autogen: rrelu_with_noise_backward.out
- func: rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
python_module: nn
+ tags: nondeterministic_seeded
dispatch:
CPU: rrelu_with_noise_cpu_
CUDA: rrelu_with_noise_cuda_
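`rrelu_with_noise` samples the negative slope from [lower, upper] during training, which is why these entries gain the `nondeterministic_seeded` tag. A sketch via the public functional wrapper:

    import torch
    import torch.nn.functional as F

    x = torch.randn(5)
    F.rrelu(x, lower=0.125, upper=1.0 / 3.0, training=True)   # slope is sampled while training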
- func: softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
@@ -9675,10 +10331,11 @@
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU, CUDA: softplus_out
+ MPS: softplus_out_mps
- func: softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
structured_delegate: softplus.out
device_check: NoCheck # TensorIterator
python_module: nn
@@ -9687,10 +10344,11 @@
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
dispatch:
CPU, CUDA: softplus_backward_out
+ MPS: softplus_backward_out_mps
- func: softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor
structured_delegate: softplus_backward.grad_input
python_module: nn
@@ -9716,43 +10374,52 @@
- func: softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor
structured_delegate: softshrink_backward.grad_input
python_module: nn
-- func: adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
+- func: adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU: adaptive_avg_pool2d_out_cpu
CUDA: adaptive_avg_pool2d_out_cuda
MPS: adaptive_avg_pool2d_out_mps
- MkldnnCPU: mkldnn_adaptive_avg_pool2d_out
+ MkldnnCPU: mkldnn_adaptive_avg_pool2d_out_stub
-- func: adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
+- func: adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
python_module: nn
+ dispatch:
+ CompositeImplicitAutograd: adaptive_avg_pool2d_symint
- func: mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
dispatch:
MkldnnCPU: mkldnn_adaptive_avg_pool2d
+- func: mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ MkldnnCPU: mkldnn_adaptive_avg_pool2d_out
+
- func: mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
dispatch:
MkldnnCPU: mkldnn_adaptive_avg_pool2d_backward
+ autogen: mkldnn_adaptive_avg_pool2d_backward.out
-- func: _adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
+- func: _adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
dispatch:
CPU: adaptive_avg_pool2d_cpu
CUDA: adaptive_avg_pool2d_cuda
MPS: adaptive_avg_pool2d_mps
QuantizedCPU: adaptive_avg_pool2d_quantized_cpu
QuantizedCUDA: adaptive_avg_pool2d_quantized_cuda
+ autogen: _adaptive_avg_pool2d.out
- func: _adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
python_module: nn
dispatch:
CPU: adaptive_avg_pool2d_backward_cpu
CUDA: adaptive_avg_pool2d_backward_cuda
MPS: adaptive_avg_pool2d_backward_mps
+ autogen: _adaptive_avg_pool2d_backward.out
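The switch from `int[2]` to `SymInt[2]` lets the output size stay symbolic under tracing with dynamic shapes; the eager-mode call is unchanged. A reference sketch:

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 3, 32, 32)
    y = F.adaptive_avg_pool2d(x, (7, 7))   # target spatial size given directly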
- func: adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU: adaptive_avg_pool3d_out_cpu
@@ -9765,10 +10432,11 @@
- func: _adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor
dispatch:
CPU: adaptive_avg_pool3d_cpu
CUDA: adaptive_avg_pool3d_cuda
QuantizedCPU: adaptive_avg_pool3d_quantized_cpu
+ autogen: _adaptive_avg_pool3d.out
- func: adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: adaptive_avg_pool3d_backward_out_cpu
@@ -9777,10 +10445,11 @@
- func: _adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor
python_module: nn
dispatch:
CPU: adaptive_avg_pool3d_backward_cpu
CUDA: adaptive_avg_pool3d_backward_cuda
+ autogen: _adaptive_avg_pool3d_backward.out
# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
python_module: nn
structured: True
@@ -10184,414 +10853,439 @@
python_module: nn
- func: pad(Tensor self, int[] pad, str mode="constant", float? value=None) -> Tensor
python_module: nn
-- func: upsample_linear1d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+- func: upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: upsample_linear1d
+ autogen: upsample_linear1d.vec_out
-- func: upsample_linear1d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+- func: upsample_linear1d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: upsample_linear1d_backward
+ autogen: upsample_linear1d_backward.vec_out
-- func: upsample_bilinear2d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+- func: upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: upsample_bilinear2d
+ autogen: upsample_bilinear2d.vec_out
-- func: upsample_bilinear2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+- func: upsample_bilinear2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: upsample_bilinear2d_backward
+ autogen: upsample_bilinear2d_backward.vec_out
-- func: _upsample_bilinear2d_aa.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+- func: _upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: _upsample_bilinear2d_aa
+ autogen: _upsample_bilinear2d_aa.vec_out
-- func: _upsample_bilinear2d_aa_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+- func: _upsample_bilinear2d_aa_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: _upsample_bilinear2d_aa_backward
+ autogen: _upsample_bilinear2d_aa_backward.vec_out
-- func: upsample_trilinear3d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+- func: upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: upsample_trilinear3d
+ autogen: upsample_trilinear3d.vec_out
-- func: upsample_trilinear3d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+- func: upsample_trilinear3d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: upsample_trilinear3d_backward
+ autogen: upsample_trilinear3d_backward.vec_out
-- func: upsample_bicubic2d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+- func: upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: upsample_bicubic2d
+ autogen: upsample_bicubic2d.vec_out
-- func: upsample_bicubic2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+- func: upsample_bicubic2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: upsample_bicubic2d_backward
+ autogen: upsample_bicubic2d_backward.vec_out
-- func: _upsample_bicubic2d_aa.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+- func: _upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: _upsample_bicubic2d_aa
+ autogen: _upsample_bicubic2d_aa.vec_out
-- func: _upsample_bicubic2d_aa_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+- func: _upsample_bicubic2d_aa_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: _upsample_bicubic2d_aa_backward
+ autogen: _upsample_bicubic2d_aa_backward.vec_out
-- func: upsample_nearest1d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor
+- func: upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: upsample_nearest1d
+ autogen: upsample_nearest1d.vec_out
-- func: _upsample_nearest_exact1d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor
+- func: _upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: _upsample_nearest_exact1d
+ autogen: _upsample_nearest_exact1d.vec_out
-- func: upsample_nearest1d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor
+- func: upsample_nearest1d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: upsample_nearest1d_backward
+ autogen: upsample_nearest1d_backward.vec_out
-- func: _upsample_nearest_exact1d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor
+- func: _upsample_nearest_exact1d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: _upsample_nearest_exact1d_backward
+ autogen: _upsample_nearest_exact1d_backward.vec_out
-- func: upsample_nearest2d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor
+- func: upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: upsample_nearest2d
+ autogen: upsample_nearest2d.vec_out
-- func: _upsample_nearest_exact2d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor
+- func: _upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: _upsample_nearest_exact2d
+ autogen: _upsample_nearest_exact2d.vec_out
-- func: upsample_nearest2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor
+- func: upsample_nearest2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: upsample_nearest2d_backward
+ autogen: upsample_nearest2d_backward.vec_out
-- func: _upsample_nearest_exact2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor
+- func: _upsample_nearest_exact2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: _upsample_nearest_exact2d_backward
+ autogen: _upsample_nearest_exact2d_backward.vec_out
-- func: upsample_nearest3d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor
+- func: upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CPU: upsample_nearest3d_cpu
CUDA: upsample_nearest3d_cuda
QuantizedCPU: upsample_nearest3d_quantized_cpu
+ autogen: upsample_nearest3d.vec_out
-- func: _upsample_nearest_exact3d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor
+- func: _upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CPU: _upsample_nearest_exact3d_cpu
CUDA: _upsample_nearest_exact3d_cuda
QuantizedCPU: _upsample_nearest_exact3d_quantized_cpu
+ autogen: _upsample_nearest_exact3d.vec_out
-- func: upsample_nearest3d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor
+- func: upsample_nearest3d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CPU: upsample_nearest3d_backward_cpu
CUDA: upsample_nearest3d_backward_cuda
+ autogen: upsample_nearest3d_backward.vec_out
-- func: _upsample_nearest_exact3d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor
+- func: _upsample_nearest_exact3d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
python_module: nn
dispatch:
CPU: _upsample_nearest_exact3d_backward_cpu
CUDA: _upsample_nearest_exact3d_backward_cuda
+ autogen: _upsample_nearest_exact3d_backward.vec_out
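The `.vec` overloads above take optional `output_size` or `scale_factors` lists; `torch.nn.functional.interpolate` is the usual public entry point that feeds them, and the non-"vec" overloads that follow are kept only for backward compatibility, as the NOTE below states. A minimal Python sketch of that usage (the exact internal dispatch is an implementation detail; shapes assume a recent PyTorch build):

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 3, 8, 8)

    # Either a target size or a scale factor ends up in the *.vec overloads above.
    y1 = F.interpolate(x, size=(16, 16), mode="nearest")
    y2 = F.interpolate(x, scale_factor=2.0, mode="nearest-exact")

    print(y1.shape, y2.shape)  # both torch.Size([1, 3, 16, 16])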
# NOTE: all of the non-"vec" upsample overloads are only kept for backward compatibility.
-- func: upsample_linear1d.out(Tensor self, int[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+- func: upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_linear1d_out_cpu
CUDA: upsample_linear1d_out_cuda
-- func: upsample_linear1d(Tensor self, int[1] output_size, bool align_corners, float? scales=None) -> Tensor
+- func: upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
python_module: nn
structured_delegate: upsample_linear1d.out
-- func: upsample_linear1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_linear1d_backward_out_cpu
CUDA: upsample_linear1d_backward_out_cuda
-- func: upsample_linear1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners, float? scales=None) -> Tensor
+- func: upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
python_module: nn
structured_delegate: upsample_linear1d_backward.grad_input
-- func: upsample_bilinear2d.out(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+- func: upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_bilinear2d_out_cpu
CUDA: upsample_bilinear2d_out_cuda
MPS: upsample_bilinear2d_out_mps
-- func: upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_bilinear2d.out
dispatch:
QuantizedCPU: upsample_bilinear2d_quantized_cpu
-- func: upsample_bilinear2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_bilinear2d_backward_out_cpu
CUDA: upsample_bilinear2d_backward_out_cuda
MPS: upsample_bilinear2d_backward_out_mps
-- func: upsample_bilinear2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_bilinear2d_backward.grad_input
-- func: _upsample_bilinear2d_aa.out(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+- func: _upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_bilinear2d_aa_out_cpu
CUDA: _upsample_bilinear2d_aa_out_cuda
-- func: _upsample_bilinear2d_aa(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: _upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: _upsample_bilinear2d_aa.out
-- func: _upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: _upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_bilinear2d_aa_backward_out_cpu
CUDA: _upsample_bilinear2d_aa_backward_out_cuda
-- func: _upsample_bilinear2d_aa_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: _upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: _upsample_bilinear2d_aa_backward.grad_input
-- func: upsample_bicubic2d.out(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+- func: upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_bicubic2d_out_cpu
CUDA: upsample_bicubic2d_out_cuda
-- func: upsample_bicubic2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_bicubic2d.out
-- func: upsample_bicubic2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_bicubic2d_backward_out_cpu
CUDA: upsample_bicubic2d_backward_out_cuda
-- func: upsample_bicubic2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_bicubic2d_backward.grad_input
-- func: _upsample_bicubic2d_aa.out(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+- func: _upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_bicubic2d_aa_out_cpu
CUDA: _upsample_bicubic2d_aa_out_cuda
-- func: _upsample_bicubic2d_aa(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: _upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: _upsample_bicubic2d_aa.out
-- func: _upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: _upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_bicubic2d_aa_backward_out_cpu
CUDA: _upsample_bicubic2d_aa_backward_out_cuda
-- func: _upsample_bicubic2d_aa_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: _upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: _upsample_bicubic2d_aa_backward.grad_input
-- func: upsample_trilinear3d.out(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+- func: upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_trilinear3d_out_cpu
CUDA: upsample_trilinear3d_out_cuda
-- func: upsample_trilinear3d(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_trilinear3d.out
-- func: upsample_trilinear3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_trilinear3d_backward_out_cpu
CUDA: upsample_trilinear3d_backward_out_cuda
-- func: upsample_trilinear3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_trilinear3d_backward.grad_input
-- func: upsample_nearest1d.out(Tensor self, int[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+- func: upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_nearest1d_out_cpu
CUDA: upsample_nearest1d_out_cuda
+ MPS: upsample_nearest1d_out_mps
-- func: _upsample_nearest_exact1d.out(Tensor self, int[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+- func: _upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_nearest_exact1d_out_cpu
CUDA: _upsample_nearest_exact1d_out_cuda
-- func: upsample_nearest1d(Tensor self, int[1] output_size, float? scales=None) -> Tensor
+- func: upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
python_module: nn
structured_delegate: upsample_nearest1d.out
-- func: _upsample_nearest_exact1d(Tensor self, int[1] output_size, float? scales=None) -> Tensor
+- func: _upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
python_module: nn
structured_delegate: _upsample_nearest_exact1d.out
-- func: upsample_nearest1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_nearest1d_backward_out_cpu
CUDA: upsample_nearest1d_backward_out_cuda
-- func: _upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: _upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_nearest_exact1d_backward_out_cpu
CUDA: _upsample_nearest_exact1d_backward_out_cuda
-- func: upsample_nearest1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, float? scales=None) -> Tensor
+- func: upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
python_module: nn
structured_delegate: upsample_nearest1d_backward.grad_input
-- func: _upsample_nearest_exact1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, float? scales=None) -> Tensor
+- func: _upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
python_module: nn
structured_delegate: _upsample_nearest_exact1d_backward.grad_input
-- func: upsample_nearest2d.out(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+- func: upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_nearest2d_out_cpu
CUDA: upsample_nearest2d_out_cuda
MPS: upsample_nearest2d_out_mps
-- func: _upsample_nearest_exact2d.out(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+- func: _upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_nearest_exact2d_out_cpu
CUDA: _upsample_nearest_exact2d_out_cuda
MPS: _upsample_nearest_exact2d_out_mps
-- func: upsample_nearest2d(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_nearest2d.out
dispatch:
QuantizedCPU: upsample_nearest2d_quantized_cpu
-- func: _upsample_nearest_exact2d(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: _upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: _upsample_nearest_exact2d.out
dispatch:
QuantizedCPU: _upsample_nearest_exact2d_quantized_cpu
-- func: upsample_nearest2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_nearest2d_backward_out_cpu
CUDA: upsample_nearest2d_backward_out_cuda
MPS: upsample_nearest2d_backward_out_mps
-- func: _upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: _upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_nearest_exact2d_backward_out_cpu
CUDA: _upsample_nearest_exact2d_backward_out_cuda
MPS: _upsample_nearest_exact2d_backward_out_mps
-- func: upsample_nearest2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_nearest2d_backward.grad_input
-- func: _upsample_nearest_exact2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: _upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: _upsample_nearest_exact2d_backward.grad_input
-- func: upsample_nearest3d.out(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+- func: upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_nearest3d_out_cpu
CUDA: upsample_nearest3d_out_cuda
-- func: _upsample_nearest_exact3d.out(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+- func: _upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_nearest_exact3d_out_cpu
CUDA: _upsample_nearest_exact3d_out_cuda
-- func: upsample_nearest3d(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_nearest3d.out
dispatch:
QuantizedCPU: upsample_nearest3d_quantized_cpu
-- func: _upsample_nearest_exact3d(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: _upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: _upsample_nearest_exact3d.out
dispatch:
QuantizedCPU: _upsample_nearest_exact3d_quantized_cpu
-- func: upsample_nearest3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_nearest3d_backward_out_cpu
CUDA: upsample_nearest3d_backward_out_cuda
-- func: _upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: _upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_nearest_exact3d_backward_out_cpu
CUDA: _upsample_nearest_exact3d_backward_out_cuda
-- func: upsample_nearest3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_nearest3d_backward.grad_input
-- func: _upsample_nearest_exact3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+- func: _upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: _upsample_nearest_exact3d_backward.grad_input
- func: sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
@@ -10696,10 +11390,11 @@
- func: _slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
python_module: nn
dispatch:
CPU: slow_conv2d_backward_cpu
CUDA: slow_conv2d_backward_cuda
+ autogen: _slow_conv2d_backward.output_mask_out
- func: _conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
python_module: nn
dispatch:
@@ -10712,10 +11407,11 @@
- func: conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, int[3] dilation) -> Tensor
python_module: nn
dispatch:
CUDA: conv_depthwise3d_cuda
+ autogen: conv_depthwise3d.out
- func: slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
- func: slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0) -> Tensor
@@ -10734,41 +11430,31 @@
- func: slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor
python_module: nn
dispatch:
CPU: slow_conv_dilated2d_cpu
CUDA: slow_conv_dilated2d_cuda
+ autogen: slow_conv_dilated2d.out
- func: slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1) -> Tensor
python_module: nn
dispatch:
CPU: slow_conv_dilated3d_cpu
CUDA: slow_conv_dilated3d_cuda
+ autogen: slow_conv_dilated3d.out
-- func: col2im.out(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
+- func: col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU: col2im_out_cpu
CUDA: col2im_out_cuda
-- func: col2im(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
+- func: col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
python_module: nn
dispatch:
CPU: col2im_cpu
CUDA: col2im_cuda
-- func: col2im_backward.grad_input(Tensor grad_output, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) grad_input) -> Tensor(a!)
- python_module: nn
- dispatch:
- CPU: col2im_backward_out_cpu
- CUDA: col2im_backward_out_cuda
-
-- func: col2im_backward(Tensor grad_output, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
- python_module: nn
- dispatch:
- CPU: col2im_backward_cpu
- CUDA: col2im_backward_cuda
-
- func: column_stack(Tensor[] tensors) -> Tensor
- func: column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
- func: im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
@@ -10781,22 +11467,10 @@
python_module: nn
dispatch:
CPU: im2col_cpu
CUDA: im2col_cuda
-- func: im2col_backward.grad_input(Tensor grad_output, int[2] input_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) grad_input) -> Tensor(a!)
- python_module: nn
- dispatch:
- CPU: im2col_backward_out_cpu
- CUDA: im2col_backward_out_cuda
-
-- func: im2col_backward(Tensor grad_output, int[2] input_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
- python_module: nn
- dispatch:
- CPU: im2col_backward_cpu
- CUDA: im2col_backward_cuda
-
- func: isfinite(Tensor self) -> Tensor
variants: function, method
device_check: NoCheck
device_guard: False
@@ -10805,11 +11479,13 @@
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: isinf
SparseCPU, SparseCUDA: isinf_sparse
+ SparseMeta: isinf_sparse_meta
SparseCsrCPU, SparseCsrCUDA: isinf_sparse_csr
+ autogen: isinf.out
- func: record_stream(Tensor(a!) self, Stream s) -> ()
variants: method
dispatch:
CUDA: record_stream_cuda
@@ -11059,12 +11735,10 @@
- func: special_zeta(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
structured_delegate: special_zeta.out
- dispatch:
- CompositeExplicitAutograd: special_zeta
- func: special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
@@ -11394,22 +12068,30 @@
variants: function
- func: fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
python_module: fft
variants: function
+ dispatch:
+ CompositeExplicitAutograd: fft_fftfreq
- func: fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
+ dispatch:
+ CompositeExplicitAutograd: fft_fftfreq_out
- func: fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
python_module: fft
variants: function
+ dispatch:
+ CompositeExplicitAutograd: fft_rfftfreq
- func: fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
+ dispatch:
+ CompositeExplicitAutograd: fft_rfftfreq_out
- func: fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor
python_module: fft
variants: function
@@ -11427,27 +12109,23 @@
# See linalg_det as an example.
# "_ex" stands for experimental
- func: linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)
python_module: linalg
- variants: function
- dispatch:
- CPU, CUDA: linalg_cholesky_ex
+ structured_delegate: linalg_cholesky_ex.L
- func: linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
python_module: linalg
- variants: function
+ structured: True
dispatch:
CPU, CUDA: linalg_cholesky_ex_out
- func: linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor
python_module: linalg
- variants: function
- func: linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
- variants: function
- func: linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor
python_module: linalg
variants: function
structured_delegate: linalg_cross.out
@@ -11455,12 +12133,10 @@
ZeroTensor: linalg_cross_zerotensor
- func: linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
structured: True
- precomputed:
- - dim -> int dim
dispatch:
CPU, CUDA: linalg_cross_out
# linalg.lu_factor
- func: linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
@@ -11494,32 +12170,43 @@
variants: function
structured: True
dispatch:
CPU, CUDA: linalg_lu_out
-# linalg.det
-- func: linalg_det(Tensor self) -> Tensor
+# linalg.lu_solve
+- func: linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor
python_module: linalg
+ structured_delegate: linalg_lu_solve.out
variants: function
-- func: linalg_det.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+- func: linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
+ variants: function
+ structured: True
+ dispatch:
+ CPU, CUDA: linalg_lu_solve_out
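The newly added `linalg_lu_solve` entries back `torch.linalg.lu_solve`, which consumes a factorization produced by `torch.linalg.lu_factor`; the `left` and `adjoint` flags pick which system is solved. A small sketch with the default flags, assuming a PyTorch new enough to ship this op:

    import torch

    A = torch.randn(4, 4, dtype=torch.float64)
    B = torch.randn(4, 2, dtype=torch.float64)

    LU, pivots = torch.linalg.lu_factor(A)    # factor A once
    X = torch.linalg.lu_solve(LU, pivots, B)  # reuse the factorization for A @ X = B

    torch.testing.assert_close(A @ X, B)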
-# torch.det, alias for torch.linalg.det
-- func: det(Tensor self) -> Tensor
- variants: function, method
+# linalg.det
+- func: _linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)
+ structured_delegate: _linalg_det.result
-- func: _det_lu_based_helper(Tensor self) -> (Tensor det, Tensor lu, Tensor pivs)
- variants: function
+- func: _linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)
+ structured: True
dispatch:
- CPU, CUDA: _det_lu_based_helper
+ CPU, CUDA: _linalg_det_out
-- func: _det_lu_based_helper_backward_helper(Tensor det_grad, Tensor det, Tensor self, Tensor lu, Tensor pivs) -> Tensor
+- func: linalg_det(Tensor A) -> Tensor
+ python_module: linalg
variants: function
- dispatch:
- CPU, CUDA: _det_lu_based_helper_backward_helper
+- func: linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
+ python_module: linalg
+
+# torch.det, alias for torch.linalg.det
+- func: det(Tensor self) -> Tensor
+ variants: function, method
+
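As the comment above says, `torch.det` is an alias for `torch.linalg.det`; the private `_linalg_det` additionally returns the LU factors and pivots it computes along the way, presumably so callers can reuse the decomposition. A quick check of the public alias:

    import torch

    A = torch.randn(3, 3, dtype=torch.float64)

    # torch.det and torch.linalg.det refer to the same operator.
    torch.testing.assert_close(torch.det(A), torch.linalg.det(A))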
- func: linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)
structured_delegate: linalg_ldl_factor_ex.out
python_module: linalg
variants: function
@@ -11553,42 +12240,64 @@
- func: linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
python_module: linalg
variants: function
dispatch:
CompositeExplicitAutograd: linalg_lstsq
+ tags: dynamic_output_shape
- func: linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
python_module: linalg
variants: function
dispatch:
CPU, CUDA: linalg_lstsq_out
+ tags: dynamic_output_shape
# torch.linalg.matmul, alias for torch.matmul
- func: linalg_matmul(Tensor self, Tensor other) -> Tensor
python_module: linalg
variants: function
- func: linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
+- func: linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor
+ python_module: linalg
+ variants: function
+
+- func: linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
+ python_module: linalg
+
- func: linalg_matrix_exp(Tensor self) -> Tensor
python_module: linalg
variants: function
dispatch:
CPU, CUDA: linalg_matrix_exp
+ autogen: linalg_matrix_exp.out
-- func: linalg_slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
- python_module: linalg
- variants: function
+- func: _linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)
+ structured_delegate: _linalg_slogdet.sign
+
+- func: _linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)
+ structured: True
dispatch:
- CPU, CUDA: linalg_slogdet
+ CPU, CUDA: _linalg_slogdet_out
-- func: linalg_slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
+- func: linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)
python_module: linalg
- dispatch:
- CPU, CUDA: linalg_slogdet_out
+- func: linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
+ python_module: linalg
+
+- func: slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
+ variants: function, method
+
+- func: slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
+ variants: function
+
+- func: logdet(Tensor self) -> Tensor
+ variants: function, method
+
- func: linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)
python_module: linalg
variants: function
dispatch:
CPU, CUDA: linalg_eig
@@ -11602,29 +12311,31 @@
python_module: linalg
- func: linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
+# This function exposes the `compute_v` flag, which is then used to implement `linalg.eigh` and
+# `linalg.eigvalsh` as composite functions that call this one
+- func: _linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)
+ structured_delegate: _linalg_eigh.eigenvalues
+
+- func: _linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+ structured: True
+ dispatch:
+ CPU, CUDA: _linalg_eigh_out
+
- func: linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors)
python_module: linalg
- variants: function
- dispatch:
- CPU, CUDA: linalg_eigh
- func: linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
python_module: linalg
- dispatch:
- CPU, CUDA: linalg_eigh_out
- func: linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor
python_module: linalg
- variants: function
- func: linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
- dispatch:
- CPU, CUDA: linalg_eigvalsh_out
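Per the comment above, `_linalg_eigh` exposes `compute_v` so that `linalg.eigh` (eigenvalues and eigenvectors) and `linalg.eigvalsh` (eigenvalues only) can be thin composites over a single kernel. From the Python side the pair relates as follows (a sketch of the public behavior, not of the internal call path):

    import torch

    A = torch.randn(4, 4, dtype=torch.float64)
    A = A + A.mT  # eigh/eigvalsh expect a symmetric (Hermitian) input

    w, V = torch.linalg.eigh(A)        # values and vectors (compute_v=True path)
    w_only = torch.linalg.eigvalsh(A)  # values only (compute_v=False path)

    torch.testing.assert_close(w, w_only)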
- func: linalg_householder_product(Tensor input, Tensor tau) -> Tensor
python_module: linalg
variants: function
dispatch:
@@ -11633,37 +12344,31 @@
- func: linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
dispatch:
CPU, CUDA: linalg_householder_product_out
-- func: _linalg_inv_out_helper_(Tensor(a!) self, Tensor(b!) infos_lu, Tensor(c!) infos_getri) -> Tensor(a!)
- variants: function
- dispatch:
- CPU: _linalg_inv_out_helper_cpu
- CUDA: _linalg_inv_out_helper_cuda
- autogen: _linalg_inv_out_helper.functional, _linalg_inv_out_helper.out
-
-- func: linalg_inv_ex(Tensor self, *, bool check_errors=False) -> (Tensor inverse, Tensor info)
+- func: linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)
python_module: linalg
- variants: function
- dispatch:
- CompositeExplicitAutograd: linalg_inv_ex
+ structured_delegate: linalg_inv_ex.inverse
-- func: linalg_inv_ex.inverse(Tensor self, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
+- func: linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
python_module: linalg
- variants: function
+ structured: True
dispatch:
- CompositeExplicitAutograd: linalg_inv_ex_out
+ CPU, CUDA: linalg_inv_ex_out
-- func: linalg_inv(Tensor self) -> Tensor
+- func: linalg_inv(Tensor A) -> Tensor
python_module: linalg
- variants: function
-- func: linalg_inv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+- func: linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
- variants: function
+- func: inverse(Tensor self) -> Tensor
+ variants: function, method
+
+- func: inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+
- func: inner(Tensor self, Tensor other) -> Tensor
variants: function, method
- func: inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
@@ -11717,32 +12422,32 @@
- func: linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
# This function exposes the `compute_uv` flag, which is then used to implement `linalg.svd` and
# `linalg.svdvals` as composite functions that call this one
-- func: _linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor Vh)
+- func: _linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
variants: function
structured_delegate: _linalg_svd.U
-- func: _linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
+- func: _linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
structured: True
dispatch:
CPU, CUDA: _linalg_svd_out
-- func: linalg_svd(Tensor A, bool full_matrices=True) -> (Tensor U, Tensor S, Tensor Vh)
+- func: linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
python_module: linalg
variants: function
-- func: linalg_svd.U(Tensor A, bool full_matrices=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
+- func: linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
python_module: linalg
variants: function
-- func: linalg_svdvals(Tensor A) -> Tensor
+- func: linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor
python_module: linalg
variants: function
-- func: linalg_svdvals.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
+- func: linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
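Same pattern as `_linalg_eigh`: `_linalg_svd` exposes `compute_uv` (and, new in this diff, an optional `driver` string) so that `linalg.svd` and `linalg.svdvals` are composites over one kernel. A sketch with the defaults; the `driver` keyword, which selects the backend routine on CUDA inputs, is omitted here:

    import torch

    A = torch.randn(5, 3, dtype=torch.float64)

    U, S, Vh = torch.linalg.svd(A, full_matrices=False)  # compute_uv=True path
    S_only = torch.linalg.svdvals(A)                      # compute_uv=False path

    torch.testing.assert_close(S, S_only)
    torch.testing.assert_close(U @ torch.diag(S) @ Vh, A)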
- func: linalg_cond(Tensor self, Scalar? p=None) -> Tensor
python_module: linalg
@@ -11762,11 +12467,13 @@
- func: linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
python_module: linalg
variants: function
dispatch:
- CompositeExplicitAutograd: linalg_pinv
+ # calls svd, which calls mH() (view op)
+ # also calls narrow()
+ CompositeExplicitAutogradNonFunctional: linalg_pinv
- func: linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
dispatch:
@@ -11796,21 +12503,30 @@
- func: linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
-- func: linalg_solve(Tensor input, Tensor other) -> Tensor
- python_module: linalg
- variants: function
+- func: _linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)
+ structured_delegate: _linalg_solve_ex.result
+
+- func: _linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)
+ structured: True
dispatch:
- CPU, CUDA: linalg_solve
+ CPU, CUDA: _linalg_solve_ex_out
-- func: linalg_solve.out(Tensor input, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+- func: linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)
python_module: linalg
- dispatch:
- CPU, CUDA: linalg_solve_out
+- func: linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
+ python_module: linalg
+
+- func: linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor
+ python_module: linalg
+
+- func: linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
+ python_module: linalg
+
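`linalg_solve` now routes through the structured `_linalg_solve_ex` and, per the new schema, accepts a `left` flag: `left=True` (the default) solves A X = B, while `left=False` solves X A = B. A short sketch, assuming a PyTorch build that already ships this signature:

    import torch

    A = torch.randn(3, 3, dtype=torch.float64)
    B = torch.randn(3, 3, dtype=torch.float64)

    X_left = torch.linalg.solve(A, B)               # solves A @ X = B
    X_right = torch.linalg.solve(A, B, left=False)  # solves X @ A = B

    torch.testing.assert_close(A @ X_left, B)
    torch.testing.assert_close(X_right @ A, B)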
- func: linalg_tensorinv(Tensor self, int ind=2) -> Tensor
python_module: linalg
variants: function
- func: linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)
@@ -11826,25 +12542,18 @@
variants: function
- func: linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)
python_module: linalg
variants: function
- dispatch:
- CompositeExplicitAutograd: linalg_qr
+ structured_delegate: linalg_qr.out
- func: linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
python_module: linalg
- variants: function
+ structured: True
dispatch:
- CompositeExplicitAutograd: linalg_qr_out
+ CPU, CUDA: linalg_qr_out
-- func: _linalg_qr_helper(Tensor self, str mode) -> (Tensor, Tensor)
- variants: function
- dispatch:
- CPU: _linalg_qr_helper_default
- CUDA: _linalg_qr_helper_cuda
-
- func: linalg_matrix_power(Tensor self, int n) -> Tensor
python_module: linalg
- func: linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
@@ -11887,31 +12596,45 @@
python_module: linalg
- func: linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
+## Functions related to the `torch.nested` namespace
+# Note [nested namespace binding]
+# Functions in the nested python module should have their names start with
+# "nested_" underscore and be bound to the desired Python name in
+# torch/nested/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/nested.h.
+# The "nested_" names should be hidden from the user and not documented.
+
+- func: nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
+ python_module: nested
+ variants: function
+
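As the note above describes, schema names with the `nested_` prefix are re-exported under friendlier names, so `nested_to_padded_tensor` surfaces as `torch.nested.to_padded_tensor` in Python. A minimal sketch, assuming a build where the `torch.nested` namespace is available:

    import torch

    nt = torch.nested.nested_tensor([torch.randn(2, 3), torch.randn(4, 3)])

    # schema: nested_to_padded_tensor  ->  Python: torch.nested.to_padded_tensor
    padded = torch.nested.to_padded_tensor(nt, padding=0.0)
    print(padded.shape)  # torch.Size([2, 4, 3])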
## Functions that are only for testing
# They are undocumented and should not be used outside of tests.
- func: _test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor
# Note: this function is only for testing.
- func: _test_optional_intlist(Tensor values, int[]? addends) -> Tensor
python_module: nn
dispatch:
CPU: _test_optional_intlist
+ autogen: _test_optional_intlist.out
# Note: this function is only for testing.
- func: _test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor
python_module: nn
dispatch:
CPU: _test_optional_intlist
+ autogen: _test_optional_filled_intlist.out
# Note: this function is only for testing.
- func: _test_optional_floatlist(Tensor values, float[]? addends) -> Tensor
python_module: nn
dispatch:
CPU: _test_optional_floatlist
+ autogen: _test_optional_floatlist.out
# Note: this function is only for testing.
- func: _test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor
python_module: nn
@@ -11927,20 +12650,49 @@
# Note: this function is only for testing.
- func: _test_warn_in_autograd(Tensor self) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: _test_warn_in_autograd
+ autogen: _test_warn_in_autograd.out
-- func: segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor
+# Note: this function is only for testing.
+- func: _test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor
+ dispatch:
+ # the NestedTensor keys are necessary because NestedTensor has been removed
+ # from the CompositeExplicitAutograd keyset; see Note [NestedTensor Not Included in Backend Keys]
+ CompositeExplicitAutograd, NestedTensorCPU, NestedTensorCUDA: _test_autograd_multiple_dispatch_fullcoverage
+ autogen: _test_autograd_multiple_dispatch.fullcoverage_out
+
+# Note: this function is only for testing.
+- func: _test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor
+ dispatch:
+ CompositeImplicitAutograd, NestedTensorCPU, NestedTensorCUDA: _test_autograd_multiple_dispatch_ntonly
+
+# Note: this function is only for testing.
+- func: _test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)
+ dispatch:
+ CompositeExplicitAutograd: _test_autograd_multiple_dispatch_view
+
+# Note: this function is only for testing.
+- func: _test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor
variants: function
dispatch:
+ CompositeExplicitAutogradNonFunctional: _test_autograd_multiple_dispatch_view_copy
+ tags: view_copy
+ autogen: _test_autograd_multiple_dispatch_view_copy.out
+
+- func: segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor
+ variants: function
+ dispatch:
CPU, CUDA: segment_reduce_kernel
+ autogen: segment_reduce.out
-- func: _segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, int axis=0) -> Tensor
+- func: _segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor
variants: function
dispatch:
CPU, CUDA: _segment_reduce_backward_kernel
+ autogen: _segment_reduce_backward.out
- func: pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor
python_module: nn
variants: function
@@ -11950,227 +12702,226 @@
- func: unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]
variants: function
python_module: nn
-- func: nested_tensor(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: _nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
variants: function
+ dispatch:
+ CompositeExplicitAutograd: _nested_tensor_from_tensor_list
+ autogen: _nested_tensor_from_tensor_list.out
- func: _fw_primal_copy(Tensor self, int level) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: _fw_primal_copy
+ CompositeExplicitAutogradNonFunctional: _fw_primal_copy
tags: view_copy
- func: _make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: _make_dual_copy
+ CompositeExplicitAutogradNonFunctional: _make_dual_copy
tags: view_copy
- func: view_as_real_copy(Tensor self) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: view_as_real_copy
+ CompositeExplicitAutogradNonFunctional: view_as_real_copy
tags: view_copy
- func: view_as_complex_copy(Tensor self) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: view_as_complex_copy
+ CompositeExplicitAutogradNonFunctional: view_as_complex_copy
tags: view_copy
- func: _conj_copy(Tensor self) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: _conj_copy
+ CompositeExplicitAutogradNonFunctional: _conj_copy
tags: view_copy
- func: _neg_view_copy(Tensor self) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: _neg_view_copy
+ CompositeExplicitAutogradNonFunctional: _neg_view_copy
tags: view_copy
-- func: as_strided_copy(Tensor self, int[] size, int[] stride, int? storage_offset=None) -> Tensor
+- func: as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: as_strided_copy
+ CompositeExplicitAutogradNonFunctional: as_strided_copy
tags: view_copy
- func: _sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: _sparse_broadcast_to_copy
+ CompositeExplicitAutogradNonFunctional: _sparse_broadcast_to_copy
tags: view_copy
- func: diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: diagonal_copy
+ CompositeExplicitAutogradNonFunctional: diagonal_copy
tags: view_copy
-- func: expand_copy(Tensor self, int[] size, *, bool implicit=False) -> Tensor
+- func: expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: expand_copy
+ CompositeExplicitAutogradNonFunctional: expand_copy
tags: view_copy
-- func: expand_copy.SymInt(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
- variants: function
- dispatch:
- CompositeExplicitAutograd: expand_copy_SymInt
- tags: view_copy
-
- func: permute_copy(Tensor self, int[] dims) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: permute_copy
+ CompositeExplicitAutogradNonFunctional: permute_copy
tags: view_copy
-- func: _reshape_alias_copy(Tensor self, int[] size, int[] stride) -> Tensor
+- func: _reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: _reshape_alias_copy
+ CompositeExplicitAutogradNonFunctional: _reshape_alias_copy
tags: view_copy
- func: select_copy.int(Tensor self, int dim, int index) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: select_copy_int
+ CompositeExplicitAutogradNonFunctional: select_copy_int
tags: view_copy
- func: detach_copy(Tensor self) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: detach_copy
+ CompositeExplicitAutogradNonFunctional: detach_copy
tags: view_copy
-- func: slice_copy.Tensor(Tensor self, int dim=0, int? start=None, int? end=None, int step=1) -> Tensor
+- func: slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: slice_copy_Tensor
+ CompositeExplicitAutogradNonFunctional: slice_copy_Tensor
tags: view_copy
- func: split_copy.Tensor(Tensor self, int split_size, int dim=0) -> Tensor[]
variants: function
dispatch:
- CompositeExplicitAutograd: split_copy_Tensor
+ CompositeExplicitAutogradNonFunctional: split_copy_Tensor
tags: view_copy
- func: split_with_sizes_copy(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]
variants: function
dispatch:
- CompositeExplicitAutograd: split_with_sizes_copy
+ CompositeExplicitAutogradNonFunctional: split_with_sizes_copy
tags: view_copy
- func: squeeze_copy(Tensor self) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: squeeze_copy
+ CompositeExplicitAutogradNonFunctional: squeeze_copy
tags: view_copy
- func: squeeze_copy.dim(Tensor self, int dim) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: squeeze_copy_dim
+ CompositeExplicitAutogradNonFunctional: squeeze_copy_dim
tags: view_copy
- func: t_copy(Tensor self) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: t_copy
+ CompositeExplicitAutogradNonFunctional: t_copy
tags: view_copy
- func: transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: transpose_copy_int
+ CompositeExplicitAutogradNonFunctional: transpose_copy_int
tags: view_copy
- func: unsqueeze_copy(Tensor self, int dim) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: unsqueeze_copy
+ CompositeExplicitAutogradNonFunctional: unsqueeze_copy
tags: view_copy
- func: _indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: _indices_copy
+ CompositeExplicitAutogradNonFunctional: _indices_copy
tags: view_copy
- func: _values_copy(Tensor self) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: _values_copy
+ CompositeExplicitAutogradNonFunctional: _values_copy
tags: view_copy
- func: indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: indices_copy
+ CompositeExplicitAutogradNonFunctional: indices_copy
tags: view_copy
- func: values_copy(Tensor self) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: values_copy
+ CompositeExplicitAutogradNonFunctional: values_copy
tags: view_copy
- func: crow_indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: crow_indices_copy
+ CompositeExplicitAutogradNonFunctional: crow_indices_copy
tags: view_copy
- func: col_indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: col_indices_copy
+ CompositeExplicitAutogradNonFunctional: col_indices_copy
tags: view_copy
- func: ccol_indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: ccol_indices_copy
tags: view_copy
+ autogen: ccol_indices_copy.out
- func: row_indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: row_indices_copy
tags: view_copy
+ autogen: row_indices_copy.out
- func: unbind_copy.int(Tensor self, int dim=0) -> Tensor[]
variants: function
dispatch:
- CompositeExplicitAutograd: unbind_copy_int
+ CompositeExplicitAutogradNonFunctional: unbind_copy_int
tags: view_copy
-- func: view_copy(Tensor self, int[] size) -> Tensor
+- func: view_copy(Tensor self, SymInt[] size) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: view_copy
+ CompositeExplicitAutogradNonFunctional: view_copy_symint
tags: view_copy
- func: view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: view_copy_dtype
+ CompositeExplicitAutogradNonFunctional: view_copy_dtype
tags: view_copy
- func: unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: unfold_copy
+ CompositeExplicitAutogradNonFunctional: unfold_copy
tags: view_copy
- func: alias_copy(Tensor self) -> Tensor
variants: function
dispatch:
- CompositeExplicitAutograd: alias_copy
+ CompositeExplicitAutogradNonFunctional: alias_copy
tags: view_copy
- func: _fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
@@ -12205,14 +12956,14 @@
variants: function
dispatch:
CompositeExplicitAutograd: _neg_view_copy_out
-- func: as_strided_copy.out(Tensor self, int[] size, int[] stride, int? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+- func: as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
- CompositeExplicitAutograd: as_strided_copy_out
+ CompositeExplicitAutograd: as_strided_copy_out_symint
- func: _sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
@@ -12223,29 +12974,23 @@
variants: function
dispatch:
CompositeExplicitAutograd: diagonal_copy_out
-- func: expand_copy.SymInt_out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
+- func: expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
- CompositeExplicitAutograd: expand_copy_SymInt_out
+ CompositeExplicitAutograd: expand_copy_out_symint
-- func: expand_copy.out(Tensor self, int[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: expand_copy_out
-
-
- func: permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
CompositeExplicitAutograd: permute_copy_out
-- func: _reshape_alias_copy.out(Tensor self, int[] size, int[] stride, *, Tensor(a!) out) -> Tensor(a!)
+- func: _reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
CompositeExplicitAutograd: _reshape_alias_copy_out
@@ -12259,11 +13004,11 @@
variants: function
dispatch:
CompositeExplicitAutograd: detach_copy_out
-- func: slice_copy.Tensor_out(Tensor self, int dim=0, int? start=None, int? end=None, int step=1, *, Tensor(a!) out) -> Tensor(a!)
+- func: slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
CompositeExplicitAutograd: slice_copy_Tensor_out
@@ -12349,14 +13094,14 @@
variants: function
dispatch:
CompositeExplicitAutograd: unbind_copy_int_out
-- func: view_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)
+- func: view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
- CompositeExplicitAutograd: view_copy_out
+ CompositeExplicitAutograd: view_copy_out_symint
- func: view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
@@ -12377,21 +13122,691 @@
- func: to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
variants: method
dispatch:
NestedTensorCPU: NestedTensor_to_padded_tensor_generic
NestedTensorCUDA: NestedTensor_to_padded_tensor_cuda
+ autogen: to_padded_tensor.out
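
to_padded_tensor gains an autogenerated out overload here. For context, a hedged usage sketch of the existing method on a nested tensor (assuming the torch.nested prototype API from torch 1.13):

# Sketch only: pad the ragged components of a nested tensor into one dense tensor.
import torch

nt = torch.nested.nested_tensor([torch.ones(2, 3), torch.ones(4, 3)])
padded = nt.to_padded_tensor(0.0)  # shape (2, 4, 3); the shorter component is zero-padded
print(padded.shape)
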
+- func: _nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor
+ dispatch:
+ NestedTensorCPU: NestedTensor_softmax_dropout
+ NestedTensorCUDA: NestedTensor_softmax_dropout_cuda
+
- func: _nested_tensor_layer_norm(Tensor self, Tensor? weight, Tensor? bias, float eps) -> Tensor
variants: method
dispatch:
NestedTensorCPU, NestedTensorCUDA: NestedTensor_layer_norm
+ autogen: _nested_tensor_layer_norm.out
# Apparently, putting "forward" in the name will cause Python bindings to be skipped, so "fwd" it is.
-- func: _transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None) -> Tensor
+- func: _transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor
variants: function
dispatch:
CPU, CUDA, NestedTensorCPU, NestedTensorCUDA: transformer_encoder_layer_forward
+ autogen: _transformer_encoder_layer_fwd.out
-- func: _native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True) -> (Tensor, Tensor)
+- func: _native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)
variants: function
dispatch:
CPU, CUDA, NestedTensorCPU, NestedTensorCUDA: native_multi_head_attention
+ autogen: _native_multi_head_attention.out
+
+- func: _scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)
+ python_module: nn
+ variants: function
+ autogen: _scaled_dot_product_attention.out
+
+# Register the math kernel for CPU
+- func: _scaled_dot_product_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)
+ variants: function
+ dispatch:
+ CUDA: _scaled_dot_product_attention_forward_cuda
+ CPU: _scaled_dot_product_attention_forward_math
+ NestedTensorCPU, NestedTensorCUDA: _scaled_dot_product_attention_forward_math
+ Meta: _scaled_dot_product_attention_forward_math
+
+- func: _scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)
+ variants: function
+
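
The entries above split _scaled_dot_product_attention into a public wrapper, a forward dispatcher, and a math fallback. A simplified plain-torch sketch of what the math fallback computes (not the actual kernel; masking and dropout are reduced to the essentials):

# Sketch only: softmax(Q K^T / sqrt(E) + mask) V, returning (output, attention weights).
import math
import torch

def sdpa_math(query, key, value, attn_mask=None, dropout_p=0.0):
    scores = query @ key.transpose(-2, -1) / math.sqrt(query.size(-1))
    if attn_mask is not None:
        scores = scores + attn_mask
    attn = torch.softmax(scores, dim=-1)
    if dropout_p > 0.0:
        attn = torch.dropout(attn, dropout_p, train=True)
    return attn @ value, attn

q = k = v = torch.randn(2, 4, 8)
out, weights = sdpa_math(q, k, v)
print(out.shape, weights.shape)  # torch.Size([2, 4, 8]) torch.Size([2, 4, 4])
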
+- func: _triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor
+ variants: function
+ dispatch:
+ CUDA: triton_scaled_dot_attention
+ autogen: _triton_scaled_dot_attention.out
+
+- func: _triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor
+ variants: function
+ dispatch:
+ CUDA: triton_multi_head_attention
+ autogen: _triton_multi_head_attention.out
+
+- func: special_airy_ai(Tensor x) -> Tensor
+ python_module: special
+ structured_delegate: special_airy_ai.out
+ variants: function
+
+- func: special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CPU, CUDA: special_airy_ai_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: _flash_scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal) -> Tensor
+ variants: function
+ dispatch:
+ CUDA: flash_scaled_dot_product_attention
+
+- func: _transformer_decoder_only_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None) -> (Tensor, Tensor, Tensor)
+ variants: function
+ dispatch:
+ CPU, CUDA, NestedTensorCPU, NestedTensorCUDA: transformer_decoder_only_layer_forward
+ autogen: _transformer_decoder_only_layer_fwd.out
+
+- func: _native_decoder_only_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True) -> (Tensor, Tensor, Tensor, Tensor)
+ variants: function
+ dispatch:
+ CPU, CUDA, NestedTensorCPU, NestedTensorCUDA: native_decoder_only_multi_head_attention
+ autogen: _native_decoder_only_multi_head_attention.out
+
+- func: special_bessel_j0(Tensor self) -> Tensor
+ python_module: special
+ structured_delegate: special_bessel_j0.out
+ variants: function
+
+- func: special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CPU, CUDA: special_bessel_j0_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_bessel_j1(Tensor self) -> Tensor
+ python_module: special
+ structured_delegate: special_bessel_j1.out
+ variants: function
+
+- func: special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CPU, CUDA: special_bessel_j1_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_bessel_y0(Tensor self) -> Tensor
+ python_module: special
+ structured_delegate: special_bessel_y0.out
+ variants: function
+
+- func: special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CPU, CUDA: special_bessel_y0_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_bessel_y1(Tensor self) -> Tensor
+ python_module: special
+ structured_delegate: special_bessel_y1.out
+ variants: function
+
+- func: special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CPU, CUDA: special_bessel_y1_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
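
The special_bessel_* entries above add structured elementwise kernels for Bessel functions of the first and second kind. A hedged usage sketch, assuming they surface as torch.special.bessel_* in Python as in torch 1.13:

# Sketch only: elementwise Bessel functions on a positive grid (Y0/Y1 need x > 0).
import torch

x = torch.linspace(0.5, 5.0, 4)
print(torch.special.bessel_j0(x))  # J0(x)
print(torch.special.bessel_y1(x))  # Y1(x)
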
+- func: special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ structured_delegate: special_chebyshev_polynomial_t.out
+ variants: function
+
+- func: special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ dispatch:
+ CPU, CUDA: special_chebyshev_polynomial_t_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: special_chebyshev_polynomial_t_out
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
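
The special_chebyshev_polynomial_t group above registers Tensor/Scalar overloads for Chebyshev polynomials of the first kind. A hedged numerical check of the defining recurrence T_{n+1}(x) = 2x*T_n(x) - T_{n-1}(x), assuming the torch.special.chebyshev_polynomial_t binding from torch 1.13:

# Sketch only: verify T_4(x) == 2 x T_3(x) - T_2(x) on a grid in [-1, 1].
import torch

x = torch.linspace(-1.0, 1.0, 11)
T = lambda n: torch.special.chebyshev_polynomial_t(x, n)  # should resolve to the n_scalar overload
print(torch.allclose(T(4), 2 * x * T(3) - T(2)))  # True
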
+- func: special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ structured_delegate: special_chebyshev_polynomial_u.out
+ variants: function
+
+- func: special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ dispatch:
+ CPU, CUDA: special_chebyshev_polynomial_u_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: special_chebyshev_polynomial_u_out
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ structured_delegate: special_chebyshev_polynomial_v.out
+ variants: function
+
+- func: special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ dispatch:
+ CPU, CUDA: special_chebyshev_polynomial_v_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: special_chebyshev_polynomial_v_out
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ structured_delegate: special_chebyshev_polynomial_w.out
+ variants: function
+
+- func: special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ dispatch:
+ CPU, CUDA: special_chebyshev_polynomial_w_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: special_chebyshev_polynomial_w_out
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ structured_delegate: special_hermite_polynomial_h.out
+ variants: function
+
+- func: special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ dispatch:
+ CPU, CUDA: special_hermite_polynomial_h_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: special_hermite_polynomial_h_out
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ structured_delegate: special_hermite_polynomial_he.out
+ variants: function
+
+- func: special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ dispatch:
+ CPU, CUDA: special_hermite_polynomial_he_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: special_hermite_polynomial_he_out
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ structured_delegate: special_laguerre_polynomial_l.out
+ variants: function
+
+- func: special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ dispatch:
+ CPU, CUDA: special_laguerre_polynomial_l_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: special_laguerre_polynomial_l_out
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ structured_delegate: special_legendre_polynomial_p.out
+ variants: function
+
+- func: special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ dispatch:
+ CPU, CUDA: special_legendre_polynomial_p_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: special_legendre_polynomial_p_out
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_modified_bessel_i0(Tensor self) -> Tensor
+ python_module: special
+ structured_delegate: special_modified_bessel_i0.out
+ variants: function
+
+- func: special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CPU, CUDA: special_modified_bessel_i0_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_modified_bessel_i1(Tensor self) -> Tensor
+ python_module: special
+ structured_delegate: special_modified_bessel_i1.out
+ variants: function
+
+- func: special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CPU, CUDA: special_modified_bessel_i1_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_modified_bessel_k0(Tensor self) -> Tensor
+ python_module: special
+ structured_delegate: special_modified_bessel_k0.out
+ variants: function
+
+- func: special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CPU, CUDA: special_modified_bessel_k0_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_modified_bessel_k1(Tensor self) -> Tensor
+ python_module: special
+ structured_delegate: special_modified_bessel_k1.out
+ variants: function
+
+- func: special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CPU, CUDA: special_modified_bessel_k1_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_scaled_modified_bessel_k0(Tensor x) -> Tensor
+ python_module: special
+ structured_delegate: special_scaled_modified_bessel_k0.out
+ variants: function
+
+- func: special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CPU, CUDA: special_scaled_modified_bessel_k0_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_scaled_modified_bessel_k1(Tensor x) -> Tensor
+ python_module: special
+ structured_delegate: special_scaled_modified_bessel_k1.out
+ variants: function
+
+- func: special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CPU, CUDA: special_scaled_modified_bessel_k1_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ structured_delegate: special_shifted_chebyshev_polynomial_t.out
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ dispatch:
+ CPU, CUDA: special_shifted_chebyshev_polynomial_t_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_t_out
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ structured_delegate: special_shifted_chebyshev_polynomial_u.out
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ dispatch:
+ CPU, CUDA: special_shifted_chebyshev_polynomial_u_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_u_out
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ structured_delegate: special_shifted_chebyshev_polynomial_v.out
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ dispatch:
+ CPU, CUDA: special_shifted_chebyshev_polynomial_v_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_v_out
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ structured_delegate: special_shifted_chebyshev_polynomial_w.out
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ dispatch:
+ CPU, CUDA: special_shifted_chebyshev_polynomial_w_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_w_out
+ device_check: NoCheck
+ python_module: special
+ variants: function
+
+- func: special_spherical_bessel_j0(Tensor x) -> Tensor
+ python_module: special
+ structured_delegate: special_spherical_bessel_j0.out
+ variants: function
+
+- func: special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CPU, CUDA: special_spherical_bessel_j0_out
+ python_module: special
+ structured_inherits: TensorIteratorBase
+ structured: True
+ variants: function
+
+# Aux function used in the test TestPythonDispatch.test_kwarg_only_and_positional_default
+# within test/test_python_dispatch.py
+- func: _foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor
+ dispatch:
+ CPU: foobar
+ autogen: _foobar.out
+
+# Fused Optimizer CUDA kernels.
+- func: _fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
+ # Unlike the "foreach" functions, the tensor lists here must all be on the same device (for now).
+ variants: function
+ dispatch:
+ CUDA: _fused_adam_kernel_cuda_
+ autogen: _fused_adam, _fused_adam.out
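
In practice the fused kernel is reached through the public optimizer API rather than called directly. A hedged sketch, assuming the fused=True flag on torch.optim.Adam introduced in torch 1.13 (CUDA floating-point parameters only):

# Sketch only: opting into the fused CUDA Adam path from the public API.
import torch

model = torch.nn.Linear(4, 4).cuda()
opt = torch.optim.Adam(model.parameters(), lr=1e-3, fused=True)

loss = model(torch.randn(2, 4, device="cuda")).sum()
loss.backward()
opt.step()        # steps all parameters through the fused kernel
opt.zero_grad()
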