# See README.md in this directory for more guidance

# *********NB: _cast_* operators are DEPRECATED and will be removed
# eventually. These were previously used before TorchScript IR supported
# representing ScalarType's. They are now superseded by usage of
# `aten::to()`. The ops remain here for backward compatibility purposes.

# DEPRECATED. DO NOT USE
- func: _cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# DEPRECATED. DO NOT USE
- func: _cast_Char(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# DEPRECATED. DO NOT USE
- func: _cast_Double(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# DEPRECATED. DO NOT USE
- func: _cast_Float(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# DEPRECATED. DO NOT USE
- func: _cast_Int(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# DEPRECATED. DO NOT USE
- func: _cast_Long(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# DEPRECATED. DO NOT USE
- func: _cast_Short(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# DEPRECATED. DO NOT USE
- func: _cast_Half(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# Computes the gradient of current tensor w.r.t. graph leaves.
- func: _backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
  manual_cpp_binding: True
  variants: method

# DEPRECATED. Sets the tensor data held by this `Variable` to be the same as
# `new_data`. It requires that `new_data` and `Variable` have compatible tensor
# type, by checking `_has_compatible_shallow_copy_type(this, new_data)`.
#
# This function is deprecated because it doesn't really make sense in a world
# where Variables *are* Tensors (as opposed to them containing tensors, which
# is what the previous interpretation was.)
- func: set_data(Tensor(a!) self, Tensor new_data) -> ()
  manual_cpp_binding: True
  variants: method

- func: data(Tensor self) -> Tensor
  manual_cpp_binding: True
  variants: method

# True if this `Variable` is a leaf and thus does not have a `grad_fn`.
- func: is_leaf(Tensor self) -> bool
  manual_cpp_binding: True
  variants: method

# Returns the output index of this variable from the forward operation that
# produced it. Conversely, it returns the input index of the gradient `Node` to
# which this `Variable` is connected (because in the gradient computation,
# inputs and outputs switch meaning). For example:
#
#   y0, y1, y2 = f(x)
#   assert y0.output_nr == 0
#   assert y1.output_nr == 1
#   assert y2.output_nr == 2
#
- func: output_nr(Tensor self) -> int
  manual_cpp_binding: True
  variants: method

- func: _version(Tensor self) -> int
  manual_cpp_binding: True
  variants: method

- func: requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
  manual_cpp_binding: True
  variants: method

# Enables .grad attribute for non-leaf Tensors.
- func: retain_grad(Tensor(a!) self) -> ()
  manual_cpp_binding: True
  variants: method
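# Hedged, illustrative sketch (not part of this schema) of how the autograd
# bookkeeping accessors above surface in Python:
#
#   import torch
#   x = torch.randn(3, requires_grad=True)
#   y = (x * 2).sum()
#   assert x.is_leaf and not y.is_leaf
#   y.retain_grad()          # allow .grad to be populated on the non-leaf `y`
#   y.backward()
#   assert x.grad is not None and y.grad is not None
#   assert x._version == 0   # no in-place op has bumped the version counter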
- func: retains_grad(Tensor self) -> bool
  manual_cpp_binding: True
  variants: method

- func: _fw_primal(Tensor(a) self, int level) -> Tensor(a)
  variants: method
  dispatch:
    CompositeExplicitAutograd: _fw_primal

- func: _make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
  variants: function
  dispatch:
    CompositeExplicitAutograd: _make_dual

- func: _unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
  variants: function

# NOTE: [_new_zeros_with_same_feature_meta]
# This function creates a new tensor with the layout and TensorOptions
# of `other`, but also takes into account the batch dimensions of `self`.
#
# This function has a couple extra constraints because it is also used for `jvp`
# in functorch.
# - is used for forward AD because there is the restriction
#   that the primal and tangent must have the same layout
# - We cannot assume that `self` and `other` have the same sizes or even dim,
#   because in the inplace-over-view case, `other` is the base tensor and
#   `self` is the forward grad with respect to the view, which can have an
#   entirely different shape
# - takes the number of batch dims for `self` because we also handle
#   some batching logic. We handle that here instead of a batching rule because
#   we'd like to avoid calling as_strided in the batching rule (so as to enable
#   nested vmap in functorch).
# - needs to be CompositeExplicitAutograd for jvp support in functorch.
#   functorch currently relies on TensorWrapper, which does not have storage;
#   CompositeExplicitAutograd makes sure the TensorWrapper is unwrapped.
# - this function may eventually take on another int argument to store the
#   number of batch dims for `other` once we support that use case
- func: _new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: _new_zeros_with_same_feature_meta

# This function compares the storage numel of self with that of other, where
# storage numel is computed as: `other.storage().nbytes() / other.itemsize()`.
# We create this function for composite compliance purposes. The batching rule
# always returns true because vmapped as_strided does not support accessing
# storage locations not indexable by the input tensor.
# See the note above for more information.
- func: _has_same_storage_numel(Tensor self, Tensor other) -> bool
  variants: function
  dispatch:
    CompositeExplicitAutograd: _has_same_storage_numel

- func: rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
  variants: method

- func: rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
  variants: method
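# Hedged usage sketch (not part of this schema): the forward-AD primitives
# `_make_dual`/`_unpack_dual` above are normally reached through the public
# torch.autograd.forward_ad wrappers, roughly:
#
#   import torch
#   import torch.autograd.forward_ad as fwAD
#   with fwAD.dual_level():
#       dual = fwAD.make_dual(torch.randn(3), torch.ones(3))  # lowers to _make_dual
#       primal, tangent = fwAD.unpack_dual(dual)              # lowers to _unpack_dual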
- func: align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)
  variants: method

- func: align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
  variants: method

- func: align_as(Tensor self, Tensor other) -> Tensor
  variants: method

- func: align_tensors(Tensor[] tensors) -> Tensor[]

# Not assert because it's a keyword; not Assert because FX already
# took that syntax
# TODO: need to specify this is side-effectful somehow
- func: _assert_async(Tensor self) -> ()
  dispatch:
    CPU: _assert_async_cpu
    CUDA: _assert_async_cuda

- func: refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
  variants: method

- func: _use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool
  device_check: NoCheck  # Tensor arguments allowed to be on different devices, see also _cudnn_ctc_loss
  dispatch:
    CUDA: _use_cudnn_ctc_loss

- func: _cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
  device_check: NoCheck  # log_probs is expected to be on CUDA while targets is expected to be on CPU
  dispatch:
    CUDA: _cudnn_ctc_loss

- func: _use_cudnn_rnn_flatten_weight() -> bool

- func: _cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, int input_size, int mode, int hidden_size, int proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
  dispatch:
    CUDA: _cudnn_rnn_flatten_weight

- func: _cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, int hidden_size, int proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
  # rnn_tanh may or may not redispatch to _cudnn_rnn based on algorithm and build. Thus it might hit dispatch or kernel device check.
  # Disable dispatch time device check for consistent behavior.
  device_check: NoCheck
  dispatch:
    CUDA: _cudnn_rnn

- func: _cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
  dispatch:
    CUDA: _cudnn_rnn_backward

- func: _cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
  dispatch:
    CUDA: _cudnn_init_dropout_state

- func: _debug_has_internal_overlap(Tensor self) -> int
  variants: function

- func: _fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
  variants: function
  dispatch:
    CUDA: fused_dropout_cuda

- func: _masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
  variants: function
  dispatch:
    CUDA: masked_scale_cuda

- func: native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)
  variants: function
  dispatch:
    CPU: native_dropout_cpu
    CUDA: native_dropout_cuda
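# Rough illustration (an assumption about the inverted-dropout contract, not
# taken from this file): in training mode `native_dropout` returns the scaled
# output together with the boolean keep-mask, e.g.
#
#   import torch
#   out, mask = torch.ops.aten.native_dropout(torch.ones(4), 0.5, True)
#   # kept elements are scaled by 1 / (1 - p); dropped elements are zero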
- func: native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor
  dispatch:
    CPU: native_dropout_backward_cpu
    CUDA: native_dropout_backward_cuda

- func: _sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)

- func: _sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)

- func: _sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)

- func: _sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)

- func: _reshape_from_tensor(Tensor self, Tensor shape) -> Tensor

- func: _shape_as_tensor(Tensor self) -> Tensor

- func: dropout(Tensor input, float p, bool train) -> Tensor

- func: dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)

- func: feature_dropout(Tensor input, float p, bool train) -> Tensor

- func: feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)

- func: alpha_dropout(Tensor input, float p, bool train) -> Tensor

- func: alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)

- func: feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor

- func: feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)

- func: abs(Tensor self) -> Tensor
  device_check: NoCheck   # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: abs
    SparseCPU, SparseCUDA: abs_sparse
    SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr

- func: abs_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck   # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: abs_
    SparseCPU, SparseCUDA: abs_sparse_
    SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr_

- func: abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck   # TensorIterator
  dispatch:
    CPU, CUDA: abs_out
    SparseCPU, SparseCUDA: abs_sparse_out
    SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr_out

# Note [Adding an alias]
# To add an alias, do the following:
#
# 1) Copy the original function's native_functions.yaml entry, but replace the
#    original function's name with the alias's name and delete any dispatch
#    keys for the alias. Specifying a dispatch key will prevent
#    autograd from recording the operations the alias performs, which
#    will stop it from "inheriting" the original operation's autograd behavior.
# 2) Implement the corresponding functions and have them redispatch to the
#    original function.
# 3) Add docstrings to the new function that reference the original function,
#    and document the method as usual (if it exists.)
#    (See torch/_torch_docs.py and docs/source/torch.rst if adding a function,
#    torch/_tensor_docs.py and docs/source/tensors.rst if adding a method,
#    or module-specific doc bindings (like torch/linalg/__init__.py) if
#    adding an alias in a namespace.)
# 4) Update torch/overrides.py consistent with the original function.
# 5) Update the alias_map in torch/csrc/jit/passes/normalize_ops.cpp.
# 6) Add an aliases argument to the existing OpInfo/UnaryUfuncInfo, or create a
#    new OpInfo/UnaryUfuncInfo entry, in the op_db list in
#    torch/testing/_internal/common_methods_invocations.py.
#
# See torch.absolute, an alias for torch.abs, as an example.

# Absolute, alias for abs
- func: absolute(Tensor self) -> Tensor
  device_check: NoCheck   # TensorIterator
  variants: function, method

- func: absolute_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator variants: method - func: absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: angle(Tensor self) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CPU, CUDA: angle SparseCsrCPU, SparseCsrCUDA: angle_sparse_csr - func: angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: angle_out SparseCsrCPU, SparseCsrCUDA: angle_sparse_csr_out - func: view_as_real(Tensor(a) self) -> Tensor(a) variants: function dispatch: CPU, CUDA: view_as_real - func: view_as_complex(Tensor(a) self) -> Tensor(a) variants: function dispatch: CPU, CUDA: view_as_complex - func: sgn(Tensor self) -> Tensor variants: function, method structured_delegate: sgn.out dispatch: SparseCPU, SparseCUDA: sgn_sparse SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr - func: sgn_(Tensor(a!) self) -> Tensor(a!) variants: method structured_delegate: sgn.out dispatch: SparseCPU, SparseCUDA: sgn_sparse_ SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr_ - func: sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: sgn_out SparseCPU, SparseCUDA: sgn_sparse_out SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr_out - func: real(Tensor(a) self) -> Tensor(a) device_check: NoCheck # TensorIterator variants: function - func: imag(Tensor(a) self) -> Tensor(a) device_check: NoCheck # TensorIterator variants: function - func: _conj(Tensor(a) self) -> Tensor(a) variants: function, method dispatch: CompositeExplicitAutograd: _conj - func: conj(Tensor(a) self) -> Tensor(a) variants: function, method manual_cpp_binding: True - func: _conj_physical(Tensor self) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: _conj_physical SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr - func: conj_physical(Tensor self) -> Tensor variants: function, method - func: conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: conj_physical_out SparseCPU, SparseCUDA: conj_physical_out_sparse SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr_out - func: conj_physical_(Tensor(a!) self) -> Tensor(a!) variants: function, method dispatch: CompositeExplicitAutograd: conj_physical_ SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr_ - func: resolve_conj(Tensor(a) self) -> Tensor(a) variants: function, method - func: resolve_neg(Tensor(a) self) -> Tensor(a) variants: function, method - func: _neg_view(Tensor(a) self) -> Tensor(a) variants: function, method dispatch: CompositeExplicitAutograd: _neg_view - func: acos(Tensor self) -> Tensor device_check: NoCheck # TensorIterator variants: function, method structured_delegate: acos.out - func: acos_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function, method structured_delegate: acos.out - func: acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: acos_out # arccos, alias of acos - func: arccos(Tensor self) -> Tensor variants: function, method - func: arccos_(Tensor(a!) self) -> Tensor(a!) variants: function, method - func: arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
- func: avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor - func: adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor # Return: (Tensor output, Tensor indices) - func: adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor) - func: add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: add.out variants: function, method dispatch: SparseCPU, SparseCUDA: add_sparse SparseCsrCPU, SparseCsrCUDA: add_sparse_csr MkldnnCPU: mkldnn_add ZeroTensor: add_zerotensor - func: add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method structured_delegate: add.out dispatch: SparseCPU, SparseCUDA: add_sparse_ SparseCsrCPU, SparseCsrCUDA: add_sparse_csr_ MkldnnCPU: mkldnn_add_ - func: add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: add_out SparseCPU: add_out_sparse_cpu SparseCUDA: add_out_sparse_cuda SparseCsrCPU: add_out_sparse_csr_cpu SparseCsrCUDA: add_out_sparse_csr_cuda MkldnnCPU: mkldnn_add_out - func: _add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor variants: function dispatch: CPU: add_relu - func: _add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) variants: function dispatch: CPU: add_relu_ - func: _add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) variants: function dispatch: CPU: add_relu_out - func: _add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor variants: function dispatch: CPU: add_relu - func: _add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) variants: function dispatch: CPU: add_relu_ # For C++ only, until we have conversion from C++ numbers to Tensor - func: add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: add - func: add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: add_ - func: addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor structured_delegate: addmv.out variants: function, method - func: addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) structured_delegate: addmv.out variants: function, method - func: addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) structured: True dispatch: CPU: addmv_out_cpu CUDA: addmv_out_cuda SparseCsrCPU: addmv_out_sparse_csr SparseCsrCUDA: addmv_out_sparse_csr_cuda - func: addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor variants: function, method dispatch: CPU, CUDA: addr CompositeImplicitAutograd: math_addr - func: addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) variants: method dispatch: CompositeExplicitAutograd: addr_ - func: addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
dispatch: CPU, CUDA: addr_out CompositeImplicitAutograd: math_addr_out - func: affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor variants: function dispatch: CompositeExplicitAutograd: affine_grid_generator - func: affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor variants: function - func: all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: all.out variants: function, method - func: all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True precomputed: - dim -> int dim dispatch: CPU, CUDA: all_out - func: all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool variants: function, method - func: any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: any.out variants: function, method - func: any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True precomputed: - dim -> int dim dispatch: CPU, CUDA: any_out - func: any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: arange.start_step(Scalar start, Scalar end, Scalar step, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!) - func: arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, Meta: arange_out CUDA: arange_cuda_out # This function is a temporary hack to allow tracing of arange like constructs with dynamic # bounds on arange. Normal arange is not traceable because it does not take any tensor inputs; # if the range you need is based on another tensor, calling this function directly will # preserve tracing. Get rid of this when arange can directly take tensors for bounds # (so that it can be traced directly). - func: _dim_arange(Tensor like, int dim) -> Tensor - func: argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor structured_delegate: argmax.out device_check: NoCheck # TensorIterator variants: function, method - func: argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) structured: True dispatch: CPU, CUDA: argmax_out - func: argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor structured_delegate: argmin.out device_check: NoCheck # TensorIterator variants: function, method - func: argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
structured: True dispatch: CPU, CUDA: argmin_out - func: acosh(Tensor self) -> Tensor variants: function, method structured_delegate: acosh.out - func: acosh_(Tensor(a!) self) -> Tensor(a!) variants: function, method structured_delegate: acosh.out - func: acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: acosh_out # arccosh, alias for acosh - func: arccosh(Tensor self) -> Tensor variants: function, method - func: arccosh_(Tensor(a!) self) -> Tensor(a!) variants: function, method - func: arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) - func: asinh(Tensor self) -> Tensor variants: function, method structured_delegate: asinh.out dispatch: SparseCPU, SparseCUDA: asinh_sparse SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr - func: asinh_(Tensor(a!) self) -> Tensor(a!) variants: function, method structured_delegate: asinh.out dispatch: SparseCPU, SparseCUDA: asinh_sparse_ SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr_ - func: asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: asinh_out SparseCPU, SparseCUDA: asinh_sparse_out SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr_out # arcsinh, alias for asinh - func: arcsinh(Tensor self) -> Tensor variants: function, method - func: arcsinh_(Tensor(a!) self) -> Tensor(a!) variants: function, method - func: arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) - func: atanh(Tensor self) -> Tensor structured_delegate: atanh.out variants: function, method dispatch: CompositeExplicitAutograd: atanh SparseCPU, SparseCUDA: atanh_sparse SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr - func: atanh_(Tensor(a!) self) -> Tensor(a!) structured_delegate: atanh.out variants: function, method dispatch: SparseCPU, SparseCUDA: atanh_sparse_ SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr_ - func: atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: atanh_out SparseCPU, SparseCUDA: atanh_sparse_out SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr_out # arctanh, alias for atanh - func: arctanh(Tensor self) -> Tensor variants: function, method - func: arctanh_(Tensor(a!) self) -> Tensor(a!) variants: function, method - func: arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) - func: as_strided(Tensor(a) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a) variants: function, method dispatch: ZeroTensor, CPU, CUDA, Meta: as_strided_tensorimpl QuantizedCPU, QuantizedCUDA: as_strided_qtensorimpl device_check: NoCheck device_guard: False - func: as_strided_(Tensor(a!) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a!) use_const_ref_for_mutable_tensors: True variants: function, method device_check: NoCheck device_guard: False tags: inplace_view dispatch: CompositeExplicitAutograd: as_strided_ - func: asin(Tensor self) -> Tensor device_check: NoCheck # TensorIterator variants: function, method structured_delegate: asin.out dispatch: SparseCPU, SparseCUDA: asin_sparse SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr - func: asin_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function, method structured_delegate: asin.out dispatch: SparseCPU, SparseCUDA: asin_sparse_ SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr_ - func: asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: asin_out SparseCPU, SparseCUDA: asin_sparse_out SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr_out # arcsin, alias of asin - func: arcsin(Tensor self) -> Tensor variants: function, method - func: arcsin_(Tensor(a!) self) -> Tensor(a!) variants: function, method - func: arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) - func: atan(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: atan.out variants: function, method dispatch: SparseCPU, SparseCUDA: atan_sparse SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr - func: atan_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: atan.out variants: function, method dispatch: SparseCPU, SparseCUDA: atan_sparse_ SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr_ - func: atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: atan_out SparseCPU, SparseCUDA: atan_sparse_out SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr_out # arctan, alias of atan - func: arctan(Tensor self) -> Tensor variants: function, method - func: arctan_(Tensor(a!) self) -> Tensor(a!) variants: function, method - func: arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) - func: atleast_1d(Tensor self) -> Tensor variants: function - func: atleast_1d.Sequence(Tensor[] tensors) -> Tensor[] - func: atleast_2d(Tensor self) -> Tensor variants: function - func: atleast_2d.Sequence(Tensor[] tensors) -> Tensor[] variants: function - func: atleast_3d(Tensor self) -> Tensor variants: function - func: atleast_3d.Sequence(Tensor[] tensors) -> Tensor[] variants: function - func: baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor variants: function, method structured_delegate: baddbmm.out - func: baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) variants: method structured_delegate: baddbmm.out - func: baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) structured: True variants: function dispatch: CPU: baddbmm_out_cpu CUDA: baddbmm_out_cuda SparseCsrCUDA: baddbmm_out_sparse_csr_cuda - func: bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor - func: quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor dispatch: QuantizedCPU: quantized_batch_norm - func: _batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int) - func: _batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)

# Sample bernoulli with values in `self` as probability.
- func: bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
  device_check: NoCheck   # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: bernoulli

- func: bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck   # TensorIterator
  variants: function
  dispatch:
    CPU, CUDA: bernoulli_out

- func: bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck   # TensorIterator
  variants: method
  dispatch:
    CPU, CUDA: bernoulli_

- func: bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck   # TensorIterator
  variants: method
  dispatch:
    CPU, CUDA: bernoulli_

# This out-of-place version isn't used explicitly, but needed by jit.
# There is no default value for `p` here because it would introduce ambiguity
# with the `bernoulli(Tensor self, *, Generator? generator=None)` declaration.
- func: bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
  device_check: NoCheck   # TensorIterator
  variants: function, method

- func: bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor

- func: binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
  device_check: NoCheck   # TensorIterator
  python_module: nn
  variants: function
  dispatch:
    CPU: binary_cross_entropy_cpu
    CUDA: binary_cross_entropy_cuda

- func: binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck   # TensorIterator
  python_module: nn
  variants: function
  dispatch:
    CPU: binary_cross_entropy_out_cpu
    CUDA: binary_cross_entropy_out_cuda

- func: binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
  python_module: nn
  variants: function
  dispatch:
    CPU: binary_cross_entropy_backward_cpu
    CUDA: binary_cross_entropy_backward_cuda

- func: binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  variants: function
  dispatch:
    CPU: binary_cross_entropy_backward_out_cpu
    CUDA: binary_cross_entropy_backward_out_cuda

- func: binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
  device_check: NoCheck   # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: binary_cross_entropy_with_logits

- func: binary_cross_entropy_with_logits_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
  variants: function

- func: bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
  variants: function, method
  dispatch:
    CPU: _bincount_cpu
    CUDA: _bincount_cuda

- func: bitwise_not(Tensor self) -> Tensor
  device_check: NoCheck   # TensorIterator
  structured_delegate: bitwise_not.out
  variants: function, method

- func: bitwise_not_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck   # TensorIterator
  structured_delegate: bitwise_not.out
  variants: method

- func: bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: bitwise_not_out - func: copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: copysign_out - func: copysign.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: function, method structured_delegate: copysign.out - func: copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method structured_delegate: copysign.out - func: copysign.Scalar(Tensor self, Scalar other) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: copysign - func: copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) variants: method dispatch: CompositeExplicitAutograd: copysign_ - func: copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) dispatch: CompositeExplicitAutograd: copysign_out - func: logical_not(Tensor self) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: logical_not - func: logical_not_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: logical_not_ - func: logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: logical_not_out - func: logical_xor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: logical_xor - func: logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: logical_xor_ - func: logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: logical_xor_out - func: logical_and(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: logical_and - func: logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: logical_and_ - func: logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: logical_and_out - func: logical_or(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: logical_or - func: logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: logical_or_ - func: logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: logical_or_out - func: blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor - func: bmm(Tensor self, Tensor mat2) -> Tensor structured_delegate: bmm.out variants: function, method dispatch: SparseCPU: bmm_sparse_cpu SparseCUDA: bmm_sparse_cuda - func: bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) structured: True variants: function dispatch: CPU: bmm_out_cpu CUDA: bmm_out_cuda SparseCPU: bmm_out_sparse_cpu SparseCUDA: bmm_out_sparse_cuda SparseCsrCUDA: bmm_out_sparse_csr_cuda - func: broadcast_tensors(Tensor[] tensors) -> Tensor[] device_check: NoCheck device_guard: False - func: broadcast_to(Tensor(a) self, int[] size) -> Tensor(a) variants: function, method - func: _sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a) variants: function dispatch: SparseCPU, SparseCUDA: sparse_broadcast_to - func: cat(Tensor[] tensors, int dim=0) -> Tensor dispatch: CompositeExplicitAutograd: cat - func: cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) dispatch: CompositeExplicitAutograd: cat_out - func: cat.names(Tensor[] tensors, Dimname dim) -> Tensor - func: cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) # alias for torch.cat - func: concat(Tensor[] tensors, int dim=0) -> Tensor - func: concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) - func: concat.names(Tensor[] tensors, Dimname dim) -> Tensor - func: concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) - func: block_diag(Tensor[] tensors) -> Tensor variants: function - func: ceil(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: ceil.out variants: function, method dispatch: CompositeExplicitAutograd: ceil SparseCPU, SparseCUDA: ceil_sparse SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr - func: ceil_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: ceil.out variants: function, method dispatch: CompositeExplicitAutograd: ceil_ SparseCPU, SparseCUDA: ceil_sparse_ SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr_ - func: ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: ceil_out SparseCPU, SparseCUDA: ceil_sparse_out SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr_out # alias for torch.linalg.multi_dot - func: chain_matmul(Tensor[] matrices) -> Tensor variants: function # alias for torch.linalg.multi_dot - func: chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!) - func: unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[] variants: function, method device_check: NoCheck device_guard: False - func: chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[] variants: function, method device_check: NoCheck device_guard: False - func: tensor_split.sections(Tensor(a -> *) self, int sections, int dim=0) -> Tensor(a)[] variants: function, method - func: tensor_split.indices(Tensor(a -> *) self, int[] indices, int dim=0) -> Tensor(a)[] variants: function, method - func: tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[] variants: function, method - func: clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor device_check: NoCheck # TensorIterator variants: function, method cpp_no_default_args: ['min'] structured_delegate: clamp.out dispatch: QuantizedCPU: clamp_quantized_cpu - func: clamp.Tensor(Tensor self, Tensor? min=None, Tensor? 
max=None) -> Tensor variants: function, method dispatch: CPU, CUDA: clamp - func: clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function, method cpp_no_default_args: ['min'] structured_delegate: clamp.out dispatch: CompositeExplicitAutograd: clamp_ - func: clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) variants: function, method dispatch: CompositeExplicitAutograd: clamp_ - func: clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator cpp_no_default_args: ['min'] structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: clamp_out - func: clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: clamp_out - func: clamp_max(Tensor self, Scalar max) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: clamp_max - func: clamp_max.Tensor(Tensor self, Tensor max) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: clamp_max - func: clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: clamp_max_ - func: clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!) variants: function, method dispatch: CompositeExplicitAutograd: clamp_max_ - func: clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: clamp_max_out - func: clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: clamp_max_out - func: clamp_min(Tensor self, Scalar min) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: clamp_min - func: clamp_min.Tensor(Tensor self, Tensor min) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: clamp_min - func: clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: clamp_min_ - func: clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!) variants: function, method dispatch: CompositeExplicitAutograd: clamp_min_ - func: clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: clamp_min_out - func: clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: clamp_min_out # clip is an alias for clamp - func: clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor cpp_no_default_args: ['min'] variants: function, method - func: clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor variants: function, method - func: clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) cpp_no_default_args: ['min'] variants: function, method - func: clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) variants: function, method - func: clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) cpp_no_default_args: ['min'] - func: clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) 
- func: cudnn_is_acceptable(Tensor self) -> bool device_check: NoCheck device_guard: False - func: complex(Tensor real, Tensor imag) -> Tensor variants: function dispatch: CompositeExplicitAutograd: complex - func: complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: complex_out - func: polar(Tensor abs, Tensor angle) -> Tensor variants: function dispatch: CompositeExplicitAutograd: polar - func: polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: polar_out - func: constant_pad_nd(Tensor self, int[] pad, Scalar value=0) -> Tensor variants: function dispatch: CompositeExplicitAutograd: constant_pad_nd - func: contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a) variants: method manual_cpp_binding: True - func: convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor dispatch: CompositeExplicitAutograd: convolution - func: convolution_backward(Tensor grad_output, Tensor input, Tensor weight, int[]? bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) dispatch: CompositeExplicitAutograd, CUDA: convolution_backward - func: convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor dispatch: CompositeExplicitAutograd: convolution_overrideable - func: convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) dispatch: CompositeExplicitAutograd: convolution_backward_overrideable - func: _convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor dispatch: CompositeExplicitAutograd: _convolution - func: _convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor - func: _convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor - func: _convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) - func: conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor - func: conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor - func: conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor - func: conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor cpp_no_default_args: ['bias', 'stride', 'padding'] - func: conv2d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor cpp_no_default_args: ['bias', 'stride', 'padding'] - func: conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor cpp_no_default_args: ['bias', 'stride', 'padding'] - func: conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor dispatch: CompositeExplicitAutograd: conv_tbc - func: conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor) # NB: we inherit the goofy argument order from PyTorch torch.nn.functional - func: conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor - func: conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor - func: conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor - func: copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) variants: method device_check: NoCheck device_guard: False dispatch: MkldnnCPU: copy_mkldnn_ SparseCPU, SparseCUDA, SparseHIP: copy_sparse_wrapper_ CompositeExplicitAutograd: copy_ SparseCsrCPU, SparseCsrCUDA: copy_sparse_csr_ - func: _copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor dispatch: {} # We need this to be able to properly copy from a CPU to an XLA tensor with different sizes. # See https://github.com/pytorch/xla/issues/2881 - func: _copy_from_and_resize(Tensor self, Tensor dst) -> Tensor dispatch: {} - func: cos(Tensor self) -> Tensor device_check: NoCheck # TensorIterator variants: function, method structured_delegate: cos.out - func: cos_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function, method structured_delegate: cos.out - func: cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: cos_out - func: cosh(Tensor self) -> Tensor device_check: NoCheck # TensorIterator variants: function, method structured_delegate: cosh.out - func: cosh_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function, method structured_delegate: cosh.out - func: cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: cosh_out - func: cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor - func: count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor variants: function, method dispatch: CPU: count_nonzero_cpu CUDA: count_nonzero_cuda - func: count_nonzero(Tensor self, int? dim=None) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: count_nonzero - func: cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor variants: function, method - func: corrcoef(Tensor self) -> Tensor variants: function, method - func: cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid dispatch: CUDA: cudnn_affine_grid_generator_forward # TODO: Why do I have to call this grad?! 
- func: cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta dispatch: CUDA: cudnn_affine_grid_generator_backward - func: cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor) dispatch: CUDA: cudnn_batch_norm # NB: You can only use this if you used cudnn_batch_norm training=True - func: cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor) dispatch: CUDA: cudnn_batch_norm_backward - func: cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor dispatch: CUDA: cudnn_convolution - func: cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor dispatch: CUDA: cudnn_convolution_transpose - func: cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor dispatch: CUDA: cudnn_convolution_relu - func: cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor dispatch: CUDA: cudnn_convolution_add_relu # NB: input is special cased in a way I don't quite understand - func: cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output dispatch: CUDA: cudnn_grid_sampler_forward - func: cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid) dispatch: CUDA: cudnn_grid_sampler_backward - func: cummax(Tensor self, int dim) -> (Tensor values, Tensor indices) device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: cummax - func: cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) device_check: NoCheck # TensorIterator dispatch: CompositeExplicitAutograd: cummax_out - func: cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) device_check: NoCheck # TensorIterator variants: function, method - func: cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) device_check: NoCheck # TensorIterator - func: _cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> () variants: function dispatch: CPU: cummax_helper_cpu CUDA: cummax_helper_cuda - func: cummin(Tensor self, int dim) -> (Tensor values, Tensor indices) device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: cummin - func: cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) device_check: NoCheck # TensorIterator dispatch: CompositeExplicitAutograd: cummin_out - func: cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) device_check: NoCheck # TensorIterator variants: function, method - func: cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) device_check: NoCheck # TensorIterator - func: _cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> () variants: function dispatch: CPU: cummin_helper_cpu CUDA: cummin_helper_cuda - func: cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor variants: function device_check: NoCheck device_guard: False - func: cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor structured_delegate: cumprod.out device_check: NoCheck # TensorIterator variants: function, method - func: cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!) structured_delegate: cumprod.out variants: method - func: cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) structured: True device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: cumprod_out - func: cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!) variants: method - func: cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor variants: function device_check: NoCheck device_guard: False - func: cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor structured_delegate: cumsum.out device_check: NoCheck # TensorIterator variants: function, method - func: cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!) structured_delegate: cumsum.out variants: method - func: cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) structured: True device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: cumsum_out - func: cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!) variants: method - func: cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
device_check: NoCheck # TensorIterator - func: cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor - func: cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor - func: ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor # convenience function that converts to intlists for you - func: ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor - func: _ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor) dispatch: CPU: ctc_loss_cpu CUDA: ctc_loss_gpu - func: _ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor dispatch: CPU: ctc_loss_backward_cpu CUDA: ctc_loss_backward_gpu - func: diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: diag_embed - func: diagflat(Tensor self, int offset=0) -> Tensor variants: function, method - func: diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a) variants: function, method dispatch: CompositeExplicitAutograd: diagonal - func: linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a) python_module: linalg variants: function - func: diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a) variants: function, method - func: diagonal_backward(Tensor grad_output, int[] input_sizes, int offset, int dim1, int dim2) -> Tensor variants: function device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: diagonal_backward - func: fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!) variants: method - func: diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor variants: function, method - func: diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!) variants: function - func: gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[] variants: function - func: gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[] variants: function - func: gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[] variants: function - func: gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[] variants: function - func: gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[] variants: function - func: gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[] variants: function - func: gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[] variants: function - func: div.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: function, method structured_delegate: div.out dispatch: SparseCPU, SparseCUDA: div_sparse ZeroTensor: div_zerotensor - func: div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
device_check: NoCheck # TensorIterator variants: method structured_delegate: div.out dispatch: SparseCPU, SparseCUDA: div_sparse_ - func: div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: div_out SparseCPU, SparseCUDA: div_out_sparse_zerodim - func: div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor device_check: NoCheck # TensorIterator variants: function, method structured_delegate: div.out_mode dispatch: SparseCPU, SparseCUDA: div_sparse - func: div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method structured_delegate: div.out_mode dispatch: SparseCPU, SparseCUDA: div_sparse_ - func: div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: div_out_mode SparseCPU, SparseCUDA: div_out_sparse_zerodim # For C++ only, until we have conversion from C++ numbers to Tensor - func: div.Scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: div - func: div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: div_ - func: div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: div - func: div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!) variants: method dispatch: CompositeExplicitAutograd: div_ # divide, alias for div - func: divide.Tensor(Tensor self, Tensor other) -> Tensor variants: function, method - func: divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) variants: method - func: divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - func: divide.Scalar(Tensor self, Scalar other) -> Tensor variants: function, method - func: divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) variants: method - func: divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor variants: function, method - func: divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!) variants: method - func: divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) - func: divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor variants: function, method - func: divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!) variants: method # true_divide, an alias for div - func: true_divide.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method - func: true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: true_divide.Scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) 
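# NOTE: Illustrative usage sketch, not part of the schema. Assuming the standard
# Python bindings for the div / divide / true_divide entries above:
#
#   import torch
#   a, b = torch.tensor([7., -7.]), torch.tensor([2., 2.])
#   torch.div(a, b)                            # true division: tensor([ 3.5000, -3.5000])
#   torch.div(a, b, rounding_mode='trunc')     # round toward zero: tensor([ 3., -3.])
#   torch.div(a, b, rounding_mode='floor')     # round toward -inf:  tensor([ 3., -4.])
#   torch.divide(a, b)                         # `divide` is an alias for `div`
#   torch.true_divide(a, b)                    # `true_divide` is an alias for `div`
#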
device_check: NoCheck # TensorIterator variants: method - func: dot(Tensor self, Tensor tensor) -> Tensor variants: function, method dispatch: CPU: dot CUDA: dot_cuda - func: dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!) dispatch: CompositeExplicitAutograd: dot_out - func: vdot(Tensor self, Tensor other) -> Tensor variants: function, method dispatch: CPU: vdot CUDA: vdot_cuda - func: vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) dispatch: CompositeExplicitAutograd: vdot_out - func: einsum(str equation, Tensor[] tensors) -> Tensor - func: embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor dispatch: CompositeExplicitAutograd: embedding - func: embedding_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor - func: embedding_dense_backward(Tensor grad_output, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor dispatch: CPU: embedding_dense_backward_cpu CUDA: embedding_dense_backward_cuda - func: embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!) dispatch: CPU: embedding_renorm_cpu_ CUDA: embedding_renorm_cuda_ - func: embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor # NOTE [ embedding_bag Native Functions ] # The `_embedding_bag.*` variants assume that input tensors except for `weight`, # e.g. `indices` and `offsets` (and `offset2bag`), are contiguous. # We really only need to enforce this for `_embedding_bag` (the forward) because # the backward inputs are the same as forward ones. # The above `embedding_bag` wrapper is created to achieve this, e.g., # applying indices = indices.contiguous(). # The backward functions apply a check that these input tensors are contiguous. - func: _embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor) dispatch: CPU: _embedding_bag_forward_only_cpu CUDA: _embedding_bag_forward_only_cuda - func: _rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor) # row_stack is the alias of vstack - func: row_stack(Tensor[] tensors) -> Tensor - func: row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) - func: embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor) # To keep backward and forward compatibility, and to avoid ambiguity with the # original signature above, scale_grad_by_freq, mode, sparse, # per_sample_weights, and include_last_offset parameters do not have default # values. Once the original signature is removed, default values can be added. - func: embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor) - func: _embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? 
per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor) dispatch: CPU: _embedding_bag_cpu CUDA: _embedding_bag_cuda - func: _embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor - func: _embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor - func: _embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor dispatch: CPU: _embedding_bag_dense_backward_cpu CUDA: _embedding_bag_dense_backward_cuda - func: _embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor dispatch: CPU: _embedding_bag_per_sample_weights_backward_cpu CUDA: _embedding_bag_per_sample_weights_backward_cuda - func: empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor device_check: NoCheck device_guard: False - func: empty.memory_format(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor dispatch: CPU: empty_cpu CUDA: empty_cuda Meta: empty_meta MkldnnCPU: empty_mkldnn SparseCPU, SparseCUDA: empty_sparse SparseCsrCPU, SparseCsrCUDA: empty_sparse_csr # We do not make new_empty a composite that calls into new_empty_strided, as the strided version # is significantly more difficult to implement by different backends - func: new_empty(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor variants: method dispatch: CompositeExplicitAutograd: new_empty - func: new_empty_strided(Tensor self, int[] size, int[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor variants: method dispatch: CompositeExplicitAutograd: new_empty_strided - func: new_full(Tensor self, int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor variants: method - func: new_zeros(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor variants: method - func: new_ones(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor variants: method # other overrides are to provide a more helpful error message that dtype is required - func: _empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? 
memory_format=contiguous_format) -> Tensor dispatch: CPU: empty_affine_quantized_other_backends_stub QuantizedCPU, QuantizedCUDA: empty_affine_quantized # it's a factory function receiving a tensor argument, thus overriding explicitly # other overrides are to provide a more helpful error message that dtype is required - func: _empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor category_override: factory dispatch: CPU: empty_per_channel_affine_quantized_other_backends_stub QuantizedCPU, QuantizedCUDA: empty_per_channel_affine_quantized - func: resize_(Tensor(a!) self, int[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!) use_const_ref_for_mutable_tensors: True variants: method device_check: NoCheck device_guard: False dispatch: CPU, Meta: resize_ CUDA: resize_cuda_ QuantizedCPU: quantized_resize_cpu_ SparseCsrCPU, SparseCsrCUDA: resize_sparse_csr_ - func: empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor category_override: factory variants: function dispatch: QuantizedCPU, QuantizedCUDA: empty_quantized - func: empty.out(int[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck device_guard: False - func: empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: empty_like SparseCPU, SparseCUDA: empty_like_sparse_coo SparseCsrCPU, SparseCsrCUDA: empty_like_sparse_csr - func: empty_strided(int[] size, int[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor dispatch: CPU: empty_strided_cpu CUDA: empty_strided_cuda Meta: empty_strided_meta - func: erf(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: erf.out variants: function, method dispatch: SparseCPU, SparseCUDA: erf_sparse SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr - func: erf_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: erf.out variants: function, method dispatch: SparseCPU, SparseCUDA: erf_sparse_ SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr_ - func: erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: erf_out SparseCPU, SparseCUDA: erf_sparse_out SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr_out - func: erfc(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: erfc.out variants: function, method - func: erfc_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: erfc.out variants: function, method - func: erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: erfc_out - func: exp(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: exp.out variants: function, method - func: exp_(Tensor(a!) self) -> Tensor(a!) 
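# NOTE: Illustrative usage sketch, not part of the schema. Assuming the standard
# Python bindings for the empty_like / empty_strided entries above (the values
# of `empty*` tensors are uninitialized, so only shape, strides, and tensor
# options are meaningful):
#
#   import torch
#   x = torch.randn(2, 3)
#   y = torch.empty_like(x, dtype=torch.float64)   # same shape, different dtype
#   z = torch.empty_strided((2, 3), (1, 2))        # explicit strides
#   z.stride()                                     # (1, 2)
#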
device_check: NoCheck # TensorIterator structured_delegate: exp.out variants: function, method - func: exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: exp_out - func: exp2(Tensor self) -> Tensor structured_delegate: exp2.out variants: function, method - func: exp2_(Tensor(a!) self) -> Tensor(a!) structured_delegate: exp2.out variants: function, method - func: exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: exp2_out - func: expm1(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: expm1.out variants: function, method dispatch: SparseCPU, SparseCUDA: expm1_sparse SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr - func: expm1_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: expm1.out variants: function, method dispatch: SparseCPU, SparseCUDA: expm1_sparse_ SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr_ - func: expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: expm1_out SparseCPU, SparseCUDA: expm1_sparse_out SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr_out - func: expand(Tensor(a) self, int[] size, *, bool implicit=False) -> Tensor(a) variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too. device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: expand - func: expand_as(Tensor(a) self, Tensor other) -> Tensor(a) variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too. device_check: NoCheck device_guard: False - func: eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: eye.out(int n, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: eye_out_cpu CUDA: eye_out_cuda - func: eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: eye_out_cpu CUDA: eye_out_cuda - func: flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a) variants: function, method - func: flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a) variants: function, method - func: flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a) variants: function, method - func: flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a) variants: function, method - func: unflatten.int(Tensor(a) self, int dim, int[] sizes, Dimname[]? names=None) -> Tensor(a) variants: method - func: unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a) variants: method - func: fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function, method dispatch: CPU, CUDA: fill_ QuantizedCPU, QuantizedCUDA: fill_quantized_ Meta: fill_meta_ - func: fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!) 
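# NOTE: Illustrative usage sketch, not part of the schema. `expand` and
# `expand_as` are declared method-only above and return views that broadcast
# size-1 dimensions without copying data; -1 keeps an existing size. Roughly,
# assuming the standard Python bindings:
#
#   import torch
#   x = torch.tensor([[1.], [2.], [3.]])    # shape (3, 1)
#   x.expand(3, 4).shape                    # torch.Size([3, 4]); no data copy
#   x.expand(-1, 4).shape                   # -1 leaves dim 0 unchanged
#   other = torch.zeros(3, 4)
#   x.expand_as(other).shape                # torch.Size([3, 4])
#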
device_check: NoCheck # TensorIterator variants: function, method dispatch: CPU, CUDA: fill_ QuantizedCPU, QuantizedCUDA: fill_quantized_ Meta: fill_meta_ - func: floor(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: floor.out variants: function, method dispatch: CompositeExplicitAutograd: floor SparseCPU, SparseCUDA: floor_sparse SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr - func: floor_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: floor.out variants: function, method dispatch: CompositeExplicitAutograd: floor_ SparseCPU, SparseCUDA: floor_sparse_ SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr_ - func: floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: floor_out SparseCPU, SparseCUDA: floor_sparse_out SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr_out - func: floor_divide(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CPU, CUDA: floor_divide SparseCPU, SparseCUDA: floor_divide_sparse - func: floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: floor_divide_ SparseCPU, SparseCUDA: floor_divide_sparse_ - func: floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: floor_divide_out SparseCPU, SparseCUDA: floor_divide_out_sparse_zerodim - func: floor_divide.Scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method - func: frac(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: frac.out variants: function, method - func: frac_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: frac.out variants: function, method - func: frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: frac_out - func: full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor device_check: NoCheck device_guard: False - func: full(int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: full.out(int[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) - func: full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor - func: from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor dispatch: CPU: from_file - func: gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: gcd_out - func: gcd(Tensor self, Tensor other) -> Tensor structured_delegate: gcd.out variants: function, method - func: gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!) 
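# NOTE: Illustrative usage sketch, not part of the schema. Assuming the standard
# Python bindings for the full / full_like and gcd entries above:
#
#   import torch
#   torch.full((2, 2), 7)                       # tensor([[7, 7], [7, 7]])
#   torch.full_like(torch.randn(2, 2), 0.5)     # same shape/options, filled with 0.5
#   a = torch.tensor([4, 9, 12])
#   b = torch.tensor([6, 3, 18])
#   torch.gcd(a, b)                             # tensor([2, 3, 6]), elementwise on integer tensors
#   a.gcd_(b)                                   # in-place method variant
#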
structured_delegate: gcd.out variants: function, method - func: lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: lcm_out - func: lcm(Tensor self, Tensor other) -> Tensor structured_delegate: lcm.out variants: function, method - func: lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!) structured_delegate: lcm.out variants: function, method # NOTE [ grid_sampler Native Functions ] # `grid_sampler` does all the shape checking and then dispatches to one of # `cudnn_grid_sampler`, `grid_sampler_2d`, or `grid_sampler_3d`, each of which # has the corresponding backward defined as native functions as well. Therefore, # in these functions and their backwards, no more shape checking is done. # # There is also _grid_sampler_2d_backward_cpu_fallback which is an # implementation detail of grid_sampler_2d and is only exposed here for testing # purposes. # # Additionally, arguments `padding_mode` and `interpolation_mode` are cast to # enums defined in `native/GridSampler.h`. `cudnn_grid_sampler` doesn't take in # `interpolation_mode` because it only supports Bilinear interpolation mode. # Nor does it take in `align_corners` because it only supports the mode # `align_corners = True`. - func: grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor - func: grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor dispatch: CPU, QuantizedCPU: grid_sampler_2d_cpu CUDA: grid_sampler_2d_cuda # `grid_sampler_2d_backward` takes in `output_mask` to optimize performance for # the case where `input` doesn't require gradient. Gradient for `grid` is always # computed (only `output_mask[0]` is checked by the implementations). - func: grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor) dispatch: CPU: grid_sampler_2d_backward_cpu CUDA: grid_sampler_2d_backward_cuda # See NOTE [ grid_sample CPU fallback ] - func: _grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor dispatch: CompositeExplicitAutograd: _grid_sampler_2d_cpu_fallback - func: _grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor) - func: grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor dispatch: CPU: grid_sampler_3d_cpu CUDA: grid_sampler_3d_cuda - func: grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor) dispatch: CPU: grid_sampler_3d_backward_cpu CUDA: grid_sampler_3d_backward_cuda - func: hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor - func: hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor - func: group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor - func: native_group_norm(Tensor input, Tensor? weight, Tensor? bias, int N, int C, int HxW, int group, float eps) -> (Tensor, Tensor, Tensor) dispatch: CPU, CUDA: native_group_norm CompositeImplicitAutograd: math_group_norm - func: native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, int N, int C, int HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor) dispatch: CPU, CUDA: native_group_norm_backward # Real to complex forward FFT - func: _fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor variants: function dispatch: CPU: _fft_r2c_mkl CUDA: _fft_r2c_cufft - func: _fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!) variants: function dispatch: CPU: _fft_r2c_mkl_out CUDA: _fft_r2c_cufft_out # Complex to real inverse FFT - func: _fft_c2r(Tensor self, int[] dim, int normalization, int last_dim_size) -> Tensor variants: function dispatch: CPU: _fft_c2r_mkl CUDA: _fft_c2r_cufft - func: _fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!) variants: function dispatch: CPU: _fft_c2r_mkl_out CUDA: _fft_c2r_cufft_out # Standard complex to complex FFT (forward or backward) - func: _fft_c2c(Tensor self, int[] dim, int normalization, bool forward) -> Tensor variants: function dispatch: CPU: _fft_c2c_mkl CUDA: _fft_c2c_cufft - func: _fft_c2c.out(Tensor self, int[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!) variants: function dispatch: CPU: _fft_c2c_mkl_out CUDA: _fft_c2c_cufft_out - func: _cufft_get_plan_cache_size(int device_index) -> int - func: _cufft_get_plan_cache_max_size(int device_index) -> int - func: _cufft_set_plan_cache_max_size(int device_index, int max_size) -> () - func: _cufft_clear_plan_cache(int device_index) -> () - func: index.Tensor(Tensor self, Tensor?[] indices) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CPU, CUDA: index QuantizedCPU: quantized_index # NB: This function is special-cased in tools/autograd/gen_variable_type.py # NB: The following functions are declared in aten/src/ATen/templates/TensorBody.h and defined in aten/src/ATen/TensorIndexing.cpp: # - Tensor Tensor::index(ArrayRef indices) # - Tensor Tensor::index(std::initializer_list indices) - func: index_copy_(Tensor(a!) 
self, int dim, Tensor index, Tensor source) -> Tensor(a!) variants: method dispatch: CompositeExplicitAutograd: index_copy_ - func: index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: index_copy - func: index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!) variants: method - func: index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor variants: function, method - func: index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!) device_check: NoCheck # delegate to _index_put_impl_, which leverages TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: index_put_ # NB: The following functions are declared in aten/src/ATen/templates/TensorBody.h and defined in aten/src/ATen/TensorIndexing.cpp: # - Tensor & Tensor::index_put_(ArrayRef indices, Tensor const & rhs) # - Tensor & Tensor::index_put_(ArrayRef indices, Scalar v) # - Tensor & Tensor::index_put_(std::initializer_list indices, Tensor const & rhs) # - Tensor & Tensor::index_put_(std::initializer_list indices, Scalar v) - func: index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor device_check: NoCheck # delegate to _index_put_impl_ after clone, which leverages TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: index_put - func: _index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function dispatch: CPU, CUDA: _index_put_impl_ - func: instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor variants: function - func: inverse(Tensor self) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: inverse - func: inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: CompositeExplicitAutograd: inverse_out - func: isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor variants: function, method - func: isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) variants: function structured: True dispatch: CPU, CUDA: isin_Tensor_Tensor_out - func: isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor variants: function structured_delegate: isin.Tensor_Tensor_out - func: isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) variants: function structured: True dispatch: CPU, CUDA: isin_Tensor_Scalar_out - func: isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor variants: function structured_delegate: isin.Tensor_Scalar_out - func: isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) 
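# NOTE: Illustrative usage sketch, not part of the schema. `index_put_` above
# takes a tuple of index tensors (one per indexed dimension) plus a values
# tensor; with accumulate=True it adds into the existing values instead of
# overwriting them. Roughly, assuming the standard Python bindings:
#
#   import torch
#   t = torch.zeros(3)
#   idx = (torch.tensor([0, 0, 2]),)
#   vals = torch.tensor([1., 1., 1.])
#   t.index_put_(idx, vals, accumulate=True)    # t becomes tensor([2., 0., 1.])
#   torch.index_put(torch.zeros(3), idx, vals)  # out-of-place: tensor([1., 0., 1.])
#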
variants: function structured: True dispatch: CPU, CUDA: isin_Scalar_Tensor_out - func: isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor variants: function structured_delegate: isin.Scalar_Tensor_out - func: isnan(Tensor self) -> Tensor variants: function, method device_check: NoCheck device_guard: False dispatch: CPU, CUDA: isnan SparseCPU, SparseCUDA: isnan_sparse SparseCsrCPU, SparseCsrCUDA: isnan_sparse_csr - func: is_distributed(Tensor self) -> bool variants: function, method device_check: NoCheck device_guard: False - func: is_floating_point(Tensor self) -> bool variants: function, method device_check: NoCheck device_guard: False manual_cpp_binding: True - func: is_complex(Tensor self) -> bool variants: function, method device_check: NoCheck device_guard: False manual_cpp_binding: True - func: is_conj(Tensor self) -> bool variants: function, method device_guard: False manual_cpp_binding: True - func: _is_zerotensor(Tensor self) -> bool variants: function, method device_guard: False manual_cpp_binding: True - func: is_neg(Tensor self) -> bool variants: function, method device_guard: False manual_cpp_binding: True - func: isreal(Tensor self) -> Tensor variants: function, method - func: is_nonzero(Tensor self) -> bool variants: function, method device_check: NoCheck device_guard: False - func: is_same_size(Tensor self, Tensor other) -> bool variants: function, method device_check: NoCheck device_guard: False - func: is_signed(Tensor self) -> bool variants: function, method device_check: NoCheck device_guard: False manual_cpp_binding: True - func: is_inference(Tensor self) -> bool variants: function, method device_check: NoCheck device_guard: False manual_cpp_binding: True - func: kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor dispatch: CompositeExplicitAutograd: kl_div - func: kl_div_backward(Tensor grad_output, Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor dispatch: CPU: kl_div_backward_cpu CUDA: kl_div_backward_cuda - func: kron(Tensor self, Tensor other) -> Tensor variants: function, method - func: kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - func: kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) variants: function, method dispatch: CompositeExplicitAutograd: kthvalue - func: kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) dispatch: CPU: kthvalue_out_cpu CUDA: kthvalue_out_cuda - func: kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) variants: function, method - func: kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) - func: layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor - func: native_layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor) dispatch: CPU: layer_norm_cpu CUDA: layer_norm_cuda CompositeImplicitAutograd: math_native_layer_norm - func: _native_multi_head_self_attention(Tensor query, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? 
mask=None) -> Tensor dispatch: CPU: multi_head_self_attention_cpu CUDA: multi_head_self_attention_cuda - func: native_layer_norm_backward(Tensor grad_out, Tensor input, int[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor) dispatch: CPU: layer_norm_backward_cpu CUDA: layer_norm_backward_cuda - func: nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: nan_to_num SparseCPU, SparseCUDA: nan_to_num_sparse - func: nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!) variants: function, method dispatch: CompositeExplicitAutograd: nan_to_num_ SparseCPU, SparseCUDA: nan_to_num_sparse_ - func: nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: nan_to_num_out SparseCPU, SparseCUDA: nan_to_num_sparse_out - func: linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor python_module: nn - func: linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn - func: mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor python_module: nn dispatch: MkldnnCPU: mkldnn_linear - func: mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor dispatch: MkldnnCPU: mkldnn_linear_backward_input - func: mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor) dispatch: MkldnnCPU: mkldnn_linear_backward_weights - func: mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor) dispatch: MkldnnCPU: mkldnn_linear_backward - func: fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor - func: fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor - func: fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int) - func: fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor - func: fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor - func: fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor - func: fbgemm_pack_quantized_matrix(Tensor input) -> Tensor - func: fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor - func: ldexp.Tensor(Tensor self, Tensor other) -> Tensor variants: function, method - func: ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!) variants: function, method - func: ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - func: linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, Meta: linspace_out CUDA: linspace_cuda_out - func: log(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: log.out variants: function, method - func: log_(Tensor(a!) self) -> Tensor(a!) 
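# NOTE: Illustrative usage sketch, not part of the schema. For the nan_to_num
# entries above, a None replacement means "use the dtype's default": 0 for nan
# and the finite max/min of the dtype for +/-inf. Roughly, assuming the
# standard Python bindings:
#
#   import torch
#   x = torch.tensor([float('nan'), float('inf'), -float('inf'), 1.0])
#   torch.nan_to_num(x)                              # tensor([0.0000e+00, 3.4028e+38, -3.4028e+38, 1.0000e+00])
#   torch.nan_to_num(x, nan=0.0, posinf=1e6, neginf=-1e6)
#                                                    # tensor([ 0.e+00,  1.e+06, -1.e+06,  1.e+00])
#   x.nan_to_num_(nan=0.0)                           # in-place method variant
#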
device_check: NoCheck # TensorIterator structured_delegate: log.out variants: function, method - func: log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: log_out - func: log10(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: log10.out variants: function, method dispatch: CompositeExplicitAutograd: log10 - func: log10_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: log10.out variants: function, method - func: log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: log10_out - func: log1p(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: log1p.out variants: function, method dispatch: SparseCPU, SparseCUDA: log1p_sparse SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr - func: log1p_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: log1p.out variants: function, method dispatch: SparseCPU, SparseCUDA: log1p_sparse_ SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr_ - func: log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: log1p_out SparseCPU, SparseCUDA: log1p_sparse_out SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr_out - func: log2(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: log2.out variants: function, method - func: log2_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: log2.out variants: function, method - func: log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: log2_out - func: logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: logaddexp_out - func: logaddexp(Tensor self, Tensor other) -> Tensor variants: method, function structured_delegate: logaddexp.out dispatch: CompositeExplicitAutograd: logaddexp - func: logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: logaddexp2_out - func: logaddexp2(Tensor self, Tensor other) -> Tensor variants: method, function structured_delegate: logaddexp2.out dispatch: CompositeExplicitAutograd: logaddexp2 - func: xlogy.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: xlogy.OutTensor variants: function, method - func: xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: function dispatch: CompositeExplicitAutograd: xlogy - func: xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: xlogy # xlogy: inplace variant - func: xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function, method structured_delegate: xlogy.OutTensor - func: xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!) 
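# NOTE: Illustrative usage sketch, not part of the schema. `logaddexp` computes
# log(exp(a) + exp(b)) without overflowing for large inputs, and `xlogy`
# defines x * log(y) to be 0 whenever x == 0. Roughly, assuming the standard
# Python bindings:
#
#   import torch
#   a, b = torch.tensor([1000.]), torch.tensor([1000.])
#   torch.log(torch.exp(a) + torch.exp(b))      # tensor([inf]) -- overflows
#   torch.logaddexp(a, b)                       # tensor([1000.6931]) -- stable
#   torch.xlogy(torch.tensor([0., 2.]), torch.tensor([0., 3.]))
#                                               # tensor([0.0000, 2.1972]); 0 * log(0) -> 0
#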
device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: xlogy_ # xlogy: out variant - func: xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase variants: function dispatch: CPU, CUDA: xlogy_out - func: xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function dispatch: CompositeExplicitAutograd: xlogy_out - func: xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function dispatch: CompositeExplicitAutograd: xlogy_out - func: logdet(Tensor self) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: logdet - func: logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, Meta: logspace_out CUDA: logspace_cuda_out # log_softmax allows positional dtype, unlike most operators, because kwonly is BC-breaking when loading jit models. - func: log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor variants: function, method - func: log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor variants: function, method - func: _log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor structured_delegate: _log_softmax.out - func: _log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) structured: True dispatch: CPU: log_softmax_cpu_out CUDA: log_softmax_cuda_out - func: _log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor structured_delegate: _log_softmax_backward_data.out - func: _log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!) structured: True dispatch: CPU: log_softmax_backward_cpu_out CUDA: log_softmax_backward_cuda_out - func: _logcumsumexp(Tensor self, int dim) -> Tensor dispatch: CPU: _logcumsumexp_cpu CUDA: _logcumsumexp_cuda - func: _logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: _logcumsumexp_out_cpu CUDA: _logcumsumexp_out_cuda - func: logcumsumexp(Tensor self, int dim) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: logcumsumexp - func: logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) dispatch: CompositeExplicitAutograd: logcumsumexp_out - func: logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor variants: function, method - func: logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) - func: logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: logsumexp - func: logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator dispatch: CompositeExplicitAutograd: logsumexp_out - func: logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor - func: matmul(Tensor self, Tensor other) -> Tensor variants: function, method - func: matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - func: matrix_rank.tol(Tensor self, float tol, bool symmetric=False) -> Tensor - func: matrix_rank(Tensor self, bool symmetric=False) -> Tensor # Alias to linalg.matrix_power - func: matrix_power(Tensor self, int n) -> Tensor variants: function, method # Alias to linalg.matrix_power - func: matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) # Alias to linalg.matrix_exp - func: matrix_exp(Tensor self) -> Tensor variants: function, method # This function should be deprecated in favor of differential_analytic_matrix_function in FunctionsManual.cpp - func: matrix_exp_backward(Tensor self, Tensor grad) -> Tensor # DEPRECATED: Use torch.aminmax instead - func: _aminmax(Tensor self) -> (Tensor, Tensor) dispatch: CPU, CUDA: _aminmax_all # DEPRECATED: Use torch.aminmax instead - func: _aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor) dispatch: CPU, CUDA: _aminmax - func: aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max) device_check: NoCheck # TensorIterator structured_delegate: aminmax.out variants: function, method - func: aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max) device_check: NoCheck # TensorIterator structured: True dispatch: CPU, CUDA: aminmax_out - func: _compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor dispatch: CPU, CUDA: _compute_linear_combination - func: _compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: _compute_linear_combination_out - func: max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) device_check: NoCheck # TensorIterator structured_delegate: max.dim_max variants: function, method dispatch: QuantizedCPU, QuantizedCUDA: qmax - func: max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) device_check: NoCheck # TensorIterator structured: True precomputed: - dim -> int dim dispatch: CPU, CUDA: max_out - func: max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) device_check: NoCheck # TensorIterator variants: function, method - func: max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) device_check: NoCheck # TensorIterator - func: value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, int[] sizes, bool keepdim) -> Tensor variants: function device_check: NoCheck device_guard: False - func: amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: amax - func: amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
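# NOTE: Illustrative usage sketch, not part of the schema. As noted above,
# _aminmax is deprecated in favor of aminmax, which returns both extrema in a
# single pass; max.dim also returns indices, while amax returns values only.
# Roughly, assuming the standard Python bindings:
#
#   import torch
#   x = torch.tensor([[1., 5.], [4., 2.]])
#   torch.aminmax(x)                        # (min=tensor(1.), max=tensor(5.))
#   torch.aminmax(x, dim=1)                 # per-row min/max, one value per row
#   torch.amax(x, dim=1)                    # tensor([5., 4.]) -- values only
#   values, indices = torch.max(x, dim=1)   # max.dim: values and argmax indices
#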
dispatch: CPU, CUDA: amax_out # Return: (Tensor output, Tensor indices) - func: max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) - func: max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor - func: max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor - func: mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor dispatch: MkldnnCPU: mkldnn_max_pool2d - func: mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor dispatch: MkldnnCPU: mkldnn_max_pool2d_backward - func: mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor dispatch: MkldnnCPU: mkldnn_max_pool3d - func: mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor dispatch: MkldnnCPU: mkldnn_max_pool3d_backward - func: quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor dispatch: QuantizedCPU: quantized_max_pool1d - func: quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor dispatch: QuantizedCPU: quantized_max_pool2d - func: max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor # The CPU and GPU dispatch variants are named weirdly here because otherwise there # are namespacing issues in C++ - func: mean(Tensor self, *, ScalarType? dtype=None) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: mean - func: mean.dim(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor structured_delegate: mean.out device_check: NoCheck # TensorIterator variants: function, method dispatch: QuantizedCPU: mean_quantized_cpu - func: mean.out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) structured: True device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: mean_out QuantizedCPU: mean_out_quantized_cpu - func: mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: nanmean(Tensor self, int[1] dim=[], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor device_check: NoCheck # Composite variants: function, method - func: nanmean.out(Tensor self, int[1] dim=[], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
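# NOTE: Illustrative usage sketch, not part of the schema. `mean.dim` reduces
# over the given dimensions (optionally keeping them with keepdim=True), while
# `nanmean` ignores NaN entries in the reduction. Roughly, assuming the
# standard Python bindings:
#
#   import torch
#   x = torch.tensor([[1., 2.], [3., 4.]])
#   torch.mean(x, dim=0)                        # tensor([2., 3.])
#   torch.mean(x, dim=0, keepdim=True).shape    # torch.Size([1, 2])
#   y = torch.tensor([1., float('nan'), 3.])
#   torch.nanmean(y)                            # tensor(2.) -- NaN is skipped
#   y.mean()                                    # tensor(nan)
#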
device_check: NoCheck # Composite - func: median(Tensor self) -> Tensor variants: function, method dispatch: CPU: median_cpu CUDA: median_cuda - func: median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) variants: function, method dispatch: CompositeExplicitAutograd: median - func: median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) dispatch: CPU: median_out_cpu CUDA: median_out_cuda - func: median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) variants: function, method - func: median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) - func: nanmedian(Tensor self) -> Tensor variants: function, method dispatch: CPU: nanmedian_cpu CUDA: nanmedian_cuda - func: nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) variants: function, method dispatch: CompositeExplicitAutograd: nanmedian - func: nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) dispatch: CPU: nanmedian_out_cpu CUDA: nanmedian_out_cuda - func: nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) variants: function, method - func: nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) - func: min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) device_check: NoCheck # TensorIterator structured_delegate: min.dim_min variants: function, method dispatch: QuantizedCPU, QuantizedCUDA: qmin - func: min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) device_check: NoCheck # TensorIterator structured: True precomputed: - dim -> int dim dispatch: CPU, CUDA: min_out - func: min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) device_check: NoCheck # TensorIterator variants: function, method - func: min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) device_check: NoCheck # TensorIterator - func: amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: amin - func: amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: amin_out - func: mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor dispatch: CompositeExplicitAutograd: mkldnn_convolution - func: miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor) dispatch: CUDA: miopen_batch_norm - func: miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor) dispatch: CUDA: miopen_batch_norm_backward - func: miopen_convolution(Tensor self, Tensor weight, Tensor? 
bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor dispatch: CUDA: miopen_convolution - func: miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor dispatch: CUDA: miopen_convolution_transpose - func: miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor dispatch: CUDA: miopen_depthwise_convolution - func: miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) dispatch: CUDA: miopen_rnn - func: miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) dispatch: CUDA: miopen_rnn_backward - func: mm(Tensor self, Tensor mat2) -> Tensor structured_delegate: mm.out variants: function, method dispatch: SparseCPU, SparseCUDA: _sparse_mm SparseCsrCPU, SparseCsrCUDA: _sparse_csr_mm - func: mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) structured: True dispatch: CPU: mm_out_cpu CUDA: mm_out_cuda SparseCPU, SparseCUDA: _sparse_mm_out SparseCsrCPU, SparseCsrCUDA: _sparse_csr_mm_out - func: _sparse_mm(Tensor sparse, Tensor dense) -> Tensor - func: _sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor dispatch: SparseCPU: sparse_sparse_matmul_cpu SparseCUDA: sparse_sparse_matmul_cuda - func: _sparse_mask_helper(Tensor t, Tensor mask_indices) -> Tensor dispatch: SparseCPU: sparse_mask_helper_cpu SparseCUDA: sparse_mask_helper_cuda - func: mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) variants: function, method dispatch: CPU, CUDA: mode - func: mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) dispatch: CompositeExplicitAutograd: mode_out - func: mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) variants: function, method - func: mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) - func: mul.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: mul.out variants: function, method dispatch: SparseCPU, SparseCUDA: mul_sparse MkldnnCPU: mkldnn_mul ZeroTensor: mul_zerotensor - func: mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: mul.out variants: method dispatch: SparseCPU, SparseCUDA: mul_sparse_ MkldnnCPU: mkldnn_mul_ - func: mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
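# NOTE: Illustrative usage sketch, not part of the schema. `mm` above is
# strictly 2-D matrix multiplication (the earlier `matmul` entry additionally
# supports broadcasting and batching), and `mode` returns the most common value
# along a dimension together with an index where it occurs. Roughly, assuming
# the standard Python bindings:
#
#   import torch
#   a, b = torch.randn(2, 3), torch.randn(3, 4)
#   torch.mm(a, b).shape                            # torch.Size([2, 4]); 2-D inputs only
#   torch.matmul(torch.randn(5, 2, 3), b).shape     # torch.Size([5, 2, 4]); batched
#   values, indices = torch.mode(torch.tensor([1, 2, 2, 3]), dim=0)
#                                                   # values == tensor(2); indices points at one occurrence
#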
device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: mul_out SparseCPU: mul_out_sparse_cpu SparseCUDA: mul_out_sparse_cuda MkldnnCPU: mkldnn_mul_out # For C++ only, until we have conversion from C++ numbers to Tensor - func: mul.Scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: mul - func: mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: mul_ # multiply, alias for mul - func: multiply.Tensor(Tensor self, Tensor other) -> Tensor variants: function, method - func: multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) variants: method - func: multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - func: multiply.Scalar(Tensor self, Scalar other) -> Tensor variants: function, method - func: multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) variants: method - func: mv(Tensor self, Tensor vec) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: mv SparseCPU, SparseCUDA: mv_sparse - func: mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!) dispatch: CompositeExplicitAutograd: mv_out - func: mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: mvlgamma_out - func: mvlgamma(Tensor self, int p) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: mvlgamma - func: mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: mvlgamma_ - func: narrow_copy(Tensor self, int dim, int start, int length) -> Tensor variants: function, method dispatch: CPU: narrow_copy_dense_cpu SparseCPU, SparseCUDA: narrow_copy_sparse CompositeExplicitAutograd: narrow_copy_dense - func: narrow_copy.out(Tensor self, int dim, int start, int length, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: narrow_copy_dense_cpu_out - func: narrow(Tensor(a) self, int dim, int start, int length) -> Tensor(a) variants: function, method device_check: NoCheck device_guard: False - func: narrow.Tensor(Tensor(a) self, int dim, Tensor start, int length) -> Tensor(a) variants: function, method device_check: NoCheck device_guard: False - func: native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) dispatch: CPU: batch_norm_cpu CUDA: batch_norm_cuda MkldnnCPU: mkldnn_batch_norm - func: native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!)) dispatch: CUDA: batch_norm_cuda_out - func: batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor) dispatch: CUDA: batch_norm_stats_cuda - func: batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor dispatch: CUDA: batch_norm_elemt_cuda - func: batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!) dispatch: CUDA: batch_norm_elemt_cuda_out # for backward compatibility - func: batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? 
running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor) dispatch: CUDA: batch_norm_gather_stats_cuda - func: batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor) dispatch: CUDA: batch_norm_gather_stats_with_counts_cuda - func: native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor) dispatch: CPU: batch_norm_backward_cpu CUDA: batch_norm_backward_cuda MkldnnCPU: mkldnn_batch_norm_backward - func: batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor) dispatch: CUDA: batch_norm_backward_reduce_cuda - func: batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor dispatch: CUDA: batch_norm_backward_elemt_cuda - func: batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor) dispatch: CPU: batch_norm_update_stats_cpu CUDA: batch_norm_update_stats_cuda - func: is_vulkan_available() -> bool - func: _nnpack_available() -> bool - func: _nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, int[2] padding, int[2] stride=1) -> Tensor variants: function dispatch: CompositeExplicitAutograd: _nnpack_spatial_convolution - func: ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor device_check: NoCheck device_guard: False - func: ones(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: ones.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) - func: ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor - func: pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor - func: cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor - func: _euclidean_dist(Tensor x1, Tensor x2) -> Tensor dispatch: CompositeExplicitAutograd: _euclidean_dist - func: _cdist_forward(Tensor x1, Tensor x2, float p, int? 
compute_mode) -> Tensor dispatch: CPU, CUDA: _cdist_forward - func: _cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor dispatch: CPU, CUDA: _cdist_backward - func: pdist(Tensor self, float p=2) -> Tensor - func: _pdist_forward(Tensor self, float p=2) -> Tensor dispatch: CPU, CUDA: _pdist_forward - func: _pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor dispatch: CPU, CUDA: _pdist_backward - func: cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor variants: function - func: permute(Tensor(a) self, int[] dims) -> Tensor(a) variants: function, method dispatch: CompositeExplicitAutograd: permute - func: movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) variants: function, method - func: movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a) variants: function, method # moveaxis, alias for movedim - func: moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) variants: function, method - func: moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a) variants: function, method # Only exposed from C++ -- in Python, # we expose it as an attribute `T`, not a function. # # I'd like to name this "T" in C++ too, but # calling a native function "T" causes undefined # behavior on Windows, for reasons I don't understand # (maybe related to capital letter collation somehow...) - func: numpy_T(Tensor(a) self) -> Tensor(a) variants: method # Exposed on Python as an attribute 'H' - func: matrix_H(Tensor(a) self) -> Tensor(a) variants: method # Exposed on Python as an attribute 'mT' - func: mT(Tensor(a) self) -> Tensor(a) variants: method # Exposed on Python as an attribute 'mH' - func: mH(Tensor(a) self) -> Tensor(a) variants: method - func: adjoint(Tensor(a) self) -> Tensor(a) variants: function, method - func: pixel_shuffle(Tensor self, int upscale_factor) -> Tensor - func: pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor - func: channel_shuffle(Tensor self, int groups) -> Tensor dispatch: CPU: channel_shuffle QuantizedCPU: channel_shuffle_quantized_cpu - func: native_channel_shuffle(Tensor self, int groups) -> Tensor dispatch: CPU: channel_shuffle_cpu CompositeImplicitAutograd: math_channel_shuffle - func: is_pinned(Tensor self, Device? device=None) -> bool variants: method dispatch: CUDA: is_pinned_cuda CompositeExplicitAutograd: is_pinned_default # TODO: add a copy kwarg that guarantees that the tensor is put into fresh # pinned memory - func: pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a) variants: method # Unlike pin_memory, this is guaranteed to give a new non-aliasing tensor - func: _pin_memory(Tensor self, Device? device=None) -> Tensor dispatch: CUDA: _pin_memory_cuda - func: pinverse(Tensor self, float rcond=1e-15) -> Tensor variants: function, method - func: poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor variants: function - func: rad2deg(Tensor self) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: rad2deg SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr - func: rad2deg_(Tensor(a!) self) -> Tensor(a!) variants: function, method dispatch: CompositeExplicitAutograd: rad2deg_ SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr_ - func: rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
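# Note: rad2deg/deg2rad are elementwise unit conversions (x * 180 / pi and its
# inverse). Rough sketch:
#
#   import math, torch
#   x = torch.tensor([0.0, math.pi / 2, math.pi])
#   assert torch.allclose(torch.rad2deg(x), torch.tensor([0.0, 90.0, 180.0]))
#   assert torch.allclose(torch.deg2rad(torch.rad2deg(x)), x)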
dispatch: CompositeExplicitAutograd: rad2deg_out SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr_out - func: deg2rad(Tensor self) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: deg2rad - func: deg2rad_(Tensor(a!) self) -> Tensor(a!) variants: function, method dispatch: CompositeExplicitAutograd: deg2rad_ - func: deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: CompositeExplicitAutograd: deg2rad_out - func: scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: rand.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor device_check: NoCheck device_guard: False - func: rand.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor device_check: NoCheck device_guard: False - func: rand(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: rand.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: rand.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) - func: rand.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) - func: rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor - func: randint(int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: randint.generator(int high, int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: randint.low(int low, int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: randint.low_generator(int low, int high, int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: randint.out(int high, int[] size, *, Tensor(a!) out) -> Tensor(a!) - func: randint.generator_out(int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) - func: randint.low_out(int low, int high, int[] size, *, Tensor(a!) out) -> Tensor(a!) - func: randint.low_generator_out(int low, int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) - func: randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor - func: randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor - func: randn(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: randn.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: randn.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor device_check: NoCheck device_guard: False - func: randn.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor device_check: NoCheck device_guard: False - func: randn.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) - func: randn.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) - func: randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor - func: randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!) - func: randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: randperm_out_cpu CUDA: randperm_out_cuda - func: range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, Meta: range_out CUDA: range_cuda_out - func: ravel(Tensor(a) self) -> Tensor(a) variants: function, method - func: reciprocal(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: reciprocal.out variants: function, method - func: reciprocal_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: reciprocal.out variants: function, method - func: reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: reciprocal_out - func: neg(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: neg.out variants: function, method dispatch: SparseCPU, SparseCUDA: neg_sparse SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr - func: neg_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: neg.out variants: function, method dispatch: SparseCPU, SparseCUDA: neg_sparse_ SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr_ - func: neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: neg_out SparseCPU, SparseCUDA: neg_out_sparse SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr_out # Alias for neg - func: negative(Tensor self) -> Tensor variants: function, method - func: negative_(Tensor(a!) self) -> Tensor(a!) variants: function, method - func: negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) - func: repeat(Tensor self, int[] repeats) -> Tensor variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too. dispatch: CompositeExplicitAutograd: repeat - func: repeat_interleave.Tensor(Tensor repeats, *, int? output_size=None) -> Tensor variants: function dispatch: CPU: repeat_interleave_cpu CUDA: repeat_interleave_cuda - func: repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? 
dim=None, *, int? output_size=None) -> Tensor variants: function, method - func: repeat_interleave.self_int(Tensor self, int repeats, int? dim=None, *, int? output_size=None) -> Tensor variants: function, method - func: reshape(Tensor(a) self, int[] shape) -> Tensor(a) variants: function, method device_check: NoCheck device_guard: False # NOTE [ _reshape_alias ] is meant to be used in the implementation of reshape. # They are not user-facing, hence the leading underscore. Please don't use it # anywhere else. - func: _reshape_alias(Tensor(a) self, int[] size, int[] stride) -> Tensor(a) variants: function, method device_check: NoCheck device_guard: False dispatch: CPU, CUDA, Meta, QuantizedCPU, QuantizedCUDA, ZeroTensor: _reshape_alias # We don't need to support mkldnn since this is handled explicitly by the reshape operator. - func: _mkldnn_reshape(Tensor self, int[] shape) -> Tensor device_check: NoCheck device_guard: False dispatch: MkldnnCPU: mkldnn_reshape - func: reshape_as(Tensor(a) self, Tensor other) -> Tensor(a) variants: method device_check: NoCheck device_guard: False - func: round(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: round.out variants: function, method dispatch: SparseCPU, SparseCUDA: round_sparse SparseCsrCPU, SparseCsrCUDA: round_sparse_csr - func: round_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: round.out variants: function, method dispatch: SparseCPU, SparseCUDA: round_sparse_ SparseCsrCPU, SparseCsrCUDA: round_sparse_csr_ - func: round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU: round_out CUDA: round_out SparseCPU, SparseCUDA: round_sparse_out SparseCsrCPU, SparseCsrCUDA: round_sparse_csr_out - func: round.decimals(Tensor self, *, int decimals) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: round.decimals_out variants: function, method - func: round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: round.decimals_out variants: function, method - func: round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU: round_decimals_out CUDA: round_decimals_out - func: rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor device_check: NoCheck # TensorIterator - func: rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: relu(Tensor self) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CPU, CUDA: relu MkldnnCPU: mkldnn_relu QuantizedCPU: relu_quantized_cpu - func: relu_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function, method dispatch: CPU, CUDA: relu_ MkldnnCPU: mkldnn_relu_ QuantizedCPU: relu_quantized_cpu_ - func: relu6(Tensor self) -> Tensor python_module: nn - func: relu6_(Tensor(a!) self) -> Tensor(a!) 
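# Note: relu clamps negatives to zero and relu6 additionally caps values at 6
# (roughly clamp(x, 0, 6)); the trailing-underscore variants are in-place.
# Illustrative sketch:
#
#   import torch
#   x = torch.randn(8) * 10
#   assert torch.allclose(torch.relu(x), x.clamp(min=0))
#   assert torch.allclose(torch.nn.functional.relu6(x), x.clamp(0, 6))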
python_module: nn - func: prelu(Tensor self, Tensor weight) -> Tensor variants: function, method dispatch: CPU: prelu_cpu CUDA: prelu_cuda - func: prelu_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor) variants: function, method dispatch: CPU: prelu_backward_cpu CUDA: prelu_backward_cuda - func: gelu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator python_module: nn dispatch: CPU: gelu_out_cpu CUDA: gelu_out_cuda - func: gelu(Tensor self) -> Tensor structured_delegate: gelu.out device_check: NoCheck # TensorIterator python_module: nn dispatch: MkldnnCPU: mkldnn_gelu QuantizedCPU: gelu_quantized_cpu - func: gelu_backward.grad_input(Tensor grad, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase python_module: nn dispatch: CPU: gelu_backward_out_cpu CUDA: gelu_backward_out_cuda - func: gelu_backward(Tensor grad, Tensor self) -> Tensor structured_delegate: gelu_backward.grad_input python_module: nn dispatch: MkldnnCPU: mkldnn_gelu_backward - func: infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor variants: function python_module: nn device_check: NoCheck device_guard: False - func: hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: hardshrink_out - func: hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor structured_delegate: hardshrink.out device_check: NoCheck # TensorIterator variants: function, method - func: hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: hardshrink_backward_out - func: hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor structured_delegate: hardshrink_backward.grad_input variants: function, method - func: rsqrt(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: rsqrt.out variants: function, method - func: rsqrt_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: rsqrt.out variants: function, method - func: rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: rsqrt_out - func: select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a) variants: function, method device_check: NoCheck device_guard: False - func: select.int(Tensor(a) self, int dim, int index) -> Tensor(a) variants: function, method device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: select - func: select_backward(Tensor grad_output, int[] input_sizes, int dim, int index) -> Tensor variants: function device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: select_backward - func: selu(Tensor self) -> Tensor device_check: NoCheck # TensorIterator - func: selu_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: celu(Tensor self, Scalar alpha=1.0) -> Tensor device_check: NoCheck # TensorIterator dispatch: CompositeExplicitAutograd: celu - func: celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!) 
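# Note: celu(x, alpha) is roughly max(0, x) + min(0, alpha * (exp(x / alpha) - 1)).
# For example (roughly):
#
#   import torch
#   x, alpha = torch.randn(5), 0.5
#   ref = x.clamp(min=0) + (alpha * ((x / alpha).exp() - 1)).clamp(max=0)
#   assert torch.allclose(torch.celu(x, alpha), ref)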
device_check: NoCheck # TensorIterator dispatch: CompositeExplicitAutograd: celu_ - func: silu(Tensor self) -> Tensor structured_delegate: silu.out python_module: nn dispatch: CompositeExplicitAutograd: silu - func: silu_(Tensor(a!) self) -> Tensor(a!) structured_delegate: silu.out python_module: nn dispatch: CompositeExplicitAutograd: silu_ - func: silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase python_module: nn dispatch: CPU, CUDA: silu_out - func: silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase python_module: nn dispatch: CPU, CUDA: silu_backward_out - func: silu_backward(Tensor grad_output, Tensor self) -> Tensor structured_delegate: silu_backward.grad_input python_module: nn dispatch: CompositeImplicitAutograd: math_silu_backward - func: mish(Tensor self) -> Tensor structured_delegate: mish.out python_module: nn dispatch: CompositeExplicitAutograd: mish - func: mish_(Tensor(a!) self) -> Tensor(a!) structured_delegate: mish.out python_module: nn dispatch: CompositeExplicitAutograd: mish_ - func: mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase python_module: nn dispatch: CPU, CUDA: mish_out - func: mish_backward(Tensor grad_output, Tensor self) -> Tensor python_module: nn dispatch: CPU, CUDA: mish_backward CompositeImplicitAutograd: math_mish_backward - func: sigmoid(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: sigmoid.out variants: function, method dispatch: QuantizedCPU: sigmoid_quantized_cpu MkldnnCPU: mkldnn_sigmoid - func: sigmoid_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: sigmoid.out variants: function, method dispatch: MkldnnCPU: mkldnn_sigmoid_ - func: sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: sigmoid_out - func: logit(Tensor self, float? eps=None) -> Tensor variants: function, method dispatch: CPU, CUDA: logit - func: logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!) variants: function, method dispatch: CPU, CUDA: logit_ - func: logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: logit_out - func: sin(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: sin.out variants: function, method dispatch: SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr SparseCPU, SparseCUDA: sin_sparse - func: sin_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: sin.out variants: function, method dispatch: SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr_ SparseCPU, SparseCUDA: sin_sparse_ - func: sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: sin_out SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr_out SparseCPU, SparseCUDA: sin_sparse_out - func: sinc(Tensor self) -> Tensor structured_delegate: sinc.out variants: function, method - func: sinc_(Tensor(a!) self) -> Tensor(a!) structured_delegate: sinc.out variants: function, method - func: sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
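# Note: sinc computes the normalized sinc, sin(pi * x) / (pi * x), with
# sinc(0) defined as 1. Rough sketch:
#
#   import math, torch
#   x = torch.tensor([0.0, 0.5])
#   expected = torch.tensor([1.0, math.sin(math.pi * 0.5) / (math.pi * 0.5)])
#   assert torch.allclose(torch.sinc(x), expected)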
structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: sinc_out - func: sinh(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: sinh.out variants: function, method dispatch: SparseCPU, SparseCUDA: sinh_sparse SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr - func: sinh_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: sinh.out variants: function, method dispatch: SparseCPU, SparseCUDA: sinh_sparse_ SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr_ - func: sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: sinh_out SparseCPU, SparseCUDA: sinh_sparse_out SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr_out # Returns a copy of this `Variable` that is detached from its autograd graph. # This method is OK to call if the `Variable` is a view. # # NOTE: Previously, if we change the tensor metadata (e.g. sizes / strides / # storage / storage_offset) of a tensor created from `detach()`, those metadata # in the original tensor will also be updated. However, the new behavior is that # those metadata changes to the detached tensor will not update the original tensor # anymore, and in the `detach()` function we need to set `allow_tensor_metadata_change_` # to false to make such changes explicitly illegal, in order to prevent users from # changing metadata of the detached tensor and expecting the original tensor to also # be updated. - func: detach(Tensor(a) self) -> Tensor(a) variants: function, method dispatch: CompositeExplicitAutograd: detach # Like `detach()`, but modifies this `Variable` in-place. This method may # only be called on non-view `Variable`s. You can use `is_view()` to check # this. If this `Variable` is a view, throws an `std::runtime_error()`. - func: detach_(Tensor(a!) self) -> Tensor(a!) variants: function, method tags: inplace_view dispatch: CompositeExplicitAutograd: detach_ - func: size.int(Tensor self, int dim) -> int variants: function device_check: NoCheck device_guard: False manual_cpp_binding: True - func: size.Dimname(Tensor self, Dimname dim) -> int variants: function, method device_check: NoCheck device_guard: False - func: slice.Tensor(Tensor(a) self, int dim=0, int? start=None, int? end=None, int step=1) -> Tensor(a) variants: function, method device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: slice - func: slice_backward(Tensor grad_output, int[] input_sizes, int dim, int start, int end, int step) -> Tensor variants: function device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: slice_backward - func: slice_scatter(Tensor self, Tensor src, int dim=0, int? start=None, int? 
end=None, int step=1) -> Tensor variants: function, method device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: slice_scatter - func: select_scatter(Tensor self, Tensor src, int dim, int index) -> Tensor variants: function, method device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: select_scatter - func: diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor variants: function, method device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: diagonal_scatter - func: slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet) variants: function, method dispatch: CompositeExplicitAutograd: slogdet - func: smm(Tensor self, Tensor mat2) -> Tensor variants: function, method # softmax allows positional dtype, unlike most operators, because kwonly is BC-breaking when loading jit models. - func: softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor variants: function, method - func: softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor variants: function, method - func: _softmax(Tensor self, int dim, bool half_to_float) -> Tensor structured_delegate: _softmax.out dispatch: MkldnnCPU: mkldnn_softmax - func: _softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) structured: True dispatch: CPU: softmax_cpu_out CUDA: softmax_cuda_out - func: _softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor structured_delegate: _softmax_backward_data.out - func: _softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!) structured: True dispatch: CPU: softmax_backward_cpu_out CUDA: softmax_backward_cuda_out - func: unsafe_split.Tensor(Tensor self, int split_size, int dim=0) -> Tensor[] variants: function, method device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: unsafe_split - func: split.Tensor(Tensor(a -> *) self, int split_size, int dim=0) -> Tensor(a)[] variants: function, method device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: split - func: unsafe_split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[] variants: function, method device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: unsafe_split_with_sizes - func: split_with_sizes(Tensor(a -> *) self, int[] split_sizes, int dim=0) -> Tensor(a)[] variants: function, method device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: split_with_sizes - func: hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] variants: function, method - func: hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] variants: function, method - func: vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] variants: function, method - func: vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] variants: function, method - func: dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] variants: function, method - func: dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] variants: function, method - func: squeeze(Tensor(a) self) -> Tensor(a) variants: function, method device_check: NoCheck device_guard: False dispatch: CPU, CUDA: squeeze QuantizedCPU, QuantizedCUDA: squeeze_quantized - func: squeeze.dim(Tensor(a) self, int dim) -> Tensor(a) variants: function, method device_check: NoCheck device_guard: False dispatch: CPU, 
CUDA: squeeze QuantizedCPU, QuantizedCUDA: squeeze_quantized - func: squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a) variants: function, method device_check: NoCheck device_guard: False - func: squeeze_(Tensor(a!) self) -> Tensor(a!) variants: method device_check: NoCheck device_guard: False tags: inplace_view dispatch: CompositeExplicitAutograd: squeeze_ - func: squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!) variants: method device_check: NoCheck device_guard: False tags: inplace_view dispatch: CompositeExplicitAutograd: squeeze_ - func: squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!) variants: method device_check: NoCheck device_guard: False tags: inplace_view - func: sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor variants: function, method - func: sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: _sspaddmm_out_only_sparse CUDA: _sspaddmm_out_only_sparse_cuda SparseCPU: _sspaddmm_out_cpu SparseCUDA: _sspaddmm_out_cuda - func: stack(Tensor[] tensors, int dim=0) -> Tensor dispatch: CompositeExplicitAutograd: stack - func: stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) dispatch: CompositeExplicitAutograd: stack_out - func: _stack(Tensor[] tensors, int dim=0) -> Tensor dispatch: # match the backends supported by _cat CPU: _stack_cpu CompositeExplicitAutograd: _stack - func: _stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) dispatch: # match the backends supported by _cat_out CPU: _stack_out_cpu CompositeExplicitAutograd: _stack_out - func: hstack(Tensor[] tensors) -> Tensor - func: hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) - func: vstack(Tensor[] tensors) -> Tensor - func: vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) - func: dstack(Tensor[] tensors) -> Tensor - func: dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) # The signature is designed to be consistent with librosa except that it is # missing the `pad_mode` and `center` arguments, which are taken care of at # `torch.functional.py`. They shall be moved here once we have mapping between # Python strings and C++ Enum in codegen. - func: stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor variants: function, method - func: istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor variants: function, method - func: stride.int(Tensor self, int dim) -> int variants: function device_check: NoCheck device_guard: False manual_cpp_binding: True - func: stride.Dimname(Tensor self, Dimname dim) -> int variants: function, method device_check: NoCheck device_guard: False - func: sum(Tensor self, *, ScalarType? dtype=None) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: sum - func: sum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor structured_delegate: sum.IntList_out device_check: NoCheck # TensorIterator variants: function, method - func: sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: sum.IntList_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) structured: True device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: sum_out - func: sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: nansum(Tensor self, *, ScalarType? dtype=None) -> Tensor variants: function, method dispatch: CPU, CUDA: nansum - func: nansum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor variants: function, method dispatch: CPU, CUDA: nansum - func: nansum.IntList_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: nansum_out - func: sum_to_size(Tensor self, int[] size) -> Tensor variants: method device_check: NoCheck device_guard: False - func: sqrt(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: sqrt.out variants: function, method dispatch: SparseCPU, SparseCUDA: sqrt_sparse SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr - func: sqrt_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: sqrt.out variants: function, method dispatch: SparseCPU, SparseCUDA: sqrt_sparse_ SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr_ - func: sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: sqrt_out SparseCPU, SparseCUDA: sqrt_sparse_out SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr_out - func: square(Tensor self) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: square_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function, method - func: square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: square_out - func: std(Tensor self, bool unbiased=True) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: std.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: std.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CPU, CUDA: std - func: std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) device_check: NoCheck # TensorIterator variants: function - func: std_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) device_check: NoCheck # TensorIterator variants: function - func: std_mean.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor) device_check: NoCheck # TensorIterator variants: function dispatch: CPU, CUDA: std_mean - func: std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) device_check: NoCheck # TensorIterator variants: function - func: std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor) device_check: NoCheck # TensorIterator variants: function - func: std.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
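# Note: the `.correction` overloads generalize `unbiased`: the divisor is
# N - correction, so unbiased=True corresponds to correction=1 (Bessel) and
# unbiased=False to correction=0; std_mean/var_mean return both statistics
# from a single pass. Illustrative sketch:
#
#   import torch
#   x = torch.randn(10)
#   assert torch.allclose(x.std(unbiased=False),
#                         ((x - x.mean()) ** 2).mean().sqrt())
#   s, m = torch.std_mean(x)   # same values as (x.std(), x.mean())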
device_check: NoCheck # TensorIterator - func: std.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: std_out - func: std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: std.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function - func: prod(Tensor self, *, ScalarType? dtype=None) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CPU, CUDA: prod - func: prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor structured_delegate: prod.int_out device_check: NoCheck # TensorIterator variants: function, method - func: prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) structured: True device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: prod_out - func: prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: t(Tensor(a) self) -> Tensor(a) device_check: NoCheck device_guard: False variants: function, method dispatch: CompositeExplicitAutograd: t - func: t_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck device_guard: False variants: method tags: inplace_view dispatch: CompositeExplicitAutograd: t_ - func: tan(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: tan.out variants: function, method dispatch: SparseCPU, SparseCUDA: tan_sparse SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr - func: tan_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: tan.out variants: function, method dispatch: SparseCPU, SparseCUDA: tan_sparse_ SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr_ - func: tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: tan_out SparseCPU, SparseCUDA: tan_sparse_out SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr_out - func: tanh(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: tanh.out variants: function, method dispatch: QuantizedCPU: tanh_quantized_cpu MkldnnCPU: mkldnn_tanh SparseCPU, SparseCUDA: tanh_sparse SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr - func: tanh_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: tanh.out variants: function, method dispatch: MkldnnCPU: mkldnn_tanh_ SparseCPU, SparseCUDA: tanh_sparse_ SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr_ - func: tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
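# Note: for structured unary ops such as tanh, the functional, in-place (tanh_)
# and `.out` forms delegate to one kernel; `.out` writes into a preallocated
# tensor. For example (roughly):
#
#   import torch
#   x = torch.randn(3)
#   y = torch.empty_like(x)
#   torch.tanh(x, out=y)
#   assert torch.allclose(y, x.tanh())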
device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: tanh_out SparseCPU, SparseCUDA: tanh_sparse_out SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr_out - func: tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor variants: function - func: tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!) variants: function dispatch: CPU, CUDA: tensordot_out # TODO: namespace threshold in 'nn' - func: threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor device_check: NoCheck # TensorIterator variants: function structured_delegate: threshold.out dispatch: QuantizedCPU: threshold_quantized_cpu - func: threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function structured_delegate: threshold.out - func: threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: threshold_out - func: threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: threshold_backward_out - func: threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor variants: function structured_delegate: threshold_backward.grad_input dispatch: MkldnnCPU: mkldnn_relu_backward - func: tile(Tensor self, int[] dims) -> Tensor variants: function, method - func: transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a) variants: function, method device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: transpose - func: transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a) variants: function, method device_check: NoCheck device_guard: False - func: _mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor device_check: NoCheck device_guard: False dispatch: MkldnnCPU: mkldnn_transpose - func: transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) variants: method device_check: NoCheck device_guard: False tags: inplace_view dispatch: CompositeExplicitAutograd: transpose_ - func: _mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) 
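# Note: transpose.int returns a view sharing storage with self, while
# transpose_ (tagged inplace_view) swaps the dims of self in place.
# Rough sketch:
#
#   import torch
#   x = torch.randn(2, 3)
#   y = x.transpose(0, 1)             # shape (3, 2), no copy
#   assert y.data_ptr() == x.data_ptr()
#   x[0, 1] = 7.0
#   assert y[1, 0] == 7.0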
device_check: NoCheck device_guard: False dispatch: MkldnnCPU: mkldnn_transpose_ - func: one_hot(Tensor self, int num_classes=-1) -> Tensor python_module: nn variants: function - func: flip(Tensor self, int[] dims) -> Tensor variants: function, method dispatch: CPU, QuantizedCPU, CUDA, QuantizedCUDA: flip - func: fliplr(Tensor self) -> Tensor variants: function, method - func: flipud(Tensor self) -> Tensor variants: function, method - func: roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor variants: function, method dispatch: CPU: roll_cpu CUDA: roll_cuda # default int[] value [0,1] should not add space after comma, since codegen parser uses ', ' to split args - func: rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: rot90 - func: trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor - func: trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor - func: trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor - func: trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor - func: _trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor dispatch: CompositeExplicitAutograd: _trilinear - func: triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor - func: trunc(Tensor self) -> Tensor structured_delegate: trunc.out device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: trunc SparseCPU, SparseCUDA: trunc_sparse SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr - func: trunc_(Tensor(a!) self) -> Tensor(a!) structured_delegate: trunc.out device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: trunc_ SparseCPU, SparseCUDA: trunc_sparse_ SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr_ - func: trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: trunc_out SparseCPU, SparseCUDA: trunc_sparse_out SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr_out # Alias for trunc - func: fix(Tensor self) -> Tensor variants: function, method - func: fix_(Tensor(a!) self) -> Tensor(a!) variants: function, method - func: fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) - func: type_as(Tensor self, Tensor other) -> Tensor variants: method - func: _has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool variants: function - func: _unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor) variants: function dispatch: CPU: _unique_cpu CUDA: _unique_cuda - func: unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) variants: function dispatch: CPU: unique_dim_cpu CUDA: unique_dim_cuda - func: unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? 
dim=None) -> (Tensor, Tensor, Tensor) variants: function dispatch: CPU: unique_consecutive_cpu CUDA: unique_consecutive_cuda - func: unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) variants: function dispatch: CPU: unique_dim_consecutive_cpu CUDA: unique_dim_consecutive_cuda # _unique and _unique_dim are fragile and modifying them easily cause internal break # the below operator is a temporary hack for adding return_counts support # Please don't rely on these two operators, they will be removed soon - func: _unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) variants: function dispatch: CPU: _unique2_cpu CUDA: _unique2_cuda - func: _unsafe_view(Tensor self, int[] size) -> Tensor dispatch: CompositeExplicitAutograd: _unsafe_view - func: unsqueeze(Tensor(a) self, int dim) -> Tensor(a) variants: function, method device_check: NoCheck device_guard: False dispatch: CPU, CUDA: unsqueeze SparseCPU, SparseCUDA: unsqueeze_sparse QuantizedCPU, QuantizedCUDA: unsqueeze_quantized - func: unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!) variants: method device_check: NoCheck device_guard: False tags: inplace_view dispatch: CompositeExplicitAutograd: unsqueeze_ - func: vander(Tensor x, int? N=None, bool increasing=False) -> Tensor - func: var(Tensor self, bool unbiased=True) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: var.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: var.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CPU, CUDA: var - func: var.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: var.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: var_out - func: var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: var.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function - func: var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) device_check: NoCheck # TensorIterator variants: function - func: var_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) device_check: NoCheck # TensorIterator variants: function - func: var_mean.correction(Tensor self, int[1]? dim, *, int? 
correction, bool keepdim=False) -> (Tensor, Tensor) device_check: NoCheck # TensorIterator variants: function dispatch: CPU, CUDA: var_mean - func: var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) device_check: NoCheck # TensorIterator variants: function - func: var_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor) device_check: NoCheck # TensorIterator variants: function - func: view_as(Tensor(a) self, Tensor other) -> Tensor(a) variants: method device_check: NoCheck device_guard: False # we define both of these because 'where' does the broadcast and '_s_where' doesn't; # this allows us to implicitly calculate the broadcast derivative, while only dealing with the # _s_where derivative. - func: where.self(Tensor condition, Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor variants: function - func: where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor variants: function - func: where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor variants: function - func: where(Tensor condition) -> Tensor[] device_check: NoCheck # TensorIterator variants: function - func: _s_where(Tensor condition, Tensor self, Tensor other) -> Tensor variants: function dispatch: CPU, CUDA: _s_where - func: norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor variants: function # VariableType::_weight_norm does not want to be given a gap in the autograd graph, # so we don't define "dispatch" variants for it. - func: _weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor variants: function - func: _weight_norm_cuda_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor) variants: function dispatch: CUDA: weight_norm_cuda - func: _weight_norm_cuda_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor) variants: function dispatch: CUDA: weight_norm_cuda_backward - func: _weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor) variants: function - func: zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor device_check: NoCheck device_guard: False - func: _efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor dispatch: CPU: _efficientzerotensor CUDA: _efficientzerotensor_cuda - func: zeros(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: zeros.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) - func: zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor - func: _standard_gamma_grad(Tensor self, Tensor output) -> Tensor variants: function dispatch: CPU: _standard_gamma_grad_cpu CUDA: _standard_gamma_grad_cuda - func: _standard_gamma(Tensor self, Generator? generator=None) -> Tensor variants: function dispatch: CPU: _s_gamma_cpu CUDA: _s_gamma_cuda - func: _dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor dispatch: CPU: _dirichlet_grad_cpu CUDA: _dirichlet_grad_cuda - func: _sample_dirichlet(Tensor self, Generator? 
generator=None) -> Tensor variants: function dispatch: CPU: _s_dirichlet_cpu CUDA: _s_dirichlet_cuda - func: poisson(Tensor self, Generator? generator=None) -> Tensor device_check: NoCheck # TensorIterator dispatch: CPU: _s_poisson_cpu CUDA: _s_poisson_cuda - func: binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor device_check: NoCheck # TensorIterator dispatch: CPU: _s_binomial_cpu CUDA: _s_binomial_cuda # When more variants get ported to native, this dispatch will get more # complicated - func: native_norm(Tensor self, Scalar p=2) -> Tensor dispatch: SparseCPU, SparseCUDA: norm_sparse - func: native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor dispatch: SparseCPU, SparseCUDA: norm_sparse # TODO: reduce signatures down to one when optional args is available - func: _sparse_sum(Tensor self) -> Tensor - func: _sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor - func: _sparse_sum.dim(Tensor self, int[1] dim) -> Tensor dispatch: CompositeExplicitAutograd: _sparse_sum - func: _sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor - func: _sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor dispatch: SparseCPU: _sparse_sum_backward_cpu SparseCUDA: _sparse_sum_backward_cuda - func: _sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor python_module: sparse variants: function - func: _sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor python_module: sparse variants: function - func: _sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor python_module: sparse dispatch: SparseCPU: softmax_sparse_cpu SparseCUDA: softmax_sparse_cuda - func: _sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor dispatch: SparseCPU: softmax_backward_sparse_cpu SparseCUDA: softmax_backward_sparse_cuda - func: _sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor python_module: sparse variants: function - func: _sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor python_module: sparse variants: function - func: _sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor python_module: sparse dispatch: SparseCPU: log_softmax_sparse_cpu SparseCUDA: log_softmax_sparse_cuda - func: _sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor dispatch: SparseCPU: log_softmax_backward_sparse_cpu SparseCUDA: log_softmax_backward_sparse_cuda - func: norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: norm - func: norm.Scalar(Tensor self, Scalar p=2) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: norm - func: norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor structured_delegate: norm.dtype_out device_check: NoCheck # TensorIterator variants: function, method dispatch: SparseCPU, SparseCUDA: sparse_dtype_norm - func: norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor structured_delegate: norm.out device_check: NoCheck # TensorIterator variants: function, method dispatch: SparseCPU, SparseCUDA: sparse_norm - func: norm.dtype_out(Tensor self, Scalar? 
p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) structured: True device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: norm_dtype_out - func: norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) structured: True device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: norm_out # These four redispatch in their implementation, so OK to be CompositeImplicitAutograd - func: norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent) variants: method, function dispatch: CompositeExplicitAutograd: frexp - func: frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent) dispatch: CPU, CUDA: frexp_out - func: frobenius_norm(Tensor self) -> Tensor variants: function - func: frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor variants: function - func: frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) variants: function - func: nuclear_norm(Tensor self, bool keepdim=False) -> Tensor variants: function - func: nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) variants: function - func: nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor variants: function - func: nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) variants: function - func: clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: clone SparseCPU, SparseCUDA: clone_sparse SparseCsrCPU, SparseCsrCUDA: clone_sparse_csr MkldnnCPU: mkldnn_clone QuantizedCPU, QuantizedCUDA: quantized_clone - func: positive(Tensor(a) self) -> Tensor(a) variants: function, method - func: resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!) use_const_ref_for_mutable_tensors: True variants: function, method dispatch: CompositeExplicitAutograd: resize_as_ - func: resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!) use_const_ref_for_mutable_tensors: True variants: function dispatch: SparseCPU, SparseCUDA: resize_as_sparse_ SparseCsrCPU, SparseCsrCUDA: resize_as_sparse_csr_ - func: zero_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: zero_ Meta: zero_meta_ SparseCPU, SparseCUDA: zero_sparse_ MkldnnCPU: mkldnn_zero_ - func: sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: sub_out SparseCPU, SparseCUDA: sub_out_sparse - func: sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor device_check: NoCheck # TensorIterator variants: function, method structured_delegate: sub.out dispatch: SparseCPU, SparseCUDA: sub_sparse - func: sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method structured_delegate: sub.out dispatch: SparseCPU, SparseCUDA: sub_sparse_ # For C++ only, until we have conversion from C++ numbers to Tensor - func: sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: sub - func: sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: sub_ # subtract, alias for sub - func: subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) - func: subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor variants: function, method - func: subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) variants: method # For C++ only, until we have conversion from C++ numbers to Tensor - func: subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor variants: function, method - func: subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) variants: method - func: rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor device_check: NoCheck # TensorIterator variants: function dispatch: CPU, CUDA: rsub - func: heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: heaviside_out - func: heaviside(Tensor self, Tensor values) -> Tensor device_check: NoCheck # TensorIterator variants: function, method structured_delegate: heaviside.out - func: heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method structured_delegate: heaviside.out # For C++ only, until we have conversion from C++ numbers to Tensor - func: rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor device_check: NoCheck # TensorIterator variants: function dispatch: CompositeExplicitAutograd: rsub # Functionally the same as addmm, but we give it a different derivative formula # that doesn't propagate gradients to non-present entries on sparse. - func: _sparse_addmm(Tensor self, Tensor sparse, Tensor dense, *, Scalar beta=1, Scalar alpha=1) -> Tensor python_module: sparse dispatch: CompositeExplicitAutograd: _sparse_addmm - func: sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) python_module: sparse dispatch: SparseCsrCUDA: sparse_sampled_addmm_out_sparse_csr_cuda - func: sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor python_module: sparse dispatch: SparseCsrCUDA: sparse_sampled_addmm_sparse_csr_cuda - func: addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
structured: True dispatch: CPU: addmm_out_cpu CUDA: addmm_out_cuda SparseCPU: addmm_out_sparse_dense_cpu SparseCUDA: addmm_out_sparse_dense_cuda SparseCsrCPU: addmm_out_sparse_csr_cpu SparseCsrCUDA: addmm_out_sparse_csr_cuda - func: addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor structured_delegate: addmm.out variants: function, method dispatch: SparseCPU: addmm_sparse_dense_cpu SparseCUDA: addmm_sparse_dense_cuda SparseCsrCPU, SparseCsrCUDA: addmm_sparse_csr_dense - func: addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) structured_delegate: addmm.out variants: method dispatch: # Warning! For whatever reason, the inplace sparse addmm is NON # broadcasting SparseCPU: s_addmm_sparse_dense_cpu_ SparseCUDA: s_addmm_sparse_dense_cuda_ # NOTE [ Sparse: autograd and API ] # # # Sparse Tensor Constructors # ~~~~~~~~~~~~~~~~~~~~~~~~~~ # # The API entry points to sparse tensor construction should be # `sparse_coo_tensor` and `_sparse_coo_tensor_unsafe`. Depending on whether the # indices and values tensors are given, they eventually dispatch to either # `sparse_coo_tensor_with_dims` or `sparse_coo_tensor_with_dims_and_tensors`. # # The autograd support for ctor is implemented on `sparse_coo_tensor_with_dims_and_tensors`. # # The API methods `sparse_coo_tensor` and `_sparse_coo_tensor_unsafe` # **must not** have specific type dispatches because otherwise codegen will # consider them as abstract methods (see Note [Abstract ATen methods]), dispatch # using **Tensor** type, and thus lose autograd tracking on the actual method # they dispatch to, e.g., `sparse_coo_tensor_with_dims_and_tensors`. # # # Sparse Methods API Design # ~~~~~~~~~~~~~~~~~~~~~~~~~ # # Goals: 1. Flexible API for users to write custom sparse ops # 2. ctor and member accessor with autograd support # # To achieve 1, we need to provide a set of *dangerous* APIs (dangerous in the # sense that misusing them will break sparse tensor invariants and may result in # unexpected behavior, e.g., crash). These methods are all prefixed with # underscore "_" to indicate that they should be used with care. We provide: # # + `_indices()`: returns the *raw* indices within the sparse tensor (not just # sharing storage). Any inplace operation will change the # actual indices, including t_, set_, as_strided_, resize_, # etc. # + `_values()`: returns the *raw* values within the sparse tensor. Similar # semantics as `_indices()` # + `_nnz()`: returns the number of non-zero entries. This will always be # determined by the shapes of indices and values. # + `_coalesced_(bool)`: inplace sets whether the tensor is coalesced, and # returns itself. # # These methods are very useful in writing new operations, e.g., a custom # autograd Function. # # We also provide other public *safe* APIs: # + `indices()`: returns a **view** of the indices tensor if the sparse tensor # is **coalesced**. # + `values()`: returns a **view** of the values tensor if the containing # sparse tensor is **coalesced**. # + `sparse_dim()`: number of sparse dimensions # + `dense_dim()`: number of dense dimensions # + `is_coalesced()`: whether the sparse tensor is coalesced # # `_indices()` and `_values()` should return the raw indices and values dense # tensors within a sparse tensor. They can be quite unsafe with inplace # operations like `t_()`, and expose uncoalesced indices and values. The public # recommended API is `indices()` and `values()`, both of which first check that # the tensor is coalesced and return views on those tensors.
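# For illustration only (this comment is not part of the schema): a minimal Python sketch of the
# safe vs. raw accessors described above, using made-up example data. For example:
#
#   import torch
#   i = torch.tensor([[0, 1], [1, 0]])             # one row per sparse dim, one column per entry
#   v = torch.tensor([3., 4.])
#   s = torch.sparse_coo_tensor(i, v, (2, 2)).coalesce()
#   assert s.is_coalesced() and s._nnz() == 2
#   idx, vals = s.indices(), s.values()            # safe views, valid because `s` is coalesced
#   raw_idx, raw_vals = s._indices(), s._values()  # raw accessors; use with care
#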
# # # Autograd Support # ~~~~~~~~~~~~~~~~ # # Autograd is supported on `values()` and sparse tensor ctor with indices and # values tensors. E.g., `torch.sparse_coo_tensor(i, v).values().sum()` is # differentiable w.r.t. `v`. # # NB: The `values()` and `_values()` operators are special in that they are # layout-aware, i.e., the output depends not just on the data it represents, but # also on the input layout details (in this case, the `indices` tensor). See # NOTE [ as_strided Backward and layout-aware/agnostic autograd ] in Functions.cpp # for discussion on layout-aware vs layout-agnostic autograd. Since PyTorch ops # operate in the layout-agnostic mode, similar to `as_strided`, backward of # these two operators needs to consider them in a layout-agnostic way: # + `values()`: # Input is coalesced. # We just pretend having `input.indices()` as an additional argument # `input_indices`, then forward is similar to # `input.to(kStrided).index_select(input_indices)` regardless of the layout. # Note that `values()` normally is layout-aware even if we constrain # ourselves on sparse inputs since it may include all zeros values entries # as "present" entries. # + `_values()`: # Input may be uncoalesced. # It is not straightforward to construct a layout-agnostic version because # duplicate indices entries may exist and additional parameterization is # needed to distribute the value into different values entries. Furthermore, # this op is intended to provide ways to write custom sparse ops, rather # than being used in the autograd graph, so it is marked as *non-differentiable* # in derivatives.yaml. # # Before reading the following, see NOTE [ Autograd Variable Views ] in # variable.h for details on views that are tracked by autograd, and views that # are not. # # Moreover, these methods return tensors that share storage with inputs, so we # mark these methods as view ops to support autograd history tracking. # The sparse tensor ctor output should technically be a view of both the input indices # and values tensors, but currently we only support setting as view of a single # Variable, so it is only a view of the values tensor. # TODO: clone indices in sparse tensor ctor. # # For other methods that return outputs that share storage with inputs, i.e., # `indices()` and `_indices()`, we mark their outputs as non-differentiable, so # the view relation is not tracked by autograd, but the version counter is still # shared. In other words, their outputs are non-differentiable views of the # sparse tensor. # FIXME: would be nicer if TensorOptions was optional based; not adding default arguments for options given # the default would never make sense. - func: sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor - func: sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor - func: _sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device?
device=None, bool? pin_memory=False) -> Tensor - func: sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: _sparse_coo_tensor_unsafe(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: _validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size) -> () - func: _validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> () - func: _sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor dispatch: SparseCPU, SparseCUDA: new_with_dims_sparse - func: _sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, int[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor dispatch: SparseCPU, SparseCUDA: new_with_dims_and_tensor_sparse - func: sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) use_const_ref_for_mutable_tensors: True variants: method dispatch: SparseCPU, SparseCUDA: sparse_resize_ - func: sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) use_const_ref_for_mutable_tensors: True variants: method dispatch: SparseCPU, SparseCUDA: sparse_resize_and_clear_ - func: sparse_mask(Tensor self, Tensor mask) -> Tensor variants: method dispatch: SparseCPU: sparse_mask_cpu SparseCUDA: sparse_mask_cuda - func: _to_cpu(Tensor[] tensors) -> Tensor[] variants: function - func: to_dense(Tensor self, ScalarType? dtype=None) -> Tensor variants: method dispatch: SparseCPU, SparseCUDA, SparseCsrCPU, SparseCsrCUDA: sparse_to_dense MkldnnCPU: mkldnn_to_dense - func: to_dense_backward(Tensor grad, Tensor input) -> Tensor - func: sparse_dim(Tensor self) -> int variants: method dispatch: SparseCPU, SparseCUDA: sparse_dim_sparse device_check: NoCheck device_guard: False # legacy method - func: _dimI(Tensor self) -> int variants: method dispatch: SparseCPU, SparseCUDA: sparse_dim_sparse device_check: NoCheck device_guard: False - func: dense_dim(Tensor self) -> int variants: method dispatch: SparseCPU, SparseCUDA: dense_dim_sparse device_check: NoCheck device_guard: False # legacy method - func: _dimV(Tensor self) -> int variants: method dispatch: SparseCPU, SparseCUDA: dense_dim_sparse device_check: NoCheck device_guard: False - func: _nnz(Tensor self) -> int variants: method dispatch: SparseCPU, SparseCUDA: _nnz_sparse SparseCsrCPU, SparseCsrCUDA: _nnz_sparse_csr device_check: NoCheck device_guard: False # NOTE: [ coalesce autograd ] # coalesce returns self directly for already coalesced sparse tensors. # This means coalesce cannot have a derivative registered, otherwise it creates # circular references in the autograd graph (see gh-52874). 
# Instead, the derivative is registered on the slow-path "_coalesce" - func: coalesce(Tensor(a) self) -> Tensor(a) variants: method - func: _coalesce(Tensor self) -> Tensor dispatch: SparseCPU: _coalesce_sparse_cpu SparseCUDA: _coalesce_sparse_cuda - func: is_coalesced(Tensor self) -> bool variants: method dispatch: SparseCPU, SparseCUDA: is_coalesced_sparse device_check: NoCheck device_guard: False - func: _indices(Tensor(a) self) -> Tensor(a) variants: method dispatch: SparseCPU, SparseCUDA: _indices_sparse device_check: NoCheck device_guard: False - func: _values(Tensor(a) self) -> Tensor(a) variants: method dispatch: SparseCPU, SparseCUDA: _values_sparse device_check: NoCheck device_guard: False # This method doesn't do any check but only directly sets the flag. So it can be # a bit unsafe. Similar to _indices and _values, this is useful for implementing # custom sparse operations in Python/C++ extension. - func: _coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!) variants: method dispatch: SparseCPU, SparseCUDA: _coalesced_sparse_ device_check: NoCheck device_guard: False - func: indices(Tensor(a) self) -> Tensor(a) variants: method dispatch: SparseCPU, SparseCUDA: indices_sparse device_check: NoCheck device_guard: False - func: values(Tensor(a) self) -> Tensor(a) variants: method dispatch: SparseCPU, SparseCUDA: values_sparse SparseCsrCPU, SparseCsrCUDA: values_sparse_csr device_check: NoCheck device_guard: False - func: crow_indices(Tensor(a) self) -> Tensor(a) variants: method dispatch: SparseCsrCPU, SparseCsrCUDA: crow_indices_sparse_csr device_check: NoCheck device_guard: False - func: col_indices(Tensor(a) self) -> Tensor(a) variants: method dispatch: SparseCsrCPU, SparseCsrCUDA: col_indices_sparse_csr device_check: NoCheck device_guard: False - func: hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) dispatch: SparseCPU: hspmm_out_sparse_cpu SparseCUDA: hspmm_out_sparse_cuda - func: hspmm(Tensor mat1, Tensor mat2) -> Tensor dispatch: SparseCPU: hspmm_sparse_cpu SparseCUDA: hspmm_sparse_cuda - func: copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) device_check: NoCheck # Allows copy into different device variants: function dispatch: SparseCPU, SparseCUDA: copy_sparse_ - func: unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[] variants: function, method dispatch: CompositeExplicitAutograd: unbind - func: unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[] variants: function, method - func: to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor variants: method dispatch: CPU, CUDA: dense_to_sparse - func: to_sparse(Tensor self) -> Tensor variants: method dispatch: CPU, CUDA: dense_to_sparse - func: to_mkldnn(Tensor self, ScalarType? 
dtype=None) -> Tensor variants: method dispatch: CPU: dense_to_mkldnn - func: mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1) -> Tensor variants: function python_module: nn dispatch: MkldnnCPU: mkldnn_reorder_conv2d_weight - func: mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor variants: function python_module: nn dispatch: MkldnnCPU: mkldnn_reorder_conv3d_weight - func: to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor - func: quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor variants: function dispatch: CPU, CUDA: quantize_per_tensor_dynamic - func: quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor variants: function dispatch: CPU, CUDA: quantize_per_tensor - func: quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor variants: function dispatch: CPU, CUDA: quantize_per_tensor_tensor_qparams - func: quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[] variants: function dispatch: CPU: quantize_per_tensor_list_cpu - func: quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor variants: function dispatch: CPU, CUDA: quantize_per_channel - func: dequantize.self(Tensor self) -> Tensor variants: function, method dispatch: CPU, CUDA: dequantize_cpu_or_cuda QuantizedCPU, QuantizedCUDA: dequantize_quantized - func: dequantize.tensors(Tensor[] tensors) -> Tensor[] variants: function dispatch: QuantizedCPU: dequantize_tensors_quantized_cpu - func: q_scale(Tensor self) -> float variants: function, method dispatch: QuantizedCPU, QuantizedCUDA: q_scale_quant - func: q_zero_point(Tensor self) -> int variants: function, method dispatch: QuantizedCPU, QuantizedCUDA: q_zero_point_quant - func: q_per_channel_scales(Tensor self) -> Tensor variants: function, method dispatch: QuantizedCPU, QuantizedCUDA: q_per_channel_scales - func: q_per_channel_zero_points(Tensor self) -> Tensor variants: function, method dispatch: QuantizedCPU, QuantizedCUDA: q_per_channel_zero_points - func: q_per_channel_axis(Tensor self) -> int variants: function, method dispatch: QuantizedCPU, QuantizedCUDA: q_per_channel_axis - func: int_repr(Tensor self) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: QuantizedCPU: int_repr_quantized_cpu QuantizedCUDA: int_repr_quantized_cuda - func: _make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor dispatch: CPU: make_per_tensor_quantized_tensor_cpu CUDA: make_per_tensor_quantized_tensor_cuda - func: _make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor dispatch: CPU: make_per_channel_quantized_tensor_cpu CUDA: make_per_channel_quantized_tensor_cuda - func: qscheme(Tensor self) -> QScheme variants: method dispatch: QuantizedCPU, QuantizedCUDA: qscheme_quant - func: fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor device_check: NoCheck # TensorIterator variants: function - func: fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor device_check: NoCheck # TensorIterator variants: function - func: fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, 
int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask) variants: function dispatch: CPU, CUDA: fake_quantize_per_tensor_affine_cachemask - func: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask) variants: function dispatch: CPU, CUDA: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams - func: fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor variants: function - func: _fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor variants: function dispatch: CPU, CUDA: _fake_quantize_learnable_per_tensor_affine - func: _fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor) variants: function - func: fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor device_check: NoCheck # TensorIterator variants: function - func: fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask) variants: function dispatch: CPU, CUDA: fake_quantize_per_channel_affine_cachemask - func: fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor variants: function - func: _fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor variants: function dispatch: CPU, CUDA: _fake_quantize_learnable_per_channel_affine - func: _fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor) variants: function - func: fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor variants: function - func: _fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask) dispatch: CPU: fused_moving_avg_obs_fake_quant_cpu CUDA: fused_moving_avg_obs_fake_quant_cuda - func: _choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int) variants: function - func: _saturate_weight_to_fp16(Tensor weight) -> Tensor variants: function - func: choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor) variants: function - func: _autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a) variants: method device_guard: False - func: _autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a) variants: method device_guard: False - func: _to_copy(Tensor self, *, ScalarType? 
dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: _to_copy # to(Device) must not exist because all constructors of Device also work for # TensorOptions. Otherwise, an ambiguity error is thrown. # See NOTE [ TensorOptions Constructors ]. - func: to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) variants: method device_check: NoCheck device_guard: False - func: to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) variants: method device_check: NoCheck device_guard: False - func: to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) variants: method device_check: NoCheck device_guard: False - func: to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) variants: method device_check: NoCheck device_guard: False - func: meshgrid(Tensor[] tensors) -> Tensor[] # TODO: Two weeks after this lands, combine these two overloads, # making "indexing" optional. These are temporarily distinct for # forward-compatibility reasons. - func: meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[] - func: cartesian_prod(Tensor[] tensors) -> Tensor variants: function - func: combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor variants: function - func: item(Tensor self) -> Scalar variants: method - func: result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType variants: function - func: result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType variants: function - func: result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType variants: function - func: result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType - func: can_cast(ScalarType from, ScalarType to) -> bool variants: function - func: promote_types(ScalarType type1, ScalarType type2) -> ScalarType variants: function # NB: Does NOT check precondition that numel == 1 - func: _local_scalar_dense(Tensor self) -> Scalar dispatch: CPU: _local_scalar_dense_cpu CUDA: _local_scalar_dense_cuda variants: function # Fused RNN kernels - func: _thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor) dispatch: CUDA: _thnn_fused_lstm_cell_cuda - func: _thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) dispatch: CUDA: _thnn_fused_lstm_cell_backward_cuda - func: _thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor) - func: _thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor?
hidden_bias=None) -> (Tensor, Tensor) dispatch: CUDA: _thnn_fused_gru_cell_cuda - func: _thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) dispatch: CUDA: _thnn_fused_gru_cell_backward_cuda - func: _thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) # RNN cells and layers - func: lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) - func: lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor) - func: gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) - func: gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) - func: rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) - func: rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) - func: rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) - func: rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) - func: lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor) - func: gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor - func: rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor - func: rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor # Quantized RNN layer registration has been moved to C10 dispatch in `RNN.cpp` # Quantized RNN layers # - func: quantized_lstm(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor) # - func: quantized_lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, ScalarType? 
dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor) # Quantized GRU layers # - func: quantized_gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) # # - func: quantized_gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) # # Quantized RNN cells - func: quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor) - func: quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor - func: quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor - func: quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor # PackedSequence utilities - func: _pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor) dispatch: CompositeExplicitAutograd: _pack_padded_sequence - func: _pack_padded_sequence_backward(Tensor grad, int[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor - func: _pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor) # wrappers for legacy TH methods - func: set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!) variants: method device_check: NoCheck device_guard: False dispatch: CPU, CUDA: set_ - func: set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!) variants: method device_check: NoCheck device_guard: False dispatch: CPU: set_storage_cpu_ CUDA: set_storage_cuda_ QuantizedCPU, QuantizedCUDA: set_storage_quantized_ - func: set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!) variants: method device_check: NoCheck device_guard: False dispatch: CPU, CUDA: set_tensor_ - func: set_(Tensor(a!) self) -> Tensor(a!) variants: method dispatch: CPU: set_cpu_ CUDA: set_cuda_ - func: is_set_to(Tensor self, Tensor tensor) -> bool variants: method device_check: NoCheck device_guard: False dispatch: CPU, CUDA: is_set_to - func: masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU: masked_fill__cpu CUDA: masked_fill__cuda - func: masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: masked_fill - func: masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!) 
device_check: NoCheck # TensorIterator variants: method dispatch: CPU: masked_fill__cpu CUDA: masked_fill__cuda - func: masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: masked_fill - func: masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!) variants: method dispatch: CPU: masked_scatter__cpu CUDA: masked_scatter__cuda - func: masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor variants: function, method dispatch: CompositeExplicitAutograd: masked_scatter - func: _masked_softmax(Tensor self, Tensor mask) -> Tensor dispatch: CUDA: masked_softmax_cuda CPU: masked_softmax_cpu - func: view(Tensor(a) self, int[] size) -> Tensor(a) variants: method device_check: NoCheck device_guard: False dispatch: ZeroTensor, CPU, CUDA, Meta, QuantizedCPU, QuantizedCUDA: view MkldnnCPU: mkldnn_view # Warning: If you want to change the name or overload name of this # operator, you might also want to change the `isBlockListedSchema` # function in `torch/csrc/jit/frontend/schema_matching.cpp`. # The name and overload name of this operator are hardcoded in that # function in order to work around a bug: # https://github.com/pytorch/pytorch/issues/47964 - func: view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a) variants: method device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: view_dtype - func: put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!) variants: method dispatch: CPU, CUDA: put_ - func: put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor variants: function, method - func: index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) structured: True variants: function precomputed: - dim -> int dim dispatch: CPU: index_add_cpu_out CUDA: index_add_cuda_out - func: index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!) structured_delegate: index_add.out variants: method - func: index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor structured_delegate: index_add.out variants: function, method - func: index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor variants: function, method - func: index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU: index_fill_ CUDA: index_fill_ - func: index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: index_fill - func: index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: index_fill_ - func: index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor device_check: NoCheck # TensorIterator variants: function, method dispatch: CompositeExplicitAutograd: index_fill - func: index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method - func: index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)
device_check: NoCheck # TensorIterator variants: method - func: index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor device_check: NoCheck # TensorIterator variants: function, method - func: scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor structured_delegate: scatter.src_out variants: function, method - func: scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) structured_delegate: scatter.src_out variants: method - func: scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) structured: True variants: function dispatch: CPU, CUDA: scatter_src_out - func: scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor structured_delegate: scatter.value_out variants: function, method - func: scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) structured_delegate: scatter.value_out variants: method - func: scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) structured: True variants: function dispatch: CPU, CUDA: scatter_value_out - func: scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor structured_delegate: scatter.reduce_out variants: function, method - func: scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!) structured_delegate: scatter.reduce_out variants: method - func: scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!) structured: True variants: function dispatch: CPU, CUDA: scatter_reduce_out - func: scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor structured_delegate: scatter.value_reduce_out variants: function, method - func: scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!) structured_delegate: scatter.value_reduce_out variants: method - func: scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!) structured: True variants: function dispatch: CPU, CUDA: scatter_value_reduce_out - func: scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor variants: function, method - func: scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor variants: function, method - func: scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor structured_delegate: scatter_add.out variants: function, method - func: scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) structured_delegate: scatter_add.out variants: method - func: scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) structured: True variants: function dispatch: CPU, CUDA: scatter_add - func: scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor variants: function, method - func: scatter_reduce.two(Tensor self, int dim, Tensor index, str reduce, *, int? output_size=None) -> Tensor variants: function, method dispatch: CPU: scatter_reduce_two_cpu - func: eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) 
structured_delegate: eq.Scalar_out device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: eq_ - func: eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) structured_delegate: eq.Tensor_out device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: eq_ - func: bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase variants: function dispatch: CPU, CUDA: bitwise_and_out - func: bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function dispatch: CompositeExplicitAutograd: bitwise_and_out - func: bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function dispatch: CompositeExplicitAutograd: bitwise_and - func: bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function structured_delegate: bitwise_and.Tensor_out - func: bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method - func: bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method structured_delegate: bitwise_and.Tensor_out - func: __and__.Scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function - func: __and__.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function - func: __iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method - func: __iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method - func: bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase variants: function dispatch: CPU, CUDA: bitwise_or_out - func: bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function dispatch: CompositeExplicitAutograd: bitwise_or_out - func: bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function - func: bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function structured_delegate: bitwise_or.Tensor_out - func: bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method - func: bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method structured_delegate: bitwise_or.Tensor_out - func: __or__.Scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function - func: __or__.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function - func: __ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method - func: __ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method - func: bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase variants: function dispatch: CPU, CUDA: bitwise_xor_out - func: bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function dispatch: CompositeExplicitAutograd: bitwise_xor_out - func: bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function - func: bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function structured_delegate: bitwise_xor.Tensor_out - func: bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method - func: bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method structured_delegate: bitwise_xor.Tensor_out - func: __xor__.Scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function - func: __xor__.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function - func: __ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method - func: __ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method - func: __lshift__.Scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: __lshift__ - func: __lshift__.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: __lshift__ - func: __ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: __ilshift__ - func: __ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: __ilshift__ - func: bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: function, method structured_delegate: bitwise_left_shift.Tensor_out - func: bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method structured_delegate: bitwise_left_shift.Tensor_out - func: bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: bitwise_left_shift_out - func: bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: bitwise_left_shift - func: bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: bitwise_left_shift_ - func: bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
device_check: NoCheck # TensorIterator variants: function dispatch: CPU, CUDA: bitwise_left_shift_out - func: bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: function dispatch: CPU, CUDA: bitwise_left_shift - func: __rshift__.Scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: __rshift__ - func: __rshift__.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: __rshift__ - func: __irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: __irshift__ - func: __irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: __irshift__ - func: bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: function, method structured_delegate: bitwise_right_shift.Tensor_out - func: bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method structured_delegate: bitwise_right_shift.Tensor_out - func: bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: bitwise_right_shift_out - func: bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: bitwise_right_shift - func: bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: bitwise_right_shift_ - func: bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: function dispatch: CPU, CUDA: bitwise_right_shift_out - func: bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: function dispatch: CPU, CUDA: bitwise_right_shift - func: tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) structured_delegate: tril.out variants: method - func: triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) structured_delegate: triu.out variants: method - func: digamma_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: digamma.out variants: method - func: lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method structured_delegate: lerp.Scalar_out - func: lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method structured_delegate: lerp.Tensor_out - func: addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) variants: method dispatch: CPU, CUDA: addbmm_ - func: addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: addbmm_out - func: addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor variants: method, function dispatch: CPU, CUDA: addbmm - func: random_.from(Tensor(a!) self, int from, int? to, *, Generator? 
generator=None) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: random_ Meta: random_meta_ - func: random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: random_ Meta: random_meta_ - func: random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: random_ Meta: random_meta_ - func: uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: uniform_ Meta: uniform_meta_ - func: cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: cauchy_ - func: log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: log_normal_ - func: exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: exponential_ - func: geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: geometric_ # wrappers for TH functions - func: diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: diag_cpu_out CUDA: diag_cuda_out - func: diag(Tensor self, int diagonal=0) -> Tensor variants: method, function dispatch: CompositeExplicitAutograd: diag - func: diag_backward(Tensor grad, int[] input_sizes, int diagonal) -> Tensor variants: function device_check: NoCheck device_guard: False - func: cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) - func: cross(Tensor self, Tensor other, int? dim=None) -> Tensor variants: method, function - func: triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) structured: True dispatch: CPU: triu_cpu CUDA: triu_cuda - func: triu(Tensor self, int diagonal=0) -> Tensor structured_delegate: triu.out variants: method, function - func: tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) structured: True dispatch: CPU: tril_cpu CUDA: tril_cuda - func: tril(Tensor self, int diagonal=0) -> Tensor structured_delegate: tril.out variants: method, function - func: tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor dispatch: CPU: tril_indices_cpu CUDA: tril_indices_cuda - func: triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor dispatch: CPU: triu_indices_cpu CUDA: triu_indices_cuda - func: trace(Tensor self) -> Tensor variants: method, function dispatch: CPU: trace_cpu CUDA: trace_cuda - func: trace_backward(Tensor grad, int[] sizes) -> Tensor variants: function device_check: NoCheck device_guard: False - func: ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: ne_Scalar_out QuantizedCPU: ne_out_quantized_cpu - func: ne.Scalar(Tensor self, Scalar other) -> Tensor structured_delegate: ne.Scalar_out device_check: NoCheck # TensorIterator variants: method, function dispatch: QuantizedCPU: ne_quantized_cpu - func: ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: ne_Tensor_out QuantizedCPU: ne_out_quantized_cpu - func: ne.Tensor(Tensor self, Tensor other) -> Tensor structured_delegate: ne.Tensor_out device_check: NoCheck # TensorIterator variants: method, function dispatch: QuantizedCPU: ne_quantized_cpu - func: ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) structured_delegate: ne.Scalar_out device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: ne_ - func: ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) structured_delegate: ne.Tensor_out device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: ne_ # not_equal, alias for torch.ne - func: not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - func: not_equal.Scalar(Tensor self, Scalar other) -> Tensor variants: method, function - func: not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - func: not_equal.Tensor(Tensor self, Tensor other) -> Tensor variants: method, function - func: not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) variants: method - func: not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) variants: method - func: eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: eq_Scalar_out QuantizedCPU: eq_out_quantized_cpu - func: eq.Scalar(Tensor self, Scalar other) -> Tensor structured_delegate: eq.Scalar_out device_check: NoCheck # TensorIterator variants: method, function dispatch: QuantizedCPU: eq_quantized_cpu - func: eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: eq_Tensor_out QuantizedCPU: eq_out_quantized_cpu - func: eq.Tensor(Tensor self, Tensor other) -> Tensor structured_delegate: eq.Tensor_out device_check: NoCheck # TensorIterator variants: method, function dispatch: QuantizedCPU: eq_quantized_cpu - func: ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: ge_Scalar_out QuantizedCPU: ge_out_quantized_cpu - func: ge.Scalar(Tensor self, Scalar other) -> Tensor structured_delegate: ge.Scalar_out device_check: NoCheck # TensorIterator variants: method, function dispatch: QuantizedCPU: ge_quantized_cpu - func: ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: ge_Tensor_out QuantizedCPU: ge_out_quantized_cpu - func: ge.Tensor(Tensor self, Tensor other) -> Tensor structured_delegate: ge.Tensor_out device_check: NoCheck # TensorIterator variants: method, function dispatch: QuantizedCPU: ge_quantized_cpu - func: ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) structured_delegate: ge.Scalar_out device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: ge_ - func: ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) structured_delegate: ge.Tensor_out device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: ge_ # greater_equal, alias for torch.ge - func: greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - func: greater_equal.Scalar(Tensor self, Scalar other) -> Tensor variants: method, function - func: greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - func: greater_equal.Tensor(Tensor self, Tensor other) -> Tensor variants: method, function - func: greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) variants: method - func: greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) variants: method - func: le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: le_Scalar_out QuantizedCPU: le_out_quantized_cpu - func: le.Scalar(Tensor self, Scalar other) -> Tensor structured_delegate: le.Scalar_out device_check: NoCheck # TensorIterator variants: method, function dispatch: QuantizedCPU: le_quantized_cpu - func: le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: le_Tensor_out QuantizedCPU: le_out_quantized_cpu - func: le.Tensor(Tensor self, Tensor other) -> Tensor structured_delegate: le.Tensor_out device_check: NoCheck # TensorIterator variants: method, function dispatch: QuantizedCPU: le_quantized_cpu - func: le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) structured_delegate: le.Scalar_out device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: le_ - func: le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) structured_delegate: le.Tensor_out device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: le_ # less_equal, alias for torch.le - func: less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - func: less_equal.Scalar(Tensor self, Scalar other) -> Tensor variants: method, function - func: less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - func: less_equal.Tensor(Tensor self, Tensor other) -> Tensor variants: method, function - func: less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) variants: method - func: less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) variants: method - func: gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: gt_Scalar_out QuantizedCPU: gt_out_quantized_cpu - func: gt.Scalar(Tensor self, Scalar other) -> Tensor structured_delegate: gt.Scalar_out device_check: NoCheck # TensorIterator variants: method, function dispatch: QuantizedCPU: gt_quantized_cpu - func: gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: gt_Tensor_out QuantizedCPU: gt_out_quantized_cpu - func: gt.Tensor(Tensor self, Tensor other) -> Tensor structured_delegate: gt.Tensor_out device_check: NoCheck # TensorIterator variants: method, function dispatch: QuantizedCPU: gt_quantized_cpu - func: gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) structured_delegate: gt.Scalar_out device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: gt_ - func: gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) structured_delegate: gt.Tensor_out device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: gt_ # greater, alias for torch.gt - func: greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - func: greater.Scalar(Tensor self, Scalar other) -> Tensor variants: method, function - func: greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - func: greater.Tensor(Tensor self, Tensor other) -> Tensor variants: method, function - func: greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) variants: method - func: greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) variants: method - func: lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: lt_Scalar_out QuantizedCPU: lt_out_quantized_cpu - func: lt.Scalar(Tensor self, Scalar other) -> Tensor structured_delegate: lt.Scalar_out device_check: NoCheck # TensorIterator variants: method, function dispatch: QuantizedCPU: lt_quantized_cpu - func: lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: lt_Tensor_out QuantizedCPU: lt_out_quantized_cpu - func: lt.Tensor(Tensor self, Tensor other) -> Tensor structured_delegate: lt.Tensor_out device_check: NoCheck # TensorIterator variants: method, function dispatch: QuantizedCPU: lt_quantized_cpu - func: lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) structured_delegate: lt.Scalar_out device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: lt_ - func: lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) structured_delegate: lt.Tensor_out device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: lt_ # less, alias for torch.lt - func: less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) - func: less.Scalar(Tensor self, Scalar other) -> Tensor variants: method, function - func: less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - func: less.Tensor(Tensor self, Tensor other) -> Tensor variants: method, function - func: less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) variants: method - func: less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
variants: method - func: take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: take_out - func: take(Tensor self, Tensor index) -> Tensor variants: method, function dispatch: CPU, CUDA: take - func: take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) - func: take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor variants: method, function - func: index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, QuantizedCPU: index_select_out_cpu_ CUDA, QuantizedCUDA: index_select_out_cuda - func: index_select(Tensor self, int dim, Tensor index) -> Tensor variants: method, function dispatch: CPU: index_select_cpu_ QuantizedCPU: index_select_quantized_cpu_ CUDA, QuantizedCUDA: index_select_cuda SparseCPU: index_select_sparse SparseCUDA: index_select_sparse - func: index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) - func: index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor variants: method, function - func: index_select_backward(Tensor grad, int[] self_sizes, int dim, Tensor index) -> Tensor variants: function device_check: NoCheck device_guard: False - func: masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: masked_select_out_cpu CUDA: masked_select_out_cuda - func: masked_select(Tensor self, Tensor mask) -> Tensor variants: method, function dispatch: CPU: masked_select_cpu CUDA: masked_select_cuda - func: masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor variants: function device_check: NoCheck device_guard: False - func: nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: nonzero_out_cpu CUDA: nonzero_out_cuda - func: nonzero(Tensor self) -> Tensor variants: method, function dispatch: CPU: nonzero_cpu CUDA: nonzero_cuda - func: nonzero_numpy(Tensor self) -> Tensor[] variants: method, function - func: argwhere(Tensor self) -> Tensor variants: method, function - func: gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) structured: True dispatch: CPU, CUDA: gather_out - func: gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor variants: method, function structured_delegate: gather.out - func: gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor variants: function device_check: NoCheck device_guard: False - func: gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) - func: gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor variants: method, function - func: _gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor - func: addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: addcmul_out - func: addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor structured_delegate: addcmul.out device_check: NoCheck # TensorIterator variants: method, function - func: addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) 
structured_delegate: addcmul.out device_check: NoCheck # TensorIterator variants: method - func: addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: addcdiv_out - func: addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor structured_delegate: addcdiv.out device_check: NoCheck # TensorIterator variants: method, function - func: addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) structured_delegate: addcdiv.out device_check: NoCheck # TensorIterator variants: method - func: cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, float label_smoothing=0.0) -> Tensor python_module: nn - func: lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR) dispatch: CPU: legacy_lstsq_out CUDA: legacy_lstsq_out_cuda - func: lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR) variants: method, function dispatch: CPU: legacy_lstsq CUDA: legacy_lstsq_cuda - func: triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient) structured: True dispatch: CPU, CUDA: triangular_solve_out SparseCsrCPU: triangular_solve_out_sparse_csr_cpu SparseCsrCUDA: triangular_solve_out_sparse_csr_cuda - func: triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient) structured_delegate: triangular_solve.X variants: method, function - func: _linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> () dispatch: CompositeExplicitAutograd: _linalg_check_errors - func: linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!) python_module: linalg dispatch: CPU, CUDA: linalg_solve_triangular_out - func: linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor python_module: linalg variants: method, function dispatch: CPU, CUDA: linalg_solve_triangular - func: symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) dispatch: CompositeExplicitAutograd: symeig_out - func: symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor eigenvalues, Tensor eigenvectors) variants: method, function dispatch: CompositeExplicitAutograd: symeig - func: _symeig_helper(Tensor self, bool eigenvectors, bool upper) -> (Tensor, Tensor) variants: function dispatch: CPU: _symeig_helper_cpu CUDA: _symeig_helper_cuda - func: eig.e(Tensor self, bool eigenvectors=False, *, Tensor(a!) e, Tensor(b!) v) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) dispatch: CompositeExplicitAutograd: eig_out - func: eig(Tensor self, bool eigenvectors=False) -> (Tensor eigenvalues, Tensor eigenvectors) variants: method, function dispatch: CompositeExplicitAutograd: eig - func: svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) 
V) - func: svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V) variants: method, function # swapaxes, alias for transpose - func: swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a) variants: function, method device_check: NoCheck device_guard: False - func: swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!) variants: method device_check: NoCheck device_guard: False tags: inplace_view # swapdims, alias for transpose - func: swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a) variants: function, method device_check: NoCheck device_guard: False - func: swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) variants: method device_check: NoCheck device_guard: False tags: inplace_view - func: cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: cholesky_out - func: cholesky(Tensor self, bool upper=False) -> Tensor variants: method, function dispatch: CPU, CUDA: cholesky - func: cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) dispatch: CompositeExplicitAutograd: cholesky_solve_out - func: cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor variants: method, function dispatch: CompositeExplicitAutograd: cholesky_solve - func: _cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor variants: function dispatch: CPU: _cholesky_solve_helper_cpu CUDA: _cholesky_solve_helper_cuda - func: solve(Tensor self, Tensor A) -> (Tensor solution, Tensor LU) variants: function, method dispatch: CompositeExplicitAutograd: solve - func: solve.solution(Tensor self, Tensor A, *, Tensor(a!) solution, Tensor(b!) lu) -> (Tensor(a!) solution, Tensor(b!) LU) dispatch: CompositeExplicitAutograd: solve_out - func: _solve_helper(Tensor self, Tensor A) -> (Tensor, Tensor) variants: function dispatch: CPU: _solve_helper_cpu CUDA: _solve_helper_cuda - func: cholesky_inverse(Tensor self, bool upper=False) -> Tensor variants: method, function dispatch: CPU, CUDA: cholesky_inverse - func: cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: cholesky_inverse_out - func: qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) - func: qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R) variants: method, function - func: geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau) dispatch: CPU, CUDA: geqrf_out - func: geqrf(Tensor self) -> (Tensor a, Tensor tau) variants: method, function dispatch: CPU, CUDA: geqrf # orgqr, alias for linalg_householder_product - func: orgqr(Tensor self, Tensor input2) -> Tensor variants: method, function - func: orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!) - func: ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: ormqr_out - func: ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor variants: method, function dispatch: CPU, CUDA: ormqr - func: _lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info) variants: function - func: lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!) 
dispatch: CPU, CUDA: lu_solve_out - func: lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor variants: method, function dispatch: CPU, CUDA: lu_solve - func: lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U) variants: function dispatch: CPU, CUDA: lu_unpack - func: lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) variants: function dispatch: CPU, CUDA: lu_unpack_out # TODO: remove dispatch section when porting TH CUDA to ATen - func: multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: multinomial_out - func: multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor variants: method, function dispatch: CPU, CUDA: multinomial - func: lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: lgamma_out - func: lgamma_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: lgamma.out variants: method - func: lgamma(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: lgamma.out variants: method, function - func: digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: digamma_out - func: digamma(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: digamma.out variants: method, function - func: polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: polygamma_out - func: polygamma(int n, Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: polygamma.out variants: method, function - func: polygamma_(Tensor(a!) self, int n) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: polygamma_ - func: erfinv(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: erfinv.out variants: method, function dispatch: SparseCPU, SparseCUDA: erfinv_sparse SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr - func: erfinv_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: erfinv.out variants: method dispatch: SparseCPU, SparseCUDA: erfinv_sparse_ SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr_ - func: erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: erfinv_out SparseCPU, SparseCUDA: erfinv_sparse_out SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr_out - func: i0(Tensor self) -> Tensor structured_delegate: i0.out variants: function, method - func: i0_(Tensor(a!) self) -> Tensor(a!) structured_delegate: i0.out variants: function, method - func: i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: i0_out - func: sign(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: sign.out variants: function, method dispatch: CompositeExplicitAutograd: sign SparseCPU, SparseCUDA: sign_sparse SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr - func: sign_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: sign.out variants: method dispatch: CompositeExplicitAutograd: sign_ SparseCPU, SparseCUDA: sign_sparse_ SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr_ - func: sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: sign_out SparseCPU, SparseCUDA: sign_sparse_out SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr_out - func: signbit(Tensor self) -> Tensor variants: function, method structured_delegate: signbit.out dispatch: SparseCPU, SparseCUDA: signbit_sparse SparseCsrCPU, SparseCsrCUDA: signbit_sparse_csr - func: signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU: signbit_out CUDA: signbit_out SparseCPU, SparseCUDA: signbit_sparse_out SparseCsrCPU, SparseCsrCUDA: signbit_sparse_csr_out - func: dist(Tensor self, Tensor other, Scalar p=2) -> Tensor device_check: NoCheck # TensorIterator variants: method, function dispatch: CompositeExplicitAutograd: dist - func: atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: atan2_out - func: atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: atan2.out variants: method - func: atan2(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: atan2.out variants: method, function # arctan2, alias of atan2 - func: arctan2(Tensor self, Tensor other) -> Tensor variants: method, function - func: arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) variants: method - func: lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: lerp_Scalar - func: lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: lerp_Tensor - func: lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor device_check: NoCheck # TensorIterator variants: method, function structured_delegate: lerp.Scalar_out - func: lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor device_check: NoCheck # TensorIterator variants: method, function structured_delegate: lerp.Tensor_out - func: histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: histogram_histc_cpu_out CUDA: _histc_out_cuda - func: histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor variants: method, function dispatch: CPU: histogram_histc_cpu CUDA: _histc_cuda - func: histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) 
hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) dispatch: CPU: histogram_out_cpu - func: histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) variants: method, function dispatch: CPU: histogram_cpu - func: histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) dispatch: CPU: histogram_out_cpu - func: histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) variants: method, function dispatch: CPU: histogram_cpu - func: _histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[] dispatch: CPU: histogramdd_bin_edges_cpu - func: _histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor dispatch: CPU: histogramdd_cpu - func: _histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor dispatch: CPU: histogramdd_cpu - func: fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator dispatch: CompositeExplicitAutograd: fmod_out - func: fmod.Scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function dispatch: CompositeExplicitAutograd: fmod - func: fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CompositeExplicitAutograd: fmod_ - func: fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: fmod_out - func: fmod.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: fmod.Tensor_out variants: method, function - func: fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method structured_delegate: fmod.Tensor_out - func: hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: hypot_out - func: hypot(Tensor self, Tensor other) -> Tensor structured_delegate: hypot.out variants: method, function - func: hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!) structured_delegate: hypot.out variants: method dispatch: CompositeExplicitAutograd: hypot_ - func: igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: igamma_out - func: igamma(Tensor self, Tensor other) -> Tensor structured_delegate: igamma.out variants: method, function - func: igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!) structured_delegate: igamma.out variants: method - func: igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: igammac_out - func: igammac(Tensor self, Tensor other) -> Tensor structured_delegate: igammac.out variants: method, function - func: igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!) structured_delegate: igammac.out variants: method - func: nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: nextafter_out - func: nextafter(Tensor self, Tensor other) -> Tensor structured_delegate: nextafter.out variants: method, function - func: nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!) structured_delegate: nextafter.out variants: method dispatch: CompositeExplicitAutograd: nextafter_ - func: remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) dispatch: CompositeExplicitAutograd: remainder_out - func: remainder.Scalar(Tensor self, Scalar other) -> Tensor variants: method, function dispatch: CompositeExplicitAutograd: remainder - func: remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) variants: method dispatch: CompositeExplicitAutograd: remainder_ - func: remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: remainder_out - func: remainder.Tensor(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: remainder.Tensor_out variants: method, function - func: remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: remainder.Tensor_out variants: method - func: remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: function dispatch: CPU, CUDA: remainder - func: min(Tensor self) -> Tensor device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: min QuantizedCPU: min_quantized_cpu - func: fmin(Tensor self, Tensor other) -> Tensor structured_delegate: fmin.out device_check: NoCheck # TensorIterator variants: method, function - func: fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: fmin_out - func: max(Tensor self) -> Tensor device_check: NoCheck # TensorIterator variants: method, function dispatch: CPU, CUDA: max QuantizedCPU: max_quantized_cpu - func: fmax(Tensor self, Tensor other) -> Tensor structured_delegate: fmax.out device_check: NoCheck # TensorIterator variants: method, function - func: fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: fmax_out - func: maximum(Tensor self, Tensor other) -> Tensor structured_delegate: maximum.out device_check: NoCheck # TensorIterator variants: method, function - func: maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator dispatch: CPU, CUDA: maximum_out # binary max, alias of maximum # NOTE: max is not an alias for maximum, since there is also unary max - func: max.other(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator variants: method, function - func: max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator - func: minimum(Tensor self, Tensor other) -> Tensor structured_delegate: minimum.out device_check: NoCheck # TensorIterator variants: method, function - func: minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck   # TensorIterator
  dispatch:
    CPU, CUDA: minimum_out

# binary min, alias for minimum
# NOTE: min is not an alias for minimum, since there is also unary min
- func: min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck   # TensorIterator

- func: min.other(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck   # TensorIterator
  variants: method, function

- func: quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
  variants: method, function

- func: quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)

- func: quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
  variants: method, function

- func: quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)

- func: nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
  variants: method, function

- func: nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)

- func: nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
  variants: method, function

- func: nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)

- func: sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  device_check: NoCheck   # TensorIterator
  dispatch:
    CPU: sort_out_cpu
    CUDA: sort_out_cuda

- func: sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  dispatch:
    CPU: sort_out_cpu_stable
    CUDA: sort_out_stable_cuda

- func: sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
  device_check: NoCheck   # TensorIterator
  variants: method, function
  dispatch:
    CPU: sort_cpu
    CUDA: sort_cuda
    QuantizedCPU: sort_quantized_cpu

- func: sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
  variants: method, function
  dispatch:
    CPU: sort_cpu_stable
    CUDA: sort_stable_cuda
    QuantizedCPU: sort_quantized_cpu_stable

- func: sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)

- func: sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)

- func: sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
  variants: method, function

- func: sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
  variants: method, function

- func: msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
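
# NOTE: illustrative example only; not part of the operator schema.
# The min/max and sort overloads declared above all surface through a single
# Python call site, and which schema entry is selected depends only on the
# arguments passed. A minimal sketch using ordinary public torch API (nothing
# below introduces new operators or flags):
#
#   import torch
#   x = torch.tensor([3., 1., 2.])
#   y = torch.tensor([2., 2., 2.])
#
#   torch.max(x)          # unary max: a single Tensor result
#   torch.max(x, y)       # binary max.other; same result as torch.maximum(x, y)
#
#   values, indices = torch.sort(x)               # sort: sorts along the last dim
#   values, indices = torch.sort(x, stable=True)  # sort.stable: keyword-only `stable`
#   values, indices = x.sort(descending=True)     # method variant (variants: method, function)
#   torch.equal(torch.msort(x), torch.sort(x, dim=0).values)  # msort sorts along dim 0
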
- func: msort(Tensor self) -> Tensor variants: method, function - func: argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor device_check: NoCheck # TensorIterator variants: method, function - func: argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor variants: method, function - func: topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) structured: True dispatch: CPU: topk_out_cpu CUDA: topk_out_cuda - func: topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) variants: method, function structured_delegate: topk.values dispatch: QuantizedCPU: topk_quantized_cpu - func: all(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: all.all_out variants: method, function - func: all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck structured: True dispatch: CPU, CUDA: all_all_out - func: any(Tensor self) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: any.all_out variants: method, function dispatch: SparseCPU, SparseCUDA: any_sparse - func: any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck structured: True dispatch: CPU, CUDA: any_all_out - func: renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True dispatch: CPU, CUDA: renorm_out - func: renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor device_check: NoCheck # TensorIterator variants: method, function structured_delegate: renorm.out - func: renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method structured_delegate: renorm.out - func: unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a) variants: method device_check: NoCheck device_guard: False dispatch: CPU, CUDA: unfold QuantizedCPU, QuantizedCUDA: unfold - func: unfold_backward(Tensor grad_in, int[] input_sizes, int dim, int size, int step) -> Tensor variants: function dispatch: CPU, CUDA: unfold_backward - func: equal(Tensor self, Tensor other) -> bool variants: method, function dispatch: CPU: cpu_equal CUDA: cuda_equal QuantizedCPU: equal_quantized_cpu - func: pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: pow_Tensor_Tensor_out - func: pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: pow.Tensor_Tensor_out variants: method, function - func: pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True dispatch: CPU, CUDA: pow_Scalar_out - func: pow.Scalar(Scalar self, Tensor exponent) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: pow.Scalar_out - func: pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) 
device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: pow_Tensor_Scalar_out SparseCPU, SparseCUDA: pow_out_sparse_scalar - func: pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: pow.Tensor_Scalar_out variants: function, method dispatch: SparseCPU, SparseCUDA: pow_sparse_scalar - func: pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: pow.Tensor_Scalar_out variants: method - func: pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) device_check: NoCheck # TensorIterator structured_delegate: pow.Tensor_Tensor_out variants: method - func: float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) - func: float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor variants: function, method - func: float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) - func: float_power.Scalar(Scalar self, Tensor exponent) -> Tensor - func: float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) - func: float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor variants: function, method - func: float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) variants: method - func: float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) variants: method - func: normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!) device_check: NoCheck # TensorIterator variants: method dispatch: CPU, CUDA: normal_ Meta: normal_meta_ SparseCsrCPU, SparseCsrCUDA: normal_sparse_csr_ - func: normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: normal_out - func: normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor dispatch: CPU, CUDA: normal - func: normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: normal_out - func: normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor dispatch: CPU, CUDA: normal - func: normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) dispatch: CPU, CUDA: normal_out - func: normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor dispatch: CPU, CUDA: normal - func: normal.float_float(float mean, float std, int[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: normal.float_float_out(float mean, float std, int[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) - func: alias(Tensor(a) self) -> Tensor(a) variants: method, function dispatch: CompositeExplicitAutograd: alias - func: _index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) dispatch: CPU: _index_copy_impl_ CUDA: _index_copy_impl_ - func: _amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> () variants: function dispatch: CUDA: _amp_foreach_non_finite_check_and_unscale_cuda_ - func: _amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!) 
variants: function dispatch: CUDA: _amp_update_scale_cuda_ - func: _cat(Tensor[] tensors, int dim=0) -> Tensor dispatch: CPU: _cat_cpu CUDA: cat_cuda QuantizedCPU: cat_quantized_cpu - func: _cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: _cat_out_cpu CUDA: cat_out_cuda QuantizedCPU: cat_out_quantized_cpu - func: _foreach_add.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_add_scalar_kernel_slow CUDA: foreach_tensor_add_scalar_kernel_cuda - func: _foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_add_scalar_kernel_slow_ CUDA: foreach_tensor_add_scalar_kernel_cuda_ - func: _foreach_sub.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_sub_scalar_kernel_slow CUDA: foreach_tensor_sub_scalar_kernel_cuda - func: _foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_sub_scalar_kernel_slow_ CUDA: foreach_tensor_sub_scalar_kernel_cuda_ - func: _foreach_mul.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_mul_scalar_kernel_slow CUDA: foreach_tensor_mul_scalar_kernel_cuda - func: _foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_mul_scalar_kernel_slow_ CUDA: foreach_tensor_mul_scalar_kernel_cuda_ - func: _foreach_div.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_div_scalar_kernel_slow CUDA: foreach_tensor_div_scalar_kernel_cuda - func: _foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_div_scalar_kernel_slow_ CUDA: foreach_tensor_div_scalar_kernel_cuda_ - func: _foreach_add.List(Tensor[] tensors1, Tensor[] tensors2, *, Scalar alpha=1) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_add_list_kernel_slow CUDA: foreach_tensor_add_list_kernel_cuda - func: _foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_add_list_kernel_slow_ CUDA: foreach_tensor_add_list_kernel_cuda_ - func: _foreach_sub.List(Tensor[] tensors1, Tensor[] tensors2, *, Scalar alpha=1) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_sub_list_kernel_slow CUDA: 
foreach_tensor_sub_list_kernel_cuda - func: _foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_sub_list_kernel_slow_ CUDA: foreach_tensor_sub_list_kernel_cuda_ - func: _foreach_mul.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_mul_list_kernel_slow CUDA: foreach_tensor_mul_list_kernel_cuda - func: _foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_mul_list_kernel_slow_ CUDA: foreach_tensor_mul_list_kernel_cuda_ - func: _foreach_div.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_div_list_kernel_slow CUDA: foreach_tensor_div_list_kernel_cuda - func: _foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_div_list_kernel_slow_ CUDA: foreach_tensor_div_list_kernel_cuda_ - func: _foreach_add.ScalarList(Tensor[] tensors, Scalar[] scalars) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_add_scalarlist_kernel_slow CUDA: foreach_tensor_add_scalarlist_kernel_cuda - func: _foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_add_scalarlist_kernel_slow_ CUDA: foreach_tensor_add_scalarlist_kernel_cuda_ - func: _foreach_sub.ScalarList(Tensor[] tensors, Scalar[] scalars) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_sub_scalarlist_kernel_slow CUDA: foreach_tensor_sub_scalarlist_kernel_cuda - func: _foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_sub_scalarlist_kernel_slow_ CUDA: foreach_tensor_sub_scalarlist_kernel_cuda_ - func: _foreach_div.ScalarList(Tensor[] tensors, Scalar[] scalars) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_div_scalarlist_kernel_slow CUDA: foreach_tensor_div_scalarlist_kernel_cuda - func: _foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_div_scalarlist_kernel_slow_ CUDA: foreach_tensor_div_scalarlist_kernel_cuda_ - func: _foreach_mul.ScalarList(Tensor[] tensors, Scalar[] scalars) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_mul_scalarlist_kernel_slow 
CUDA: foreach_tensor_mul_scalarlist_kernel_cuda - func: _foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_mul_scalarlist_kernel_slow_ CUDA: foreach_tensor_mul_scalarlist_kernel_cuda_ - func: _foreach_exp(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_exp_slow CUDA: foreach_tensor_exp_cuda - func: _foreach_zero_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_zero_slow_ CUDA: foreach_tensor_zero_cuda_ - func: _foreach_exp_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_exp_slow_ CUDA: foreach_tensor_exp_cuda_ - func: _foreach_sqrt(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_sqrt_slow CUDA: foreach_tensor_sqrt_cuda - func: _foreach_sqrt_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_sqrt_slow_ CUDA: foreach_tensor_sqrt_cuda_ - func: _foreach_abs(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_abs_slow CUDA: foreach_tensor_abs_cuda - func: _foreach_abs_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_abs_slow_ CUDA: foreach_tensor_abs_cuda_ - func: _foreach_acos(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_acos_slow CUDA: foreach_tensor_acos_cuda - func: _foreach_acos_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_acos_slow_ CUDA: foreach_tensor_acos_cuda_ - func: _foreach_asin(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_asin_slow CUDA: foreach_tensor_asin_cuda - func: _foreach_asin_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_asin_slow_ CUDA: foreach_tensor_asin_cuda_ - func: _foreach_atan(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_atan_slow CUDA: foreach_tensor_atan_cuda - func: _foreach_atan_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_atan_slow_ CUDA: foreach_tensor_atan_cuda_ - func: _foreach_ceil(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach 
kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_ceil_slow CUDA: foreach_tensor_ceil_cuda - func: _foreach_ceil_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_ceil_slow_ CUDA: foreach_tensor_ceil_cuda_ - func: _foreach_cos(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_cos_slow CUDA: foreach_tensor_cos_cuda - func: _foreach_cos_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_cos_slow_ CUDA: foreach_tensor_cos_cuda_ - func: _foreach_cosh(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_cosh_slow CUDA: foreach_tensor_cosh_cuda - func: _foreach_cosh_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_cosh_slow_ CUDA: foreach_tensor_cosh_cuda_ - func: _foreach_erf(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_erf_slow CUDA: foreach_tensor_erf_cuda - func: _foreach_erf_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_erf_slow_ CUDA: foreach_tensor_erf_cuda_ - func: _foreach_erfc(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_erfc_slow CUDA: foreach_tensor_erfc_cuda - func: _foreach_erfc_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_erfc_slow_ CUDA: foreach_tensor_erfc_cuda_ - func: _foreach_expm1(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_expm1_slow CUDA: foreach_tensor_expm1_cuda - func: _foreach_expm1_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_expm1_slow_ CUDA: foreach_tensor_expm1_cuda_ - func: _foreach_floor(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_floor_slow CUDA: foreach_tensor_floor_cuda - func: _foreach_floor_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_floor_slow_ CUDA: foreach_tensor_floor_cuda_ - func: _foreach_log(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_log_slow CUDA: foreach_tensor_log_cuda - func: 
_foreach_log_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_log_slow_ CUDA: foreach_tensor_log_cuda_ - func: _foreach_log10(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_log10_slow CUDA: foreach_tensor_log10_cuda - func: _foreach_log10_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_log10_slow_ CUDA: foreach_tensor_log10_cuda_ - func: _foreach_log1p(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_log1p_slow CUDA: foreach_tensor_log1p_cuda - func: _foreach_log1p_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_log1p_slow_ CUDA: foreach_tensor_log1p_cuda_ - func: _foreach_log2(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_log2_slow CUDA: foreach_tensor_log2_cuda - func: _foreach_log2_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_log2_slow_ CUDA: foreach_tensor_log2_cuda_ - func: _foreach_neg(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_neg_slow CUDA: foreach_tensor_neg_cuda - func: _foreach_neg_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_neg_slow_ CUDA: foreach_tensor_neg_cuda_ - func: _foreach_tan(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_tan_slow CUDA: foreach_tensor_tan_cuda - func: _foreach_tan_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_tan_slow_ CUDA: foreach_tensor_tan_cuda_ - func: _foreach_tanh(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_tanh_slow CUDA: foreach_tensor_tanh_cuda - func: _foreach_tanh_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_tanh_slow_ CUDA: foreach_tensor_tanh_cuda_ - func: _foreach_sin(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_sin_slow CUDA: foreach_tensor_sin_cuda - func: _foreach_sin_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: 
foreach_tensor_sin_slow_ CUDA: foreach_tensor_sin_cuda_ - func: _foreach_sinh(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_sinh_slow CUDA: foreach_tensor_sinh_cuda - func: _foreach_sinh_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_sinh_slow_ CUDA: foreach_tensor_sinh_cuda_ - func: _foreach_round(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_round_slow CUDA: foreach_tensor_round_cuda - func: _foreach_round_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_round_slow_ CUDA: foreach_tensor_round_cuda_ - func: _foreach_lgamma(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_lgamma_slow CUDA: foreach_tensor_lgamma_cuda - func: _foreach_lgamma_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_lgamma_slow_ CUDA: foreach_tensor_lgamma_cuda_ - func: _foreach_frac(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_frac_slow CUDA: foreach_tensor_frac_cuda - func: _foreach_frac_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_frac_slow_ CUDA: foreach_tensor_frac_cuda_ - func: _foreach_reciprocal(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_reciprocal_slow CUDA: foreach_tensor_reciprocal_cuda - func: _foreach_reciprocal_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_reciprocal_slow_ CUDA: foreach_tensor_reciprocal_cuda_ - func: _foreach_sigmoid(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_sigmoid_slow CUDA: foreach_tensor_sigmoid_cuda - func: _foreach_sigmoid_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_sigmoid_slow_ CUDA: foreach_tensor_sigmoid_cuda_ - func: _foreach_trunc(Tensor[] tensors) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_trunc_slow CUDA: foreach_tensor_trunc_cuda - func: _foreach_trunc_(Tensor(a!)[] self) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_trunc_slow_ CUDA: foreach_tensor_trunc_cuda_ - func: _foreach_addcdiv_.Scalar(Tensor(a!)[] 
self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_addcdiv_scalar_slow_ CUDA: foreach_tensor_addcdiv_scalar_cuda_ - func: _foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_addcmul_scalar_slow_ CUDA: foreach_tensor_addcmul_scalar_cuda_ - func: _foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_addcdiv_scalarlist_slow_ CUDA: foreach_tensor_addcdiv_scalarlist_cuda_ - func: _foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> () device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_addcmul_scalarlist_slow_ CUDA: foreach_tensor_addcmul_scalarlist_cuda_ - func: _foreach_addcdiv.Scalar(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_addcdiv_scalar_slow CUDA: foreach_tensor_addcdiv_scalar_cuda - func: _foreach_addcmul.Scalar(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_addcmul_scalar_slow CUDA: foreach_tensor_addcmul_scalar_cuda - func: _foreach_addcdiv.ScalarList(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_addcdiv_scalarlist_slow CUDA: foreach_tensor_addcdiv_scalarlist_cuda - func: _foreach_addcmul.ScalarList(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_addcmul_scalarlist_slow CUDA: foreach_tensor_addcmul_scalarlist_cuda - func: _foreach_maximum.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_maximum_slow CUDA: foreach_tensor_maximum_cuda - func: _foreach_minimum.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_minimum_slow CUDA: foreach_tensor_minimum_cuda - func: _foreach_norm.Scalar(Tensor[] tensors, Scalar ord=2) -> Tensor[] device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices variants: function dispatch: CPU: foreach_tensor_norm_slow CUDA: foreach_tensor_norm_cuda - func: bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor dispatch: CPU: bucketize_cpu CUDA: 
bucketize_cuda - func: bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: bucketize_out_cpu CUDA: bucketize_out_cuda - func: bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor dispatch: CPU: bucketize_cpu CUDA: bucketize_cuda - func: searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor dispatch: CPU: searchsorted_cpu CUDA: searchsorted_cuda # [Note about _torch_cuda_cu_linker_symbol_op and torch_cuda_cu] # This is a DUMMY function to force the linking against torch_cuda_cu on Windows. # Otherwise, the Windows linker will optimize and not include torch_cuda_cu even when we # want it to be included. This is similar to what we do with warp_size for torch_cuda_cpp, # described as the solution to this issue: https://github.com/pytorch/pytorch/issues/31611 # This op should NOT be used or exposed or edited or else Windows builds (with BUILD_SPLIT_CUDA) will break. - func: _torch_cuda_cu_linker_symbol_op(Tensor self) -> Tensor dispatch: CUDA: _torch_cuda_cu_linker_symbol_op_cuda - func: searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: searchsorted_out_cpu CUDA: searchsorted_out_cuda - func: searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor dispatch: CPU: searchsorted_cpu CUDA: searchsorted_cuda - func: _convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor structured_delegate: _convert_indices_from_coo_to_csr.out - func: _convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!) structured: True dispatch: CPU: _convert_indices_from_coo_to_csr_structured_cpu CUDA: _convert_indices_from_coo_to_csr_structured_cuda - func: _convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor structured_delegate: _convert_indices_from_csr_to_coo.out - func: _convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!) structured: True dispatch: CPU: _convert_indices_from_csr_to_coo_structured_cpu CUDA: _convert_indices_from_csr_to_coo_structured_cuda ## NN wrappers - func: mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator python_module: nn dispatch: CPU, CUDA: mse_loss_out - func: mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor device_check: NoCheck # TensorIterator python_module: nn dispatch: CPU, CUDA: mse_loss - func: mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU, CUDA: mse_loss_backward_out - func: mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor python_module: nn dispatch: CPU, CUDA: mse_loss_backward - func: l1_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) 
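# NOTE: [Usage sketch for the _foreach_* entries above]
# Illustrative only and not part of the schema: the underscore-prefixed Python
# bindings shown here are the usual (private, subject-to-change) wrappers for
# these ops. Each call applies one op across a whole list of tensors, which is
# what fused optimizers rely on instead of a Python loop of per-tensor ops.
#
#   import torch
#   params = [torch.randn(3) for _ in range(4)]
#   grads  = [torch.randn(3) for _ in range(4)]
#   exps = torch._foreach_exp(params)                  # out-of-place: returns a new list
#   torch._foreach_mul_(grads, [0.1, 0.2, 0.3, 0.4])   # ScalarList overload, in place
#   torch._foreach_zero_(grads)                        # in-place zeroing of every tensor in the list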
python_module: nn dispatch: CompositeExplicitAutograd: l1_loss_out - func: l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: l1_loss - func: l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU, CUDA: l1_loss_backward_out - func: l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: l1_loss_backward - func: multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: multi_margin_loss_cpu_out CUDA: multi_margin_loss_cuda_out - func: multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor python_module: nn dispatch: CPU: multi_margin_loss_cpu CUDA: multi_margin_loss_cuda - func: multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: multi_margin_loss_cpu_backward_out CUDA: multi_margin_loss_cuda_backward_out - func: multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor python_module: nn dispatch: CPU: multi_margin_loss_cpu_backward CUDA: multi_margin_loss_cuda_backward - func: multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) python_module: nn - func: multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor python_module: nn - func: multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!)) python_module: nn dispatch: CPU: multilabel_margin_loss_forward_out_cpu CUDA: multilabel_margin_loss_forward_out_cuda - func: multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target) python_module: nn dispatch: CPU: multilabel_margin_loss_forward_cpu CUDA: multilabel_margin_loss_forward_cuda - func: multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: multilabel_margin_loss_backward_cpu_out CUDA: multilabel_margin_loss_backward_cuda_out - func: multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor python_module: nn dispatch: CPU: multilabel_margin_loss_backward_cpu CUDA: multilabel_margin_loss_backward_cuda - func: nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) python_module: nn - func: nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor python_module: nn - func: nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor python_module: nn - func: nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) 
total_weight) -> (Tensor(a!), Tensor(b!)) python_module: nn structured: True dispatch: CPU: nll_loss_forward_out_cpu CUDA: nll_loss_forward_out_cuda - func: nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight) python_module: nn structured_delegate: nll_loss_forward.output - func: nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: nll_loss_backward_out_cpu CUDA: nll_loss_backward_out_cuda - func: nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor python_module: nn structured_delegate: nll_loss_backward.grad_input - func: nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) python_module: nn - func: nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor python_module: nn - func: nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) python_module: nn dispatch: CPU: nll_loss2d_forward_out_cpu CUDA: nll_loss2d_forward_out_cuda - func: nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight) python_module: nn dispatch: CPU: nll_loss2d_forward_cpu CUDA: nll_loss2d_forward_cuda - func: nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: nll_loss2d_backward_out_cpu CUDA: nll_loss2d_backward_out_cuda - func: nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor python_module: nn dispatch: CPU: nll_loss2d_backward_cpu CUDA: nll_loss2d_backward_cuda - func: smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase python_module: nn dispatch: CPU, CUDA: smooth_l1_loss_out - func: smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor device_check: NoCheck # TensorIterator structured_delegate: smooth_l1_loss.out python_module: nn - func: smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: smooth_l1_loss_backward_out CUDA: smooth_l1_loss_backward_out - func: smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: smooth_l1_loss_backward - func: huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!) 
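# NOTE: [Usage sketch for the loss entries above]
# Illustrative only: `reduction=Mean` in these schemas corresponds to the
# Python-level default reduction='mean'. The scaling relation between
# smooth_l1_loss(beta) and huber_loss(delta) shown below follows from the
# standard definitions; it is not something this file states.
#
#   import torch
#   import torch.nn.functional as F
#   x, y = torch.randn(8, 5), torch.randn(8, 5)
#   F.mse_loss(x, y)                      # reduction='mean' (the Mean default above)
#   F.mse_loss(x, y, reduction='none')    # per-element losses, no reduction
#   a = F.huber_loss(x, y, delta=2.0)
#   b = 2.0 * F.smooth_l1_loss(x, y, beta=2.0)
#   assert torch.allclose(a, b)           # huber(delta) == delta * smooth_l1(beta=delta)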
python_module: nn dispatch: CPU, CUDA: huber_loss_out - func: huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor python_module: nn dispatch: CPU, CUDA: huber_loss - func: huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU, CUDA: huber_loss_backward_out - func: huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: huber_loss_backward - func: soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CompositeExplicitAutograd: soft_margin_loss_out - func: soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: soft_margin_loss - func: soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CompositeExplicitAutograd: soft_margin_loss_backward_out - func: soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: soft_margin_loss_backward - func: elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator python_module: nn dispatch: CPU, CUDA: elu_out - func: elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor structured_delegate: elu.out device_check: NoCheck # TensorIterator python_module: nn - func: elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase python_module: nn dispatch: CPU, CUDA: elu_backward_out - func: elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor structured_delegate: elu_backward.grad_input python_module: nn - func: elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!) structured_delegate: elu.out device_check: NoCheck # TensorIterator python_module: nn dispatch: CompositeExplicitAutograd: elu_ - func: glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase python_module: nn dispatch: CPU, CUDA: glu_out - func: glu(Tensor self, int dim=-1) -> Tensor structured_delegate: glu.out device_check: NoCheck # TensorIterator python_module: nn - func: glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: glu_backward_cpu_out CUDA: glu_backward_cuda_out - func: glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor python_module: nn dispatch: CPU: glu_backward_cpu CUDA: glu_backward_cuda - func: hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
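# NOTE: [Usage sketch for the elu/glu entries above]
# Illustrative only; the functional wrappers shown are the usual public
# bindings. The extra `scale`/`input_scale` scalars on elu appear to exist so
# that composite ops such as selu can reuse the same kernel (an observation,
# not something this file states).
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(4, 6)
#   F.elu(x, alpha=1.0)
#   F.glu(x, dim=-1).shape     # torch.Size([4, 3]): glu halves the chosen dim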
structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator python_module: nn dispatch: CPU, CUDA: hardsigmoid_out QuantizedCPU: hardsigmoid_out_quantized_cpu - func: hardsigmoid(Tensor self) -> Tensor structured_delegate: hardsigmoid.out device_check: NoCheck # TensorIterator python_module: nn dispatch: QuantizedCPU: hardsigmoid_quantized_cpu - func: hardsigmoid_(Tensor(a!) self) -> Tensor(a!) structured_delegate: hardsigmoid.out device_check: NoCheck # TensorIterator python_module: nn - func: hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase python_module: nn dispatch: CPU, CUDA: hardsigmoid_backward_out - func: hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor structured_delegate: hardsigmoid_backward.grad_input python_module: nn - func: hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator python_module: nn dispatch: CPU, CUDA: hardtanh_out QuantizedCPU: hardtanh_out_quantized_cpu - func: hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor device_check: NoCheck # TensorIterator python_module: nn dispatch: CPU, CUDA: hardtanh QuantizedCPU: hardtanh_quantized_cpu - func: hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU, CUDA: hardtanh_backward_out - func: hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor python_module: nn dispatch: CPU, CUDA: hardtanh_backward - func: hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!) device_check: NoCheck # TensorIterator python_module: nn dispatch: CPU, CUDA: hardtanh_ QuantizedCPU: hardtanh_quantized_cpu_ - func: hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator python_module: nn dispatch: CPU, CUDA: hardswish_out - func: hardswish(Tensor self) -> Tensor device_check: NoCheck # TensorIterator python_module: nn dispatch: CPU, CUDA: hardswish - func: hardswish_(Tensor(a!) self) -> Tensor(a!) device_check: NoCheck # TensorIterator python_module: nn dispatch: CPU, CUDA: hardswish_ - func: hardswish_backward(Tensor grad_output, Tensor self) -> Tensor python_module: nn dispatch: CPU, CUDA: hardswish_backward - func: leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator python_module: nn dispatch: CPU, CUDA: leaky_relu_out QuantizedCPU: leaky_relu_out_quantized_cpu - func: leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor structured_delegate: leaky_relu.out device_check: NoCheck # TensorIterator python_module: nn dispatch: QuantizedCPU: leaky_relu_quantized_cpu - func: leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase python_module: nn dispatch: CPU, CUDA: leaky_relu_backward_out - func: leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor structured_delegate: leaky_relu_backward.grad_input python_module: nn - func: leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!) 
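# NOTE: [Usage sketch for the hardsigmoid/hardtanh/hardswish/leaky_relu entries above]
# Illustrative only. The QuantizedCPU dispatch keys indicate kernels registered
# for quantized CPU tensors in addition to the regular CPU/CUDA ones.
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.linspace(-4, 4, steps=9)
#   F.hardtanh(x, min_val=-1.0, max_val=1.0)   # clamp to [min_val, max_val]
#   F.hardsigmoid(x)                           # piecewise-linear sigmoid approximation
#   F.hardswish(x)                             # x * hardsigmoid(x)
#   F.leaky_relu(x, negative_slope=0.01)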
structured_delegate: leaky_relu.out device_check: NoCheck # TensorIterator python_module: nn dispatch: QuantizedCPU: leaky_relu_quantized_cpu_ - func: log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator python_module: nn - func: log_sigmoid(Tensor self) -> Tensor device_check: NoCheck # TensorIterator python_module: nn - func: log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!)) device_check: NoCheck # TensorIterator python_module: nn dispatch: CPU: log_sigmoid_forward_out_cpu CUDA: log_sigmoid_forward_out_cuda - func: log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer) device_check: NoCheck # TensorIterator python_module: nn dispatch: CPU: log_sigmoid_forward_cpu CUDA: log_sigmoid_forward_cuda - func: log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: log_sigmoid_backward_cpu_out CUDA: log_sigmoid_backward_cuda_out - func: log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor python_module: nn dispatch: CPU: log_sigmoid_backward_cpu CUDA: log_sigmoid_backward_cuda - func: rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: rrelu_with_noise_out_cpu CUDA: rrelu_with_noise_out_cuda - func: rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor python_module: nn dispatch: CPU: rrelu_with_noise_cpu CUDA: rrelu_with_noise_cuda - func: rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: rrelu_with_noise_backward - func: rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!) python_module: nn dispatch: CPU: rrelu_with_noise_cpu_ CUDA: rrelu_with_noise_cuda_ - func: softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator python_module: nn dispatch: CPU, CUDA: softplus_out - func: softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor structured_delegate: softplus.out device_check: NoCheck # TensorIterator python_module: nn - func: softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase python_module: nn dispatch: CPU, CUDA: softplus_backward_out - func: softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor structured_delegate: softplus_backward.grad_input python_module: nn - func: softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) 
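# NOTE: [Usage sketch for the log_sigmoid/rrelu/softplus/softshrink entries above]
# Illustrative only; the wrapper names are the usual public bindings. The
# `buffer` output of log_sigmoid_forward is an implementation detail consumed
# by the backward kernel, not something user code sees.
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(10)
#   F.logsigmoid(x)
#   F.softplus(x, beta=1, threshold=20)   # ~identity where beta * x > threshold
#   F.softshrink(x, lambd=0.5)            # zeroes values with |x| <= lambd
#   F.rrelu(x, lower=0.125, upper=1/3, training=True)  # random negative slope (the `noise` tensor)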
structured: True structured_inherits: TensorIteratorBase device_check: NoCheck # TensorIterator python_module: nn dispatch: CPU, CUDA: softshrink_out - func: softshrink(Tensor self, Scalar lambd=0.5) -> Tensor structured_delegate: softshrink.out device_check: NoCheck # TensorIterator python_module: nn - func: softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase python_module: nn dispatch: CPU, CUDA: softshrink_backward_out - func: softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor structured_delegate: softshrink_backward.grad_input python_module: nn - func: adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: adaptive_avg_pool2d_out_cpu CUDA: adaptive_avg_pool2d_out_cuda MkldnnCPU: mkldnn_adaptive_avg_pool2d_out - func: adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor python_module: nn - func: mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor dispatch: MkldnnCPU: mkldnn_adaptive_avg_pool2d - func: mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor dispatch: MkldnnCPU: mkldnn_adaptive_avg_pool2d_backward - func: _adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor dispatch: CPU: adaptive_avg_pool2d_cpu CUDA: adaptive_avg_pool2d_cuda QuantizedCPU: adaptive_avg_pool2d_quantized_cpu - func: _adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor python_module: nn dispatch: CPU: adaptive_avg_pool2d_backward_cpu CUDA: adaptive_avg_pool2d_backward_cuda - func: adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: adaptive_avg_pool3d_out_cpu CUDA: adaptive_avg_pool3d_out_cuda QuantizedCPU: adaptive_avg_pool3d_out_quantized_cpu - func: adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor python_module: nn - func: _adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor dispatch: CPU: adaptive_avg_pool3d_cpu CUDA: adaptive_avg_pool3d_cuda QuantizedCPU: adaptive_avg_pool3d_quantized_cpu - func: adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: adaptive_avg_pool3d_backward_out_cpu CUDA: adaptive_avg_pool3d_backward_out_cuda - func: _adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor python_module: nn dispatch: CPU: adaptive_avg_pool3d_backward_cpu CUDA: adaptive_avg_pool3d_backward_cuda # Return: (Tensor output, Tensor indices) - func: adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) python_module: nn structured: True dispatch: CPU: adaptive_max_pool2d_out_cpu CUDA: adaptive_max_pool2d_out_cuda # Return: (Tensor output, Tensor indices) - func: adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor) python_module: nn structured_delegate: adaptive_max_pool2d.out - func: adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
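# NOTE: [Usage sketch for the adaptive pooling entries above]
# Illustrative only. Adaptive pooling takes a target output size instead of a
# kernel size, so it accepts arbitrary input spatial sizes.
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(1, 3, 17, 23)
#   F.adaptive_avg_pool2d(x, (5, 7)).shape                        # torch.Size([1, 3, 5, 7])
#   out, idx = F.adaptive_max_pool2d(x, 5, return_indices=True)   # idx is the `indices` output above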
python_module: nn structured: True dispatch: CPU: adaptive_max_pool2d_backward_out_cpu CUDA: adaptive_max_pool2d_backward_out_cuda - func: adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor python_module: nn structured_delegate: adaptive_max_pool2d_backward.grad_input # Return: (Tensor output, Tensor indices) - func: adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) python_module: nn structured: True dispatch: CPU: adaptive_max_pool3d_out_cpu CUDA: adaptive_max_pool3d_out_cuda # Return: (Tensor output, Tensor indices) - func: adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor) python_module: nn structured_delegate: adaptive_max_pool3d.out - func: adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: adaptive_max_pool3d_backward_out_cpu CUDA: adaptive_max_pool3d_backward_out_cuda - func: adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor python_module: nn structured_delegate: adaptive_max_pool3d_backward.grad_input - func: avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True precomputed: - kernel_size -> int kH, int kW - stride -> int dH, int dW - padding -> int padH, int padW dispatch: CPU: avg_pool2d_out_cpu CUDA: avg_pool2d_out_cuda MkldnnCPU: mkldnn_avg_pool2d_out - func: avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor python_module: nn structured_delegate: avg_pool2d.out dispatch: MkldnnCPU: mkldnn_avg_pool2d QuantizedCPU: avg_pool2d_quantized_cpu - func: avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: avg_pool2d_backward_out_cpu CUDA: avg_pool2d_backward_out_cuda MkldnnCPU: mkldnn_avg_pool2d_backward_out - func: avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor python_module: nn structured_delegate: avg_pool2d_backward.grad_input dispatch: MkldnnCPU: mkldnn_avg_pool2d_backward - func: avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: avg_pool3d_out_cpu CUDA: avg_pool3d_out_cuda MkldnnCPU: mkldnn_avg_pool3d_out - func: avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor python_module: nn structured_delegate: avg_pool3d.out dispatch: MkldnnCPU: mkldnn_avg_pool3d QuantizedCPU: avg_pool3d_quantized_cpu - func: avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) 
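# NOTE: [Usage sketch for the avg_pool entries above]
# Illustrative only. `int[2] stride=[]` in the schema means "default to
# kernel_size"; the optional divisor lets callers override the averaging
# denominator.
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.ones(1, 1, 4, 4)
#   F.avg_pool2d(x, kernel_size=2)                           # stride defaults to kernel_size
#   F.avg_pool2d(x, 3, padding=1, count_include_pad=False)   # padded zeros excluded from the mean
#   F.avg_pool2d(x, 2, divisor_override=1)                   # behaves like a windowed sum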
python_module: nn structured: True dispatch: CPU: avg_pool3d_backward_out_cpu CUDA: avg_pool3d_backward_out_cuda MkldnnCPU: mkldnn_avg_pool3d_backward_out - func: avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor python_module: nn structured_delegate: avg_pool3d_backward.grad_input dispatch: MkldnnCPU: mkldnn_avg_pool3d_backward # Return: (Tensor output, Tensor indices) - func: fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) python_module: nn structured: True dispatch: CPU: fractional_max_pool2d_out_cpu CUDA: fractional_max_pool2d_out_cuda # Return: (Tensor output, Tensor indices) - func: fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor) python_module: nn structured_delegate: fractional_max_pool2d.output - func: fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: fractional_max_pool2d_backward_cpu CUDA: fractional_max_pool2d_backward_cuda - func: fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor python_module: nn structured_delegate: fractional_max_pool2d_backward.grad_input # Return: (Tensor output, Tensor indices) - func: fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) python_module: nn structured: True precomputed: - kernel_size -> int poolSizeT, int poolSizeH, int poolSizeW - output_size -> int outputT, int outputH, int outputW dispatch: CPU: fractional_max_pool3d_out_cpu CUDA: fractional_max_pool3d_out_cuda # Return: (Tensor output, Tensor indices) - func: fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor) python_module: nn structured_delegate: fractional_max_pool3d.output - func: fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: fractional_max_pool3d_backward_out_cpu CUDA: fractional_max_pool3d_backward_out_cuda - func: fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor python_module: nn dispatch: CPU: fractional_max_pool3d_backward_cpu CUDA: fractional_max_pool3d_backward_cuda # Return: (Tensor output, Tensor indices) - func: max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) python_module: nn structured: True dispatch: CPU: max_pool2d_with_indices_out_cpu CUDA: max_pool2d_with_indices_out_cuda # Return: (Tensor output, Tensor indices) - func: max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) python_module: nn structured_delegate: max_pool2d_with_indices.out - func: max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: max_pool2d_with_indices_backward_out_cpu CUDA: max_pool2d_with_indices_backward_out_cuda - func: max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor python_module: nn structured_delegate: max_pool2d_with_indices_backward.grad_input # Return: (Tensor output, Tensor indices) - func: max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) python_module: nn dispatch: CPU: max_pool3d_with_indices_out_cpu CUDA: max_pool3d_with_indices_out_cuda # Return: (Tensor output, Tensor indices) - func: max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) python_module: nn dispatch: CPU: max_pool3d_with_indices_cpu CUDA: max_pool3d_with_indices_cuda - func: max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: max_pool3d_with_indices_backward_out_cpu CUDA: max_pool3d_with_indices_backward_out_cuda - func: max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor python_module: nn dispatch: CPU: max_pool3d_with_indices_backward_cpu CUDA: max_pool3d_with_indices_backward_cuda - func: max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: max_unpooling2d_forward_out_cpu CUDA: max_unpooling2d_forward_out_cuda - func: max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor python_module: nn dispatch: CPU: max_unpooling2d_forward_cpu CUDA: max_unpooling2d_forward_cuda - func: max_unpool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: max_unpooling2d_backward_out_cpu CUDA: max_unpooling2d_backward_out_cuda - func: max_unpool2d_backward(Tensor grad_output, Tensor self, Tensor indices, int[2] output_size) -> Tensor python_module: nn dispatch: CPU: max_unpooling2d_backward_cpu CUDA: max_unpooling2d_backward_cuda - func: max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) 
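# NOTE: [Usage sketch for the max_pool*_with_indices / max_unpool entries above]
# Illustrative only. The `indices` returned by the pooling ops are what the
# unpooling ops consume to scatter values back to their argmax positions.
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(1, 1, 8, 8)
#   pooled, indices = F.max_pool2d(x, kernel_size=2, return_indices=True)
#   restored = F.max_unpool2d(pooled, indices, kernel_size=2)   # zeros except at the argmax positions
#   restored.shape                                              # torch.Size([1, 1, 8, 8])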
python_module: nn dispatch: CPU: max_unpooling3d_forward_out_cpu CUDA: max_unpooling3d_forward_out_cuda - func: max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor python_module: nn dispatch: CPU: max_unpooling3d_forward_cpu CUDA: max_unpooling3d_forward_cuda - func: max_unpool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: max_unpooling3d_backward_out_cpu CUDA: max_unpooling3d_backward_out_cuda - func: max_unpool3d_backward(Tensor grad_output, Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor python_module: nn dispatch: CPU: max_unpooling3d_backward_cpu CUDA: max_unpooling3d_backward_cuda - func: reflection_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU, QuantizedCPU: reflection_pad1d_out_cpu CUDA: reflection_pad1d_out_cuda - func: reflection_pad1d(Tensor self, int[2] padding) -> Tensor python_module: nn structured_delegate: reflection_pad1d.out dispatch: QuantizedCPU: reflection_pad1d_cpu - func: reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: reflection_pad1d_backward_out_cpu CUDA: reflection_pad1d_backward_out_cuda - func: reflection_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor python_module: nn structured_delegate: reflection_pad1d_backward.grad_input - func: reflection_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU, QuantizedCPU: reflection_pad2d_out_cpu CUDA: reflection_pad2d_out_cuda - func: reflection_pad2d(Tensor self, int[4] padding) -> Tensor python_module: nn dispatch: CPU, QuantizedCPU: reflection_pad2d_cpu CUDA: reflection_pad2d_cuda - func: reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: reflection_pad2d_backward_out_cpu CUDA: reflection_pad2d_backward_out_cuda - func: reflection_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor python_module: nn dispatch: CPU: reflection_pad2d_backward_cpu CUDA: reflection_pad2d_backward_cuda - func: reflection_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: reflection_pad3d_out_cpu CUDA: reflection_pad3d_out_cuda - func: reflection_pad3d(Tensor self, int[6] padding) -> Tensor python_module: nn structured_delegate: reflection_pad3d.out - func: reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: reflection_pad3d_backward_out_cpu CUDA: reflection_pad3d_backward_out_cuda - func: reflection_pad3d_backward(Tensor grad_output, Tensor self, int[6] padding) -> Tensor python_module: nn structured_delegate: reflection_pad3d_backward.grad_input - func: replication_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!) 
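# NOTE: [Usage sketch for the reflection/replication padding entries above]
# Illustrative only: F.pad with mode='reflect' / mode='replicate' is the usual
# entry point for these kernels, chosen by the input's dimensionality.
# Reflection padding requires the pad amount to be smaller than the padded
# dimension.
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.arange(1., 5.).view(1, 1, 4)     # (N, C, W)
#   F.pad(x, (2, 2), mode='reflect')           # mirrors without repeating the edge value
#   F.pad(x, (2, 2), mode='replicate')         # repeats the edge value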
python_module: nn structured: True dispatch: CPU: replication_pad1d_out_cpu CUDA: replication_pad1d_out_cuda - func: replication_pad1d(Tensor self, int[2] padding) -> Tensor python_module: nn structured_delegate: replication_pad1d.out - func: replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: replication_pad1d_backward_out_cpu CUDA: replication_pad1d_backward_out_cuda - func: replication_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor python_module: nn structured_delegate: replication_pad1d_backward.grad_input - func: replication_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: replication_pad2d_out_cpu CUDA: replication_pad2d_out_cuda - func: replication_pad2d(Tensor self, int[4] padding) -> Tensor python_module: nn structured_delegate: replication_pad2d.out - func: replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: replication_pad2d_backward_out_cpu CUDA: replication_pad2d_backward_out_cuda - func: replication_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor python_module: nn dispatch: CPU: replication_pad2d_backward_cpu CUDA: replication_pad2d_backward_cuda - func: replication_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: replication_pad3d_out_cpu CUDA: replication_pad3d_out_cuda - func: replication_pad3d(Tensor self, int[6] padding) -> Tensor python_module: nn structured_delegate: replication_pad3d.out - func: replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: replication_pad3d_backward_out_cpu CUDA: replication_pad3d_backward_out_cuda - func: replication_pad3d_backward(Tensor grad_output, Tensor self, int[6] padding) -> Tensor python_module: nn dispatch: CPU: replication_pad3d_backward_cpu CUDA: replication_pad3d_backward_cuda - func: upsample_linear1d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: upsample_linear1d - func: upsample_linear1d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: upsample_linear1d_backward - func: upsample_bilinear2d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: upsample_bilinear2d - func: upsample_bilinear2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: upsample_bilinear2d_backward - func: _upsample_bilinear2d_aa.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: _upsample_bilinear2d_aa - func: _upsample_bilinear2d_aa_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? 
scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: _upsample_bilinear2d_aa_backward - func: upsample_trilinear3d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: upsample_trilinear3d - func: upsample_trilinear3d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: upsample_trilinear3d_backward - func: upsample_bicubic2d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: upsample_bicubic2d - func: upsample_bicubic2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: upsample_bicubic2d_backward - func: _upsample_bicubic2d_aa.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: _upsample_bicubic2d_aa - func: _upsample_bicubic2d_aa_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: _upsample_bicubic2d_aa_backward - func: upsample_nearest1d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: upsample_nearest1d - func: _upsample_nearest_exact1d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: _upsample_nearest_exact1d - func: upsample_nearest1d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: upsample_nearest1d_backward - func: _upsample_nearest_exact1d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: _upsample_nearest_exact1d_backward - func: upsample_nearest2d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: upsample_nearest2d - func: _upsample_nearest_exact2d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: _upsample_nearest_exact2d - func: upsample_nearest2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: upsample_nearest2d_backward - func: _upsample_nearest_exact2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: _upsample_nearest_exact2d_backward - func: upsample_nearest3d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor python_module: nn dispatch: CPU: upsample_nearest3d_cpu CUDA: upsample_nearest3d_cuda QuantizedCPU: upsample_nearest3d_quantized_cpu - func: _upsample_nearest_exact3d.vec(Tensor input, int[]? output_size, float[]? 
scale_factors) -> Tensor python_module: nn dispatch: CPU: _upsample_nearest_exact3d_cpu CUDA: _upsample_nearest_exact3d_cuda QuantizedCPU: _upsample_nearest_exact3d_quantized_cpu - func: upsample_nearest3d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor python_module: nn dispatch: CPU: upsample_nearest3d_backward_cpu CUDA: upsample_nearest3d_backward_cuda - func: _upsample_nearest_exact3d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor python_module: nn dispatch: CPU: _upsample_nearest_exact3d_backward_cpu CUDA: _upsample_nearest_exact3d_backward_cuda # NOTE: all of the non-"vec" upsample overloads are only kept for backward compatibility. - func: upsample_linear1d.out(Tensor self, int[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: upsample_linear1d_out_cpu CUDA: upsample_linear1d_out_cuda - func: upsample_linear1d(Tensor self, int[1] output_size, bool align_corners, float? scales=None) -> Tensor python_module: nn structured_delegate: upsample_linear1d.out - func: upsample_linear1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: upsample_linear1d_backward_out_cpu CUDA: upsample_linear1d_backward_out_cuda - func: upsample_linear1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners, float? scales=None) -> Tensor python_module: nn structured_delegate: upsample_linear1d_backward.grad_input - func: upsample_bilinear2d.out(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: upsample_bilinear2d_out_cpu CUDA: upsample_bilinear2d_out_cuda - func: upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: upsample_bilinear2d.out dispatch: QuantizedCPU: upsample_bilinear2d_quantized_cpu - func: upsample_bilinear2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: upsample_bilinear2d_backward_out_cpu CUDA: upsample_bilinear2d_backward_out_cuda - func: upsample_bilinear2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: upsample_bilinear2d_backward.grad_input - func: _upsample_bilinear2d_aa.out(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: _upsample_bilinear2d_aa_out_cpu CUDA: _upsample_bilinear2d_aa_out_cuda - func: _upsample_bilinear2d_aa(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: _upsample_bilinear2d_aa.out - func: _upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
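# NOTE: [Usage sketch for the _upsample_*_aa entries above]
# Illustrative only, and the routing described here is an inference from the
# op names rather than something this file states: antialias=True in
# F.interpolate selects the anti-aliased (_aa) bilinear/bicubic variants,
# which mostly matters when downscaling.
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(1, 3, 64, 64)
#   F.interpolate(x, size=(32, 32), mode='bilinear', align_corners=False, antialias=True)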
python_module: nn structured: True dispatch: CPU: _upsample_bilinear2d_aa_backward_out_cpu CUDA: _upsample_bilinear2d_aa_backward_out_cuda - func: _upsample_bilinear2d_aa_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: _upsample_bilinear2d_aa_backward.grad_input - func: upsample_bicubic2d.out(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: upsample_bicubic2d_out_cpu CUDA: upsample_bicubic2d_out_cuda - func: upsample_bicubic2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: upsample_bicubic2d.out - func: upsample_bicubic2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: upsample_bicubic2d_backward_out_cpu CUDA: upsample_bicubic2d_backward_out_cuda - func: upsample_bicubic2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: upsample_bicubic2d_backward.grad_input - func: _upsample_bicubic2d_aa.out(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: _upsample_bicubic2d_aa_out_cpu CUDA: _upsample_bicubic2d_aa_out_cuda - func: _upsample_bicubic2d_aa(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: _upsample_bicubic2d_aa.out - func: _upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: _upsample_bicubic2d_aa_backward_out_cpu CUDA: _upsample_bicubic2d_aa_backward_out_cuda - func: _upsample_bicubic2d_aa_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: _upsample_bicubic2d_aa_backward.grad_input - func: upsample_trilinear3d.out(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: upsample_trilinear3d_out_cpu CUDA: upsample_trilinear3d_out_cuda - func: upsample_trilinear3d(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: upsample_trilinear3d.out - func: upsample_trilinear3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
python_module: nn structured: True dispatch: CPU: upsample_trilinear3d_backward_out_cpu CUDA: upsample_trilinear3d_backward_out_cuda - func: upsample_trilinear3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: upsample_trilinear3d_backward.grad_input - func: upsample_nearest1d.out(Tensor self, int[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: upsample_nearest1d_out_cpu CUDA: upsample_nearest1d_out_cuda - func: _upsample_nearest_exact1d.out(Tensor self, int[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: _upsample_nearest_exact1d_out_cpu CUDA: _upsample_nearest_exact1d_out_cuda - func: upsample_nearest1d(Tensor self, int[1] output_size, float? scales=None) -> Tensor python_module: nn structured_delegate: upsample_nearest1d.out - func: _upsample_nearest_exact1d(Tensor self, int[1] output_size, float? scales=None) -> Tensor python_module: nn structured_delegate: _upsample_nearest_exact1d.out - func: upsample_nearest1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: upsample_nearest1d_backward_out_cpu CUDA: upsample_nearest1d_backward_out_cuda - func: _upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: _upsample_nearest_exact1d_backward_out_cpu CUDA: _upsample_nearest_exact1d_backward_out_cuda - func: upsample_nearest1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, float? scales=None) -> Tensor python_module: nn structured_delegate: upsample_nearest1d_backward.grad_input - func: _upsample_nearest_exact1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, float? scales=None) -> Tensor python_module: nn structured_delegate: _upsample_nearest_exact1d_backward.grad_input - func: upsample_nearest2d.out(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: upsample_nearest2d_out_cpu CUDA: upsample_nearest2d_out_cuda - func: _upsample_nearest_exact2d.out(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: _upsample_nearest_exact2d_out_cpu CUDA: _upsample_nearest_exact2d_out_cuda - func: upsample_nearest2d(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: upsample_nearest2d.out dispatch: QuantizedCPU: upsample_nearest2d_quantized_cpu - func: _upsample_nearest_exact2d(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: _upsample_nearest_exact2d.out dispatch: QuantizedCPU: _upsample_nearest_exact2d_quantized_cpu - func: upsample_nearest2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
python_module: nn structured: True dispatch: CPU: upsample_nearest2d_backward_out_cpu CUDA: upsample_nearest2d_backward_out_cuda - func: _upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: _upsample_nearest_exact2d_backward_out_cpu CUDA: _upsample_nearest_exact2d_backward_out_cuda - func: upsample_nearest2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: upsample_nearest2d_backward.grad_input - func: _upsample_nearest_exact2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: _upsample_nearest_exact2d_backward.grad_input - func: upsample_nearest3d.out(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: upsample_nearest3d_out_cpu CUDA: upsample_nearest3d_out_cuda - func: _upsample_nearest_exact3d.out(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: _upsample_nearest_exact3d_out_cpu CUDA: _upsample_nearest_exact3d_out_cuda - func: upsample_nearest3d(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: upsample_nearest3d.out dispatch: QuantizedCPU: upsample_nearest3d_quantized_cpu - func: _upsample_nearest_exact3d(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: _upsample_nearest_exact3d.out dispatch: QuantizedCPU: _upsample_nearest_exact3d_quantized_cpu - func: upsample_nearest3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: upsample_nearest3d_backward_out_cpu CUDA: upsample_nearest3d_backward_out_cuda - func: _upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: _upsample_nearest_exact3d_backward_out_cpu CUDA: _upsample_nearest_exact3d_backward_out_cuda - func: upsample_nearest3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: upsample_nearest3d_backward.grad_input - func: _upsample_nearest_exact3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn structured_delegate: _upsample_nearest_exact3d_backward.grad_input - func: sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) 
python_module: nn structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: sigmoid_backward_out - func: sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor python_module: nn structured_delegate: sigmoid_backward.grad_input - func: logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: logit_backward_out - func: logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor python_module: nn structured_delegate: logit_backward.grad_input - func: tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: tanh_backward_out - func: tanh_backward(Tensor grad_output, Tensor output) -> Tensor python_module: nn structured_delegate: tanh_backward.grad_input # What's a thnn_conv_ versus a slow_conv_? # # Historically, we have inefficient implementations of convolutions # coming from the THNN/THCUNN library. These convolutions typically # operated by computing the Toeplitz matrix and then doing a matrix # multiply with the input; this is very memory inefficient! However, # occasionally, we really don't have anything better, so it's helpful # to have these fallbacks when there is no more optimized implementation # in cudnn or mkldnn, etc. Both thnn_ and slow_ convolutions fall # into this bucket. # # The difference between these two designations is that thnn_ refers # to a convolution that is still written in the "legacy" style; that is, # C code in the THNN/ or THCUNN/ directory. A slow_ convolution is # one that is written in the native style: modern C++. Algorithmically, # these are the same thing, but we give them different prefixes to # make the operational distinction clear. A commented im2col sketch follows the # thnn_conv2d entries below. - func: slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) python_module: nn structured: True dispatch: CPU: slow_conv_transpose2d_structured_cpu CUDA: slow_conv_transpose2d_structured_cuda - func: slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1) -> Tensor python_module: nn structured_delegate: slow_conv_transpose2d.out - func: slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: slow_conv_transpose3d_out_cpu CUDA: slow_conv_transpose3d_out_cuda - func: slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1) -> Tensor python_module: nn dispatch: CPU: slow_conv_transpose3d_cpu CUDA: slow_conv_transpose3d_cuda - func: thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) python_module: nn - func: thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor python_module: nn
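# As a rough illustration of the "Toeplitz matrix + matmul" approach described
# in the note above (a sketch only, written against the public
# `torch.nn.functional.unfold` API rather than the kernels registered here):
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(1, 3, 8, 8)            # N, C_in, H, W
#   w = torch.randn(4, 3, 3, 3)            # C_out, C_in, kH, kW
#   cols = F.unfold(x, kernel_size=3)      # im2col: (N, C_in*kH*kW, H_out*W_out)
#   out = (w.view(4, -1) @ cols).view(1, 4, 6, 6)
#   assert torch.allclose(out, F.conv2d(x, w), atol=1e-4)
#
# Materializing the (C_in*kH*kW, H_out*W_out) patch matrix before the single
# matmul is exactly what makes this route so memory hungry.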
- func: _slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!) python_module: nn dispatch: CPU: slow_conv2d_forward_out_cpu CUDA: slow_conv2d_forward_out_cuda - func: _slow_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> Tensor python_module: nn dispatch: CPU: slow_conv2d_forward_cpu CUDA: slow_conv2d_forward_cuda - func: _slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) python_module: nn dispatch: CPU: slow_conv2d_backward_out_cpu CUDA: slow_conv2d_backward_out_cuda - func: _slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) python_module: nn dispatch: CPU: slow_conv2d_backward_cpu CUDA: slow_conv2d_backward_cuda - func: _conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!) use_const_ref_for_mutable_tensors: True python_module: nn dispatch: CUDA: conv_depthwise2d_cuda_out - func: _conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation) -> Tensor python_module: nn dispatch: CUDA: conv_depthwise2d_cuda - func: conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, int[3] dilation) -> Tensor python_module: nn dispatch: CUDA: conv_depthwise3d_cuda - func: slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) python_module: nn - func: slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0) -> Tensor python_module: nn - func: slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, *, Tensor(a!) output) -> Tensor(a!) python_module: nn dispatch: CPU: slow_conv3d_forward_out_cpu - func: slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding) -> Tensor python_module: nn dispatch: CPU: slow_conv3d_forward_cpu - func: slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor python_module: nn dispatch: CPU: slow_conv_dilated2d_cpu CUDA: slow_conv_dilated2d_cuda - func: slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1) -> Tensor python_module: nn dispatch: CPU: slow_conv_dilated3d_cpu CUDA: slow_conv_dilated3d_cuda - func: col2im.out(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: col2im_out_cpu CUDA: col2im_out_cuda - func: col2im(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor python_module: nn dispatch: CPU: col2im_cpu CUDA: col2im_cuda - func: col2im_backward.grad_input(Tensor grad_output, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn dispatch: CPU: col2im_backward_out_cpu CUDA: col2im_backward_out_cuda - func: col2im_backward(Tensor grad_output, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor python_module: nn dispatch: CPU: col2im_backward_cpu CUDA: col2im_backward_cuda - func: column_stack(Tensor[] tensors) -> Tensor - func: column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) - func: im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: im2col_out_cpu CUDA: im2col_out_cuda - func: im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor python_module: nn dispatch: CPU: im2col_cpu CUDA: im2col_cuda - func: im2col_backward.grad_input(Tensor grad_output, int[2] input_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: im2col_backward_out_cpu CUDA: im2col_backward_out_cuda - func: im2col_backward(Tensor grad_output, int[2] input_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor python_module: nn dispatch: CPU: im2col_backward_cpu CUDA: im2col_backward_cuda - func: isfinite(Tensor self) -> Tensor variants: function, method device_check: NoCheck device_guard: False - func: isinf(Tensor self) -> Tensor variants: function, method device_check: NoCheck device_guard: False dispatch: CompositeExplicitAutograd: isinf SparseCPU, SparseCUDA: isinf_sparse SparseCsrCPU, SparseCsrCUDA: isinf_sparse_csr - func: record_stream(Tensor(a!) self, Stream s) -> () variants: method dispatch: CUDA: record_stream_cuda - func: isposinf(Tensor self) -> Tensor variants: function, method structured_delegate: isposinf.out dispatch: SparseCPU, SparseCUDA: isposinf_sparse SparseCsrCPU, SparseCsrCUDA: isposinf_sparse_csr - func: isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: isposinf_out SparseCPU, SparseCUDA: isposinf_sparse_out SparseCsrCPU, SparseCsrCUDA: isposinf_sparse_csr_out - func: isneginf(Tensor self) -> Tensor variants: function, method structured_delegate: isneginf.out dispatch: SparseCPU, SparseCUDA: isneginf_sparse SparseCsrCPU, SparseCsrCUDA: isneginf_sparse_csr - func: isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: isneginf_out SparseCPU, SparseCUDA: isneginf_sparse_out SparseCsrCPU, SparseCsrCUDA: isneginf_sparse_csr_out # NOTE [_add_batch_dim and _remove_batch_dim] # _add_batch_dim and _remove_batch_dim are meant to be used in the implementation # of the vmap frontend API (see torch/_vmap_internals.py). They are not # user-facing, hence the leading underscore. Please don't use them anywhere else. - func: _add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor variants: function # See NOTE [_add_batch_dim and _remove_batch_dim] - func: _remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor variants: function ## Functions related to the `torch.special` namespace # Note [special namespace binding] # Functions in the special python module should have their names start with # the "special_" prefix and be bound to the desired Python name in # torch/special/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/special.h.
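# For example (a usage sketch, not the generated binding code): `special_entr`
# below is exposed to users as `torch.special.entr`, and the C++ name declared
# in the header referenced above is expected to be `torch::special::entr`.
#
#   import torch
#   x = torch.rand(3)
#   y = torch.special.entr(x)   # elementwise -x * log(x) for x > 0
#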
# The "special_" names should be hidden from the user and not documented. - func: special_entr(Tensor self) -> Tensor structured_delegate: special_entr.out python_module: special variants: function - func: special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase python_module: special variants: function dispatch: CPU, CUDA: special_entr_out - func: special_ndtri(Tensor self) -> Tensor structured_delegate: special_ndtri.out python_module: special variants: function - func: special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) structured: True structured_inherits: TensorIteratorBase python_module: special variants: function dispatch: CPU, CUDA: special_ndtri_out - func: special_expm1(Tensor self) -> Tensor python_module: special variants: function - func: special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special variants: function - func: special_exp2(Tensor self) -> Tensor python_module: special variants: function - func: special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special variants: function - func: special_psi(Tensor self) -> Tensor python_module: special variants: function - func: special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special variants: function - func: special_digamma(Tensor self) -> Tensor python_module: special variants: function - func: special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special variants: function - func: special_gammaln(Tensor self) -> Tensor python_module: special variants: function - func: special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special variants: function - func: special_erf(Tensor self) -> Tensor python_module: special variants: function - func: special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special variants: function - func: special_erfc(Tensor self) -> Tensor python_module: special variants: function - func: special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special - func: special_erfcx(Tensor self) -> Tensor python_module: special variants: function structured_delegate: special_erfcx.out - func: special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: special_erfcx_out - func: special_erfinv(Tensor self) -> Tensor python_module: special variants: function - func: special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special - func: special_ndtr(Tensor self) -> Tensor python_module: special variants: function - func: special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special variants: function - func: special_xlog1py(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator python_module: special variants: function structured_delegate: special_xlog1py.out - func: special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator python_module: special variants: function dispatch: CompositeExplicitAutograd: special_xlog1py - func: special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator python_module: special variants: function dispatch: CompositeExplicitAutograd: special_xlog1py - func: special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase python_module: special variants: function dispatch: CPU, CUDA: special_xlog1py_out - func: special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator python_module: special variants: function dispatch: CompositeExplicitAutograd: special_xlog1py_out - func: special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator python_module: special variants: function dispatch: CompositeExplicitAutograd: special_xlog1py_out - func: special_xlogy(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator python_module: special variants: function - func: special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator python_module: special variants: function - func: special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator python_module: special variants: function - func: special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator python_module: special variants: function - func: special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator python_module: special variants: function - func: special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator python_module: special variants: function - func: special_zeta(Tensor self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator python_module: special variants: function structured_delegate: special_zeta.out dispatch: CompositeExplicitAutograd: special_zeta - func: special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor device_check: NoCheck # TensorIterator python_module: special variants: function dispatch: CompositeExplicitAutograd: special_zeta - func: special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor device_check: NoCheck # TensorIterator python_module: special variants: function dispatch: CompositeExplicitAutograd: special_zeta - func: special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator structured: True structured_inherits: TensorIteratorBase python_module: special variants: function dispatch: CPU, CUDA: special_zeta_out - func: special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator python_module: special variants: function dispatch: CompositeExplicitAutograd: special_zeta_out - func: special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) device_check: NoCheck # TensorIterator python_module: special variants: function dispatch: CompositeExplicitAutograd: special_zeta_out - func: special_i0(Tensor self) -> Tensor python_module: special variants: function - func: special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special variants: function - func: special_i0e(Tensor self) -> Tensor python_module: special variants: function structured_delegate: special_i0e.out - func: special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
python_module: special structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: special_i0e_out - func: special_i1(Tensor self) -> Tensor python_module: special variants: function structured_delegate: special_i1.out - func: special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: special_i1_out - func: special_i1e(Tensor self) -> Tensor python_module: special variants: function structured_delegate: special_i1e.out - func: special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special structured: True structured_inherits: TensorIteratorBase dispatch: CPU, CUDA: special_i1e_out - func: special_logit(Tensor self, float? eps=None) -> Tensor python_module: special variants: function - func: special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) python_module: special - func: special_polygamma(int n, Tensor self) -> Tensor python_module: special variants: function, method - func: special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special - func: special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor python_module: special variants: function - func: special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) python_module: special - func: special_expit(Tensor self) -> Tensor python_module: special variants: function - func: special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special variants: function - func: special_sinc(Tensor self) -> Tensor python_module: special variants: function - func: special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special variants: function - func: special_round(Tensor self, *, int decimals=0) -> Tensor python_module: special variants: function - func: special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!) python_module: special variants: function - func: special_log1p(Tensor self) -> Tensor python_module: special variants: function - func: special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: special variants: function - func: special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor python_module: special variants: function - func: special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) python_module: special variants: function - func: special_gammainc(Tensor self, Tensor other) -> Tensor python_module: special variants: function - func: special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) python_module: special variants: function - func: special_gammaincc(Tensor self, Tensor other) -> Tensor python_module: special variants: function - func: special_multigammaln(Tensor self, int p) -> Tensor python_module: special variants: function - func: special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) python_module: special variants: function - func: special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor python_module: special variants: function ## Functions related to the fast Fourier transform and the torch.fft namespace # Note [FFT namespace binding] # Functions in the fft python module should have their names start with # the "fft_" prefix and be bound to the desired Python name in # torch/fft/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/fft.h.
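# For example (a usage sketch, not the generated binding code): `fft_fft`
# below is exposed to users as `torch.fft.fft`, and the C++ name declared in
# the header referenced above is expected to be `torch::fft::fft`.
#
#   import torch
#   t = torch.randn(8)
#   torch.fft.fft(t)   # 1-D discrete Fourier transform; returns a complex tensor
#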
# The "fft_" names should be hidden from the user and not documented. # # See fft_fft as an example. # torch.fft.fft # NOTE: NOT an alias for torch.fft, which has different semantics - func: fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor python_module: fft variants: function - func: fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) python_module: fft variants: function - func: fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor python_module: fft variants: function - func: fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) python_module: fft variants: function - func: fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor python_module: fft variants: function - func: fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) python_module: fft variants: function - func: fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor python_module: fft variants: function - func: fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) python_module: fft variants: function - func: fft_hfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor python_module: fft variants: function - func: fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) python_module: fft variants: function - func: fft_ihfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor python_module: fft variants: function - func: fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) python_module: fft variants: function - func: fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor python_module: fft variants: function - func: fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) python_module: fft variants: function - func: fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor python_module: fft variants: function - func: fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) python_module: fft variants: function - func: fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor python_module: fft variants: function - func: fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) python_module: fft variants: function - func: fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor python_module: fft variants: function - func: fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) python_module: fft variants: function - func: fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor use_const_ref_for_mutable_tensors: True python_module: fft variants: function - func: fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) use_const_ref_for_mutable_tensors: True python_module: fft variants: function - func: fft_ihfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor use_const_ref_for_mutable_tensors: True python_module: fft variants: function - func: fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None, *, Tensor(a!) out) -> Tensor(a!) use_const_ref_for_mutable_tensors: True python_module: fft variants: function - func: fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor python_module: fft variants: function - func: fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) python_module: fft variants: function - func: fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor python_module: fft variants: function - func: fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) python_module: fft variants: function - func: fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor python_module: fft variants: function - func: fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) python_module: fft variants: function - func: fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor python_module: fft variants: function - func: fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) python_module: fft variants: function - func: fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor use_const_ref_for_mutable_tensors: True python_module: fft variants: function - func: fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) use_const_ref_for_mutable_tensors: True python_module: fft variants: function - func: fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor use_const_ref_for_mutable_tensors: True python_module: fft variants: function - func: fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) use_const_ref_for_mutable_tensors: True python_module: fft variants: function - func: fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor python_module: fft variants: function - func: fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!) python_module: fft variants: function - func: fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor python_module: fft variants: function - func: fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!) python_module: fft variants: function - func: fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor python_module: fft variants: function - func: fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor python_module: fft variants: function ## Functions for linear algebra and the torch.linalg namespace # Note [linalg namespace binding] # Functions in the linalg python module should have their names start with # "linalg_" and be bound to the desired Python name in # torch/linalg/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/linalg.h. # The "linalg_" names should be hidden from the user and not documented. # # See linalg_det as an example. # "_ex" stands for experimental - func: linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info) python_module: linalg variants: function dispatch: CPU, CUDA: linalg_cholesky_ex - func: linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) 
info) -> (Tensor(a!) L, Tensor(b!) info) python_module: linalg variants: function dispatch: CPU, CUDA: linalg_cholesky_ex_out - func: linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor python_module: linalg variants: function - func: linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!) python_module: linalg variants: function - func: linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor python_module: linalg variants: function dispatch: CPU, CUDA: linalg_cross - func: linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) python_module: linalg dispatch: CPU, CUDA: linalg_cross_out # linalg.lu_factor - func: linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots) python_module: linalg variants: function - func: linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots) python_module: linalg variants: function - func: linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info) python_module: linalg structured_delegate: linalg_lu_factor_ex.out variants: function - func: linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) python_module: linalg variants: function structured: True dispatch: CPU, CUDA: linalg_lu_factor_ex_out - func: linalg_det(Tensor self) -> Tensor python_module: linalg variants: function - func: linalg_det.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: linalg # torch.det, alias for torch.linalg.det - func: det(Tensor self) -> Tensor variants: function, method - func: _det_lu_based_helper(Tensor self) -> (Tensor det, Tensor lu, Tensor pivs) variants: function dispatch: CPU, CUDA: _det_lu_based_helper - func: _det_lu_based_helper_backward_helper(Tensor det_grad, Tensor det, Tensor self, Tensor lu, Tensor pivs) -> Tensor variants: function dispatch: CPU, CUDA: _det_lu_based_helper_backward_helper - func: linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values) python_module: linalg variants: function dispatch: CompositeExplicitAutograd: linalg_lstsq - func: linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) python_module: linalg variants: function dispatch: CPU, CUDA: linalg_lstsq_out # torch.linalg.matmul, alias for torch.matmul - func: linalg_matmul(Tensor self, Tensor other) -> Tensor python_module: linalg variants: function - func: linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) python_module: linalg - func: linalg_matrix_exp(Tensor self) -> Tensor python_module: linalg variants: function dispatch: CPU, CUDA: linalg_matrix_exp - func: linalg_slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet) python_module: linalg variants: function dispatch: CPU, CUDA: linalg_slogdet - func: linalg_slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) 
logabsdet) python_module: linalg dispatch: CPU, CUDA: linalg_slogdet_out - func: linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors) python_module: linalg variants: function dispatch: CPU, CUDA: linalg_eig - func: linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) python_module: linalg dispatch: CPU, CUDA: linalg_eig_out - func: linalg_eigvals(Tensor self) -> Tensor python_module: linalg - func: linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: linalg - func: linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors) python_module: linalg variants: function dispatch: CPU, CUDA: linalg_eigh - func: linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) python_module: linalg dispatch: CPU, CUDA: linalg_eigh_out - func: linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor python_module: linalg variants: function - func: linalg_eigvalsh.out(Tensor self, str UPLO='L', *, Tensor(a!) out) -> Tensor(a!) python_module: linalg dispatch: CPU, CUDA: linalg_eigvalsh_out - func: linalg_householder_product(Tensor input, Tensor tau) -> Tensor python_module: linalg variants: function dispatch: CPU, CUDA: linalg_householder_product - func: linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!) python_module: linalg dispatch: CPU, CUDA: linalg_householder_product_out - func: _linalg_inv_out_helper_(Tensor(a!) self, Tensor(b!) infos_lu, Tensor(c!) infos_getri) -> Tensor(a!) variants: function dispatch: CPU: _linalg_inv_out_helper_cpu CUDA: _linalg_inv_out_helper_cuda - func: linalg_inv_ex(Tensor self, *, bool check_errors=False) -> (Tensor inverse, Tensor info) python_module: linalg variants: function dispatch: CompositeExplicitAutograd: linalg_inv_ex - func: linalg_inv_ex.inverse(Tensor self, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info) python_module: linalg variants: function dispatch: CompositeExplicitAutograd: linalg_inv_ex_out - func: linalg_inv(Tensor self) -> Tensor python_module: linalg variants: function - func: linalg_inv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: linalg variants: function - func: inner(Tensor self, Tensor other) -> Tensor variants: function, method - func: inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) - func: outer(Tensor self, Tensor vec2) -> Tensor variants: function, method - func: outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) # torch.ger, alias for torch.outer - func: ger(Tensor self, Tensor vec2) -> Tensor variants: function, method - func: ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) - func: linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor python_module: linalg variants: function - func: linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor python_module: linalg variants: function - func: linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) python_module: linalg variants: function - func: linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
python_module: linalg variants: function - func: linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor python_module: linalg variants: function dispatch: CPU, CUDA: linalg_vector_norm - func: linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) python_module: linalg dispatch: CPU, CUDA: linalg_vector_norm_out - func: linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor python_module: linalg - func: linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) python_module: linalg - func: linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor python_module: linalg - func: linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) python_module: linalg # This function exposes the `compute_uv` flag, which is then used to implement `linalg.svd` and # `linalg.svdvals` as composite functions that call this one; see the commented sketch further below. - func: _linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor Vh) variants: function structured_delegate: _linalg_svd.U - func: _linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) structured: True dispatch: CPU, CUDA: _linalg_svd_out - func: linalg_svd(Tensor A, bool full_matrices=True) -> (Tensor U, Tensor S, Tensor Vh) python_module: linalg variants: function - func: linalg_svd.U(Tensor A, bool full_matrices=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) python_module: linalg variants: function - func: linalg_svdvals(Tensor A) -> Tensor python_module: linalg variants: function - func: linalg_svdvals.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) python_module: linalg variants: function - func: linalg_cond(Tensor self, Scalar? p=None) -> Tensor python_module: linalg variants: function - func: linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!) python_module: linalg variants: function - func: linalg_cond.p_str(Tensor self, str p) -> Tensor python_module: linalg variants: function - func: linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) python_module: linalg variants: function - func: linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor python_module: linalg variants: function dispatch: CompositeExplicitAutograd: linalg_pinv - func: linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) python_module: linalg variants: function dispatch: CompositeExplicitAutograd: linalg_pinv_out - func: linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor cpp_no_default_args: ['atol', 'rtol'] python_module: linalg variants: function - func: linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
cpp_no_default_args: ['atol', 'rtol'] python_module: linalg variants: function - func: linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor python_module: linalg variants: function - func: linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor python_module: linalg variants: function - func: linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) python_module: linalg variants: function - func: linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) python_module: linalg variants: function - func: linalg_solve(Tensor input, Tensor other) -> Tensor python_module: linalg variants: function dispatch: CPU, CUDA: linalg_solve - func: linalg_solve.out(Tensor input, Tensor other, *, Tensor(a!) out) -> Tensor(a!) python_module: linalg dispatch: CPU, CUDA: linalg_solve_out - func: linalg_tensorinv(Tensor self, int ind=2) -> Tensor python_module: linalg variants: function - func: linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!) python_module: linalg variants: function - func: linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor python_module: linalg variants: function - func: linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!) python_module: linalg variants: function - func: linalg_qr(Tensor self, str mode='reduced') -> (Tensor Q, Tensor R) python_module: linalg variants: function dispatch: CompositeExplicitAutograd: linalg_qr - func: linalg_qr.out(Tensor self, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) python_module: linalg variants: function dispatch: CompositeExplicitAutograd: linalg_qr_out - func: _linalg_qr_helper(Tensor self, str mode) -> (Tensor, Tensor) variants: function dispatch: CPU: _linalg_qr_helper_default CUDA: _linalg_qr_helper_cuda - func: linalg_matrix_power(Tensor self, int n) -> Tensor python_module: linalg - func: linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) python_module: linalg - func: linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor python_module: linalg variants: function - func: linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) python_module: linalg variants: function - func: linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor cpp_no_default_args: ['atol', 'rtol'] python_module: linalg variants: function - func: linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) cpp_no_default_args: ['atol', 'rtol'] python_module: linalg variants: function - func: linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor python_module: linalg variants: function - func: linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) python_module: linalg variants: function - func: linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor python_module: linalg variants: function - func: linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) 
python_module: linalg variants: function - func: linalg_multi_dot(Tensor[] tensors) -> Tensor python_module: linalg - func: linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) python_module: linalg ## Functions that are only for testing # It is undocumented and should not be used outside of tests. - func: _test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor # Note: this function is only for testing. - func: _test_optional_intlist(Tensor values, int[]? addends) -> Tensor python_module: nn dispatch: CPU: _test_optional_intlist # Note: this function is only for testing. - func: _test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor python_module: nn dispatch: CPU: _test_optional_intlist # Note: this function is only for testing. - func: _test_optional_floatlist(Tensor values, float[]? addends) -> Tensor python_module: nn dispatch: CPU: _test_optional_floatlist # Note: this function is only for testing. - func: _test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor python_module: nn # Note: this function is only for testing. - func: _test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor python_module: nn # Note: this function is only for testing. - func: _test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor cpp_no_default_args: ['a', 'b'] python_module: nn # Note: this function is only for testing. - func: _test_warn_in_autograd(Tensor self) -> Tensor python_module: nn dispatch: CompositeExplicitAutograd: _test_warn_in_autograd - func: segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor variants: function dispatch: CPU, CUDA: segment_reduce_kernel - func: _segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, int axis=0) -> Tensor variants: function dispatch: CPU, CUDA: _segment_reduce_backward_kernel - func: pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor python_module: nn variants: function - func: flatten_dense_tensors(Tensor[] tensors) -> Tensor variants: function python_module: nn - func: unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[] variants: function python_module: nn
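# Referring back to the note above `_linalg_svd`: `linalg.svd` and
# `linalg.svdvals` are composites over that primitive, with `linalg.svdvals`
# presumably taking the `compute_uv=False` path and returning only the
# singular values. A rough Python equivalence (sketch only; the real
# composites are implemented elsewhere in terms of `_linalg_svd`):
#
#   import torch
#   A = torch.randn(5, 3)
#   U, S, Vh = torch.linalg.svd(A, full_matrices=False)
#   assert torch.allclose(S, torch.linalg.svdvals(A), atol=1e-5)
#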