# See README.md in this directory for more guidance

# Temporary type cast operators. These are needed to trace type-casts now since
# Type's are not supported in the IR. Instead, we call down to these
# specialized operators for each datatype.
# TODO: remove when we have Type support in the IR

- func: _cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
  use_c10_dispatcher: full
  variants: function

- func: _cast_Char(Tensor self, bool non_blocking=False) -> Tensor
  use_c10_dispatcher: full
  variants: function

- func: _cast_Double(Tensor self, bool non_blocking=False) -> Tensor
  use_c10_dispatcher: full
  variants: function

- func: _cast_Float(Tensor self, bool non_blocking=False) -> Tensor
  use_c10_dispatcher: full
  variants: function

- func: _cast_Int(Tensor self, bool non_blocking=False) -> Tensor
  use_c10_dispatcher: full
  variants: function

- func: _cast_Long(Tensor self, bool non_blocking=False) -> Tensor
  use_c10_dispatcher: full
  variants: function

- func: _cast_Short(Tensor self, bool non_blocking=False) -> Tensor
  use_c10_dispatcher: full
  variants: function

- func: _cast_Half(Tensor self, bool non_blocking=False) -> Tensor
  use_c10_dispatcher: full
  variants: function

- func: backward(Tensor self, Tensor? gradient=None, bool keep_graph=False, bool create_graph=False) -> void
  variants: method

- func: set_data(Tensor(a!) self, Tensor new_data) -> void
  variants: method

- func: data(Tensor self) -> Tensor
  use_c10_dispatcher: unboxed_only
  variants: method

- func: is_leaf(Tensor self) -> bool
  variants: method

- func: output_nr(Tensor self) -> int
  variants: method
  supports_named_tensor: True

- func: _version(Tensor self) -> int
  variants: method

- func: rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
  variants: method
  supports_named_tensor: True

- func: rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
  variants: method
  supports_named_tensor: True

- func: align_to(Tensor(a) self, DimnameList names) -> Tensor(a)
  variants: method
  supports_named_tensor: True

- func: align_as(Tensor self, Tensor other) -> Tensor
  use_c10_dispatcher: unboxed_only
  variants: method
  supports_named_tensor: True

- func: align_tensors(Tensor[] tensors) -> Tensor[]
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True

- func: refine_names(Tensor(a) self, DimnameList names) -> Tensor(a)
  variants: method
  supports_named_tensor: True

- func: unflatten(Tensor self, Dimname dim, int[] sizes, DimnameList names) -> Tensor
  variants: method
  supports_named_tensor: True

- func: unflatten(Tensor self, int dim, int[] sizes, DimnameList names) -> Tensor
  variants: method
  supports_named_tensor: True

- func: _cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
  use_c10_dispatcher: unboxed_only
  dispatch:
    CUDA: _cudnn_ctc_loss

- func: _cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, int input_size, int mode, int hidden_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
  use_c10_dispatcher: unboxed_only
  dispatch:
    CUDA: _cudnn_rnn_flatten_weight

- func: _cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
  dispatch:
    CUDA: _cudnn_rnn

- func: _cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
  dispatch:
    CUDA: _cudnn_rnn_backward

- func: _cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor
  dispatch:
    CUDA: _cudnn_init_dropout_state

- func: _debug_has_internal_overlap(Tensor self) -> int
  use_c10_dispatcher: full
  variants: function

- func: _fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
  use_c10_dispatcher: 'unboxed_only'
  variants: function
  dispatch:
    CUDA: fused_dropout_cuda
  supports_named_tensor: True

- func: _masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
  use_c10_dispatcher: full
  variants: function
  dispatch:
    CUDA: masked_scale_cuda

- func: _sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)

- func: _sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only

- func: _sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only

- func: _sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only

- func: _reshape_from_tensor(Tensor self, Tensor shape) -> Tensor
  use_c10_dispatcher: full

- func: _shape_as_tensor(Tensor self) -> Tensor
  use_c10_dispatcher: full

- func: dropout(Tensor input, float p, bool train) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True

- func: dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True

- func: feature_dropout(Tensor input, float p, bool train) -> Tensor
  use_c10_dispatcher: full

- func: feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only

- func: alpha_dropout(Tensor input, float p, bool train) -> Tensor
  use_c10_dispatcher: full

- func: alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only

- func: feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
  use_c10_dispatcher: full

- func: feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only

- func: abs(Tensor self) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
  supports_named_tensor: True

- func: abs_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  variants: function, method
  supports_named_tensor: True
  dispatch:
    CPU: _abs__cpu
    CUDA: _abs__cuda

- func: abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: _abs_out_cpu
    CUDA: _abs_out_cuda
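# Illustrative sketch (not part of the operator schema): a `dispatch` block like the one on
# abs_ above binds a single declaration to per-backend kernels, selected by the inputs'
# backend; the Python call site does not change. A minimal Python example, assuming a
# CUDA-enabled build:
#
#   import torch
#   x = torch.tensor([-1.5, 2.0])
#   x.abs_()                                       # CPU tensor routes to _abs__cpu
#   y = torch.tensor([-1.5, 2.0], device="cuda")
#   y.abs_()                                       # CUDA tensor routes to _abs__cuda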
- func: acos(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: acos_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method
  dispatch:
    CPU: _acos__cpu
    CUDA: _acos__cuda

- func: acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: _acos_out_cpu
    CUDA: _acos_out_cuda

- func: avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor
  use_c10_dispatcher: unboxed_only

- func: adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor
  use_c10_dispatcher: unboxed_only

# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)
  use_c10_dispatcher: unboxed_only

- func: add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
  dispatch:
    CPU: add
    CUDA: add
    SparseCPU: add_sparse
    SparseCUDA: add_sparse
    MkldnnCPU: mkldnn_add
  supports_named_tensor: True

- func: add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  variants: method
  dispatch:
    CPU: add_
    CUDA: add_
    SparseCPU: add_sparse_
    SparseCUDA: add_sparse_
    MkldnnCPU: mkldnn_add_
  supports_named_tensor: True

- func: add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: add_out
    CUDA: add_out
    SparseCPU: add_out_sparse_cpu
    SparseCUDA: add_out_sparse_cuda
    MkldnnCPU: mkldnn_add_out
  supports_named_tensor: True

# For C++ only, until we have conversion from C++ numbers to Tensor
- func: add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
  supports_named_tensor: True

- func: add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  variants: method
  supports_named_tensor: True

- func: addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
  dispatch:
    CPU: legacy::cpu::_th_addmv
    CUDA: legacy::cuda::_th_addmv
  supports_named_tensor: True

- func: addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  variants: function, method
  dispatch:
    CPU: legacy::cpu::_th_addmv_
    CUDA: legacy::cuda::_th_addmv_
  supports_named_tensor: True

- func: addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: legacy::cpu::_th_addmv_out
    CUDA: legacy::cuda::_th_addmv_out
  supports_named_tensor: True

- func: addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  use_c10_dispatcher: full
  variants: function, method

- func: addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  variants: method

- func: addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)

- func: affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor
  use_c10_dispatcher: unboxed_only
  variants: function

- func: affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor
  use_c10_dispatcher: unboxed_only
  variants: function

- func: all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
  use_c10_dispatcher: full
  variants: function, method

- func: all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)

- func: all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
  variants: function, method

- func: all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
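# Illustrative sketch (not part of the operator schema): `variants: function, method`
# exposes a declaration as both `torch.op(...)` and `Tensor.op(...)`, and the matching
# `.out` overload is picked when `out=` is passed; the add.* entries above are a typical
# set. A small Python example:
#
#   import torch
#   a, b = torch.ones(3), torch.full((3,), 2.0)
#   torch.add(a, b, alpha=2)          # function variant   (add.Tensor)
#   a.add(b, alpha=2)                 # method variant     (add.Tensor)
#   out = torch.empty(3)
#   torch.add(a, b, out=out)          # out= overload      (add.out)
#   a.add(2)                          # Scalar overload    (add.Scalar)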
- func: allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
  use_c10_dispatcher: full
  variants: function, method

- func: any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
  use_c10_dispatcher: full
  variants: function, method

- func: any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)

- func: any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
  variants: function, method

- func: any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)

- func: arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: arange.start_step(Scalar start, Scalar end, Scalar step, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)

- func: arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: arange_cpu_out
    CUDA: arange_cuda_out

# This function is a temporary hack to allow tracing of arange like constructs with dynamic
# bounds on arange. Normal arange is not traceable because it does not take any tensor inputs;
# if the range you need is based on another tensor, calling this function directly will
# preserve tracing. Get rid of this when arange can directly take tensors for bounds
# (so that it can be traced directly).
- func: _dim_arange(Tensor like, int dim) -> Tensor
  use_c10_dispatcher: full
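# Illustrative sketch (not part of the operator schema), expanding on the comment above:
# under torch.jit.trace, computing a length from another tensor's size bakes a constant
# into the trace, whereas _dim_arange keeps the bound tied to its tensor argument. Assumes
# the generated torch._dim_arange binding is available:
#
#   import torch
#
#   def constant_bound(x):
#       return torch.arange(x.size(1))     # size(1) becomes a Python int; traced as a constant
#
#   def traced_bound(x):
#       return torch._dim_arange(x, 1)     # the bound stays a function of x in the trace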
- func: argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
  use_c10_dispatcher: full
  variants: function, method

- func: argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
  use_c10_dispatcher: full
  variants: function, method

- func: as_strided(Tensor(a) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a)
  use_c10_dispatcher: unboxed_only
  variants: function, method
  dispatch:
    CPU: as_strided_tensorimpl
    CUDA: as_strided_tensorimpl
    QuantizedCPU: as_strided_qtensorimpl
  device_guard: False
  supports_named_tensor: True

- func: as_strided_(Tensor(a!) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  variants: function, method
  device_guard: False

- func: asin(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: asin_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method
  dispatch:
    CPU: _asin__cpu
    CUDA: _asin__cuda

- func: asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: _asin_out_cpu
    CUDA: _asin_out_cuda

- func: atan(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: atan_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method
  dispatch:
    CPU: _atan__cpu
    CUDA: _atan__cuda

- func: atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: _atan_out_cpu
    CUDA: _atan_out_cuda

- func: baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
  dispatch:
    CPU: baddbmm_cpu
    CUDA: baddbmm_cuda

- func: baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  variants: method
  dispatch:
    CPU: baddbmm__cpu
    CUDA: baddbmm__cuda

- func: _baddbmm_mkl_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  variants: function

- func: baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  variants: function
  dispatch:
    CPU: baddbmm_out_cpu
    CUDA: baddbmm_out_cuda

- func: bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor

- func: _batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, int)

- func: _batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)

# Sample bernoulli with values in `self` as probability.
- func: bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
  use_c10_dispatcher: 'unboxed_only'
  variants: function, method
  supports_named_tensor: True

- func: bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
  variants: function
  supports_named_tensor: True

- func: bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
  use_c10_dispatcher: 'unboxed_only'
  variants: method
  dispatch:
    CPU: bernoulli_tensor_cpu_
    CUDA: bernoulli_tensor_cuda_
  supports_named_tensor: True

- func: bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
  use_c10_dispatcher: 'unboxed_only'
  variants: method
  dispatch:
    CPU: bernoulli_scalar_cpu_
    CUDA: bernoulli_scalar_cuda_
  supports_named_tensor: True

# This out-of-place version isn't used explicitly, but needed by jit.
# There is no default value on `p` here because it would introduce ambiguity
# with `bernoulli(Tensor self, *, Generator? generator=None)` declaration.
- func: bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
  use_c10_dispatcher: 'unboxed_only'
  variants: function, method

- func: bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias) -> Tensor

- func: binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
  variants: function

- func: binary_cross_entropy_with_logits_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
  variants: function

- func: bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
  variants: function, method
  dispatch:
    CPU: _bincount_cpu
    CUDA: _bincount_cuda

- func: bitwise_not(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: bitwise_not_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: method

- func: bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: bitwise_not_out
    CUDA: bitwise_not_out

- func: logical_not(Tensor self) -> Tensor
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method

- func: logical_not_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: method

- func: logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: logical_not_out
    CUDA: logical_not_out

- func: logical_xor(Tensor self, Tensor other) -> Tensor
  use_c10_dispatcher: unboxed_only
  variants: function, method
  supports_named_tensor: True

- func: logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  variants: method
  supports_named_tensor: True

- func: logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: logical_xor_out
    CUDA: logical_xor_out
  supports_named_tensor: True

- func: blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: bmm(Tensor self, Tensor mat2) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
  dispatch:
    CPU: bmm_cpu
    CUDA: bmm_cuda
  supports_named_tensor: True

- func: bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
  variants: function
  dispatch:
    CPU: bmm_out_cpu
    CUDA: bmm_out_cuda
  supports_named_tensor: True

- func: broadcast_tensors(Tensor[] tensors) -> Tensor[]
  use_c10_dispatcher: unboxed_only
  device_guard: False

- func: cat(Tensor[] tensors, int dim=0) -> Tensor
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True

- func: cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True

- func: cat.names(Tensor[] tensors, Dimname dim) -> Tensor
  supports_named_tensor: True

- func: cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True

- func: ceil(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: ceil_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method

- func: ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: ceil_out
    CUDA: ceil_out

- func: chain_matmul(Tensor[] matrices) -> Tensor
  use_c10_dispatcher: unboxed_only
  variants: function

- func: chunk(Tensor(a) self, int chunks, int dim=0) -> Tensor(a)[]
  use_c10_dispatcher: unboxed_only
  variants: function, method
  device_guard: False
  supports_named_tensor: True

- func: clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method
  dispatch:
    CPU: _clamp__cpu
    CUDA: _clamp__cuda

- func: clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: _clamp_out_cpu
    CUDA: _clamp_out_cuda

- func: clamp_max(Tensor self, Scalar max) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method
  dispatch:
    CPU: _clamp_max__cpu
    CUDA: _clamp_max__cuda

- func: clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: _clamp_max_out_cpu
    CUDA: _clamp_max_out_cuda

- func: clamp_min(Tensor self, Scalar min) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method
  dispatch:
    CPU: _clamp_min__cpu
    CUDA: _clamp_min__cuda

- func: clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: _clamp_min_out_cpu
    CUDA: _clamp_min_out_cuda

- func: cudnn_is_acceptable(Tensor self) -> bool
  use_c10_dispatcher: full
  device_guard: False

- func: constant_pad_nd(Tensor self, int[] pad, Scalar value=0) -> Tensor
  use_c10_dispatcher: unboxed_only
  variants: function

- func: contiguous(Tensor self, *, MemoryFormat memory_format=contiguous_format) -> Tensor
  variants: method
  supports_named_tensor: True

- func: convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor

- func: convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor

- func: convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)

- func: _convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor

- func: _convolution_nogroup(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding) -> Tensor

- func: _convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool[3] output_mask) -> (Tensor, Tensor, Tensor)

- func: conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor

- func: conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor

- func: conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor

- func: conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor
  use_c10_dispatcher: full

- func: conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)
  use_c10_dispatcher: unboxed_only

# NB: we inherit the goofy argument order from PyTorch torch.nn.functional
- func: conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor

- func: conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor

- func: conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor

- func: copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  variants: method
  device_guard: False
  supports_named_tensor: True

- func: _copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor
  use_c10_dispatcher: full
  dispatch: {}

- func: cos(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: cos_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method
  dispatch:
    CPU: _cos__cpu
    CUDA: _cos__cuda

- func: cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: _cos_out_cpu
    CUDA: _cos_out_cuda

- func: cosh(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: cosh_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method
  dispatch:
    CPU: _cosh__cpu
    CUDA: _cosh__cuda

- func: cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: _cosh_out_cpu
    CUDA: _cosh_out_cuda

- func: cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
  use_c10_dispatcher: full

- func: cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid
  use_c10_dispatcher: full
  dispatch:
    CUDA: cudnn_affine_grid_generator_forward

# TODO: Why do I have to call this grad?!
- func: cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta
  use_c10_dispatcher: full
  dispatch:
    CUDA: cudnn_affine_grid_generator_backward

- func: cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
  dispatch:
    CUDA: cudnn_batch_norm

# NB: You can only use this if you used cudnn_batch_norm training=True
- func: cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)
  dispatch:
    CUDA: cudnn_batch_norm_backward

- func: cudnn_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
  dispatch:
    CUDA: cudnn_convolution

- func: cudnn_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
  use_c10_dispatcher: unboxed_only
  dispatch:
    CUDA: cudnn_convolution_backward_input

- func: cudnn_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
  use_c10_dispatcher: unboxed_only
  dispatch:
    CUDA: cudnn_convolution_backward

- func: cudnn_convolution_backward_bias(Tensor grad_output) -> Tensor
  use_c10_dispatcher: full
  dispatch:
    CUDA: cudnn_convolution_backward_bias

- func: cudnn_convolution_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
  use_c10_dispatcher: unboxed_only
  dispatch:
    CUDA: cudnn_convolution_backward_weight

- func: cudnn_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
  dispatch:
    CUDA: cudnn_convolution_transpose

# NB: output_padding not strictly needed here, but it's helpful for the float
# backwards
- func: cudnn_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
  use_c10_dispatcher: unboxed_only
  dispatch:
    CUDA: cudnn_convolution_transpose_backward

- func: cudnn_convolution_transpose_backward_bias(Tensor grad_output) -> Tensor
  use_c10_dispatcher: full
  dispatch:
    CUDA: cudnn_convolution_backward_bias

- func: cudnn_convolution_transpose_backward_input(Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
  use_c10_dispatcher: unboxed_only
  dispatch:
    CUDA: cudnn_convolution_transpose_backward_input

- func: cudnn_convolution_transpose_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
  use_c10_dispatcher: unboxed_only
  dispatch:
    CUDA: cudnn_convolution_transpose_backward_weight

# NB: input is special cased in a way I don't quite understand
- func: cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output
  use_c10_dispatcher: full
  dispatch:
    CUDA: cudnn_grid_sampler_forward

- func: cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)
  use_c10_dispatcher: unboxed_only
  dispatch:
    CUDA: cudnn_grid_sampler_backward

- func: cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
  supports_named_tensor: True
  variants: function, method

- func: cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True

- func: cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
  supports_named_tensor: True
  variants: function, method

- func: cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True

- func: cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
  supports_named_tensor: True
  variants: function, method

- func: cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True

- func: cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
  supports_named_tensor: True
  variants: function, method

- func: cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True

- func: ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
  use_c10_dispatcher: unboxed_only

# convenience function that converts to intlists for you
- func: ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
  use_c10_dispatcher: full

- func: _ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
  use_c10_dispatcher: unboxed_only
  dispatch:
    CPU: ctc_loss_cpu
    CUDA: ctc_loss_gpu

- func: _ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
  use_c10_dispatcher: unboxed_only
  dispatch:
    CPU: ctc_loss_backward_cpu
    CUDA: ctc_loss_backward_gpu

- func: det(Tensor self) -> Tensor
  use_c10_dispatcher: full
  variants: function, method

- func: diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
  use_c10_dispatcher: full
  variants: function, method

- func: diagflat(Tensor self, int offset=0) -> Tensor
  use_c10_dispatcher: full
  variants: function, method

- func: diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
  use_c10_dispatcher: unboxed_only
  variants: function, method

- func: fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  variants: method

- func: div.Tensor(Tensor self, Tensor other) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
  dispatch:
    CPU: div
    CUDA: div
    SparseCPU: div_sparse
    SparseCUDA: div_sparse
  supports_named_tensor: True

- func: div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  variants: method
  dispatch:
    CPU: div_
    CUDA: div_
    SparseCPU: div_sparse_
    SparseCUDA: div_sparse_
  supports_named_tensor: True

- func: div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: div_out
    CUDA: div_out
    SparseCPU: div_out_sparse_zerodim
    SparseCUDA: div_out_sparse_zerodim
  supports_named_tensor: True

# For C++ only, until we have conversion from C++ numbers to Tensor
- func: div.Scalar(Tensor self, Scalar other) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
  supports_named_tensor: True

- func: div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  variants: method
  supports_named_tensor: True

- func: dot(Tensor self, Tensor tensor) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
  dispatch:
    CPU: legacy::cpu::_th_dot
    CUDA: legacy::cuda::_th_dot
  supports_named_tensor: True

- func: dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True

- func: einsum(str equation, Tensor[] tensors) -> Tensor
  use_c10_dispatcher: unboxed_only

- func: embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
  use_c10_dispatcher: full

- func: embedding_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor
  use_c10_dispatcher: full

- func: embedding_dense_backward(Tensor grad_output, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor
  use_c10_dispatcher: full
  dispatch:
    CPU: embedding_dense_backward_cpu
    CUDA: embedding_dense_backward_cuda

- func: embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  dispatch:
    CPU: embedding_renorm_cpu_
    CUDA: embedding_renorm_cuda_

- func: embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor
  use_c10_dispatcher: full

# NOTE [ embedding_bag Native Functions ]
# The `_embedding_bag.*` variants assume that input tensors except for `weight`,
# e.g. `indices` and `offsets` (and `offset2bag`), are contiguous.
# We really only need to enforce this for `_embedding_bag` (the forward) because
# the backward inputs are the same as forward ones.
# The above `embedding_bag` wrapper is created to achieve this, e.g.,
# applying indices = indices.contiguous().
# The backward functions apply a check that these input tensors are contiguous.
- func: embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None) -> (Tensor, Tensor, Tensor, Tensor)

- func: _embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None) -> (Tensor, Tensor, Tensor, Tensor)
  dispatch:
    CPU: _embedding_bag_cpu
    CUDA: _embedding_bag_cuda

- func: _embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights) -> Tensor

- func: _embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights) -> Tensor

- func: _embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights) -> Tensor
  dispatch:
    CPU: _embedding_bag_dense_backward_cpu
    CUDA: _embedding_bag_dense_backward_cuda

- func: _embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode) -> Tensor
  use_c10_dispatcher: full
  dispatch:
    CPU: _embedding_bag_per_sample_weights_backward_cpu
    CUDA: _embedding_bag_per_sample_weights_backward_cuda
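# Illustrative sketch (not part of the operator schema), following
# NOTE [ embedding_bag Native Functions ] above: callers may pass non-contiguous `indices`
# or `offsets`; the `embedding_bag` wrapper makes them contiguous before `_embedding_bag`
# runs. One way to reach it from Python is the functional API:
#
#   import torch
#   import torch.nn.functional as F
#
#   weight = torch.randn(10, 4)
#   indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
#   offsets = torch.tensor([0, 4])                 # two bags: indices[0:4] and indices[4:]
#   out = F.embedding_bag(indices, weight, offsets, mode="sum")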
- func: empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  device_guard: False

- func: empty.memory_format(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  dispatch:
    CPU: empty_cpu
    CUDA: empty_cuda
    MkldnnCPU: empty_mkldnn
    SparseCPU: empty_sparse
    SparseCUDA: empty_sparse

- func: new_empty(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  variants: method

- func: new_full(Tensor self, int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  variants: method

# other overrides are to provide a more helpful error message that dtype is required
- func: _empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
  dispatch:
    CPU: empty_affine_quantized_other_backends_stub
    QuantizedCPU: empty_affine_quantized_cpu

# it's a factory function receiving a tensor argument, thus overriding explicitly
# other overrides are to provide a more helpful error message that dtype is required
- func: _empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
  category_override: factory
  dispatch:
    CPU: empty_per_channel_affine_quantized_other_backends_stub
    QuantizedCPU: empty_per_channel_affine_quantized_cpu

- func: resize_(Tensor(a!) self, int[] size) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: method
  device_guard: False
  dispatch:
    CPU: resize_cpu_
    CUDA: resize_cuda_
    QuantizedCPU: quantized_resize_cpu_

- func: empty.out(int[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
  device_guard: False

- func: empty_like(Tensor self) -> Tensor
  use_c10_dispatcher: full
  device_guard: False
  supports_named_tensor: True

- func: empty_like.dtype(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False, MemoryFormat? memory_format=contiguous_format) -> Tensor
  device_guard: False
  supports_named_tensor: True

- func: empty_strided(int[] size, int[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CPU: empty_strided_cpu
    CUDA: empty_strided_cuda

- func: erf(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: erf_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method
  dispatch:
    CPU: _erf__cpu
    CUDA: _erf__cuda

- func: erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: _erf_out_cpu
    CUDA: _erf_out_cuda

- func: erfc(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: erfc_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method
  dispatch:
    CPU: _erfc__cpu
    CUDA: _erfc__cuda

- func: erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: _erfc_out_cpu
    CUDA: _erfc_out_cuda

- func: exp(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: exp_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method
  dispatch:
    CPU: _exp__cpu
    CUDA: _exp__cuda

- func: exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: _exp_out_cpu
    CUDA: _exp_out_cuda

- func: expm1(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: expm1_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method

- func: expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: expm1_out
    CUDA: expm1_out

- func: expand(Tensor(a) self, int[] size, *, bool implicit=False) -> Tensor(a)
  use_c10_dispatcher: unboxed_only
  variants: method  # This is method-only to match the previous tensor API. In the future we could make this a function too.
  device_guard: False
  supports_named_tensor: True

- func: expand_as(Tensor self, Tensor other) -> Tensor
  use_c10_dispatcher: full
  variants: method  # This is method-only to match the previous tensor API. In the future we could make this a function too.
  device_guard: False

- func: eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: eye.out(int n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: eye_out_cpu
    CUDA: eye_out_cuda

- func: eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: eye_out_cpu
    CUDA: eye_out_cuda

- func: flatten.using_ints(Tensor self, int start_dim=0, int end_dim=-1) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
  supports_named_tensor: True

- func: flatten.named_out_dim(Tensor self, int start_dim, int end_dim, Dimname out_dim) -> Tensor
  variants: function, method
  supports_named_tensor: True

- func: flatten.using_names(Tensor self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor
  variants: function, method
  supports_named_tensor: True

- func: flatten.DimnameList(Tensor self, DimnameList dims, Dimname out_dim) -> Tensor
  variants: function, method
  supports_named_tensor: True

- func: fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method

- func: fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method

- func: floor(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: floor_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method

- func: floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: floor_out
    CUDA: floor_out

- func: frac(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: frac_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method
  dispatch:
    CPU: _frac__cpu
    CUDA: _frac__cuda

- func: frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: _frac_out_cpu
    CUDA: _frac_out_cuda

- func: full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  device_guard: False

- func: full(int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: full.out(int[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)

- func: full_like(Tensor self, Scalar fill_value) -> Tensor
  use_c10_dispatcher: full

- func: full_like.dtype(Tensor self, Scalar fill_value, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor

- func: from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CPU: from_file

# NOTE [ grid_sampler Native Functions ]
# `grid_sampler` does all the shape checking and then dispatches to one of
# `cudnn_grid_sampler`, `grid_sampler_2d`, or `grid_sampler_3d`, each of which
# has the corresponding backward defined as native functions as well. Therefore,
# in these functions and their backwards, no more shape checking is done.
#
# Additionally, arguments `padding_mode` and `interpolation_mode` are cast to
# enums defined in `native/GridSampler.h`. `cudnn_grid_sampler` doesn't take in
# `interpolation_mode` because it only supports Bilinear interpolation mode.
# Nor does it take in `align_corners` because it only supports the mode
# `align_corners = True`.
- func: grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
  use_c10_dispatcher: full

- func: grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
  use_c10_dispatcher: full
  dispatch:
    CPU: grid_sampler_2d_cpu
    CUDA: grid_sampler_2d_cuda

- func: grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)
  use_c10_dispatcher: unboxed_only
  dispatch:
    CPU: grid_sampler_2d_backward_cpu
    CUDA: grid_sampler_2d_backward_cuda

- func: grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
  use_c10_dispatcher: full
  dispatch:
    CPU: grid_sampler_3d_cpu
    CUDA: grid_sampler_3d_cuda

- func: grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)
  use_c10_dispatcher: unboxed_only
  dispatch:
    CPU: grid_sampler_3d_backward_cpu
    CUDA: grid_sampler_3d_backward_cuda
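# Illustrative sketch (not part of the operator schema), following
# NOTE [ grid_sampler Native Functions ] above: the public entry point takes string
# mode/padding_mode arguments, which are converted to the integer enums these kernels
# receive, and shape checking happens before the 2d/3d/cudnn dispatch. A small Python
# example (4-D input, so it lowers to grid_sampler_2d or cudnn_grid_sampler):
#
#   import torch
#   import torch.nn.functional as F
#
#   inp = torch.randn(1, 1, 4, 4)                  # N, C, H, W
#   grid = torch.zeros(1, 3, 3, 2)                 # N, H_out, W_out, 2; coords in [-1, 1]
#   out = F.grid_sample(inp, grid, mode="bilinear", padding_mode="zeros",
#                       align_corners=True)        # out has shape (1, 1, 3, 3)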
- func: hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor
  use_c10_dispatcher: full

- func: ger(Tensor self, Tensor vec2) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
  dispatch:
    CPU: legacy::cpu::_th_ger
    CUDA: legacy::cuda::_th_ger

- func: ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: legacy::cpu::_th_ger_out
    CUDA: legacy::cuda::_th_ger_out

- func: group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor

# FFT
- func: fft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor
  use_c10_dispatcher: full
  variants: function, method

- func: ifft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor
  use_c10_dispatcher: full
  variants: function, method

- func: rfft(Tensor self, int signal_ndim, bool normalized=False, bool onesided=True) -> Tensor
  use_c10_dispatcher: full
  variants: function, method

- func: irfft(Tensor self, int signal_ndim, bool normalized=False, bool onesided=True, int[] signal_sizes=[]) -> Tensor
  use_c10_dispatcher: unboxed_only
  variants: function, method

- func: _fft_with_size(Tensor self, int signal_ndim, bool complex_input, bool complex_output, bool inverse, int[] checked_signal_sizes, bool normalized, bool onesided, int[] output_sizes) -> Tensor
  use_c10_dispatcher: unboxed_only
  variants: function
  dispatch:
    CPU: _fft_mkl
    CUDA: _fft_cufft

- func: _cufft_get_plan_cache_size(int device_index) -> int
  use_c10_dispatcher: full

- func: _cufft_get_plan_cache_max_size(int device_index) -> int
  use_c10_dispatcher: full

- func: _cufft_set_plan_cache_max_size(int device_index, int max_size) -> void

- func: _cufft_clear_plan_cache(int device_index) -> void

- func: index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
  variants: function, method

# NB: This function is special-cased in tools/autograd/gen_variable_type.py
- func: index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  variants: method

- func: index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
  use_c10_dispatcher: full
  variants: function, method

- func: index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)
  variants: method

- func: index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
  variants: function, method

- func: index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
  variants: function, method

- func: index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
  variants: function, method

- func: _index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
  variants: function

- func: instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
  variants: function

- func: inverse(Tensor self) -> Tensor
  use_c10_dispatcher: full
  variants: function, method

- func: inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
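# Illustrative sketch (not part of the operator schema): the `Tensor?[] indices` argument
# of the index/index_put entries above surfaces in Python as a tuple or list of index
# tensors, one per indexed dimension, so advanced-indexing assignment lowers to index_put_:
#
#   import torch
#   t = torch.zeros(3, 3)
#   rows = torch.tensor([0, 2])
#   cols = torch.tensor([1, 1])
#   t.index_put_((rows, cols), torch.tensor([5.0, 7.0]))           # t[rows, cols] = values
#   t.index_put_((rows, cols), torch.tensor(1.0), accumulate=True) # adds into those entries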
- func: _inverse_helper(Tensor self) -> Tensor
  use_c10_dispatcher: full
  variants: function
  dispatch:
    CPU: _inverse_helper_cpu
    CUDA: _inverse_helper_cuda

- func: isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
  use_c10_dispatcher: full
  variants: function, method

- func: isnan(Tensor self) -> Tensor
  use_c10_dispatcher: full
  variants: function
  device_guard: False
  supports_named_tensor: True

- func: is_distributed(Tensor self) -> bool
  use_c10_dispatcher: full
  variants: function, method
  device_guard: False

- func: is_floating_point(Tensor self) -> bool
  use_c10_dispatcher: full
  variants: function, method
  device_guard: False
  supports_named_tensor: True

- func: is_complex(Tensor self) -> bool
  use_c10_dispatcher: full
  variants: function, method
  device_guard: False
  supports_named_tensor: True

- func: is_nonzero(Tensor self) -> bool
  use_c10_dispatcher: full
  variants: function, method
  device_guard: False
  supports_named_tensor: True

- func: is_same_size(Tensor self, Tensor other) -> bool
  use_c10_dispatcher: full
  variants: function, method
  device_guard: False
  supports_named_tensor: True

- func: is_signed(Tensor self) -> bool
  use_c10_dispatcher: full
  variants: function, method
  device_guard: False
  supports_named_tensor: True

- func: kl_div(Tensor self, Tensor target, int reduction=Mean) -> Tensor
  use_c10_dispatcher: full

- func: kl_div_backward(Tensor grad_output, Tensor self, Tensor target, int reduction=Mean) -> Tensor
  use_c10_dispatcher: full
  dispatch:
    CPU: kl_div_backward_cpu
    CUDA: kl_div_backward_cuda

- func: kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method

- func: kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  supports_named_tensor: True
  dispatch:
    CPU: kthvalue_out_cpu
    CUDA: kthvalue_out_cuda

- func: kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  supports_named_tensor: True
  variants: function, method

- func: kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  supports_named_tensor: True

- func: layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor

- func: native_layer_norm(Tensor input, Tensor? weight, Tensor? bias, int M, int N, float eps) -> (Tensor, Tensor, Tensor)
  dispatch:
    CPU: layer_norm_cpu

- func: native_layer_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, int M, int N, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
  dispatch:
    CPU: layer_norm_backward_cpu

- func: native_layer_norm_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, int M, int N, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
  dispatch:
    CPU: layer_norm_double_backward_cpu

- func: linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
  python_module: nn

- func: mkldnn_linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
  python_module: nn
  dispatch:
    MkldnnCPU: mkldnn_linear

- func: fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
  use_c10_dispatcher: unboxed_only

- func: fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
  use_c10_dispatcher: full

- func: fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)
  use_c10_dispatcher: unboxed_only

- func: fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor
  use_c10_dispatcher: full

- func: fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
  use_c10_dispatcher: unboxed_only

- func: fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
  use_c10_dispatcher: full

- func: fbgemm_pack_quantized_matrix(Tensor input) -> Tensor
  use_c10_dispatcher: full

- func: fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor
  use_c10_dispatcher: full

- func: linspace(Scalar start, Scalar end, int steps=100, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: linspace.out(Scalar start, Scalar end, int steps=100, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: linspace_cpu_out
    CUDA: linspace_cuda_out

- func: log(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: log_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method

- func: log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: log_out
    CUDA: log_out

- func: log10(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: log10_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method
  dispatch:
    CPU: _log10__cpu
    CUDA: _log10__cuda

- func: log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: _log10_out_cpu
    CUDA: _log10_out_cuda

- func: log1p(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: log1p_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method
  dispatch:
    CPU: _log1p__cpu
    CUDA: _log1p__cuda
    SparseCPU: log1p_sparse_
    SparseCUDA: log1p_sparse_

- func: log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: _log1p_out_cpu
    CUDA: _log1p_out_cuda
    SparseCPU: log1p_out_sparse
    SparseCUDA: log1p_out_sparse

- func: log2(Tensor self) -> Tensor
  use_c10_dispatcher: full
  supports_named_tensor: True
  variants: function, method

- func: log2_(Tensor(a!) self) -> Tensor(a!)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method
  dispatch:
    CPU: _log2__cpu
    CUDA: _log2__cuda

- func: log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: _log2_out_cpu
    CUDA: _log2_out_cuda

- func: logdet(Tensor self) -> Tensor
  use_c10_dispatcher: full
  variants: function, method

- func: logspace(Scalar start, Scalar end, int steps=100, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

- func: logspace.out(Scalar start, Scalar end, int steps=100, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: logspace_cpu_out
    CUDA: logspace_cuda_out

# log_softmax allows positional dtype, unlike most operators, because kwonly is BC-breaking when loading jit models.
- func: log_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
  variants: function, method
  supports_named_tensor: True

- func: log_softmax(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
  supports_named_tensor: True

- func: _log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
  use_c10_dispatcher: full
  dispatch:
    CPU: log_softmax_cpu
    CUDA: log_softmax_cuda

- func: _log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
  use_c10_dispatcher: full
  dispatch:
    CPU: log_softmax_backward_cpu
    CUDA: log_softmax_backward_cuda

- func: logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method

- func: logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True

- func: logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
  supports_named_tensor: True
  variants: function, method

- func: logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True

- func: margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
  use_c10_dispatcher: full

- func: matmul(Tensor self, Tensor other) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
  supports_named_tensor: True

- func: matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True

- func: matrix_rank.tol(Tensor self, float tol, bool symmetric=False) -> Tensor
  use_c10_dispatcher: full

- func: matrix_rank(Tensor self, bool symmetric=False) -> Tensor
  use_c10_dispatcher: full

- func: matrix_power(Tensor self, int n) -> Tensor
  use_c10_dispatcher: full
  variants: function, method

- func: max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  use_c10_dispatcher: unboxed_only
  variants: function, method
  supports_named_tensor: True

- func: max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
  supports_named_tensor: True

- func: max_values(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
  use_c10_dispatcher: unboxed_only
  variants: function, method

- func: max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  variants: function, method
  supports_named_tensor: True

- func: max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
  supports_named_tensor: True

- func: max_values.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
  variants: function, method

# Return: (Tensor output, Tensor indices)
- func: max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
  use_c10_dispatcher: unboxed_only

- func: max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
  use_c10_dispatcher: unboxed_only

- func: max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
  use_c10_dispatcher: unboxed_only

- func: mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
  use_c10_dispatcher: unboxed_only
  requires_tensor: True
  dispatch:
    MkldnnCPU: mkldnn_max_pool2d

- func: quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
  use_c10_dispatcher: unboxed_only
  requires_tensor: True
  dispatch:
    QuantizedCPU: quantized_max_pool2d

- func: max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
  use_c10_dispatcher: unboxed_only

# The CPU and GPU dispatch variants are named weirdly here because otherwise there
# are namespacing issues in C++
- func: mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
  supports_named_tensor: True
  dispatch:
    CPU: mean_cpu_gpu
    CUDA: mean_cpu_gpu
    QuantizedCPU: quantized_mean_cpu

- func: mean.dim(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
  supports_named_tensor: True
  dispatch:
    CPU: mean_cpu_gpu
    CUDA: mean_cpu_gpu
    QuantizedCPU: quantized_mean_cpu

- func: mean.out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: mean_out_cpu_gpu
    CUDA: mean_out_cpu_gpu
    QuantizedCPU: quantized_mean_out_cpu

- func: mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
  supports_named_tensor: True
  dispatch:
    CPU: mean_cpu_gpu
    CUDA: mean_cpu_gpu

- func: mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  supports_named_tensor: True
  dispatch:
    CPU: mean_out_cpu_gpu
    CUDA: mean_out_cpu_gpu

- func: median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  use_c10_dispatcher: unboxed_only
  supports_named_tensor: True
  variants: function, method

- func: median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  supports_named_tensor: True

- func: median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  supports_named_tensor: True
  variants: function, method

- func: median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!)
indices) supports_named_tensor: True - func: min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) use_c10_dispatcher: unboxed_only variants: function, method supports_named_tensor: True - func: min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) supports_named_tensor: True - func: min_values(Tensor self, int[1] dim, bool keepdim=False) -> Tensor use_c10_dispatcher: unboxed_only variants: function, method - func: min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) variants: function, method supports_named_tensor: True - func: min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) supports_named_tensor: True - func: min_values.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor variants: function, method - func: mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor - func: mkldnn_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool bias_defined) -> Tensor use_c10_dispatcher: unboxed_only - func: mkldnn_convolution_backward_weights(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool bias_defined) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only - func: mkldnn_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: unboxed_only - func: miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor) dispatch: CUDA: miopen_batch_norm - func: miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor) dispatch: CUDA: miopen_batch_norm_backward - func: miopen_convolution(Tensor self, Tensor weight, Tensor? 
bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor dispatch: CUDA: miopen_convolution - func: miopen_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor use_c10_dispatcher: unboxed_only dispatch: CUDA: miopen_convolution_backward_input - func: miopen_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[3] output_mask) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: unboxed_only dispatch: CUDA: miopen_convolution_backward - func: miopen_convolution_backward_bias(Tensor grad_output) -> Tensor use_c10_dispatcher: full dispatch: CUDA: miopen_convolution_backward_bias - func: miopen_convolution_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor use_c10_dispatcher: unboxed_only dispatch: CUDA: miopen_convolution_backward_weight - func: miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor dispatch: CUDA: miopen_convolution_transpose # NB: output_padding not strictly needed here, but it's helpful for the float # backwards - func: miopen_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[3] output_mask) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: unboxed_only dispatch: CUDA: miopen_convolution_transpose_backward - func: miopen_convolution_transpose_backward_input(Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor use_c10_dispatcher: unboxed_only dispatch: CUDA: miopen_convolution_transpose_backward_input - func: miopen_convolution_transpose_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor use_c10_dispatcher: unboxed_only dispatch: CUDA: miopen_convolution_transpose_backward_weight - func: miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? 
bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor dispatch: CUDA: miopen_depthwise_convolution - func: miopen_depthwise_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor use_c10_dispatcher: unboxed_only dispatch: CUDA: miopen_depthwise_convolution_backward_input - func: miopen_depthwise_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[3] output_mask) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: unboxed_only dispatch: CUDA: miopen_depthwise_convolution_backward - func: miopen_depthwise_convolution_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor use_c10_dispatcher: unboxed_only dispatch: CUDA: miopen_depthwise_convolution_backward_weight - func: miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) dispatch: CUDA: miopen_rnn - func: miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) dispatch: CUDA: miopen_rnn_backward - func: mm(Tensor self, Tensor mat2) -> Tensor use_c10_dispatcher: full variants: function, method dispatch: CPU: legacy::cpu::_th_mm CUDA: legacy::cuda::_th_mm SparseCPU: _sparse_mm SparseCUDA: _sparse_mm supports_named_tensor: True - func: mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_mm_out CUDA: legacy::cuda::_th_mm_out SparseCPU: _sparse_mm_out SparseCUDA: _sparse_mm_out supports_named_tensor: True - func: _sparse_mm(Tensor sparse, Tensor dense) -> Tensor use_c10_dispatcher: full - func: mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method - func: mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) supports_named_tensor: True - func: mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) variants: function, method supports_named_tensor: True - func: mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) supports_named_tensor: True - func: mul.Tensor(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: function, method dispatch: CPU: mul CUDA: mul SparseCPU: mul_sparse SparseCUDA: mul_sparse MkldnnCPU: mkldnn_mul supports_named_tensor: True - func: mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: mul_ CUDA: mul_ SparseCPU: mul_sparse_ SparseCUDA: mul_sparse_ MkldnnCPU: mkldnn_mul_ supports_named_tensor: True - func: mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: mul_out CUDA: mul_out SparseCPU: mul_out_sparse_cpu SparseCUDA: mul_out_sparse_cuda MkldnnCPU: mkldnn_mul_out supports_named_tensor: True # For C++ only, until we have conversion from C++ numbers to Tensor - func: mul.Scalar(Tensor self, Scalar other) -> Tensor use_c10_dispatcher: full variants: function, method - func: mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method - func: mv(Tensor self, Tensor vec) -> Tensor use_c10_dispatcher: full variants: function, method dispatch: CPU: legacy::cpu::_th_mv CUDA: legacy::cuda::_th_mv supports_named_tensor: True - func: mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_mv_out CUDA: legacy::cuda::_th_mv_out supports_named_tensor: True - func: mvlgamma(Tensor self, int p) -> Tensor use_c10_dispatcher: full variants: function, method - func: mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method - func: narrow_copy(Tensor self, int dim, int start, int length) -> Tensor use_c10_dispatcher: full variants: method dispatch: CPU: narrow_copy_dense CUDA: narrow_copy_dense SparseCPU: narrow_copy_sparse SparseCUDA: narrow_copy_sparse - func: narrow(Tensor(a) self, int dim, int start, int length) -> Tensor(a) use_c10_dispatcher: unboxed_only variants: function, method device_guard: False supports_named_tensor: True - func: native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) dispatch: CPU: batch_norm_cpu CUDA: batch_norm_cuda MkldnnCPU: mkldnn_batch_norm - func: batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only dispatch: CUDA: batch_norm_stats_cuda - func: batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor dispatch: CUDA: batch_norm_elemt_cuda # for backward compatibility - func: batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor) dispatch: CUDA: batch_norm_gather_stats_cuda - func: batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int[] counts) -> (Tensor, Tensor) dispatch: CUDA: batch_norm_gather_stats_with_counts_cuda - func: native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor) dispatch: CPU: batch_norm_backward_cpu CUDA: batch_norm_backward_cuda - func: batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor) dispatch: CUDA: batch_norm_backward_reduce_cuda - func: batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu) -> Tensor dispatch: CUDA: batch_norm_backward_elemt_cuda - func: batch_norm_update_stats(Tensor input, Tensor? 
running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor) dispatch: CPU: batch_norm_update_stats_cpu CUDA: batch_norm_update_stats_cuda - func: _nnpack_available() -> bool use_c10_dispatcher: full - func: _nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, int[2] padding) -> Tensor variants: function - func: _nnpack_spatial_convolution_backward(Tensor input, Tensor grad_output, Tensor weight, int[2] padding, bool[3] output_mask) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function - func: _nnpack_spatial_convolution_backward_input(Tensor input, Tensor grad_output, Tensor weight, int[2] padding) -> Tensor use_c10_dispatcher: unboxed_only variants: function - func: _nnpack_spatial_convolution_backward_weight(Tensor input, int[] weightsize, Tensor grad_output, int[2] padding) -> Tensor use_c10_dispatcher: unboxed_only variants: function - func: ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor device_guard: False - func: ones(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: ones.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) - func: ones_like(Tensor self) -> Tensor use_c10_dispatcher: full - func: ones_like.dtype(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor - func: pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor use_c10_dispatcher: full - func: cdist(Tensor x1, Tensor x2, float p=2) -> Tensor use_c10_dispatcher: full - func: _cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor use_c10_dispatcher: full - func: pdist(Tensor self, float p=2) -> Tensor use_c10_dispatcher: full - func: _pdist_forward(Tensor self, float p=2) -> Tensor use_c10_dispatcher: full - func: _pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor use_c10_dispatcher: full - func: cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor use_c10_dispatcher: full variants: function - func: permute(Tensor(a) self, int[] dims) -> Tensor(a) use_c10_dispatcher: unboxed_only variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too. # Only exposed from C++ -- in Python, # we expose it as an attribute `T`, not a function. # # I'd like to name this "T" in C++ too, but # calling a native function "T" causes undefined # behavior on Windows, for reasons I don't understand # (maybe related to capital letter collation somehow...) - func: numpy_T(Tensor(a) self) -> Tensor(a) use_c10_dispatcher: unboxed_only variants: method - func: pixel_shuffle(Tensor self, int upscale_factor) -> Tensor use_c10_dispatcher: full - func: is_pinned(Tensor self) -> bool use_c10_dispatcher: full variants: method supports_named_tensor: True - func: pin_memory(Tensor self) -> Tensor use_c10_dispatcher: full variants: method - func: pinverse(Tensor self, float rcond=1e-15) -> Tensor use_c10_dispatcher: full variants: function, method - func: poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor use_c10_dispatcher: full variants: function - func: scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: rand.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? 
layout=None, Device? device=None, bool? pin_memory=None) -> Tensor device_guard: False - func: rand.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor device_guard: False - func: rand(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: rand.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: rand.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) - func: rand.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) - func: rand_like(Tensor self) -> Tensor use_c10_dispatcher: full - func: rand_like.dtype(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor - func: randint(int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: randint.generator(int high, int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: randint.low(int low, int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: randint.low_generator(int low, int high, int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: randint.out(int high, int[] size, *, Tensor(a!) out) -> Tensor(a!) - func: randint.generator_out(int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) - func: randint.low_out(int low, int high, int[] size, *, Tensor(a!) out) -> Tensor(a!) - func: randint.low_generator_out(int low, int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) - func: randint_like(Tensor self, int high) -> Tensor use_c10_dispatcher: full - func: randint_like.low(Tensor self, int low, int high) -> Tensor use_c10_dispatcher: full - func: randint_like.dtype(Tensor self, int high, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor - func: randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor - func: randn(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: randn.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: randn.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor device_guard: False - func: randn.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor device_guard: False - func: randn.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) - func: randn.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) - func: randn_like(Tensor self) -> Tensor use_c10_dispatcher: full - func: randn_like.dtype(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor - func: randperm(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor - func: randperm.generator(int n, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!) - func: randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: randperm_out_cpu CUDA: randperm_out_cuda - func: range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: range_cpu_out CUDA: range_cuda_out - func: reciprocal(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: function, method - func: reciprocal_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method dispatch: CPU: _reciprocal__cpu CUDA: _reciprocal__cuda - func: reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: _reciprocal_out_cpu CUDA: _reciprocal_out_cuda - func: neg(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: function, method - func: neg_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method - func: neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: neg_out CUDA: neg_out - func: repeat(Tensor self, int[] repeats) -> Tensor use_c10_dispatcher: unboxed_only variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too. - func: repeat_interleave.Tensor(Tensor repeats) -> Tensor use_c10_dispatcher: full variants: function dispatch: CPU: repeat_interleave_cpu CUDA: repeat_interleave_cuda - func: repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None) -> Tensor use_c10_dispatcher: full variants: function, method - func: repeat_interleave.self_int(Tensor self, int repeats, int? dim=None) -> Tensor use_c10_dispatcher: full variants: function, method - func: reshape(Tensor self, int[] shape) -> Tensor use_c10_dispatcher: unboxed_only variants: function, method device_guard: False supports_named_tensor: True - func: _mkldnn_reshape(Tensor self, int[] shape) -> Tensor use_c10_dispatcher: unboxed_only device_guard: False requires_tensor: True dispatch: MkldnnCPU: mkldnn_reshape - func: reshape_as(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: method device_guard: False - func: round(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: function, method - func: round_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method - func: round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: round_out CUDA: round_out - func: rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor use_c10_dispatcher: 'unboxed_only' - func: rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!) 
use_c10_dispatcher: 'unboxed_only' - func: relu(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method dispatch: CPU: relu CUDA: relu MkldnnCPU: mkldnn_relu QuantizedCPU: quantized_relu supports_named_tensor: True - func: relu_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method dispatch: CPU: relu_ CUDA: relu_ MkldnnCPU: mkldnn_relu_ QuantizedCPU: quantized_relu_ - func: prelu(Tensor self, Tensor weight) -> Tensor use_c10_dispatcher: full variants: function, method dispatch: CPU: prelu_cpu CUDA: prelu_cuda - func: prelu_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function, method dispatch: CPU: prelu_backward_cpu CUDA: prelu_backward_cuda - func: gelu(Tensor self) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: gelu_cpu CUDA: gelu_cuda - func: gelu_backward(Tensor grad, Tensor self) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: gelu_backward_cpu CUDA: gelu_backward_cuda - func: hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor use_c10_dispatcher: full variants: function, method dispatch: CPU: hardshrink_cpu CUDA: hardshrink_cuda - func: hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor use_c10_dispatcher: full variants: function, method dispatch: CPU: hardshrink_backward_cpu CUDA: hardshrink_backward_cuda - func: rsqrt(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: function, method - func: rsqrt_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method - func: rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: rsqrt_out CUDA: rsqrt_out - func: select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a) variants: function, method device_guard: False supports_named_tensor: True - func: select.int(Tensor(a) self, int dim, int index) -> Tensor(a) use_c10_dispatcher: unboxed_only variants: function, method device_guard: False supports_named_tensor: True - func: selu(Tensor self) -> Tensor use_c10_dispatcher: full - func: selu_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only - func: celu(Tensor self, Scalar alpha=1.0) -> Tensor use_c10_dispatcher: full - func: celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!) use_c10_dispatcher: unboxed_only - func: sigmoid(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: function, method dispatch: CPU: sigmoid CUDA: sigmoid MkldnnCPU: mkldnn_sigmoid - func: sigmoid_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method dispatch: CPU: _sigmoid__cpu CUDA: _sigmoid__cuda MkldnnCPU: mkldnn_sigmoid_ - func: sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: _sigmoid_out_cpu CUDA: _sigmoid_out_cuda - func: sin(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: function, method - func: sin_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method dispatch: CPU: _sin__cpu CUDA: _sin__cuda - func: sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
supports_named_tensor: True dispatch: CPU: _sin_out_cpu CUDA: _sin_out_cuda - func: sinh(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: function, method - func: sinh_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method dispatch: CPU: _sinh__cpu CUDA: _sinh__cuda - func: sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: _sinh_out_cpu CUDA: _sinh_out_cuda - func: detach(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: function, method - func: detach_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method - func: size.int(Tensor self, int dim) -> int use_c10_dispatcher: full variants: function, method device_guard: False supports_named_tensor: True - func: size.Dimname(Tensor self, Dimname dim) -> int variants: function, method device_guard: False supports_named_tensor: True - func: slice.Tensor(Tensor(a) self, int dim=0, int start=0, int end=9223372036854775807, int step=1) -> Tensor(a) use_c10_dispatcher: unboxed_only variants: function, method device_guard: False supports_named_tensor: True - func: slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet) use_c10_dispatcher: unboxed_only variants: function, method - func: smm(Tensor self, Tensor mat2) -> Tensor use_c10_dispatcher: full variants: function, method # softmax allows positional dtype, unlike most operators, because kwonly is BC-breaking when loading jit models. - func: softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor variants: function, method supports_named_tensor: True - func: softmax(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor variants: function, method supports_named_tensor: True - func: _softmax(Tensor self, int dim, bool half_to_float) -> Tensor use_c10_dispatcher: full dispatch: CPU: softmax_cpu CUDA: softmax_cuda MkldnnCPU: mkldnn_softmax - func: _softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor use_c10_dispatcher: full dispatch: CPU: softmax_backward_cpu CUDA: softmax_backward_cuda - func: split.Tensor(Tensor(a) self, int split_size, int dim=0) -> Tensor(a)[] use_c10_dispatcher: unboxed_only variants: function, method device_guard: False supports_named_tensor: True - func: split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[] use_c10_dispatcher: unboxed_only variants: function, method device_guard: False supports_named_tensor: True - func: squeeze(Tensor(a) self) -> Tensor(a) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method device_guard: False - func: squeeze.dim(Tensor(a) self, int dim) -> Tensor(a) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method device_guard: False - func: squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a) supports_named_tensor: True variants: function, method device_guard: False - func: squeeze_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method device_guard: False - func: squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method device_guard: False - func: squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!) 
variants: method device_guard: False - func: sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor use_c10_dispatcher: full variants: function, method - func: sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: _sspaddmm_out_only_sparse CUDA: _sspaddmm_out_only_sparse_cuda SparseCPU: _sspaddmm_out_cpu SparseCUDA: _sspaddmm_out_cuda - func: stack(Tensor[] tensors, int dim=0) -> Tensor use_c10_dispatcher: unboxed_only - func: stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) # The signature is designed to be consistent with librosa except that it is # missing the `pad_mode` and `center` arguments, which are taken care of at # `torch.functional.py`. They shall be moved here once we have mapping between # Python strings and C++ Enum in codegen. - func: stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool onesided=True) -> Tensor variants: function, method - func: stride.int(Tensor self, int dim) -> int use_c10_dispatcher: full variants: function, method device_guard: False supports_named_tensor: True - func: stride.Dimname(Tensor self, Dimname dim) -> int variants: function, method device_guard: False supports_named_tensor: True - func: sum(Tensor self, *, ScalarType? dtype=None) -> Tensor variants: function, method supports_named_tensor: True - func: sum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor variants: function, method supports_named_tensor: True - func: sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor variants: function, method supports_named_tensor: True - func: sum.IntList_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True - func: sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True - func: sum_to_size(Tensor self, int[] size) -> Tensor use_c10_dispatcher: unboxed_only variants: method device_guard: False - func: sqrt(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: function, method - func: sqrt_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method dispatch: CPU: _sqrt__cpu CUDA: _sqrt__cuda - func: sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
supports_named_tensor: True dispatch: CPU: _sqrt_out_cpu CUDA: _sqrt_out_cuda - func: std(Tensor self, bool unbiased=True) -> Tensor use_c10_dispatcher: full variants: function, method supports_named_tensor: True - func: std.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor use_c10_dispatcher: unboxed_only variants: function, method supports_named_tensor: True - func: std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function supports_named_tensor: True - func: std_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function supports_named_tensor: True - func: std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) variants: function supports_named_tensor: True - func: std.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True - func: std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor variants: function, method supports_named_tensor: True - func: std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True - func: prod(Tensor self, *, ScalarType? dtype=None) -> Tensor variants: function, method supports_named_tensor: True - func: prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor variants: function, method supports_named_tensor: True - func: prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True - func: prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor variants: function, method supports_named_tensor: True - func: prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True - func: t(Tensor(a) self) -> Tensor(a) use_c10_dispatcher: unboxed_only device_guard: False variants: function, method supports_named_tensor: True - func: t_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only device_guard: False variants: method - func: tan(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: function, method - func: tan_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method dispatch: CPU: _tan__cpu CUDA: _tan__cuda - func: tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: _tan_out_cpu CUDA: _tan_out_cuda - func: tanh(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: function, method - func: tanh_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method dispatch: CPU: _tanh__cpu CUDA: _tanh__cuda - func: tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: _tanh_out_cpu CUDA: _tanh_out_cuda - func: tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor use_c10_dispatcher: unboxed_only variants: function # TODO: namespace threshold in 'nn' - func: threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor use_c10_dispatcher: full variants: function supports_named_tensor: True - func: threshold_(Tensor(a!) 
self, Scalar threshold, Scalar value) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: function supports_named_tensor: True - func: threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True - func: threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor use_c10_dispatcher: full variants: function - func: transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a) use_c10_dispatcher: unboxed_only variants: function, method device_guard: False supports_named_tensor: True - func: transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a) variants: function, method device_guard: False supports_named_tensor: True - func: _mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor use_c10_dispatcher: full device_guard: False requires_tensor: True dispatch: MkldnnCPU: mkldnn_transpose - func: transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method device_guard: False - func: _mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) use_c10_dispatcher: unboxed_only device_guard: False requires_tensor: True dispatch: MkldnnCPU: mkldnn_transpose_ - func: one_hot(Tensor self, int num_classes=-1) -> Tensor use_c10_dispatcher: full python_module: nn variants: function - func: flip(Tensor self, int[] dims) -> Tensor use_c10_dispatcher: unboxed_only variants: function, method dispatch: CPU: flip_cpu CUDA: flip_cuda - func: roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor use_c10_dispatcher: unboxed_only variants: function, method dispatch: CPU: roll_cpu CUDA: roll_cuda # default int[] value [0,1] should not add space after comma, since native_parse.py uses ', ' to split args - func: rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor use_c10_dispatcher: unboxed_only variants: function, method - func: trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor use_c10_dispatcher: full - func: trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor use_c10_dispatcher: full - func: _trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor use_c10_dispatcher: unboxed_only - func: triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor use_c10_dispatcher: full - func: trunc(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: function, method - func: trunc_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method - func: trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
supports_named_tensor: True dispatch: CPU: trunc_out CUDA: trunc_out - func: type_as(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: method - func: _has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool use_c10_dispatcher: full variants: function - func: _unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function dispatch: CPU: _unique_cpu CUDA: _unique_cuda - func: unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function dispatch: CPU: unique_dim_cpu CUDA: unique_dim_cuda - func: unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function dispatch: CPU: unique_consecutive_cpu CUDA: unique_consecutive_cuda - func: unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function dispatch: CPU: unique_dim_consecutive_cpu CUDA: unique_dim_consecutive_cuda # _unique and _unique_dim are fragile and modifying them easily cause internal break # the below operator is a temporary hack for adding return_counts support # Please don't rely on these two operators, they will be removed soon - func: _unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function dispatch: CPU: _unique2_cpu CUDA: _unique2_cuda - func: _unsafe_view(Tensor self, int[] size) -> Tensor use_c10_dispatcher: unboxed_only - func: unsqueeze(Tensor(a) self, int dim) -> Tensor(a) use_c10_dispatcher: unboxed_only variants: function, method device_guard: False - func: unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method device_guard: False - func: var(Tensor self, bool unbiased=True) -> Tensor use_c10_dispatcher: full variants: function, method supports_named_tensor: True - func: var.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor use_c10_dispatcher: unboxed_only variants: function, method supports_named_tensor: True - func: var.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True - func: var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor variants: function, method supports_named_tensor: True - func: var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
supports_named_tensor: True - func: var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function supports_named_tensor: True - func: var_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function supports_named_tensor: True - func: var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) variants: function supports_named_tensor: True - func: view_as(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: method device_guard: False # we define both of these because 'where' does the broadcast and '_s_where' doesn't; # this allows us to implicitly calculate the broadcast derivative, while only dealing with the # _s_where derivative. - func: where.self(Tensor condition, Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: function, method - func: where(Tensor condition) -> Tensor[] use_c10_dispatcher: unboxed_only variants: function - func: _s_where(Tensor condition, Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: function dispatch: CPU: _s_where_cpu CUDA: _s_where_cuda - func: norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor use_c10_dispatcher: unboxed_only variants: function # VariableType::_weight_norm does not want to be given a gap in the autograd graph, # so we don't define "dispatch" variants for it. - func: _weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor use_c10_dispatcher: full variants: function - func: _weight_norm_cuda_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function dispatch: CUDA: weight_norm_cuda - func: _weight_norm_cuda_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function dispatch: CUDA: weight_norm_cuda_backward - func: _weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function - func: zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor device_guard: False - func: zeros(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: zeros.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) - func: zeros_like(Tensor self) -> Tensor use_c10_dispatcher: full - func: zeros_like.dtype(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor - func: _standard_gamma_grad(Tensor self, Tensor output) -> Tensor use_c10_dispatcher: full variants: function dispatch: CPU: _standard_gamma_grad_cpu CUDA: _standard_gamma_grad_cuda - func: _standard_gamma(Tensor self, Generator? generator=None) -> Tensor use_c10_dispatcher: 'unboxed_only' variants: function dispatch: CPU: _s_gamma_cpu CUDA: _s_gamma_cuda - func: _dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor use_c10_dispatcher: full dispatch: CPU: _dirichlet_grad_cpu CUDA: _dirichlet_grad_cuda - func: _sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor use_c10_dispatcher: 'unboxed_only' variants: function dispatch: CPU: _s_dirichlet_cpu CUDA: _s_dirichlet_cuda - func: poisson(Tensor self, Generator? 
generator=None) -> Tensor use_c10_dispatcher: 'unboxed_only' dispatch: CPU: _s_poisson_cpu CUDA: _s_poisson_cuda # When more variants get ported to native, this dispatch will get more # complicated - func: native_norm(Tensor self, Scalar p=2) -> Tensor use_c10_dispatcher: full dispatch: SparseCPU: norm_sparse SparseCUDA: norm_sparse # TODO: reduce signatures down to one when optional args is available - func: _sparse_sum(Tensor self) -> Tensor use_c10_dispatcher: full - func: _sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor - func: _sparse_sum.dim(Tensor self, int[1] dim) -> Tensor use_c10_dispatcher: unboxed_only - func: _sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor - func: _sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor use_c10_dispatcher: unboxed_only dispatch: SparseCPU: _sparse_sum_backward_cpu SparseCUDA: _sparse_sum_backward_cuda - func: norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor variants: function, method - func: norm.Scalar(Tensor self, Scalar p=2) -> Tensor use_c10_dispatcher: full variants: function, method - func: norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor variants: function, method - func: norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor use_c10_dispatcher: unboxed_only variants: function, method - func: norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) - func: norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) - func: norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor variants: function, method - func: norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor variants: function, method - func: norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) - func: norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) - func: frobenius_norm(Tensor self) -> Tensor use_c10_dispatcher: full variants: function - func: frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor use_c10_dispatcher: unboxed_only variants: function - func: frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) variants: function - func: nuclear_norm(Tensor self, bool keepdim=False) -> Tensor use_c10_dispatcher: full variants: function - func: nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) variants: function - func: nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor use_c10_dispatcher: unboxed_only variants: function - func: nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) variants: function - func: clone(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method dispatch: CPU: clone CUDA: clone SparseCPU: clone_sparse SparseCUDA: clone_sparse MkldnnCPU: mkldnn_clone QuantizedCPU: quantized_clone supports_named_tensor: True - func: resize_as_(Tensor(a!) self, Tensor the_template) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: function, method - func: pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) 
supports_named_tensor: True dispatch: CPU: pow_out CUDA: pow_out SparseCPU: pow_out_sparse_scalar SparseCUDA: pow_out_sparse_scalar - func: pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor use_c10_dispatcher: full variants: function, method supports_named_tensor: True dispatch: CPU: pow CUDA: pow SparseCPU: pow_sparse_scalar SparseCUDA: pow_sparse_scalar - func: zero_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: method, function dispatch: CPU: legacy::cpu::_th_zero_ CUDA: legacy::cuda::_th_zero_ SparseCPU: zero_sparse_ SparseCUDA: zero_sparse_ MkldnnCPU: mkldnn_zero_ - func: sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: sub_out CUDA: sub_out SparseCPU: sub_out_sparse SparseCUDA: sub_out_sparse supports_named_tensor: True - func: sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor use_c10_dispatcher: full variants: function, method dispatch: CPU: sub CUDA: sub SparseCPU: sub_sparse SparseCUDA: sub_sparse supports_named_tensor: True - func: sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: sub_ CUDA: sub_ SparseCPU: sub_sparse_ SparseCUDA: sub_sparse_ supports_named_tensor: True # For C++ only, until we have conversion from C++ numbers to Tensor - func: sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor use_c10_dispatcher: full variants: function, method supports_named_tensor: True - func: sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method supports_named_tensor: True - func: rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor use_c10_dispatcher: full variants: function supports_named_tensor: True # For C++ only, until we have conversion from C++ numbers to Tensor - func: rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor use_c10_dispatcher: full variants: function supports_named_tensor: True # Functionally the same as addmm, but we give it a different derivative formula # that doesn't propagate gradients to non-present entries on sparse. - func: _sparse_addmm(Tensor self, Tensor sparse, Tensor dense, *, Scalar beta=1, Scalar alpha=1) -> Tensor use_c10_dispatcher: full named_guard: False - func: addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_addmm_out CUDA: legacy::cuda::_th_addmm_out SparseCPU: addmm_out_sparse_dense_cpu SparseCUDA: addmm_out_sparse_dense_cuda supports_named_tensor: True - func: addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor use_c10_dispatcher: full variants: function, method dispatch: CPU: legacy::cpu::_th_addmm CUDA: legacy::cuda::_th_addmm SparseCPU: addmm_sparse_dense_cpu SparseCUDA: addmm_sparse_dense_cuda supports_named_tensor: True - func: addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_addmm_ CUDA: legacy::cuda::_th_addmm_ # Warning! 
For whatever reason, the inplace sparse addmm is NON # broadcasting SparseCPU: s_addmm_sparse_dense_cpu_ SparseCUDA: s_addmm_sparse_dense_cuda_ supports_named_tensor: True
# NOTE [ Sparse: autograd and API ]
#
#
# Sparse Tensor Constructors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The API entry points to sparse tensor construction should be
# `sparse_coo_tensor` and `_sparse_coo_tensor_unsafe`. Depending on whether the
# indices and values tensors are given, they eventually dispatch to either
# `sparse_coo_tensor_with_dims` or `sparse_coo_tensor_with_dims_and_tensors`.
#
# The autograd support for the ctor is implemented on
# `sparse_coo_tensor_with_dims_and_tensors`.
#
# The API methods `sparse_coo_tensor` and `_sparse_coo_tensor_unsafe`
# **must not** have specific type dispatches because otherwise codegen will
# consider them as abstract methods (see Note [Abstract ATen methods]), dispatch
# using **Tensor** type, and thus lose autograd tracking on the actual method
# they dispatch to, e.g., `sparse_coo_tensor_with_dims_and_tensors`.
#
# The actual ctors `sparse_coo_tensor_with_dims` and `sparse_coo_tensor_with_dims_and_tensors`,
# on the other hand, need to create `SparseTensorImpl` and know nothing about
# how `VariableType`s work. So they need to be dispatched using Tensor types.
# We thus put `requires_tensor=True` to ensure that `VariableType` will unwrap
# the given variables and call with the Tensor type.
#
#
# Sparse Methods API Design
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Goals: 1. Flexible API for users to write custom sparse ops
#        2. ctor and member accessor with autograd support
#
# To achieve 1, we need to provide a set of *dangerous* APIs (dangerous in the
# sense that misusing them will break the sparse tensor invariants and may result
# in unexpected behavior, e.g., crash). These methods are all prefixed with
# underscore "_" to indicate that they should be used with care. We provide:
#
# + `_indices()`: returns the *raw* indices within the sparse tensor (not just
#                 sharing storage). Any inplace operation will change the
#                 actual indices, including t_, set_, as_strided_, resize_,
#                 etc.
# + `_values()`: returns the *raw* values within the sparse tensor. Similar
#                semantics as `_indices()`
# + `_nnz()`: returns the number of non-zero entries. This will always be
#             determined by the shapes of indices and values.
# + `_coalesced_(bool)`: inplace sets whether the tensor is coalesced, and
#                        returns itself.
#
# These methods are very useful in writing new operations, e.g., a custom
# autograd Function.
#
# We also provide other public *safe* APIs:
# + `indices()`: returns a **view** of the indices tensor if the sparse tensor
#                is **coalesced**.
# + `values()`: returns a **view** of the values tensor if the containing
#               sparse tensor is **coalesced**.
# + `sparse_dim()`: number of sparse dimensions
# + `dense_dim()`: number of dense dimensions
# + `is_coalesced()`: whether the sparse tensor is coalesced
#
# `_indices()` and `_values()` should return the raw indices and values dense
# tensors within a sparse tensor. They can be quite unsafe with inplace
# operations like `t_()`, and expose uncoalesced indices and values. The public
# recommended API is `indices()` and `values()`, both of which first check that
# the tensor is coalesced and return views on those tensors.
#
#
# Autograd Support
# ~~~~~~~~~~~~~~~~
#
# Autograd is supported on `values()` and sparse tensor ctor with indices and
# values tensors. E.g., `torch.sparse_coo_tensor(i, v).values().sum()` is
# differentiable w.r.t. `v`.
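#
# Illustrative usage sketch (editor's addition, not part of the operator schema):
# the raw vs. safe accessors described above, in Python. The tensor names `i`,
# `v`, `s`, and `c` below are arbitrary.
#
#   import torch
#   i = torch.tensor([[0, 0, 1], [1, 1, 0]])   # note the duplicate index (0, 1)
#   v = torch.tensor([1.0, 2.0, 3.0])
#   s = torch.sparse_coo_tensor(i, v, (2, 2))
#   s._nnz()          # 3 -- raw entries, duplicates included
#   s._values()       # tensor([1., 2., 3.]) -- raw, uncoalesced values
#   s.is_coalesced()  # False
#   c = s.coalesce()
#   c._nnz()          # 2 -- the duplicate entries were summed
#   c.values()        # tensor([3., 3.]) -- safe accessor, view of the coalesced values
#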
#
# NB: The `values()` and `_values()` operators are special in that they are
# layout-aware, i.e., the output depends not just on the data it represents, but
# also on the input layout details (in this case, the `indices` tensor). See
# NOTE [ as_strided Backward and layout-aware/agnostic autograd ] in Functions.cpp
# for discussion on layout-aware vs layout-agnostic autograd. Since PyTorch ops
# operate in the layout-agnostic mode, similar to `as_strided`, the backward of
# these two operators needs to consider them in a layout-agnostic way:
# + `values()`:
#     Input is coalesced.
#     We just pretend having `input.indices()` as an additional argument
#     `input_indices`, then forward is similar to
#     `input.to(kStrided).index_select(input_indices)` regardless of the layout.
#     Note that `values()` normally is layout-aware even if we constrain
#     ourselves on sparse inputs, since it may include all-zero value entries
#     as "present" entries.
# + `_values()`:
#     Input may be uncoalesced.
#     It is not straightforward to construct a layout-agnostic version because
#     duplicate indices entries may exist and additional parameterization is
#     needed to distribute the value into different values entries. Furthermore,
#     this op is intended to provide ways to write custom sparse ops, rather
#     than being used in the autograd graph, so it is marked as *non-differentiable*
#     in derivatives.yaml.
#
# Before reading the following, see NOTE [ Autograd Variable Views ] in
# variable.h for details on views that are tracked by autograd, and views that
# are not.
#
# Moreover, these methods return tensors that share storage with inputs, so we
# mark these methods as view ops to support autograd history tracking.
# The sparse tensor ctor output should technically be a view of both the input
# indices and values tensors, but currently we only support setting as a view of
# a single Variable, so it is only a view of the values tensor.
# TODO: clone indices in sparse tensor ctor.
#
# For other methods that return outputs that share storage with inputs, i.e.,
# `indices()` and `_indices()`, we mark their outputs as non-differentiable, so
# the view relation is not tracked by autograd, but the version counter is still
# shared. In other words, their outputs are non-differentiable views of the
# sparse tensor.
# FIXME: would be nicer if TensorOptions was optional-based; we are not adding
# default arguments for options, given the default would never make sense.
- func: sparse_coo_tensor.size(int[] size, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor - func: sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: _sparse_coo_tensor_unsafe(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
pin_memory=None) -> Tensor - func: _sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor dispatch: SparseCPU: new_with_dims_sparse SparseCUDA: new_with_dims_sparse requires_tensor: True - func: _sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, int[] size, Tensor indices, Tensor values, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor dispatch: SparseCPU: new_with_dims_and_tensor_sparse SparseCUDA: new_with_dims_and_tensor_sparse requires_tensor: True - func: sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: SparseCPU: sparse_resize_ SparseCUDA: sparse_resize_ requires_tensor: True - func: sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: SparseCPU: sparse_resize_and_clear_ SparseCUDA: sparse_resize_and_clear_ requires_tensor: True - func: sparse_mask(Tensor self, Tensor mask) -> Tensor use_c10_dispatcher: full variants: method dispatch: SparseCPU: sparse_mask_cpu SparseCUDA: sparse_mask_cuda requires_tensor: True - func: to_dense(Tensor self) -> Tensor use_c10_dispatcher: full variants: method dispatch: SparseCPU: sparse_to_dense SparseCUDA: sparse_to_dense MkldnnCPU: mkldnn_to_dense requires_tensor: True - func: to_dense_backward(Tensor grad, Tensor input) -> Tensor use_c10_dispatcher: full - func: sparse_dim(Tensor self) -> int use_c10_dispatcher: full variants: method dispatch: SparseCPU: sparse_dim_sparse SparseCUDA: sparse_dim_sparse requires_tensor: True device_guard: False # legacy method - func: _dimI(Tensor self) -> int use_c10_dispatcher: full variants: method dispatch: SparseCPU: sparse_dim_sparse SparseCUDA: sparse_dim_sparse requires_tensor: True device_guard: False - func: dense_dim(Tensor self) -> int use_c10_dispatcher: full variants: method dispatch: SparseCPU: dense_dim_sparse SparseCUDA: dense_dim_sparse requires_tensor: True device_guard: False # legacy method - func: _dimV(Tensor self) -> int use_c10_dispatcher: full variants: method dispatch: SparseCPU: dense_dim_sparse SparseCUDA: dense_dim_sparse requires_tensor: True device_guard: False - func: _nnz(Tensor self) -> int use_c10_dispatcher: full variants: method dispatch: SparseCPU: _nnz_sparse SparseCUDA: _nnz_sparse requires_tensor: True device_guard: False - func: coalesce(Tensor self) -> Tensor use_c10_dispatcher: full variants: method dispatch: SparseCPU: coalesce_sparse_cpu SparseCUDA: coalesce_sparse_cuda requires_tensor: True - func: is_coalesced(Tensor self) -> bool use_c10_dispatcher: full variants: method dispatch: SparseCPU: is_coalesced_sparse SparseCUDA: is_coalesced_sparse requires_tensor: True device_guard: False supports_named_tensor: True - func: _indices(Tensor(a) self) -> Tensor(a) use_c10_dispatcher: unboxed_only variants: method dispatch: SparseCPU: _indices_sparse SparseCUDA: _indices_sparse requires_tensor: True device_guard: False - func: _values(Tensor(a) self) -> Tensor(a) use_c10_dispatcher: unboxed_only variants: method dispatch: SparseCPU: _values_sparse SparseCUDA: _values_sparse requires_tensor: True device_guard: False # This method doesn't do any check but only directly sets the flag. So it can be # a bit unsafe. 
Similar to _indices and _values, this is useful for implementing # custom sparse operations in Python/C++ extension. - func: _coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: SparseCPU: _coalesced_sparse_ SparseCUDA: _coalesced_sparse_ requires_tensor: True device_guard: False - func: indices(Tensor(a) self) -> Tensor(a) use_c10_dispatcher: unboxed_only variants: method dispatch: SparseCPU: indices_sparse SparseCUDA: indices_sparse requires_tensor: True device_guard: False - func: values(Tensor(a) self) -> Tensor(a) use_c10_dispatcher: unboxed_only variants: method dispatch: SparseCPU: values_sparse SparseCUDA: values_sparse requires_tensor: True device_guard: False - func: hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) dispatch: SparseCPU: hspmm_out_sparse_cpu SparseCUDA: hspmm_out_sparse_cuda requires_tensor: True - func: hspmm(Tensor mat1, Tensor mat2) -> Tensor use_c10_dispatcher: full dispatch: SparseCPU: hspmm_sparse_cpu SparseCUDA: hspmm_sparse_cuda requires_tensor: True - func: copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: function dispatch: SparseCPU: copy_sparse_ SparseCUDA: copy_sparse_ requires_tensor: True - func: numel(Tensor self) -> int use_c10_dispatcher: full variants: function, method device_guard: False supports_named_tensor: True - func: unbind.int(Tensor(a) self, int dim=0) -> Tensor(a)[] use_c10_dispatcher: unboxed_only variants: function, method supports_named_tensor: True - func: unbind.Dimname(Tensor(a) self, Dimname dim) -> Tensor(a)[] variants: function, method supports_named_tensor: True - func: to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor use_c10_dispatcher: full variants: method dispatch: CPU: dense_to_sparse CUDA: dense_to_sparse - func: to_sparse(Tensor self) -> Tensor use_c10_dispatcher: full variants: method dispatch: CPU: dense_to_sparse CUDA: dense_to_sparse - func: to_mkldnn(Tensor self) -> Tensor use_c10_dispatcher: full variants: method dispatch: CPU: dense_to_mkldnn - func: mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1) -> Tensor use_c10_dispatcher: unboxed_only variants: function python_module: nn dispatch: MkldnnCPU: mkldnn_reorder_conv2d_weight - func: to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor use_c10_dispatcher: full - func: quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor variants: function dispatch: CPU: quantize_per_tensor_cpu - func: quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor variants: function dispatch: CPU: quantize_per_channel_cpu - func: dequantize(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method dispatch: QuantizedCPU: dequantize_quant - func: q_scale(Tensor self) -> float use_c10_dispatcher: full variants: function, method dispatch: QuantizedCPU: q_scale_quant - func: q_zero_point(Tensor self) -> int use_c10_dispatcher: full variants: function, method dispatch: QuantizedCPU: q_zero_point_quant - func: q_per_channel_scales(Tensor self) -> Tensor use_c10_dispatcher: unboxed_only variants: function, method dispatch: QuantizedCPU: q_per_channel_scales_quant - func: q_per_channel_zero_points(Tensor self) -> Tensor use_c10_dispatcher: unboxed_only variants: function, method dispatch: QuantizedCPU: q_per_channel_zero_points_quant - func: 
q_per_channel_axis(Tensor self) -> int variants: function, method dispatch: QuantizedCPU: q_per_channel_axis_quant - func: int_repr(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method dispatch: QuantizedCPU: int_repr_quant - func: _make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor use_c10_dispatcher: full dispatch: CPU: make_per_tensor_quantized_tensor_cpu - func: _make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor use_c10_dispatcher: unboxed_only dispatch: CPU: make_per_channel_quantized_tensor_cpu - func: qscheme(Tensor self) -> QScheme variants: method dispatch: QuantizedCPU: qscheme_quant - func: fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor use_c10_dispatcher: full variants: function dispatch: CPU: fake_quantize_per_tensor_affine_cpu CUDA: fake_quantize_per_tensor_affine_cuda - func: fake_quantize_per_tensor_affine_backward(Tensor grad, Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor use_c10_dispatcher: full variants: function dispatch: CPU: fake_quantize_per_tensor_affine_backward_cpu CUDA: fake_quantize_per_tensor_affine_backward_cuda - func: fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor variants: function dispatch: CPU: fake_quantize_per_channel_affine_cpu CUDA: fake_quantize_per_channel_affine_cuda - func: fake_quantize_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor variants: function dispatch: CPU: fake_quantize_per_channel_affine_backward_cpu CUDA: fake_quantize_per_channel_affine_backward_cuda # to(Device) must not exist because all constructors of Device also work for # TensorOptions. Otherwise, an ambiguity error is thrown. # See NOTE [ TensorOptions Constructors ].
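# Illustrative usage sketch (editor's addition, not part of the operator schema):
# the per-tensor quantization round trip defined by the quantize_per_tensor,
# dequantize, q_scale, q_zero_point, and int_repr entries above. The values
# below are arbitrary.
#
#   import torch
#   x = torch.tensor([0.0, 0.5, 1.0])
#   q = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)
#   q.q_scale()        # 0.1
#   q.q_zero_point()   # 0
#   q.int_repr()       # tensor([ 0,  5, 10], dtype=torch.uint8)
#   q.dequantize()     # tensor([0.0000, 0.5000, 1.0000]) -- up to quantization error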
- func: to.dtype_layout(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False, bool non_blocking=False, bool copy=False) -> Tensor variants: method device_guard: False supports_named_tensor: True - func: to.device(Tensor self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False) -> Tensor variants: method device_guard: False supports_named_tensor: True - func: to.dtype(Tensor self, ScalarType dtype, bool non_blocking=False, bool copy=False) -> Tensor variants: method device_guard: False supports_named_tensor: True - func: to.other(Tensor self, Tensor other, bool non_blocking=False, bool copy=False) -> Tensor use_c10_dispatcher: full variants: method device_guard: False - func: meshgrid(Tensor[] tensors) -> Tensor[] use_c10_dispatcher: unboxed_only - func: cartesian_prod(Tensor[] tensors) -> Tensor use_c10_dispatcher: unboxed_only variants: function - func: combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor use_c10_dispatcher: full variants: function - func: item(Tensor self) -> Scalar use_c10_dispatcher: full variants: method supports_named_tensor: True - func: result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType variants: function - func: result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType variants: function - func: result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType variants: function - func: result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType - func: can_cast(ScalarType from, ScalarType to) -> bool variants: function - func: promote_types(ScalarType type1, ScalarType type2) -> ScalarType variants: function # NB: Does NOT check precondition that numel == 1 - func: _local_scalar_dense(Tensor self) -> Scalar use_c10_dispatcher: full dispatch: CPU: _local_scalar_dense_cpu CUDA: _local_scalar_dense_cuda variants: function supports_named_tensor: True # Fused RNN kernels - func: _thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor) dispatch: CUDA: _thnn_fused_lstm_cell_cuda - func: _thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) dispatch: CUDA: _thnn_fused_lstm_cell_backward_cuda - func: _thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor) - func: _thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor) dispatch: CUDA: _thnn_fused_gru_cell_cuda - func: _thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) use_c10_dispatcher: unboxed_only dispatch: CUDA: _thnn_fused_gru_cell_backward_cuda - func: _thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? 
hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) # RNN cells and layers - func: lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: unboxed_only - func: lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: unboxed_only - func: gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only - func: gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only - func: rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only - func: rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only - func: rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only - func: rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only - func: lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor) - func: gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor - func: rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor - func: rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor # Quantized RNN layers - func: quantized_lstm(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? 
dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor) # Quantized GRU layers - func: quantized_gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only - func: quantized_gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only # Quantized RNN cells - func: quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only - func: quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor use_c10_dispatcher: full - func: quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor use_c10_dispatcher: full - func: quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor use_c10_dispatcher: full # PackedSequence utilities - func: _pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only - func: _pack_padded_sequence_backward(Tensor grad, int[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor use_c10_dispatcher: unboxed_only - func: _pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only # wrappers for legacy TH methods - func: set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!) variants: method device_guard: False dispatch: CPU: legacy::cpu::_th_set_ CUDA: legacy::cuda::_th_set_ - func: set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!) variants: method device_guard: False dispatch: CPU: legacy::cpu::_th_set_ CUDA: legacy::cuda::_th_set_ QuantizedCPU: set_storage - func: set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method device_guard: False dispatch: CPU: legacy::cpu::_th_set_ CUDA: legacy::cuda::_th_set_ - func: set_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_set_ CUDA: legacy::cuda::_th_set_ - func: set_quantizer_(Tensor(a!) self, ConstQuantizerPtr quantizer) -> Tensor(a!) variants: method dispatch: QuantizedCPU: set_quantizer_ - func: is_set_to(Tensor self, Tensor tensor) -> bool use_c10_dispatcher: full variants: method device_guard: False dispatch: CPU: legacy::cpu::_th_is_set_to CUDA: legacy::cuda::_th_is_set_to - func: masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!) 
use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: masked_fill__cpu CUDA: masked_fill__cuda supports_named_tensor: True - func: masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor use_c10_dispatcher: full variants: function, method supports_named_tensor: True - func: masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: masked_fill__cpu CUDA: masked_fill__cuda supports_named_tensor: True - func: masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor use_c10_dispatcher: full variants: function, method supports_named_tensor: True - func: masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: masked_scatter__cpu CUDA: masked_scatter__cuda - func: masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor use_c10_dispatcher: full variants: function, method - func: view(Tensor(a) self, int[] size) -> Tensor(a) use_c10_dispatcher: unboxed_only variants: method device_guard: False dispatch: CPU: view CUDA: view MkldnnCPU: mkldnn_view QuantizedCPU: view - func: put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_put_ CUDA: legacy::cuda::_th_put_ - func: index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_index_add_ CUDA: legacy::cuda::_th_index_add_ - func: index_add(Tensor self, int dim, Tensor index, Tensor source) -> Tensor use_c10_dispatcher: full variants: function, method - func: index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor variants: function, method - func: index_fill_.Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method supports_named_tensor: True dispatch: CPU: legacy::cpu::_th_index_fill_ CUDA: legacy::cuda::_th_index_fill_ - func: index_fill.Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: function, method - func: index_fill_.Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_index_fill_ CUDA: legacy::cuda::_th_index_fill_ supports_named_tensor: True - func: index_fill.Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor use_c10_dispatcher: full variants: function, method supports_named_tensor: True - func: index_fill_.dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!) variants: method supports_named_tensor: True - func: index_fill_.dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!) variants: method supports_named_tensor: True - func: index_fill.dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor variants: function, method supports_named_tensor: True - func: index_fill.dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor variants: function, method supports_named_tensor: True - func: scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_scatter_ CUDA: legacy::cuda::_th_scatter_ - func: scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor use_c10_dispatcher: full variants: function, method - func: scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_scatter_ CUDA: legacy::cuda::_th_scatter_ - func: scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor use_c10_dispatcher: full variants: function, method - func: scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor variants: function, method - func: scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor variants: function, method - func: scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_scatter_add_ CUDA: legacy::cuda::_th_scatter_add_ - func: scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor use_c10_dispatcher: full variants: function, method - func: scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor variants: function, method - func: lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method - func: lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method - func: gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method - func: gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method - func: le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method - func: le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method - func: ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method - func: ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method - func: eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method - func: eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method - func: ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method - func: ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method - func: __and__.Scalar(Tensor self, Scalar other) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_and CUDA: legacy::cuda::_th_and - func: __and__.Tensor(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_and CUDA: legacy::cuda::_th_and - func: __iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_iand_ CUDA: legacy::cuda::_th_iand_ - func: __iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_iand_ CUDA: legacy::cuda::_th_iand_ - func: __or__.Scalar(Tensor self, Scalar other) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_or CUDA: legacy::cuda::_th_or - func: __or__.Tensor(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_or CUDA: legacy::cuda::_th_or - func: __ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_ior_ CUDA: legacy::cuda::_th_ior_ - func: __ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_ior_ CUDA: legacy::cuda::_th_ior_ - func: __xor__.Scalar(Tensor self, Scalar other) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_xor CUDA: legacy::cuda::_th_xor - func: __xor__.Tensor(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_xor CUDA: legacy::cuda::_th_xor - func: __ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_ixor_ CUDA: legacy::cuda::_th_ixor_ - func: __ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_ixor_ CUDA: legacy::cuda::_th_ixor_ - func: __lshift__.Scalar(Tensor self, Scalar other) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_lshift CUDA: legacy::cuda::_th_lshift - func: __lshift__.Tensor(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_lshift CUDA: legacy::cuda::_th_lshift - func: __ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_ilshift_ CUDA: legacy::cuda::_th_ilshift_ - func: __ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_ilshift_ CUDA: legacy::cuda::_th_ilshift_ - func: __rshift__.Scalar(Tensor self, Scalar other) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_rshift CUDA: legacy::cuda::_th_rshift - func: __rshift__.Tensor(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_rshift CUDA: legacy::cuda::_th_rshift - func: __irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_irshift_ CUDA: legacy::cuda::_th_irshift_ - func: __irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_irshift_ CUDA: legacy::cuda::_th_irshift_ - func: lgamma_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: method dispatch: CPU: _lgamma__cpu CUDA: _lgamma__cuda - func: atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: method - func: tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) 
use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: tril_cpu_ CUDA: tril_cuda_ - func: triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: triu_cpu_ CUDA: triu_cuda_ - func: digamma_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: method - func: polygamma_(Tensor(a!) self, int n) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: method - func: renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_renorm_ CUDA: legacy::cuda::_th_renorm_ - func: pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: method dispatch: CPU: pow_ CUDA: pow_ - func: pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: method dispatch: CPU: pow_ CUDA: pow_ - func: lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: lerp_cpu_scalar_ CUDA: lerp_cuda_scalar_ - func: lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: lerp_cpu_tensor_ CUDA: lerp_cuda_tensor_ - func: fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_fmod_ CUDA: legacy::cuda::_th_fmod_ - func: fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_fmod_ CUDA: legacy::cuda::_th_fmod_ - func: remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_remainder_ CUDA: legacy::cuda::_th_remainder_ - func: remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_remainder_ CUDA: legacy::cuda::_th_remainder_ - func: addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_addbmm_ CUDA: legacy::cuda::_th_addbmm_ - func: addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_addbmm_out CUDA: legacy::cuda::_th_addbmm_out - func: addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_addbmm CUDA: legacy::cuda::_th_addbmm - func: addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method - func: random_.from(Tensor(a!) self, int from, int to, *, Generator? generator=None) -> Tensor(a!) use_c10_dispatcher: 'unboxed_only' variants: method dispatch: CPU: legacy::cpu::_th_random_ CUDA: clamped_random_cuda_ supports_named_tensor: True - func: random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!) use_c10_dispatcher: 'unboxed_only' variants: method dispatch: CPU: legacy::cpu::_th_random_ CUDA: capped_random_cuda_ supports_named_tensor: True - func: random_(Tensor(a!) self, *, Generator? 
generator=None) -> Tensor(a!) use_c10_dispatcher: 'unboxed_only' variants: method dispatch: CPU: legacy::cpu::_th_random_ CUDA: random_cuda_ supports_named_tensor: True - func: uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!) use_c10_dispatcher: 'unboxed_only' variants: method dispatch: CPU: legacy::cpu::_th_uniform_ CUDA: uniform_cuda_ supports_named_tensor: True - func: normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!) use_c10_dispatcher: 'unboxed_only' variants: method dispatch: CPU: legacy::cpu::_th_normal_ CUDA: normal_cuda_ supports_named_tensor: True - func: cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!) use_c10_dispatcher: 'unboxed_only' variants: method dispatch: CPU: legacy::cpu::_th_cauchy_ CUDA: cauchy_cuda_ supports_named_tensor: True - func: log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!) use_c10_dispatcher: 'unboxed_only' variants: method dispatch: CPU: legacy::cpu::_th_log_normal_ CUDA: log_normal_cuda_ supports_named_tensor: True - func: exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!) use_c10_dispatcher: 'unboxed_only' variants: method dispatch: CPU: legacy::cpu::_th_exponential_ CUDA: exponential_cuda_ supports_named_tensor: True - func: geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!) use_c10_dispatcher: 'unboxed_only' variants: method dispatch: CPU: legacy::cpu::_th_geometric_ CUDA: geometric_cuda_ supports_named_tensor: True # wrappers for TH functions - func: diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_diag_out CUDA: legacy::cuda::_th_diag_out - func: diag(Tensor self, int diagonal=0) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_diag CUDA: legacy::cuda::_th_diag - func: cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) - func: cross(Tensor self, Tensor other, int? dim=None) -> Tensor use_c10_dispatcher: full variants: method, function - func: triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: triu_cpu_out CUDA: triu_cuda_out - func: triu(Tensor self, int diagonal=0) -> Tensor use_c10_dispatcher: full variants: method, function - func: tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: tril_cpu_out CUDA: tril_cuda_out - func: tril(Tensor self, int diagonal=0) -> Tensor use_c10_dispatcher: full variants: method, function - func: tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor dispatch: CPU: tril_indices_cpu CUDA: tril_indices_cuda - func: triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor dispatch: CPU: triu_indices_cpu CUDA: triu_indices_cuda - func: trace(Tensor self) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_trace CUDA: legacy::cuda::_th_trace - func: ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
supports_named_tensor: True dispatch: CPU: ne_out CUDA: ne_out QuantizedCPU: ne_out_quantized_cpu - func: ne.Scalar(Tensor self, Scalar other) -> Tensor supports_named_tensor: True use_c10_dispatcher: full variants: method, function dispatch: CPU: ne CUDA: ne QuantizedCPU: ne_quantized_cpu - func: ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: ne_out CUDA: ne_out QuantizedCPU: ne_out_quantized_cpu - func: ne.Tensor(Tensor self, Tensor other) -> Tensor supports_named_tensor: True use_c10_dispatcher: full variants: method, function dispatch: CPU: ne CUDA: ne QuantizedCPU: ne_quantized_cpu - func: eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: eq_out CUDA: eq_out QuantizedCPU: eq_out_quantized_cpu - func: eq.Scalar(Tensor self, Scalar other) -> Tensor supports_named_tensor: True use_c10_dispatcher: full variants: method, function dispatch: CPU: eq CUDA: eq QuantizedCPU: eq_quantized_cpu - func: eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: eq_out CUDA: eq_out QuantizedCPU: eq_out_quantized_cpu - func: eq.Tensor(Tensor self, Tensor other) -> Tensor supports_named_tensor: True use_c10_dispatcher: full variants: method, function dispatch: CPU: eq CUDA: eq QuantizedCPU: eq_quantized_cpu - func: ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: ge_out CUDA: ge_out QuantizedCPU: ge_out_quantized_cpu - func: ge.Scalar(Tensor self, Scalar other) -> Tensor supports_named_tensor: True use_c10_dispatcher: full variants: method, function dispatch: CPU: ge CUDA: ge QuantizedCPU: ge_quantized_cpu - func: ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: ge_out CUDA: ge_out QuantizedCPU: ge_out_quantized_cpu - func: ge.Tensor(Tensor self, Tensor other) -> Tensor supports_named_tensor: True use_c10_dispatcher: full variants: method, function dispatch: CPU: ge CUDA: ge QuantizedCPU: ge_quantized_cpu - func: le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: le_out CUDA: le_out QuantizedCPU: le_out_quantized_cpu - func: le.Scalar(Tensor self, Scalar other) -> Tensor supports_named_tensor: True use_c10_dispatcher: full variants: method, function dispatch: CPU: le CUDA: le QuantizedCPU: le_quantized_cpu - func: le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: le_out CUDA: le_out QuantizedCPU: le_out_quantized_cpu - func: le.Tensor(Tensor self, Tensor other) -> Tensor supports_named_tensor: True use_c10_dispatcher: full variants: method, function dispatch: CPU: le CUDA: le QuantizedCPU: le_quantized_cpu - func: gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: gt_out CUDA: gt_out QuantizedCPU: gt_out_quantized_cpu - func: gt.Scalar(Tensor self, Scalar other) -> Tensor supports_named_tensor: True use_c10_dispatcher: full variants: method, function dispatch: CPU: gt CUDA: gt QuantizedCPU: gt_quantized_cpu - func: gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
supports_named_tensor: True dispatch: CPU: gt_out CUDA: gt_out QuantizedCPU: gt_out_quantized_cpu - func: gt.Tensor(Tensor self, Tensor other) -> Tensor supports_named_tensor: True use_c10_dispatcher: full variants: method, function dispatch: CPU: gt CUDA: gt QuantizedCPU: gt_quantized_cpu - func: lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: lt_out CUDA: lt_out QuantizedCPU: lt_out_quantized_cpu - func: lt.Scalar(Tensor self, Scalar other) -> Tensor supports_named_tensor: True use_c10_dispatcher: full variants: method, function dispatch: CPU: lt CUDA: lt QuantizedCPU: lt_quantized_cpu - func: lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: lt_out CUDA: lt_out QuantizedCPU: lt_out_quantized_cpu - func: lt.Tensor(Tensor self, Tensor other) -> Tensor supports_named_tensor: True use_c10_dispatcher: full variants: method, function dispatch: CPU: lt CUDA: lt QuantizedCPU: lt_quantized_cpu - func: take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_take_out CUDA: legacy::cuda::_th_take_out - func: take(Tensor self, Tensor index) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_take CUDA: legacy::cuda::_th_take - func: index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_index_select_out CUDA: legacy::cuda::_th_index_select_out - func: index_select(Tensor self, int dim, Tensor index) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_index_select CUDA: legacy::cuda::_th_index_select SparseCPU: index_select_sparse SparseCUDA: index_select_sparse - func: index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) - func: index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor variants: method, function - func: masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: masked_select_out_cpu CUDA: masked_select_out_cuda supports_named_tensor: True - func: masked_select(Tensor self, Tensor mask) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: masked_select_cpu CUDA: masked_select_cuda supports_named_tensor: True - func: nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_nonzero_out CUDA: legacy::cuda::_th_nonzero_out - func: nonzero(Tensor self) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_nonzero CUDA: legacy::cuda::_th_nonzero - func: nonzero_numpy(Tensor self) -> Tensor[] use_c10_dispatcher: unboxed_only variants: method, function - func: gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: gather_out_cpu CUDA: gather_out_cuda - func: gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: gather_cpu CUDA: gather_cuda - func: gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) 
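# Illustrative usage sketch (editor's addition, not part of the operator schema):
# gather semantics for the gather entries around here. For a 2-D input and
# dim=1, out[i][j] = self[i][index[i][j]]. The values below are arbitrary.
#
#   import torch
#   x = torch.tensor([[1, 2], [3, 4]])
#   idx = torch.tensor([[0, 0], [1, 0]])
#   torch.gather(x, 1, idx)   # tensor([[1, 1], [4, 3]])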
- func: gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor variants: method, function - func: _gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor use_c10_dispatcher: full - func: addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) - func: addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor use_c10_dispatcher: full variants: method, function - func: addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method - func: addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) - func: addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor use_c10_dispatcher: full variants: method, function - func: lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR) dispatch: CPU: legacy::cpu::_th_gels_out CUDA: legacy::cuda::_th_gels_out - func: lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR) use_c10_dispatcher: unboxed_only variants: method, function dispatch: CPU: legacy::cpu::_th_gels CUDA: legacy::cuda::_th_gels - func: triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient) - func: triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient) use_c10_dispatcher: unboxed_only variants: method, function - func: _triangular_solve_helper(Tensor self, Tensor A, bool upper, bool transpose, bool unitriangular) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function dispatch: CPU: _triangular_solve_helper_cpu CUDA: _triangular_solve_helper_cuda - func: symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) - func: symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor eigenvalues, Tensor eigenvectors) use_c10_dispatcher: unboxed_only variants: method, function - func: _symeig_helper(Tensor self, bool eigenvectors, bool upper) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function dispatch: CPU: _symeig_helper_cpu CUDA: _symeig_helper_cuda - func: eig.e(Tensor self, bool eigenvectors=False, *, Tensor(a!) e, Tensor(b!) v) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) dispatch: CPU: legacy::cpu::_th_eig_out CUDA: legacy::cuda::_th_eig_out - func: eig(Tensor self, bool eigenvectors=False) -> (Tensor eigenvalues, Tensor eigenvectors) use_c10_dispatcher: unboxed_only variants: method, function dispatch: CPU: legacy::cpu::_th_eig CUDA: legacy::cuda::_th_eig - func: svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) - func: svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V) use_c10_dispatcher: unboxed_only variants: method, function - func: _svd_helper(Tensor self, bool some, bool compute_uv) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function dispatch: CPU: _svd_helper_cpu CUDA: _svd_helper_cuda - func: cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) 
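# Illustrative usage sketch (editor's addition, not part of the operator schema):
# the cholesky entries around here factor a symmetric positive-definite matrix
# as A = L @ L.t(), with L lower-triangular when upper=False (the default).
# The matrix below is arbitrary.
#
#   import torch
#   a = torch.randn(3, 3)
#   spd = a @ a.t() + 3 * torch.eye(3)   # make it symmetric positive-definite
#   l = torch.cholesky(spd)
#   torch.allclose(l @ l.t(), spd)       # True, up to numerical error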
- func: cholesky(Tensor self, bool upper=False) -> Tensor use_c10_dispatcher: full variants: method, function - func: _cholesky_helper(Tensor self, bool upper) -> Tensor use_c10_dispatcher: full variants: function dispatch: CPU: _cholesky_helper_cpu CUDA: _cholesky_helper_cuda - func: cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) - func: cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor use_c10_dispatcher: full variants: method, function - func: _cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor use_c10_dispatcher: full variants: function dispatch: CPU: _cholesky_solve_helper_cpu CUDA: _cholesky_solve_helper_cuda - func: solve(Tensor self, Tensor A) -> (Tensor solution, Tensor LU) use_c10_dispatcher: unboxed_only variants: function, method - func: solve.solution(Tensor self, Tensor A, *, Tensor(a!) solution, Tensor(b!) lu) -> (Tensor(a!) solution, Tensor(b!) LU) - func: _solve_helper(Tensor self, Tensor A) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function dispatch: CPU: _solve_helper_cpu CUDA: _solve_helper_cuda - func: cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_potri_out CUDA: legacy::cuda::_th_potri_out - func: cholesky_inverse(Tensor self, bool upper=False) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_potri CUDA: legacy::cuda::_th_potri - func: qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) - func: qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R) use_c10_dispatcher: unboxed_only variants: method, function - func: _qr_helper(Tensor self, bool some) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function dispatch: CPU: _qr_helper_cpu CUDA: _qr_helper_cuda - func: geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau) dispatch: CPU: legacy::cpu::_th_geqrf_out CUDA: legacy::cuda::_th_geqrf_out - func: geqrf(Tensor self) -> (Tensor a, Tensor tau) use_c10_dispatcher: unboxed_only variants: method, function dispatch: CPU: legacy::cpu::_th_geqrf CUDA: legacy::cuda::_th_geqrf - func: orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_orgqr_out - func: orgqr(Tensor self, Tensor input2) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_orgqr - func: ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_ormqr_out - func: ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_ormqr - func: _lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function dispatch: CPU: _lu_with_info_cpu CUDA: _lu_with_info_cuda - func: lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!) 
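# Illustrative usage sketch (editor's addition, not part of the operator schema):
# solving A x = b through the public torch.lu / torch.lu_solve wrappers, which
# are assumed here to sit on top of the _lu_with_info and lu_solve entries
# nearby. The matrices below are arbitrary.
#
#   import torch
#   A = torch.randn(3, 3)
#   b = torch.randn(3, 1)
#   LU, pivots = torch.lu(A)
#   x = torch.lu_solve(b, LU, pivots)
#   torch.allclose(A @ x, b)   # True, up to numerical error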
- func: lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor use_c10_dispatcher: full variants: method, function - func: _lu_solve_helper(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor use_c10_dispatcher: full variants: function dispatch: CPU: _lu_solve_helper_cpu CUDA: _lu_solve_helper_cuda # TODO: remove dispatch section when porting TH CUDA to ATen - func: multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: multinomial_out CUDA: multinomial_out - func: multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor use_c10_dispatcher: 'unboxed_only' variants: method, function dispatch: CPU: multinomial CUDA: multinomial - func: _multinomial_alias_setup(Tensor probs) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only variants: function dispatch: CPU: legacy::cpu::_th_multinomial_alias_setup CUDA: legacy::cuda::_th_multinomial_alias_setup - func: _multinomial_alias_draw(Tensor J, Tensor q, int num_samples, *, Generator? generator=None) -> Tensor use_c10_dispatcher: 'unboxed_only' variants: function dispatch: CPU: legacy::cpu::_th_multinomial_alias_draw CUDA: legacy::cuda::_th_multinomial_alias_draw - func: lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: _lgamma_out_cpu CUDA: _lgamma_out_cuda - func: lgamma(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: method, function dispatch: CPU: lgamma CUDA: lgamma - func: digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True - func: digamma(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: method, function - func: polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True - func: polygamma(int n, Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: method, function - func: erfinv(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: method, function dispatch: CPU: erfinv CUDA: erfinv - func: erfinv_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only supports_named_tensor: True variants: method dispatch: CPU: _erfinv__cpu CUDA: _erfinv__cuda - func: erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: _erfinv_out_cpu CUDA: _erfinv_out_cuda - func: sign(Tensor self) -> Tensor use_c10_dispatcher: unboxed_only variants: function, method supports_named_tensor: True - func: sign_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: unboxed_only variants: method supports_named_tensor: True - func: sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: sign_out CUDA: sign_out - func: dist(Tensor self, Tensor other, Scalar p=2) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_dist CUDA: legacy::cuda::_th_dist - func: atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True - func: atan2(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: method, function - func: lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: lerp_cpu_scalar_out CUDA: lerp_cuda_scalar_out - func: lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) 
dispatch: CPU: lerp_cpu_tensor_out CUDA: lerp_cuda_tensor_out - func: lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: lerp_cpu_scalar CUDA: lerp_cuda_scalar - func: lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: lerp_cpu_tensor CUDA: lerp_cuda_tensor - func: histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_histc_out CUDA: _histc_out_cuda - func: histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_histc CUDA: _histc_cuda - func: fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_fmod_out CUDA: legacy::cuda::_th_fmod_out - func: fmod.Scalar(Tensor self, Scalar other) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_fmod CUDA: legacy::cuda::_th_fmod - func: fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_fmod_out CUDA: legacy::cuda::_th_fmod_out - func: fmod.Tensor(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_fmod CUDA: legacy::cuda::_th_fmod - func: remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_remainder_out CUDA: legacy::cuda::_th_remainder_out - func: remainder.Scalar(Tensor self, Scalar other) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_remainder CUDA: legacy::cuda::_th_remainder - func: remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_remainder_out CUDA: legacy::cuda::_th_remainder_out - func: remainder.Tensor(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_remainder CUDA: legacy::cuda::_th_remainder - func: min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_min_out CUDA: legacy::cuda::_th_min_out - func: min.other(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_min CUDA: legacy::cuda::_th_min - func: min(Tensor self) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_min CUDA: legacy::cuda::_th_min QuantizedCPU: min_quant supports_named_tensor: True - func: max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_max_out CUDA: legacy::cuda::_th_max_out - func: max.other(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_max CUDA: legacy::cuda::_th_max - func: max(Tensor self) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_max CUDA: legacy::cuda::_th_max QuantizedCPU: max_quant supports_named_tensor: True - func: median(Tensor self) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: median_cpu CUDA: median_cuda supports_named_tensor: True - func: sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) dispatch: CPU: legacy::cpu::_th_sort_out CUDA: legacy::cuda::_th_sort_out - func: sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) use_c10_dispatcher: unboxed_only variants: method, function dispatch: CPU: legacy::cpu::_th_sort CUDA: legacy::cuda::_th_sort QuantizedCPU: sort_quant - func: sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) - func: sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) variants: method, function - func: argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor use_c10_dispatcher: full variants: method, function - func: argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor variants: method, function - func: topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) dispatch: CPU: topk_out_cpu CUDA: legacy::cuda::_th_topk_out - func: topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) use_c10_dispatcher: unboxed_only variants: method, function dispatch: CPU: topk CUDA: topk QuantizedCPU: quantized_topk_cpu - func: all(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: method, function - func: any(Tensor self) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: method, function - func: renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_renorm_out CUDA: legacy::cuda::_th_renorm_out - func: renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_renorm CUDA: legacy::cuda::_th_renorm - func: unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a) use_c10_dispatcher: unboxed_only variants: method dispatch: CPU: legacy::cpu::_th_unfold CUDA: legacy::cuda::_th_unfold - func: equal(Tensor self, Tensor other) -> bool use_c10_dispatcher: full variants: method, function dispatch: CPU: legacy::cpu::_th_equal CUDA: legacy::cuda::_th_equal QuantizedCPU: quantized_equal - func: pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: pow_out CUDA: pow_out - func: pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor use_c10_dispatcher: full supports_named_tensor: True variants: method, function dispatch: CPU: pow CUDA: pow - func: pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True dispatch: CPU: pow_out CUDA: pow_out - func: pow.Scalar(Scalar self, Tensor exponent) -> Tensor use_c10_dispatcher: full supports_named_tensor: True dispatch: CPU: pow CUDA: pow - func: normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_normal_out CUDA: normal_out_cuda - func: normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor use_c10_dispatcher: 'unboxed_only' dispatch: CPU: legacy::cpu::_th_normal CUDA: normal_cuda - func: normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) 
dispatch: CPU: legacy::cpu::_th_normal_out CUDA: normal_out_cuda - func: normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor use_c10_dispatcher: 'unboxed_only' dispatch: CPU: legacy::cpu::_th_normal CUDA: normal_cuda - func: normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_normal_out CUDA: normal_out_cuda - func: normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor use_c10_dispatcher: 'unboxed_only' dispatch: CPU: legacy::cpu::_th_normal CUDA: normal_cuda - func: normal.float_float(float mean, float std, int[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - func: normal.float_float_out(float mean, float std, int[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) - func: alias(Tensor(a) self) -> Tensor(a) use_c10_dispatcher: unboxed_only variants: method, function supports_named_tensor: True - func: _addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor use_c10_dispatcher: full dispatch: CPU: legacy::cpu::_th_addr CUDA: legacy::cuda::_th_addr - func: _addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) use_c10_dispatcher: unboxed_only dispatch: CPU: legacy::cpu::_th_addr_ CUDA: legacy::cuda::_th_addr_ - func: _addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_addr_out CUDA: legacy::cuda::_th_addr_out - func: _index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) use_c10_dispatcher: unboxed_only dispatch: CPU: legacy::cpu::_th_index_copy_ CUDA: legacy::cuda::_th_index_copy_ - func: _cumsum(Tensor self, int dim) -> Tensor use_c10_dispatcher: full dispatch: CPU: legacy::cpu::_th_cumsum CUDA: legacy::cuda::_th_cumsum - func: _cumsum.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_cumsum_out CUDA: legacy::cuda::_th_cumsum_out - func: _cumprod(Tensor self, int dim) -> Tensor use_c10_dispatcher: full dispatch: CPU: legacy::cpu::_th_cumprod CUDA: legacy::cuda::_th_cumprod - func: _cumprod.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_cumprod_out CUDA: legacy::cuda::_th_cumprod_out - func: _var(Tensor self, bool unbiased=True) -> Tensor use_c10_dispatcher: full dispatch: CPU: legacy::cpu::_th_var CUDA: legacy::cuda::_th_var supports_named_tensor: True - func: _std(Tensor self, bool unbiased=True) -> Tensor use_c10_dispatcher: full dispatch: CPU: legacy::cpu::_th_std CUDA: legacy::cuda::_th_std supports_named_tensor: True - func: _cat(Tensor[] tensors, int dim=0) -> Tensor use_c10_dispatcher: unboxed_only dispatch: CPU: legacy::cpu::_th_cat CUDA: legacy::cuda::_th_cat - func: _cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) dispatch: CPU: legacy::cpu::_th_cat_out CUDA: legacy::cuda::_th_cat_out - func: _mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only dispatch: CPU: legacy::cpu::_th_mode CUDA: legacy::cuda::_th_mode - func: _mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) dispatch: CPU: legacy::cpu::_th_mode_out CUDA: legacy::cuda::_th_mode_out - func: _max(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only dispatch: CPU: legacy::cpu::_th_max CUDA: legacy::cuda::_th_max - func: _max.max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_indices) -> (Tensor(a!), Tensor(b!)) dispatch: CPU: legacy::cpu::_th_max_out CUDA: legacy::cuda::_th_max_out - func: _min(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only dispatch: CPU: legacy::cpu::_th_min CUDA: legacy::cuda::_th_min - func: _min.min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!), Tensor(b!)) dispatch: CPU: legacy::cpu::_th_min_out CUDA: legacy::cuda::_th_min_out ## NN wrappers - func: binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_binary_cross_entropy_forward_out CUDA: legacy::cuda::_thnn_binary_cross_entropy_forward_out - func: binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor python_module: nn dispatch: CPU: legacy::cpu::_thnn_binary_cross_entropy_forward CUDA: legacy::cuda::_thnn_binary_cross_entropy_forward - func: binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_binary_cross_entropy_backward_out CUDA: legacy::cuda::_thnn_binary_cross_entropy_backward_out - func: binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor python_module: nn dispatch: CPU: legacy::cpu::_thnn_binary_cross_entropy_backward CUDA: legacy::cuda::_thnn_binary_cross_entropy_backward - func: mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_mse_loss_forward_out CUDA: legacy::cuda::_thnn_mse_loss_forward_out - func: mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_mse_loss_forward CUDA: legacy::cuda::_thnn_mse_loss_forward - func: mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_mse_loss_backward_out CUDA: legacy::cuda::_thnn_mse_loss_backward_out - func: mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_mse_loss_backward CUDA: legacy::cuda::_thnn_mse_loss_backward - func: l1_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_l1_loss_forward_out CUDA: legacy::cuda::_thnn_l1_loss_forward_out - func: l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_l1_loss_forward CUDA: legacy::cuda::_thnn_l1_loss_forward - func: l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) 
python_module: nn dispatch: CPU: legacy::cpu::_thnn_l1_loss_backward_out CUDA: legacy::cuda::_thnn_l1_loss_backward_out - func: l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_l1_loss_backward CUDA: legacy::cuda::_thnn_l1_loss_backward - func: multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_multi_margin_loss_forward_out CUDA: legacy::cuda::_thnn_multi_margin_loss_forward_out - func: multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor python_module: nn dispatch: CPU: legacy::cpu::_thnn_multi_margin_loss_forward CUDA: legacy::cuda::_thnn_multi_margin_loss_forward - func: multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_multi_margin_loss_backward_out CUDA: legacy::cuda::_thnn_multi_margin_loss_backward_out - func: multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor python_module: nn dispatch: CPU: legacy::cpu::_thnn_multi_margin_loss_backward CUDA: legacy::cuda::_thnn_multi_margin_loss_backward - func: multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) python_module: nn - func: multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor use_c10_dispatcher: full python_module: nn - func: multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!)) python_module: nn dispatch: CPU: legacy::cpu::_thnn_multilabel_margin_loss_forward_out CUDA: legacy::cuda::_thnn_multilabel_margin_loss_forward_out - func: multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target) use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: legacy::cpu::_thnn_multilabel_margin_loss_forward CUDA: legacy::cuda::_thnn_multilabel_margin_loss_forward - func: multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_multilabel_margin_loss_backward_out CUDA: legacy::cuda::_thnn_multilabel_margin_loss_backward_out - func: multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_multilabel_margin_loss_backward CUDA: legacy::cuda::_thnn_multilabel_margin_loss_backward - func: nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) python_module: nn - func: nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor python_module: nn - func: nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) 
total_weight) -> (Tensor(a!), Tensor(b!)) python_module: nn dispatch: CPU: legacy::cpu::_thnn_nll_loss_forward_out CUDA: legacy::cuda::_thnn_nll_loss_forward_out - func: nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight) python_module: nn dispatch: CPU: legacy::cpu::_thnn_nll_loss_forward CUDA: legacy::cuda::_thnn_nll_loss_forward - func: nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_nll_loss_backward_out CUDA: legacy::cuda::_thnn_nll_loss_backward_out - func: nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor python_module: nn dispatch: CPU: legacy::cpu::_thnn_nll_loss_backward CUDA: legacy::cuda::_thnn_nll_loss_backward - func: nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) python_module: nn - func: nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor python_module: nn - func: nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) python_module: nn dispatch: CPU: legacy::cpu::_thnn_nll_loss2d_forward_out CUDA: legacy::cuda::_thnn_nll_loss2d_forward_out - func: nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight) python_module: nn dispatch: CPU: legacy::cpu::_thnn_nll_loss2d_forward CUDA: legacy::cuda::_thnn_nll_loss2d_forward - func: nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_nll_loss2d_backward_out CUDA: legacy::cuda::_thnn_nll_loss2d_backward_out - func: nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor python_module: nn dispatch: CPU: legacy::cpu::_thnn_nll_loss2d_backward CUDA: legacy::cuda::_thnn_nll_loss2d_backward - func: smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_smooth_l1_loss_forward_out CUDA: legacy::cuda::_thnn_smooth_l1_loss_forward_out - func: smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_smooth_l1_loss_forward CUDA: legacy::cuda::_thnn_smooth_l1_loss_forward - func: smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) 
python_module: nn dispatch: CPU: legacy::cpu::_thnn_smooth_l1_loss_backward_out CUDA: legacy::cuda::_thnn_smooth_l1_loss_backward_out - func: smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_smooth_l1_loss_backward CUDA: legacy::cuda::_thnn_smooth_l1_loss_backward - func: soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_soft_margin_loss_forward_out CUDA: legacy::cuda::_thnn_soft_margin_loss_forward_out - func: soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_soft_margin_loss_forward CUDA: legacy::cuda::_thnn_soft_margin_loss_forward - func: soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_soft_margin_loss_backward_out CUDA: legacy::cuda::_thnn_soft_margin_loss_backward_out - func: soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_soft_margin_loss_backward CUDA: legacy::cuda::_thnn_soft_margin_loss_backward - func: elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_elu_forward_out CUDA: legacy::cuda::_thnn_elu_forward_out - func: elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_elu_forward CUDA: legacy::cuda::_thnn_elu_forward - func: elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_elu_backward_out CUDA: legacy::cuda::_thnn_elu_backward_out - func: elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor output) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_elu_backward CUDA: legacy::cuda::_thnn_elu_backward - func: elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!) use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: legacy::cpu::_thnn_elu_forward_ CUDA: legacy::cuda::_thnn_elu_forward_ - func: glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_glu_forward_out CUDA: legacy::cuda::_thnn_glu_forward_out - func: glu(Tensor self, int dim=-1) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_glu_forward CUDA: legacy::cuda::_thnn_glu_forward - func: glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_glu_backward_out CUDA: legacy::cuda::_thnn_glu_backward_out - func: glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_glu_backward CUDA: legacy::cuda::_thnn_glu_backward - func: hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) 
python_module: nn dispatch: CPU: legacy::cpu::_thnn_hardtanh_forward_out CUDA: legacy::cuda::_thnn_hardtanh_forward_out - func: hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_hardtanh_forward CUDA: legacy::cuda::_thnn_hardtanh_forward - func: hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_hardtanh_backward_out CUDA: legacy::cuda::_thnn_hardtanh_backward_out - func: hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_hardtanh_backward CUDA: legacy::cuda::_thnn_hardtanh_backward - func: hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!) use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: legacy::cpu::_thnn_hardtanh_forward_ CUDA: legacy::cuda::_thnn_hardtanh_forward_ - func: leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_leaky_relu_forward_out CUDA: legacy::cuda::_thnn_leaky_relu_forward_out - func: leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_leaky_relu_forward CUDA: legacy::cuda::_thnn_leaky_relu_forward - func: leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_leaky_relu_backward_out CUDA: legacy::cuda::_thnn_leaky_relu_backward_out - func: leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_leaky_relu_backward CUDA: legacy::cuda::_thnn_leaky_relu_backward - func: leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!) use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: legacy::cpu::_thnn_leaky_relu_forward_ CUDA: legacy::cuda::_thnn_leaky_relu_forward_ - func: log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: nn - func: log_sigmoid(Tensor self) -> Tensor use_c10_dispatcher: full python_module: nn - func: log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!)) python_module: nn dispatch: CPU: legacy::cpu::_thnn_log_sigmoid_forward_out CUDA: legacy::cuda::_thnn_log_sigmoid_forward_out - func: log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer) use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: legacy::cpu::_thnn_log_sigmoid_forward CUDA: legacy::cuda::_thnn_log_sigmoid_forward - func: log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_log_sigmoid_backward_out CUDA: legacy::cuda::_thnn_log_sigmoid_backward_out - func: log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_log_sigmoid_backward CUDA: legacy::cuda::_thnn_log_sigmoid_backward - func: rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) 
python_module: nn dispatch: CPU: legacy::cpu::_thnn_rrelu_with_noise_forward_out CUDA: legacy::cuda::_thnn_rrelu_with_noise_forward_out - func: rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor use_c10_dispatcher: 'unboxed_only' python_module: nn dispatch: CPU: legacy::cpu::_thnn_rrelu_with_noise_forward CUDA: legacy::cuda::_thnn_rrelu_with_noise_forward - func: rrelu_with_noise_backward.grad_input(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_rrelu_with_noise_backward_out CUDA: legacy::cuda::_thnn_rrelu_with_noise_backward_out - func: rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_rrelu_with_noise_backward CUDA: legacy::cuda::_thnn_rrelu_with_noise_backward - func: rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!) use_c10_dispatcher: 'unboxed_only' python_module: nn dispatch: CPU: legacy::cpu::_thnn_rrelu_with_noise_forward_ CUDA: legacy::cuda::_thnn_rrelu_with_noise_forward_ - func: softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_softplus_forward_out CUDA: legacy::cuda::_thnn_softplus_forward_out - func: softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_softplus_forward CUDA: legacy::cuda::_thnn_softplus_forward - func: softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_softplus_backward_out CUDA: legacy::cuda::_thnn_softplus_backward_out - func: softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_softplus_backward CUDA: legacy::cuda::_thnn_softplus_backward - func: softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_softshrink_forward_out CUDA: legacy::cuda::_thnn_softshrink_forward_out - func: softshrink(Tensor self, Scalar lambd=0.5) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_softshrink_forward CUDA: legacy::cuda::_thnn_softshrink_forward - func: softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_softshrink_backward_out CUDA: legacy::cuda::_thnn_softshrink_backward_out - func: softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_softshrink_backward CUDA: legacy::cuda::_thnn_softshrink_backward - func: adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) 
python_module: nn dispatch: CPU: adaptive_avg_pool2d_out_cpu CUDA: adaptive_avg_pool2d_out_cuda MkldnnCPU: mkldnn_adaptive_avg_pool2d_out - func: adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn - func: mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor use_c10_dispatcher: unboxed_only dispatch: MkldnnCPU: mkldnn_adaptive_avg_pool2d requires_tensor: True - func: _adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor use_c10_dispatcher: unboxed_only dispatch: CPU: adaptive_avg_pool2d_cpu CUDA: adaptive_avg_pool2d_cuda QuantizedCPU: quantized_adaptive_avg_pool2d - func: _adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: adaptive_avg_pool2d_backward_cpu CUDA: adaptive_avg_pool2d_backward_cuda - func: adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: adaptive_avg_pool3d_out_cpu CUDA: adaptive_avg_pool3d_out_cuda - func: adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: adaptive_avg_pool3d_cpu CUDA: adaptive_avg_pool3d_cuda - func: adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: adaptive_avg_pool3d_backward_out_cpu CUDA: adaptive_avg_pool3d_backward_out_cuda - func: adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: adaptive_avg_pool3d_backward_cpu CUDA: adaptive_avg_pool3d_backward_cuda # Return: (Tensor output, Tensor indices) - func: adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) python_module: nn dispatch: CPU: adaptive_max_pool2d_out_cpu CUDA: adaptive_max_pool2d_out_cuda # Return: (Tensor output, Tensor indices) - func: adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: adaptive_max_pool2d_cpu CUDA: adaptive_max_pool2d_cuda - func: adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: adaptive_max_pool2d_backward_out_cpu CUDA: adaptive_max_pool2d_backward_out_cuda - func: adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: adaptive_max_pool2d_backward_cpu CUDA: adaptive_max_pool2d_backward_cuda # Return: (Tensor output, Tensor indices) - func: adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) python_module: nn dispatch: CPU: adaptive_max_pool3d_out_cpu CUDA: adaptive_max_pool3d_out_cuda # Return: (Tensor output, Tensor indices) - func: adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: adaptive_max_pool3d_cpu CUDA: adaptive_max_pool3d_cuda - func: adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
python_module: nn dispatch: CPU: adaptive_max_pool3d_backward_out_cpu CUDA: adaptive_max_pool3d_backward_out_cuda - func: adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: adaptive_max_pool3d_backward_cpu CUDA: adaptive_max_pool3d_backward_cuda - func: avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: avg_pool2d_out_cpu CUDA: avg_pool2d_out_cuda MkldnnCPU: mkldnn_avg_pool2d_out - func: avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: avg_pool2d_cpu CUDA: avg_pool2d_cuda MkldnnCPU: mkldnn_avg_pool2d QuantizedCPU: quantized_avg_pool2d - func: avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: avg_pool2d_backward_out_cpu CUDA: avg_pool2d_backward_out_cuda - func: avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: avg_pool2d_backward_cpu CUDA: avg_pool2d_backward_cuda - func: avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: avg_pool3d_out_cpu CUDA: avg_pool3d_out_cuda - func: avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: avg_pool3d_cpu CUDA: avg_pool3d_cuda - func: avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: avg_pool3d_backward_out_cpu CUDA: avg_pool3d_backward_out_cuda - func: avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: avg_pool3d_backward_cpu CUDA: avg_pool3d_backward_cuda # Return: (Tensor output, Tensor indices) - func: fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) python_module: nn dispatch: CPU: fractional_max_pool2d_out_cpu CUDA: fractional_max_pool2d_out_cuda # Return: (Tensor output, Tensor indices) - func: fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: fractional_max_pool2d_cpu CUDA: fractional_max_pool2d_cuda - func: fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: fractional_max_pool2d_backward_out_cpu CUDA: fractional_max_pool2d_backward_out_cuda - func: fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: fractional_max_pool2d_backward_cpu CUDA: fractional_max_pool2d_backward_cuda # Return: (Tensor output, Tensor indices) - func: fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) python_module: nn dispatch: CPU: fractional_max_pool3d_out_cpu CUDA: fractional_max_pool3d_out_cuda # Return: (Tensor output, Tensor indices) - func: fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: fractional_max_pool3d_cpu CUDA: fractional_max_pool3d_cuda - func: fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: fractional_max_pool3d_backward_out_cpu CUDA: fractional_max_pool3d_backward_out_cuda - func: fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: fractional_max_pool3d_backward_cpu CUDA: fractional_max_pool3d_backward_cuda # Return: (Tensor output, Tensor indices) - func: max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) python_module: nn dispatch: CPU: max_pool2d_with_indices_out_cpu CUDA: max_pool2d_with_indices_out_cuda # Return: (Tensor output, Tensor indices) - func: max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: max_pool2d_with_indices_cpu CUDA: max_pool2d_with_indices_cuda - func: max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
python_module: nn dispatch: CPU: max_pool2d_with_indices_backward_out_cpu CUDA: max_pool2d_with_indices_backward_out_cuda - func: max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: max_pool2d_with_indices_backward_cpu CUDA: max_pool2d_with_indices_backward_cuda # Return: (Tensor output, Tensor indices) - func: max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) python_module: nn dispatch: CPU: max_pool3d_with_indices_out_cpu CUDA: max_pool3d_with_indices_out_cuda # Return: (Tensor output, Tensor indices) - func: max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: max_pool3d_with_indices_cpu CUDA: max_pool3d_with_indices_cuda - func: max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: max_pool3d_with_indices_backward_out_cpu CUDA: max_pool3d_with_indices_backward_out_cuda - func: max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: max_pool3d_with_indices_backward_cpu CUDA: max_pool3d_with_indices_backward_cuda - func: max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: max_unpooling2d_forward_out_cpu CUDA: max_unpooling2d_forward_out_cuda - func: max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: max_unpooling2d_forward_cpu CUDA: max_unpooling2d_forward_cuda - func: max_unpool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: max_unpooling2d_backward_out_cpu CUDA: max_unpooling2d_backward_out_cuda - func: max_unpool2d_backward(Tensor grad_output, Tensor self, Tensor indices, int[2] output_size) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: max_unpooling2d_backward_cpu CUDA: max_unpooling2d_backward_cuda - func: max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: max_unpooling3d_forward_out_cpu CUDA: max_unpooling3d_forward_out_cuda - func: max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: max_unpooling3d_forward_cpu CUDA: max_unpooling3d_forward_cuda - func: max_unpool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
python_module: nn dispatch: CPU: max_unpooling3d_backward_out_cpu CUDA: max_unpooling3d_backward_out_cuda - func: max_unpool3d_backward(Tensor grad_output, Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: max_unpooling3d_backward_cpu CUDA: max_unpooling3d_backward_cuda - func: reflection_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: reflection_pad1d_out_cpu CUDA: reflection_pad1d_out_cuda - func: reflection_pad1d(Tensor self, int[2] padding) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: reflection_pad1d_cpu CUDA: reflection_pad1d_cuda - func: reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: reflection_pad1d_backward_out_cpu CUDA: reflection_pad1d_backward_out_cuda - func: reflection_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: reflection_pad1d_backward_cpu CUDA: reflection_pad1d_backward_cuda - func: reflection_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: reflection_pad2d_out_cpu CUDA: reflection_pad2d_out_cuda - func: reflection_pad2d(Tensor self, int[4] padding) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: reflection_pad2d_cpu CUDA: reflection_pad2d_cuda - func: reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: reflection_pad2d_backward_out_cpu CUDA: reflection_pad2d_backward_out_cuda - func: reflection_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: reflection_pad2d_backward_cpu CUDA: reflection_pad2d_backward_cuda - func: replication_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: replication_pad1d_out_cpu CUDA: replication_pad1d_out_cuda - func: replication_pad1d(Tensor self, int[2] padding) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: replication_pad1d_cpu CUDA: replication_pad1d_cuda - func: replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: replication_pad1d_backward_out_cpu CUDA: replication_pad1d_backward_out_cuda - func: replication_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: replication_pad1d_backward_cpu CUDA: replication_pad1d_backward_cuda - func: replication_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: replication_pad2d_out_cpu CUDA: replication_pad2d_out_cuda - func: replication_pad2d(Tensor self, int[4] padding) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: replication_pad2d_cpu CUDA: replication_pad2d_cuda - func: replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
python_module: nn dispatch: CPU: replication_pad2d_backward_out_cpu CUDA: replication_pad2d_backward_out_cuda - func: replication_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: replication_pad2d_backward_cpu CUDA: replication_pad2d_backward_cuda - func: replication_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: replication_pad3d_out_cpu CUDA: replication_pad3d_out_cuda - func: replication_pad3d(Tensor self, int[6] padding) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: replication_pad3d_cpu CUDA: replication_pad3d_cuda - func: replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: replication_pad3d_backward_out_cpu CUDA: replication_pad3d_backward_out_cuda - func: replication_pad3d_backward(Tensor grad_output, Tensor self, int[6] padding) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: replication_pad3d_backward_cpu CUDA: replication_pad3d_backward_cuda - func: upsample_linear1d.out(Tensor self, int[1] output_size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: upsample_linear1d_out_cpu CUDA: upsample_linear1d_out_cuda - func: upsample_linear1d(Tensor self, int[1] output_size, bool align_corners) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: upsample_linear1d_cpu CUDA: upsample_linear1d_cuda - func: upsample_linear1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: upsample_linear1d_backward_out_cpu CUDA: upsample_linear1d_backward_out_cuda - func: upsample_linear1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: upsample_linear1d_backward_cpu CUDA: upsample_linear1d_backward_cuda - func: upsample_bilinear2d.out(Tensor self, int[2] output_size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: upsample_bilinear2d_out_cpu CUDA: upsample_bilinear2d_out_cuda - func: upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: upsample_bilinear2d_cpu CUDA: upsample_bilinear2d_cuda QuantizedCPU: quantized_upsample_bilinear2d_cpu - func: upsample_bilinear2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: upsample_bilinear2d_backward_out_cpu CUDA: upsample_bilinear2d_backward_out_cuda - func: upsample_bilinear2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: upsample_bilinear2d_backward_cpu CUDA: upsample_bilinear2d_backward_cuda - func: upsample_bicubic2d.out(Tensor self, int[2] output_size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) 
python_module: nn dispatch: CPU: upsample_bicubic2d_out_cpu CUDA: upsample_bicubic2d_out_cuda - func: upsample_bicubic2d(Tensor self, int[2] output_size, bool align_corners) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: upsample_bicubic2d_cpu CUDA: upsample_bicubic2d_cuda - func: upsample_bicubic2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: upsample_bicubic2d_backward_out_cpu CUDA: upsample_bicubic2d_backward_out_cuda - func: upsample_bicubic2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: upsample_bicubic2d_backward_cpu CUDA: upsample_bicubic2d_backward_cuda - func: upsample_trilinear3d.out(Tensor self, int[3] output_size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: upsample_trilinear3d_out_cpu CUDA: upsample_trilinear3d_out_cuda - func: upsample_trilinear3d(Tensor self, int[3] output_size, bool align_corners) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: upsample_trilinear3d_cpu CUDA: upsample_trilinear3d_cuda - func: upsample_trilinear3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: upsample_trilinear3d_backward_out_cpu CUDA: upsample_trilinear3d_backward_out_cuda - func: upsample_trilinear3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: upsample_trilinear3d_backward_cpu CUDA: upsample_trilinear3d_backward_cuda - func: upsample_nearest1d.out(Tensor self, int[1] output_size, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: upsample_nearest1d_out_cpu CUDA: upsample_nearest1d_out_cuda - func: upsample_nearest1d(Tensor self, int[1] output_size) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: upsample_nearest1d_cpu CUDA: upsample_nearest1d_cuda - func: upsample_nearest1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: upsample_nearest1d_backward_out_cpu CUDA: upsample_nearest1d_backward_out_cuda - func: upsample_nearest1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: upsample_nearest1d_backward_cpu CUDA: upsample_nearest1d_backward_cuda - func: upsample_nearest2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: upsample_nearest2d_out_cpu CUDA: upsample_nearest2d_out_cuda - func: upsample_nearest2d(Tensor self, int[2] output_size) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: upsample_nearest2d_cpu CUDA: upsample_nearest2d_cuda QuantizedCPU: quantized_upsample_nearest2d_cpu - func: upsample_nearest2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, *, Tensor(a!) grad_input) -> Tensor(a!) 
python_module: nn dispatch: CPU: upsample_nearest2d_backward_out_cpu CUDA: upsample_nearest2d_backward_out_cuda - func: upsample_nearest2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: upsample_nearest2d_backward_cpu CUDA: upsample_nearest2d_backward_cuda - func: upsample_nearest3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: upsample_nearest3d_out_cpu CUDA: upsample_nearest3d_out_cuda - func: upsample_nearest3d(Tensor self, int[3] output_size) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: upsample_nearest3d_cpu CUDA: upsample_nearest3d_cuda - func: upsample_nearest3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: upsample_nearest3d_backward_out_cpu CUDA: upsample_nearest3d_backward_out_cuda - func: upsample_nearest3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size) -> Tensor use_c10_dispatcher: unboxed_only python_module: nn dispatch: CPU: upsample_nearest3d_backward_cpu CUDA: upsample_nearest3d_backward_cuda - func: sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_sigmoid_backward_out CUDA: legacy::cuda::_thnn_sigmoid_backward_out - func: sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_sigmoid_backward CUDA: legacy::cuda::_thnn_sigmoid_backward - func: tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn dispatch: CPU: legacy::cpu::_thnn_tanh_backward_out CUDA: legacy::cuda::_thnn_tanh_backward_out - func: tanh_backward(Tensor grad_output, Tensor output) -> Tensor use_c10_dispatcher: full python_module: nn dispatch: CPU: legacy::cpu::_thnn_tanh_backward CUDA: legacy::cuda::_thnn_tanh_backward # What's a thnn_conv_ versus a slow_conv_? # # Historically, we have inefficient implementations of convolutions # coming from the THNN/THCUNN library. These convolutions typically # operated by computing the Toeplitz matrix and then doing a matrix # multiply with the input; this is very memory inefficient! However, # occasionally, we really don't have anything better, so it's helpful # to have these fallbacks when there is no more optimized implementation # in cudnn or mkldnn, etc. Both thnn_ and slow_ convolutions fall # into this bucket. # # The difference between these two designations is that thnn_ refers # to a convolution that is still written in the "legacy" style; that is, # C code in the THNN/ or THCUNN/ directory. A slow_ convolution is # one that is written in the native style: modern C++. Algorithmically, # these are the same thing, but we give them different prefixes to # make the operational distinction clear. - func: slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) python_module: nn dispatch: CPU: slow_conv_transpose2d_out_cpu CUDA: slow_conv_transpose2d_out_cuda - func: slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
- func: slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: slow_conv_transpose2d_out_cpu
    CUDA: slow_conv_transpose2d_out_cuda

- func: slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1) -> Tensor
  python_module: nn
  dispatch:
    CPU: slow_conv_transpose2d_cpu
    CUDA: slow_conv_transpose2d_cuda

- func: slow_conv_transpose2d_backward.grad_output(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] output_padding, int[2] dilation, Tensor columns, Tensor ones, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
  python_module: nn
  dispatch:
    CPU: slow_conv_transpose2d_backward_out_cpu
    CUDA: slow_conv_transpose2d_backward_out_cuda

- func: slow_conv_transpose2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] output_padding, int[2] dilation, Tensor columns, Tensor ones, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
  use_c10_dispatcher: unboxed_only
  python_module: nn
  dispatch:
    CPU: slow_conv_transpose2d_backward_cpu
    CUDA: slow_conv_transpose2d_backward_cuda

- func: slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: slow_conv_transpose3d_out_cpu
    CUDA: slow_conv_transpose3d_out_cuda

- func: slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1) -> Tensor
  python_module: nn
  dispatch:
    CPU: slow_conv_transpose3d_cpu
    CUDA: slow_conv_transpose3d_cuda

- func: slow_conv_transpose3d_backward.grad_output(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] output_padding, int[3] dilation, Tensor finput, Tensor fgrad_input, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
  python_module: nn
  dispatch:
    CPU: slow_conv_transpose3d_backward_out_cpu
    CUDA: slow_conv_transpose3d_backward_out_cuda

- func: slow_conv_transpose3d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] output_padding, int[3] dilation, Tensor finput, Tensor fgrad_input, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
  use_c10_dispatcher: unboxed_only
  python_module: nn
  dispatch:
    CPU: slow_conv_transpose3d_backward_cpu
    CUDA: slow_conv_transpose3d_backward_cuda

- func: thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn

- func: thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor
  python_module: nn

- func: thnn_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output, Tensor(b!) finput, Tensor(c!) fgrad_input) -> (Tensor(a!), Tensor(b!), Tensor(c!))
  python_module: nn
  dispatch:
    CPU: legacy::cpu::_thnn_conv2d_forward_out
    CUDA: legacy::cuda::_thnn_conv2d_forward_out
- func: thnn_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> (Tensor output, Tensor finput, Tensor fgrad_input)
  python_module: nn
  dispatch:
    CPU: legacy::cpu::_thnn_conv2d_forward
    CUDA: legacy::cuda::_thnn_conv2d_forward

- func: thnn_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, Tensor finput, Tensor fgrad_input, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
  python_module: nn
  dispatch:
    CPU: legacy::cpu::_thnn_conv2d_backward_out
    CUDA: legacy::cuda::_thnn_conv2d_backward_out

- func: thnn_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, Tensor finput, Tensor fgrad_input, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
  use_c10_dispatcher: unboxed_only
  python_module: nn
  dispatch:
    CPU: legacy::cpu::_thnn_conv2d_backward
    CUDA: legacy::cuda::_thnn_conv2d_backward

- func: thnn_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn

- func: thnn_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor
  python_module: nn

- func: thnn_conv_depthwise2d_forward.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CUDA: legacy::cuda::_thnn_conv_depthwise2d_forward_out

- func: thnn_conv_depthwise2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation) -> Tensor
  python_module: nn
  dispatch:
    CUDA: legacy::cuda::_thnn_conv_depthwise2d_forward

- func: thnn_conv_depthwise2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  dispatch:
    CUDA: legacy::cuda::_thnn_conv_depthwise2d_backward_out

- func: thnn_conv_depthwise2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool[2] output_mask) -> (Tensor grad_input, Tensor grad_weight)
  use_c10_dispatcher: unboxed_only
  python_module: nn
  dispatch:
    CUDA: legacy::cuda::_thnn_conv_depthwise2d_backward

- func: thnn_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn

- func: thnn_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0) -> Tensor
  python_module: nn

- func: thnn_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, *, Tensor(a!) output, Tensor(b!) finput, Tensor(c!) fgrad_input) -> (Tensor(a!), Tensor(b!), Tensor(c!))
  python_module: nn
  dispatch:
    CPU: legacy::cpu::_thnn_conv3d_forward_out
- func: thnn_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding) -> (Tensor output, Tensor finput, Tensor fgrad_input)
  python_module: nn
  dispatch:
    CPU: legacy::cpu::_thnn_conv3d_forward

- func: thnn_conv3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, Tensor finput, Tensor fgrad_input, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
  python_module: nn
  dispatch:
    CPU: legacy::cpu::_thnn_conv3d_backward_out

- func: thnn_conv3d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, Tensor finput, Tensor fgrad_input, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
  use_c10_dispatcher: unboxed_only
  python_module: nn
  dispatch:
    CPU: legacy::cpu::_thnn_conv3d_backward

- func: slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor
  python_module: nn
  dispatch:
    CPU: slow_conv_dilated2d_cpu
    CUDA: slow_conv_dilated2d_cuda

- func: slow_conv_dilated2d_backward(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
  use_c10_dispatcher: unboxed_only
  python_module: nn
  dispatch:
    CPU: slow_conv_dilated2d_backward_cpu
    CUDA: slow_conv_dilated2d_backward_cuda

- func: slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1) -> Tensor
  python_module: nn
  dispatch:
    CPU: slow_conv_dilated3d_cpu
    CUDA: slow_conv_dilated3d_cuda

- func: slow_conv_dilated3d_backward(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
  use_c10_dispatcher: unboxed_only
  python_module: nn
  dispatch:
    CPU: slow_conv_dilated3d_backward_cpu
    CUDA: slow_conv_dilated3d_backward_cuda

- func: col2im.out(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: col2im_out_cpu
    CUDA: col2im_out_cuda

- func: col2im(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
  use_c10_dispatcher: unboxed_only
  python_module: nn
  dispatch:
    CPU: col2im_cpu
    CUDA: col2im_cuda

- func: col2im_backward.grad_input(Tensor grad_output, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: col2im_backward_out_cpu
    CUDA: col2im_backward_out_cuda

- func: col2im_backward(Tensor grad_output, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
  use_c10_dispatcher: unboxed_only
  python_module: nn
  dispatch:
    CPU: col2im_backward_cpu
    CUDA: col2im_backward_cuda

- func: im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: im2col_out_cpu
    CUDA: im2col_out_cuda

- func: im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
  use_c10_dispatcher: unboxed_only
  python_module: nn
  dispatch:
    CPU: im2col_cpu
    CUDA: im2col_cuda

- func: im2col_backward.grad_input(Tensor grad_output, int[2] input_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: im2col_backward_out_cpu
    CUDA: im2col_backward_out_cuda

- func: im2col_backward(Tensor grad_output, int[2] input_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
  use_c10_dispatcher: unboxed_only
  python_module: nn
  dispatch:
    CPU: im2col_backward_cpu
    CUDA: im2col_backward_cuda