// generated by rake generate:functions
// do not edit by hand

#include <torch/torch.h>
#include <rice/Module.hpp>

#include "templates.hpp"

void add_torch_functions(Module m) {
  m
    .define_singleton_method( "_abs", *[](const Tensor &self) { return torch::abs(self); })
    .define_singleton_method( "_abs_", *[](Tensor &self) { return torch::abs_(self); })
    .define_singleton_method( "_abs_out", *[](const Tensor &self, Tensor &out) { return torch::abs_out(out, self); })
    .define_singleton_method( "_acos", *[](const Tensor &self) { return torch::acos(self); })
    .define_singleton_method( "_acos_", *[](Tensor &self) { return torch::acos_(self); })
    .define_singleton_method( "_acos_out", *[](const Tensor &self, Tensor &out) { return torch::acos_out(out, self); })
    .define_singleton_method( "_adaptive_avg_pool1d", *[](const Tensor &self, IntArrayRef output_size) { return torch::adaptive_avg_pool1d(self, output_size); })
    .define_singleton_method( "_adaptive_max_pool1d", *[](const Tensor &self, IntArrayRef output_size) { return torch::adaptive_max_pool1d(self, output_size); })
    .define_singleton_method( "_add_out", *[](const Tensor &self, const Tensor &other, Scalar alpha, Tensor &out) { return torch::add_out(out, self, other, alpha); })
    .define_singleton_method( "_add_scalar", *[](const Tensor &self, Scalar other, Scalar alpha) { return torch::add(self, other, alpha); })
    .define_singleton_method( "_add_tensor", *[](const Tensor &self, const Tensor &other, Scalar alpha) { return torch::add(self, other, alpha); })
    .define_singleton_method( "_addbmm", *[](const Tensor &self, const Tensor &batch1, const Tensor &batch2, Scalar beta, Scalar alpha) { return torch::addbmm(self, batch1, batch2, beta, alpha); })
    .define_singleton_method( "_addbmm_out", *[](const Tensor &self, const Tensor &batch1, const Tensor &batch2, Scalar beta, Scalar alpha, Tensor &out) { return torch::addbmm_out(out, self, batch1, batch2, beta, alpha); })
    .define_singleton_method( "_addcdiv", *[](const Tensor &self, const Tensor &tensor1, const Tensor &tensor2, Scalar value) { return torch::addcdiv(self, tensor1, tensor2, value); })
    .define_singleton_method( "_addcdiv_out", *[](const Tensor &self, const Tensor &tensor1, const Tensor &tensor2, Scalar value, Tensor &out) { return torch::addcdiv_out(out, self, tensor1, tensor2, value); })
    .define_singleton_method( "_addcmul", *[](const Tensor &self, const Tensor &tensor1, const Tensor &tensor2, Scalar value) { return torch::addcmul(self, tensor1, tensor2, value); })
    .define_singleton_method( "_addcmul_out", *[](const Tensor &self, const Tensor &tensor1, const Tensor &tensor2, Scalar value, Tensor &out) { return torch::addcmul_out(out, self, tensor1, tensor2, value); })
    .define_singleton_method( "_addmm", *[](const Tensor &self, const Tensor &mat1, const Tensor &mat2, Scalar beta, Scalar alpha) { return torch::addmm(self, mat1, mat2, beta, alpha); })
    .define_singleton_method( "_addmm_out", *[](const Tensor &self, const Tensor &mat1, const Tensor &mat2, Scalar beta, Scalar alpha, Tensor &out) { return torch::addmm_out(out, self, mat1, mat2, beta, alpha); })
    .define_singleton_method( "_addmv", *[](const Tensor &self, const Tensor &mat, const Tensor &vec, Scalar beta, Scalar alpha) { return torch::addmv(self, mat, vec, beta, alpha); })
    .define_singleton_method( "_addmv_", *[](Tensor &self, const Tensor &mat, const Tensor &vec, Scalar beta, Scalar alpha) { return torch::addmv_(self, mat, vec, beta, alpha); })
    .define_singleton_method( "_addmv_out", *[](const Tensor &self, const Tensor &mat, const Tensor &vec, Scalar beta, Scalar alpha,
Tensor &out) { return torch::addmv_out(out, self, mat, vec, beta, alpha); }) .define_singleton_method( "_addr", *[](const Tensor &self, const Tensor &vec1, const Tensor &vec2, Scalar beta, Scalar alpha) { return torch::addr(self, vec1, vec2, beta, alpha); }) .define_singleton_method( "_addr_out", *[](const Tensor &self, const Tensor &vec1, const Tensor &vec2, Scalar beta, Scalar alpha, Tensor &out) { return torch::addr_out(out, self, vec1, vec2, beta, alpha); }) .define_singleton_method( "_affine_grid_generator", *[](const Tensor &theta, IntArrayRef size, bool align_corners) { return torch::affine_grid_generator(theta, size, align_corners); }) .define_singleton_method( "_alias", *[](Tensor &self) { return torch::alias(self); }) .define_singleton_method( "_align_tensors", *[](TensorList tensors) { return torch::align_tensors(tensors); }) .define_singleton_method( "_all", *[](const Tensor &self) { return torch::all(self); }) .define_singleton_method( "_all_dim", *[](const Tensor &self, int64_t dim, bool keepdim) { return torch::all(self, dim, keepdim); }) .define_singleton_method( "_all_out", *[](const Tensor &self, int64_t dim, bool keepdim, Tensor &out) { return torch::all_out(out, self, dim, keepdim); }) .define_singleton_method( "_allclose", *[](const Tensor &self, const Tensor &other, double rtol, double atol, bool equal_nan) { return torch::allclose(self, other, rtol, atol, equal_nan); }) .define_singleton_method( "_alpha_dropout", *[](const Tensor &input, double p, bool train) { return torch::alpha_dropout(input, p, train); }) .define_singleton_method( "_alpha_dropout_", *[](Tensor &self, double p, bool train) { return torch::alpha_dropout_(self, p, train); }) .define_singleton_method( "_any", *[](const Tensor &self) { return torch::any(self); }) .define_singleton_method( "_any_dim", *[](const Tensor &self, int64_t dim, bool keepdim) { return torch::any(self, dim, keepdim); }) .define_singleton_method( "_any_out", *[](const Tensor &self, int64_t dim, bool keepdim, Tensor &out) { return torch::any_out(out, self, dim, keepdim); }) .define_singleton_method( "_arange_out", *[](Scalar end, Tensor &out) { return torch::arange_out(out, end); }) .define_singleton_method( "_arange_start_out", *[](Scalar start, Scalar end, Scalar step, Tensor &out) { return torch::arange_out(out, start, end, step); }) .define_singleton_method( "_argmax", *[](const Tensor &self) { return torch::argmax(self); }) .define_singleton_method( "_argmax_dim", *[](const Tensor &self, int64_t dim, bool keepdim) { return torch::argmax(self, dim, keepdim); }) .define_singleton_method( "_argmin", *[](const Tensor &self) { return torch::argmin(self); }) .define_singleton_method( "_argmin_dim", *[](const Tensor &self, int64_t dim, bool keepdim) { return torch::argmin(self, dim, keepdim); }) .define_singleton_method( "_argsort", *[](const Tensor &self, int64_t dim, bool descending) { return torch::argsort(self, dim, descending); }) .define_singleton_method( "_as_strided", *[](Tensor &self, IntArrayRef size, IntArrayRef stride) { return torch::as_strided(self, size, stride); }) .define_singleton_method( "_as_strided_", *[](Tensor &self, IntArrayRef size, IntArrayRef stride) { return torch::as_strided_(self, size, stride); }) .define_singleton_method( "_as_strided__storage_offset", *[](Tensor &self, IntArrayRef size, IntArrayRef stride, int64_t storage_offset) { return torch::as_strided_(self, size, stride, storage_offset); }) .define_singleton_method( "_as_strided_storage_offset", *[](Tensor &self, IntArrayRef size, IntArrayRef 
stride, int64_t storage_offset) { return torch::as_strided(self, size, stride, storage_offset); }) .define_singleton_method( "_asin", *[](const Tensor &self) { return torch::asin(self); }) .define_singleton_method( "_asin_", *[](Tensor &self) { return torch::asin_(self); }) .define_singleton_method( "_asin_out", *[](const Tensor &self, Tensor &out) { return torch::asin_out(out, self); }) .define_singleton_method( "_atan", *[](const Tensor &self) { return torch::atan(self); }) .define_singleton_method( "_atan2", *[](const Tensor &self, const Tensor &other) { return torch::atan2(self, other); }) .define_singleton_method( "_atan2_out", *[](const Tensor &self, const Tensor &other, Tensor &out) { return torch::atan2_out(out, self, other); }) .define_singleton_method( "_atan_", *[](Tensor &self) { return torch::atan_(self); }) .define_singleton_method( "_atan_out", *[](const Tensor &self, Tensor &out) { return torch::atan_out(out, self); }) .define_singleton_method( "_avg_pool1d", *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { return torch::avg_pool1d(self, kernel_size, stride, padding, ceil_mode, count_include_pad); }) .define_singleton_method( "_baddbmm", *[](const Tensor &self, const Tensor &batch1, const Tensor &batch2, Scalar beta, Scalar alpha) { return torch::baddbmm(self, batch1, batch2, beta, alpha); }) .define_singleton_method( "_baddbmm_out", *[](const Tensor &self, const Tensor &batch1, const Tensor &batch2, Scalar beta, Scalar alpha, Tensor &out) { return torch::baddbmm_out(out, self, batch1, batch2, beta, alpha); }) .define_singleton_method( "_batch_norm", *[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool training, double momentum, double eps, bool cudnn_enabled) { return torch::batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled); }) .define_singleton_method( "_batch_norm_backward_elemt", *[](const Tensor &grad_out, const Tensor &input, const Tensor &mean, const Tensor &invstd, OptionalTensor weight, const Tensor &mean_dy, const Tensor &mean_dy_xmu) { return torch::batch_norm_backward_elemt(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu); }) .define_singleton_method( "_batch_norm_backward_reduce", *[](const Tensor &grad_out, const Tensor &input, const Tensor &mean, const Tensor &invstd, OptionalTensor weight, bool input_g, bool weight_g, bool bias_g) { return torch::batch_norm_backward_reduce(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g); }) .define_singleton_method( "_batch_norm_elemt", *[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, const Tensor &mean, const Tensor &invstd, double eps) { return torch::batch_norm_elemt(input, weight, bias, mean, invstd, eps); }) .define_singleton_method( "_batch_norm_gather_stats", *[](const Tensor &input, const Tensor &mean, const Tensor &invstd, OptionalTensor running_mean, OptionalTensor running_var, double momentum, double eps, int64_t count) { return torch::batch_norm_gather_stats(input, mean, invstd, running_mean, running_var, momentum, eps, count); }) .define_singleton_method( "_batch_norm_gather_stats_with_counts", *[](const Tensor &input, const Tensor &mean, const Tensor &invstd, OptionalTensor running_mean, OptionalTensor running_var, double momentum, double eps, IntArrayRef counts) { return torch::batch_norm_gather_stats_with_counts(input, mean, invstd, running_mean, 
running_var, momentum, eps, counts); }) .define_singleton_method( "_batch_norm_stats", *[](const Tensor &input, double eps) { return torch::batch_norm_stats(input, eps); }) .define_singleton_method( "_batch_norm_update_stats", *[](const Tensor &input, OptionalTensor running_mean, OptionalTensor running_var, double momentum) { return torch::batch_norm_update_stats(input, running_mean, running_var, momentum); }) .define_singleton_method( "_bernoulli", *[](const Tensor &self) { return torch::bernoulli(self); }) .define_singleton_method( "_bernoulli_out", *[](const Tensor &self, Tensor &out) { return torch::bernoulli_out(out, self); }) .define_singleton_method( "_bernoulli_p", *[](const Tensor &self, double p) { return torch::bernoulli(self, p); }) .define_singleton_method( "_bilinear", *[](const Tensor &input1, const Tensor &input2, const Tensor &weight, OptionalTensor bias) { return torch::bilinear(input1, input2, weight, bias); }) .define_singleton_method( "_binary_cross_entropy_with_logits", *[](const Tensor &self, const Tensor &target, OptionalTensor weight, OptionalTensor pos_weight, MyReduction reduction) { return torch::binary_cross_entropy_with_logits(self, target, weight, pos_weight, reduction); }) .define_singleton_method( "_bincount", *[](const Tensor &self, OptionalTensor weights, int64_t minlength) { return torch::bincount(self, weights, minlength); }) .define_singleton_method( "_bitwise_not", *[](const Tensor &self) { return torch::bitwise_not(self); }) .define_singleton_method( "_bitwise_not_out", *[](const Tensor &self, Tensor &out) { return torch::bitwise_not_out(out, self); }) .define_singleton_method( "_bmm", *[](const Tensor &self, const Tensor &mat2) { return torch::bmm(self, mat2); }) .define_singleton_method( "_bmm_out", *[](const Tensor &self, const Tensor &mat2, Tensor &out) { return torch::bmm_out(out, self, mat2); }) .define_singleton_method( "_broadcast_tensors", *[](TensorList tensors) { return torch::broadcast_tensors(tensors); }) .define_singleton_method( "_cartesian_prod", *[](TensorList tensors) { return torch::cartesian_prod(tensors); }) .define_singleton_method( "_cat", *[](TensorList tensors, int64_t dim) { return torch::cat(tensors, dim); }) .define_singleton_method( "_cat_out", *[](TensorList tensors, int64_t dim, Tensor &out) { return torch::cat_out(out, tensors, dim); }) .define_singleton_method( "_cdist", *[](const Tensor &x1, const Tensor &x2, double p) { return torch::cdist(x1, x2, p); }) .define_singleton_method( "_ceil", *[](const Tensor &self) { return torch::ceil(self); }) .define_singleton_method( "_ceil_", *[](Tensor &self) { return torch::ceil_(self); }) .define_singleton_method( "_ceil_out", *[](const Tensor &self, Tensor &out) { return torch::ceil_out(out, self); }) .define_singleton_method( "_celu", *[](const Tensor &self, Scalar alpha) { return torch::celu(self, alpha); }) .define_singleton_method( "_celu_", *[](Tensor &self, Scalar alpha) { return torch::celu_(self, alpha); }) .define_singleton_method( "_chain_matmul", *[](TensorList matrices) { return torch::chain_matmul(matrices); }) .define_singleton_method( "_cholesky", *[](const Tensor &self, bool upper) { return torch::cholesky(self, upper); }) .define_singleton_method( "_cholesky_inverse", *[](const Tensor &self, bool upper) { return torch::cholesky_inverse(self, upper); }) .define_singleton_method( "_cholesky_inverse_out", *[](const Tensor &self, bool upper, Tensor &out) { return torch::cholesky_inverse_out(out, self, upper); }) .define_singleton_method( "_cholesky_out", *[](const 
Tensor &self, bool upper, Tensor &out) { return torch::cholesky_out(out, self, upper); }) .define_singleton_method( "_cholesky_solve", *[](const Tensor &self, const Tensor &input2, bool upper) { return torch::cholesky_solve(self, input2, upper); }) .define_singleton_method( "_cholesky_solve_out", *[](const Tensor &self, const Tensor &input2, bool upper, Tensor &out) { return torch::cholesky_solve_out(out, self, input2, upper); }) .define_singleton_method( "_chunk", *[](Tensor &self, int64_t chunks, int64_t dim) { return torch::chunk(self, chunks, dim); }) .define_singleton_method( "_clamp_max", *[](const Tensor &self, Scalar max) { return torch::clamp_max(self, max); }) .define_singleton_method( "_clamp_max_", *[](Tensor &self, Scalar max) { return torch::clamp_max_(self, max); }) .define_singleton_method( "_clamp_max_out", *[](const Tensor &self, Scalar max, Tensor &out) { return torch::clamp_max_out(out, self, max); }) .define_singleton_method( "_clamp_min", *[](const Tensor &self, Scalar min) { return torch::clamp_min(self, min); }) .define_singleton_method( "_clamp_min_", *[](Tensor &self, Scalar min) { return torch::clamp_min_(self, min); }) .define_singleton_method( "_clamp_min_out", *[](const Tensor &self, Scalar min, Tensor &out) { return torch::clamp_min_out(out, self, min); }) .define_singleton_method( "_clone", *[](const Tensor &self) { return torch::clone(self); }) .define_singleton_method( "_combinations", *[](const Tensor &self, int64_t r, bool with_replacement) { return torch::combinations(self, r, with_replacement); }) .define_singleton_method( "_constant_pad_nd", *[](const Tensor &self, IntArrayRef pad, Scalar value) { return torch::constant_pad_nd(self, pad, value); }) .define_singleton_method( "_conv1d", *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) { return torch::conv1d(input, weight, bias, stride, padding, dilation, groups); }) .define_singleton_method( "_conv2d", *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) { return torch::conv2d(input, weight, bias, stride, padding, dilation, groups); }) .define_singleton_method( "_conv3d", *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) { return torch::conv3d(input, weight, bias, stride, padding, dilation, groups); }) .define_singleton_method( "_conv_tbc", *[](const Tensor &self, const Tensor &weight, const Tensor &bias, int64_t pad) { return torch::conv_tbc(self, weight, bias, pad); }) .define_singleton_method( "_conv_transpose1d", *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) { return torch::conv_transpose1d(input, weight, bias, stride, padding, output_padding, groups, dilation); }) .define_singleton_method( "_conv_transpose2d_input", *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) { return torch::conv_transpose2d(input, weight, bias, stride, padding, output_padding, groups, dilation); }) .define_singleton_method( "_conv_transpose3d_input", *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t 
groups, IntArrayRef dilation) { return torch::conv_transpose3d(input, weight, bias, stride, padding, output_padding, groups, dilation); }) .define_singleton_method( "_convolution", *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups) { return torch::convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); }) .define_singleton_method( "_convolution_overrideable", *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups) { return torch::convolution_overrideable(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); }) .define_singleton_method( "_copy_sparse_to_sparse_", *[](Tensor &self, const Tensor &src, bool non_blocking) { return torch::copy_sparse_to_sparse_(self, src, non_blocking); }) .define_singleton_method( "_cos", *[](const Tensor &self) { return torch::cos(self); }) .define_singleton_method( "_cos_", *[](Tensor &self) { return torch::cos_(self); }) .define_singleton_method( "_cos_out", *[](const Tensor &self, Tensor &out) { return torch::cos_out(out, self); }) .define_singleton_method( "_cosh", *[](const Tensor &self) { return torch::cosh(self); }) .define_singleton_method( "_cosh_", *[](Tensor &self) { return torch::cosh_(self); }) .define_singleton_method( "_cosh_out", *[](const Tensor &self, Tensor &out) { return torch::cosh_out(out, self); }) .define_singleton_method( "_cosine_embedding_loss", *[](const Tensor &input1, const Tensor &input2, const Tensor &target, double margin, MyReduction reduction) { return torch::cosine_embedding_loss(input1, input2, target, margin, reduction); }) .define_singleton_method( "_cosine_similarity", *[](const Tensor &x1, const Tensor &x2, int64_t dim, double eps) { return torch::cosine_similarity(x1, x2, dim, eps); }) .define_singleton_method( "_ctc_loss_intlist", *[](const Tensor &log_probs, const Tensor &targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, MyReduction reduction, bool zero_infinity) { return torch::ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity); }) .define_singleton_method( "_ctc_loss_tensor", *[](const Tensor &log_probs, const Tensor &targets, const Tensor &input_lengths, const Tensor &target_lengths, int64_t blank, MyReduction reduction, bool zero_infinity) { return torch::ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity); }) .define_singleton_method( "_cudnn_affine_grid_generator", *[](const Tensor &theta, int64_t N, int64_t C, int64_t H, int64_t W) { return torch::cudnn_affine_grid_generator(theta, N, C, H, W); }) .define_singleton_method( "_cudnn_batch_norm", *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool training, double exponential_average_factor, double epsilon) { return torch::cudnn_batch_norm(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon); }) .define_singleton_method( "_cudnn_convolution", *[](const Tensor &self, const Tensor &weight, OptionalTensor bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { return torch::cudnn_convolution(self, weight, bias, padding, stride, 
dilation, groups, benchmark, deterministic); }) .define_singleton_method( "_cudnn_convolution_backward_bias", *[](const Tensor &grad_output) { return torch::cudnn_convolution_backward_bias(grad_output); }) .define_singleton_method( "_cudnn_convolution_backward_input", *[](IntArrayRef self_size, const Tensor &grad_output, const Tensor &weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { return torch::cudnn_convolution_backward_input(self_size, grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic); }) .define_singleton_method( "_cudnn_convolution_backward_weight", *[](IntArrayRef weight_size, const Tensor &grad_output, const Tensor &self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { return torch::cudnn_convolution_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic); }) .define_singleton_method( "_cudnn_convolution_transpose", *[](const Tensor &self, const Tensor &weight, OptionalTensor bias, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { return torch::cudnn_convolution_transpose(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic); }) .define_singleton_method( "_cudnn_convolution_transpose_backward_bias", *[](const Tensor &grad_output) { return torch::cudnn_convolution_transpose_backward_bias(grad_output); }) .define_singleton_method( "_cudnn_convolution_transpose_backward_input", *[](const Tensor &grad_output, const Tensor &weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { return torch::cudnn_convolution_transpose_backward_input(grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic); }) .define_singleton_method( "_cudnn_convolution_transpose_backward_weight", *[](IntArrayRef weight_size, const Tensor &grad_output, const Tensor &self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { return torch::cudnn_convolution_transpose_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic); }) .define_singleton_method( "_cudnn_grid_sampler", *[](const Tensor &self, const Tensor &grid) { return torch::cudnn_grid_sampler(self, grid); }) .define_singleton_method( "_cudnn_is_acceptable", *[](const Tensor &self) { return torch::cudnn_is_acceptable(self); }) .define_singleton_method( "_dequantize", *[](const Tensor &self) { return torch::dequantize(self); }) .define_singleton_method( "_det", *[](const Tensor &self) { return torch::det(self); }) .define_singleton_method( "_detach", *[](const Tensor &self) { return torch::detach(self); }) .define_singleton_method( "_detach_", *[](Tensor &self) { return torch::detach_(self); }) .define_singleton_method( "_diag", *[](const Tensor &self, int64_t diagonal) { return torch::diag(self, diagonal); }) .define_singleton_method( "_diag_embed", *[](const Tensor &self, int64_t offset, int64_t dim1, int64_t dim2) { return torch::diag_embed(self, offset, dim1, dim2); }) .define_singleton_method( "_diag_out", *[](const Tensor &self, int64_t diagonal, Tensor &out) { return torch::diag_out(out, self, diagonal); }) .define_singleton_method( "_diagflat", *[](const Tensor &self, int64_t offset) { 
return torch::diagflat(self, offset); }) .define_singleton_method( "_diagonal", *[](Tensor &self, int64_t offset, int64_t dim1, int64_t dim2) { return torch::diagonal(self, offset, dim1, dim2); }) .define_singleton_method( "_digamma", *[](const Tensor &self) { return torch::digamma(self); }) .define_singleton_method( "_digamma_out", *[](const Tensor &self, Tensor &out) { return torch::digamma_out(out, self); }) .define_singleton_method( "_dist", *[](const Tensor &self, const Tensor &other, Scalar p) { return torch::dist(self, other, p); }) .define_singleton_method( "_div_out", *[](const Tensor &self, const Tensor &other, Tensor &out) { return torch::div_out(out, self, other); }) .define_singleton_method( "_div_scalar", *[](const Tensor &self, Scalar other) { return torch::div(self, other); }) .define_singleton_method( "_div_tensor", *[](const Tensor &self, const Tensor &other) { return torch::div(self, other); }) .define_singleton_method( "_dot", *[](const Tensor &self, const Tensor &tensor) { return torch::dot(self, tensor); }) .define_singleton_method( "_dot_out", *[](const Tensor &self, const Tensor &tensor, Tensor &out) { return torch::dot_out(out, self, tensor); }) .define_singleton_method( "_dropout", *[](const Tensor &input, double p, bool train) { return torch::dropout(input, p, train); }) .define_singleton_method( "_dropout_", *[](Tensor &self, double p, bool train) { return torch::dropout_(self, p, train); }) .define_singleton_method( "_eig", *[](const Tensor &self, bool eigenvectors) { return torch::eig(self, eigenvectors); }) .define_singleton_method( "_eig_e", *[](const Tensor &self, bool eigenvectors, Tensor &e, Tensor &v) { return torch::eig_out(e, v, self, eigenvectors); }) .define_singleton_method( "_embedding", *[](const Tensor &weight, const Tensor &indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) { return torch::embedding(weight, indices, padding_idx, scale_grad_by_freq, sparse); }) .define_singleton_method( "_embedding_bag", *[](const Tensor &weight, const Tensor &indices, const Tensor &offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, OptionalTensor per_sample_weights) { return torch::embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights); }) .define_singleton_method( "_embedding_renorm_", *[](Tensor &self, const Tensor &indices, double max_norm, double norm_type) { return torch::embedding_renorm_(self, indices, max_norm, norm_type); }) .define_singleton_method( "_empty_like", *[](const Tensor &self) { return torch::empty_like(self); }) .define_singleton_method( "_eq_scalar", *[](const Tensor &self, Scalar other) { return torch::eq(self, other); }) .define_singleton_method( "_eq_scalar_out", *[](const Tensor &self, Scalar other, Tensor &out) { return torch::eq_out(out, self, other); }) .define_singleton_method( "_eq_tensor", *[](const Tensor &self, const Tensor &other) { return torch::eq(self, other); }) .define_singleton_method( "_eq_tensor_out", *[](const Tensor &self, const Tensor &other, Tensor &out) { return torch::eq_out(out, self, other); }) .define_singleton_method( "_equal", *[](const Tensor &self, const Tensor &other) { return torch::equal(self, other); }) .define_singleton_method( "_erf", *[](const Tensor &self) { return torch::erf(self); }) .define_singleton_method( "_erf_", *[](Tensor &self) { return torch::erf_(self); }) .define_singleton_method( "_erf_out", *[](const Tensor &self, Tensor &out) { return torch::erf_out(out, self); }) .define_singleton_method( "_erfc", *[](const Tensor 
&self) { return torch::erfc(self); }) .define_singleton_method( "_erfc_", *[](Tensor &self) { return torch::erfc_(self); }) .define_singleton_method( "_erfc_out", *[](const Tensor &self, Tensor &out) { return torch::erfc_out(out, self); }) .define_singleton_method( "_erfinv", *[](const Tensor &self) { return torch::erfinv(self); }) .define_singleton_method( "_erfinv_out", *[](const Tensor &self, Tensor &out) { return torch::erfinv_out(out, self); }) .define_singleton_method( "_exp", *[](const Tensor &self) { return torch::exp(self); }) .define_singleton_method( "_exp_", *[](Tensor &self) { return torch::exp_(self); }) .define_singleton_method( "_exp_out", *[](const Tensor &self, Tensor &out) { return torch::exp_out(out, self); }) .define_singleton_method( "_expm1", *[](const Tensor &self) { return torch::expm1(self); }) .define_singleton_method( "_expm1_", *[](Tensor &self) { return torch::expm1_(self); }) .define_singleton_method( "_expm1_out", *[](const Tensor &self, Tensor &out) { return torch::expm1_out(out, self); }) .define_singleton_method( "_eye_m_out", *[](int64_t n, int64_t m, Tensor &out) { return torch::eye_out(out, n, m); }) .define_singleton_method( "_eye_out", *[](int64_t n, Tensor &out) { return torch::eye_out(out, n); }) .define_singleton_method( "_fake_quantize_per_channel_affine", *[](const Tensor &self, const Tensor &scale, const Tensor &zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { return torch::fake_quantize_per_channel_affine(self, scale, zero_point, axis, quant_min, quant_max); }) .define_singleton_method( "_fake_quantize_per_tensor_affine", *[](const Tensor &self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { return torch::fake_quantize_per_tensor_affine(self, scale, zero_point, quant_min, quant_max); }) .define_singleton_method( "_fbgemm_linear_fp16_weight", *[](const Tensor &input, const Tensor &packed_weight, const Tensor &bias) { return torch::fbgemm_linear_fp16_weight(input, packed_weight, bias); }) .define_singleton_method( "_fbgemm_linear_fp16_weight_fp32_activation", *[](const Tensor &input, const Tensor &packed_weight, const Tensor &bias) { return torch::fbgemm_linear_fp16_weight_fp32_activation(input, packed_weight, bias); }) .define_singleton_method( "_fbgemm_linear_int8_weight", *[](const Tensor &input, const Tensor &weight, const Tensor &packed, const Tensor &col_offsets, Scalar weight_scale, Scalar weight_zero_point, const Tensor &bias) { return torch::fbgemm_linear_int8_weight(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); }) .define_singleton_method( "_fbgemm_linear_int8_weight_fp32_activation", *[](const Tensor &input, const Tensor &weight, const Tensor &packed, const Tensor &col_offsets, Scalar weight_scale, Scalar weight_zero_point, const Tensor &bias) { return torch::fbgemm_linear_int8_weight_fp32_activation(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); }) .define_singleton_method( "_fbgemm_linear_quantize_weight", *[](const Tensor &input) { return torch::fbgemm_linear_quantize_weight(input); }) .define_singleton_method( "_fbgemm_pack_gemm_matrix_fp16", *[](const Tensor &input) { return torch::fbgemm_pack_gemm_matrix_fp16(input); }) .define_singleton_method( "_fbgemm_pack_quantized_matrix", *[](const Tensor &input) { return torch::fbgemm_pack_quantized_matrix(input); }) .define_singleton_method( "_fbgemm_pack_quantized_matrix_kn", *[](const Tensor &input, int64_t K, int64_t N) { return torch::fbgemm_pack_quantized_matrix(input, K, N); }) 
.define_singleton_method( "_feature_alpha_dropout", *[](const Tensor &input, double p, bool train) { return torch::feature_alpha_dropout(input, p, train); }) .define_singleton_method( "_feature_alpha_dropout_", *[](Tensor &self, double p, bool train) { return torch::feature_alpha_dropout_(self, p, train); }) .define_singleton_method( "_feature_dropout", *[](const Tensor &input, double p, bool train) { return torch::feature_dropout(input, p, train); }) .define_singleton_method( "_feature_dropout_", *[](Tensor &self, double p, bool train) { return torch::feature_dropout_(self, p, train); }) .define_singleton_method( "_fft", *[](const Tensor &self, int64_t signal_ndim, bool normalized) { return torch::fft(self, signal_ndim, normalized); }) .define_singleton_method( "_fill__scalar", *[](Tensor &self, Scalar value) { return torch::fill_(self, value); }) .define_singleton_method( "_fill__tensor", *[](Tensor &self, const Tensor &value) { return torch::fill_(self, value); }) .define_singleton_method( "_flatten_using_ints", *[](const Tensor &self, int64_t start_dim, int64_t end_dim) { return torch::flatten(self, start_dim, end_dim); }) .define_singleton_method( "_flip", *[](const Tensor &self, IntArrayRef dims) { return torch::flip(self, dims); }) .define_singleton_method( "_floor", *[](const Tensor &self) { return torch::floor(self); }) .define_singleton_method( "_floor_", *[](Tensor &self) { return torch::floor_(self); }) .define_singleton_method( "_floor_out", *[](const Tensor &self, Tensor &out) { return torch::floor_out(out, self); }) .define_singleton_method( "_fmod_scalar", *[](const Tensor &self, Scalar other) { return torch::fmod(self, other); }) .define_singleton_method( "_fmod_scalar_out", *[](const Tensor &self, Scalar other, Tensor &out) { return torch::fmod_out(out, self, other); }) .define_singleton_method( "_fmod_tensor", *[](const Tensor &self, const Tensor &other) { return torch::fmod(self, other); }) .define_singleton_method( "_fmod_tensor_out", *[](const Tensor &self, const Tensor &other, Tensor &out) { return torch::fmod_out(out, self, other); }) .define_singleton_method( "_frac", *[](const Tensor &self) { return torch::frac(self); }) .define_singleton_method( "_frac_", *[](Tensor &self) { return torch::frac_(self); }) .define_singleton_method( "_frac_out", *[](const Tensor &self, Tensor &out) { return torch::frac_out(out, self); }) .define_singleton_method( "_frobenius_norm", *[](const Tensor &self) { return torch::frobenius_norm(self); }) .define_singleton_method( "_frobenius_norm_dim", *[](const Tensor &self, IntArrayRef dim, bool keepdim) { return torch::frobenius_norm(self, dim, keepdim); }) .define_singleton_method( "_frobenius_norm_out", *[](const Tensor &self, IntArrayRef dim, bool keepdim, Tensor &out) { return torch::frobenius_norm_out(out, self, dim, keepdim); }) .define_singleton_method( "_full_like", *[](const Tensor &self, Scalar fill_value) { return torch::full_like(self, fill_value); }) .define_singleton_method( "_full_out", *[](IntArrayRef size, Scalar fill_value, Tensor &out) { return torch::full_out(out, size, fill_value); }) .define_singleton_method( "_gather", *[](const Tensor &self, int64_t dim, const Tensor &index, bool sparse_grad) { return torch::gather(self, dim, index, sparse_grad); }) .define_singleton_method( "_gather_out", *[](const Tensor &self, int64_t dim, const Tensor &index, bool sparse_grad, Tensor &out) { return torch::gather_out(out, self, dim, index, sparse_grad); }) .define_singleton_method( "_ge_scalar", *[](const Tensor &self, Scalar 
other) { return torch::ge(self, other); }) .define_singleton_method( "_ge_scalar_out", *[](const Tensor &self, Scalar other, Tensor &out) { return torch::ge_out(out, self, other); }) .define_singleton_method( "_ge_tensor", *[](const Tensor &self, const Tensor &other) { return torch::ge(self, other); }) .define_singleton_method( "_ge_tensor_out", *[](const Tensor &self, const Tensor &other, Tensor &out) { return torch::ge_out(out, self, other); }) .define_singleton_method( "_geqrf", *[](const Tensor &self) { return torch::geqrf(self); }) .define_singleton_method( "_geqrf_a", *[](const Tensor &self, Tensor &a, Tensor &tau) { return torch::geqrf_out(a, tau, self); }) .define_singleton_method( "_ger", *[](const Tensor &self, const Tensor &vec2) { return torch::ger(self, vec2); }) .define_singleton_method( "_ger_out", *[](const Tensor &self, const Tensor &vec2, Tensor &out) { return torch::ger_out(out, self, vec2); }) .define_singleton_method( "_grid_sampler", *[](const Tensor &input, const Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { return torch::grid_sampler(input, grid, interpolation_mode, padding_mode, align_corners); }) .define_singleton_method( "_grid_sampler_2d", *[](const Tensor &input, const Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { return torch::grid_sampler_2d(input, grid, interpolation_mode, padding_mode, align_corners); }) .define_singleton_method( "_grid_sampler_3d", *[](const Tensor &input, const Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { return torch::grid_sampler_3d(input, grid, interpolation_mode, padding_mode, align_corners); }) .define_singleton_method( "_group_norm", *[](const Tensor &input, int64_t num_groups, OptionalTensor weight, OptionalTensor bias, double eps, bool cudnn_enabled) { return torch::group_norm(input, num_groups, weight, bias, eps, cudnn_enabled); }) .define_singleton_method( "_gru_cell", *[](const Tensor &input, const Tensor &hx, const Tensor &w_ih, const Tensor &w_hh, OptionalTensor b_ih, OptionalTensor b_hh) { return torch::gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh); }) .define_singleton_method( "_gru_data", *[](const Tensor &data, const Tensor &batch_sizes, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { return torch::gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); }) .define_singleton_method( "_gru_input", *[](const Tensor &input, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { return torch::gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); }) .define_singleton_method( "_gt_scalar", *[](const Tensor &self, Scalar other) { return torch::gt(self, other); }) .define_singleton_method( "_gt_scalar_out", *[](const Tensor &self, Scalar other, Tensor &out) { return torch::gt_out(out, self, other); }) .define_singleton_method( "_gt_tensor", *[](const Tensor &self, const Tensor &other) { return torch::gt(self, other); }) .define_singleton_method( "_gt_tensor_out", *[](const Tensor &self, const Tensor &other, Tensor &out) { return torch::gt_out(out, self, other); }) .define_singleton_method( "_hardshrink", *[](const Tensor &self, Scalar lambd) { return torch::hardshrink(self, lambd); }) .define_singleton_method( "_hinge_embedding_loss", *[](const Tensor &self, const Tensor &target, 
double margin, MyReduction reduction) { return torch::hinge_embedding_loss(self, target, margin, reduction); }) .define_singleton_method( "_histc", *[](const Tensor &self, int64_t bins, Scalar min, Scalar max) { return torch::histc(self, bins, min, max); }) .define_singleton_method( "_histc_out", *[](const Tensor &self, int64_t bins, Scalar min, Scalar max, Tensor &out) { return torch::histc_out(out, self, bins, min, max); }) .define_singleton_method( "_hspmm", *[](const Tensor &mat1, const Tensor &mat2) { return torch::hspmm(mat1, mat2); }) .define_singleton_method( "_hspmm_out", *[](const Tensor &mat1, const Tensor &mat2, Tensor &out) { return torch::hspmm_out(out, mat1, mat2); }) .define_singleton_method( "_ifft", *[](const Tensor &self, int64_t signal_ndim, bool normalized) { return torch::ifft(self, signal_ndim, normalized); }) .define_singleton_method( "_index_add", *[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &source) { return torch::index_add(self, dim, index, source); }) .define_singleton_method( "_index_copy", *[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &source) { return torch::index_copy(self, dim, index, source); }) .define_singleton_method( "_index_fill_scalar", *[](const Tensor &self, int64_t dim, const Tensor &index, Scalar value) { return torch::index_fill(self, dim, index, value); }) .define_singleton_method( "_index_fill_tensor", *[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &value) { return torch::index_fill(self, dim, index, value); }) .define_singleton_method( "_index_select", *[](const Tensor &self, int64_t dim, const Tensor &index) { return torch::index_select(self, dim, index); }) .define_singleton_method( "_index_select_out", *[](const Tensor &self, int64_t dim, const Tensor &index, Tensor &out) { return torch::index_select_out(out, self, dim, index); }) .define_singleton_method( "_instance_norm", *[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) { return torch::instance_norm(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled); }) .define_singleton_method( "_int_repr", *[](const Tensor &self) { return torch::int_repr(self); }) .define_singleton_method( "_inverse", *[](const Tensor &self) { return torch::inverse(self); }) .define_singleton_method( "_inverse_out", *[](const Tensor &self, Tensor &out) { return torch::inverse_out(out, self); }) .define_singleton_method( "_irfft", *[](const Tensor &self, int64_t signal_ndim, bool normalized, bool onesided, IntArrayRef signal_sizes) { return torch::irfft(self, signal_ndim, normalized, onesided, signal_sizes); }) .define_singleton_method( "_is_complex", *[](const Tensor &self) { return torch::is_complex(self); }) .define_singleton_method( "_is_distributed", *[](const Tensor &self) { return torch::is_distributed(self); }) .define_singleton_method( "_is_floating_point", *[](const Tensor &self) { return torch::is_floating_point(self); }) .define_singleton_method( "_is_nonzero", *[](const Tensor &self) { return torch::is_nonzero(self); }) .define_singleton_method( "_is_same_size", *[](const Tensor &self, const Tensor &other) { return torch::is_same_size(self, other); }) .define_singleton_method( "_is_signed", *[](const Tensor &self) { return torch::is_signed(self); }) .define_singleton_method( "_isclose", *[](const Tensor &self, const Tensor &other, double rtol, 
double atol, bool equal_nan) { return torch::isclose(self, other, rtol, atol, equal_nan); }) .define_singleton_method( "_isnan", *[](const Tensor &self) { return torch::isnan(self); }) .define_singleton_method( "_kl_div", *[](const Tensor &self, const Tensor &target, MyReduction reduction) { return torch::kl_div(self, target, reduction); }) .define_singleton_method( "_kthvalue", *[](const Tensor &self, int64_t k, int64_t dim, bool keepdim) { return torch::kthvalue(self, k, dim, keepdim); }) .define_singleton_method( "_kthvalue_values", *[](const Tensor &self, int64_t k, int64_t dim, bool keepdim, Tensor &values, Tensor &indices) { return torch::kthvalue_out(values, indices, self, k, dim, keepdim); }) .define_singleton_method( "_layer_norm", *[](const Tensor &input, IntArrayRef normalized_shape, OptionalTensor weight, OptionalTensor bias, double eps, bool cudnn_enable) { return torch::layer_norm(input, normalized_shape, weight, bias, eps, cudnn_enable); }) .define_singleton_method( "_le_scalar", *[](const Tensor &self, Scalar other) { return torch::le(self, other); }) .define_singleton_method( "_le_scalar_out", *[](const Tensor &self, Scalar other, Tensor &out) { return torch::le_out(out, self, other); }) .define_singleton_method( "_le_tensor", *[](const Tensor &self, const Tensor &other) { return torch::le(self, other); }) .define_singleton_method( "_le_tensor_out", *[](const Tensor &self, const Tensor &other, Tensor &out) { return torch::le_out(out, self, other); }) .define_singleton_method( "_lerp_scalar", *[](const Tensor &self, const Tensor &end, Scalar weight) { return torch::lerp(self, end, weight); }) .define_singleton_method( "_lerp_scalar_out", *[](const Tensor &self, const Tensor &end, Scalar weight, Tensor &out) { return torch::lerp_out(out, self, end, weight); }) .define_singleton_method( "_lerp_tensor", *[](const Tensor &self, const Tensor &end, const Tensor &weight) { return torch::lerp(self, end, weight); }) .define_singleton_method( "_lerp_tensor_out", *[](const Tensor &self, const Tensor &end, const Tensor &weight, Tensor &out) { return torch::lerp_out(out, self, end, weight); }) .define_singleton_method( "_lgamma", *[](const Tensor &self) { return torch::lgamma(self); }) .define_singleton_method( "_lgamma_out", *[](const Tensor &self, Tensor &out) { return torch::lgamma_out(out, self); }) .define_singleton_method( "_linspace_out", *[](Scalar start, Scalar end, int64_t steps, Tensor &out) { return torch::linspace_out(out, start, end, steps); }) .define_singleton_method( "_log", *[](const Tensor &self) { return torch::log(self); }) .define_singleton_method( "_log10", *[](const Tensor &self) { return torch::log10(self); }) .define_singleton_method( "_log10_", *[](Tensor &self) { return torch::log10_(self); }) .define_singleton_method( "_log10_out", *[](const Tensor &self, Tensor &out) { return torch::log10_out(out, self); }) .define_singleton_method( "_log1p", *[](const Tensor &self) { return torch::log1p(self); }) .define_singleton_method( "_log1p_", *[](Tensor &self) { return torch::log1p_(self); }) .define_singleton_method( "_log1p_out", *[](const Tensor &self, Tensor &out) { return torch::log1p_out(out, self); }) .define_singleton_method( "_log2", *[](const Tensor &self) { return torch::log2(self); }) .define_singleton_method( "_log2_", *[](Tensor &self) { return torch::log2_(self); }) .define_singleton_method( "_log2_out", *[](const Tensor &self, Tensor &out) { return torch::log2_out(out, self); }) .define_singleton_method( "_log_", *[](Tensor &self) { return 
torch::log_(self); }) .define_singleton_method( "_log_out", *[](const Tensor &self, Tensor &out) { return torch::log_out(out, self); }) .define_singleton_method( "_logdet", *[](const Tensor &self) { return torch::logdet(self); }) .define_singleton_method( "_logical_not", *[](const Tensor &self) { return torch::logical_not(self); }) .define_singleton_method( "_logical_not_out", *[](const Tensor &self, Tensor &out) { return torch::logical_not_out(out, self); }) .define_singleton_method( "_logical_xor", *[](const Tensor &self, const Tensor &other) { return torch::logical_xor(self, other); }) .define_singleton_method( "_logical_xor_out", *[](const Tensor &self, const Tensor &other, Tensor &out) { return torch::logical_xor_out(out, self, other); }) .define_singleton_method( "_logspace_out", *[](Scalar start, Scalar end, int64_t steps, double base, Tensor &out) { return torch::logspace_out(out, start, end, steps, base); }) .define_singleton_method( "_logsumexp", *[](const Tensor &self, IntArrayRef dim, bool keepdim) { return torch::logsumexp(self, dim, keepdim); }) .define_singleton_method( "_logsumexp_out", *[](const Tensor &self, IntArrayRef dim, bool keepdim, Tensor &out) { return torch::logsumexp_out(out, self, dim, keepdim); }) .define_singleton_method( "_lstm_cell", *[](const Tensor &input, TensorList hx, const Tensor &w_ih, const Tensor &w_hh, OptionalTensor b_ih, OptionalTensor b_hh) { return torch::lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh); }) .define_singleton_method( "_lstm_data", *[](const Tensor &data, const Tensor &batch_sizes, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { return torch::lstm(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); }) .define_singleton_method( "_lstm_input", *[](const Tensor &input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { return torch::lstm(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); }) .define_singleton_method( "_lstsq", *[](const Tensor &self, const Tensor &A) { return torch::lstsq(self, A); }) .define_singleton_method( "_lstsq_x", *[](const Tensor &self, const Tensor &A, Tensor &X, Tensor &qr) { return torch::lstsq_out(X, qr, self, A); }) .define_singleton_method( "_lt_scalar", *[](const Tensor &self, Scalar other) { return torch::lt(self, other); }) .define_singleton_method( "_lt_scalar_out", *[](const Tensor &self, Scalar other, Tensor &out) { return torch::lt_out(out, self, other); }) .define_singleton_method( "_lt_tensor", *[](const Tensor &self, const Tensor &other) { return torch::lt(self, other); }) .define_singleton_method( "_lt_tensor_out", *[](const Tensor &self, const Tensor &other, Tensor &out) { return torch::lt_out(out, self, other); }) .define_singleton_method( "_lu_solve", *[](const Tensor &self, const Tensor &LU_data, const Tensor &LU_pivots) { return torch::lu_solve(self, LU_data, LU_pivots); }) .define_singleton_method( "_lu_solve_out", *[](const Tensor &self, const Tensor &LU_data, const Tensor &LU_pivots, Tensor &out) { return torch::lu_solve_out(out, self, LU_data, LU_pivots); }) .define_singleton_method( "_margin_ranking_loss", *[](const Tensor &input1, const Tensor &input2, const Tensor &target, double margin, MyReduction reduction) { return torch::margin_ranking_loss(input1, input2, target, margin, reduction); }) .define_singleton_method( "_masked_fill_scalar", *[](const 
Tensor &self, const Tensor &mask, Scalar value) { return torch::masked_fill(self, mask, value); }) .define_singleton_method( "_masked_fill_tensor", *[](const Tensor &self, const Tensor &mask, const Tensor &value) { return torch::masked_fill(self, mask, value); }) .define_singleton_method( "_masked_scatter", *[](const Tensor &self, const Tensor &mask, const Tensor &source) { return torch::masked_scatter(self, mask, source); }) .define_singleton_method( "_masked_select", *[](const Tensor &self, const Tensor &mask) { return torch::masked_select(self, mask); }) .define_singleton_method( "_masked_select_out", *[](const Tensor &self, const Tensor &mask, Tensor &out) { return torch::masked_select_out(out, self, mask); }) .define_singleton_method( "_matmul", *[](const Tensor &self, const Tensor &other) { return torch::matmul(self, other); }) .define_singleton_method( "_matmul_out", *[](const Tensor &self, const Tensor &other, Tensor &out) { return torch::matmul_out(out, self, other); }) .define_singleton_method( "_matrix_power", *[](const Tensor &self, int64_t n) { return torch::matrix_power(self, n); }) .define_singleton_method( "_matrix_rank", *[](const Tensor &self, bool symmetric) { return torch::matrix_rank(self, symmetric); }) .define_singleton_method( "_matrix_rank_tol", *[](const Tensor &self, double tol, bool symmetric) { return torch::matrix_rank(self, tol, symmetric); }) .define_singleton_method( "_max", *[](const Tensor &self) { return torch::max(self); }) .define_singleton_method( "_max_dim", *[](const Tensor &self, int64_t dim, bool keepdim) { return torch::max(self, dim, keepdim); }) .define_singleton_method( "_max_dim_max", *[](const Tensor &self, int64_t dim, bool keepdim, Tensor &max, Tensor &max_values) { return torch::max_out(max, max_values, self, dim, keepdim); }) .define_singleton_method( "_max_other", *[](const Tensor &self, const Tensor &other) { return torch::max(self, other); }) .define_singleton_method( "_max_out", *[](const Tensor &self, const Tensor &other, Tensor &out) { return torch::max_out(out, self, other); }) .define_singleton_method( "_max_pool1d", *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { return torch::max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode); }) .define_singleton_method( "_max_pool1d_with_indices", *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { return torch::max_pool1d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode); }) .define_singleton_method( "_max_pool2d", *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { return torch::max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode); }) .define_singleton_method( "_max_pool3d", *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { return torch::max_pool3d(self, kernel_size, stride, padding, dilation, ceil_mode); }) .define_singleton_method( "_max_values", *[](const Tensor &self, IntArrayRef dim, bool keepdim) { return torch::max_values(self, dim, keepdim); }) .define_singleton_method( "_median", *[](const Tensor &self) { return torch::median(self); }) .define_singleton_method( "_median_dim", *[](const Tensor &self, int64_t dim, bool keepdim) { return torch::median(self, dim, keepdim); }) .define_singleton_method( "_median_dim_values", 
*[](const Tensor &self, int64_t dim, bool keepdim, Tensor &values, Tensor &indices) { return torch::median_out(values, indices, self, dim, keepdim); }) .define_singleton_method( "_meshgrid", *[](TensorList tensors) { return torch::meshgrid(tensors); }) .define_singleton_method( "_min", *[](const Tensor &self) { return torch::min(self); }) .define_singleton_method( "_min_dim", *[](const Tensor &self, int64_t dim, bool keepdim) { return torch::min(self, dim, keepdim); }) .define_singleton_method( "_min_dim_min", *[](const Tensor &self, int64_t dim, bool keepdim, Tensor &min, Tensor &min_indices) { return torch::min_out(min, min_indices, self, dim, keepdim); }) .define_singleton_method( "_min_other", *[](const Tensor &self, const Tensor &other) { return torch::min(self, other); }) .define_singleton_method( "_min_out", *[](const Tensor &self, const Tensor &other, Tensor &out) { return torch::min_out(out, self, other); }) .define_singleton_method( "_min_values", *[](const Tensor &self, IntArrayRef dim, bool keepdim) { return torch::min_values(self, dim, keepdim); }) .define_singleton_method( "_miopen_batch_norm", *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool training, double exponential_average_factor, double epsilon) { return torch::miopen_batch_norm(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon); }) .define_singleton_method( "_miopen_convolution", *[](const Tensor &self, const Tensor &weight, OptionalTensor bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { return torch::miopen_convolution(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); }) .define_singleton_method( "_miopen_convolution_backward_bias", *[](const Tensor &grad_output) { return torch::miopen_convolution_backward_bias(grad_output); }) .define_singleton_method( "_miopen_convolution_backward_input", *[](IntArrayRef self_size, const Tensor &grad_output, const Tensor &weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { return torch::miopen_convolution_backward_input(self_size, grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic); }) .define_singleton_method( "_miopen_convolution_backward_weight", *[](IntArrayRef weight_size, const Tensor &grad_output, const Tensor &self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { return torch::miopen_convolution_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic); }) .define_singleton_method( "_miopen_convolution_transpose", *[](const Tensor &self, const Tensor &weight, OptionalTensor bias, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { return torch::miopen_convolution_transpose(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic); }) .define_singleton_method( "_miopen_convolution_transpose_backward_input", *[](const Tensor &grad_output, const Tensor &weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { return torch::miopen_convolution_transpose_backward_input(grad_output, weight, padding, stride, dilation, groups, benchmark, 
deterministic); }) .define_singleton_method( "_miopen_convolution_transpose_backward_weight", *[](IntArrayRef weight_size, const Tensor &grad_output, const Tensor &self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { return torch::miopen_convolution_transpose_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic); }) .define_singleton_method( "_miopen_depthwise_convolution", *[](const Tensor &self, const Tensor &weight, OptionalTensor bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { return torch::miopen_depthwise_convolution(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); }) .define_singleton_method( "_miopen_depthwise_convolution_backward_input", *[](IntArrayRef self_size, const Tensor &grad_output, const Tensor &weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { return torch::miopen_depthwise_convolution_backward_input(self_size, grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic); }) .define_singleton_method( "_miopen_depthwise_convolution_backward_weight", *[](IntArrayRef weight_size, const Tensor &grad_output, const Tensor &self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { return torch::miopen_depthwise_convolution_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic); }) .define_singleton_method( "_miopen_rnn", *[](const Tensor &input, TensorList weight, int64_t weight_stride0, const Tensor &hx, OptionalTensor cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, OptionalTensor dropout_state) { return torch::miopen_rnn(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state); }) .define_singleton_method( "_mkldnn_adaptive_avg_pool2d", *[](const Tensor &self, IntArrayRef output_size) { return torch::mkldnn_adaptive_avg_pool2d(self, output_size); }) .define_singleton_method( "_mkldnn_convolution", *[](const Tensor &self, const Tensor &weight, OptionalTensor bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups) { return torch::mkldnn_convolution(self, weight, bias, padding, stride, dilation, groups); }) .define_singleton_method( "_mkldnn_convolution_backward_input", *[](IntArrayRef self_size, const Tensor &grad_output, const Tensor &weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined) { return torch::mkldnn_convolution_backward_input(self_size, grad_output, weight, padding, stride, dilation, groups, bias_defined); }) .define_singleton_method( "_mkldnn_convolution_backward_weights", *[](IntArrayRef weight_size, const Tensor &grad_output, const Tensor &self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined) { return torch::mkldnn_convolution_backward_weights(weight_size, grad_output, self, padding, stride, dilation, groups, bias_defined); }) .define_singleton_method( "_mkldnn_max_pool2d", *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { return 
torch::mkldnn_max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode); }) .define_singleton_method( "_mm", *[](const Tensor &self, const Tensor &mat2) { return torch::mm(self, mat2); }) .define_singleton_method( "_mm_out", *[](const Tensor &self, const Tensor &mat2, Tensor &out) { return torch::mm_out(out, self, mat2); }) .define_singleton_method( "_mode", *[](const Tensor &self, int64_t dim, bool keepdim) { return torch::mode(self, dim, keepdim); }) .define_singleton_method( "_mode_values", *[](const Tensor &self, int64_t dim, bool keepdim, Tensor &values, Tensor &indices) { return torch::mode_out(values, indices, self, dim, keepdim); }) .define_singleton_method( "_mul_out", *[](const Tensor &self, const Tensor &other, Tensor &out) { return torch::mul_out(out, self, other); }) .define_singleton_method( "_mul_scalar", *[](const Tensor &self, Scalar other) { return torch::mul(self, other); }) .define_singleton_method( "_mul_tensor", *[](const Tensor &self, const Tensor &other) { return torch::mul(self, other); }) .define_singleton_method( "_multinomial", *[](const Tensor &self, int64_t num_samples, bool replacement) { return torch::multinomial(self, num_samples, replacement); }) .define_singleton_method( "_multinomial_out", *[](const Tensor &self, int64_t num_samples, bool replacement, Tensor &out) { return torch::multinomial_out(out, self, num_samples, replacement); }) .define_singleton_method( "_mv", *[](const Tensor &self, const Tensor &vec) { return torch::mv(self, vec); }) .define_singleton_method( "_mv_out", *[](const Tensor &self, const Tensor &vec, Tensor &out) { return torch::mv_out(out, self, vec); }) .define_singleton_method( "_mvlgamma", *[](const Tensor &self, int64_t p) { return torch::mvlgamma(self, p); }) .define_singleton_method( "_narrow", *[](Tensor &self, int64_t dim, int64_t start, int64_t length) { return torch::narrow(self, dim, start, length); }) .define_singleton_method( "_native_batch_norm", *[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool training, double momentum, double eps) { return torch::native_batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps); }) .define_singleton_method( "_native_layer_norm", *[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, int64_t M, int64_t N, double eps) { return torch::native_layer_norm(input, weight, bias, M, N, eps); }) .define_singleton_method( "_native_norm", *[](const Tensor &self, Scalar p) { return torch::native_norm(self, p); }) .define_singleton_method( "_ne_scalar", *[](const Tensor &self, Scalar other) { return torch::ne(self, other); }) .define_singleton_method( "_ne_scalar_out", *[](const Tensor &self, Scalar other, Tensor &out) { return torch::ne_out(out, self, other); }) .define_singleton_method( "_ne_tensor", *[](const Tensor &self, const Tensor &other) { return torch::ne(self, other); }) .define_singleton_method( "_ne_tensor_out", *[](const Tensor &self, const Tensor &other, Tensor &out) { return torch::ne_out(out, self, other); }) .define_singleton_method( "_neg", *[](const Tensor &self) { return torch::neg(self); }) .define_singleton_method( "_neg_", *[](Tensor &self) { return torch::neg_(self); }) .define_singleton_method( "_neg_out", *[](const Tensor &self, Tensor &out) { return torch::neg_out(out, self); }) .define_singleton_method( "_nonzero", *[](const Tensor &self) { return torch::nonzero(self); }) .define_singleton_method( "_nonzero_numpy", *[](const Tensor &self) { 
return torch::nonzero_numpy(self); }) .define_singleton_method( "_nonzero_out", *[](const Tensor &self, Tensor &out) { return torch::nonzero_out(out, self); }) .define_singleton_method( "_norm_except_dim", *[](const Tensor &v, int64_t pow, int64_t dim) { return torch::norm_except_dim(v, pow, dim); }) .define_singleton_method( "_norm_scalar", *[](const Tensor &self, Scalar p) { return torch::norm(self, p); }) .define_singleton_method( "_nuclear_norm", *[](const Tensor &self, bool keepdim) { return torch::nuclear_norm(self, keepdim); }) .define_singleton_method( "_nuclear_norm_dim", *[](const Tensor &self, IntArrayRef dim, bool keepdim) { return torch::nuclear_norm(self, dim, keepdim); }) .define_singleton_method( "_nuclear_norm_dim_out", *[](const Tensor &self, IntArrayRef dim, bool keepdim, Tensor &out) { return torch::nuclear_norm_out(out, self, dim, keepdim); }) .define_singleton_method( "_nuclear_norm_out", *[](const Tensor &self, bool keepdim, Tensor &out) { return torch::nuclear_norm_out(out, self, keepdim); }) .define_singleton_method( "_numel", *[](const Tensor &self) { return torch::numel(self); }) .define_singleton_method( "_ones_like", *[](const Tensor &self) { return torch::ones_like(self); }) .define_singleton_method( "_ones_out", *[](IntArrayRef size, Tensor &out) { return torch::ones_out(out, size); }) .define_singleton_method( "_orgqr", *[](const Tensor &self, const Tensor &input2) { return torch::orgqr(self, input2); }) .define_singleton_method( "_orgqr_out", *[](const Tensor &self, const Tensor &input2, Tensor &out) { return torch::orgqr_out(out, self, input2); }) .define_singleton_method( "_ormqr", *[](const Tensor &self, const Tensor &input2, const Tensor &input3, bool left, bool transpose) { return torch::ormqr(self, input2, input3, left, transpose); }) .define_singleton_method( "_ormqr_out", *[](const Tensor &self, const Tensor &input2, const Tensor &input3, bool left, bool transpose, Tensor &out) { return torch::ormqr_out(out, self, input2, input3, left, transpose); }) .define_singleton_method( "_pairwise_distance", *[](const Tensor &x1, const Tensor &x2, double p, double eps, bool keepdim) { return torch::pairwise_distance(x1, x2, p, eps, keepdim); }) .define_singleton_method( "_pdist", *[](const Tensor &self, double p) { return torch::pdist(self, p); }) .define_singleton_method( "_pinverse", *[](const Tensor &self, double rcond) { return torch::pinverse(self, rcond); }) .define_singleton_method( "_pixel_shuffle", *[](const Tensor &self, int64_t upscale_factor) { return torch::pixel_shuffle(self, upscale_factor); }) .define_singleton_method( "_poisson", *[](const Tensor &self) { return torch::poisson(self); }) .define_singleton_method( "_poisson_nll_loss", *[](const Tensor &input, const Tensor &target, bool log_input, bool full, double eps, MyReduction reduction) { return torch::poisson_nll_loss(input, target, log_input, full, eps, reduction); }) .define_singleton_method( "_polygamma", *[](int64_t n, const Tensor &self) { return torch::polygamma(n, self); }) .define_singleton_method( "_polygamma_out", *[](int64_t n, const Tensor &self, Tensor &out) { return torch::polygamma_out(out, n, self); }) .define_singleton_method( "_pow_scalar", *[](Scalar self, const Tensor &exponent) { return torch::pow(self, exponent); }) .define_singleton_method( "_pow_scalar_out", *[](Scalar self, const Tensor &exponent, Tensor &out) { return torch::pow_out(out, self, exponent); }) .define_singleton_method( "_pow_tensor_scalar", *[](const Tensor &self, Scalar exponent) { return 
torch::pow(self, exponent); }) .define_singleton_method( "_pow_tensor_scalar_out", *[](const Tensor &self, Scalar exponent, Tensor &out) { return torch::pow_out(out, self, exponent); }) .define_singleton_method( "_pow_tensor_tensor", *[](const Tensor &self, const Tensor &exponent) { return torch::pow(self, exponent); }) .define_singleton_method( "_pow_tensor_tensor_out", *[](const Tensor &self, const Tensor &exponent, Tensor &out) { return torch::pow_out(out, self, exponent); }) .define_singleton_method( "_prelu", *[](const Tensor &self, const Tensor &weight) { return torch::prelu(self, weight); }) .define_singleton_method( "_q_per_channel_axis", *[](const Tensor &self) { return torch::q_per_channel_axis(self); }) .define_singleton_method( "_q_per_channel_scales", *[](const Tensor &self) { return torch::q_per_channel_scales(self); }) .define_singleton_method( "_q_per_channel_zero_points", *[](const Tensor &self) { return torch::q_per_channel_zero_points(self); }) .define_singleton_method( "_q_scale", *[](const Tensor &self) { return torch::q_scale(self); }) .define_singleton_method( "_q_zero_point", *[](const Tensor &self) { return torch::q_zero_point(self); }) .define_singleton_method( "_qr", *[](const Tensor &self, bool some) { return torch::qr(self, some); }) .define_singleton_method( "_qr_q", *[](const Tensor &self, bool some, Tensor &Q, Tensor &R) { return torch::qr_out(Q, R, self, some); }) .define_singleton_method( "_quantized_gru_cell", *[](const Tensor &input, const Tensor &hx, const Tensor &w_ih, const Tensor &w_hh, const Tensor &b_ih, const Tensor &b_hh, const Tensor &packed_ih, const Tensor &packed_hh, const Tensor &col_offsets_ih, const Tensor &col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) { return torch::quantized_gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); }) .define_singleton_method( "_quantized_gru_data", *[](const Tensor &data, const Tensor &batch_sizes, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { return torch::quantized_gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); }) .define_singleton_method( "_quantized_gru_input", *[](const Tensor &input, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { return torch::quantized_gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); }) .define_singleton_method( "_quantized_lstm_cell", *[](const Tensor &input, TensorList hx, const Tensor &w_ih, const Tensor &w_hh, const Tensor &b_ih, const Tensor &b_hh, const Tensor &packed_ih, const Tensor &packed_hh, const Tensor &col_offsets_ih, const Tensor &col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) { return torch::quantized_lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); }) .define_singleton_method( "_quantized_max_pool2d", *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { return torch::quantized_max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode); }) .define_singleton_method( "_quantized_rnn_relu_cell", *[](const Tensor &input, const Tensor &hx, const 
Tensor &w_ih, const Tensor &w_hh, const Tensor &b_ih, const Tensor &b_hh, const Tensor &packed_ih, const Tensor &packed_hh, const Tensor &col_offsets_ih, const Tensor &col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) { return torch::quantized_rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); }) .define_singleton_method( "_quantized_rnn_tanh_cell", *[](const Tensor &input, const Tensor &hx, const Tensor &w_ih, const Tensor &w_hh, const Tensor &b_ih, const Tensor &b_hh, const Tensor &packed_ih, const Tensor &packed_hh, const Tensor &col_offsets_ih, const Tensor &col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) { return torch::quantized_rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); }) .define_singleton_method( "_rand_generator_out", *[](IntArrayRef size, Tensor &out) { return torch::rand_out(out, size); }) .define_singleton_method( "_rand_like", *[](const Tensor &self) { return torch::rand_like(self); }) .define_singleton_method( "_rand_out", *[](IntArrayRef size, Tensor &out) { return torch::rand_out(out, size); }) .define_singleton_method( "_randint_generator_out", *[](int64_t high, IntArrayRef size, Tensor &out) { return torch::randint_out(out, high, size); }) .define_singleton_method( "_randint_like", *[](const Tensor &self, int64_t high) { return torch::randint_like(self, high); }) .define_singleton_method( "_randint_like_low", *[](const Tensor &self, int64_t low, int64_t high) { return torch::randint_like(self, low, high); }) .define_singleton_method( "_randint_low_generator_out", *[](int64_t low, int64_t high, IntArrayRef size, Tensor &out) { return torch::randint_out(out, low, high, size); }) .define_singleton_method( "_randint_low_out", *[](int64_t low, int64_t high, IntArrayRef size, Tensor &out) { return torch::randint_out(out, low, high, size); }) .define_singleton_method( "_randint_out", *[](int64_t high, IntArrayRef size, Tensor &out) { return torch::randint_out(out, high, size); }) .define_singleton_method( "_randn_generator_out", *[](IntArrayRef size, Tensor &out) { return torch::randn_out(out, size); }) .define_singleton_method( "_randn_like", *[](const Tensor &self) { return torch::randn_like(self); }) .define_singleton_method( "_randn_out", *[](IntArrayRef size, Tensor &out) { return torch::randn_out(out, size); }) .define_singleton_method( "_randperm_generator_out", *[](int64_t n, Tensor &out) { return torch::randperm_out(out, n); }) .define_singleton_method( "_randperm_out", *[](int64_t n, Tensor &out) { return torch::randperm_out(out, n); }) .define_singleton_method( "_range_out", *[](Scalar start, Scalar end, Scalar step, Tensor &out) { return torch::range_out(out, start, end, step); }) .define_singleton_method( "_reciprocal", *[](const Tensor &self) { return torch::reciprocal(self); }) .define_singleton_method( "_reciprocal_", *[](Tensor &self) { return torch::reciprocal_(self); }) .define_singleton_method( "_reciprocal_out", *[](const Tensor &self, Tensor &out) { return torch::reciprocal_out(out, self); }) .define_singleton_method( "_relu", *[](const Tensor &self) { return torch::relu(self); }) .define_singleton_method( "_relu_", *[](Tensor &self) { return torch::relu_(self); }) .define_singleton_method( "_remainder_scalar", *[](const Tensor &self, Scalar other) { return 
torch::remainder(self, other); }) .define_singleton_method( "_remainder_scalar_out", *[](const Tensor &self, Scalar other, Tensor &out) { return torch::remainder_out(out, self, other); }) .define_singleton_method( "_remainder_tensor", *[](const Tensor &self, const Tensor &other) { return torch::remainder(self, other); }) .define_singleton_method( "_remainder_tensor_out", *[](const Tensor &self, const Tensor &other, Tensor &out) { return torch::remainder_out(out, self, other); }) .define_singleton_method( "_renorm", *[](const Tensor &self, Scalar p, int64_t dim, Scalar maxnorm) { return torch::renorm(self, p, dim, maxnorm); }) .define_singleton_method( "_renorm_out", *[](const Tensor &self, Scalar p, int64_t dim, Scalar maxnorm, Tensor &out) { return torch::renorm_out(out, self, p, dim, maxnorm); }) .define_singleton_method( "_repeat_interleave_self_int", *[](const Tensor &self, int64_t repeats) { return torch::repeat_interleave(self, repeats); }) .define_singleton_method( "_repeat_interleave_self_int_dim", *[](const Tensor &self, int64_t repeats, int64_t dim) { return torch::repeat_interleave(self, repeats, dim); }) .define_singleton_method( "_repeat_interleave_self_tensor", *[](const Tensor &self, const Tensor &repeats) { return torch::repeat_interleave(self, repeats); }) .define_singleton_method( "_repeat_interleave_self_tensor_dim", *[](const Tensor &self, const Tensor &repeats, int64_t dim) { return torch::repeat_interleave(self, repeats, dim); }) .define_singleton_method( "_repeat_interleave_tensor", *[](const Tensor &repeats) { return torch::repeat_interleave(repeats); }) .define_singleton_method( "_reshape", *[](const Tensor &self, IntArrayRef shape) { return torch::reshape(self, shape); }) .define_singleton_method( "_resize_as_", *[](Tensor &self, const Tensor &the_template) { return torch::resize_as_(self, the_template); }) .define_singleton_method( "_result_type_scalar", *[](const Tensor &tensor, Scalar other) { return torch::result_type(tensor, other); }) .define_singleton_method( "_result_type_scalar_scalar", *[](Scalar scalar1, Scalar scalar2) { return torch::result_type(scalar1, scalar2); }) .define_singleton_method( "_result_type_scalar_tensor", *[](Scalar scalar, const Tensor &tensor) { return torch::result_type(scalar, tensor); }) .define_singleton_method( "_result_type_tensor", *[](const Tensor &tensor, const Tensor &other) { return torch::result_type(tensor, other); }) .define_singleton_method( "_rfft", *[](const Tensor &self, int64_t signal_ndim, bool normalized, bool onesided) { return torch::rfft(self, signal_ndim, normalized, onesided); }) .define_singleton_method( "_rnn_relu_cell", *[](const Tensor &input, const Tensor &hx, const Tensor &w_ih, const Tensor &w_hh, OptionalTensor b_ih, OptionalTensor b_hh) { return torch::rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh); }) .define_singleton_method( "_rnn_relu_data", *[](const Tensor &data, const Tensor &batch_sizes, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { return torch::rnn_relu(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); }) .define_singleton_method( "_rnn_relu_input", *[](const Tensor &input, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { return torch::rnn_relu(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); }) .define_singleton_method( "_rnn_tanh_cell", 
*[](const Tensor &input, const Tensor &hx, const Tensor &w_ih, const Tensor &w_hh, OptionalTensor b_ih, OptionalTensor b_hh) { return torch::rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh); }) .define_singleton_method( "_rnn_tanh_data", *[](const Tensor &data, const Tensor &batch_sizes, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { return torch::rnn_tanh(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); }) .define_singleton_method( "_rnn_tanh_input", *[](const Tensor &input, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { return torch::rnn_tanh(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); }) .define_singleton_method( "_roll", *[](const Tensor &self, IntArrayRef shifts, IntArrayRef dims) { return torch::roll(self, shifts, dims); }) .define_singleton_method( "_rot90", *[](const Tensor &self, int64_t k, IntArrayRef dims) { return torch::rot90(self, k, dims); }) .define_singleton_method( "_round", *[](const Tensor &self) { return torch::round(self); }) .define_singleton_method( "_round_", *[](Tensor &self) { return torch::round_(self); }) .define_singleton_method( "_round_out", *[](const Tensor &self, Tensor &out) { return torch::round_out(out, self); }) .define_singleton_method( "_rrelu", *[](const Tensor &self, Scalar lower, Scalar upper, bool training) { return torch::rrelu(self, lower, upper, training); }) .define_singleton_method( "_rrelu_", *[](Tensor &self, Scalar lower, Scalar upper, bool training) { return torch::rrelu_(self, lower, upper, training); }) .define_singleton_method( "_rsqrt", *[](const Tensor &self) { return torch::rsqrt(self); }) .define_singleton_method( "_rsqrt_", *[](Tensor &self) { return torch::rsqrt_(self); }) .define_singleton_method( "_rsqrt_out", *[](const Tensor &self, Tensor &out) { return torch::rsqrt_out(out, self); }) .define_singleton_method( "_rsub_scalar", *[](const Tensor &self, Scalar other, Scalar alpha) { return torch::rsub(self, other, alpha); }) .define_singleton_method( "_rsub_tensor", *[](const Tensor &self, const Tensor &other, Scalar alpha) { return torch::rsub(self, other, alpha); }) .define_singleton_method( "_scatter_add", *[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &src) { return torch::scatter_add(self, dim, index, src); }) .define_singleton_method( "_scatter_src", *[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &src) { return torch::scatter(self, dim, index, src); }) .define_singleton_method( "_scatter_value", *[](const Tensor &self, int64_t dim, const Tensor &index, Scalar value) { return torch::scatter(self, dim, index, value); }) .define_singleton_method( "_select_int", *[](Tensor &self, int64_t dim, int64_t index) { return torch::select(self, dim, index); }) .define_singleton_method( "_selu", *[](const Tensor &self) { return torch::selu(self); }) .define_singleton_method( "_selu_", *[](Tensor &self) { return torch::selu_(self); }) .define_singleton_method( "_sigmoid", *[](const Tensor &self) { return torch::sigmoid(self); }) .define_singleton_method( "_sigmoid_", *[](Tensor &self) { return torch::sigmoid_(self); }) .define_singleton_method( "_sigmoid_out", *[](const Tensor &self, Tensor &out) { return torch::sigmoid_out(out, self); }) .define_singleton_method( "_sign", *[](const Tensor &self) { return torch::sign(self); }) 
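// The entries in this file follow a consistent naming scheme that mirrors the
// libtorch calls they forward to; the _rsqrt family just above illustrates it:
//   "_rsqrt"     -> torch::rsqrt(self)           functional, returns a new tensor
//   "_rsqrt_"    -> torch::rsqrt_(self)          in-place, mutates self
//   "_rsqrt_out" -> torch::rsqrt_out(out, self)  writes into a caller-supplied tensor
// Out variants take the destination tensor as the last bound parameter but pass it
// first to libtorch. Overload suffixes such as _scalar, _tensor and _dim (for
// example "_ne_scalar" vs "_ne_tensor") disambiguate torch overloads that cannot
// be told apart from Ruby argument types alone.
//
// Minimal illustrative sketch of the call an out variant forwards to, assuming a
// preallocated result tensor of matching shape and dtype:
//
//   torch::Tensor x = torch::rand({3});
//   torch::Tensor out = torch::empty_like(x);
//   torch::rsqrt_out(out, x);  // same call made by the "_rsqrt_out" lambda above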
.define_singleton_method( "_sign_out", *[](const Tensor &self, Tensor &out) { return torch::sign_out(out, self); }) .define_singleton_method( "_sin", *[](const Tensor &self) { return torch::sin(self); }) .define_singleton_method( "_sin_", *[](Tensor &self) { return torch::sin_(self); }) .define_singleton_method( "_sin_out", *[](const Tensor &self, Tensor &out) { return torch::sin_out(out, self); }) .define_singleton_method( "_sinh", *[](const Tensor &self) { return torch::sinh(self); }) .define_singleton_method( "_sinh_", *[](Tensor &self) { return torch::sinh_(self); }) .define_singleton_method( "_sinh_out", *[](const Tensor &self, Tensor &out) { return torch::sinh_out(out, self); }) .define_singleton_method( "_size_int", *[](const Tensor &self, int64_t dim) { return torch::size(self, dim); }) .define_singleton_method( "_slice_tensor", *[](Tensor &self, int64_t dim, int64_t start, int64_t end, int64_t step) { return torch::slice(self, dim, start, end, step); }) .define_singleton_method( "_slogdet", *[](const Tensor &self) { return torch::slogdet(self); }) .define_singleton_method( "_smm", *[](const Tensor &self, const Tensor &mat2) { return torch::smm(self, mat2); }) .define_singleton_method( "_solve", *[](const Tensor &self, const Tensor &A) { return torch::solve(self, A); }) .define_singleton_method( "_solve_solution", *[](const Tensor &self, const Tensor &A, Tensor &solution, Tensor &lu) { return torch::solve_out(solution, lu, self, A); }) .define_singleton_method( "_sort", *[](const Tensor &self, int64_t dim, bool descending) { return torch::sort(self, dim, descending); }) .define_singleton_method( "_sort_values", *[](const Tensor &self, int64_t dim, bool descending, Tensor &values, Tensor &indices) { return torch::sort_out(values, indices, self, dim, descending); }) .define_singleton_method( "_split_tensor", *[](Tensor &self, int64_t split_size, int64_t dim) { return torch::split(self, split_size, dim); }) .define_singleton_method( "_split_with_sizes", *[](const Tensor &self, IntArrayRef split_sizes, int64_t dim) { return torch::split_with_sizes(self, split_sizes, dim); }) .define_singleton_method( "_sqrt", *[](const Tensor &self) { return torch::sqrt(self); }) .define_singleton_method( "_sqrt_", *[](Tensor &self) { return torch::sqrt_(self); }) .define_singleton_method( "_sqrt_out", *[](const Tensor &self, Tensor &out) { return torch::sqrt_out(out, self); }) .define_singleton_method( "_squeeze", *[](Tensor &self) { return torch::squeeze(self); }) .define_singleton_method( "_squeeze_dim", *[](Tensor &self, int64_t dim) { return torch::squeeze(self, dim); }) .define_singleton_method( "_sspaddmm", *[](const Tensor &self, const Tensor &mat1, const Tensor &mat2, Scalar beta, Scalar alpha) { return torch::sspaddmm(self, mat1, mat2, beta, alpha); }) .define_singleton_method( "_sspaddmm_out", *[](const Tensor &self, const Tensor &mat1, const Tensor &mat2, Scalar beta, Scalar alpha, Tensor &out) { return torch::sspaddmm_out(out, self, mat1, mat2, beta, alpha); }) .define_singleton_method( "_stack", *[](TensorList tensors, int64_t dim) { return torch::stack(tensors, dim); }) .define_singleton_method( "_stack_out", *[](TensorList tensors, int64_t dim, Tensor &out) { return torch::stack_out(out, tensors, dim); }) .define_singleton_method( "_std", *[](const Tensor &self, bool unbiased) { return torch::std(self, unbiased); }) .define_singleton_method( "_std_dim", *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) { return torch::std(self, dim, unbiased, keepdim); }) 
.define_singleton_method( "_std_mean", *[](const Tensor &self, bool unbiased) { return torch::std_mean(self, unbiased); }) .define_singleton_method( "_std_mean_dim", *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) { return torch::std_mean(self, dim, unbiased, keepdim); }) .define_singleton_method( "_std_out", *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim, Tensor &out) { return torch::std_out(out, self, dim, unbiased, keepdim); }) .define_singleton_method( "_stride_int", *[](const Tensor &self, int64_t dim) { return torch::stride(self, dim); }) .define_singleton_method( "_sub_out", *[](const Tensor &self, const Tensor &other, Scalar alpha, Tensor &out) { return torch::sub_out(out, self, other, alpha); }) .define_singleton_method( "_sub_scalar", *[](const Tensor &self, Scalar other, Scalar alpha) { return torch::sub(self, other, alpha); }) .define_singleton_method( "_sub_tensor", *[](const Tensor &self, const Tensor &other, Scalar alpha) { return torch::sub(self, other, alpha); }) .define_singleton_method( "_svd", *[](const Tensor &self, bool some, bool compute_uv) { return torch::svd(self, some, compute_uv); }) .define_singleton_method( "_svd_u", *[](const Tensor &self, bool some, bool compute_uv, Tensor &U, Tensor &S, Tensor &V) { return torch::svd_out(U, S, V, self, some, compute_uv); }) .define_singleton_method( "_symeig", *[](const Tensor &self, bool eigenvectors, bool upper) { return torch::symeig(self, eigenvectors, upper); }) .define_singleton_method( "_symeig_e", *[](const Tensor &self, bool eigenvectors, bool upper, Tensor &e, Tensor &V) { return torch::symeig_out(e, V, self, eigenvectors, upper); }) .define_singleton_method( "_t", *[](Tensor &self) { return torch::t(self); }) .define_singleton_method( "_take", *[](const Tensor &self, const Tensor &index) { return torch::take(self, index); }) .define_singleton_method( "_take_out", *[](const Tensor &self, const Tensor &index, Tensor &out) { return torch::take_out(out, self, index); }) .define_singleton_method( "_tan", *[](const Tensor &self) { return torch::tan(self); }) .define_singleton_method( "_tan_", *[](Tensor &self) { return torch::tan_(self); }) .define_singleton_method( "_tan_out", *[](const Tensor &self, Tensor &out) { return torch::tan_out(out, self); }) .define_singleton_method( "_tanh", *[](const Tensor &self) { return torch::tanh(self); }) .define_singleton_method( "_tanh_", *[](Tensor &self) { return torch::tanh_(self); }) .define_singleton_method( "_tanh_out", *[](const Tensor &self, Tensor &out) { return torch::tanh_out(out, self); }) .define_singleton_method( "_tensordot", *[](const Tensor &self, const Tensor &other, IntArrayRef dims_self, IntArrayRef dims_other) { return torch::tensordot(self, other, dims_self, dims_other); }) .define_singleton_method( "_threshold", *[](const Tensor &self, Scalar threshold, Scalar value) { return torch::threshold(self, threshold, value); }) .define_singleton_method( "_threshold_", *[](Tensor &self, Scalar threshold, Scalar value) { return torch::threshold_(self, threshold, value); }) .define_singleton_method( "_threshold_out", *[](const Tensor &self, Scalar threshold, Scalar value, Tensor &out) { return torch::threshold_out(out, self, threshold, value); }) .define_singleton_method( "_topk", *[](const Tensor &self, int64_t k, int64_t dim, bool largest, bool sorted) { return torch::topk(self, k, dim, largest, sorted); }) .define_singleton_method( "_topk_values", *[](const Tensor &self, int64_t k, int64_t dim, bool largest, bool sorted, 
Tensor &values, Tensor &indices) { return torch::topk_out(values, indices, self, k, dim, largest, sorted); }) .define_singleton_method( "_trace", *[](const Tensor &self) { return torch::trace(self); }) .define_singleton_method( "_transpose_int", *[](Tensor &self, int64_t dim0, int64_t dim1) { return torch::transpose(self, dim0, dim1); }) .define_singleton_method( "_trapz_dx", *[](const Tensor &y, double dx, int64_t dim) { return torch::trapz(y, dx, dim); }) .define_singleton_method( "_trapz_x", *[](const Tensor &y, const Tensor &x, int64_t dim) { return torch::trapz(y, x, dim); }) .define_singleton_method( "_triangular_solve", *[](const Tensor &self, const Tensor &A, bool upper, bool transpose, bool unitriangular) { return torch::triangular_solve(self, A, upper, transpose, unitriangular); }) .define_singleton_method( "_triangular_solve_x", *[](const Tensor &self, const Tensor &A, bool upper, bool transpose, bool unitriangular, Tensor &X, Tensor &M) { return torch::triangular_solve_out(X, M, self, A, upper, transpose, unitriangular); }) .define_singleton_method( "_tril", *[](const Tensor &self, int64_t diagonal) { return torch::tril(self, diagonal); }) .define_singleton_method( "_tril_out", *[](const Tensor &self, int64_t diagonal, Tensor &out) { return torch::tril_out(out, self, diagonal); }) .define_singleton_method( "_triplet_margin_loss", *[](const Tensor &anchor, const Tensor &positive, const Tensor &negative, double margin, double p, double eps, bool swap, MyReduction reduction) { return torch::triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction); }) .define_singleton_method( "_triu", *[](const Tensor &self, int64_t diagonal) { return torch::triu(self, diagonal); }) .define_singleton_method( "_triu_out", *[](const Tensor &self, int64_t diagonal, Tensor &out) { return torch::triu_out(out, self, diagonal); }) .define_singleton_method( "_trunc", *[](const Tensor &self) { return torch::trunc(self); }) .define_singleton_method( "_trunc_", *[](Tensor &self) { return torch::trunc_(self); }) .define_singleton_method( "_trunc_out", *[](const Tensor &self, Tensor &out) { return torch::trunc_out(out, self); }) .define_singleton_method( "_unbind_int", *[](Tensor &self, int64_t dim) { return torch::unbind(self, dim); }) .define_singleton_method( "_unique_consecutive", *[](const Tensor &self, bool return_inverse, bool return_counts) { return torch::unique_consecutive(self, return_inverse, return_counts); }) .define_singleton_method( "_unique_consecutive_dim", *[](const Tensor &self, bool return_inverse, bool return_counts, int64_t dim) { return torch::unique_consecutive(self, return_inverse, return_counts, dim); }) .define_singleton_method( "_unique_dim", *[](const Tensor &self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) { return torch::unique_dim(self, dim, sorted, return_inverse, return_counts); }) .define_singleton_method( "_unsqueeze", *[](Tensor &self, int64_t dim) { return torch::unsqueeze(self, dim); }) .define_singleton_method( "_var", *[](const Tensor &self, bool unbiased) { return torch::var(self, unbiased); }) .define_singleton_method( "_var_dim", *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) { return torch::var(self, dim, unbiased, keepdim); }) .define_singleton_method( "_var_mean", *[](const Tensor &self, bool unbiased) { return torch::var_mean(self, unbiased); }) .define_singleton_method( "_var_mean_dim", *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) { return torch::var_mean(self, dim, 
unbiased, keepdim); }) .define_singleton_method( "_var_out", *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim, Tensor &out) { return torch::var_out(out, self, dim, unbiased, keepdim); }) .define_singleton_method( "_where", *[](const Tensor &condition) { return torch::where(condition); }) .define_singleton_method( "_where_self", *[](const Tensor &condition, const Tensor &self, const Tensor &other) { return torch::where(condition, self, other); }) .define_singleton_method( "_zero_", *[](Tensor &self) { return torch::zero_(self); }) .define_singleton_method( "_zeros_like", *[](const Tensor &self) { return torch::zeros_like(self); }) .define_singleton_method( "_zeros_out", *[](IntArrayRef size, Tensor &out) { return torch::zeros_out(out, size); }); }
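// add_torch_functions only registers these low-level, underscore-prefixed
// singleton methods on the module it is given; presumably the hand-written Ruby
// layer of the library wraps them in a friendlier public API. A hedged sketch of
// a caller, where Init_ext and the module name are purely illustrative
// assumptions rather than part of this file:
//
//   extern "C" void Init_ext() {
//     Module m = define_module("Torch");
//     add_torch_functions(m);
//   }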