ext/torch/torch_functions.cpp in torch-rb-0.1.5 vs ext/torch/torch_functions.cpp in torch-rb-0.1.6
- removed in 0.1.6 (old)
+ added in 0.1.6 (new)
@@ -43,11 +43,11 @@
return torch::adaptive_avg_pool1d(self, output_size);
})
.define_singleton_method(
"_adaptive_max_pool1d",
*[](const Tensor &self, IntArrayRef output_size) {
- return torch::adaptive_max_pool1d(self, output_size);
+ return wrap(torch::adaptive_max_pool1d(self, output_size));
})
.define_singleton_method(
"_add_out",
*[](const Tensor &self, const Tensor &other, Scalar alpha, Tensor &out) {
return torch::add_out(out, self, other, alpha);
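The dominant change in 0.1.6 is visible in this first hunk: LibTorch functions that return a `std::tuple` (here `adaptive_max_pool1d`, which yields output and indices) are now routed through a `wrap` helper before crossing into Ruby. The helper itself is not part of this diff; below is a minimal sketch of what such a helper looks like, assuming Rice 2.x (which torch-rb 0.1.x builds against) and hedging on the exact code in `ext/torch/templates.hpp`:

```cpp
// Sketch only, not the gem's verbatim helper. Rice cannot auto-convert
// std::tuple, so each tuple arity the bindings need gets an overload that
// packs the fields into a Ruby Array; the Ruby caller can then destructure
// the result as, e.g., output, indices = ...
#include <rice/Array.hpp>
#include <rice/Object.hpp>
#include <torch/torch.h>

inline Rice::Object wrap(std::tuple<torch::Tensor, torch::Tensor> x) {
  Rice::Array a;
  a.push(to_ruby<torch::Tensor>(std::get<0>(x)));  // e.g. pooled output
  a.push(to_ruby<torch::Tensor>(std::get<1>(x)));  // e.g. indices
  return Rice::Object(a);
}

// Three- and four-element overloads would follow the same pattern for ops
// like lstm (output, h_n, c_n) and embedding_bag, which returns four tensors.
```

Every `wrap(...)` call added throughout this diff follows from that one fix: previously these tuple returns had no registered Ruby conversion.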
@@ -308,36 +308,36 @@
return torch::batch_norm_backward_elemt(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu);
})
.define_singleton_method(
"_batch_norm_backward_reduce",
*[](const Tensor &grad_out, const Tensor &input, const Tensor &mean, const Tensor &invstd, OptionalTensor weight, bool input_g, bool weight_g, bool bias_g) {
- return torch::batch_norm_backward_reduce(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g);
+ return wrap(torch::batch_norm_backward_reduce(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g));
})
.define_singleton_method(
"_batch_norm_elemt",
*[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, const Tensor &mean, const Tensor &invstd, double eps) {
return torch::batch_norm_elemt(input, weight, bias, mean, invstd, eps);
})
.define_singleton_method(
"_batch_norm_gather_stats",
*[](const Tensor &input, const Tensor &mean, const Tensor &invstd, OptionalTensor running_mean, OptionalTensor running_var, double momentum, double eps, int64_t count) {
- return torch::batch_norm_gather_stats(input, mean, invstd, running_mean, running_var, momentum, eps, count);
+ return wrap(torch::batch_norm_gather_stats(input, mean, invstd, running_mean, running_var, momentum, eps, count));
})
.define_singleton_method(
"_batch_norm_gather_stats_with_counts",
*[](const Tensor &input, const Tensor &mean, const Tensor &invstd, OptionalTensor running_mean, OptionalTensor running_var, double momentum, double eps, IntArrayRef counts) {
- return torch::batch_norm_gather_stats_with_counts(input, mean, invstd, running_mean, running_var, momentum, eps, counts);
+ return wrap(torch::batch_norm_gather_stats_with_counts(input, mean, invstd, running_mean, running_var, momentum, eps, counts));
})
.define_singleton_method(
"_batch_norm_stats",
*[](const Tensor &input, double eps) {
- return torch::batch_norm_stats(input, eps);
+ return wrap(torch::batch_norm_stats(input, eps));
})
.define_singleton_method(
"_batch_norm_update_stats",
*[](const Tensor &input, OptionalTensor running_mean, OptionalTensor running_var, double momentum) {
- return torch::batch_norm_update_stats(input, running_mean, running_var, momentum);
+ return wrap(torch::batch_norm_update_stats(input, running_mean, running_var, momentum));
})
.define_singleton_method(
"_bernoulli",
*[](const Tensor &self) {
return torch::bernoulli(self);
@@ -391,10 +391,15 @@
"_broadcast_tensors",
*[](TensorList tensors) {
return torch::broadcast_tensors(tensors);
})
.define_singleton_method(
+ "_can_cast",
+ *[](ScalarType from, ScalarType to) {
+ return torch::can_cast(from, to);
+ })
+ .define_singleton_method(
"_cartesian_prod",
*[](TensorList tensors) {
return torch::cartesian_prod(tensors);
})
.define_singleton_method(
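`_can_cast` is one of the new type-introspection bindings in 0.1.6. It forwards straight to LibTorch's casting rules; a quick C++ illustration of the semantics the Ruby side now gets:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // Widening casts are allowed implicitly...
  std::cout << torch::can_cast(torch::kInt, torch::kFloat) << "\n";  // 1
  // ...but float -> int would lose information, so it is rejected.
  std::cout << torch::can_cast(torch::kFloat, torch::kInt) << "\n";  // 0
}
```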
@@ -628,11 +633,11 @@
return torch::cudnn_affine_grid_generator(theta, N, C, H, W);
})
.define_singleton_method(
"_cudnn_batch_norm",
*[](const Tensor &input, const Tensor &weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool training, double exponential_average_factor, double epsilon) {
- return torch::cudnn_batch_norm(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
+ return wrap(torch::cudnn_batch_norm(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon));
})
.define_singleton_method(
"_cudnn_convolution",
*[](const Tensor &self, const Tensor &weight, OptionalTensor bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
return torch::cudnn_convolution(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
@@ -681,10 +686,30 @@
"_cudnn_is_acceptable",
*[](const Tensor &self) {
return torch::cudnn_is_acceptable(self);
})
.define_singleton_method(
+ "_cumprod",
+ *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
+ return torch::cumprod(self, dim, dtype);
+ })
+ .define_singleton_method(
+ "_cumprod_out",
+ *[](const Tensor &self, int64_t dim, OptionalScalarType dtype, Tensor &out) {
+ return torch::cumprod_out(out, self, dim, dtype);
+ })
+ .define_singleton_method(
+ "_cumsum",
+ *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
+ return torch::cumsum(self, dim, dtype);
+ })
+ .define_singleton_method(
+ "_cumsum_out",
+ *[](const Tensor &self, int64_t dim, OptionalScalarType dtype, Tensor &out) {
+ return torch::cumsum_out(out, self, dim, dtype);
+ })
+ .define_singleton_method(
"_dequantize",
*[](const Tensor &self) {
return torch::dequantize(self);
})
.define_singleton_method(
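The new `_cumprod`/`_cumsum` bindings (and several others below: `_log_softmax`, `_mean`, `_sum`, `_prod`, `_softmax`) take an `OptionalScalarType` so Ruby can pass `nil` to keep the input's dtype. The adapter itself is not shown in this diff; a plausible shape for it, assuming Rice 2.x conversion helpers, is:

```cpp
// Hypothetical adapter (the real one lives in ext/torch/templates.hpp):
// accepts a Ruby object at the binding boundary and converts implicitly to
// the c10::optional<at::ScalarType> that torch::cumsum and friends expect.
#include <rice/Object.hpp>
#include <torch/torch.h>

class OptionalScalarType {
 public:
  OptionalScalarType(Rice::Object o) {
    if (!o.is_nil()) {
      // assumes a from_ruby<at::ScalarType> specialization is registered
      value_ = from_ruby<at::ScalarType>(o);
    }
  }
  operator c10::optional<at::ScalarType>() const { return value_; }

 private:
  c10::optional<at::ScalarType> value_;  // empty when Ruby passed nil
};
```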
@@ -778,26 +803,26 @@
return torch::dropout_(self, p, train);
})
.define_singleton_method(
"_eig",
*[](const Tensor &self, bool eigenvectors) {
- return torch::eig(self, eigenvectors);
+ return wrap(torch::eig(self, eigenvectors));
})
.define_singleton_method(
"_eig_e",
*[](const Tensor &self, bool eigenvectors, Tensor &e, Tensor &v) {
- return torch::eig_out(e, v, self, eigenvectors);
+ return wrap(torch::eig_out(e, v, self, eigenvectors));
})
.define_singleton_method(
"_embedding",
*[](const Tensor &weight, const Tensor &indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) {
return torch::embedding(weight, indices, padding_idx, scale_grad_by_freq, sparse);
})
.define_singleton_method(
"_embedding_bag",
*[](const Tensor &weight, const Tensor &indices, const Tensor &offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, OptionalTensor per_sample_weights) {
- return torch::embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights);
+ return wrap(torch::embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights));
})
.define_singleton_method(
"_embedding_renorm_",
*[](Tensor &self, const Tensor &indices, double max_norm, double norm_type) {
return torch::embedding_renorm_(self, indices, max_norm, norm_type);
@@ -943,11 +968,11 @@
return torch::fbgemm_linear_int8_weight_fp32_activation(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
})
.define_singleton_method(
"_fbgemm_linear_quantize_weight",
*[](const Tensor &input) {
- return torch::fbgemm_linear_quantize_weight(input);
+ return wrap(torch::fbgemm_linear_quantize_weight(input));
})
.define_singleton_method(
"_fbgemm_pack_gemm_matrix_fp16",
*[](const Tensor &input) {
return torch::fbgemm_pack_gemm_matrix_fp16(input);
@@ -1113,16 +1138,16 @@
return torch::ge_out(out, self, other);
})
.define_singleton_method(
"_geqrf",
*[](const Tensor &self) {
- return torch::geqrf(self);
+ return wrap(torch::geqrf(self));
})
.define_singleton_method(
"_geqrf_a",
*[](const Tensor &self, Tensor &a, Tensor &tau) {
- return torch::geqrf_out(a, tau, self);
+ return wrap(torch::geqrf_out(a, tau, self));
})
.define_singleton_method(
"_ger",
*[](const Tensor &self, const Tensor &vec2) {
return torch::ger(self, vec2);
@@ -1158,16 +1183,16 @@
return torch::gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
})
.define_singleton_method(
"_gru_data",
*[](const Tensor &data, const Tensor &batch_sizes, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
- return torch::gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+ return wrap(torch::gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional));
})
.define_singleton_method(
"_gru_input",
*[](const Tensor &input, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
- return torch::gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+ return wrap(torch::gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first));
})
.define_singleton_method(
"_gt_scalar",
*[](const Tensor &self, Scalar other) {
return torch::gt(self, other);
@@ -1323,16 +1348,16 @@
return torch::kl_div(self, target, reduction);
})
.define_singleton_method(
"_kthvalue",
*[](const Tensor &self, int64_t k, int64_t dim, bool keepdim) {
- return torch::kthvalue(self, k, dim, keepdim);
+ return wrap(torch::kthvalue(self, k, dim, keepdim));
})
.define_singleton_method(
"_kthvalue_values",
*[](const Tensor &self, int64_t k, int64_t dim, bool keepdim, Tensor &values, Tensor &indices) {
- return torch::kthvalue_out(values, indices, self, k, dim, keepdim);
+ return wrap(torch::kthvalue_out(values, indices, self, k, dim, keepdim));
})
.define_singleton_method(
"_layer_norm",
*[](const Tensor &input, IntArrayRef normalized_shape, OptionalTensor weight, OptionalTensor bias, double eps, bool cudnn_enable) {
return torch::layer_norm(input, normalized_shape, weight, bias, eps, cudnn_enable);
@@ -1451,10 +1476,15 @@
"_log_out",
*[](const Tensor &self, Tensor &out) {
return torch::log_out(out, self);
})
.define_singleton_method(
+ "_log_softmax",
+ *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
+ return torch::log_softmax(self, dim, dtype);
+ })
+ .define_singleton_method(
"_logdet",
*[](const Tensor &self) {
return torch::logdet(self);
})
.define_singleton_method(
@@ -1493,31 +1523,31 @@
return torch::logsumexp_out(out, self, dim, keepdim);
})
.define_singleton_method(
"_lstm_cell",
*[](const Tensor &input, TensorList hx, const Tensor &w_ih, const Tensor &w_hh, OptionalTensor b_ih, OptionalTensor b_hh) {
- return torch::lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
+ return wrap(torch::lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh));
})
.define_singleton_method(
"_lstm_data",
*[](const Tensor &data, const Tensor &batch_sizes, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
- return torch::lstm(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+ return wrap(torch::lstm(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional));
})
.define_singleton_method(
"_lstm_input",
*[](const Tensor &input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
- return torch::lstm(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+ return wrap(torch::lstm(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first));
})
.define_singleton_method(
"_lstsq",
*[](const Tensor &self, const Tensor &A) {
- return torch::lstsq(self, A);
+ return wrap(torch::lstsq(self, A));
})
.define_singleton_method(
"_lstsq_x",
*[](const Tensor &self, const Tensor &A, Tensor &X, Tensor &qr) {
- return torch::lstsq_out(X, qr, self, A);
+ return wrap(torch::lstsq_out(X, qr, self, A));
})
.define_singleton_method(
"_lt_scalar",
*[](const Tensor &self, Scalar other) {
return torch::lt(self, other);
@@ -1608,16 +1638,16 @@
return torch::max(self);
})
.define_singleton_method(
"_max_dim",
*[](const Tensor &self, int64_t dim, bool keepdim) {
- return torch::max(self, dim, keepdim);
+ return wrap(torch::max(self, dim, keepdim));
})
.define_singleton_method(
"_max_dim_max",
*[](const Tensor &self, int64_t dim, bool keepdim, Tensor &max, Tensor &max_values) {
- return torch::max_out(max, max_values, self, dim, keepdim);
+ return wrap(torch::max_out(max, max_values, self, dim, keepdim));
})
.define_singleton_method(
"_max_other",
*[](const Tensor &self, const Tensor &other) {
return torch::max(self, other);
@@ -1633,11 +1663,11 @@
return torch::max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode);
})
.define_singleton_method(
"_max_pool1d_with_indices",
*[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
- return torch::max_pool1d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
+ return wrap(torch::max_pool1d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode));
})
.define_singleton_method(
"_max_pool2d",
*[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
return torch::max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode);
@@ -1651,23 +1681,38 @@
"_max_values",
*[](const Tensor &self, IntArrayRef dim, bool keepdim) {
return torch::max_values(self, dim, keepdim);
})
.define_singleton_method(
+ "_mean",
+ *[](const Tensor &self, OptionalScalarType dtype) {
+ return torch::mean(self, dtype);
+ })
+ .define_singleton_method(
+ "_mean_dim",
+ *[](const Tensor &self, IntArrayRef dim, bool keepdim, OptionalScalarType dtype) {
+ return torch::mean(self, dim, keepdim, dtype);
+ })
+ .define_singleton_method(
+ "_mean_out",
+ *[](const Tensor &self, IntArrayRef dim, bool keepdim, OptionalScalarType dtype, Tensor &out) {
+ return torch::mean_out(out, self, dim, keepdim, dtype);
+ })
+ .define_singleton_method(
"_median",
*[](const Tensor &self) {
return torch::median(self);
})
.define_singleton_method(
"_median_dim",
*[](const Tensor &self, int64_t dim, bool keepdim) {
- return torch::median(self, dim, keepdim);
+ return wrap(torch::median(self, dim, keepdim));
})
.define_singleton_method(
"_median_dim_values",
*[](const Tensor &self, int64_t dim, bool keepdim, Tensor &values, Tensor &indices) {
- return torch::median_out(values, indices, self, dim, keepdim);
+ return wrap(torch::median_out(values, indices, self, dim, keepdim));
})
.define_singleton_method(
"_meshgrid",
*[](TensorList tensors) {
return torch::meshgrid(tensors);
@@ -1678,16 +1723,16 @@
return torch::min(self);
})
.define_singleton_method(
"_min_dim",
*[](const Tensor &self, int64_t dim, bool keepdim) {
- return torch::min(self, dim, keepdim);
+ return wrap(torch::min(self, dim, keepdim));
})
.define_singleton_method(
"_min_dim_min",
*[](const Tensor &self, int64_t dim, bool keepdim, Tensor &min, Tensor &min_indices) {
- return torch::min_out(min, min_indices, self, dim, keepdim);
+ return wrap(torch::min_out(min, min_indices, self, dim, keepdim));
})
.define_singleton_method(
"_min_other",
*[](const Tensor &self, const Tensor &other) {
return torch::min(self, other);
@@ -1703,11 +1748,11 @@
return torch::min_values(self, dim, keepdim);
})
.define_singleton_method(
"_miopen_batch_norm",
*[](const Tensor &input, const Tensor &weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool training, double exponential_average_factor, double epsilon) {
- return torch::miopen_batch_norm(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
+ return wrap(torch::miopen_batch_norm(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon));
})
.define_singleton_method(
"_miopen_convolution",
*[](const Tensor &self, const Tensor &weight, OptionalTensor bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
return torch::miopen_convolution(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
@@ -1758,11 +1803,11 @@
return torch::miopen_depthwise_convolution_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic);
})
.define_singleton_method(
"_miopen_rnn",
*[](const Tensor &input, TensorList weight, int64_t weight_stride0, const Tensor &hx, OptionalTensor cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, OptionalTensor dropout_state) {
- return torch::miopen_rnn(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
+ return wrap(torch::miopen_rnn(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state));
})
.define_singleton_method(
"_mkldnn_adaptive_avg_pool2d",
*[](const Tensor &self, IntArrayRef output_size) {
return torch::mkldnn_adaptive_avg_pool2d(self, output_size);
@@ -1778,11 +1823,11 @@
return torch::mkldnn_convolution_backward_input(self_size, grad_output, weight, padding, stride, dilation, groups, bias_defined);
})
.define_singleton_method(
"_mkldnn_convolution_backward_weights",
*[](IntArrayRef weight_size, const Tensor &grad_output, const Tensor &self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined) {
- return torch::mkldnn_convolution_backward_weights(weight_size, grad_output, self, padding, stride, dilation, groups, bias_defined);
+ return wrap(torch::mkldnn_convolution_backward_weights(weight_size, grad_output, self, padding, stride, dilation, groups, bias_defined));
})
.define_singleton_method(
"_mkldnn_max_pool2d",
*[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
return torch::mkldnn_max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode);
@@ -1798,16 +1843,16 @@
return torch::mm_out(out, self, mat2);
})
.define_singleton_method(
"_mode",
*[](const Tensor &self, int64_t dim, bool keepdim) {
- return torch::mode(self, dim, keepdim);
+ return wrap(torch::mode(self, dim, keepdim));
})
.define_singleton_method(
"_mode_values",
*[](const Tensor &self, int64_t dim, bool keepdim, Tensor &values, Tensor &indices) {
- return torch::mode_out(values, indices, self, dim, keepdim);
+ return wrap(torch::mode_out(values, indices, self, dim, keepdim));
})
.define_singleton_method(
"_mul_out",
*[](const Tensor &self, const Tensor &other, Tensor &out) {
return torch::mul_out(out, self, other);
@@ -1853,16 +1898,16 @@
return torch::narrow(self, dim, start, length);
})
.define_singleton_method(
"_native_batch_norm",
*[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool training, double momentum, double eps) {
- return torch::native_batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps);
+ return wrap(torch::native_batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps));
})
.define_singleton_method(
"_native_layer_norm",
*[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, int64_t M, int64_t N, double eps) {
- return torch::native_layer_norm(input, weight, bias, M, N, eps);
+ return wrap(torch::native_layer_norm(input, weight, bias, M, N, eps));
})
.define_singleton_method(
"_native_norm",
*[](const Tensor &self, Scalar p) {
return torch::native_norm(self, p);
@@ -2056,10 +2101,30 @@
"_prelu",
*[](const Tensor &self, const Tensor &weight) {
return torch::prelu(self, weight);
})
.define_singleton_method(
+ "_prod",
+ *[](const Tensor &self, OptionalScalarType dtype) {
+ return torch::prod(self, dtype);
+ })
+ .define_singleton_method(
+ "_prod_dim_int",
+ *[](const Tensor &self, int64_t dim, bool keepdim, OptionalScalarType dtype) {
+ return torch::prod(self, dim, keepdim, dtype);
+ })
+ .define_singleton_method(
+ "_prod_int_out",
+ *[](const Tensor &self, int64_t dim, bool keepdim, OptionalScalarType dtype, Tensor &out) {
+ return torch::prod_out(out, self, dim, keepdim, dtype);
+ })
+ .define_singleton_method(
+ "_promote_types",
+ *[](ScalarType type1, ScalarType type2) {
+ return torch::promote_types(type1, type2);
+ })
+ .define_singleton_method(
"_q_per_channel_axis",
*[](const Tensor &self) {
return torch::q_per_channel_axis(self);
})
.define_singleton_method(
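`_promote_types` rounds out the type-introspection group started by `_can_cast` above, exposing PyTorch's type-promotion lattice. A short illustration of what it computes:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // int + float promotes to float; byte + int promotes to int.
  std::cout << c10::toString(torch::promote_types(torch::kInt, torch::kFloat)) << "\n";  // Float
  std::cout << c10::toString(torch::promote_types(torch::kByte, torch::kInt)) << "\n";   // Int
}
```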
@@ -2083,36 +2148,51 @@
return torch::q_zero_point(self);
})
.define_singleton_method(
"_qr",
*[](const Tensor &self, bool some) {
- return torch::qr(self, some);
+ return wrap(torch::qr(self, some));
})
.define_singleton_method(
"_qr_q",
*[](const Tensor &self, bool some, Tensor &Q, Tensor &R) {
- return torch::qr_out(Q, R, self, some);
+ return wrap(torch::qr_out(Q, R, self, some));
})
.define_singleton_method(
+ "_quantize_per_channel",
+ *[](const Tensor &self, const Tensor &scales, const Tensor &zero_points, int64_t axis, ScalarType dtype) {
+ return torch::quantize_per_channel(self, scales, zero_points, axis, dtype);
+ })
+ .define_singleton_method(
+ "_quantize_per_tensor",
+ *[](const Tensor &self, double scale, int64_t zero_point, ScalarType dtype) {
+ return torch::quantize_per_tensor(self, scale, zero_point, dtype);
+ })
+ .define_singleton_method(
"_quantized_gru_cell",
*[](const Tensor &input, const Tensor &hx, const Tensor &w_ih, const Tensor &w_hh, const Tensor &b_ih, const Tensor &b_hh, const Tensor &packed_ih, const Tensor &packed_hh, const Tensor &col_offsets_ih, const Tensor &col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) {
return torch::quantized_gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
})
.define_singleton_method(
"_quantized_gru_data",
*[](const Tensor &data, const Tensor &batch_sizes, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
- return torch::quantized_gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+ return wrap(torch::quantized_gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional));
})
.define_singleton_method(
"_quantized_gru_input",
*[](const Tensor &input, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
- return torch::quantized_gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+ return wrap(torch::quantized_gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first));
})
.define_singleton_method(
+ "_quantized_lstm",
+ *[](const Tensor &input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, OptionalScalarType dtype, bool use_dynamic) {
+ return wrap(torch::quantized_lstm(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, dtype, use_dynamic));
+ })
+ .define_singleton_method(
"_quantized_lstm_cell",
*[](const Tensor &input, TensorList hx, const Tensor &w_ih, const Tensor &w_hh, const Tensor &b_ih, const Tensor &b_hh, const Tensor &packed_ih, const Tensor &packed_hh, const Tensor &col_offsets_ih, const Tensor &col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) {
- return torch::quantized_lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
+ return wrap(torch::quantized_lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh));
})
.define_singleton_method(
"_quantized_max_pool2d",
*[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
return torch::quantized_max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode);
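0.1.6 also picks up the eager-quantization entry points (`_quantize_per_tensor`, `_quantize_per_channel`) alongside the now-wrapped quantized RNN ops. A minimal sketch of what the per-tensor binding forwards to:

```cpp
#include <torch/torch.h>

int main() {
  auto x = torch::randn({4});
  // Affine quantization: q = round(x / scale) + zero_point, stored as quint8.
  auto q = torch::quantize_per_tensor(x, /*scale=*/0.1, /*zero_point=*/10,
                                      torch::kQUInt8);
  auto back = torch::dequantize(q);  // the _dequantize binding above is the inverse
}
```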
@@ -2323,31 +2403,31 @@
return torch::rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
})
.define_singleton_method(
"_rnn_relu_data",
*[](const Tensor &data, const Tensor &batch_sizes, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
- return torch::rnn_relu(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+ return wrap(torch::rnn_relu(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional));
})
.define_singleton_method(
"_rnn_relu_input",
*[](const Tensor &input, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
- return torch::rnn_relu(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+ return wrap(torch::rnn_relu(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first));
})
.define_singleton_method(
"_rnn_tanh_cell",
*[](const Tensor &input, const Tensor &hx, const Tensor &w_ih, const Tensor &w_hh, OptionalTensor b_ih, OptionalTensor b_hh) {
return torch::rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
})
.define_singleton_method(
"_rnn_tanh_data",
*[](const Tensor &data, const Tensor &batch_sizes, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
- return torch::rnn_tanh(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+ return wrap(torch::rnn_tanh(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional));
})
.define_singleton_method(
"_rnn_tanh_input",
*[](const Tensor &input, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
- return torch::rnn_tanh(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+ return wrap(torch::rnn_tanh(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first));
})
.define_singleton_method(
"_roll",
*[](const Tensor &self, IntArrayRef shifts, IntArrayRef dims) {
return torch::roll(self, shifts, dims);
@@ -2503,36 +2583,41 @@
return torch::slice(self, dim, start, end, step);
})
.define_singleton_method(
"_slogdet",
*[](const Tensor &self) {
- return torch::slogdet(self);
+ return wrap(torch::slogdet(self));
})
.define_singleton_method(
"_smm",
*[](const Tensor &self, const Tensor &mat2) {
return torch::smm(self, mat2);
})
.define_singleton_method(
+ "_softmax",
+ *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
+ return torch::softmax(self, dim, dtype);
+ })
+ .define_singleton_method(
"_solve",
*[](const Tensor &self, const Tensor &A) {
- return torch::solve(self, A);
+ return wrap(torch::solve(self, A));
})
.define_singleton_method(
"_solve_solution",
*[](const Tensor &self, const Tensor &A, Tensor &solution, Tensor &lu) {
- return torch::solve_out(solution, lu, self, A);
+ return wrap(torch::solve_out(solution, lu, self, A));
})
.define_singleton_method(
"_sort",
*[](const Tensor &self, int64_t dim, bool descending) {
- return torch::sort(self, dim, descending);
+ return wrap(torch::sort(self, dim, descending));
})
.define_singleton_method(
"_sort_values",
*[](const Tensor &self, int64_t dim, bool descending, Tensor &values, Tensor &indices) {
- return torch::sort_out(values, indices, self, dim, descending);
+ return wrap(torch::sort_out(values, indices, self, dim, descending));
})
.define_singleton_method(
"_split_tensor",
*[](Tensor &self, int64_t split_size, int64_t dim) {
return torch::split(self, split_size, dim);
@@ -2598,16 +2683,16 @@
return torch::std(self, dim, unbiased, keepdim);
})
.define_singleton_method(
"_std_mean",
*[](const Tensor &self, bool unbiased) {
- return torch::std_mean(self, unbiased);
+ return wrap(torch::std_mean(self, unbiased));
})
.define_singleton_method(
"_std_mean_dim",
*[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) {
- return torch::std_mean(self, dim, unbiased, keepdim);
+ return wrap(torch::std_mean(self, dim, unbiased, keepdim));
})
.define_singleton_method(
"_std_out",
*[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim, Tensor &out) {
return torch::std_out(out, self, dim, unbiased, keepdim);
@@ -2631,28 +2716,43 @@
"_sub_tensor",
*[](const Tensor &self, const Tensor &other, Scalar alpha) {
return torch::sub(self, other, alpha);
})
.define_singleton_method(
+ "_sum",
+ *[](const Tensor &self, OptionalScalarType dtype) {
+ return torch::sum(self, dtype);
+ })
+ .define_singleton_method(
+ "_sum_dim_intlist",
+ *[](const Tensor &self, IntArrayRef dim, bool keepdim, OptionalScalarType dtype) {
+ return torch::sum(self, dim, keepdim, dtype);
+ })
+ .define_singleton_method(
+ "_sum_intlist_out",
+ *[](const Tensor &self, IntArrayRef dim, bool keepdim, OptionalScalarType dtype, Tensor &out) {
+ return torch::sum_out(out, self, dim, keepdim, dtype);
+ })
+ .define_singleton_method(
"_svd",
*[](const Tensor &self, bool some, bool compute_uv) {
- return torch::svd(self, some, compute_uv);
+ return wrap(torch::svd(self, some, compute_uv));
})
.define_singleton_method(
"_svd_u",
*[](const Tensor &self, bool some, bool compute_uv, Tensor &U, Tensor &S, Tensor &V) {
- return torch::svd_out(U, S, V, self, some, compute_uv);
+ return wrap(torch::svd_out(U, S, V, self, some, compute_uv));
})
.define_singleton_method(
"_symeig",
*[](const Tensor &self, bool eigenvectors, bool upper) {
- return torch::symeig(self, eigenvectors, upper);
+ return wrap(torch::symeig(self, eigenvectors, upper));
})
.define_singleton_method(
"_symeig_e",
*[](const Tensor &self, bool eigenvectors, bool upper, Tensor &e, Tensor &V) {
- return torch::symeig_out(e, V, self, eigenvectors, upper);
+ return wrap(torch::symeig_out(e, V, self, eigenvectors, upper));
})
.define_singleton_method(
"_t",
*[](Tensor &self) {
return torch::t(self);
@@ -2718,16 +2818,16 @@
return torch::threshold_out(out, self, threshold, value);
})
.define_singleton_method(
"_topk",
*[](const Tensor &self, int64_t k, int64_t dim, bool largest, bool sorted) {
- return torch::topk(self, k, dim, largest, sorted);
+ return wrap(torch::topk(self, k, dim, largest, sorted));
})
.define_singleton_method(
"_topk_values",
*[](const Tensor &self, int64_t k, int64_t dim, bool largest, bool sorted, Tensor &values, Tensor &indices) {
- return torch::topk_out(values, indices, self, k, dim, largest, sorted);
+ return wrap(torch::topk_out(values, indices, self, k, dim, largest, sorted));
})
.define_singleton_method(
"_trace",
*[](const Tensor &self) {
return torch::trace(self);
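The `_topk`/`_topk_values` pair shows the convention used throughout this file: the plain variant allocates and returns a wrapped tuple, while the `*_values`/`*_out` variant writes into caller-supplied tensors via LibTorch's out= form, which puts the destination tensors first. A sketch of the underlying call:

```cpp
#include <torch/torch.h>

int main() {
  auto self = torch::randn({8});
  auto values = torch::empty({3});
  auto indices = torch::empty({3}, torch::kLong);
  // Out-variant: destinations lead the argument list, matching the
  // torch::topk_out(values, indices, self, k, dim, largest, sorted) binding.
  torch::topk_out(values, indices, self, /*k=*/3, /*dim=*/0,
                  /*largest=*/true, /*sorted=*/true);
}
```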
@@ -2748,16 +2848,16 @@
return torch::trapz(y, x, dim);
})
.define_singleton_method(
"_triangular_solve",
*[](const Tensor &self, const Tensor &A, bool upper, bool transpose, bool unitriangular) {
- return torch::triangular_solve(self, A, upper, transpose, unitriangular);
+ return wrap(torch::triangular_solve(self, A, upper, transpose, unitriangular));
})
.define_singleton_method(
"_triangular_solve_x",
*[](const Tensor &self, const Tensor &A, bool upper, bool transpose, bool unitriangular, Tensor &X, Tensor &M) {
- return torch::triangular_solve_out(X, M, self, A, upper, transpose, unitriangular);
+ return wrap(torch::triangular_solve_out(X, M, self, A, upper, transpose, unitriangular));
})
.define_singleton_method(
"_tril",
*[](const Tensor &self, int64_t diagonal) {
return torch::tril(self, diagonal);
@@ -2803,21 +2903,21 @@
return torch::unbind(self, dim);
})
.define_singleton_method(
"_unique_consecutive",
*[](const Tensor &self, bool return_inverse, bool return_counts) {
- return torch::unique_consecutive(self, return_inverse, return_counts);
+ return wrap(torch::unique_consecutive(self, return_inverse, return_counts));
})
.define_singleton_method(
"_unique_consecutive_dim",
*[](const Tensor &self, bool return_inverse, bool return_counts, int64_t dim) {
- return torch::unique_consecutive(self, return_inverse, return_counts, dim);
+ return wrap(torch::unique_consecutive(self, return_inverse, return_counts, dim));
})
.define_singleton_method(
"_unique_dim",
*[](const Tensor &self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {
- return torch::unique_dim(self, dim, sorted, return_inverse, return_counts);
+ return wrap(torch::unique_dim(self, dim, sorted, return_inverse, return_counts));
})
.define_singleton_method(
"_unsqueeze",
*[](Tensor &self, int64_t dim) {
return torch::unsqueeze(self, dim);
@@ -2833,15 +2933,15 @@
return torch::var(self, dim, unbiased, keepdim);
})
.define_singleton_method(
"_var_mean",
*[](const Tensor &self, bool unbiased) {
- return torch::var_mean(self, unbiased);
+ return wrap(torch::var_mean(self, unbiased));
})
.define_singleton_method(
"_var_mean_dim",
*[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) {
- return torch::var_mean(self, dim, unbiased, keepdim);
+ return wrap(torch::var_mean(self, dim, unbiased, keepdim));
})
.define_singleton_method(
"_var_out",
*[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim, Tensor &out) {
return torch::var_out(out, self, dim, unbiased, keepdim);