ext/torch/nn_functions.cpp in torch-rb-0.1.5 vs ext/torch/nn_functions.cpp in torch-rb-0.1.6

- old
+ new

@@ -28,28 +28,48 @@
        return torch::adaptive_avg_pool3d_out(out, self, output_size);
      })
    .define_singleton_method(
      "_adaptive_max_pool2d",
      *[](const Tensor &self, IntArrayRef output_size) {
-        return torch::adaptive_max_pool2d(self, output_size);
+        return wrap(torch::adaptive_max_pool2d(self, output_size));
      })
    .define_singleton_method(
      "_adaptive_max_pool2d_out",
      *[](const Tensor &self, IntArrayRef output_size, Tensor &out, Tensor &indices) {
-        return torch::adaptive_max_pool2d_out(out, indices, self, output_size);
+        return wrap(torch::adaptive_max_pool2d_out(out, indices, self, output_size));
      })
    .define_singleton_method(
      "_adaptive_max_pool3d",
      *[](const Tensor &self, IntArrayRef output_size) {
-        return torch::adaptive_max_pool3d(self, output_size);
+        return wrap(torch::adaptive_max_pool3d(self, output_size));
      })
    .define_singleton_method(
      "_adaptive_max_pool3d_out",
      *[](const Tensor &self, IntArrayRef output_size, Tensor &out, Tensor &indices) {
-        return torch::adaptive_max_pool3d_out(out, indices, self, output_size);
+        return wrap(torch::adaptive_max_pool3d_out(out, indices, self, output_size));
      })
    .define_singleton_method(
+      "_avg_pool2d",
+      *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
+        return torch::avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+      })
+    .define_singleton_method(
+      "_avg_pool2d_divisor_override",
+      *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, int64_t divisor_override) {
+        return torch::avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
+      })
+    .define_singleton_method(
+      "_avg_pool3d",
+      *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
+        return torch::avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+      })
+    .define_singleton_method(
+      "_avg_pool3d_divisor_override",
+      *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, int64_t divisor_override) {
+        return torch::avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
+      })
+    .define_singleton_method(
      "_binary_cross_entropy",
      *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction) {
        return torch::binary_cross_entropy(self, target, weight, reduction);
      })
    .define_singleton_method(
@@ -83,26 +103,26 @@
        return torch::elu_out(out, self, alpha, scale, input_scale);
      })
    .define_singleton_method(
      "_fractional_max_pool2d",
      *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor &random_samples) {
-        return torch::fractional_max_pool2d(self, kernel_size, output_size, random_samples);
+        return wrap(torch::fractional_max_pool2d(self, kernel_size, output_size, random_samples));
      })
    .define_singleton_method(
      "_fractional_max_pool2d_output",
      *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor &random_samples, Tensor &output, Tensor &indices) {
-        return torch::fractional_max_pool2d_out(output, indices, self, kernel_size, output_size, random_samples);
+        return wrap(torch::fractional_max_pool2d_out(output, indices, self, kernel_size, output_size, random_samples));
      })
    .define_singleton_method(
      "_fractional_max_pool3d",
      *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor &random_samples) {
-        return torch::fractional_max_pool3d(self, kernel_size, output_size, random_samples);
+        return wrap(torch::fractional_max_pool3d(self, kernel_size, output_size, random_samples));
      })
    .define_singleton_method(
      "_fractional_max_pool3d_output",
      *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor &random_samples, Tensor &output, Tensor &indices) {
-        return torch::fractional_max_pool3d_out(output, indices, self, kernel_size, output_size, random_samples);
+        return wrap(torch::fractional_max_pool3d_out(output, indices, self, kernel_size, output_size, random_samples));
      })
    .define_singleton_method(
      "_gelu",
      *[](const Tensor &self) {
        return torch::gelu(self);
@@ -178,41 +198,41 @@
        return torch::log_sigmoid(self);
      })
    .define_singleton_method(
      "_log_sigmoid_forward",
      *[](const Tensor &self) {
-        return torch::log_sigmoid_forward(self);
+        return wrap(torch::log_sigmoid_forward(self));
      })
    .define_singleton_method(
      "_log_sigmoid_forward_output",
      *[](const Tensor &self, Tensor &output, Tensor &buffer) {
-        return torch::log_sigmoid_forward_out(output, buffer, self);
+        return wrap(torch::log_sigmoid_forward_out(output, buffer, self));
      })
    .define_singleton_method(
      "_log_sigmoid_out",
      *[](const Tensor &self, Tensor &out) {
        return torch::log_sigmoid_out(out, self);
      })
    .define_singleton_method(
      "_max_pool2d_with_indices",
      *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
-        return torch::max_pool2d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
+        return wrap(torch::max_pool2d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode));
      })
    .define_singleton_method(
      "_max_pool2d_with_indices_out",
      *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor &out, Tensor &indices) {
-        return torch::max_pool2d_with_indices_out(out, indices, self, kernel_size, stride, padding, dilation, ceil_mode);
+        return wrap(torch::max_pool2d_with_indices_out(out, indices, self, kernel_size, stride, padding, dilation, ceil_mode));
      })
    .define_singleton_method(
      "_max_pool3d_with_indices",
      *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
-        return torch::max_pool3d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
+        return wrap(torch::max_pool3d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode));
      })
    .define_singleton_method(
      "_max_pool3d_with_indices_out",
      *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor &out, Tensor &indices) {
-        return torch::max_pool3d_with_indices_out(out, indices, self, kernel_size, stride, padding, dilation, ceil_mode);
+        return wrap(torch::max_pool3d_with_indices_out(out, indices, self, kernel_size, stride, padding, dilation, ceil_mode));
      })
    .define_singleton_method(
      "_max_unpool2d",
      *[](const Tensor &self, const Tensor &indices, IntArrayRef output_size) {
        return torch::max_unpool2d(self, indices, output_size);
@@ -268,16 +288,16 @@
        return torch::multilabel_margin_loss(self, target, reduction);
      })
    .define_singleton_method(
      "_multilabel_margin_loss_forward",
      *[](const Tensor &self, const Tensor &target, MyReduction reduction) {
-        return torch::multilabel_margin_loss_forward(self, target, reduction);
+        return wrap(torch::multilabel_margin_loss_forward(self, target, reduction));
      })
    .define_singleton_method(
      "_multilabel_margin_loss_forward_output",
      *[](const Tensor &self, const Tensor &target, MyReduction reduction, Tensor &output, Tensor &is_target) {
-        return torch::multilabel_margin_loss_forward_out(output, is_target, self, target, reduction);
+        return wrap(torch::multilabel_margin_loss_forward_out(output, is_target, self, target, reduction));
      })
    .define_singleton_method(
      "_multilabel_margin_loss_out",
      *[](const Tensor &self, const Tensor &target, MyReduction reduction, Tensor &out) {
        return torch::multilabel_margin_loss_out(out, self, target, reduction);
@@ -293,31 +313,31 @@
        return torch::nll_loss2d(self, target, weight, reduction, ignore_index);
      })
    .define_singleton_method(
      "_nll_loss2d_forward",
      *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index) {
-        return torch::nll_loss2d_forward(self, target, weight, reduction, ignore_index);
+        return wrap(torch::nll_loss2d_forward(self, target, weight, reduction, ignore_index));
      })
    .define_singleton_method(
      "_nll_loss2d_forward_output",
      *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index, Tensor &output, Tensor &total_weight) {
-        return torch::nll_loss2d_forward_out(output, total_weight, self, target, weight, reduction, ignore_index);
+        return wrap(torch::nll_loss2d_forward_out(output, total_weight, self, target, weight, reduction, ignore_index));
      })
    .define_singleton_method(
      "_nll_loss2d_out",
      *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index, Tensor &out) {
        return torch::nll_loss2d_out(out, self, target, weight, reduction, ignore_index);
      })
    .define_singleton_method(
      "_nll_loss_forward",
      *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index) {
-        return torch::nll_loss_forward(self, target, weight, reduction, ignore_index);
+        return wrap(torch::nll_loss_forward(self, target, weight, reduction, ignore_index));
      })
    .define_singleton_method(
      "_nll_loss_forward_output",
      *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index, Tensor &output, Tensor &total_weight) {
-        return torch::nll_loss_forward_out(output, total_weight, self, target, weight, reduction, ignore_index);
+        return wrap(torch::nll_loss_forward_out(output, total_weight, self, target, weight, reduction, ignore_index));
      })
    .define_singleton_method(
      "_nll_loss_out",
      *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index, Tensor &out) {
        return torch::nll_loss_out(out, self, target, weight, reduction, ignore_index);
@@ -468,16 +488,16 @@
        return torch::thnn_conv2d(self, weight, kernel_size, bias, stride, padding);
      })
    .define_singleton_method(
      "_thnn_conv2d_forward",
      *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding) {
-        return torch::thnn_conv2d_forward(self, weight, kernel_size, bias, stride, padding);
+        return wrap(torch::thnn_conv2d_forward(self, weight, kernel_size, bias, stride, padding));
      })
    .define_singleton_method(
      "_thnn_conv2d_forward_output",
      *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, Tensor &output, Tensor &finput, Tensor &fgrad_input) {
-        return torch::thnn_conv2d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding);
+        return wrap(torch::thnn_conv2d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding));
      })
    .define_singleton_method(
      "_thnn_conv2d_out",
      *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, Tensor &out) {
        return torch::thnn_conv2d_out(out, self, weight, kernel_size, bias, stride, padding);
@@ -488,15 +508,15 @@
        return torch::thnn_conv3d(self, weight, kernel_size, bias, stride, padding);
      })
    .define_singleton_method(
      "_thnn_conv3d_forward",
      *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding) {
-        return torch::thnn_conv3d_forward(self, weight, kernel_size, bias, stride, padding);
+        return wrap(torch::thnn_conv3d_forward(self, weight, kernel_size, bias, stride, padding));
      })
    .define_singleton_method(
      "_thnn_conv3d_forward_output",
      *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, Tensor &output, Tensor &finput, Tensor &fgrad_input) {
-        return torch::thnn_conv3d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding);
+        return wrap(torch::thnn_conv3d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding));
      })
    .define_singleton_method(
      "_thnn_conv3d_out",
      *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, Tensor &out) {
        return torch::thnn_conv3d_out(out, self, weight, kernel_size, bias, stride, padding);
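
Every "- return torch::..." / "+ return wrap(torch::...)" pair above covers an ATen call that returns more than one tensor: pooling variants that also return indices, forward variants that return scratch buffers, and losses that return auxiliary outputs. Returning the raw std::tuple from the lambda would leave Rice without a usable conversion, so 0.1.6 routes those results through a wrap helper that turns the tuple into a Ruby value the caller can destructure. The helper itself is not shown in this diff; the sketch below only illustrates the pattern, assuming Rice 2.x headers and a Tensor type already registered with Rice, and is not torch-rb's actual implementation.

// Sketch only: convert a (values, indices) tuple from ATen into a Ruby Array.
// Assumes Rice 2.x and that torch::Tensor is exposed as a Rice Data_Type;
// the real torch-rb helper (and its overloads for other tuple shapes) may differ.
#include <tuple>
#include <torch/torch.h>
#include <rice/Array.hpp>
#include <rice/Object.hpp>

using torch::Tensor;

inline Rice::Object wrap(std::tuple<Tensor, Tensor> x) {
  Rice::Array a;
  a.push(std::get<0>(x)); // primary output (e.g. pooled values)
  a.push(std::get<1>(x)); // auxiliary output (e.g. argmax indices)
  return a;               // Rice::Array is an Object, so it converts directly
}

With a helper along these lines, the Ruby side can split the result of a binding like _max_pool2d_with_indices in one step (output, indices = ...) instead of receiving an opaque C++ tuple.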