ext/torch/ext.cpp in torch-rb-0.1.4 vs ext/torch/ext.cpp in torch-rb-0.1.5
- old
+ new
@@ -4,262 +4,50 @@
#include <rice/Array.hpp>
#include <rice/Class.hpp>
#include <rice/Constructor.hpp>
-using namespace Rice;
+#include "templates.hpp"
-template<>
-inline
-long long from_ruby<long long>(Object x)
-{
- return NUM2LL(x);
-}
+// generated with:
+// rake generate:functions
+#include "torch_functions.hpp"
+#include "tensor_functions.hpp"
+#include "nn_functions.hpp"
-template<>
-inline
-Object to_ruby<long long>(long long const & x)
-{
- return LL2NUM(x);
-}
+using namespace Rice;
-template<>
-inline
-unsigned long long from_ruby<unsigned long long>(Object x)
-{
- return NUM2ULL(x);
-}
-
-template<>
-inline
-Object to_ruby<unsigned long long>(unsigned long long const & x)
-{
- return ULL2NUM(x);
-}
-
-template<>
-inline
-short from_ruby<short>(Object x)
-{
- return NUM2SHORT(x);
-}
-
-template<>
-inline
-Object to_ruby<short>(short const & x)
-{
- return INT2NUM(x);
-}
-
-template<>
-inline
-unsigned short from_ruby<unsigned short>(Object x)
-{
- return NUM2USHORT(x);
-}
-
-template<>
-inline
-Object to_ruby<unsigned short>(unsigned short const & x)
-{
- return UINT2NUM(x);
-}
-
-// need to wrap torch::IntArrayRef() since
-// it doesn't own the underlying data
-class IntArrayRef {
- std::vector<int64_t> vec;
- public:
- IntArrayRef(Object o) {
- Array a = Array(o);
- for (size_t i = 0; i < a.size(); i++) {
- vec.push_back(from_ruby<int64_t>(a[i]));
- }
- }
- operator torch::IntArrayRef() {
- return torch::IntArrayRef(vec);
- }
-};
-
-template<>
-inline
-IntArrayRef from_ruby<IntArrayRef>(Object x)
-{
- return IntArrayRef(x);
-}
-
-// for now
-class Scalar {
- torch::Scalar value;
- public:
- Scalar(Object o) {
- // TODO cast based on Ruby type
- if (o.rb_type() == T_FIXNUM) {
- value = torch::Scalar(from_ruby<int64_t>(o));
- } else {
- value = torch::Scalar(from_ruby<float>(o));
- }
- }
- operator torch::Scalar() {
- return value;
- }
-};
-
-template<>
-inline
-Scalar from_ruby<Scalar>(Object x)
-{
- return Scalar(x);
-}
-
-class TensorList {
- std::vector<torch::Tensor> vec;
- public:
- TensorList(Object o) {
- Array a = Array(o);
- for (size_t i = 0; i < a.size(); i++) {
- vec.push_back(from_ruby<torch::Tensor>(a[i]));
- }
- }
- operator torch::TensorList() {
- return torch::TensorList(vec);
- }
-};
-
-template<>
-inline
-TensorList from_ruby<TensorList>(Object x)
-{
- return TensorList(x);
-}
-
-class FanModeType {
- std::string s;
- public:
- FanModeType(Object o) {
- s = String(o).str();
- }
- // TODO switch FanModeType after LibTorch 1.4 release
- operator torch::nn::init::FanMode() {
- if (s == "fan_in") {
- return torch::nn::init::FanMode::FanIn;
- } else if (s == "fan_out") {
- return torch::nn::init::FanMode::FanOut;
- } else {
- throw std::runtime_error("Unsupported nonlinearity type: " + s);
- }
- }
-};
-
-template<>
-inline
-FanModeType from_ruby<FanModeType>(Object x)
-{
- return FanModeType(x);
-}
-
-class NonlinearityType {
- std::string s;
- public:
- NonlinearityType(Object o) {
- s = String(o).str();
- }
- // TODO switch NonlinearityType after LibTorch 1.4 release
- operator torch::nn::init::Nonlinearity() {
- if (s == "linear") {
- return torch::nn::init::Nonlinearity::Linear;
- } else if (s == "conv1d") {
- return torch::nn::init::Nonlinearity::Conv1D;
- } else if (s == "conv2d") {
- return torch::nn::init::Nonlinearity::Conv2D;
- } else if (s == "conv3d") {
- return torch::nn::init::Nonlinearity::Conv3D;
- } else if (s == "conv_transpose1d") {
- return torch::nn::init::Nonlinearity::ConvTranspose1D;
- } else if (s == "conv_transpose2d") {
- return torch::nn::init::Nonlinearity::ConvTranspose2D;
- } else if (s == "conv_transpose3d") {
- return torch::nn::init::Nonlinearity::ConvTranspose3D;
- } else if (s == "sigmoid") {
- return torch::nn::init::Nonlinearity::Sigmoid;
- } else if (s == "tanh") {
- return torch::nn::init::Nonlinearity::Tanh;
- } else if (s == "relu") {
- return torch::nn::init::Nonlinearity::ReLU;
- } else if (s == "leaky_relu") {
- return torch::nn::init::Nonlinearity::LeakyReLU;
- } else {
- throw std::runtime_error("Unsupported nonlinearity type: " + s);
- }
- }
-};
-
-template<>
-inline
-NonlinearityType from_ruby<NonlinearityType>(Object x)
-{
- return NonlinearityType(x);
-}
-
-class MyReduction {
- Object value;
- public:
- MyReduction(Object o) {
- value = o;
- }
- operator int64_t() {
- if (value.is_nil()) {
- return Reduction::None;
- }
-
- std::string s = String(value).str();
- if (s == "mean") {
- return Reduction::Mean;
- } else if (s == "sum") {
- return Reduction::Sum;
- } else {
- throw std::runtime_error("Unsupported reduction: " + s);
- }
- }
-};
-
-template<>
-inline
-MyReduction from_ruby<MyReduction>(Object x)
-{
- return MyReduction(x);
-}
-
-typedef torch::Tensor Tensor;
-
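// note: the wrapper classes and from_ruby/to_ruby specializations removed
// above are still referenced by bindings kept below (IntArrayRef at
// avg_pool2d and _from_blob, MyReduction at _binary_cross_entropy_with_logits,
// NonlinearityType at _calculate_gain), so they presumably moved into the
// new templates.hpp include. A minimal sketch of that assumed layout:
//
// // templates.hpp (assumed; not the actual 0.1.5 file)
// #pragma once
// typedef torch::Tensor Tensor;
// class IntArrayRef { /* as removed above */ };
// class Scalar { /* as removed above */ };
// class TensorList { /* as removed above */ };
// class MyReduction { /* as removed above */ };
// // plus the integer from_ruby/to_ruby specializations removed above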
Object tensor_array(std::tuple<torch::Tensor, torch::Tensor> x) {
Array a;
a.push(to_ruby<torch::Tensor>(std::get<0>(x)));
a.push(to_ruby<torch::Tensor>(std::get<1>(x)));
return Object(a);
}
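// tensor_array exists because Rice has no built-in conversion for
// std::tuple; ops that return (values, indices) pairs, such as _topk and
// _max_out below, go through it. A variadic generalization would look like
// the sketch below (assumptions: C++17 for std::apply and fold expressions,
// and that every tuple element is a torch::Tensor; the file itself only
// needs the fixed two-tensor helper above):
//
// template <typename... Ts>
// Object tensor_tuple_array(const std::tuple<Ts...>& x) {
//   Array a;
//   std::apply([&a](const auto&... t) {
//     (a.push(to_ruby<torch::Tensor>(t)), ...);  // push each tensor in order
//   }, x);
//   return Object(a);
// }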
extern "C"
void Init_ext()
{
- Module rb_mTorch = define_module("Torch")
- .define_singleton_method(
+ Module rb_mTorch = define_module("Torch");
+ add_torch_functions(rb_mTorch);
+
+ Class rb_cTensor = define_class_under<torch::Tensor>(rb_mTorch, "Tensor");
+ add_tensor_functions(rb_cTensor);
+
+ Module rb_mNN = define_module_under(rb_mTorch, "NN");
+ add_nn_functions(rb_mNN);
+
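// sketch: presumed shape of a generated registration body, by analogy with
// the handwritten bindings it replaces (e.g. the removed _abs further down);
// the real torch_functions.hpp is produced by `rake generate:functions` and
// may differ:
//
// void add_torch_functions(Module m) {
//   m
//     .define_singleton_method(
//       "_abs",
//       *[](const Tensor &input) {
//         return torch::abs(input);
//       })
//     // ... one binding per supported torch function ...
//     ;
// }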
+ rb_mTorch.define_singleton_method(
"grad_enabled?",
*[]() {
return torch::GradMode::is_enabled();
})
.define_singleton_method(
"_set_grad_enabled",
*[](bool enabled) {
torch::GradMode::set_enabled(enabled);
})
.define_singleton_method(
- "floating_point?",
- *[](Tensor& input) {
- return torch::is_floating_point(input);
- })
- .define_singleton_method(
"manual_seed",
*[](uint64_t seed) {
return torch::manual_seed(seed);
})
// begin tensor creation
@@ -343,202 +131,30 @@
"_sum_dim",
*[](Tensor& input, int64_t dim, bool keepdim) {
return torch::sum(input, dim, keepdim);
})
.define_singleton_method(
- "_argmax",
- *[](Tensor& input) {
- return torch::argmax(input);
- })
- .define_singleton_method(
- "_argmax_dim",
- *[](Tensor& input, int64_t dim, bool keepdim) {
- return torch::argmax(input, dim, keepdim);
- })
- .define_singleton_method(
- "_cat",
- *[](TensorList tensors, int64_t dim) {
- return torch::cat(tensors, dim);
- })
- .define_singleton_method(
- "_norm",
- *[](Tensor& input) {
- return torch::norm(input);
- })
- .define_singleton_method(
- "_min",
- *[](Tensor& input) {
- return torch::min(input);
- })
- .define_singleton_method(
- "_max",
- *[](Tensor& input) {
- return torch::max(input);
- })
- .define_singleton_method(
"_max_out",
*[](Tensor &max, Tensor &max_indices, const Tensor &input, int64_t dim, bool keepdim) {
return tensor_array(torch::_max_out(max, max_indices, input, dim, keepdim));
})
.define_singleton_method(
- "_sqrt",
- *[](Tensor& input) {
- return torch::sqrt(input);
- })
- .define_singleton_method(
- "_exp",
- *[](Tensor& input) {
- return torch::exp(input);
- })
- .define_singleton_method(
- "_log",
- *[](Tensor& input) {
- return torch::log(input);
- })
- .define_singleton_method(
- "_sign",
- *[](Tensor& input) {
- return torch::sign(input);
- })
- .define_singleton_method(
- "_unsqueeze",
- *[](Tensor& input, int64_t dim) {
- return torch::unsqueeze(input, dim);
- })
- .define_singleton_method(
- "_dot",
- *[](Tensor& input, Tensor& tensor) {
- return torch::dot(input, tensor);
- })
- .define_singleton_method(
- "_matmul",
- *[](Tensor& input, Tensor& other) {
- return torch::matmul(input, other);
- })
- .define_singleton_method(
- "_eq",
- *[](Tensor& input, Tensor& other) {
- return torch::eq(input, other);
- })
- .define_singleton_method(
- "_gt",
- // TODO support tensors
- *[](Tensor& input, Scalar other) {
- return torch::gt(input, other);
- })
- .define_singleton_method(
- "_lt",
- // TODO support tensors
- *[](Tensor& input, Scalar other) {
- return torch::lt(input, other);
- })
- .define_singleton_method(
- "_add",
- *[](Tensor& input, Tensor& other) {
- return torch::add(input, other);
- })
- .define_singleton_method(
- "_add_scalar",
- *[](Tensor& input, Scalar other) {
- return torch::add(input, other);
- })
- .define_singleton_method(
- "_add_out",
- *[](Tensor& out, Tensor& input, Tensor& other) {
- return torch::add_out(out, input, other);
- })
- .define_singleton_method(
- "_sub",
- *[](Tensor& input, Tensor& other) {
- return torch::sub(input, other);
- })
- .define_singleton_method(
- "_sub_scalar",
- *[](Tensor& input, Scalar other) {
- return torch::sub(input, other);
- })
- .define_singleton_method(
- "_mul",
- *[](Tensor& input, Tensor& other) {
- return torch::mul(input, other);
- })
- .define_singleton_method(
- "_mul_scalar",
- *[](Tensor& input, Scalar other) {
- return torch::mul(input, other);
- })
- .define_singleton_method(
- "_div",
- *[](Tensor& input, Tensor& other) {
- return torch::div(input, other);
- })
- .define_singleton_method(
- "_div_scalar",
- *[](Tensor& input, Scalar other) {
- return torch::div(input, other);
- })
- .define_singleton_method(
- "_remainder",
- *[](Tensor& input, Tensor& other) {
- return torch::remainder(input, other);
- })
- .define_singleton_method(
- "_remainder_scalar",
- *[](Tensor& input, Scalar other) {
- return torch::remainder(input, other);
- })
- .define_singleton_method(
- "_pow",
- *[](Tensor& input, Scalar exponent) {
- return torch::pow(input, exponent);
- })
- .define_singleton_method(
"_topk",
*[](Tensor& input, int64_t k) {
return tensor_array(torch::topk(input, k));
})
.define_singleton_method(
- "_sigmoid",
- *[](Tensor& input) {
- return torch::sigmoid(input);
- })
- .define_singleton_method(
- "_softplus",
- *[](const Tensor &input, Scalar beta, Scalar threshold) {
- return torch::softplus(input, beta, threshold);
- })
- .define_singleton_method(
"_softmax",
*[](const Tensor &input, int64_t dim) {
return torch::softmax(input, dim);
})
.define_singleton_method(
"_log_softmax",
*[](Tensor& input, int64_t dim) {
return torch::log_softmax(input, dim);
})
.define_singleton_method(
- "_abs",
- *[](Tensor& input) {
- return torch::abs(input);
- })
- .define_singleton_method(
- "_neg",
- *[](Tensor& input) {
- return torch::neg(input);
- })
- .define_singleton_method(
- "_reshape",
- *[](Tensor& input, IntArrayRef shape) {
- return torch::reshape(input, shape);
- })
- .define_singleton_method(
- "_flatten",
- *[](Tensor& input, int64_t start_dim, int64_t end_dim) {
- return torch::flatten(input, start_dim, end_dim);
- })
- .define_singleton_method(
"relu",
*[](Tensor& input) {
return torch::relu(input);
})
.define_singleton_method(
@@ -577,109 +193,14 @@
"avg_pool2d",
*[](Tensor& input, IntArrayRef kernel_size) {
return torch::avg_pool2d(input, kernel_size);
})
.define_singleton_method(
- "_dropout",
- *[](Tensor& input, float p, bool train) {
- return torch::dropout(input, p, train);
+ "_binary_cross_entropy_with_logits",
+ *[](const Tensor &input, const Tensor &target, OptionalTensor weight, OptionalTensor pos_weight, MyReduction reduction) {
+ return torch::binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction);
})
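// sketch: OptionalTensor is new in this version and not defined in this
// file; assuming it lives in templates.hpp and follows the same pattern as
// the wrapper classes removed above, it would look roughly like:
//
// class OptionalTensor {
//   torch::Tensor value;
//   public:
//     OptionalTensor(Object o) {
//       value = o.is_nil() ? torch::Tensor() : from_ruby<torch::Tensor>(o);
//     }
//     operator torch::Tensor() { return value; }
// };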
- .define_singleton_method(
- "_dropout!",
- *[](Tensor& input, float p, bool train) {
- return torch::dropout_(input, p, train);
- })
- .define_singleton_method(
- "_feature_dropout",
- *[](Tensor& input, float p, bool train) {
- return torch::feature_dropout(input, p, train);
- })
- .define_singleton_method(
- "_feature_dropout!",
- *[](Tensor& input, float p, bool train) {
- return torch::feature_dropout_(input, p, train);
- })
- .define_singleton_method(
- "_alpha_dropout",
- *[](Tensor& input, float p, bool train) {
- return torch::alpha_dropout(input, p, train);
- })
- .define_singleton_method(
- "_alpha_dropout!",
- *[](Tensor& input, float p, bool train) {
- return torch::alpha_dropout_(input, p, train);
- })
- .define_singleton_method(
- "_feature_alpha_dropout",
- *[](Tensor& input, float p, bool train) {
- return torch::feature_alpha_dropout(input, p, train);
- })
- .define_singleton_method(
- "_feature_alpha_dropout!",
- *[](Tensor& input, float p, bool train) {
- return torch::feature_alpha_dropout_(input, p, train);
- })
- // sparse layers
- .define_singleton_method(
- "_embedding",
- // weight and indices are swapped from Python interface
- *[](const Tensor &indices, const Tensor &weight, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) {
- return torch::embedding(weight, indices, padding_idx, scale_grad_by_freq, sparse);
- })
- .define_singleton_method(
- "_embedding_bag",
- // weight and indices are swapped from Python interface
- *[](const Tensor &weight, const Tensor &indices, const Tensor &offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const Tensor &per_sample_weights) {
- return torch::embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights);
- })
- // distance functions
- .define_singleton_method(
- "_cosine_similarity",
- *[](const Tensor &x1, const Tensor &x2, int64_t dim, double eps) {
- return torch::cosine_similarity(x1, x2, dim, eps);
- })
- .define_singleton_method(
- "_pairwise_distance",
- *[](const Tensor &x1, const Tensor &x2, double p, double eps, bool keepdim) {
- return torch::pairwise_distance(x1, x2, p, eps, keepdim);
- })
- // loss functions
- .define_singleton_method(
- "binary_cross_entropy",
- *[](Tensor& input, Tensor& target, MyReduction reduction) {
- return torch::binary_cross_entropy(input, target, {}, reduction);
- })
- .define_singleton_method(
- "ctc_loss",
- *[](const Tensor &log_probs, const Tensor &targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, MyReduction reduction, bool zero_infinity) {
- return torch::ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
- })
- .define_singleton_method(
- "kl_div",
- *[](Tensor& input, Tensor& target, MyReduction reduction) {
- return torch::kl_div(input, target, reduction);
- })
- .define_singleton_method(
- "l1_loss",
- *[](Tensor& input, Tensor& target, MyReduction reduction) {
- return torch::l1_loss(input, target, reduction);
- })
- .define_singleton_method(
- "mse_loss",
- *[](Tensor& input, Tensor& target, MyReduction reduction) {
- return torch::mse_loss(input, target, reduction);
- })
- .define_singleton_method(
- "nll_loss",
- *[](Tensor& input, Tensor& target, MyReduction reduction, int64_t ignore_index) {
- return torch::nll_loss(input, target, {}, reduction, ignore_index);
- })
- .define_singleton_method(
- "poisson_nll_loss",
- *[](const Tensor &input, const Tensor &target, bool log_input, bool full, double eps, MyReduction reduction) {
- return torch::poisson_nll_loss(input, target, log_input, full, eps, reduction);
- })
.define_singleton_method("numel", &torch::numel)
.define_singleton_method(
"_from_blob",
*[](String s, IntArrayRef size, const torch::TensorOptions &options) {
void *data = const_cast<char *>(s.c_str());
@@ -701,11 +222,11 @@
t = torch::tensor(vec, options);
}
return t.reshape(size);
});
- Class rb_cTensor = define_class_under<torch::Tensor>(rb_mTorch, "Tensor")
+ rb_cTensor
.define_method("cuda?", &torch::Tensor::is_cuda)
.define_method("distributed?", &torch::Tensor::is_distributed)
.define_method("complex?", &torch::Tensor::is_complex)
.define_method("floating_point?", &torch::Tensor::is_floating_point)
.define_method("signed?", &torch::Tensor::is_signed)
@@ -739,20 +260,10 @@
"detach!",
*[](Tensor& self) {
return self.detach_();
})
.define_method(
- "_select",
- *[](Tensor& self, int64_t dim, int64_t index) {
- return self.select(dim, index);
- })
- .define_method(
- "_slice",
- *[](Tensor& self, int64_t dim, int64_t start, int64_t end, int64_t step) {
- return self.slice(dim, start, end, step);
- })
- .define_method(
"_requires_grad!",
*[](Tensor& self, bool requires_grad) {
return self.set_requires_grad(requires_grad);
})
.define_method(
@@ -788,15 +299,10 @@
std::stringstream s;
s << self.device();
return s.str();
})
.define_method(
- "_view",
- *[](Tensor& self, IntArrayRef size) {
- return self.view(size);
- })
- .define_method(
"resize_as!",
*[](Tensor& self, Tensor& other) {
return self.resize_as_(other);
})
.define_method(
@@ -808,25 +314,10 @@
"relu!",
*[](Tensor& self) {
return self.relu_();
})
.define_method(
- "_add!",
- *[](Tensor& self, Tensor& other) {
- return self.add_(other);
- })
- .define_method(
- "_add_alpha!",
- *[](Tensor& self, Tensor& other, Scalar alpha) {
- return self.add_(other, alpha);
- })
- .define_method(
- "_add_scalar!",
- *[](Tensor& self, Scalar other) {
- return self.add_(other);
- })
- .define_method(
"normal!",
*[](Tensor& self, double mean, double std) {
return self.normal_(mean, std);
})
.define_method(
@@ -838,20 +329,10 @@
"sub!",
*[](Tensor& self, Tensor& other) {
return self.sub_(other);
})
.define_method(
- "_mul!",
- *[](Tensor& self, Tensor& other) {
- return self.mul_(other);
- })
- .define_method(
- "_mul_scalar!",
- *[](Tensor& self, Scalar other) {
- return self.mul_(other);
- })
- .define_method(
"div!",
*[](Tensor& self, Tensor& other) {
return self.div_(other);
})
.define_method(
@@ -878,11 +359,11 @@
"data",
*[](Tensor& self) {
return self.data();
})
.define_method(
- "_data",
+ "_flat_data",
*[](Tensor& self) {
Array a;
auto dtype = self.dtype();
// TODO DRY this up if someone knows C++
@@ -930,15 +411,10 @@
throw std::runtime_error("Unsupported type");
}
return a;
})
.define_method(
- "_size",
- *[](Tensor& self, int i) {
- return self.size(i);
- })
- .define_method(
"_to",
*[](Tensor& self, torch::Device device, int dtype, bool non_blocking, bool copy) {
return self.to(device, (torch::ScalarType) dtype, non_blocking, copy);
})
.define_singleton_method(
@@ -987,11 +463,9 @@
.define_method(
"requires_grad",
*[](torch::TensorOptions& self, bool requires_grad) {
return self.requires_grad(requires_grad);
});
-
- Module rb_mNN = define_module_under(rb_mTorch, "NN");
Module rb_mInit = define_module_under(rb_mNN, "Init")
.define_singleton_method(
"_calculate_gain",
*[](NonlinearityType nonlinearity, double param) {