ext/torch/tensor.cpp in torch-rb-0.8.3 vs ext/torch/tensor.cpp in torch-rb-0.9.0

- old
+ new

@@ -105,11 +105,11 @@
     "_backward(Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False)"
   });
   ParsedArgs<4> parsed_args;
   auto _r = parser.parse(self_, argc, argv, parsed_args);
   // _backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
-  auto dispatch__backward = [](const Tensor & self, TensorList inputs, const OptionalTensor & gradient, c10::optional<bool> retain_graph, bool create_graph) -> void {
+  auto dispatch__backward = [](const Tensor & self, TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph) -> void {
     // in future, release GVL
     self._backward(inputs, gradient, retain_graph, create_graph);
   };
   dispatch__backward(self, {}, _r.optionalTensor(0), _r.toBoolOptional(1), _r.toBool(2));
   RETURN_NIL
@@ -123,16 +123,16 @@
   THPVariableClass = rb_cTensor.value();

   rb_define_method(rb_cTensor, "backward", (VALUE (*)(...)) tensor__backward, -1);

   rb_cTensor
-    .define_method("cuda?", &torch::Tensor::is_cuda)
-    .define_method("sparse?", &torch::Tensor::is_sparse)
-    .define_method("quantized?", &torch::Tensor::is_quantized)
-    .define_method("dim", &torch::Tensor::dim)
-    .define_method("numel", &torch::Tensor::numel)
-    .define_method("element_size", &torch::Tensor::element_size)
-    .define_method("requires_grad", &torch::Tensor::requires_grad)
+    .define_method("cuda?", [](Tensor& self) { return self.is_cuda(); })
+    .define_method("sparse?", [](Tensor& self) { return self.is_sparse(); })
+    .define_method("quantized?", [](Tensor& self) { return self.is_quantized(); })
+    .define_method("dim", [](Tensor& self) { return self.dim(); })
+    .define_method("numel", [](Tensor& self) { return self.numel(); })
+    .define_method("element_size", [](Tensor& self) { return self.element_size(); })
+    .define_method("requires_grad", [](Tensor& self) { return self.requires_grad(); })
     .define_method(
       "_size",
       [](Tensor& self, int64_t dim) { return self.size(dim); })
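
Context for the first hunk: the gradient parameter moves from the `OptionalTensor` alias to an explicit `const c10::optional<at::Tensor> &`, the spelling LibTorch uses for a possibly-absent tensor argument. A minimal sketch of the same parameter style, assuming a standalone LibTorch setup; `backward_with_default` is a hypothetical helper for illustration, not torch-rb code:

  #include <iostream>
  #include <torch/torch.h>

  // Same shape as the dispatch__backward lambda's gradient parameter:
  // the caller may pass a tensor or c10::nullopt.
  void backward_with_default(
      const at::Tensor& self,
      const c10::optional<at::Tensor>& gradient) {
    // If no gradient was supplied, fall back to ones_like(self).
    at::Tensor grad = gradient.has_value() ? *gradient : torch::ones_like(self);
    self.backward(grad);
  }

  int main() {
    auto x = torch::randn({3}, torch::requires_grad());
    auto y = (x * x).sum();
    backward_with_default(y, c10::nullopt);  // gradient omitted
    std::cout << x.grad() << std::endl;
  }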
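
Context for the second hunk: each `define_method` call replaces a member-function pointer with a lambda that takes the receiver explicitly, the functional binding style Rice 4 supports; the lambda form also sidesteps any ambiguity in taking the address of a const-qualified method. A minimal sketch of the same pattern, assuming Rice 4 headers and a made-up `Point` class (nothing here is torch-rb code):

  #include <rice/rice.hpp>

  class Point {
   public:
    Point(double x, double y) : x_(x), y_(y) {}
    double x() const { return x_; }
    double y() const { return y_; }
   private:
    double x_, y_;
  };

  extern "C" void Init_point() {
    Rice::define_class<Point>("Point")
      .define_constructor(Rice::Constructor<Point, double, double>())
      // Old torch-rb style: pass the member-function pointer directly.
      .define_method("x", &Point::x)
      // New torch-rb style: a lambda that receives self explicitly.
      .define_method("y", [](Point& self) { return self.y(); });
  }

Both styles expose the C++ method as a Ruby instance method (`Point.new(1, 2).y`); the lambda simply makes the receiver and return value explicit at the binding site.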