lib/torch.rb in torch-rb-0.3.6 (-, old) vs lib/torch.rb in torch-rb-0.3.7 (+, new)
@@ -172,10 +172,13 @@
require "torch/nn/poisson_nll_loss"
require "torch/nn/smooth_l1_loss"
require "torch/nn/soft_margin_loss"
require "torch/nn/triplet_margin_loss"
+# nn vision
+require "torch/nn/upsample"
+
# nn other
require "torch/nn/functional"
require "torch/nn/init"
# utils
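
The first hunk adds an "nn vision" group, starting with Upsample. A minimal usage sketch, assuming the port mirrors PyTorch's nn.Upsample keyword arguments (scale_factor, mode) and torch-rb's usual Module#call interface:

    require "torch"

    # 1x1x2x2 input (NCHW); nearest-neighbor upsampling doubles H and W
    x = Torch.tensor([[[[1.0, 2.0], [3.0, 4.0]]]])
    up = Torch::NN::Upsample.new(scale_factor: 2, mode: "nearest")
    up.call(x).shape # => [1, 1, 4, 4]
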
@@ -194,10 +197,36 @@
def message
"This feature has not been implemented yet. Consider submitting a PR."
end
end
+ # legacy
+ # but may make it easier to port tutorials
+ module Autograd
+ class Variable
+ def self.new(x)
+ raise ArgumentError, "Variable data has to be a tensor, but got #{x.class.name}" unless x.is_a?(Tensor)
+ warn "[torch] The Variable API is deprecated. Use tensors with requires_grad: true instead."
+ x
+ end
+ end
+ end
+
+ # TODO move to C++
+ class ByteStorage
+ # private
+ attr_reader :bytes
+
+ def initialize(bytes)
+ @bytes = bytes
+ end
+
+ def self.from_buffer(bytes)
+ new(bytes)
+ end
+ end
+
# keys: https://pytorch.org/docs/stable/tensor_attributes.html#torch.torch.dtype
# values: https://github.com/pytorch/pytorch/blob/master/c10/core/ScalarType.h
DTYPE_TO_ENUM = {
uint8: 0,
int8: 1,
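
Two things to note in the hunk above. Variable is a pass-through shim: it validates that the argument is a Tensor, prints a deprecation warning, and returns the tensor itself, so tutorial code written against PyTorch's old Variable API keeps running. ByteStorage, pending the TODO to move it to C++, is just a wrapper around a binary string. A sketch of both, using only the behavior shown in this diff:

    v = Torch::Autograd::Variable.new(Torch.tensor([1.0, 2.0]))
    # warns: [torch] The Variable API is deprecated. Use tensors with requires_grad: true instead.
    v.class # => Torch::Tensor, not a Variable

    Torch::Autograd::Variable.new([1.0, 2.0])
    # => ArgumentError (Variable data has to be a tensor, but got Array)

    storage = Torch::ByteStorage.from_buffer("\x01\x02\x03".b)
    storage.bytes.bytesize # => 3
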
@@ -222,21 +251,27 @@
qint32: 14,
bfloat16: 15
}
ENUM_TO_DTYPE = DTYPE_TO_ENUM.map(&:reverse).to_h
+ TENSOR_TYPE_CLASSES = []
+
def self._make_tensor_class(dtype, cuda = false)
cls = Class.new
device = cuda ? "cuda" : "cpu"
cls.define_singleton_method("new") do |*args|
if args.size == 1 && args.first.is_a?(Tensor)
args.first.send(dtype).to(device)
+ elsif args.size == 1 && args.first.is_a?(ByteStorage) && dtype == :uint8
+ bytes = args.first.bytes
+ Torch._from_blob(bytes, [bytes.bytesize], TensorOptions.new.dtype(DTYPE_TO_ENUM[dtype]))
elsif args.size == 1 && args.first.is_a?(Array)
Torch.tensor(args.first, dtype: dtype, device: device)
else
Torch.empty(*args, dtype: dtype, device: device)
end
end
+ TENSOR_TYPE_CLASSES << cls
cls
end
DTYPE_TO_CLASS = {
float32: "FloatTensor",
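
The last hunk teaches the generated tensor classes to accept a ByteStorage, but only for the :uint8 class, where the raw bytes map directly onto a flat 1-D tensor via Torch._from_blob. It also registers every generated class in TENSOR_TYPE_CLASSES, presumably so they can be recognized elsewhere. A sketch of the new path, assuming :uint8 maps to a ByteTensor class as in PyTorch (image.raw is a hypothetical file; any binary string works):

    bytes = File.binread("image.raw")
    t = Torch::ByteTensor.new(Torch::ByteStorage.from_buffer(bytes))
    t.dtype # => :uint8
    t.shape # => [bytes.bytesize], one element per byte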