lib/torch.rb in torch-rb-0.1.4 vs lib/torch.rb in torch-rb-0.1.5
- only in torch-rb-0.1.4 (removed)
+ only in torch-rb-0.1.5 (added)
@@ -1,8 +1,13 @@
# ext
require "torch/ext"
+# native functions
+require "torch/native/generator"
+require "torch/native/parser"
+require "torch/native/dispatcher"
+
# modules
require "torch/inspector"
require "torch/tensor"
require "torch/version"
@@ -37,10 +42,14 @@
require "torch/nn/max_poolnd"
require "torch/nn/max_pool2d"
require "torch/nn/avg_poolnd"
require "torch/nn/avg_pool2d"
+# nn recurrent layers
+require "torch/nn/rnn_base"
+require "torch/nn/rnn"
+
# nn linear layers
require "torch/nn/bilinear"
require "torch/nn/identity"
require "torch/nn/linear"
@@ -75,27 +84,27 @@
# nn loss functions
require "torch/nn/loss"
require "torch/nn/weighted_loss"
require "torch/nn/bce_loss"
-# require "torch/nn/bce_with_logits_loss"
-# require "torch/nn/cosine_embedding_loss"
+require "torch/nn/bce_with_logits_loss"
+require "torch/nn/cosine_embedding_loss"
require "torch/nn/cross_entropy_loss"
require "torch/nn/ctc_loss"
-# require "torch/nn/hinge_embedding_loss"
+require "torch/nn/hinge_embedding_loss"
require "torch/nn/kl_div_loss"
require "torch/nn/l1_loss"
-# require "torch/nn/margin_ranking_loss"
+require "torch/nn/margin_ranking_loss"
require "torch/nn/mse_loss"
-# require "torch/nn/multi_label_margin_loss"
-# require "torch/nn/multi_label_soft_margin_loss"
-# require "torch/nn/multi_margin_loss"
+require "torch/nn/multi_label_margin_loss"
+require "torch/nn/multi_label_soft_margin_loss"
+require "torch/nn/multi_margin_loss"
require "torch/nn/nll_loss"
require "torch/nn/poisson_nll_loss"
-# require "torch/nn/smooth_l1_loss"
-# require "torch/nn/soft_margin_loss"
-# require "torch/nn/triplet_margin_loss"
+require "torch/nn/smooth_l1_loss"
+require "torch/nn/soft_margin_loss"
+require "torch/nn/triplet_margin_loss"
# nn other
require "torch/nn/functional"
require "torch/nn/init"
@@ -140,10 +149,45 @@
qint32: 14,
bfloat16: 15
}
ENUM_TO_DTYPE = DTYPE_TO_ENUM.map(&:reverse).to_h
+ def self._make_tensor_class(dtype, cuda = false)
+ cls = Class.new
+ device = cuda ? "cuda" : "cpu"
+ cls.define_singleton_method("new") do |*args|
+ if args.size == 1 && args.first.is_a?(Tensor)
+ args.first.send(dtype).to(device)
+ elsif args.size == 1 && args.first.is_a?(Array)
+ Torch.tensor(args.first, dtype: dtype, device: device)
+ else
+ Torch.empty(*args, dtype: dtype, device: device)
+ end
+ end
+ cls
+ end
+
+ FloatTensor = _make_tensor_class(:float32)
+ DoubleTensor = _make_tensor_class(:float64)
+ HalfTensor = _make_tensor_class(:float16)
+ ByteTensor = _make_tensor_class(:uint8)
+ CharTensor = _make_tensor_class(:int8)
+ ShortTensor = _make_tensor_class(:int16)
+ IntTensor = _make_tensor_class(:int32)
+ LongTensor = _make_tensor_class(:int64)
+ BoolTensor = _make_tensor_class(:bool)
+
+ CUDA::FloatTensor = _make_tensor_class(:float32, true)
+ CUDA::DoubleTensor = _make_tensor_class(:float64, true)
+ CUDA::HalfTensor = _make_tensor_class(:float16, true)
+ CUDA::ByteTensor = _make_tensor_class(:uint8, true)
+ CUDA::CharTensor = _make_tensor_class(:int8, true)
+ CUDA::ShortTensor = _make_tensor_class(:int16, true)
+ CUDA::IntTensor = _make_tensor_class(:int32, true)
+ CUDA::LongTensor = _make_tensor_class(:int64, true)
+ CUDA::BoolTensor = _make_tensor_class(:bool, true)
+
class << self
# Torch.float, Torch.long, etc
DTYPE_TO_ENUM.each_key do |dtype|
define_method(dtype) do
dtype
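
Note: the tensor classes defined above through _make_tensor_class accept an Array, size arguments, or an existing Tensor, matching the three branches of the factory. An illustrative sketch (the CUDA:: variants behave the same way but need a CUDA-enabled build):

  a = Torch::FloatTensor.new([1, 2, 3])   # same as Torch.tensor([1, 2, 3], dtype: :float32, device: "cpu")
  b = Torch::LongTensor.new(2, 3)         # uninitialized, same as Torch.empty(2, 3, dtype: :int64)

Passing an existing Tensor instead casts it to the class's dtype and moves it to the class's device, per the first branch.
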
@@ -189,10 +233,24 @@
float32: Numo::SFloat,
float64: Numo::DFloat
}
end
+ def no_grad
+ previous_value = grad_enabled?
+ begin
+ _set_grad_enabled(false)
+ yield
+ ensure
+ _set_grad_enabled(previous_value)
+ end
+ end
+
+ def device(str)
+ Device.new(str)
+ end
+
# --- begin tensor creation: https://pytorch.org/cppdocs/notes/tensor_creation.html ---
def arange(start, finish = nil, step = 1, **options)
# ruby doesn't support start = 0, finish, step = 1, ...
if finish.nil?
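
Note: no_grad and device are moved here from further down in the file (see the removals in a later hunk), not newly introduced. A quick usage sketch; the tensor values are arbitrary:

  x = Torch.ones(2, 2, requires_grad: true)

  Torch.no_grad do
    y = x * 2   # computed with gradient tracking disabled; the ensure clause restores the previous setting
  end

  cpu = Torch.device("cpu")   # wraps the device string in a Device object
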
@@ -306,30 +364,10 @@
zeros(input.size, like_options(input, options))
end
# --- begin operations ---
- %w(add sub mul div remainder).each do |op|
- define_method(op) do |input, other, **options|
- execute_op(op, input, other, **options)
- end
- end
-
- def neg(input)
- _neg(input)
- end
-
- def no_grad
- previous_value = grad_enabled?
- begin
- _set_grad_enabled(false)
- yield
- ensure
- _set_grad_enabled(previous_value)
- end
- end
-
# TODO support out
def mean(input, dim = nil, keepdim: false)
if dim
_mean_dim(input, dim, keepdim)
else
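
Note: the element-wise wrappers removed in this hunk (add, sub, mul, div, remainder, neg) do not leave the public API; with the native generator/parser/dispatcher required at the top of the file, they are now generated from the native function definitions. A sketch of calls that should keep working under that assumption:

  a = Torch.tensor([1.0, 2.0, 3.0])
  b = Torch.tensor([4.0, 5.0, 6.0])

  Torch.add(a, b)   # tensor + tensor
  Torch.add(a, 2)   # tensor + scalar, previously routed through _add_scalar
  Torch.neg(a)
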
@@ -344,131 +382,32 @@
else
_sum(input)
end
end
- def argmax(input, dim = nil, keepdim: false)
- if dim
- _argmax_dim(input, dim, keepdim)
- else
- _argmax(input)
- end
- end
-
- def eq(input, other)
- _eq(input, other)
- end
-
- def norm(input)
- _norm(input)
- end
-
- def pow(input, exponent)
- _pow(input, exponent)
- end
-
def topk(input, k)
_topk(input, k)
end
- def min(input)
- _min(input)
- end
-
def max(input, dim = nil, keepdim: false, out: nil)
if dim
raise NotImplementedYet unless out
_max_out(out[0], out[1], input, dim, keepdim)
else
_max(input)
end
end
- def exp(input)
- _exp(input)
- end
-
- def log(input)
- _log(input)
- end
-
- def sign(input)
- _sign(input)
- end
-
- def sigmoid(input)
- _sigmoid(input)
- end
-
- def gt(input, other)
- _gt(input, other)
- end
-
- def lt(input, other)
- _lt(input, other)
- end
-
- def unsqueeze(input, dim)
- _unsqueeze(input, dim)
- end
-
- def dot(input, tensor)
- _dot(input, tensor)
- end
-
- def cat(tensors, dim = 0)
- _cat(tensors, dim)
- end
-
- def matmul(input, other)
- _matmul(input, other)
- end
-
- def reshape(input, shape)
- _reshape(input, shape)
- end
-
- def flatten(input, start_dim: 0, end_dim: -1)
- _flatten(input, start_dim, end_dim)
- end
-
- def sqrt(input)
- _sqrt(input)
- end
-
# TODO make dim keyword argument
def log_softmax(input, dim)
_log_softmax(input, dim)
end
def softmax(input, dim: nil)
_softmax(input, dim)
end
- def abs(input)
- _abs(input)
- end
-
- def device(str)
- Device.new(str)
- end
-
private
-
- def execute_op(op, input, other, out: nil)
- scalar = other.is_a?(Numeric)
- if out
- # TODO make work with scalars
- raise Error, "out not supported with scalar yet" if scalar
- send("_#{op}_out", out, input, other)
- else
- if scalar
- send("_#{op}_scalar", input, other)
- else
- send("_#{op}", input, other)
- end
- end
- end
def tensor_size(size)
size.flatten
end