lib/torch.rb in torch-rb-0.1.8 vs lib/torch.rb in torch-rb-0.2.0
- lines removed (present only in torch-rb 0.1.8)
+ lines added (present only in torch-rb 0.2.0)
@@ -23,11 +23,16 @@
require "torch/optim/rprop"
require "torch/optim/sgd"
# optim lr_scheduler
require "torch/optim/lr_scheduler/lr_scheduler"
+require "torch/optim/lr_scheduler/lambda_lr"
+require "torch/optim/lr_scheduler/multiplicative_lr"
require "torch/optim/lr_scheduler/step_lr"
+require "torch/optim/lr_scheduler/multi_step_lr"
+require "torch/optim/lr_scheduler/exponential_lr"
+require "torch/optim/lr_scheduler/cosine_annealing_lr"
# nn parameters
require "torch/nn/parameter"
require "torch/nn/utils"
@@ -57,10 +62,18 @@
require "torch/nn/avg_pool2d"
require "torch/nn/avg_pool3d"
require "torch/nn/lp_poolnd"
require "torch/nn/lp_pool1d"
require "torch/nn/lp_pool2d"
+require "torch/nn/adaptive_max_poolnd"
+require "torch/nn/adaptive_max_pool1d"
+require "torch/nn/adaptive_max_pool2d"
+require "torch/nn/adaptive_max_pool3d"
+require "torch/nn/adaptive_avg_poolnd"
+require "torch/nn/adaptive_avg_pool1d"
+require "torch/nn/adaptive_avg_pool2d"
+require "torch/nn/adaptive_avg_pool3d"
# nn padding layers
require "torch/nn/reflection_padnd"
require "torch/nn/reflection_pad1d"
require "torch/nn/reflection_pad2d"
@@ -164,10 +177,13 @@
require "torch/utils/data/tensor_dataset"
# random
require "torch/random"
+# hub
+require "torch/hub"
+
module Torch
# Base error class for the Torch namespace; rescue this to catch
# any error raised by the library.
Error = Class.new(StandardError)
class NotImplementedYet < StandardError
def message
"This feature has not been implemented yet. Consider submitting a PR."
@@ -363,9 +379,14 @@
# Builds a tensor of zeros with the given dimensions.
# Accepts size either splatted (zeros(2, 3)) or as a single array,
# plus keyword options (dtype, device, etc.) forwarded to the
# native constructor via tensor_options.
def zeros(*size, **options)
  shape = tensor_size(size)
  opts = tensor_options(**options)
  _zeros(shape, opts)
end
def tensor(data, **options)
+ if options[:dtype].nil? && defined?(Numo::NArray) && data.is_a?(Numo::NArray)
+ numo_to_dtype = _dtype_to_numo.map(&:reverse).to_h
+ options[:dtype] = numo_to_dtype[data.class]
+ end
+
size = []
if data.respond_to?(:to_a)
data = data.to_a
d = data
while d.is_a?(Array)