lib/torch.rb in torch-rb-0.1.5 vs lib/torch.rb in torch-rb-0.1.6
- removed in 0.1.6 (present only in 0.1.5)
+ added in 0.1.6
@@ -27,28 +27,73 @@
require "torch/optim/lr_scheduler/lr_scheduler"
require "torch/optim/lr_scheduler/step_lr"
# nn parameters
require "torch/nn/parameter"
+require "torch/nn/utils"
# nn containers
require "torch/nn/module"
require "torch/nn/sequential"
# nn convolution layers
require "torch/nn/convnd"
+require "torch/nn/conv1d"
require "torch/nn/conv2d"
+require "torch/nn/conv3d"
+require "torch/nn/unfold"
+require "torch/nn/fold"
# nn pooling layers
require "torch/nn/max_poolnd"
+require "torch/nn/max_pool1d"
require "torch/nn/max_pool2d"
+require "torch/nn/max_pool3d"
+require "torch/nn/max_unpoolnd"
+require "torch/nn/max_unpool1d"
+require "torch/nn/max_unpool2d"
+require "torch/nn/max_unpool3d"
require "torch/nn/avg_poolnd"
+require "torch/nn/avg_pool1d"
require "torch/nn/avg_pool2d"
+require "torch/nn/avg_pool3d"
+require "torch/nn/lp_poolnd"
+require "torch/nn/lp_pool1d"
+require "torch/nn/lp_pool2d"
+# nn padding layers
+require "torch/nn/reflection_padnd"
+require "torch/nn/reflection_pad1d"
+require "torch/nn/reflection_pad2d"
+require "torch/nn/replication_padnd"
+require "torch/nn/replication_pad1d"
+require "torch/nn/replication_pad2d"
+require "torch/nn/replication_pad3d"
+require "torch/nn/constant_padnd"
+require "torch/nn/constant_pad1d"
+require "torch/nn/constant_pad2d"
+require "torch/nn/constant_pad3d"
+require "torch/nn/zero_pad2d"
+
+# nn normalization layers
+require "torch/nn/batch_norm"
+require "torch/nn/batch_norm1d"
+require "torch/nn/batch_norm2d"
+require "torch/nn/batch_norm3d"
+require "torch/nn/group_norm"
+require "torch/nn/instance_norm"
+require "torch/nn/instance_norm1d"
+require "torch/nn/instance_norm2d"
+require "torch/nn/instance_norm3d"
+require "torch/nn/layer_norm"
+require "torch/nn/local_response_norm"
+
# nn recurrent layers
require "torch/nn/rnn_base"
require "torch/nn/rnn"
+require "torch/nn/lstm"
+require "torch/nn/gru"
# nn linear layers
require "torch/nn/bilinear"
require "torch/nn/identity"
require "torch/nn/linear"
@@ -60,15 +105,21 @@
require "torch/nn/dropout2d"
require "torch/nn/dropout3d"
require "torch/nn/feature_alpha_dropout"
# nn activations
+require "torch/nn/hardshrink"
require "torch/nn/leaky_relu"
+require "torch/nn/log_sigmoid"
require "torch/nn/prelu"
require "torch/nn/relu"
require "torch/nn/sigmoid"
require "torch/nn/softplus"
+require "torch/nn/softshrink"
+require "torch/nn/softsign"
+require "torch/nn/tanh"
+require "torch/nn/tanhshrink"
# nn activations other
require "torch/nn/log_softmax"
require "torch/nn/softmax"
require "torch/nn/softmax2d"
@@ -360,51 +411,9 @@
randn(input.size, like_options(input, options))
end
def zeros_like(input, **options)
zeros(input.size, like_options(input, options))
- end
-
- # --- begin operations ---
-
- # TODO support out
- def mean(input, dim = nil, keepdim: false)
- if dim
- _mean_dim(input, dim, keepdim)
- else
- _mean(input)
- end
- end
-
- # TODO support dtype
- def sum(input, dim = nil, keepdim: false)
- if dim
- _sum_dim(input, dim, keepdim)
- else
- _sum(input)
- end
- end
-
- def topk(input, k)
- _topk(input, k)
- end
-
- def max(input, dim = nil, keepdim: false, out: nil)
- if dim
- raise NotImplementedYet unless out
- _max_out(out[0], out[1], input, dim, keepdim)
- else
- _max(input)
- end
- end
-
- # TODO make dim keyword argument
- def log_softmax(input, dim)
- _log_softmax(input, dim)
- end
-
- def softmax(input, dim: nil)
- _softmax(input, dim)
end
private
def tensor_size(size)
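
The hunk above removes the hand-written wrappers for mean, sum, topk, max, log_softmax, and softmax from lib/torch.rb; presumably 0.1.6 exposes the same operations through generated native bindings instead of this manual dispatch. For reference, the call patterns the removed wrappers supported looked like this (positional dim, keepdim: keyword), and they are expected to keep working in 0.1.6:

require "torch"

x = Torch.tensor([[1.0, 2.0], [3.0, 4.0]])
Torch.sum(x)                     # whole-tensor sum => tensor(10.0)
Torch.sum(x, 0)                  # sum along dim 0  => tensor([4.0, 6.0])
Torch.mean(x, 1, keepdim: true)  # row means, kept as a column => tensor([[1.5], [3.5]])
Torch.log_softmax(x, 1)          # dim is still positional, per the TODO in the removed code
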