lib/torch/nn/functional.rb in torch-rb-0.1.2 vs lib/torch/nn/functional.rb in torch-rb-0.1.3
- removed (present only in 0.1.2)
+ added (present only in 0.1.3)
@@ -4,23 +4,15 @@
class << self
def relu(input)
Torch.relu(input)
end
- def conv2d(input, weight, bias, stride: 1, padding: 0)
+ def conv2d(input, weight, bias, stride: 1, padding: 0, dilation: 1, groups: 1)
# TODO pair stride and padding when needed
- Torch.conv2d(input, weight, bias, stride, padding)
+ Torch.conv2d(input, weight, bias, stride, padding, dilation, groups)
end
- def prelu(input, weight)
- Torch.prelu(input, weight)
- end
-
- def leaky_relu(input, negative_slope = 0.01)
- Torch.leaky_relu(input, negative_slope)
- end
-
def max_pool2d(input, kernel_size)
kernel_size = [kernel_size, kernel_size] if kernel_size.is_a?(Integer)
Torch.max_pool2d(input, kernel_size)
end
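Usage sketch for the widened conv2d signature above. This is not part of the diff: the shapes, the Torch.randn/Torch.zeros factory calls, and the explicit [h, w] pairs for stride/padding/dilation are illustrative assumptions, the pairs chosen because the TODO note says pairing is not yet done automatically.

  require "torch"

  input  = Torch.randn(1, 4, 8, 8)   # batch of 1, 4 channels, 8x8 spatial size (assumed)
  weight = Torch.randn(6, 2, 3, 3)   # 6 filters; with groups: 2 each filter sees 4 / 2 = 2 channels
  bias   = Torch.zeros(6)
  out = Torch::NN::F.conv2d(input, weight, bias,
    stride: [1, 1], padding: [1, 1], dilation: [1, 1], groups: 2)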
@@ -39,16 +31,66 @@
def cross_entropy(input, target)
nll_loss(log_softmax(input, 1), target)
end
- def nll_loss(input, target)
+ def nll_loss(input, target, reduction: "mean")
# TODO fix for non-1d
- Torch.nll_loss(input, target)
+ Torch.nll_loss(input, target, reduction)
end
def log_softmax(input, dim)
input.log_softmax(dim)
+ end
+
+ def dropout(input, p: 0.5, training: true, inplace: false)
+ if inplace
+ Torch._dropout!(input, p, training)
+ else
+ Torch._dropout(input, p, training)
+ end
+ end
+
+ def dropout2d(input, p: 0.5, training: true, inplace: false)
+ raise ArgumentError, "dropout probability has to be between 0 and 1, but got #{p}" if p < 0 || p > 1
+
+ if inplace
+ Torch._feature_dropout!(input, p, training)
+ else
+ Torch._feature_dropout(input, p, training)
+ end
+ end
+
+ def dropout3d(input, p: 0.5, training: true, inplace: false)
+ if inplace
+ Torch._feature_dropout!(input, p, training)
+ else
+ Torch._feature_dropout(input, p, training)
+ end
+ end
+
+ def alpha_dropout(input, p: 0.5, training: true, inplace: false)
+ if inplace
+ Torch._alpha_dropout!(input, p, training)
+ else
+ Torch._alpha_dropout(input, p, training)
+ end
+ end
+
+ def feature_alpha_dropout(input, p: 0.5, training: true, inplace: false)
+ if inplace
+ Torch._feature_alpha_dropout!(input, p, training)
+ else
+ Torch._feature_alpha_dropout(input, p, training)
+ end
+ end
+
+ def embedding(input, weight, padding_idx: nil, max_norm: nil, norm_type: 2.0, scale_grad_by_freq: false, sparse: false)
+ # TODO handle max_norm and norm_type
+ raise NotImplementedYet unless max_norm.nil? && norm_type == 2.0
+
+ padding_idx ||= -1
+ Torch._embedding(input, weight, padding_idx, scale_grad_by_freq, sparse)
end
end
end
# shortcut
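A minimal sketch of the new reduction keyword on nll_loss, paired with log_softmax the same way cross_entropy does above. The sample data and the Torch.randn/Torch.tensor factory calls are assumptions, not taken from the diff.

  require "torch"

  input  = Torch.randn(3, 5)         # 3 samples, 5 classes (assumed shapes)
  target = Torch.tensor([1, 0, 4])   # one class index per sample
  log_probs = Torch::NN::F.log_softmax(input, 1)
  mean_loss = Torch::NN::F.nll_loss(log_probs, target)                   # default reduction: "mean"
  sum_loss  = Torch::NN::F.nll_loss(log_probs, target, reduction: "sum")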
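The five dropout helpers added above share one calling convention: p is the drop probability, training: false passes the input through unchanged, and inplace: true dispatches to the bang variant. A sketch under the same assumptions about tensor factories and shapes:

  require "torch"

  x = Torch.randn(2, 3)
  y = Torch::NN::F.dropout(x, p: 0.2)                    # element-wise, out of place
  Torch::NN::F.dropout(x, p: 0.2, training: false)       # no-op at inference time
  imgs = Torch.randn(1, 4, 8, 8)
  Torch::NN::F.dropout2d(imgs, p: 0.5, inplace: true)    # zeroes whole channels, modifies imgs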
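Likewise a sketch for embedding, which looks up rows of a weight matrix by integer index; non-default max_norm and norm_type still raise NotImplementedYet per the guard above. The vocabulary size, embedding dimension, and index data are invented for illustration.

  require "torch"

  weight  = Torch.randn(10, 3)                       # 10-token vocabulary, 3-d embeddings (assumed)
  indices = Torch.tensor([[0, 2, 4], [1, 3, 9]])
  vectors = Torch::NN::F.embedding(indices, weight, padding_idx: 0)
  # vectors has shape 2 x 3 x 3; index 0 is excluded from gradient updates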