lib/torch/nn/functional.rb in torch-rb-0.1.4 vs lib/torch/nn/functional.rb in torch-rb-0.1.5
- old
+ new
@@ -48,11 +48,12 @@
def embedding(input, weight, padding_idx: nil, max_norm: nil, norm_type: 2.0, scale_grad_by_freq: false, sparse: false)
# TODO handle max_norm and norm_type
raise NotImplementedYet unless max_norm.nil? && norm_type == 2.0
padding_idx ||= -1
- Torch._embedding(input, weight, padding_idx, scale_grad_by_freq, sparse)
+ # weight and indices are swapped from Python interface
+ Torch._embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
end
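# Usage sketch, assuming the 0.1.5 signature above: callers keep the
# Python-style argument order (indices first, weight second); only the call
# into the native _embedding binding is swapped.
#
#   require "torch"
#   indices = Torch.tensor([[1, 2, 4], [4, 3, 2]])    # int64 lookup indices
#   weight  = Torch.rand(10, 3)                        # 10-row embedding table
#   Torch::NN::Functional.embedding(indices, weight)   # => tensor of shape [2, 3, 3]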
def embedding_bag(input, weight, offsets: nil, max_norm: nil, norm_type: 2, scale_grad_by_freq: false, mode: "mean", sparse: false, per_sample_weights: nil)
# need to handle nils
raise NotImplementedYet
@@ -74,44 +75,82 @@
end
# loss functions
def binary_cross_entropy(input, target, weight: nil, reduction: "mean")
- raise NotImplementedYet if weight
- Torch.binary_cross_entropy(input, target, reduction)
+ NN._binary_cross_entropy(input, target, weight, reduction)
end
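# Usage sketch, assuming the NN._binary_cross_entropy binding above: the
# optional per-element weight tensor is now passed through instead of raising.
#
#   probs   = Torch.rand(4)                        # predictions already in [0, 1]
#   targets = Torch.tensor([1.0, 0.0, 1.0, 0.0])
#   weights = Torch.tensor([1.0, 2.0, 1.0, 2.0])   # same shape as the input
#   Torch::NN::Functional.binary_cross_entropy(probs, targets, weight: weights)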
+ def binary_cross_entropy_with_logits(input, target, weight: nil, reduction: "mean", pos_weight: nil)
+ Torch._binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction)
+ end
+
+ def cosine_embedding_loss(input1, input2, target, margin: 0, reduction: "mean")
+ raise NotImplementedYet
+ end
+
def cross_entropy(input, target, weight: nil, ignore_index: -100, reduction: "mean")
nll_loss(log_softmax(input, 1), target, weight: weight, ignore_index: ignore_index, reduction: reduction)
end
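# Usage sketch, assuming the composition above: cross_entropy takes raw logits
# of shape [N, C] and integer class targets of shape [N], since it is
# log_softmax followed by nll_loss.
#
#   logits  = Torch.randn(3, 5)         # 3 samples, 5 classes
#   targets = Torch.tensor([1, 0, 4])   # one class index per sample
#   Torch::NN::Functional.cross_entropy(logits, targets)   # => scalar mean loss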
def ctc_loss(log_probs, targets, input_lengths, target_lengths, blank: 0, reduction: "mean", zero_infinity: false)
# call to_a on input_lengths and target_lengths for C++
- Torch.ctc_loss(log_probs, targets, input_lengths.to_a, target_lengths.to_a, blank, reduction, zero_infinity)
+ Torch._ctc_loss_intlist(log_probs, targets, input_lengths.to_a, target_lengths.to_a, blank, reduction, zero_infinity)
end
+ def hinge_embedding_loss(input, target, margin: 1.0, reduction: "mean")
+ Torch._hinge_embedding_loss(input, target, margin, reduction)
+ end
+
def kl_div(input, target, reduction: "mean")
- Torch.kl_div(input, target, reduction)
+ Torch._kl_div(input, target, reduction)
end
def l1_loss(input, target, reduction: "mean")
- Torch.l1_loss(input, target, reduction)
+ NN._l1_loss(input, target, reduction)
end
+ def margin_ranking_loss(input1, input2, target, margin: 0, reduction: "mean")
+ raise NotImplementedYet
+ end
+
def mse_loss(input, target, reduction: "mean")
- Torch.mse_loss(input, target, reduction)
+ NN._mse_loss(input, target, reduction)
end
+ def multilabel_margin_loss(input, target, reduction: "mean")
+ NN._multilabel_margin_loss(input, target, reduction)
+ end
+
+ def multilabel_soft_margin_loss(input, target, weight: nil)
+ raise NotImplementedYet
+ end
+
+ def multi_margin_loss(input, target, p: 1, margin: 1.0, weight: nil, reduction: "mean")
+ NN._multi_margin_loss(input, target, p, margin, weight, reduction)
+ end
+
def nll_loss(input, target, weight: nil, ignore_index: -100, reduction: "mean")
- raise NotImplementedYet if weight
- Torch.nll_loss(input, target, reduction, ignore_index)
+ NN._nll_loss(input, target, weight, reduction, ignore_index)
end
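# Usage sketch, assuming the NN._nll_loss binding above: a per-class weight
# tensor is accepted, where 0.1.4 raised NotImplementedYet.
#
#   log_probs = Torch::NN::Functional.log_softmax(Torch.randn(3, 5), 1)
#   targets   = Torch.tensor([1, 0, 4])
#   weights   = Torch.tensor([1.0, 1.0, 2.0, 1.0, 1.0])   # one weight per class
#   Torch::NN::Functional.nll_loss(log_probs, targets, weight: weights)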
def poisson_nll_loss(input, target, log_input: true, full: false, eps: 1e-8, reduction: "mean")
- Torch.poisson_nll_loss(input, target, log_input, full, eps, reduction)
+ Torch._poisson_nll_loss(input, target, log_input, full, eps, reduction)
end
+ def soft_margin_loss(input, target, reduction: "mean")
+ NN._soft_margin_loss(input, target, reduction)
+ end
+
+ def smooth_l1_loss(input, target, reduction: "mean")
+ NN._smooth_l1_loss(input, target, reduction)
+ end
+
+ def triplet_margin_loss(anchor, positive, negative, margin: 1.0, p: 2, eps: 1e-06, swap: false, reduction: "mean")
+ Torch._triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction)
+ end
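# Usage sketch, assuming the signature above: anchor, positive, and negative
# embeddings share a shape, and the defaults mirror PyTorch (margin 1.0, p 2).
#
#   anchor   = Torch.randn(8, 128)
#   positive = Torch.randn(8, 128)
#   negative = Torch.randn(8, 128)
#   Torch::NN::Functional.triplet_margin_loss(anchor, positive, negative, margin: 1.0)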
+
# end loss
def softmax(input, dim: nil)
dim ||= softmax_dim(input.dim)
input.softmax(dim: dim)
@@ -121,55 +160,55 @@
dim ||= softmax_dim(input.dim)
(-input).softmax(dim: dim)
end
def softplus(input, beta: 1, threshold: 20)
- Torch._softplus(input, beta, threshold)
+ NN._softplus(input, beta, threshold)
end
# TODO make dim keyword argument and update examples
def log_softmax(input, dim = nil)
dim ||= softmax_dim(input.dim)
input.log_softmax(dim)
end
def dropout(input, p: 0.5, training: true, inplace: false)
if inplace
- Torch._dropout!(input, p, training)
+ Torch._dropout_(input, p, training)
else
Torch._dropout(input, p, training)
end
end
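# Usage sketch, assuming the signature above: callers still opt into in-place
# behaviour with the inplace keyword; only the native name changed
# (_dropout! -> _dropout_).
#
#   x = Torch.rand(2, 3)
#   Torch::NN::Functional.dropout(x, p: 0.5, training: true)                  # returns a new tensor
#   Torch::NN::Functional.dropout(x, p: 0.5, training: true, inplace: true)   # mutates x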
def dropout2d(input, p: 0.5, training: true, inplace: false)
raise ArgumentError, "dropout probability has to be between 0 and 1, but got #{p}" if p < 0 || p > 1
if inplace
- Torch._feature_dropout!(input, p, training)
+ Torch._feature_dropout_(input, p, training)
else
Torch._feature_dropout(input, p, training)
end
end
def dropout3d(input, p: 0.5, training: true, inplace: false)
if inplace
- Torch._feature_dropout!(input, p, training)
+ Torch._feature_dropout_(input, p, training)
else
Torch._feature_dropout(input, p, training)
end
end
def alpha_dropout(input, p: 0.5, training: true, inplace: false)
if inplace
- Torch._alpha_dropout!(input, p, training)
+ Torch._alpha_dropout_(input, p, training)
else
Torch._alpha_dropout(input, p, training)
end
end
def feature_alpha_dropout(input, p: 0.5, training: true, inplace: false)
if inplace
- Torch._feature_alpha_dropout!(input, p, training)
+ Torch._feature_alpha_dropout_(input, p, training)
else
Torch._feature_alpha_dropout(input, p, training)
end
end