lib/torch/optim/adagrad.rb in torch-rb-0.4.1 vs lib/torch/optim/adagrad.rb in torch-rb-0.4.2

- old
+ new

@@ -47,10 +47,10 @@
       if group[:weight_decay] != 0
         if p.grad.data.sparse?
           raise Error, "weight_decay option is not compatible with sparse gradients"
        end
-        grad = grad.add(group[:weight_decay], p.data)
+        grad = grad.add(p.data, alpha: group[:weight_decay])
      end

      clr = group[:lr] / (1 + (state[:step] - 1) * group[:lr_decay])

      if grad.sparse?