Sha256: 8bae70aae4ac1084c726914a7e6183cf3cab6bd9b6e23d519189b32fed9e5923
Contents?: true
Size: 1.83 KB
Versions: 22
Stored size: 1.83 KB

Contents

# ported from https://github.com/pytorch/pytorch/blob/master/torch/optim/adadelta.py
module Torch
  module Optim
    class Adadelta < Optimizer
      def initialize(params, lr: 1.0, rho: 0.9, eps: 1e-6, weight_decay: 0)
        raise ArgumentError, "Invalid learning rate: #{lr}" if lr < 0
        raise ArgumentError, "Invalid rho value: #{rho}" if rho < 0 || rho > 1
        raise ArgumentError, "Invalid epsilon value: #{eps}" if eps < 0
        raise ArgumentError, "Invalid weight_decay value: #{weight_decay}" if weight_decay < 0

        defaults = {lr: lr, rho: rho, eps: eps, weight_decay: weight_decay}
        super(params, defaults)
      end

      def step(closure = nil)
        # optionally re-evaluate the model via the supplied closure and return its loss
        loss = nil
        if closure
          loss = closure.call
        end

        @param_groups.each do |group|
          group[:params].each do |p|
            next unless p.grad # skip parameters that received no gradient this pass
            grad = p.grad.data
            if grad.sparse?
              raise Error, "Adadelta does not support sparse gradients"
            end
            state = @state[p]

            # lazily initialize per-parameter state on the first step
            if state.size == 0
              state[:step] = 0
              state[:square_avg] = Torch.zeros_like(p.data) # running avg of squared gradients
              state[:acc_delta] = Torch.zeros_like(p.data)  # running avg of squared updates
            end

            square_avg, acc_delta = state[:square_avg], state[:acc_delta]
            rho, eps = group[:rho], group[:eps]

            state[:step] += 1

            # L2 penalty: grad += weight_decay * p
            if group[:weight_decay] != 0
              grad = grad.add(group[:weight_decay], p.data)
            end

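            # The lines below implement the Adadelta recurrences (Zeiler, 2012):
            #   E[g^2]_t  = rho * E[g^2]_{t-1}  + (1 - rho) * g_t^2
            #   dx_t      = (RMS[dx]_{t-1} / RMS[g]_t) * g_t, where RMS[v] = sqrt(E[v^2] + eps)
            #   E[dx^2]_t = rho * E[dx^2]_{t-1} + (1 - rho) * dx_t^2
            #   p_t       = p_{t-1} - lr * dx_t   (lr defaults to 1.0, as in the paper)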
            square_avg.mul!(rho).addcmul!(1 - rho, grad, grad)
            std = square_avg.add(eps).sqrt!
            delta = acc_delta.add(eps).sqrt!.div!(std).mul!(grad)
            p.data.add!(-group[:lr], delta)
            acc_delta.mul!(rho).addcmul!(1 - rho, delta, delta)
          end
        end

        loss
      end
    end
  end
end
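For orientation, here is a minimal training-loop sketch showing how this class is typically driven. It follows torch-rb's documented public API, but the network, data, and hyperparameter choices are illustrative only and not part of this file:

require "torch"

# illustrative model; any module exposing #parameters works the same way
net = Torch::NN::Sequential.new(
  Torch::NN::Linear.new(10, 32),
  Torch::NN::ReLU.new,
  Torch::NN::Linear.new(32, 1)
)

optimizer = Torch::Optim::Adadelta.new(net.parameters, lr: 1.0, rho: 0.9, eps: 1e-6)

x = Torch.randn(8, 10) # toy inputs
y = Torch.randn(8, 1)  # toy targets

10.times do
  optimizer.zero_grad                          # clear p.grad on every parameter
  loss = Torch::NN::F.mse_loss(net.call(x), y)
  loss.backward                                # populate gradients
  optimizer.step                               # apply the Adadelta update above
end

The optional closure argument to step mirrors PyTorch: if given, it is called to re-evaluate the loss, which step then returns. Adadelta itself never needs it, but the shared signature lets callers treat all optimizers uniformly.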

Version data entries

22 entries across 22 versions & 1 gem

Version Path
torch-rb-0.3.7 lib/torch/optim/adadelta.rb
torch-rb-0.3.6 lib/torch/optim/adadelta.rb
torch-rb-0.3.5 lib/torch/optim/adadelta.rb
torch-rb-0.3.4 lib/torch/optim/adadelta.rb
torch-rb-0.3.3 lib/torch/optim/adadelta.rb
torch-rb-0.3.2 lib/torch/optim/adadelta.rb
torch-rb-0.3.1 lib/torch/optim/adadelta.rb
torch-rb-0.3.0 lib/torch/optim/adadelta.rb
torch-rb-0.2.7 lib/torch/optim/adadelta.rb
torch-rb-0.2.6 lib/torch/optim/adadelta.rb
torch-rb-0.2.5 lib/torch/optim/adadelta.rb
torch-rb-0.2.4 lib/torch/optim/adadelta.rb
torch-rb-0.2.3 lib/torch/optim/adadelta.rb
torch-rb-0.2.2 lib/torch/optim/adadelta.rb
torch-rb-0.2.1 lib/torch/optim/adadelta.rb
torch-rb-0.2.0 lib/torch/optim/adadelta.rb
torch-rb-0.1.8 lib/torch/optim/adadelta.rb
torch-rb-0.1.7 lib/torch/optim/adadelta.rb
torch-rb-0.1.6 lib/torch/optim/adadelta.rb
torch-rb-0.1.5 lib/torch/optim/adadelta.rb