lib/dnn/core/optimizers.rb in ruby-dnn-0.4.2 vs lib/dnn/core/optimizers.rb in ruby-dnn-0.4.3
- old
+ new
@@ -48,18 +48,18 @@
        super({momentum: @momentum})
      end
    end

-    class AdaGrad
+    class AdaGrad < Optimizer
      def initialize(learning_rate = 0.01)
        super(learning_rate)
        @g = {}
      end

      def self.load_hash(hash)
-        @learning_rate = hash[:learning_rate]
+        self.new(hash[:learning_rate])
      end

      def update(layer)
        @g[layer] ||= {}
        layer.params.each_key do |key|
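
The hunk above makes two fixes to AdaGrad: the class now inherits from Optimizer (without it, the super(learning_rate) call in initialize would reach Object#initialize and raise an ArgumentError), and load_hash now returns a constructed AdaGrad rather than assigning a class-level @learning_rate and returning that Float. A self-contained Ruby sketch of the behavioral difference follows; only the two load_hash bodies come from the diff, while the stub Optimizer base with a learning_rate reader is an assumption for illustration.

# Stub base class; the learning_rate reader is assumed for illustration.
class Optimizer
  attr_reader :learning_rate

  def initialize(learning_rate)
    @learning_rate = learning_rate
  end
end

# 0.4.2 behavior: assigns a class-level instance variable and returns a Float,
# so callers never get an optimizer object back.
class AdaGradOld < Optimizer
  def self.load_hash(hash)
    @learning_rate = hash[:learning_rate]
  end
end

# 0.4.3 behavior: builds and returns a usable optimizer instance.
class AdaGradNew < Optimizer
  def self.load_hash(hash)
    self.new(hash[:learning_rate])
  end
end

AdaGradOld.load_hash({ learning_rate: 0.01 })               # => 0.01 (just the Float)
AdaGradNew.load_hash({ learning_rate: 0.01 }).learning_rate # => 0.01 (from a real instance)
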
@@ -71,18 +71,18 @@
    end

    class RMSProp < Optimizer
      attr_accessor :muse
+
+      def self.load_hash(hash)
+        self.new(hash[:learning_rate], hash[:muse])
+      end

      def initialize(learning_rate = 0.001, muse = 0.9)
        super(learning_rate)
        @muse = muse
        @g = {}
-      end
-
-      def self.load_hash(hash)
-        self.new(hash[:learning_rate], hash[:muse])
      end

      def update(layer)
        @g[layer] ||= {}
        layer.params.each_key do |key|
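
The RMSProp hunk only moves load_hash above initialize; the optimizer's math is unchanged, and the update method is cut off at the hunk boundary. For context, here is a standalone sketch of the standard RMSProp step that the per-layer @g cache exists to support; the gradient source, the epsilon value, and the per-parameter bookkeeping are assumptions, since the diff shows only the @g[layer] ||= {} initialization and the params loop.

# Standalone sketch of a textbook RMSProp step (not the gem's exact code).
# muse plays the role usually written as the decay rate rho.
def rmsprop_step(param, grad, g, learning_rate: 0.001, muse: 0.9, eps: 1e-7)
  g = muse * g + (1 - muse) * grad**2                 # decaying mean of squared gradients
  param -= learning_rate * grad / Math.sqrt(g + eps)  # scale the step by the running RMS
  [param, g]
end

param, g = 1.0, 0.0
3.times { param, g = rmsprop_step(param, 0.5, g) }
# param shrinks a little each step; g approaches grad**2 as history accumulates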