lib/dnn/core/layers.rb in ruby-dnn-0.4.1 vs lib/dnn/core/layers.rb in ruby-dnn-0.4.2
- old
+ new
@@ -1,53 +1,54 @@
module DNN
module Layers
- #Super class of all optimizer classes.
+ # Superclass of all layer classes.
class Layer
include Xumo
def initialize
@built = false
end
- #Build the layer.
+ # Build the layer.
def build(model)
@built = true
@model = model
end
- #Does the layer have already been built?
+ # Has the layer already been built?
def built?
@built
end
- #Forward propagation.
+ # Forward propagation.
def forward() end
- #Backward propagation.
+ # Backward propagation.
def backward() end
- #Get the shape of the layer.
+ # Get the shape of the layer.
def shape
prev_layer.shape
end
- #Layer to a hash.
- def to_hash
- {name: self.class.name}
+ # Convert the layer to a hash.
+ def to_hash(hash)
+ {name: self.class.name}.merge(hash)
end
- #Get the previous layer.
+ # Get the previous layer.
def prev_layer
@model.layers[@model.layers.index(self) - 1]
end
end
+ # Superclass of all layers that have learnable parameters.
class HasParamLayer < Layer
- attr_reader :params #The parameters of the layer.
- attr_reader :grads #Differential value of parameter of layer.
+ attr_reader :params # The parameters of the layer.
+ attr_reader :grads # Gradients of the layer's parameters.
def initialize
super
@params = {}
@grads = {}
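
For context, here is a minimal standalone sketch of the new merging to_hash pattern (the Sketch* classes below are hypothetical stand-ins, not part of the gem): the base class contributes the :name key once, and each subclass passes only its own attributes to super.

    class SketchLayer
      # Mirrors the new base behaviour: merge the subclass's hash into {name: ...}.
      def to_hash(hash)
        { name: self.class.name }.merge(hash)
      end
    end

    class SketchDense < SketchLayer
      def initialize(num_nodes)
        @num_nodes = num_nodes
      end

      # Subclasses no longer repeat :name; they hand only their own keys to super.
      def to_hash
        super({ num_nodes: @num_nodes })
      end
    end

    SketchDense.new(64).to_hash  #=> {:name=>"SketchDense", :num_nodes=>64}

This is what lets every subclass to_hash in the hunks below drop the duplicated name: entry.
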
@@ -56,18 +57,18 @@
def build(model)
super
init_params
end
- #Update the parameters.
+ # Update the parameters.
def update
@model.optimizer.update(self)
end
private
- #Initialize of the parameters.
+ # Initialize the parameters.
def init_params() end
end
class InputLayer < Layer
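
A rough sketch of the lifecycle this implies for parameterized layers (ToyModel, ToyOptimizer, and ToyScaleLayer are made-up stand-ins, not gem classes): build stores the model and initializes the parameters once, and update delegates to the model's optimizer.

    ToyModel = Struct.new(:optimizer)

    class ToyOptimizer
      # Stand-in optimizer: just reports which parameters it would update.
      def update(layer)
        puts "updating #{layer.params.keys.inspect}"
      end
    end

    class ToyScaleLayer
      attr_reader :params, :grads

      def initialize
        @params = {}
        @grads = {}
      end

      # Same pattern as HasParamLayer#build: remember the model, then init params.
      def build(model)
        @model = model
        init_params
      end

      def update
        @model.optimizer.update(self)
      end

      private

      # Counterpart of init_params: create the learnable parameters.
      def init_params
        @params[:scale] = 1.0
      end
    end

    layer = ToyScaleLayer.new
    layer.build(ToyModel.new(ToyOptimizer.new))
    layer.update  # prints: updating [:scale]
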
@@ -89,11 +90,11 @@
def backward(dout)
dout
end
def to_hash
- {name: self.class.name, shape: @shape}
+ super({shape: @shape})
end
end
class Dense < HasParamLayer
@@ -138,17 +139,14 @@
def shape
[@num_nodes]
end
def to_hash
- {
- name: self.class.name,
- num_nodes: @num_nodes,
- weight_initializer: @weight_initializer.to_hash,
- bias_initializer: @bias_initializer.to_hash,
- weight_decay: @weight_decay,
- }
+ super({num_nodes: @num_nodes,
+ weight_initializer: @weight_initializer.to_hash,
+ bias_initializer: @bias_initializer.to_hash,
+ weight_decay: @weight_decay})
end
private
def init_params
@@ -290,20 +288,17 @@
def shape
[*@out_size, @num_filters]
end
def to_hash
- {
- name: self.class.name,
- num_filters: @num_filters,
- filter_size: @filter_size,
- weight_initializer: @weight_initializer.to_hash,
- bias_initializer: @bias_initializer.to_hash,
- strides: @strides,
- padding: @padding,
- weight_decay: @weight_decay,
- }
+ super({num_filters: @num_filters,
+ filter_size: @filter_size,
+ weight_initializer: @weight_initializer.to_hash,
+ bias_initializer: @bias_initializer.to_hash,
+ strides: @strides,
+ padding: @padding,
+ weight_decay: @weight_decay})
end
private
def init_params
@@ -369,17 +364,14 @@
def shape
[*@out_size, @num_channel]
end
def to_hash
- {
- name: self.class.name,
- pool_width: @pool_width,
- pool_height: @pool_height,
- strides: @strides,
- padding: @padding,
- }
+ super({pool_width: @pool_width,
+ pool_height: @pool_height,
+ strides: @strides,
+ padding: @padding})
end
end
class UnPool2D < Layer
@@ -421,14 +413,11 @@
def shape
[@out_width, @out_height, @num_channel]
end
def to_hash
- {
- name: self.class.name,
- unpool_size: @unpool_size,
- }
+ super({unpool_size: @unpool_size})
end
end
class Flatten < Layer
@@ -468,11 +457,11 @@
def backward(dout)
dout.reshape(@x_shape)
end
def to_hash
- {name: self.class.name, shape: @shape}
+ super({shape: @shape})
end
end
class OutputLayer < Layer
@@ -516,11 +505,11 @@
dout[@mask] = 0 if @model.training?
dout
end
def to_hash
- {name: self.class.name, dropout_ratio: @dropout_ratio}
+ super({dropout_ratio: @dropout_ratio})
end
end
class BatchNormalization < HasParamLayer
@@ -574,15 +563,12 @@
dmean = dxc.sum(0)
dxc - dmean / batch_size
end
def to_hash
- {
- name: self.class.name,
- momentum: @momentum,
- running_mean: @running_mean.to_a,
- running_var: @running_var.to_a,
- }
+ super({momentum: @momentum,
+ running_mean: @running_mean.to_a,
+ running_var: @running_var.to_a})
end
private
def init_params
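
The to_a calls above convert the running statistics to plain Ruby arrays rather than Xumo NArray objects, which keeps the hash easy to serialize. A small illustration of that round trip (this assumes serialization to something like JSON and is not the gem's own save path):

    require 'json'

    hash = { name: "DNN::Layers::BatchNormalization",
             momentum: 0.9,
             running_mean: [0.1, 0.2],
             running_var: [1.0, 1.1] }

    json = JSON.dump(hash)
    restored = JSON.parse(json, symbolize_names: true)
    restored[:running_mean]  #=> [0.1, 0.2]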