lib/dnn/core/models.rb in ruby-dnn-0.15.3 vs lib/dnn/core/models.rb in ruby-dnn-0.16.0
- old
+ new
@@ -1,8 +1,9 @@
module DNN
module Models
+ # This class is used to hold multiple layers in an array.
class LayersList < Array
def self.from_hash_list(hash_list)
layers_list = new
hash_list.each do |hash|
obj_class = DNN.const_get(hash[:class])
@@ -17,11 +18,11 @@
end
layers_list
end
def to_hash_list
- map { |layer| layer.to_hash }
+ map(&:to_hash)
end
# Get the all layers.
# @return [Array] All layers array.
def layers
@@ -36,14 +37,24 @@
layers_array
end
end
class Chain
- def call(x)
- raise NotImplementedError, "Class '#{self.class.name}' has implement method 'call'"
+ # Forward propagation.
+ # @param [Tensor] input_tensor Input tensor.
+ # @return [Tensor] Output tensor.
+ def forward(input_tensor)
+ raise NotImplementedError, "Class '#{self.class.name}' has implement method 'forward'"
end
+ # Forward propagation and create a link.
+ # @param [Tensor] input_tensor Input tensor.
+ # @return [Tensor] Output tensor.
+ def call(input_tensor)
+ forward(input_tensor)
+ end
+
# Get the all layers.
# @return [Array] All layers array.
def layers
layers_array = []
instance_variables.sort.each do |ivar|
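Note: with this hunk, user-defined chains and models override forward instead of call, and Chain#call simply wraps forward. A minimal sketch of the new convention, assuming the usual Dense / ReLU / InputLayer layers from the gem (the MLP class and layer sizes here are illustrative, not part of this diff):

    require "dnn"

    class MLP < DNN::Models::Model
      def initialize
        super
        @l1 = DNN::Layers::Dense.new(256)
        @l2 = DNN::Layers::Dense.new(10)
      end

      # Define forward only; Chain#call handles the dispatch.
      def forward(x)
        x = DNN::Layers::InputLayer.new(784).(x)
        x = @l1.(x)
        x = DNN::Layers::ReLU.new.(x)
        @l2.(x)
      end
    end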
@@ -107,17 +118,23 @@
end
def initialize
@optimizer = nil
@loss_func = nil
- @last_link = nil
@built = false
@callbacks = []
@layers_cache = nil
@last_log = {}
end
+ def call(inputs)
+ @layers_cache = nil
+ output_tensor = forward(inputs)
+ @built = true unless @built
+ output_tensor
+ end
+
# Set optimizer and loss_func to model.
# @param [DNN::Optimizers::Optimizer] optimizer Optimizer to use for learning.
# @param [DNN::Losses::Loss] loss_func Loss function to use for learning.
def setup(optimizer, loss_func)
unless optimizer.is_a?(Optimizers::Optimizer)
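Note: Model#call now clears the layer cache, delegates to forward, and marks the model built, so a model instance can be run directly on a Tensor. A hedged usage sketch (model is an instance of a forward-defining subclass such as the MLP above; the input shape is illustrative):

    x = DNN::Tensor.convert(Numo::SFloat.new(10, 784).rand)
    out = model.(x)     # same as model.call(x); runs forward
    out.data            # Numo::SFloat output
    model.built?        # => true after the first call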
@@ -202,14 +219,14 @@
print log if verbose
end
if test
acc, loss = if test.is_a?(Array)
- evaluate(test[0], test[1], batch_size: batch_size)
- else
- evaluate_by_iterator(test, batch_size: batch_size)
- end
+ evaluate(test[0], test[1], batch_size: batch_size)
+ else
+ evaluate_by_iterator(test, batch_size: batch_size)
+ end
print " " + metrics_to_str({ accuracy: acc, test_loss: loss }) if verbose
end
puts "" if verbose
call_callbacks(:after_epoch)
end
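Note: this hunk only re-indents the branch; the test: option of fit still accepts either an [x_test, y_test] pair (routed to evaluate) or an iterator (routed to evaluate_by_iterator). Typical usage, assuming fit keeps its documented signature with epochs as the third positional argument:

    model.fit(x_train, y_train, 10, batch_size: 128, test: [x_test, y_test])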
@@ -240,19 +257,18 @@
def train_on_batch(x, y)
raise DNN_Error, "The model is not optimizer setup complete." unless @optimizer
raise DNN_Error, "The model is not loss_func setup complete." unless @loss_func
check_xy_type(x, y)
call_callbacks(:before_train_on_batch)
- x = forward(x, true)
- loss_value = @loss_func.loss(x, y, layers)
- dy = @loss_func.backward(x, y)
- backward(dy)
- @optimizer.update(layers)
- @loss_func.regularizers_backward(layers)
- @last_log[:train_loss] = loss_value
+ DNN.learning_phase = true
+ out = call(Tensor.convert(x))
+ loss = @loss_func.loss(out, Tensor.convert(y), layers)
+ loss.link.backward(Xumo::SFloat.zeros(y[0...1, false].shape))
+ @optimizer.update(get_all_trainable_params)
+ @last_log[:train_loss] = loss.data
call_callbacks(:after_train_on_batch)
- loss_value
+ loss.data
end
# Evaluate model and get accuracy and loss of test data.
# @param [Numo::SFloat] x Input test data.
# @param [Numo::SFloat] y Output test data.
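Note: train_on_batch now takes the define-by-run path: inputs are wrapped with Tensor.convert, the backward pass is seeded through loss.link.backward, and the optimizer receives the trainable parameters rather than the layer list. The public API is unchanged; a hedged sketch using the gem's usual optimizer and loss classes:

    model.setup(DNN::Optimizers::Adam.new, DNN::Losses::SoftmaxCrossEntropy.new)
    loss = model.train_on_batch(x_batch, y_batch)   # returns loss.data, a plain loss value as before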
@@ -289,15 +305,16 @@
# @param [Numo::SFloat] x Input test data.
# @param [Numo::SFloat] y Output test data.
# @return [Array] Returns the test data accuracy and mean loss in the form [accuracy, mean_loss].
def test_on_batch(x, y)
call_callbacks(:before_test_on_batch)
- x = forward(x, false)
- correct = accuracy(x, y)
- loss_value = @loss_func.loss(x, y)
+ DNN.learning_phase = false
+ out = call(Tensor.convert(x))
+ correct = accuracy(out.data, y)
+ loss = @loss_func.(out, Tensor.convert(y))
call_callbacks(:after_test_on_batch)
- [correct, loss_value]
+ [correct, loss.data]
end
# Implement the process to accuracy this model.
# @param [Numo::SFloat] x Input test data.
# @param [Numo::SFloat] y Output test data.
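Note: test_on_batch follows the same pattern, switching DNN.learning_phase off and computing the loss via the loss object's call. Nothing changes for callers; for example:

    acc, mean_loss = model.evaluate(x_test, y_test, batch_size: 100)
    correct, batch_loss = model.test_on_batch(x_batch, y_batch)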
@@ -321,11 +338,13 @@
# Predict data.
# @param [Numo::SFloat] x Input data.
# @param [Boolean] use_loss_activation Use loss activation when loss has an activation.
def predict(x, use_loss_activation: true)
check_xy_type(x)
- y = forward(x, false)
+ DNN.learning_phase = false
+ out = call(Tensor.convert(x))
+ y = out.data
if use_loss_activation && @loss_func.class.respond_to?(:activation)
y = @loss_func.class.activation(y)
end
y
end
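Note: predict also routes through call with the learning phase off; use_loss_activation keeps its old meaning. Hedged usage:

    y = model.predict(x_test)                                  # applies the loss activation (e.g. softmax) when the loss defines one
    y_raw = model.predict(x_test, use_loss_activation: false)  # raw network output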
@@ -384,27 +403,22 @@
# Get the layer that the model has.
# @param [Symbol] name The name of the layer to get.
# @return [DNN::Layers::Layer] Return the layer.
def get_layer(name)
layer = instance_variable_get("@#{name}")
- if layer.is_a?(Layers::Layer) || layer.is_a?(Chain) || layer.is_a?(LayersList)
- return layer
- end
+ return layer if layer.is_a?(Layers::Layer) || layer.is_a?(Chain) || layer.is_a?(LayersList)
nil
end
# @return [Boolean] If model have already been built then return true.
def built?
@built
end
def clean_layers
- layers.each do |layer|
- layer.clean
- end
+ layers.each(&:clean)
@loss_func.clean
- @last_link = nil
@layers_cache = nil
end
def get_all_params_data
trainable_layers.map do |layer|
@@ -422,30 +436,16 @@
end
end
private
- def forward(x, learning_phase)
- DNN.learning_phase = learning_phase
- @layers_cache = nil
- inputs = if x.is_a?(Array)
- x.map { |a| Tensor.new(a, nil) }
- else
- Tensor.new(x, nil)
- end
- output_tensor = call(inputs)
- @last_link = output_tensor.link
- unless @built
- @built = true
- end
- output_tensor.data
+ def get_all_trainable_params
+ layers.select { |layer| layer.is_a?(Layers::TrainableLayer) && layer.trainable }
+ .map { |layer| layer.get_params.values }.flatten.compact
+ .select(&:grad)
end
- def backward(dy)
- @last_link.backward(dy)
- end
-
def call_callbacks(event)
@callbacks.each do |callback|
callback.send(event) if callback.respond_to?(event)
end
end
@@ -510,10 +510,10 @@
# @return [Boolean] Return true if success for remove layer.
def remove(layer)
@stack.delete(layer) ? true : false
end
- def call(x)
+ def forward(x)
@stack.each do |layer|
x = layer.(x)
end
x
end
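Note: Sequential renames its call to forward so it fits the new Chain contract; Chain#call (and therefore predict, train_on_batch, and so on) still reaches the layer stack. A hedged construction sketch, assuming add and the layer, optimizer, and loss classes behave as in the gem's examples:

    model = DNN::Models::Sequential.new
    model.add(DNN::Layers::InputLayer.new(784))
    model.add(DNN::Layers::Dense.new(10))
    model.setup(DNN::Optimizers::SGD.new, DNN::Losses::SoftmaxCrossEntropy.new)
    model.train_on_batch(x_batch, y_batch)   # Chain#call -> Sequential#forward over @stack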