lib/dnn/core/models.rb in ruby-dnn-0.13.2 vs lib/dnn/core/models.rb in ruby-dnn-0.13.3
- old
+ new
@@ -80,24 +80,26 @@
# Start training.
# Set up the model before using this method.
# @param [Numo::SFloat] x Input training data.
# @param [Numo::SFloat] y Output training data.
# @param [Integer] epochs Number of training epochs.
+ # @param [Integer] initial_epoch Initial epoch.
# @param [Integer] batch_size Batch size used for one training.
# @param [Array | NilClass] test If you want to test the model every epoch,
# specify [x_test, y_test]. If you do not want to test the model, specify nil.
# @param [Boolean] verbose Set true to display the log. If false is set, the log is not displayed.
def train(x, y, epochs,
batch_size: 1,
+ initial_epoch: 1,
test: nil,
verbose: true)
raise DNN_Error.new("The model is not optimizer setup complete.") unless @optimizer
raise DNN_Error.new("The model is not loss_func setup complete.") unless @loss_func
check_xy_type(x, y)
iter = Iterator.new(x, y)
num_train_datas = x.is_a?(Array) ? x[0].shape[0] : x.shape[0]
- (1..epochs).each do |epoch|
+ (initial_epoch..epochs).each do |epoch|
call_callbacks(:before_epoch, epoch)
puts "【 epoch #{epoch}/#{epochs} 】" if verbose
iter.foreach(batch_size) do |x_batch, y_batch, index|
loss_value = train_on_batch(x_batch, y_batch)
if loss_value.is_a?(Xumo::SFloat)
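Note: the new initial_epoch keyword lets a run resume from a later epoch instead of always counting from 1. A minimal usage sketch, assuming a model that has already been set up with an optimizer and loss function; model, x_train, y_train, x_test and y_test are placeholder names, not part of this file:

    # First run: epochs 1..10.
    model.train(x_train, y_train, 10, batch_size: 32)
    # Resume later: epochs 11..20, testing after each epoch.
    model.train(x_train, y_train, 20,
                batch_size: 32,
                initial_epoch: 11,
                test: [x_test, y_test])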
@@ -134,29 +136,31 @@
# Train the model on a single batch.
# Set up the model before using this method.
# @param [Numo::SFloat] x Input training data.
# @param [Numo::SFloat] y Output training data.
+ # @param [Integer] batch_size Batch size used for one test.
# @return [Float | Numo::SFloat] Return loss value in the form of Float or Numo::SFloat.
def train_on_batch(x, y)
raise DNN_Error.new("The model is not optimizer setup complete.") unless @optimizer
raise DNN_Error.new("The model is not loss_func setup complete.") unless @loss_func
check_xy_type(x, y)
call_callbacks(:before_train_on_batch)
x = forward(x, true)
loss_value = @loss_func.loss(x, y, layers)
dy = @loss_func.backward(x, y)
backward(dy)
- @optimizer.update(layers.uniq)
+ @optimizer.update(layers)
@loss_func.regularizers_backward(layers)
call_callbacks(:after_train_on_batch, loss_value)
loss_value
end
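train_on_batch performs a single forward/backward pass plus one optimizer update, so it can drive a hand-written loop when train is too coarse. A sketch under the same assumptions as above, reusing the gem's Iterator the same way train does (DNN::Iterator and the batch size of 32 are illustrative choices):

    iter = DNN::Iterator.new(x_train, y_train)
    5.times do |epoch|
      iter.foreach(32) do |x_batch, y_batch, index|
        loss = model.train_on_batch(x_batch, y_batch)
        puts "epoch=#{epoch} batch=#{index} loss=#{loss.inspect}"
      end
    end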
# Evaluate model and get accuracy of test data.
# @param [Numo::SFloat] x Input test data.
# @param [Numo::SFloat] y Output test data.
+ # @param [Integer] batch_size Batch size used for one test.
# @return [Array] Returns the test data accuracy and mean loss in the form [accuracy, mean_loss].
def accuracy(x, y, batch_size: 100)
check_xy_type(x, y)
num_test_datas = x.is_a?(Array) ? x[0].shape[0] : x.shape[0]
batch_size = batch_size >= num_test_datas[0] ? num_test_datas : batch_size
@@ -184,22 +188,25 @@
loss_value = @loss_func.loss(x, y, layers)
call_callbacks(:after_test_on_batch, loss_value)
[correct, loss_value]
end
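accuracy walks the test set in mini-batches of batch_size and returns the accuracy together with the mean loss, as documented above. A usage sketch with placeholder names:

    acc, mean_loss = model.accuracy(x_test, y_test, batch_size: 128)
    puts "accuracy: #{acc}, mean loss: #{mean_loss}"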
- private def evaluate(y, t)
- if y.shape[1..-1] == [1]
+ # Implement the process to evaluate this model.
+ # @param [Numo::SFloat] x Input test data.
+ # @param [Numo::SFloat] y Output test data.
+ private def evaluate(x, y)
+ if x.shape[1..-1] == [1]
correct = 0
- y.shape[0].times do |i|
+ x.shape[0].times do |i|
if @loss_func.is_a?(Losses::SigmoidCrossEntropy)
- correct += 1 if (y[i, 0] < 0 && t[i, 0] < 0.5) || (y[i, 0] >= 0 && t[i, 0] >= 0.5)
+ correct += 1 if (x[i, 0] < 0 && y[i, 0] < 0.5) || (x[i, 0] >= 0 && y[i, 0] >= 0.5)
else
- correct += 1 if (y[i, 0] < 0 && t[i, 0] < 0) || (y[i, 0] >= 0 && t[i, 0] >= 0)
+ correct += 1 if (x[i, 0] < 0 && y[i, 0] < 0) || (x[i, 0] >= 0 && y[i, 0] >= 0)
end
end
else
- correct = y.max_index(axis: 1).eq(t.max_index(axis: 1)).count
+ correct = x.max_index(axis: 1).eq(y.max_index(axis: 1)).count
end
correct
end
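The helper's parameters were renamed so that x is the model's forward output and y is the ground truth, matching the convention used elsewhere in this file. For multi-dimensional output it counts the rows whose maximum index agrees. A standalone illustration of that counting rule with plain Numo arrays (this does not call the private method itself):

    require "numo/narray"

    output = Numo::SFloat[[0.1, 0.8, 0.1], [0.7, 0.2, 0.1]]  # model output
    labels = Numo::SFloat[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]  # one-hot targets
    correct = output.max_index(axis: 1).eq(labels.max_index(axis: 1)).count
    # => 1 (only the first row picks the same class as its label)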
# Predict data.
@@ -269,11 +276,11 @@
else
get_layers.(link.prev)
end
end
get_layers.(@last_link)
- @layers_cache = layers
+ @layers_cache = layers.uniq
end
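Deduplication now happens once, when @layers_cache is built, which is why @optimizer.update and naming (below) no longer call uniq themselves. Duplicates can arise when one layer instance is reached through more than one link while walking the graph from @last_link; a hedged sketch, assuming the gem's Sequential model and standard layers (the instance reuse is purely illustrative):

    shared = DNN::Layers::Dense.new(64)      # hypothetical shared instance
    model = DNN::Models::Sequential.new
    model << DNN::Layers::InputLayer.new(64)
    model << shared
    model << DNN::Layers::ReLU.new
    model << shared                          # reused: after a forward pass, the
                                             # layers method above lists it only once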
# Get the all has param layers.
# @return [Array] All has param layers array.
def has_param_layers
@@ -314,11 +321,11 @@
callback.call(*args)
end
end
def naming
- layers.uniq.each do |layer|
- id = layers.uniq.select { |l| l.is_a?(layer.class) }.index(layer)
+ layers.each do |layer|
+ id = layers.select { |l| l.is_a?(layer.class) }.index(layer)
class_name = layer.class.name.split("::").last
layer.name = "#{class_name}_#{id}".to_sym unless layer.name
if layer.is_a?(Layers::HasParamLayer)
layer.get_params.each do |param_key, param|
param.name = "#{layer.name}__#{param_key}".to_sym unless param.name
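Because layers is already unique, naming can index layers directly: each layer receives a class-scoped index (e.g. a model with two Dense layers names them :Dense_0 and :Dense_1), and parameter names are derived from the layer name. A standalone illustration of the indexing rule, using throwaway stand-in classes rather than real gem layers:

    Dense = Struct.new(:name)
    ReLU  = Struct.new(:name)
    layers = [Dense.new, ReLU.new, Dense.new]
    layers.each do |layer|
      id = layers.select { |l| l.is_a?(layer.class) }.index(layer)
      puts "#{layer.class.name.split("::").last}_#{id}"
    end
    # => Dense_0, ReLU_0, Dense_1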