lib/dnn/core/models.rb in ruby-dnn-0.14.1 vs lib/dnn/core/models.rb in ruby-dnn-0.14.2
- old
+ new
@@ -143,11 +143,11 @@
# @param [Numo::SFloat] y Output test data.
# @param [Integer] batch_size Batch size used for one test.
# @return [Hash] Hash of contents to be output to log.
private def test(x, y, batch_size: 100)
acc, test_loss = accuracy(x, y, batch_size: batch_size)
- { accuracy: acc, test_loss: test_loss.mean }
+ { accuracy: acc, test_loss: test_loss }
end
# Performs a single training step.
# Set up the model before using this method.
# @param [Numo::SFloat] x Input training data.
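
The removed .mean call reflects that, as of 0.14.2, accuracy already reduces the per-batch losses to a single scalar (see the sum_loss change in the next hunk), so test can pass the value straight into the log. A minimal usage sketch of the public API around this change; model, x_test, and y_test are illustrative names, not part of the diff:

# Hedged sketch: assumes a model that has been set up and trained,
# plus Numo::SFloat test arrays x_test / y_test.
acc, mean_loss = model.accuracy(x_test, y_test, batch_size: 100)
puts format("accuracy: %.4f, test loss: %.4f", acc, mean_loss)  # mean_loss is a plain Float in 0.14.2
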
@@ -178,11 +178,11 @@
check_xy_type(x, y)
num_test_datas = x.is_a?(Array) ? x[0].shape[0] : x.shape[0]
batch_size = batch_size >= num_test_datas ? num_test_datas : batch_size # clamp batch size to the dataset size
iter = Iterator.new(x, y, random: false)
total_correct = 0
- sum_loss = Xumo::SFloat[0]
+ sum_loss = 0
max_steps = (num_test_datas.to_f / batch_size).ceil
iter.foreach(batch_size) do |x_batch, y_batch|
correct, loss_value = test_on_batch(x_batch, y_batch)
total_correct += correct
sum_loss += loss_value
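
Starting the accumulator at a plain 0 instead of Xumo::SFloat[0] works because test_on_batch now yields the loss as a Ruby Float, so Float addition keeps sum_loss scalar and the mean presumably falls out of an ordinary division by max_steps after the loop. A self-contained sketch of the accumulation pattern; the loss values are made up:

sum_loss = 0                                  # plain numeric accumulator, no Xumo wrapper
per_batch_losses = [0.52, 0.47, 0.44]         # illustrative per-batch loss values
per_batch_losses.each { |loss_value| sum_loss += loss_value }
mean_loss = sum_loss / per_batch_losses.size  # scalar mean, no .mean reduction needed
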
@@ -200,10 +200,10 @@
# @return [Array] Returns the number of correct predictions and the batch loss in the form [correct, loss_value].
def test_on_batch(x, y)
call_callbacks(:before_test_on_batch)
x = forward(x, false)
correct = evaluate(x, y)
- loss_value = @loss_func.loss(x, y, layers)
+ loss_value = @loss_func.loss(x, y)
call_callbacks(:after_test_on_batch)
[correct, loss_value]
end
# Implement the process to evaluate this model.
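
Dropping the layers argument calls the loss function in its plain two-argument form on the test path; in 0.14.1 the extra argument presumably let the loss function fold per-layer regularization penalties into the reported test loss. A hedged sketch of the batch-level API; x_batch and y_batch are illustrative:

# Hedged sketch: test_on_batch returns [correct, loss_value], where
# correct is a count of right predictions, not a ratio.
correct, loss_value = model.test_on_batch(x_batch, y_batch)
batch_accuracy = correct.to_f / x_batch.shape[0]
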