examples/cifar10_example.rb in ruby-dnn-0.1.8 vs examples/cifar10_example.rb in ruby-dnn-0.2.0
- old
+ new
@@ -10,38 +10,38 @@
CIFAR10 = DNN::CIFAR10
x_train, y_train = CIFAR10.load_train
x_test, y_test = CIFAR10.load_test
-x_train = SFloat.cast(x_train).reshape(x_train.shape[0], 32, 32, 3).transpose(0, 3, 2, 1)
-x_test = SFloat.cast(x_test).reshape(x_test.shape[0], 32, 32, 3).transpose(0, 3, 2, 1)
+x_train = SFloat.cast(x_train).transpose(0, 2, 3, 1)
+x_test = SFloat.cast(x_test).transpose(0, 2, 3, 1)
x_train /= 255
x_test /= 255
y_train = DNN::Util.to_categorical(y_train, 10)
y_test = DNN::Util.to_categorical(y_test, 10)
model = Model.new
-model << InputLayer.new([3, 32, 32])
+model << InputLayer.new([32, 32, 3])
-model << Conv2D.new(16, 5, 5)
+model << Conv2D.new(16, 5, 5, padding: true)
model << BatchNormalization.new
model << ReLU.new
-model << Conv2D.new(16, 5, 5)
+model << Conv2D.new(16, 5, 5, padding: true)
model << BatchNormalization.new
model << ReLU.new
model << MaxPool2D.new(2, 2)
-model << Conv2D.new(32, 5, 5)
+model << Conv2D.new(32, 5, 5, padding: true)
model << BatchNormalization.new
model << ReLU.new
-model << Conv2D.new(32, 5, 5)
+model << Conv2D.new(32, 5, 5, padding: true)
model << BatchNormalization.new
model << ReLU.new
model << Flatten.new
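
The hunk above changes the data layout and the convolution setup. The disappearance of the explicit reshape suggests that in 0.2.0 CIFAR10.load_train already returns images shaped (N, 3, 32, 32), so a single transpose moves them to channels-last (N, 32, 32, 3), matching the new InputLayer.new([32, 32, 3]). The added padding: true presumably zero-pads each Conv2D so the 32x32 spatial size is preserved between layers. A minimal standalone sketch of the layout assumption, using Numo directly; the loader's exact return shape is an assumption, not shown by this diff:

# Assuming the loader yields channels-first (N, 3, 32, 32) batches,
# transpose(0, 2, 3, 1) reorders them to channels-last (N, 32, 32, 3),
# which is what InputLayer.new([32, 32, 3]) expects.
require "numo/narray"

x = Numo::SFloat.zeros(100, 3, 32, 32)  # hypothetical batch of 100 CIFAR-10 images
p x.transpose(0, 2, 3, 1).shape         # => [100, 32, 32, 3]
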
@@ -53,10 +53,6 @@
model << Dense.new(10)
model << SoftmaxWithLoss.new
model.compile(Adam.new)
-model.train(x_train, y_train, 20, batch_size: 100) do
- model.test(x_test, y_test)
-end
-
-model.save("trained_cifar10.marshal")
+model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
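
The second hunk changes the training API: the epoch count drops from 20 to 10, evaluation data moves from an explicit block calling model.test into a test: keyword argument, and the model.save call is dropped from the example. A side-by-side sketch of the two call shapes; the block form is copied from the 0.1.8 side, and whether model.save still exists in 0.2.0 is not shown by this diff:

# ruby-dnn 0.1.8 style: evaluate manually in a block run during training.
model.train(x_train, y_train, 20, batch_size: 100) do
  model.test(x_test, y_test)
end

# ruby-dnn 0.2.0 style: pass the test set and let train handle evaluation.
model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
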