lib/ai4r/neural_network/backpropagation.rb in ai4r-1.12 vs lib/ai4r/neural_network/backpropagation.rb in ai4r-1.13
- old
+ new
@@ -42,11 +42,11 @@
# = Parameters
#
# Use class method get_parameters_info to obtain details on the algorithm
# parameters. Use set_parameters to set values for these parameters.
#
- # * :disable_bias => If true, the alforithm will not use bias nodes.
+ # * :disable_bias => If true, the algorithm will not use bias nodes.
# False by default.
# * :initial_weight_function => f(n, i, j) must return the initial
# weight for the connection between the node i in layer n, and node j in
# layer n+1. By default a random number in [-1, 1) range.
# * :propagation_function => By default:
@@ -84,11 +84,11 @@
# Url:: http://ai4r.org
class Backpropagation
include Ai4r::Data::Parameterizable
- parameters_info :disable_bias => "If true, the alforithm will not use "+
+ parameters_info :disable_bias => "If true, the algorithm will not use "+
"bias nodes. False by default.",
:initial_weight_function => "f(n, i, j) must return the initial "+
"weight for the conection between the node i in layer n, and "+
"node j in layer n+1. By default a random number in [-1, 1) range.",
:propagation_function => "By default: " +
@@ -134,10 +134,21 @@
init_network if !@weights
feedforward(input_values)
return @activation_nodes.last.clone
end
+ # Evaluates the input and returns the index of the most active
+ # output node.
+ # E.g.
+ #   net = Backpropagation.new([4, 3, 2])
+ #   net.eval_result([25, 32.3, 12.8, 1.5])
+ #   # eval returns [0.83, 0.03], so the most active node is index 0
+ #   # => 0
+ def eval_result(input_values)
+ result = eval(input_values)
+ result.index(result.max)
+ end
+
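
A usage sketch for the new eval_result; the label array and input values below are invented for illustration:

    require 'ai4r'

    # Hypothetical 3-class classifier: 4 input nodes, one output per class.
    net = Ai4r::NeuralNetwork::Backpropagation.new([4, 3, 3])
    labels = ["low", "medium", "high"]   # illustrative class names

    # eval returns the raw activation of every output node;
    # eval_result collapses that array to the index of the strongest one.
    activations = net.eval([25, 32.3, 12.8, 1.5])
    winner      = net.eval_result([25, 32.3, 12.8, 1.5])
    puts labels[winner]
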
# This method trains the network using the backpropagation algorithm.
#
# input: Network's input
#
# output: Expected output for the given input.
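
For context, train consumes one input/expected-output pair per call. A minimal XOR sketch against that API (iteration count chosen arbitrarily):

    require 'ai4r'

    net = Ai4r::NeuralNetwork::Backpropagation.new([2, 2, 1])

    # XOR truth table: [input, expected_output] pairs.
    examples = [
      [[0, 0], [0]],
      [[0, 1], [1]],
      [[1, 0], [1]],
      [[1, 1], [0]]
    ]

    2000.times do
      examples.each { |input, expected| net.train(input, expected) }
    end

    examples.each do |input, expected|
      printf("%s => %.3f (expected %d)\n",
             input.inspect, net.eval(input).first, expected.first)
    end
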
@@ -176,23 +187,23 @@
@momentum,
@weights,
@last_changes,
@activation_nodes
]
- end
+ end
- def marshal_load(ary)
- @structure,
- @disable_bias,
- @learning_rate,
- @momentum,
- @weights,
- @last_changes,
- @activation_nodes = ary
- @initial_weight_function = lambda { |n, i, j| ((rand 2000)/1000.0) - 1}
- @propagation_function = lambda { |x| 1/(1+Math.exp(-1*(x))) } #lambda { |x| Math.tanh(x) }
- @derivative_propagation_function = lambda { |y| y*(1-y) } #lambda { |y| 1.0 - y**2 }
- end
+ def marshal_load(ary)
+ @structure,
+ @disable_bias,
+ @learning_rate,
+ @momentum,
+ @weights,
+ @last_changes,
+ @activation_nodes = ary
+ @initial_weight_function = lambda { |n, i, j| ((rand 2000)/1000.0) - 1}
+ @propagation_function = lambda { |x| 1/(1+Math.exp(-1*(x))) } #lambda { |x| Math.tanh(x) }
+ @derivative_propagation_function = lambda { |y| y*(1-y) } #lambda { |y| 1.0 - y**2 }
+ end
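
The three lambdas are deliberately absent from marshal_dump and rebuilt to their defaults in marshal_load, because Proc objects cannot be serialized by Marshal. A round-trip sketch; note that any custom propagation_function must be reassigned after loading:

    require 'ai4r'

    net = Ai4r::NeuralNetwork::Backpropagation.new([2, 2, 1])
    net.train([0, 1], [1])

    # Weights, structure and momentum survive the round trip; the
    # weight/propagation/derivative lambdas come back as the defaults.
    restored = Marshal.load(Marshal.dump(net))
    restored.eval([0, 1])
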
# Propagate error backwards
def backpropagate(expected_output_values)
check_output_dimension(expected_output_values.length)
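
backpropagate relies on derivative_propagation_function being expressed in terms of a node's output y: for the default sigmoid s(x) = 1/(1 + e**-x), the derivative is s(x) * (1 - s(x)), hence the y*(1-y) lambda above. A quick finite-difference check of that identity:

    sigmoid    = lambda { |x| 1 / (1 + Math.exp(-x)) }
    derivative = lambda { |y| y * (1 - y) }   # same form as the lambda above

    x = 0.7
    y = sigmoid.call(x)
    finite_diff = (sigmoid.call(x + 1e-6) - sigmoid.call(x - 1e-6)) / 2e-6
    puts derivative.call(y)   # ~0.2217
    puts finite_diff          # agrees to ~6 decimal places
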