lib/glm/base.rb in glm-0.0.1 vs lib/glm/base.rb in glm-0.0.2
- old
+ new
@@ -1,12 +1,12 @@
class GLM::Base
-
- def initialize(x,y,alpha = 0.1)
+ @@initial_weight = 1
+ def initialize(x,y,alpha = 0.05)
@x = x
@y = y
@@alpha = alpha
- @theta = Array.new(x.column_size,1)
+ @theta = GSL::Vector.alloc(Array.new(x.size2, @@initial_weight))
end
#Log partition function <b>a(eta)</b>, intended to be overridden
def a
raise 'Log partition function a(eta) undefined'
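
The first hunk swaps the stdlib Matrix for rb-gsl types: the column count moves from column_size to GSL's size2, the default learning rate drops from 0.1 to 0.05, and the initial weight is hoisted into the class variable @@initial_weight. A minimal sketch of the new construction, assuming the gsl gem (rb-gsl) is installed:

    require 'gsl'

    # Design matrix: rows are samples, columns are features.
    x = GSL::Matrix[[1.0, 2.0], [3.0, 4.0]]
    # 0.0.1: @theta = Array.new(x.column_size, 1)   (stdlib Matrix)
    # 0.0.2: one weight per column, seeded with @@initial_weight = 1
    theta = GSL::Vector.alloc(Array.new(x.size2, 1))
    theta   # => GSL::Vector [1.0, 1.0]
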
@@ -16,26 +16,20 @@
def b
raise 'b undefined'
end
def format(x)
- if x.is_a? Array
- if x[0].is_a?(Array)
- x.map {|e|
- output(e)}
- else
- output(x)
- end
- #Assuming x.is_a?(Matrix) == true
- else
- x.row_vectors.map {|e|
- output(Matrix.row_vector(e))
- }
+ if x.is_a? GSL::Vector
+ return output(x)
+ elsif x.is_a? GSL::Matrix
+ tmp = GSL::Vector.alloc x.size1
+ (0...x.size1).each {|i|
+       tmp[i] = output(x.row(i))}
+ return tmp
end
end
-
# Estimator
# =Arguments:
# x: a feature vector in Array
# =Returns:
# Estimation
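
format now dispatches on the GSL types instead of sniffing nested Arrays: a single GSL::Vector yields one estimate, while a GSL::Matrix yields a GSL::Vector with one output per row. A usage sketch under the assumption of a hypothetical subclass that supplies h (plain linear regression, where output needs no overriding; PlainLinear and the require path are illustrative, not part of the gem):

    require 'gsl'
    require 'glm'   # assumed entry point of the gem

    # Hypothetical subclass for illustration only.
    class PlainLinear < GLM::Base
      def h(v); eta(v); end   # plain linear regression: E(y|x) = theta . x
    end

    model = PlainLinear.new(GSL::Matrix[[1.0, 2.0]], GSL::Vector[1.0])
    model.format(GSL::Vector[1.0, 2.0])                 # one sample -> 3.0
    model.format(GSL::Matrix[[1.0, 2.0], [3.0, 4.0]])   # per row    -> [3.0, 7.0]
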
@@ -44,16 +38,16 @@
end
#Output estimation from E(y|theta,x)
#Need overriding, except for plain linear regression
def output(x)
- return h(x.t)
+ return h(x)
end
#Natural parameter eta
def eta(x)
- tmp = (Matrix.column_vector(@theta).t * x)[0,0]
+ tmp = @theta * x.transpose
return tmp
end
#Sufficient statistic <b>T</b>
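
In the hunk above, eta becomes an honest inner product: the old code built a one-row Matrix from @theta and indexed the 1x1 product with [0,0], while @theta * x.transpose multiplies a GSL row vector by a column vector, which rb-gsl returns as a plain scalar. That is also why output drops the x.t transpose: eta now takes the row vector directly. For instance:

    require 'gsl'

    theta = GSL::Vector[0.5, -1.0]
    x     = GSL::Vector[2.0, 3.0]
    theta * x.transpose   # => -2.0, i.e. the dot product theta . x
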
@@ -67,11 +61,12 @@
end
#Gradient on one sample
def gradient(x,y,v)
tmp = h(v)
- return (y - tmp) * x
+ res = (y - tmp) * x
+ return res
end
# Hypothesis function, outputs E(y|theta, x), the mean of y given x as parameterized by theta
# =Parameters:
# x: a feature vector
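
gradient merely names its intermediate now, but the formula is the heart of the GLM update: for an exponential-family model with canonical link, the per-sample gradient of the log-likelihood in theta_j is (y - h(x)) * x_j, which is why the same method serves linear and logistic regression once h is overridden. A worked scalar example:

    y   = 1.0        # observed target
    hv  = 0.73       # current estimate h(v) for this sample
    x_j = 2.0        # j-th feature of the sample
    (y - hv) * x_j   # => 0.54; positive, so theta_j gets pushed upward
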
@@ -87,14 +82,16 @@
end
#One complete pass of stochastic gradient descent
def sto_update()
- (0...(@x.row_size)).each do |i|
- (0...(@x.column_size)).each do |j|
- @theta[j] += @@alpha * gradient(@x[i,j], @y[i,0], Matrix.column_vector(@x.row(i)))
+ (0...(@x.size1)).each do |i|
+ (0...(@x.size2)).each do |j|
+ updates = gradient(@x[i,j], @y[i], @x.row(i))
+ @theta[j] = @theta[j] + @@alpha * updates
end
end
+ pp @theta
end
def theta()
return @theta
end
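
sto_update is one full pass of stochastic gradient descent: for every sample i and coordinate j it nudges theta[j] by alpha times the per-sample gradient, with @y now indexed as a GSL::Vector (@y[i]) instead of a one-column Matrix (@y[i,0]), and the added pp @theta printing the weights after each pass. An end-to-end sketch, reusing the hypothetical PlainLinear subclass from the earlier note (names and the require path are assumptions, not the gem's own API):

    require 'gsl'
    require 'glm'   # assumed entry point of the gem

    class PlainLinear < GLM::Base   # illustrative subclass, as above
      def h(v); eta(v); end
    end

    x = GSL::Matrix[[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]]  # bias column + one feature
    y = GSL::Vector[2.0, 3.0, 4.0]                        # exactly y = 1 + x
    model = PlainLinear.new(x, y, 0.05)
    3.times { model.sto_update }   # each pass pp's the current theta
    model.theta   # => GSL::Vector [1.0, 1.0]: the seed weights already fit, so every gradient is zero
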