lib/tensorflow/math.rb in tensorflow-0.1.1 vs lib/tensorflow/math.rb in tensorflow-0.1.2
- removed (present only in tensorflow-0.1.1)
+ added (present only in tensorflow-0.1.2)
@@ -222,12 +222,14 @@
def log1p(x)
RawOps.log1p(x: x)
end
- # def log_sigmoid
- # end
+ def log_sigmoid(x)
+ x = TensorFlow.convert_to_tensor(x)
+ negative(RawOps.softplus(features: -x))
+ end
def log_softmax(logits)
RawOps.log_softmax(logits: logits)
end
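
The new log_sigmoid uses the identity log(sigmoid(x)) = -softplus(-x), which stays numerically stable for large negative x instead of underflowing sigmoid to 0 and taking log(0). A minimal usage sketch, assuming the methods are exposed as TensorFlow::Math and that convert_to_tensor accepts a plain Ruby array (the input values are illustrative):

# Assumed namespace: TensorFlow::Math; input values are made up for illustration.
x = TensorFlow.convert_to_tensor([-100.0, 0.0, 100.0])
TensorFlow::Math.log_sigmoid(x)
# computed internally as negative(softplus(-x)), i.e. -log(1 + exp(-x))
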
@@ -241,12 +243,13 @@
def logical_or(x, y)
RawOps.logical_or(x: x, y: y)
end
- # def logical_xor
- # end
+ def logical_xor(x, y)
+ logical_and(logical_or(x, y), logical_not(logical_and(x, y)))
+ end
def maximum(x, y)
RawOps.maximum(x: x, y: y)
end
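
logical_xor is composed from the primitives that already existed: (x OR y) AND NOT (x AND y). A hedged sketch, assuming boolean tensors can be built with convert_to_tensor and the methods are called through TensorFlow::Math:

# Illustrative boolean inputs (assumption: convert_to_tensor handles bool arrays).
a = TensorFlow.convert_to_tensor([true, true, false, false])
b = TensorFlow.convert_to_tensor([true, false, true, false])
TensorFlow::Math.logical_xor(a, b)   # elementwise => [false, true, true, false]
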
@@ -260,15 +263,17 @@
def multiply(x, y)
RawOps.mul(x: x, y: y)
end
- # def multiply_no_nan
- # end
+ def multiply_no_nan(x, y)
+ RawOps.mul_no_nan(x: x, y: y)
+ end
- # def negative
- # end
+ def negative(x)
+ RawOps.neg(x: x)
+ end
# def nextafter
# end
def not_equal(x, y)
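
multiply_no_nan and negative are thin wrappers over RawOps.mul_no_nan and RawOps.neg. The useful property of mul_no_nan is that positions where y is 0 yield 0 even when x is infinite or NaN, instead of propagating NaN. A sketch under the same assumptions as above (namespace and inputs are illustrative):

# Float::INFINITY * 0.0 would normally produce NaN.
x = TensorFlow.convert_to_tensor([1.0, Float::INFINITY])
y = TensorFlow.convert_to_tensor([2.0, 0.0])
TensorFlow::Math.multiply_no_nan(x, y)   # => [2.0, 0.0] rather than [2.0, NaN]
TensorFlow::Math.negative(x)             # => [-1.0, -Infinity]
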
@@ -298,39 +303,62 @@
# end
# def reduce_all
# end
- # def reduce_any
- # end
+ def reduce_any(input_tensor, axis: nil, keepdims: false)
+ input_tensor = TensorFlow.convert_to_tensor(input_tensor)
+ axis ||= reduction_dims(input_tensor)
+ RawOps.any(input: input_tensor, reduction_indices: axis, keep_dims: keepdims)
+ end
# def reduce_euclidean_norm
# end
# def reduce_logsumexp
# end
- # def reduce_max
- # end
+ def reduce_max(input_tensor, axis: nil, keepdims: false)
+ input_tensor = TensorFlow.convert_to_tensor(input_tensor)
+ axis ||= reduction_dims(input_tensor)
+ RawOps.max(input: input_tensor, reduction_indices: axis, keep_dims: keepdims)
+ end
- # def reduce_mean
- # end
+ def reduce_mean(input_tensor, axis: nil, keepdims: false)
+ input_tensor = TensorFlow.convert_to_tensor(input_tensor)
+ axis ||= reduction_dims(input_tensor)
+ RawOps.mean(input: input_tensor, reduction_indices: axis, keep_dims: keepdims)
+ end
- # def reduce_min
- # end
+ def reduce_min(input_tensor, axis: nil, keepdims: false)
+ input_tensor = TensorFlow.convert_to_tensor(input_tensor)
+ axis ||= reduction_dims(input_tensor)
+ RawOps.min(input: input_tensor, reduction_indices: axis, keep_dims: keepdims)
+ end
- # def reduce_prod
- # end
+ def reduce_prod(input_tensor, axis: nil, keepdims: false)
+ input_tensor = TensorFlow.convert_to_tensor(input_tensor)
+ axis ||= reduction_dims(input_tensor)
+ RawOps.prod(input: input_tensor, reduction_indices: axis, keep_dims: keepdims)
+ end
- # def reduce_std
- # end
+ def reduce_std(input_tensor, axis: nil, keepdims: false)
+ variance = reduce_variance(input_tensor, axis: axis, keepdims: keepdims)
+ sqrt(variance)
+ end
- # def reduce_sum
- # end
+ def reduce_sum(input_tensor, axis: nil, keepdims: false)
+ input_tensor = TensorFlow.convert_to_tensor(input_tensor)
+ axis ||= reduction_dims(input_tensor)
+ RawOps.sum(input: input_tensor, reduction_indices: axis, keep_dims: keepdims)
+ end
- # def reduce_variance
- # end
+ def reduce_variance(input_tensor, axis: nil, keepdims: false)
+ means = reduce_mean(input_tensor, axis: axis, keepdims: true)
+ squared_deviations = RawOps.square(x: input_tensor - means)
+ reduce_mean(squared_deviations, axis: axis, keepdims: keepdims)
+ end
def rint(x)
RawOps.rint(x: x)
end
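
Every new reduce_* wrapper follows the same pattern: convert the input to a tensor, default axis to all dimensions via the private reduction_dims helper added at the end of this file, and forward to the matching RawOps kernel with keep_dims. reduce_variance is built on reduce_mean (keeping the means' dimensions so they broadcast against the input), and reduce_std is the square root of that variance. A hedged usage sketch, with illustrative values and the assumed TensorFlow::Math namespace:

# 2x2 example tensor (assumption: convert_to_tensor accepts nested arrays).
m = TensorFlow.convert_to_tensor([[1.0, 2.0], [3.0, 4.0]])

TensorFlow::Math.reduce_sum(m)                              # all axes => 10.0
TensorFlow::Math.reduce_mean(m, axis: [0])                  # column means => [2.0, 3.0]
TensorFlow::Math.reduce_max(m, axis: [1], keepdims: true)   # row maxima, shape [2, 1]

# Variance over all elements: mean 2.5, squared deviations [2.25, 0.25, 0.25, 2.25].
TensorFlow::Math.reduce_variance(m)                         # => 1.25
TensorFlow::Math.reduce_std(m)                              # => sqrt(1.25), about 1.118
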
@@ -457,9 +485,16 @@
# def zero_fraction
# end
def zeta(x, q)
RawOps.zeta(x: x, q: q)
+ end
+
+ private
+
+ def reduction_dims(input_tensor)
+ rank = RawOps.rank(input: input_tensor).value
+ (0...rank).to_a
end
end
end
end
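
The private reduction_dims helper reads the tensor's rank with RawOps.rank (.value pulls the result back out as a Ruby integer) and returns every axis index, so calling a reduce_* method without axis collapses the whole tensor. A hypothetical standalone equivalent, not part of the gem's API, just to show the shape of the result:

# Hypothetical helper mirroring (0...rank).to_a; not defined by the gem.
def all_axes(rank)
  (0...rank).to_a
end
all_axes(3)   # => [0, 1, 2], i.e. reduce over every dimension of a rank-3 tensor
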