lib/zk/pool.rb in zk-1.9.6 vs lib/zk/pool.rb in zk-1.10.0
- old (zk-1.9.6)
+ new (zk-1.10.0)
The changed lines in the hunks below render identically because they appear to differ only in trailing whitespace.
@@ -1,20 +1,20 @@
module ZK
module Pool
- # Base class for a ZK connection pool. There are some applications that may
+ # Base class for a ZK connection pool. There are some applications that may
# require high synchronous throughput, which would be a suitable use for a
# connection pool. The ZK::Client::Threaded class is threadsafe, so it's
# not a problem accessing it from multiple threads, but it is limited to
# one outgoing synchronous request at a time, which could cause throughput
- # issues for apps that are making very heavy use of zookeeper.
+ # issues for apps that are making very heavy use of zookeeper.
#
# The problem with using a connection pool is the added complexity when you
# try to use watchers. It may be possible to register a watch with one
# connection, and then call `:watch => true` on a different connection if
# you're not careful. Events delivered as part of an event handler have a
# `zk` attribute which can be used to access the connection that the
- # callback is registered with.
+ # callback is registered with.
#
# Unless you're sure you *need* a connection pool, then avoid the added
# complexity.
#
class Base
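
The class comment above covers the two key points: a pool helps when many threads need synchronous throughput, and a watch should be registered and re-armed on the same underlying connection. Below is a minimal usage sketch, not part of the diff; the `ZK::Pool::Simple` constructor arguments, the `register`/`stat` calls, the connection string, and the paths are assumptions, while `with_connection`, `close_all!`, and the event's `zk` attribute appear elsewhere in this file.

    require 'zk'

    # Constructor arguments assumed: connection string plus a fixed number of clients.
    pool = ZK::Pool::Simple.new('localhost:2181', 5)

    pool.with_connection do |zk|
      # Register the watcher block and arm the watch on the same checked-out
      # connection, and re-arm it through event.zk so the watch never straddles
      # two different connections from the pool.
      zk.register('/some/node') do |event|
        event.zk.stat('/some/node', :watch => true)
      end
      zk.stat('/some/node', :watch => true)
    end

    pool.close_all!
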
@@ -25,11 +25,11 @@
def initialize
@state = :init
@mutex = Monitor.new
@checkin_cond = @mutex.new_cond
-
+
@connections = [] # all connections we control
@pool = [] # currently available connections
# this is required for 1.8.7 compatibility
@on_connected_subs = {}
@@ -56,11 +56,11 @@
@state == :forced
end
# close all the connections on the pool
def close_all!
- @mutex.synchronize do
+ @mutex.synchronize do
return unless open?
@state = :closing
@checkin_cond.wait_until { (@pool.size == @connections.length) or closed? }
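
close_all! above is the graceful path: it flips the pool into :closing and waits on the checkin condition until every connection has been returned. A short sketch of the assumed calling pattern (the constructor arguments and znode path are placeholders):

    require 'zk'

    pool = ZK::Pool::Simple.new('localhost:2181', 2)   # args assumed

    pool.with_connection { |zk| zk.exists?('/some/node') }

    # Blocks until @pool.size == @connections.length (nothing is checked out),
    # then closes the connections. Checkouts arriving after this point fail
    # assert_open! with PoolIsShuttingDownException instead of receiving a
    # half-closed connection.
    pool.close_all!
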
@@ -84,11 +84,11 @@
end
end
@state = :closed
- # free any waiting
+ # free any waiting
@checkin_cond.broadcast
end
end
# @private
@@ -144,11 +144,11 @@
def synchronize
@mutex.synchronize { yield }
end
def assert_open!
- raise Exceptions::PoolIsShuttingDownException, "pool is shutting down" unless open?
+ raise Exceptions::PoolIsShuttingDownException, "pool is shutting down" unless open?
end
end # Base
# like a Simple pool but has high/low watermarks, and can grow dynamically as needed
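
Bounded is the growable variant: it starts at a low watermark and adds connections on demand up to a high watermark. A hedged construction sketch; the option names (:min_clients, :max_clients) and the eager creation of the minimum are assumptions based on the gem's docs, not on this excerpt.

    require 'zk'

    pool = ZK::Pool::Bounded.new('localhost:2181',
                                 :min_clients => 1,   # assumed: opened up front
                                 :max_clients => 5)   # assumed: ceiling for growth

    # Under concurrent load the pool grows via add_connection! until the
    # ceiling is hit; after that, callers block waiting for a checkin.
    10.times.map do
      Thread.new { pool.with_connection { |zk| zk.exists?('/busy/path') } }
    end.each(&:join)

    pool.close_all!
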
@@ -209,11 +209,11 @@
# number of threads waiting for connections
def count_waiters #:nodoc:
@mutex.synchronize { @count_waiters }
end
- def checkout(blocking=true)
+ def checkout(blocking=true)
raise ArgumentError, "checkout does not take a block, use .with_connection" if block_given?
@mutex.lock
begin
while true
assert_open!
@@ -222,11 +222,11 @@
cnx = @pool.shift
# XXX(slyphon): not really sure how this case happens, but protect against it as we're
# seeing an issue in production
next if cnx.nil?
-
+
# if the connection isn't connected, then set up an on_connection
# handler and try the next one in the pool
unless cnx.connected?
logger.debug { "connection #{cnx.object_id} is not connected" }
handle_checkin_on_connection(cnx)
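
checkout and checkin can also be driven by hand when the block form of with_connection is awkward. The non-blocking branch is not shown in this excerpt; it is assumed to return false when nothing is available and the pool cannot grow, and checkin is assumed to be the public counterpart to checkout. A sketch under those assumptions:

    require 'zk'

    pool = ZK::Pool::Simple.new('localhost:2181', 1)   # args assumed

    if zk = pool.checkout(false)   # non-blocking; assumed to return false when exhausted
      begin
        zk.exists?('/some/node')
      ensure
        pool.checkin(zk)           # always hand the connection back
      end
    else
      # nothing free and the pool cannot grow: back off, retry, or degrade
    end

    pool.close_all!
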
@@ -261,11 +261,11 @@
end
def add_connection!
@mutex.synchronize do
cnx = create_connection
- @connections << cnx
+ @connections << cnx
handle_checkin_on_connection(cnx)
end # synchronize
end
@@ -279,10 +279,10 @@
do_checkin.call
return
else
@on_connected_subs.synchronize do
- sub = cnx.on_connected do
+ sub = cnx.on_connected do
# this synchronization is to prevent a race between setting up the subscription
# and assigning it to the @on_connected_subs hash. It's possible that the callback
# would fire before we had a chance to add the sub to the hash.
@on_connected_subs.synchronize do
if sub = @on_connected_subs.delete(cnx)
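
The comment in this last hunk describes a classic registration race: an on_connected callback could fire before its subscription handle lands in @on_connected_subs. The guard is to take the same lock around registering the callback and recording the handle, so the callback (which grabs that lock before looking the handle up) can never get ahead of the bookkeeping. A standalone illustration of the pattern, with a plain Thread standing in for the asynchronous event and placeholder values for the connection and subscription:

    require 'monitor'

    subs = {}.extend(MonitorMixin)   # stands in for @on_connected_subs
    cnx  = :connection               # placeholder for a pooled connection
    callback = nil

    subs.synchronize do
      # The "callback" also synchronizes on subs, so although the thread starts
      # before the assignment below, it cannot read the hash until this block
      # releases the monitor -- by which point the handle has been stored.
      callback = Thread.new do
        subs.synchronize do
          if sub = subs.delete(cnx)
            # never nil here; the real code unsubscribes and checks the connection back in
          end
        end
      end

      subs[cnx] = :subscription_handle
    end

    callback.join
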