lib/memcache.rb in memcache-client-1.6.3 vs lib/memcache.rb in memcache-client-1.6.5

- old (line removed in 1.6.5)
+ new (line added in 1.6.5)

@@ -1,33 +1,49 @@
$TESTING = defined?($TESTING) && $TESTING

require 'socket'
require 'thread'
-require 'timeout'
require 'zlib'
require 'digest/sha1'
-require 'continuum'

+begin
+  # Try to use the SystemTimer gem instead of Ruby's timeout library
+  # when running on something that looks like Ruby 1.8.x. See:
+  # http://ph7spot.com/articles/system_timer
+  # We don't want to bother trying to load SystemTimer on jruby and
+  # ruby 1.9+.
+  if !defined?(RUBY_ENGINE)
+    require 'system_timer'
+    MemCacheTimer = SystemTimer
+  else
+    require 'timeout'
+    MemCacheTimer = Timeout
+  end
+rescue LoadError => e
+  puts "[memcache-client] Could not load SystemTimer gem, falling back to Ruby's slower/unsafe timeout library: #{e.message}"
+  require 'timeout'
+  MemCacheTimer = Timeout
+end

##
# A Ruby client library for memcached.
#

class MemCache

  ##
  # The version of MemCache you are using.

-  VERSION = '1.6.2'
+  VERSION = '1.6.5'

  ##
  # Default options for the cache object.

  DEFAULT_OPTIONS = {
    :namespace => nil,
    :readonly => false,
-    :multithread => false,
+    :multithread => true,
    :failover => true,
    :timeout => 0.5,
    :logger => nil,
  }

@@ -55,11 +71,11 @@
  # The servers this client talks to. Play at your own peril.

  attr_reader :servers

  ##
-  # Socket timeout limit with this client, defaults to 0.25 sec.
+  # Socket timeout limit with this client, defaults to 0.5 sec.
  # Set to nil to disable timeouts.

  attr_reader :timeout

  ##

@@ -79,16 +95,18 @@
  #
  # Valid options for +opts+ are:
  #
  # [:namespace] Prepends this value to all keys added or retrieved.
  # [:readonly] Raises an exception on cache writes when true.
-  # [:multithread] Wraps cache access in a Mutex for thread safety.
+  # [:multithread] Wraps cache access in a Mutex for thread safety. Defaults to true.
  # [:failover] Should the client try to failover to another server if the
  # first server is down? Defaults to true.
-  # [:timeout] Time to use as the socket read timeout. Defaults to 0.25 sec,
-  # set to nil to disable timeouts (this is a major performance penalty in Ruby 1.8).
+  # [:timeout] Time to use as the socket read timeout. Defaults to 0.5 sec,
+  # set to nil to disable timeouts (this is a major performance penalty in Ruby 1.8,
+  # "gem install SystemTimer' to remove most of the penalty).
  # [:logger] Logger to use for info/debug output, defaults to nil
+  #
  # Other options are ignored.

  def initialize(*args)
    servers = []
    opts = {}

@@ -118,10 +136,12 @@
    @logger = opts[:logger]
    @mutex = Mutex.new if @multithread

    logger.info { "memcache-client #{VERSION} #{Array(servers).inspect}" } if logger

+    Thread.current[:memcache_client] = self.object_id if !@multithread
+
    self.servers = servers
  end

  ##
  # Returns a string representation of the cache object.

@@ -158,13 +178,10 @@
        host, port, weight = server.split ':', 3
        port ||= DEFAULT_PORT
        weight ||= DEFAULT_WEIGHT
        Server.new self, host, port, weight
      else
-        if server.multithread != @multithread then
-          raise ArgumentError, "can't mix threaded and non-threaded servers"
-        end
        server
      end
    end

    logger.debug { "Servers now: #{@servers.inspect}" } if logger

@@ -218,10 +235,12 @@
  # Returns a hash of values.
  #
  # cache["a"] = 1
  # cache["b"] = 2
  # cache.get_multi "a", "b" # => { "a" => 1, "b" => 2 }
+  #
+  # Note that get_multi assumes the values are marshalled.

  def get_multi(*keys)
    raise MemCacheError, 'No active servers' unless active?

    keys.flatten!

@@ -351,23 +370,20 @@
  def flush_all
    raise MemCacheError, 'No active servers' unless active?
    raise MemCacheError, "Update of readonly cache" if @readonly

    begin
-      @mutex.lock if @multithread
      @servers.each do |server|
        with_socket_management(server) do |socket|
          socket.write "flush_all\r\n"
          result = socket.gets
          raise_on_error_response! result
          result
        end
      end
    rescue IndexError => err
      handle_error nil, err
-    ensure
-      @mutex.unlock if @multithread
    end
  end

  ##
  # Reset the connection to all memcache servers. This should be called if

@@ -604,10 +620,12 @@
  # raises MemCacheError. Note that the socket connect code marks a server
  # dead for a timeout period, so retrying does not apply to connection attempt
  # failures (but does still apply to unexpectedly lost connections etc.).

  def with_socket_management(server, &block)
+    check_multithread_status!
+
    @mutex.lock if @multithread
    retried = false

    begin
      socket = server.socket

@@ -617,11 +635,11 @@
      raise IndexError, "No connection to server (#{server.status})" if socket.nil?

      block.call(socket)

-    rescue SocketError => err
+    rescue SocketError, Timeout::Error => err
      logger.warn { "Socket failure: #{err.message}" } if logger
      server.mark_dead(err)
      handle_error(server, err)

    rescue MemCacheError, SystemCallError, IOError => err

@@ -695,10 +713,22 @@
  def entry_count_for(server, total_servers, total_weight)
    ((total_servers * Continuum::POINTS_PER_SERVER * server.weight) / Float(total_weight)).floor
  end

+  def check_multithread_status!
+    return if @multithread
+
+    if Thread.current[:memcache_client] != self.object_id
+      raise MemCacheError, <<-EOM
+        You are accessing this memcache-client instance from multiple threads but have not enabled multithread support.
+        Normally: MemCache.new(['localhost:11211'], :multithread => true)
+        In Rails: config.cache_store = [:mem_cache_store, 'localhost:11211', { :multithread => true }]
+      EOM
+    end
+  end
+
  ##
  # This class represents a memcached server instance.

  class Server

@@ -738,11 +768,10 @@
    ##
    # A text status string describing the state of the server.

    attr_reader :status

-    attr_reader :multithread
    attr_reader :logger

    ##
    # Create a new MemCache::Server object for the memcached instance
    # listening on the given host and port, weighted by the given weight.

@@ -753,13 +782,10 @@
      @host = host
      @port = port.to_i
      @weight = weight.to_i

-      @multithread = memcache.multithread
-      @mutex = Mutex.new
-
      @sock = nil
      @retry = nil
      @status = 'NOT CONNECTED'
      @timeout = memcache.timeout
      @logger = memcache.logger

@@ -785,11 +811,10 @@
    ##
    # Try to connect to the memcached server targeted by this object.
    # Returns the connected socket object on success or nil on failure.

    def socket
-      @mutex.lock if @multithread
      return @sock if @sock and not @sock.closed?

      @sock = nil

      # If the host was dead, don't retry for a while.

@@ -808,26 +833,21 @@
        logger.warn { "Unable to open socket: #{err.class.name}, #{err.message}" } if logger
        mark_dead err
      end

      return @sock
-    ensure
-      @mutex.unlock if @multithread
    end

    ##
    # Close the connection to the memcached server targeted by this
    # object. The server is not considered dead.

    def close
-      @mutex.lock if @multithread
      @sock.close if @sock && !@sock.closed?
      @sock = nil
      @retry = nil
      @status = "NOT CONNECTED"
-    ensure
-      @mutex.unlock if @multithread
    end

    ##
    # Mark the server as dead and close its socket.

@@ -852,30 +872,30 @@
# TCPSocket facade class which implements timeouts.
class TCPTimeoutSocket

  def initialize(host, port, timeout)
-    Timeout::timeout(MemCache::Server::CONNECT_TIMEOUT, SocketError) do
+    MemCacheTimer.timeout(MemCache::Server::CONNECT_TIMEOUT) do
      @sock = TCPSocket.new(host, port)
      @len = timeout
    end
  end

  def write(*args)
-    Timeout::timeout(@len, SocketError) do
+    MemCacheTimer.timeout(@len) do
      @sock.write(*args)
    end
  end

  def gets(*args)
-    Timeout::timeout(@len, SocketError) do
+    MemCacheTimer.timeout(@len) do
      @sock.gets(*args)
    end
  end

  def read(*args)
-    Timeout::timeout(@len, SocketError) do
+    MemCacheTimer.timeout(@len) do
      @sock.read(*args)
    end
  end

  def _socket

@@ -890,7 +910,46 @@
    @sock.closed?
  end

  def close
    @sock.close
+  end
+end
+
+module Continuum
+  POINTS_PER_SERVER = 160 # this is the default in libmemcached
+
+  # Find the closest index in Continuum with value <= the given value
+  def self.binary_search(ary, value, &block)
+    upper = ary.size - 1
+    lower = 0
+    idx = 0
+
+    while(lower <= upper) do
+      idx = (lower + upper) / 2
+      comp = ary[idx].value <=> value
+
+      if comp == 0
+        return idx
+      elsif comp > 0
+        upper = idx - 1
+      else
+        lower = idx + 1
+      end
+    end
+    return upper
+  end
+
+  class Entry
+    attr_reader :value
+    attr_reader :server
+
+    def initialize(val, srv)
+      @value = val
+      @server = srv
+    end
+
+    def inspect
+      "<#{value}, #{server.host}:#{server.port}>"
+    end
  end
end
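
Two defaults above change silently when upgrading: :multithread is now true and the socket read timeout defaults to 0.5 seconds. The sketch below is illustrative only; it assumes a memcached instance listening on localhost:11211 and uses a made-up :namespace. The option names, the new defaults, and the MemCacheError raised by check_multithread_status! all come from the diff above.

require 'memcache'

# :multithread defaults to true in 1.6.5; :timeout is the socket read
# timeout in seconds (0.5 by default). On Ruby 1.8, installing the
# SystemTimer gem removes most of the timeout overhead.
cache = MemCache.new ['localhost:11211'],
                     :namespace => 'my_app',   # hypothetical namespace
                     :multithread => true,
                     :timeout => 0.5

cache.set 'answer', 42
cache.get 'answer'             # => 42

# With :multithread => false, touching the same instance from another
# thread now raises MemCacheError (via check_multithread_status!)
# instead of silently sharing an unsynchronized socket.
single = MemCache.new ['localhost:11211'], :multithread => false
Thread.new do
  begin
    single.get 'answer'
  rescue MemCache::MemCacheError => e
    puts e.message             # prints the multithread warning shown in the diff
  end
end.join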
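
The Continuum module at the bottom of the diff previously lived in its own file (note the removed require 'continuum' at the top) and is now inlined into memcache.rb. Below is a small, self-contained sketch of how Continuum.binary_search behaves; the hash points 100/200/300 and the StubServer struct are made up for illustration, whereas the real client builds entries from SHA1-derived points and MemCache::Server objects.

require 'memcache'

# Continuum.binary_search returns the index of the last entry whose
# value is <= the target, or -1 when the target sorts before every
# entry (indexing the array with -1 then lands on the last entry,
# so the hash ring effectively wraps around).
StubServer = Struct.new(:host, :port)   # stand-in for MemCache::Server

ring = [100, 200, 300].map do |point|
  Continuum::Entry.new(point, StubServer.new('localhost', 11211))
end

Continuum.binary_search(ring, 250)   # => 1  (entry with value 200)
Continuum.binary_search(ring, 300)   # => 2  (exact match)
Continuum.binary_search(ring, 50)    # => -1 (sorts before the first entry)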