lib/active_support/cache.rb in activesupport-5.1.7 vs lib/active_support/cache.rb in activesupport-5.2.0.beta1
- old
+ new
@@ -1,5 +1,7 @@
+# frozen_string_literal: true
+
require "zlib"
require "active_support/core_ext/array/extract_options"
require "active_support/core_ext/array/wrap"
require "active_support/core_ext/module/attribute_accessors"
require "active_support/core_ext/numeric/bytes"
@@ -8,14 +10,15 @@
require "active_support/core_ext/string/inflections"
module ActiveSupport
# See ActiveSupport::Cache::Store for documentation.
module Cache
- autoload :FileStore, "active_support/cache/file_store"
- autoload :MemoryStore, "active_support/cache/memory_store"
- autoload :MemCacheStore, "active_support/cache/mem_cache_store"
- autoload :NullStore, "active_support/cache/null_store"
+ autoload :FileStore,        "active_support/cache/file_store"
+ autoload :MemoryStore,      "active_support/cache/memory_store"
+ autoload :MemCacheStore,    "active_support/cache/mem_cache_store"
+ autoload :NullStore,        "active_support/cache/null_store"
+ autoload :RedisCacheStore,  "active_support/cache/redis_cache_store"
# These options mean something to all cache implementations. Individual cache
# implementations may support additional options.
UNIVERSAL_OPTIONS = [:namespace, :compress, :compress_threshold, :expires_in, :race_condition_ttl]
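The new RedisCacheStore autoload above makes a Redis-backed store available alongside the existing ones. A minimal usage sketch, assuming the redis gem is installed and a server is reachable at the given URL; the key and value are illustrative:

    require "active_support"

    cache = ActiveSupport::Cache::RedisCacheStore.new(url: ENV.fetch("REDIS_URL", "redis://localhost:6379/0"))
    cache.write("greeting", "hello")
    cache.read("greeting")  # => "hello"

    # Or, in a Rails app (illustrative):
    # config.cache_store = :redis_cache_store, { url: ENV["REDIS_URL"] }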
@@ -73,11 +76,11 @@
# ActiveSupport::Cache.expand_cache_key([:foo, :bar]) # => "foo/bar"
# ActiveSupport::Cache.expand_cache_key([:foo, :bar], "namespace") # => "namespace/foo/bar"
#
# The +key+ argument can also respond to +cache_key+ or +to_param+.
def expand_cache_key(key, namespace = nil)
- expanded_cache_key = namespace ? "#{namespace}/" : ""
+ expanded_cache_key = (namespace ? "#{namespace}/" : "").dup
if prefix = ENV["RAILS_CACHE_ID"] || ENV["RAILS_APP_VERSION"]
expanded_cache_key << "#{prefix}/"
end
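expand_cache_key, shown above, prepends the namespace and any RAILS_CACHE_ID / RAILS_APP_VERSION prefix before expanding the key itself. A quick sketch of the prefix behaviour (the "v42" value is made up):

    require "active_support"

    # Illustrative prefix value; RAILS_CACHE_ID applies to every expanded key.
    ENV["RAILS_CACHE_ID"] = "v42"

    ActiveSupport::Cache.expand_cache_key(:foo)                # => "v42/foo"
    ActiveSupport::Cache.expand_cache_key([:foo, :bar], "ns")  # => "ns/v42/foo/bar"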
@@ -86,20 +89,28 @@
end
private
def retrieve_cache_key(key)
case
- when key.respond_to?(:cache_key) then key.cache_key
- when key.is_a?(Array) then key.map { |element| retrieve_cache_key(element) }.to_param
- when key.respond_to?(:to_a) then retrieve_cache_key(key.to_a)
- else key.to_param
+ when key.respond_to?(:cache_key_with_version)
+ key.cache_key_with_version
+ when key.respond_to?(:cache_key)
+ key.cache_key
+ when key.is_a?(Hash)
+ key.sort_by { |k, _| k.to_s }.collect { |k, v| "#{k}=#{v}" }.to_param
+ when key.respond_to?(:to_a)
+ key.to_a.collect { |element| retrieve_cache_key(element) }.to_param
+ else
+ key.to_param
end.to_s
end
# Obtains the specified cache store class, given the name of the +store+.
# Raises an error when the store class cannot be found.
def retrieve_store_class(store)
+ # require_relative cannot be used here because the class might be
+ # provided by another gem, such as redis-activesupport.
require "active_support/cache/#{store}"
rescue LoadError => e
raise "Could not find cache store adapter for #{store} (#{e})"
else
ActiveSupport::Cache.const_get(store.to_s.camelize)
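Two additions in retrieve_cache_key are worth illustrating: objects responding to cache_key_with_version take precedence over plain cache_key, and Hash keys expand to sorted key=value pairs. A sketch with a hypothetical record-like object; the results assume RAILS_CACHE_ID and RAILS_APP_VERSION are unset:

    require "active_support"

    # Hypothetical object; anything responding to cache_key_with_version wins over cache_key.
    record = Struct.new(:cache_key_with_version).new("posts/1-20180101120000")
    ActiveSupport::Cache.expand_cache_key(record)          # => "posts/1-20180101120000"

    # Hash keys are expanded as sorted key=value pairs.
    ActiveSupport::Cache.expand_cache_key({ b: 2, a: 1 })  # => "a=1/b=2"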
@@ -141,16 +152,15 @@
# use application logic to invalidate keys.
#
# cache.namespace = -> { @last_mod_time } # Set the namespace to a variable
# @last_mod_time = Time.now # Invalidate the entire cache by changing namespace
#
- # Caches can also store values in a compressed format to save space and
- # reduce time spent sending data. Since there is overhead, values must be
- # large enough to warrant compression. To turn on compression either pass
- # <tt>compress: true</tt> in the initializer or as an option to +fetch+
- # or +write+. To specify the threshold at which to compress values, set the
- # <tt>:compress_threshold</tt> option. The default threshold is 16K.
+ # Cached data larger than 1kB is compressed by default. To turn off
+ # compression, pass <tt>compress: false</tt> to the initializer or to
+ # individual +fetch+ or +write+ method calls. The 1kB compression
+ # threshold is configurable with the <tt>:compress_threshold</tt> option,
+ # specified in bytes.
class Store
cattr_accessor :logger, instance_writer: true
attr_reader :silence, :options
alias :silence? :silence
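A sketch of the compression defaults described in the comment above: values are compressed once their serialized size crosses the 1 kB threshold, unless compression is disabled or the threshold is raised. The store choice and payload are illustrative:

    require "active_support"

    cache = ActiveSupport::Cache::MemoryStore.new               # compression is on by default
    blob  = "x" * 2_048                                          # ~2 kB, above the 1 kB threshold

    cache.write("blob", blob)                                    # stored compressed
    cache.write("blob", blob, compress: false)                   # stored uncompressed
    cache.write("blob", blob, compress_threshold: 4.kilobytes)   # below the raised threshold, uncompressed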
@@ -205,22 +215,25 @@
#
# The +:force+ option is useful when you're calling some other method to
# ask whether you should force a cache write. Otherwise, it's clearer to
# just call <tt>Cache#write</tt>.
#
- # Setting <tt>:compress</tt> will store a large cache entry set by the call
- # in a compressed format.
+ # Setting <tt>compress: false</tt> disables compression of the cache entry.
#
# Setting <tt>:expires_in</tt> will set an expiration time on the cache.
# All caches support auto-expiring content after a specified number of
# seconds. This value can be specified as an option to the constructor
# (in which case all entries will be affected), or it can be supplied to
# the +fetch+ or +write+ method to effect just one entry.
#
# cache = ActiveSupport::Cache::MemoryStore.new(expires_in: 5.minutes)
# cache.write(key, value, expires_in: 1.minute) # Set a lower value for one entry
#
+ # Setting <tt>:version</tt> verifies the cache stored under <tt>name</tt>
+ # is of the same version. +nil+ is returned on a version mismatch,
+ # regardless of the cached contents.
+ # This feature is used to support recyclable cache keys.
+ #
# Setting <tt>:race_condition_ttl</tt> is very useful in situations where
# a cache entry is used very frequently and is under heavy load. If a
# cache expires and due to heavy load several different processes will try
# to read data natively and then they all will try to write to cache. To
# avoid that case the first process to find an expired cache entry will
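A sketch of the :version behaviour documented above for fetch: a version mismatch is treated as a miss, so the block runs again and overwrites the entry. The key and payloads are made up:

    require "active_support"

    cache = ActiveSupport::Cache::MemoryStore.new
    cache.fetch("events", version: 1) { "v1 payload" }  # => "v1 payload" (miss; block runs and writes)
    cache.fetch("events", version: 1) { "unused" }      # => "v1 payload" (hit)
    cache.fetch("events", version: 2) { "v2 payload" }  # => "v2 payload" (version mismatch treated as a miss)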
@@ -285,10 +298,11 @@
entry = nil
instrument(:read, name, options) do |payload|
cached_entry = read_entry(key, options) unless options[:force]
entry = handle_expired_entry(cached_entry, key, options)
+ entry = nil if entry && entry.mismatched?(normalize_version(name, options))
payload[:super_operation] = :fetch if payload
payload[:hit] = !!entry if payload
end
if entry
@@ -301,25 +315,34 @@
else
read(name, options)
end
end
- # Fetches data from the cache, using the given key. If there is data in
+ # Reads data from the cache, using the given key. If there is data in
# the cache with the given key, then that data is returned. Otherwise,
# +nil+ is returned.
#
+ # Note that if data was written with the <tt>:expires_in</tt> or <tt>:version</tt> options,
+ # both of these conditions are applied before the data is returned.
+ #
# Options are passed to the underlying cache implementation.
def read(name, options = nil)
options = merged_options(options)
- key = normalize_key(name, options)
+ key     = normalize_key(name, options)
+ version = normalize_version(name, options)
+
instrument(:read, name, options) do |payload|
entry = read_entry(key, options)
+
if entry
if entry.expired?
delete_entry(key, options)
payload[:hit] = false if payload
nil
+ elsif entry.mismatched?(version)
+ payload[:hit] = false if payload
+ nil
else
payload[:hit] = true if payload
entry.value
end
else
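The same version check now applies to a plain read, alongside expiry. A quick sketch; key, value, and versions are illustrative:

    require "active_support"

    cache = ActiveSupport::Cache::MemoryStore.new
    cache.write(:stats, 42, version: "a", expires_in: 60)

    cache.read(:stats, version: "a")  # => 42
    cache.read(:stats, version: "b")  # => nil (version mismatch)
    cache.read(:stats)                # => 42 (no version requested, so nothing to mismatch)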
@@ -339,23 +362,40 @@
options = names.extract_options!
options = merged_options(options)
results = {}
names.each do |name|
- key = normalize_key(name, options)
- entry = read_entry(key, options)
+ key     = normalize_key(name, options)
+ version = normalize_version(name, options)
+ entry   = read_entry(key, options)
+
if entry
if entry.expired?
delete_entry(key, options)
+ elsif entry.mismatched?(version)
+ # Skip mismatched versions
else
results[name] = entry.value
end
end
end
results
end
+ # Cache Storage API to write multiple values at once. Accepts a hash of
+ # key/value pairs; options are passed to the underlying cache implementation.
+ def write_multi(hash, options = nil)
+ options = merged_options(options)
+
+ instrument :write_multi, hash, options do |payload|
+ entries = hash.each_with_object({}) do |(name, value), memo|
+ memo[normalize_key(name, options)] = Entry.new(value, options.merge(version: normalize_version(name, options)))
+ end
+
+ write_multi_entries entries, options
+ end
+ end
+
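write_multi is new here; the default implementation builds an Entry per key and hands them to write_multi_entries. A usage sketch with made-up keys:

    require "active_support"

    cache = ActiveSupport::Cache::MemoryStore.new
    cache.write_multi({ "a" => 1, "b" => 2 }, expires_in: 600)
    cache.read_multi("a", "b", "missing")  # => { "a" => 1, "b" => 2 }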
# Fetches data from the cache, using the given keys. If there is data in
# the cache with the given keys, then that data is returned. Otherwise,
# the supplied block is called for each key for which there was no data,
# and the result will be written to the cache and returned.
# Therefore, you need to pass a block that returns the data to be written
@@ -376,29 +416,30 @@
def fetch_multi(*names)
raise ArgumentError, "Missing block: `Cache#fetch_multi` requires a block." unless block_given?
options = names.extract_options!
options = merged_options(options)
- results = read_multi(*names, options)
- names.each_with_object({}) do |name, memo|
- memo[name] = results.fetch(name) do
- value = yield name
- write(name, value, options)
- value
+ read_multi(*names, options).tap do |results|
+ writes = {}
+
+ (names - results.keys).each do |name|
+ results[name] = writes[name] = yield(name)
end
+
+ write_multi writes, options
end
end
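fetch_multi now collects the missing keys and saves them with a single write_multi call rather than one write per key; from the caller's side the behaviour is unchanged. A sketch:

    require "active_support"

    cache = ActiveSupport::Cache::MemoryStore.new
    cache.write("alpha", "cached")

    cache.fetch_multi("alpha", "beta") { |name| name.upcase }
    # => { "alpha" => "cached", "beta" => "BETA" }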
# Writes the value to the cache, with the key.
#
# Options are passed to the underlying cache implementation.
def write(name, value, options = nil)
options = merged_options(options)
instrument(:write, name, options) do
- entry = Entry.new(value, options)
+ entry = Entry.new(value, options.merge(version: normalize_version(name, options)))
write_entry(normalize_key(name, options), entry, options)
end
end
# Deletes an entry in the cache. Returns +true+ if an entry is deleted.
@@ -418,11 +459,11 @@
def exist?(name, options = nil)
options = merged_options(options)
instrument(:exist?, name) do
entry = read_entry(normalize_key(name, options), options)
- (entry && !entry.expired?) || false
+ (entry && !entry.expired? && !entry.mismatched?(normalize_version(name, options))) || false
end
end
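exist? now respects the version as well, as the change above shows: a mismatched version reads as absent. A sketch with illustrative values:

    require "active_support"

    cache = ActiveSupport::Cache::MemoryStore.new
    cache.write(:token, "abc", version: 1)

    cache.exist?(:token, version: 1)  # => true
    cache.exist?(:token, version: 2)  # => false
    cache.exist?(:missing)            # => false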
# Deletes all entries with keys matching the pattern.
#
@@ -500,10 +541,18 @@
# this method.
def write_entry(key, entry, options)
raise NotImplementedError.new
end
+ # Writes multiple entries to the cache implementation. Subclasses MAY
+ # implement this method.
+ def write_multi_entries(hash, options)
+ hash.each do |key, entry|
+ write_entry key, entry, options
+ end
+ end
+
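Since write_multi_entries is the seam subclasses may override (RedisCacheStore does, to batch the writes), a custom store can hook in here. The CountingMemoryStore below is a hypothetical subclass, not part of Active Support; it merely reports the batch size before deferring to the default loop:

    require "active_support"

    # Hypothetical subclass: report how many entries each write_multi batch carries.
    class CountingMemoryStore < ActiveSupport::Cache::MemoryStore
      private
        def write_multi_entries(entries, options)
          puts "writing #{entries.size} entries"
          super
        end
    end

    store = CountingMemoryStore.new
    store.write_multi("a" => 1, "b" => 2)  # prints "writing 2 entries"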
# Deletes an entry from the cache implementation. Subclasses must
# implement this method.
def delete_entry(key, options)
raise NotImplementedError.new
end
@@ -515,40 +564,52 @@
else
options.dup
end
end
- # Expands key to be a consistent string value. Invokes +cache_key+ if
- # object responds to +cache_key+. Otherwise, +to_param+ method will be
- # called. If the key is a Hash, then keys will be sorted alphabetically.
- def expanded_key(key)
- return key.cache_key.to_s if key.respond_to?(:cache_key)
+ # Expands and namespaces the cache key. May be overridden by
+ # cache stores to do additional normalization.
+ def normalize_key(key, options = nil)
+ namespace_key Cache.expand_cache_key(key), options
+ end
- case key
- when Array
- if key.size > 1
- key = key.collect { |element| expanded_key(element) }
- else
- key = key.first
- end
- when Hash
- key = key.sort_by { |k, _| k.to_s }.collect { |k, v| "#{k}=#{v}" }
+ # Prefix the key with a namespace string:
+ #
+ # namespace_key 'foo', namespace: 'cache'
+ # # => 'cache:foo'
+ #
+ # With a namespace block:
+ #
+ # namespace_key 'foo', namespace: -> { 'cache' }
+ # # => 'cache:foo'
+ def namespace_key(key, options = nil)
+ options = merged_options(options)
+ namespace = options[:namespace]
+
+ if namespace.respond_to?(:call)
+ namespace = namespace.call
end
- key.to_param
+ if namespace
+ "#{namespace}:#{key}"
+ else
+ key
+ end
end
- # Prefixes a key with the namespace. Namespace and key will be delimited
- # with a colon.
- def normalize_key(key, options)
- key = expanded_key(key)
- namespace = options[:namespace] if options
- prefix = namespace.is_a?(Proc) ? namespace.call : namespace
- key = "#{prefix}:#{key}" if prefix
- key
+ def normalize_version(key, options = nil)
+ (options && options[:version].try(:to_param)) || expanded_version(key)
end
+ def expanded_version(key)
+ case
+ when key.respond_to?(:cache_version) then key.cache_version.to_param
+ when key.is_a?(Array) then key.map { |element| expanded_version(element) }.compact.to_param
+ when key.respond_to?(:to_a) then expanded_version(key.to_a)
+ end
+ end
+
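normalize_key and normalize_version split a cache argument into its key and version halves; anything responding to cache_version supplies the version implicitly, which is the mechanism behind recyclable cache keys. A sketch with a hypothetical model-like object (Post is made up):

    require "active_support"

    # Hypothetical model-like object: cache_key stays stable, cache_version
    # changes on update, which is what recyclable cache keys rely on.
    Post = Struct.new(:id, :updated_at) do
      def cache_key
        "posts/#{id}"
      end

      def cache_version
        updated_at.to_i
      end
    end

    cache = ActiveSupport::Cache::MemoryStore.new
    post  = Post.new(1, Time.now)

    cache.write(post, "rendered body")
    cache.read(post)                 # => "rendered body"

    post.updated_at = Time.now + 60
    cache.read(post)                 # => nil (same key, mismatched version)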
def instrument(operation, key, options = nil)
log { "Cache #{operation}: #{normalize_key(key, options)}#{options.blank? ? "" : " (#{options.inspect})"}" }
payload = { key: key }
payload.merge!(options) if options.is_a?(Hash)
@@ -589,38 +650,46 @@
write(name, result, options)
result
end
end
- # This class is used to represent cache entries. Cache entries have a value and an optional
- # expiration time. The expiration time is used to support the :race_condition_ttl option
- # on the cache.
+ # This class is used to represent cache entries. Cache entries have a value, an optional
+ # expiration time, and an optional version. The expiration time is used to support the :race_condition_ttl option
+ # on the cache. The version is used to support the :version option on the cache for rejecting
+ # mismatches.
#
# Since cache entries in most instances will be serialized, the internals of this class are highly optimized
# using short instance variable names that are lazily defined.
class Entry # :nodoc:
- DEFAULT_COMPRESS_LIMIT = 16.kilobytes
+ attr_reader :version
+ DEFAULT_COMPRESS_LIMIT = 1.kilobyte
+
# Creates a new cache entry for the specified value. Options supported are
# +:compress+, +:compress_threshold+, +:version+, and +:expires_in+.
def initialize(value, options = {})
if should_compress?(value, options)
@value = compress(value)
@compressed = true
else
@value = value
end
+ @version = options[:version]
@created_at = Time.now.to_f
@expires_in = options[:expires_in]
@expires_in = @expires_in.to_f if @expires_in
end
def value
compressed? ? uncompress(@value) : @value
end
+ def mismatched?(version)
+ @version && version && @version != version
+ end
+
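Entry#mismatched? only rejects when both the stored and the requested version are present; a nil on either side never counts as a mismatch. A small sketch against the internal (:nodoc:) Entry class:

    require "active_support"

    entry = ActiveSupport::Cache::Entry.new("value", version: "v1")
    entry.version            # => "v1"
    entry.mismatched?("v1")  # => false
    entry.mismatched?("v2")  # => true
    entry.mismatched?(nil)   # => nil, which reads as "not mismatched"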
# Checks if the entry is expired. The +expires_in+ parameter can override
# the value set when the entry was created.
def expired?
@expires_in && @created_at + @expires_in <= Time.now.to_f
end
@@ -666,11 +735,11 @@
end
end
private
def should_compress?(value, options)
- if value && options[:compress]
- compress_threshold = options[:compress_threshold] || DEFAULT_COMPRESS_LIMIT
+ if value && options.fetch(:compress, true)
+ compress_threshold = options.fetch(:compress_threshold, DEFAULT_COMPRESS_LIMIT)
serialized_value_size = (value.is_a?(String) ? value : Marshal.dump(value)).bytesize
return true if serialized_value_size >= compress_threshold
end