lib/active_support/cache.rb in activesupport-7.1.5 vs lib/active_support/cache.rb in activesupport-7.2.0.beta1
- old
+ new
@@ -50,11 +50,11 @@
module Strategy
autoload :LocalCache, "active_support/cache/strategy/local_cache"
end
- @format_version = 6.1
+ @format_version = 7.0
class << self
attr_accessor :format_version
# Creates a new Store object according to the given options.
@@ -84,17 +84,11 @@
# # => returns MyOwnCacheStore.new
def lookup_store(store = nil, *parameters)
case store
when Symbol
options = parameters.extract_options!
- # clean this up once Ruby 2.7 support is dropped
- # see https://github.com/rails/rails/pull/41522#discussion_r581186602
- if options.empty?
- retrieve_store_class(store).new(*parameters)
- else
- retrieve_store_class(store).new(*parameters, **options)
- end
+ retrieve_store_class(store).new(*parameters, **options)
when Array
lookup_store(*store)
when nil
ActiveSupport::Cache::MemoryStore.new
else
@@ -164,11 +158,11 @@
# by its +coder+'s +dump+ and +load+ methods.
#
# cache = ActiveSupport::Cache::MemoryStore.new
#
# cache.read('city') # => nil
- # cache.write('city', "Duckburgh")
+ # cache.write('city', "Duckburgh") # => true
# cache.read('city') # => "Duckburgh"
#
# cache.write('not serializable', Proc.new {}) # => TypeError
#
# Keys are always translated into Strings and are case sensitive. When an
@@ -204,28 +198,10 @@
private_constant :DEFAULT_POOL_OPTIONS
def retrieve_pool_options(options)
if options.key?(:pool)
pool_options = options.delete(:pool)
- elsif options.key?(:pool_size) || options.key?(:pool_timeout)
- pool_options = {}
-
- if options.key?(:pool_size)
- ActiveSupport.deprecator.warn(<<~MSG)
- Using :pool_size is deprecated and will be removed in Rails 7.2.
- Use `pool: { size: #{options[:pool_size].inspect} }` instead.
- MSG
- pool_options[:size] = options.delete(:pool_size)
- end
-
- if options.key?(:pool_timeout)
- ActiveSupport.deprecator.warn(<<~MSG)
- Using :pool_timeout is deprecated and will be removed in Rails 7.2.
- Use `pool: { timeout: #{options[:pool_timeout].inspect} }` instead.
- MSG
- pool_options[:timeout] = options.delete(:pool_timeout)
- end
else
pool_options = true
end
case pool_options
@@ -342,11 +318,11 @@
self
end
# Silences the logger within a block.
def mute
- previous_silence, @silence = defined?(@silence) && @silence, true
+ previous_silence, @silence = @silence, true
yield
ensure
@silence = previous_silence
end
@@ -409,36 +385,52 @@
# If the first process errors out while generating a new value, another
# process can try to generate a new value after the extended time window
# has elapsed.
#
# # Set all values to expire after one minute.
- # cache = ActiveSupport::Cache::MemoryStore.new(expires_in: 1.minute)
+ # cache = ActiveSupport::Cache::MemoryStore.new(expires_in: 1)
#
- # cache.write('foo', 'original value')
+ # cache.write("foo", "original value")
# val_1 = nil
# val_2 = nil
- # sleep 60
+ # p cache.read("foo") # => "original value"
#
- # Thread.new do
- # val_1 = cache.fetch('foo', race_condition_ttl: 10.seconds) do
+ # sleep 1 # wait until the cache expires
+ #
+ # t1 = Thread.new do
+ # # fetch does the following:
+ # # 1. gets a recently expired entry
+ # # 2. extends the expiry by 2 seconds (race_condition_ttl)
+ # # 3. regenerates the new value
+ # val_1 = cache.fetch("foo", race_condition_ttl: 2) do
# sleep 1
- # 'new value 1'
+ # "new value 1"
# end
# end
#
- # Thread.new do
- # val_2 = cache.fetch('foo', race_condition_ttl: 10.seconds) do
- # 'new value 2'
- # end
+ # # Wait until t1 extends the expiry of the entry
+ # # but before generating the new value
+ # sleep 0.1
+ #
+ # val_2 = cache.fetch("foo", race_condition_ttl: 2) do
+ # # This block won't be executed because t1 extended the expiry
+ # "new value 2"
# end
#
- # cache.fetch('foo') # => "original value"
- # sleep 10 # First thread extended the life of cache by another 10 seconds
- # cache.fetch('foo') # => "new value 1"
- # val_1 # => "new value 1"
- # val_2 # => "original value"
+ # t1.join
#
+ # p val_1 # => "new value 1"
+ # p val_2 # => "original value"
+ # p cache.fetch("foo") # => "new value 1"
+ #
+ # # The entry requires 3 seconds to expire (expires_in + race_condition_ttl)
+ # # We have waited 2 seconds already (sleep(1) + t1.join) thus we need to wait 1
+ # # more second to see the entry expire.
+ # sleep 1
+ #
+ # p cache.fetch("foo") # => nil
+ #
# ==== Dynamic Options
#
# In some cases it may be necessary to dynamically compute options based
# on the cached value. To support this, an ActiveSupport::Cache::WriteOptions
# instance is passed as the second argument to the block. For example:
@@ -454,11 +446,11 @@
options = merged_options(options)
key = normalize_key(name, options)
entry = nil
unless options[:force]
- instrument(:read, name, options) do |payload|
+ instrument(:read, key, options) do |payload|
cached_entry = read_entry(key, **options, event: payload)
entry = handle_expired_entry(cached_entry, key, options)
if entry
if entry.mismatched?(normalize_version(name, options))
entry = nil
@@ -476,11 +468,11 @@
end
if entry
get_entry_value(entry, name, options)
else
- save_block_result_to_cache(name, options, &block)
+ save_block_result_to_cache(name, key, options, &block)
end
elsif options && options[:force]
raise ArgumentError, "Missing block: Calling `Cache#fetch` with `force: true` requires a block."
else
read(name, options)
@@ -506,11 +498,11 @@
def read(name, options = nil)
options = merged_options(options)
key = normalize_key(name, options)
version = normalize_version(name, options)
- instrument(:read, name, options) do |payload|
+ instrument(:read, key, options) do |payload|
entry = read_entry(key, **options, event: payload)
if entry
if entry.expired?
delete_entry(key, **options)
@@ -603,35 +595,40 @@
return {} if names.empty?
options = names.extract_options!
options = merged_options(options)
- instrument_multi :read_multi, names, options do |payload|
+ writes = {}
+ ordered = instrument_multi :read_multi, names, options do |payload|
if options[:force]
reads = {}
else
reads = read_multi_entries(names, **options)
end
- writes = {}
ordered = names.index_with do |name|
reads.fetch(name) { writes[name] = yield(name) }
end
writes.compact! if options[:skip_nil]
payload[:hits] = reads.keys
payload[:super_operation] = :fetch_multi
- write_multi(writes, options)
-
ordered
end
+
+ write_multi(writes, options)
+
+ ordered
end
# Writes the value to the cache with the key. The value must be supported
# by the +coder+'s +dump+ and +load+ methods.
#
+ # Returns +true+ if the write succeeded, +nil+ if there was an error talking
+ # to the cache backend, or +false+ if the write failed for another reason.
+ #
# By default, cache entries larger than 1kB are compressed. Compression
# allows more data to be stored in the same memory footprint, leading to
# fewer cache evictions and higher hit rates.
#
# ==== Options
@@ -660,26 +657,28 @@
# used to support recyclable cache keys.
#
# Other options will be handled by the specific cache store implementation.
def write(name, value, options = nil)
options = merged_options(options)
+ key = normalize_key(name, options)
- instrument(:write, name, options) do
+ instrument(:write, key, options) do
entry = Entry.new(value, **options.merge(version: normalize_version(name, options)))
- write_entry(normalize_key(name, options), entry, **options)
+ write_entry(key, entry, **options)
end
end
# Deletes an entry in the cache. Returns +true+ if an entry is deleted
# and +false+ otherwise.
#
# Options are passed to the underlying cache implementation.
def delete(name, options = nil)
options = merged_options(options)
+ key = normalize_key(name, options)
- instrument(:delete, name) do
- delete_entry(normalize_key(name, options), **options)
+ instrument(:delete, key) do
+ delete_entry(key, **options)
end
end
# Deletes multiple entries in the cache. Returns the number of deleted
# entries.
@@ -699,13 +698,14 @@
# Returns +true+ if the cache contains an entry for the given key.
#
# Options are passed to the underlying cache implementation.
def exist?(name, options = nil)
options = merged_options(options)
+ key = normalize_key(name, options)
- instrument(:exist?, name) do |payload|
- entry = read_entry(normalize_key(name, options), **options, event: payload)
+ instrument(:exist?, key) do |payload|
+ entry = read_entry(key, **options, event: payload)
(entry && !entry.expired? && !entry.mismatched?(normalize_version(name, options))) || false
end
end
def new_entry(value, options = nil) # :nodoc:
@@ -759,18 +759,10 @@
end
private
def default_serializer
case Cache.format_version
- when 6.1
- ActiveSupport.deprecator.warn <<~EOM
- Support for `config.active_support.cache_format_version = 6.1` has been deprecated and will be removed in Rails 7.2.
-
- Check the Rails upgrade guide at https://guides.rubyonrails.org/upgrading_ruby_on_rails.html#new-activesupport-cache-serialization-format
- for more information on how to upgrade.
- EOM
- Cache::SerializerWithFallback[:marshal_6_1]
when 7.0
Cache::SerializerWithFallback[:marshal_7_0]
when 7.1
Cache::SerializerWithFallback[:marshal_7_1]
else
@@ -1014,11 +1006,11 @@
if logger && logger.debug? && !silence?
debug_key =
if multi
": #{payload[:key].size} key(s) specified"
elsif payload[:key]
- ": #{normalize_key(payload[:key], options)}"
+ ": #{payload[:key]}"
end
debug_options = " (#{options.inspect})" unless options.blank?
logger.debug "Cache #{operation}#{debug_key}#{debug_options}"
@@ -1051,13 +1043,13 @@
def get_entry_value(entry, name, options)
instrument(:fetch_hit, name, options)
entry.value
end
- def save_block_result_to_cache(name, options)
+ def save_block_result_to_cache(name, key, options)
options = options.dup
- result = instrument(:generate, name, options) do
+ result = instrument(:generate, key, options) do
yield(name, WriteOptions.new(options))
end
write(name, result, options) unless result.nil? && options[:skip_nil]
result