lib/resque/plugins/retry.rb in resque-retry-0.2.2 vs lib/resque/plugins/retry.rb in resque-retry-1.0.0.a
- old
+ new
@@ -21,11 +21,11 @@
#
# @retry_limit = 8 # default: 1
# @retry_delay = 60 # default: 0
#
# # used to build redis key, for counting job attempts.
- # def self.identifier(url, hook_id, hmac_key)
+ # def self.retry_identifier(url, hook_id, hmac_key)
# "#{url}-#{hook_id}"
# end
#
# def self.perform(url, hook_id, hmac_key)
# heavy_lifting
@@ -38,30 +38,30 @@
def inherited(subclass)
super(subclass)
subclass.instance_variable_set("@retry_criteria_checks", retry_criteria_checks.dup)
end
- # @abstract You may override to implement a custom identifier,
+ # @abstract You may override to implement a custom retry identifier,
# you should consider doing this if your job arguments
- # are many/long or may not cleanly cleanly to strings.
+ # are many/long or may not cleanly convert to strings.
#
- # Builds an identifier using the job arguments. This identifier
+ # Builds a retry identifier using the job arguments. This identifier
# is used as part of the redis key.
#
# @param [Array] args job arguments
# @return [String] job identifier
- def identifier(*args)
+ def retry_identifier(*args)
args_string = args.join('-')
args_string.empty? ? nil : args_string
end
# Builds the redis key to be used for keeping state of the job
# attempts.
#
# @return [String] redis key
def redis_retry_key(*args)
- ['resque-retry', name, identifier(*args)].compact.join(":").gsub(/\s/, '')
+ ['resque-retry', name, retry_identifier(*args)].compact.join(":").gsub(/\s/, '')
end
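Concretely, a hypothetical NotifyJob that keeps the default retry_identifier above would count attempts under keys like these:

    NotifyJob.redis_retry_key(42, 'ping')   #=> "resque-retry:NotifyJob:42-ping"
    NotifyJob.redis_retry_key               #=> "resque-retry:NotifyJob"   (nil identifier is dropped by compact)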
# Maximum number of retries we can attempt to successfully perform the job.
#
# A retry limit of 0 will *never* retry.
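Per that comment, a job class can opt out of retrying altogether (a trivial sketch):

    @retry_limit = 0   # this job will never be retried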
@@ -101,11 +101,15 @@
#
# @return [Number] number of seconds to sleep
def sleep_after_requeue
@sleep_after_requeue ||= 0
end
-
+
+ def retry_job_delegate
+ @retry_job_delegate ||= nil
+ end
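retry_job_delegate is new in this release and carries no doc comment here; judging from try_again below, setting it makes the retry be enqueued as that delegate class rather than the failing class, with the original job's retry key cleaned up after the hand-off. A minimal sketch with invented class names:

    class HeavyJobRetry
      @queue = :heavy_retries

      def self.perform(*args)
        # lighter-weight retry path
      end
    end

    class HeavyJob
      extend Resque::Plugins::Retry
      @queue = :heavy

      # Assumption for illustration: retries of HeavyJob run as HeavyJobRetry.
      @retry_job_delegate = HeavyJobRetry

      def self.perform(*args)
        # work that may raise
      end
    end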
+
# @abstract
# Modify the arguments used to retry the job. Use this to do something
# other than try the exact same job again.
#
# @return [Array] new job arguments
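A sketch of overriding this hook inside a job class (the option-hash convention is invented for illustration):

    def self.args_for_retry(*args)
      opts = args.last.is_a?(Hash) ? args.pop : {}
      args << opts.merge('retried' => true)   # let perform detect retried runs
    end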
@@ -147,11 +151,11 @@
# We always want to retry if the exception matches.
should_retry = retry_exception?(exception.class)
# call user retry criteria check blocks.
retry_criteria_checks.each do |criteria_check|
- should_retry ||= !!criteria_check.call(exception, *args)
+ should_retry ||= !!instance_exec(exception, *args, &criteria_check)
end
should_retry
end
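Because the block is now run via instance_exec, self inside a criteria check is the job class itself, so class-level readers are within reach. A sketch, assuming the retry_criteria_check registration helper defined elsewhere in this file and treating retry_attempt as the reader for the counter set in before_perform_retry (class and exception names are invented):

    class FlakyUpstreamError < StandardError; end

    class FetchJob
      extend Resque::Plugins::Retry
      @queue = :fetch

      retry_criteria_check do |exception, url|
        # `self` is FetchJob here, courtesy of instance_exec.
        exception.is_a?(FlakyUpstreamError) && retry_attempt.to_i < 3
      end

      def self.perform(url)
        # ...
      end
    end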
@@ -204,44 +208,76 @@
def try_again(exception, *args)
# some plugins define retry_delay and have it take no arguments, so rather than break those,
# we'll just check here to see whether it takes the additional exception class argument or not
temp_retry_delay = ([-1, 1].include?(method(:retry_delay).arity) ? retry_delay(exception.class) : retry_delay)
+ retry_in_queue = retry_job_delegate ? retry_job_delegate : self
if temp_retry_delay <= 0
# If the delay is 0, no point passing it through the scheduler
- Resque.enqueue(self, *args_for_retry(*args))
+ Resque.enqueue(retry_in_queue, *args_for_retry(*args))
else
- Resque.enqueue_in(temp_retry_delay, self, *args_for_retry(*args))
+ Resque.enqueue_in(temp_retry_delay, retry_in_queue, *args_for_retry(*args))
end
+
+ # remove retry key from redis if we handed retry off to another queue.
+ clean_retry_key(*args) if retry_job_delegate
+
+ # sleep after requeue if enabled.
sleep(sleep_after_requeue) if sleep_after_requeue > 0
end
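The arity check means retry_delay may be defined with or without an exception-class argument; a sketch of the per-exception form inside a job class (the exception class is invented):

    class RateLimitedError < StandardError; end

    # try_again passes exception.class when the method accepts an argument.
    def self.retry_delay(exception_class)
      exception_class <= RateLimitedError ? 120 : 10
    end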
# Resque before_perform hook.
#
# Increments and sets the `@retry_attempt` count.
def before_perform_retry(*args)
+ @on_failure_retry_hook_already_called = false
+
+ # store number of retry attempts.
retry_key = redis_retry_key(*args)
Resque.redis.setnx(retry_key, -1) # default to -1 if not set.
@retry_attempt = Resque.redis.incr(retry_key) # increment by 1.
end
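So on a job's first perform the SETNX creates the key at -1 and the INCR returns 0; if that run fails and is retried, SETNX is a no-op and INCR returns 1, and so on, making @retry_attempt a zero-based attempt counter for the current set of arguments.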
# Resque after_perform hook.
#
# Deletes retry attempt count from Redis.
def after_perform_retry(*args)
- Resque.redis.del(redis_retry_key(*args))
+ clean_retry_key(*args)
end
# Resque on_failure hook.
#
# Checks if our retry criteria is valid, if it is we try again.
# Otherwise the retry attempt count is deleted from Redis.
+ #
+ # NOTE: This hook will only allow execution once per job perform attempt.
+ # This was added because Resque v1.20.0 calls the hook twice.
+ # IMO, this isn't something resque-retry should have to worry about!
def on_failure_retry(exception, *args)
+ return if @on_failure_retry_hook_already_called
+
if retry_criteria_valid?(exception, *args)
try_again(exception, *args)
else
- Resque.redis.del(redis_retry_key(*args))
+ clean_retry_key(*args)
end
+
+ @on_failure_retry_hook_already_called = true
+ end
+
+ def instance_exec(*args, &block)
+ mname = "__instance_exec_#{Thread.current.object_id.abs}"
+ class << self; self end.class_eval{ define_method(mname, &block) }
+ begin
+ ret = send(mname, *args)
+ ensure
+ class << self; self end.class_eval{ undef_method(mname) } rescue nil
+ end
+ ret
+ end
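The instance_exec defined above builds a throwaway singleton method from the block, calls it, and then removes it; presumably it is a compatibility shim for Ruby builds that lack a native Object#instance_exec.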
+
+ def clean_retry_key(*args)
+ Resque.redis.del(redis_retry_key(*args))
end
end
end
end