lib/resque/plugins/retry.rb in resque-retry-1.5.2 vs lib/resque/plugins/retry.rb in resque-retry-1.5.3

- old
+ new

@@ -52,27 +52,14 @@
       #
       # @api private
      def self.extended(receiver)
        retry_exceptions = receiver.instance_variable_get('@retry_exceptions')
        fatal_exceptions = receiver.instance_variable_get('@fatal_exceptions')
-       ignore_exceptions = receiver.instance_variable_get('@ignore_exceptions')

        if fatal_exceptions && retry_exceptions
          raise AmbiguousRetryStrategyException.new(%{You can't define both "@fatal_exceptions" and "@retry_exceptions"})
        end
-
-       # Check that ignore_exceptions is a subset of retry_exceptions
-       if retry_exceptions.is_a?(Hash)
-         exceptions = retry_exceptions.keys
-       else
-         exceptions = Array(retry_exceptions)
-       end
-
-       excess_exceptions = Array(ignore_exceptions) - exceptions
-       unless excess_exceptions.empty?
-         raise RetryConfigurationException, "The following exceptions are defined in @ignore_exceptions but not in @retry_exceptions: #{excess_exceptions.join(', ')}."
-       end
      end

      # Copy retry criteria checks, try again callbacks, and give up callbacks
      # on inheritance.
      #
@@ -175,10 +162,20 @@
      def retry_job_delegate
        @retry_job_delegate ||= nil
      end

      # @abstract
+     # Specify the queue that the job should be placed in upon failure
+     #
+     # @return [Symbol] Symbol representing queue that job should be placed in
+     #
+     # @api public
+     def retry_queue(exception, *args)
+       nil
+     end
+
+     # @abstract
      # Modify the arguments used to retry the job. Use this to do something
      # other than try the exact same job again
      #
      # @return [Array] new job arguments
      #
@@ -386,15 +383,28 @@
        log_message 'try_again', args, exception
        run_try_again_callbacks(exception, *args)

        # some plugins define retry_delay and have it take no arguments, so rather than break those,
        # we'll just check here to see whether it takes the additional exception class argument or not
-       temp_retry_delay = ([-1, 1].include?(method(:retry_delay).arity) ? retry_delay(exception.class) : retry_delay)
+       # we also allow all job args to be passed to a custom `retry_delay` method
+       retry_delay_arity = method(:retry_delay).arity

-       retry_in_queue = retry_job_delegate ? retry_job_delegate : self
-       log_message "retry delay: #{temp_retry_delay} for class: #{retry_in_queue}", args, exception
+       temp_retry_delay = if [-2, 2].include?(retry_delay_arity)
+         retry_delay(exception.class, *args)
+       elsif [-1, 1].include?(retry_delay_arity)
+         retry_delay(exception.class)
+       else
+         retry_delay
+       end

+       retry_job_class = retry_job_delegate ? retry_job_delegate : self
+
+       retry_in_queue = retry_queue(exception, *args)
+       retry_in_queue ||= Resque.queue_from_class(retry_job_class)
+
+       log_message "retry delay: #{temp_retry_delay} for queue: #{retry_in_queue}", args, exception
+
        # remember that this job is now being retried. before_perform_retry will increment
        # this so it represents the retry count, and MultipleWithRetrySuppression uses
        # the existence of this to determine if the job should be sent to the
        # parent failure backend (e.g. failed queue) or not. Removing this means
        # jobs that fail before ::perform will be both retried and sent to the failed queue.
@@ -402,12 +412,12 @@
        retry_args = retry_args_for_exception(exception, *args)

        if temp_retry_delay <= 0
          # If the delay is 0, no point passing it through the scheduler
-         Resque.enqueue(retry_in_queue, *retry_args)
+         Resque.enqueue_to(retry_in_queue, retry_job_class, *retry_args)
        else
-         Resque.enqueue_in(temp_retry_delay, retry_in_queue, *retry_args)
+         Resque.enqueue_in_with_queue(retry_in_queue, temp_retry_delay, retry_job_class, *retry_args)
        end

        # remove retry key from redis if we handed retry off to another queue.
        clean_retry_key(*args) if retry_job_delegate