lib/graphql/dataloader.rb in graphql-1.12.8 vs lib/graphql/dataloader.rb in graphql-1.12.9

- lines prefixed with `-` are from the old version (1.12.8)
+ lines prefixed with `+` are from the new version (1.12.9)

# Get a Source instance from this dataloader, identified by `source_class` and
# its batching parameters. The instance is created on first use (via the
# `@source_cache` default block) and cached for the lifetime of this dataloader.
#
# @param source_class [Class<GraphQL::Dataloader::Source>]
# @param batch_parameters [Array<Object>]
# @return [GraphQL::Dataloader::Source] An instance of {source_class}, initialized with `self, *batch_parameters`,
#   and cached for the lifetime of this {Multiplex}.
if RUBY_VERSION < "3"
  # Ruby 2.x: keyword args arrive inside `*batch_parameters`, so the splat alone
  # is a sufficient cache key.
  def with(source_class, *batch_parameters)
    @source_cache[source_class][batch_parameters]
  end
else
  # Ruby 3 separates positional and keyword arguments, so both must be captured
  # and combined into one cache key — otherwise sources taking kwargs would
  # never hit the cache (and `new(*batch_parameters)` would mis-pass kwargs).
  def with(source_class, *batch_args, **batch_kwargs)
    batch_parameters = [batch_args, batch_kwargs]
    @source_cache[source_class][batch_parameters]
  end
end
# Resume `fiber`, re-raising any uncaught `throw` from inside the fiber as a
# `throw` in the resuming context, so `catch` blocks outside the fiber still work.
#
# @param fiber [Fiber]
# @return [Object] whatever the fiber yields or returns
def resume(fiber)
  begin
    fiber.resume
  rescue UncaughtThrowError => err
    throw err.tag, err.value
  end
end

# Builds a Fiber whose body first copies the current fiber-local variables
# (`Thread.current[...]`) into itself, then runs the given block. Many gems
# (RequestStore, MiniRacer, etc.) track execution context in these locals and
# misbehave in a fresh Fiber without this copy.
#
# @see https://github.com/rmosolgo/graphql-ruby/issues/3449
# @return [Fiber]
def spawn_fiber
  locals = Thread.current.keys.each_with_object({}) do |key, copied|
    copied[key] = Thread.current[key]
  end

  Fiber.new do
    locals.each_pair { |key, value| Thread.current[key] = value }
    yield
  end
end