Sha256: d97f2e3101cb275d1e342d878c503a6c83c004124ef889f397dd42506928da87
Contents?: true
Size: 1.67 KB
Versions: 4
Compression:
Stored size: 1.67 KB
Contents
# frozen_string_literal: true

module PumaWorkerKiller
  # Monitors the memory footprint of a Puma cluster and sends TERM to the
  # largest worker whenever the combined usage exceeds a configured ceiling.
  class Reaper
    # max_ram            - memory ceiling in mb for the cluster as a whole
    # master             - Puma master handle, forwarded to PumaMemory (may be nil)
    # reaper_status_logs - when true, also log memory status while under the ceiling
    # pre_term           - optional callable invoked with the worker about to be TERMed
    # on_calculation     - optional callable invoked with every computed memory total
    def initialize(max_ram, master = nil, reaper_status_logs = true, pre_term = nil, on_calculation = nil)
      @cluster = PumaWorkerKiller::PumaMemory.new(master)
      @max_ram = max_ram
      @reaper_status_logs = reaper_status_logs
      @pre_term = pre_term
      @on_calculation = on_calculation
    end

    # Exposed for testing: delegates to the cluster's memory measurement.
    def get_total_memory
      @cluster.get_total_memory
    end

    # Measures cluster memory and, if it exceeds @max_ram, TERMs the largest
    # worker. Returns false without measuring when the workers have stopped.
    def reap
      return false if @cluster.workers_stopped?

      total_memory = get_total_memory
      @on_calculation&.call(total_memory)

      if total_memory > @max_ram
        @cluster.master.log "PumaWorkerKiller: Out of memory. #{@cluster.workers.count} workers consuming total: #{total_memory} mb out of max: #{@max_ram} mb. Sending TERM to pid #{@cluster.largest_worker.pid} consuming #{@cluster.largest_worker_memory} mb."

        # Fetch the largest worker once so that `@pre_term` and `term_worker`
        # both receive the SAME worker. This avoids a race where:
        #   Worker A consumes 100 mb, Worker B consumes 99 mb
        #   pre_term gets called with Worker A
        #   a new request lands on Worker B, pushing it to 101 mb
        #   term_largest_worker (used previously here) then TERMs Worker B,
        #   so the terminated worker was never passed to `@pre_term`
        largest_worker = @cluster.largest_worker
        @pre_term&.call(largest_worker)
        @cluster.term_worker(largest_worker)
      elsif @reaper_status_logs
        @cluster.master.log "PumaWorkerKiller: Consuming #{total_memory} mb with master and #{@cluster.workers.count} workers."
      end
    end
  end
end
Version data entries
4 entries across 4 versions & 1 rubygems