lib/dumper/job.rb in dumper-1.0.2 vs lib/dumper/job.rb in dumper-1.1.0
- old
+ new
@@ -4,15 +4,12 @@
module Dumper
class Job
include POSIX::Spawn
include Dumper::Utility::LoggingMethods
- MAX_FILESIZE = 4.gigabytes
-
def initialize(agent, job)
@agent = agent
- @stack = agent.stack
@job = job
end
def run_and_exit
@job[:servers].each do |server|
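
Note: the constant and the cached stack leave the job entirely; both values are now read through the agent at the point of use, as the later hunks show. A minimal stand-in sketch of that shape, using plain Structs rather than the gem's real Agent and Stack classes, and assuming the agent's limit still matches the old 4 GB default:

Agent = Struct.new(:stack, :max_filesize)   # stand-in, not Dumper::Agent's real API
Stack = Struct.new(:databases)              # stand-in, not Dumper::Stack's real API

agent = Agent.new(Stack.new({ mysql: :mysql_handler }), 4 * 1024**3)

# 1.0.2: Job cached agent.stack in @stack and compared dump sizes to MAX_FILESIZE.
# 1.1.0: Job keeps only @agent and reads both values through it when needed.
agent.stack.databases[:mysql]   # => :mysql_handler
agent.max_filesize              # => 4294967296, i.e. the old 4.gigabytes
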
@@ -23,18 +20,14 @@
log 'exiting...'
exit!(true) # Do not use exit or abort to skip at_exit execution, or pid could get deleted on thin
end
def perform(server)
- # Initialize database
+ # Find database
server_type = server[:type].to_sym
- if Dumper::Stack::DATABASES.keys.include?(server_type)
- @database = Dumper::Stack::DATABASES[server_type].new(@stack)
- @database.config = OpenStruct.new(@stack.configs[Dumper::Stack::DATABASES.key(@database.class)])
- else
- abort_with "invalid server type: #{server_type}"
- end
+ abort_with "invalid server type: #{server_type}" unless Dumper::Stack::DATABASES.keys.include?(server_type)
+ return log "database not found: #{@database.inspect}" unless @database = @agent.stack.databases[server_type]
# Prepare
json = @agent.api_request('backup/prepare', :params => { :server_id => server[:id], :manual => server[:manual].to_s, :ext => @database.file_ext })
abort_with('backup/prepare failed') unless json[:status] == 'ok'
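
Note: perform no longer instantiates a database from Dumper::Stack::DATABASES; it only validates the server type against that map, then fetches the already-detected database from @agent.stack.databases. The assignment inside unless both sets @database and tests it: when the lookup returns nil the method logs and returns, and @database.inspect prints "nil". A self-contained illustration of that guard-clause idiom with stand-in data:

databases   = { mysql: 'MySQL handler' }   # stand-in for @agent.stack.databases
server_type = :postgresql                  # a type the stack did not detect

# Assignment inside `unless` assigns the variable and then tests it for nil,
# mirroring the guard clause added in 1.1.0.
unless database = databases[server_type]
  puts "database not found: #{database.inspect}"   # => database not found: nil
end
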
@@ -59,10 +52,10 @@
Process.waitpid(pid)
end
dump_duration = Time.now - start_at
log "dump_duration = #{dump_duration}"
- if (filesize = File.size(@database.dump_path)) > MAX_FILESIZE
+ if (filesize = File.size(@database.dump_path)) > @agent.max_filesize
abort_with("max filesize exceeded: #{filesize}", :too_large)
end
upload_to_s3(json[:url], json[:fields])
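
Note: the size guard itself is unchanged except for where the limit comes from: @agent.max_filesize replaces the hard-coded MAX_FILESIZE, presumably so the limit can differ per agent or plan instead of being fixed at 4 GB. A stand-alone sketch of the same check, with the limit value and file contents assumed for illustration:

require 'tempfile'

max_filesize = 4 * 1024**3              # assumed limit; 1.0.2 hard-coded 4.gigabytes
dump = Tempfile.new('dump')
dump.write('pretend database dump')
dump.flush

filesize = File.size(dump.path)
if filesize > max_filesize
  warn "max filesize exceeded: #{filesize}"   # the gem aborts with :too_large here
else
  puts "dump is #{filesize} bytes, within the limit"
end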