module CloudCrowd

  # As you write your custom actions, have them inherit from CloudCrowd::Action.
  # All actions must implement a +process+ method, which should return a
  # JSON-serializable object that will be used as the output for the work unit.
  # See the default actions for examples.
  #
  # Optionally, actions may define +split+ and +merge+ methods to do mapping
  # and reducing around the +input+. +split+ should return an array of URLs --
  # to be mapped into WorkUnits and processed in parallel. In the +merge+ step,
  # +input+ will be an array of all the resulting outputs from calling process.
  #
  # All actions have use of an individual +work_directory+, for scratch files,
  # and spend their duration inside of it, so relative paths work well.
  class Action

    # Matches inputs that point at the local filesystem instead of HTTP.
    FILE_URL = /\Afile:\/\//

    attr_reader :input, :input_path, :file_name, :options, :work_directory

    # Initializing an Action sets up all of the read-only variables that
    # form the bulk of the API for action subclasses. (Paths to read from and
    # write to). It creates the +work_directory+ and moves into it.
    # If we're not merging multiple results, it downloads the input file into
    # the +work_directory+ before starting.
    def initialize(status, input, options, store)
      @input, @options, @store = input, options, store
      @job_id, @work_unit_id = options['job_id'], options['work_unit_id']
      @work_directory = File.expand_path(File.join(@store.temp_storage_path, storage_prefix))
      # File.exist? -- the `exists?` alias was deprecated and removed in Ruby 3.2.
      FileUtils.mkdir_p(@work_directory) unless File.exist?(@work_directory)
      status == MERGING ? parse_input : download_input
    end

    # Each Action subclass must implement a +process+ method, overriding this.
    def process
      raise NotImplementedError, "CloudCrowd::Actions must override 'process' with their own processing code."
    end

    # Download a file to the specified path. Returns the path.
    #
    # We invoke curl with an argument vector rather than a shell string: the
    # URL comes from the (potentially untrusted) work unit input, and
    # interpolating it into backticks would allow shell command injection.
    # `--url` ensures a URL beginning with '-' can't be parsed as an option.
    def download(url, path)
      system('curl', '-s', '-o', path, '--url', url)
      path
      # A pure-Ruby implementation is below, and, although it would be
      # wonderful not to shell out, RestClient wasn't handling URLs with encoded
      # entities (%20, for example), and doesn't let you download to a given
      # location. Getting a RestClient patch in would be ideal.
      #
      # if url.match(FILE_URL)
      #   FileUtils.cp(url.sub(FILE_URL, ''), path)
      # else
      #   resp = RestClient::Request.execute(:url => url, :method => :get, :raw_response => true)
      #   FileUtils.mv resp.file.path, path
      # end
    end

    # Takes a local filesystem path, saves the file to S3, and returns the
    # public (or authenticated) url on S3 where the file can be accessed.
    def save(file_path)
      save_path = File.join(storage_prefix, File.basename(file_path))
      @store.save(file_path, save_path)
    end

    # After the Action has finished, we remove the work directory and return
    # to the root directory (where workers run by default).
    def cleanup_work_directory
      FileUtils.rm_r(@work_directory) if File.exist?(@work_directory)
    end


    private

    # Convert an unsafe URL into a filesystem-friendly filename: percent-decode
    # it, replace every disallowed character run with a single '-', and fold
    # interior dots so only the real extension remains.
    def safe_filename(url)
      ext = File.extname(url)
      # URI.unescape was removed in Ruby 3.0. URI::DEFAULT_PARSER.unescape is
      # the direct replacement (CGI.unescape would also turn '+' into a space,
      # which we don't want for filenames).
      name = URI::DEFAULT_PARSER.unescape(File.basename(url))
      name = name.gsub(/[^a-zA-Z0-9_\-.]/, '-').gsub(/-+/, '-')
      File.basename(name, ext).gsub('.', '-') + ext
    end

    # The directory prefix to use for both local and S3 storage.
    # [action_name]/job_[job_id]/unit_[work_unit_it]
    # Memoized -- build the path parts inside the memoization so they aren't
    # recomputed on every call.
    def storage_prefix
      @storage_prefix ||= begin
        path_parts = []
        path_parts << Inflector.underscore(self.class)
        path_parts << "job_#{@job_id}"
        path_parts << "unit_#{@work_unit_id}" if @work_unit_id
        File.join(path_parts)
      end
    end

    # If we know that the input is JSON, replace it with the parsed form.
    def parse_input
      @input = JSON.parse(@input)
    end

    # If the input is a URL, download the file before beginning processing.
    # Inputs that don't parse as URLs (e.g. raw data) are left untouched.
    def download_input
      Dir.chdir(@work_directory) do
        input_is_url = !!URI.parse(@input) rescue false
        return unless input_is_url
        @input_path = File.join(@work_directory, safe_filename(@input))
        @file_name = File.basename(@input_path, File.extname(@input_path))
        download(@input, @input_path)
      end
    end

  end

end