lib/twitterscraper/cli.rb in twitterscraper-ruby-0.18.0 vs lib/twitterscraper/cli.rb in twitterscraper-ruby-0.19.0

- lines removed (present only in the old version, 0.18.0)
+ lines added (present only in the new version, 0.19.0)

@@ -30,20 +30,21 @@
       tweets = client.query_tweets(options['query'], query_options)
       export(options['query'], tweets) unless tweets.empty?
     end
 
     def export(name, tweets)
-      filepath = options['output']
-      Dir.mkdir(File.dirname(filepath)) unless File.exist?(File.dirname(filepath))
-      write_json = lambda { File.write(filepath, generate_json(tweets)) }
+      options['format'].split(',').map(&:strip).each do |format|
+        file = build_output_name(format, options)
+        Dir.mkdir(File.dirname(file)) unless File.exist?(File.dirname(file))
 
-      if options['format'] == 'json'
-        write_json.call
-      elsif options['format'] == 'html'
-        File.write(filepath, Template.new.tweets_embedded_html(name, tweets, options))
-      else
-        write_json.call
+        if format == 'json'
+          File.write(file, generate_json(tweets))
+        elsif format == 'html'
+          File.write(file, Template.new.tweets_embedded_html(name, tweets, options))
+        else
+          puts "Invalid format #{format}"
+        end
       end
     end
 
     def generate_json(tweets)
       if options['pretty']
@@ -88,21 +89,21 @@
       options['daily_limit'] = options['daily_limit'].to_i if options['daily_limit']
       options['threads'] = (options['threads'] || 10).to_i
       options['threads_granularity'] ||= 'auto'
       options['format'] ||= 'json'
       options['order'] ||= 'desc'
-      options['output'] ||= build_output_name(options)
       options['cache'] = options['cache'] != 'false'
       options['proxy'] = options['proxy'] != 'false'
 
       options
     end
 
-    def build_output_name(options)
+    def build_output_name(format, options)
       query = options['query'].gsub(/[ :?#&]/, '_')
       date = [options['start_date'], options['end_date']].select { |val| val && !val.empty? }.join('_')
-      File.join('out', [options['type'], 'tweets', date, query].compact.join('_') + '.' + options['format'])
+      file = [options['type'], 'tweets', date, query].compact.join('_') + '.' + format
+      File.join('out', file)
     end
 
     def initialize_logger
       Twitterscraper.logger.level = ::Logger::DEBUG if options['verbose']
     end