bin/rmega-dl in rmega-0.2.2 vs bin/rmega-dl in rmega-0.2.4
- old
+ new
@@ -16,10 +16,11 @@
opts.banner << "\t#{File.basename(__FILE__)} url [options]\n"
opts.banner << "\t#{File.basename(__FILE__)} path [options]\n"
opts.banner << "Examples:\n"
opts.banner << "\t#{File.basename(__FILE__)} 'https://mega.nz/#!aBkHBKLX!n4kqzbJooqc3o_s96PZjN1tEJzQ4QQwskHf7YqKa'\n"
opts.banner << "\t#{File.basename(__FILE__)} https://www.reddit.com/r/megalinks\n"
+ opts.banner << "\t#{File.basename(__FILE__)} mylinks.txt\n"
opts.banner << "\t#{File.basename(__FILE__)} /remote/docs/myfile.txt -u email@localhost\n"
opts.banner << "Options:"
opts.on("-o PATH", "--output", "Local destination path") { |path|
cli_options[:output] = path
@@ -34,16 +35,33 @@
root = session.storage.root
node = traverse_storage(root, cli_options[:url].dup)
raise("Node not found - #{cli_options[:url]}") unless node
node.download(cli_options[:output] || Dir.pwd)
else
- urls = [cli_options[:url]]
+ urls = []
- unless mega_url?(cli_options[:url])
- html = Rmega::Session.new.http_get_content(cli_options[:url])
- urls = html.scan(Rmega::Nodes::Factory::URL_REGEXP).flatten.uniq
- raise("Nothing to download") if urls.empty?
+ if mega_url?(cli_options[:url])
+        # A valid MEGA URL
+ urls = [cli_options[:url]]
+ else
+        # A text file with a list of MEGA URLs (considering only files < 1 MB)
+ if File.exists?(cli_options[:url])
+ if File.size(cli_options[:url]) < 1_000_000
+ File.open(cli_options[:url], "rb") do |file|
+ file.each_line do |line|
+ line.strip!
+ urls << line if mega_url?(line)
+ end
+ end
+ end
+ else
+          # A link to a web page with some MEGA URLs in its body
+ html = Rmega::Session.new.http_get_content(cli_options[:url])
+ urls = html.scan(Rmega::Nodes::Factory::URL_REGEXP).flatten.uniq
+ end
end
+
+ raise("Nothing to download") if urls.empty?
urls.each_with_index do |url, index|
node = Rmega::Nodes::Factory.build_from_url(url)
print "[#{index+1}/#{urls.size}] " if urls.size > 1
node.download(cli_options[:output] || Dir.pwd)