#!/usr/bin/env ruby
# frozen_string_literal: true

require 'optparse'
require 'pwn'
require 'pry'
require 'sys/proctable'
require 'yaml'

opts = {}
OptionParser.new do |options|
  options.banner = "USAGE: #{$PROGRAM_NAME} [opts]"

  options.on('-cPATH', '--yaml-config=PATH', '<Optional - Path to YAML Config File>') do |p|
    opts[:yaml_config_path] = p
  end
end.parse!

begin
  def refresh_ps1_proc(opts = {})
    mode = opts[:mode]

    proc do |_target_self, _nest_level, pi|
      pi.config.pwn_repl_line += 1
      line_pad = format(
        '%0.3d',
        pi.config.pwn_repl_line
      )

      pi.config.prompt_name = :pwn
      name = "\001\e[1m\002\001\e[31m\002#{pi.config.prompt_name}\001\e[0m\002"
      version = "\001\e[36m\002v#{PWN::VERSION}\001\e[0m\002"
      line_count = "\001\e[34m\002#{line_pad}\001\e[0m\002"
      dchars = "\001\e[32m\002>>>\001\e[0m\002"
      dchars = "\001\e[33m\002***\001\e[0m\002" if mode == :splat

      if pi.config.chat_gpt
        pi.config.prompt_name = 'chatGPT.pwn'
        pi.config.prompt_name = 'SPEAKING.chatGPT.pwn' if pi.config.chat_gpt_speak
        name = "\001\e[1m\002\001\e[33m\002#{pi.config.prompt_name}\001\e[0m\002"
        dchars = "\001\e[32m\002>>>\001\e[33m\002"
        dchars = "\001\e[33m\002***\001\e[33m\002" if mode == :splat
        if pi.config.chat_gpt_debug
          dchars = "\001\e[32m\002(DEBUG) >>>\001\e[33m\002"
          dchars = "\001\e[33m\002(DEBUG) ***\001\e[33m\002" if mode == :splat
        end
      end

      "#{name}[#{version}]:#{line_count} #{dchars} ".to_s.scrub
    end
  end

  # Pry Monkey Patches \_(oo)_/
  class Pry
    # Overwrite the Pry::History#push method so duplicate history entries are retained,
    # which is required to properly replay automation in this prototyping driver.
    class History
      def push(line)
        return line if line.empty? || invalid_readline_line?(line)

        @history << line
        @history_line_count += 1
        @saver.call(line) if !should_ignore?(line) && Pry.config.history_save

        line
      end
      alias << push
    end

    def handle_line(line, options)
      if line.nil?
        config.control_d_handler.call(self)
        return
      end

      ensure_correct_encoding!(line)
      Pry.history << line unless options[:generated]

      @suppress_output = false
      inject_sticky_locals!
      begin
        unless process_command_safely(line)
          @eval_string += "#{line.chomp}\n" if !line.empty? || !@eval_string.empty?
        end
      rescue RescuableException => e
        self.last_exception = e
        result = e

        Pry.critical_section do
          show_result(result)
        end
        return
      end

      # This hook is supposed to be executed after each line of ruby code
      # has been read (regardless of whether eval_string is yet a complete expression)
      exec_hook :after_read, eval_string, self

      begin
        # In chatGPT mode the input is a natural-language request rather than Ruby,
        # so every line is treated as a complete expression.
        complete_expr = true if config.chat_gpt
        complete_expr = Pry::Code.complete_expression?(@eval_string) unless config.chat_gpt
      rescue SyntaxError => e
        output.puts e.message.gsub(/^.*syntax error, */, 'SyntaxError: ')
        reset_eval_string
      end

      if complete_expr
        @suppress_output = true if @eval_string =~ /;\Z/ ||
                                   @eval_string.empty? ||
                                   @eval_string =~ /\A *#.*\n\z/ ||
                                   config.chat_gpt

        # A bug in jruby makes java.lang.Exception not rescued by
        # `rescue Pry::RescuableException` clause.
        #
        # * https://github.com/pry/pry/issues/854
        # * https://jira.codehaus.org/browse/JRUBY-7100
        #
        # Until that gets fixed upstream, treat java.lang.Exception
        # as an additional exception to be rescued explicitly.
        #
        # This workaround has a side effect: java exceptions specified
        # in `Pry.config.unrescued_exceptions` are ignored.
        jruby_exceptions = []
        jruby_exceptions << Java::JavaLang::Exception if Helpers::Platform.jruby?

        begin
          # Reset eval string, in case we're evaluating Ruby that does something
          # like open a nested REPL on this instance.
          eval_string = @eval_string
          reset_eval_string
          result = evaluate_ruby(eval_string)
        rescue RescuableException, *jruby_exceptions => e
          # Eliminate following warning:
          # warning: singleton on non-persistent Java type X
          # (http://wiki.jruby.org/Persistence)
          if Helpers::Platform.jruby? && e.class.respond_to?('__persistent__')
            e.class.__persistent__ = true
          end

          self.last_exception = e
          result = e
        end

        Pry.critical_section do
          show_result(result)
        end
      end

      throw(:breakout) if current_binding.nil?
    end

    # Ensure the return value in chat_gpt mode reflects the input.
    # chatGPT requests are not evaluated as Ruby; the ensure block below still
    # updates the input history and fires the :after_eval hook.
    def evaluate_ruby(code)
      if config.chat_gpt
        result = code.to_s
        return
      end

      inject_sticky_locals!
      exec_hook :before_eval, code, self

      result = current_binding.eval(code, Pry.eval_path, Pry.current_line)
      set_last_result(result, code)
    ensure
      update_input_history(code)
      exec_hook :after_eval, result, self
    end
  end

  # Define Custom REPL Commands
  Pry::Commands.create_command 'welcome-banner' do
    description 'Display the random welcome banner, including basic usage.'

    def process
      puts PWN::Banner.welcome
    end
  end

  Pry::Commands.create_command 'toggle-pager' do
    description 'Toggle less on returned objects surpassing the terminal.'

    def process
      pi = pry_instance
      pi.config.pager ? pi.config.pager = false : pi.config.pager = true
    end
  end

  Pry::Commands.create_command 'toggle-chatGPT' do
    description "Interact w/ OpenAI's ChatGPT"

    def process
      pi = pry_instance
      pi.config.chat_gpt ? pi.config.chat_gpt = false : pi.config.chat_gpt = true
      pi.config.color = false if pi.config.chat_gpt
      pi.config.color = true unless pi.config.chat_gpt
    end
  end

  Pry::Commands.create_command 'toggle-chatGPT-debug' do
    description "Display the response_history object while using OpenAI's ChatGPT."

    def process
      pi = pry_instance
      pi.config.chat_gpt_debug ? pi.config.chat_gpt_debug = false : pi.config.chat_gpt_debug = true
    end
  end

  Pry::Commands.create_command 'toggle-chatGPT-speaks' do
    description 'Use speech capabilities within PWN to speak OpenAI ChatGPT answers.'

    def process
      pi = pry_instance
      pi.config.chat_gpt_speak ? pi.config.chat_gpt_speak = false : pi.config.chat_gpt_speak = true
    end
  end

  # Define REPL Hooks
  # Welcome Banner Hook
  Pry.config.hooks.add_hook(:before_session, :welcome) do |output, _binding, _pi|
    output.puts PWN::Banner.welcome
  end

  # ChatGPT Hooks
  Pry.config.hooks.add_hook(:before_session, :init_opts) do |_output, _binding, pi|
    if opts[:yaml_config_path] && File.exist?(opts[:yaml_config_path])
      yaml_config_path = opts[:yaml_config_path]
      yaml_config = YAML.load_file(yaml_config_path, symbolize_names: true)
      pi.config.chat_gpt_token = yaml_config[:bearer_token]
    end
  end

  Pry.config.hooks.add_hook(:after_read, :open_ai_hook) do |request, pi|
    if pi.config.chat_gpt && !request.chomp.empty?
      request = pi.input.line_buffer
      debug = pi.config.chat_gpt_debug
      token = pi.config.chat_gpt_token
      token ||= ''
      if token.empty?
        token = PWN::Plugins::AuthenticationHelper.mask_password(
          prompt: 'OpenAI API Key'
        )
        pi.config.chat_gpt_token = token
      end

      response_history = pi.config.chat_gpt_response_history
      speak_answer = pi.config.chat_gpt_speak
      response = PWN::Plugins::OpenAI.chat(
        token: token,
        request: request.chomp,
        temp: 1,
        max_tokens: 0,
        response_history: response_history,
        speak_answer: speak_answer
      )

      puts "\n\n\n\001\e[32m\002#{response[:choices].last[:content]}\001\e[31m\002\n\n\n"

      response_history = {
        id: response[:id],
        object: response[:object],
        model: response[:model],
        usage: response[:usage]
      }
      response_history[:choices] ||= response[:choices]

      if debug
        puts 'DEBUG: response_history => '
        pp response_history
        puts "\nresponse_history[:choices] Length: #{response_history[:choices].length}\n" unless response_history.nil?
      end

      pi.config.chat_gpt_response_history = response_history
    end
  end

  # Define PS1 Prompt
  Pry.config.pwn_repl_line = 0
  Pry.config.prompt_name = :pwn
  arrow_ps1_proc = refresh_ps1_proc
  splat_ps1_proc = refresh_ps1_proc(mode: :splat)
  prompt_ps1 = [arrow_ps1_proc, splat_ps1_proc]
  prompt = Pry::Prompt.new(
    :pwn,
    'PWN Prototyping REPL',
    prompt_ps1
  )

  # Start PWN REPL
  pwn_pid = Process.pid
  Pry.start(
    self,
    prompt: prompt
  )
rescue StandardError => e
  raise e
ensure
  # Clean up any child / grandchild processes spawned during the REPL session
  child_pids = Sys::ProcTable.ps(smaps: false).select do |pe|
    pe.ppid == pwn_pid
  end.map(&:pid)

  grandkid_pids = Sys::ProcTable.ps(smaps: false).select do |pe|
    child_pids.include?(pe.ppid)
  end.map(&:pid)

  grandkid_pids.each do |grandkid_pid|
    Process.kill('TERM', grandkid_pid)
  end

  child_pids.each do |child_pid|
    Process.kill('TERM', child_pid)
  end
end
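
# Example invocation (a minimal sketch; the paths and key value below are hypothetical,
# not shipped with this driver). The -c/--yaml-config flag points at a YAML file whose
# `bearer_token` key is loaded by the :init_opts hook above and used as the OpenAI API
# key once chatGPT mode is enabled via the `toggle-chatGPT` REPL command:
#
#   $ cat ~/.pwn/openai.yaml
#   bearer_token: '<YOUR OPENAI API KEY>'
#
#   $ pwn --yaml-config="${HOME}/.pwn/openai.yaml"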