lib/intelli_agent/openai.rb in intelli_agent-0.2.7 vs lib/intelli_agent/openai.rb in intelli_agent-0.2.8

- old
+ new

@@ -53,48 +53,48 @@
     def response.content = dig('choices', 0, 'message', 'content').strip

     response
   end

-  def self.single_prompt(prompt:, model: :gpt_basic, response_format: nil, max_tokens: MAX_TOKENS, tools: nil, function_run_context: self)
-    chat(messages: [{ user: prompt }], model:, response_format:, max_tokens:, tools:, function_run_context:)
+  def self.single_prompt(prompt:, model: :gpt_basic, response_format: nil, max_tokens: MAX_TOKENS, tools: nil, auto_run_functions: false, function_context: nil)
+    chat(messages: [{ user: prompt }], model:, response_format:, max_tokens:, tools:, auto_run_functions:, function_context:)
   end

-  def self.single_chat(system:, user:, model: :gpt_basic, response_format: nil, max_tokens: MAX_TOKENS, tools: nil, function_run_context: nil)
-    chat(messages: [{ system: }, { user: }], model:, response_format:, max_tokens:, tools:, function_run_context:)
+  def self.single_chat(system:, user:, model: :gpt_basic, response_format: nil, max_tokens: MAX_TOKENS, tools: nil, auto_run_functions: false, function_context: nil)
+    chat(messages: [{ system: }, { user: }], model:, response_format:, max_tokens:, tools:, auto_run_functions:, function_context:)
   end

-  def self.chat(messages:, model: :gpt_basic, response_format: nil, max_tokens: MAX_TOKENS, tools: nil, function_run_context: self)
+  def self.chat(messages:, model: :gpt_basic, response_format: nil, max_tokens: MAX_TOKENS, tools: nil, auto_run_functions: false, function_context: nil)
     model = select_model(model)

     # o1 models don't support max_tokens; they take max_completion_tokens instead
     is_o1_model = model.start_with?('o1')
     max_completion_tokens = max_tokens if is_o1_model

     messages = parse_messages(messages)

-    parameters = { model:, messages: }
+    parameters = { model:, messages:, store: true }
     parameters[:max_completion_tokens] = max_completion_tokens if is_o1_model
     parameters[:max_tokens] = max_completion_tokens unless is_o1_model

     parameters[:response_format] = { type: 'json_object' } if response_format.eql?(:json)
     parameters[:tools] = tools if tools

     response = OpenAI::Client.new.chat(parameters:)
     response.extend(ResponseExtender)

-    if response.functions?
-      raise 'Function run context not provided' if function_run_context.nil?
+    if response.functions? && auto_run_functions
+      raise 'Function context not provided for auto-running functions' if function_context.nil?

       parameters[:messages] << response.message

       response.functions.each do |function|
         parameters[:messages] << {
           tool_call_id: function[:id],
           role: :tool,
           name: function[:name],
-          content: function_run_context.send(function[:name], **function[:arguments])
+          content: function_context.send(function[:name], **function[:arguments])
         }
       end

       response = OpenAI::Client.new.chat(parameters:)
       response.extend(ResponseExtender)
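What changed in 0.2.8: automatic tool execution is now opt-in. In 0.2.7, chat dispatched any returned tool calls against function_run_context (defaulting to self) unconditionally, or raised if no context was given; in 0.2.8 you must pass auto_run_functions: true together with a function_context object, whose methods are invoked via send with the model's arguments, after which the tool results are appended to the conversation and a follow-up request is made. Requests are also now sent with store: true, which asks the OpenAI API to retain the completion.

Below is a minimal usage sketch of the 0.2.8 API based on the signatures in the diff. The WeatherTools class, its get_weather method, and the tool schema are hypothetical illustrations, not part of the gem, and the sketch assumes the underlying ruby-openai client is already configured with an API key.

require 'intelli_agent'

# Hypothetical context object; its method names must match the declared tool
# names, because chat dispatches each tool call via `send`.
class WeatherTools
  def get_weather(location:)
    "22 degrees C and sunny in #{location}"
  end
end

# A standard OpenAI tool definition (hypothetical example schema).
tools = [
  {
    type: 'function',
    function: {
      name: 'get_weather',
      description: 'Get the current weather for a location',
      parameters: {
        type: 'object',
        properties: { location: { type: 'string' } },
        required: ['location']
      }
    }
  }
]

# With auto_run_functions: true, any tool calls in the first response are run
# against function_context and a second request returns the final answer.
response = IntelliAgent::OpenAI.single_chat(
  system: 'You are a helpful weather assistant.',
  user: 'What is the weather in Lisbon?',
  tools: tools,
  auto_run_functions: true,
  function_context: WeatherTools.new
)

puts response.content

Leaving auto_run_functions at its default of false lets callers inspect response.functions and execute the tools themselves, which 0.2.7 did not allow: a tool-calling response there either ran automatically or raised.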