# Copyright (c) 2023 Contrast Security, Inc. See https://www.contrastsecurity.com/enduser-terms-0317a for more details.
# frozen_string_literal: true

require 'contrast/agent/reporting/details/cmd_injection_details'
require 'contrast/agent/reporting/attack_result/user_input'
require 'contrast/agent/protect/rule/cmdi/cmdi_input_classification'
require 'contrast/agent/reporting/attack_result/response_type'

module Contrast
  module Agent
    module Protect
      module Rule
        # The Ruby implementation of the Protect Command Injection rule.
        # This base class holds the logic shared by the CMDI sub-rules, such
        # as the Semantic checks, and reports, or blocks, detected command
        # injection attacks.
        class CmdiBaseRule < Contrast::Agent::Protect::Rule::Base
          include Contrast::Components::Logger::InstanceMethods
          include Contrast::Agent::Reporting::InputType

          CHAINED_COMMAND_CHARS = /[;&|<>]/.cs__freeze
          APPLICABLE_USER_INPUTS = [
            BODY, COOKIE_VALUE, HEADER, PARAMETER_NAME,
            PARAMETER_VALUE, JSON_VALUE, MULTIPART_VALUE,
            MULTIPART_FIELD_NAME, XML_VALUE, DWR_VALUE
          ].cs__freeze

          # CMDI input classification
          #
          # @return [module<Contrast::Agent::Protect::Rule::CmdiInputClassification>]
          def classification
            @_classification ||= Contrast::Agent::Protect::Rule::CmdiInputClassification.cs__freeze
          end

          # CMDI Semantic infilter:
          # This check does not rely on input classification.
          # If a value matches the CMDI applicable input types and its length is > 2,
          # we check whether it is being used as a command backdoor.
          #
          # @param context [Contrast::Agent::RequestContext] current request context
          # @param classname [String] name of the class triggering the rule
          # @param method [String] name of the method triggering the rule
          # @param command [String] the potentially dangerous command being executed
          # @raise [Contrast::SecurityException] if the rule mode is set
          #   to BLOCK and valid CMDI is detected.
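          # @example Illustrative call from a hypothetical patch point (receiver and arguments are examples, not actual agent wiring)
          #   rule.infilter(context, 'Kernel', 'system', 'ls; cat /etc/passwd')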
          def infilter context, classname, method, command
            return unless infilter?(command)
            return if protect_excluded_by_url?(rule_name, context.request.path)
            return unless (result = build_violation(context, command))

            append_to_activity(context, result)
            raise_error(classname, method) if blocked_violation?(result)
          end

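          # Build, or update, the attack result for a matched command. When the
          # rule mode is NO_ACTION or PERMIT the passed-in result is returned
          # unchanged. (Parameter descriptions here are inferred from usage
          # within this class.)
          #
          # @param context [Contrast::Agent::RequestContext] current request context
          # @param input_analysis_result the matched input analysis result, or nil
          #   for semantic detections without a specific input
          # @param result an existing attack result to append to, or nil to build one
          # @param candidate_string [String] the command being evaluated
          # @param kwargs extra data, such as the agent_lib :result_struct, passed
          #   through to the sample builder
          # @return the built or updated attack result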
          def build_attack_with_match(context,
                                      input_analysis_result,
                                      result,
                                      candidate_string,
                                      **kwargs)
            return result if mode == :NO_ACTION || mode == :PERMIT

            result ||= build_attack_result(context)
            update_successful_attack_response(context, input_analysis_result, result, candidate_string)
            append_sample(context, input_analysis_result, result, candidate_string, **kwargs)
            result
          end

          protected

          # Used to customize the raised error message.
          #
          # @param classname [String] Name of the class
          # @param method [String] name of the method triggering the rule
          # @raise [Contrast::SecurityException]
          def raise_error classname, method
            raise(Contrast::SecurityException.new(self,
                                                  'Command Injection Rule triggered. ' \
                                                  "Call to #{ classname }.#{ method } blocked."))
          end

          # Allows the InputAnalysis results from the Service to be extracted
          # early. Because results are not necessarily available on the context
          # across processes, extract them early and pass them into this method.
          #
          # @param context [Contrast::Agent::RequestContext]
          # @param potential_attack_string [String, nil]
          # @param ia_results [Array<Contrast::Agent::Reporting::InputAnalysis>]
          # @param kwargs extra data passed through to the attack builders
          # @return [Contrast::Agent::Reporting::AttackResult, nil]
          def find_attacker_with_results context, potential_attack_string, ia_results, **kwargs
            logger.trace('Checking vectors for attacks', rule: rule_name, input: potential_attack_string)
            result = super(context, potential_attack_string, ia_results, **kwargs) if ia_results
            if result.nil? && potential_attack_string
              result = find_probable_attacker(context, potential_attack_string, ia_results, **kwargs)
            end
            result
          end

          # Build a subclass of the RaspRuleSample using the candidate command
          # string and the input analysis evaluation.
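          #
          # @param context [Contrast::Agent::RequestContext] current request context
          # @param input_analysis_result the matched input analysis result, or nil
          #   for semantic detections without a specific input (description inferred
          #   from usage)
          # @param candidate_string [String, nil] the command being evaluated
          # @param kwargs extra data; :result_struct carries the agent_lib evaluation
          # @return the populated sample with CmdInjectionDetails attached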
          def build_sample context, input_analysis_result, candidate_string, **kwargs
            sample = build_base_sample(context, input_analysis_result)
            sample.details = Contrast::Agent::Reporting::Details::CmdInjectionDetails.new

            # extract data from kwargs for the agent_lib check
            agent_lib_result_struct = kwargs[:result_struct]
            command = candidate_string || input_analysis_result.value
            command = Contrast::Utils::StringUtils.protobuf_safe_string(command)
            handle_with_agent_lib(sample, agent_lib_result_struct, command)

            # This is a special case where the user input is UNKNOWN_USER_INPUT but
            # we want to send the attack value. Usually we do this for semantic rules.
            if input_analysis_result.nil?
              ui = Contrast::Agent::Reporting::UserInput.new
              ui.input_type = :UNKNOWN
              ui.value = command
              sample.user_input = ui
            end

            sample
          end

          private

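          # When command execution hardening is enabled, report every command
          # execution, even without a matched attacker-controlled input.
          #
          # @param context [Contrast::Agent::RequestContext] current request context
          # @param command [String] the command being executed
          # @param kwargs extra data passed through to the sample builder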
          def report_command_execution context, command, **kwargs
            return unless report_any_command_execution?

            build_attack_with_match(context, nil, nil, command, **kwargs)
          end

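          # Semantic fallback used when no exact input match was found: if the
          # executed command contains command-chaining characters, look for an
          # input whose value also contains them and treat that input as the
          # probable attacker.
          #
          # @param context [Contrast::Agent::RequestContext] current request context
          # @param potential_attack_string [String] the command being executed
          # @param ia_results [Array, nil] input analysis results for the request
          # @param kwargs extra data passed through to the attack builders
          # @return the attack result, or nil if no probable attacker was found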
          def find_probable_attacker context, potential_attack_string, ia_results, **kwargs
            return unless chained_command?(potential_attack_string)

            # ia_results may be nil when no input analysis is available for the request
            likely_attacker = ia_results&.find { |input_analysis_result| chained_command?(input_analysis_result.value) }
            return unless likely_attacker

            build_attack_with_match(context, likely_attacker, nil, potential_attack_string, **kwargs)
          end

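          # @param command [String] the command being executed
          # @return [Boolean] true when the command contains characters used to
          #   chain shell commands (;, &, |, < or >)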
          def chained_command? command
            CHAINED_COMMAND_CHARS.match?(command)
          end

          # Part of the Hardening for Command Injection detection is the
          # ability to detect and prevent any command execution from within the
          # application. This check determines if that hardening has been
          # enabled.
          # @return [Boolean] if the agent should report all command
          #   executions.
          def report_any_command_execution?
            ::Contrast::PROTECT.report_any_command_execution?
          end

          # Attach the agent_lib evaluation data to the sample details: record
          # the command and the start/end indexes of the detected injection
          # within it, defaulting the end index to the command length.
          #
          # @param sample the sample being built for this attack
          # @param agent_lib_result_struct [Hash, nil] the agent_lib evaluation result
          # @param command [String] the command being reported
          def handle_with_agent_lib sample, agent_lib_result_struct, command
            sample.details.cmd = command
            if agent_lib_result_struct&.cs__is_a?(Hash)
              sample.details.start_idx = agent_lib_result_struct[:start_index]
              sample.details.end_idx = if (agent_lib_result_struct[:end_index]).zero?
                                         command.length
                                       else
                                         agent_lib_result_struct[:end_index]
                                       end
            else
              sample.details.end_idx = command.length
            end
          end
        end
      end
    end
  end
end