# Copyright (c) 2023 Contrast Security, Inc. See https://www.contrastsecurity.com/enduser-terms-0317a for more details.
# frozen_string_literal: true

require 'contrast/components/logger'
require 'contrast/components/scope'
require 'contrast/utils/object_share'
require 'contrast/agent/reporting/attack_result/response_type'
require 'contrast/agent/reporting/attack_result/attack_result'
require 'contrast/agent/protect/rule/utils/builders'
require 'contrast/agent/protect/rule/utils/filters'

module Contrast
  module Agent
    module Protect
      module Rule
        # This is a basic rule for Protect. It's the abstract class which all other
        # protect rules extend in order to function.
        #
        # @abstract Subclass and override {#prefilter}, {#infilter}, {#find_attacker}, {#postfilter} to implement
        class Base
          include Contrast::Components::Logger::InstanceMethods
          include Contrast::Components::Scope::InstanceMethods
          include Contrast::Agent::Protect::Rule::Builders
          include Contrast::Agent::Protect::Rule::Filters

          # Rule identifier as reported to TeamServer; subclasses shadow this
          # via their own #rule_name implementation.
          RULE_NAME = 'base-rule'
          # Modes in which an attack results in the response being blocked.
          BLOCKING_MODES = Set.new(%i[BLOCK BLOCK_AT_PERIMETER]).cs__freeze
          # Response types for which a caller stack is built and attached to the
          # reported sample (see #append_stack).
          STACK_COLLECTION_RESULTS = Set.new([
                                               Contrast::Agent::Reporting::ResponseType::BLOCKED,
                                               Contrast::Agent::Reporting::ResponseType::MONITORED
                                             ]).cs__freeze
          # Rules reported as SUSPICIOUS rather than MONITORED/PROBED because
          # their detection doesn't trace input with enough confidence for a
          # definitive determination.
          SUSPICIOUS_REPORTING_RULES = %w[
            unsafe-file-upload
            reflected-xss
            cmd-injection-semantic-dangerous-paths
            cmd-injection-semantic-chained-commands
            path-traversal-semantic-file-security-bypass
            sql-injection-semantic-dangerous-functions
          ].cs__freeze

          # @return [Symbol] the configured mode for this rule
          #   (e.g. :MONITOR, :BLOCK, :BLOCK_AT_PERIMETER, :NO_ACTION).
          attr_reader :mode

          # Initializes a new rule: registers this instance in the global
          # defend-rules registry, keyed by #rule_name, then caches its mode
          # from the active settings.
          def initialize
            ::Contrast::PROTECT.defend_rules[rule_name] = self
            @mode = mode_from_settings
          end

          # Message to display when the rule is triggered and the response is
          # blocked.
          #
          # @return [String] user-facing explanation naming the rule
          def block_message
            format('Contrast Security Protect %s Triggered. Response blocked.', rule_name)
          end

          # Should return the name as it is known to Teamserver; defaults to
          # this class's RULE_NAME. Subclasses override to return their own
          # identifier.
          #
          # @return [String]
          def rule_name
            RULE_NAME
          end

          # Should return the short name as known to TeamServer.
          # NOTE(review): orphaned doc comment — the short-name reader it
          # documents is not defined on this abstract base; subclasses are
          # expected to provide their own implementation.
          #
          # @return [String]

          # Should return list of all sub_rules.
          # Extend for each main rule any sub-rules.
          #
          # @return [Array] the shared frozen empty array by default, so
          #   callers must not mutate the returned value.
          def sub_rules
            Contrast::Utils::ObjectShare::EMPTY_ARRAY
          end

          # The classification module used for each specific rule to
          # classify input data and score it. Extend for each rule.
          #
          # @return [Module, nil] nil on the abstract base; concrete rules
          #   return their input-classification module
          def classification; end

          # Generic method forwarder, shorthand for classification:
          # Input Classification stage is done to determine if an user input is DEFINITEATTACK or to be ignored.
          #
          # NOTE: on the abstract base, #classification returns nil, so calling
          # this without overriding #classification raises NoMethodError.
          #
          # @param input_type [Contrast::Agent::Reporting::InputType] The type of the user input.
          # @param value [Hash<String>] the value of the input.
          # @param input_analysis [Contrast::Agent::Reporting::InputAnalysis] Holds all the results from the
          #                                                       agent analysis from the current
          #                                                       Request.
          # @return ia [Contrast::Agent::Reporting::InputAnalysis, nil] with updated results.
          def classify input_type, value, input_analysis
            classification.classify(rule_name, input_type, value, input_analysis)
          end

          # Check if rule is enabled.
          #
          # @return [Boolean] true when protect is active, the rule is not in
          #   the disabled list, and its mode is anything but :NO_ACTION
          def enabled?
            # Protect requires both the agent and the protect feature to be on.
            return false unless ::Contrast::AGENT.enabled? && ::Contrast::PROTECT.enabled?

            # Respect the explicit per-rule disable list from configuration.
            disabled = ::Contrast::PROTECT.rule_config&.disabled_rules
            return false if disabled&.include?(rule_name)

            # A rule left in NO_ACTION mode is effectively off.
            @mode != :NO_ACTION
          end

          # Check whether any of the given exclusions applies to this
          # protection rule.
          #
          # @param exclusions [Array<Contrast::Agent::Reporting::Settings::ExclusionBase>]
          # @return [Boolean]
          def excluded? exclusions
            name = rule_name
            Array(exclusions).any? { |exclusion| exclusion.protection_rule?(name) }
          end

          # Return false for rules that modify or inspect the response body
          # during postfilter
          #
          # @return [Boolean] true when the rule can safely be evaluated in
          #   streaming requests; the base implementation always allows it
          def stream_safe?
            true
          end

          # Attach the given result to the current request's context so it is
          # reported to TeamServer with the request's activity.
          #
          # @param context [Contrast::Agent::RequestContext] the context of the
          #   request in which this input is evaluated.
          # @param result [Contrast::Agent::Reporting::AttackResult, nil]
          #   skipped entirely when nil
          def append_to_activity context, result
            return unless result

            context.activity.attach_defend(result)
          end

          # With this we log to CEF.
          #
          # Fix: the original unconditionally read +result.samples[0]+ and then
          # dereferenced it, raising NoMethodError when the result carried no
          # samples (or was nil). Logging should never crash the request, so we
          # bail out silently in that case.
          #
          # @param result [Contrast::Agent::Reporting::AttackResult]
          # @param attack [Symbol] the type of message we want to send
          # @param value [String] the input value we want to log, used when the
          #   sample's user input carries no value
          def cef_logging result, attack = :ineffective_attack, value: nil
            # Nothing to log without a sample; guard against nil result/empty samples.
            sample = result&.samples&.first
            return unless sample

            outcome = result.response.to_s
            input_type = sample.user_input.input_type.to_s
            input_value = sample.user_input.value || value
            cef_logger.send(attack, result.rule_id, outcome, input_type, input_value)
          end

          protected

          # Read this rule's mode from the active settings, tracing the lookup.
          #
          # @return [Symbol] the configured mode for this rule
          def mode_from_settings
            current = ::Contrast::PROTECT.rule_mode(rule_name)
            logger.trace('Retrieving rule mode', rule: rule_name, mode: current)
            current
          end

          # Rule blocked check: is this rule both enabled and configured in a
          # mode that blocks the response?
          #
          # @return [Boolean]
          def blocked?
            return false unless enabled?

            BLOCKING_MODES.include?(mode)
          end

          # Check if the protect rules is excluded by url from the exclusion rules for this application.
          # Pure delegation to the settings excluder.
          #
          # @param rule_id [String]
          # @return [Boolean]
          def protect_excluded_by_url? rule_id
            Contrast::SETTINGS.excluder.protect_excluded_by_url?(rule_id)
          end

          # Check if the protect rules is excluded by input from the exclusion rules for this application.
          # Pure delegation to the settings excluder.
          #
          # @param results [Array<Contrast::Agent::Reporting::InputAnalysis>]
          # @return [Boolean]
          def protect_excluded_by_input? results
            Contrast::SETTINGS.excluder.protect_excluded_by_input?(results)
          end

          # Allows for the InputAnalysis from Agent Library to be extracted
          # early. Walks the given analysis results and folds them into a
          # single attack result.
          #
          # @param context [Contrast::Agent::RequestContext]
          # @param potential_attack_string [String, nil] when present, only
          #   results whose value occurs within it are reported
          # @param ia_results [Array<Contrast::Agent::Reporting::InputAnalysis>]
          # @param kwargs [Hash] rule-specific report-building context
          # @return [Contrast::Agent::Reporting::AttackResult, nil]
          def find_attacker_with_results context, potential_attack_string, ia_results, **kwargs
            logger.trace('Checking vectors for attacks', rule: rule_name, input: potential_attack_string)

            ia_results.reduce(nil) do |attack_result, ia_result|
              if !potential_attack_string
                build_attack_without_match(context, ia_result, attack_result, **kwargs)
              elsif potential_attack_string.index(ia_result.value)
                build_attack_with_match(context, ia_result, attack_result, potential_attack_string, **kwargs)
              else
                # No occurrence of this vector in the candidate string; keep
                # whatever result we have accumulated so far.
                attack_result
              end
            end
          end

          # By default, rules do not have to find attackers as they do not have
          # Input Analysis. Any attack for the standard rule will be evaluated
          # at execution time. As such, those rules are expected to implement
          # this custom behavior
          #
          # @param context [Contrast::Agent::RequestContext] the context for
          #   the current request
          # @param potential_attack_string [String] the input that may violate
          #   the rule and matched the attack detection logic
          # @param kwargs [Hash] key-value pairs used by the rule to build a
          #   report.
          # @return [Contrast::Agent::Reporting::AttackResult, nil]
          def find_attacker context, potential_attack_string, **kwargs
            find_attacker_with_results(context, potential_attack_string, gather_ia_results(context), **kwargs)
          end

          # Update the message's response type to be sent to TS, depending on
          # the attack result and rule policy. The match with the rule is also
          # logged, and the attack counter of the ia_result is increased.
          #
          # Change: use the idiomatic `+= 1` increment in place of the verbose
          # `x = x + 1` form.
          #
          # @param context [Contrast::Agent::RequestContext] the context for
          #   the current request
          # @param ia_result [Contrast::Agent::Reporting::InputAnalysis, nil]
          # @param result [Contrast::Agent::Reporting::AttackResult]
          # @param attack_string [String, nil] Potential attack vector
          # @return [Contrast::Agent::Reporting::AttackResult] the same result, mutated
          def update_successful_attack_response context, ia_result, result, attack_string = nil
            case mode
            when :MONITOR
              # We are checking the result as the ia_result would not contain the sub-rules.
              result.response = if SUSPICIOUS_REPORTING_RULES.include?(result&.rule_id)
                                  Contrast::Agent::Reporting::ResponseType::SUSPICIOUS
                                else
                                  Contrast::Agent::Reporting::ResponseType::MONITORED
                                end
            when :BLOCK
              result.response = Contrast::Agent::Reporting::ResponseType::BLOCKED
            end

            # Track how many times this input has been seen attacking this rule.
            ia_result.attack_count += 1 if ia_result
            log_rule_matched(context, ia_result, result.response, attack_string)

            result
          end

          # Update the response type for perimeter rules ( BAP ).
          #
          # @param context [Contrast::Agent::RequestContext] the context of the
          #   request in which this input is evaluated.
          # @param ia_result [Contrast::Agent::Reporting::InputAnalysis] the
          #   analysis of the input that was determined to be an attack
          # @param result [Contrast::Agent::Reporting::AttackResult] previous
          #   attack result for this rule, if one exists, in the case of
          #   multiple inputs being found to violate the protection criteria
          # @return [Contrast::Agent::Reporting::AttackResult]
          def update_perimeter_attack_response context, ia_result, result
            unless mode == :BLOCK_AT_PERIMETER
              # Outside perimeter blocking, only probes (no recorded attacks)
              # get a reporter response type assigned.
              if ia_result.nil? || ia_result.attack_count.zero?
                result.response = assign_reporter_response_type(ia_result)
                log_rule_probed(context, ia_result)
              end
              return result
            end

            # sqli/no-sqli treat BLOCK_AT_PERIMETER as a full block (deprecated mode).
            result.response = if blocked_rule?(ia_result)
                                Contrast::Agent::Reporting::ResponseType::BLOCKED
                              else
                                Contrast::Agent::Reporting::ResponseType::BLOCKED_AT_PERIMETER
                              end
            log_rule_matched(context, ia_result, result.response)
            result
          end

          # Builds a caller's stack and appends it to a passed Rasp rule sample.
          # Stacks are only collected for BLOCKED/MONITORED responses.
          #
          # @param sample [Contrast::Agent::Reporting::RaspRuleSample]
          # @param result [Contrast::Agent::Reporting::AttackResult, nil] previous attack result for this rule, if one
          #   exists, in the case of multiple inputs being found to violate the protection criteria
          # @return [Contrast::Agent::Reporting::RaspRuleSample, nil]
          def append_stack sample, result
            return unless sample && STACK_COLLECTION_RESULTS.include?(result&.response)

            frames = Contrast::Utils::StackTraceUtils.build_protect_report_stack_array
            sample.stack.concat(frames) if frames
          end

          # Appends an attack sample to existing attack results.
          #
          # @param context [Contrast::Agent::RequestContext] the context of the request in which this input is
          #   evaluated.
          # @param ia_result [Contrast::Agent::Reporting::Settings::InputAnalysisResult] the analysis of the input that
          #   was determined to be an attack
          # @param result [Contrast::Agent::Reporting::AttackResult, nil] previous attack result for this rule, if one
          #   exists, in the case of multiple inputs being found to violate the protection criteria
          # @param candidate_string [String] the value of the input which may be an attack
          # @param kwargs [Hash] key - value pairs of context individual rules
          #   need to build out details to send to TeamServer to tell the
          #   story of the attack
          def append_sample context, ia_result, result, candidate_string, **kwargs
            return if result.nil?

            new_sample = build_sample(context, ia_result, candidate_string, **kwargs)
            return if new_sample.nil?

            # Attach the caller stack (when warranted) before recording the sample.
            append_stack(new_sample, result)
            result.samples << new_sample
          end

          # Logs that a rule matched an attack vector and is being triggered.
          # Missing input-analysis fields are logged as empty strings.
          def log_rule_matched _context, ia_result, response, _matched_string = nil
            details = {
              rule: rule_name,
              type: ia_result&.input_type || '',
              name: ia_result&.key || '',
              input: ia_result&.value || '',
              result: response
            }
            logger.debug('A successful attack was detected', **details)
          end

          # This method will check against the current context IA and find any results that match
          # the rule id, skipping any the analysis marked as ignorable.
          #
          # @param context [Contrast::Agent::RequestContext]
          # @return [Array<Contrast::Agent::Reporting::InputAnalysis>]
          def gather_ia_results context
            analysis_results = context&.agent_input_analysis&.results
            return [] unless analysis_results

            analysis_results.select do |ia_result|
              ia_result.rule_id == rule_name &&
                ia_result.score_level != Contrast::Agent::Reporting::ScoreLevel::IGNORE
            end
          end

          # Check whether the result represents a blocked violation. Used for
          # raise checks.
          #
          # @param result [Contrast::Agent::Reporting::AttackResult, nil]
          # @return [Boolean]
          def blocked_violation? result
            # Safe navigation: a nil result can never equal BLOCKED, so this
            # yields false just as the original explicit guard did.
            result&.response == Contrast::Agent::Reporting::ResponseType::BLOCKED
          end

          private

          # Block At Perimeter mode has been deprecated in sqli_worth_watching_v2
          # and should be treated equivalent to Blocked mode if set.
          #
          # @param ia_result [Contrast::Agent::Reporting::InputAnalysis, nil]
          # @return [Boolean] true only for the sqli / no-sqli rules
          def blocked_rule? ia_result
            rule = ia_result&.rule_id
            rule == Contrast::Agent::Protect::Rule::Sqli::NAME ||
              rule == Contrast::Agent::Protect::Rule::NoSqli::NAME
          end

          # Logs that an input matched the rule without mounting a successful attack.
          def log_rule_probed _context, ia_result
            logger.debug('An unsuccessful attack was detected',
                         rule: rule_name,
                         type: ia_result&.input_type,
                         name: ia_result&.key,
                         input: ia_result&.value)
          end

          # Some rules are reported as suspicious, rather than exploited or probed, b/c they don't actually follow
          # input tracing or other detection types that provide enough confidence for a determination.
          #
          # @param ia_result [Contrast::Agent::Reporting::InputAnalysis, nil]
          # @return [Boolean]
          def suspicious_rule? ia_result
            SUSPICIOUS_REPORTING_RULES.member?(ia_result&.rule_id)
          end

          # Handles the Response type for different Protect rules. Some rules need to report SUSPICIOUS over PROBED in
          # MONITORED mode.
          #
          # @param ia_result [Contrast::Agent::Reporting::InputAnalysis] the analysis of the input that was
          #   determined to be an attack
          # @return [Contrast::Agent::Reporting::ResponseType]
          def assign_reporter_response_type ia_result
            return Contrast::Agent::Reporting::ResponseType::SUSPICIOUS if suspicious_rule?(ia_result)

            Contrast::Agent::Reporting::ResponseType::PROBED
          end

          # Finds an attacker during postfilter, considering only input-analysis
          # results scored as definite attacks.
          #
          # @param context [Contrast::Agent::RequestContext]
          # @param potential_attack_string [String, nil]
          # @param kwargs [Hash] rule-specific report-building context
          # @return [Contrast::Agent::Reporting::AttackResult, nil]
          def find_postfilter_attacker context, potential_attack_string, **kwargs
            definite = gather_ia_results(context).select do |ia_result|
              ia_result.score_level == Contrast::Agent::Reporting::ScoreLevel::DEFINITEATTACK
            end
            find_attacker_with_results(context, potential_attack_string, definite, **kwargs)
          end
        end
      end
    end
  end
end