# Copyright (c) 2023 Contrast Security, Inc. See https://www.contrastsecurity.com/enduser-terms-0317a for more details.
# frozen_string_literal: true

require 'contrast/agent/reporting/input_analysis/input_type'
require 'contrast/agent/reporting/input_analysis/score_level'
require 'contrast/agent/reporting/input_analysis/details/bot_blocker_details'
require 'contrast/utils/input_classification_base'
require 'contrast/utils/object_share'

module Contrast
  module Agent
    module Protect
      module Rule
        # This module handles the Input Classification stage of the Bot Blocker rule.
        # As a result, the input is marked as DEFINITEATTACK or IGNORE,
        # to be analyzed at the sink level.
        module BotBlockerInputClassification
          USER_AGENT = 'USER_AGENT'
          AGENT_LIB_HEADER_NAME = 'user-agent'
          BOT_BLOCKER_MATCH = 'bot-blocker-input-tracing-v1'

          class << self
            include InputClassificationBase

            # The Input Classification stage is done to determine whether a user input is
            # DEFINITEATTACK or is to be ignored.
            #
            # @param rule_id [String] Name of the protect rule.
            # @param input_type [Contrast::Agent::Reporting::InputType] The type of the user input.
            # @param value [Hash] the value of the input.
            # @param input_analysis [Contrast::Agent::Reporting::InputAnalysis] Holds all the results from the
            #   agent analysis of the current request.
            # @return [Contrast::Agent::Reporting::InputAnalysis, nil] ia with updated results.
            def classify rule_id, input_type, value, input_analysis
              return unless (rule = Contrast::PROTECT.rule(rule_id))
              return unless rule.applicable_user_inputs.include?(input_type)
              return unless input_analysis.request

              value.each_value do |val|
                result = create_new_input_result(input_analysis.request, rule.rule_name, input_type, val)
                append_result(input_analysis, result)
              end
              input_analysis
            rescue StandardError => e
              logger.debug("An error was recorded in the input classification of the #{ rule_id }", error: e)
            end

            private

            # This method checks whether the input should be tagged DEFINITEATTACK or IGNORE,
            # matches the value with its key if needed, and creates a new instance of
            # InputAnalysisResult.
            #
            # @param request [Contrast::Agent::Request] the current request context.
            # @param rule_id [String] The name of the Protect Rule.
            # @param input_type [Contrast::Agent::Reporting::InputType] The type of the user input.
            # @param value [String, Array] the value of the input.
            # @return [Contrast::Agent::Reporting::InputAnalysisResult] res
            def create_new_input_result request, rule_id, input_type, value
              return unless request.headers.key(value) == USER_AGENT
              return unless Contrast::AGENT_LIB

              # If there is no match this returns nil.
              header_eval = Contrast::AGENT_LIB.eval_header(AGENT_LIB_HEADER_NAME,
                                                            value,
                                                            Contrast::AGENT_LIB.rule_set[rule_id],
                                                            Contrast::AGENT_LIB.eval_option[:NONE])

              ia_result = new_ia_result(rule_id, input_type, request.path, value)
              score = header_eval&.score || 0
              if score >= THRESHOLD
                ia_result.score_level = DEFINITEATTACK
                ia_result.ids << BOT_BLOCKER_MATCH
                ia_result.details = Contrast::Agent::Reporting::BotBlockerDetails.new
                # details: record the matched bot name and the raw user agent value.
                add_details(ia_result, value)
              else
                ia_result.score_level = IGNORE
              end
              add_needed_key(request, ia_result, input_type, value)
              ia_result
            end

            # Populate the result details with the matched bot name and the user agent value.
            #
            # @param ia_result [Contrast::Agent::Reporting::InputAnalysisResult]
            # @param value [String] the user agent header value.
            def add_details ia_result, value
              ia_result.details.bot = value.downcase
              if value.include?(Contrast::Utils::ObjectShare::CARROT)
                ia_result.details.bot.delete!(Contrast::Utils::ObjectShare::CARROT)
              end
              ia_result.details.user_agent = value
            end
          end
        end
      end
    end
  end
end
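
# A minimal, illustrative usage sketch, kept as comments so the file stays a valid
# Ruby source. It shows how the classify entry point above is typically driven.
# Assumptions (not established by this file): the 'bot-blocker' rule id, the
# Contrast::Agent::Reporting::InputType::HEADER constant, an initialized agent
# (Contrast::PROTECT and Contrast::AGENT_LIB available), and a hypothetical
# current_request of type Contrast::Agent::Request whose headers include the
# USER_AGENT value below.
#
#   ia = Contrast::Agent::Reporting::InputAnalysis.new
#   ia.request = current_request
#   headers = { 'USER_AGENT' => 'Mozilla/5.0 ^BadBot' }
#   Contrast::Agent::Protect::Rule::BotBlockerInputClassification.classify(
#     'bot-blocker', Contrast::Agent::Reporting::InputType::HEADER, headers, ia)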