# Copyright (c) 2023 Contrast Security, Inc. See https://www.contrastsecurity.com/enduser-terms-0317a for more details.
# frozen_string_literal: true

require 'contrast/agent/reporting/input_analysis/input_type'
require 'contrast/agent/reporting/input_analysis/score_level'
require 'contrast/agent/reporting/details/bot_blocker_details'
require 'contrast/agent/protect/rule/input_classification/base'
require 'contrast/utils/object_share'

module Contrast
  module Agent
    module Protect
      module Rule
        # Input Classification stage for the Bot Blocker rule. Each incoming
        # user-agent header is scored via AgentLib and the resulting
        # InputAnalysisResult is marked WORTHWATCHING or IGNORE so the sink
        # level analysis can pick it up later.
        module BotBlockerInputClassification
          # Canonical header key expected back from Request#headers.
          USER_AGENT = 'USER_AGENT'
          # Header name handed to AgentLib for evaluation.
          AGENT_LIB_HEADER_NAME = 'user-agent'
          # Identifier recorded on results that matched a bot signature.
          BOT_BLOCKER_MATCH = 'bot-blocker-input-tracing-v1'

          class << self
            include Contrast::Agent::Protect::Rule::InputClassification::Base

            private

            # Filter inputs down to the user-agent header before deferring to
            # the shared Base implementation, which builds the
            # InputAnalysisResult.
            #
            # NOTE(review): Hash#key performs a reverse value->key lookup, so
            # this only proceeds when the first header whose *value* equals
            # +value+ is keyed as USER_AGENT — confirm this is the intended
            # contract of Request#headers.
            #
            # @param request [Contrast::Agent::Request] the current request context.
            # @param rule_id [String] The name of the Protect Rule.
            # @param input_type [Contrast::Agent::Reporting::InputType] The type of the user input.
            # @param value [String, Array] the value of the input.
            # @return [Contrast::Agent::Reporting::InputAnalysisResult, nil]
            def create_new_input_result request, rule_id, input_type, value
              return unless request.headers.key(value) == USER_AGENT

              super(request, rule_id, input_type, value)
            end

            # Ask AgentLib directly for an evaluation of the header value
            # against this rule's rule set.
            #
            # @param rule_id [String] The name of the Protect Rule.
            # @param _input_type [Contrast::Agent::Reporting::InputType] The type of the user input.
            # @param value [String, Array] the value of the input.
            def build_input_eval rule_id, _input_type, value
              rule_set = Contrast::AGENT_LIB.rule_set[rule_id]
              no_options = Contrast::AGENT_LIB.eval_option[:NONE]
              Contrast::AGENT_LIB.eval_header(AGENT_LIB_HEADER_NAME, value, rule_set, no_options)
            end

            # Translate an AgentLib evaluation into an InputAnalysisResult:
            # scores at or above THRESHOLD become DEFINITEATTACK with bot
            # details attached; everything else is IGNORE.
            #
            # @param rule_id [String] The name of the Protect Rule.
            # @param input_type [Contrast::Agent::Reporting::InputType] The type of the user input.
            # @param value [String, Array] the value of the input.
            # @param request [Contrast::Agent::Request] the current request context.
            # @param input_eval [Contrast::AgentLib::EvalResult] the result of the input evaluation.
            # @return [Contrast::Agent::Reporting::InputAnalysisResult]
            def build_ia_result rule_id, input_type, value, request, input_eval
              result = new_ia_result(rule_id, input_type, request.path, value)
              # A missing evaluation (or missing score) counts as zero.
              definite_attack = (input_eval&.score || 0) >= THRESHOLD
              result.score_level = definite_attack ? DEFINITEATTACK : IGNORE
              if definite_attack
                result.ids << BOT_BLOCKER_MATCH
                # details:
                result.details = Contrast::Agent::Reporting::Details::BotBlockerDetails.new
                add_details(result, value)
              end
              result
            end

            # Populate the bot-blocker details: the normalized (downcased,
            # caret-stripped) bot name plus the raw user-agent string.
            def add_details ia_result, value
              ia_result.details.bot = value.downcase
              # The caret separates the signature name from its pattern; drop it.
              ia_result.details.bot.delete!(Contrast::Utils::ObjectShare::CARROT) if
                  value.include?(Contrast::Utils::ObjectShare::CARROT)
              ia_result.details.user_agent = value
            end
          end
        end
      end
    end
  end
end