# Copyright (c) 2023 Contrast Security, Inc. See https://www.contrastsecurity.com/enduser-terms-0317a for more details.
# frozen_string_literal: true

require 'contrast/agent/protect/rule/base'
require 'contrast/agent/protect/rule/xss/reflected_xss_input_classification'
require 'contrast/agent/reporting/details/xss_details'
require 'contrast/agent/reporting/details/xss_match'
require 'contrast/agent/reporting/input_analysis/input_type'

module Contrast
  module Agent
    module Protect
      module Rule
        # The Ruby implementation of the Protect Cross-Site Scripting rule.
        class Xss < Contrast::Agent::Protect::Rule::Base
          include Contrast::Agent::Reporting::InputType

          NAME = 'reflected-xss'
          BLOCK_MESSAGE = 'XSS rule triggered. Response blocked.'
          APPLICABLE_USER_INPUTS = [
            BODY, PARAMETER_NAME, PARAMETER_VALUE, JSON_VALUE, MULTIPART_VALUE,
            MULTIPART_FIELD_NAME, XML_VALUE, DWR_VALUE, URI, QUERYSTRING
          ].cs__freeze

          # @return [String] the name of this rule.
          def rule_name
            NAME
          end

          # Return the specific blocking message for this rule.
          #
          # @return [String] the reason for the raised security exception.
          def block_message
            BLOCK_MESSAGE
          end

          # Prefilter check, always called before infilter, to determine whether the rule is
          # prefilter capable and not disabled or otherwise excluded by URL or input exclusions.
          #
          # @param context [Contrast::Agent::RequestContext]
          # @return [Boolean]
          def prefilter? context
            return false unless enabled?
            return false if protect_excluded_by_url?(rule_name)
            return false unless context
            return false unless (results = gather_ia_results(context)) && results.any?
            return false if protect_excluded_by_input?(results)

            true
          end

          # Evaluate the input analysis results for this request and report any attacks found.
          #
          # @param context [Contrast::Agent::RequestContext]
          # @raise [Contrast::SecurityException] if the rule is in block mode and a violation is found.
          def prefilter context
            return unless prefilter?(context)

            ia_results = gather_ia_results(context)
            ia_results.each do |ia_result|
              result = build_attack_result(context)
              result = build_attack_without_match(context, ia_result, result)
              next unless result

              append_to_activity(context, result)
              # XSS is being triggered, so add it to the triggered rules; the input analysis
              # won't be repeated for this rule during the current request.
              record_triggered(context)

              raise(Contrast::SecurityException.new(self, block_message)) if blocked_violation?(result)
            end
          end

          # XSS is evaluated only on prefilter.
          def infilter? _context
            false
          end

          # XSS is evaluated only on prefilter.
          def postfilter? _context
            false
          end

          # Reflected XSS input classification.
          #
          # @return [Module]
          def classification
            @_classification ||= Contrast::Agent::Protect::Rule::ReflectedXssInputClassification.cs__freeze
          end

          # This rule does not support streaming evaluation.
          #
          # @return [Boolean]
          def stream_safe?
            false
          end

          # @return [Array<String>] the input types this rule applies to.
          def applicable_user_inputs
            APPLICABLE_USER_INPUTS
          end

          # Adding XSS details.
          #
          # @param context [Contrast::Agent::RequestContext]
          # @param ia_result [Contrast::Agent::Reporting::InputAnalysisResult]
          # @param _xss_string [String, nil] unused
          # @param _kwargs [Hash] unused
          # @return [Contrast::Agent::Reporting::RaspRuleSample]
          def build_sample context, ia_result, _xss_string, **_kwargs
            sample = build_base_sample(context, ia_result)
            sample.details = Contrast::Agent::Reporting::Details::XssDetails.new
            sample.details.input = ia_result.value
            # TODO: RUBY-99999 check whether the ReflectedXss matches are needed.
            xss_match = Contrast::Agent::Reporting::Details::XssMatch.new(ia_result.value)
            sample.details.matches << xss_match unless xss_match.empty?
            sample
          end

          private

          # Find the attacker among the input analysis results, skipping results scored
          # at the IGNORE level.
          #
          # @param context [Contrast::Agent::RequestContext]
          # @param potential_attack_string [String, nil]
          # @return [Contrast::Agent::Reporting::AttackResult, nil]
          def find_postfilter_attacker context, potential_attack_string, **kwargs
            ia_results = gather_ia_results(context)
            ia_results.reject! do |ia_result|
              ia_result.score_level == Contrast::Agent::Reporting::ScoreLevel::IGNORE
            end

            find_attacker_with_results(context, potential_attack_string, ia_results, **kwargs)
          end
        end
      end
    end
  end
end
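
# Usage sketch (illustrative only, not part of this file): the agent's protect framework
# is expected to instantiate the rule and drive it during request prefilter. Assuming
# `context` is a Contrast::Agent::RequestContext already populated with input analysis
# results, the flow implemented above would be exercised roughly as:
#
#   rule = Contrast::Agent::Protect::Rule::Xss.new
#   rule.prefilter(context) if rule.prefilter?(context) # raises Contrast::SecurityException in block mode
#
# The no-argument constructor and the shape of `context` are assumptions here; the actual
# wiring lives in the agent's rule registration, not in this class.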