# Copyright (c) 2023 Contrast Security, Inc. See https://www.contrastsecurity.com/enduser-terms-0317a for more details.
# frozen_string_literal: true

require 'contrast/utils/object_share'
require 'contrast/agent/protect/rule/unsafe_file_upload/unsafe_file_upload'
require 'contrast/agent/protect/input_analyzer/input_analyzer'
require 'contrast/utils/input_classification_base'

module Contrast
  module Agent
    module Protect
      module Rule
        # Input Classification stage for the Unsafe File Upload rule.
        # Each analyzed input ends up marked as either DEFINITEATTACK or IGNORE.
        module UnsafeFileUploadInputClassification
          # Match id reported when the agent-lib score crosses the attack threshold.
          UNSAFE_UPLOAD_MATCH = 'unsafe-file-upload-input-tracing-v1'

          class << self
            include InputClassificationBase

            private

            # Evaluate a single user input through agent-lib and build an
            # InputAnalysisResult for it. The result is scored DEFINITEATTACK
            # when the evaluation meets THRESHOLD, otherwise IGNORE, and its
            # key is derived from the multipart input type.
            #
            # @param request [Contrast::Agent::Request] the current request context.
            # @param rule_id [String] the name of the Protect rule.
            # @param input_type [Contrast::Agent::Reporting::InputType] the type of the user input.
            # @param value [String, Array<String>] the value of the input.
            # @return [Contrast::Agent::Reporting::InputAnalysisResult, nil]
            #   nil when agent-lib is unavailable or produced no evaluation.
            def create_new_input_result request, rule_id, input_type, value
              agent_lib = Contrast::AGENT_LIB
              return unless agent_lib

              # All inputs for this rule are evaluated as multipart names.
              evaluation = agent_lib.eval_input(value,
                                                agent_lib.input_set[:MULTIPART_NAME],
                                                agent_lib.rule_set[rule_id],
                                                agent_lib.eval_option[:NONE])
              return unless evaluation

              result = new_ia_result(rule_id, input_type, request.path, value)
              if evaluation.score >= THRESHOLD
                result.score_level = DEFINITEATTACK
                result.ids << UNSAFE_UPLOAD_MATCH
              else
                result.score_level = IGNORE
              end
              # Map the multipart input type onto the content-disposition key
              # it was extracted from; anything else gets an empty key.
              result.key = case input_type
                           when MULTIPART_FIELD_NAME then Contrast::Agent::Protect::InputAnalyzer::DISPOSITION_FILENAME
                           when MULTIPART_NAME then Contrast::Agent::Protect::InputAnalyzer::DISPOSITION_NAME
                           else Contrast::Utils::ObjectShare::EMPTY_STRING
                           end
              result
            end
          end
        end
      end
    end
  end
end