# frozen_string_literal: true

# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Auto-generated by gapic-generator-ruby. DO NOT EDIT!

require "google/cloud/errors"
require "google/cloud/aiplatform/v1/prediction_service_pb"
require "google/cloud/ai_platform/v1/prediction_service/rest/service_stub"
require "google/cloud/location/rest"
require "google/iam/v1/rest"

module Google
  module Cloud
    module AIPlatform
      module V1
        module PredictionService
          module Rest
            ##
            # REST client for the PredictionService service.
            #
            # A service for online predictions and explanations.
            #
            class Client
              # @private
              # Value for the x-goog-api-version header; when empty the header
              # is omitted by the RPC methods below.
              API_VERSION = ""

              # @private
              # Endpoint template; the "$UNIVERSE_DOMAIN$" placeholder is
              # substituted by the transport stub with the configured universe
              # domain to form the effective service endpoint.
              DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.$UNIVERSE_DOMAIN$"

              # Mixes in resource path helper methods defined in the Paths module.
              include Paths

              # @private
              # The underlying REST transport stub that performs all service calls.
              attr_reader :prediction_service_stub

              ##
              # Configure the PredictionService Client class.
              #
              # See {::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client::Configuration}
              # for a description of the configuration fields.
              #
              # @example
              #
              #   # Modify the configuration for all PredictionService clients
              #   ::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.configure do |config|
              #     config.timeout = 10.0
              #   end
              #
              # @yield [config] Configure the Client client.
              # @yieldparam config [Client::Configuration]
              #
              # @return [Client::Configuration]
              #
              def self.configure
                @configure ||= begin
                  namespace = ["Google", "Cloud", "AIPlatform", "V1"]
                  # Walk outward through the namespace until an ancestor module
                  # exposing a `configure` method is found; its configuration
                  # becomes the parent of this client's default configuration.
                  parent_config = nil
                  until namespace.empty?
                    candidate = const_get namespace.join("::")
                    if candidate.respond_to? :configure
                      parent_config = candidate.configure
                      break
                    end
                    namespace.pop
                  end
                  Client::Configuration.new parent_config
                end
                yield @configure if block_given?
                @configure
              end

              ##
              # Configure the PredictionService Client instance.
              #
              # The configuration is set to the derived mode, meaning that values can be changed,
              # but structural changes (adding new fields, etc.) are not allowed. Structural changes
              # should be made on {Client.configure}.
              #
              # See {::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client::Configuration}
              # for a description of the configuration fields.
              #
              # @yield [config] Configure the Client client.
              # @yieldparam config [Client::Configuration]
              #
              # @return [Client::Configuration]
              #
              def configure
                # Hand the per-instance configuration to the caller's block (if
                # given) for adjustment, then return it.
                @config.tap do |conf|
                  yield conf if block_given?
                end
              end

              ##
              # The effective universe domain
              #
              # @return [String]
              #
              def universe_domain
                # Delegate to the transport stub's resolved universe domain.
                stub = @prediction_service_stub
                stub.universe_domain
              end

              ##
              # Create a new PredictionService REST client object.
              #
              # @example
              #
              #   # Create a client using the default configuration
              #   client = ::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
              #
              #   # Create a client using a custom configuration
              #   client = ::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new do |config|
              #     config.timeout = 10.0
              #   end
              #
              # @yield [config] Configure the PredictionService client.
              # @yieldparam config [Client::Configuration]
              #
              def initialize
                # Create the configuration object, derived from the class-level
                # defaults returned by {Client.configure}.
                @config = Configuration.new Client.configure

                # Yield the configuration if needed
                yield @config if block_given?

                # Create credentials
                credentials = @config.credentials
                # Use self-signed JWT if the endpoint is unchanged from default,
                # but only if the default endpoint does not have a region prefix.
                enable_self_signed_jwt = @config.endpoint.nil? ||
                                         (@config.endpoint == Configuration::DEFAULT_ENDPOINT &&
                                         !@config.endpoint.split(".").first.include?("-"))
                credentials ||= Credentials.default scope: @config.scope,
                                                    enable_self_signed_jwt: enable_self_signed_jwt
                # Coerce raw credentials (String or Hash) into a Credentials object.
                if credentials.is_a?(::String) || credentials.is_a?(::Hash)
                  credentials = Credentials.new credentials, scope: @config.scope
                end

                # Prefer an explicitly configured quota project; otherwise fall
                # back to the quota project carried by the credentials, if any.
                @quota_project_id = @config.quota_project
                @quota_project_id ||= credentials.quota_project_id if credentials.respond_to? :quota_project_id

                # Build the REST transport stub that performs the actual calls.
                @prediction_service_stub = ::Google::Cloud::AIPlatform::V1::PredictionService::Rest::ServiceStub.new(
                  endpoint: @config.endpoint,
                  endpoint_template: DEFAULT_ENDPOINT_TEMPLATE,
                  universe_domain: @config.universe_domain,
                  credentials: credentials
                )

                # Mix-in clients reuse this client's credentials, quota project,
                # resolved endpoint, and universe domain.
                @location_client = Google::Cloud::Location::Locations::Rest::Client.new do |config|
                  config.credentials = credentials
                  config.quota_project = @quota_project_id
                  config.endpoint = @prediction_service_stub.endpoint
                  config.universe_domain = @prediction_service_stub.universe_domain
                  config.bindings_override = @config.bindings_override
                end

                @iam_policy_client = Google::Iam::V1::IAMPolicy::Rest::Client.new do |config|
                  config.credentials = credentials
                  config.quota_project = @quota_project_id
                  config.endpoint = @prediction_service_stub.endpoint
                  config.universe_domain = @prediction_service_stub.universe_domain
                  config.bindings_override = @config.bindings_override
                end
              end

              ##
              # Get the associated client for mix-in of the Locations.
              #
              # Configured in the constructor to share this client's credentials,
              # quota project, endpoint, and universe domain.
              #
              # @return [Google::Cloud::Location::Locations::Rest::Client]
              #
              attr_reader :location_client

              ##
              # Get the associated client for mix-in of the IAMPolicy.
              #
              # Configured in the constructor to share this client's credentials,
              # quota project, endpoint, and universe domain.
              #
              # @return [Google::Iam::V1::IAMPolicy::Rest::Client]
              #
              attr_reader :iam_policy_client

              # Service calls

              ##
              # Perform an online prediction.
              #
              # @overload predict(request, options = nil)
              #   Pass arguments to `predict` via a request object, either of type
              #   {::Google::Cloud::AIPlatform::V1::PredictRequest} or an equivalent Hash.
              #
              #   @param request [::Google::Cloud::AIPlatform::V1::PredictRequest, ::Hash]
              #     A request object representing the call parameters. Required. To specify no
              #     parameters, or to keep all the default parameter values, pass an empty Hash.
              #   @param options [::Gapic::CallOptions, ::Hash]
              #     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
              #
              # @overload predict(endpoint: nil, instances: nil, parameters: nil)
              #   Pass arguments to `predict` via keyword arguments. Note that at
              #   least one keyword argument is required. To specify no parameters, or to keep all
              #   the default parameter values, pass an empty Hash as a request object (see above).
              #
              #   @param endpoint [::String]
              #     Required. The name of the Endpoint requested to serve the prediction.
              #     Format:
              #     `projects/{project}/locations/{location}/endpoints/{endpoint}`
              #   @param instances [::Array<::Google::Protobuf::Value, ::Hash>]
              #     Required. The instances that are the input to the prediction call.
              #     A DeployedModel may have an upper limit on the number of instances it
              #     supports per request, and when it is exceeded the prediction call errors
              #     in case of AutoML Models, or, in case of customer created Models, the
              #     behaviour is as documented by that Model.
              #     The schema of any single instance may be specified via Endpoint's
              #     DeployedModels' [Model's][google.cloud.aiplatform.v1.DeployedModel.model]
              #     [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
              #     {::Google::Cloud::AIPlatform::V1::PredictSchemata#instance_schema_uri instance_schema_uri}.
              #   @param parameters [::Google::Protobuf::Value, ::Hash]
              #     The parameters that govern the prediction. The schema of the parameters may
              #     be specified via Endpoint's DeployedModels' [Model's
              #     ][google.cloud.aiplatform.v1.DeployedModel.model]
              #     [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
              #     {::Google::Cloud::AIPlatform::V1::PredictSchemata#parameters_schema_uri parameters_schema_uri}.
              # @yield [result, operation] Access the result along with the TransportOperation object
              # @yieldparam result [::Google::Cloud::AIPlatform::V1::PredictResponse]
              # @yieldparam operation [::Gapic::Rest::TransportOperation]
              #
              # @return [::Google::Cloud::AIPlatform::V1::PredictResponse]
              #
              # @raise [::Google::Cloud::Error] if the REST call is aborted.
              #
              # @example Basic example
              #   require "google/cloud/ai_platform/v1"
              #
              #   # Create a client object. The client can be reused for multiple calls.
              #   client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
              #
              #   # Create a request. To set request fields, pass in keyword arguments.
              #   request = Google::Cloud::AIPlatform::V1::PredictRequest.new
              #
              #   # Call the predict method.
              #   result = client.predict request
              #
              #   # The returned object is of type Google::Cloud::AIPlatform::V1::PredictResponse.
              #   p result
              #
              def predict request, options = nil
                raise ::ArgumentError, "request must be provided" if request.nil?

                # Coerce a Hash request into the proper protobuf message type.
                request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::PredictRequest

                # Normalize a Hash (or nil) into a CallOptions object.
                options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

                # Start from the metadata configured for this RPC.
                metadata = @config.rpcs.predict.metadata.to_h

                # Attach the standard x-goog-* request headers.
                metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
                  lib_name: @config.lib_name,
                  lib_version: @config.lib_version,
                  gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
                  transports_version_send: [:rest]
                )
                metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
                metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

                # Per-RPC defaults take precedence over client-wide defaults.
                options.apply_defaults timeout:      @config.rpcs.predict.timeout,
                                       metadata:     metadata,
                                       retry_policy: @config.rpcs.predict.retry_policy
                options.apply_defaults timeout:      @config.timeout,
                                       metadata:     @config.metadata,
                                       retry_policy: @config.retry_policy

                # Invoke the stub; `return` inside the block is a non-local
                # return that makes the call result this method's return value.
                @prediction_service_stub.predict request, options do |result, operation|
                  yield result, operation if block_given?
                  return result
                end
              rescue ::Gapic::Rest::Error => e
                raise ::Google::Cloud::Error.from_error(e)
              end

              ##
              # Perform an online prediction with an arbitrary HTTP payload.
              #
              # The response includes the following HTTP headers:
              #
              # * `X-Vertex-AI-Endpoint-Id`: ID of the
              # {::Google::Cloud::AIPlatform::V1::Endpoint Endpoint} that served this
              # prediction.
              #
              # * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's
              # {::Google::Cloud::AIPlatform::V1::DeployedModel DeployedModel} that served this
              # prediction.
              #
              # @overload raw_predict(request, options = nil)
              #   Pass arguments to `raw_predict` via a request object, either of type
              #   {::Google::Cloud::AIPlatform::V1::RawPredictRequest} or an equivalent Hash.
              #
              #   @param request [::Google::Cloud::AIPlatform::V1::RawPredictRequest, ::Hash]
              #     A request object representing the call parameters. Required. To specify no
              #     parameters, or to keep all the default parameter values, pass an empty Hash.
              #   @param options [::Gapic::CallOptions, ::Hash]
              #     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
              #
              # @overload raw_predict(endpoint: nil, http_body: nil)
              #   Pass arguments to `raw_predict` via keyword arguments. Note that at
              #   least one keyword argument is required. To specify no parameters, or to keep all
              #   the default parameter values, pass an empty Hash as a request object (see above).
              #
              #   @param endpoint [::String]
              #     Required. The name of the Endpoint requested to serve the prediction.
              #     Format:
              #     `projects/{project}/locations/{location}/endpoints/{endpoint}`
              #   @param http_body [::Google::Api::HttpBody, ::Hash]
              #     The prediction input. Supports HTTP headers and arbitrary data payload.
              #
              #     A {::Google::Cloud::AIPlatform::V1::DeployedModel DeployedModel} may have an
              #     upper limit on the number of instances it supports per request. When this
              #     limit is exceeded for an AutoML model, the
              #     {::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client#raw_predict RawPredict}
              #     method returns an error. When this limit is exceeded for a custom-trained
              #     model, the behavior varies depending on the model.
              #
              #     You can specify the schema for each instance in the
              #     {::Google::Cloud::AIPlatform::V1::PredictSchemata#instance_schema_uri predict_schemata.instance_schema_uri}
              #     field when you create a {::Google::Cloud::AIPlatform::V1::Model Model}. This
              #     schema applies when you deploy the `Model` as a `DeployedModel` to an
              #     {::Google::Cloud::AIPlatform::V1::Endpoint Endpoint} and use the `RawPredict`
              #     method.
              # @yield [result, operation] Access the result along with the TransportOperation object
              # @yieldparam result [::Google::Api::HttpBody]
              # @yieldparam operation [::Gapic::Rest::TransportOperation]
              #
              # @return [::Google::Api::HttpBody]
              #
              # @raise [::Google::Cloud::Error] if the REST call is aborted.
              #
              # @example Basic example
              #   require "google/cloud/ai_platform/v1"
              #
              #   # Create a client object. The client can be reused for multiple calls.
              #   client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
              #
              #   # Create a request. To set request fields, pass in keyword arguments.
              #   request = Google::Cloud::AIPlatform::V1::RawPredictRequest.new
              #
              #   # Call the raw_predict method.
              #   result = client.raw_predict request
              #
              #   # The returned object is of type Google::Api::HttpBody.
              #   p result
              #
              def raw_predict request, options = nil
                raise ::ArgumentError, "request must be provided" if request.nil?

                # Coerce a Hash request into the proper protobuf message type.
                request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::RawPredictRequest

                # Normalize a Hash (or nil) into a CallOptions object.
                options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

                # Start from the metadata configured for this RPC.
                metadata = @config.rpcs.raw_predict.metadata.to_h

                # Attach the standard x-goog-* request headers.
                metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
                  lib_name: @config.lib_name,
                  lib_version: @config.lib_version,
                  gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
                  transports_version_send: [:rest]
                )
                metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
                metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

                # Per-RPC defaults take precedence over client-wide defaults.
                options.apply_defaults timeout:      @config.rpcs.raw_predict.timeout,
                                       metadata:     metadata,
                                       retry_policy: @config.rpcs.raw_predict.retry_policy
                options.apply_defaults timeout:      @config.timeout,
                                       metadata:     @config.metadata,
                                       retry_policy: @config.retry_policy

                # Invoke the stub; `return` inside the block is a non-local
                # return that makes the call result this method's return value.
                @prediction_service_stub.raw_predict request, options do |result, operation|
                  yield result, operation if block_given?
                  return result
                end
              rescue ::Gapic::Rest::Error => e
                raise ::Google::Cloud::Error.from_error(e)
              end

              ##
              # Perform a streaming online prediction with an arbitrary HTTP payload.
              #
              # @overload stream_raw_predict(request, options = nil)
              #   Pass arguments to `stream_raw_predict` via a request object, either of type
              #   {::Google::Cloud::AIPlatform::V1::StreamRawPredictRequest} or an equivalent Hash.
              #
              #   @param request [::Google::Cloud::AIPlatform::V1::StreamRawPredictRequest, ::Hash]
              #     A request object representing the call parameters. Required. To specify no
              #     parameters, or to keep all the default parameter values, pass an empty Hash.
              #   @param options [::Gapic::CallOptions, ::Hash]
              #     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
              #
              # @overload stream_raw_predict(endpoint: nil, http_body: nil)
              #   Pass arguments to `stream_raw_predict` via keyword arguments. Note that at
              #   least one keyword argument is required. To specify no parameters, or to keep all
              #   the default parameter values, pass an empty Hash as a request object (see above).
              #
              #   @param endpoint [::String]
              #     Required. The name of the Endpoint requested to serve the prediction.
              #     Format:
              #     `projects/{project}/locations/{location}/endpoints/{endpoint}`
              #   @param http_body [::Google::Api::HttpBody, ::Hash]
              #     The prediction input. Supports HTTP headers and arbitrary data payload.
              # @return [::Enumerable<::Google::Api::HttpBody>]
              #
              # @raise [::Google::Cloud::Error] if the REST call is aborted.
              #
              # @example Basic example
              #   require "google/cloud/ai_platform/v1"
              #
              #   # Create a client object. The client can be reused for multiple calls.
              #   client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
              #
              #   # Create a request. To set request fields, pass in keyword arguments.
              #   request = Google::Cloud::AIPlatform::V1::StreamRawPredictRequest.new
              #
              #   # Call the stream_raw_predict method to start streaming.
              #   output = client.stream_raw_predict request
              #
              #   # The returned object is a streamed enumerable yielding elements of type
              #   # ::Google::Api::HttpBody
              #   output.each do |current_response|
              #     p current_response
              #   end
              #
              def stream_raw_predict request, options = nil
                raise ::ArgumentError, "request must be provided" if request.nil?

                request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::StreamRawPredictRequest

                # Converts hash and nil to an options object
                options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

                # Customize the options with defaults
                call_metadata = @config.rpcs.stream_raw_predict.metadata.to_h

                # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
                call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
                  lib_name: @config.lib_name, lib_version: @config.lib_version,
                  gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
                  transports_version_send: [:rest]

                call_metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
                call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

                # Per-RPC defaults are applied first, then client-wide defaults.
                options.apply_defaults timeout:      @config.rpcs.stream_raw_predict.timeout,
                                       metadata:     call_metadata,
                                       retry_policy: @config.rpcs.stream_raw_predict.retry_policy

                options.apply_defaults timeout:      @config.timeout,
                                       metadata:     @config.metadata,
                                       retry_policy: @config.retry_policy

                # Wrap the transport stream in a ServerStream enumerable. The
                # ThreadedEnumerator (presumably running the stub call on a
                # background thread — see gapic-common) coordinates via two
                # queues: for each chunk the producer first waits for consumer
                # demand on in_q, then hands the chunk over on out_q.
                ::Gapic::Rest::ServerStream.new(
                  ::Google::Api::HttpBody,
                  ::Gapic::Rest::ThreadedEnumerator.new do |in_q, out_q|
                    @prediction_service_stub.stream_raw_predict request, options do |chunk|
                      in_q.deq
                      out_q.enq chunk
                    end
                  end
                )
              rescue ::Gapic::Rest::Error => e
                raise ::Google::Cloud::Error.from_error(e)
              end

              ##
              # Perform an unary online prediction request to a gRPC model server for
              # Vertex first-party products and frameworks.
              #
              # @overload direct_predict(request, options = nil)
              #   Pass arguments to `direct_predict` via a request object, either of type
              #   {::Google::Cloud::AIPlatform::V1::DirectPredictRequest} or an equivalent Hash.
              #
              #   @param request [::Google::Cloud::AIPlatform::V1::DirectPredictRequest, ::Hash]
              #     A request object representing the call parameters. Required. To specify no
              #     parameters, or to keep all the default parameter values, pass an empty Hash.
              #   @param options [::Gapic::CallOptions, ::Hash]
              #     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
              #
              # @overload direct_predict(endpoint: nil, inputs: nil, parameters: nil)
              #   Pass arguments to `direct_predict` via keyword arguments. Note that at
              #   least one keyword argument is required. To specify no parameters, or to keep all
              #   the default parameter values, pass an empty Hash as a request object (see above).
              #
              #   @param endpoint [::String]
              #     Required. The name of the Endpoint requested to serve the prediction.
              #     Format:
              #     `projects/{project}/locations/{location}/endpoints/{endpoint}`
              #   @param inputs [::Array<::Google::Cloud::AIPlatform::V1::Tensor, ::Hash>]
              #     The prediction input.
              #   @param parameters [::Google::Cloud::AIPlatform::V1::Tensor, ::Hash]
              #     The parameters that govern the prediction.
              # @yield [result, operation] Access the result along with the TransportOperation object
              # @yieldparam result [::Google::Cloud::AIPlatform::V1::DirectPredictResponse]
              # @yieldparam operation [::Gapic::Rest::TransportOperation]
              #
              # @return [::Google::Cloud::AIPlatform::V1::DirectPredictResponse]
              #
              # @raise [::Google::Cloud::Error] if the REST call is aborted.
              #
              # @example Basic example
              #   require "google/cloud/ai_platform/v1"
              #
              #   # Create a client object. The client can be reused for multiple calls.
              #   client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
              #
              #   # Create a request. To set request fields, pass in keyword arguments.
              #   request = Google::Cloud::AIPlatform::V1::DirectPredictRequest.new
              #
              #   # Call the direct_predict method.
              #   result = client.direct_predict request
              #
              #   # The returned object is of type Google::Cloud::AIPlatform::V1::DirectPredictResponse.
              #   p result
              #
              def direct_predict request, options = nil
                raise ::ArgumentError, "request must be provided" if request.nil?

                # Coerce a Hash request into the proper protobuf message type.
                request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::DirectPredictRequest

                # Normalize a Hash (or nil) into a CallOptions object.
                options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

                # Start from the metadata configured for this RPC.
                metadata = @config.rpcs.direct_predict.metadata.to_h

                # Attach the standard x-goog-* request headers.
                metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
                  lib_name: @config.lib_name,
                  lib_version: @config.lib_version,
                  gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
                  transports_version_send: [:rest]
                )
                metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
                metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

                # Per-RPC defaults take precedence over client-wide defaults.
                options.apply_defaults timeout:      @config.rpcs.direct_predict.timeout,
                                       metadata:     metadata,
                                       retry_policy: @config.rpcs.direct_predict.retry_policy
                options.apply_defaults timeout:      @config.timeout,
                                       metadata:     @config.metadata,
                                       retry_policy: @config.retry_policy

                # Invoke the stub; `return` inside the block is a non-local
                # return that makes the call result this method's return value.
                @prediction_service_stub.direct_predict request, options do |result, operation|
                  yield result, operation if block_given?
                  return result
                end
              rescue ::Gapic::Rest::Error => e
                raise ::Google::Cloud::Error.from_error(e)
              end

              ##
              # Perform an unary online prediction request to a gRPC model server for
              # custom containers.
              #
              # @overload direct_raw_predict(request, options = nil)
              #   Pass arguments to `direct_raw_predict` via a request object, either of type
              #   {::Google::Cloud::AIPlatform::V1::DirectRawPredictRequest} or an equivalent Hash.
              #
              #   @param request [::Google::Cloud::AIPlatform::V1::DirectRawPredictRequest, ::Hash]
              #     A request object representing the call parameters. Required. To specify no
              #     parameters, or to keep all the default parameter values, pass an empty Hash.
              #   @param options [::Gapic::CallOptions, ::Hash]
              #     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
              #
              # @overload direct_raw_predict(endpoint: nil, method_name: nil, input: nil)
              #   Pass arguments to `direct_raw_predict` via keyword arguments. Note that at
              #   least one keyword argument is required. To specify no parameters, or to keep all
              #   the default parameter values, pass an empty Hash as a request object (see above).
              #
              #   @param endpoint [::String]
              #     Required. The name of the Endpoint requested to serve the prediction.
              #     Format:
              #     `projects/{project}/locations/{location}/endpoints/{endpoint}`
              #   @param method_name [::String]
              #     Fully qualified name of the API method being invoked to perform
              #     predictions.
              #
              #     Format:
              #     `/namespace.Service/Method/`
              #     Example:
              #     `/tensorflow.serving.PredictionService/Predict`
              #   @param input [::String]
              #     The prediction input.
              # @yield [result, operation] Access the result along with the TransportOperation object
              # @yieldparam result [::Google::Cloud::AIPlatform::V1::DirectRawPredictResponse]
              # @yieldparam operation [::Gapic::Rest::TransportOperation]
              #
              # @return [::Google::Cloud::AIPlatform::V1::DirectRawPredictResponse]
              #
              # @raise [::Google::Cloud::Error] if the REST call is aborted.
              #
              # @example Basic example
              #   require "google/cloud/ai_platform/v1"
              #
              #   # Create a client object. The client can be reused for multiple calls.
              #   client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
              #
              #   # Create a request. To set request fields, pass in keyword arguments.
              #   request = Google::Cloud::AIPlatform::V1::DirectRawPredictRequest.new
              #
              #   # Call the direct_raw_predict method.
              #   result = client.direct_raw_predict request
              #
              #   # The returned object is of type Google::Cloud::AIPlatform::V1::DirectRawPredictResponse.
              #   p result
              #
              def direct_raw_predict request, options = nil
                raise ::ArgumentError, "request must be provided" if request.nil?

                # Coerce a Hash argument into the generated request message.
                request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::DirectRawPredictRequest

                # Accept a Hash (or nil) in place of a CallOptions object.
                options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

                # Start from the per-RPC configuration for this method.
                rpc_config = @config.rpcs.direct_raw_predict
                headers = rpc_config.metadata.to_h

                # Attach the x-goog-api-client, x-goog-user-project and x-goog-api-version headers.
                headers[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
                  lib_name: @config.lib_name,
                  lib_version: @config.lib_version,
                  gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
                  transports_version_send: [:rest]
                )
                headers[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
                headers[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

                # Per-RPC defaults are applied first so they win over client-wide defaults.
                options.apply_defaults timeout:      rpc_config.timeout,
                                       metadata:     headers,
                                       retry_policy: rpc_config.retry_policy
                options.apply_defaults timeout:      @config.timeout,
                                       metadata:     @config.metadata,
                                       retry_policy: @config.retry_policy

                @prediction_service_stub.direct_raw_predict request, options do |result, operation|
                  yield result, operation if block_given?
                  return result
                end
              rescue ::Gapic::Rest::Error => gapic_error
                raise ::Google::Cloud::Error.from_error(gapic_error)
              end

              ##
              # Perform a server-side streaming online prediction request for Vertex
              # LLM streaming.
              #
              # @overload server_streaming_predict(request, options = nil)
              #   Pass arguments to `server_streaming_predict` via a request object, either of type
              #   {::Google::Cloud::AIPlatform::V1::StreamingPredictRequest} or an equivalent Hash.
              #
              #   @param request [::Google::Cloud::AIPlatform::V1::StreamingPredictRequest, ::Hash]
              #     A request object representing the call parameters. Required. To specify no
              #     parameters, or to keep all the default parameter values, pass an empty Hash.
              #   @param options [::Gapic::CallOptions, ::Hash]
              #     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
              #
              # @overload server_streaming_predict(endpoint: nil, inputs: nil, parameters: nil)
              #   Pass arguments to `server_streaming_predict` via keyword arguments. Note that at
              #   least one keyword argument is required. To specify no parameters, or to keep all
              #   the default parameter values, pass an empty Hash as a request object (see above).
              #
              #   @param endpoint [::String]
              #     Required. The name of the Endpoint requested to serve the prediction.
              #     Format:
              #     `projects/{project}/locations/{location}/endpoints/{endpoint}`
              #   @param inputs [::Array<::Google::Cloud::AIPlatform::V1::Tensor, ::Hash>]
              #     The prediction input.
              #   @param parameters [::Google::Cloud::AIPlatform::V1::Tensor, ::Hash]
              #     The parameters that govern the prediction.
              # @return [::Enumerable<::Google::Cloud::AIPlatform::V1::StreamingPredictResponse>]
              #
              # @raise [::Google::Cloud::Error] if the REST call is aborted.
              #
              # @example Basic example
              #   require "google/cloud/ai_platform/v1"
              #
              #   # Create a client object. The client can be reused for multiple calls.
              #   client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
              #
              #   # Create a request. To set request fields, pass in keyword arguments.
              #   request = Google::Cloud::AIPlatform::V1::StreamingPredictRequest.new
              #
              #   # Call the server_streaming_predict method to start streaming.
              #   output = client.server_streaming_predict request
              #
              #   # The returned object is a streamed enumerable yielding elements of type
              #   # ::Google::Cloud::AIPlatform::V1::StreamingPredictResponse
              #   output.each do |current_response|
              #     p current_response
              #   end
              #
              def server_streaming_predict request, options = nil
                raise ::ArgumentError, "request must be provided" if request.nil?

                # Coerce a Hash argument into the generated request message.
                request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::StreamingPredictRequest

                # Accept a Hash (or nil) in place of a CallOptions object.
                options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

                # Start from the per-RPC configuration for this method.
                rpc_config = @config.rpcs.server_streaming_predict
                headers = rpc_config.metadata.to_h

                # Attach the x-goog-api-client, x-goog-user-project and x-goog-api-version headers.
                headers[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
                  lib_name: @config.lib_name,
                  lib_version: @config.lib_version,
                  gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
                  transports_version_send: [:rest]
                )
                headers[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
                headers[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

                # Per-RPC defaults are applied first so they win over client-wide defaults.
                options.apply_defaults timeout:      rpc_config.timeout,
                                       metadata:     headers,
                                       retry_policy: rpc_config.retry_policy
                options.apply_defaults timeout:      @config.timeout,
                                       metadata:     @config.metadata,
                                       retry_policy: @config.retry_policy

                # Wrap the raw chunk callback in a lazily-consumed server-stream enumerable:
                # each deq on the demand queue pulls one chunk through the response queue.
                ::Gapic::Rest::ServerStream.new(
                  ::Google::Cloud::AIPlatform::V1::StreamingPredictResponse,
                  ::Gapic::Rest::ThreadedEnumerator.new do |demand_queue, response_queue|
                    @prediction_service_stub.server_streaming_predict request, options do |chunk|
                      demand_queue.deq
                      response_queue.enq chunk
                    end
                  end
                )
              rescue ::Gapic::Rest::Error => gapic_error
                raise ::Google::Cloud::Error.from_error(gapic_error)
              end

              ##
              # Perform an online explanation.
              #
              # If
              # {::Google::Cloud::AIPlatform::V1::ExplainRequest#deployed_model_id deployed_model_id}
              # is specified, the corresponding DeployModel must have
              # {::Google::Cloud::AIPlatform::V1::DeployedModel#explanation_spec explanation_spec}
              # populated. If
              # {::Google::Cloud::AIPlatform::V1::ExplainRequest#deployed_model_id deployed_model_id}
              # is not specified, all DeployedModels must have
              # {::Google::Cloud::AIPlatform::V1::DeployedModel#explanation_spec explanation_spec}
              # populated.
              #
              # @overload explain(request, options = nil)
              #   Pass arguments to `explain` via a request object, either of type
              #   {::Google::Cloud::AIPlatform::V1::ExplainRequest} or an equivalent Hash.
              #
              #   @param request [::Google::Cloud::AIPlatform::V1::ExplainRequest, ::Hash]
              #     A request object representing the call parameters. Required. To specify no
              #     parameters, or to keep all the default parameter values, pass an empty Hash.
              #   @param options [::Gapic::CallOptions, ::Hash]
              #     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
              #
              # @overload explain(endpoint: nil, instances: nil, parameters: nil, explanation_spec_override: nil, deployed_model_id: nil)
              #   Pass arguments to `explain` via keyword arguments. Note that at
              #   least one keyword argument is required. To specify no parameters, or to keep all
              #   the default parameter values, pass an empty Hash as a request object (see above).
              #
              #   @param endpoint [::String]
              #     Required. The name of the Endpoint requested to serve the explanation.
              #     Format:
              #     `projects/{project}/locations/{location}/endpoints/{endpoint}`
              #   @param instances [::Array<::Google::Protobuf::Value, ::Hash>]
              #     Required. The instances that are the input to the explanation call.
              #     A DeployedModel may have an upper limit on the number of instances it
              #     supports per request, and when it is exceeded the explanation call errors
              #     in case of AutoML Models, or, in case of customer created Models, the
              #     behaviour is as documented by that Model.
              #     The schema of any single instance may be specified via Endpoint's
              #     DeployedModels' [Model's][google.cloud.aiplatform.v1.DeployedModel.model]
              #     [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
              #     {::Google::Cloud::AIPlatform::V1::PredictSchemata#instance_schema_uri instance_schema_uri}.
              #   @param parameters [::Google::Protobuf::Value, ::Hash]
              #     The parameters that govern the prediction. The schema of the parameters may
              #     be specified via Endpoint's DeployedModels' [Model's
              #     ][google.cloud.aiplatform.v1.DeployedModel.model]
              #     [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
              #     {::Google::Cloud::AIPlatform::V1::PredictSchemata#parameters_schema_uri parameters_schema_uri}.
              #   @param explanation_spec_override [::Google::Cloud::AIPlatform::V1::ExplanationSpecOverride, ::Hash]
              #     If specified, overrides the
              #     {::Google::Cloud::AIPlatform::V1::DeployedModel#explanation_spec explanation_spec}
              #     of the DeployedModel. Can be used for explaining prediction results with
              #     different configurations, such as:
              #      - Explaining top-5 predictions results as opposed to top-1;
              #      - Increasing path count or step count of the attribution methods to reduce
              #        approximate errors;
              #      - Using different baselines for explaining the prediction results.
              #   @param deployed_model_id [::String]
              #     If specified, this ExplainRequest will be served by the chosen
              #     DeployedModel, overriding
              #     {::Google::Cloud::AIPlatform::V1::Endpoint#traffic_split Endpoint.traffic_split}.
              # @yield [result, operation] Access the result along with the TransportOperation object
              # @yieldparam result [::Google::Cloud::AIPlatform::V1::ExplainResponse]
              # @yieldparam operation [::Gapic::Rest::TransportOperation]
              #
              # @return [::Google::Cloud::AIPlatform::V1::ExplainResponse]
              #
              # @raise [::Google::Cloud::Error] if the REST call is aborted.
              #
              # @example Basic example
              #   require "google/cloud/ai_platform/v1"
              #
              #   # Create a client object. The client can be reused for multiple calls.
              #   client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
              #
              #   # Create a request. To set request fields, pass in keyword arguments.
              #   request = Google::Cloud::AIPlatform::V1::ExplainRequest.new
              #
              #   # Call the explain method.
              #   result = client.explain request
              #
              #   # The returned object is of type Google::Cloud::AIPlatform::V1::ExplainResponse.
              #   p result
              #
              def explain request, options = nil
                raise ::ArgumentError, "request must be provided" if request.nil?

                # Coerce a Hash argument into the generated request message.
                request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::ExplainRequest

                # Accept a Hash (or nil) in place of a CallOptions object.
                options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

                # Start from the per-RPC configuration for this method.
                rpc_config = @config.rpcs.explain
                headers = rpc_config.metadata.to_h

                # Attach the x-goog-api-client, x-goog-user-project and x-goog-api-version headers.
                headers[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
                  lib_name: @config.lib_name,
                  lib_version: @config.lib_version,
                  gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
                  transports_version_send: [:rest]
                )
                headers[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
                headers[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

                # Per-RPC defaults are applied first so they win over client-wide defaults.
                options.apply_defaults timeout:      rpc_config.timeout,
                                       metadata:     headers,
                                       retry_policy: rpc_config.retry_policy
                options.apply_defaults timeout:      @config.timeout,
                                       metadata:     @config.metadata,
                                       retry_policy: @config.retry_policy

                @prediction_service_stub.explain request, options do |result, operation|
                  yield result, operation if block_given?
                  return result
                end
              rescue ::Gapic::Rest::Error => gapic_error
                raise ::Google::Cloud::Error.from_error(gapic_error)
              end

              ##
              # Generate content with multimodal inputs.
              #
              # @overload generate_content(request, options = nil)
              #   Pass arguments to `generate_content` via a request object, either of type
              #   {::Google::Cloud::AIPlatform::V1::GenerateContentRequest} or an equivalent Hash.
              #
              #   @param request [::Google::Cloud::AIPlatform::V1::GenerateContentRequest, ::Hash]
              #     A request object representing the call parameters. Required. To specify no
              #     parameters, or to keep all the default parameter values, pass an empty Hash.
              #   @param options [::Gapic::CallOptions, ::Hash]
              #     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
              #
              # @overload generate_content(model: nil, contents: nil, system_instruction: nil, tools: nil, tool_config: nil, safety_settings: nil, generation_config: nil)
              #   Pass arguments to `generate_content` via keyword arguments. Note that at
              #   least one keyword argument is required. To specify no parameters, or to keep all
              #   the default parameter values, pass an empty Hash as a request object (see above).
              #
              #   @param model [::String]
              #     Required. The name of the publisher model requested to serve the
              #     prediction. Format:
              #     `projects/{project}/locations/{location}/publishers/*/models/*`
              #   @param contents [::Array<::Google::Cloud::AIPlatform::V1::Content, ::Hash>]
              #     Required. The content of the current conversation with the model.
              #
              #     For single-turn queries, this is a single instance. For multi-turn queries,
              #     this is a repeated field that contains conversation history + latest
              #     request.
              #   @param system_instruction [::Google::Cloud::AIPlatform::V1::Content, ::Hash]
              #     Optional. The user provided system instructions for the model.
              #     Note: only text should be used in parts and content in each part will be in
              #     a separate paragraph.
              #   @param tools [::Array<::Google::Cloud::AIPlatform::V1::Tool, ::Hash>]
              #     Optional. A list of `Tools` the model may use to generate the next
              #     response.
              #
              #     A `Tool` is a piece of code that enables the system to interact with
              #     external systems to perform an action, or set of actions, outside of
              #     knowledge and scope of the model.
              #   @param tool_config [::Google::Cloud::AIPlatform::V1::ToolConfig, ::Hash]
              #     Optional. Tool config. This config is shared for all tools provided in the
              #     request.
              #   @param safety_settings [::Array<::Google::Cloud::AIPlatform::V1::SafetySetting, ::Hash>]
              #     Optional. Per request settings for blocking unsafe content.
              #     Enforced on GenerateContentResponse.candidates.
              #   @param generation_config [::Google::Cloud::AIPlatform::V1::GenerationConfig, ::Hash]
              #     Optional. Generation config.
              # @yield [result, operation] Access the result along with the TransportOperation object
              # @yieldparam result [::Google::Cloud::AIPlatform::V1::GenerateContentResponse]
              # @yieldparam operation [::Gapic::Rest::TransportOperation]
              #
              # @return [::Google::Cloud::AIPlatform::V1::GenerateContentResponse]
              #
              # @raise [::Google::Cloud::Error] if the REST call is aborted.
              #
              # @example Basic example
              #   require "google/cloud/ai_platform/v1"
              #
              #   # Create a client object. The client can be reused for multiple calls.
              #   client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
              #
              #   # Create a request. To set request fields, pass in keyword arguments.
              #   request = Google::Cloud::AIPlatform::V1::GenerateContentRequest.new
              #
              #   # Call the generate_content method.
              #   result = client.generate_content request
              #
              #   # The returned object is of type Google::Cloud::AIPlatform::V1::GenerateContentResponse.
              #   p result
              #
              def generate_content request, options = nil
                raise ::ArgumentError, "request must be provided" if request.nil?

                # Coerce a Hash argument into the generated request message.
                request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::GenerateContentRequest

                # Accept a Hash (or nil) in place of a CallOptions object.
                options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

                # Start from the per-RPC configuration for this method.
                rpc_config = @config.rpcs.generate_content
                headers = rpc_config.metadata.to_h

                # Attach the x-goog-api-client, x-goog-user-project and x-goog-api-version headers.
                headers[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
                  lib_name: @config.lib_name,
                  lib_version: @config.lib_version,
                  gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
                  transports_version_send: [:rest]
                )
                headers[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
                headers[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

                # Per-RPC defaults are applied first so they win over client-wide defaults.
                options.apply_defaults timeout:      rpc_config.timeout,
                                       metadata:     headers,
                                       retry_policy: rpc_config.retry_policy
                options.apply_defaults timeout:      @config.timeout,
                                       metadata:     @config.metadata,
                                       retry_policy: @config.retry_policy

                @prediction_service_stub.generate_content request, options do |result, operation|
                  yield result, operation if block_given?
                  return result
                end
              rescue ::Gapic::Rest::Error => gapic_error
                raise ::Google::Cloud::Error.from_error(gapic_error)
              end

              ##
              # Generate content with multimodal inputs with streaming support.
              #
              # @overload stream_generate_content(request, options = nil)
              #   Pass arguments to `stream_generate_content` via a request object, either of type
              #   {::Google::Cloud::AIPlatform::V1::GenerateContentRequest} or an equivalent Hash.
              #
              #   @param request [::Google::Cloud::AIPlatform::V1::GenerateContentRequest, ::Hash]
              #     A request object representing the call parameters. Required. To specify no
              #     parameters, or to keep all the default parameter values, pass an empty Hash.
              #   @param options [::Gapic::CallOptions, ::Hash]
              #     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
              #
              # @overload stream_generate_content(model: nil, contents: nil, system_instruction: nil, tools: nil, tool_config: nil, safety_settings: nil, generation_config: nil)
              #   Pass arguments to `stream_generate_content` via keyword arguments. Note that at
              #   least one keyword argument is required. To specify no parameters, or to keep all
              #   the default parameter values, pass an empty Hash as a request object (see above).
              #
              #   @param model [::String]
              #     Required. The name of the publisher model requested to serve the
              #     prediction. Format:
              #     `projects/{project}/locations/{location}/publishers/*/models/*`
              #   @param contents [::Array<::Google::Cloud::AIPlatform::V1::Content, ::Hash>]
              #     Required. The content of the current conversation with the model.
              #
              #     For single-turn queries, this is a single instance. For multi-turn queries,
              #     this is a repeated field that contains conversation history + latest
              #     request.
              #   @param system_instruction [::Google::Cloud::AIPlatform::V1::Content, ::Hash]
              #     Optional. The user provided system instructions for the model.
              #     Note: only text should be used in parts and content in each part will be in
              #     a separate paragraph.
              #   @param tools [::Array<::Google::Cloud::AIPlatform::V1::Tool, ::Hash>]
              #     Optional. A list of `Tools` the model may use to generate the next
              #     response.
              #
              #     A `Tool` is a piece of code that enables the system to interact with
              #     external systems to perform an action, or set of actions, outside of
              #     knowledge and scope of the model.
              #   @param tool_config [::Google::Cloud::AIPlatform::V1::ToolConfig, ::Hash]
              #     Optional. Tool config. This config is shared for all tools provided in the
              #     request.
              #   @param safety_settings [::Array<::Google::Cloud::AIPlatform::V1::SafetySetting, ::Hash>]
              #     Optional. Per request settings for blocking unsafe content.
              #     Enforced on GenerateContentResponse.candidates.
              #   @param generation_config [::Google::Cloud::AIPlatform::V1::GenerationConfig, ::Hash]
              #     Optional. Generation config.
              # @return [::Enumerable<::Google::Cloud::AIPlatform::V1::GenerateContentResponse>]
              #
              # @raise [::Google::Cloud::Error] if the REST call is aborted.
              #
              # @example Basic example
              #   require "google/cloud/ai_platform/v1"
              #
              #   # Create a client object. The client can be reused for multiple calls.
              #   client = Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new
              #
              #   # Create a request. To set request fields, pass in keyword arguments.
              #   request = Google::Cloud::AIPlatform::V1::GenerateContentRequest.new
              #
              #   # Call the stream_generate_content method to start streaming.
              #   output = client.stream_generate_content request
              #
              #   # The returned object is a streamed enumerable yielding elements of type
              #   # ::Google::Cloud::AIPlatform::V1::GenerateContentResponse
              #   output.each do |current_response|
              #     p current_response
              #   end
              #
              def stream_generate_content request, options = nil
                raise ::ArgumentError, "request must be provided" if request.nil?

                # Coerce a Hash argument into the generated request message.
                request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::AIPlatform::V1::GenerateContentRequest

                # Accept a Hash (or nil) in place of a CallOptions object.
                options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

                # Start from the per-RPC configuration for this method.
                rpc_config = @config.rpcs.stream_generate_content
                headers = rpc_config.metadata.to_h

                # Attach the x-goog-api-client, x-goog-user-project and x-goog-api-version headers.
                headers[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
                  lib_name: @config.lib_name,
                  lib_version: @config.lib_version,
                  gapic_version: ::Google::Cloud::AIPlatform::V1::VERSION,
                  transports_version_send: [:rest]
                )
                headers[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
                headers[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

                # Per-RPC defaults are applied first so they win over client-wide defaults.
                options.apply_defaults timeout:      rpc_config.timeout,
                                       metadata:     headers,
                                       retry_policy: rpc_config.retry_policy
                options.apply_defaults timeout:      @config.timeout,
                                       metadata:     @config.metadata,
                                       retry_policy: @config.retry_policy

                # Wrap the raw chunk callback in a lazily-consumed server-stream enumerable:
                # each deq on the demand queue pulls one chunk through the response queue.
                ::Gapic::Rest::ServerStream.new(
                  ::Google::Cloud::AIPlatform::V1::GenerateContentResponse,
                  ::Gapic::Rest::ThreadedEnumerator.new do |demand_queue, response_queue|
                    @prediction_service_stub.stream_generate_content request, options do |chunk|
                      demand_queue.deq
                      response_queue.enq chunk
                    end
                  end
                )
              rescue ::Gapic::Rest::Error => gapic_error
                raise ::Google::Cloud::Error.from_error(gapic_error)
              end

              ##
              # Configuration class for the PredictionService REST API.
              #
              # This class represents the configuration for PredictionService REST,
              # providing control over timeouts, retry behavior, logging, transport
              # parameters, and other low-level controls. Certain parameters can also be
              # applied individually to specific RPCs. See
              # {::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client::Configuration::Rpcs}
              # for a list of RPCs that can be configured independently.
              #
              # Configuration can be applied globally to all clients, or to a single client
              # on construction.
              #
              # @example
              #
              #   # Modify the global config, setting the timeout for
              #   # predict to 20 seconds,
              #   # and all remaining timeouts to 10 seconds.
              #   ::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.configure do |config|
              #     config.timeout = 10.0
              #     config.rpcs.predict.timeout = 20.0
              #   end
              #
              #   # Apply the above configuration only to a new client.
              #   client = ::Google::Cloud::AIPlatform::V1::PredictionService::Rest::Client.new do |config|
              #     config.timeout = 10.0
              #     config.rpcs.predict.timeout = 20.0
              #   end
              #
              # @!attribute [rw] endpoint
              #   A custom service endpoint, as a hostname or hostname:port. The default is
              #   nil, indicating to use the default endpoint in the current universe domain.
              #   @return [::String,nil]
              # @!attribute [rw] credentials
              #   Credentials to send with calls. You may provide any of the following types:
              #    *  (`String`) The path to a service account key file in JSON format
              #    *  (`Hash`) A service account key as a Hash
              #    *  (`Google::Auth::Credentials`) A googleauth credentials object
              #       (see the [googleauth docs](https://rubydoc.info/gems/googleauth/Google/Auth/Credentials))
              #    *  (`Signet::OAuth2::Client`) A signet oauth2 client object
              #       (see the [signet docs](https://rubydoc.info/gems/signet/Signet/OAuth2/Client))
              #    *  (`nil`) indicating no credentials
              #   @return [::Object]
              # @!attribute [rw] scope
              #   The OAuth scopes
              #   @return [::Array<::String>]
              # @!attribute [rw] lib_name
              #   The library name as recorded in instrumentation and logging
              #   @return [::String]
              # @!attribute [rw] lib_version
              #   The library version as recorded in instrumentation and logging
              #   @return [::String]
              # @!attribute [rw] timeout
              #   The call timeout in seconds.
              #   @return [::Numeric]
              # @!attribute [rw] metadata
              #   Additional headers to be sent with the call.
              #   @return [::Hash{::Symbol=>::String}]
              # @!attribute [rw] retry_policy
              #   The retry policy. The value is a hash with the following keys:
              #    *  `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
              #    *  `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
              #    *  `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
              #    *  `:retry_codes` (*type:* `Array<String>`) - The error codes that should
              #       trigger a retry.
              #   @return [::Hash]
              # @!attribute [rw] quota_project
              #   A separate project against which to charge quota.
              #   @return [::String]
              # @!attribute [rw] universe_domain
              #   The universe domain within which to make requests. This determines the
              #   default endpoint URL. The default value of nil uses the environment
              #   universe (usually the default "googleapis.com" universe).
              #   @return [::String,nil]
              #
              class Configuration
                extend ::Gapic::Config

                # @private
                # Endpoint used for the default "googleapis.com" universe only. Deprecated.
                DEFAULT_ENDPOINT = "aiplatform.googleapis.com"

                config_attr :endpoint, nil, ::String, nil
                config_attr :credentials, nil do |value|
                  # Accept any of the credential forms documented on the class:
                  # key file path, key hash, googleauth/signet objects, proc,
                  # symbol, or explicit nil for anonymous access.
                  [::String, ::Hash, ::Proc, ::Symbol, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
                    .any? { |kind| kind === value }
                end
                config_attr :scope, nil, ::String, ::Array, nil
                config_attr :lib_name, nil, ::String, nil
                config_attr :lib_version, nil, ::String, nil
                config_attr :timeout, nil, ::Numeric, nil
                config_attr :metadata, nil, ::Hash, nil
                config_attr :retry_policy, nil, ::Hash, ::Proc, nil
                config_attr :quota_project, nil, ::String, nil
                config_attr :universe_domain, nil, ::String, nil

                # @private
                # Overrides for http bindings for the RPCs of this service
                # are only used when this service is used as mixin, and only
                # by the host service.
                # @return [::Hash{::Symbol=>::Array<::Gapic::Rest::GrpcTranscoder::HttpBinding>}]
                config_attr :bindings_override, {}, ::Hash, nil

                # @private
                # Remembers the parent configuration (if any) so attribute
                # lookups can fall back to it, then yields self for block-style
                # configuration.
                def initialize parent_config = nil
                  @parent_config = parent_config unless parent_config.nil?

                  yield self if block_given?
                end

                ##
                # Configurations for individual RPCs
                # @return [Rpcs]
                #
                def rpcs
                  # Memoize; inherit from the parent configuration's RPC
                  # settings when a parent that exposes them is present.
                  return @rpcs if @rpcs
                  inherited = @parent_config.rpcs if defined?(@parent_config) && @parent_config.respond_to?(:rpcs)
                  @rpcs = Rpcs.new inherited
                end

                ##
                # Configuration RPC class for the PredictionService API.
                #
                # Includes fields providing the configuration for each RPC in this service.
                # Each configuration object is of type `Gapic::Config::Method` and includes
                # the following configuration fields:
                #
                #  *  `timeout` (*type:* `Numeric`) - The call timeout in seconds
                #  *  `metadata` (*type:* `Hash{Symbol=>String}`) - Additional headers
                #  *  `retry_policy` (*type:* `Hash`) - The retry policy. The policy fields
                #     include the following keys:
                #      *  `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
                #      *  `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
                #      *  `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
                #      *  `:retry_codes` (*type:* `Array<String>`) - The error codes that should
                #         trigger a retry.
                #
                class Rpcs
                  # The symbols of all RPCs configured on this class, used to
                  # drive initialization. Kept in declaration order.
                  RPC_NAMES = %i[
                    predict
                    raw_predict
                    stream_raw_predict
                    direct_predict
                    direct_raw_predict
                    server_streaming_predict
                    explain
                    generate_content
                    stream_generate_content
                  ].freeze
                  private_constant :RPC_NAMES

                  ##
                  # RPC-specific configuration for `predict`
                  # @return [::Gapic::Config::Method]
                  #
                  attr_reader :predict
                  ##
                  # RPC-specific configuration for `raw_predict`
                  # @return [::Gapic::Config::Method]
                  #
                  attr_reader :raw_predict
                  ##
                  # RPC-specific configuration for `stream_raw_predict`
                  # @return [::Gapic::Config::Method]
                  #
                  attr_reader :stream_raw_predict
                  ##
                  # RPC-specific configuration for `direct_predict`
                  # @return [::Gapic::Config::Method]
                  #
                  attr_reader :direct_predict
                  ##
                  # RPC-specific configuration for `direct_raw_predict`
                  # @return [::Gapic::Config::Method]
                  #
                  attr_reader :direct_raw_predict
                  ##
                  # RPC-specific configuration for `server_streaming_predict`
                  # @return [::Gapic::Config::Method]
                  #
                  attr_reader :server_streaming_predict
                  ##
                  # RPC-specific configuration for `explain`
                  # @return [::Gapic::Config::Method]
                  #
                  attr_reader :explain
                  ##
                  # RPC-specific configuration for `generate_content`
                  # @return [::Gapic::Config::Method]
                  #
                  attr_reader :generate_content
                  ##
                  # RPC-specific configuration for `stream_generate_content`
                  # @return [::Gapic::Config::Method]
                  #
                  attr_reader :stream_generate_content

                  # @private
                  # Builds one `Gapic::Config::Method` per RPC, seeding each
                  # from the corresponding config on `parent_rpcs` when that
                  # parent responds to the RPC's name.
                  def initialize parent_rpcs = nil
                    RPC_NAMES.each do |rpc_name|
                      inherited = parent_rpcs.respond_to?(rpc_name) ? parent_rpcs.public_send(rpc_name) : nil
                      instance_variable_set :"@#{rpc_name}", ::Gapic::Config::Method.new(inherited)
                    end

                    yield self if block_given?
                  end
                end
              end
            end
          end
        end
      end
    end
  end
end