lib/google/cloud/dialogflow/v2/doc/google/cloud/dialogflow/v2/session.rb in google-cloud-dialogflow-0.2.3 vs lib/google/cloud/dialogflow/v2/doc/google/cloud/dialogflow/v2/session.rb in google-cloud-dialogflow-0.3.0
- old
+ new
@@ -1,6 +1,6 @@
-# Copyright 2018 Google LLC
+# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
@@ -36,10 +36,15 @@
# which instructs the speech recognizer how to process the speech audio,
#
# 2. a conversational query in the form of text, or
#
# 3. an event that specifies which intent to trigger.
+ # @!attribute [rw] output_audio_config
+ # @return [Google::Cloud::Dialogflow::V2::OutputAudioConfig]
+ # Optional. Instructs the speech synthesizer how to generate the output
+ # audio. If this field is not set and agent-level speech synthesizer is not
+ # configured, no output audio is generated.
# @!attribute [rw] input_audio
# @return [String]
# Optional. The natural language speech audio to be processed. This field
# should be populated iff `query_input` is set to an input audio config.
# A single request can contain up to 1 minute of speech audio data.
@@ -50,15 +55,26 @@
# @return [String]
# The unique identifier of the response. It can be used to
# locate a response in the training example set or for reporting issues.
# @!attribute [rw] query_result
# @return [Google::Cloud::Dialogflow::V2::QueryResult]
- # The results of the conversational query or event processing.
+ # The selected results of the conversational query or event processing.
+ # See `alternative_query_results` for additional potential results.
# @!attribute [rw] webhook_status
# @return [Google::Rpc::Status]
- # Specifies the status of the webhook request. `webhook_status`
- # is never populated in webhook requests.
+ # Specifies the status of the webhook request.
+ # @!attribute [rw] output_audio
+ # @return [String]
+ # The audio data bytes encoded as specified in the request.
+ # Note: The output audio is generated based on the values of default platform
+ # text responses found in the `query_result.fulfillment_messages` field. If
+ # multiple default text responses exist, they will be concatenated when
+ # generating audio. If no default platform text responses exist, the
+ # generated audio content will be empty.
+ # @!attribute [rw] output_audio_config
+ # @return [Google::Cloud::Dialogflow::V2::OutputAudioConfig]
+ # The config used by the speech synthesizer to generate the output audio.
class DetectIntentResponse; end
# Represents the parameters of the conversational query.
# @!attribute [rw] time_zone
# @return [String]
@@ -77,17 +93,21 @@
# @return [true, false]
# Optional. Specifies whether to delete all contexts in the current session
# before the new ones are activated.
# @!attribute [rw] session_entity_types
# @return [Array<Google::Cloud::Dialogflow::V2::SessionEntityType>]
- # Optional. The collection of session entity types to replace or extend
- # developer entities with for this query only. The entity synonyms apply
- # to all languages.
+ # Optional. Additional session entity types to replace or extend developer
+ # entity types with. The entity synonyms apply to all languages and persist
+ # for the session of this query.
# @!attribute [rw] payload
# @return [Google::Protobuf::Struct]
# Optional. This field can be used to pass custom data into the webhook
# associated with the agent. Arbitrary JSON objects are supported.
+ # @!attribute [rw] sentiment_analysis_request_config
+ # @return [Google::Cloud::Dialogflow::V2::SentimentAnalysisRequestConfig]
+ # Optional. Configures the type of sentiment analysis to perform. If not
+ # provided, sentiment analysis is not performed.
class QueryParameters; end
# Represents the query input. It can contain either:
#
# 1. An audio config which
@@ -118,23 +138,24 @@
# multiple alternatives, a particular one is picked.
# * If an event was provided as input, `query_text` is not set.
# @!attribute [rw] language_code
# @return [String]
# The language that was triggered during intent detection.
- # See [Language Support](https://dialogflow.com/docs/reference/language)
+ # See [Language
+ # Support](https://cloud.google.com/dialogflow-enterprise/docs/reference/language)
# for a list of the currently supported language codes.
# @!attribute [rw] speech_recognition_confidence
# @return [Float]
# The Speech recognition confidence between 0.0 and 1.0. A higher number
# indicates an estimated greater likelihood that the recognized words are
# correct. The default of 0.0 is a sentinel value indicating that confidence
# was not set.
#
- # You should not rely on this field as it isn't guaranteed to be accurate, or
- # even set. In particular this field isn't set in Webhook calls and for
- # StreamingDetectIntent since the streaming endpoint has separate confidence
- # estimates per portion of the audio in StreamingRecognitionResult.
+ # This field is not guaranteed to be accurate or set. In particular this
+ # field isn't set for StreamingDetectIntent since the streaming endpoint has
+ # separate confidence estimates per portion of the audio in
+ # StreamingRecognitionResult.
# @!attribute [rw] action
# @return [String]
# The action name from the matched intent.
# @!attribute [rw] parameters
# @return [Google::Protobuf::Struct]
@@ -147,10 +168,11 @@
# * `true` if all required parameter values have been collected, or if the
# matched intent doesn't contain any required parameters.
# @!attribute [rw] fulfillment_text
# @return [String]
# The text to be pronounced to the user or shown on the screen.
+ # Note: This is a legacy field, `fulfillment_messages` should be preferred.
# @!attribute [rw] fulfillment_messages
# @return [Array<Google::Cloud::Dialogflow::V2::Intent::Message>]
# The collection of rich messages to present to the user.
# @!attribute [rw] webhook_source
# @return [String]
@@ -173,14 +195,21 @@
# `name`, `display_name` and `webhook_state`.
# @!attribute [rw] intent_detection_confidence
# @return [Float]
# The intent detection confidence. Values range from 0.0
# (completely uncertain) to 1.0 (completely certain).
+ #       If there are multiple `knowledge_answers` messages, this value is set to
+ # the greatest `knowledgeAnswers.match_confidence` value in the list.
# @!attribute [rw] diagnostic_info
# @return [Google::Protobuf::Struct]
- # The free-form diagnostic info. For example, this field
- # could contain webhook call latency.
+ # The free-form diagnostic info. For example, this field could contain
+ # webhook call latency. The string keys of the Struct's fields map can change
+ # without notice.
+ # @!attribute [rw] sentiment_analysis_result
+ # @return [Google::Cloud::Dialogflow::V2::SentimentAnalysisResult]
+ # The sentiment analysis result, which depends on the
+ # `sentiment_analysis_request_config` specified in the request.
class QueryResult; end
# The top-level message sent by the client to the
# `StreamingDetectIntent` method.
#
@@ -195,11 +224,11 @@
# @!attribute [rw] session
# @return [String]
# Required. The name of the session the query is sent to.
# Format of the session name:
# `projects/<Project ID>/agent/sessions/<Session ID>`. It’s up to the API
- # caller to choose an appropriate <Session ID>. It can be a random number or
+ # caller to choose an appropriate `Session ID`. It can be a random number or
# some type of user identifier (preferably hashed). The length of the session
# ID must not exceed 36 characters.
# @!attribute [rw] query_params
# @return [Google::Cloud::Dialogflow::V2::QueryParameters]
# Optional. The parameters of this query.
@@ -221,10 +250,15 @@
# audio. Recognition ceases when it detects the audio's voice has
# stopped or paused. In this case, once a detected intent is received, the
# client should close the stream and start a new request with a new stream as
# needed.
# This setting is ignored when `query_input` is a piece of text or an event.
+ # @!attribute [rw] output_audio_config
+ # @return [Google::Cloud::Dialogflow::V2::OutputAudioConfig]
+ # Optional. Instructs the speech synthesizer how to generate the output
+ # audio. If this field is not set and agent-level speech synthesizer is not
+ # configured, no output audio is generated.
# @!attribute [rw] input_audio
# @return [String]
# Optional. The input audio content to be recognized. Must be sent if
# `query_input` was set to a streaming input audio config. The complete audio
# over all streaming messages must not exceed 1 minute.
@@ -253,10 +287,18 @@
# @return [Google::Cloud::Dialogflow::V2::QueryResult]
# The result of the conversational query or event processing.
# @!attribute [rw] webhook_status
# @return [Google::Rpc::Status]
# Specifies the status of the webhook request.
+ # @!attribute [rw] output_audio
+ # @return [String]
+ # The audio data bytes encoded as specified in the request.
+ # @!attribute [rw] output_audio_config
+ # @return [Google::Cloud::Dialogflow::V2::OutputAudioConfig]
+ # Instructs the speech synthesizer how to generate the output audio. This
+ # field is populated from the agent-level speech synthesizer configuration,
+ # if enabled.
class StreamingDetectIntentResponse; end
# Contains a speech recognition result corresponding to a portion of the audio
# that is currently being processed or an indication that this is the end
# of the single requested utterance.
@@ -274,11 +316,11 @@
#
# 5. transcript: " that's"
#
# 6. transcript: " that is"
#
- # 7. recognition_event_type: `RECOGNITION_EVENT_END_OF_SINGLE_UTTERANCE`
+ # 7. message_type: `MESSAGE_TYPE_END_OF_SINGLE_UTTERANCE`
#
# 8. transcript: " that is the question"
# is_final: true
#
# Only two of the responses contain final results (#4 and #8 indicated by
@@ -287,25 +329,24 @@
#
# In each response we populate:
#
# * for `MESSAGE_TYPE_TRANSCRIPT`: `transcript` and possibly `is_final`.
#
- # * for `MESSAGE_TYPE_END_OF_SINGLE_UTTERANCE`: only `event_type`.
+ # * for `MESSAGE_TYPE_END_OF_SINGLE_UTTERANCE`: only `message_type`.
# @!attribute [rw] message_type
# @return [Google::Cloud::Dialogflow::V2::StreamingRecognitionResult::MessageType]
# Type of the result message.
# @!attribute [rw] transcript
# @return [String]
# Transcript text representing the words that the user spoke.
- # Populated if and only if `event_type` = `RECOGNITION_EVENT_TRANSCRIPT`.
+ # Populated if and only if `message_type` = `MESSAGE_TYPE_TRANSCRIPT`.
# @!attribute [rw] is_final
# @return [true, false]
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
# If `false`, the `StreamingRecognitionResult` represents an
# interim result that may change. If `true`, the recognizer will not return
# any further hypotheses about this piece of the audio. May only be populated
- # for `event_type` = `RECOGNITION_EVENT_TRANSCRIPT`.
+ # for `message_type` = `MESSAGE_TYPE_TRANSCRIPT`.
# @!attribute [rw] confidence
# @return [Float]
# The Speech confidence between 0.0 and 1.0 for the current portion of audio.
# A higher number indicates an estimated greater likelihood that the
# recognized words are correct. The default of 0.0 is a sentinel value
@@ -338,60 +379,93 @@
# @return [Google::Cloud::Dialogflow::V2::AudioEncoding]
# Required. Audio encoding of the audio content to process.
# @!attribute [rw] sample_rate_hertz
# @return [Integer]
# Required. Sample rate (in Hertz) of the audio content sent in the query.
- # Refer to [Cloud Speech API documentation](https://cloud.google.com/speech/docs/basics) for more
- # details.
+ # Refer to
+ # [Cloud Speech API
+ # documentation](https://cloud.google.com/speech-to-text/docs/basics) for
+ # more details.
# @!attribute [rw] language_code
# @return [String]
# Required. The language of the supplied audio. Dialogflow does not do
# translations. See [Language
- # Support](https://dialogflow.com/docs/languages) for a list of the
- # currently supported language codes. Note that queries in the same session
- # do not necessarily need to specify the same language.
+ # Support](https://cloud.google.com/dialogflow-enterprise/docs/reference/language)
+ # for a list of the currently supported language codes. Note that queries in
+ # the same session do not necessarily need to specify the same language.
# @!attribute [rw] phrase_hints
# @return [Array<String>]
# Optional. The collection of phrase hints which are used to boost accuracy
# of speech recognition.
- # Refer to [Cloud Speech API documentation](https://cloud.google.com/speech/docs/basics#phrase-hints)
+ # Refer to
+ # [Cloud Speech API
+ # documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
# for more details.
class InputAudioConfig; end
# Represents the natural language text to be processed.
# @!attribute [rw] text
# @return [String]
# Required. The UTF-8 encoded natural language text to be processed.
- # Text length must not exceed 256 bytes.
+ # Text length must not exceed 256 characters.
# @!attribute [rw] language_code
# @return [String]
# Required. The language of this conversational query. See [Language
- # Support](https://dialogflow.com/docs/languages) for a list of the
- # currently supported language codes. Note that queries in the same session
- # do not necessarily need to specify the same language.
+ # Support](https://cloud.google.com/dialogflow-enterprise/docs/reference/language)
+ # for a list of the currently supported language codes. Note that queries in
+ # the same session do not necessarily need to specify the same language.
class TextInput; end
# Events allow for matching intents by event name instead of the natural
- # language input. For instance, input `<event: { name: “welcome_event”,
- # parameters: { name: “Sam” } }>` can trigger a personalized welcome response.
+ # language input. For instance, input `<event: { name: "welcome_event",
+ # parameters: { name: "Sam" } }>` can trigger a personalized welcome response.
# The parameter `name` may be used by the agent in the response:
- # `“Hello #welcome_event.name! What can I do for you today?”`.
+ # `"Hello #welcome_event.name! What can I do for you today?"`.
# @!attribute [rw] name
# @return [String]
# Required. The unique identifier of the event.
# @!attribute [rw] parameters
# @return [Google::Protobuf::Struct]
# Optional. The collection of parameters associated with the event.
# @!attribute [rw] language_code
# @return [String]
# Required. The language of this query. See [Language
- # Support](https://dialogflow.com/docs/languages) for a list of the
- # currently supported language codes. Note that queries in the same session
- # do not necessarily need to specify the same language.
+ # Support](https://cloud.google.com/dialogflow-enterprise/docs/reference/language)
+ # for a list of the currently supported language codes. Note that queries in
+ # the same session do not necessarily need to specify the same language.
class EventInput; end
+ # Configures the types of sentiment analysis to perform.
+ # @!attribute [rw] analyze_query_text_sentiment
+ # @return [true, false]
+ # Optional. Instructs the service to perform sentiment analysis on
+ # `query_text`. If not provided, sentiment analysis is not performed on
+ # `query_text`.
+ class SentimentAnalysisRequestConfig; end
+
+ # The result of sentiment analysis as configured by
+ # `sentiment_analysis_request_config`.
+ # @!attribute [rw] query_text_sentiment
+ # @return [Google::Cloud::Dialogflow::V2::Sentiment]
+ # The sentiment analysis result for `query_text`.
+ class SentimentAnalysisResult; end
+
+ # The sentiment, such as positive/negative feeling or association, for a unit
+ # of analysis, such as the query text.
+ # @!attribute [rw] score
+ # @return [Float]
+ # Sentiment score between -1.0 (negative sentiment) and 1.0 (positive
+ # sentiment).
+ # @!attribute [rw] magnitude
+ # @return [Float]
+ # A non-negative number in the [0, +inf) range, which represents the absolute
+ # magnitude of sentiment, regardless of score (positive or negative).
+ class Sentiment; end
+
# Audio encoding of the audio content sent in the conversational query request.
- # Refer to the [Cloud Speech API documentation](https://cloud.google.com/speech/docs/basics) for more
+ # Refer to the
+ # [Cloud Speech API
+ # documentation](https://cloud.google.com/speech-to-text/docs/basics) for more
# details.
module AudioEncoding
# Not specified.
AUDIO_ENCODING_UNSPECIFIED = 0
\ No newline at end of file