lib/google/cloud/dialogflow/v2/doc/google/cloud/dialogflow/v2/session.rb in google-cloud-dialogflow-0.2.1 vs lib/google/cloud/dialogflow/v2/doc/google/cloud/dialogflow/v2/session.rb in google-cloud-dialogflow-0.2.2
- old
+ new
@@ -19,11 +19,11 @@
module V2
# The request to detect a user's intent.
# @!attribute [rw] session
# @return [String]
# Required. The name of the session this query is sent to. Format:
- # +projects/<Project ID>/agent/sessions/<Session ID>+. It's up to the API
+ # `projects/<Project ID>/agent/sessions/<Session ID>`. It's up to the API
# caller to choose an appropriate session ID. It can be a random number or
# some type of user identifier (preferably hashed). The length of the session
# ID must not exceed 36 bytes.
# @!attribute [rw] query_params
# @return [Google::Cloud::Dialogflow::V2::QueryParameters]
@@ -39,11 +39,11 @@
#
# 3. an event that specifies which intent to trigger.
# @!attribute [rw] input_audio
# @return [String]
# Optional. The natural language speech audio to be processed. This field
- # should be populated iff +query_input+ is set to an input audio config.
+ # should be populated iff `query_input` is set to an input audio config.
# A single request can contain up to 1 minute of speech audio data.
class DetectIntentRequest; end
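The request shape above maps onto the generated client in this gem. Below is a minimal sketch of a text-based detect_intent call, assuming the gem's `Google::Cloud::Dialogflow::Sessions.new` factory and the `SessionsClient.session_path` helper; the project ID and query text are placeholders.

    require "securerandom"
    require "google/cloud/dialogflow"

    # Session name in the documented format:
    # projects/<Project ID>/agent/sessions/<Session ID>.
    # The session ID is caller-chosen and must not exceed 36 bytes.
    session = Google::Cloud::Dialogflow::V2::SessionsClient.session_path(
      "my-project-id", SecureRandom.uuid
    )

    sessions_client = Google::Cloud::Dialogflow::Sessions.new(version: :v2)

    # Exactly one kind of query input per request; here natural-language text.
    query_input = { text: { text: "I want to book a room", language_code: "en-US" } }
    response = sessions_client.detect_intent(session, query_input)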
# The message returned from the DetectIntent method.
# @!attribute [rw] response_id
@@ -53,11 +53,11 @@
# @!attribute [rw] query_result
# @return [Google::Cloud::Dialogflow::V2::QueryResult]
# The results of the conversational query or event processing.
# @!attribute [rw] webhook_status
# @return [Google::Rpc::Status]
- # Specifies the status of the webhook request. +webhook_status+
+ # Specifies the status of the webhook request. `webhook_status`
# is never populated in webhook requests.
class DetectIntentResponse; end
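Continuing that sketch, the top-level response fields documented here can be read directly off the returned message; `response` is the value from the previous example.

    # response_id identifies this response; query_result holds the outcome.
    puts "Response ID:  #{response.response_id}"
    puts "Query result: #{response.query_result.query_text}"

    # webhook_status is set only when a webhook was called for this query.
    puts "Webhook:      #{response.webhook_status.inspect}" if response.webhook_status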
# Represents the parameters of the conversational query.
# @!attribute [rw] time_zone
@@ -109,16 +109,16 @@
# Represents the result of conversational query or event processing.
# @!attribute [rw] query_text
# @return [String]
# The original conversational query text:
- # * If natural language text was provided as input, +query_text+ contains
+ # * If natural language text was provided as input, `query_text` contains
# a copy of the input.
- # * If natural language speech audio was provided as input, +query_text+
+ # * If natural language speech audio was provided as input, `query_text`
contains the speech recognition result. If the speech recognizer produced
# multiple alternatives, a particular one is picked.
- # * If an event was provided as input, +query_text+ is not set.
+ # * If an event was provided as input, `query_text` is not set.
# @!attribute [rw] language_code
# @return [String]
# The language that was triggered during intent detection.
# See [Language Support](https://dialogflow.com/docs/reference/language)
# for a list of the currently supported language codes.
@@ -140,39 +140,39 @@
# @return [Google::Protobuf::Struct]
# The collection of extracted parameters.
# @!attribute [rw] all_required_params_present
# @return [true, false]
# This field is set to:
- # * +false+ if the matched intent has required parameters and not all of
+ # * `false` if the matched intent has required parameters and not all of
# the required parameter values have been collected.
- # * +true+ if all required parameter values have been collected, or if the
+ # * `true` if all required parameter values have been collected, or if the
# matched intent doesn't contain any required parameters.
# @!attribute [rw] fulfillment_text
# @return [String]
# The text to be pronounced to the user or shown on the screen.
# @!attribute [rw] fulfillment_messages
# @return [Array<Google::Cloud::Dialogflow::V2::Intent::Message>]
# The collection of rich messages to present to the user.
# @!attribute [rw] webhook_source
# @return [String]
# If the query was fulfilled by a webhook call, this field is set to the
- # value of the +source+ field returned in the webhook response.
+ # value of the `source` field returned in the webhook response.
# @!attribute [rw] webhook_payload
# @return [Google::Protobuf::Struct]
# If the query was fulfilled by a webhook call, this field is set to the
- # value of the +payload+ field returned in the webhook response.
+ # value of the `payload` field returned in the webhook response.
# @!attribute [rw] output_contexts
# @return [Array<Google::Cloud::Dialogflow::V2::Context>]
# The collection of output contexts. If applicable,
- # +output_contexts.parameters+ contains entries with name
- # +<parameter name>.original+ containing the original parameter values
+ # `output_contexts.parameters` contains entries with name
+ # `<parameter name>.original` containing the original parameter values
# before the query.
# @!attribute [rw] intent
# @return [Google::Cloud::Dialogflow::V2::Intent]
# The intent that matched the conversational query. Some, but not
# all, fields are filled in this message, including but not limited to:
- # +name+, +display_name+ and +webhook_state+.
+ # `name`, `display_name` and `webhook_state`.
# @!attribute [rw] intent_detection_confidence
# @return [Float]
# The intent detection confidence. Values range from 0.0
# (completely uncertain) to 1.0 (completely certain).
# @!attribute [rw] diagnostic_info
@@ -180,25 +180,25 @@
# The free-form diagnostic info. For example, this field
# could contain webhook call latency.
class QueryResult; end
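A short sketch of inspecting the `QueryResult` fields listed above, reusing `response` from the earlier detect_intent example; the slot-filling branch simply illustrates how `all_required_params_present` and `fulfillment_text` are meant to be combined.

    result = response.query_result

    # query_text is a copy of the text input, the speech transcript for
    # audio input, or unset for event input.
    puts "Heard: #{result.query_text} (#{result.language_code})"

    if result.all_required_params_present
      # All required parameters collected: act on the matched intent.
      puts "Intent: #{result.intent.display_name}"
      result.parameters.fields.each { |name, value| puts "  #{name} = #{value.inspect}" }
    else
      # Still slot-filling: fulfillment_text carries the next prompt.
      puts "Prompt: #{result.fulfillment_text}"
    end

    # Output contexts carry <parameter name>.original entries with the
    # pre-query parameter values.
    result.output_contexts.each { |ctx| puts "Context: #{ctx.name}" }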
# The top-level message sent by the client to the
- # +StreamingDetectIntent+ method.
+ # `StreamingDetectIntent` method.
#
# Multiple request messages should be sent in order:
#
- # 1. The first message must contain +session+, +query_input+ plus optionally
- # +query_params+ and/or +single_utterance+. The message must not contain +input_audio+.
+ # 1. The first message must contain `session`, `query_input` plus optionally
+ # `query_params` and/or `single_utterance`. The message must not contain `input_audio`.
#
- # 2. If +query_input+ was set to a streaming input audio config,
- # all subsequent messages must contain only +input_audio+.
+ # 2. If `query_input` was set to a streaming input audio config,
+ # all subsequent messages must contain only `input_audio`.
# Otherwise, finish the request stream.
# @!attribute [rw] session
# @return [String]
# Required. The name of the session the query is sent to.
# Format of the session name:
- # +projects/<Project ID>/agent/sessions/<Session ID>+. It’s up to the API
+ # `projects/<Project ID>/agent/sessions/<Session ID>`. It’s up to the API
# caller to choose an appropriate <Session ID>. It can be a random number or
# some type of user identifier (preferably hashed). The length of the session
# ID must not exceed 36 characters.
# @!attribute [rw] query_params
# @return [Google::Cloud::Dialogflow::V2::QueryParameters]
@@ -213,37 +213,37 @@
# 2. a conversational query in the form of text, or
#
# 3. an event that specifies which intent to trigger.
# @!attribute [rw] single_utterance
# @return [true, false]
- # Optional. If +false+ (default), recognition does not cease until the
+ # Optional. If `false` (default), recognition does not cease until the
# client closes the stream.
- # If +true+, the recognizer will detect a single spoken utterance in input
+ # If `true`, the recognizer will detect a single spoken utterance in input
# audio. Recognition ceases when it detects the audio's voice has
# stopped or paused. In this case, once a detected intent is received, the
# client should close the stream and start a new request with a new stream as
# needed.
- # This setting is ignored when +query_input+ is a piece of text or an event.
+ # This setting is ignored when `query_input` is a piece of text or an event.
# @!attribute [rw] input_audio
# @return [String]
# Optional. The input audio content to be recognized. Must be sent if
- # +query_input+ was set to a streaming input audio config. The complete audio
+ # `query_input` was set to a streaming input audio config. The complete audio
# over all streaming messages must not exceed 1 minute.
class StreamingDetectIntentRequest; end
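The ordering rules above translate into an Enumerable of request hashes. A sketch assuming the generated client's `streaming_detect_intent` accepts such an enumerable, with `session` and `sessions_client` from the first example and `audio.raw` as a placeholder file of LINEAR16 audio.

    audio_config = {
      audio_encoding: :AUDIO_ENCODING_LINEAR_16,
      sample_rate_hertz: 16_000,
      language_code: "en-US"
    }

    requests = Enumerator.new do |y|
      # 1. First message: session and query_input only -- never input_audio.
      y << { session: session,
             query_input: { audio_config: audio_config },
             single_utterance: true }

      # 2. Subsequent messages: input_audio chunks only (1 minute max overall).
      File.open("audio.raw", "rb") do |f|
        while (chunk = f.read(4096))
          y << { input_audio: chunk }
        end
      end
    end

    responses = sessions_client.streaming_detect_intent(requests)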
# The top-level message returned from the
- # +StreamingDetectIntent+ method.
+ # `StreamingDetectIntent` method.
#
# Multiple response messages can be returned in order:
#
# 1. If the input was set to streaming audio, the first one or more messages
- # contain +recognition_result+. Each +recognition_result+ represents a more
- # complete transcript of what the user said. The last +recognition_result+
- # has +is_final+ set to +true+.
+ # contain `recognition_result`. Each `recognition_result` represents a more
+ # complete transcript of what the user said. The last `recognition_result`
+ # has `is_final` set to `true`.
#
- # 2. The next message contains +response_id+, +query_result+
- # and optionally +webhook_status+ if a WebHook was called.
+ # 2. The next message contains `response_id`, `query_result`
+ # and optionally `webhook_status` if a WebHook was called.
# @!attribute [rw] response_id
# @return [String]
# The unique identifier of the response. It can be used to
# locate a response in the training example set or for reporting issues.
# @!attribute [rw] recognition_result
@@ -274,46 +274,46 @@
#
# 5. transcript: " that's"
#
# 6. transcript: " that is"
#
- # 7. recognition_event_type: +RECOGNITION_EVENT_END_OF_SINGLE_UTTERANCE+
+ # 7. recognition_event_type: `RECOGNITION_EVENT_END_OF_SINGLE_UTTERANCE`
#
# 8. transcript: " that is the question"
# is_final: true
#
# Only two of the responses contain final results (#4 and #8 indicated by
- # +is_final: true+). Concatenating these generates the full transcript: "to be
+ # `is_final: true`). Concatenating these generates the full transcript: "to be
# or not to be that is the question".
#
# In each response we populate:
#
- # * for +MESSAGE_TYPE_TRANSCRIPT+: +transcript+ and possibly +is_final+.
+ # * for `MESSAGE_TYPE_TRANSCRIPT`: `transcript` and possibly `is_final`.
#
- # * for +MESSAGE_TYPE_END_OF_SINGLE_UTTERANCE+: only +event_type+.
+ # * for `MESSAGE_TYPE_END_OF_SINGLE_UTTERANCE`: only `event_type`.
# @!attribute [rw] message_type
# @return [Google::Cloud::Dialogflow::V2::StreamingRecognitionResult::MessageType]
# Type of the result message.
# @!attribute [rw] transcript
# @return [String]
# Transcript text representing the words that the user spoke.
- # Populated if and only if +event_type+ = +RECOGNITION_EVENT_TRANSCRIPT+.
+ # Populated if and only if `event_type` = `RECOGNITION_EVENT_TRANSCRIPT`.
# @!attribute [rw] is_final
# @return [true, false]
- # The default of 0.0 is a sentinel value indicating +confidence+ was not set.
- # If +false+, the +StreamingRecognitionResult+ represents an
- # interim result that may change. If +true+, the recognizer will not return
+ # The default of 0.0 is a sentinel value indicating `confidence` was not set.
+ # If `false`, the `StreamingRecognitionResult` represents an
+ # interim result that may change. If `true`, the recognizer will not return
# any further hypotheses about this piece of the audio. May only be populated
- # for +event_type+ = +RECOGNITION_EVENT_TRANSCRIPT+.
+ # for `event_type` = `RECOGNITION_EVENT_TRANSCRIPT`.
# @!attribute [rw] confidence
# @return [Float]
# The Speech confidence between 0.0 and 1.0 for the current portion of audio.
# A higher number indicates an estimated greater likelihood that the
# recognized words are correct. The default of 0.0 is a sentinel value
# indicating that confidence was not set.
#
- # This field is typically only provided if +is_final+ is true and you should
+ # This field is typically only provided if `is_final` is true and you should
# not rely on it being accurate or even set.
class StreamingRecognitionResult
# Type of the response message.
module MessageType
# Not specified. Should never be used.
@@ -326,11 +326,11 @@
# utterance and expects no additional speech. Therefore, the server will
# not process additional audio (although it may subsequently return
# additional results). The client should stop sending additional audio
# data, half-close the gRPC connection, and wait for any additional results
# until the server closes the gRPC connection. This message is only sent if
- # +single_utterance+ was set to +true+, and is not used otherwise.
+ # `single_utterance` was set to `true`, and is not used otherwise.
END_OF_SINGLE_UTTERANCE = 2
end
end
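Consuming the stream follows the response ordering documented above: interim `recognition_result` messages arrive first (the last one with `is_final` set), then the message carrying `query_result`. A sketch continuing from the streaming request example.

    responses.each do |resp|
      if resp.recognition_result
        r = resp.recognition_result
        marker = r.is_final ? " [final]" : ""
        puts "Transcript: #{r.transcript}#{marker}"
      end

      # The detected intent arrives after the final recognition result.
      puts "Intent: #{resp.query_result.intent.display_name}" if resp.query_result
    end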
# Instructs the speech recognizer how to process the audio content.
@@ -369,14 +369,14 @@
# currently supported language codes. Note that queries in the same session
# do not necessarily need to specify the same language.
class TextInput; end
# Events allow for matching intents by event name instead of the natural
- # language input. For instance, input +<event: { name: "welcome_event",
- # parameters: { name: "Sam" } }>+ can trigger a personalized welcome response.
- # The parameter +name+ may be used by the agent in the response:
- # +"Hello #welcome_event.name! What can I do for you today?"+.
+ # language input. For instance, input `<event: { name: "welcome_event",
+ # parameters: { name: "Sam" } }>` can trigger a personalized welcome response.
+ # The parameter `name` may be used by the agent in the response:
+ # `"Hello #welcome_event.name! What can I do for you today?"`.
# @!attribute [rw] name
# @return [String]
# Required. The unique identifier of the event.
# @!attribute [rw] parameters
# @return [Google::Protobuf::Struct]
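The welcome_event example above can be sent as an event query input. A sketch reusing the client and session from the first example; the message classes are the ones generated in this gem's V2 namespace, and the parameter value is the documentation's own illustration.

    # Trigger an intent by event name instead of natural-language input.
    # The "name" parameter can then appear in the agent's response, e.g.
    # "Hello #welcome_event.name! What can I do for you today?".
    query_input = Google::Cloud::Dialogflow::V2::QueryInput.new(
      event: Google::Cloud::Dialogflow::V2::EventInput.new(
        name: "welcome_event",
        parameters: Google::Protobuf::Struct.new(
          fields: { "name" => Google::Protobuf::Value.new(string_value: "Sam") }
        ),
        language_code: "en-US"
      )
    )

    response = sessions_client.detect_intent(session, query_input)
    puts response.query_result.fulfillment_text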
@@ -397,43 +397,43 @@
AUDIO_ENCODING_UNSPECIFIED = 0
# Uncompressed 16-bit signed little-endian samples (Linear PCM).
AUDIO_ENCODING_LINEAR_16 = 1
- # [+FLAC+](https://xiph.org/flac/documentation.html) (Free Lossless Audio
+ # [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio
# Codec) is the recommended encoding because it is lossless (therefore
# recognition is not compromised) and requires only about half the
- # bandwidth of +LINEAR16+. +FLAC+ stream encoding supports 16-bit and
- # 24-bit samples, however, not all fields in +STREAMINFO+ are supported.
+ # bandwidth of `LINEAR16`. `FLAC` stream encoding supports 16-bit and
+ # 24-bit samples, however, not all fields in `STREAMINFO` are supported.
AUDIO_ENCODING_FLAC = 2
# 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
AUDIO_ENCODING_MULAW = 3
- # Adaptive Multi-Rate Narrowband codec. +sample_rate_hertz+ must be 8000.
+ # Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
AUDIO_ENCODING_AMR = 4
- # Adaptive Multi-Rate Wideband codec. +sample_rate_hertz+ must be 16000.
+ # Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
AUDIO_ENCODING_AMR_WB = 5
# Opus-encoded audio frames in an Ogg container
# ([OggOpus](https://wiki.xiph.org/OggOpus)).
- # +sample_rate_hertz+ must be 16000.
+ # `sample_rate_hertz` must be 16000.
AUDIO_ENCODING_OGG_OPUS = 6
# Although the use of lossy encodings is not recommended, if a very low
- # bitrate encoding is required, +OGG_OPUS+ is highly preferred over
+ # bitrate encoding is required, `OGG_OPUS` is highly preferred over
# Speex encoding. The [Speex](https://speex.org/) encoding supported by
# Dialogflow API has a header byte in each block, as in MIME type
- # +audio/x-speex-with-header-byte+.
+ # `audio/x-speex-with-header-byte`.
# It is a variant of the RTP Speex encoding defined in
# [RFC 5574](https://tools.ietf.org/html/rfc5574).
# The stream is a sequence of blocks, one block per RTP packet. Each block
# starts with a byte containing the length of the block, in bytes, followed
# by one or more frames of Speex data, padded to an integral number of
# bytes (octets) as specified in RFC 5574. In other words, each RTP header
# is replaced with a single byte containing the block length. Only Speex
- # wideband is supported. +sample_rate_hertz+ must be 16000.
+ # wideband is supported. `sample_rate_hertz` must be 16000.
AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE = 7
end
end
end
end
\ No newline at end of file
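The encoding constraints listed above (for example, `AMR_WB` and `OGG_OPUS` requiring 16000 Hz) are expressed through `InputAudioConfig`. A sketch of a single, non-streaming audio query, assuming `detect_intent` accepts an `input_audio:` keyword as in the generated client; `audio.raw` is a placeholder for up to one minute of LINEAR16 speech, and `session` and `sessions_client` come from the first example.

    query_input = {
      audio_config: {
        audio_encoding: :AUDIO_ENCODING_LINEAR_16,  # uncompressed 16-bit PCM
        sample_rate_hertz: 16_000,
        language_code: "en-US"
      }
    }

    audio = File.binread("audio.raw")
    response = sessions_client.detect_intent(session, query_input, input_audio: audio)
    puts response.query_result.query_text  # the speech recognition transcript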