proto_docs/google/cloud/automl/v1beta1/io.rb in google-cloud-automl-v1beta1-0.1.0 vs proto_docs/google/cloud/automl/v1beta1/io.rb in google-cloud-automl-v1beta1-0.1.1
- old
+ new
@@ -23,11 +23,11 @@
module V1beta1
# Input configuration for ImportData Action.
#
# The format of input depends on the dataset_metadata of the Dataset into
# which the import is happening. As input source the
- # {Google::Cloud::AutoML::V1beta1::InputConfig#gcs_source gcs_source}
+ # {::Google::Cloud::AutoML::V1beta1::InputConfig#gcs_source gcs_source}
# is expected, unless specified otherwise. Additionally, any input .CSV file
# by itself must be 100MB or smaller, unless specified otherwise.
# If an "example" file (that is, image, video etc.) with identical content
# (even if it had different GCS_FILE_PATH) is mentioned multiple times, then
# its label, bounding boxes etc. are appended. The same file should be always
@@ -290,13 +290,13 @@
# TEST,"Thank you for this product.",4
# VALIDATE,gs://folder/content.txt,2
#
# * For Tables:
# Either
- # {Google::Cloud::AutoML::V1beta1::InputConfig#gcs_source gcs_source} or
+ # {::Google::Cloud::AutoML::V1beta1::InputConfig#gcs_source gcs_source} or
#
- # {Google::Cloud::AutoML::V1beta1::InputConfig#bigquery_source bigquery_source}
+ # {::Google::Cloud::AutoML::V1beta1::InputConfig#bigquery_source bigquery_source}
# can be used. All inputs are concatenated into a single
#
# [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_name]
# For gcs_source:
# CSV file(s), where the first row of the first file is the header,
@@ -377,47 +377,47 @@
# percent of CSV rows cannot be processed then the operation fails and
# nothing is imported. Regardless of overall success or failure, the per-row
# failures, up to a certain count cap, are listed in
# Operation.metadata.partial_failures.
# @!attribute [rw] gcs_source
- # @return [Google::Cloud::AutoML::V1beta1::GcsSource]
+ # @return [::Google::Cloud::AutoML::V1beta1::GcsSource]
# The Google Cloud Storage location for the input content.
# In ImportData, the gcs_source points to a CSV with the structure described
# in the comment.
# @!attribute [rw] bigquery_source
- # @return [Google::Cloud::AutoML::V1beta1::BigQuerySource]
+ # @return [::Google::Cloud::AutoML::V1beta1::BigQuerySource]
# The BigQuery location for the input content.
# @!attribute [rw] params
- # @return [Google::Protobuf::Map{String => String}]
+ # @return [::Google::Protobuf::Map{::String => ::String}]
# Additional domain-specific parameters describing the semantics of the
# imported data; any string must be up to 25000
# characters long.
#
# * For Tables:
# `schema_inference_version` - (integer) Required. The version of the
# algorithm that should be used for the initial inference of the
# schema (columns' DataTypes) of the table the data is being imported
# into. Allowed values: "1".
class InputConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
# @!attribute [rw] key
- # @return [String]
+ # @return [::String]
# @!attribute [rw] value
- # @return [String]
+ # @return [::String]
class ParamsEntry
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
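# As a minimal sketch of how these fields fit together (the bucket, object
# path, and value below are placeholders; the generated message classes are
# assumed to accept the usual protobuf keyword-argument constructor), an
# ImportData input for a Tables dataset stored in GCS might be built like:
input_config = ::Google::Cloud::AutoML::V1beta1::InputConfig.new(
  gcs_source: ::Google::Cloud::AutoML::V1beta1::GcsSource.new(
    input_uris: ["gs://my-bucket/tables/import.csv"] # placeholder object path
  ),
  params: { "schema_inference_version" => "1" } # Tables-only; allowed value "1"
)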
# Input configuration for BatchPredict Action.
#
# The format of input depends on the ML problem of the model used for
# prediction. As input source the
- # {Google::Cloud::AutoML::V1beta1::InputConfig#gcs_source gcs_source}
+ # {::Google::Cloud::AutoML::V1beta1::InputConfig#gcs_source gcs_source}
# is expected, unless specified otherwise.
#
# The formats are represented in EBNF with commas being literal and with
# non-terminal symbols defined near the end of this comment. The formats
# are:
@@ -550,24 +550,24 @@
# }
# }
#
# * For Tables:
# Either
- # {Google::Cloud::AutoML::V1beta1::InputConfig#gcs_source gcs_source} or
+ # {::Google::Cloud::AutoML::V1beta1::InputConfig#gcs_source gcs_source} or
#
- # {Google::Cloud::AutoML::V1beta1::InputConfig#bigquery_source bigquery_source}.
+ # {::Google::Cloud::AutoML::V1beta1::InputConfig#bigquery_source bigquery_source}.
# GCS case:
# CSV file(s), each by itself 10GB or smaller and with a total size of
# 100GB or smaller, where the first file must have a header containing
# column names. If the first row of a subsequent file is the same as
# the header, then it is also treated as a header. All other rows
# contain values for the corresponding columns.
# The column names must contain the model's
#
# [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs]
#
- # {Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name-s}
+ # {::Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name-s}
# (order doesn't matter). The columns corresponding to the model's
# input feature column specs must contain values compatible with the
# column spec's data types. Prediction on all the rows, i.e. the CSV
# lines, will be attempted. For FORECASTING
#
@@ -587,11 +587,11 @@
# table must be 100GB or smaller.
# The column names must contain the model's
#
# [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs]
#
- # {Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name-s}
+ # {::Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name-s}
# (order doesn't matter). The columns corresponding to the model's
# input feature column specs must contain values compatible with the
# column spec's data types. Prediction on all the rows of the table
# will be attempted. For FORECASTING
#
@@ -622,30 +622,30 @@
# percent of CSV rows cannot be processed then the operation fails and
# prediction does not happen. Regardless of overall success or failure, the
# per-row failures, up to a certain count cap, will be listed in
# Operation.metadata.partial_failures.
# @!attribute [rw] gcs_source
- # @return [Google::Cloud::AutoML::V1beta1::GcsSource]
+ # @return [::Google::Cloud::AutoML::V1beta1::GcsSource]
# The Google Cloud Storage location for the input content.
# @!attribute [rw] bigquery_source
- # @return [Google::Cloud::AutoML::V1beta1::BigQuerySource]
+ # @return [::Google::Cloud::AutoML::V1beta1::BigQuerySource]
# The BigQuery location for the input content.
class BatchPredictInputConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
end
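# A comparable sketch for a Tables batch prediction reading from BigQuery
# (project, dataset, and table IDs are placeholders):
batch_input = ::Google::Cloud::AutoML::V1beta1::BatchPredictInputConfig.new(
  bigquery_source: ::Google::Cloud::AutoML::V1beta1::BigQuerySource.new(
    input_uri: "bq://my-project.my_dataset.my_table" # placeholder BigQuery path
  )
)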
- # Input configuration of a {Google::Cloud::AutoML::V1beta1::Document Document}.
+ # Input configuration of a {::Google::Cloud::AutoML::V1beta1::Document Document}.
# @!attribute [rw] gcs_source
- # @return [Google::Cloud::AutoML::V1beta1::GcsSource]
+ # @return [::Google::Cloud::AutoML::V1beta1::GcsSource]
# The Google Cloud Storage location of the document file. Only a single path
# should be given.
# Max supported size: 512MB.
# Supported extensions: .PDF.
class DocumentInputConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
end
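# A document input sketch with a placeholder path; a single .PDF object of at
# most 512MB is expected:
doc_input = ::Google::Cloud::AutoML::V1beta1::DocumentInputConfig.new(
  gcs_source: ::Google::Cloud::AutoML::V1beta1::GcsSource.new(
    input_uris: ["gs://my-bucket/docs/contract.pdf"] # single placeholder path
  )
)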
# * For Translation:
# CSV file `translation.csv`, with each line in format:
# ML_USE,GCS_FILE_PATH
@@ -657,18 +657,18 @@
# * For Tables:
# Output depends on whether the dataset was imported from GCS or
# BigQuery.
# GCS case:
#
- # {Google::Cloud::AutoML::V1beta1::OutputConfig#gcs_destination gcs_destination}
+ # {::Google::Cloud::AutoML::V1beta1::OutputConfig#gcs_destination gcs_destination}
# must be set. Exported are CSV file(s) `tables_1.csv`,
# `tables_2.csv`,...,`tables_N.csv` with each having as header line
# the table's column names, and all other lines contain values for
# the header columns.
# BigQuery case:
#
- # {Google::Cloud::AutoML::V1beta1::OutputConfig#bigquery_destination bigquery_destination}
+ # {::Google::Cloud::AutoML::V1beta1::OutputConfig#bigquery_destination bigquery_destination}
# pointing to a BigQuery project must be set. In the given project a
# new dataset will be created with name
#
# `export_data_<automl-dataset-display-name>_<timestamp-of-export-call>`
# where <automl-dataset-display-name> will be made
@@ -676,30 +676,30 @@
# become underscores), and timestamp will be in
# YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that
# dataset a new table called `primary_table` will be created, and
# filled with precisely the same data as that obtained on import.
# @!attribute [rw] gcs_destination
- # @return [Google::Cloud::AutoML::V1beta1::GcsDestination]
+ # @return [::Google::Cloud::AutoML::V1beta1::GcsDestination]
# The Google Cloud Storage location where the output is to be written to.
# For Image Object Detection, Text Extraction, Video Classification and
# Tables, in the given directory a new directory will be created with name:
# export_data-<dataset-display-name>-<timestamp-of-export-call> where
# timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export
# output will be written into that directory.
# @!attribute [rw] bigquery_destination
- # @return [Google::Cloud::AutoML::V1beta1::BigQueryDestination]
+ # @return [::Google::Cloud::AutoML::V1beta1::BigQueryDestination]
# The BigQuery location where the output is to be written to.
class OutputConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
end
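# A sketch of an ExportData output config writing to GCS (placeholder bucket);
# the timestamped export directory described above is created under the prefix:
export_output = ::Google::Cloud::AutoML::V1beta1::OutputConfig.new(
  gcs_destination: ::Google::Cloud::AutoML::V1beta1::GcsDestination.new(
    output_uri_prefix: "gs://my-bucket/exports" # placeholder prefix path
  )
)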
# Output configuration for BatchPredict Action.
#
# As destination the
#
- # {Google::Cloud::AutoML::V1beta1::BatchPredictOutputConfig#gcs_destination gcs_destination}
+ # {::Google::Cloud::AutoML::V1beta1::BatchPredictOutputConfig#gcs_destination gcs_destination}
# must be set unless specified otherwise for a domain. If gcs_destination is
# set then in the given directory a new directory is created. Its name
# will be
# "prediction-<model-display-name>-<timestamp-of-prediction-call>",
# where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents
@@ -885,47 +885,47 @@
# containing only `code` and `message`.
#
# * For Tables:
# Output depends on whether
#
- # {Google::Cloud::AutoML::V1beta1::BatchPredictOutputConfig#gcs_destination gcs_destination}
+ # {::Google::Cloud::AutoML::V1beta1::BatchPredictOutputConfig#gcs_destination gcs_destination}
# or
#
- # {Google::Cloud::AutoML::V1beta1::BatchPredictOutputConfig#bigquery_destination bigquery_destination}
+ # {::Google::Cloud::AutoML::V1beta1::BatchPredictOutputConfig#bigquery_destination bigquery_destination}
# is set (either is allowed).
# GCS case:
# In the created directory files `tables_1.csv`, `tables_2.csv`,...,
# `tables_N.csv` will be created, where N may be 1, and depends on
# the total number of the successfully predicted rows.
# For all CLASSIFICATION
#
# [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:
# Each .csv file will contain a header, listing all columns'
#
- # {Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name-s}
+ # {::Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name-s}
# given on input followed by M target column names in the format of
#
- # "<{Google::Cloud::AutoML::V1beta1::TablesModelMetadata#target_column_spec target_column_specs}
+ # "<{::Google::Cloud::AutoML::V1beta1::TablesModelMetadata#target_column_spec target_column_specs}
#
- # {Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name}>_<target
+ # {::Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name}>_<target
# value>_score" where M is the number of distinct target values,
# i.e. number of distinct values in the target column of the table
# used to train the model. Subsequent lines will contain the
# respective values of successfully predicted rows, with the last,
# i.e. the target, columns having the corresponding prediction
- # {Google::Cloud::AutoML::V1beta1::TablesAnnotation#score scores}.
+ # {::Google::Cloud::AutoML::V1beta1::TablesAnnotation#score scores}.
# For REGRESSION and FORECASTING
#
# [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:
# Each .csv file will contain a header, listing all columns'
# [display_name-s][google.cloud.automl.v1beta1.display_name] given
# on input followed by the predicted target column with name in the
# format of
#
- # "predicted_<{Google::Cloud::AutoML::V1beta1::TablesModelMetadata#target_column_spec target_column_specs}
+ # "predicted_<{::Google::Cloud::AutoML::V1beta1::TablesModelMetadata#target_column_spec target_column_specs}
#
- # {Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name}>"
+ # {::Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name}>"
# Subsequent lines will contain the respective values of
# successfully predicted rows, with the last, i.e. the target,
# column having the predicted target value.
# If prediction for any rows failed, then an additional
# `errors_1.csv`, `errors_2.csv`,..., `errors_N.csv` will be
@@ -937,79 +937,79 @@
# //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
# represented as a JSON string, and containing only `code` and
# `message`.
# BigQuery case:
#
- # {Google::Cloud::AutoML::V1beta1::OutputConfig#bigquery_destination bigquery_destination}
+ # {::Google::Cloud::AutoML::V1beta1::OutputConfig#bigquery_destination bigquery_destination}
# pointing to a BigQuery project must be set. In the given project a
# new dataset will be created with name
# `prediction_<model-display-name>_<timestamp-of-prediction-call>`
# where <model-display-name> will be made
# BigQuery-dataset-name compatible (e.g. most special characters will
# become underscores), and timestamp will be in
# YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset
# two tables will be created, `predictions`, and `errors`.
# The `predictions` table's column names will be the input columns'
#
- # {Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name-s}
+ # {::Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name-s}
# followed by the target column with name in the format of
#
- # "predicted_<{Google::Cloud::AutoML::V1beta1::TablesModelMetadata#target_column_spec target_column_specs}
+ # "predicted_<{::Google::Cloud::AutoML::V1beta1::TablesModelMetadata#target_column_spec target_column_specs}
#
- # {Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name}>"
+ # {::Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name}>"
# The input feature columns will contain the respective values of
# successfully predicted rows, with the target column having an
# ARRAY of
#
- # {Google::Cloud::AutoML::V1beta1::AnnotationPayload AnnotationPayloads},
+ # {::Google::Cloud::AutoML::V1beta1::AnnotationPayload AnnotationPayloads},
# represented as STRUCT-s, containing
- # {Google::Cloud::AutoML::V1beta1::TablesAnnotation TablesAnnotation}.
+ # {::Google::Cloud::AutoML::V1beta1::TablesAnnotation TablesAnnotation}.
# The `errors` table contains rows for which the prediction has
# failed; it has analogous input columns, while the target column name
# is in the format of
#
- # "errors_<{Google::Cloud::AutoML::V1beta1::TablesModelMetadata#target_column_spec target_column_specs}
+ # "errors_<{::Google::Cloud::AutoML::V1beta1::TablesModelMetadata#target_column_spec target_column_specs}
#
- # {Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name}>",
+ # {::Google::Cloud::AutoML::V1beta1::ColumnSpec#display_name display_name}>",
# and as a value has
#
# [`google.rpc.Status`](https:
# //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
# represented as a STRUCT, and containing only `code` and `message`.
# @!attribute [rw] gcs_destination
- # @return [Google::Cloud::AutoML::V1beta1::GcsDestination]
+ # @return [::Google::Cloud::AutoML::V1beta1::GcsDestination]
# The Google Cloud Storage location of the directory where the output is to
# be written to.
# @!attribute [rw] bigquery_destination
- # @return [Google::Cloud::AutoML::V1beta1::BigQueryDestination]
+ # @return [::Google::Cloud::AutoML::V1beta1::BigQueryDestination]
# The BigQuery location where the output is to be written to.
class BatchPredictOutputConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
end
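# For Tables either destination is allowed; a BigQuery-backed sketch with a
# placeholder project ID (the `predictions` and `errors` tables are created in
# a new dataset under this project):
predict_output = ::Google::Cloud::AutoML::V1beta1::BatchPredictOutputConfig.new(
  bigquery_destination: ::Google::Cloud::AutoML::V1beta1::BigQueryDestination.new(
    output_uri: "bq://my-project" # placeholder project path
  )
)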
# Output configuration for ModelExport Action.
# @!attribute [rw] gcs_destination
- # @return [Google::Cloud::AutoML::V1beta1::GcsDestination]
+ # @return [::Google::Cloud::AutoML::V1beta1::GcsDestination]
# The Google Cloud Storage location where the model is to be written to.
# This location may only be set for the following model formats:
# "tflite", "edgetpu_tflite", "tf_saved_model", "tf_js", "core_ml".
#
# Under the directory given as the destination a new one with name
# "model-export-<model-display-name>-<timestamp-of-export-call>",
# where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format,
# will be created. Inside it, the model and any of its supporting files
# will be written.
# @!attribute [rw] gcr_destination
- # @return [Google::Cloud::AutoML::V1beta1::GcrDestination]
+ # @return [::Google::Cloud::AutoML::V1beta1::GcrDestination]
# The GCR location where the model image is to be pushed. This location
# may only be set for the following model formats:
# "docker".
#
# The model image will be created under the given URI.
# @!attribute [rw] model_format
- # @return [String]
+ # @return [::String]
# The format in which the model must be exported. The available, and default,
# formats depend on the problem and model type (if a given problem and type
# combination doesn't have a format listed, it means its models are not
# exportable):
#
@@ -1036,119 +1036,119 @@
#
# quickstart](https:
# //cloud.google.com/vision/automl/docs/containers-gcs-quickstart)
# * core_ml - Used for iOS mobile devices.
# @!attribute [rw] params
- # @return [Google::Protobuf::Map{String => String}]
+ # @return [::Google::Protobuf::Map{::String => ::String}]
# Additional model-type and format specific parameters describing the
# requirements for the model files to be exported; any string must be up to
# 25000 characters long.
#
# * For `docker` format:
# `cpu_architecture` - (string) "x86_64" (default).
# `gpu_architecture` - (string) "none" (default), "nvidia".
class ModelExportOutputConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
# @!attribute [rw] key
- # @return [String]
+ # @return [::String]
# @!attribute [rw] value
- # @return [String]
+ # @return [::String]
class ParamsEntry
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
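# A hypothetical export of a model as a Docker image (registry path and
# architecture values are placeholders; only the keys documented above are set):
export_config = ::Google::Cloud::AutoML::V1beta1::ModelExportOutputConfig.new(
  model_format: "docker",
  gcr_destination: ::Google::Cloud::AutoML::V1beta1::GcrDestination.new(
    output_uri: "gcr.io/my-project/automl-model" # [HOSTNAME]/[PROJECT-ID]/[IMAGE]
  ),
  params: { "cpu_architecture" => "x86_64", "gpu_architecture" => "none" }
)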
# Output configuration for ExportEvaluatedExamples Action. Note that this call
# is available only for 30 days after the model was evaluated.
# The output depends on the domain, as follows (note that only examples from
# the TEST set are exported):
#
# * For Tables:
#
- # {Google::Cloud::AutoML::V1beta1::OutputConfig#bigquery_destination bigquery_destination}
+ # {::Google::Cloud::AutoML::V1beta1::OutputConfig#bigquery_destination bigquery_destination}
# pointing to a BigQuery project must be set. In the given project a
# new dataset will be created with name
#
# `export_evaluated_examples_<model-display-name>_<timestamp-of-export-call>`
# where <model-display-name> will be made BigQuery-dataset-name
# compatible (e.g. most special characters will become underscores),
# and timestamp will be in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601"
# format. In the dataset an `evaluated_examples` table will be
# created. It will have all the same columns as the
#
- # {Google::Cloud::AutoML::V1beta1::TablesDatasetMetadata#primary_table_spec_id primary_table}
+ # {::Google::Cloud::AutoML::V1beta1::TablesDatasetMetadata#primary_table_spec_id primary_table}
# of the
- # {Google::Cloud::AutoML::V1beta1::Model#dataset_id dataset} from which
+ # {::Google::Cloud::AutoML::V1beta1::Model#dataset_id dataset} from which
# the model was created, as they were at the moment of model's
# evaluation (this includes the target column with its ground
# truth), followed by a column called "predicted_<target_column>". That
# last column will contain the model's prediction result for each
# respective row, given as ARRAY of
- # {Google::Cloud::AutoML::V1beta1::AnnotationPayload AnnotationPayloads},
+ # {::Google::Cloud::AutoML::V1beta1::AnnotationPayload AnnotationPayloads},
# represented as STRUCT-s, containing
- # {Google::Cloud::AutoML::V1beta1::TablesAnnotation TablesAnnotation}.
+ # {::Google::Cloud::AutoML::V1beta1::TablesAnnotation TablesAnnotation}.
# @!attribute [rw] bigquery_destination
- # @return [Google::Cloud::AutoML::V1beta1::BigQueryDestination]
+ # @return [::Google::Cloud::AutoML::V1beta1::BigQueryDestination]
# The BigQuery location where the output is to be written to.
class ExportEvaluatedExamplesOutputConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
end
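# A minimal sketch with a placeholder project ID; the `evaluated_examples`
# table is created in a new dataset under this project:
examples_output = ::Google::Cloud::AutoML::V1beta1::ExportEvaluatedExamplesOutputConfig.new(
  bigquery_destination: ::Google::Cloud::AutoML::V1beta1::BigQueryDestination.new(
    output_uri: "bq://my-project" # placeholder project path
  )
)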
# The Google Cloud Storage location for the input content.
# @!attribute [rw] input_uris
- # @return [Array<String>]
+ # @return [::Array<::String>]
# Required. Google Cloud Storage URIs to input files, up to 2000 characters
# long. Accepted forms:
# * Full object path, e.g. gs://bucket/directory/object.csv
class GcsSource
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
end
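# Multiple objects can be listed in one source; the paths are placeholders:
source = ::Google::Cloud::AutoML::V1beta1::GcsSource.new(
  input_uris: [
    "gs://my-bucket/part_1.csv",
    "gs://my-bucket/part_2.csv"
  ]
)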
# The BigQuery location for the input content.
# @!attribute [rw] input_uri
- # @return [String]
+ # @return [::String]
# Required. BigQuery URI to a table, up to 2000 characters long.
# Accepted forms:
# * BigQuery path e.g. bq://projectId.bqDatasetId.bqTableId
class BigQuerySource
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
end
# The Google Cloud Storage location where the output is to be written to.
# @!attribute [rw] output_uri_prefix
- # @return [String]
+ # @return [::String]
# Required. Google Cloud Storage URI to output directory, up to 2000
# characters long.
# Accepted forms:
# * Prefix path: gs://bucket/directory
# The requesting user must have write permission to the bucket.
# The directory is created if it doesn't exist.
class GcsDestination
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
end
# The BigQuery location for the output content.
# @!attribute [rw] output_uri
- # @return [String]
+ # @return [::String]
# Required. BigQuery URI to a project, up to 2000 characters long.
# Accepted forms:
# * BigQuery path e.g. bq://projectId
class BigQueryDestination
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
end
# The GCR location where the image must be pushed to.
# @!attribute [rw] output_uri
- # @return [String]
+ # @return [::String]
# Required. Google Container Registry URI of the new image, up to 2000
# characters long. See
#
# https:
# //cloud.google.com/container-registry/do
@@ -1157,11 +1157,11 @@
# * [HOSTNAME]/[PROJECT-ID]/[IMAGE]
# * [HOSTNAME]/[PROJECT-ID]/[IMAGE]:[TAG]
#
# The requesting user must have permission to push images to the project.
class GcrDestination
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
end
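# The two accepted forms differ only by an optional tag; with placeholder names:
gcr = ::Google::Cloud::AutoML::V1beta1::GcrDestination.new(
  output_uri: "gcr.io/my-project/automl-model:v1" # [HOSTNAME]/[PROJECT-ID]/[IMAGE]:[TAG]
)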
end
end
end
end