# frozen_string_literal: true
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
module Google
module Cloud
module Dataproc
module V1
# Describes the identifying information, config, and status of
# a Dataproc cluster.
# @!attribute [rw] project_id
# @return [::String]
# Required. The Google Cloud Platform project ID that the cluster belongs to.
# @!attribute [rw] cluster_name
# @return [::String]
# Required. The cluster name, which must be unique within a project.
# The name must start with a lowercase letter, and can contain
# up to 51 lowercase letters, numbers, and hyphens. It cannot end
# with a hyphen. The name of a deleted cluster can be reused.
# @!attribute [rw] config
# @return [::Google::Cloud::Dataproc::V1::ClusterConfig]
# Optional. The cluster config for a cluster of Compute Engine Instances.
# Note that Dataproc may set default values, and values may change
# when clusters are updated.
#
# Exactly one of ClusterConfig or VirtualClusterConfig must be specified.
# @!attribute [rw] virtual_cluster_config
# @return [::Google::Cloud::Dataproc::V1::VirtualClusterConfig]
# Optional. The virtual cluster config is used when creating a Dataproc
# cluster that does not directly control the underlying compute resources,
# for example, when creating a [Dataproc-on-GKE
# cluster](https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview).
# Dataproc may set default values, and values may change when
# clusters are updated. Exactly one of
# {::Google::Cloud::Dataproc::V1::Cluster#config config} or
# {::Google::Cloud::Dataproc::V1::Cluster#virtual_cluster_config virtual_cluster_config}
# must be specified.
# @!attribute [rw] labels
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. The labels to associate with this cluster.
# Label **keys** must contain 1 to 63 characters, and must conform to
# [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
# Label **values** may be empty, but, if present, must contain 1 to 63
# characters, and must conform to [RFC
# 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
# associated with a cluster.
# @!attribute [r] status
# @return [::Google::Cloud::Dataproc::V1::ClusterStatus]
# Output only. Cluster status.
# @!attribute [r] status_history
# @return [::Array<::Google::Cloud::Dataproc::V1::ClusterStatus>]
# Output only. The previous cluster status.
# @!attribute [r] cluster_uuid
# @return [::String]
# Output only. A cluster UUID (Unique Universal Identifier). Dataproc
# generates this value when it creates the cluster.
# @!attribute [r] metrics
# @return [::Google::Cloud::Dataproc::V1::ClusterMetrics]
# Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
#
# **Beta Feature**: This report is available for testing purposes only. It
# may be changed before final release.
class Cluster
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# Map-entry message backing the `labels` map field (string key/value).
# Not normally used directly.
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::String]
class LabelsEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
# The cluster config.
# @!attribute [rw] config_bucket
# @return [::String]
# Optional. A Cloud Storage bucket used to stage job
# dependencies, config files, and job driver console output.
# If you do not specify a staging bucket, Cloud
# Dataproc will determine a Cloud Storage location (US,
# ASIA, or EU) for your cluster's staging bucket according to the
# Compute Engine zone where your cluster is deployed, and then create
# and manage this project-level, per-location bucket (see
# [Dataproc staging and temp
# buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
# **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
# a Cloud Storage bucket.**
# @!attribute [rw] temp_bucket
# @return [::String]
# Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs
# data, such as Spark and MapReduce history files. If you do not specify a
# temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or
# EU) for your cluster's temp bucket according to the Compute Engine zone
# where your cluster is deployed, and then create and manage this
# project-level, per-location bucket. The default bucket has a TTL of 90
# days, but you can use any TTL (or none) if you specify a bucket (see
# [Dataproc staging and temp
# buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
# **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
# a Cloud Storage bucket.**
# @!attribute [rw] gce_cluster_config
# @return [::Google::Cloud::Dataproc::V1::GceClusterConfig]
# Optional. The shared Compute Engine config settings for
# all instances in a cluster.
# @!attribute [rw] master_config
# @return [::Google::Cloud::Dataproc::V1::InstanceGroupConfig]
# Optional. The Compute Engine config settings for
# the cluster's master instance.
# @!attribute [rw] worker_config
# @return [::Google::Cloud::Dataproc::V1::InstanceGroupConfig]
# Optional. The Compute Engine config settings for
# the cluster's worker instances.
# @!attribute [rw] secondary_worker_config
# @return [::Google::Cloud::Dataproc::V1::InstanceGroupConfig]
# Optional. The Compute Engine config settings for
# a cluster's secondary worker instances
# @!attribute [rw] software_config
# @return [::Google::Cloud::Dataproc::V1::SoftwareConfig]
# Optional. The config settings for cluster software.
# @!attribute [rw] initialization_actions
# @return [::Array<::Google::Cloud::Dataproc::V1::NodeInitializationAction>]
# Optional. Commands to execute on each node after config is
# completed. By default, executables are run on master and all worker nodes.
# You can test a node's `role` metadata to run an executable on
# a master or worker node, as shown below using `curl` (you can also use
# `wget`):
#
# ROLE=$(curl -H Metadata-Flavor:Google
# http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
# if [[ "${ROLE}" == 'Master' ]]; then
# ... master specific actions ...
# else
# ... worker specific actions ...
# fi
# @!attribute [rw] encryption_config
# @return [::Google::Cloud::Dataproc::V1::EncryptionConfig]
# Optional. Encryption settings for the cluster.
# @!attribute [rw] autoscaling_config
# @return [::Google::Cloud::Dataproc::V1::AutoscalingConfig]
# Optional. Autoscaling config for the policy associated with the cluster.
# Cluster does not autoscale if this field is unset.
# @!attribute [rw] security_config
# @return [::Google::Cloud::Dataproc::V1::SecurityConfig]
# Optional. Security settings for the cluster.
# @!attribute [rw] lifecycle_config
# @return [::Google::Cloud::Dataproc::V1::LifecycleConfig]
# Optional. Lifecycle setting for the cluster.
# @!attribute [rw] endpoint_config
# @return [::Google::Cloud::Dataproc::V1::EndpointConfig]
# Optional. Port/endpoint configuration for this cluster
# @!attribute [rw] metastore_config
# @return [::Google::Cloud::Dataproc::V1::MetastoreConfig]
# Optional. Metastore configuration.
# @!attribute [rw] dataproc_metric_config
# @return [::Google::Cloud::Dataproc::V1::DataprocMetricConfig]
# Optional. The config for Dataproc metrics.
# @!attribute [rw] auxiliary_node_groups
# @return [::Array<::Google::Cloud::Dataproc::V1::AuxiliaryNodeGroup>]
# Optional. The node group settings.
class ClusterConfig
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# The Dataproc cluster config for a cluster that does not directly control the
# underlying compute resources, such as a [Dataproc-on-GKE
# cluster](https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview).
# @!attribute [rw] staging_bucket
# @return [::String]
# Optional. A Cloud Storage bucket used to stage job
# dependencies, config files, and job driver console output.
# If you do not specify a staging bucket, Cloud
# Dataproc will determine a Cloud Storage location (US,
# ASIA, or EU) for your cluster's staging bucket according to the
# Compute Engine zone where your cluster is deployed, and then create
# and manage this project-level, per-location bucket (see
# [Dataproc staging and temp
# buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
# **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
# a Cloud Storage bucket.**
# @!attribute [rw] kubernetes_cluster_config
# @return [::Google::Cloud::Dataproc::V1::KubernetesClusterConfig]
# Required. The configuration for running the Dataproc cluster on
# Kubernetes.
# @!attribute [rw] auxiliary_services_config
# @return [::Google::Cloud::Dataproc::V1::AuxiliaryServicesConfig]
# Optional. Configuration of auxiliary services used by this cluster.
class VirtualClusterConfig
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Auxiliary services configuration for a Cluster.
# @!attribute [rw] metastore_config
# @return [::Google::Cloud::Dataproc::V1::MetastoreConfig]
# Optional. The Hive Metastore configuration for this workload.
# @!attribute [rw] spark_history_server_config
# @return [::Google::Cloud::Dataproc::V1::SparkHistoryServerConfig]
# Optional. The Spark History Server configuration for the workload.
class AuxiliaryServicesConfig
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Endpoint config for this cluster
# @!attribute [r] http_ports
# @return [::Google::Protobuf::Map{::String => ::String}]
# Output only. The map of port descriptions to URLs. Will only be populated
# if enable_http_port_access is true.
# @!attribute [rw] enable_http_port_access
# @return [::Boolean]
# Optional. If true, enable http access to specific ports on the cluster
# from external sources. Defaults to false.
class EndpointConfig
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# Map-entry message backing the `http_ports` map field (string key/value).
# Not normally used directly.
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::String]
class HttpPortsEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
# Autoscaling Policy config associated with the cluster.
# @!attribute [rw] policy_uri
# @return [::String]
# Optional. The autoscaling policy used by the cluster.
#
# Only resource names including project ID and location (region) are valid.
# Examples:
#
# * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`
# * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`
#
# Note that the policy must be in the same project and Dataproc region.
class AutoscalingConfig
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Encryption settings for the cluster.
# @!attribute [rw] gce_pd_kms_key_name
# @return [::String]
# Optional. The Cloud KMS key name to use for PD disk encryption for all
# instances in the cluster.
class EncryptionConfig
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Common config settings for resources of Compute Engine cluster
# instances, applicable to all instances in the cluster.
# @!attribute [rw] zone_uri
# @return [::String]
# Optional. The Compute Engine zone where the Dataproc cluster will be
# located. If omitted, the service will pick a zone in the cluster's Compute
# Engine region. On a get request, zone will always be present.
#
# A full URL, partial URI, or short name are valid. Examples:
#
# * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
# * `projects/[project_id]/zones/[zone]`
# * `[zone]`
# @!attribute [rw] network_uri
# @return [::String]
# Optional. The Compute Engine network to be used for machine
# communications. Cannot be specified with subnetwork_uri. If neither
# `network_uri` nor `subnetwork_uri` is specified, the "default" network of
# the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
# [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for
# more information).
#
# A full URL, partial URI, or short name are valid. Examples:
#
# * `https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default`
# * `projects/[project_id]/global/networks/default`
# * `default`
# @!attribute [rw] subnetwork_uri
# @return [::String]
# Optional. The Compute Engine subnetwork to be used for machine
# communications. Cannot be specified with network_uri.
#
# A full URL, partial URI, or short name are valid. Examples:
#
# * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0`
# * `projects/[project_id]/regions/[region]/subnetworks/sub0`
# * `sub0`
# @!attribute [rw] internal_ip_only
# @return [::Boolean]
# Optional. If true, all instances in the cluster will only have internal IP
# addresses. By default, clusters are not restricted to internal IP
# addresses, and will have ephemeral external IP addresses assigned to each
# instance. This `internal_ip_only` restriction can only be enabled for
# subnetwork enabled networks, and all off-cluster dependencies must be
# configured to be accessible without external IP addresses.
# @!attribute [rw] private_ipv6_google_access
# @return [::Google::Cloud::Dataproc::V1::GceClusterConfig::PrivateIpv6GoogleAccess]
# Optional. The type of IPv6 access for a cluster.
# @!attribute [rw] service_account
# @return [::String]
# Optional. The [Dataproc service
# account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc)
# (also see [VM Data Plane
# identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity))
# used by Dataproc cluster VM instances to access Google Cloud Platform
# services.
#
# If not specified, the
# [Compute Engine default service
# account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account)
# is used.
# @!attribute [rw] service_account_scopes
# @return [::Array<::String>]
# Optional. The URIs of service account scopes to be included in
# Compute Engine instances. The following base set of scopes is always
# included:
#
# * https://www.googleapis.com/auth/cloud.useraccounts.readonly
# * https://www.googleapis.com/auth/devstorage.read_write
# * https://www.googleapis.com/auth/logging.write
#
# If no scopes are specified, the following defaults are also provided:
#
# * https://www.googleapis.com/auth/bigquery
# * https://www.googleapis.com/auth/bigtable.admin.table
# * https://www.googleapis.com/auth/bigtable.data
# * https://www.googleapis.com/auth/devstorage.full_control
# @!attribute [rw] tags
# @return [::Array<::String>]
# The Compute Engine tags to add to all instances (see [Tagging
# instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).
# @!attribute [rw] metadata
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. The Compute Engine metadata entries to add to all instances (see
# [Project and instance
# metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
# @!attribute [rw] reservation_affinity
# @return [::Google::Cloud::Dataproc::V1::ReservationAffinity]
# Optional. Reservation Affinity for consuming Zonal reservation.
# @!attribute [rw] node_group_affinity
# @return [::Google::Cloud::Dataproc::V1::NodeGroupAffinity]
# Optional. Node Group Affinity for sole-tenant clusters.
# @!attribute [rw] shielded_instance_config
# @return [::Google::Cloud::Dataproc::V1::ShieldedInstanceConfig]
# Optional. Shielded Instance Config for clusters using [Compute Engine
# Shielded
# VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
# @!attribute [rw] confidential_instance_config
# @return [::Google::Cloud::Dataproc::V1::ConfidentialInstanceConfig]
# Optional. Confidential Instance Config for clusters using [Confidential
# VMs](https://cloud.google.com/compute/confidential-vm/docs).
class GceClusterConfig
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# Map-entry message backing the `metadata` map field (string key/value).
# Not normally used directly.
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::String]
class MetadataEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# `PrivateIpv6GoogleAccess` controls whether and how Dataproc cluster nodes
# can communicate with Google Services through gRPC over IPv6.
# These values are directly mapped to corresponding values in the
# [Compute Engine Instance
# fields](https://cloud.google.com/compute/docs/reference/rest/v1/instances).
module PrivateIpv6GoogleAccess
# If unspecified, Compute Engine default behavior will apply, which
# is the same as
# {::Google::Cloud::Dataproc::V1::GceClusterConfig::PrivateIpv6GoogleAccess::INHERIT_FROM_SUBNETWORK INHERIT_FROM_SUBNETWORK}.
PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED = 0
# Private access to and from Google Services configuration
# inherited from the subnetwork configuration. This is the
# default Compute Engine behavior.
INHERIT_FROM_SUBNETWORK = 1
# Enables outbound private IPv6 access to Google Services from the Dataproc
# cluster.
OUTBOUND = 2
# Enables bidirectional private IPv6 access between Google Services and the
# Dataproc cluster.
BIDIRECTIONAL = 3
end
end
# Node Group Affinity for clusters using sole-tenant node groups.
# **The Dataproc `NodeGroupAffinity` resource is not related to the
# Dataproc {::Google::Cloud::Dataproc::V1::NodeGroup NodeGroup} resource.**
# @!attribute [rw] node_group_uri
# @return [::String]
# Required. The URI of a
# sole-tenant [node group
# resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)
# that the cluster will be created on.
#
# A full URL, partial URI, or node group name are valid. Examples:
#
# * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1`
# * `projects/[project_id]/zones/[zone]/nodeGroups/node-group-1`
# * `node-group-1`
class NodeGroupAffinity
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Shielded Instance Config for clusters using [Compute Engine Shielded
# VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
# @!attribute [rw] enable_secure_boot
# @return [::Boolean]
# Optional. Defines whether instances have Secure Boot enabled.
# @!attribute [rw] enable_vtpm
# @return [::Boolean]
# Optional. Defines whether instances have the vTPM enabled.
# @!attribute [rw] enable_integrity_monitoring
# @return [::Boolean]
# Optional. Defines whether instances have integrity monitoring enabled.
class ShieldedInstanceConfig
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Confidential Instance Config for clusters using [Confidential
# VMs](https://cloud.google.com/compute/confidential-vm/docs)
# @!attribute [rw] enable_confidential_compute
# @return [::Boolean]
# Optional. Defines whether the instance should have confidential compute
# enabled.
class ConfidentialInstanceConfig
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# The config settings for Compute Engine resources in
# an instance group, such as a master or worker group.
# @!attribute [rw] num_instances
# @return [::Integer]
# Optional. The number of VM instances in the instance group.
# For [HA
# cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)
# [master_config](#FIELDS.master_config) groups, **must be set to 3**.
# For standard cluster [master_config](#FIELDS.master_config) groups,
# **must be set to 1**.
# @!attribute [r] instance_names
# @return [::Array<::String>]
# Output only. The list of instance names. Dataproc derives the names
# from `cluster_name`, `num_instances`, and the instance group.
# @!attribute [r] instance_references
# @return [::Array<::Google::Cloud::Dataproc::V1::InstanceReference>]
# Output only. List of references to Compute Engine instances.
# @!attribute [rw] image_uri
# @return [::String]
# Optional. The Compute Engine image resource used for cluster instances.
#
# The URI can represent an image or image family.
#
# Image examples:
#
# * `https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id]`
# * `projects/[project_id]/global/images/[image-id]`
# * `image-id`
#
# Image family examples. Dataproc will use the most recent
# image from the family:
#
# * `https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name]`
# * `projects/[project_id]/global/images/family/[custom-image-family-name]`
#
# If the URI is unspecified, it will be inferred from
# `SoftwareConfig.image_version` or the system default.
# @!attribute [rw] machine_type_uri
# @return [::String]
# Optional. The Compute Engine machine type used for cluster instances.
#
# A full URL, partial URI, or short name are valid. Examples:
#
# * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2`
# * `projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2`
# * `n1-standard-2`
#
# **Auto Zone Exception**: If you are using the Dataproc
# [Auto Zone
# Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
# feature, you must use the short name of the machine type
# resource, for example, `n1-standard-2`.
# @!attribute [rw] disk_config
# @return [::Google::Cloud::Dataproc::V1::DiskConfig]
# Optional. Disk option config settings.
# @!attribute [r] is_preemptible
# @return [::Boolean]
# Output only. Specifies that this instance group contains preemptible
# instances.
# @!attribute [rw] preemptibility
# @return [::Google::Cloud::Dataproc::V1::InstanceGroupConfig::Preemptibility]
# Optional. Specifies the preemptibility of the instance group.
#
# The default value for master and worker groups is
# `NON_PREEMPTIBLE`. This default cannot be changed.
#
# The default value for secondary instances is
# `PREEMPTIBLE`.
# @!attribute [r] managed_group_config
# @return [::Google::Cloud::Dataproc::V1::ManagedGroupConfig]
# Output only. The config for Compute Engine Instance Group
# Manager that manages this group.
# This is only used for preemptible instance groups.
# @!attribute [rw] accelerators
# @return [::Array<::Google::Cloud::Dataproc::V1::AcceleratorConfig>]
# Optional. The Compute Engine accelerator configuration for these
# instances.
# @!attribute [rw] min_cpu_platform
# @return [::String]
# Optional. Specifies the minimum cpu platform for the Instance Group.
# See [Dataproc -> Minimum CPU
# Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).
# @!attribute [rw] min_num_instances
# @return [::Integer]
# Optional. The minimum number of primary worker instances to create.
# If `min_num_instances` is set, cluster creation will succeed if
# the number of primary workers created is at least equal to the
# `min_num_instances` number.
#
# Example: Cluster creation request with `num_instances` = `5` and
# `min_num_instances` = `3`:
#
# * If 4 VMs are created and 1 instance fails,
# the failed VM is deleted. The cluster is
# resized to 4 instances and placed in a `RUNNING` state.
# * If 2 instances are created and 3 instances fail,
# the cluster is placed in an `ERROR` state. The failed VMs
# are not deleted.
# @!attribute [rw] instance_flexibility_policy
# @return [::Google::Cloud::Dataproc::V1::InstanceFlexibilityPolicy]
# Optional. Instance flexibility Policy allowing a mixture of VM shapes and
# provisioning models.
# @!attribute [rw] startup_config
# @return [::Google::Cloud::Dataproc::V1::StartupConfig]
# Optional. Configuration to handle the startup of instances during cluster
# create and update process.
class InstanceGroupConfig
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# Controls the use of preemptible instances within the group.
module Preemptibility
# Preemptibility is unspecified, the system will choose the
# appropriate setting for each instance group.
PREEMPTIBILITY_UNSPECIFIED = 0
# Instances are non-preemptible.
#
# This option is allowed for all instance groups and is the only valid
# value for Master and Worker instance groups.
NON_PREEMPTIBLE = 1
# Instances are [preemptible]
# (https://cloud.google.com/compute/docs/instances/preemptible).
#
# This option is allowed only for [secondary worker]
# (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms)
# groups.
PREEMPTIBLE = 2
# Instances are [Spot VMs]
# (https://cloud.google.com/compute/docs/instances/spot).
#
# This option is allowed only for [secondary worker]
# (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms)
# groups. Spot VMs are the latest version of [preemptible VMs]
# (https://cloud.google.com/compute/docs/instances/preemptible), and
# provide additional features.
SPOT = 3
end
end
# Configuration to handle the startup of instances during cluster create and
# update process.
# @!attribute [rw] required_registration_fraction
# @return [::Float]
# Optional. The config setting to enable cluster creation/update to be
# successful only after required_registration_fraction of instances are up
# and running. This configuration is applicable to only secondary workers for
# now. The cluster will fail if required_registration_fraction of instances
# are not available. This will include instance creation, agent registration,
# and service registration (if enabled).
class StartupConfig
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# A reference to a Compute Engine instance.
# @!attribute [rw] instance_name
# @return [::String]
# The user-friendly name of the Compute Engine instance.
# @!attribute [rw] instance_id
# @return [::String]
# The unique identifier of the Compute Engine instance.
# @!attribute [rw] public_key
# @return [::String]
# The public RSA key used for sharing data with this instance.
# @!attribute [rw] public_ecies_key
# @return [::String]
# The public ECIES key used for sharing data with this instance.
class InstanceReference
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Specifies the resources used to actively manage an instance group.
# @!attribute [r] instance_template_name
# @return [::String]
# Output only. The name of the Instance Template used for the Managed
# Instance Group.
# @!attribute [r] instance_group_manager_name
# @return [::String]
# Output only. The name of the Instance Group Manager for this group.
# @!attribute [r] instance_group_manager_uri
# @return [::String]
# Output only. The partial URI to the instance group manager for this group.
# E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm.
class ManagedGroupConfig
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Instance flexibility Policy allowing a mixture of VM shapes and provisioning
# models.
# @!attribute [rw] instance_selection_list
# @return [::Array<::Google::Cloud::Dataproc::V1::InstanceFlexibilityPolicy::InstanceSelection>]
# Optional. List of instance selection options that the group will use when
# creating new VMs.
# @!attribute [r] instance_selection_results
# @return [::Array<::Google::Cloud::Dataproc::V1::InstanceFlexibilityPolicy::InstanceSelectionResult>]
# Output only. A list of instance selection results in the group.
class InstanceFlexibilityPolicy
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# Defines machines types and a rank to which the machines types belong.
# @!attribute [rw] machine_types
# @return [::Array<::String>]
# Optional. Full machine-type names, e.g. "n1-standard-16".
# @!attribute [rw] rank
# @return [::Integer]
# Optional. Preference of this instance selection. Lower number means
# higher preference. Dataproc will first try to create a VM based on the
# machine-type with priority rank and fallback to next rank based on
# availability. Machine types and instance selections with the same
# priority have the same preference.
class InstanceSelection
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Defines a mapping from machine types to the number of VMs that are created
# with each machine type.
# @!attribute [r] machine_type
# @return [::String]
# Output only. Full machine-type names, e.g. "n1-standard-16".
# @!attribute [r] vm_count
# @return [::Integer]
# Output only. Number of VM provisioned with the machine_type.
class InstanceSelectionResult
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
# Specifies the type and number of accelerator cards attached to the instances
# of an instance. See [GPUs on Compute
# Engine](https://cloud.google.com/compute/docs/gpus/).
# @!attribute [rw] accelerator_type_uri
# @return [::String]
# Full URL, partial URI, or short name of the accelerator type resource to
# expose to this instance. See
# [Compute Engine
# AcceleratorTypes](https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).
#
# Examples:
#
# * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-k80`
# * `projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-k80`
# * `nvidia-tesla-k80`
#
# **Auto Zone Exception**: If you are using the Dataproc
# [Auto Zone
# Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
# feature, you must use the short name of the accelerator type
# resource, for example, `nvidia-tesla-k80`.
# @!attribute [rw] accelerator_count
# @return [::Integer]
# The number of the accelerator cards of this type exposed to this instance.
class AcceleratorConfig
# Generated protobuf message; field accessors correspond to the
# @!attribute declarations documented above.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Specifies the config of disk options for a group of VM instances.
# @!attribute [rw] boot_disk_type
# @return [::String]
# Optional. Type of the boot disk (default is "pd-standard").
# Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive),
# "pd-ssd" (Persistent Disk Solid State Drive),
# or "pd-standard" (Persistent Disk Hard Disk Drive).
# See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).
# @!attribute [rw] boot_disk_size_gb
# @return [::Integer]
# Optional. Size in GB of the boot disk (default is 500GB).
# @!attribute [rw] num_local_ssds
# @return [::Integer]
# Optional. Number of attached SSDs, from 0 to 8 (default is 0).
# If SSDs are not attached, the boot disk is used to store runtime logs and
# [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
# If one or more SSDs are attached, this runtime bulk
# data is spread across them, and the boot disk contains only basic
# config and installed binaries.
#
# Note: Local SSD options may vary by machine type and number of vCPUs
# selected.
# @!attribute [rw] local_ssd_interface
# @return [::String]
# Optional. Interface type of local SSDs (default is "scsi").
# Valid values: "scsi" (Small Computer System Interface),
# "nvme" (Non-Volatile Memory Express).
# See [local SSD
# performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
class DiskConfig
  # Fields are declared via the @!attribute tags above; the mixins below
  # provide the standard protobuf message behavior.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Node group identification and configuration information.
# @!attribute [rw] node_group
# @return [::Google::Cloud::Dataproc::V1::NodeGroup]
# Required. Node group configuration.
# @!attribute [rw] node_group_id
# @return [::String]
# Optional. A node group ID. Generated if not specified.
#
# The ID must contain only letters (a-z, A-Z), numbers (0-9),
# underscores (_), and hyphens (-). Cannot begin or end with underscore
# or hyphen. Must consist of from 3 to 33 characters.
class AuxiliaryNodeGroup
  # Empty message body: fields come from the @!attribute tags above; the
  # mixins supply the protobuf message behavior.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Dataproc Node Group.
# **The Dataproc `NodeGroup` resource is not related to the
# Dataproc {::Google::Cloud::Dataproc::V1::NodeGroupAffinity NodeGroupAffinity}
# resource.**
# @!attribute [rw] name
# @return [::String]
# The Node group [resource name](https://aip.dev/122).
# @!attribute [rw] roles
# @return [::Array<::Google::Cloud::Dataproc::V1::NodeGroup::Role>]
# Required. Node group roles.
# @!attribute [rw] node_group_config
# @return [::Google::Cloud::Dataproc::V1::InstanceGroupConfig]
# Optional. The node group instance group configuration.
# @!attribute [rw] labels
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. Node group labels.
#
# * Label **keys** must consist of from 1 to 63 characters and conform to
# [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
# * Label **values** can be empty. If specified, they must consist of from
# 1 to 63 characters and conform to [RFC 1035]
# (https://www.ietf.org/rfc/rfc1035.txt).
# * The node group must have no more than 32 labels.
class NodeGroup
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Protobuf map-entry type backing the `labels` field
  # (`Map{String => String}` per the @!attribute above).
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Node pool roles.
  module Role
    # Required unspecified role.
    ROLE_UNSPECIFIED = 0

    # Job drivers run on the node pool.
    DRIVER = 1
  end
end
# Specifies an executable to run on a fully configured node and a
# timeout period for executable completion.
# @!attribute [rw] executable_file
# @return [::String]
# Required. Cloud Storage URI of executable file.
# @!attribute [rw] execution_timeout
# @return [::Google::Protobuf::Duration]
# Optional. Amount of time executable has to complete. Default is
# 10 minutes (see JSON representation of
# [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
#
# Cluster creation fails with an explanatory error message (the
# name of the executable that caused the error and the exceeded timeout
# period) if the executable is not completed at end of the timeout period.
class NodeInitializationAction
  # Empty message body: fields are documented in the @!attribute tags above;
  # the mixins provide the protobuf message behavior.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
# The status of a cluster and its instances.
# @!attribute [r] state
# @return [::Google::Cloud::Dataproc::V1::ClusterStatus::State]
# Output only. The cluster's state.
# @!attribute [r] detail
# @return [::String]
# Optional. Output only. Details of cluster's state.
# @!attribute [r] state_start_time
# @return [::Google::Protobuf::Timestamp]
# Output only. Time when this state was entered (see JSON representation of
# [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
# @!attribute [r] substate
# @return [::Google::Cloud::Dataproc::V1::ClusterStatus::Substate]
# Output only. Additional state information that includes
# status reported by the agent.
class ClusterStatus
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # The cluster state.
  module State
    # The cluster state is unknown.
    UNKNOWN = 0

    # The cluster is being created and set up. It is not ready for use.
    CREATING = 1

    # The cluster is currently running and healthy. It is ready for use.
    #
    # **Note:** The cluster state changes from "creating" to "running" status
    # after the master node(s), first two primary worker nodes (and the last
    # primary worker node if primary workers > 2) are running.
    RUNNING = 2

    # The cluster encountered an error. It is not ready for use.
    ERROR = 3

    # The cluster has encountered an error while being updated. Jobs can
    # be submitted to the cluster, but the cluster cannot be updated.
    # NOTE(review): numeric values follow the .proto declaration order, not
    # numeric order, so 9 appears here by design.
    ERROR_DUE_TO_UPDATE = 9

    # The cluster is being deleted. It cannot be used.
    DELETING = 4

    # The cluster is being updated. It continues to accept and process jobs.
    UPDATING = 5

    # The cluster is being stopped. It cannot be used.
    STOPPING = 6

    # The cluster is currently stopped. It is not ready for use.
    STOPPED = 7

    # The cluster is being started. It is not ready for use.
    STARTING = 8

    # The cluster is being repaired. It is not ready for use.
    REPAIRING = 10
  end

  # The cluster substate.
  module Substate
    # The cluster substate is unknown.
    UNSPECIFIED = 0

    # The cluster is known to be in an unhealthy state
    # (for example, critical daemons are not running or HDFS capacity is
    # exhausted).
    #
    # Applies to RUNNING state.
    UNHEALTHY = 1

    # The agent-reported status is out of date (may occur if
    # Dataproc loses communication with Agent).
    #
    # Applies to RUNNING state.
    STALE_STATUS = 2
  end
end
# Security related configuration, including encryption, Kerberos, etc.
# @!attribute [rw] kerberos_config
# @return [::Google::Cloud::Dataproc::V1::KerberosConfig]
# Optional. Kerberos related configuration.
# @!attribute [rw] identity_config
# @return [::Google::Cloud::Dataproc::V1::IdentityConfig]
# Optional. Identity related configuration, including service account based
# secure multi-tenancy user mappings.
class SecurityConfig
  # Empty message body: fields are documented in the @!attribute tags above;
  # the mixins supply the protobuf message behavior.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Specifies Kerberos related configuration.
# @!attribute [rw] enable_kerberos
# @return [::Boolean]
# Optional. Flag to indicate whether to Kerberize the cluster (default:
# false). Set this field to true to enable Kerberos on a cluster.
# @!attribute [rw] root_principal_password_uri
# @return [::String]
# Optional. The Cloud Storage URI of a KMS encrypted file containing the root
# principal password.
# @!attribute [rw] kms_key_uri
# @return [::String]
# Optional. The uri of the KMS key used to encrypt various sensitive
# files.
# @!attribute [rw] keystore_uri
# @return [::String]
# Optional. The Cloud Storage URI of the keystore file used for SSL
# encryption. If not provided, Dataproc will provide a self-signed
# certificate.
# @!attribute [rw] truststore_uri
# @return [::String]
# Optional. The Cloud Storage URI of the truststore file used for SSL
# encryption. If not provided, Dataproc will provide a self-signed
# certificate.
# @!attribute [rw] keystore_password_uri
# @return [::String]
# Optional. The Cloud Storage URI of a KMS encrypted file containing the
# password to the user provided keystore. For the self-signed certificate,
# this password is generated by Dataproc.
# @!attribute [rw] key_password_uri
# @return [::String]
# Optional. The Cloud Storage URI of a KMS encrypted file containing the
# password to the user provided key. For the self-signed certificate, this
# password is generated by Dataproc.
# @!attribute [rw] truststore_password_uri
# @return [::String]
# Optional. The Cloud Storage URI of a KMS encrypted file containing the
# password to the user provided truststore. For the self-signed certificate,
# this password is generated by Dataproc.
# @!attribute [rw] cross_realm_trust_realm
# @return [::String]
# Optional. The remote realm the Dataproc on-cluster KDC will trust, should
# the user enable cross realm trust.
# @!attribute [rw] cross_realm_trust_kdc
# @return [::String]
# Optional. The KDC (IP or hostname) for the remote trusted realm in a cross
# realm trust relationship.
# @!attribute [rw] cross_realm_trust_admin_server
# @return [::String]
# Optional. The admin server (IP or hostname) for the remote trusted realm in
# a cross realm trust relationship.
# @!attribute [rw] cross_realm_trust_shared_password_uri
# @return [::String]
# Optional. The Cloud Storage URI of a KMS encrypted file containing the
# shared password between the on-cluster Kerberos realm and the remote
# trusted realm, in a cross realm trust relationship.
# @!attribute [rw] kdc_db_key_uri
# @return [::String]
# Optional. The Cloud Storage URI of a KMS encrypted file containing the
# master key of the KDC database.
# @!attribute [rw] tgt_lifetime_hours
# @return [::Integer]
# Optional. The lifetime of the ticket granting ticket, in hours.
# If not specified, or user specifies 0, then default value 10
# will be used.
# @!attribute [rw] realm
# @return [::String]
# Optional. The name of the on-cluster Kerberos realm.
# If not specified, the uppercased domain of hostnames will be the realm.
class KerberosConfig
  # Empty message body: the many Kerberos-related fields are declared via the
  # @!attribute tags above; the mixins provide the protobuf message behavior.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Identity related configuration, including service account based
# secure multi-tenancy user mappings.
# @!attribute [rw] user_service_account_mapping
# @return [::Google::Protobuf::Map{::String => ::String}]
# Required. Map of user to service account.
class IdentityConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Protobuf map-entry type backing the `user_service_account_mapping` field
  # (`Map{String => String}` per the @!attribute above).
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class UserServiceAccountMappingEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end
# Specifies the selection and config of software inside the cluster.
# @!attribute [rw] image_version
# @return [::String]
# Optional. The version of software inside the cluster. It must be one of the
# supported [Dataproc
# Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions),
# such as "1.2" (including a subminor version, such as "1.2.29"), or the
# ["preview"
# version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
# If unspecified, it defaults to the latest Debian version.
# @!attribute [rw] properties
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. The properties to set on daemon config files.
#
# Property keys are specified in `prefix:property` format, for example
# `core:hadoop.tmp.dir`. The following are supported prefixes
# and their mappings:
#
# * capacity-scheduler: `capacity-scheduler.xml`
# * core: `core-site.xml`
# * distcp: `distcp-default.xml`
# * hdfs: `hdfs-site.xml`
# * hive: `hive-site.xml`
# * mapred: `mapred-site.xml`
# * pig: `pig.properties`
# * spark: `spark-defaults.conf`
# * yarn: `yarn-site.xml`
#
# For more information, see [Cluster
# properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
# @!attribute [rw] optional_components
# @return [::Array<::Google::Cloud::Dataproc::V1::Component>]
# Optional. The set of components to activate on the cluster.
class SoftwareConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Protobuf map-entry type backing the `properties` field
  # (`Map{String => String}` per the @!attribute above).
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class PropertiesEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end
# Specifies the cluster auto-delete schedule configuration.
# @!attribute [rw] idle_delete_ttl
# @return [::Google::Protobuf::Duration]
# Optional. The duration to keep the cluster alive while idling (when no jobs
# are running). Passing this threshold will cause the cluster to be
# deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
# representation of
# [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
# @!attribute [rw] auto_delete_time
# @return [::Google::Protobuf::Timestamp]
# Optional. The time when cluster will be auto-deleted (see JSON
# representation of
# [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
# @!attribute [rw] auto_delete_ttl
# @return [::Google::Protobuf::Duration]
# Optional. The lifetime duration of cluster. The cluster will be
# auto-deleted at the end of this period. Minimum value is 10 minutes;
# maximum value is 14 days (see JSON representation of
# [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
# @!attribute [r] idle_start_time
# @return [::Google::Protobuf::Timestamp]
# Output only. The time when cluster became idle (most recent job finished)
# and became eligible for deletion due to idleness (see JSON representation
# of
# [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
class LifecycleConfig
  # Empty message body: fields are documented in the @!attribute tags above;
  # the mixins supply the protobuf message behavior.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Specifies a Metastore configuration.
# @!attribute [rw] dataproc_metastore_service
# @return [::String]
# Required. Resource name of an existing Dataproc Metastore service.
#
# Example:
#
# * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`
class MetastoreConfig
  # Empty message body: the single `dataproc_metastore_service` field is
  # documented above; the mixins provide the protobuf message behavior.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Contains cluster daemon metrics, such as HDFS and YARN stats.
#
# **Beta Feature**: This report is available for testing purposes only. It may
# be changed before final release.
# @!attribute [rw] hdfs_metrics
# @return [::Google::Protobuf::Map{::String => ::Integer}]
# The HDFS metrics.
# @!attribute [rw] yarn_metrics
# @return [::Google::Protobuf::Map{::String => ::Integer}]
# YARN metrics.
class ClusterMetrics
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Protobuf map-entry type backing the `hdfs_metrics` field
  # (`Map{String => Integer}` per the @!attribute above).
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::Integer]
  class HdfsMetricsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Protobuf map-entry type backing the `yarn_metrics` field
  # (`Map{String => Integer}` per the @!attribute above).
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::Integer]
  class YarnMetricsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end
# Dataproc metric config.
# @!attribute [rw] metrics
# @return [::Array<::Google::Cloud::Dataproc::V1::DataprocMetricConfig::Metric>]
# Required. Metrics sources to enable.
class DataprocMetricConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # A Dataproc custom metric.
  # @!attribute [rw] metric_source
  #   @return [::Google::Cloud::Dataproc::V1::DataprocMetricConfig::MetricSource]
  #     Required. A standard set of metrics is collected unless `metricOverrides`
  #     are specified for the metric source (see [Custom metrics]
  #     (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics)
  #     for more information).
  # @!attribute [rw] metric_overrides
  #   @return [::Array<::String>]
  #     Optional. Specify one or more [Custom metrics]
  #     (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics)
  #     to collect for the metric source (for the `SPARK` metric source, any
  #     [Spark metric]
  #     (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be
  #     specified).
  #
  #     Provide metrics in the following format:
  #     METRIC_SOURCE:INSTANCE:GROUP:METRIC
  #     Use camelcase as appropriate.
  #
  #     Examples:
  #
  #     ```
  #     yarn:ResourceManager:QueueMetrics:AppsCompleted
  #     spark:driver:DAGScheduler:job.allJobs
  #     sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed
  #     hiveserver2:JVM:Memory:NonHeapMemoryUsage.used
  #     ```
  #
  #     Notes:
  #
  #     * Only the specified overridden metrics are collected for the
  #       metric source. For example, if one or more `spark:executive` metrics
  #       are listed as metric overrides, other `SPARK` metrics are not
  #       collected. The collection of the metrics for other enabled custom
  #       metric sources is unaffected. For example, if both `SPARK` and `YARN`
  #       metric sources are enabled, and overrides are provided for Spark
  #       metrics only, all YARN metrics are collected.
  class Metric
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # A source for the collection of Dataproc custom metrics (see [Custom
  # metrics]
  # (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics)).
  module MetricSource
    # Required unspecified metric source.
    METRIC_SOURCE_UNSPECIFIED = 0

    # Monitoring agent metrics. If this source is enabled,
    # Dataproc enables the monitoring agent in Compute Engine,
    # and collects monitoring agent metrics, which are published
    # with an `agent.googleapis.com` prefix.
    MONITORING_AGENT_DEFAULTS = 1

    # HDFS metric source.
    HDFS = 2

    # Spark metric source.
    SPARK = 3

    # YARN metric source.
    YARN = 4

    # Spark History Server metric source.
    SPARK_HISTORY_SERVER = 5

    # Hiveserver2 metric source.
    HIVESERVER2 = 6

    # hivemetastore metric source
    HIVEMETASTORE = 7
  end
end
# A request to create a cluster.
# @!attribute [rw] project_id
# @return [::String]
# Required. The ID of the Google Cloud Platform project that the cluster
# belongs to.
# @!attribute [rw] region
# @return [::String]
# Required. The Dataproc region in which to handle the request.
# @!attribute [rw] cluster
# @return [::Google::Cloud::Dataproc::V1::Cluster]
# Required. The cluster to create.
# @!attribute [rw] request_id
# @return [::String]
# Optional. A unique ID used to identify the request. If the server receives
# two
# [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s
# with the same id, then the second request will be ignored and the
# first {::Google::Longrunning::Operation google.longrunning.Operation} created
# and stored in the backend is returned.
#
# It is recommended to always set this value to a
# [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
#
# The ID must contain only letters (a-z, A-Z), numbers (0-9),
# underscores (_), and hyphens (-). The maximum length is 40 characters.
# @!attribute [rw] action_on_failed_primary_workers
# @return [::Google::Cloud::Dataproc::V1::FailureAction]
# Optional. Failure action when primary worker creation fails.
class CreateClusterRequest
  # Empty message body: request fields are documented in the @!attribute tags
  # above; the mixins supply the protobuf message behavior.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
# A request to update a cluster.
# @!attribute [rw] project_id
# @return [::String]
# Required. The ID of the Google Cloud Platform project the
# cluster belongs to.
# @!attribute [rw] region
# @return [::String]
# Required. The Dataproc region in which to handle the request.
# @!attribute [rw] cluster_name
# @return [::String]
# Required. The cluster name.
# @!attribute [rw] cluster
# @return [::Google::Cloud::Dataproc::V1::Cluster]
# Required. The changes to the cluster.
# @!attribute [rw] graceful_decommission_timeout
# @return [::Google::Protobuf::Duration]
# Optional. Timeout for graceful YARN decommissioning. Graceful
# decommissioning allows removing nodes from the cluster without
# interrupting jobs in progress. Timeout specifies how long to wait for jobs
# in progress to finish before forcefully removing nodes (and potentially
# interrupting jobs). Default timeout is 0 (for forceful decommission), and
# the maximum allowed timeout is 1 day. (see JSON representation of
# [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
#
# Only supported on Dataproc image versions 1.2 and higher.
# @!attribute [rw] update_mask
# @return [::Google::Protobuf::FieldMask]
# Required. Specifies the path, relative to `Cluster`, of
# the field to update. For example, to change the number of workers
# in a cluster to 5, the `update_mask` parameter would be
# specified as `config.worker_config.num_instances`,
# and the `PATCH` request body would specify the new value, as follows:
#
# {
# "config":{
# "workerConfig":{
# "numInstances":"5"
# }
# }
# }
# Similarly, to change the number of preemptible workers in a cluster to 5,
# the `update_mask` parameter would be
# `config.secondary_worker_config.num_instances`, and the `PATCH` request
# body would be set as follows:
#
# {
# "config":{
# "secondaryWorkerConfig":{
# "numInstances":"5"
# }
# }
# }
# Note: Currently, only the following fields can be updated:
#
#
#      Mask                                         | Purpose
#      ---------------------------------------------|-------------------------------------------------
#      labels                                       | Update labels
#      config.worker_config.num_instances           | Resize primary worker group
#      config.secondary_worker_config.num_instances | Resize secondary worker group
#      config.autoscaling_config.policy_uri         | Use, stop using, or change autoscaling policies