# frozen_string_literal: true
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
module Google
module Cloud
module Dataproc
module V1
# The runtime logging config of the job.
# @!attribute [rw] driver_log_levels
# @return [::Google::Protobuf::Map{::String => ::Google::Cloud::Dataproc::V1::LoggingConfig::Level}]
# The per-package log levels for the driver. This may include the
# "root" package name to configure rootLogger.
# Examples:
# 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
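#
# A minimal construction sketch (illustrative only): in the Ruby protobuf
# runtime, map fields accept a Hash and enum values may be given as symbols.
# @example Setting per-package driver log levels
#   logging_config = ::Google::Cloud::Dataproc::V1::LoggingConfig.new(
#     driver_log_levels: { "root" => :INFO, "org.apache" => :DEBUG }
#   )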
class LoggingConfig
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::Google::Cloud::Dataproc::V1::LoggingConfig::Level]
class DriverLogLevelsEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# The Log4j level for job execution. When running an
# [Apache Hive](https://hive.apache.org/) job, Cloud
# Dataproc configures the Hive client to an equivalent verbosity level.
module Level
# Level is unspecified. Use default level for log4j.
LEVEL_UNSPECIFIED = 0
# Use ALL level for log4j.
ALL = 1
# Use TRACE level for log4j.
TRACE = 2
# Use DEBUG level for log4j.
DEBUG = 3
# Use INFO level for log4j.
INFO = 4
# Use WARN level for log4j.
WARN = 5
# Use ERROR level for log4j.
ERROR = 6
# Use FATAL level for log4j.
FATAL = 7
# Turn off log4j.
OFF = 8
end
end
# A Dataproc job for running
# [Apache Hadoop
# MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
# jobs on [Apache Hadoop
# YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
# @!attribute [rw] main_jar_file_uri
# @return [::String]
# The HCFS URI of the jar file containing the main class.
# Examples:
# 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
# 'hdfs:/tmp/test-samples/custom-wordcount.jar'
# 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
# @!attribute [rw] main_class
# @return [::String]
# The name of the driver's main class. The jar file containing the class
# must be in the default CLASSPATH or specified in `jar_file_uris`.
# @!attribute [rw] args
# @return [::Array<::String>]
# Optional. The arguments to pass to the driver. Do not
# include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
# job properties, since a collision may occur that causes an incorrect job
# submission.
# @!attribute [rw] jar_file_uris
# @return [::Array<::String>]
# Optional. Jar file URIs to add to the CLASSPATHs of the
# Hadoop driver and tasks.
# @!attribute [rw] file_uris
# @return [::Array<::String>]
# Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
# to the working directory of Hadoop drivers and distributed tasks. Useful
# for naively parallel tasks.
# @!attribute [rw] archive_uris
# @return [::Array<::String>]
# Optional. HCFS URIs of archives to be extracted in the working directory of
# Hadoop drivers and tasks. Supported file types:
# .jar, .tar, .tar.gz, .tgz, or .zip.
# @!attribute [rw] properties
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. A mapping of property names to values, used to configure Hadoop.
# Properties that conflict with values set by the Dataproc API may be
# overwritten. Can include properties set in /etc/hadoop/conf/*-site and
# classes in user code.
# @!attribute [rw] logging_config
# @return [::Google::Cloud::Dataproc::V1::LoggingConfig]
# Optional. The runtime log config for job execution.
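#
# An illustrative sketch; the jar path and bucket names are hypothetical.
# @example Configuring a Hadoop MapReduce wordcount job
#   hadoop_job = ::Google::Cloud::Dataproc::V1::HadoopJob.new(
#     main_jar_file_uri: "file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar",
#     args: ["wordcount", "gs://my-bucket/input/", "gs://my-bucket/output/"],
#     properties: { "mapreduce.job.reduces" => "2" }
#   )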
class HadoopJob
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::String]
class PropertiesEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
# A Dataproc job for running [Apache Spark](http://spark.apache.org/)
# applications on YARN.
# @!attribute [rw] main_jar_file_uri
# @return [::String]
# The HCFS URI of the jar file that contains the main class.
# @!attribute [rw] main_class
# @return [::String]
# The name of the driver's main class. The jar file that contains the class
# must be in the default CLASSPATH or specified in `jar_file_uris`.
# @!attribute [rw] args
# @return [::Array<::String>]
# Optional. The arguments to pass to the driver. Do not include arguments,
# such as `--conf`, that can be set as job properties, since a collision may
# occur that causes an incorrect job submission.
# @!attribute [rw] jar_file_uris
# @return [::Array<::String>]
# Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
# Spark driver and tasks.
# @!attribute [rw] file_uris
# @return [::Array<::String>]
# Optional. HCFS URIs of files to be placed in the working directory of
# each executor. Useful for naively parallel tasks.
# @!attribute [rw] archive_uris
# @return [::Array<::String>]
# Optional. HCFS URIs of archives to be extracted into the working directory
# of each executor. Supported file types:
# .jar, .tar, .tar.gz, .tgz, and .zip.
# @!attribute [rw] properties
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. A mapping of property names to values, used to configure Spark.
# Properties that conflict with values set by the Dataproc API may be
# overwritten. Can include properties set in
# /etc/spark/conf/spark-defaults.conf and classes in user code.
# @!attribute [rw] logging_config
# @return [::Google::Cloud::Dataproc::V1::LoggingConfig]
# Optional. The runtime log config for job execution.
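#
# An illustrative sketch; SparkPi ships with the Spark examples jar, but
# the exact jar path depends on the cluster image.
# @example Configuring a Spark job by main class
#   spark_job = ::Google::Cloud::Dataproc::V1::SparkJob.new(
#     main_class: "org.apache.spark.examples.SparkPi",
#     jar_file_uris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
#     args: ["1000"]
#   )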
class SparkJob
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::String]
class PropertiesEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
# A Dataproc job for running
# [Apache
# PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
# applications on YARN.
# @!attribute [rw] main_python_file_uri
# @return [::String]
# Required. The HCFS URI of the main Python file to use as the driver. Must
# be a .py file.
# @!attribute [rw] args
# @return [::Array<::String>]
# Optional. The arguments to pass to the driver. Do not include arguments,
# such as `--conf`, that can be set as job properties, since a collision may
# occur that causes an incorrect job submission.
# @!attribute [rw] python_file_uris
# @return [::Array<::String>]
# Optional. HCFS file URIs of Python files to pass to the PySpark
# framework. Supported file types: .py, .egg, and .zip.
# @!attribute [rw] jar_file_uris
# @return [::Array<::String>]
# Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
# Python driver and tasks.
# @!attribute [rw] file_uris
# @return [::Array<::String>]
# Optional. HCFS URIs of files to be placed in the working directory of
# each executor. Useful for naively parallel tasks.
# @!attribute [rw] archive_uris
# @return [::Array<::String>]
# Optional. HCFS URIs of archives to be extracted into the working directory
# of each executor. Supported file types:
# .jar, .tar, .tar.gz, .tgz, and .zip.
# @!attribute [rw] properties
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. A mapping of property names to values, used to configure PySpark.
# Properties that conflict with values set by the Dataproc API may be
# overwritten. Can include properties set in
# /etc/spark/conf/spark-defaults.conf and classes in user code.
# @!attribute [rw] logging_config
# @return [::Google::Cloud::Dataproc::V1::LoggingConfig]
# Optional. The runtime log config for job execution.
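#
# An illustrative sketch; the Cloud Storage URIs are hypothetical.
# @example Configuring a PySpark job
#   pyspark_job = ::Google::Cloud::Dataproc::V1::PySparkJob.new(
#     main_python_file_uri: "gs://my-bucket/jobs/analyze.py",
#     python_file_uris: ["gs://my-bucket/jobs/helpers.zip"],
#     properties: { "spark.executor.memory" => "4g" }
#   )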
class PySparkJob
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::String]
class PropertiesEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
# A list of queries to run on a cluster.
# @!attribute [rw] queries
# @return [::Array<::String>]
# Required. The queries to execute. You do not need to end a query expression
# with a semicolon. Multiple queries can be specified in one
# string by separating each with a semicolon. Here is an example of a
# Dataproc API snippet that uses a QueryList to specify a HiveJob:
#
# "hiveJob": {
# "queryList": {
# "queries": [
# "query1",
# "query2",
# "query3;query4",
# ]
# }
# }
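#
# The same QueryList expressed in Ruby, as an illustrative sketch.
# @example Building the QueryList shown above
#   query_list = ::Google::Cloud::Dataproc::V1::QueryList.new(
#     queries: ["query1", "query2", "query3;query4"]
#   )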
class QueryList
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# A Dataproc job for running [Apache Hive](https://hive.apache.org/)
# queries on YARN.
# @!attribute [rw] query_file_uri
# @return [::String]
# The HCFS URI of the script that contains Hive queries.
# @!attribute [rw] query_list
# @return [::Google::Cloud::Dataproc::V1::QueryList]
# A list of queries.
# @!attribute [rw] continue_on_failure
# @return [::Boolean]
# Optional. Whether to continue executing queries if a query fails.
# The default value is `false`. Setting to `true` can be useful when
# executing independent parallel queries.
# @!attribute [rw] script_variables
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. Mapping of query variable names to values (equivalent to the
# Hive command: `SET name="value";`).
# @!attribute [rw] properties
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. A mapping of property names and values, used to configure Hive.
# Properties that conflict with values set by the Dataproc API may be
# overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
# /etc/hive/conf/hive-site.xml, and classes in user code.
# @!attribute [rw] jar_file_uris
# @return [::Array<::String>]
# Optional. HCFS URIs of jar files to add to the CLASSPATH of the
# Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
# and UDFs.
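#
# An illustrative sketch; the variable reference assumes Hive's hiveconf
# namespace, since script_variables are applied as `SET name="value";`.
# @example Running inline Hive queries with a script variable
#   hive_job = ::Google::Cloud::Dataproc::V1::HiveJob.new(
#     query_list: ::Google::Cloud::Dataproc::V1::QueryList.new(
#       queries: ["SELECT '${hiveconf:env}' AS env", "SHOW TABLES"]
#     ),
#     script_variables: { "env" => "staging" }
#   )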
class HiveJob
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::String]
class ScriptVariablesEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::String]
class PropertiesEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
# A Dataproc job for running [Apache Spark
# SQL](http://spark.apache.org/sql/) queries.
# @!attribute [rw] query_file_uri
# @return [::String]
# The HCFS URI of the script that contains SQL queries.
# @!attribute [rw] query_list
# @return [::Google::Cloud::Dataproc::V1::QueryList]
# A list of queries.
# @!attribute [rw] script_variables
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. Mapping of query variable names to values (equivalent to the
# Spark SQL command: `SET name="value";`).
# @!attribute [rw] properties
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. A mapping of property names to values, used to configure
# Spark SQL's SparkConf. Properties that conflict with values set by the
# Dataproc API may be overwritten.
# @!attribute [rw] jar_file_uris
# @return [::Array<::String>]
# Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
# @!attribute [rw] logging_config
# @return [::Google::Cloud::Dataproc::V1::LoggingConfig]
# Optional. The runtime log config for job execution.
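#
# An illustrative sketch; the table name is hypothetical, and `${dt}`
# substitution assumes Spark SQL variable substitution is enabled.
# @example Running a Spark SQL query with a substituted variable
#   spark_sql_job = ::Google::Cloud::Dataproc::V1::SparkSqlJob.new(
#     query_list: ::Google::Cloud::Dataproc::V1::QueryList.new(
#       queries: ["SELECT * FROM events WHERE dt = '${dt}'"]
#     ),
#     script_variables: { "dt" => "2020-01-01" }
#   )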
class SparkSqlJob
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::String]
class ScriptVariablesEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::String]
class PropertiesEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
# A Dataproc job for running [Apache Pig](https://pig.apache.org/)
# queries on YARN.
# @!attribute [rw] query_file_uri
# @return [::String]
# The HCFS URI of the script that contains the Pig queries.
# @!attribute [rw] query_list
# @return [::Google::Cloud::Dataproc::V1::QueryList]
# A list of queries.
# @!attribute [rw] continue_on_failure
# @return [::Boolean]
# Optional. Whether to continue executing queries if a query fails.
# The default value is `false`. Setting to `true` can be useful when
# executing independent parallel queries.
# @!attribute [rw] script_variables
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. Mapping of query variable names to values (equivalent to the Pig
# command: `name=[value]`).
# @!attribute [rw] properties
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. A mapping of property names to values, used to configure Pig.
# Properties that conflict with values set by the Dataproc API may be
# overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
# /etc/pig/conf/pig.properties, and classes in user code.
# @!attribute [rw] jar_file_uris
# @return [::Array<::String>]
# Optional. HCFS URIs of jar files to add to the CLASSPATH of
# the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
# @!attribute [rw] logging_config
# @return [::Google::Cloud::Dataproc::V1::LoggingConfig]
# Optional. The runtime log config for job execution.
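#
# An illustrative sketch; the script URI and parameter are hypothetical.
# @example Running a Pig script with a parameter
#   pig_job = ::Google::Cloud::Dataproc::V1::PigJob.new(
#     query_file_uri: "gs://my-bucket/scripts/wordcount.pig",
#     script_variables: { "input" => "gs://my-bucket/data/" }
#   )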
class PigJob
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::String]
class ScriptVariablesEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::String]
class PropertiesEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
# A Dataproc job for running
# [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html)
# applications on YARN.
# @!attribute [rw] main_r_file_uri
# @return [::String]
# Required. The HCFS URI of the main R file to use as the driver.
# Must be a .R file.
# @!attribute [rw] args
# @return [::Array<::String>]
# Optional. The arguments to pass to the driver. Do not include arguments,
# such as `--conf`, that can be set as job properties, since a collision may
# occur that causes an incorrect job submission.
# @!attribute [rw] file_uris
# @return [::Array<::String>]
# Optional. HCFS URIs of files to be placed in the working directory of
# each executor. Useful for naively parallel tasks.
# @!attribute [rw] archive_uris
# @return [::Array<::String>]
# Optional. HCFS URIs of archives to be extracted into the working directory
# of each executor. Supported file types:
# .jar, .tar, .tar.gz, .tgz, and .zip.
# @!attribute [rw] properties
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. A mapping of property names to values, used to configure SparkR.
# Properties that conflict with values set by the Dataproc API may be
# overwritten. Can include properties set in
# /etc/spark/conf/spark-defaults.conf and classes in user code.
# @!attribute [rw] logging_config
# @return [::Google::Cloud::Dataproc::V1::LoggingConfig]
# Optional. The runtime log config for job execution.
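#
# An illustrative sketch; the R file URI is hypothetical.
# @example Configuring a SparkR job
#   spark_r_job = ::Google::Cloud::Dataproc::V1::SparkRJob.new(
#     main_r_file_uri: "gs://my-bucket/jobs/report.R",
#     args: ["--date=2020-01-01"]
#   )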
class SparkRJob
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::String]
class PropertiesEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
# A Dataproc job for running [Presto](https://prestosql.io/) queries.
# **IMPORTANT**: The [Dataproc Presto Optional
# Component](https://cloud.google.com/dataproc/docs/concepts/components/presto)
# must be enabled when the cluster is created to submit a Presto job to the
# cluster.
# @!attribute [rw] query_file_uri
# @return [::String]
# The HCFS URI of the script that contains SQL queries.
# @!attribute [rw] query_list
# @return [::Google::Cloud::Dataproc::V1::QueryList]
# A list of queries.
# @!attribute [rw] continue_on_failure
# @return [::Boolean]
# Optional. Whether to continue executing queries if a query fails.
# The default value is `false`. Setting to `true` can be useful when
# executing independent parallel queries.
# @!attribute [rw] output_format
# @return [::String]
# Optional. The format in which query output will be displayed. See the
# Presto documentation for supported output formats.
# @!attribute [rw] client_tags
# @return [::Array<::String>]
# Optional. Presto client tags to attach to this query.
# @!attribute [rw] properties
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. A mapping of property names to values. Used to set Presto
# [session properties](https://prestodb.io/docs/current/sql/set-session.html).
# Equivalent to using the `--session` flag in the Presto CLI.
# @!attribute [rw] logging_config
# @return [::Google::Cloud::Dataproc::V1::LoggingConfig]
# Optional. The runtime log config for job execution.
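#
# An illustrative sketch; query_max_run_time is a standard Presto session
# property, and the catalog and schema in the query are hypothetical.
# @example Running a Presto query with a session property
#   presto_job = ::Google::Cloud::Dataproc::V1::PrestoJob.new(
#     query_list: ::Google::Cloud::Dataproc::V1::QueryList.new(
#       queries: ["SELECT count(*) FROM tpch.tiny.orders"]
#     ),
#     properties: { "query_max_run_time" => "30m" }
#   )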
class PrestoJob
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::String]
class PropertiesEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
# Dataproc job config.
# @!attribute [rw] cluster_name
# @return [::String]
# Required. The name of the cluster where the job will be submitted.
# @!attribute [r] cluster_uuid
# @return [::String]
# Output only. A cluster UUID generated by the Dataproc service when
# the job is submitted.
# @!attribute [rw] cluster_labels
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. Cluster labels to identify a cluster where the job will be submitted.
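#
# An illustrative sketch; the cluster name is hypothetical.
# @example Targeting a named cluster
#   placement = ::Google::Cloud::Dataproc::V1::JobPlacement.new(
#     cluster_name: "my-cluster"
#   )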
class JobPlacement
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::String]
class ClusterLabelsEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
# Dataproc job status.
# @!attribute [r] state
# @return [::Google::Cloud::Dataproc::V1::JobStatus::State]
# Output only. A state message specifying the overall job state.
# @!attribute [r] details
# @return [::String]
# Optional. Output only. Job state details, such as an error
# description if the state is ERROR.
# @!attribute [r] state_start_time
# @return [::Google::Protobuf::Timestamp]
# Output only. The time when this state was entered.
# @!attribute [r] substate
# @return [::Google::Cloud::Dataproc::V1::JobStatus::Substate]
# Output only. Additional state information, which includes
# status reported by the agent.
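#
# An illustrative sketch; Ruby protobuf enum getters return symbols, so
# states compare against symbols such as :DONE.
# @example Inspecting a returned job's status
#   case job.status.state
#   when :DONE  then puts "succeeded"
#   when :ERROR then puts "failed: #{job.status.details}"
#   end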
class JobStatus
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# The job state.
module State
# The job state is unknown.
STATE_UNSPECIFIED = 0
# The job is pending; it has been submitted, but is not yet running.
PENDING = 1
# Job has been received by the service and completed initial setup;
# it will soon be submitted to the cluster.
SETUP_DONE = 8
# The job is running on the cluster.
RUNNING = 2
# A CancelJob request has been received, but is pending.
CANCEL_PENDING = 3
# Transient in-flight resources have been canceled, and the request to
# cancel the running job has been issued to the cluster.
CANCEL_STARTED = 7
# The job cancellation was successful.
CANCELLED = 4
# The job has completed successfully.
DONE = 5
# The job has completed, but encountered an error.
ERROR = 6
# Job attempt has failed. The detail field contains failure details for
# this attempt.
#
# Applies to restartable jobs only.
ATTEMPT_FAILURE = 9
end
# The job substate.
module Substate
# The job substate is unknown.
UNSPECIFIED = 0
# The Job is submitted to the agent.
#
# Applies to RUNNING state.
SUBMITTED = 1
# The Job has been received and is awaiting execution (it may be waiting
# for a condition to be met). See the "details" field for the reason for
# the delay.
#
# Applies to RUNNING state.
QUEUED = 2
# The agent-reported status is out of date, which may be caused by a
# loss of communication between the agent and Dataproc. If the
# agent does not send a timely update, the job will fail.
#
# Applies to RUNNING state.
STALE_STATUS = 3
end
end
# Encapsulates the full scoping used to reference a job.
# @!attribute [rw] project_id
# @return [::String]
# Optional. The ID of the Google Cloud Platform project that the job belongs to. If
# specified, must match the request project ID.
# @!attribute [rw] job_id
# @return [::String]
# Optional. The job ID, which must be unique within the project.
#
# The ID must contain only letters (a-z, A-Z), numbers (0-9),
# underscores (_), or hyphens (-). The maximum length is 100 characters.
#
# If not specified by the caller, the job ID will be provided by the server.
class JobReference
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# A YARN application created by a job. Application information is a subset of
# org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.
#
# **Beta Feature**: This report is available for testing purposes only. It may
# be changed before final release.
# @!attribute [rw] name
# @return [::String]
# Required. The application name.
# @!attribute [rw] state
# @return [::Google::Cloud::Dataproc::V1::YarnApplication::State]
# Required. The application state.
# @!attribute [rw] progress
# @return [::Float]
# Required. The numerical progress of the application, from 1 to 100.
# @!attribute [rw] tracking_url
# @return [::String]
# Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
# TimelineServer that provides application-specific information. The URL uses
# the internal hostname, and requires a proxy server for resolution and,
# possibly, access.
class YarnApplication
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# The application state, corresponding to
# YarnProtos.YarnApplicationStateProto.
module State
# Status is unspecified.
STATE_UNSPECIFIED = 0
# Status is NEW.
NEW = 1
# Status is NEW_SAVING.
NEW_SAVING = 2
# Status is SUBMITTED.
SUBMITTED = 3
# Status is ACCEPTED.
ACCEPTED = 4
# Status is RUNNING.
RUNNING = 5
# Status is FINISHED.
FINISHED = 6
# Status is FAILED.
FAILED = 7
# Status is KILLED.
KILLED = 8
end
end
# A Dataproc job resource.
# @!attribute [rw] reference
# @return [::Google::Cloud::Dataproc::V1::JobReference]
# Optional. The fully qualified reference to the job, which can be used to
# obtain the equivalent REST path of the job resource. If this property
# is not specified when a job is created, the server generates a
# job_id.
# @!attribute [rw] placement
# @return [::Google::Cloud::Dataproc::V1::JobPlacement]
# Required. Job information, including how, when, and where to
# run the job.
# @!attribute [rw] hadoop_job
# @return [::Google::Cloud::Dataproc::V1::HadoopJob]
# Optional. Job is a Hadoop job.
# @!attribute [rw] spark_job
# @return [::Google::Cloud::Dataproc::V1::SparkJob]
# Optional. Job is a Spark job.
# @!attribute [rw] pyspark_job
# @return [::Google::Cloud::Dataproc::V1::PySparkJob]
# Optional. Job is a PySpark job.
# @!attribute [rw] hive_job
# @return [::Google::Cloud::Dataproc::V1::HiveJob]
# Optional. Job is a Hive job.
# @!attribute [rw] pig_job
# @return [::Google::Cloud::Dataproc::V1::PigJob]
# Optional. Job is a Pig job.
# @!attribute [rw] spark_r_job
# @return [::Google::Cloud::Dataproc::V1::SparkRJob]
# Optional. Job is a SparkR job.
# @!attribute [rw] spark_sql_job
# @return [::Google::Cloud::Dataproc::V1::SparkSqlJob]
# Optional. Job is a SparkSql job.
# @!attribute [rw] presto_job
# @return [::Google::Cloud::Dataproc::V1::PrestoJob]
# Optional. Job is a Presto job.
# @!attribute [r] status
# @return [::Google::Cloud::Dataproc::V1::JobStatus]
# Output only. The job status. Additional application-specific
# status information may be contained in the type_job
# and yarn_applications fields.
# @!attribute [r] status_history
# @return [::Array<::Google::Cloud::Dataproc::V1::JobStatus>]
# Output only. The previous job status.
# @!attribute [r] yarn_applications
# @return [::Array<::Google::Cloud::Dataproc::V1::YarnApplication>]
# Output only. The collection of YARN applications spun up by this job.
#
# **Beta** Feature: This report is available for testing purposes only. It
# may be changed before final release.
# @!attribute [r] driver_output_resource_uri
# @return [::String]
# Output only. A URI pointing to the location of the stdout of the job's
# driver program.
# @!attribute [r] driver_control_files_uri
# @return [::String]
# Output only. If present, the location of miscellaneous control files
# which may be used as part of job setup and handling. If not present,
# control files may be placed in the same location as `driver_output_uri`.
# @!attribute [rw] labels
# @return [::Google::Protobuf::Map{::String => ::String}]
# Optional. The labels to associate with this job.
# Label **keys** must contain 1 to 63 characters, and must conform to
# [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
# Label **values** may be empty, but, if present, must contain 1 to 63
# characters, and must conform to [RFC
# 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
# associated with a job.
# @!attribute [rw] scheduling
# @return [::Google::Cloud::Dataproc::V1::JobScheduling]
# Optional. Job scheduling configuration.
# @!attribute [r] job_uuid
# @return [::String]
# Output only. A UUID that uniquely identifies a job within the project
# over time. This is in contrast to a user-settable reference.job_id that
# may be reused over time.
# @!attribute [r] done
# @return [::Boolean]
# Output only. Indicates whether the job is completed. If the value is `false`,
# the job is still in progress. If `true`, the job is completed, and the
# `status.state` field will indicate if it was successful, failed,
# or cancelled.
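#
# An illustrative sketch combining the pieces above; the cluster name and
# jar URI are hypothetical.
# @example Assembling a Hadoop job resource
#   job = ::Google::Cloud::Dataproc::V1::Job.new(
#     placement: ::Google::Cloud::Dataproc::V1::JobPlacement.new(cluster_name: "my-cluster"),
#     hadoop_job: ::Google::Cloud::Dataproc::V1::HadoopJob.new(
#       main_jar_file_uri: "gs://my-bucket/jars/job.jar"
#     ),
#     labels: { "env" => "staging" }
#   )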
class Job
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::String]
class LabelsEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
# Job scheduling options.
# @!attribute [rw] max_failures_per_hour
# @return [::Integer]
# Optional. Maximum number of times per hour a driver may be restarted as
# a result of the driver exiting with a non-zero code before the job is
# reported failed.
#
# A job may be reported as thrashing if the driver exits with a non-zero
# code 4 times within a 10-minute window.
#
# Maximum value is 10.
# @!attribute [rw] max_failures_total
# @return [::Integer]
# Optional. Maximum number of times in total a driver may be restarted as a
# result of the driver exiting with a non-zero code before the job is
# reported failed.
# Maximum value is 240.
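#
# An illustrative sketch for a restartable job.
# @example Allowing up to four driver restarts per hour
#   scheduling = ::Google::Cloud::Dataproc::V1::JobScheduling.new(
#     max_failures_per_hour: 4
#   )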
class JobScheduling
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# A request to submit a job.
# @!attribute [rw] project_id
# @return [::String]
# Required. The ID of the Google Cloud Platform project that the job
# belongs to.
# @!attribute [rw] region
# @return [::String]
# Required. The Dataproc region in which to handle the request.
# @!attribute [rw] job
# @return [::Google::Cloud::Dataproc::V1::Job]
# Required. The job resource.
# @!attribute [rw] request_id
# @return [::String]
# Optional. A unique id used to identify the request. If the server
# receives two
# [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s
# with the same id, then the second request will be ignored and the
# first {::Google::Cloud::Dataproc::V1::Job Job} created and stored in the backend
# is returned.
#
# It is recommended to always set this value to a
# [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
#
# The id must contain only letters (a-z, A-Z), numbers (0-9),
# underscores (_), and hyphens (-). The maximum length is 40 characters.
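#
# An illustrative end-to-end sketch using the generated JobController
# client; the project and region values are hypothetical.
# @example Submitting a job idempotently with a request_id
#   require "google/cloud/dataproc/v1"
#   require "securerandom"
#
#   client = ::Google::Cloud::Dataproc::V1::JobController::Client.new
#   response = client.submit_job project_id: "my-project",
#                                region:     "us-central1",
#                                job:        job,
#                                request_id: SecureRandom.uuid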
class SubmitJobRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Job Operation metadata.
# @!attribute [r] job_id
# @return [::String]
# Output only. The job id.
# @!attribute [r] status
# @return [::Google::Cloud::Dataproc::V1::JobStatus]
# Output only. Most recent job status.
# @!attribute [r] operation_type
# @return [::String]
# Output only. Operation type.
# @!attribute [r] start_time
# @return [::Google::Protobuf::Timestamp]
# Output only. Job submission time.
class JobMetadata
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# A request to get the resource representation for a job in a project.
# @!attribute [rw] project_id
# @return [::String]
# Required. The ID of the Google Cloud Platform project that the job
# belongs to.
# @!attribute [rw] region
# @return [::String]
# Required. The Dataproc region in which to handle the request.
# @!attribute [rw] job_id
# @return [::String]
# Required. The job ID.
class GetJobRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# A request to list jobs in a project.
# @!attribute [rw] project_id
# @return [::String]
# Required. The ID of the Google Cloud Platform project that the job
# belongs to.
# @!attribute [rw] region
# @return [::String]
# Required. The Dataproc region in which to handle the request.
# @!attribute [rw] page_size
# @return [::Integer]
# Optional. The number of results to return in each response.
# @!attribute [rw] page_token
# @return [::String]
# Optional. The page token, returned by a previous call, to request the
# next page of results.
# @!attribute [rw] cluster_name
# @return [::String]
# Optional. If set, the returned jobs list includes only jobs that were
# submitted to the named cluster.
# @!attribute [rw] job_state_matcher
# @return [::Google::Cloud::Dataproc::V1::ListJobsRequest::JobStateMatcher]
# Optional. Specifies enumerated categories of jobs to list.
# (default = match ALL jobs).
#
# If `filter` is provided, `jobStateMatcher` will be ignored.
# @!attribute [rw] filter
# @return [::String]
# Optional. A filter constraining the jobs to list. Filters are
# case-sensitive and have the following syntax:
#
# [field = value] AND [field [= value]] ...
#
# where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
# key. **value** can be `*` to match all values.
# `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
# Only the logical `AND` operator is supported; space-separated items are
# treated as having an implicit `AND` operator.
#
# Example filter:
#
# status.state = ACTIVE AND labels.env = staging AND labels.starred = *
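#
# An illustrative sketch using the filter syntax above; `client` is assumed
# to be a JobController client, and the label values are hypothetical.
# @example Listing active jobs labeled env=staging
#   jobs = client.list_jobs project_id: "my-project",
#                           region:     "us-central1",
#                           filter:     "status.state = ACTIVE AND labels.env = staging"
#   jobs.each { |job| puts job.reference.job_id }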
class ListJobsRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# A matcher that specifies categories of job states.
module JobStateMatcher
# Match all jobs, regardless of state.
ALL = 0
# Only match jobs in non-terminal states: PENDING, RUNNING, or
# CANCEL_PENDING.
ACTIVE = 1
# Only match jobs in terminal states: CANCELLED, DONE, or ERROR.
NON_ACTIVE = 2
end
end
# A request to update a job.
# @!attribute [rw] project_id
# @return [::String]
# Required. The ID of the Google Cloud Platform project that the job
# belongs to.
# @!attribute [rw] region
# @return [::String]
# Required. The Dataproc region in which to handle the request.
# @!attribute [rw] job_id
# @return [::String]
# Required. The job ID.
# @!attribute [rw] job
# @return [::Google::Cloud::Dataproc::V1::Job]
# Required. The changes to the job.
# @!attribute [rw] update_mask
# @return [::Google::Protobuf::FieldMask]
# Required. Specifies the path, relative to Job, of the field to update.
# For example, to update the labels of a Job, the update_mask parameter
# would be specified as labels, and the `PATCH` request body would
# specify the new value. Note: Currently, labels is the only field that
# can be updated.
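#
# An illustrative sketch; as noted above, labels is currently the only
# updatable field, and `client` is assumed to be a JobController client.
# @example Patching a job's labels
#   client.update_job project_id:  "my-project",
#                     region:      "us-central1",
#                     job_id:      "my-job",
#                     job:         ::Google::Cloud::Dataproc::V1::Job.new(labels: { "env" => "prod" }),
#                     update_mask: ::Google::Protobuf::FieldMask.new(paths: ["labels"])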
class UpdateJobRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# A list of jobs in a project.
# @!attribute [r] jobs
# @return [::Array<::Google::Cloud::Dataproc::V1::Job>]
# Output only. Jobs list.
# @!attribute [rw] next_page_token
# @return [::String]
# Optional. This token is included in the response if there are more results
# to fetch. To fetch additional results, provide this value as the
# `page_token` in a subsequent ListJobsRequest.
class ListJobsResponse
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# A request to cancel a job.
# @!attribute [rw] project_id
# @return [::String]
# Required. The ID of the Google Cloud Platform project that the job
# belongs to.
# @!attribute [rw] region
# @return [::String]
# Required. The Dataproc region in which to handle the request.
# @!attribute [rw] job_id
# @return [::String]
# Required. The job ID.
class CancelJobRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# A request to delete a job.
# @!attribute [rw] project_id
# @return [::String]
# Required. The ID of the Google Cloud Platform project that the job
# belongs to.
# @!attribute [rw] region
# @return [::String]
# Required. The Dataproc region in which to handle the request.
# @!attribute [rw] job_id
# @return [::String]
# Required. The job ID.
class DeleteJobRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
end
end
end