lib/aws-sdk-glue/client.rb in aws-sdk-glue-1.114.0 vs lib/aws-sdk-glue/client.rb in aws-sdk-glue-1.115.0
- old
+ new
@@ -883,11 +883,11 @@
# resp.dev_endpoints[0].yarn_endpoint_address #=> String
# resp.dev_endpoints[0].private_address #=> String
# resp.dev_endpoints[0].zeppelin_remote_spark_interpreter_port #=> Integer
# resp.dev_endpoints[0].public_address #=> String
# resp.dev_endpoints[0].status #=> String
- # resp.dev_endpoints[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.dev_endpoints[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.dev_endpoints[0].glue_version #=> String
# resp.dev_endpoints[0].number_of_workers #=> Integer
# resp.dev_endpoints[0].number_of_nodes #=> Integer
# resp.dev_endpoints[0].availability_zone #=> String
# resp.dev_endpoints[0].vpc_id #=> String
@@ -957,11 +957,11 @@
# resp.jobs[0].connections.connections[0] #=> String
# resp.jobs[0].max_retries #=> Integer
# resp.jobs[0].allocated_capacity #=> Integer
# resp.jobs[0].timeout #=> Integer
# resp.jobs[0].max_capacity #=> Float
- # resp.jobs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.jobs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.jobs[0].number_of_workers #=> Integer
# resp.jobs[0].security_configuration #=> String
# resp.jobs[0].notification_property.notify_delay_after #=> Integer
# resp.jobs[0].glue_version #=> String
# resp.jobs[0].code_gen_configuration_nodes #=> Hash
@@ -1708,11 +1708,11 @@
# resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].predecessor_runs[0].run_id #=> String
# resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].allocated_capacity #=> Integer
# resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
# resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
# resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
- # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
# resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
# resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
# resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].notification_property.notify_delay_after #=> Integer
# resp.workflows[0].last_run.graph.nodes[0].job_details.job_runs[0].glue_version #=> String
@@ -1775,11 +1775,11 @@
# resp.workflows[0].graph.nodes[0].job_details.job_runs[0].predecessor_runs[0].run_id #=> String
# resp.workflows[0].graph.nodes[0].job_details.job_runs[0].allocated_capacity #=> Integer
# resp.workflows[0].graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
# resp.workflows[0].graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
# resp.workflows[0].graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
- # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.workflows[0].graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.workflows[0].graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
# resp.workflows[0].graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
# resp.workflows[0].graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
# resp.workflows[0].graph.nodes[0].job_details.job_runs[0].notification_property.notify_delay_after #=> Integer
# resp.workflows[0].graph.nodes[0].job_details.job_runs[0].glue_version #=> String
@@ -2000,11 +2000,11 @@
def cancel_ml_task_run(params = {}, options = {})
req = build_request(:cancel_ml_task_run, params)
req.send_request(options)
end
- # Cancels the statement..
+ # Cancels the statement.
#
# @option params [required, String] :session_id
# The Session ID of the statement to be cancelled.
#
# @option params [required, Integer] :id
@@ -2628,11 +2628,11 @@
# security_group_ids: ["GenericString"],
# subnet_id: "GenericString",
# public_key: "GenericString",
# public_keys: ["GenericString"],
# number_of_nodes: 1,
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X
# glue_version: "GlueVersionString",
# number_of_workers: 1,
# extra_python_libs_s3_path: "GenericString",
# extra_jars_s3_path: "GenericString",
# security_configuration: "NameString",
@@ -2653,11 +2653,11 @@
# resp.subnet_id #=> String
# resp.role_arn #=> String
# resp.yarn_endpoint_address #=> String
# resp.zeppelin_remote_spark_interpreter_port #=> Integer
# resp.number_of_nodes #=> Integer
- # resp.worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.glue_version #=> String
# resp.number_of_workers #=> Integer
# resp.availability_zone #=> String
# resp.vpc_id #=> String
# resp.extra_python_libs_s3_path #=> String
@@ -2704,10 +2704,15 @@
# The default arguments for this job.
#
# You can specify arguments here that your own job-execution script
# consumes, as well as arguments that Glue itself consumes.
#
+ # Job arguments may be logged. Do not pass plaintext secrets as
+ # arguments. Retrieve secrets from a Glue Connection, Secrets Manager or
+ # other secret management mechanism if you intend to keep them within
+ # the Job.
+ #
# For information about how to specify and consume your own Job
# arguments, see the [Calling Glue APIs in Python][1] topic in the
# developer guide.
#
# For information about the key-value pairs that Glue consumes to set up
@@ -2730,11 +2735,11 @@
#
# @option params [Integer] :allocated_capacity
# This parameter is deprecated. Use `MaxCapacity` instead.
#
# The number of Glue data processing units (DPUs) to allocate to this
- # Job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is
+ # Job. You can allocate a minimum of 2 DPUs; the default is 10. A DPU is
# a relative measure of processing power that consists of 4 vCPUs of
# compute capacity and 16 GB of memory. For more information, see the
# [Glue pricing page][1].
#
#
@@ -2762,12 +2767,12 @@
# (`JobCommand.Name`="pythonshell"), you can allocate either 0.0625
# or 1 DPU. The default is 0.0625 DPU.
#
# * When you specify an Apache Spark ETL job
# (`JobCommand.Name`="glueetl") or Apache Spark streaming ETL job
- # (`JobCommand.Name`="gluestreaming"), you can allocate from 2 to
- # 100 DPUs. The default is 10 DPUs. This job type cannot have a
+ # (`JobCommand.Name`="gluestreaming"), you can allocate a minimum of
+ # 2 DPUs. The default is 10 DPUs. This job type cannot have a
# fractional DPU allocation.
#
# For Glue version 2.0 jobs, you cannot instead specify a `Maximum
# capacity`. Instead, you should specify a `Worker type` and the `Number
# of workers`.
@@ -2810,16 +2815,13 @@
#
# @option params [Integer] :number_of_workers
# The number of workers of a defined `workerType` that are allocated
# when a job runs.
#
- # The maximum number of workers you can define are 299 for `G.1X`, and
- # 149 for `G.2X`.
- #
# @option params [String] :worker_type
# The type of predefined worker that is allocated when a job runs.
- # Accepts a value of Standard, G.1X, or G.2X.
+ # Accepts a value of Standard, G.1X, G.2X, or G.025X.
#
# * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB
# of memory and a 50GB disk, and 2 executors per worker.
#
# * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPU, 16 GB
@@ -2828,10 +2830,15 @@
#
# * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPU, 32 GB
# of memory, 128 GB disk), and provides 1 executor per worker. We
# recommend this worker type for memory-intensive jobs.
#
+ # * For the `G.025X` worker type, each worker maps to 0.25 DPU (2 vCPU,
+ # 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We
+ # recommend this worker type for low volume streaming jobs. This
+ # worker type is only available for Glue version 3.0 streaming jobs.
+ #
# @option params [Hash<String,Types::CodeGenConfigurationNode>] :code_gen_configuration_nodes
# The representation of a directed acyclic graph on which both the Glue
# Studio visual component and Glue Studio code generation is based.
#
# @return [Types::CreateJobResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
@@ -2873,11 +2880,11 @@
# notification_property: {
# notify_delay_after: 1,
# },
# glue_version: "GlueVersionString",
# number_of_workers: 1,
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X
# code_gen_configuration_nodes: {
# "NodeId" => {
# athena_connector_source: {
# name: "NodeName", # required
# connection_name: "EnclosedInStringProperty", # required
@@ -3737,11 +3744,11 @@
# },
# },
# role: "RoleString", # required
# glue_version: "GlueVersionString",
# max_capacity: 1.0,
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X
# number_of_workers: 1,
# timeout: 1,
# max_retries: 1,
# tags: {
# "TagKey" => "TagValue",
@@ -4242,27 +4249,45 @@
#
# @option params [Types::ConnectionsList] :connections
# The number of connections to use for the session.
#
# @option params [Float] :max_capacity
- # The number of AWS Glue data processing units (DPUs) that can be
- # allocated when the job runs. A DPU is a relative measure of processing
- # power that consists of 4 vCPUs of compute capacity and 16 GB memory.
+ # The number of Glue data processing units (DPUs) that can be allocated
+ # when the job runs. A DPU is a relative measure of processing power
+ # that consists of 4 vCPUs of compute capacity and 16 GB memory.
#
# @option params [Integer] :number_of_workers
- # The number of workers to use for the session.
+ # The number of workers of a defined `WorkerType` to use for the
+ # session.
#
# @option params [String] :worker_type
- # The Worker Type. Can be one of G.1X, G.2X, Standard
+ # The type of predefined worker that is allocated to use for the
+ # session. Accepts a value of Standard, G.1X, G.2X, or G.025X.
#
+ # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB
+ # of memory and a 50GB disk, and 2 executors per worker.
+ #
+ # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPU, 16 GB
+ # of memory, 64 GB disk), and provides 1 executor per worker. We
+ # recommend this worker type for memory-intensive jobs.
+ #
+ # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPU, 32 GB
+ # of memory, 128 GB disk), and provides 1 executor per worker. We
+ # recommend this worker type for memory-intensive jobs.
+ #
+ # * For the `G.025X` worker type, each worker maps to 0.25 DPU (2 vCPU,
+ # 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We
+ # recommend this worker type for low volume streaming jobs. This
+ # worker type is only available for Glue version 3.0 streaming jobs.
+ #
# @option params [String] :security_configuration
# The name of the SecurityConfiguration structure to be used with the
    #   session.
#
# @option params [String] :glue_version
# The Glue version determines the versions of Apache Spark and Python
- # that AWS Glue supports. The GlueVersion must be greater than 2.0.
+ # that Glue supports. The GlueVersion must be greater than 2.0.
#
# @option params [Hash<String,String>] :tags
# The map of key value pairs (tags) belonging to the session.
#
# @option params [String] :request_origin
@@ -4290,11 +4315,11 @@
# connections: {
# connections: ["GenericString"],
# },
# max_capacity: 1.0,
# number_of_workers: 1,
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X
# security_configuration: "NameString",
# glue_version: "GlueVersionString",
# tags: {
# "TagKey" => "TagValue",
# },
@@ -6540,11 +6565,11 @@
# resp.dev_endpoint.yarn_endpoint_address #=> String
# resp.dev_endpoint.private_address #=> String
# resp.dev_endpoint.zeppelin_remote_spark_interpreter_port #=> Integer
# resp.dev_endpoint.public_address #=> String
# resp.dev_endpoint.status #=> String
- # resp.dev_endpoint.worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.dev_endpoint.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.dev_endpoint.glue_version #=> String
# resp.dev_endpoint.number_of_workers #=> Integer
# resp.dev_endpoint.number_of_nodes #=> Integer
# resp.dev_endpoint.availability_zone #=> String
# resp.dev_endpoint.vpc_id #=> String
@@ -6610,11 +6635,11 @@
# resp.dev_endpoints[0].yarn_endpoint_address #=> String
# resp.dev_endpoints[0].private_address #=> String
# resp.dev_endpoints[0].zeppelin_remote_spark_interpreter_port #=> Integer
# resp.dev_endpoints[0].public_address #=> String
# resp.dev_endpoints[0].status #=> String
- # resp.dev_endpoints[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.dev_endpoints[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.dev_endpoints[0].glue_version #=> String
# resp.dev_endpoints[0].number_of_workers #=> Integer
# resp.dev_endpoints[0].number_of_nodes #=> Integer
# resp.dev_endpoints[0].availability_zone #=> String
# resp.dev_endpoints[0].vpc_id #=> String
@@ -6676,11 +6701,11 @@
# resp.job.connections.connections[0] #=> String
# resp.job.max_retries #=> Integer
# resp.job.allocated_capacity #=> Integer
# resp.job.timeout #=> Integer
# resp.job.max_capacity #=> Float
- # resp.job.worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.job.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.job.number_of_workers #=> Integer
# resp.job.security_configuration #=> String
# resp.job.notification_property.notify_delay_after #=> Integer
# resp.job.glue_version #=> String
# resp.job.code_gen_configuration_nodes #=> Hash
@@ -7253,11 +7278,11 @@
# resp.job_run.predecessor_runs[0].run_id #=> String
# resp.job_run.allocated_capacity #=> Integer
# resp.job_run.execution_time #=> Integer
# resp.job_run.timeout #=> Integer
# resp.job_run.max_capacity #=> Float
- # resp.job_run.worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.job_run.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.job_run.number_of_workers #=> Integer
# resp.job_run.security_configuration #=> String
# resp.job_run.log_group_name #=> String
# resp.job_run.notification_property.notify_delay_after #=> Integer
# resp.job_run.glue_version #=> String
@@ -7318,11 +7343,11 @@
# resp.job_runs[0].predecessor_runs[0].run_id #=> String
# resp.job_runs[0].allocated_capacity #=> Integer
# resp.job_runs[0].execution_time #=> Integer
# resp.job_runs[0].timeout #=> Integer
# resp.job_runs[0].max_capacity #=> Float
- # resp.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.job_runs[0].number_of_workers #=> Integer
# resp.job_runs[0].security_configuration #=> String
# resp.job_runs[0].log_group_name #=> String
# resp.job_runs[0].notification_property.notify_delay_after #=> Integer
# resp.job_runs[0].glue_version #=> String
@@ -7381,11 +7406,11 @@
# resp.jobs[0].connections.connections[0] #=> String
# resp.jobs[0].max_retries #=> Integer
# resp.jobs[0].allocated_capacity #=> Integer
# resp.jobs[0].timeout #=> Integer
# resp.jobs[0].max_capacity #=> Float
- # resp.jobs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.jobs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.jobs[0].number_of_workers #=> Integer
# resp.jobs[0].security_configuration #=> String
# resp.jobs[0].notification_property.notify_delay_after #=> Integer
# resp.jobs[0].glue_version #=> String
# resp.jobs[0].code_gen_configuration_nodes #=> Hash
@@ -8096,11 +8121,11 @@
# resp.schema[0].name #=> String
# resp.schema[0].data_type #=> String
# resp.role #=> String
# resp.glue_version #=> String
# resp.max_capacity #=> Float
- # resp.worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.number_of_workers #=> Integer
# resp.timeout #=> Integer
# resp.max_retries #=> Integer
# resp.transform_encryption.ml_user_data_encryption.ml_user_data_encryption_mode #=> String, one of "DISABLED", "SSE-KMS"
# resp.transform_encryption.ml_user_data_encryption.kms_key_id #=> String
@@ -8204,11 +8229,11 @@
# resp.transforms[0].schema[0].name #=> String
# resp.transforms[0].schema[0].data_type #=> String
# resp.transforms[0].role #=> String
# resp.transforms[0].glue_version #=> String
# resp.transforms[0].max_capacity #=> Float
- # resp.transforms[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.transforms[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.transforms[0].number_of_workers #=> Integer
# resp.transforms[0].timeout #=> Integer
# resp.transforms[0].max_retries #=> Integer
# resp.transforms[0].transform_encryption.ml_user_data_encryption.ml_user_data_encryption_mode #=> String, one of "DISABLED", "SSE-KMS"
# resp.transforms[0].transform_encryption.ml_user_data_encryption.kms_key_id #=> String
@@ -10432,11 +10457,11 @@
# resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].predecessor_runs[0].run_id #=> String
# resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].allocated_capacity #=> Integer
# resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
# resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
# resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
- # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
# resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
# resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
# resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].notification_property.notify_delay_after #=> Integer
# resp.workflow.last_run.graph.nodes[0].job_details.job_runs[0].glue_version #=> String
@@ -10499,11 +10524,11 @@
# resp.workflow.graph.nodes[0].job_details.job_runs[0].predecessor_runs[0].run_id #=> String
# resp.workflow.graph.nodes[0].job_details.job_runs[0].allocated_capacity #=> Integer
# resp.workflow.graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
# resp.workflow.graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
# resp.workflow.graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
- # resp.workflow.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.workflow.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.workflow.graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
# resp.workflow.graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
# resp.workflow.graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
# resp.workflow.graph.nodes[0].job_details.job_runs[0].notification_property.notify_delay_after #=> Integer
# resp.workflow.graph.nodes[0].job_details.job_runs[0].glue_version #=> String
@@ -10617,11 +10642,11 @@
# resp.run.graph.nodes[0].job_details.job_runs[0].predecessor_runs[0].run_id #=> String
# resp.run.graph.nodes[0].job_details.job_runs[0].allocated_capacity #=> Integer
# resp.run.graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
# resp.run.graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
# resp.run.graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
- # resp.run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.run.graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.run.graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
# resp.run.graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
# resp.run.graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
# resp.run.graph.nodes[0].job_details.job_runs[0].notification_property.notify_delay_after #=> Integer
# resp.run.graph.nodes[0].job_details.job_runs[0].glue_version #=> String
@@ -10775,11 +10800,11 @@
# resp.runs[0].graph.nodes[0].job_details.job_runs[0].predecessor_runs[0].run_id #=> String
# resp.runs[0].graph.nodes[0].job_details.job_runs[0].allocated_capacity #=> Integer
# resp.runs[0].graph.nodes[0].job_details.job_runs[0].execution_time #=> Integer
# resp.runs[0].graph.nodes[0].job_details.job_runs[0].timeout #=> Integer
# resp.runs[0].graph.nodes[0].job_details.job_runs[0].max_capacity #=> Float
- # resp.runs[0].graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X"
+ # resp.runs[0].graph.nodes[0].job_details.job_runs[0].worker_type #=> String, one of "Standard", "G.1X", "G.2X", "G.025X"
# resp.runs[0].graph.nodes[0].job_details.job_runs[0].number_of_workers #=> Integer
# resp.runs[0].graph.nodes[0].job_details.job_runs[0].security_configuration #=> String
# resp.runs[0].graph.nodes[0].job_details.job_runs[0].log_group_name #=> String
# resp.runs[0].graph.nodes[0].job_details.job_runs[0].notification_property.notify_delay_after #=> Integer
# resp.runs[0].graph.nodes[0].job_details.job_runs[0].glue_version #=> String
@@ -11380,11 +11405,11 @@
def list_schemas(params = {}, options = {})
req = build_request(:list_schemas, params)
req.send_request(options)
end
- # Retrieve a session..
+ # Retrieve a list of sessions.
#
# @option params [String] :next_token
# The token for the next set of results, or null if there are no more
    #   results.
#
@@ -11455,10 +11480,11 @@
#
# @option params [String] :request_origin
# The origin of the request to list statements.
#
# @option params [String] :next_token
+ # A continuation token, if this is a continuation call.
#
# @return [Types::ListStatementsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListStatementsResponse#statements #statements} => Array<Types::Statement>
# * {Types::ListStatementsResponse#next_token #next_token} => String
@@ -12481,10 +12507,15 @@
# replace the default arguments set in the job definition itself.
#
# You can specify arguments here that your own job-execution script
# consumes, as well as arguments that Glue itself consumes.
#
+ # Job arguments may be logged. Do not pass plaintext secrets as
+ # arguments. Retrieve secrets from a Glue Connection, Secrets Manager or
+ # other secret management mechanism if you intend to keep them within
+ # the Job.
+ #
# For information about how to specify and consume your own Job
# arguments, see the [Calling Glue APIs in Python][1] topic in the
# developer guide.
#
# For information about the key-value pairs that Glue consumes to set up
@@ -12498,11 +12529,11 @@
#
# @option params [Integer] :allocated_capacity
# This field is deprecated. Use `MaxCapacity` instead.
#
# The number of Glue data processing units (DPUs) to allocate to this
- # JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU
+ # JobRun. You can allocate a minimum of 2 DPUs; the default is 10. A DPU
# is a relative measure of processing power that consists of 4 vCPUs of
# compute capacity and 16 GB of memory. For more information, see the
# [Glue pricing page][1].
#
#
@@ -12529,11 +12560,11 @@
# * When you specify a Python shell job
# (`JobCommand.Name`="pythonshell"), you can allocate either 0.0625
# or 1 DPU. The default is 0.0625 DPU.
#
# * When you specify an Apache Spark ETL job
- # (`JobCommand.Name`="glueetl"), you can allocate from 2 to 100
+ # (`JobCommand.Name`="glueetl"), you can allocate a minimum of 2
# DPUs. The default is 10 DPUs. This job type cannot have a fractional
# DPU allocation.
#
#
#
@@ -12546,28 +12577,30 @@
# @option params [Types::NotificationProperty] :notification_property
# Specifies configuration properties of a job run notification.
#
# @option params [String] :worker_type
# The type of predefined worker that is allocated when a job runs.
- # Accepts a value of Standard, G.1X, or G.2X.
+ # Accepts a value of Standard, G.1X, G.2X, or G.025X.
#
# * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB
# of memory and a 50GB disk, and 2 executors per worker.
#
# * For the `G.1X` worker type, each worker provides 4 vCPU, 16 GB of
# memory and a 64GB disk, and 1 executor per worker.
#
# * For the `G.2X` worker type, each worker provides 8 vCPU, 32 GB of
# memory and a 128GB disk, and 1 executor per worker.
#
+ # * For the `G.025X` worker type, each worker maps to 0.25 DPU (2 vCPU,
+ # 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We
+ # recommend this worker type for low volume streaming jobs. This
+ # worker type is only available for Glue version 3.0 streaming jobs.
+ #
# @option params [Integer] :number_of_workers
# The number of workers of a defined `workerType` that are allocated
# when a job runs.
#
- # The maximum number of workers you can define are 299 for `G.1X`, and
- # 149 for `G.2X`.
- #
# @return [Types::StartJobRunResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::StartJobRunResponse#job_run_id #job_run_id} => String
#
# @example Request syntax with placeholder values
@@ -12583,11 +12616,11 @@
# max_capacity: 1.0,
# security_configuration: "NameString",
# notification_property: {
# notify_delay_after: 1,
# },
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X
# number_of_workers: 1,
# })
#
# @example Response structure
#
@@ -13663,17 +13696,19 @@
def update_dev_endpoint(params = {}, options = {})
req = build_request(:update_dev_endpoint, params)
req.send_request(options)
end
- # Updates an existing job definition.
+ # Updates an existing job definition. The previous job definition is
+ # completely overwritten by this information.
#
# @option params [required, String] :job_name
# The name of the job definition to update.
#
# @option params [required, Types::JobUpdate] :job_update
# Specifies the values with which to update the job definition.
+ # Unspecified configuration is removed or reset to default values.
#
# @return [Types::UpdateJobResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::UpdateJobResponse#job_name #job_name} => String
#
@@ -13704,11 +13739,11 @@
# },
# max_retries: 1,
# allocated_capacity: 1,
# timeout: 1,
# max_capacity: 1.0,
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X
# number_of_workers: 1,
# security_configuration: "NameString",
# notification_property: {
# notify_delay_after: 1,
# },
@@ -14494,11 +14529,11 @@
# },
# },
# role: "RoleString",
# glue_version: "GlueVersionString",
# max_capacity: 1.0,
- # worker_type: "Standard", # accepts Standard, G.1X, G.2X
+ # worker_type: "Standard", # accepts Standard, G.1X, G.2X, G.025X
# number_of_workers: 1,
# timeout: 1,
# max_retries: 1,
# })
#
@@ -15055,10 +15090,10 @@
operation: config.api.operation(operation_name),
client: self,
params: params,
config: config)
context[:gem_name] = 'aws-sdk-glue'
- context[:gem_version] = '1.114.0'
+ context[:gem_version] = '1.115.0'
Seahorse::Client::Request.new(handlers, context)
end
# @api private
# @deprecated