lib/aws-sdk-emr/client.rb in aws-sdk-emr-1.25.0 vs lib/aws-sdk-emr/client.rb in aws-sdk-emr-1.26.0
- old
+ new
@@ -30,15 +30,15 @@
Aws::Plugins::GlobalConfiguration.add_identifier(:emr)
module Aws::EMR
# An API client for EMR. To construct a client, you need to configure a `:region` and `:credentials`.
#
- # client = Aws::EMR::Client.new(
- # region: region_name,
- # credentials: credentials,
- # # ...
- # )
+ # client = Aws::EMR::Client.new(
+ # region: region_name,
+ # credentials: credentials,
+ # # ...
+ # )
#
# For details on configuring region and credentials see
# the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html).
#
# See {#initialize} for a full list of supported configuration options.
@@ -227,20 +227,24 @@
# used by the default backoff function. This option is only used in the
# `legacy` retry mode.
#
# @option options [String] :retry_mode ("legacy")
# Specifies which retry algorithm to use. Values are:
- # * `legacy` - The pre-existing retry behavior. This is the default value if
- # no retry mode is provided.
- # * `standard` - A standardized set of retry rules across the AWS SDKs.
- # This includes support for retry quotas, which limit the number of
- # unsuccessful retries a client can make.
- # * `adaptive` - An experimental retry mode that includes all the
- # functionality of `standard` mode along with automatic client side
- # throttling. This is a provisional mode that may change behavior
- # in the future.
#
+ # * `legacy` - The pre-existing retry behavior. This is the default value if
+ # no retry mode is provided.
+ #
+ # * `standard` - A standardized set of retry rules across the AWS SDKs.
+ # This includes support for retry quotas, which limit the number of
+ # unsuccessful retries a client can make.
+ #
+ # * `adaptive` - An experimental retry mode that includes all the
+ # functionality of `standard` mode along with automatic client side
+ # throttling. This is a provisional mode that may change behavior
+ # in the future.
+ #
+ #
# @option options [String] :secret_access_key
#
# @option options [String] :session_token
#
# @option options [Boolean] :simple_json (false)
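An illustrative aside, not part of either gem version: the retry modes described above are selected through the client constructor. The sketch below assumes a placeholder region and placeholder credentials; `:retry_mode` is the constructor option documented in the hunk above.

    require 'aws-sdk-emr'

    # Hypothetical configuration: opt in to the `standard` retry mode instead
    # of the default `legacy` behavior. Region and credentials are placeholders.
    emr = Aws::EMR::Client.new(
      region: 'us-east-1',
      credentials: Aws::Credentials.new('EXAMPLE_AKID', 'EXAMPLE_SECRET'),
      retry_mode: 'standard' # or 'legacy' / 'adaptive'
    )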
@@ -273,12 +277,11 @@
# seconds to wait when opening an HTTP session before raising a
# `Timeout::Error`.
#
# @option options [Integer] :http_read_timeout (60) The default
# number of seconds to wait for response data. This value can
- # safely be set
- # per-request on the session yielded by {#session_for}.
+ # safely be set per-request on the session.
#
# @option options [Float] :http_idle_timeout (5) The number of
# seconds a connection is allowed to sit idle before it is
# considered stale. Stale connections are closed and removed
# from the pool before making a request.
@@ -286,11 +289,11 @@
# @option options [Float] :http_continue_timeout (1) The number of
# seconds to wait for a 100-continue response before sending the
# request body. This option has no effect unless the request has
# "Expect" header set to "100-continue". Defaults to `nil` which
# disables this behaviour. This value can safely be set per
- # request on the session yielded by {#session_for}.
+ # request on the session.
#
# @option options [Boolean] :http_wire_trace (false) When `true`,
# HTTP debug output will be sent to the `:logger`.
#
# @option options [Boolean] :ssl_verify_peer (true) When `true`,
@@ -814,13 +817,19 @@
# resp.cluster.kerberos_attributes.kdc_admin_password #=> String
# resp.cluster.kerberos_attributes.cross_realm_trust_principal_password #=> String
# resp.cluster.kerberos_attributes.ad_domain_join_user #=> String
# resp.cluster.kerberos_attributes.ad_domain_join_password #=> String
# resp.cluster.cluster_arn #=> String
- # resp.cluster.step_concurrency_level #=> Integer
# resp.cluster.outpost_arn #=> String
+ # resp.cluster.step_concurrency_level #=> Integer
#
+ #
+ # The following waiters are defined for this operation (see {Client#wait_until} for detailed usage):
+ #
+ # * cluster_running
+ # * cluster_terminated
+ #
# @see http://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/DescribeCluster AWS API Documentation
#
# @overload describe_cluster(params = {})
# @param [Hash] params ({})
def describe_cluster(params = {}, options = {})
@@ -1025,10 +1034,15 @@
# resp.step.status.failure_details.log_file #=> String
# resp.step.status.timeline.creation_date_time #=> Time
# resp.step.status.timeline.start_date_time #=> Time
# resp.step.status.timeline.end_date_time #=> Time
#
+ #
+ # The following waiters are defined for this operation (see {Client#wait_until} for detailed usage):
+ #
+ # * step_complete
+ #
# @see http://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/DescribeStep AWS API Documentation
#
# @overload describe_step(params = {})
# @param [Hash] params ({})
def describe_step(params = {}, options = {})
@@ -1066,10 +1080,42 @@
def get_block_public_access_configuration(params = {}, options = {})
req = build_request(:get_block_public_access_configuration, params)
req.send_request(options)
end

+ # Fetches the attached managed scaling policy for an Amazon EMR cluster.
+ #
+ # @option params [required, String] :cluster_id
+ # Specifies the ID of the cluster for which the managed scaling policy
+ # will be fetched.
+ #
+ # @return [Types::GetManagedScalingPolicyOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::GetManagedScalingPolicyOutput#managed_scaling_policy #managed_scaling_policy} => Types::ManagedScalingPolicy
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.get_managed_scaling_policy({
+ # cluster_id: "ClusterId", # required
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.managed_scaling_policy.compute_limits.unit_type #=> String, one of "InstanceFleetUnits", "Instances", "VCPU"
+ # resp.managed_scaling_policy.compute_limits.minimum_capacity_units #=> Integer
+ # resp.managed_scaling_policy.compute_limits.maximum_capacity_units #=> Integer
+ # resp.managed_scaling_policy.compute_limits.maximum_on_demand_capacity_units #=> Integer
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/GetManagedScalingPolicy AWS API Documentation
+ #
+ # @overload get_managed_scaling_policy(params = {})
+ # @param [Hash] params ({})
+ def get_managed_scaling_policy(params = {}, options = {})
+ req = build_request(:get_managed_scaling_policy, params)
+ req.send_request(options)
+ end
+
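A usage sketch for the new get_managed_scaling_policy operation added above (illustrative only, not generated client source). The cluster ID is a placeholder, credentials are assumed to come from the default provider chain, and the nil check assumes a cluster without an attached policy returns an empty field.

    require 'aws-sdk-emr'

    emr = Aws::EMR::Client.new(region: 'us-east-1') # credentials from the default chain

    # "j-EXAMPLE" is a placeholder cluster ID.
    resp = emr.get_managed_scaling_policy(cluster_id: 'j-EXAMPLE')
    policy = resp.managed_scaling_policy

    if policy
      limits = policy.compute_limits
      puts "#{limits.minimum_capacity_units}-#{limits.maximum_capacity_units} #{limits.unit_type}"
    else
      puts 'no managed scaling policy attached'
    end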
# Provides information about the bootstrap actions associated with a
# cluster.
#
# @option params [required, String] :cluster_id
# The cluster identifier for the bootstrap actions to list.
@@ -1081,10 +1127,12 @@
# @return [Types::ListBootstrapActionsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListBootstrapActionsOutput#bootstrap_actions #bootstrap_actions} => Array<Types::Command>
# * {Types::ListBootstrapActionsOutput#marker #marker} => String
#
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
# @example Request syntax with placeholder values
#
# resp = client.list_bootstrap_actions({
# cluster_id: "ClusterId", # required
# marker: "Marker",
@@ -1132,10 +1180,12 @@
# @return [Types::ListClustersOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListClustersOutput#clusters #clusters} => Array<Types::ClusterSummary>
# * {Types::ListClustersOutput#marker #marker} => String
#
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
# @example Request syntax with placeholder values
#
# resp = client.list_clusters({
# created_after: Time.now,
# created_before: Time.now,
@@ -1185,10 +1235,12 @@
# @return [Types::ListInstanceFleetsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListInstanceFleetsOutput#instance_fleets #instance_fleets} => Array<Types::InstanceFleet>
# * {Types::ListInstanceFleetsOutput#marker #marker} => String
#
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
# @example Request syntax with placeholder values
#
# resp = client.list_instance_fleets({
# cluster_id: "ClusterId", # required
# marker: "Marker",
@@ -1252,10 +1304,12 @@
# @return [Types::ListInstanceGroupsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListInstanceGroupsOutput#instance_groups #instance_groups} => Array<Types::InstanceGroup>
# * {Types::ListInstanceGroupsOutput#marker #marker} => String
#
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
# @example Request syntax with placeholder values
#
# resp = client.list_instance_groups({
# cluster_id: "ClusterId", # required
# marker: "Marker",
@@ -1368,10 +1422,12 @@
# @return [Types::ListInstancesOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListInstancesOutput#instances #instances} => Array<Types::Instance>
# * {Types::ListInstancesOutput#marker #marker} => String
#
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
# @example Request syntax with placeholder values
#
# resp = client.list_instances({
# cluster_id: "ClusterId", # required
# instance_group_id: "InstanceGroupId",
@@ -1427,10 +1483,12 @@
# @return [Types::ListSecurityConfigurationsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListSecurityConfigurationsOutput#security_configurations #security_configurations} => Array<Types::SecurityConfigurationSummary>
# * {Types::ListSecurityConfigurationsOutput#marker #marker} => String
#
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
# @example Request syntax with placeholder values
#
# resp = client.list_security_configurations({
# marker: "Marker",
# })
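The pageable-response notes added throughout this release all follow the same usage pattern; the sketch below is illustrative rather than gem source, assumes default-chain credentials, and uses list_security_configurations from the hunk above. Each yielded page is a full response, and the `marker` token is followed automatically between pages.

    require 'aws-sdk-emr'

    emr = Aws::EMR::Client.new(region: 'us-east-1') # credentials from the default chain

    # The response is Enumerable; #each yields one response page at a time.
    emr.list_security_configurations.each do |page|
      page.security_configurations.each do |config|
        puts "#{config.name} (created #{config.creation_date_time})"
      end
    end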
@@ -1473,10 +1531,12 @@
# @return [Types::ListStepsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListStepsOutput#steps #steps} => Array<Types::StepSummary>
# * {Types::ListStepsOutput#marker #marker} => String
#
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
# @example Request syntax with placeholder values
#
# resp = client.list_steps({
# cluster_id: "ClusterId", # required
# step_states: ["PENDING"], # accepts PENDING, CANCEL_PENDING, RUNNING, COMPLETED, CANCELLED, FAILED, INTERRUPTED
@@ -1769,10 +1829,18 @@
# `BlockPublicAccessConfiguration`. By default, Port 22 (SSH) is an
# exception, and public access is allowed on this port. You can change
# this by updating `BlockPublicSecurityGroupRules` to remove the
# exception.
#
+ # <note markdown="1"> For accounts that created clusters in a Region before November 25,
+ # 2019, block public access is disabled by default in that Region. To
+ # use this feature, you must manually enable and configure it. For
+ # accounts that did not create an EMR cluster in a Region before this
+ # date, block public access is enabled by default in that Region.
+ #
+ # </note>
+ #
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.put_block_public_access_configuration({
@@ -1794,10 +1862,48 @@
def put_block_public_access_configuration(params = {}, options = {})
req = build_request(:put_block_public_access_configuration, params)
req.send_request(options)
end

+ # Creates or updates a managed scaling policy for an Amazon EMR cluster.
+ # The managed scaling policy defines the limits for resources, such as
+ # EC2 instances that can be added or terminated from a cluster. The
+ # policy only applies to the core and task nodes. The master node cannot
+ # be scaled after initial configuration.
+ #
+ # @option params [required, String] :cluster_id
+ # Specifies the ID of an EMR cluster where the managed scaling policy is
+ # attached.
+ #
+ # @option params [required, Types::ManagedScalingPolicy] :managed_scaling_policy
+ # Specifies the constraints for the managed scaling policy.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.put_managed_scaling_policy({
+ # cluster_id: "ClusterId", # required
+ # managed_scaling_policy: { # required
+ # compute_limits: {
+ # unit_type: "InstanceFleetUnits", # required, accepts InstanceFleetUnits, Instances, VCPU
+ # minimum_capacity_units: 1, # required
+ # maximum_capacity_units: 1, # required
+ # maximum_on_demand_capacity_units: 1,
+ # },
+ # },
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/PutManagedScalingPolicy AWS API Documentation
+ #
+ # @overload put_managed_scaling_policy(params = {})
+ # @param [Hash] params ({})
+ def put_managed_scaling_policy(params = {}, options = {})
+ req = build_request(:put_managed_scaling_policy, params)
+ req.send_request(options)
+ end
+
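A sketch of attaching the policy shown in the request syntax above (illustrative only, not gem source). The cluster ID and capacity figures are placeholders, and credentials are assumed to come from the default chain.

    require 'aws-sdk-emr'

    emr = Aws::EMR::Client.new(region: 'us-east-1') # credentials from the default chain

    # Hypothetical attach: keep the cluster between 2 and 10 instances and
    # allow at most 5 of them to be On-Demand; "j-EXAMPLE" is a placeholder ID.
    emr.put_managed_scaling_policy(
      cluster_id: 'j-EXAMPLE',
      managed_scaling_policy: {
        compute_limits: {
          unit_type: 'Instances',
          minimum_capacity_units: 2,
          maximum_capacity_units: 10,
          maximum_on_demand_capacity_units: 5
        }
      }
    )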
# Removes an automatic scaling policy from a specified instance group
# within an EMR cluster.
#
# @option params [required, String] :cluster_id
# Specifies the ID of a cluster. The instance group to which the
@@ -1823,10 +1929,33 @@
def remove_auto_scaling_policy(params = {}, options = {})
req = build_request(:remove_auto_scaling_policy, params)
req.send_request(options)
end

+ # Removes a managed scaling policy from a specified EMR cluster.
+ #
+ # @option params [required, String] :cluster_id
+ # Specifies the ID of the cluster from which the managed scaling policy
+ # will be removed.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.remove_managed_scaling_policy({
+ # cluster_id: "ClusterId", # required
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/RemoveManagedScalingPolicy AWS API Documentation
+ #
+ # @overload remove_managed_scaling_policy(params = {})
+ # @param [Hash] params ({})
+ def remove_managed_scaling_policy(params = {}, options = {})
+ req = build_request(:remove_managed_scaling_policy, params)
+ req.send_request(options)
+ end
+
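And the matching detach for the operation above, again an illustrative sketch with a placeholder cluster ID and default-chain credentials.

    require 'aws-sdk-emr'

    emr = Aws::EMR::Client.new(region: 'us-east-1') # credentials from the default chain
    emr.remove_managed_scaling_policy(cluster_id: 'j-EXAMPLE') # placeholder ID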
# Removes tags from an Amazon EMR resource. Tags make it easier to
# associate clusters in various ways, such as grouping clusters to track
# your Amazon EMR resource allocation costs. For more information, see
# [Tag Clusters][1].
#
@@ -2097,10 +2226,13 @@
#
# @option params [Integer] :step_concurrency_level
# Specifies the number of steps that can be executed concurrently. The
# default value is `1`. The maximum value is `256`.
#
+ # @option params [Types::ManagedScalingPolicy] :managed_scaling_policy
+ # The specified managed scaling policy for an Amazon EMR cluster.
+ #
# @return [Types::RunJobFlowOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::RunJobFlowOutput#job_flow_id #job_flow_id} => String
# * {Types::RunJobFlowOutput#cluster_arn #cluster_arn} => String
#
@@ -2326,10 +2458,18 @@
# cross_realm_trust_principal_password: "XmlStringMaxLen256",
# ad_domain_join_user: "XmlStringMaxLen256",
# ad_domain_join_password: "XmlStringMaxLen256",
# },
# step_concurrency_level: 1,
+ # managed_scaling_policy: {
+ # compute_limits: {
+ # unit_type: "InstanceFleetUnits", # required, accepts InstanceFleetUnits, Instances, VCPU
+ # minimum_capacity_units: 1, # required
+ # maximum_capacity_units: 1, # required
+ # maximum_on_demand_capacity_units: 1,
+ # },
+ # },
# })
#
# @example Response structure
#
# resp.job_flow_id #=> String
@@ -2478,11 +2618,11 @@
operation: config.api.operation(operation_name),
client: self,
params: params,
config: config)
context[:gem_name] = 'aws-sdk-emr'
- context[:gem_version] = '1.25.0'
+ context[:gem_version] = '1.26.0'
Seahorse::Client::Request.new(handlers, context)
end
# Polls an API operation until a resource enters a desired state.
#
@@ -2544,14 +2684,14 @@
# ## Valid Waiters
#
# The following table lists the valid waiter names, the operations they call,
# and the default `:delay` and `:max_attempts` values.
#
- # | waiter_name | params | :delay | :max_attempts |
- # | ------------------ | ------------------- | -------- | ------------- |
- # | cluster_running | {#describe_cluster} | 30 | 60 |
- # | cluster_terminated | {#describe_cluster} | 30 | 60 |
- # | step_complete | {#describe_step} | 30 | 60 |
+ # | waiter_name | params | :delay | :max_attempts |
+ # | ------------------ | ------------------------- | -------- | ------------- |
+ # | cluster_running | {Client#describe_cluster} | 30 | 60 |
+ # | cluster_terminated | {Client#describe_cluster} | 30 | 60 |
+ # | step_complete | {Client#describe_step} | 30 | 60 |
#
# @raise [Errors::FailureStateError] Raised when the waiter terminates
# because the waiter has entered a state that it will not transition
# out of, preventing success.
#
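To close, a sketch of driving the waiters listed in the table above (illustrative only, not gem source). The cluster ID is a placeholder, and the `:delay`/`:max_attempts` overrides simply illustrate tuning the documented defaults of 30 seconds and 60 attempts.

    require 'aws-sdk-emr'

    emr = Aws::EMR::Client.new(region: 'us-east-1') # credentials from the default chain

    begin
      # Poll describe_cluster until the cluster reports RUNNING or WAITING,
      # checking every 15 seconds for up to 40 attempts.
      emr.wait_until(:cluster_running, { cluster_id: 'j-EXAMPLE' },
                     delay: 15, max_attempts: 40)
      puts 'cluster is running'
    rescue Aws::Waiters::Errors::WaiterFailed => e
      warn "gave up waiting for the cluster: #{e.message}"
    end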