lib/aws-sdk-autoscaling/client.rb in aws-sdk-autoscaling-1.81.0 vs lib/aws-sdk-autoscaling/client.rb in aws-sdk-autoscaling-1.82.0
- old
+ new
@@ -841,18 +841,16 @@
#
#
# [1]: https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-template.html
#
# @option params [Types::MixedInstancesPolicy] :mixed_instances_policy
- # An embedded object that specifies a mixed instances policy.
+ # The mixed instances policy. For more information, see [Auto Scaling
+ # groups with multiple instance types and purchase options][1] in the
+ # *Amazon EC2 Auto Scaling User Guide*.
#
- # For more information, see [Auto Scaling groups with multiple instance
- # types and purchase options][1] in the *Amazon EC2 Auto Scaling User
- # Guide*.
#
#
- #
# [1]: https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html
#
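
As a quick illustration of the option documented above, here is a minimal sketch (not part of the diff) of passing :mixed_instances_policy to create_auto_scaling_group. The group name, launch template name, and subnet IDs are hypothetical placeholders, and the distribution values are illustrative only.

    require 'aws-sdk-autoscaling'

    client = Aws::AutoScaling::Client.new

    client.create_auto_scaling_group(
      auto_scaling_group_name: 'example-asg',            # hypothetical name
      min_size: 1,
      max_size: 4,
      vpc_zone_identifier: 'subnet-1111,subnet-2222',    # hypothetical subnets
      mixed_instances_policy: {
        launch_template: {
          launch_template_specification: {
            launch_template_name: 'example-template',    # hypothetical template
            version: '$Latest'
          }
        },
        instances_distribution: {
          on_demand_base_capacity: 1,
          on_demand_percentage_above_base_capacity: 50,
          spot_allocation_strategy: 'capacity-optimized'
        }
      }
    )
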
# @option params [String] :instance_id
# The ID of the instance used to base the launch configuration on. If
# specified, Amazon EC2 Auto Scaling uses the configuration values from
@@ -1254,10 +1252,15 @@
# accelerator_names: ["a100"], # accepts a100, v100, k80, t4, m60, radeon-pro-v520, vu9p
# accelerator_total_memory_mi_b: {
# min: 1,
# max: 1,
# },
+ # network_bandwidth_gbps: {
+ # min: 1.0,
+ # max: 1.0,
+ # },
+ # allowed_instance_types: ["AllowedInstanceType"],
# },
# },
# ],
# },
# instances_distribution: {
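
The hunk above adds two instance_requirements attributes, network_bandwidth_gbps and allowed_instance_types, to the request syntax. Below is a hedged sketch of an overrides entry that uses them; the vCPU, memory, bandwidth bounds, and instance-type patterns are illustrative assumptions, not defaults.

    # Illustrative entry for mixed_instances_policy[:launch_template][:overrides]
    overrides = [
      {
        instance_requirements: {
          v_cpu_count: { min: 2, max: 8 },                  # required by the API
          memory_mi_b: { min: 4096 },                       # required by the API
          network_bandwidth_gbps: { min: 5.0, max: 25.0 },  # new in 1.82.0
          allowed_instance_types: ['m5.*', 'c5.*']          # new in 1.82.0
        }
      }
    ]
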
@@ -1328,14 +1331,25 @@
# Scaling][1] in the *Amazon EC2 Auto Scaling User Guide*.
#
# For more information, see [Launch configurations][2] in the *Amazon
# EC2 Auto Scaling User Guide*.
#
+ # <note markdown="1"> Amazon EC2 Auto Scaling configures instances launched as part of an
+ # Auto Scaling group using either a launch template or a launch
+ # configuration. We strongly recommend that you do not use launch
+ # configurations. They do not provide full functionality for Amazon EC2
+ # Auto Scaling or Amazon EC2. For information about using launch
+ # templates, see [Launch templates][3] in the *Amazon EC2 Auto Scaling
+ # User Guide*.
#
+ # </note>
#
+ #
+ #
# [1]: https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-quotas.html
# [2]: https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html
+ # [3]: https://docs.aws.amazon.com/autoscaling/ec2/userguide/launch-templates.html
#
# @option params [required, String] :launch_configuration_name
# The name of the launch configuration. This name must be unique per
# Region per account.
#
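
Since the new note steers users toward launch templates, a minimal sketch of creating a group from an existing launch template (rather than a launch configuration) may help; the group name, template name, and Availability Zone are hypothetical.

    require 'aws-sdk-autoscaling'

    client = Aws::AutoScaling::Client.new

    client.create_auto_scaling_group(
      auto_scaling_group_name: 'example-asg',       # hypothetical name
      launch_template: {
        launch_template_name: 'example-template',   # hypothetical, must already exist
        version: '$Latest'
      },
      min_size: 1,
      max_size: 2,
      availability_zones: ['us-east-1a']            # hypothetical AZ
    )
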
@@ -1368,31 +1382,15 @@
#
#
# [1]: https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html
#
# @option params [String] :classic_link_vpc_id
- # *EC2-Classic retires on August 15, 2022. This property is not
- # supported after that date.*
+ # Available for backward compatibility.
#
- # The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances
- # to. For more information, see [ClassicLink][1] in the *Amazon EC2 User
- # Guide for Linux Instances*.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html
- #
# @option params [Array<String>] :classic_link_vpc_security_groups
- # *EC2-Classic retires on August 15, 2022. This property is not
- # supported after that date.*
+ # Available for backward compatibility.
#
- # The IDs of one or more security groups for the specified
- # ClassicLink-enabled VPC.
- #
- # If you specify the `ClassicLinkVPCId` property, you must specify
- # `ClassicLinkVPCSecurityGroups`.
- #
# @option params [String] :user_data
# The user data to make available to the launched EC2 instances. For
# more information, see [Instance metadata and user data][1] (Linux) and
# [Instance metadata and user data][2] (Windows). If you are using a
# command line tool, base64-encoding is performed for you, and you can
@@ -1718,24 +1716,34 @@
# Deletes the specified Auto Scaling group.
#
# If the group has instances or scaling activities in progress, you must
# specify the option to force the deletion in order for it to succeed.
+ # The force delete operation will also terminate the EC2 instances. If
+ # the group has a warm pool, the force delete option also deletes the
+ # warm pool.
#
- # If the group has policies, deleting the group deletes the policies,
- # the underlying alarm actions, and any alarm that no longer has an
- # associated action.
- #
# To remove instances from the Auto Scaling group before deleting it,
# call the DetachInstances API with the list of instances and the option
# to decrement the desired capacity. This ensures that Amazon EC2 Auto
# Scaling does not launch replacement instances.
#
# To terminate all instances before deleting the Auto Scaling group,
# call the UpdateAutoScalingGroup API and set the minimum size and
# desired capacity of the Auto Scaling group to zero.
#
+ # If the group has scaling policies, deleting the group deletes the
+ # policies, the underlying alarm actions, and any alarm that no longer
+ # has an associated action.
+ #
+ # For more information, see [Delete your Auto Scaling infrastructure][1]
+ # in the *Amazon EC2 Auto Scaling User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-process-shutdown.html
+ #
# @option params [required, String] :auto_scaling_group_name
# The name of the Auto Scaling group.
#
# @option params [Boolean] :force_delete
# Specifies that the group is to be deleted along with all instances
@@ -2319,10 +2327,14 @@
# resp.auto_scaling_groups[0].mixed_instances_policy.launch_template.overrides[0].instance_requirements.accelerator_manufacturers[0] #=> String, one of "nvidia", "amd", "amazon-web-services", "xilinx"
# resp.auto_scaling_groups[0].mixed_instances_policy.launch_template.overrides[0].instance_requirements.accelerator_names #=> Array
# resp.auto_scaling_groups[0].mixed_instances_policy.launch_template.overrides[0].instance_requirements.accelerator_names[0] #=> String, one of "a100", "v100", "k80", "t4", "m60", "radeon-pro-v520", "vu9p"
# resp.auto_scaling_groups[0].mixed_instances_policy.launch_template.overrides[0].instance_requirements.accelerator_total_memory_mi_b.min #=> Integer
# resp.auto_scaling_groups[0].mixed_instances_policy.launch_template.overrides[0].instance_requirements.accelerator_total_memory_mi_b.max #=> Integer
+ # resp.auto_scaling_groups[0].mixed_instances_policy.launch_template.overrides[0].instance_requirements.network_bandwidth_gbps.min #=> Float
+ # resp.auto_scaling_groups[0].mixed_instances_policy.launch_template.overrides[0].instance_requirements.network_bandwidth_gbps.max #=> Float
+ # resp.auto_scaling_groups[0].mixed_instances_policy.launch_template.overrides[0].instance_requirements.allowed_instance_types #=> Array
+ # resp.auto_scaling_groups[0].mixed_instances_policy.launch_template.overrides[0].instance_requirements.allowed_instance_types[0] #=> String
# resp.auto_scaling_groups[0].mixed_instances_policy.instances_distribution.on_demand_allocation_strategy #=> String
# resp.auto_scaling_groups[0].mixed_instances_policy.instances_distribution.on_demand_base_capacity #=> Integer
# resp.auto_scaling_groups[0].mixed_instances_policy.instances_distribution.on_demand_percentage_above_base_capacity #=> Integer
# resp.auto_scaling_groups[0].mixed_instances_policy.instances_distribution.spot_allocation_strategy #=> String
# resp.auto_scaling_groups[0].mixed_instances_policy.instances_distribution.spot_instance_pools #=> Integer
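
For reference, a short sketch of reading the new response attributes from describe_auto_scaling_groups, assuming client is an Aws::AutoScaling::Client and a hypothetical group name. The attributes are nil for groups that do not use instance_requirements.

    resp = client.describe_auto_scaling_groups(
      auto_scaling_group_names: ['example-asg']    # hypothetical group name
    )
    group     = resp.auto_scaling_groups.first
    overrides = group&.mixed_instances_policy&.launch_template&.overrides || []
    reqs      = overrides.first&.instance_requirements
    if reqs
      puts reqs.network_bandwidth_gbps&.min        # Float, or nil if unset
      puts reqs.allowed_instance_types.inspect     # e.g. ["m5.*"], or nil if unset
    end
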
@@ -2688,10 +2700,14 @@
# resp.instance_refreshes[0].desired_configuration.mixed_instances_policy.launch_template.overrides[0].instance_requirements.accelerator_manufacturers[0] #=> String, one of "nvidia", "amd", "amazon-web-services", "xilinx"
# resp.instance_refreshes[0].desired_configuration.mixed_instances_policy.launch_template.overrides[0].instance_requirements.accelerator_names #=> Array
# resp.instance_refreshes[0].desired_configuration.mixed_instances_policy.launch_template.overrides[0].instance_requirements.accelerator_names[0] #=> String, one of "a100", "v100", "k80", "t4", "m60", "radeon-pro-v520", "vu9p"
# resp.instance_refreshes[0].desired_configuration.mixed_instances_policy.launch_template.overrides[0].instance_requirements.accelerator_total_memory_mi_b.min #=> Integer
# resp.instance_refreshes[0].desired_configuration.mixed_instances_policy.launch_template.overrides[0].instance_requirements.accelerator_total_memory_mi_b.max #=> Integer
+ # resp.instance_refreshes[0].desired_configuration.mixed_instances_policy.launch_template.overrides[0].instance_requirements.network_bandwidth_gbps.min #=> Float
+ # resp.instance_refreshes[0].desired_configuration.mixed_instances_policy.launch_template.overrides[0].instance_requirements.network_bandwidth_gbps.max #=> Float
+ # resp.instance_refreshes[0].desired_configuration.mixed_instances_policy.launch_template.overrides[0].instance_requirements.allowed_instance_types #=> Array
+ # resp.instance_refreshes[0].desired_configuration.mixed_instances_policy.launch_template.overrides[0].instance_requirements.allowed_instance_types[0] #=> String
# resp.instance_refreshes[0].desired_configuration.mixed_instances_policy.instances_distribution.on_demand_allocation_strategy #=> String
# resp.instance_refreshes[0].desired_configuration.mixed_instances_policy.instances_distribution.on_demand_base_capacity #=> Integer
# resp.instance_refreshes[0].desired_configuration.mixed_instances_policy.instances_distribution.on_demand_percentage_above_base_capacity #=> Integer
# resp.instance_refreshes[0].desired_configuration.mixed_instances_policy.instances_distribution.spot_allocation_strategy #=> String
# resp.instance_refreshes[0].desired_configuration.mixed_instances_policy.instances_distribution.spot_instance_pools #=> Integer
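
Similarly, the new attributes can be inspected on an instance refresh's desired configuration; a hedged sketch, again assuming client is an Aws::AutoScaling::Client and a hypothetical group name:

    resp   = client.describe_instance_refreshes(
      auto_scaling_group_name: 'example-asg'       # hypothetical group name
    )
    config = resp.instance_refreshes.first&.desired_configuration
    policy = config&.mixed_instances_policy
    (policy&.launch_template&.overrides || []).each do |override|
      puts override.instance_requirements&.allowed_instance_types.inspect
    end
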
@@ -5978,10 +5994,15 @@
# accelerator_names: ["a100"], # accepts a100, v100, k80, t4, m60, radeon-pro-v520, vu9p
# accelerator_total_memory_mi_b: {
# min: 1,
# max: 1,
# },
+ # network_bandwidth_gbps: {
+ # min: 1.0,
+ # max: 1.0,
+ # },
+ # allowed_instance_types: ["AllowedInstanceType"],
# },
# },
# ],
# },
# instances_distribution: {
@@ -6226,13 +6247,13 @@
# The launch template and version to use to specify the updates. If you
# specify `LaunchTemplate` in your update request, you can't specify
# `LaunchConfigurationName` or `MixedInstancesPolicy`.
#
# @option params [Types::MixedInstancesPolicy] :mixed_instances_policy
- # An embedded object that specifies a mixed instances policy. For more
- # information, see [Auto Scaling groups with multiple instance types and
- # purchase options][1] in the *Amazon EC2 Auto Scaling User Guide*.
+ # The mixed instances policy. For more information, see [Auto Scaling
+ # groups with multiple instance types and purchase options][1] in the
+ # *Amazon EC2 Auto Scaling User Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html
#
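
The same option applies on update. A minimal sketch of changing a group's mixed instances policy with update_auto_scaling_group follows; the group and template names are hypothetical and the requirement values are illustrative.

    client.update_auto_scaling_group(
      auto_scaling_group_name: 'example-asg',          # hypothetical group name
      mixed_instances_policy: {
        launch_template: {
          launch_template_specification: {
            launch_template_name: 'example-template',  # hypothetical template
            version: '$Latest'
          },
          overrides: [
            {
              instance_requirements: {
                v_cpu_count: { min: 2 },
                memory_mi_b: { min: 4096 },
                allowed_instance_types: ['m5.*']       # new in 1.82.0
              }
            }
          ]
        }
      }
    )
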
@@ -6500,10 +6521,15 @@
# accelerator_names: ["a100"], # accepts a100, v100, k80, t4, m60, radeon-pro-v520, vu9p
# accelerator_total_memory_mi_b: {
# min: 1,
# max: 1,
# },
+ # network_bandwidth_gbps: {
+ # min: 1.0,
+ # max: 1.0,
+ # },
+ # allowed_instance_types: ["AllowedInstanceType"],
# },
# },
# ],
# },
# instances_distribution: {
@@ -6554,10 +6580,10 @@
operation: config.api.operation(operation_name),
client: self,
params: params,
config: config)
context[:gem_name] = 'aws-sdk-autoscaling'
- context[:gem_version] = '1.81.0'
+ context[:gem_version] = '1.82.0'
Seahorse::Client::Request.new(handlers, context)
end
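
To confirm which release is installed at runtime, the gem's version constant can be checked; the constant name follows the usual AWS SDK for Ruby convention, so treat it as an assumption if your gem layout differs.

    require 'aws-sdk-autoscaling'

    # Expected to print "1.82.0" once the upgraded gem is active.
    puts Aws::AutoScaling::GEM_VERSION
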
# Polls an API operation until a resource enters a desired state.
#