lib/aws-sdk-ecs/client.rb in aws-sdk-ecs-1.167.0 vs lib/aws-sdk-ecs/client.rb in aws-sdk-ecs-1.168.0

- old (aws-sdk-ecs 1.167.0)
+ new (aws-sdk-ecs 1.168.0)

@@ -560,24 +560,25 @@ # Creates a new Amazon ECS cluster. By default, your account receives a # `default` cluster when you launch your first container instance. # However, you can create your own cluster with a unique name. # - # <note markdown="1"> When you call the CreateCluster API operation, Amazon ECS attempts to - # create the Amazon ECS service-linked role for your account. This is so - # that it can manage required resources in other Amazon Web Services - # services on your behalf. However, if the user that makes the call - # doesn't have permissions to create the service-linked role, it isn't - # created. For more information, see [Using service-linked roles for - # Amazon ECS][1] in the *Amazon Elastic Container Service Developer - # Guide*. + # <note markdown="1"> When you call the [CreateCluster][1] API operation, Amazon ECS + # attempts to create the Amazon ECS service-linked role for your + # account. This is so that it can manage required resources in other + # Amazon Web Services services on your behalf. However, if the user that + # makes the call doesn't have permissions to create the service-linked + # role, it isn't created. For more information, see [Using + # service-linked roles for Amazon ECS][2] in the *Amazon Elastic + # Container Service Developer Guide*. # # </note> # # # - # [1]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html + # [1]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateCluster.html + # [2]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html # # @option params [String] :cluster_name # The name of your cluster. If you don't specify a name for your # cluster, you create a cluster that's named `default`. Up to 255 # letters (uppercase and lowercase), numbers, underscores, and hyphens @@ -819,12 +820,12 @@ end # Runs and maintains your desired number of tasks from a specified task # definition. If the number of tasks running in a service drops below # the `desiredCount`, Amazon ECS runs another copy of the task in the - # specified cluster. To update an existing service, see the - # UpdateService action. + # specified cluster. To update an existing service, use + # [UpdateService][1]. # # <note markdown="1"> On March 21, 2024, a change was made to resolve the task definition # revision before authorization. When a task definition revision is not # specified, authorization will occur using the latest revision of a # task definition. @@ -837,17 +838,17 @@ # # In addition to maintaining the desired count of tasks in your service, # you can optionally run your service behind one or more load balancers. # The load balancers distribute traffic across the tasks that are # associated with the service. For more information, see [Service load - # balancing][1] in the *Amazon Elastic Container Service Developer + # balancing][2] in the *Amazon Elastic Container Service Developer # Guide*. # # You can attach Amazon EBS volumes to Amazon ECS tasks by configuring # the volume when creating or updating a service. `volumeConfigurations` # is only supported for REPLICA service and not DAEMON service. For more - # infomation, see [Amazon EBS volumes][2] in the *Amazon Elastic + # infomation, see [Amazon EBS volumes][3] in the *Amazon Elastic # Container Service Developer Guide*. # # Tasks for services that don't use a load balancer are considered # healthy if they're in the `RUNNING` state. 
Tasks for services that # use a load balancer are considered healthy if they're in the @@ -858,28 +859,28 @@ # * `REPLICA` - The replica scheduling strategy places and maintains # your desired number of tasks across your cluster. By default, the # service scheduler spreads tasks across Availability Zones. You can # use task placement strategies and constraints to customize task # placement decisions. For more information, see [Service scheduler - # concepts][3] in the *Amazon Elastic Container Service Developer + # concepts][4] in the *Amazon Elastic Container Service Developer # Guide*. # # * `DAEMON` - The daemon scheduling strategy deploys exactly one task # on each active container instance that meets all of the task # placement constraints that you specify in your cluster. The service # scheduler also evaluates the task placement constraints for running # tasks. It also stops tasks that don't meet the placement # constraints. When using this strategy, you don't need to specify a # desired number of tasks, a task placement strategy, or use Service # Auto Scaling policies. For more information, see [Service scheduler - # concepts][3] in the *Amazon Elastic Container Service Developer + # concepts][4] in the *Amazon Elastic Container Service Developer # Guide*. # # You can optionally specify a deployment configuration for your # service. The deployment is initiated by changing properties. For # example, the deployment might be initiated by the task definition or - # by your desired count of a service. You can use [UpdateService][4]. + # by your desired count of a service. You can use [UpdateService][1]. # The default value for a replica service for `minimumHealthyPercent` is # 100%. The default value for a daemon service for # `minimumHealthyPercent` is 0%. # # If a service uses the `ECS` deployment controller, the minimum healthy @@ -935,14 +936,14 @@ # strategies, see [Amazon ECS task placement][7] in the *Amazon Elastic # Container Service Developer Guide* # # # - # [1]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html - # [2]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ebs-volumes.html#ebs-volume-types - # [3]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html - # [4]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_UpdateService.html + # [1]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_UpdateService.html + # [2]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html + # [3]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ebs-volumes.html#ebs-volume-types + # [4]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html # [5]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateTaskSet.html # [6]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html # [7]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement.html # # @option params [String] :cluster @@ -1136,11 +1137,11 @@ # [1]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names # # @option params [Types::DeploymentConfiguration] :deployment_configuration # Optional deployment parameters that control how many tasks run during - # the deployment and the failure detection methods. 
+ # the deployment and the ordering of stopping and starting tasks. # # @option params [Array<Types::PlacementConstraint>] :placement_constraints # An array of placement constraint objects to use for tasks in your # service. You can specify a maximum of 10 constraints for each task. # This limit includes constraints in the task definition and those @@ -1161,31 +1162,23 @@ # # [1]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html # # @option params [Integer] :health_check_grace_period_seconds # The period of time, in seconds, that the Amazon ECS service scheduler - # ignores unhealthy Elastic Load Balancing target health checks after a - # task has first started. This is only used when your service is - # configured to use a load balancer. If your service has a load balancer - # defined and you don't specify a health check grace period value, the - # default value of `0` is used. + # ignores unhealthy Elastic Load Balancing, VPC Lattice, and container + # health checks after a task has first started. If you don't specify a + # health check grace period value, the default value of `0` is used. If + # you don't use any of the health checks, then + # `healthCheckGracePeriodSeconds` is unused. # - # If you do not use an Elastic Load Balancing, we recommend that you use - # the `startPeriod` in the task definition health check parameters. For - # more information, see [Health check][1]. + # If your service's tasks take a while to start and respond to health + # checks, you can specify a health check grace period of up to + # 2,147,483,647 seconds (about 69 years). During that time, the Amazon + # ECS service scheduler ignores health check status. This grace period + # can prevent the service scheduler from marking tasks as unhealthy and + # stopping them before they have time to come up. # - # If your service's tasks take a while to start and respond to Elastic - # Load Balancing health checks, you can specify a health check grace - # period of up to 2,147,483,647 seconds (about 69 years). During that - # time, the Amazon ECS service scheduler ignores health check status. - # This grace period can prevent the service scheduler from marking tasks - # as unhealthy and stopping them before they have time to come up. - # - # - # - # [1]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_HealthCheck.html - # # @option params [String] :scheduling_strategy # The scheduling strategy to use for the service. For more information, # see [Services][1]. # # There are two service scheduler strategies available: @@ -1306,10 +1299,13 @@ # @option params [Array<Types::ServiceVolumeConfiguration>] :volume_configurations # The configuration for a volume specified in the task definition as a # volume that is configured at launch time. Currently, the only # supported volume type is an Amazon EBS volume. # + # @option params [Array<Types::VpcLatticeConfiguration>] :vpc_lattice_configurations + # The VPC Lattice configuration for the service being created. 
+ # # @return [Types::CreateServiceResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::CreateServiceResponse#service #service} => Types::Service # # @@ -1576,10 +1572,17 @@ # role_arn: "IAMRoleArn", # required # filesystem_type: "ext3", # accepts ext3, ext4, xfs, ntfs # }, # }, # ], + # vpc_lattice_configurations: [ + # { + # role_arn: "IAMRoleArn", # required + # target_group_arn: "String", # required + # port_name: "String", # required + # }, + # ], # }) # # @example Response structure # # resp.service.service_arn #=> String @@ -1722,10 +1725,14 @@ # resp.service.deployments[0].volume_configurations[0].managed_ebs_volume.tag_specifications[0].tags[0].value #=> String # resp.service.deployments[0].volume_configurations[0].managed_ebs_volume.tag_specifications[0].propagate_tags #=> String, one of "TASK_DEFINITION", "SERVICE", "NONE" # resp.service.deployments[0].volume_configurations[0].managed_ebs_volume.role_arn #=> String # resp.service.deployments[0].volume_configurations[0].managed_ebs_volume.filesystem_type #=> String, one of "ext3", "ext4", "xfs", "ntfs" # resp.service.deployments[0].fargate_ephemeral_storage.kms_key_id #=> String + # resp.service.deployments[0].vpc_lattice_configurations #=> Array + # resp.service.deployments[0].vpc_lattice_configurations[0].role_arn #=> String + # resp.service.deployments[0].vpc_lattice_configurations[0].target_group_arn #=> String + # resp.service.deployments[0].vpc_lattice_configurations[0].port_name #=> String # resp.service.role_arn #=> String # resp.service.events #=> Array # resp.service.events[0].id #=> String # resp.service.events[0].created_at #=> Time # resp.service.events[0].message #=> String @@ -2533,10 +2540,14 @@ # resp.service.deployments[0].volume_configurations[0].managed_ebs_volume.tag_specifications[0].tags[0].value #=> String # resp.service.deployments[0].volume_configurations[0].managed_ebs_volume.tag_specifications[0].propagate_tags #=> String, one of "TASK_DEFINITION", "SERVICE", "NONE" # resp.service.deployments[0].volume_configurations[0].managed_ebs_volume.role_arn #=> String # resp.service.deployments[0].volume_configurations[0].managed_ebs_volume.filesystem_type #=> String, one of "ext3", "ext4", "xfs", "ntfs" # resp.service.deployments[0].fargate_ephemeral_storage.kms_key_id #=> String + # resp.service.deployments[0].vpc_lattice_configurations #=> Array + # resp.service.deployments[0].vpc_lattice_configurations[0].role_arn #=> String + # resp.service.deployments[0].vpc_lattice_configurations[0].target_group_arn #=> String + # resp.service.deployments[0].vpc_lattice_configurations[0].port_name #=> String # resp.service.role_arn #=> String # resp.service.events #=> Array # resp.service.events[0].id #=> String # resp.service.events[0].created_at #=> Time # resp.service.events[0].message #=> String @@ -3850,10 +3861,14 @@ # resp.service_revisions[0].volume_configurations[0].managed_ebs_volume.tag_specifications[0].propagate_tags #=> String, one of "TASK_DEFINITION", "SERVICE", "NONE" # resp.service_revisions[0].volume_configurations[0].managed_ebs_volume.role_arn #=> String # resp.service_revisions[0].volume_configurations[0].managed_ebs_volume.filesystem_type #=> String, one of "ext3", "ext4", "xfs", "ntfs" # resp.service_revisions[0].fargate_ephemeral_storage.kms_key_id #=> String # resp.service_revisions[0].created_at #=> Time + # resp.service_revisions[0].vpc_lattice_configurations #=> Array + # 
resp.service_revisions[0].vpc_lattice_configurations[0].role_arn #=> String + # resp.service_revisions[0].vpc_lattice_configurations[0].target_group_arn #=> String + # resp.service_revisions[0].vpc_lattice_configurations[0].port_name #=> String # resp.failures #=> Array # resp.failures[0].arn #=> String # resp.failures[0].reason #=> String # resp.failures[0].detail #=> String # @@ -4095,10 +4110,14 @@ # resp.services[0].deployments[0].volume_configurations[0].managed_ebs_volume.tag_specifications[0].tags[0].value #=> String # resp.services[0].deployments[0].volume_configurations[0].managed_ebs_volume.tag_specifications[0].propagate_tags #=> String, one of "TASK_DEFINITION", "SERVICE", "NONE" # resp.services[0].deployments[0].volume_configurations[0].managed_ebs_volume.role_arn #=> String # resp.services[0].deployments[0].volume_configurations[0].managed_ebs_volume.filesystem_type #=> String, one of "ext3", "ext4", "xfs", "ntfs" # resp.services[0].deployments[0].fargate_ephemeral_storage.kms_key_id #=> String + # resp.services[0].deployments[0].vpc_lattice_configurations #=> Array + # resp.services[0].deployments[0].vpc_lattice_configurations[0].role_arn #=> String + # resp.services[0].deployments[0].vpc_lattice_configurations[0].target_group_arn #=> String + # resp.services[0].deployments[0].vpc_lattice_configurations[0].port_name #=> String # resp.services[0].role_arn #=> String # resp.services[0].events #=> Array # resp.services[0].events[0].id #=> String # resp.services[0].events[0].created_at #=> Time # resp.services[0].events[0].message #=> String @@ -6846,27 +6865,24 @@ # When using the `host` network mode, you should not run containers # using the root user (UID 0). It is considered best practice to use a # non-root user. # # If the network mode is `awsvpc`, the task is allocated an elastic - # network interface, and you must specify a NetworkConfiguration value - # when you create a service or run a task with the task definition. For - # more information, see [Task Networking][1] in the *Amazon Elastic - # Container Service Developer Guide*. + # network interface, and you must specify a [NetworkConfiguration][1] + # value when you create a service or run a task with the task + # definition. For more information, see [Task Networking][2] in the + # *Amazon Elastic Container Service Developer Guide*. # # If the network mode is `host`, you cannot run multiple instantiations # of the same task on a single container instance when port mappings are # used. # - # For more information, see [Network settings][2] in the *Docker run - # reference*. # # + # [1]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_NetworkConfiguration.html + # [2]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html # - # [1]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html - # [2]: https://docs.docker.com/engine/reference/run/#network-settings - # # @option params [required, Array<Types::ContainerDefinition>] :container_definitions # A list of container definitions in JSON format that describe the # different containers that make up your task. # # @option params [Array<Types::Volume>] :volumes @@ -7024,16 +7040,14 @@ # # If `task` is specified, all containers within the specified task share # the same process namespace. # # If no value is specified, the default is a private namespace for each - # container. For more information, see [PID settings][1] in the *Docker - # run reference*. + # container. 
# # If the `host` PID mode is used, there's a heightened risk of - # undesired process namespace exposure. For more information, see - # [Docker security][2]. + # undesired process namespace exposure. # # <note markdown="1"> This parameter is not supported for Windows containers. # # </note> # @@ -7041,15 +7055,10 @@ # if the tasks are using platform version `1.4.0` or later (Linux). This # isn't supported for Windows containers on Fargate. # # </note> # - # - # - # [1]: https://docs.docker.com/engine/reference/run/#pid-settings---pid - # [2]: https://docs.docker.com/engine/security/security/ - # # @option params [String] :ipc_mode # The IPC resource namespace to use for the containers in the task. The # valid values are `host`, `task`, or `none`. If `host` is specified, # then all containers within the tasks that specified the `host` IPC # mode on the same container instance share the same IPC resources with @@ -7057,20 +7066,18 @@ # within the specified task share the same IPC resources. If `none` is # specified, then IPC resources within the containers of a task are # private and not shared with other containers in a task or on the # container instance. If no value is specified, then the IPC resource # namespace sharing depends on the Docker daemon setting on the - # container instance. For more information, see [IPC settings][1] in the - # *Docker run reference*. + # container instance. # # If the `host` IPC mode is used, be aware that there is a heightened - # risk of undesired IPC namespace expose. For more information, see - # [Docker security][2]. + # risk of undesired IPC namespace expose. # # If you are setting namespaced kernel parameters using `systemControls` # for the containers in the task, the following will apply to your IPC - # resource namespace. For more information, see [System Controls][3] in + # resource namespace. For more information, see [System Controls][1] in # the *Amazon Elastic Container Service Developer Guide*. # # * For tasks that use the `host` IPC mode, IPC namespace related # `systemControls` are not supported. # @@ -7082,13 +7089,11 @@ # # </note> # # # - # [1]: https://docs.docker.com/engine/reference/run/#ipc-settings---ipc - # [2]: https://docs.docker.com/engine/security/security/ - # [3]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html + # [1]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html # # @option params [Types::ProxyConfiguration] :proxy_configuration # The configuration details for the App Mesh proxy. # # For tasks hosted on Amazon EC2 instances, the container instances @@ -9849,11 +9854,11 @@ # [1]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutClusterCapacityProviders.html # [2]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateClusterCapacityProvider.html # # @option params [Types::DeploymentConfiguration] :deployment_configuration # Optional deployment parameters that control how many tasks run during - # the deployment and the failure detection methods. + # the deployment and the ordering of stopping and starting tasks. # # @option params [Types::NetworkConfiguration] :network_configuration # An object representing the network configuration for the service. # # @option params [Array<Types::PlacementConstraint>] :placement_constraints @@ -9895,18 +9900,21 @@ # same image/tag combination (`my_image:latest`) or to roll Fargate # tasks onto a newer platform version. 
# # @option params [Integer] :health_check_grace_period_seconds # The period of time, in seconds, that the Amazon ECS service scheduler - # ignores unhealthy Elastic Load Balancing target health checks after a - # task has first started. This is only valid if your service is - # configured to use a load balancer. If your service's tasks take a - # while to start and respond to Elastic Load Balancing health checks, - # you can specify a health check grace period of up to 2,147,483,647 - # seconds. During that time, the Amazon ECS service scheduler ignores - # the Elastic Load Balancing health check status. This grace period can - # prevent the ECS service scheduler from marking tasks as unhealthy and + # ignores unhealthy Elastic Load Balancing, VPC Lattice, and container + # health checks after a task has first started. If you don't specify a + # health check grace period value, the default value of `0` is used. If + # you don't use any of the health checks, then + # `healthCheckGracePeriodSeconds` is unused. + # + # If your service's tasks take a while to start and respond to health + # checks, you can specify a health check grace period of up to + # 2,147,483,647 seconds (about 69 years). During that time, the Amazon + # ECS service scheduler ignores health check status. This grace period + # can prevent the service scheduler from marking tasks as unhealthy and # stopping them before they have time to come up. # # @option params [Boolean] :enable_execute_command # If `true`, this enables execute command functionality on all task # containers. @@ -10016,10 +10024,14 @@ # # # # [1]: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ServiceManagedEBSVolumeConfiguration.html # + # @option params [Array<Types::VpcLatticeConfiguration>] :vpc_lattice_configurations + # An object representing the VPC Lattice configuration for the service + # being updated. 
+ # # @return [Types::UpdateServiceResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::UpdateServiceResponse#service #service} => Types::Service # # @@ -10183,10 +10195,17 @@ # role_arn: "IAMRoleArn", # required # filesystem_type: "ext3", # accepts ext3, ext4, xfs, ntfs # }, # }, # ], + # vpc_lattice_configurations: [ + # { + # role_arn: "IAMRoleArn", # required + # target_group_arn: "String", # required + # port_name: "String", # required + # }, + # ], # }) # # @example Response structure # # resp.service.service_arn #=> String @@ -10329,10 +10348,14 @@ # resp.service.deployments[0].volume_configurations[0].managed_ebs_volume.tag_specifications[0].tags[0].value #=> String # resp.service.deployments[0].volume_configurations[0].managed_ebs_volume.tag_specifications[0].propagate_tags #=> String, one of "TASK_DEFINITION", "SERVICE", "NONE" # resp.service.deployments[0].volume_configurations[0].managed_ebs_volume.role_arn #=> String # resp.service.deployments[0].volume_configurations[0].managed_ebs_volume.filesystem_type #=> String, one of "ext3", "ext4", "xfs", "ntfs" # resp.service.deployments[0].fargate_ephemeral_storage.kms_key_id #=> String + # resp.service.deployments[0].vpc_lattice_configurations #=> Array + # resp.service.deployments[0].vpc_lattice_configurations[0].role_arn #=> String + # resp.service.deployments[0].vpc_lattice_configurations[0].target_group_arn #=> String + # resp.service.deployments[0].vpc_lattice_configurations[0].port_name #=> String # resp.service.role_arn #=> String # resp.service.events #=> Array # resp.service.events[0].id #=> String # resp.service.events[0].created_at #=> Time # resp.service.events[0].message #=> String @@ -10742,10 +10765,10 @@ params: params, config: config, tracer: tracer ) context[:gem_name] = 'aws-sdk-ecs' - context[:gem_version] = '1.167.0' + context[:gem_version] = '1.168.0' Seahorse::Client::Request.new(handlers, context) end # Polls an API operation until a resource enters a desired state. #
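
The functional change in this release is the new `vpc_lattice_configurations` parameter accepted by #create_service and #update_service (and echoed back on each deployment in the response), together with the broader wording for `health_check_grace_period_seconds`. The sketch below shows how the new parameter might be passed, following the request and response syntax shown in the diff above; the region, cluster, service, task definition, IAM role, and target group ARNs are placeholder values, not anything taken from this diff.

    require "aws-sdk-ecs"

    ecs = Aws::ECS::Client.new(region: "us-east-1")

    # Placeholder identifiers -- substitute your own cluster, task definition,
    # infrastructure IAM role, and VPC Lattice target group ARN.
    resp = ecs.create_service(
      cluster: "my-cluster",
      service_name: "my-service",
      task_definition: "my-task-def:1",
      desired_count: 2,
      # Per the updated docs, the grace period now covers Elastic Load Balancing,
      # VPC Lattice, and container health checks; it is unused if the service has
      # none of them.
      health_check_grace_period_seconds: 60,
      # New in 1.168.0: associate the service with a VPC Lattice target group.
      vpc_lattice_configurations: [
        {
          role_arn: "arn:aws:iam::123456789012:role/ecsInfrastructureRole",        # required
          target_group_arn: "arn:aws:vpc-lattice:us-east-1:123456789012:targetgroup/tg-0123456789abcdef0", # required
          port_name: "web",                                                         # required
        },
      ],
    )

    # The same three fields come back on each deployment of the service.
    resp.service.deployments.each do |deployment|
      (deployment.vpc_lattice_configurations || []).each do |cfg|
        puts "#{cfg.port_name} -> #{cfg.target_group_arn}"
      end
    end

The same `vpc_lattice_configurations:` array shape is accepted by #update_service, as shown in the second half of the diff, so an existing service's VPC Lattice association can be changed without recreating the service.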