# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE

require 'seahorse/client/plugins/content_length.rb'
require 'aws-sdk-core/plugins/credentials_configuration.rb'
require 'aws-sdk-core/plugins/logging.rb'
require 'aws-sdk-core/plugins/param_converter.rb'
require 'aws-sdk-core/plugins/param_validator.rb'
require 'aws-sdk-core/plugins/user_agent.rb'
require 'aws-sdk-core/plugins/helpful_socket_errors.rb'
require 'aws-sdk-core/plugins/retry_errors.rb'
require 'aws-sdk-core/plugins/global_configuration.rb'
require 'aws-sdk-core/plugins/regional_endpoint.rb'
require 'aws-sdk-core/plugins/response_paging.rb'
require 'aws-sdk-core/plugins/stub_responses.rb'
require 'aws-sdk-core/plugins/idempotency_token.rb'
require 'aws-sdk-core/plugins/signature_v4.rb'
require 'aws-sdk-core/plugins/protocols/json_rpc.rb'

Aws::Plugins::GlobalConfiguration.add_identifier(:ecs)

module Aws::ECS
  class Client < Seahorse::Client::Base

    include Aws::ClientStubs

    @identifier = :ecs

    set_api(ClientApi::API)

    add_plugin(Seahorse::Client::Plugins::ContentLength)
    add_plugin(Aws::Plugins::CredentialsConfiguration)
    add_plugin(Aws::Plugins::Logging)
    add_plugin(Aws::Plugins::ParamConverter)
    add_plugin(Aws::Plugins::ParamValidator)
    add_plugin(Aws::Plugins::UserAgent)
    add_plugin(Aws::Plugins::HelpfulSocketErrors)
    add_plugin(Aws::Plugins::RetryErrors)
    add_plugin(Aws::Plugins::GlobalConfiguration)
    add_plugin(Aws::Plugins::RegionalEndpoint)
    add_plugin(Aws::Plugins::ResponsePaging)
    add_plugin(Aws::Plugins::StubResponses)
    add_plugin(Aws::Plugins::IdempotencyToken)
    add_plugin(Aws::Plugins::SignatureV4)
    add_plugin(Aws::Plugins::Protocols::JsonRpc)

    # @option options [required, Aws::CredentialProvider] :credentials
    #   Your AWS credentials. This can be an instance of any one of the
    #   following classes:
    #
    #   * `Aws::Credentials` - Used for configuring static, non-refreshing
    #     credentials.
    #
    #   * `Aws::InstanceProfileCredentials` - Used for loading credentials
    #     from an EC2 IMDS on an EC2 instance.
    #
    #   * `Aws::SharedCredentials` - Used for loading credentials from a
    #     shared file, such as `~/.aws/config`.
    #
    #   * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
    #
    #   When `:credentials` are not configured directly, the following
    #   locations will be searched for credentials:
    #
    #   * `Aws.config[:credentials]`
    #   * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
    #   * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
    #   * `~/.aws/credentials`
    #   * `~/.aws/config`
    #   * EC2 IMDS instance profile - When used by default, the timeouts are
    #     very aggressive. Construct and pass an instance of
    #     `Aws::InstanceProfileCredentials` to enable retries and extended
    #     timeouts.
    #
    # @option options [required, String] :region
    #   The AWS region to connect to. The configured `:region` is
    #   used to determine the service `:endpoint`. When not passed,
    #   a default `:region` is searched for in the following locations:
    #
    #   * `Aws.config[:region]`
    #   * `ENV['AWS_REGION']`
    #   * `ENV['AMAZON_REGION']`
    #   * `ENV['AWS_DEFAULT_REGION']`
    #   * `~/.aws/credentials`
    #   * `~/.aws/config`
    #
    # @option options [String] :access_key_id
    #
    # @option options [Boolean] :convert_params (true)
    #   When `true`, an attempt is made to coerce request parameters into
    #   the required types.
    #
    # @option options [String] :endpoint
    #   The client endpoint is normally constructed from the `:region`
    #   option.
    #   You should only configure an `:endpoint` when connecting
    #   to test endpoints. This should be a valid HTTP(S) URI.
    #
    # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
    #   The log formatter.
    #
    # @option options [Symbol] :log_level (:info)
    #   The log level to send messages to the `:logger` at.
    #
    # @option options [Logger] :logger
    #   The Logger instance to send log messages to. If this option
    #   is not set, logging will be disabled.
    #
    # @option options [String] :profile ("default")
    #   Used when loading credentials from the shared credentials file
    #   at $HOME/.aws/credentials. When not specified, 'default' is used.
    #
    # @option options [Integer] :retry_limit (3)
    #   The maximum number of times to retry failed requests. Only
    #   ~500 level server errors and certain ~400 level client errors
    #   are retried. Generally, these are throttling errors, data
    #   checksum errors, networking errors, timeout errors and auth
    #   errors from expired credentials.
    #
    # @option options [String] :secret_access_key
    #
    # @option options [String] :session_token
    #
    # @option options [Boolean] :simple_json (false)
    #   Disables request parameter conversion, validation, and formatting.
    #   Also disables response data type conversions. This option is useful
    #   when you want to ensure the highest level of performance by
    #   avoiding overhead of walking request parameters and response data
    #   structures.
    #
    #   When `:simple_json` is enabled, the request parameters hash must
    #   be formatted exactly as the API expects.
    #
    # @option options [Boolean] :stub_responses (false)
    #   Causes the client to return stubbed responses. By default
    #   fake responses are generated and returned. You can specify
    #   the response data to return or errors to raise by calling
    #   {ClientStubs#stub_responses}. See {ClientStubs} for more information.
    #
    #   ** Please note ** When response stubbing is enabled, no HTTP
    #   requests are made, and retries are disabled.
    #
    # @option options [Boolean] :validate_params (true)
    #   When `true`, request parameters are validated before
    #   sending the request.
    #
    def initialize(*args)
      super
    end

    # @!group API Operations

    # Creates a new Amazon ECS cluster. By default, your account receives a
    # `default` cluster when you launch your first container instance.
    # However, you can create your own cluster with a unique name with the
    # `CreateCluster` action.
    #
    # @option params [String] :cluster_name
    #   The name of your cluster. If you do not specify a name for your
    #   cluster, you create a cluster named `default`. Up to 255 letters
    #   (uppercase and lowercase), numbers, hyphens, and underscores are
    #   allowed.
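    #
    # @example Constructing a client and creating a cluster (illustrative sketch)
    #
    #   # A minimal sketch, not generated from the API model: the region and
    #   # cluster name are assumptions chosen for illustration, and credentials
    #   # are expected to come from the default provider chain described above.
    #   ecs = Aws::ECS::Client.new(region: "us-east-1")
    #   resp = ecs.create_cluster(cluster_name: "my-cluster")
    #   resp.cluster.status #=> typically "ACTIVE" for a newly created cluster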
# # @return [Types::CreateClusterResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::CreateClusterResponse#cluster #cluster} => Types::Cluster # # @example Request syntax with placeholder values # # resp = client.create_cluster({ # cluster_name: "String", # }) # # @example Response structure # # resp.cluster.cluster_arn #=> String # resp.cluster.cluster_name #=> String # resp.cluster.status #=> String # resp.cluster.registered_container_instances_count #=> Integer # resp.cluster.running_tasks_count #=> Integer # resp.cluster.pending_tasks_count #=> Integer # resp.cluster.active_services_count #=> Integer # # @overload create_cluster(params = {}) # @param [Hash] params ({}) def create_cluster(params = {}, options = {}) req = build_request(:create_cluster, params) req.send_request(options) end # Runs and maintains a desired number of tasks from a specified task # definition. If the number of tasks running in a service drops below # `desiredCount`, Amazon ECS spawns another copy of the task in the # specified cluster. To update an existing service, see UpdateService. # # In addition to maintaining the desired count of tasks in your service, # you can optionally run your service behind a load balancer. The load # balancer distributes traffic across the tasks that are associated with # the service. For more information, see [Service Load Balancing][1] in # the *Amazon EC2 Container Service Developer Guide*. # # You can optionally specify a deployment configuration for your # service. During a deployment (which is triggered by changing the task # definition or the desired count of a service with an UpdateService # operation), the service scheduler uses the `minimumHealthyPercent` and # `maximumPercent` parameters to determine the deployment strategy. # # The `minimumHealthyPercent` represents a lower limit on the number of # your service's tasks that must remain in the `RUNNING` state during a # deployment, as a percentage of the `desiredCount` (rounded up to the # nearest integer). This parameter enables you to deploy without using # additional cluster capacity. For example, if `desiredCount` is four # tasks and the minimum is 50%, the scheduler can stop two existing # tasks to free up cluster capacity before starting two new tasks. Tasks # for services that do not use a load balancer are considered healthy if # they are in the `RUNNING` state. Tasks for services that use a load # balancer are considered healthy if they are in the `RUNNING` state and # the container instance they are hosted on is reported as healthy by # the load balancer. The default value is 50% in the console and 100% # for the AWS CLI, the AWS SDKs, and the APIs. # # The `maximumPercent` parameter represents an upper limit on the number # of your service's tasks that are allowed in the `RUNNING` or # `PENDING` state during a deployment, as a percentage of the # `desiredCount` (rounded down to the nearest integer). This parameter # enables you to define the deployment batch size. For example, if # `desiredCount` is four tasks and the maximum is 200%, the scheduler # can start four new tasks before stopping the four older tasks # (provided that the cluster resources required to do this are # available). The default value is 200%. 
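    #
    # As a rough worked sketch of the arithmetic above (the numbers are
    # illustrative assumptions, not values read from a real service):
    #
    #     desired_count           = 4
    #     minimum_healthy_percent = 50
    #     maximum_percent         = 200
    #
    #     # lower bound: desired_count * minimum_healthy_percent / 100, rounded up
    #     min_running = (desired_count * minimum_healthy_percent / 100.0).ceil   #=> 2
    #
    #     # upper bound: desired_count * maximum_percent / 100, rounded down
    #     max_running_or_pending = (desired_count * maximum_percent / 100.0).floor #=> 8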
# # When the service scheduler launches new tasks, it determines task # placement in your cluster using the following logic: # # * Determine which of the container instances in your cluster can # support your service's task definition (for example, they have the # required CPU, memory, ports, and container instance attributes). # # * By default, the service scheduler attempts to balance tasks across # Availability Zones in this manner (although you can choose a # different placement strategy): # # * Sort the valid container instances by the fewest number of running # tasks for this service in the same Availability Zone as the # instance. For example, if zone A has one running service task and # zones B and C each have zero, valid container instances in either # zone B or C are considered optimal for placement. # # * Place the new service task on a valid container instance in an # optimal Availability Zone (based on the previous steps), favoring # container instances with the fewest number of running tasks for # this service. # # # # [1]: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster on # which to run your service. If you do not specify a cluster, the # default cluster is assumed. # # @option params [required, String] :service_name # The name of your service. Up to 255 letters (uppercase and lowercase), # numbers, hyphens, and underscores are allowed. Service names must be # unique within a cluster, but you can have similarly named services in # multiple clusters within a region or across multiple regions. # # @option params [required, String] :task_definition # The `family` and `revision` (`family:revision`) or full Amazon # Resource Name (ARN) of the task definition to run in your service. If # a `revision` is not specified, the latest `ACTIVE` revision is used. # # @option params [Array] :load_balancers # A load balancer object representing the load balancer to use with your # service. Currently, you are limited to one load balancer or target # group per service. After you create a service, the load balancer name # or target group ARN, container name, and container port specified in # the service definition are immutable. # # For Elastic Load Balancing Classic load balancers, this object must # contain the load balancer name, the container name (as it appears in a # container definition), and the container port to access from the load # balancer. When a task from this service is placed on a container # instance, the container instance is registered with the load balancer # specified here. # # For Elastic Load Balancing Application load balancers, this object # must contain the load balancer target group ARN, the container name # (as it appears in a container definition), and the container port to # access from the load balancer. When a task from this service is placed # on a container instance, the container instance and port combination # is registered as a target in the target group specified here. # # @option params [required, Integer] :desired_count # The number of instantiations of the specified task definition to place # and keep running on your cluster. # # @option params [String] :client_token # Unique, case-sensitive identifier you provide to ensure the # idempotency of the request. Up to 32 ASCII characters are allowed. 
# # @option params [String] :role # The name or full Amazon Resource Name (ARN) of the IAM role that # allows Amazon ECS to make calls to your load balancer on your behalf. # This parameter is required if you are using a load balancer with your # service. If you specify the `role` parameter, you must also specify a # load balancer object with the `loadBalancers` parameter. # # If your specified role has a path other than `/`, then you must either # specify the full role ARN (this is recommended) or prefix the role # name with the path. For example, if a role with the name `bar` has a # path of `/foo/` then you would specify `/foo/bar` as the role name. # For more information, see [Friendly Names and Paths][1] in the *IAM # User Guide*. # # # # [1]: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names # # @option params [Types::DeploymentConfiguration] :deployment_configuration # Optional deployment parameters that control how many tasks run during # the deployment and the ordering of stopping and starting tasks. # # @option params [Array] :placement_constraints # An array of placement constraint objects to use for tasks in your # service. You can specify a maximum of 10 constraints per task (this # limit includes constraints in the task definition and those specified # at run time). # # @option params [Array] :placement_strategy # The placement strategy objects to use for tasks in your service. You # can specify a maximum of 5 strategy rules per service. # # @return [Types::CreateServiceResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::CreateServiceResponse#service #service} => Types::Service # # @example Request syntax with placeholder values # # resp = client.create_service({ # cluster: "String", # service_name: "String", # required # task_definition: "String", # required # load_balancers: [ # { # target_group_arn: "String", # load_balancer_name: "String", # container_name: "String", # container_port: 1, # }, # ], # desired_count: 1, # required # client_token: "String", # role: "String", # deployment_configuration: { # maximum_percent: 1, # minimum_healthy_percent: 1, # }, # placement_constraints: [ # { # type: "distinctInstance", # accepts distinctInstance, memberOf # expression: "String", # }, # ], # placement_strategy: [ # { # type: "random", # accepts random, spread, binpack # field: "String", # }, # ], # }) # # @example Response structure # # resp.service.service_arn #=> String # resp.service.service_name #=> String # resp.service.cluster_arn #=> String # resp.service.load_balancers #=> Array # resp.service.load_balancers[0].target_group_arn #=> String # resp.service.load_balancers[0].load_balancer_name #=> String # resp.service.load_balancers[0].container_name #=> String # resp.service.load_balancers[0].container_port #=> Integer # resp.service.status #=> String # resp.service.desired_count #=> Integer # resp.service.running_count #=> Integer # resp.service.pending_count #=> Integer # resp.service.task_definition #=> String # resp.service.deployment_configuration.maximum_percent #=> Integer # resp.service.deployment_configuration.minimum_healthy_percent #=> Integer # resp.service.deployments #=> Array # resp.service.deployments[0].id #=> String # resp.service.deployments[0].status #=> String # resp.service.deployments[0].task_definition #=> String # resp.service.deployments[0].desired_count #=> Integer # resp.service.deployments[0].pending_count #=> Integer # 
resp.service.deployments[0].running_count #=> Integer # resp.service.deployments[0].created_at #=> Time # resp.service.deployments[0].updated_at #=> Time # resp.service.role_arn #=> String # resp.service.events #=> Array # resp.service.events[0].id #=> String # resp.service.events[0].created_at #=> Time # resp.service.events[0].message #=> String # resp.service.created_at #=> Time # resp.service.placement_constraints #=> Array # resp.service.placement_constraints[0].type #=> String, one of "distinctInstance", "memberOf" # resp.service.placement_constraints[0].expression #=> String # resp.service.placement_strategy #=> Array # resp.service.placement_strategy[0].type #=> String, one of "random", "spread", "binpack" # resp.service.placement_strategy[0].field #=> String # # @overload create_service(params = {}) # @param [Hash] params ({}) def create_service(params = {}, options = {}) req = build_request(:create_service, params) req.send_request(options) end # Deletes one or more custom attributes from an Amazon ECS resource. # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster that # contains the resource to apply attributes. If you do not specify a # cluster, the default cluster is assumed. # # @option params [required, Array] :attributes # The attributes to delete from your resource. You can specify up to 10 # attributes per request. For custom attributes, specify the attribute # name and target ID, but do not specify the value. If you specify the # target ID using the short form, you must also specify the target type. # # @return [Types::DeleteAttributesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::DeleteAttributesResponse#attributes #attributes} => Array<Types::Attribute> # # @example Request syntax with placeholder values # # resp = client.delete_attributes({ # cluster: "String", # attributes: [ # required # { # name: "String", # required # value: "String", # target_type: "container-instance", # accepts container-instance # target_id: "String", # }, # ], # }) # # @example Response structure # # resp.attributes #=> Array # resp.attributes[0].name #=> String # resp.attributes[0].value #=> String # resp.attributes[0].target_type #=> String, one of "container-instance" # resp.attributes[0].target_id #=> String # # @overload delete_attributes(params = {}) # @param [Hash] params ({}) def delete_attributes(params = {}, options = {}) req = build_request(:delete_attributes, params) req.send_request(options) end # Deletes the specified cluster. You must deregister all container # instances from this cluster before you may delete it. You can list the # container instances in a cluster with ListContainerInstances and # deregister them with DeregisterContainerInstance. # # @option params [required, String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster to # delete. 
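    #
    # @example Deregistering container instances before deleting a cluster (illustrative sketch)
    #
    #   # A minimal sketch, assuming a cluster named "my-cluster" whose
    #   # instances have no running tasks (otherwise stop the tasks first or
    #   # pass force: true). It lists the registered container instances and
    #   # deregisters each one before calling delete_cluster.
    #   arns = client.list_container_instances(cluster: "my-cluster").container_instance_arns
    #   arns.each do |arn|
    #     client.deregister_container_instance(cluster: "my-cluster", container_instance: arn)
    #   end
    #   client.delete_cluster(cluster: "my-cluster")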
# # @return [Types::DeleteClusterResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::DeleteClusterResponse#cluster #cluster} => Types::Cluster # # @example Request syntax with placeholder values # # resp = client.delete_cluster({ # cluster: "String", # required # }) # # @example Response structure # # resp.cluster.cluster_arn #=> String # resp.cluster.cluster_name #=> String # resp.cluster.status #=> String # resp.cluster.registered_container_instances_count #=> Integer # resp.cluster.running_tasks_count #=> Integer # resp.cluster.pending_tasks_count #=> Integer # resp.cluster.active_services_count #=> Integer # # @overload delete_cluster(params = {}) # @param [Hash] params ({}) def delete_cluster(params = {}, options = {}) req = build_request(:delete_cluster, params) req.send_request(options) end # Deletes a specified service within a cluster. You can delete a service # if you have no running tasks in it and the desired task count is zero. # If the service is actively maintaining tasks, you cannot delete it, # and you must update the service to a desired task count of zero. For # more information, see UpdateService. # # When you delete a service, if there are still running tasks that # require cleanup, the service status moves from `ACTIVE` to `DRAINING`, # and the service is no longer visible in the console or in ListServices # API operations. After the tasks have stopped, then the service status # moves from `DRAINING` to `INACTIVE`. Services in the `DRAINING` or # `INACTIVE` status can still be viewed with DescribeServices API # operations; however, in the future, `INACTIVE` services may be cleaned # up and purged from Amazon ECS record keeping, and DescribeServices API # operations on those services will return a `ServiceNotFoundException` # error. # # # # @option params [String] :cluster # The name of the cluster that hosts the service to delete. If you do # not specify a cluster, the default cluster is assumed. # # @option params [required, String] :service # The name of the service to delete. 
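    #
    # @example Scaling a service to zero before deleting it (illustrative sketch)
    #
    #   # A minimal sketch, assuming a service named "my-service" in the
    #   # default cluster. Per the description above, the desired task count
    #   # must be zero before the service can be deleted.
    #   client.update_service(service: "my-service", desired_count: 0)
    #   client.delete_service(service: "my-service")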
# # @return [Types::DeleteServiceResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::DeleteServiceResponse#service #service} => Types::Service # # @example Request syntax with placeholder values # # resp = client.delete_service({ # cluster: "String", # service: "String", # required # }) # # @example Response structure # # resp.service.service_arn #=> String # resp.service.service_name #=> String # resp.service.cluster_arn #=> String # resp.service.load_balancers #=> Array # resp.service.load_balancers[0].target_group_arn #=> String # resp.service.load_balancers[0].load_balancer_name #=> String # resp.service.load_balancers[0].container_name #=> String # resp.service.load_balancers[0].container_port #=> Integer # resp.service.status #=> String # resp.service.desired_count #=> Integer # resp.service.running_count #=> Integer # resp.service.pending_count #=> Integer # resp.service.task_definition #=> String # resp.service.deployment_configuration.maximum_percent #=> Integer # resp.service.deployment_configuration.minimum_healthy_percent #=> Integer # resp.service.deployments #=> Array # resp.service.deployments[0].id #=> String # resp.service.deployments[0].status #=> String # resp.service.deployments[0].task_definition #=> String # resp.service.deployments[0].desired_count #=> Integer # resp.service.deployments[0].pending_count #=> Integer # resp.service.deployments[0].running_count #=> Integer # resp.service.deployments[0].created_at #=> Time # resp.service.deployments[0].updated_at #=> Time # resp.service.role_arn #=> String # resp.service.events #=> Array # resp.service.events[0].id #=> String # resp.service.events[0].created_at #=> Time # resp.service.events[0].message #=> String # resp.service.created_at #=> Time # resp.service.placement_constraints #=> Array # resp.service.placement_constraints[0].type #=> String, one of "distinctInstance", "memberOf" # resp.service.placement_constraints[0].expression #=> String # resp.service.placement_strategy #=> Array # resp.service.placement_strategy[0].type #=> String, one of "random", "spread", "binpack" # resp.service.placement_strategy[0].field #=> String # # @overload delete_service(params = {}) # @param [Hash] params ({}) def delete_service(params = {}, options = {}) req = build_request(:delete_service, params) req.send_request(options) end # Deregisters an Amazon ECS container instance from the specified # cluster. This instance is no longer available to run tasks. # # If you intend to use the container instance for some other purpose # after deregistration, you should stop all of the tasks running on the # container instance before deregistration to avoid any orphaned tasks # from consuming resources. # # Deregistering a container instance removes the instance from a # cluster, but it does not terminate the EC2 instance; if you are # finished using the instance, be sure to terminate it in the Amazon EC2 # console to stop billing. # # If you terminate a running container instance, Amazon ECS # automatically deregisters the instance from your cluster (stopped # container instances or instances with disconnected agents are not # automatically deregistered when terminated). # # # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster that # hosts the container instance to deregister. If you do not specify a # cluster, the default cluster is assumed. 
# # @option params [required, String] :container_instance # The container instance ID or full Amazon Resource Name (ARN) of the # container instance to deregister. The ARN contains the `arn:aws:ecs` # namespace, followed by the region of the container instance, the AWS # account ID of the container instance owner, the `container-instance` # namespace, and then the container instance ID. For example, # `arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID # `. # # @option params [Boolean] :force # Forces the deregistration of the container instance. If you have tasks # running on the container instance when you deregister it with the # `force` option, these tasks remain running until you terminate the # instance or the tasks stop through some other means, but they are # orphaned (no longer monitored or accounted for by Amazon ECS). If an # orphaned task on your container instance is part of an Amazon ECS # service, then the service scheduler starts another copy of that task, # on a different container instance if possible. # # Any containers in orphaned service tasks that are registered with a # Classic load balancer or an Application load balancer target group are # deregistered, and they will begin connection draining according to the # settings on the load balancer or target group. # # @return [Types::DeregisterContainerInstanceResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::DeregisterContainerInstanceResponse#container_instance #container_instance} => Types::ContainerInstance # # @example Request syntax with placeholder values # # resp = client.deregister_container_instance({ # cluster: "String", # container_instance: "String", # required # force: false, # }) # # @example Response structure # # resp.container_instance.container_instance_arn #=> String # resp.container_instance.ec2_instance_id #=> String # resp.container_instance.version #=> Integer # resp.container_instance.version_info.agent_version #=> String # resp.container_instance.version_info.agent_hash #=> String # resp.container_instance.version_info.docker_version #=> String # resp.container_instance.remaining_resources #=> Array # resp.container_instance.remaining_resources[0].name #=> String # resp.container_instance.remaining_resources[0].type #=> String # resp.container_instance.remaining_resources[0].double_value #=> Float # resp.container_instance.remaining_resources[0].long_value #=> Integer # resp.container_instance.remaining_resources[0].integer_value #=> Integer # resp.container_instance.remaining_resources[0].string_set_value #=> Array # resp.container_instance.remaining_resources[0].string_set_value[0] #=> String # resp.container_instance.registered_resources #=> Array # resp.container_instance.registered_resources[0].name #=> String # resp.container_instance.registered_resources[0].type #=> String # resp.container_instance.registered_resources[0].double_value #=> Float # resp.container_instance.registered_resources[0].long_value #=> Integer # resp.container_instance.registered_resources[0].integer_value #=> Integer # resp.container_instance.registered_resources[0].string_set_value #=> Array # resp.container_instance.registered_resources[0].string_set_value[0] #=> String # resp.container_instance.status #=> String # resp.container_instance.agent_connected #=> Boolean # resp.container_instance.running_tasks_count #=> Integer # resp.container_instance.pending_tasks_count #=> Integer # 
resp.container_instance.agent_update_status #=> String, one of "PENDING", "STAGING", "STAGED", "UPDATING", "UPDATED", "FAILED" # resp.container_instance.attributes #=> Array # resp.container_instance.attributes[0].name #=> String # resp.container_instance.attributes[0].value #=> String # resp.container_instance.attributes[0].target_type #=> String, one of "container-instance" # resp.container_instance.attributes[0].target_id #=> String # # @overload deregister_container_instance(params = {}) # @param [Hash] params ({}) def deregister_container_instance(params = {}, options = {}) req = build_request(:deregister_container_instance, params) req.send_request(options) end # Deregisters the specified task definition by family and revision. Upon # deregistration, the task definition is marked as `INACTIVE`. Existing # tasks and services that reference an `INACTIVE` task definition # continue to run without disruption. Existing services that reference # an `INACTIVE` task definition can still scale up or down by modifying # the service's desired count. # # You cannot use an `INACTIVE` task definition to run new tasks or # create new services, and you cannot update an existing service to # reference an `INACTIVE` task definition (although there may be up to a # 10 minute window following deregistration where these restrictions # have not yet taken effect). # # @option params [required, String] :task_definition # The `family` and `revision` (`family:revision`) or full Amazon # Resource Name (ARN) of the task definition to deregister. You must # specify a `revision`. # # @return [Types::DeregisterTaskDefinitionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::DeregisterTaskDefinitionResponse#task_definition #task_definition} => Types::TaskDefinition # # @example Request syntax with placeholder values # # resp = client.deregister_task_definition({ # task_definition: "String", # required # }) # # @example Response structure # # resp.task_definition.task_definition_arn #=> String # resp.task_definition.container_definitions #=> Array # resp.task_definition.container_definitions[0].name #=> String # resp.task_definition.container_definitions[0].image #=> String # resp.task_definition.container_definitions[0].cpu #=> Integer # resp.task_definition.container_definitions[0].memory #=> Integer # resp.task_definition.container_definitions[0].memory_reservation #=> Integer # resp.task_definition.container_definitions[0].links #=> Array # resp.task_definition.container_definitions[0].links[0] #=> String # resp.task_definition.container_definitions[0].port_mappings #=> Array # resp.task_definition.container_definitions[0].port_mappings[0].container_port #=> Integer # resp.task_definition.container_definitions[0].port_mappings[0].host_port #=> Integer # resp.task_definition.container_definitions[0].port_mappings[0].protocol #=> String, one of "tcp", "udp" # resp.task_definition.container_definitions[0].essential #=> Boolean # resp.task_definition.container_definitions[0].entry_point #=> Array # resp.task_definition.container_definitions[0].entry_point[0] #=> String # resp.task_definition.container_definitions[0].command #=> Array # resp.task_definition.container_definitions[0].command[0] #=> String # resp.task_definition.container_definitions[0].environment #=> Array # resp.task_definition.container_definitions[0].environment[0].name #=> String # resp.task_definition.container_definitions[0].environment[0].value #=> String # 
resp.task_definition.container_definitions[0].mount_points #=> Array # resp.task_definition.container_definitions[0].mount_points[0].source_volume #=> String # resp.task_definition.container_definitions[0].mount_points[0].container_path #=> String # resp.task_definition.container_definitions[0].mount_points[0].read_only #=> Boolean # resp.task_definition.container_definitions[0].volumes_from #=> Array # resp.task_definition.container_definitions[0].volumes_from[0].source_container #=> String # resp.task_definition.container_definitions[0].volumes_from[0].read_only #=> Boolean # resp.task_definition.container_definitions[0].hostname #=> String # resp.task_definition.container_definitions[0].user #=> String # resp.task_definition.container_definitions[0].working_directory #=> String # resp.task_definition.container_definitions[0].disable_networking #=> Boolean # resp.task_definition.container_definitions[0].privileged #=> Boolean # resp.task_definition.container_definitions[0].readonly_root_filesystem #=> Boolean # resp.task_definition.container_definitions[0].dns_servers #=> Array # resp.task_definition.container_definitions[0].dns_servers[0] #=> String # resp.task_definition.container_definitions[0].dns_search_domains #=> Array # resp.task_definition.container_definitions[0].dns_search_domains[0] #=> String # resp.task_definition.container_definitions[0].extra_hosts #=> Array # resp.task_definition.container_definitions[0].extra_hosts[0].hostname #=> String # resp.task_definition.container_definitions[0].extra_hosts[0].ip_address #=> String # resp.task_definition.container_definitions[0].docker_security_options #=> Array # resp.task_definition.container_definitions[0].docker_security_options[0] #=> String # resp.task_definition.container_definitions[0].docker_labels #=> Hash # resp.task_definition.container_definitions[0].docker_labels["String"] #=> String # resp.task_definition.container_definitions[0].ulimits #=> Array # resp.task_definition.container_definitions[0].ulimits[0].name #=> String, one of "core", "cpu", "data", "fsize", "locks", "memlock", "msgqueue", "nice", "nofile", "nproc", "rss", "rtprio", "rttime", "sigpending", "stack" # resp.task_definition.container_definitions[0].ulimits[0].soft_limit #=> Integer # resp.task_definition.container_definitions[0].ulimits[0].hard_limit #=> Integer # resp.task_definition.container_definitions[0].log_configuration.log_driver #=> String, one of "json-file", "syslog", "journald", "gelf", "fluentd", "awslogs", "splunk" # resp.task_definition.container_definitions[0].log_configuration.options #=> Hash # resp.task_definition.container_definitions[0].log_configuration.options["String"] #=> String # resp.task_definition.family #=> String # resp.task_definition.task_role_arn #=> String # resp.task_definition.network_mode #=> String, one of "bridge", "host", "none" # resp.task_definition.revision #=> Integer # resp.task_definition.volumes #=> Array # resp.task_definition.volumes[0].name #=> String # resp.task_definition.volumes[0].host.source_path #=> String # resp.task_definition.status #=> String, one of "ACTIVE", "INACTIVE" # resp.task_definition.requires_attributes #=> Array # resp.task_definition.requires_attributes[0].name #=> String # resp.task_definition.requires_attributes[0].value #=> String # resp.task_definition.requires_attributes[0].target_type #=> String, one of "container-instance" # resp.task_definition.requires_attributes[0].target_id #=> String # resp.task_definition.placement_constraints #=> Array # 
resp.task_definition.placement_constraints[0].type #=> String, one of "memberOf" # resp.task_definition.placement_constraints[0].expression #=> String # # @overload deregister_task_definition(params = {}) # @param [Hash] params ({}) def deregister_task_definition(params = {}, options = {}) req = build_request(:deregister_task_definition, params) req.send_request(options) end # Describes one or more of your clusters. # # @option params [Array] :clusters # A space-separated list of up to 100 cluster names or full cluster # Amazon Resource Name (ARN) entries. If you do not specify a cluster, # the default cluster is assumed. # # @return [Types::DescribeClustersResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::DescribeClustersResponse#clusters #clusters} => Array<Types::Cluster> # * {Types::DescribeClustersResponse#failures #failures} => Array<Types::Failure> # # @example Request syntax with placeholder values # # resp = client.describe_clusters({ # clusters: ["String"], # }) # # @example Response structure # # resp.clusters #=> Array # resp.clusters[0].cluster_arn #=> String # resp.clusters[0].cluster_name #=> String # resp.clusters[0].status #=> String # resp.clusters[0].registered_container_instances_count #=> Integer # resp.clusters[0].running_tasks_count #=> Integer # resp.clusters[0].pending_tasks_count #=> Integer # resp.clusters[0].active_services_count #=> Integer # resp.failures #=> Array # resp.failures[0].arn #=> String # resp.failures[0].reason #=> String # # @overload describe_clusters(params = {}) # @param [Hash] params ({}) def describe_clusters(params = {}, options = {}) req = build_request(:describe_clusters, params) req.send_request(options) end # Describes Amazon EC2 Container Service container instances. Returns # metadata about registered and remaining resources on each container # instance requested. # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster that # hosts the container instances to describe. If you do not specify a # cluster, the default cluster is assumed. # # @option params [required, Array] :container_instances # A space-separated list of container instance IDs or full Amazon # Resource Name (ARN) entries. 
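    #
    # @example Describing every container instance in a cluster (illustrative sketch)
    #
    #   # A minimal sketch, assuming a cluster named "my-cluster". The ARNs
    #   # returned by list_container_instances are passed straight into
    #   # describe_container_instances.
    #   arns = client.list_container_instances(cluster: "my-cluster").container_instance_arns
    #   unless arns.empty?
    #     resp = client.describe_container_instances(cluster: "my-cluster", container_instances: arns)
    #     resp.container_instances.each { |ci| puts "#{ci.ec2_instance_id}: #{ci.status}" }
    #   end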
# # @return [Types::DescribeContainerInstancesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::DescribeContainerInstancesResponse#container_instances #container_instances} => Array<Types::ContainerInstance> # * {Types::DescribeContainerInstancesResponse#failures #failures} => Array<Types::Failure> # # @example Request syntax with placeholder values # # resp = client.describe_container_instances({ # cluster: "String", # container_instances: ["String"], # required # }) # # @example Response structure # # resp.container_instances #=> Array # resp.container_instances[0].container_instance_arn #=> String # resp.container_instances[0].ec2_instance_id #=> String # resp.container_instances[0].version #=> Integer # resp.container_instances[0].version_info.agent_version #=> String # resp.container_instances[0].version_info.agent_hash #=> String # resp.container_instances[0].version_info.docker_version #=> String # resp.container_instances[0].remaining_resources #=> Array # resp.container_instances[0].remaining_resources[0].name #=> String # resp.container_instances[0].remaining_resources[0].type #=> String # resp.container_instances[0].remaining_resources[0].double_value #=> Float # resp.container_instances[0].remaining_resources[0].long_value #=> Integer # resp.container_instances[0].remaining_resources[0].integer_value #=> Integer # resp.container_instances[0].remaining_resources[0].string_set_value #=> Array # resp.container_instances[0].remaining_resources[0].string_set_value[0] #=> String # resp.container_instances[0].registered_resources #=> Array # resp.container_instances[0].registered_resources[0].name #=> String # resp.container_instances[0].registered_resources[0].type #=> String # resp.container_instances[0].registered_resources[0].double_value #=> Float # resp.container_instances[0].registered_resources[0].long_value #=> Integer # resp.container_instances[0].registered_resources[0].integer_value #=> Integer # resp.container_instances[0].registered_resources[0].string_set_value #=> Array # resp.container_instances[0].registered_resources[0].string_set_value[0] #=> String # resp.container_instances[0].status #=> String # resp.container_instances[0].agent_connected #=> Boolean # resp.container_instances[0].running_tasks_count #=> Integer # resp.container_instances[0].pending_tasks_count #=> Integer # resp.container_instances[0].agent_update_status #=> String, one of "PENDING", "STAGING", "STAGED", "UPDATING", "UPDATED", "FAILED" # resp.container_instances[0].attributes #=> Array # resp.container_instances[0].attributes[0].name #=> String # resp.container_instances[0].attributes[0].value #=> String # resp.container_instances[0].attributes[0].target_type #=> String, one of "container-instance" # resp.container_instances[0].attributes[0].target_id #=> String # resp.failures #=> Array # resp.failures[0].arn #=> String # resp.failures[0].reason #=> String # # @overload describe_container_instances(params = {}) # @param [Hash] params ({}) def describe_container_instances(params = {}, options = {}) req = build_request(:describe_container_instances, params) req.send_request(options) end # Describes the specified services running in your cluster. # # @option params [String] :cluster # The name of the cluster that hosts the service to describe. If you do # not specify a cluster, the default cluster is assumed. # # @option params [required, Array] :services # A list of services to describe. 
You may specify up to 10 services to # describe in a single operation. # # @return [Types::DescribeServicesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::DescribeServicesResponse#services #services} => Array<Types::Service> # * {Types::DescribeServicesResponse#failures #failures} => Array<Types::Failure> # # @example Request syntax with placeholder values # # resp = client.describe_services({ # cluster: "String", # services: ["String"], # required # }) # # @example Response structure # # resp.services #=> Array # resp.services[0].service_arn #=> String # resp.services[0].service_name #=> String # resp.services[0].cluster_arn #=> String # resp.services[0].load_balancers #=> Array # resp.services[0].load_balancers[0].target_group_arn #=> String # resp.services[0].load_balancers[0].load_balancer_name #=> String # resp.services[0].load_balancers[0].container_name #=> String # resp.services[0].load_balancers[0].container_port #=> Integer # resp.services[0].status #=> String # resp.services[0].desired_count #=> Integer # resp.services[0].running_count #=> Integer # resp.services[0].pending_count #=> Integer # resp.services[0].task_definition #=> String # resp.services[0].deployment_configuration.maximum_percent #=> Integer # resp.services[0].deployment_configuration.minimum_healthy_percent #=> Integer # resp.services[0].deployments #=> Array # resp.services[0].deployments[0].id #=> String # resp.services[0].deployments[0].status #=> String # resp.services[0].deployments[0].task_definition #=> String # resp.services[0].deployments[0].desired_count #=> Integer # resp.services[0].deployments[0].pending_count #=> Integer # resp.services[0].deployments[0].running_count #=> Integer # resp.services[0].deployments[0].created_at #=> Time # resp.services[0].deployments[0].updated_at #=> Time # resp.services[0].role_arn #=> String # resp.services[0].events #=> Array # resp.services[0].events[0].id #=> String # resp.services[0].events[0].created_at #=> Time # resp.services[0].events[0].message #=> String # resp.services[0].created_at #=> Time # resp.services[0].placement_constraints #=> Array # resp.services[0].placement_constraints[0].type #=> String, one of "distinctInstance", "memberOf" # resp.services[0].placement_constraints[0].expression #=> String # resp.services[0].placement_strategy #=> Array # resp.services[0].placement_strategy[0].type #=> String, one of "random", "spread", "binpack" # resp.services[0].placement_strategy[0].field #=> String # resp.failures #=> Array # resp.failures[0].arn #=> String # resp.failures[0].reason #=> String # # @overload describe_services(params = {}) # @param [Hash] params ({}) def describe_services(params = {}, options = {}) req = build_request(:describe_services, params) req.send_request(options) end # Describes a task definition. You can specify a `family` and `revision` # to find information about a specific task definition, or you can # simply specify the family to find the latest `ACTIVE` revision in that # family. # # You can only describe `INACTIVE` task definitions while an active task # or service references them. # # # # @option params [required, String] :task_definition # The `family` for the latest `ACTIVE` revision, `family` and `revision` # (`family:revision`) for a specific revision in the family, or full # Amazon Resource Name (ARN) of the task definition to describe. 
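    #
    # @example Describing the latest ACTIVE revision of a family (illustrative sketch)
    #
    #   # A minimal sketch; "my-task-family" is an assumed family name.
    #   # Passing just the family resolves to the latest ACTIVE revision,
    #   # while "my-task-family:3" would pin a specific revision.
    #   resp = client.describe_task_definition(task_definition: "my-task-family")
    #   resp.task_definition.revision #=> the latest ACTIVE revision number
    #   resp.task_definition.status   #=> "ACTIVE"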
# # @return [Types::DescribeTaskDefinitionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::DescribeTaskDefinitionResponse#task_definition #task_definition} => Types::TaskDefinition # # @example Request syntax with placeholder values # # resp = client.describe_task_definition({ # task_definition: "String", # required # }) # # @example Response structure # # resp.task_definition.task_definition_arn #=> String # resp.task_definition.container_definitions #=> Array # resp.task_definition.container_definitions[0].name #=> String # resp.task_definition.container_definitions[0].image #=> String # resp.task_definition.container_definitions[0].cpu #=> Integer # resp.task_definition.container_definitions[0].memory #=> Integer # resp.task_definition.container_definitions[0].memory_reservation #=> Integer # resp.task_definition.container_definitions[0].links #=> Array # resp.task_definition.container_definitions[0].links[0] #=> String # resp.task_definition.container_definitions[0].port_mappings #=> Array # resp.task_definition.container_definitions[0].port_mappings[0].container_port #=> Integer # resp.task_definition.container_definitions[0].port_mappings[0].host_port #=> Integer # resp.task_definition.container_definitions[0].port_mappings[0].protocol #=> String, one of "tcp", "udp" # resp.task_definition.container_definitions[0].essential #=> Boolean # resp.task_definition.container_definitions[0].entry_point #=> Array # resp.task_definition.container_definitions[0].entry_point[0] #=> String # resp.task_definition.container_definitions[0].command #=> Array # resp.task_definition.container_definitions[0].command[0] #=> String # resp.task_definition.container_definitions[0].environment #=> Array # resp.task_definition.container_definitions[0].environment[0].name #=> String # resp.task_definition.container_definitions[0].environment[0].value #=> String # resp.task_definition.container_definitions[0].mount_points #=> Array # resp.task_definition.container_definitions[0].mount_points[0].source_volume #=> String # resp.task_definition.container_definitions[0].mount_points[0].container_path #=> String # resp.task_definition.container_definitions[0].mount_points[0].read_only #=> Boolean # resp.task_definition.container_definitions[0].volumes_from #=> Array # resp.task_definition.container_definitions[0].volumes_from[0].source_container #=> String # resp.task_definition.container_definitions[0].volumes_from[0].read_only #=> Boolean # resp.task_definition.container_definitions[0].hostname #=> String # resp.task_definition.container_definitions[0].user #=> String # resp.task_definition.container_definitions[0].working_directory #=> String # resp.task_definition.container_definitions[0].disable_networking #=> Boolean # resp.task_definition.container_definitions[0].privileged #=> Boolean # resp.task_definition.container_definitions[0].readonly_root_filesystem #=> Boolean # resp.task_definition.container_definitions[0].dns_servers #=> Array # resp.task_definition.container_definitions[0].dns_servers[0] #=> String # resp.task_definition.container_definitions[0].dns_search_domains #=> Array # resp.task_definition.container_definitions[0].dns_search_domains[0] #=> String # resp.task_definition.container_definitions[0].extra_hosts #=> Array # resp.task_definition.container_definitions[0].extra_hosts[0].hostname #=> String # resp.task_definition.container_definitions[0].extra_hosts[0].ip_address #=> String # 
resp.task_definition.container_definitions[0].docker_security_options #=> Array # resp.task_definition.container_definitions[0].docker_security_options[0] #=> String # resp.task_definition.container_definitions[0].docker_labels #=> Hash # resp.task_definition.container_definitions[0].docker_labels["String"] #=> String # resp.task_definition.container_definitions[0].ulimits #=> Array # resp.task_definition.container_definitions[0].ulimits[0].name #=> String, one of "core", "cpu", "data", "fsize", "locks", "memlock", "msgqueue", "nice", "nofile", "nproc", "rss", "rtprio", "rttime", "sigpending", "stack" # resp.task_definition.container_definitions[0].ulimits[0].soft_limit #=> Integer # resp.task_definition.container_definitions[0].ulimits[0].hard_limit #=> Integer # resp.task_definition.container_definitions[0].log_configuration.log_driver #=> String, one of "json-file", "syslog", "journald", "gelf", "fluentd", "awslogs", "splunk" # resp.task_definition.container_definitions[0].log_configuration.options #=> Hash # resp.task_definition.container_definitions[0].log_configuration.options["String"] #=> String # resp.task_definition.family #=> String # resp.task_definition.task_role_arn #=> String # resp.task_definition.network_mode #=> String, one of "bridge", "host", "none" # resp.task_definition.revision #=> Integer # resp.task_definition.volumes #=> Array # resp.task_definition.volumes[0].name #=> String # resp.task_definition.volumes[0].host.source_path #=> String # resp.task_definition.status #=> String, one of "ACTIVE", "INACTIVE" # resp.task_definition.requires_attributes #=> Array # resp.task_definition.requires_attributes[0].name #=> String # resp.task_definition.requires_attributes[0].value #=> String # resp.task_definition.requires_attributes[0].target_type #=> String, one of "container-instance" # resp.task_definition.requires_attributes[0].target_id #=> String # resp.task_definition.placement_constraints #=> Array # resp.task_definition.placement_constraints[0].type #=> String, one of "memberOf" # resp.task_definition.placement_constraints[0].expression #=> String # # @overload describe_task_definition(params = {}) # @param [Hash] params ({}) def describe_task_definition(params = {}, options = {}) req = build_request(:describe_task_definition, params) req.send_request(options) end # Describes a specified task or tasks. # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster that # hosts the task to describe. If you do not specify a cluster, the # default cluster is assumed. # # @option params [required, Array] :tasks # A space-separated list of task IDs or full Amazon Resource Name (ARN) # entries. 
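    #
    # @example Checking the status of known tasks (illustrative sketch)
    #
    #   # A minimal sketch; the task ID is an assumed placeholder and the
    #   # default cluster is used because no cluster is specified.
    #   resp = client.describe_tasks(tasks: ["0b69d5c0-d655-4695-98cd-5d2d526d9d5a"])
    #   resp.tasks.each do |task|
    #     puts "#{task.task_arn}: last=#{task.last_status} desired=#{task.desired_status}"
    #   end
    #   resp.failures.each { |f| puts "#{f.arn}: #{f.reason}" }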
# # @return [Types::DescribeTasksResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::DescribeTasksResponse#tasks #tasks} => Array<Types::Task> # * {Types::DescribeTasksResponse#failures #failures} => Array<Types::Failure> # # @example Request syntax with placeholder values # # resp = client.describe_tasks({ # cluster: "String", # tasks: ["String"], # required # }) # # @example Response structure # # resp.tasks #=> Array # resp.tasks[0].task_arn #=> String # resp.tasks[0].cluster_arn #=> String # resp.tasks[0].task_definition_arn #=> String # resp.tasks[0].container_instance_arn #=> String # resp.tasks[0].overrides.container_overrides #=> Array # resp.tasks[0].overrides.container_overrides[0].name #=> String # resp.tasks[0].overrides.container_overrides[0].command #=> Array # resp.tasks[0].overrides.container_overrides[0].command[0] #=> String # resp.tasks[0].overrides.container_overrides[0].environment #=> Array # resp.tasks[0].overrides.container_overrides[0].environment[0].name #=> String # resp.tasks[0].overrides.container_overrides[0].environment[0].value #=> String # resp.tasks[0].overrides.task_role_arn #=> String # resp.tasks[0].last_status #=> String # resp.tasks[0].desired_status #=> String # resp.tasks[0].containers #=> Array # resp.tasks[0].containers[0].container_arn #=> String # resp.tasks[0].containers[0].task_arn #=> String # resp.tasks[0].containers[0].name #=> String # resp.tasks[0].containers[0].last_status #=> String # resp.tasks[0].containers[0].exit_code #=> Integer # resp.tasks[0].containers[0].reason #=> String # resp.tasks[0].containers[0].network_bindings #=> Array # resp.tasks[0].containers[0].network_bindings[0].bind_ip #=> String # resp.tasks[0].containers[0].network_bindings[0].container_port #=> Integer # resp.tasks[0].containers[0].network_bindings[0].host_port #=> Integer # resp.tasks[0].containers[0].network_bindings[0].protocol #=> String, one of "tcp", "udp" # resp.tasks[0].started_by #=> String # resp.tasks[0].version #=> Integer # resp.tasks[0].stopped_reason #=> String # resp.tasks[0].created_at #=> Time # resp.tasks[0].started_at #=> Time # resp.tasks[0].stopped_at #=> Time # resp.tasks[0].group #=> String # resp.failures #=> Array # resp.failures[0].arn #=> String # resp.failures[0].reason #=> String # # @overload describe_tasks(params = {}) # @param [Hash] params ({}) def describe_tasks(params = {}, options = {}) req = build_request(:describe_tasks, params) req.send_request(options) end # This action is only used by the Amazon EC2 Container Service agent, # and it is not intended for use outside of the agent. # # # # Returns an endpoint for the Amazon EC2 Container Service agent to poll # for updates. # # @option params [String] :container_instance # The container instance ID or full Amazon Resource Name (ARN) of the # container instance. The ARN contains the `arn:aws:ecs` namespace, # followed by the region of the container instance, the AWS account ID # of the container instance owner, the `container-instance` namespace, # and then the container instance ID. For example, # `arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID # `. # # @option params [String] :cluster # The cluster that the container instance belongs to. 
    #
    # @return [Types::DiscoverPollEndpointResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
    #
    #   * {Types::DiscoverPollEndpointResponse#endpoint #endpoint} => String
    #   * {Types::DiscoverPollEndpointResponse#telemetry_endpoint #telemetry_endpoint} => String
    #
    # @example Request syntax with placeholder values
    #
    #   resp = client.discover_poll_endpoint({
    #     container_instance: "String",
    #     cluster: "String",
    #   })
    #
    # @example Response structure
    #
    #   resp.endpoint #=> String
    #   resp.telemetry_endpoint #=> String
    #
    # @overload discover_poll_endpoint(params = {})
    # @param [Hash] params ({})
    def discover_poll_endpoint(params = {}, options = {})
      req = build_request(:discover_poll_endpoint, params)
      req.send_request(options)
    end

    # Lists the attributes for Amazon ECS resources within a specified
    # target type and cluster. When you specify a target type and cluster,
    # `ListAttributes` returns a list of attribute objects, one for each
    # attribute on each resource. You can filter the list of results to a
    # single attribute name to only return results that have that name. You
    # can also filter the results by attribute name and value, for example,
    # to see which container instances in a cluster are running a Linux AMI
    # (`ecs.os-type=linux`).
    #
    # @option params [String] :cluster
    #   The short name or full Amazon Resource Name (ARN) of the cluster to
    #   list attributes. If you do not specify a cluster, the default cluster
    #   is assumed.
    #
    # @option params [required, String] :target_type
    #   The type of the target with which to list attributes.
    #
    # @option params [String] :attribute_name
    #   The name of the attribute with which to filter the results.
    #
    # @option params [String] :attribute_value
    #   The value of the attribute with which to filter results. You must also
    #   specify an attribute name to use this parameter.
    #
    # @option params [String] :next_token
    #   The `nextToken` value returned from a previous paginated
    #   `ListAttributes` request where `maxResults` was used and the results
    #   exceeded the value of that parameter. Pagination continues from the
    #   end of the previous results that returned the `nextToken` value. This
    #   value is `null` when there are no more results to return.
    #
    #   This token should be treated as an opaque identifier that is only used
    #   to retrieve the next items in a list and not for other programmatic
    #   purposes.
    #
    # @option params [Integer] :max_results
    #   The maximum number of attribute results returned by `ListAttributes` in
    #   paginated output. When this parameter is used, `ListAttributes` only
    #   returns `maxResults` results in a single page along with a `nextToken`
    #   response element. The remaining results of the initial request can be
    #   seen by sending another `ListAttributes` request with the returned
    #   `nextToken` value. This value can be between 1 and 100. If this
    #   parameter is not used, then `ListAttributes` returns up to 100 results
    #   and a `nextToken` value if applicable.
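    #
    # @example Paging through attribute results manually (illustrative sketch)
    #
    #   # A minimal sketch, assuming the default cluster; it follows the
    #   # returned next_token until the service stops returning one. The
    #   # attribute name "ecs.os-type" is taken from the description above.
    #   params = { target_type: "container-instance", attribute_name: "ecs.os-type" }
    #   attributes = []
    #   loop do
    #     resp = client.list_attributes(params)
    #     attributes.concat(resp.attributes)
    #     break unless resp.next_token
    #     params[:next_token] = resp.next_token
    #   end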
# # @return [Types::ListAttributesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListAttributesResponse#attributes #attributes} => Array<Types::Attribute> # * {Types::ListAttributesResponse#next_token #next_token} => String # # @example Request syntax with placeholder values # # resp = client.list_attributes({ # cluster: "String", # target_type: "container-instance", # required, accepts container-instance # attribute_name: "String", # attribute_value: "String", # next_token: "String", # max_results: 1, # }) # # @example Response structure # # resp.attributes #=> Array # resp.attributes[0].name #=> String # resp.attributes[0].value #=> String # resp.attributes[0].target_type #=> String, one of "container-instance" # resp.attributes[0].target_id #=> String # resp.next_token #=> String # # @overload list_attributes(params = {}) # @param [Hash] params ({}) def list_attributes(params = {}, options = {}) req = build_request(:list_attributes, params) req.send_request(options) end # Returns a list of existing clusters. # # @option params [String] :next_token # The `nextToken` value returned from a previous paginated # `ListClusters` request where `maxResults` was used and the results # exceeded the value of that parameter. Pagination continues from the # end of the previous results that returned the `nextToken` value. This # value is `null` when there are no more results to return. # # This token should be treated as an opaque identifier that is only used # to retrieve the next items in a list and not for other programmatic # purposes. # # # # @option params [Integer] :max_results # The maximum number of cluster results returned by `ListClusters` in # paginated output. When this parameter is used, `ListClusters` only # returns `maxResults` results in a single page along with a `nextToken` # response element. The remaining results of the initial request can be # seen by sending another `ListClusters` request with the returned # `nextToken` value. This value can be between 1 and 100. If this # parameter is not used, then `ListClusters` returns up to 100 results # and a `nextToken` value if applicable. # # @return [Types::ListClustersResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListClustersResponse#cluster_arns #cluster_arns} => Array<String> # * {Types::ListClustersResponse#next_token #next_token} => String # # @example Request syntax with placeholder values # # resp = client.list_clusters({ # next_token: "String", # max_results: 1, # }) # # @example Response structure # # resp.cluster_arns #=> Array # resp.cluster_arns[0] #=> String # resp.next_token #=> String # # @overload list_clusters(params = {}) # @param [Hash] params ({}) def list_clusters(params = {}, options = {}) req = build_request(:list_clusters, params) req.send_request(options) end # Returns a list of container instances in a specified cluster. You can # filter the results of a `ListContainerInstances` operation with # cluster query language statements inside the `filter` parameter. For # more information, see [Cluster Query Language][1] in the *Amazon EC2 # Container Service Developer Guide*. # # # # [1]: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster that # hosts the container instances to list. 
If you do not specify a # cluster, the default cluster is assumed. # # @option params [String] :filter # You can filter the results of a `ListContainerInstances` operation # with cluster query language statements. For more information, see # [Cluster Query Language][1] in the *Amazon EC2 Container Service # Developer Guide*. # # # # [1]: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html # # @option params [String] :next_token # The `nextToken` value returned from a previous paginated # `ListContainerInstances` request where `maxResults` was used and the # results exceeded the value of that parameter. Pagination continues # from the end of the previous results that returned the `nextToken` # value. This value is `null` when there are no more results to return. # # This token should be treated as an opaque identifier that is only used # to retrieve the next items in a list and not for other programmatic # purposes. # # # # @option params [Integer] :max_results # The maximum number of container instance results returned by # `ListContainerInstances` in paginated output. When this parameter is # used, `ListContainerInstances` only returns `maxResults` results in a # single page along with a `nextToken` response element. The remaining # results of the initial request can be seen by sending another # `ListContainerInstances` request with the returned `nextToken` value. # This value can be between 1 and 100. If this parameter is not used, # then `ListContainerInstances` returns up to 100 results and a # `nextToken` value if applicable. # # @option params [String] :status # The container instance status with which to filter the # `ListContainerInstances` results. Specifying a container instance # status of `DRAINING` limits the results to container instances that # have been set to drain with the UpdateContainerInstancesState # operation. # # @return [Types::ListContainerInstancesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListContainerInstancesResponse#container_instance_arns #container_instance_arns} => Array<String> # * {Types::ListContainerInstancesResponse#next_token #next_token} => String # # @example Request syntax with placeholder values # # resp = client.list_container_instances({ # cluster: "String", # filter: "String", # next_token: "String", # max_results: 1, # status: "ACTIVE", # accepts ACTIVE, DRAINING # }) # # @example Response structure # # resp.container_instance_arns #=> Array # resp.container_instance_arns[0] #=> String # resp.next_token #=> String # # @overload list_container_instances(params = {}) # @param [Hash] params ({}) def list_container_instances(params = {}, options = {}) req = build_request(:list_container_instances, params) req.send_request(options) end # Lists the services that are running in a specified cluster. # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster that # hosts the services to list. If you do not specify a cluster, the # default cluster is assumed. # # @option params [String] :next_token # The `nextToken` value returned from a previous paginated # `ListServices` request where `maxResults` was used and the results # exceeded the value of that parameter. Pagination continues from the # end of the previous results that returned the `nextToken` value. This # value is `null` when there are no more results to return. 
# # This token should be treated as an opaque identifier that is only used # to retrieve the next items in a list and not for other programmatic # purposes. # # # # @option params [Integer] :max_results # The maximum number of container instance results returned by # `ListServices` in paginated output. When this parameter is used, # `ListServices` only returns `maxResults` results in a single page # along with a `nextToken` response element. The remaining results of # the initial request can be seen by sending another `ListServices` # request with the returned `nextToken` value. This value can be between # 1 and 10. If this parameter is not used, then `ListServices` returns # up to 10 results and a `nextToken` value if applicable. # # @return [Types::ListServicesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListServicesResponse#service_arns #service_arns} => Array<String> # * {Types::ListServicesResponse#next_token #next_token} => String # # @example Request syntax with placeholder values # # resp = client.list_services({ # cluster: "String", # next_token: "String", # max_results: 1, # }) # # @example Response structure # # resp.service_arns #=> Array # resp.service_arns[0] #=> String # resp.next_token #=> String # # @overload list_services(params = {}) # @param [Hash] params ({}) def list_services(params = {}, options = {}) req = build_request(:list_services, params) req.send_request(options) end # Returns a list of task definition families that are registered to your # account (which may include task definition families that no longer # have any `ACTIVE` task definition revisions). # # You can filter out task definition families that do not contain any # `ACTIVE` task definition revisions by setting the `status` parameter # to `ACTIVE`. You can also filter the results with the `familyPrefix` # parameter. # # @option params [String] :family_prefix # The `familyPrefix` is a string that is used to filter the results of # `ListTaskDefinitionFamilies`. If you specify a `familyPrefix`, only # task definition family names that begin with the `familyPrefix` string # are returned. # # @option params [String] :status # The task definition family status with which to filter the # `ListTaskDefinitionFamilies` results. By default, both `ACTIVE` and # `INACTIVE` task definition families are listed. If this parameter is # set to `ACTIVE`, only task definition families that have an `ACTIVE` # task definition revision are returned. If this parameter is set to # `INACTIVE`, only task definition families that do not have any # `ACTIVE` task definition revisions are returned. If you paginate the # resulting output, be sure to keep the `status` value constant in each # subsequent request. # # @option params [String] :next_token # The `nextToken` value returned from a previous paginated # `ListTaskDefinitionFamilies` request where `maxResults` was used and # the results exceeded the value of that parameter. Pagination continues # from the end of the previous results that returned the `nextToken` # value. This value is `null` when there are no more results to return. # # This token should be treated as an opaque identifier that is only used # to retrieve the next items in a list and not for other programmatic # purposes. # # # # @option params [Integer] :max_results # The maximum number of task definition family results returned by # `ListTaskDefinitionFamilies` in paginated output. 
    #   When this parameter is used, `ListTaskDefinitionFamilies` only
    #   returns `maxResults` results in a single page along with a
    #   `nextToken` response element. The remaining results of the initial
    #   request can be seen by sending another `ListTaskDefinitionFamilies`
    #   request with the returned `nextToken` value. This value can be
    #   between 1 and 100. If this parameter is not used, then
    #   `ListTaskDefinitionFamilies` returns up to 100 results and a
    #   `nextToken` value if applicable.
    #
    # @return [Types::ListTaskDefinitionFamiliesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
    #
    #   * {Types::ListTaskDefinitionFamiliesResponse#families #families} => Array<String>
    #   * {Types::ListTaskDefinitionFamiliesResponse#next_token #next_token} => String
    #
    # @example Request syntax with placeholder values
    #
    #   resp = client.list_task_definition_families({
    #     family_prefix: "String",
    #     status: "ACTIVE", # accepts ACTIVE, INACTIVE, ALL
    #     next_token: "String",
    #     max_results: 1,
    #   })
    #
    # @example Response structure
    #
    #   resp.families #=> Array
    #   resp.families[0] #=> String
    #   resp.next_token #=> String
    #
    # @overload list_task_definition_families(params = {})
    # @param [Hash] params ({})
    def list_task_definition_families(params = {}, options = {})
      req = build_request(:list_task_definition_families, params)
      req.send_request(options)
    end

    # Returns a list of task definitions that are registered to your
    # account. You can filter the results by family name with the
    # `familyPrefix` parameter or by status with the `status` parameter.
    #
    # @option params [String] :family_prefix
    #   The full family name with which to filter the `ListTaskDefinitions`
    #   results. Specifying a `familyPrefix` limits the listed task
    #   definitions to task definition revisions that belong to that family.
    #
    # @option params [String] :status
    #   The task definition status with which to filter the
    #   `ListTaskDefinitions` results. By default, only `ACTIVE` task
    #   definitions are listed. By setting this parameter to `INACTIVE`, you
    #   can view task definitions that are `INACTIVE` as long as an active
    #   task or service still references them. If you paginate the resulting
    #   output, be sure to keep the `status` value constant in each
    #   subsequent request.
    #
    # @option params [String] :sort
    #   The order in which to sort the results. Valid values are `ASC` and
    #   `DESC`. By default (`ASC`), task definitions are listed
    #   lexicographically by family name and in ascending numerical order by
    #   revision so that the newest task definitions in a family are listed
    #   last. Setting this parameter to `DESC` reverses the sort order on
    #   family name and revision so that the newest task definitions in a
    #   family are listed first.
    #
    # @option params [String] :next_token
    #   The `nextToken` value returned from a previous paginated
    #   `ListTaskDefinitions` request where `maxResults` was used and the
    #   results exceeded the value of that parameter. Pagination continues
    #   from the end of the previous results that returned the `nextToken`
    #   value. This value is `null` when there are no more results to
    #   return.
    #
    #   This token should be treated as an opaque identifier that is only
    #   used to retrieve the next items in a list and not for other
    #   programmatic purposes.
    #
    # @option params [Integer] :max_results
    #   The maximum number of task definition results returned by
    #   `ListTaskDefinitions` in paginated output.
When this parameter is # used, `ListTaskDefinitions` only returns `maxResults` results in a # single page along with a `nextToken` response element. The remaining # results of the initial request can be seen by sending another # `ListTaskDefinitions` request with the returned `nextToken` value. # This value can be between 1 and 100. If this parameter is not used, # then `ListTaskDefinitions` returns up to 100 results and a `nextToken` # value if applicable. # # @return [Types::ListTaskDefinitionsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListTaskDefinitionsResponse#task_definition_arns #task_definition_arns} => Array<String> # * {Types::ListTaskDefinitionsResponse#next_token #next_token} => String # # @example Request syntax with placeholder values # # resp = client.list_task_definitions({ # family_prefix: "String", # status: "ACTIVE", # accepts ACTIVE, INACTIVE # sort: "ASC", # accepts ASC, DESC # next_token: "String", # max_results: 1, # }) # # @example Response structure # # resp.task_definition_arns #=> Array # resp.task_definition_arns[0] #=> String # resp.next_token #=> String # # @overload list_task_definitions(params = {}) # @param [Hash] params ({}) def list_task_definitions(params = {}, options = {}) req = build_request(:list_task_definitions, params) req.send_request(options) end # Returns a list of tasks for a specified cluster. You can filter the # results by family name, by a particular container instance, or by the # desired status of the task with the `family`, `containerInstance`, and # `desiredStatus` parameters. # # Recently-stopped tasks might appear in the returned results. # Currently, stopped tasks appear in the returned results for at least # one hour. # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster that # hosts the tasks to list. If you do not specify a cluster, the default # cluster is assumed. # # @option params [String] :container_instance # The container instance ID or full Amazon Resource Name (ARN) of the # container instance with which to filter the `ListTasks` results. # Specifying a `containerInstance` limits the results to tasks that # belong to that container instance. # # @option params [String] :family # The name of the family with which to filter the `ListTasks` results. # Specifying a `family` limits the results to tasks that belong to that # family. # # @option params [String] :next_token # The `nextToken` value returned from a previous paginated `ListTasks` # request where `maxResults` was used and the results exceeded the value # of that parameter. Pagination continues from the end of the previous # results that returned the `nextToken` value. This value is `null` when # there are no more results to return. # # This token should be treated as an opaque identifier that is only used # to retrieve the next items in a list and not for other programmatic # purposes. # # # # @option params [Integer] :max_results # The maximum number of task results returned by `ListTasks` in # paginated output. When this parameter is used, `ListTasks` only # returns `maxResults` results in a single page along with a `nextToken` # response element. The remaining results of the initial request can be # seen by sending another `ListTasks` request with the returned # `nextToken` value. This value can be between 1 and 100. If this # parameter is not used, then `ListTasks` returns up to 100 results and # a `nextToken` value if applicable. 
# # @option params [String] :started_by # The `startedBy` value with which to filter the task results. # Specifying a `startedBy` value limits the results to tasks that were # started with that value. # # @option params [String] :service_name # The name of the service with which to filter the `ListTasks` results. # Specifying a `serviceName` limits the results to tasks that belong to # that service. # # @option params [String] :desired_status # The task desired status with which to filter the `ListTasks` results. # Specifying a `desiredStatus` of `STOPPED` limits the results to tasks # that ECS has set the desired status to `STOPPED`, which can be useful # for debugging tasks that are not starting properly or have died or # finished. The default status filter is `RUNNING`, which shows tasks # that ECS has set the desired status to `RUNNING`. # # Although you can filter results based on a desired status of # `PENDING`, this will not return any results because ECS never sets the # desired status of a task to that value (only a task's `lastStatus` # may have a value of `PENDING`). # # # # @return [Types::ListTasksResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListTasksResponse#task_arns #task_arns} => Array<String> # * {Types::ListTasksResponse#next_token #next_token} => String # # @example Request syntax with placeholder values # # resp = client.list_tasks({ # cluster: "String", # container_instance: "String", # family: "String", # next_token: "String", # max_results: 1, # started_by: "String", # service_name: "String", # desired_status: "RUNNING", # accepts RUNNING, PENDING, STOPPED # }) # # @example Response structure # # resp.task_arns #=> Array # resp.task_arns[0] #=> String # resp.next_token #=> String # # @overload list_tasks(params = {}) # @param [Hash] params ({}) def list_tasks(params = {}, options = {}) req = build_request(:list_tasks, params) req.send_request(options) end # Create or update an attribute on an Amazon ECS resource. If the # attribute does not exist, it is created. If the attribute exists, its # value is replaced with the specified value. To delete an attribute, # use DeleteAttributes. For more information, see [Attributes][1] in the # *Amazon EC2 Container Service Developer Guide*. # # # # [1]: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html#attributes # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster that # contains the resource to apply attributes. If you do not specify a # cluster, the default cluster is assumed. # # @option params [required, Array] :attributes # The attributes to apply to your resource. You can specify up to 10 # custom attributes per resource. You can specify up to 10 attributes in # a single call. 
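    #
    # The example below is an editorial sketch, not generated reference
    # material: the cluster name, attribute name/value, and target ID are
    # placeholders you would replace with your own. It shows a single custom
    # attribute being applied to a container instance, as described above.
    #
    # @example Applying a custom attribute to a container instance (illustrative sketch)
    #
    #   resp = client.put_attributes({
    #     cluster: "default",                     # assumed cluster name
    #     attributes: [
    #       {
    #         name: "stack",                      # hypothetical attribute name
    #         value: "production",                # hypothetical attribute value
    #         target_type: "container-instance",
    #         target_id: "container_instance_id", # placeholder target ID
    #       },
    #     ],
    #   })
    #   resp.attributes.each { |a| puts "#{a.name}=#{a.value} on #{a.target_id}" }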
# # @return [Types::PutAttributesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::PutAttributesResponse#attributes #attributes} => Array<Types::Attribute> # # @example Request syntax with placeholder values # # resp = client.put_attributes({ # cluster: "String", # attributes: [ # required # { # name: "String", # required # value: "String", # target_type: "container-instance", # accepts container-instance # target_id: "String", # }, # ], # }) # # @example Response structure # # resp.attributes #=> Array # resp.attributes[0].name #=> String # resp.attributes[0].value #=> String # resp.attributes[0].target_type #=> String, one of "container-instance" # resp.attributes[0].target_id #=> String # # @overload put_attributes(params = {}) # @param [Hash] params ({}) def put_attributes(params = {}, options = {}) req = build_request(:put_attributes, params) req.send_request(options) end # This action is only used by the Amazon EC2 Container Service agent, # and it is not intended for use outside of the agent. # # # # Registers an EC2 instance into the specified cluster. This instance # becomes available to place containers on. # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster with # which to register your container instance. If you do not specify a # cluster, the default cluster is assumed. # # @option params [String] :instance_identity_document # The instance identity document for the EC2 instance to register. This # document can be found by running the following command from the # instance: `curl # http://169.254.169.254/latest/dynamic/instance-identity/document/` # # @option params [String] :instance_identity_document_signature # The instance identity document signature for the EC2 instance to # register. This signature can be found by running the following command # from the instance: `curl # http://169.254.169.254/latest/dynamic/instance-identity/signature/` # # @option params [Array] :total_resources # The resources available on the instance. # # @option params [Types::VersionInfo] :version_info # The version information for the Amazon ECS container agent and Docker # daemon running on the container instance. # # @option params [String] :container_instance_arn # The Amazon Resource Name (ARN) of the container instance (if it was # previously registered). # # @option params [Array] :attributes # The container instance attributes that this container instance # supports. 
# # @return [Types::RegisterContainerInstanceResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::RegisterContainerInstanceResponse#container_instance #container_instance} => Types::ContainerInstance # # @example Request syntax with placeholder values # # resp = client.register_container_instance({ # cluster: "String", # instance_identity_document: "String", # instance_identity_document_signature: "String", # total_resources: [ # { # name: "String", # type: "String", # double_value: 1.0, # long_value: 1, # integer_value: 1, # string_set_value: ["String"], # }, # ], # version_info: { # agent_version: "String", # agent_hash: "String", # docker_version: "String", # }, # container_instance_arn: "String", # attributes: [ # { # name: "String", # required # value: "String", # target_type: "container-instance", # accepts container-instance # target_id: "String", # }, # ], # }) # # @example Response structure # # resp.container_instance.container_instance_arn #=> String # resp.container_instance.ec2_instance_id #=> String # resp.container_instance.version #=> Integer # resp.container_instance.version_info.agent_version #=> String # resp.container_instance.version_info.agent_hash #=> String # resp.container_instance.version_info.docker_version #=> String # resp.container_instance.remaining_resources #=> Array # resp.container_instance.remaining_resources[0].name #=> String # resp.container_instance.remaining_resources[0].type #=> String # resp.container_instance.remaining_resources[0].double_value #=> Float # resp.container_instance.remaining_resources[0].long_value #=> Integer # resp.container_instance.remaining_resources[0].integer_value #=> Integer # resp.container_instance.remaining_resources[0].string_set_value #=> Array # resp.container_instance.remaining_resources[0].string_set_value[0] #=> String # resp.container_instance.registered_resources #=> Array # resp.container_instance.registered_resources[0].name #=> String # resp.container_instance.registered_resources[0].type #=> String # resp.container_instance.registered_resources[0].double_value #=> Float # resp.container_instance.registered_resources[0].long_value #=> Integer # resp.container_instance.registered_resources[0].integer_value #=> Integer # resp.container_instance.registered_resources[0].string_set_value #=> Array # resp.container_instance.registered_resources[0].string_set_value[0] #=> String # resp.container_instance.status #=> String # resp.container_instance.agent_connected #=> Boolean # resp.container_instance.running_tasks_count #=> Integer # resp.container_instance.pending_tasks_count #=> Integer # resp.container_instance.agent_update_status #=> String, one of "PENDING", "STAGING", "STAGED", "UPDATING", "UPDATED", "FAILED" # resp.container_instance.attributes #=> Array # resp.container_instance.attributes[0].name #=> String # resp.container_instance.attributes[0].value #=> String # resp.container_instance.attributes[0].target_type #=> String, one of "container-instance" # resp.container_instance.attributes[0].target_id #=> String # # @overload register_container_instance(params = {}) # @param [Hash] params ({}) def register_container_instance(params = {}, options = {}) req = build_request(:register_container_instance, params) req.send_request(options) end # Registers a new task definition from the supplied `family` and # `containerDefinitions`. Optionally, you can add data volumes to your # containers with the `volumes` parameter. 
For more information about # task definition parameters and defaults, see [Amazon ECS Task # Definitions][1] in the *Amazon EC2 Container Service Developer Guide*. # # You can specify an IAM role for your task with the `taskRoleArn` # parameter. When you specify an IAM role for a task, its containers can # then use the latest versions of the AWS CLI or SDKs to make API # requests to the AWS services that are specified in the IAM policy # associated with the role. For more information, see [IAM Roles for # Tasks][2] in the *Amazon EC2 Container Service Developer Guide*. # # You can specify a Docker networking mode for the containers in your # task definition with the `networkMode` parameter. The available # network modes correspond to those described in [Network settings][3] # in the Docker run reference. # # # # [1]: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html # [2]: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html # [3]: https://docs.docker.com/engine/reference/run/#/network-settings # # @option params [required, String] :family # You must specify a `family` for a task definition, which allows you to # track multiple versions of the same task definition. The `family` is # used as a name for your task definition. Up to 255 letters (uppercase # and lowercase), numbers, hyphens, and underscores are allowed. # # @option params [String] :task_role_arn # The short name or full Amazon Resource Name (ARN) of the IAM role that # containers in this task can assume. All containers in this task are # granted the permissions that are specified in this role. For more # information, see [IAM Roles for Tasks][1] in the *Amazon EC2 Container # Service Developer Guide*. # # # # [1]: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html # # @option params [String] :network_mode # The Docker networking mode to use for the containers in the task. The # valid values are `none`, `bridge`, and `host`. # # The default Docker network mode is `bridge`. If the network mode is # set to `none`, you cannot specify port mappings in your container # definitions, and the task's containers do not have external # connectivity. The `host` network mode offers the highest networking # performance for containers because they use the host network stack # instead of the virtualized network stack provided by the `bridge` # mode; however, exposed container ports are mapped directly to the # corresponding host port, so you cannot take advantage of dynamic host # port mappings or run multiple instantiations of the same task on a # single container instance if port mappings are used. # # For more information, see [Network settings][1] in the *Docker run # reference*. # # # # [1]: https://docs.docker.com/engine/reference/run/#network-settings # # @option params [required, Array] :container_definitions # A list of container definitions in JSON format that describe the # different containers that make up your task. # # @option params [Array] :volumes # A list of volume definitions in JSON format that containers in your # task may use. # # @option params [Array] :placement_constraints # An array of placement constraint objects to use for the task. You can # specify a maximum of 10 constraints per task (this limit includes # constraints in the task definition and those specified at run time). 
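    #
    # The example below is an editorial sketch rather than generated
    # reference material: the family name, image, and resource sizes are
    # assumptions. It shows the smallest useful shape of a task definition,
    # a single essential container using the default `bridge` network mode.
    #
    # @example Registering a minimal task definition (illustrative sketch)
    #
    #   resp = client.register_task_definition({
    #     family: "sleep360",          # assumed family name
    #     network_mode: "bridge",
    #     container_definitions: [
    #       {
    #         name: "sleep",
    #         image: "busybox",        # assumed image
    #         cpu: 10,
    #         memory: 10,
    #         essential: true,
    #         command: ["sleep", "360"],
    #       },
    #     ],
    #   })
    #   puts resp.task_definition.task_definition_arn
    #   puts resp.task_definition.revision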
# # @return [Types::RegisterTaskDefinitionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::RegisterTaskDefinitionResponse#task_definition #task_definition} => Types::TaskDefinition # # @example Request syntax with placeholder values # # resp = client.register_task_definition({ # family: "String", # required # task_role_arn: "String", # network_mode: "bridge", # accepts bridge, host, none # container_definitions: [ # required # { # name: "String", # image: "String", # cpu: 1, # memory: 1, # memory_reservation: 1, # links: ["String"], # port_mappings: [ # { # container_port: 1, # host_port: 1, # protocol: "tcp", # accepts tcp, udp # }, # ], # essential: false, # entry_point: ["String"], # command: ["String"], # environment: [ # { # name: "String", # value: "String", # }, # ], # mount_points: [ # { # source_volume: "String", # container_path: "String", # read_only: false, # }, # ], # volumes_from: [ # { # source_container: "String", # read_only: false, # }, # ], # hostname: "String", # user: "String", # working_directory: "String", # disable_networking: false, # privileged: false, # readonly_root_filesystem: false, # dns_servers: ["String"], # dns_search_domains: ["String"], # extra_hosts: [ # { # hostname: "String", # required # ip_address: "String", # required # }, # ], # docker_security_options: ["String"], # docker_labels: { # "String" => "String", # }, # ulimits: [ # { # name: "core", # required, accepts core, cpu, data, fsize, locks, memlock, msgqueue, nice, nofile, nproc, rss, rtprio, rttime, sigpending, stack # soft_limit: 1, # required # hard_limit: 1, # required # }, # ], # log_configuration: { # log_driver: "json-file", # required, accepts json-file, syslog, journald, gelf, fluentd, awslogs, splunk # options: { # "String" => "String", # }, # }, # }, # ], # volumes: [ # { # name: "String", # host: { # source_path: "String", # }, # }, # ], # placement_constraints: [ # { # type: "memberOf", # accepts memberOf # expression: "String", # }, # ], # }) # # @example Response structure # # resp.task_definition.task_definition_arn #=> String # resp.task_definition.container_definitions #=> Array # resp.task_definition.container_definitions[0].name #=> String # resp.task_definition.container_definitions[0].image #=> String # resp.task_definition.container_definitions[0].cpu #=> Integer # resp.task_definition.container_definitions[0].memory #=> Integer # resp.task_definition.container_definitions[0].memory_reservation #=> Integer # resp.task_definition.container_definitions[0].links #=> Array # resp.task_definition.container_definitions[0].links[0] #=> String # resp.task_definition.container_definitions[0].port_mappings #=> Array # resp.task_definition.container_definitions[0].port_mappings[0].container_port #=> Integer # resp.task_definition.container_definitions[0].port_mappings[0].host_port #=> Integer # resp.task_definition.container_definitions[0].port_mappings[0].protocol #=> String, one of "tcp", "udp" # resp.task_definition.container_definitions[0].essential #=> Boolean # resp.task_definition.container_definitions[0].entry_point #=> Array # resp.task_definition.container_definitions[0].entry_point[0] #=> String # resp.task_definition.container_definitions[0].command #=> Array # resp.task_definition.container_definitions[0].command[0] #=> String # resp.task_definition.container_definitions[0].environment #=> Array # resp.task_definition.container_definitions[0].environment[0].name #=> String # 
resp.task_definition.container_definitions[0].environment[0].value #=> String # resp.task_definition.container_definitions[0].mount_points #=> Array # resp.task_definition.container_definitions[0].mount_points[0].source_volume #=> String # resp.task_definition.container_definitions[0].mount_points[0].container_path #=> String # resp.task_definition.container_definitions[0].mount_points[0].read_only #=> Boolean # resp.task_definition.container_definitions[0].volumes_from #=> Array # resp.task_definition.container_definitions[0].volumes_from[0].source_container #=> String # resp.task_definition.container_definitions[0].volumes_from[0].read_only #=> Boolean # resp.task_definition.container_definitions[0].hostname #=> String # resp.task_definition.container_definitions[0].user #=> String # resp.task_definition.container_definitions[0].working_directory #=> String # resp.task_definition.container_definitions[0].disable_networking #=> Boolean # resp.task_definition.container_definitions[0].privileged #=> Boolean # resp.task_definition.container_definitions[0].readonly_root_filesystem #=> Boolean # resp.task_definition.container_definitions[0].dns_servers #=> Array # resp.task_definition.container_definitions[0].dns_servers[0] #=> String # resp.task_definition.container_definitions[0].dns_search_domains #=> Array # resp.task_definition.container_definitions[0].dns_search_domains[0] #=> String # resp.task_definition.container_definitions[0].extra_hosts #=> Array # resp.task_definition.container_definitions[0].extra_hosts[0].hostname #=> String # resp.task_definition.container_definitions[0].extra_hosts[0].ip_address #=> String # resp.task_definition.container_definitions[0].docker_security_options #=> Array # resp.task_definition.container_definitions[0].docker_security_options[0] #=> String # resp.task_definition.container_definitions[0].docker_labels #=> Hash # resp.task_definition.container_definitions[0].docker_labels["String"] #=> String # resp.task_definition.container_definitions[0].ulimits #=> Array # resp.task_definition.container_definitions[0].ulimits[0].name #=> String, one of "core", "cpu", "data", "fsize", "locks", "memlock", "msgqueue", "nice", "nofile", "nproc", "rss", "rtprio", "rttime", "sigpending", "stack" # resp.task_definition.container_definitions[0].ulimits[0].soft_limit #=> Integer # resp.task_definition.container_definitions[0].ulimits[0].hard_limit #=> Integer # resp.task_definition.container_definitions[0].log_configuration.log_driver #=> String, one of "json-file", "syslog", "journald", "gelf", "fluentd", "awslogs", "splunk" # resp.task_definition.container_definitions[0].log_configuration.options #=> Hash # resp.task_definition.container_definitions[0].log_configuration.options["String"] #=> String # resp.task_definition.family #=> String # resp.task_definition.task_role_arn #=> String # resp.task_definition.network_mode #=> String, one of "bridge", "host", "none" # resp.task_definition.revision #=> Integer # resp.task_definition.volumes #=> Array # resp.task_definition.volumes[0].name #=> String # resp.task_definition.volumes[0].host.source_path #=> String # resp.task_definition.status #=> String, one of "ACTIVE", "INACTIVE" # resp.task_definition.requires_attributes #=> Array # resp.task_definition.requires_attributes[0].name #=> String # resp.task_definition.requires_attributes[0].value #=> String # resp.task_definition.requires_attributes[0].target_type #=> String, one of "container-instance" # resp.task_definition.requires_attributes[0].target_id #=> String # 
resp.task_definition.placement_constraints #=> Array # resp.task_definition.placement_constraints[0].type #=> String, one of "memberOf" # resp.task_definition.placement_constraints[0].expression #=> String # # @overload register_task_definition(params = {}) # @param [Hash] params ({}) def register_task_definition(params = {}, options = {}) req = build_request(:register_task_definition, params) req.send_request(options) end # Starts a new task using the specified task definition. # # You can allow Amazon ECS to place tasks for you, or you can customize # how Amazon ECS places tasks using placement constraints and placement # strategies. For more information, see [Scheduling Tasks][1] in the # *Amazon EC2 Container Service Developer Guide*. # # Alternatively, you can use StartTask to use your own scheduler or # place tasks manually on specific container instances. # # # # [1]: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/scheduling_tasks.html # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster on # which to run your task. If you do not specify a cluster, the default # cluster is assumed. # # @option params [required, String] :task_definition # The `family` and `revision` (`family:revision`) or full Amazon # Resource Name (ARN) of the task definition to run. If a `revision` is # not specified, the latest `ACTIVE` revision is used. # # @option params [Types::TaskOverride] :overrides # A list of container overrides in JSON format that specify the name of # a container in the specified task definition and the overrides it # should receive. You can override the default command for a container # (that is specified in the task definition or Docker image) with a # `command` override. You can also override existing environment # variables (that are specified in the task definition or Docker image) # on a container or add new environment variables to it with an # `environment` override. # # A total of 8192 characters are allowed for overrides. This limit # includes the JSON formatting characters of the override structure. # # # # @option params [Integer] :count # The number of instantiations of the specified task to place on your # cluster. You can specify up to 10 tasks per call. # # @option params [String] :started_by # An optional tag specified when a task is started. For example if you # automatically trigger a task to run a batch process job, you could # apply a unique identifier for that job to your task with the # `startedBy` parameter. You can then identify which tasks belong to # that job by filtering the results of a ListTasks call with the # `startedBy` value. Up to 36 letters (uppercase and lowercase), # numbers, hyphens, and underscores are allowed. # # If a task is started by an Amazon ECS service, then the `startedBy` # parameter contains the deployment ID of the service that starts it. # # @option params [String] :group # The name of the task group to associate with the task. The default # value is the family name of the task definition (for example, # family:my-family-name). # # @option params [Array] :placement_constraints # An array of placement constraint objects to use for the task. You can # specify up to 10 constraints per task (including constraints in the # task definition and those specified at run time). # # @option params [Array] :placement_strategy # The placement strategy objects to use for the task. You can specify a # maximum of 5 strategy rules per task. 
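    #
    # The example below is an editorial sketch, not generated reference
    # material: the cluster name, task definition, and `startedBy` tag are
    # assumptions. It runs two copies of a task, lets Amazon ECS binpack
    # them on memory, and checks the failures list for placement errors.
    #
    # @example Running a task with a placement strategy (illustrative sketch)
    #
    #   resp = client.run_task({
    #     cluster: "default",            # assumed cluster name
    #     task_definition: "sleep360:1", # assumed family:revision
    #     count: 2,
    #     started_by: "batch-job-42",    # hypothetical startedBy tag
    #     placement_strategy: [
    #       { type: "binpack", field: "memory" },
    #     ],
    #   })
    #   resp.tasks.each    { |t| puts t.task_arn }
    #   resp.failures.each { |f| warn "#{f.arn}: #{f.reason}" }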
# # @return [Types::RunTaskResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::RunTaskResponse#tasks #tasks} => Array<Types::Task> # * {Types::RunTaskResponse#failures #failures} => Array<Types::Failure> # # @example Request syntax with placeholder values # # resp = client.run_task({ # cluster: "String", # task_definition: "String", # required # overrides: { # container_overrides: [ # { # name: "String", # command: ["String"], # environment: [ # { # name: "String", # value: "String", # }, # ], # }, # ], # task_role_arn: "String", # }, # count: 1, # started_by: "String", # group: "String", # placement_constraints: [ # { # type: "distinctInstance", # accepts distinctInstance, memberOf # expression: "String", # }, # ], # placement_strategy: [ # { # type: "random", # accepts random, spread, binpack # field: "String", # }, # ], # }) # # @example Response structure # # resp.tasks #=> Array # resp.tasks[0].task_arn #=> String # resp.tasks[0].cluster_arn #=> String # resp.tasks[0].task_definition_arn #=> String # resp.tasks[0].container_instance_arn #=> String # resp.tasks[0].overrides.container_overrides #=> Array # resp.tasks[0].overrides.container_overrides[0].name #=> String # resp.tasks[0].overrides.container_overrides[0].command #=> Array # resp.tasks[0].overrides.container_overrides[0].command[0] #=> String # resp.tasks[0].overrides.container_overrides[0].environment #=> Array # resp.tasks[0].overrides.container_overrides[0].environment[0].name #=> String # resp.tasks[0].overrides.container_overrides[0].environment[0].value #=> String # resp.tasks[0].overrides.task_role_arn #=> String # resp.tasks[0].last_status #=> String # resp.tasks[0].desired_status #=> String # resp.tasks[0].containers #=> Array # resp.tasks[0].containers[0].container_arn #=> String # resp.tasks[0].containers[0].task_arn #=> String # resp.tasks[0].containers[0].name #=> String # resp.tasks[0].containers[0].last_status #=> String # resp.tasks[0].containers[0].exit_code #=> Integer # resp.tasks[0].containers[0].reason #=> String # resp.tasks[0].containers[0].network_bindings #=> Array # resp.tasks[0].containers[0].network_bindings[0].bind_ip #=> String # resp.tasks[0].containers[0].network_bindings[0].container_port #=> Integer # resp.tasks[0].containers[0].network_bindings[0].host_port #=> Integer # resp.tasks[0].containers[0].network_bindings[0].protocol #=> String, one of "tcp", "udp" # resp.tasks[0].started_by #=> String # resp.tasks[0].version #=> Integer # resp.tasks[0].stopped_reason #=> String # resp.tasks[0].created_at #=> Time # resp.tasks[0].started_at #=> Time # resp.tasks[0].stopped_at #=> Time # resp.tasks[0].group #=> String # resp.failures #=> Array # resp.failures[0].arn #=> String # resp.failures[0].reason #=> String # # @overload run_task(params = {}) # @param [Hash] params ({}) def run_task(params = {}, options = {}) req = build_request(:run_task, params) req.send_request(options) end # Starts a new task from the specified task definition on the specified # container instance or instances. # # Alternatively, you can use RunTask to place tasks for you. For more # information, see [Scheduling Tasks][1] in the *Amazon EC2 Container # Service Developer Guide*. # # # # [1]: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/scheduling_tasks.html # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster on # which to start your task. 
If you do not specify a cluster, the default # cluster is assumed. # # @option params [required, String] :task_definition # The `family` and `revision` (`family:revision`) or full Amazon # Resource Name (ARN) of the task definition to start. If a `revision` # is not specified, the latest `ACTIVE` revision is used. # # @option params [Types::TaskOverride] :overrides # A list of container overrides in JSON format that specify the name of # a container in the specified task definition and the overrides it # should receive. You can override the default command for a container # (that is specified in the task definition or Docker image) with a # `command` override. You can also override existing environment # variables (that are specified in the task definition or Docker image) # on a container or add new environment variables to it with an # `environment` override. # # A total of 8192 characters are allowed for overrides. This limit # includes the JSON formatting characters of the override structure. # # # # @option params [required, Array] :container_instances # The container instance IDs or full Amazon Resource Name (ARN) entries # for the container instances on which you would like to place your # task. You can specify up to 10 container instances. # # @option params [String] :started_by # An optional tag specified when a task is started. For example if you # automatically trigger a task to run a batch process job, you could # apply a unique identifier for that job to your task with the # `startedBy` parameter. You can then identify which tasks belong to # that job by filtering the results of a ListTasks call with the # `startedBy` value. Up to 36 letters (uppercase and lowercase), # numbers, hyphens, and underscores are allowed. # # If a task is started by an Amazon ECS service, then the `startedBy` # parameter contains the deployment ID of the service that starts it. # # @option params [String] :group # The name of the task group to associate with the task. The default # value is the family name of the task definition (for example, # family:my-family-name). 
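    #
    # The example below is an editorial sketch, not generated reference
    # material: the cluster name, task definition, container instance ID,
    # and `startedBy` tag are placeholders. It places a task on a specific
    # container instance, bypassing the Amazon ECS scheduler as described
    # above.
    #
    # @example Starting a task on a specific container instance (illustrative sketch)
    #
    #   resp = client.start_task({
    #     cluster: "default",                             # assumed cluster name
    #     task_definition: "sleep360:1",                  # assumed family:revision
    #     container_instances: ["container_instance_id"], # placeholder instance ID
    #     started_by: "manual-scheduler",                 # hypothetical tag
    #   })
    #   resp.tasks.each { |t| puts "#{t.task_arn} => #{t.last_status}" }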
# # @return [Types::StartTaskResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::StartTaskResponse#tasks #tasks} => Array<Types::Task> # * {Types::StartTaskResponse#failures #failures} => Array<Types::Failure> # # @example Request syntax with placeholder values # # resp = client.start_task({ # cluster: "String", # task_definition: "String", # required # overrides: { # container_overrides: [ # { # name: "String", # command: ["String"], # environment: [ # { # name: "String", # value: "String", # }, # ], # }, # ], # task_role_arn: "String", # }, # container_instances: ["String"], # required # started_by: "String", # group: "String", # }) # # @example Response structure # # resp.tasks #=> Array # resp.tasks[0].task_arn #=> String # resp.tasks[0].cluster_arn #=> String # resp.tasks[0].task_definition_arn #=> String # resp.tasks[0].container_instance_arn #=> String # resp.tasks[0].overrides.container_overrides #=> Array # resp.tasks[0].overrides.container_overrides[0].name #=> String # resp.tasks[0].overrides.container_overrides[0].command #=> Array # resp.tasks[0].overrides.container_overrides[0].command[0] #=> String # resp.tasks[0].overrides.container_overrides[0].environment #=> Array # resp.tasks[0].overrides.container_overrides[0].environment[0].name #=> String # resp.tasks[0].overrides.container_overrides[0].environment[0].value #=> String # resp.tasks[0].overrides.task_role_arn #=> String # resp.tasks[0].last_status #=> String # resp.tasks[0].desired_status #=> String # resp.tasks[0].containers #=> Array # resp.tasks[0].containers[0].container_arn #=> String # resp.tasks[0].containers[0].task_arn #=> String # resp.tasks[0].containers[0].name #=> String # resp.tasks[0].containers[0].last_status #=> String # resp.tasks[0].containers[0].exit_code #=> Integer # resp.tasks[0].containers[0].reason #=> String # resp.tasks[0].containers[0].network_bindings #=> Array # resp.tasks[0].containers[0].network_bindings[0].bind_ip #=> String # resp.tasks[0].containers[0].network_bindings[0].container_port #=> Integer # resp.tasks[0].containers[0].network_bindings[0].host_port #=> Integer # resp.tasks[0].containers[0].network_bindings[0].protocol #=> String, one of "tcp", "udp" # resp.tasks[0].started_by #=> String # resp.tasks[0].version #=> Integer # resp.tasks[0].stopped_reason #=> String # resp.tasks[0].created_at #=> Time # resp.tasks[0].started_at #=> Time # resp.tasks[0].stopped_at #=> Time # resp.tasks[0].group #=> String # resp.failures #=> Array # resp.failures[0].arn #=> String # resp.failures[0].reason #=> String # # @overload start_task(params = {}) # @param [Hash] params ({}) def start_task(params = {}, options = {}) req = build_request(:start_task, params) req.send_request(options) end # Stops a running task. # # When StopTask is called on a task, the equivalent of `docker stop` is # issued to the containers running in the task. This results in a # `SIGTERM` and a 30-second timeout, after which `SIGKILL` is sent and # the containers are forcibly stopped. If the container handles the # `SIGTERM` gracefully and exits within 30 seconds from receiving it, no # `SIGKILL` is sent. # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster that # hosts the task to stop. If you do not specify a cluster, the default # cluster is assumed. # # @option params [required, String] :task # The task ID or full Amazon Resource Name (ARN) entry of the task to # stop. 
# # @option params [String] :reason # An optional message specified when a task is stopped. For example, if # you are using a custom scheduler, you can use this parameter to # specify the reason for stopping the task here, and the message will # appear in subsequent DescribeTasks API operations on this task. Up to # 255 characters are allowed in this message. # # @return [Types::StopTaskResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::StopTaskResponse#task #task} => Types::Task # # @example Request syntax with placeholder values # # resp = client.stop_task({ # cluster: "String", # task: "String", # required # reason: "String", # }) # # @example Response structure # # resp.task.task_arn #=> String # resp.task.cluster_arn #=> String # resp.task.task_definition_arn #=> String # resp.task.container_instance_arn #=> String # resp.task.overrides.container_overrides #=> Array # resp.task.overrides.container_overrides[0].name #=> String # resp.task.overrides.container_overrides[0].command #=> Array # resp.task.overrides.container_overrides[0].command[0] #=> String # resp.task.overrides.container_overrides[0].environment #=> Array # resp.task.overrides.container_overrides[0].environment[0].name #=> String # resp.task.overrides.container_overrides[0].environment[0].value #=> String # resp.task.overrides.task_role_arn #=> String # resp.task.last_status #=> String # resp.task.desired_status #=> String # resp.task.containers #=> Array # resp.task.containers[0].container_arn #=> String # resp.task.containers[0].task_arn #=> String # resp.task.containers[0].name #=> String # resp.task.containers[0].last_status #=> String # resp.task.containers[0].exit_code #=> Integer # resp.task.containers[0].reason #=> String # resp.task.containers[0].network_bindings #=> Array # resp.task.containers[0].network_bindings[0].bind_ip #=> String # resp.task.containers[0].network_bindings[0].container_port #=> Integer # resp.task.containers[0].network_bindings[0].host_port #=> Integer # resp.task.containers[0].network_bindings[0].protocol #=> String, one of "tcp", "udp" # resp.task.started_by #=> String # resp.task.version #=> Integer # resp.task.stopped_reason #=> String # resp.task.created_at #=> Time # resp.task.started_at #=> Time # resp.task.stopped_at #=> Time # resp.task.group #=> String # # @overload stop_task(params = {}) # @param [Hash] params ({}) def stop_task(params = {}, options = {}) req = build_request(:stop_task, params) req.send_request(options) end # This action is only used by the Amazon EC2 Container Service agent, # and it is not intended for use outside of the agent. # # # # Sent to acknowledge that a container changed states. # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster that # hosts the container. # # @option params [String] :task # The task ID or full Amazon Resource Name (ARN) of the task that hosts # the container. # # @option params [String] :container_name # The name of the container. # # @option params [String] :status # The status of the state change request. # # @option params [Integer] :exit_code # The exit code returned for the state change request. # # @option params [String] :reason # The reason for the state change request. # # @option params [Array] :network_bindings # The network bindings of the container. 
# # @return [Types::SubmitContainerStateChangeResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::SubmitContainerStateChangeResponse#acknowledgment #acknowledgment} => String # # @example Request syntax with placeholder values # # resp = client.submit_container_state_change({ # cluster: "String", # task: "String", # container_name: "String", # status: "String", # exit_code: 1, # reason: "String", # network_bindings: [ # { # bind_ip: "String", # container_port: 1, # host_port: 1, # protocol: "tcp", # accepts tcp, udp # }, # ], # }) # # @example Response structure # # resp.acknowledgment #=> String # # @overload submit_container_state_change(params = {}) # @param [Hash] params ({}) def submit_container_state_change(params = {}, options = {}) req = build_request(:submit_container_state_change, params) req.send_request(options) end # This action is only used by the Amazon EC2 Container Service agent, # and it is not intended for use outside of the agent. # # # # Sent to acknowledge that a task changed states. # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster that # hosts the task. # # @option params [String] :task # The task ID or full Amazon Resource Name (ARN) of the task in the # state change request. # # @option params [String] :status # The status of the state change request. # # @option params [String] :reason # The reason for the state change request. # # @return [Types::SubmitTaskStateChangeResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::SubmitTaskStateChangeResponse#acknowledgment #acknowledgment} => String # # @example Request syntax with placeholder values # # resp = client.submit_task_state_change({ # cluster: "String", # task: "String", # status: "String", # reason: "String", # }) # # @example Response structure # # resp.acknowledgment #=> String # # @overload submit_task_state_change(params = {}) # @param [Hash] params ({}) def submit_task_state_change(params = {}, options = {}) req = build_request(:submit_task_state_change, params) req.send_request(options) end # Updates the Amazon ECS container agent on a specified container # instance. Updating the Amazon ECS container agent does not interrupt # running tasks or services on the container instance. The process for # updating the agent differs depending on whether your container # instance was launched with the Amazon ECS-optimized AMI or another # operating system. # # `UpdateContainerAgent` requires the Amazon ECS-optimized AMI or Amazon # Linux with the `ecs-init` service installed and running. For help # updating the Amazon ECS container agent on other operating systems, # see [Manually Updating the Amazon ECS Container Agent][1] in the # *Amazon EC2 Container Service Developer Guide*. # # # # [1]: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html#manually_update_agent # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster that # your container instance is running on. If you do not specify a # cluster, the default cluster is assumed. # # @option params [required, String] :container_instance # The container instance ID or full Amazon Resource Name (ARN) entries # for the container instance on which you would like to update the # Amazon ECS container agent. 
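    #
    # The example below is an editorial sketch, not generated reference
    # material: the cluster and container instance values are placeholders.
    # It shows how the agent update status returned in the response can be
    # inspected after requesting an update.
    #
    # @example Requesting an agent update and checking its status (illustrative sketch)
    #
    #   resp = client.update_container_agent({
    #     cluster: "default",                          # assumed cluster name
    #     container_instance: "container_instance_id", # placeholder instance ID
    #   })
    #   # agent_update_status is one of "PENDING", "STAGING", "STAGED",
    #   # "UPDATING", "UPDATED", or "FAILED".
    #   puts resp.container_instance.agent_update_status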
# # @return [Types::UpdateContainerAgentResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::UpdateContainerAgentResponse#container_instance #container_instance} => Types::ContainerInstance # # @example Request syntax with placeholder values # # resp = client.update_container_agent({ # cluster: "String", # container_instance: "String", # required # }) # # @example Response structure # # resp.container_instance.container_instance_arn #=> String # resp.container_instance.ec2_instance_id #=> String # resp.container_instance.version #=> Integer # resp.container_instance.version_info.agent_version #=> String # resp.container_instance.version_info.agent_hash #=> String # resp.container_instance.version_info.docker_version #=> String # resp.container_instance.remaining_resources #=> Array # resp.container_instance.remaining_resources[0].name #=> String # resp.container_instance.remaining_resources[0].type #=> String # resp.container_instance.remaining_resources[0].double_value #=> Float # resp.container_instance.remaining_resources[0].long_value #=> Integer # resp.container_instance.remaining_resources[0].integer_value #=> Integer # resp.container_instance.remaining_resources[0].string_set_value #=> Array # resp.container_instance.remaining_resources[0].string_set_value[0] #=> String # resp.container_instance.registered_resources #=> Array # resp.container_instance.registered_resources[0].name #=> String # resp.container_instance.registered_resources[0].type #=> String # resp.container_instance.registered_resources[0].double_value #=> Float # resp.container_instance.registered_resources[0].long_value #=> Integer # resp.container_instance.registered_resources[0].integer_value #=> Integer # resp.container_instance.registered_resources[0].string_set_value #=> Array # resp.container_instance.registered_resources[0].string_set_value[0] #=> String # resp.container_instance.status #=> String # resp.container_instance.agent_connected #=> Boolean # resp.container_instance.running_tasks_count #=> Integer # resp.container_instance.pending_tasks_count #=> Integer # resp.container_instance.agent_update_status #=> String, one of "PENDING", "STAGING", "STAGED", "UPDATING", "UPDATED", "FAILED" # resp.container_instance.attributes #=> Array # resp.container_instance.attributes[0].name #=> String # resp.container_instance.attributes[0].value #=> String # resp.container_instance.attributes[0].target_type #=> String, one of "container-instance" # resp.container_instance.attributes[0].target_id #=> String # # @overload update_container_agent(params = {}) # @param [Hash] params ({}) def update_container_agent(params = {}, options = {}) req = build_request(:update_container_agent, params) req.send_request(options) end # Modifies the status of an Amazon ECS container instance. # # You can change the status of a container instance to `DRAINING` to # manually remove an instance from a cluster, for example to perform # system updates, update the Docker daemon, or scale down the cluster # size. # # When you set a container instance to `DRAINING`, Amazon ECS prevents # new tasks from being scheduled for placement on the container instance # and replacement service tasks are started on other container instances # in the cluster if the resources are available. Service tasks on the # container instance that are in the `PENDING` state are stopped # immediately. 
    #
    # Service tasks on the container instance that are in the `RUNNING`
    # state are stopped and replaced according to the service's deployment
    # configuration parameters, `minimumHealthyPercent` and
    # `maximumPercent`. Note that you can change the deployment
    # configuration of your service using UpdateService.
    #
    # * If `minimumHealthyPercent` is below 100%, the scheduler can ignore
    #   `desiredCount` temporarily during task replacement. For example, if
    #   `desiredCount` is four tasks, a minimum of 50% allows the scheduler
    #   to stop two existing tasks before starting two new tasks. If the
    #   minimum is 100%, the service scheduler can't remove existing tasks
    #   until the replacement tasks are considered healthy. Tasks for
    #   services that do not use a load balancer are considered healthy if
    #   they are in the `RUNNING` state. Tasks for services that use a load
    #   balancer are considered healthy if they are in the `RUNNING` state
    #   and the container instance they are hosted on is reported as healthy
    #   by the load balancer.
    #
    # * The `maximumPercent` parameter represents an upper limit on the
    #   number of running tasks during task replacement, which enables you
    #   to define the replacement batch size. For example, if `desiredCount`
    #   is four tasks, a maximum of 200% starts four new tasks before
    #   stopping the four tasks to be drained (provided that the cluster
    #   resources required to do this are available). If the maximum is
    #   100%, then replacement tasks can't start until the draining tasks
    #   have stopped.
    #
    # Any `PENDING` or `RUNNING` tasks that do not belong to a service are
    # not affected; you must wait for them to finish or stop them manually.
    #
    # A container instance has completed draining when it has no more
    # `RUNNING` tasks. You can verify this using ListTasks, as sketched in
    # the example below.
    #
    # When you set a container instance to `ACTIVE`, the Amazon ECS
    # scheduler can begin scheduling tasks on the instance again.
    #
    # @option params [String] :cluster
    #   The short name or full Amazon Resource Name (ARN) of the cluster that
    #   hosts the container instance to update. If you do not specify a
    #   cluster, the default cluster is assumed.
    #
    # @option params [required, Array] :container_instances
    #   A list of container instance IDs or full Amazon Resource Name (ARN)
    #   entries.
    #
    # @option params [required, String] :status
    #   The container instance state with which to update the container
    #   instance.
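    #
    #   Note: the following example is an illustrative, hand-written sketch
    #   rather than generated reference material. It drains one container
    #   instance and then polls ListTasks until that instance has no more
    #   `RUNNING` tasks; the cluster name and container instance ID shown
    #   are placeholders.
    #
    # @example Draining a container instance and waiting for it to empty (illustrative sketch)
    #
    #   client.update_container_instances_state({
    #     cluster: "default",                             # placeholder
    #     container_instances: ["container_instance_id"], # placeholder
    #     status: "DRAINING",
    #   })
    #
    #   # The instance has finished draining once it hosts no RUNNING tasks.
    #   loop do
    #     running = client.list_tasks({
    #       cluster: "default",
    #       container_instance: "container_instance_id",
    #       desired_status: "RUNNING",
    #     }).task_arns
    #     break if running.empty?
    #     sleep 15
    #   end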
# # @return [Types::UpdateContainerInstancesStateResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::UpdateContainerInstancesStateResponse#container_instances #container_instances} => Array<Types::ContainerInstance> # * {Types::UpdateContainerInstancesStateResponse#failures #failures} => Array<Types::Failure> # # @example Request syntax with placeholder values # # resp = client.update_container_instances_state({ # cluster: "String", # container_instances: ["String"], # required # status: "ACTIVE", # required, accepts ACTIVE, DRAINING # }) # # @example Response structure # # resp.container_instances #=> Array # resp.container_instances[0].container_instance_arn #=> String # resp.container_instances[0].ec2_instance_id #=> String # resp.container_instances[0].version #=> Integer # resp.container_instances[0].version_info.agent_version #=> String # resp.container_instances[0].version_info.agent_hash #=> String # resp.container_instances[0].version_info.docker_version #=> String # resp.container_instances[0].remaining_resources #=> Array # resp.container_instances[0].remaining_resources[0].name #=> String # resp.container_instances[0].remaining_resources[0].type #=> String # resp.container_instances[0].remaining_resources[0].double_value #=> Float # resp.container_instances[0].remaining_resources[0].long_value #=> Integer # resp.container_instances[0].remaining_resources[0].integer_value #=> Integer # resp.container_instances[0].remaining_resources[0].string_set_value #=> Array # resp.container_instances[0].remaining_resources[0].string_set_value[0] #=> String # resp.container_instances[0].registered_resources #=> Array # resp.container_instances[0].registered_resources[0].name #=> String # resp.container_instances[0].registered_resources[0].type #=> String # resp.container_instances[0].registered_resources[0].double_value #=> Float # resp.container_instances[0].registered_resources[0].long_value #=> Integer # resp.container_instances[0].registered_resources[0].integer_value #=> Integer # resp.container_instances[0].registered_resources[0].string_set_value #=> Array # resp.container_instances[0].registered_resources[0].string_set_value[0] #=> String # resp.container_instances[0].status #=> String # resp.container_instances[0].agent_connected #=> Boolean # resp.container_instances[0].running_tasks_count #=> Integer # resp.container_instances[0].pending_tasks_count #=> Integer # resp.container_instances[0].agent_update_status #=> String, one of "PENDING", "STAGING", "STAGED", "UPDATING", "UPDATED", "FAILED" # resp.container_instances[0].attributes #=> Array # resp.container_instances[0].attributes[0].name #=> String # resp.container_instances[0].attributes[0].value #=> String # resp.container_instances[0].attributes[0].target_type #=> String, one of "container-instance" # resp.container_instances[0].attributes[0].target_id #=> String # resp.failures #=> Array # resp.failures[0].arn #=> String # resp.failures[0].reason #=> String # # @overload update_container_instances_state(params = {}) # @param [Hash] params ({}) def update_container_instances_state(params = {}, options = {}) req = build_request(:update_container_instances_state, params) req.send_request(options) end # Modifies the desired count, deployment configuration, or task # definition used in a service. 
# # You can add to or subtract from the number of instantiations of a task # definition in a service by specifying the cluster that the service is # running in and a new `desiredCount` parameter. # # You can use UpdateService to modify your task definition and deploy a # new version of your service. # # You can also update the deployment configuration of a service. When a # deployment is triggered by updating the task definition of a service, # the service scheduler uses the deployment configuration parameters, # `minimumHealthyPercent` and `maximumPercent`, to determine the # deployment strategy. # # * If `minimumHealthyPercent` is below 100%, the scheduler can ignore # `desiredCount` temporarily during a deployment. For example, if # `desiredCount` is four tasks, a minimum of 50% allows the scheduler # to stop two existing tasks before starting two new tasks. Tasks for # services that do not use a load balancer are considered healthy if # they are in the `RUNNING` state. Tasks for services that use a load # balancer are considered healthy if they are in the `RUNNING` state # and the container instance they are hosted on is reported as healthy # by the load balancer. # # * The `maximumPercent` parameter represents an upper limit on the # number of running tasks during a deployment, which enables you to # define the deployment batch size. For example, if `desiredCount` is # four tasks, a maximum of 200% starts four new tasks before stopping # the four older tasks (provided that the cluster resources required # to do this are available). # # When UpdateService stops a task during a deployment, the equivalent of # `docker stop` is issued to the containers running in the task. This # results in a `SIGTERM` and a 30-second timeout, after which `SIGKILL` # is sent and the containers are forcibly stopped. If the container # handles the `SIGTERM` gracefully and exits within 30 seconds from # receiving it, no `SIGKILL` is sent. # # When the service scheduler launches new tasks, it determines task # placement in your cluster with the following logic: # # * Determine which of the container instances in your cluster can # support your service's task definition (for example, they have the # required CPU, memory, ports, and container instance attributes). # # * By default, the service scheduler attempts to balance tasks across # Availability Zones in this manner (although you can choose a # different placement strategy): # # * Sort the valid container instances by the fewest number of running # tasks for this service in the same Availability Zone as the # instance. For example, if zone A has one running service task and # zones B and C each have zero, valid container instances in either # zone B or C are considered optimal for placement. # # * Place the new service task on a valid container instance in an # optimal Availability Zone (based on the previous steps), favoring # container instances with the fewest number of running tasks for # this service. # # When the service scheduler stops running tasks, it attempts to # maintain balance across the Availability Zones in your cluster using # the following logic: # # * Sort the container instances by the largest number of running tasks # for this service in the same Availability Zone as the instance. For # example, if zone A has one running service task and zones B and C # each have two, container instances in either zone B or C are # considered optimal for termination. 
# # * Stop the task on a container instance in an optimal Availability # Zone (based on the previous steps), favoring container instances # with the largest number of running tasks for this service. # # @option params [String] :cluster # The short name or full Amazon Resource Name (ARN) of the cluster that # your service is running on. If you do not specify a cluster, the # default cluster is assumed. # # @option params [required, String] :service # The name of the service to update. # # @option params [Integer] :desired_count # The number of instantiations of the task to place and keep running in # your service. # # @option params [String] :task_definition # The `family` and `revision` (`family:revision`) or full Amazon # Resource Name (ARN) of the task definition to run in your service. If # a `revision` is not specified, the latest `ACTIVE` revision is used. # If you modify the task definition with `UpdateService`, Amazon ECS # spawns a task with the new version of the task definition and then # stops an old task after the new version is running. # # @option params [Types::DeploymentConfiguration] :deployment_configuration # Optional deployment parameters that control how many tasks run during # the deployment and the ordering of stopping and starting tasks. # # @return [Types::UpdateServiceResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::UpdateServiceResponse#service #service} => Types::Service # # @example Request syntax with placeholder values # # resp = client.update_service({ # cluster: "String", # service: "String", # required # desired_count: 1, # task_definition: "String", # deployment_configuration: { # maximum_percent: 1, # minimum_healthy_percent: 1, # }, # }) # # @example Response structure # # resp.service.service_arn #=> String # resp.service.service_name #=> String # resp.service.cluster_arn #=> String # resp.service.load_balancers #=> Array # resp.service.load_balancers[0].target_group_arn #=> String # resp.service.load_balancers[0].load_balancer_name #=> String # resp.service.load_balancers[0].container_name #=> String # resp.service.load_balancers[0].container_port #=> Integer # resp.service.status #=> String # resp.service.desired_count #=> Integer # resp.service.running_count #=> Integer # resp.service.pending_count #=> Integer # resp.service.task_definition #=> String # resp.service.deployment_configuration.maximum_percent #=> Integer # resp.service.deployment_configuration.minimum_healthy_percent #=> Integer # resp.service.deployments #=> Array # resp.service.deployments[0].id #=> String # resp.service.deployments[0].status #=> String # resp.service.deployments[0].task_definition #=> String # resp.service.deployments[0].desired_count #=> Integer # resp.service.deployments[0].pending_count #=> Integer # resp.service.deployments[0].running_count #=> Integer # resp.service.deployments[0].created_at #=> Time # resp.service.deployments[0].updated_at #=> Time # resp.service.role_arn #=> String # resp.service.events #=> Array # resp.service.events[0].id #=> String # resp.service.events[0].created_at #=> Time # resp.service.events[0].message #=> String # resp.service.created_at #=> Time # resp.service.placement_constraints #=> Array # resp.service.placement_constraints[0].type #=> String, one of "distinctInstance", "memberOf" # resp.service.placement_constraints[0].expression #=> String # resp.service.placement_strategy #=> Array # resp.service.placement_strategy[0].type #=> String, one of "random", 
"spread", "binpack" # resp.service.placement_strategy[0].field #=> String # # @overload update_service(params = {}) # @param [Hash] params ({}) def update_service(params = {}, options = {}) req = build_request(:update_service, params) req.send_request(options) end # @!endgroup # @param params ({}) # @api private def build_request(operation_name, params = {}) handlers = @handlers.for(operation_name) context = Seahorse::Client::RequestContext.new( operation_name: operation_name, operation: config.api.operation(operation_name), client: self, params: params, config: config) context[:gem_name] = 'aws-sdk-ecs' context[:gem_version] = '1.0.0.rc1' Seahorse::Client::Request.new(handlers, context) end # Polls an API operation until a resource enters a desired state. # # ## Basic Usage # # A waiter will call an API operation until: # # * It is successful # * It enters a terminal state # * It makes the maximum number of attempts # # In between attempts, the waiter will sleep. # # # polls in a loop, sleeping between attempts # client.waiter_until(waiter_name, params) # # ## Configuration # # You can configure the maximum number of polling attempts, and the # delay (in seconds) between each polling attempt. You can pass # configuration as the final arguments hash. # # # poll for ~25 seconds # client.wait_until(waiter_name, params, { # max_attempts: 5, # delay: 5, # }) # # ## Callbacks # # You can be notified before each polling attempt and before each # delay. If you throw `:success` or `:failure` from these callbacks, # it will terminate the waiter. # # started_at = Time.now # client.wait_until(waiter_name, params, { # # # disable max attempts # max_attempts: nil, # # # poll for 1 hour, instead of a number of attempts # before_wait: -> (attempts, response) do # throw :failure if Time.now - started_at > 3600 # end # }) # # ## Handling Errors # # When a waiter is unsuccessful, it will raise an error. # All of the failure errors extend from # {Aws::Waiters::Errors::WaiterFailed}. # # begin # client.wait_until(...) # rescue Aws::Waiters::Errors::WaiterFailed # # resource did not enter the desired state in time # end # # ## Valid Waiters # # The following table lists the valid waiter names, the operations they call, # and the default `:delay` and `:max_attempts` values. # # | waiter_name | params | :delay | :max_attempts | # | ----------------- | -------------------- | -------- | ------------- | # | services_inactive | {#describe_services} | 15 | 40 | # | services_stable | {#describe_services} | 15 | 40 | # | tasks_running | {#describe_tasks} | 6 | 100 | # | tasks_stopped | {#describe_tasks} | 6 | 100 | # # @raise [Errors::FailureStateError] Raised when the waiter terminates # because the waiter has entered a state that it will not transition # out of, preventing success. # # @raise [Errors::TooManyAttemptsError] Raised when the configured # maximum number of attempts have been made, and the waiter is not # yet successful. # # @raise [Errors::UnexpectedError] Raised when an error is encounted # while polling for a resource that is not expected. # # @raise [Errors::NoSuchWaiterError] Raised when you request to wait # for an unknown state. # # @return [Boolean] Returns `true` if the waiter was successful. 
# @param [Symbol] waiter_name # @param [Hash] params ({}) # @param [Hash] options ({}) # @option options [Integer] :max_attempts # @option options [Integer] :delay # @option options [Proc] :before_attempt # @option options [Proc] :before_wait def wait_until(waiter_name, params = {}, options = {}) w = waiter(waiter_name, options) yield(w.waiter) if block_given? # deprecated w.wait(params) end # @api private # @deprecated def waiter_names waiters.keys end private # @param [Symbol] waiter_name # @param [Hash] options ({}) def waiter(waiter_name, options = {}) waiter_class = waiters[waiter_name] if waiter_class waiter_class.new(options.merge(client: self)) else raise Aws::Waiters::Errors::NoSuchWaiterError.new(waiter_name, waiters.keys) end end def waiters { services_inactive: Waiters::ServicesInactive, services_stable: Waiters::ServicesStable, tasks_running: Waiters::TasksRunning, tasks_stopped: Waiters::TasksStopped } end class << self # @api private attr_reader :identifier # @api private def errors_module Errors end end end end