lib/aws-sdk-transfer/client.rb in aws-sdk-transfer-1.47.0 vs lib/aws-sdk-transfer/client.rb in aws-sdk-transfer-1.48.0
- old
+ new
@@ -404,21 +404,10 @@
#
# The following is an `Entry` and `Target` pair example for `chroot`.
#
# `[ \{ "Entry": "/", "Target": "/bucket_name/home/mydirectory" \} ]`
#
- # <note markdown="1"> If the target of a logical directory entry does not exist in Amazon S3
- # or EFS, the entry is ignored. As a workaround, you can use the Amazon
- # S3 API or EFS API to create 0 byte objects as place holders for your
- # directory. If using the CLI, use the `s3api` or `efsapi` call instead
- # of `s3` or `efs` so you can use the put-object operation. For example,
- # you use the following: `aws s3api put-object --bucket bucketname --key
- # path/to/folder/`. Make sure that the end of the key name ends in a `/`
- # for it to be considered a folder.
- #
- # </note>
- #
# @option params [String] :policy
# A session policy for your user so that you can use the same IAM role
# across multiple users. This policy scopes down user access to portions
# of their Amazon S3 bucket. Variables that you can use inside this
# policy include `$\{Transfer:UserName\}`,
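
For reference, the `Entry`/`Target` pair above maps onto the SDK's snake_case request shape. A minimal sketch, assuming a `create_user` call; the server ID, user name, and role ARN are illustrative placeholders:

require "aws-sdk-transfer"

client = Aws::Transfer::Client.new(region: "us-east-1")

# Chroot the user to a single S3 prefix, mirroring the
# Entry/Target pair shown in the documentation above.
client.create_user(
  server_id: "s-1234567890abcdef0",  # placeholder
  user_name: "mydirectory-user",     # placeholder
  role: "arn:aws:iam::111122223333:role/transfer-access-role", # placeholder
  home_directory_type: "LOGICAL",
  home_directory_mappings: [
    { entry: "/", target: "/bucket_name/home/mydirectory" }
  ]
)
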
@@ -830,21 +819,10 @@
#
# The following is an `Entry` and `Target` pair example for `chroot`.
#
# `[ \{ "Entry": "/", "Target": "/bucket_name/home/mydirectory" \} ]`
#
- # <note markdown="1"> If the target of a logical directory entry does not exist in Amazon S3
- # or EFS, the entry is ignored. As a workaround, you can use the Amazon
- # S3 API or EFS API to create 0 byte objects as place holders for your
- # directory. If using the CLI, use the `s3api` or `efsapi` call instead
- # of `s3` or `efs` so you can use the put-object operation. For example,
- # you use the following: `aws s3api put-object --bucket bucketname --key
- # path/to/folder/`. Make sure that the end of the key name ends in a `/`
- # for it to be considered a folder.
- #
- # </note>
- #
# @option params [String] :policy
# A session policy for your user so that you can use the same IAM role
# across multiple users. This policy scopes down user access to portions
# of their Amazon S3 bucket. Variables that you can use inside this
# policy include `$\{Transfer:UserName\}`,
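
The note deleted in the hunks above described a workaround: creating zero-byte placeholder objects for logical directory targets that do not yet exist. The Ruby equivalent of the quoted `aws s3api put-object` CLI call would be a sketch like the following, reusing the illustrative bucket and key from the removed note:

require "aws-sdk-s3"

s3 = Aws::S3::Client.new(region: "us-east-1")

# A zero-byte object whose key ends in "/" is treated as a folder,
# matching the CLI invocation quoted in the removed note.
s3.put_object(bucket: "bucketname", key: "path/to/folder/")
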
@@ -1027,27 +1005,31 @@
# file_system_id: "EfsFileSystemId",
# path: "EfsPath",
# },
# },
# overwrite_existing: "TRUE", # accepts TRUE, FALSE
+ # source_file_location: "SourceFileLocation",
# },
# custom_step_details: {
# name: "WorkflowStepName",
# target: "CustomStepTarget",
# timeout_seconds: 1,
+ # source_file_location: "SourceFileLocation",
# },
# delete_step_details: {
# name: "WorkflowStepName",
+ # source_file_location: "SourceFileLocation",
# },
# tag_step_details: {
# name: "WorkflowStepName",
# tags: [
# {
# key: "S3TagKey", # required
# value: "S3TagValue", # required
# },
# ],
+ # source_file_location: "SourceFileLocation",
# },
# },
# ],
# on_exception_steps: [
# {
@@ -1063,27 +1045,31 @@
# file_system_id: "EfsFileSystemId",
# path: "EfsPath",
# },
# },
# overwrite_existing: "TRUE", # accepts TRUE, FALSE
+ # source_file_location: "SourceFileLocation",
# },
# custom_step_details: {
# name: "WorkflowStepName",
# target: "CustomStepTarget",
# timeout_seconds: 1,
+ # source_file_location: "SourceFileLocation",
# },
# delete_step_details: {
# name: "WorkflowStepName",
+ # source_file_location: "SourceFileLocation",
# },
# tag_step_details: {
# name: "WorkflowStepName",
# tags: [
# {
# key: "S3TagKey", # required
# value: "S3TagValue", # required
# },
# ],
+ # source_file_location: "SourceFileLocation",
# },
# },
# ],
# tags: [
# {
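
The new `source_file_location` field threads through all four step types in the request syntax above. A minimal `create_workflow` sketch that sets it on a copy step; the `${original.file}` value follows the service's documented workflow variables, and the bucket/key are placeholders:

require "aws-sdk-transfer"

client = Aws::Transfer::Client.new(region: "us-east-1")

resp = client.create_workflow(
  description: "Copy each upload to an archive prefix",
  steps: [
    {
      type: "COPY",
      copy_step_details: {
        name: "CopyOriginal",
        destination_file_location: {
          s3_file_location: { bucket: "bucket_name", key: "archive/" } # placeholders
        },
        overwrite_existing: "TRUE",
        # Operate on the originally uploaded file; by default a step
        # receives the output of the previous step.
        source_file_location: "${original.file}",
      },
    },
  ]
)
resp.workflow_id #=> String
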
@@ -1589,34 +1575,42 @@
# resp.workflow.steps[0].copy_step_details.destination_file_location.s3_file_location.bucket #=> String
# resp.workflow.steps[0].copy_step_details.destination_file_location.s3_file_location.key #=> String
# resp.workflow.steps[0].copy_step_details.destination_file_location.efs_file_location.file_system_id #=> String
# resp.workflow.steps[0].copy_step_details.destination_file_location.efs_file_location.path #=> String
# resp.workflow.steps[0].copy_step_details.overwrite_existing #=> String, one of "TRUE", "FALSE"
+ # resp.workflow.steps[0].copy_step_details.source_file_location #=> String
# resp.workflow.steps[0].custom_step_details.name #=> String
# resp.workflow.steps[0].custom_step_details.target #=> String
# resp.workflow.steps[0].custom_step_details.timeout_seconds #=> Integer
+ # resp.workflow.steps[0].custom_step_details.source_file_location #=> String
# resp.workflow.steps[0].delete_step_details.name #=> String
+ # resp.workflow.steps[0].delete_step_details.source_file_location #=> String
# resp.workflow.steps[0].tag_step_details.name #=> String
# resp.workflow.steps[0].tag_step_details.tags #=> Array
# resp.workflow.steps[0].tag_step_details.tags[0].key #=> String
# resp.workflow.steps[0].tag_step_details.tags[0].value #=> String
+ # resp.workflow.steps[0].tag_step_details.source_file_location #=> String
# resp.workflow.on_exception_steps #=> Array
# resp.workflow.on_exception_steps[0].type #=> String, one of "COPY", "CUSTOM", "TAG", "DELETE"
# resp.workflow.on_exception_steps[0].copy_step_details.name #=> String
# resp.workflow.on_exception_steps[0].copy_step_details.destination_file_location.s3_file_location.bucket #=> String
# resp.workflow.on_exception_steps[0].copy_step_details.destination_file_location.s3_file_location.key #=> String
# resp.workflow.on_exception_steps[0].copy_step_details.destination_file_location.efs_file_location.file_system_id #=> String
# resp.workflow.on_exception_steps[0].copy_step_details.destination_file_location.efs_file_location.path #=> String
# resp.workflow.on_exception_steps[0].copy_step_details.overwrite_existing #=> String, one of "TRUE", "FALSE"
+ # resp.workflow.on_exception_steps[0].copy_step_details.source_file_location #=> String
# resp.workflow.on_exception_steps[0].custom_step_details.name #=> String
# resp.workflow.on_exception_steps[0].custom_step_details.target #=> String
# resp.workflow.on_exception_steps[0].custom_step_details.timeout_seconds #=> Integer
+ # resp.workflow.on_exception_steps[0].custom_step_details.source_file_location #=> String
# resp.workflow.on_exception_steps[0].delete_step_details.name #=> String
+ # resp.workflow.on_exception_steps[0].delete_step_details.source_file_location #=> String
# resp.workflow.on_exception_steps[0].tag_step_details.name #=> String
# resp.workflow.on_exception_steps[0].tag_step_details.tags #=> Array
# resp.workflow.on_exception_steps[0].tag_step_details.tags[0].key #=> String
# resp.workflow.on_exception_steps[0].tag_step_details.tags[0].value #=> String
+ # resp.workflow.on_exception_steps[0].tag_step_details.source_file_location #=> String
# resp.workflow.workflow_id #=> String
# resp.workflow.tags #=> Array
# resp.workflow.tags[0].key #=> String
# resp.workflow.tags[0].value #=> String
#
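
The attribute can be read back from the same response shape. A short sketch against `describe_workflow`; the workflow ID is a placeholder:

require "aws-sdk-transfer"

client = Aws::Transfer::Client.new(region: "us-east-1")

resp = client.describe_workflow(workflow_id: "w-1234567890abcdef0")

resp.workflow.steps.each do |step|
  # Only the *_step_details member matching the step's type is populated.
  details = step.copy_step_details || step.custom_step_details ||
            step.delete_step_details || step.tag_step_details
  # source_file_location may be nil when the service returns no value.
  puts "#{step.type}: #{details&.source_file_location}"
end
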
@@ -2342,21 +2336,10 @@
#
# The following is an `Entry` and `Target` pair example for `chroot`.
#
# `[ \{ "Entry": "/", "Target": "/bucket_name/home/mydirectory" \} ]`
#
- # <note markdown="1"> If the target of a logical directory entry does not exist in Amazon S3
- # or EFS, the entry is ignored. As a workaround, you can use the Amazon
- # S3 API or EFS API to create 0 byte objects as place holders for your
- # directory. If using the CLI, use the `s3api` or `efsapi` call instead
- # of `s3` or `efs` so you can use the put-object operation. For example,
- # you use the following: `aws s3api put-object --bucket bucketname --key
- # path/to/folder/`. Make sure that the end of the key name ends in a `/`
- # for it to be considered a folder.
- #
- # </note>
- #
# @option params [String] :policy
# A session policy for your user so that you can use the same IAM role
# across multiple users. This policy scopes down user access to portions
# of their Amazon S3 bucket. Variables that you can use inside this
# policy include `$\{Transfer:UserName\}`,
@@ -2726,21 +2709,10 @@
#
# The following is an `Entry` and `Target` pair example for `chroot`.
#
# `[ \{ "Entry": "/", "Target": "/bucket_name/home/mydirectory" \} ]`
#
- # <note markdown="1"> If the target of a logical directory entry does not exist in Amazon S3
- # or EFS, the entry is ignored. As a workaround, you can use the Amazon
- # S3 API or EFS API to create 0 byte objects as place holders for your
- # directory. If using the CLI, use the `s3api` or `efsapi` call instead
- # of `s3` or `efs` so you can use the put-object operation. For example,
- # you use the following: `aws s3api put-object --bucket bucketname --key
- # path/to/folder/`. Make sure that the end of the key name ends in a `/`
- # for it to be considered a folder.
- #
- # </note>
- #
# @option params [String] :policy
# A session policy for your user so that you can use the same IAM role
# across multiple users. This policy scopes down user access to portions
# of their Amazon S3 bucket. Variables that you can use inside this
# policy include `$\{Transfer:UserName\}`,
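
The session policy described above is a JSON document passed through the `:policy` option. A minimal scoped-down sketch using the documented `${Transfer:UserName}` variable, shown here with an assumed `update_user` call; the bucket name, server ID, and user name are placeholders:

require "aws-sdk-transfer"

client = Aws::Transfer::Client.new(region: "us-east-1")

# Restrict each user to their own home prefix; the service expands
# ${Transfer:UserName} at session time.
session_policy = <<~JSON
  {
    "Version": "2012-10-17",
    "Statement": [
      {
        "Effect": "Allow",
        "Action": ["s3:GetObject", "s3:PutObject"],
        "Resource": "arn:aws:s3:::bucket_name/home/${Transfer:UserName}/*"
      }
    ]
  }
JSON

client.update_user(
  server_id: "s-1234567890abcdef0",  # placeholder
  user_name: "mydirectory-user",     # placeholder
  policy: session_policy
)
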
@@ -2848,10 +2820,10 @@
operation: config.api.operation(operation_name),
client: self,
params: params,
config: config)
context[:gem_name] = 'aws-sdk-transfer'
- context[:gem_version] = '1.47.0'
+ context[:gem_version] = '1.48.0'
Seahorse::Client::Request.new(handlers, context)
end
# @api private
# @deprecated