lib/aws-sdk-gluedatabrew/client.rb in aws-sdk-gluedatabrew-1.7.0 vs lib/aws-sdk-gluedatabrew/client.rb in aws-sdk-gluedatabrew-1.8.0

- old (aws-sdk-gluedatabrew 1.7.0)
+ new (aws-sdk-gluedatabrew 1.8.0)

@@ -398,24 +398,24 @@
# @option params [required, String] :name
#   The name of the dataset to be created. Valid characters are
#   alphanumeric (A-Z, a-z, 0-9), hyphen (-), period (.), and space.
#
# @option params [String] :format
- #   The file format of a dataset that is created from an S3 file or
+ #   The file format of a dataset that is created from an Amazon S3 file or
#   folder.
#
# @option params [Types::FormatOptions] :format_options
#   Represents a set of options that define the structure of either
#   comma-separated value (CSV), Excel, or JSON input.
#
# @option params [required, Types::Input] :input
#   Represents information on how DataBrew can find data, in either the
- #   AWS Glue Data Catalog or Amazon S3.
+ #   Glue Data Catalog or Amazon S3.
#
# @option params [Types::PathOptions] :path_options
- #   A set of options that defines how DataBrew interprets an S3 path of
- #   the dataset.
+ #   A set of options that defines how DataBrew interprets an Amazon S3
+ #   path of the dataset.
#
# @option params [Hash<String,String>] :tags
#   Metadata tags to apply to this dataset.
#
# @return [Types::CreateDatasetResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
@@ -523,11 +523,11 @@
#   protect the job.
#
# @option params [String] :encryption_mode
#   The encryption mode for the job, which can be one of the following:
#
- #   * `SSE-KMS` - `SSE-KMS` - Server-side encryption with AWS KMS-managed
+ #   * `SSE-KMS` - `SSE-KMS` - Server-side encryption with KMS-managed
#     keys.
#
#   * `SSE-S3` - Server-side encryption with keys managed by Amazon S3.
#
# @option params [required, String] :name
@@ -548,12 +548,12 @@
# @option params [required, Types::S3Location] :output_location
#   Represents an Amazon S3 location (bucket name and object key) where
#   DataBrew can read input data, or write output from a job.
#
# @option params [required, String] :role_arn
- #   The Amazon Resource Name (ARN) of the AWS Identity and Access
- #   Management (IAM) role to be assumed when DataBrew runs the job.
+ #   The Amazon Resource Name (ARN) of the Identity and Access Management
+ #   (IAM) role to be assumed when DataBrew runs the job.
#
# @option params [Hash<String,String>] :tags
#   Metadata tags to apply to this job.
#
# @option params [Integer] :timeout
@@ -623,12 +623,12 @@
# @option params [Types::Sample] :sample
#   Represents the sample size and sampling type for DataBrew to use for
#   interactive data analysis.
#
# @option params [required, String] :role_arn
- #   The Amazon Resource Name (ARN) of the AWS Identity and Access
- #   Management (IAM) role to be assumed for this request.
+ #   The Amazon Resource Name (ARN) of the Identity and Access Management
+ #   (IAM) role to be assumed for this request.
#
# @option params [Hash<String,String>] :tags
#   Metadata tags to apply to this project.
#
# @return [Types::CreateProjectResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
@@ -724,11 +724,11 @@
      req = build_request(:create_recipe, params)
      req.send_request(options)
    end

    # Creates a new job to transform input data, using steps defined in an
-   # existing AWS Glue DataBrew recipe
+   # existing Glue DataBrew recipe
#
# @option params [String] :dataset_name
#   The name of the dataset that this job processes.
#
# @option params [String] :encryption_key_arn
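None of the hunks above show a full request, so for orientation here is a minimal, hypothetical sketch of the create_dataset call these options belong to. The client construction, region, bucket, key, and dataset name are placeholder assumptions, not values from this diff; s3_input_definition is the standard Types::Input member in this SDK.

    require 'aws-sdk-gluedatabrew'

    client = Aws::GlueDataBrew::Client.new(region: 'us-east-1')

    # Create a dataset from a CSV file in Amazon S3. Bucket, key, and
    # dataset name below are placeholders, not part of the diff.
    resp = client.create_dataset({
      name: 'sales-dataset',
      format: 'CSV',
      format_options: { csv: { delimiter: ',' } },
      input: {
        s3_input_definition: {
          bucket: 'my-databrew-bucket', # placeholder
          key: 'raw/sales.csv',         # placeholder
        },
      },
    })
    resp.name #=> "sales-dataset"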
@@ -736,11 +736,11 @@
#   protect the job.
#
# @option params [String] :encryption_mode
#   The encryption mode for the job, which can be one of the following:
#
- #   * `SSE-KMS` - Server-side encryption with keys managed by AWS KMS.
+ #   * `SSE-KMS` - Server-side encryption with keys managed by KMS.
#
#   * `SSE-S3` - Server-side encryption with keys managed by Amazon S3.
#
# @option params [required, String] :name
#   A unique name for the job. Valid characters are alphanumeric (A-Z,
@@ -755,23 +755,27 @@
#   processes data.
#
# @option params [Integer] :max_retries
#   The maximum number of times to retry the job after a job run fails.
#
- # @option params [required, Array<Types::Output>] :outputs
+ # @option params [Array<Types::Output>] :outputs
#   One or more artifacts that represent the output from running the job.
#
+ # @option params [Array<Types::DataCatalogOutput>] :data_catalog_outputs
+ #   One or more artifacts that represent the AWS Glue Data Catalog output
+ #   from running the job.
+ #
# @option params [String] :project_name
#   Either the name of an existing project, or a combination of a recipe
#   and a dataset to associate with the recipe.
#
# @option params [Types::RecipeReference] :recipe_reference
#   Represents the name and version of a DataBrew recipe.
#
# @option params [required, String] :role_arn
- #   The Amazon Resource Name (ARN) of the AWS Identity and Access
- #   Management (IAM) role to be assumed when DataBrew runs the job.
+ #   The Amazon Resource Name (ARN) of the Identity and Access Management
+ #   (IAM) role to be assumed when DataBrew runs the job.
#
# @option params [Hash<String,String>] :tags
#   Metadata tags to apply to this job.
#
# @option params [Integer] :timeout
@@ -790,11 +794,11 @@
#     encryption_mode: "SSE-KMS", # accepts SSE-KMS, SSE-S3
#     name: "JobName", # required
#     log_subscription: "ENABLE", # accepts ENABLE, DISABLE
#     max_capacity: 1,
#     max_retries: 1,
- #     outputs: [ # required
+ #     outputs: [
#       {
#         compression_format: "GZIP", # accepts GZIP, LZ4, SNAPPY, BZIP2, DEFLATE, LZO, BROTLI, ZSTD, ZLIB
#         format: "CSV", # accepts CSV, JSON, PARQUET, GLUEPARQUET, AVRO, ORC, XML
#         partition_columns: ["ColumnName"],
#         location: { # required
@@ -807,10 +811,31 @@
#             delimiter: "Delimiter",
#           },
#         },
#       },
#     ],
+ #     data_catalog_outputs: [
+ #       {
+ #         catalog_id: "CatalogId",
+ #         database_name: "DatabaseName", # required
+ #         table_name: "TableName", # required
+ #         s3_options: {
+ #           location: { # required
+ #             bucket: "Bucket", # required
+ #             key: "Key",
+ #           },
+ #         },
+ #         database_options: {
+ #           temp_directory: {
+ #             bucket: "Bucket", # required
+ #             key: "Key",
+ #           },
+ #           table_name: "DatabaseTableName", # required
+ #         },
+ #         overwrite: false,
+ #       },
+ #     ],
#     project_name: "ProjectName",
#     recipe_reference: {
#       name: "RecipeName", # required
#       recipe_version: "RecipeVersion",
#     },
@@ -840,11 +865,11 @@
# @option params [Array<String>] :job_names
#   The name or names of one or more jobs to be run.
#
# @option params [required, String] :cron_expression
#   The date or dates and time or times when the jobs are to be run. For
- #   more information, see [Cron expressions][1] in the *AWS Glue DataBrew
+ #   more information, see [Cron expressions][1] in the *Glue DataBrew
#   Developer Guide*.
#
#
#
#   [1]: https://docs.aws.amazon.com/databrew/latest/dg/jobs.cron.html
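The new data_catalog_outputs request member is the substantive addition in 1.8.0, and because :outputs is now optional a recipe job can write to the Glue Data Catalog alone. A minimal sketch of such a call, following the request syntax shown above; the job, dataset, recipe, database, table, and bucket names and the role ARN are placeholders:

    # A catalog-only recipe job; no :outputs array is passed.
    resp = client.create_recipe_job({
      name: 'sales-transform-job',
      dataset_name: 'sales-dataset',
      recipe_reference: { name: 'sales-recipe', recipe_version: '1.0' },
      data_catalog_outputs: [
        {
          database_name: 'analytics_db', # required
          table_name: 'sales_clean',     # required
          s3_options: {
            location: { bucket: 'my-databrew-bucket', key: 'jobs/sales/' },
          },
          overwrite: true,
        },
      ],
      role_arn: 'arn:aws:iam::111122223333:role/DataBrewServiceRole', # placeholder
    })
    resp.name #=> "sales-transform-job"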
@@ -1133,10 +1158,11 @@
# * {Types::DescribeJobResponse#last_modified_date #last_modified_date} => Time
# * {Types::DescribeJobResponse#log_subscription #log_subscription} => String
# * {Types::DescribeJobResponse#max_capacity #max_capacity} => Integer
# * {Types::DescribeJobResponse#max_retries #max_retries} => Integer
# * {Types::DescribeJobResponse#outputs #outputs} => Array<Types::Output>
+ # * {Types::DescribeJobResponse#data_catalog_outputs #data_catalog_outputs} => Array<Types::DataCatalogOutput>
# * {Types::DescribeJobResponse#project_name #project_name} => String
# * {Types::DescribeJobResponse#recipe_reference #recipe_reference} => Types::RecipeReference
# * {Types::DescribeJobResponse#resource_arn #resource_arn} => String
# * {Types::DescribeJobResponse#role_arn #role_arn} => String
# * {Types::DescribeJobResponse#tags #tags} => Hash<String,String>
@@ -1170,10 +1196,20 @@
#   resp.outputs[0].partition_columns[0] #=> String
#   resp.outputs[0].location.bucket #=> String
#   resp.outputs[0].location.key #=> String
#   resp.outputs[0].overwrite #=> Boolean
#   resp.outputs[0].format_options.csv.delimiter #=> String
+ #   resp.data_catalog_outputs #=> Array
+ #   resp.data_catalog_outputs[0].catalog_id #=> String
+ #   resp.data_catalog_outputs[0].database_name #=> String
+ #   resp.data_catalog_outputs[0].table_name #=> String
+ #   resp.data_catalog_outputs[0].s3_options.location.bucket #=> String
+ #   resp.data_catalog_outputs[0].s3_options.location.key #=> String
+ #   resp.data_catalog_outputs[0].database_options.temp_directory.bucket #=> String
+ #   resp.data_catalog_outputs[0].database_options.temp_directory.key #=> String
+ #   resp.data_catalog_outputs[0].database_options.table_name #=> String
+ #   resp.data_catalog_outputs[0].overwrite #=> Boolean
#   resp.project_name #=> String
#   resp.recipe_reference.name #=> String
#   resp.recipe_reference.recipe_version #=> String
#   resp.resource_arn #=> String
#   resp.role_arn #=> String
@@ -1211,10 +1247,11 @@
# * {Types::DescribeJobRunResponse#run_id #run_id} => String
# * {Types::DescribeJobRunResponse#state #state} => String
# * {Types::DescribeJobRunResponse#log_subscription #log_subscription} => String
# * {Types::DescribeJobRunResponse#log_group_name #log_group_name} => String
# * {Types::DescribeJobRunResponse#outputs #outputs} => Array<Types::Output>
+ # * {Types::DescribeJobRunResponse#data_catalog_outputs #data_catalog_outputs} => Array<Types::DataCatalogOutput>
# * {Types::DescribeJobRunResponse#recipe_reference #recipe_reference} => Types::RecipeReference
# * {Types::DescribeJobRunResponse#started_by #started_by} => String
# * {Types::DescribeJobRunResponse#started_on #started_on} => Time
# * {Types::DescribeJobRunResponse#job_sample #job_sample} => Types::JobSample
#
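Both describe calls now return the same data_catalog_outputs array. A short sketch of reading it back; the job name is a placeholder carried over from the sketch above, and to_a guards against a nil member on jobs that have no catalog outputs:

    resp = client.describe_job(name: 'sales-transform-job')
    resp.data_catalog_outputs.to_a.each do |out|
      puts "#{out.database_name}.#{out.table_name} (overwrite=#{out.overwrite})"
    end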
@@ -1244,10 +1281,20 @@
#   resp.outputs[0].partition_columns[0] #=> String
#   resp.outputs[0].location.bucket #=> String
#   resp.outputs[0].location.key #=> String
#   resp.outputs[0].overwrite #=> Boolean
#   resp.outputs[0].format_options.csv.delimiter #=> String
+ #   resp.data_catalog_outputs #=> Array
+ #   resp.data_catalog_outputs[0].catalog_id #=> String
+ #   resp.data_catalog_outputs[0].database_name #=> String
+ #   resp.data_catalog_outputs[0].table_name #=> String
+ #   resp.data_catalog_outputs[0].s3_options.location.bucket #=> String
+ #   resp.data_catalog_outputs[0].s3_options.location.key #=> String
+ #   resp.data_catalog_outputs[0].database_options.temp_directory.bucket #=> String
+ #   resp.data_catalog_outputs[0].database_options.temp_directory.key #=> String
+ #   resp.data_catalog_outputs[0].database_options.table_name #=> String
+ #   resp.data_catalog_outputs[0].overwrite #=> Boolean
#   resp.recipe_reference.name #=> String
#   resp.recipe_reference.recipe_version #=> String
#   resp.started_by #=> String
#   resp.started_on #=> Time
#   resp.job_sample.mode #=> String, one of "FULL_DATASET", "CUSTOM_ROWS"
@@ -1560,10 +1607,20 @@
#   resp.job_runs[0].outputs[0].partition_columns[0] #=> String
#   resp.job_runs[0].outputs[0].location.bucket #=> String
#   resp.job_runs[0].outputs[0].location.key #=> String
#   resp.job_runs[0].outputs[0].overwrite #=> Boolean
#   resp.job_runs[0].outputs[0].format_options.csv.delimiter #=> String
+ #   resp.job_runs[0].data_catalog_outputs #=> Array
+ #   resp.job_runs[0].data_catalog_outputs[0].catalog_id #=> String
+ #   resp.job_runs[0].data_catalog_outputs[0].database_name #=> String
+ #   resp.job_runs[0].data_catalog_outputs[0].table_name #=> String
+ #   resp.job_runs[0].data_catalog_outputs[0].s3_options.location.bucket #=> String
+ #   resp.job_runs[0].data_catalog_outputs[0].s3_options.location.key #=> String
+ #   resp.job_runs[0].data_catalog_outputs[0].database_options.temp_directory.bucket #=> String
+ #   resp.job_runs[0].data_catalog_outputs[0].database_options.temp_directory.key #=> String
+ #   resp.job_runs[0].data_catalog_outputs[0].database_options.table_name #=> String
+ #   resp.job_runs[0].data_catalog_outputs[0].overwrite #=> Boolean
#   resp.job_runs[0].recipe_reference.name #=> String
#   resp.job_runs[0].recipe_reference.recipe_version #=> String
#   resp.job_runs[0].started_by #=> String
#   resp.job_runs[0].started_on #=> Time
#   resp.job_runs[0].job_sample.mode #=> String, one of "FULL_DATASET", "CUSTOM_ROWS"
@@ -1637,10 +1694,20 @@
#   resp.jobs[0].outputs[0].partition_columns[0] #=> String
#   resp.jobs[0].outputs[0].location.bucket #=> String
#   resp.jobs[0].outputs[0].location.key #=> String
#   resp.jobs[0].outputs[0].overwrite #=> Boolean
#   resp.jobs[0].outputs[0].format_options.csv.delimiter #=> String
+ #   resp.jobs[0].data_catalog_outputs #=> Array
+ #   resp.jobs[0].data_catalog_outputs[0].catalog_id #=> String
+ #   resp.jobs[0].data_catalog_outputs[0].database_name #=> String
+ #   resp.jobs[0].data_catalog_outputs[0].table_name #=> String
+ #   resp.jobs[0].data_catalog_outputs[0].s3_options.location.bucket #=> String
+ #   resp.jobs[0].data_catalog_outputs[0].s3_options.location.key #=> String
+ #   resp.jobs[0].data_catalog_outputs[0].database_options.temp_directory.bucket #=> String
+ #   resp.jobs[0].data_catalog_outputs[0].database_options.temp_directory.key #=> String
+ #   resp.jobs[0].data_catalog_outputs[0].database_options.table_name #=> String
+ #   resp.jobs[0].data_catalog_outputs[0].overwrite #=> Boolean
#   resp.jobs[0].project_name #=> String
#   resp.jobs[0].recipe_reference.name #=> String
#   resp.jobs[0].recipe_reference.recipe_version #=> String
#   resp.jobs[0].resource_arn #=> String
#   resp.jobs[0].role_arn #=> String
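list_job_runs and list_jobs surface the same array per item. A sketch that sweeps every job for Data Catalog destinations using the SDK's standard next_token pagination; the max_results value is arbitrary and the output format is illustrative:

    next_token = nil
    loop do
      resp = client.list_jobs(max_results: 100, next_token: next_token)
      resp.jobs.each do |job|
        job.data_catalog_outputs.to_a.each do |out|
          puts "#{job.name} -> #{out.database_name}.#{out.table_name}"
        end
      end
      next_token = resp.next_token
      break if next_token.nil?
    end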
@@ -2191,24 +2258,24 @@
#
# @option params [required, String] :name
#   The name of the dataset to be updated.
#
# @option params [String] :format
- #   The file format of a dataset that is created from an S3 file or
+ #   The file format of a dataset that is created from an Amazon S3 file or
#   folder.
#
# @option params [Types::FormatOptions] :format_options
#   Represents a set of options that define the structure of either
#   comma-separated value (CSV), Excel, or JSON input.
#
# @option params [required, Types::Input] :input
#   Represents information on how DataBrew can find data, in either the
- #   AWS Glue Data Catalog or Amazon S3.
+ #   Glue Data Catalog or Amazon S3.
#
# @option params [Types::PathOptions] :path_options
- #   A set of options that defines how DataBrew interprets an S3 path of
- #   the dataset.
+ #   A set of options that defines how DataBrew interprets an Amazon S3
+ #   path of the dataset.
#
# @return [Types::UpdateDatasetResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::UpdateDatasetResponse#name #name} => String
#
@@ -2307,11 +2374,11 @@
#   protect the job.
#
# @option params [String] :encryption_mode
#   The encryption mode for the job, which can be one of the following:
#
- #   * `SSE-KMS` - Server-side encryption with keys managed by AWS KMS.
+ #   * `SSE-KMS` - Server-side encryption with keys managed by KMS.
#
#   * `SSE-S3` - Server-side encryption with keys managed by Amazon S3.
#
# @option params [required, String] :name
#   The name of the job to be updated.
@@ -2330,12 +2397,12 @@
# @option params [required, Types::S3Location] :output_location
#   Represents an Amazon S3 location (bucket name and object key) where
#   DataBrew can read input data, or write output from a job.
#
# @option params [required, String] :role_arn
- #   The Amazon Resource Name (ARN) of the AWS Identity and Access
- #   Management (IAM) role to be assumed when DataBrew runs the job.
+ #   The Amazon Resource Name (ARN) of the Identity and Access Management
+ #   (IAM) role to be assumed when DataBrew runs the job.
#
# @option params [Integer] :timeout
#   The job's timeout in minutes. A job that attempts to run longer than
#   this timeout period ends with a status of `TIMEOUT`.
#
@@ -2488,11 +2555,11 @@
#   protect the job.
#
# @option params [String] :encryption_mode
#   The encryption mode for the job, which can be one of the following:
#
- #   * `SSE-KMS` - Server-side encryption with keys managed by AWS KMS.
+ #   * `SSE-KMS` - Server-side encryption with keys managed by KMS.
#
#   * `SSE-S3` - Server-side encryption with keys managed by Amazon S3.
#
# @option params [required, String] :name
#   The name of the job to update.
@@ -2506,16 +2573,20 @@
#   processes data.
#
# @option params [Integer] :max_retries
#   The maximum number of times to retry the job after a job run fails.
#
- # @option params [required, Array<Types::Output>] :outputs
+ # @option params [Array<Types::Output>] :outputs
#   One or more artifacts that represent the output from running the job.
#
+ # @option params [Array<Types::DataCatalogOutput>] :data_catalog_outputs
+ #   One or more artifacts that represent the AWS Glue Data Catalog output
+ #   from running the job.
+ #
# @option params [required, String] :role_arn
- #   The Amazon Resource Name (ARN) of the AWS Identity and Access
- #   Management (IAM) role to be assumed when DataBrew runs the job.
+ #   The Amazon Resource Name (ARN) of the Identity and Access Management
+ #   (IAM) role to be assumed when DataBrew runs the job.
#
# @option params [Integer] :timeout
#   The job's timeout in minutes. A job that attempts to run longer than
#   this timeout period ends with a status of `TIMEOUT`.
#
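As with create_recipe_job, :outputs on update_recipe_job is now optional, so an existing job can be repointed at the Data Catalog alone. A placeholder sketch (name, database, table, and role ARN are assumptions, not values from this diff):

    client.update_recipe_job({
      name: 'sales-transform-job',
      # Replace the job's destinations with a single catalog table.
      data_catalog_outputs: [
        { database_name: 'analytics_db', table_name: 'sales_clean' },
      ],
      role_arn: 'arn:aws:iam::111122223333:role/DataBrewServiceRole',
    })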
@@ -2530,11 +2601,11 @@
#     encryption_mode: "SSE-KMS", # accepts SSE-KMS, SSE-S3
#     name: "JobName", # required
#     log_subscription: "ENABLE", # accepts ENABLE, DISABLE
#     max_capacity: 1,
#     max_retries: 1,
- #     outputs: [ # required
+ #     outputs: [
#       {
#         compression_format: "GZIP", # accepts GZIP, LZ4, SNAPPY, BZIP2, DEFLATE, LZO, BROTLI, ZSTD, ZLIB
#         format: "CSV", # accepts CSV, JSON, PARQUET, GLUEPARQUET, AVRO, ORC, XML
#         partition_columns: ["ColumnName"],
#         location: { # required
@@ -2547,10 +2618,31 @@
#             delimiter: "Delimiter",
#           },
#         },
#       },
#     ],
+ #     data_catalog_outputs: [
+ #       {
+ #         catalog_id: "CatalogId",
+ #         database_name: "DatabaseName", # required
+ #         table_name: "TableName", # required
+ #         s3_options: {
+ #           location: { # required
+ #             bucket: "Bucket", # required
+ #             key: "Key",
+ #           },
+ #         },
+ #         database_options: {
+ #           temp_directory: {
+ #             bucket: "Bucket", # required
+ #             key: "Key",
+ #           },
+ #           table_name: "DatabaseTableName", # required
+ #         },
+ #         overwrite: false,
+ #       },
+ #     ],
#     role_arn: "Arn", # required
#     timeout: 1,
#   })
#
# @example Response structure
@@ -2571,11 +2663,11 @@
# @option params [Array<String>] :job_names
#   The name or names of one or more jobs to be run for this schedule.
#
# @option params [required, String] :cron_expression
#   The date or dates and time or times when the jobs are to be run. For
- #   more information, see [Cron expressions][1] in the *AWS Glue DataBrew
+ #   more information, see [Cron expressions][1] in the *Glue DataBrew
#   Developer Guide*.
#
#
#
#   [1]: https://docs.aws.amazon.com/databrew/latest/dg/jobs.cron.html
@@ -2619,10 +2711,10 @@
        operation: config.api.operation(operation_name),
        client: self,
        params: params,
        config: config)
      context[:gem_name] = 'aws-sdk-gluedatabrew'
-     context[:gem_version] = '1.7.0'
+     context[:gem_version] = '1.8.0'
      Seahorse::Client::Request.new(handlers, context)
    end

    # @api private
    # @deprecated
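The version-constant bump is the only executable change in the last hunk. A Gemfile pin that picks up the data_catalog_outputs support, plus a runtime check assuming the gem's usual GEM_VERSION constant:

    # Gemfile
    gem 'aws-sdk-gluedatabrew', '~> 1.8'

    # At runtime:
    require 'aws-sdk-gluedatabrew'
    Aws::GlueDataBrew::GEM_VERSION #=> "1.8.0"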