lib/aws-sdk-glue/client.rb in aws-sdk-glue-1.79.0 vs lib/aws-sdk-glue/client.rb in aws-sdk-glue-1.80.0

- lines removed (present only in aws-sdk-glue-1.79.0, "old")
+ lines added (present only in aws-sdk-glue-1.80.0, "new")

@@ -689,10 +689,11 @@ # resp.crawlers[0].classifiers #=> Array # resp.crawlers[0].classifiers[0] #=> String # resp.crawlers[0].recrawl_policy.recrawl_behavior #=> String, one of "CRAWL_EVERYTHING", "CRAWL_NEW_FOLDERS_ONLY" # resp.crawlers[0].schema_change_policy.update_behavior #=> String, one of "LOG", "UPDATE_IN_DATABASE" # resp.crawlers[0].schema_change_policy.delete_behavior #=> String, one of "LOG", "DELETE_FROM_DATABASE", "DEPRECATE_IN_DATABASE" + # resp.crawlers[0].lineage_configuration.crawler_lineage_settings #=> String, one of "ENABLE", "DISABLE" # resp.crawlers[0].state #=> String, one of "READY", "RUNNING", "STOPPING" # resp.crawlers[0].table_prefix #=> String # resp.crawlers[0].schedule.schedule_expression #=> String # resp.crawlers[0].schedule.state #=> String, one of "SCHEDULED", "NOT_SCHEDULED", "TRANSITIONING" # resp.crawlers[0].crawl_elapsed_time #=> Integer @@ -1553,10 +1554,13 @@ # # @option params [Types::RecrawlPolicy] :recrawl_policy # A policy that specifies whether to crawl the entire dataset again, or # to crawl only folders that were added since the last crawler run. # + # @option params [Types::LineageConfiguration] :lineage_configuration + # Specifies data lineage configuration settings for the crawler. + # # @option params [String] :configuration # Crawler configuration information. This versioned JSON string allows # users to specify aspects of a crawler's behavior. For more # information, see [Configuring a Crawler][1]. 
# @@ -1630,10 +1634,13 @@ # delete_behavior: "LOG", # accepts LOG, DELETE_FROM_DATABASE, DEPRECATE_IN_DATABASE # }, # recrawl_policy: { # recrawl_behavior: "CRAWL_EVERYTHING", # accepts CRAWL_EVERYTHING, CRAWL_NEW_FOLDERS_ONLY # }, + # lineage_configuration: { + # crawler_lineage_settings: "ENABLE", # accepts ENABLE, DISABLE + # }, # configuration: "CrawlerConfiguration", # crawler_security_configuration: "CrawlerSecurityConfiguration", # tags: { # "TagKey" => "TagValue", # }, @@ -2401,10 +2408,50 @@ def create_partition(params = {}, options = {}) req = build_request(:create_partition, params) req.send_request(options) end + # Creates a specified partition index in an existing table. + # + # @option params [String] :catalog_id + # The catalog ID where the table resides. + # + # @option params [required, String] :database_name + # Specifies the name of a database in which you want to create a + # partition index. + # + # @option params [required, String] :table_name + # Specifies the name of a table in which you want to create a partition + # index. + # + # @option params [required, Types::PartitionIndex] :partition_index + # Specifies a `PartitionIndex` structure to create a partition index in + # an existing table. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
+ # + # @example Request syntax with placeholder values + # + # resp = client.create_partition_index({ + # catalog_id: "CatalogIdString", + # database_name: "NameString", # required + # table_name: "NameString", # required + # partition_index: { # required + # keys: ["NameString"], # required + # index_name: "NameString", # required + # }, + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreatePartitionIndex AWS API Documentation + # + # @overload create_partition_index(params = {}) + # @param [Hash] params ({}) + def create_partition_index(params = {}, options = {}) + req = build_request(:create_partition_index, params) + req.send_request(options) + end + # Creates a new registry which may be used to hold a collection of # schemas. # # @option params [required, String] :registry_name # Name of the registry to be created of max length of 255, and may only @@ -3352,10 +3399,46 @@ def delete_partition(params = {}, options = {}) req = build_request(:delete_partition, params) req.send_request(options) end + # Deletes a specified partition index from an existing table. + # + # @option params [String] :catalog_id + # The catalog ID where the table resides. + # + # @option params [required, String] :database_name + # Specifies the name of a database from which you want to delete a + # partition index. + # + # @option params [required, String] :table_name + # Specifies the name of a table from which you want to delete a + # partition index. + # + # @option params [required, String] :index_name + # The name of the partition index to be deleted. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
+ # + # @example Request syntax with placeholder values + # + # resp = client.delete_partition_index({ + # catalog_id: "CatalogIdString", + # database_name: "NameString", # required + # table_name: "NameString", # required + # index_name: "NameString", # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeletePartitionIndex AWS API Documentation + # + # @overload delete_partition_index(params = {}) + # @param [Hash] params ({}) + def delete_partition_index(params = {}, options = {}) + req = build_request(:delete_partition_index, params) + req.send_request(options) + end + # Delete the entire registry including schema and all of its versions. # To get the status of the delete operation, you can call the # `GetRegistry` API after the asynchronous call. Deleting a registry # will disable all online operations for the registry such as the # `UpdateRegistry`, `CreateSchema`, `UpdateSchema`, and @@ -4209,10 +4292,11 @@ # resp.crawler.classifiers #=> Array # resp.crawler.classifiers[0] #=> String # resp.crawler.recrawl_policy.recrawl_behavior #=> String, one of "CRAWL_EVERYTHING", "CRAWL_NEW_FOLDERS_ONLY" # resp.crawler.schema_change_policy.update_behavior #=> String, one of "LOG", "UPDATE_IN_DATABASE" # resp.crawler.schema_change_policy.delete_behavior #=> String, one of "LOG", "DELETE_FROM_DATABASE", "DEPRECATE_IN_DATABASE" + # resp.crawler.lineage_configuration.crawler_lineage_settings #=> String, one of "ENABLE", "DISABLE" # resp.crawler.state #=> String, one of "READY", "RUNNING", "STOPPING" # resp.crawler.table_prefix #=> String # resp.crawler.schedule.schedule_expression #=> String # resp.crawler.schedule.state #=> String, one of "SCHEDULED", "NOT_SCHEDULED", "TRANSITIONING" # resp.crawler.crawl_elapsed_time #=> Integer @@ -4339,10 +4423,11 @@ # resp.crawlers[0].classifiers #=> Array # resp.crawlers[0].classifiers[0] #=> String # resp.crawlers[0].recrawl_policy.recrawl_behavior #=> String, one of "CRAWL_EVERYTHING", 
"CRAWL_NEW_FOLDERS_ONLY" # resp.crawlers[0].schema_change_policy.update_behavior #=> String, one of "LOG", "UPDATE_IN_DATABASE" # resp.crawlers[0].schema_change_policy.delete_behavior #=> String, one of "LOG", "DELETE_FROM_DATABASE", "DEPRECATE_IN_DATABASE" + # resp.crawlers[0].lineage_configuration.crawler_lineage_settings #=> String, one of "ENABLE", "DISABLE" # resp.crawlers[0].state #=> String, one of "READY", "RUNNING", "STOPPING" # resp.crawlers[0].table_prefix #=> String # resp.crawlers[0].schedule.schedule_expression #=> String # resp.crawlers[0].schedule.state #=> String, one of "SCHEDULED", "NOT_SCHEDULED", "TRANSITIONING" # resp.crawlers[0].crawl_elapsed_time #=> Integer @@ -5492,11 +5577,16 @@ # resp.partition_index_descriptor_list #=> Array # resp.partition_index_descriptor_list[0].index_name #=> String # resp.partition_index_descriptor_list[0].keys #=> Array # resp.partition_index_descriptor_list[0].keys[0].name #=> String # resp.partition_index_descriptor_list[0].keys[0].type #=> String - # resp.partition_index_descriptor_list[0].index_status #=> String, one of "ACTIVE" + # resp.partition_index_descriptor_list[0].index_status #=> String, one of "CREATING", "ACTIVE", "DELETING", "FAILED" + # resp.partition_index_descriptor_list[0].backfill_errors #=> Array + # resp.partition_index_descriptor_list[0].backfill_errors[0].code #=> String, one of "ENCRYPTED_PARTITION_ERROR", "INTERNAL_ERROR", "INVALID_PARTITION_TYPE_DATA_ERROR", "MISSING_PARTITION_VALUE_ERROR", "UNSUPPORTED_PARTITION_CHARACTER_ERROR" + # resp.partition_index_descriptor_list[0].backfill_errors[0].partitions #=> Array + # resp.partition_index_descriptor_list[0].backfill_errors[0].partitions[0].values #=> Array + # resp.partition_index_descriptor_list[0].backfill_errors[0].partitions[0].values[0] #=> String # resp.next_token #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartitionIndexes AWS API Documentation # # @overload get_partition_indexes(params = {}) @@ 
-9549,10 +9639,13 @@ # # @option params [Types::RecrawlPolicy] :recrawl_policy # A policy that specifies whether to crawl the entire dataset again, or # to crawl only folders that were added since the last crawler run. # + # @option params [Types::LineageConfiguration] :lineage_configuration + # Specifies data lineage configuration settings for the crawler. + # # @option params [String] :configuration # Crawler configuration information. This versioned JSON string allows # users to specify aspects of a crawler's behavior. For more # information, see [Configuring a Crawler][1]. # @@ -9617,10 +9710,13 @@ # delete_behavior: "LOG", # accepts LOG, DELETE_FROM_DATABASE, DEPRECATE_IN_DATABASE # }, # recrawl_policy: { # recrawl_behavior: "CRAWL_EVERYTHING", # accepts CRAWL_EVERYTHING, CRAWL_NEW_FOLDERS_ONLY # }, + # lineage_configuration: { + # crawler_lineage_settings: "ENABLE", # accepts ENABLE, DISABLE + # }, # configuration: "CrawlerConfiguration", # crawler_security_configuration: "CrawlerSecurityConfiguration", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawler AWS API Documentation @@ -10492,10 +10588,10 @@ operation: config.api.operation(operation_name), client: self, params: params, config: config) context[:gem_name] = 'aws-sdk-glue' - context[:gem_version] = '1.79.0' + context[:gem_version] = '1.80.0' Seahorse::Client::Request.new(handlers, context) end # @api private # @deprecated