# frozen_string_literal: true
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/managedkafka/v1/resources.proto

require 'google/protobuf'

require 'google/api/field_behavior_pb'
require 'google/api/resource_pb'
require 'google/protobuf/timestamp_pb'


descriptor_data = "\n,google/cloud/managedkafka/v1/resources.proto\x12\x1cgoogle.cloud.managedkafka.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xd8\x06\n\x07\x43luster\x12\x42\n\ngcp_config\x18\t \x01(\x0b\x32\'.google.cloud.managedkafka.v1.GcpConfigB\x03\xe0\x41\x02H\x00\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x08\x12\x34\n\x0b\x63reate_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x34\n\x0bupdate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x46\n\x06labels\x18\x04 \x03(\x0b\x32\x31.google.cloud.managedkafka.v1.Cluster.LabelsEntryB\x03\xe0\x41\x01\x12J\n\x0f\x63\x61pacity_config\x18\x05 \x01(\x0b\x32,.google.cloud.managedkafka.v1.CapacityConfigB\x03\xe0\x41\x02\x12L\n\x10rebalance_config\x18\x08 \x01(\x0b\x32-.google.cloud.managedkafka.v1.RebalanceConfigB\x03\xe0\x41\x01\x12?\n\x05state\x18\n \x01(\x0e\x32+.google.cloud.managedkafka.v1.Cluster.StateB\x03\xe0\x41\x03\x12\x1f\n\rsatisfies_pzi\x18\x0b \x01(\x08\x42\x03\xe0\x41\x03H\x01\x88\x01\x01\x12\x1f\n\rsatisfies_pzs\x18\x0c \x01(\x08\x42\x03\xe0\x41\x03H\x02\x88\x01\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"F\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\n\n\x06\x41\x43TIVE\x10\x02\x12\x0c\n\x08\x44\x45LETING\x10\x03:w\xea\x41t\n#managedkafka.googleapis.com/Cluster\x12:projects/{project}/locations/{location}/clusters/{cluster}*\x08\x63lusters2\x07\x63lusterB\x11\n\x0fplatform_configB\x10\n\x0e_satisfies_pziB\x10\n\x0e_satisfies_pzs\"D\n\x0e\x43\x61pacityConfig\x12\x17\n\nvcpu_count\x18\x01 \x01(\x03\x42\x03\xe0\x41\x02\x12\x19\n\x0cmemory_bytes\x18\x02 \x01(\x03\x42\x03\xe0\x41\x02\"\xa8\x01\n\x0fRebalanceConfig\x12\x45\n\x04mode\x18\x01 \x01(\x0e\x32\x32.google.cloud.managedkafka.v1.RebalanceConfig.ModeB\x03\xe0\x41\x01\"N\n\x04Mode\x12\x14\n\x10MODE_UNSPECIFIED\x10\x00\x12\x10\n\x0cNO_REBALANCE\x10\x01\x12\x1e\n\x1a\x41UTO_REBALANCE_ON_SCALE_UP\x10\x02\"$\n\rNetworkConfig\x12\x13\n\x06subnet\x18\x02 \x01(\tB\x03\xe0\x41\x02\"Y\n\x0c\x41\x63\x63\x65ssConfig\x12I\n\x0fnetwork_configs\x18\x01 \x03(\x0b\x32+.google.cloud.managedkafka.v1.NetworkConfigB\x03\xe0\x41\x02\"\x92\x01\n\tGcpConfig\x12\x46\n\raccess_config\x18\x03 \x01(\x0b\x32*.google.cloud.managedkafka.v1.AccessConfigB\x03\xe0\x41\x02\x12=\n\x07kms_key\x18\x02 \x01(\tB,\xe0\x41\x01\xe0\x41\x05\xfa\x41#\n!cloudkms.googleapis.com/CryptoKey\"\xd7\x02\n\x05Topic\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x08\x12\x1c\n\x0fpartition_count\x18\x02 \x01(\x05\x42\x03\xe0\x41\x02\x12\"\n\x12replication_factor\x18\x03 \x01(\x05\x42\x06\xe0\x41\x02\xe0\x41\x05\x12\x46\n\x07\x63onfigs\x18\x04 \x03(\x0b\x32\x30.google.cloud.managedkafka.v1.Topic.ConfigsEntryB\x03\xe0\x41\x01\x1a.\n\x0c\x43onfigsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01:\x80\x01\xea\x41}\n!managedkafka.googleapis.com/Topic\x12Iprojects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}*\x06topics2\x05topic\"\xe1\x01\n\x15\x43onsumerTopicMetadata\x12\\\n\npartitions\x18\x01 \x03(\x0b\x32\x43.google.cloud.managedkafka.v1.ConsumerTopicMetadata.PartitionsEntryB\x03\xe0\x41\x01\x1aj\n\x0fPartitionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\x46\n\x05value\x18\x02 \x01(\x0b\x32\x37.google.cloud.managedkafka.v1.ConsumerPartitionMetadata:\x02\x38\x01\"G\n\x19\x43onsumerPartitionMetadata\x12\x13\n\x06offset\x18\x01 \x01(\x03\x42\x03\xe0\x41\x02\x12\x15\n\x08metadata\x18\x02 \x01(\tB\x03\xe0\x41\x01\"\x81\x03\n\rConsumerGroup\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x08\x12L\n\x06topics\x18\x02 \x03(\x0b\x32\x37.google.cloud.managedkafka.v1.ConsumerGroup.TopicsEntryB\x03\xe0\x41\x01\x1a\x62\n\x0bTopicsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.google.cloud.managedkafka.v1.ConsumerTopicMetadata:\x02\x38\x01:\xaa\x01\xea\x41\xa6\x01\n)managedkafka.googleapis.com/ConsumerGroup\x12Zprojects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumer_group}*\x0e\x63onsumerGroups2\rconsumerGroup\"\x80\x02\n\x11OperationMetadata\x12\x34\n\x0b\x63reate_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x13\n\x06target\x18\x03 \x01(\tB\x03\xe0\x41\x03\x12\x11\n\x04verb\x18\x04 \x01(\tB\x03\xe0\x41\x03\x12\x1b\n\x0estatus_message\x18\x05 \x01(\tB\x03\xe0\x41\x03\x12#\n\x16requested_cancellation\x18\x06 \x01(\x08\x42\x03\xe0\x41\x03\x12\x18\n\x0b\x61pi_version\x18\x07 \x01(\tB\x03\xe0\x41\x03\x42\xd5\x02\n com.google.cloud.managedkafka.v1B\x0eResourcesProtoP\x01ZDcloud.google.com/go/managedkafka/apiv1/managedkafkapb;managedkafkapb\xaa\x02\x1cGoogle.Cloud.ManagedKafka.V1\xca\x02\x1cGoogle\\Cloud\\ManagedKafka\\V1\xea\x02\x1fGoogle::Cloud::ManagedKafka::V1\xea\x41x\n!cloudkms.googleapis.com/CryptoKey\x12Sprojects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}b\x06proto3"

pool = Google::Protobuf::DescriptorPool.generated_pool

begin
  pool.add_serialized_file(descriptor_data)
rescue TypeError
  # Compatibility code: will be removed in the next major version.
  require 'google/protobuf/descriptor_pb'
  parsed = Google::Protobuf::FileDescriptorProto.decode(descriptor_data)
  parsed.clear_dependency
  serialized = parsed.class.encode(parsed)
  file = pool.add_serialized_file(serialized)
  warn "Warning: Protobuf detected an import path issue while loading generated file #{__FILE__}"
  imports = [
    ["google.protobuf.Timestamp", "google/protobuf/timestamp.proto"],
  ]
  imports.each do |type_name, expected_filename|
    import_file = pool.lookup(type_name).file_descriptor
    if import_file.name != expected_filename
      warn "- #{file.name} imports #{expected_filename}, but that import was loaded as #{import_file.name}"
    end
  end
  warn "Each proto file must use a consistent fully-qualified name."
  warn "This will become an error in the next major version."
end

module Google
  module Cloud
    module ManagedKafka
      module V1
        Cluster = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.managedkafka.v1.Cluster").msgclass
        Cluster::State = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.managedkafka.v1.Cluster.State").enummodule
        CapacityConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.managedkafka.v1.CapacityConfig").msgclass
        RebalanceConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.managedkafka.v1.RebalanceConfig").msgclass
        RebalanceConfig::Mode = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.managedkafka.v1.RebalanceConfig.Mode").enummodule
        NetworkConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.managedkafka.v1.NetworkConfig").msgclass
        AccessConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.managedkafka.v1.AccessConfig").msgclass
        GcpConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.managedkafka.v1.GcpConfig").msgclass
        Topic = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.managedkafka.v1.Topic").msgclass
        ConsumerTopicMetadata = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.managedkafka.v1.ConsumerTopicMetadata").msgclass
        ConsumerPartitionMetadata = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.managedkafka.v1.ConsumerPartitionMetadata").msgclass
        ConsumerGroup = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.managedkafka.v1.ConsumerGroup").msgclass
        OperationMetadata = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.managedkafka.v1.OperationMetadata").msgclass
      end
    end
  end
end
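
# Illustrative usage sketch (not part of the generated output): it shows how the
# message classes defined above can be used through the standard google-protobuf
# msgclass API (`new`, `encode`, `decode`). The require path follows the usual
# `_pb.rb` naming convention for this proto, and the project, cluster, and topic
# config values are assumptions chosen for demonstration only.
#
#   require "google/cloud/managedkafka/v1/resources_pb"
#
#   topic = Google::Cloud::ManagedKafka::V1::Topic.new(
#     name: "projects/my-project/locations/us-central1/clusters/my-cluster/topics/my-topic",
#     partition_count: 3,
#     replication_factor: 3,
#     configs: { "cleanup.policy" => "compact" }
#   )
#
#   bytes = Google::Cloud::ManagedKafka::V1::Topic.encode(topic)
#   Google::Cloud::ManagedKafka::V1::Topic.decode(bytes) == topic # => true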