logger:
  file: log/phobos.log
  level: info

kafka:
  # identifier for this application
  client_id: phobos
  # timeout setting for connecting to brokers
  connect_timeout:
  # timeout setting for socket connections
  socket_timeout:
  # PEM encoded CA cert to use with an SSL connection (string)
  ssl_ca_cert:
  # PEM encoded client cert to use with an SSL connection (string)
  # Must be used in combination with ssl_client_cert_key
  ssl_client_cert:
  # PEM encoded client cert key to use with an SSL connection (string)
  # Must be used in combination with ssl_client_cert
  ssl_client_cert_key:
  # list of brokers used to initialize the client ("host:port")
  seed_brokers:
    - localhost:9092

producer:
  # number of seconds a broker can wait for replicas to acknowledge
  # a write before responding with a timeout
  ack_timeout: 5
  # number of replicas that must acknowledge a write, or `:all`
  # if all in-sync replicas must acknowledge
  required_acks: :all
  # number of retries that should be attempted before giving up sending
  # messages to the cluster. Does not include the original attempt
  max_retries: 2
  # number of seconds to wait between retries
  retry_backoff: 1
  # number of messages allowed in the buffer before new writes will
  # raise {BufferOverflow} exceptions
  max_buffer_size: 1000
  # maximum size of the buffer in bytes. Attempting to produce messages
  # when the buffer reaches this size will result in {BufferOverflow} being raised
  max_buffer_bytesize: 10000000
  # name of the compression codec to use, or nil if no compression should be performed.
  # Valid codecs: `:snappy` and `:gzip`
  compression_codec:
  # number of messages that need to be in a message set before it is compressed.
  # Note that message sets are per-partition rather than per-topic or per-producer
  compression_threshold: 1

consumer:
  # number of seconds after which, if a client hasn't contacted the Kafka cluster,
  # it will be kicked out of the group
  session_timeout: 30
  # interval between offset commits, in seconds
  offset_commit_interval: 10
  # number of messages that can be processed before their offsets are committed.
  # If zero, offset commits are not triggered by message processing
  offset_commit_threshold: 0
  # interval between heartbeats; must be less than the session window
  heartbeat_interval: 10

backoff:
  min_ms: 1000
  max_ms: 60000

listeners:
  - handler: Phobos::EchoHandler
    topic: test
    # id of the group that the consumer should join
    group_id: test-1
    # Once the consumer group has checkpointed its progress in the topic's partitions,
    # the consumers will always start from the checkpointed offsets, regardless of config.
    # As such, this setting only applies when the consumer initially starts consuming from a topic
    start_from_beginning: true
    # maximum amount of data fetched from a single partition at a time
    max_bytes_per_partition: 524288 # 512 KB
    # number of threads created for this listener; each thread behaves as an independent consumer.
    # They don't share any state
    max_concurrency: 1
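
For reference, each listener's handler names a Ruby class that includes the Phobos::Handler mixin and implements consume(payload, metadata); Phobos::EchoHandler above is the example handler shipped with the gem. A minimal sketch of a custom handler for the listener above, assuming the standard Phobos::Handler mixin and consume(payload, metadata) signature (the class name MyApp::TestHandler is illustrative, not part of the gem):

    require 'phobos'

    module MyApp
      class TestHandler
        include Phobos::Handler

        # called once for every message fetched from the "test" topic;
        # payload is the raw message value, metadata is a hash with
        # details such as topic, partition, and offset
        def consume(payload, metadata)
          Phobos.logger.info { Hash(message: payload).merge(metadata) }
        end
      end
    end

To use it, point the listener's handler entry at MyApp::TestHandler and start the listeners with the Phobos CLI, e.g. `phobos start -c config/phobos.yml`.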