templates/graylog/config/rubber/role/graylog_server/graylog2.conf in rubber-2.6.5 vs templates/graylog/config/rubber/role/graylog_server/graylog2.conf in rubber-2.7.0
- old
+ new
@@ -1,67 +1,181 @@
<%
@path = "/etc/graylog2.conf"
+
+ require 'digest/sha2'
+ root_password_sha2 = (Digest::SHA2.new << rubber_env.graylog_web_password).to_s
+
+ es_servers = rubber_instances.for_role('graylog_elasticsearch').collect { |i| "#{i.internal_ip}:9300" }.join(',')
%>
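+# A sketch of what the ERB header above evaluates to, assuming two hypothetical
+# graylog_elasticsearch instances with internal IPs 10.0.0.5 and 10.0.0.6:
+#   es_servers #=> "10.0.0.5:9300,10.0.0.6:9300"
+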
+# If you are running more than one instance of graylog2-server you have to select one of these
+# instances as master. The master will perform some periodic tasks that non-masters won't perform.
+is_master = true
-# On which port (UDP) should we listen for Syslog messages? (Standard: 514)
-syslog_listen_port = <%= rubber_env.graylog_server_syslog_port %>
-syslog_protocol = udp
+# The auto-generated node ID will be stored in this file and read after restarts. It is a good idea
+# to use an absolute file path here if you are starting graylog2-server from init scripts or similar.
+node_id_file = /etc/graylog2-server-node-id
-# ElasticSearch URL (default: http://localhost:9200/)
-elasticsearch_url = http://<%= rubber_instances.for_role('graylog_elasticsearch').first.full_name %>:9200/
-elasticsearch_index_name = <%= rubber_env.graylog_elasticsearch_index %>
+# You MUST set a secret to secure/pepper the stored user passwords here. Use at least 64 characters.
+# Generate one by using for example: pwgen -s 96
+password_secret = <%= rubber_env.graylog_server_secret %>
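+# (`pwgen -s 96 1` prints a single random 96-character secret; pwgen's trailing
+# arguments are password length and count.)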
-# Always try a reverse DNS lookup instead of parsing hostname from syslog message?
-force_syslog_rdns = false
+# the default root user is named 'admin'
+root_username = <%= rubber_env.graylog_web_username %>
+# You MUST specify a hashed password for the root user (which you only need initially to set up the
+# system and in case you lose connectivity to your authentication backend).
+# Create one by using for example: echo -n yourpassword | shasum -a 256
+# and put the resulting hash value into the following line
+root_password_sha2 = <%= root_password_sha2 %>
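+# For example, with the hypothetical password "secret":
+#   $ echo -n secret | shasum -a 256
+#   2bb80d537b1da3e38bd30361aa855686bde0eacd7162fef6a25fe97bf527a25b  -
+# The ERB header above computes the same SHA-256 hex digest via Ruby's
+# Digest::SHA2, so this setting always tracks rubber_env.graylog_web_password.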
+
+# Set plugin directory here (relative or absolute)
+plugin_dir = plugin
+
+# REST API listen URI. Must be reachable by other graylog2-server nodes if you run a cluster.
+rest_listen_uri = http://<%= rubber_instances[rubber_env.host].internal_ip %>:12900/
+
+# REST API transport address. Defaults to first non-loopback IPv4 system address and port 12900.
+# This will be promoted in the cluster discovery APIs and other nodes may try to connect on this
+# address. (see rest_listen_uri)
+rest_transport_uri = http://<%= rubber_instances[rubber_env.host].internal_ip %>:12900/
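+# e.g. for a hypothetical host with internal IP 10.0.0.10, both URIs above
+# render as http://10.0.0.10:12900/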
+
+# Embedded elasticsearch configuration file
+# pay attention to the working directory of the server, maybe use an absolute path here
+elasticsearch_config_file = /etc/graylog2-elasticsearch.yml
+
+elasticsearch_max_docs_per_index = 20000000
+
+# How many indices do you want to keep?
+# elasticsearch_max_number_of_indices * elasticsearch_max_docs_per_index = total number of messages in your setup
+elasticsearch_max_number_of_indices = 20
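+# With the values above: 20 indices * 20,000,000 docs = 400,000,000 messages
+# retained in total before the oldest index is subject to the retention_strategy below.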
+
+# Decide what happens with the oldest indices when the maximum number of indices is reached.
+# The following strategies are available:
+# - delete # Deletes the index completely (Default)
+# - close # Closes the index and hides it from the system. Can be re-opened later.
+retention_strategy = delete
+
+# How many ElasticSearch shards and replicas should be used per index? Note that this only applies to newly created indices.
+elasticsearch_shards = <%= [rubber_instances.for_role('graylog_elasticsearch').size, 1].max %>
+elasticsearch_replicas = 0
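+# e.g. with three graylog_elasticsearch instances registered, the expression
+# above yields elasticsearch_shards = 3; with none it falls back to 1.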
+
+elasticsearch_index_prefix = graylog2
+
+# Settings to be passed to elasticsearch's client (overriding those in the provided elasticsearch_config_file).
+# The cluster name must be the same as the one used by your elasticsearch cluster:
+elasticsearch_cluster_name = <%= rubber_env.graylog_elasticsearch_index %>
+
+# you could also leave this out, but setting it makes it easier to identify the graylog2 client instance
+elasticsearch_node_name = <%= rubber_env.host %>
+
+# we don't want the graylog2 server to store any data or to be a master node
+#elasticsearch_node_master = false
+#elasticsearch_node_data = false
+
+# use a different port if you run multiple elasticsearch nodes on one machine
+#elasticsearch_transport_tcp_port = 9350
+# we don't need to run the embedded HTTP server here
+#elasticsearch_http_enabled = false
+
+elasticsearch_discovery_zen_ping_multicast_enabled = false
+elasticsearch_discovery_zen_ping_unicast_hosts = <%= es_servers %>
+
+# Analyzer (tokenizer) to use for the message and full_message fields. The "standard" filter usually is a good idea.
+# All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom
+# ElasticSearch documentation: http://www.elasticsearch.org/guide/reference/index-modules/analysis/
+# Note that this setting only takes effect on newly created indices.
+elasticsearch_analyzer = standard
+
+# Batch size for all outputs. This is the maximum (!) number of messages an output module will get at once.
+# For example, if this is set to 5000 (default), the ElasticSearch Output will not index more than 5000 messages
+# at once. After that index operation is performed, the next batch will be indexed. If there is only 1 message
+# waiting, it will only index that single message. Raise this parameter if your incoming message rate is so
+# high that indexing 5000 messages at once is not enough. (Only relevant at *really* high message rates)
+output_batch_size = 5000
+
+# The number of processors running in parallel.
+# Raise this number if your buffers are filling up.
+processbuffer_processors = 5
+outputbuffer_processors = 5
+
+# Wait strategy describing how buffer processors wait on a cursor sequence. (default: sleeping)
+# Possible types:
+# - yielding
+# Compromise between performance and CPU usage.
+# - sleeping
+# Compromise between performance and CPU usage. Latency spikes can occur after quiet periods.
+# - blocking
+# High throughput, low latency, higher CPU usage.
+# - busy_spinning
+# Avoids syscalls which could introduce latency jitter. Best when threads can be bound to specific CPU cores.
+processor_wait_strategy = blocking
+
+# Size of internal ring buffers. Raise this if raising outputbuffer_processors does not help anymore.
+# For optimum performance your LogMessage objects in the ring buffer should fit in your CPU L3 cache.
+# Start server with --statistics flag to see buffer utilization.
+# Must be a power of 2. (512, 1024, 2048, ...)
+ring_size = 1024
+
# MongoDB Configuration
mongodb_useauth = false
-mongodb_user = grayloguser
-mongodb_password = 123
+#mongodb_user = grayloguser
+#mongodb_password = 123
mongodb_host = <%= rubber_instances.for_role('graylog_mongodb').first.full_name %>
#mongodb_replica_set = localhost:27017,localhost:27018,localhost:27019
mongodb_database = <%= rubber_env.graylog_mongo_database %>
mongodb_port = 27017
-# Graylog2 uses an internal message queue that holds all received messages until they are indexed. The mq_batch_size parameter defines how many messages are sent
-# to ElasticSearch at once (using a _bulk update: http://www.elasticsearch.org/guide/reference/api/bulk.html). The mq_poll_freq parameter controls in which
-# interval (in seconds) the message batch is sent. Example: If you leave the standard values (mq_batch_size = 4000, mq_poll_freq = 1), Graylog2 will index 4000 messages
-# every second. If you have spikes with more than 4000 messages per second, the queue will start growing until you get under 4000 messages/second again. The queue is
-# FIFO and can grow until you run out of RAM. Note that the queue *only* resides in RAM, so if you set the mq_poll_freq to a high value, you may lose a lot of not yet
-# indexed messages when the server crashes. Run the server in debug mode (java -jar graylog2-server.jar --debug) with a |grep '^INFO' to see debug information about
-# the queue and its size. (INFO : org.graylog2.periodical.BulkIndexerThread - About to index max 4000 messages. You have a total of 103 messages in the queue. [freq:1s])
-# You can also monitor the queue size in your graylog2-web-interface.
-mq_batch_size = 4000
-mq_poll_freq = 1
-
-# You can set a maximum size of the message queue. If this size is reached, all new messages will be rejected until messages are removed/indexed from the queue.
-# 0 = unlimited queue size (default)
-mq_max_size = 0
-
# Raise this according to the maximum connections your MongoDB server can handle if you encounter MongoDB connection problems.
mongodb_max_connections = 100
# Number of threads allowed to be blocked by MongoDB connections multiplier. Default: 5
# If mongodb_max_connections is 100, and mongodb_threads_allowed_to_block_multiplier is 5, then 500 threads can block. More than that and an exception will be thrown.
# http://api.mongodb.org/java/current/com/mongodb/MongoOptions.html#threadsAllowedToBlockForConnectionMultiplier
mongodb_threads_allowed_to_block_multiplier = 5
-# Graylog Extended Log Format (GELF)
-use_gelf = true
-gelf_listen_address = 0.0.0.0
-gelf_listen_port = <%= rubber_env.graylog_server_port %>
# Drools Rule File (used to rewrite incoming log messages)
-# rules_file = /etc/graylog2.d/rules/graylog2.drl
+# See: http://support.torch.sh/help/kb/graylog2-server/custom-message-rewritingprocessing
+# rules_file = /etc/graylog2.drl
-# AMQP
-amqp_enabled = false
-amqp_subscribed_queues = somequeue1:gelf,somequeue2:gelf,somequeue3:syslog
-amqp_host = localhost
-amqp_port = 5672
-amqp_username = guest
-amqp_password = guest
-amqp_virtualhost = /
+# Email transport
+transport_email_enabled = false
+transport_email_protocol = smtp
+transport_email_hostname = mail.example.com
+transport_email_port = 587
+transport_email_use_auth = true
+transport_email_use_tls = true
+transport_email_auth_username = you@example.com
+transport_email_auth_password = secret
+transport_email_subject_prefix = [graylog2]
+transport_email_from_email = graylog2@example.com
+transport_email_from_name = Graylog2
+transport_email_web_interface_url = http://your-graylog2.example.org
-# Forwarders
-# Timeout in seconds for each connection and read of Logg.ly API when forwarding messages. Default: 3
-forwarder_loggly_timeout = 3
+# Jabber/XMPP transport
+transport_jabber_enabled = false
+transport_jabber_hostname = jabber.example.com
+transport_jabber_port = 5222
+transport_jabber_use_sasl_auth = true
+transport_jabber_allow_selfsigned_certs = false
+transport_jabber_auth_username = your_user
+transport_jabber_auth_password = secret
+transport_jabber_message_prefix = [graylog2]
+
+# Additional modules
+# Graphite
+#enable_graphite_output = false
+#graphite_carbon_host = 127.0.0.1
+#graphite_carbon_tcp_port = 2003
+#graphite_prefix = logs
+
+# Librato Metrics (http://support.torch.sh/help/kb/graylog2-server/using-librato-metrics-with-graylog2)
+#enable_libratometrics_output = false
+#enable_libratometrics_system_metrics = false
+#libratometrics_api_user = you@example.com
+#libratometrics_api_token = abcdefg12345
+#libratometrics_prefix = gl2-
+#libratometrics_interval = 60
+#libratometrics_stream_filter =
+#libratometrics_host_filter =