templates/graylog/config/rubber/role/graylog_server/graylog2.conf in rubber-2.9.0 vs templates/graylog/config/rubber/role/graylog_server/graylog2.conf in rubber-2.10.0
- old
+ new
@@ -1,7 +1,8 @@
<%
@path = "/etc/graylog2.conf"
+ @delayed_post = "service graylog-server restart"
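# (editor's note) @delayed_post is rubber's template hook for a command deferred until after the
# generated config files have been written out; here it restarts graylog-server so new settings apply.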
require 'digest/sha2'
root_password_sha2 = (Digest::SHA2.new << rubber_env.graylog_web_password).to_s
es_servers = rubber_instances.for_role('graylog_elasticsearch').collect { |i| "#{i.internal_ip}:9300" }.join(',')
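# (editor's note) es_servers ends up as a comma-separated list of the internal elasticsearch transport
# addresses, e.g. "10.0.0.11:9300,10.0.0.12:9300" for a two-node cluster (illustrative addresses).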
@@ -21,10 +22,12 @@
# the default root user is named 'admin'
root_username = <%= rubber_env.graylog_web_username %>
# You MUST specify a hash password for the root user (which you only need to initially set up the
# system and in case you lose connectivity to your authentication backend)
+# This password cannot be changed using the API or via the web interface. If you need to change it,
+# modify it in this file.
# Create one by using for example: echo -n yourpassword | shasum -a 256
# and put the resulting hash value into the following line
root_password_sha2 = <%= root_password_sha2 %>
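# (editor's note) The value above is filled in by the ERB header, which hashes
# rubber_env.graylog_web_password with Digest::SHA2 (SHA-256 by default), so it should match what the
# shasum command shown above prints for the same password.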
# Set plugin directory here (relative or absolute)
@@ -36,10 +39,19 @@
# REST API transport address. Defaults to first non-loopback IPv4 system address and port 12900.
# This will be promoted in the cluster discovery APIs and other nodes may try to connect on this
# address. (see rest_listen_uri)
rest_transport_uri = http://<%= rubber_instances[rubber_env.host].internal_ip %>:12900/
+# Enable CORS headers for REST api. This is necessary for JS-clients accessing the server directly.
+# If these are disabled, modern browsers will not be able to retrieve resources from the server.
+# This is disabled by default. Uncomment the next line to enable it.
+#rest_enable_cors = true
+
+# Enable GZIP support for REST api. This compresses API responses and therefore helps to reduce
+# overall round trip times. This is disabled by default. Uncomment the next line to enable it.
+#rest_enable_gzip = true
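# (editor's sketch, not part of either template version) With both flags uncommented and the service
# restarted, the headers can be verified from the host, assuming the default REST port 12900:
#   curl -s --compressed -D - -H "Origin: http://example.com" http://localhost:12900/ -o /dev/null
# A gzip-compressed response carries "Content-Encoding: gzip", and with CORS enabled an
# "Access-Control-Allow-Origin" header should appear as well.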
+
# Embedded elasticsearch configuration file
# pay attention to the working directory of the server, maybe use an absolute path here
elasticsearch_config_file = /etc/graylog2-elasticsearch.yml
elasticsearch_max_docs_per_index = 20000000
@@ -58,10 +70,18 @@
elasticsearch_shards = <%= [rubber_instances.for_role('graylog_elasticsearch').size, 1].max %>
elasticsearch_replicas = 0
elasticsearch_index_prefix = graylog2
+# Do you want to allow searches with leading wildcards? This can be extremely resource hungry and should only
+# be enabled with care. See also: http://support.torch.sh/help/kb/graylog2-web-interface/the-search-bar-explained
+allow_leading_wildcard_searches = false
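# (editor's note) A leading-wildcard query is one whose term starts with a wildcard, e.g.
# source:*.example.org; with this left at false such searches are rejected rather than run.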
+
+# Do you want to allow searches to be highlighted? Depending on the size of your messages this can be memory hungry and
+# should only be enabled after making sure your elasticsearch cluster has enough memory.
+allow_highlighting = false
+
# settings to be passed to elasticsearch's client (overriding those in the provided elasticsearch_config_file)
# all these
# this must be the same as for your elasticsearch cluster
elasticsearch_cluster_name = <%= rubber_env.graylog_elasticsearch_index %>
@@ -78,10 +98,17 @@
#elasticsearch_http_enabled = false
elasticsearch_discovery_zen_ping_multicast_enabled = false
elasticsearch_discovery_zen_ping_unicast_hosts = <%= es_servers %>
+# the following settings allow you to change the bind addresses for the elasticsearch client in graylog2
+# these settings are empty by default, letting elasticsearch choose automatically,
+# override them here or in the 'elasticsearch_config_file' if you need to bind to a special address
+# refer to http://www.elasticsearch.org/guide/en/elasticsearch/reference/0.90/modules-network.html for special values here
+# elasticsearch_network_host =
+# elasticsearch_network_bind_host =
+# elasticsearch_network_publish_host =
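# (editor's note) Illustrative override using one of the special values from the elasticsearch guide
# linked above, e.g.: elasticsearch_network_host = _non_loopback:ipv4_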
# Analyzer (tokenizer) to use for message and full_message field. The "standard" filter usually is a good idea.
# All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom
# ElasticSearch documentation: http://www.elasticsearch.org/guide/reference/index-modules/analysis/
# Note that this setting only takes effect on newly created indices.
@@ -115,10 +142,22 @@
# For optimum performance your LogMessage objects in the ring buffer should fit in your CPU L3 cache.
# Start server with --statistics flag to see buffer utilization.
# Must be a power of 2. (512, 1024, 2048, ...)
ring_size = 1024
+# EXPERIMENTAL: Dead Letters
+# Every failed indexing attempt is logged by default and made visible in the web-interface. You can enable
+# the experimental dead letters feature to write every message that was not successfully indexed into the
+# MongoDB "dead_letters" collection to make sure that you never lose a message. The actual writing of dead
+# letters should already work fine, but it is not heavily tested yet and will get more features in future
+# releases.
+dead_letters_enabled = false
+
+# How many seconds to wait between marking the node as DEAD for possible load balancers and starting the actual
+# shutdown process. Set to 0 if you have no status checking load balancers in front.
+lb_recognition_period_seconds = 3
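# (editor's sketch) Status-checking load balancers are expected to poll the REST API's load balancer
# status resource, which in the 0.20 series should be reachable as:
#   curl -i http://localhost:12900/system/lbstatus
# It reports ALIVE while the node is up and DEAD once shutdown begins; the node then waits the
# period configured above before actually stopping.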
+
# MongoDB Configuration
mongodb_useauth = false
#mongodb_user = grayloguser
#mongodb_password = 123
mongodb_host = <%= rubber_instances.for_role('graylog_mongodb').first.full_name %>
@@ -132,50 +171,30 @@
# Number of threads allowed to be blocked by MongoDB connections multiplier. Default: 5
# If mongodb_max_connections is 100, and mongodb_threads_allowed_to_block_multiplier is 5, then 500 threads can block. More than that and an exception will be thrown.
# http://api.mongodb.org/java/current/com/mongodb/MongoOptions.html#threadsAllowedToBlockForConnectionMultiplier
mongodb_threads_allowed_to_block_multiplier = 5
-
# Drools Rule File (Use to rewrite incoming log messages)
# See: http://support.torch.sh/help/kb/graylog2-server/custom-message-rewritingprocessing
# rules_file = /etc/graylog2.drl
# Email transport
-transport_email_enabled = false
+transport_email_enabled = true
transport_email_protocol = smtp
-transport_email_hostname = mail.example.com
-transport_email_port = 587
-transport_email_use_auth = true
-transport_email_use_tls = true
+transport_email_hostname = localhost
+transport_email_port = 25
+transport_email_use_auth = false
+transport_email_use_tls = false
+transport_email_use_ssl = false
transport_email_auth_username = you@example.com
transport_email_auth_password = secret
-transport_email_subject_prefix = [graylog2]
-transport_email_from_email = graylog2@example.com
+transport_email_subject_prefix = <%= Rubber.env == 'production' ? '[graylog2]' : "[graylog2-#{Rubber.env}]" %>
+transport_email_from_email = <%= Rubber.env == 'production' ? 'graylog' : "graylog-#{Rubber.env}" %>@<%= rubber_env.domain %>
transport_email_from_name = Graylog2
-transport_email_web_interface_url = http://your-graylog2.example.org
-# Jabber/XMPP transport
-transport_jabber_enabled = false
-transport_jabber_hostname = jabber.example.com
-transport_jabber_port = 5222
-transport_jabber_use_sasl_auth = true
-transport_jabber_allow_selfsigned_certs = false
-transport_jabber_auth_username = your_user
-transport_jabber_auth_password = secret
-transport_jabber_message_prefix = [graylog2]
+# Specify and uncomment this if you want to include links to the stream in your stream alert mails.
+# This should define the fully qualified base url to your web interface exactly the same way as it is accessed by your users.
+#
+transport_email_web_interface_url = https://graylog-<%= rubber_env.full_host %>:<%= rubber_env.web_tools_ssl_port %>/
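# (editor's note) With illustrative values Rubber.env == "staging" and rubber_env.domain == "example.com",
# the ERB lines above render to something like:
#   transport_email_subject_prefix = [graylog2-staging]
#   transport_email_from_email = graylog-staging@example.com
# while a production deploy keeps [graylog2] and graylog@example.com.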
-# Additional modules
-# Graphite
-#enable_graphite_output = false
-#graphite_carbon_host = 127.0.0.1
-#graphite_carbon_tcp_port = 2003
-#graphite_prefix = logs
-
-# Librato Metrics (http://support.torch.sh/help/kb/graylog2-server/using-librato-metrics-with-graylog2)
-#enable_libratometrics_output = false
-#enable_libratometrics_system_metrics = false
-#libratometrics_api_user = you@example.com
-#libratometrics_api_token = abcdefg12345
-#libratometrics_prefix = gl2-
-#libratometrics_interval = 60
-#libratometrics_stream_filter =
-#libratometrics_host_filter =
+# HTTP proxy for outgoing HTTP calls
+#http_proxy_uri =