<% @path = "/etc/graylog2.conf" @delayed_post = "service graylog-server restart" require 'digest/sha2' root_password_sha2 = (Digest::SHA2.new << rubber_env.graylog_web_password).to_s es_servers = rubber_instances.for_role('graylog_elasticsearch').collect { |i| "#{i.internal_ip}:9300" }.join(',') %> # If you are running more than one instances of graylog2-server you have to select one of these # instances as master. The master will perform some periodical tasks that non-masters won't perform. is_master = true # The auto-generated node ID will be stored in this file and read after restarts. It is a good idea # to use an absolute file path here if you are starting graylog2-server from init scripts or similar. node_id_file = /etc/graylog2-server-node-id # You MUST set a secret to secure/pepper the stored user passwords here. Use at least 64 characters. # Generate one by using for example: pwgen -N 1 -s 96 password_secret = <%= rubber_env.graylog_server_secret %> # the default root user is named 'admin' root_username = <%= rubber_env.graylog_web_username %> # You MUST specify a hash password for the root user (which you only need to initially set up the # system and in case you lose connectivity to your authentication backend) # This password cannot be changed using the API or via the web interface. If you need to change it, # modify it in this file. # Create one by using for example: echo -n yourpassword | shasum -a 256 # and put the resulting hash value into the following line root_password_sha2 = <%= root_password_sha2 %> # Set plugin directory here (relative or absolute) plugin_dir = plugin # REST API listen URI. Must be reachable by other graylog2-server nodes if you run a cluster. rest_listen_uri = http://<%= rubber_instance.internal_ip %>:12900/ # REST API transport address. Defaults to the value of rest_listen_uri. Exception: If rest_listen_uri # is set to a wildcard IP address (0.0.0.0) the first non-loopback IPv4 system address is used. # This will be promoted in the cluster discovery APIs and other nodes may try to connect on this # address. (see rest_listen_uri) rest_transport_uri = http://<%= rubber_instance.internal_ip %>:12900/ # Enable CORS headers for REST api. This is necessary for JS-clients accessing the server directly. # If these are disabled, modern browsers will not be able to retrieve resources from the server. # This is disabled by default. Uncomment the next line to enable it. #rest_enable_cors = true # Enable GZIP support for REST api. This compresses API responses and therefore helps to reduce # overall round trip times. This is disabled by default. Uncomment the next line to enable it. #rest_enable_gzip = true # Embedded Elasticsearch configuration file # pay attention to the working directory of the server, maybe use an absolute path here #elasticsearch_config_file = /etc/graylog2-elasticsearch.yml # (Approximate) maximum number of documents in an Elasticsearch index before a new index # is being created, also see no_retention and elasticsearch_max_number_of_indices. elasticsearch_max_docs_per_index = 20000000 # Disable message retention on this node, i. e. disable Elasticsearch index rotation. #no_retention = false # How many indices do you want to keep? # elasticsearch_max_number_of_indices*elasticsearch_max_docs_per_index=total number of messages in your setup elasticsearch_max_number_of_indices = 20 # Decide what happens with the oldest indices when the maximum number of indices is reached. 
# Decide what happens with the oldest indices when the maximum number of indices is reached.
# The following strategies are available:
#   - delete # Deletes the index completely. (Default)
#   - close  # Closes the index and hides it from the system. Can be re-opened later.
retention_strategy = delete

# How many Elasticsearch shards and replicas should be used per index? Note that this only applies to newly created indices.
elasticsearch_shards = <%= [rubber_instances.for_role('graylog_elasticsearch').size, 1].max %>
elasticsearch_replicas = 0

# Prefix for all Elasticsearch indices and index aliases managed by Graylog2.
elasticsearch_index_prefix = graylog2

# Do you want to allow searches with leading wildcards? This can be extremely resource hungry and should only
# be enabled with care. See also: http://graylog2.org/resources/documentation/general/queries
allow_leading_wildcard_searches = false

# Do you want to allow searches to be highlighted? Depending on the size of your messages, this can be memory hungry and
# should only be enabled after making sure your Elasticsearch cluster has enough memory.
allow_highlighting = false

# Settings to be passed to Elasticsearch's client (overriding those in the provided elasticsearch_config_file).

# This must be the same as for your Elasticsearch cluster.
elasticsearch_cluster_name = <%= rubber_env.graylog_elasticsearch_index %>

# You could also leave this out, but it makes it easier to identify the graylog2 client instance.
elasticsearch_node_name = <%= rubber_env.host %>

# We don't want the graylog2 server to store any data, or to be a master node.
#elasticsearch_node_master = false
#elasticsearch_node_data = false

# Use a different port if you run multiple Elasticsearch nodes on one machine.
#elasticsearch_transport_tcp_port = 9350

# We don't need to run the embedded HTTP server here.
#elasticsearch_http_enabled = false

elasticsearch_discovery_zen_ping_multicast_enabled = false
elasticsearch_discovery_zen_ping_unicast_hosts = <%= es_servers %>
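# For illustration, with two hypothetical Elasticsearch instances at 10.0.0.5 and 10.0.0.6,
# the es_servers expression in the ERB header above renders the previous line as:
#   elasticsearch_discovery_zen_ping_unicast_hosts = 10.0.0.5:9300,10.0.0.6:9300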
# Change the following setting if you are running into problems with timeouts during Elasticsearch cluster discovery.
# The setting is specified in milliseconds; the default is 5000 ms (5 seconds).
# elasticsearch_cluster_discovery_timeout = 5000

# The following settings allow you to change the bind addresses for the Elasticsearch client in graylog2.
# These settings are empty by default, letting Elasticsearch choose automatically.
# Override them here or in the elasticsearch_config_file if you need to bind to a special address.
# Refer to http://www.elasticsearch.org/guide/en/elasticsearch/reference/0.90/modules-network.html
# for special values here.
# elasticsearch_network_host =
# elasticsearch_network_bind_host =
# elasticsearch_network_publish_host =

# The total amount of time discovery will look for other Elasticsearch nodes in the cluster
# before giving up and declaring the current node master.
#elasticsearch_discovery_initial_state_timeout = 3s

# Analyzer (tokenizer) to use for the message and full_message fields. The "standard" analyzer is usually a good idea.
# All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom
# Elasticsearch documentation: http://www.elasticsearch.org/guide/reference/index-modules/analysis/
# Note that this setting only takes effect on newly created indices.
elasticsearch_analyzer = standard

# Batch size for the Elasticsearch output. This is the maximum (!) number of messages the Elasticsearch output
# module will get at once and write to Elasticsearch in a batch call. If the configured batch size has not been
# reached within output_flush_interval seconds, everything that is available will be flushed at once. Remember
# that every outputbuffer processor manages its own batch and performs its own batch write calls.
# ("outputbuffer_processors" variable)
output_batch_size = 25

# Flush interval (in seconds) for the Elasticsearch output. This is the maximum amount of time between two
# batches of messages written to Elasticsearch. It is only effective at all if your minimum number of messages
# for this time period is less than output_batch_size * outputbuffer_processors.
output_flush_interval = 1
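# For example, with the defaults above and outputbuffer_processors = 3 (set below),
# up to output_batch_size * outputbuffer_processors = 25 * 3 = 75 messages
# can be written to Elasticsearch per flush interval.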
# The number of parallel running processors.
# Raise this number if your buffers are filling up.
processbuffer_processors = 5
outputbuffer_processors = 3

#outputbuffer_processor_keep_alive_time = 5000
#outputbuffer_processor_threads_core_pool_size = 3
#outputbuffer_processor_threads_max_pool_size = 30

# UDP receive buffer size for all message inputs (e.g. SyslogUDPInput).
#udp_recvbuffer_sizes = 1048576

# Wait strategy describing how buffer processors wait on a cursor sequence. (Default: sleeping)
# Possible types:
#   - yielding      # Compromise between performance and CPU usage.
#   - sleeping      # Compromise between performance and CPU usage. Latency spikes can occur after quiet periods.
#   - blocking      # High throughput, low latency, higher CPU usage.
#   - busy_spinning # Avoids syscalls that could introduce latency jitter. Best when threads can be bound to specific CPU cores.
processor_wait_strategy = blocking

# Size of internal ring buffers. Raise this if raising outputbuffer_processors does not help anymore.
# For optimum performance your LogMessage objects in the ring buffer should fit in your CPU L3 cache.
# Start the server with the --statistics flag to see buffer utilization.
# Must be a power of 2. (512, 1024, 2048, ...)
ring_size = 1024

# EXPERIMENTAL: Dead Letters
# Every failed indexing attempt is logged by default and made visible in the web interface. You can enable
# the experimental dead letters feature to write every message that was not successfully indexed into the
# MongoDB "dead_letters" collection to make sure that you never lose a message. The actual writing of dead
# letters should work fine already, but it is not heavily tested yet and will get more features in future
# releases.
dead_letters_enabled = false

# How many seconds to wait between marking the node as DEAD for possible load balancers and starting the actual
# shutdown process. Set to 0 if you have no status-checking load balancers in front.
lb_recognition_period_seconds = 3

# Every message is matched against the configured streams, and it can happen that a stream contains rules that
# take an unusual amount of time to run, for example if they use regular expressions that perform excessive backtracking.
# This will impact the processing of the entire server. To keep such misbehaving stream rules from impacting other
# streams, Graylog2 limits the execution time for each stream.
# The default values are noted below; the timeout is in milliseconds.
# If the stream matching for one stream took longer than the timeout value, and this happened more than "max_faults" times,
# that stream is disabled and a notification is shown in the web interface.
# stream_processing_timeout = 2000
# stream_processing_max_faults = 3

# Since 0.21 the graylog2 server supports pluggable output modules. This means a single message can be written to multiple
# outputs. The next setting defines the timeout for a single output module, including the default output module where all
# messages end up. This setting is specified in milliseconds.

# Time in milliseconds to wait for all message outputs to finish writing a single message.
#output_module_timeout = 10000

# Time in milliseconds after which a detected stale master node is rechecked on startup.
#stale_master_timeout = 2000

# Time in milliseconds that Graylog2 waits for all threads to stop on shutdown.
#shutdown_timeout = 30000

# MongoDB configuration
mongodb_useauth = false
#mongodb_user = grayloguser
#mongodb_password = 123
mongodb_host = <%= rubber_instances.for_role('graylog_mongodb').first.full_name %>
#mongodb_replica_set = localhost:27017,localhost:27018,localhost:27019
mongodb_database = <%= rubber_env.graylog_mongo_database %>
mongodb_port = 27017

# Raise this according to the maximum connections your MongoDB server can handle if you encounter MongoDB connection problems.
mongodb_max_connections = 100

# Number of threads allowed to be blocked by MongoDB connections multiplier. Default: 5
# If mongodb_max_connections is 100, and mongodb_threads_allowed_to_block_multiplier is 5, then 500 threads can block. More than that and an exception will be thrown.
# http://api.mongodb.org/java/current/com/mongodb/MongoOptions.html#threadsAllowedToBlockForConnectionMultiplier
mongodb_threads_allowed_to_block_multiplier = 5

# Drools rule file (use to rewrite incoming log messages).
# See: http://graylog2.org/resources/documentation/general/rewriting
#rules_file = /etc/graylog2.drl

# Email transport
transport_email_enabled = true
transport_email_protocol = smtp
transport_email_hostname = localhost
transport_email_port = 25
transport_email_use_auth = false
transport_email_use_tls = false
transport_email_use_ssl = false
transport_email_auth_username = you@example.com
transport_email_auth_password = secret
transport_email_subject_prefix = <%= Rubber.env == 'production' ? '[graylog2]' : "[graylog2-#{Rubber.env}]" %>
transport_email_from_email = <%= Rubber.env == 'production' ? 'graylog' : "graylog-#{Rubber.env}" %>@<%= rubber_env.domain %>
transport_email_from_name = Graylog2

# Specify and uncomment this if you want to include links to the stream in your stream alert mails.
# This should define the fully qualified base URL of your web interface, exactly the same way it is accessed by your users.
# transport_email_web_interface_url = https://graylog-<%= rubber_env.full_host %>:<%= rubber_env.web_tools_ssl_port %>/
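# For illustration, in a hypothetical 'staging' environment with domain example.com,
# the ERB expressions above render to:
#   transport_email_subject_prefix = [graylog2-staging]
#   transport_email_from_email = graylog-staging@example.com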
# HTTP proxy for outgoing HTTP calls.
#http_proxy_uri =

# Switch to enable/disable the off-heap message cache. Stores cached messages in the spool directory if set to true.
# Stores the messages in an in-memory data structure if set to false.
#message_cache_off_heap = true

# Directory for the off-heap message cache data. (absolute or relative)
#message_cache_spool_dir = spool

# The commit interval for the message cache in milliseconds. Only affects message cache implementations that need to commit data.
#message_cache_commit_interval = 1000

# When more messages are coming in than we can process, incoming messages will be cached in memory until
# they are ready to be processed. By default this data structure is unbounded, so under sustained high
# load it will grow until all allocatable memory has been consumed and the process dies.
# To prevent this, the next setting allows you to define an upper bound for this memory cache: if the
# number of messages in the cache has reached this limit, incoming messages will be dropped until it
# has shrunk again.
#
# The default is 0, which means no upper bound.
#
#input_cache_max_size = 0

# Connection timeout for a configured LDAP server (e.g. Active Directory) in milliseconds.
#ldap_connection_timeout = 2000

# Version check settings. All timeouts are in milliseconds.
#versionchecks = true
#versionchecks_uri = http://versioncheck.torch.sh/check
#versionchecks_connection_request_timeout = 10000
#versionchecks_connect_timeout = 10000
#versionchecks_socket_timeout = 10000

# https://github.com/bazhenov/groovy-shell-server
#groovy_shell_enable = false
#groovy_shell_port = 6789

# Enable collection of Graylog2-related metrics into MongoDB.
#enable_metrics_collection = false
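# Note: changes to this file only take effect after a server restart. When this template is
# rendered by rubber, the @delayed_post hook in the ERB header above runs the restart for you:
#   service graylog-server restart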