#===============================================================================
# BROKER
#===============================================================================
# Description: The broker is responsible for:
# - Exporting centralized logs of all Shinken daemon processes
# - Exporting status data
# - Exporting performance data
# - Exposing Shinken APIs:
# - Status data
# - Performance data
# - Command interface
#===============================================================================
define broker {
#======== Daemon name and address =========
# Daemon name. Must be unique
broker_name broker-1
# IP/fqdn of this daemon (note: you MUST change it by the real ip/fqdn of this server)
address node1.mydomain
# Port (HTTP/HTTPS) exposed by this daemon
port 7772
# 0 = use HTTP, 1 = use HTTPS
use_ssl 0
#======== Master or spare selection =========
# 1 = is a spare, 0 = is not a spare
spare 0
#======== Daemon connection timeout and down state limit =========
# timeout: how many seconds to consider a node don't answer
timeout 3
# data_timeout: how many seconds to consider a configuration transfer to be failed
# because the network bandwidth is too small.
data_timeout 120
# max_check_attempts: how many fail check to consider this daemon as DEAD
max_check_attempts 3
# Check this daemon every X seconds
check_interval 60
#======== Modules to enable for this daemon =========
# Available:
# - Simple-log : save all logs into a common file
# - WebUI : visualisation interface
# - Graphite-Perfdata : save all metrics into a graphite database
# - sla : save sla into a database
# - Livestatus : TCP API to query element state, used by nagios external tools like NagVis or Thruk
modules Simple-log, WebUI, Graphite-Perfdata, sla
#======== Realm and architecture settings =========
# Realm to set this daemon into
realm All
# 1 = take data from the daemon realm and its sub realms
# 0 = take data only from the daemon realm
manage_sub_realms 1
# If enabled, then this broker will receive data (logs and) from the arbiter
manage_arbiters 1
# In NATted environments, you declare each satellite ip[:port] as seen by
# *this* broker (if port not set, the port declared by satellite itself
# is used)
#satellitemap scheduler-1=1.2.3.4:7768, poller-1=1.2.3.5:7771
#======== Memory protection =========
# Are the daemon module process and worker process are waiting for enough
# memory to be available before being launch. Default: 1 (enabled)
broker__manage_brok__enable_sub_processes_memory_usage_protection 1
# The sub process memory usage protection can have a system reserved memory
# that won't be used by these sub processes when launched
# By default: 0 (no reserved memory)
# Example: 10 (means 10% of the total memory is reserved for the system)
broker__manage_brok__sub_process_memory_usage_system_reserved_memory 0
# If a sub process cannot be started because of the protection, how many seconds
# it will be retried, waiting for the system memory to be freed, until it fails to start
# By default: 5 (seconds)
broker__manage_brok__sub_processes_memory_usage_protection_max_retry_time 5
#======== Brok pusher worker =========
# The broker spawns broks pusher sub processes to push to external modules (like WebUI)
# the broker will look at this worker execution time, and will kill it if it timeouts
# The broker will compute the average execution time of previous workers to
# decide about how much time this worker will take based on:
# number of broks to send / past average send speed (broks/s)
# If this time is reached, it means that the pusher process is killed
# For small amounts of broks to send, it could lead to a ridiculously small allowed execution time
# and the time to spawn the sub process can be higher than this value, so we are using a minimal
# execution timeout
# Default: 5 (seconds)
broker__manage_brok__sub_process_broks_pusher_min_execution_timeout 5
# In order to manage the fact that the server can slow down during this send, you can setup a
# ratio that will be used to increase the allowed timeout by multiplying it
# Default: 5
broker__manage_brok__sub_process_broks_pusher_security_ratio 5
# At the broker start, without stats, this value will be used for the timeout
# Default: 240 (seconds)
broker__manage_brok__sub_process_broks_pusher_max_execution_timeout 240
# If a sub process reaches a timeout, it will be killed and relaunched. After max retry,
# the attached module will be restarted
# Default: 3
broker__manage_brok__sub_process_broks_pusher_max_retry 3
#======== Enable or not this daemon =========
# 1 = is enabled, 0 = is disabled
enabled 1
}

# NOTE(review): the definition below appears to be a second revision of the same
# broker configuration (Shinken Enterprise variant) that was accidentally merged
# into this file. Both definitions bind port 7772 — keep only one of the two in
# production; confirm with the deployment owner which revision is current.
define broker {
#======== Daemon name and address =========
# Daemon name. Must be unique
broker_name broker-master
# IP/fqdn of this daemon (note: you MUST change it by the real ip/fqdn of this server)
address localhost
# Port (HTTP/HTTPS) exposed by this daemon
port 7772
# 0 = use HTTP, 1 = use HTTPS
use_ssl 0
#======== Master or spare selection =========
# 1 = is a spare, 0 = is not a spare
spare 0
# spare_daemon: name of the daemon that will take this daemon job if it dies
# IMPORTANT:
# * a spare_daemon can only be the spare of 1 (and only one) master daemon
# * a spare_daemon cannot have a spare_daemon
# * the spare must have modules with the same module_type as the master
#   depending of the value of the spare_must_have_the_same_list_of_module_type parameter
# Example:
# spare_daemon broker-spare
# 1 = (default) the spare defined with spare_daemon must have the same module_type as this master
# 0 = the spare module_type are not checked
# spare_must_have_the_same_list_of_module_type 1
#======== Daemon connection timeout and down state limit =========
# timeout: how many seconds to consider a node don't answer
timeout 3
# data_timeout: how many seconds to consider a configuration transfer to be failed
# because the network bandwidth is too small.
data_timeout 120
# max_check_attempts: how many fail check to consider this daemon as DEAD
max_check_attempts 3
# Check this daemon every X seconds
check_interval 60
#======== Modules to enable for this daemon =========
# Available:
# - WebUI : Visualisation interface
# - Graphite-Perfdata : Save all metrics into a graphite database
# - sla : Save sla into a database
# - Livestatus : TCP API to query element state, used by nagios external tools like NagVis or Thruk
# - broker-module-livedata : REST API to query all monitored element data (host, cluster or check)
# - event-manager-writer : Save events for events manager (do not forget to activate the module in your webui to see data)
# - Simple-log : Save all logs into a common file. Use this module only if you need to have all the check results in one file.
modules WebUI, Graphite-Perfdata, sla, event-manager-writer
#======== Realm and architecture settings =========
# Realm to set this daemon into
realm All
# 1 = take data from the daemon realm and its sub realms
# 0 = take data only from the daemon realm
manage_sub_realms 1
# In NATted environments, you declare each satellite ip[:port] as seen by
# *this* daemon (if port not set, the port declared by satellite itself
# is used)
#satellitemap scheduler-1=1.2.3.4:7768, scheduler-2=1.2.3.5:7771
# Exchange between brokers <- schedulers can be limited by packet size (in kB)
# Note: as compression is automatic, this is a higher limit, and in real case the
# packets will be lower than this value
# broks_packet_size 1024
#======== Memory protection =========
# Are the daemon module process and worker process are waiting for enough
# memory to be available before being launch. Default: 1 (enabled)
broker__manage_brok__enable_sub_processes_memory_usage_protection 1
# The sub process memory usage protection can have a system reserved memory
# that won't be used by these sub processes when launched
# By default: 0 (no reserved memory)
# Example: 10 (means 10% of the total memory is reserved for the system)
broker__manage_brok__sub_process_memory_usage_system_reserved_memory 0
# If a sub process cannot be started because of the protection, how many seconds
# it will be retried, waiting for the system memory to be freed, until it fails to start
# By default: 5 (seconds)
broker__manage_brok__sub_processes_memory_usage_protection_max_retry_time 5
#======== Brok pusher worker =========
# The broker spawns broks pusher sub processes to push to external modules (like WebUI)
# the broker will look at this worker execution time, and will kill it if it timeouts
# The broker will compute the average execution time of previous workers to
# decide about how much time this worker will take based on:
# number of broks to send / past average send speed (broks/s)
# If this time is reached, it means that the pusher process is killed
# For small amounts of broks to send, it could lead to a ridiculously small allowed execution time
# and the time to spawn the sub process can be higher than this value, so we are using a minimal
# execution timeout
# Default: 5 (seconds)
broker__manage_brok__sub_process_broks_pusher_min_execution_timeout 5
# In order to manage the fact that the server can slow down during this send, you can setup a
# ratio that will be used to increase the allowed timeout by multiplying it
# Default: 5
broker__manage_brok__sub_process_broks_pusher_security_ratio 5
# At the broker start, without stats, this value will be used for the timeout
# Default: 240 (seconds)
broker__manage_brok__sub_process_broks_pusher_max_execution_timeout 240
# If a sub process reaches a timeout, it will be killed and relaunched. After max retry,
# the attached module will be restarted
# Default: 3
broker__manage_brok__sub_process_broks_pusher_max_retry 3
# broker__manage_brok__sub_process_broks_pusher_queue_batch_size:
# * defines the maximum number of broks the "queue brok pusher"
#   process will handle per send to external module (like WebUI).
# * Remaining broks will be handled in next send.
# * IMPORTANT: increasing this value can lead to errors on the socket
# Default: 100000 (broks/batch)
# broker__manage_brok__sub_process_broks_pusher_queue_batch_size 100000
# Broks whose serialization time exceeds this threshold will generate a warning
# Default: 100 (milliseconds)
# broker__manage_brok__oversized_data_warning_threshold__serialization_time 100
# Broks whose serialization time exceeds this threshold will generate an error
# Default: 500 (milliseconds)
# broker__manage_brok__oversized_data_error_threshold__serialization_time 500
#======== VMWare / ESXi ==========
# 1 (default) = if vmware get the ESXi CPU stats value, 0 = do not get value
vmware__statistics_compute_enable 1
#======== Enable or not this daemon =========
# 1 = is enabled, 0 = is disabled
enabled 1
}