#===============================================================================
# BROKER
#===============================================================================
# Description: The Broker is responsible for:
# - Exporting centralized logs of all Shinken daemon processes
# - Exporting status data
# - Exporting performance data
# - Exposing Shinken APIs:
# - Status data
# - Performance data
# - Command interface
#===============================================================================
define broker {
    # Shinken Enterprise. Lines added by import core. Do not remove it, it's used by Shinken Enterprise to update your objects if you re-import them.
    _SE_UUID            core-broker-060340145ade11e5b703080027f08538
    _SE_UUID_HASH       8e00136f9e61061e07ca0f4a63509b68
    # End of Shinken Enterprise part

    #======== Daemon name and address =========
    # Daemon name. Must be unique
    broker_name         broker-master

    # IP/fqdn of this daemon (note: you MUST change it by the real ip/fqdn of this server)
    address             localhost

    # Port (HTTP/HTTPS) exposed by this daemon
    port                7772

    # 0 = use HTTP, 1 = use HTTPS
    use_ssl             0

    #======== Master or spare selection =========
    # 1 = is a spare, 0 = is not a spare
    spare               0

    # spare_daemon: name of the daemon that will take this daemon's job if it dies
    # IMPORTANT:
    #   * a spare_daemon can only be the spare of 1 (and only one) master daemon
    #   * a spare_daemon cannot have a spare_daemon
    #   * the spare must have modules with the same module_type as the master
    #     - depending on the value of the broker__manage_spare__spare_must_have_the_same_list_of_module_type parameter
    # Example: spare_daemon broker-spare
    spare_daemon

    # 1 = (default) the spare defined with spare_daemon must have the same module_type as this master
    # 0 = the spare module_type are not checked
    # broker__manage_spare__spare_must_have_the_same_list_of_module_type 1

    #======== Daemon connection timeout and down state limit =========
    # timeout: how many seconds to consider that a node doesn't answer
    timeout             3

    # data_timeout: how many seconds to consider a configuration transfer to be failed
    # because the network bandwidth is too small.
    data_timeout        120

    # max_check_attempts: how many failed checks to consider this daemon as DEAD
    max_check_attempts  3

    # Check this daemon every X seconds
    check_interval      60

    #======== Modules to enable for this daemon =========
    # Available:
    #   - WebUI                  : Visualisation interface
    #   - Graphite-Perfdata      : Save all metrics into a graphite database
    #   - sla                    : Save sla into a database
    #   - Livestatus             : TCP API to query element state, used by nagios external tools like NagVis or Thruk
    #   - broker-module-livedata : REST API to query all monitored element data (host, cluster or check)
    #   - event-manager-writer   : Save events for events manager (do not forget to activate the module in your webui to see data)
    #   - Simple-log             : Save all logs into a common file. Use this module only if you need to have all the check results in one file.
    modules             WebUI, Graphite-Perfdata, sla, event-manager-writer

    #======== Realm and architecture settings =========
    # Realm to set this daemon into
    realm               All

    # 1 = take data from the daemon realm and its sub realms
    # 0 = take data only from the daemon realm
    manage_sub_realms   1

    # In NATted environments, you declare each satellite ip[:port] as seen by
    # *this* daemon (if port not set, the port declared by the satellite itself is used)
    #satellitemap        scheduler-1=1.2.3.4:7768, scheduler-2=1.2.3.5:7771

    # Exchange between brokers <- schedulers can be limited by packet size (in kB)
    # Note: as compression is automatic, this is a higher limit, and in real cases the
    # packets will be lower than this value
    # broks_packet_size 1024

    #======== Memory protection =========
    # Whether the daemon module process and worker process wait for enough
    # memory to be available before being launched. Default: 1 (enabled)
    broker__manage_brok__enable_sub_processes_memory_usage_protection 1

    # The sub process memory usage protection can have a system reserved memory
    # that won't be used by these sub processes when launched
    # By default: 0 (no reserved memory)
    # Example: 10 (means 10% of the total memory is reserved for the system)
    broker__manage_brok__sub_process_memory_usage_system_reserved_memory 0

    # If a sub process cannot be started because of the protection, how many seconds
    # it will be retried, waiting for the system memory to be freed, until it fails to start
    # By default: 5 (seconds)
    broker__manage_brok__sub_processes_memory_usage_protection_max_retry_time 5

    #======== Brok pusher worker =========
    # The broker spawns broks pusher sub processes to push to external modules (like WebUI).
    # The broker will look at this worker's execution time, and will kill it if it timeouts.
    # The broker will compute the average execution time of previous workers to
    # decide about how much time this worker will take, based on:
    #   number of broks to send / past average send speed (broks/s)
    # If this time is reached, it means that the pusher process is killed.
    # For a small amount of broks to send, it could lead to a ridiculously small allowed execution time
    # and the time to spawn the sub process can be higher than this value, so we are using a minimal
    # execution timeout
    # Default: 5 (seconds)
    broker__manage_brok__sub_process_broks_pusher_min_execution_timeout 5

    # In order to manage the fact that the server can slow down during this send, you can setup a
    # ratio that will be used to increase the allowed timeout by multiplying it
    # Default: 5
    broker__manage_brok__sub_process_broks_pusher_security_ratio 5

    # At the broker start, without stats, this value will be used for the timeout
    # Default: 240 (seconds)
    broker__manage_brok__sub_process_broks_pusher_max_execution_timeout 240

    # If a sub process reaches a timeout, it will be killed and relaunched. After max retry,
    # the attached module will be restarted
    # Default: 3
    broker__manage_brok__sub_process_broks_pusher_max_retry 3

    # broker__manage_brok__sub_process_broks_pusher_queue_batch_size:
    #   * defines the maximum number of broks the "queue brok pusher"
    #     process will handle per send to an external module (like WebUI).
    #   * Remaining broks will be handled in the next send.
    #   * IMPORTANT: increasing this value can lead to errors on the socket
    # Default: 100000 (broks/batch)
    # broker__manage_brok__sub_process_broks_pusher_queue_batch_size 100000

    # Broks whose serialization time exceeds this threshold will generate a warning
    # Default: 100 (milliseconds)
    # broker__manage_brok__oversized_data_warning_threshold__serialization_time 100

    # Broks whose serialization time exceeds this threshold will generate an error
    # Default: 500 (milliseconds)
    # broker__manage_brok__oversized_data_error_threshold__serialization_time 500

    #======== VMWare / ESXi ==========
    # 1 (default) = vmware gets the ESXi CPU stats value, 0 = do not get the value
    vmware__statistics_compute_enable 1

    #======== Enable or not this daemon =========
    # 1 = is enabled, 0 = is disabled
    enabled             1
}