#===============================================================================
# BROKER
#===============================================================================
# Description: The Broker is responsible for:
# - Exporting centralized logs of all Shinken daemon processes
# - Exporting status data
# - Exporting performance data
# - Exposing Shinken APIs:
# - Status data
# - Performance data
# - Command interface
#===============================================================================
define broker {
# Shinken Enterprise. Lines added by import core. Do not remove it, it's used by Shinken Enterprise to update your objects if you re-import them.
_SE_UUID core-broker-060340145ade11e5b703080027f08538
_SE_UUID_HASH 8e00136f9e61061e07ca0f4a63509b68
# End of Shinken Enterprise part

#======== Daemon name and address =========
# Daemon name. Must be unique
broker_name broker-master

# IP/fqdn of this daemon (note: you MUST change it by the real ip/fqdn of this server)
address localhost

# Port (HTTP/HTTPS) exposed by this daemon
port 7772

# 0 = use HTTP, 1 = use HTTPS
use_ssl 0

#======== Master or spare selection =========
# 1 = is a spare, 0 = is not a spare
spare 0

# spare_daemon: name of the daemon that will take this daemon job if it dies
# IMPORTANT:
# * a spare_daemon can only be the spare of 1 (and only one) master daemon
# * a spare_daemon cannot have a spare_daemon
# * the spare must have modules with the same module_type as the master
#   - depending of the value of the broker__manage_spare__spare_must_have_the_same_list_of_module_type parameter
# Example: spare_daemon broker-spare
#spare_daemon

# 1 = (default) the spare defined with spare_daemon must have the same module_type as this master
# 0 = the spare module_type are not checked
broker__manage_spare__spare_must_have_the_same_list_of_module_type 1

#======== Daemon connection timeout and down state limit =========
# timeout: how many seconds to consider a node don't answer
timeout 3

# data_timeout: how many second to consider a configuration transfer to be failed
# because the network bandwidth is too small.
data_timeout 120

# max_check_attempts: how many fail check to consider this daemon as DEAD
max_check_attempts 3

# Check this daemon every X seconds
check_interval 60

#======== Modules to enable for this daemon =========
# Available:
# - WebUI : Visualisation interface
# - Graphite-Perfdata : Save all metrics into a graphite database
# - sla : Save sla into a database
# - Livestatus : TCP API to query element state, used by nagios external tools like NagVis or Thruk
# - broker-module-livedata : REST API to query all monitored element data (host, cluster or check)
# - event-manager-writer : Save events for events manager (do not forget to activate the module in your webui to see data)
# - Simple-log : Save all logs into a common file, Use this module only if you need to have all the check results in one file.
# - broker-module-report-builder : External module used by webui-module-report-handler to generate reports
modules WebUI, Graphite-Perfdata, sla, event-manager-writer

#======== Realm and architecture settings =========
# Realm to set this daemon into
realm All

# 1 = take data from the daemon realm and its sub realms
# 0 = take data only from the daemon realm
manage_sub_realms 1

# In NATted environments, you declare each satellite ip[:port] as seen by
# *this* daemon (if port not set, the port declared by satellite itself
# is used)
#satellitemap scheduler-1=1.2.3.4:7768, scheduler-2=1.2.3.5:7771

# Exchange between brokers <- schedulers can be limited by packet size (in kB)
# Note: as compression is automatic, this is a higher limit, and in real case the
# packets will be lower than this value
# broks_packet_size 1024

#======== Memory protection =========
# Are the daemon module process and worker process are waiting for enough
# memory to be available before being launch. Default: 1 (enabled)
broker__manage_brok__enable_sub_processes_memory_usage_protection 1

# The sub process memory usage protection can have a system reserved memory
# that won't be used by these sub processes when launched
# By default: 0 (no reserved memory)
# Example: 10 (means 10% of the total memory is reserved for the system)
broker__manage_brok__sub_process_memory_usage_system_reserved_memory 0

# If a sub process cannot be started because of the protection, how many seconds
# it will be retry and wait that the system memory is freed until it fail to start
# By default: 5 (seconds)
broker__manage_brok__sub_processes_memory_usage_protection_max_retry_time 5

#======== Brok pusher worker =========
# The broker spawn broks pusher sub process to push to external modules (like WebUI)
# the broker will look at this worker execution time, and will kill it if it timeout
# The broker will compute the average execution time of previous workers to
# decide about how many time this worker will take based on:
#   number of broks to send / past average send speed (broks/s)
# If this time is reach, it means that the pusher process is killed
# For small amount of broks to send, it should lead to ridiculously small allowed execution time
# and the fact to spawn the sub process can be higher than this value, so we are using a minimal
# execution timeout
# Default: 5 (second)
broker__manage_brok__sub_process_broks_pusher_min_execution_timeout 5

# In order to manage the fact that the server can slow down during this send, you can setup a
# ratio that will be used to increase the allowed timeout by multiply it
# Default: 5
broker__manage_brok__sub_process_broks_pusher_security_ratio 5

# At the broker start without stats, this value will be used for the timeout
# Default: 240 (seconds)
broker__manage_brok__sub_process_broks_pusher_max_execution_timeout 240

# If a sub process reach a timeout, it will be killed and relaunched. After max retry,
# the attached module will be restarted
# Default: 3
broker__manage_brok__sub_process_broks_pusher_max_retry 3

# broker__manage_brok__sub_process_broks_pusher_queue_batch_size:
# * defines the maximum number of broks the "queue brok pusher"
#   process will handle per send to external module ( like WebUI ).
# * Remaining broks will be handled in next send.
# * IMPORTANT: increase this value can lead to error on the socket
# Default: 100000 (broks/batch)
# broker__manage_brok__sub_process_broks_pusher_queue_batch_size 100000

# Broks whose serialization time exceeds this threshold will generate a warning
# Default: 100 (milliseconds)
# broker__manage_brok__oversized_data__warning_threshold__serialization_time 100

# Broks whose serialization time exceeds this threshold will generate an error
# Default: 500 (milliseconds)
# broker__manage_brok__oversized_data__error_threshold__serialization_time 500

#======== VMWare / ESXi ==========
# 1 (default) = if vmware get the ESXi CPU stats value, 0 = do not get value
vmware__statistics_compute_enable 1

#======== Enable or not this daemon =========
# 1 = is enabled, 0 = is disabled
enabled 1
}