diff --git a/build/exadata/SPECS/hc-exadata-platform.spec b/build/exadata/SPECS/hc-exadata-platform.spec new file mode 100644 index 0000000..0709388 --- /dev/null +++ b/build/exadata/SPECS/hc-exadata-platform.spec @@ -0,0 +1,90 @@ +%define build_timestamp %(date +"%Y%m%d") + +Name: hc-exadata-platform +Version: %{build_timestamp} +Release: 1 + +Summary: The KUDOS Health Checker (HC) for UNIX (platform plugins) +Group: Tools/Monitoring + +License: GNU General Public License either version 2 of the License, or (at your option) any later version +URL: http://www.kudos.be + +Requires: ksh,hc-linux +BuildArch: noarch +BuildRoot: %{_topdir}/%{name}-%{version}-root + +%description +The Health Checker is a collection of scripts (plugins) designed to perform regular - but not intensive - health checks on UNIX/Linux systems. It provides plugins for AIX, HP-UX and Linux as well as customer specific checks. Checks may include topics such as file system mounts, process checks, file consistency etc. +This package contains platform/OS specific plugins. 
+ +%prep + +%build + +%install +rm -rf $RPM_BUILD_ROOT +install -d -m 755 $RPM_BUILD_ROOT/opt/hc/lib +install -d -m 755 $RPM_BUILD_ROOT/opt/hc/lib/platform +install -d -m 755 $RPM_BUILD_ROOT/opt/hc/lib/platform/exadata +cp ../SOURCES/lib/platform/exadata/check_exadata_zfs_logs.sh $RPM_BUILD_ROOT/opt/hc/lib/platform/exadata/check_exadata_zfs_logs.sh +cp ../SOURCES/lib/platform/exadata/check_exadata_zfs_services.sh $RPM_BUILD_ROOT/opt/hc/lib/platform/exadata/check_exadata_zfs_services.sh +cp ../SOURCES/lib/platform/exadata/check_exadata_zfs_share_replication.sh $RPM_BUILD_ROOT/opt/hc/lib/platform/exadata/check_exadata_zfs_share_replication.sh +cp ../SOURCES/lib/platform/exadata/check_exadata_zfs_share_usage.sh $RPM_BUILD_ROOT/opt/hc/lib/platform/exadata/check_exadata_zfs_share_usage.sh + +install -d -m 755 $RPM_BUILD_ROOT/etc/opt/hc +cp ../SOURCES/etc/check_exadata_zfs_logs.conf.dist $RPM_BUILD_ROOT/etc/opt/hc/check_exadata_zfs_logs.conf.dist +cp ../SOURCES/etc/check_exadata_zfs_services.conf.dist $RPM_BUILD_ROOT/etc/opt/hc/check_exadata_zfs_services.conf.dist +cp ../SOURCES/etc/check_exadata_zfs_share_replication.conf.dist $RPM_BUILD_ROOT/etc/opt/hc/check_exadata_zfs_share_replication.conf.dist +cp ../SOURCES/etc/check_exadata_zfs_share_usage.conf.dist $RPM_BUILD_ROOT/etc/opt/hc/check_exadata_zfs_share_usage.conf.dist + + +%post +# ------------------------- CONFIGURATION starts here ------------------------- +# location of check_health.sh +HC_BIN="/opt/hc/bin/check_health.sh" +PATH="$PATH:/usr/bin:/etc:/usr/sbin:/usr/ucb:/usr/bin/X11:/sbin" +# ------------------------- CONFIGURATION ends here --------------------------- +echo "INFO: starting post-install script ..." +# refresh symbolic FPATH links +if [[ -x ${HC_BIN} ]] +then + ${HC_BIN} --fix-symlinks + (( $? 
== 0 )) || echo "WARN: updating symlinks failed" +fi +echo "INFO: finished post-install script" + +%postun +# ------------------------- CONFIGURATION starts here ------------------------- +# location of check_health.sh +HC_BIN="/opt/hc/bin/check_health.sh" +PATH="$PATH:/usr/bin:/etc:/usr/sbin:/usr/ucb:/usr/bin/X11:/sbin" +# ------------------------- CONFIGURATION ends here --------------------------- +echo "INFO: starting post-uninstall script ..." +# refresh symbolic FPATH links +if [[ -x ${HC_BIN} ]] +then + ${HC_BIN} --fix-symlinks + (( $? == 0 )) || echo "WARN: updating symlinks failed" +fi +echo "INFO: finished post-uninstall script" + +%files +%defattr(-,root,root,755) +%dir /opt/hc/lib +%dir /opt/hc/lib/platform +%dir /opt/hc/lib/platform/exadata +%attr(755, root, root) /opt/hc/lib/platform/exadata/check_exadata_zfs_logs.sh +%attr(755, root, root) /opt/hc/lib/platform/exadata/check_exadata_zfs_services.sh +%attr(755, root, root) /opt/hc/lib/platform/exadata/check_exadata_zfs_share_replication.sh +%attr(755, root, root) /opt/hc/lib/platform/exadata/check_exadata_zfs_share_usage.sh +%dir /etc/opt/hc +%attr(644, root, root) /etc/opt/hc/check_exadata_zfs_logs.conf.dist +%attr(644, root, root) /etc/opt/hc/check_exadata_zfs_services.conf.dist +%attr(644, root, root) /etc/opt/hc/check_exadata_zfs_share_replication.conf.dist +%attr(644, root, root) /etc/opt/hc/check_exadata_zfs_share_usage.conf.dist + + +%changelog +* Mon Feb 18 2019 - 0.0.1 +- Initial build diff --git a/build/exadata/hc_build_linux_rpms.sh b/build/exadata/hc_build_linux_rpms.sh new file mode 100644 index 0000000..7ff43a8 --- /dev/null +++ b/build/exadata/hc_build_linux_rpms.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env ksh +#****************************************************************************** +# @(#) build script for HC RPM packages (uses 'rpmbuild') +#****************************************************************************** +# @(#) Copyright (C) 2014 by KUDOS BVBA (info@kudos.be). All rights reserved. 
+# +# This program is a free software; you can redistribute it and/or modify +# it under the same terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details +#****************************************************************************** + +#****************************************************************************** +# Requires following build (dir) structures: +# +# hc_build_linux_rpms.sh +# BUILD/ +# BUILDROOT/ +# RPMS/ +# SOURCES/ +# SOURCES/bin/ +# SOURCES/lib/*/ +# SPECS/ +# SRPMS/ +# +# Build order: +# 1) Copy sources/scripts to the correct locations +# 2) Copy template, build and installer script files into correct locations +# 3) Execute hc_build_linux_rpms.sh +# 4) RPM packages may be found in the RPMS directory +#****************************************************************************** + +BUILD_DIR="$(dirname $0)" + +# clean up previous packages +rm -f ${BUILD_DIR}/RPMS/*/* >/dev/null + +# build main packages +rpmbuild -bb ${BUILD_DIR}/SPECS/hc-exadata-platform.spec + +print "List of built packages:" +ls -l ${BUILD_DIR}/RPMS/*/* + +exit 0 + +#****************************************************************************** +# END of script +#****************************************************************************** diff --git a/configs/etc/check_exadata_zfs_logs.conf.dist b/configs/etc/check_exadata_zfs_logs.conf.dist new file mode 100644 index 0000000..9960c12 --- /dev/null +++ b/configs/etc/check_exadata_zfs_logs.conf.dist @@ -0,0 +1,44 @@ +#****************************************************************************** +# @(#) check_exadata_zfs_logs.conf 
+#****************************************************************************** +# This is a configuration file for the check_exadata_zfs_logs HC plugin. +# All lines starting with a '#' are comment lines. +# [default: indicates hardcoded script values if no value is defined here] +#****************************************************************************** + +# specify whether to also log passed health checks +# (warning: this may rapidly grow the HC log) +# [default: no] +log_healthy="yes" + +# specify the user account for the SSH session to the ZFS appliance(s) +# [default: root] +ssh_user="" + +# specify the private key file for the SSH session to the ZFS appliance(s) +# [default: ~root/.ssh/id_rsa] +ssh_key_file="" + +# specify additional options for the SSH session to the ZFS appliance(s) +# [default: null] +ssh_opts="" + +# specify the ZFS hostname(s), log name(s) & alert levels. Filters +# should be comma-separated. Following logs are supported (filters in brackets) +# alert (critical,major,minor) +# fltlog (critical,major,minor) +# system (error) +# scrk (failed) +# Format: +# zfs:<hostname>:<log name>:[<filters>] +# Examples: +# check all major events in the fltlog on myzfs1 +# zfs:myzfs1:fltlog:major +# check for all major & critical events in the alert and fltlog on myzfs2 +# zfs:myzfs2:alert:major,critical +# zfs:myzfs2:fltlog:major,critical + + +#****************************************************************************** +# End of FILE +#****************************************************************************** diff --git a/configs/etc/check_exadata_zfs_services.conf.dist b/configs/etc/check_exadata_zfs_services.conf.dist new file mode 100644 index 0000000..a289465 --- /dev/null +++ b/configs/etc/check_exadata_zfs_services.conf.dist @@ -0,0 +1,38 @@ +#****************************************************************************** +# @(#) check_exadata_zfs_services.conf +#****************************************************************************** +# This is a 
configuration file for the check_exadata_zfs_services HC plugin. +# All lines starting with a '#' are comment lines. +# [default: indicates hardcoded script values if no value is defined here] +#****************************************************************************** + +# specify whether to also log passed health checks +# (warning: this may rapidly grow the HC log) +# [default: no] +log_healthy="yes" + +# specify the user account for the SSH session to the ZFS appliance(s) +# [default: root] +ssh_user="" + +# specify the private key file for the SSH session to the ZFS appliance(s) +# [default: ~root/.ssh/id_rsa] +ssh_key_file="" + +# specify additional options for the SSH session to the ZFS appliance(s) +# [default: null] +ssh_opts="" + +# specify the ZFS hostname(s), services name(s) and their desired state. +# In order to check share(s) for a given ZFS appliance at least one configuration +# entry must be present: either a wildcard or custom entry. +# Format: +# zfs:<hostname>:<service>:<state> +# Examples: +# check on myzfs1 that svc1 is online +# zfs:myzfs1:svc1:online + + +#****************************************************************************** +# End of FILE +#****************************************************************************** diff --git a/configs/etc/check_exadata_zfs_share_replication.conf.dist b/configs/etc/check_exadata_zfs_share_replication.conf.dist new file mode 100644 index 0000000..dfa9b51 --- /dev/null +++ b/configs/etc/check_exadata_zfs_share_replication.conf.dist @@ -0,0 +1,55 @@ +#****************************************************************************** +# @(#) check_exadata_zfs_share_replication.conf +#****************************************************************************** +# This is a configuration file for the check_exadata_zfs_share_replication HC plugin. +# All lines starting with a '#' are comment lines. 
+# [default: indicates hardcoded script values if no value is defined here] +#****************************************************************************** + +# specify whether to also log passed health checks +# (warning: this may rapidly grow the HC log) +# [default: no] +log_healthy="yes" + +# specify the user account for the SSH session to the ZFS appliance(s) +# [default: root] +ssh_user="" + +# specify the private key file for the SSH session to the ZFS appliance(s) +# [default: ~root/.ssh/id_rsa] +ssh_key_file="" + +# specify additional options for the SSH session to the ZFS appliance(s) +# [default: null] +ssh_opts="" + +# specify the maximum replication lag in seconds (general threshold) +# [default: 300] +max_replication_lag=300 + +# specify the ZFS hostname(s), replication name(s) and their maximum lag (in seconds) +# When not defining a threshold for a given share, the general threshold will +# be used (see above). When defining a threshold of 0 (zero), then the check +# for this given share will be skipped (this allows for exclusion of shares) +# In order to check share(s) for a given ZFS appliance at least one configuration +# entry must be present: either a wildcard or custom entry. 
+# Format: +# zfs:<hostname>:<replication name>:<enabled>:<state>:[<max_lag>] +# Examples: +# check rep_share1 on myzfs1 with a custom threshold of 600 seconds +# zfs:myzfs1:rep_share1:*:*:600 +# check all shares of myzfs2 with a custom threshold of 1200 seconds +# zfs:myzfs2:*:*:*:1200 +# check all shares of myzfs3 with the general threshold +# zfs:myzfs3:*:*:*: +# disable all shares of myzfs4 from checking +# zfs:myzfs4:*:*:*:0 +# disable check of rep_share7 on myzfs5 +# zfs:myzfs5:rep_share7:*:*:0 +# check that rep_share4 on myzfs6 is inactive +# zfs:myzfs6:rep_share4:false:*: + + +#****************************************************************************** +# End of FILE +#****************************************************************************** diff --git a/configs/etc/check_exadata_zfs_share_usage.conf.dist b/configs/etc/check_exadata_zfs_share_usage.conf.dist new file mode 100644 index 0000000..799720b --- /dev/null +++ b/configs/etc/check_exadata_zfs_share_usage.conf.dist @@ -0,0 +1,53 @@ +#****************************************************************************** +# @(#) check_exadata_zfs_share_usage.conf +#****************************************************************************** +# This is a configuration file for the check_exadata_zfs_share_usage HC plugin. +# All lines starting with a '#' are comment lines. 
+# [default: indicates hardcoded script values if no value is defined here] +#****************************************************************************** + +# specify whether to also log passed health checks +# (warning: this may rapidly grow the HC log) +# [default: no] +log_healthy="yes" + +# specify the user account for the SSH session to the ZFS appliance(s) +# [default: root] +ssh_user="" + +# specify the private key file for the SSH session to the ZFS appliance(s) +# [default: ~root/.ssh/id_rsa] +ssh_key_file="" + +# specify additional options for the SSH session to the ZFS appliance(s) +# [default: null] +ssh_opts="" + +# specify the maximum % of space usage a share may reach (general threshold) +# [default: 90] +max_space_usage=90 + +# specify the ZFS hostname(s), project+share name(s) and their maximum usage (in %) +# When not defining a threshold for a given share, the general threshold will +# be used (see above). When defining a threshold of 0 (zero), then the check +# for this given share will be skipped (this allows for exclusion of shares) +# In order to check share(s) for a given ZFS appliance at least one configuration +# entry must be present: either a wildcard or custom entry. 
+# Format: +# zfs::::[] +# Examples: +# check share1 on myzfs1 with a custom threshold of 75% +# zfs:myzfs1:projectX:share1:75 +# check all shares of myzfs2 with a custom threshold of 85% +# zfs:myzfs2:*:*:85 +# check all shares of myzfs3 with the general threshold +# zfs:myzfs3:*:*: +# disable all shares of myzfs4 from checking +# zfs:myzfs4:*:*:0 +# disable check of share7 on myzfs5 +# zfs:myzfs5:projectY:share7:0 + + +#****************************************************************************** +# End of FILE +#****************************************************************************** diff --git a/configs/etc/check_hpux_fs_usage.conf.dist b/configs/etc/check_hpux_fs_usage.conf.dist index af9c0c4..1c693fe 100644 --- a/configs/etc/check_hpux_fs_usage.conf.dist +++ b/configs/etc/check_hpux_fs_usage.conf.dist @@ -32,7 +32,7 @@ max_space_usage=90 # defining a threshold of 0 (zero), then the check will for this give filesystem # will be skipped (this allows for exclusion of filesystems) # Format: -# [fs::::[]:[:::[]:[ 0 && ARG_DEBUG_LEVEL > 0 )) && set "${DEBUG_OPTS}" @@ -149,7 +149,7 @@ return 0 # EXPECTS: n/a # OUTPUTS: 0=not active/installed; 1=active/installed # RETURNS: 0=success; 1=error -# REQUIRES: n/a +# REQUIRES: Docker function linux_has_docker { (( ARG_DEBUG > 0 && ARG_DEBUG_LEVEL > 0 )) && set "${DEBUG_OPTS}" @@ -184,7 +184,7 @@ return 0 # EXPECTS: n/a # OUTPUTS: 0=not active/installed; 1=active/installed # RETURNS: 0=success; 1=error -# REQUIRES: n/a +# REQUIRES: NetworkManager function linux_has_nm { (( ARG_DEBUG > 0 && ARG_DEBUG_LEVEL > 0 )) && set "${DEBUG_OPTS}" @@ -218,7 +218,7 @@ return 0 # EXPECTS: name of service [string] # OUTPUTS: 0=not installed; 1=installed # RETURNS: 0=success; 1=error -# REQUIRES: n/a +# REQUIRES: systemd function linux_has_systemd_service { (( ARG_DEBUG > 0 && ARG_DEBUG_LEVEL > 0 )) && set "${DEBUG_OPTS}" @@ -230,6 +230,31 @@ _RC=$? 
return ${_RC} } +# ----------------------------------------------------------------------------- +# @(#) FUNCTION: linux_exec_ssh() +# DOES: execute a shell command remotely via SSH +# EXPECTS: 1=options [string], 2=user [string], 3=host [string], 4=command [string] +# RETURNS: exit code of remote command +# OUTPUTS: STDOUT from SSH call +# REQUIRES: ssh command-line utility +function linux_exec_ssh +{ +(( ARG_DEBUG > 0 && ARG_DEBUG_LEVEL > 0 )) && set ${DEBUG_OPTS} +typeset _SSH_OPTS="${1}" +typeset _SSH_USER="${2}" +typeset _SSH_HOST="${3}" +typeset _SSH_COMMAND="${4}" + +if [[ -z "${_SSH_USER}" || -z "${_SSH_HOST}" || -z "${_SSH_COMMAND}" ]] +then + return 255 +fi +# shellcheck disable=SC2086 +ssh ${_SSH_OPTS} -l ${_SSH_USER} ${_SSH_HOST} ${_SSH_COMMAND} 2>>${HC_STDERR_LOG} 0 && ARG_DEBUG_LEVEL > 0 )) && set ${DEBUG_OPTS} +init_hc "$0" "${_SUPPORTED_PLATFORMS}" "${_VERSION}" +typeset _ARGS=$(data_comma2space "$*") +typeset _ARG="" +typeset _MSG="" +typeset _STC=0 +typeset _AWK_RC="" +typeset _FILTER="" +typeset _FILTERS="" +typeset _CFG_HEALTHY="" +typeset _LOG_HEALTHY=0 +typeset _CFG_SSH_KEY_FILE="" +typeset _CFG_SSH_OPTS="" +typeset _CFG_SSH_USER="" +typeset _CFG_ZFS_HOSTS="" +typeset _CFG_ZFS_HOST="" +typeset _LAST_LOG_ENTRY="" +typeset _MSG_DESC="" +typeset _MSG_ID="" +typeset _MSG_MODULE="" +typeset _MSG_PRIO="" +typeset _MSG_RESULT="" +typeset _MSG_TEXT="" +typeset _MSG_TYPE="" +typeset _NEW_LAST_LOG_ENTRY="" +typeset _SSH_BIN="" +typeset _SSH_OUTPUT="" +typeset _STATE_FILE="" +typeset _ZFS_SCRIPT="" +typeset _ZFS_LOG="" +typeset _ZFS_DATA="" + +# handle arguments (originally comma-separated) +for _ARG in ${_ARGS} +do + case "${_ARG}" in + help) + _show_usage $0 ${_VERSION} ${_CONFIG_FILE} && return 0 + ;; + esac +done + +# handle configuration file +[[ -n "${ARG_CONFIG_FILE}" ]] && _CONFIG_FILE="${ARG_CONFIG_FILE}" +if [[ ! 
-r ${_CONFIG_FILE} ]] +then + warn "unable to read configuration file at ${_CONFIG_FILE}" + return 1 +fi +# read configuration values +_CFG_HEALTHY=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'log_healthy') +case "${_CFG_HEALTHY}" in + yes|YES|Yes) + _LOG_HEALTHY=1 + ;; + *) + # do not override hc_arg + (( _LOG_HEALTHY > 0 )) || _LOG_HEALTHY=0 + ;; +esac +_CFG_SSH_USER=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'ssh_user') +if [[ -z "${_CFG_SSH_USER}" ]] +then + _CFG_SSH_USER="root" +fi +_CFG_SSH_KEY_FILE=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'ssh_key_file') +_CFG_SSH_OPTS=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'ssh_opts') +# add quiet mode +_CFG_SSH_OPTS="${_CFG_SSH_OPTS} -q" +if [[ -n "${_CFG_SSH_KEY_FILE}" ]] +then + if [[ -r "${_CFG_SSH_KEY_FILE}" ]] + then + log "will use SSH key ${_CFG_SSH_KEY_FILE}" + _CFG_SSH_OPTS="${_CFG_SSH_OPTS} -i ${_CFG_SSH_KEY_FILE}" + else + warn "will use SSH key ${_CFG_SSH_KEY_FILE}, but file does not exist" + return 1 + fi +fi + +# log_healthy +(( ARG_LOG_HEALTHY > 0 )) && _LOG_HEALTHY=1 +if (( _LOG_HEALTHY > 0 )) +then + if (( ARG_LOG > 0 )) + then + log "logging/showing passed health checks" + else + log "showing passed health checks (but not logging)" + fi +else + log "not logging/showing passed health checks" +fi + +# check ssh +_SSH_BIN="$(which ssh 2>>${HC_STDERR_LOG})" +if [[ ! -x ${_SSH_BIN} || -z "${_SSH_BIN}" ]] +then + warn "SSH is not installed here" + return 1 +fi + +# gather ZFS hostnames +_CFG_ZFS_HOSTS=$(grep -i -E -e '^zfs:' ${_CONFIG_FILE} 2>/dev/null | cut -f2 -d':' 2>/dev/null | sort -u 2>/dev/null) +if [[ -z "${_CFG_ZFS_HOSTS}" ]] +then + warn "no monitoring rules defined in ${_CONFIG_FILE}" + return 1 +fi + +# gather ZFS log data +print "${_CFG_ZFS_HOSTS}" | while read -r _CFG_ZFS_HOST +do + # for which log(s)? 
+ grep -i -E -e "^zfs:${_CFG_ZFS_HOST}:" ${_CONFIG_FILE} 2>/dev/null |\ + while IFS=':' read -r _ _ _ZFS_LOG _FILTERS + do + # validate _ZFS_LOG & define script settings + case "${_ZFS_LOG}" in + alert|ALERT|Alert) + # define log query script -- DO NOT CHANGE -- + _ZFS_SCRIPT=" + script + run('maintenance logs select alert'); + entries = list(); + for (i = 0; i < entries.length; i++) { + try { run('select ' + entries[i]); + printf('%s|%s|%s|%s|%s\n', entries[i], get('timestamp'), + get('uuid'), get('description'), get('type')); + run('cd ..'); + } catch (err) { } + }" + # validate _FILTERS + for _FILTER in $(data_comma2space "${_FILTERS}") + do + case "${_FILTER}" in + minor|MINOR|major|MAJOR|CRITICAL|critical) + : + ;; + *) + warn "filter value is incorrect for ${_CFG_ZFS_HOST}/${_ZFS_LOG} in configuration file ${_CONFIG_FILE} " + return 1 + ;; + esac + done + ;; + FLTLOG|fltlog|Fltlog) + # define log query script -- DO NOT CHANGE -- + _ZFS_SCRIPT=" + script + run('maintenance logs select fltlog'); + entries = list(); + for (i = 0; i < entries.length; i++) { + try { run('select ' + entries[i]); + printf('%s|%s|%s|%s|%s\n', entries[i], get('timestamp'), + get('uuid'), get('desc'), get('type')); + run('cd ..'); + } catch (err) { } + }" + # validate _FILTERS + for _FILTER in $(data_comma2space "${_FILTERS}") + do + case "${_FILTER}" in + minor|MINOR|major|MAJOR|CRITICAL|critical) + : + ;; + *) + warn "filter value is incorrect for ${_CFG_ZFS_HOST}/${_ZFS_LOG} in configuration file ${_CONFIG_FILE} " + return 1 + ;; + esac + done + ;; + SCRK|scrk|Scrk) + # define log query script -- DO NOT CHANGE -- + _ZFS_SCRIPT=" + script + run('maintenance logs select scrk'); + entries = list(); + for (i = 0; i < entries.length; i++) { + try { run('select ' + entries[i]); + printf('%s|%s|%s|%s\n', entries[i], get('timestamp'), + get('description'), get('result')); + run('cd ..'); + } catch (err) { } + }" + # validate _FILTERS + for _FILTER in $(data_comma2space "${_FILTERS}") + do + 
case "${_FILTER}" in + failed|FAILED|OK|ok) + : + ;; + *) + warn "filter value is incorrect for ${_CFG_ZFS_HOST}/${_ZFS_LOG} in configuration file ${_CONFIG_FILE} " + return 1 + ;; + esac + done + ;; + SYSTEM|system|System) + # define log query script -- DO NOT CHANGE -- + _ZFS_SCRIPT=" + script + run('maintenance logs select system'); + entries = list(); + for (i = 0; i < entries.length; i++) { + try { run('select ' + entries[i]); + printf('%s|%s|%s|%s|%s\n', entries[i], get('timestamp'), + get('module'), get('priority'), get('text')); + run('cd ..'); + } catch (err) { } + }" + _FILTERS="error" + ;; + *) + warn "log name value is incorrect for ${_CFG_ZFS_HOST}/${_ZFS_LOG} in configuration file ${_CONFIG_FILE} " + return 1 + ;; + esac + + # check state file + _STATE_FILE="${STATE_PERM_DIR}/${_CFG_ZFS_HOST}.${_ZFS_LOG}.logs" + (( ARG_DEBUG > 0 )) && debug "checking/reading state file at ${_STATE_FILE}" + if [[ -r ${_STATE_FILE} ]] + then + _LAST_LOG_ENTRY=$(<"${_STATE_FILE}") + if [[ -z "${_LAST_LOG_ENTRY}" ]] + then + (( ARG_DEBUG > 0 )) && debug "no recorded last log entry for ${_CFG_ZFS_HOST}/${_ZFS_LOG}" + else + (( ARG_DEBUG > 0 )) && debug "recorded last log entry for ${_CFG_ZFS_HOST}/${_ZFS_LOG}: ${_LAST_LOG_ENTRY}" + fi + else + : >${_STATE_FILE} + (( $? > 0 )) && { + warn "failed to create new state file at ${_STATE_FILE}" + return 1 + } + log "created new state file at ${_STATE_FILE}" + fi + + (( ARG_DEBUG > 0 )) && debug "executing remote ZFS script on ${_CFG_ZFS_HOST} for log ${_ZFS_LOG}" + _SSH_OUTPUT=$(linux_exec_ssh "${_CFG_SSH_OPTS}" "${_CFG_SSH_USER}" "${_CFG_ZFS_HOST}" "${_ZFS_SCRIPT}" 2>>${HC_STDERR_LOG}) + if (( $? 
> 0 )) || [[ -z "${_SSH_OUTPUT}" ]] + then + warn "unable to discover ${_ZFS_LOG} log data on ${_CFG_ZFS_HOST}" + (( ARG_DEBUG > 0 && ARG_DEBUG_LEVEL > 0 )) && dump_logs + continue + else + # parse log lines based on the log source they originated from + _ZFS_DATA=$(print -R "${_SSH_OUTPUT}" |\ + awk -F'|' -v zfs_log="${_ZFS_LOG}" -v filters="${_FILTERS}" -v last_entry="${_LAST_LOG_ENTRY}" ' + BEGIN { + found_last_entry = 0; + + # build search needles/regexes + split (filters, filter_array, ","); + for (word in filter_array) { + filter_needle = filter_needle filter_array[word]"|"; + } + # chop last "|" + gsub (/\|$/, "", filter_needle); + + zfs_log = tolower(zfs_log); + } + + { + # match log data against needle & $_LAST_LOG_ENTRY pointer + if (last_entry == "") { + # match against needle + if (zfs_log == "alert" || zfs_log == "fltlog") { + if (tolower ($5) ~ filter_needle) { print $0 }; + } + if (zfs_log == "scrk" || zfs_log == "system") { + if (tolower ($4) ~ filter_needle) { print $0 }; + } + } else { + # match against the $_LAST_LOG_ENTRY pointer + if ($1 ~ last_entry) { + found_last_entry = 1; + next; + } + if (found_last_entry > 0) { + # match against needle + if (zfs_log == "alert" || zfs_log == "fltlog") { + if (tolower ($5) ~ filter_needle) { print $0 }; + } + if (zfs_log == "scrk" || zfs_log == "system") { + if (tolower ($4) ~ filter_needle) { print $0 }; + } + } + } + } + + END { + # when we had a log pointer at the start but did not + # encounter it, then we have a problematic situation (could + # be that the log pointer got rotated past the 100 lines query) + # flag this by RC=255 and reset the log pointer to last + # discovered entry (which is only a stopgap solution but the + # best we can come up with) + if (last_entry != "" && found_last_entry == 0) { + exit 255; + } + }' 2>>${HC_STDERR_LOG}) + _AWK_RC=$? 
+ # check and reports results + if (( _AWK_RC == 255 )) + then + warn "lost the current log pointer for ${_CFG_ZFS_HOST}/${_ZFS_LOG}" + # rewrite log pointer from the last log entry we discovered + _NEW_LAST_LOG_ENTRY=$(print "${_SSH_OUTPUT}" | tail -1 2>/dev/null | awk -F'|' '{ print $1 }' 2>/dev/null) + if [[ -n "${_NEW_LAST_LOG_ENTRY}" ]] + then + if (( ARG_LOG > 0 )) + then + warn "resetting the current log pointer for ${_CFG_ZFS_HOST}/${_ZFS_LOG} to ${_NEW_LAST_LOG_ENTRY}" + print "${_NEW_LAST_LOG_ENTRY}" >${_STATE_FILE} 2>>${HC_STDERR_LOG} + fi + else + # zero the state file + if (( ARG_LOG > 0 )) + then + warn "null resetting the current log pointer for ${_CFG_ZFS_HOST}/${_ZFS_LOG}" + : >${_STATE_FILE} 2>>${HC_STDERR_LOG} + fi + fi + continue + elif (( _AWK_RC > 0 )) + then + warn "unable to parse log data from ${_CFG_ZFS_HOST}/${_ZFS_LOG}" + (( ARG_DEBUG > 0 && ARG_DEBUG_LEVEL > 0 )) && dump_logs + return 1 + else + if [[ -n "${_ZFS_DATA}" ]] + then + # save data to STDOUT + print "${_ZFS_DATA}" >>${HC_STDOUT_LOG} + # filter data based on logs + case "${_ZFS_LOG}" in + alert|ALERT|Alert) + print -R "${_ZFS_DATA}" | while IFS='|' read -r _MSG_ID _ _ _MSG_DESC _MSG_TYPE + do + _MSG="${_MSG_ID} (${_MSG_TYPE}) ${_MSG_DESC}" + log_hc "$0" 1 "${_CFG_ZFS_HOST}/${_ZFS_LOG}: ${_MSG}" + done + ;; + FLTLOG|fltlog|Fltlog) + print -R "${_ZFS_DATA}" | while IFS='|' read -r _MSG_ID _ _ _MSG_DESC _MSG_TYPE + do + _MSG="${_MSG_ID} (${_MSG_TYPE}) ${_MSG_DESC}" + log_hc "$0" 1 "${_CFG_ZFS_HOST}/${_ZFS_LOG}: ${_MSG}" + done + ;; + SCRK|scrk|Scrk) + print -R "${_ZFS_DATA}" | while IFS='|' read -r _MSG_ID _ _MSG_DESC _MSG_RESULT + do + if [[ "${_MSG_RESULT}" = "OK" ]] + then + _STC=0 + else + _STC=1 + fi + _MSG="${_MSG_ID} (${_MSG_RESULT}) ${_MSG_DESC}" + if (( _LOG_HEALTHY > 0 || _STC > 0 )) + then + log_hc "$0" ${_STC} "${_CFG_ZFS_HOST}/${_ZFS_LOG}: ${_MSG}" + fi + done + ;; + SYSTEM|system|System) + print -R "${_ZFS_DATA}" | while IFS='|' read -r _MSG_ID _ _MSG_MODULE 
_MSG_PRIO _MSG_TEXT + do + _MSG="${_MSG_ID} (${_MSG_PRIO}) ${_MSG_MODULE}: ${_MSG_TEXT}" + log_hc "$0" 1 "${_CFG_ZFS_HOST}/${_ZFS_LOG}: ${_MSG}" + done + ;; + esac + else + if (( _LOG_HEALTHY > 0 || _STC > 0 )) + then + _MSG="no (new) messages discovered from ${_CFG_ZFS_HOST}:/${_ZFS_LOG}" + log_hc "$0" 0 "${_MSG}" + fi + fi + # rewrite log pointer from the last log entry we discovered + _NEW_LAST_LOG_ENTRY=$(print "${_SSH_OUTPUT}" | tail -1 2>/dev/null | awk -F'|' '{ print $1 }' 2>/dev/null ) + if (( ARG_LOG > 0 )) && [[ -n "${_NEW_LAST_LOG_ENTRY}" ]] + then + (( ARG_DEBUG > 0 )) && debug "updating last log entry for ${_CFG_ZFS_HOST}/${_ZFS_LOG} to ${_NEW_LAST_LOG_ENTRY}" + print "${_NEW_LAST_LOG_ENTRY}" >${_STATE_FILE} 2>>${HC_STDERR_LOG} + fi + fi + fi + done +done + +return 0 +} + +# ----------------------------------------------------------------------------- +function _show_usage +{ +cat <<- EOT +NAME : $1 +VERSION : $2 +CONFIG : $3 with parameters: + log_healthy= + ssh_user= + ssh_key_file= + and formatted stanzas of: + zfs::: +PURPOSE : checks the ZFS logs for (new) entries with particular alert level(s) + Following logs are supported (filters in brackets): + * alert (critical,major,minor) + * fltlog (critical,major,minor) + * system (error) + * scrk (failed) + CLI: zfs > maintenance > logs > select (log) > show +CAVEAT: plugin will use state files to track 'seen' messages. However each + check will only retrieve the default 100 last log entries. So it + is possible that log entries are lost between health checks (this + can be avoided by scheduling the check quicker than the likely + rotation time for 100 log entries). 
+LOG HEALTHY : Supported + +EOT + +return 0 +} + +#****************************************************************************** +# END of script +#****************************************************************************** diff --git a/sources/lib/platform/exadata/check_exadata_zfs_services.sh b/sources/lib/platform/exadata/check_exadata_zfs_services.sh new file mode 100644 index 0000000..a4913e1 --- /dev/null +++ b/sources/lib/platform/exadata/check_exadata_zfs_services.sh @@ -0,0 +1,257 @@ +#!/usr/bin/env ksh +#****************************************************************************** +# @(#) check_exadata_zfs_services.sh +#****************************************************************************** +# @(#) Copyright (C) 2019 by KUDOS BVBA (info@kudos.be). All rights reserved. +# +# This program is a free software; you can redistribute it and/or modify +# it under the same terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details +#****************************************************************************** +# +# DOCUMENTATION (MAIN) +# ----------------------------------------------------------------------------- +# @(#) MAIN: check_exadata_zfs_services +# DOES: see _show_usage() +# EXPECTS: see _show_usage() +# REQUIRES: data_comma2space(), dump_logs(), init_hc(), linux_exec_ssh(), +# log_hc(), warn() +# +# @(#) HISTORY: +# @(#) 2019-02-18: initial version [Patrick Van der Veken] +# ----------------------------------------------------------------------------- +# DO NOT CHANGE THIS FILE UNLESS YOU KNOW WHAT YOU ARE DOING! 
+#****************************************************************************** + +# ----------------------------------------------------------------------------- +function check_exadata_zfs_services +{ +# ------------------------- CONFIGURATION starts here ------------------------- +typeset _CONFIG_FILE="${CONFIG_DIR}/$0.conf" +typeset _VERSION="2019-02-18" # YYYY-MM-DD +typeset _SUPPORTED_PLATFORMS="Linux" # uname -s match +# usage query script -- DO NOT CHANGE -- +# svc1:online +# svc2:disabled +typeset _ZFS_SCRIPT=" + script + run('configuration services'); + + var svcs = children(); + for (var i = 0; i < svcs.length; ++i) { + run(svcs[i]); + try { + printf('%0s:%s\n', svcs[i], get('')); + } catch (err) { }; + run('done'); + }" +# ------------------------- CONFIGURATION ends here --------------------------- + +# set defaults +(( ARG_DEBUG > 0 && ARG_DEBUG_LEVEL > 0 )) && set ${DEBUG_OPTS} +init_hc "$0" "${_SUPPORTED_PLATFORMS}" "${_VERSION}" +typeset _ARGS=$(data_comma2space "$*") +typeset _ARG="" +typeset _MSG="" +typeset _STC=0 +typeset _CFG_HEALTHY="" +typeset _LOG_HEALTHY=0 +typeset _CFG_SSH_KEY_FILE="" +typeset _CFG_SSH_OPTS="" +typeset _CFG_SSH_USER="" +typeset _CFG_ZFS_HOSTS="" +typeset _CFG_ZFS_HOST="" +typeset _CFG_ZFS_LINE="" +typeset _SERVICE_NAME="" +typeset _SERVICE_STATE="" +typeset _SSH_BIN="" +typeset _SSH_OUTPUT="" +typeset _ZFS_DATA="" + +# handle arguments (originally comma-separated) +for _ARG in ${_ARGS} +do + case "${_ARG}" in + help) + _show_usage $0 ${_VERSION} ${_CONFIG_FILE} && return 0 + ;; + esac +done + +# handle configuration file +[[ -n "${ARG_CONFIG_FILE}" ]] && _CONFIG_FILE="${ARG_CONFIG_FILE}" +if [[ ! 
-r ${_CONFIG_FILE} ]] +then + warn "unable to read configuration file at ${_CONFIG_FILE}" + return 1 +fi +# read configuration values +_CFG_HEALTHY=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'log_healthy') +case "${_CFG_HEALTHY}" in + yes|YES|Yes) + _LOG_HEALTHY=1 + ;; + *) + # do not override hc_arg + (( _LOG_HEALTHY > 0 )) || _LOG_HEALTHY=0 + ;; +esac +_CFG_SSH_USER=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'ssh_user') +if [[ -z "${_CFG_SSH_USER}" ]] +then + _CFG_SSH_USER="root" +fi +_CFG_SSH_KEY_FILE=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'ssh_key_file') +_CFG_SSH_OPTS=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'ssh_opts') +# add quiet mode +_CFG_SSH_OPTS="${_CFG_SSH_OPTS} -q" +if [[ -n "${_CFG_SSH_KEY_FILE}" ]] +then + if [[ -r "${_CFG_SSH_KEY_FILE}" ]] + then + log "will use SSH key ${_CFG_SSH_KEY_FILE}" + _CFG_SSH_OPTS="${_CFG_SSH_OPTS} -i ${_CFG_SSH_KEY_FILE}" + else + warn "will use SSH key ${_CFG_SSH_KEY_FILE}, but file does not exist" + return 1 + fi +fi + +# log_healthy +(( ARG_LOG_HEALTHY > 0 )) && _LOG_HEALTHY=1 +if (( _LOG_HEALTHY > 0 )) +then + if (( ARG_LOG > 0 )) + then + log "logging/showing passed health checks" + else + log "showing passed health checks (but not logging)" + fi +else + log "not logging/showing passed health checks" +fi + +# check ssh +_SSH_BIN="$(which ssh 2>>${HC_STDERR_LOG})" +if [[ ! 
-x ${_SSH_BIN} || -z "${_SSH_BIN}" ]] +then + warn "SSH is not installed here" + return 1 +fi + +# gather ZFS hostnames (for this we need at least one data line, possibly with wildcards) +_CFG_ZFS_HOSTS=$(grep -i -E -e '^zfs:' ${_CONFIG_FILE} 2>/dev/null | cut -f2 -d':' 2>/dev/null | sort -u 2>/dev/null) +if [[ -z "${_CFG_ZFS_HOSTS}" ]] +then + warn "no monitoring rules defined in ${_CONFIG_FILE}" + return 1 +fi + +# gather ZFS usage data +print "${_CFG_ZFS_HOSTS}" | while read -r _CFG_ZFS_HOST +do + (( ARG_DEBUG > 0 )) && debug "executing remote ZFS script on ${_CFG_ZFS_HOST}" + _SSH_OUTPUT=$(linux_exec_ssh "${_CFG_SSH_OPTS}" "${_CFG_SSH_USER}" "${_CFG_ZFS_HOST}" "${_ZFS_SCRIPT}" 2>>${HC_STDERR_LOG}) + if (( $? > 0 )) || [[ -z "${_SSH_OUTPUT}" ]] + then + warn "unable to discover services data on ${_CFG_ZFS_HOST}" + (( ARG_DEBUG > 0 && ARG_DEBUG_LEVEL > 0 )) && dump_logs + continue + else + # mangle SSH output by prefixing with hostname + print "${_SSH_OUTPUT}" | while read -r _SSH_LINE + do + if [[ -z "${_ZFS_DATA}" ]] + then + _ZFS_DATA="${_CFG_ZFS_HOST}:${_SSH_LINE}" + else + # shellcheck disable=SC1117 + _ZFS_DATA="${_ZFS_DATA}\n${_CFG_ZFS_HOST}:${_SSH_LINE}" + fi + done + fi +done + +# process usage status data +if [[ -z "${_ZFS_DATA}" ]] +then + _MSG="did not discover any ZFS services data" + _STC=2 + if (( _LOG_HEALTHY > 0 || _STC > 0 )) + then + log_hc "$0" ${_STC} "${_MSG}" + fi + return 1 +fi +grep -E -e '^zfs:' ${_CONFIG_FILE} 2>/dev/null |\ + while IFS=':' read -r _ _CFG_ZFS_HOST _CFG_SERVICE_NAME _CFG_SERVICE_STATE +do + if [[ -z "${_CFG_SERVICE_NAME}" ]] + then + warn "value of for ${_CFG_ZFS_HOST} is not defined in configuration file ${_CONFIG_FILE}" + continue + fi + case "${_CFG_SERVICE_STATE}" in + online|ONLINE|Online|disabled|DISABLED|Disabled) + : + ;; + *) + warn "value of for ${_CFG_ZFS_HOST}/${_CFG_SERVICE_NAME} is not correct in configuration file ${_CONFIG_FILE}" + continue + esac + (( ARG_DEBUG > 0 )) && debug "parsing services data for 
service: ${_CFG_ZFS_HOST}/${_CFG_SERVICE_NAME}" + + # perform check + _SERVICE_STATE=$(print "${_ZFS_DATA}" | grep -E -e "^${_CFG_ZFS_HOST}:${_CFG_SERVICE_NAME}:" 2>/dev/null | cut -f3 -d':' 2>/dev/null) + if [[ -n "${_SERVICE_STATE}" ]] + then + if [[ $(data_lc "${_SERVICE_STATE}") != $(data_lc "${_CFG_SERVICE_STATE}") ]] + then + _MSG="state of ${_CFG_ZFS_HOST}/${_CFG_SERVICE_NAME} is incorrect (${_SERVICE_STATE}!=${_CFG_SERVICE_STATE})" + _STC=1 + else + _MSG="state of ${_CFG_ZFS_HOST}/${_CFG_SERVICE_NAME} is correct (${_SERVICE_STATE}=${_CFG_SERVICE_STATE})" + _STC=0 + fi + if (( _LOG_HEALTHY > 0 || _STC > 0 )) + then + log_hc "$0" ${_STC} "${_MSG}" + fi + else + warn "did not find services data for ${_CFG_ZFS_HOST}/${_CFG_SERVICE_NAME}" + continue + fi +done + +return 0 +} + +# ----------------------------------------------------------------------------- +function _show_usage +{ +cat <<- EOT +NAME : $1 +VERSION : $2 +CONFIG : $3 with parameters: + log_healthy= + ssh_user= + ssh_key_file= + and formatted stanzas of: + zfs::: +PURPOSE : Checks the state of services for the configured ZFS hosts/shares + CLI: zfs > status > services > show +LOG HEALTHY : Supported + +EOT + +return 0 +} + +#****************************************************************************** +# END of script +#****************************************************************************** diff --git a/sources/lib/platform/exadata/check_exadata_zfs_share_replication.sh b/sources/lib/platform/exadata/check_exadata_zfs_share_replication.sh new file mode 100644 index 0000000..10cd770 --- /dev/null +++ b/sources/lib/platform/exadata/check_exadata_zfs_share_replication.sh @@ -0,0 +1,328 @@ +#!/usr/bin/env ksh +#****************************************************************************** +# @(#) check_exadata_zfs_share_replication.sh +#****************************************************************************** +# @(#) Copyright (C) 2019 by KUDOS BVBA (info@kudos.be). All rights reserved. 
+# +# This program is a free software; you can redistribute it and/or modify +# it under the same terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details +#****************************************************************************** +# +# DOCUMENTATION (MAIN) +# ----------------------------------------------------------------------------- +# @(#) MAIN: check_exadata_zfs_share_replication +# DOES: see _show_usage() +# EXPECTS: see _show_usage() +# REQUIRES: data_comma2space(), dump_logs(), init_hc(), linux_exec_ssh(), +# log_hc(), warn() +# +# @(#) HISTORY: +# @(#) 2019-02-18: initial version [Patrick Van der Veken] +# ----------------------------------------------------------------------------- +# DO NOT CHANGE THIS FILE UNLESS YOU KNOW WHAT YOU ARE DOING! 
+#****************************************************************************** + +# ----------------------------------------------------------------------------- +function check_exadata_zfs_share_replication +{ +# ------------------------- CONFIGURATION starts here ------------------------- +typeset _CONFIG_FILE="${CONFIG_DIR}/$0.conf" +typeset _VERSION="2019-02-18" # YYYY-MM-DD +typeset _SUPPORTED_PLATFORMS="Linux" # uname -s match +# replication query script -- DO NOT CHANGE -- +# prj1/share1:true:idle:success:111 +# prj2/share2:true:idle:success:51 +typeset _ZFS_SCRIPT=" + script + run('shares replication actions'); + actions = list(); + for (i = 0; i < actions.length; i++) { + try { run('select ' + actions[i]); + printf('%s:%s:%s:%s\n', get('replication_of'), + get('enabled'), + get('last_result'), + get('replica_lag')); + run('cd ..'); + } catch (err) { } + }" +# ------------------------- CONFIGURATION ends here --------------------------- + +# set defaults +(( ARG_DEBUG > 0 && ARG_DEBUG_LEVEL > 0 )) && set ${DEBUG_OPTS} +init_hc "$0" "${_SUPPORTED_PLATFORMS}" "${_VERSION}" +typeset _ARGS=$(data_comma2space "$*") +typeset _ARG="" +typeset _MSG="" +typeset _STC=0 +typeset _CFG_HEALTHY="" +typeset _LOG_HEALTHY=0 +typeset _CFG_MAX_REPLICA_LAG="" +typeset _CFG_REPLICATION_LAG="" +typeset _CFG_SSH_KEY_FILE="" +typeset _CFG_SSH_OPTS="" +typeset _CFG_SSH_USER="" +typeset _CFG_ZFS_HOSTS="" +typeset _CFG_ZFS_HOST="" +typeset _CFG_ZFS_LINE="" +typeset _REPLICATION_ENABLED="" +typeset _REPLICATION_LAG="" +typeset _REPLICATION_RESULT="" +typeset _SSH_BIN="" +typeset _SSH_OUTPUT="" +typeset _ZFS_DATA="" + +# handle arguments (originally comma-separated) +for _ARG in ${_ARGS} +do + case "${_ARG}" in + help) + _show_usage $0 ${_VERSION} ${_CONFIG_FILE} && return 0 + ;; + esac +done + +# handle configuration file +[[ -n "${ARG_CONFIG_FILE}" ]] && _CONFIG_FILE="${ARG_CONFIG_FILE}" +if [[ ! 
-r ${_CONFIG_FILE} ]] +then + warn "unable to read configuration file at ${_CONFIG_FILE}" + return 1 +fi +# read configuration values +_CFG_HEALTHY=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'log_healthy') +case "${_CFG_HEALTHY}" in + yes|YES|Yes) + _LOG_HEALTHY=1 + ;; + *) + # do not override hc_arg + (( _LOG_HEALTHY > 0 )) || _LOG_HEALTHY=0 + ;; +esac +_CFG_SSH_USER=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'ssh_user') +if [[ -z "${_CFG_SSH_USER}" ]] +then + _CFG_SSH_USER="root" +fi +_CFG_SSH_KEY_FILE=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'ssh_key_file') +_CFG_SSH_OPTS=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'ssh_opts') +# add quiet mode +_CFG_SSH_OPTS="${_CFG_SSH_OPTS} -q" +if [[ -n "${_CFG_SSH_KEY_FILE}" ]] +then + if [[ -r "${_CFG_SSH_KEY_FILE}" ]] + then + log "will use SSH key ${_CFG_SSH_KEY_FILE}" + _CFG_SSH_OPTS="${_CFG_SSH_OPTS} -i ${_CFG_SSH_KEY_FILE}" + else + warn "will use SSH key ${_CFG_SSH_KEY_FILE}, but file does not exist" + return 1 + fi +fi +_CFG_MAX_REPLICA_LAG=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'max_replica_lag') +if [[ -z "${_CFG_MAX_REPLICA_LAG}" ]] +then + # default + _CFG_MAX_REPLICA_LAG=90 +fi + +# log_healthy +(( ARG_LOG_HEALTHY > 0 )) && _LOG_HEALTHY=1 +if (( _LOG_HEALTHY > 0 )) +then + if (( ARG_LOG > 0 )) + then + log "logging/showing passed health checks" + else + log "showing passed health checks (but not logging)" + fi +else + log "not logging/showing passed health checks" +fi + +# check ssh +_SSH_BIN="$(which ssh 2>>${HC_STDERR_LOG})" +if [[ ! 
-x ${_SSH_BIN} || -z "${_SSH_BIN}" ]] +then + warn "SSH is not installed here" + return 1 +fi + +# gather ZFS hostnames +_CFG_ZFS_HOSTS=$(grep -i -E -e '^zfs:' ${_CONFIG_FILE} 2>/dev/null | cut -f2 -d':' 2>/dev/null | sort -u 2>/dev/null) +if [[ -z "${_CFG_ZFS_HOSTS}" ]] +then + warn "no monitoring rules defined in ${_CONFIG_FILE}" + return 1 +fi + +# gather ZFS replication data +print "${_CFG_ZFS_HOSTS}" | while read -r _CFG_ZFS_HOST +do + (( ARG_DEBUG > 0 )) && debug "executing remote ZFS script on ${_CFG_ZFS_HOST}" + _SSH_OUTPUT=$(linux_exec_ssh "${_CFG_SSH_OPTS}" "${_CFG_SSH_USER}" "${_CFG_ZFS_HOST}" "${_ZFS_SCRIPT}" 2>>${HC_STDERR_LOG}) + if (( $? > 0 )) || [[ -z "${_SSH_OUTPUT}" ]] + then + warn "unable to discover replication data on ${_CFG_ZFS_HOST}" + (( ARG_DEBUG > 0 && ARG_DEBUG_LEVEL > 0 )) && dump_logs + continue + else + # mangle SSH output by prefixing with hostname + print "${_SSH_OUTPUT}" | while read -r _SSH_LINE + do + if [[ -z "${_ZFS_DATA}" ]] + then + _ZFS_DATA="${_CFG_ZFS_HOST}:${_SSH_LINE}" + else + # shellcheck disable=SC1117 + _ZFS_DATA="${_ZFS_DATA}\n${_CFG_ZFS_HOST}:${_SSH_LINE}" + fi + done + fi +done + +# process replication status data +if [[ -z "${_ZFS_DATA}" ]] +then + _MSG="did not discover any ZFS replication data" + _STC=2 + if (( _LOG_HEALTHY > 0 || _STC > 0 )) + then + log_hc "$0" ${_STC} "${_MSG}" + fi + return 1 +fi +print "${_ZFS_DATA}" | while IFS=':' read -r _ZFS_HOST _REPLICATION_NAME _REPLICATION_ENABLED _REPLICATION_RESULT _REPLICATION_LAG +do + (( ARG_DEBUG > 0 )) && debug "parsing replication data for share: ${_ZFS_HOST}:${_REPLICATION_NAME}" + _CFG_REPLICATION_ENABLED="" + _CFG_REPLICATION_RESULT="" + _CFG_REPLICATION_LAG="" + + # which values to use (general or custom?), keep in mind wildcards (custom will overrule wildcard entry) + _CFG_ZFS_LINE=$(grep -E -e "^zfs:${_ZFS_HOST}:[*]:" ${_CONFIG_FILE} 2>/dev/null) + if [[ -n "${_CFG_ZFS_LINE}" ]] + then + (( ARG_DEBUG > 0 )) && debug "found wilcard definition for 
${_ZFS_HOST} in configuration file ${_CONFIG_FILE}"
+        _CFG_REPLICATION_ENABLED=$(print "${_CFG_ZFS_LINE}" | cut -f4 -d':' 2>/dev/null)
+        _CFG_REPLICATION_RESULT=$(print "${_CFG_ZFS_LINE}" | cut -f5 -d':' 2>/dev/null)
+        _CFG_REPLICATION_LAG=$(print "${_CFG_ZFS_LINE}" | cut -f6 -d':' 2>/dev/null)
+        # null value means general threshold
+        if [[ -z "${_CFG_REPLICATION_LAG}" ]]
+        then
+            (( ARG_DEBUG > 0 )) && debug "found empty lag threshold for ${_ZFS_HOST}, using general threshold"
+            _CFG_REPLICATION_LAG=${_CFG_MAX_REPLICA_LAG}
+        fi
+    fi
+    _CFG_ZFS_LINE=$(grep -E -e "^zfs:${_ZFS_HOST}:${_REPLICATION_NAME}:" ${_CONFIG_FILE} 2>/dev/null)
+    if [[ -n "${_CFG_ZFS_LINE}" ]]
+    then
+        (( ARG_DEBUG > 0 )) && debug "found custom definition for ${_ZFS_HOST}:${_REPLICATION_NAME} in configuration file ${_CONFIG_FILE}"
+        _CFG_REPLICATION_ENABLED=$(print "${_CFG_ZFS_LINE}" | cut -f4 -d':' 2>/dev/null)
+        _CFG_REPLICATION_RESULT=$(print "${_CFG_ZFS_LINE}" | cut -f5 -d':' 2>/dev/null)
+        _CFG_REPLICATION_LAG=$(print "${_CFG_ZFS_LINE}" | cut -f6 -d':' 2>/dev/null)
+        # null value means general threshold
+        if [[ -z "${_CFG_REPLICATION_LAG}" ]]
+        then
+            (( ARG_DEBUG > 0 )) && debug "found empty lag threshold for ${_ZFS_HOST}, using general threshold"
+            _CFG_REPLICATION_LAG=${_CFG_MAX_REPLICA_LAG}
+        fi
+    fi
+    if [[ -n "${_CFG_REPLICATION_LAG}" ]]
+    then
+        data_is_numeric "${_CFG_REPLICATION_LAG}"
+        if (( $? > 0 ))
+        then
+            warn "value for <max_replica_lag> is not numeric in configuration file ${_CONFIG_FILE}"
+            continue
+        fi
+        # zero value means disabled check
+        if (( _CFG_REPLICATION_LAG == 0 ))
+        then
+            (( ARG_DEBUG > 0 )) && debug "found zero lag threshold, disabling check"
+            continue
+        fi
+    else
+        (( ARG_DEBUG > 0 )) && debug "no custom lag threshold for ${_ZFS_HOST}:${_REPLICATION_NAME}, using general threshold"
+        _CFG_REPLICATION_LAG=${_CFG_MAX_REPLICA_LAG}
+    fi
+    # fixed defaults if missing
+    [[ -z "${_CFG_REPLICATION_ENABLED}" || "${_CFG_REPLICATION_ENABLED}" = '*' ]] && _CFG_REPLICATION_ENABLED="true"
+    [[ -z "${_CFG_REPLICATION_RESULT}" || "${_CFG_REPLICATION_RESULT}" = '*' ]] && _CFG_REPLICATION_RESULT="success"
+
+    # perform checks
+    # check replication enabled state (active or not?)
+    if [[ $(data_lc "${_REPLICATION_ENABLED}") != $(data_lc "${_CFG_REPLICATION_ENABLED}") ]]
+    then
+        _MSG="state for ${_ZFS_HOST}:${_REPLICATION_NAME} is incorrect [${_REPLICATION_ENABLED}!=${_CFG_REPLICATION_ENABLED}]"
+        _STC=1
+    else
+        _MSG="state for ${_ZFS_HOST}:${_REPLICATION_NAME} is correct [${_REPLICATION_ENABLED}=${_CFG_REPLICATION_ENABLED}]"
+        _STC=0
+    fi
+    if (( _LOG_HEALTHY > 0 || _STC > 0 ))
+    then
+        log_hc "$0" ${_STC} "${_MSG}" "${_REPLICATION_ENABLED}" "${_CFG_REPLICATION_ENABLED}"
+    fi
+    # check replication last result (success or not?)
+    if [[ $(data_lc "${_REPLICATION_RESULT}") != $(data_lc "${_CFG_REPLICATION_RESULT}") ]]
+    then
+        _MSG="result for ${_ZFS_HOST}:${_REPLICATION_NAME} is incorrect [${_REPLICATION_RESULT}!=${_CFG_REPLICATION_RESULT}]"
+        _STC=1
+    else
+        _MSG="result for ${_ZFS_HOST}:${_REPLICATION_NAME} is correct [${_REPLICATION_RESULT}=${_CFG_REPLICATION_RESULT}]"
+        _STC=0
+    fi
+    if (( _LOG_HEALTHY > 0 || _STC > 0 ))
+    then
+        log_hc "$0" ${_STC} "${_MSG}" "${_REPLICATION_RESULT}" "${_CFG_REPLICATION_RESULT}"
+    fi
+    # check replication lag
+    if (( _REPLICATION_LAG > _CFG_REPLICATION_LAG ))
+    then
+        _MSG="lag for ${_ZFS_HOST}:${_REPLICATION_NAME} is too big [${_REPLICATION_LAG}>${_CFG_REPLICATION_LAG}]"
+        _STC=1
+    else
+        _MSG="lag for ${_ZFS_HOST}:${_REPLICATION_NAME} is OK [${_REPLICATION_LAG}<=${_CFG_REPLICATION_LAG}]"
+        _STC=0
+    fi
+    if (( _LOG_HEALTHY > 0 || _STC > 0 ))
+    then
+        log_hc "$0" ${_STC} "${_MSG}" "${_REPLICATION_LAG}" "${_CFG_REPLICATION_LAG}"
+    fi
+done
+
+return 0
+}
+
+# -----------------------------------------------------------------------------
+function _show_usage
+{
+cat <<- EOT
+NAME        : $1
+VERSION     : $2
+CONFIG      : $3 with parameters:
+               log_healthy=
+               ssh_user=
+               ssh_key_file=
+               max_replica_lag=
+              and formatted stanzas of:
+               zfs:::::
+PURPOSE     : Checks the replication state, sync status and maximum lag of the configured ZFS hosts/shares
+              CLI: zfs > shares > replications > packages > select (action) > show
+LOG HEALTHY : Supported
+
+EOT
+
+return 0
+}
+
+#******************************************************************************
+# END of script
+#******************************************************************************
diff --git a/sources/lib/platform/exadata/check_exadata_zfs_share_usage.sh b/sources/lib/platform/exadata/check_exadata_zfs_share_usage.sh
new file mode 100644
index 0000000..176c800
--- /dev/null
+++ b/sources/lib/platform/exadata/check_exadata_zfs_share_usage.sh
@@ -0,0 +1,300 @@
+#!/usr/bin/env ksh
+#****************************************************************************** +# @(#) check_exadata_zfs_share_usage.sh +#****************************************************************************** +# @(#) Copyright (C) 2019 by KUDOS BVBA (info@kudos.be). All rights reserved. +# +# This program is a free software; you can redistribute it and/or modify +# it under the same terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details +#****************************************************************************** +# +# DOCUMENTATION (MAIN) +# ----------------------------------------------------------------------------- +# @(#) MAIN: check_exadata_zfs_share_usage +# DOES: see _show_usage() +# EXPECTS: see _show_usage() +# REQUIRES: data_comma2space(), dump_logs(), init_hc(), linux_exec_ssh(), +# log_hc(), warn() +# +# @(#) HISTORY: +# @(#) 2019-02-18: initial version [Patrick Van der Veken] +# ----------------------------------------------------------------------------- +# DO NOT CHANGE THIS FILE UNLESS YOU KNOW WHAT YOU ARE DOING! 
+#****************************************************************************** + +# ----------------------------------------------------------------------------- +function check_exadata_zfs_share_usage +{ +# ------------------------- CONFIGURATION starts here ------------------------- +typeset _CONFIG_FILE="${CONFIG_DIR}/$0.conf" +typeset _VERSION="2019-02-18" # YYYY-MM-DD +typeset _SUPPORTED_PLATFORMS="Linux" # uname -s match +# usage query script -- DO NOT CHANGE -- +# prj1:share1:16 +# prj2:share1:85 +typeset _ZFS_SCRIPT=" + script + run('shares'); + projects = list(); + + for (i = 0; i < projects.length; i++) { + try { run('select ' + projects[i]); + shares = list(); + + for (j = 0; j < shares.length; j++) { + try { run('select ' + shares[j]); + printf('%s:%s:%d\n', projects[i], shares[j], + get('space_data')/get('space_available')*100); + run('cd ..'); + } catch (err) { } + } + run('cd ..'); + } catch (err) { + throw ('unexpected error occurred'); + } + }" +# ------------------------- CONFIGURATION ends here --------------------------- + +# set defaults +(( ARG_DEBUG > 0 && ARG_DEBUG_LEVEL > 0 )) && set ${DEBUG_OPTS} +init_hc "$0" "${_SUPPORTED_PLATFORMS}" "${_VERSION}" +typeset _ARGS=$(data_comma2space "$*") +typeset _ARG="" +typeset _MSG="" +typeset _STC=0 +typeset _CFG_HEALTHY="" +typeset _LOG_HEALTHY=0 +typeset _CFG_MAX_SPACE_USAGE="" +typeset _CFG_SSH_KEY_FILE="" +typeset _CFG_SSH_OPTS="" +typeset _CFG_SSH_USER="" +typeset _CFG_SPACE_THRESHOLD="" +typeset _CFG_ZFS_HOSTS="" +typeset _CFG_ZFS_HOST="" +typeset _CFG_ZFS_LINE="" +typeset _PROJECT_NAME="" +typeset _SHARE_NAME="" +typeset _SPACE_USAGE="" +typeset _SSH_BIN="" +typeset _SSH_OUTPUT="" +typeset _ZFS_DATA="" + +# handle arguments (originally comma-separated) +for _ARG in ${_ARGS} +do + case "${_ARG}" in + help) + _show_usage $0 ${_VERSION} ${_CONFIG_FILE} && return 0 + ;; + esac +done + +# handle configuration file +[[ -n "${ARG_CONFIG_FILE}" ]] && _CONFIG_FILE="${ARG_CONFIG_FILE}" +if [[ ! 
-r ${_CONFIG_FILE} ]] +then + warn "unable to read configuration file at ${_CONFIG_FILE}" + return 1 +fi +# read configuration values +_CFG_HEALTHY=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'log_healthy') +case "${_CFG_HEALTHY}" in + yes|YES|Yes) + _LOG_HEALTHY=1 + ;; + *) + # do not override hc_arg + (( _LOG_HEALTHY > 0 )) || _LOG_HEALTHY=0 + ;; +esac +_CFG_SSH_USER=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'ssh_user') +if [[ -z "${_CFG_SSH_USER}" ]] +then + _CFG_SSH_USER="root" +fi +_CFG_SSH_KEY_FILE=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'ssh_key_file') +_CFG_SSH_OPTS=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'ssh_opts') +# add quiet mode +_CFG_SSH_OPTS="${_CFG_SSH_OPTS} -q" +if [[ -n "${_CFG_SSH_KEY_FILE}" ]] +then + if [[ -r "${_CFG_SSH_KEY_FILE}" ]] + then + log "will use SSH key ${_CFG_SSH_KEY_FILE}" + _CFG_SSH_OPTS="${_CFG_SSH_OPTS} -i ${_CFG_SSH_KEY_FILE}" + else + warn "will use SSH key ${_CFG_SSH_KEY_FILE}, but file does not exist" + return 1 + fi +fi +_CFG_MAX_SPACE_USAGE=$(_CONFIG_FILE="${_CONFIG_FILE}" data_get_lvalue_from_config 'max_space_usage') +if [[ -z "${_CFG_MAX_SPACE_USAGE}" ]] +then + # default + _CFG_MAX_SPACE_USAGE=90 +fi + +# log_healthy +(( ARG_LOG_HEALTHY > 0 )) && _LOG_HEALTHY=1 +if (( _LOG_HEALTHY > 0 )) +then + if (( ARG_LOG > 0 )) + then + log "logging/showing passed health checks" + else + log "showing passed health checks (but not logging)" + fi +else + log "not logging/showing passed health checks" +fi + +# check ssh +_SSH_BIN="$(which ssh 2>>${HC_STDERR_LOG})" +if [[ ! 
-x ${_SSH_BIN} || -z "${_SSH_BIN}" ]] +then + warn "SSH is not installed here" + return 1 +fi + +# gather ZFS hostnames (for this we need at least one data line, possibly with wildcards) +_CFG_ZFS_HOSTS=$(grep -i -E -e '^zfs:' ${_CONFIG_FILE} 2>/dev/null | cut -f2 -d':' 2>/dev/null | sort -u 2>/dev/null) +if [[ -z "${_CFG_ZFS_HOSTS}" ]] +then + warn "no monitoring rules defined in ${_CONFIG_FILE}" + return 1 +fi + +# gather ZFS usage data +print "${_CFG_ZFS_HOSTS}" | while read -r _CFG_ZFS_HOST +do + (( ARG_DEBUG > 0 )) && debug "executing remote ZFS script on ${_CFG_ZFS_HOST}" + _SSH_OUTPUT=$(linux_exec_ssh "${_CFG_SSH_OPTS}" "${_CFG_SSH_USER}" "${_CFG_ZFS_HOST}" "${_ZFS_SCRIPT}" 2>>${HC_STDERR_LOG}) + if (( $? > 0 )) || [[ -z "${_SSH_OUTPUT}" ]] + then + warn "unable to discover usage data on ${_CFG_ZFS_HOST}" + (( ARG_DEBUG > 0 && ARG_DEBUG_LEVEL > 0 )) && dump_logs + continue + else + # mangle SSH output by prefixing with hostname + print "${_SSH_OUTPUT}" | while read -r _SSH_LINE + do + if [[ -z "${_ZFS_DATA}" ]] + then + _ZFS_DATA="${_CFG_ZFS_HOST}:${_SSH_LINE}" + else + # shellcheck disable=SC1117 + _ZFS_DATA="${_ZFS_DATA}\n${_CFG_ZFS_HOST}:${_SSH_LINE}" + fi + done + fi +done + +# process usage status data +if [[ -z "${_ZFS_DATA}" ]] +then + _MSG="did not discover any ZFS share data" + _STC=2 + if (( _LOG_HEALTHY > 0 || _STC > 0 )) + then + log_hc "$0" ${_STC} "${_MSG}" + fi + return 1 +fi +print "${_ZFS_DATA}" | while IFS=':' read -r _ZFS_HOST _PROJECT_NAME _SHARE_NAME _SPACE_USAGE +do + (( ARG_DEBUG > 0 )) && debug "parsing space data for share: ${_ZFS_HOST}:${_PROJECT_NAME}/${_SHARE_NAME}" + _CFG_SPACE_THRESHOLD="" + + # which threshold to use (general or custom?), keep in mind wildcards (custom will overrule wildcard entry) + _CFG_ZFS_LINE=$(grep -E -e "^zfs:${_ZFS_HOST}:[*]:[*]:" ${_CONFIG_FILE} 2>/dev/null) + if [[ -n "${_CFG_ZFS_LINE}" ]] + then + (( ARG_DEBUG > 0 )) && debug "found wilcard definition for ${_ZFS_HOST} in configuration file 
${_CONFIG_FILE}" + _CFG_SPACE_THRESHOLD=$(print "${_CFG_ZFS_LINE}" | cut -f5 -d':' 2>/dev/null) + # null value means general threshold + if [[ -z "${_CFG_SPACE_THRESHOLD}" ]] + then + (( ARG_DEBUG > 0 )) && debug "found empty space threshold for ${_ZFS_HOST}, using general threshold" + _CFG_SPACE_THRESHOLD=${_CFG_MAX_SPACE_USAGE} + fi + fi + _CFG_ZFS_LINE=$(grep -E -e "^zfs:${_ZFS_HOST}:${_PROJECT_NAME}:${_SHARE_NAME}:" ${_CONFIG_FILE} 2>/dev/null) + if [[ -n "${_CFG_ZFS_LINE}" ]] + then + (( ARG_DEBUG > 0 )) && debug "found custom definition for ${_ZFS_HOST}:${_PROJECT_NAME}/${_SHARE_NAME} in configuration file ${_CONFIG_FILE}" + _CFG_SPACE_THRESHOLD=$(print "${_CFG_ZFS_LINE}" | cut -f5 -d':' 2>/dev/null) + # null value means general threshold + if [[ -z "${_CFG_SPACE_THRESHOLD}" ]] + then + (( ARG_DEBUG > 0 )) && debug "found empty space threshold for ${_ZFS_HOST}:${_PROJECT_NAME}:${_SHARE_NAME}, using general threshold" + _CFG_SPACE_THRESHOLD=${_CFG_MAX_SPACE_USAGE} + fi + fi + if [[ -n "${_CFG_SPACE_THRESHOLD}" ]] + then + data_is_numeric "${_CFG_SPACE_THRESHOLD}" + if (( $? 
> 0 ))
+        then
+            warn "value for <max_space_usage> is not numeric in configuration file ${_CONFIG_FILE}"
+            continue
+        fi
+        # zero value means disabled check
+        if (( _CFG_SPACE_THRESHOLD == 0 ))
+        then
+            (( ARG_DEBUG > 0 )) && debug "found zero space threshold, disabling check"
+            continue
+        fi
+    else
+        (( ARG_DEBUG > 0 )) && debug "no custom space threshold for ${_ZFS_HOST}:${_PROJECT_NAME}:${_SHARE_NAME}, using general threshold"
+        _CFG_SPACE_THRESHOLD=${_CFG_MAX_SPACE_USAGE}
+    fi
+
+    # perform check
+    if (( _SPACE_USAGE > _CFG_SPACE_THRESHOLD ))
+    then
+        _MSG="${_ZFS_HOST}:${_PROJECT_NAME}/${_SHARE_NAME} exceeds its space threshold (${_SPACE_USAGE}%>${_CFG_SPACE_THRESHOLD}%)"
+        _STC=1
+    else
+        _MSG="${_ZFS_HOST}:${_PROJECT_NAME}/${_SHARE_NAME} does not exceed its space threshold (${_SPACE_USAGE}%<=${_CFG_SPACE_THRESHOLD}%)"
+        _STC=0
+    fi
+    if (( _LOG_HEALTHY > 0 || _STC > 0 ))
+    then
+        log_hc "$0" ${_STC} "${_MSG}" "${_SPACE_USAGE}" "${_CFG_SPACE_THRESHOLD}"
+    fi
+done
+
+return 0
+}
+
+# -----------------------------------------------------------------------------
+function _show_usage
+{
+cat <<- EOT
+NAME        : $1
+VERSION     : $2
+CONFIG      : $3 with parameters:
+               log_healthy=
+               ssh_user=
+               ssh_key_file=
+               max_space_usage=
+              and formatted stanzas of:
+               zfs:::::
+PURPOSE     : Checks the space usage for the configured ZFS hosts/shares
+              CLI: zfs > shares > select (project) > (select share) > show
+LOG HEALTHY : Supported
+
+EOT
+
+return 0
+}
+
+#******************************************************************************
+# END of script
+#******************************************************************************
diff --git a/sources/lib/platform/hp-ux/check_hpux_fs_usage.sh b/sources/lib/platform/hp-ux/check_hpux_fs_usage.sh
index 73c529d..a19d2ab 100644
--- a/sources/lib/platform/hp-ux/check_hpux_fs_usage.sh
+++ b/sources/lib/platform/hp-ux/check_hpux_fs_usage.sh
@@ -26,6 +26,7 @@
 # @(#) 2019-01-27: regex fix [Patrick Van der Veken]
 # @(#) 2019-01-30: refactored to support custom 
defintions with all # filesystems check [Patrick Van der Veken] +# @(#) 2019-02-18: fixes + help update # ----------------------------------------------------------------------------- # DO NOT CHANGE THIS FILE UNLESS YOU KNOW WHAT YOU ARE DOING! #****************************************************************************** @@ -35,7 +36,7 @@ function check_hpux_fs_usage { # ------------------------- CONFIGURATION starts here ------------------------- typeset _CONFIG_FILE="${CONFIG_DIR}/$0.conf" -typeset _VERSION="2019-01-30" # YYYY-MM-DD +typeset _VERSION="2019-02-18" # YYYY-MM-DD typeset _SUPPORTED_PLATFORMS="HP-UX" # uname -s match # ------------------------- CONFIGURATION ends here --------------------------- @@ -215,7 +216,7 @@ then fi if (( _LOG_HEALTHY > 0 || _STC > 0 )) then - log_hc "$0" ${_STC} "${_MSG}" ${_INODES_USAGE} ${_CFG_INODES_THRESHOLD} + log_hc "$0" ${_STC} "${_MSG}" "${_INODES_USAGE}" "${_CFG_INODES_THRESHOLD}" fi done fi @@ -276,7 +277,7 @@ then fi if (( _LOG_HEALTHY > 0 || _STC > 0 )) then - log_hc "$0" ${_STC} "${_MSG}" ${_SPACE_USAGE} ${_CFG_SPACE_THRESHOLD} + log_hc "$0" ${_STC} "${_MSG}" "${_SPACE_USAGE}" "${_CFG_SPACE_THRESHOLD}" fi done fi @@ -293,6 +294,7 @@ VERSION : $2 CONFIG : $3 with formatted stanzas (optional): fs::: Other options: + log_healthy= check_inodes_usage= check_space_usage= max_inodes_usage= diff --git a/sources/lib/platform/linux/check_linux_fs_usage.sh b/sources/lib/platform/linux/check_linux_fs_usage.sh index 2605bcb..60a1a66 100644 --- a/sources/lib/platform/linux/check_linux_fs_usage.sh +++ b/sources/lib/platform/linux/check_linux_fs_usage.sh @@ -27,6 +27,7 @@ # @(#) 2019-01-30: refactored to support custom definitions with all # filesystems check [Patrick Van der Veken] # @(#) 2019-02-04: fix in cleanup +# @(#) 2019-02-18: fixes + help update # ----------------------------------------------------------------------------- # DO NOT CHANGE THIS FILE UNLESS YOU KNOW WHAT YOU ARE DOING! 
#****************************************************************************** @@ -36,7 +37,7 @@ function check_linux_fs_usage { # ------------------------- CONFIGURATION starts here ------------------------- typeset _CONFIG_FILE="${CONFIG_DIR}/$0.conf" -typeset _VERSION="2019-02-04" # YYYY-MM-DD +typeset _VERSION="2019-02-18" # YYYY-MM-DD typeset _SUPPORTED_PLATFORMS="Linux" # uname -s match # ------------------------- CONFIGURATION ends here --------------------------- @@ -231,7 +232,7 @@ then fi if (( _LOG_HEALTHY > 0 || _STC > 0 )) then - log_hc "$0" ${_STC} "${_MSG}" ${_INODES_USAGE} ${_CFG_MAX_INODES_USAGE} + log_hc "$0" ${_STC} "${_MSG}" "${_INODES_USAGE}" "${_CFG_MAX_INODES_USAGE}" fi done # add df output to stdout log_hc @@ -295,7 +296,7 @@ then fi if (( _LOG_HEALTHY > 0 || _STC > 0 )) then - log_hc "$0" ${_STC} "${_MSG}" ${_SPACE_USAGE} ${_CFG_SPACE_THRESHOLD} + log_hc "$0" ${_STC} "${_MSG}" "${_SPACE_USAGE}" "${_CFG_SPACE_THRESHOLD}" fi done # add df output to stdout log_hc @@ -318,6 +319,7 @@ VERSION : $2 CONFIG : $3 with formatted stanzas (optional): fs::: Other options: + log_healthy= check_inodes_usage= check_space_usage= max_inodes_usage= diff --git a/sources/lib/platform/linux/check_linux_mysqld_status.sh b/sources/lib/platform/linux/check_linux_mysqld_status.sh index a1f0973..06b8291 100644 --- a/sources/lib/platform/linux/check_linux_mysqld_status.sh +++ b/sources/lib/platform/linux/check_linux_mysqld_status.sh @@ -369,6 +369,7 @@ cat <<- EOT NAME : $1 VERSION : $2 CONFIG : $3 with parameters: + log_healthy= mysql_user= mysql_password= mysql_host=