--- /dev/null
+<!--- Provide a general summary of the issue in the Title above -->
+
+<!-- If you are reporting a problem or a bug, please ensure to read https://github.com/Icinga/icinga2/blob/master/doc/15-troubleshooting.md first. -->
+
+<!-- Formatting tips:
+
+GitHub supports Markdown: https://guides.github.com/features/mastering-markdown/
+Multi-line code blocks either with three back ticks, or four space indent.
+
+```
+object Host "myhost" {
+ ...
+}
+```
+-->
+
+## Expected Behavior
+<!--- If you're describing a bug, tell us what should happen -->
+<!--- If you're suggesting a change/improvement, tell us how it should work -->
+
+## Current Behavior
+<!--- If describing a bug, tell us what happens instead of the expected behavior -->
+<!--- If suggesting a change/improvement, explain the difference from current behavior -->
+
+## Possible Solution
+<!--- Not obligatory, but suggest a fix/reason for the bug, -->
+<!--- or ideas how to implement: the addition or change -->
+
+## Steps to Reproduce (for bugs)
+<!--- Provide a link to a live example, or an unambiguous set of steps to -->
+<!--- reproduce this bug. Include configuration, logs, etc. to reproduce, if relevant -->
+1.
+2.
+3.
+4.
+
+## Context
+<!--- How has this issue affected you? What are you trying to accomplish? -->
+<!--- Providing context helps us come up with a solution that is most useful in the real world -->
+
+## Your Environment
+<!--- Include as many relevant details about the environment you experienced the problem in -->
+* Version used (`icinga2 --version`):
+* Operating System and version:
+* Enabled features (`icinga2 feature list`):
+* Icinga Web 2 version and modules (System - About):
+* Config validation (`icinga2 daemon -C`):
+* If you run multiple Icinga 2 instances, the `zones.conf` file (or `icinga2 object list --type Endpoint` and `icinga2 object list --type Zone`) from all affected nodes.
+
if (EXISTS ${CMAKE_CURRENT_BINARY_DIR}/NSCP.msi)
file(SHA256 ${CMAKE_CURRENT_BINARY_DIR}/NSCP.msi NSCP_SHA256SUM)
endif()
-
+
if (NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/NSCP.msi OR NOT ${NSCP_SHA256SUM} STREQUAL ${NSCP_SHA256})
file(DOWNLOAD ${NSCP_URL} ${CMAKE_CURRENT_BINARY_DIR}/NSCP.msi SHOW_PROGRESS)
endif()
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/NSCP.msi DESTINATION ${CMAKE_INSTALL_SBINDIR})
+ if (OPENSSL_VERSION_MINOR GREATER_EQUAL 1)
+ if (CMAKE_VS_PLATFORM_NAME STREQUAL "x64")
+ list (APPEND ICINGA2_OPENSSL_DLLS ${OPENSSL_INCLUDE_DIR}/../bin/libcrypto-1_1-x64.dll ${OPENSSL_INCLUDE_DIR}/../bin/libssl-1_1-x64.dll)
+ else()
+ list (APPEND ICINGA2_OPENSSL_DLLS ${OPENSSL_INCLUDE_DIR}/../bin/libcrypto-1_1.dll ${OPENSSL_INCLUDE_DIR}/../bin/libssl-1_1.dll)
+ endif()
+ else()
+ list (APPEND ICINGA2_OPENSSL_DLLS ${OPENSSL_INCLUDE_DIR}/../bin/libeay32.dll ${OPENSSL_INCLUDE_DIR}/../bin/ssleay32.dll)
+ endif()
+
install(
PROGRAMS ${CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS}
- ${OPENSSL_INCLUDE_DIR}/../bin/libeay32.dll ${OPENSSL_INCLUDE_DIR}/../bin/ssleay32.dll
+ ${ICINGA2_OPENSSL_DLLS}
DESTINATION ${CMAKE_INSTALL_SBINDIR}
)
endif()
--- /dev/null
+# <a id="contributing"></a> Contributing
+
+Icinga is an open source project and lives from your ideas and contributions.
+
+There are many ways to contribute, from improving the documentation, submitting
+bug reports and feature requests or writing code to add enhancements or fix bugs.
+
+#### Table of Contents
+
+1. [Introduction](#contributing-intro)
+2. [Fork the Project](#contributing-fork)
+3. [Branches](#contributing-branches)
+4. [Commits](#contributing-commits)
+5. [Pull Requests](#contributing-pull-requests)
+6. [Testing](#contributing-testing)
+7. [Source Code Patches](#contributing-patches-source-code)
+8. [Documentation Patches](#contributing-patches-documentation)
+9. [Contribute CheckCommand Definitions](#contributing-patches-itl-checkcommands)
+
+## <a id="contributing-intro"></a> Introduction
+
+Please consider our [roadmap](https://github.com/Icinga/icinga2/milestones) and
+[open issues](https://github.com/icinga/icinga2/issues) when you start contributing
+to the project.
+
+Before starting your work on Icinga 2, you should [fork the project](https://help.github.com/articles/fork-a-repo/)
+to your GitHub account. This allows you to freely experiment with your changes.
+When your changes are complete, submit a [pull request](https://help.github.com/articles/using-pull-requests/).
+All pull requests will be reviewed and merged if they suit some general guidelines:
+
+* Changes are located in a topic branch
+* For new functionality, proper tests are written
+* Changes should follow the existing coding style and standards
+
+Please continue reading in the following sections for a step by step guide.
+
+## <a id="contributing-fork"></a> Fork the Project
+
+[Fork the project](https://help.github.com/articles/fork-a-repo/) to your GitHub account
+and clone the repository:
+
+```
+git clone git@github.com:<your-github-username>/icinga2.git
+cd icinga2
+```
+
+Add a new remote `upstream` with this repository as value.
+
+```
+git remote add upstream https://github.com/icinga/icinga2.git
+```
+
+You can pull updates to your fork's master branch:
+
+```
+git fetch --all
+git pull upstream HEAD
+```
+
+Please continue to learn about [branches](CONTRIBUTING.md#contributing-branches).
+
+## <a id="contributing-branches"></a> Branches
+
+Choosing a proper name for a branch helps us identify its purpose and possibly
+find an associated bug or feature.
+Generally a branch name should include a topic such as `fix` or `feature` followed
+by a description and an issue number if applicable. Branches should have only changes
+relevant to a specific issue.
+
+```
+git checkout -b fix/service-template-typo-1234
+git checkout -b feature/config-handling-1235
+```
+
+Continue to apply your changes and test them. More details on specific changes:
+
+* [Source Code Patches](#contributing-patches-source-code)
+* [Documentation Patches](#contributing-patches-documentation)
+* [Contribute CheckCommand Definitions](#contributing-patches-itl-checkcommands)
+
+## <a id="contributing-commits"></a> Commits
+
+Once you've finished your work in a branch, please ensure to commit
+your changes. A good commit message includes a short topic, additional body
+and a reference to the issue you wish to solve (if existing).
+
+Fixes:
+
+```
+Fix problem with notifications in HA cluster
+
+There was a race condition when restarting.
+
+refs #4567
+```
+
+Features:
+
+```
+Add ITL CheckCommand printer
+
+Requires the check_printer plugin.
+
+refs #1234
+```
+
+You can add multiple commits during your journey to finish your patch.
+Don't worry, you can squash those changes into a single commit later on.
+
+## <a id="contributing-pull-requests"></a> Pull Requests
+
+Once you've committed your changes, please update your local master
+branch and rebase your fix/feature branch against it before submitting a PR.
+
+```
+git checkout master
+git pull upstream HEAD
+
+git checkout fix/notifications
+git rebase master
+```
+
+Once you've resolved any conflicts, push the branch to your remote repository.
+It might be necessary to force push after rebasing - use with care!
+
+New branch:
+```
+git push --set-upstream origin fix/notifications
+```
+
+Existing branch:
+```
+git push -f origin fix/notifications
+```
+
+You can now either use the [hub](https://hub.github.com) CLI tool to create a PR, or navigate
+to your GitHub repository and create a PR there.
+
+The pull request should again contain a telling subject and a reference
+with `fixes` to an existing issue id if any. That allows developers
+to automatically resolve the issues once your PR gets merged.
+
+```
+hub pull-request
+
+<a telling subject>
+
+fixes #1234
+```
+
+Thanks a lot for your contribution!
+
+
+### <a id="contributing-rebase"></a> Rebase a Branch
+
+If you accidentally sent in a PR which was not rebased against the upstream master,
+developers might ask you to rebase your PR.
+
+First off, fetch and pull `upstream` master.
+
+```
+git checkout master
+git fetch --all
+git pull upstream HEAD
+```
+
+Then change to your working branch and start rebasing it against master:
+
+```
+git checkout fix/notifications
+git rebase master
+```
+
+If you are running into a conflict, rebase will stop and ask you to fix the problems.
+
+```
+git status
+
+ both modified: path/to/conflict.cpp
+```
+
+Edit the file and search for `>>>`. Fix, build, test and save as needed.
+
+Add the modified file(s) and continue rebasing.
+
+```
+git add path/to/conflict.cpp
+git rebase --continue
+```
+
+Once succeeded ensure to push your changed history remotely.
+
+```
+git push -f origin fix/notifications
+```
+
+
+If you fear to break things, do the rebase in a backup branch first and later replace your current branch.
+
+```
+git checkout fix/notifications
+git checkout -b fix/notifications-rebase
+
+git rebase master
+
+git branch -D fix/notifications
+git checkout -b fix/notifications
+
+git push -f origin fix/notifications
+```
+
+### <a id="contributing-squash"></a> Squash Commits
+
+> **Note:**
+>
+> Be careful with squashing. This might lead to non-recoverable mistakes.
+>
+> This is for advanced Git users.
+
+Say you want to squash the last 3 commits in your branch into a single one.
+
+Start an interactive (`-i`) rebase from current HEAD minus three commits (`HEAD~3`).
+
+```
+git rebase -i HEAD~3
+```
+
+Git opens your preferred editor. `pick` the commit in the first line, change `pick` to `squash` on the other lines.
+
+```
+pick e4bf04e47 Fix notifications
+squash d7b939d99 Tests
+squash b37fd5377 Doc updates
+```
+
+Save and let rebase do its job. Then force push the changes to the remote origin.
+
+```
+git push -f origin fix/notifications
+```
+
+
+## <a id="contributing-testing"></a> Testing
+
+Basic unit test coverage is provided by running `make test` during package builds.
+Read the [INSTALL.md](INSTALL.md) file for more information about development builds.
+
+Snapshot packages from the latest development branch are available inside the
+[package repository](https://packages.icinga.com).
+
+You can help test-drive the latest Icinga 2 snapshot packages inside the
+[Icinga 2 Vagrant boxes](https://github.com/icinga/icinga-vagrant).
+
+
+## <a id="contributing-patches-source-code"></a> Source Code Patches
+
+Icinga 2 is written in C++ and uses the Boost libraries. We are also using the C++11 standard where applicable (please
+note the minimum required compiler versions in the [INSTALL.md](INSTALL.md) file).
+
+Icinga 2 can be built on Linux/Unix and Windows clients. In order to develop patches for Icinga 2,
+you should prepare your own local build environment and know how to work with C++.
+
+More tips:
+
+* Requirements and source code installation is explained inside the [INSTALL.md](INSTALL.md) file.
+* Debug requirements and GDB instructions can be found in the [documentation](https://github.com/Icinga/icinga2/blob/master/doc/20-development.md).
+* If you are planning to debug a Windows client, setup a Windows environment with [Visual Studio](https://www.visualstudio.com/vs/community/). An example can be found in [this blogpost](https://blog.netways.de/2015/08/24/developing-icinga-2-on-windows-10-using-visual-studio-2015/).
+
+## <a id="contributing-patches-documentation"></a> Documentation Patches
+
+The documentation is written in GitHub flavored [Markdown](https://guides.github.com/features/mastering-markdown/).
+It is located in the `doc/` directory and can be edited with your preferred editor. You can also
+edit it online on GitHub.
+
+```
+vim doc/2-getting-started.md
+```
+
+In order to review and test changes, you can install the [mkdocs](http://www.mkdocs.org) Python library.
+
+```
+pip install mkdocs
+```
+
+This allows you to start a local mkdocs viewer instance on http://localhost:8000
+
+```
+mkdocs serve
+```
+
+Changes on the chapter layout can be done inside the `mkdocs.yml` file in the main tree.
+
+There also is a script to ensure that relative URLs to other sections are updated. This script
+also checks for broken URLs.
+
+```
+./doc/update-links.py doc/*.md
+```
+
+## <a id="contributing-patches-itl-checkcommands"></a> Contribute CheckCommand Definitions
+
+The Icinga Template Library (ITL) and its plugin check commands provide a variety of CheckCommand
+object definitions which can be included on-demand.
+
+Advantages of sending them upstream:
+
+* Everyone can use and update/fix them.
+* One single place for configuration and documentation.
+* Developers may suggest updates and help with best practices.
+* You don't need to care about copying the command definitions to your satellites and clients.
+
+#### <a id="contributing-itl-checkcommands-start"></a> Where do I start?
+
+Get to know the check plugin and its options. Read the general documentation on how to integrate
+your check plugins and how to create a good CheckCommand definition.
+
+A good command definition uses:
+
+* Command arguments including `value`, `description`, optional: `set_if`, `required`, etc.
+* Comments `/* ... */` to describe difficult parts.
+* Command name as prefix for the custom attributes referenced (e.g. `disk_`)
+* Default values
+ * If `host.address` is involved, set a custom attribute (e.g. `ping_address`) to the default `$address$`. This allows users to override the host's address later on by setting the custom attribute inside the service apply definitions.
+ * If the plugin is also capable of using IPv6, import the `ipv4-or-ipv6` template and use `$check_address$` instead of `$address$`. This allows to fall back to IPv6 if only this address is set.
+ * If `set_if` is involved, ensure to specify a sane default value if required.
+* Templates if there are multiple plugins with the same basic behaviour (e.g. ping4 and ping6).
+* Your love and enthusiasm in making it the perfect CheckCommand.
+
+#### <a id="contributing-itl-checkcommands-overview"></a> I have created a CheckCommand, what now?
+
+Icinga 2 developers love documentation. This isn't just because we want to annoy anyone sending a patch,
+it's a matter of making your contribution visible to the community.
+
+Your patch should consist of 2 parts:
+
+* The CheckCommand definition.
+* The documentation bits.
+
+[Fork the repository](https://help.github.com/articles/fork-a-repo/) and ensure that the master branch is up-to-date.
+
+Create a new fix or feature branch and start your work.
+
+```
+git checkout -b feature/itl-check-printer
+```
+
+#### <a id="contributing-itl-checkcommands-add"></a> Add CheckCommand Definition to Contrib Plugins
+
+There already exists a defined structure for contributed plugins. Navigate to `itl/plugins-contrib.d`
+and verify where your command definitions fits into.
+
+```
+cd itl/plugins-contrib.d/
+ls
+```
+
+If you want to add or modify an existing Monitoring Plugin please use `itl/command-plugins.conf` instead.
+
+```
+vim itl/command-plugins.conf
+```
+
+##### Existing Configuration File
+
+Just edit it, and add your CheckCommand definition.
+
+```
+vim operating-system.conf
+```
+
+Proceed to the documentation.
+
+##### New type for CheckCommand Definition
+
+Create a new file with .conf suffix.
+
+```
+ $ vim printer.conf
+```
+
+Add the file to `itl/CMakeLists.txt` in the FILES line in **alpha-numeric order**.
+This ensures that the installation and packages properly include your newly created file.
+
+```
+vim CMakeLists.txt
+
+-FILES ipmi.conf network-components.conf operating-system.conf virtualization.conf vmware.conf
++FILES ipmi.conf network-components.conf operating-system.conf printer.conf virtualization.conf vmware.conf
+```
+
+Add the newly created file to your git commit.
+
+```
+git add printer.conf
+```
+
+Do not commit it yet but finish with the documentation.
+
+#### <a id="contributing-itl-checkcommands-docs"></a> Create CheckCommand Documentation
+
+Edit the documentation file in the `doc/` directory. More details on documentation
+updates can be found [here](CONTRIBUTING.md#contributing-documentation).
+
+```
+vim doc/7-icinga-template-library.md
+```
+
+The CheckCommand documentation should be located in the same chapter
+similar to the configuration file you have just added/modified.
+
+Create a section for your plugin, add a description and a table of parameters. Each parameter should have at least:
+
+* optional or required
+* description of its purpose
+* the default value, if any
+
+Look at the existing documentation and "copy" the same style and layout.
+
+
+#### <a id="contributing-itl-checkcommands-patch"></a> Send a Patch
+
+Commit your changes which includes a descriptive commit message.
+
+```
+git commit -av
+Add printer CheckCommand definition
+
+Explain its purpose and possible enhancements/shortcomings.
+
+refs #existingticketnumberifany
+```
+Push the branch to the remote origin and create a [pull request](https://help.github.com/articles/using-pull-requests/).
+
+```
+git push --set-upstream origin feature/itl-check-printer
+hub pull-request
+```
+
+In case developers ask for changes during review, please add them
+to the branch and push those changes.
+
## What's New
+### What's New in Version 2.6.3
+
+#### Changes
+
+This is a bugfix release which addresses a number of bugs we've found since
+2.6.2 was released. It also contains a number of improvements for the Icinga
+documentation.
+
+#### Feature
+
+* Feature 4955 (Documentation): Review CheckCommand documentation including external URLs
+* Feature 5057 (Documentation): Update Security section in the Distributed Monitoring chapter
+* Feature 5055 (Documentation): mysql_socket attribute missing in the documentation for the mysql CheckCommand
+* Feature 5035 (Documentation): Docs: Typo in Distributed Monitoring chapter
+* Feature 5029 (Documentation): Advanced topics: Wrong acknowledgement notification filter
+* Feature 5030 (Documentation): Advanced topics: Mention the API and explain stick acks, fixed/flexible downtimes
+* Feature 3133 (Documentation): [dev.icinga.com #9583] Add practical examples for apply expressions
+* Feature 4996 (Documentation): documentation: mixed up host names in 6-distributed-monitoring.md
+* Feature 4980 (Documentation): Add OpenBSD and AlpineLinux package repositories to the documentation
+* Feature 4954 (Documentation): Add an example for /v1/actions/process-check-result which uses filter/type
+
+#### Bugfixes
+
+* Bug 5080 (IDO): Missing index use can cause icinga_downtimehistory queries to hang indefinitely
+* Bug 4603 (IDO): [dev.icinga.com #12597] With too many comments, Icinga reload process won't finish reconnecting to database
+* Bug 4989 (Check Execution): Icinga daemon runs with nice 5 after reload
+* Bug 4930 (Cluster): Change "Discarding 'config update object'" log messages to notice log level
+
### What's New in Version 2.6.2
#### Changes
* pkg-config
* OpenSSL library and header files >= 0.9.8 (openssl-devel on RHEL, libopenssl1-devel on SLES11,
libopenssl-devel on SLES12, libssl-dev on Debian)
-* Boost library and header files >= 1.41.0 (boost-devel on RHEL, libboost-all-dev on Debian)
+* Boost library and header files >= 1.48.0 (boost148-devel on EPEL for RHEL / CentOS, libboost-all-dev on Debian)
* GNU bison (bison)
* GNU flex (flex) >= 2.5.35
* recommended: libexecinfo on FreeBSD (automatically used when Icinga 2 is
Note: RHEL5 ships an ancient flex version. Updated packages are available for
example from the repoforge buildtools repository.
-* x86: http://mirror.hs-esslingen.de/repoforge/redhat/el5/en/i386/buildtools/
-* x86\_64: http://mirror.hs-esslingen.de/repoforge/redhat/el5/en/x86\_64/buildtools/
+* x86: https://mirror.hs-esslingen.de/repoforge/redhat/el5/en/i386/buildtools/
+* x86\_64: https://mirror.hs-esslingen.de/repoforge/redhat/el5/en/x86\_64/buildtools/
### User Requirements
$ rpmbuild -ba SPEC/icinga2.spec
+#### SELinux policy module
+
+The following packages are required to build the SELinux policy module:
+
+* checkpolicy
+* selinux-policy (selinux-policy on CentOS 6, selinux-policy-devel on CentOS 7)
+* selinux-policy-doc
+
#### RHEL/CentOS 5 and 6
The RedHat Developer Toolset is required for building Icinga 2 beforehand.
cat >/etc/yum.repos.d/devtools-2.repo <<REPO
[testing-devtools-2-centos-\$releasever]
name=testing 2 devtools for CentOS $releasever
- baseurl=http://people.centos.org/tru/devtools-2/\$releasever/\$basearch/RPMS
+ baseurl=https://people.centos.org/tru/devtools-2/\$releasever/\$basearch/RPMS
gpgcheck=0
REPO
- yum install -y devtoolset-2-gcc devtoolset-2-gcc-c++ devtoolset-2-binutils
+Dependencies to devtools-2 are used in the RPM SPEC, so the correct tools
+should be used for building.
+
+As an alternative, you can use newer Boost packages provided on
+[packages.icinga.com](https://packages.icinga.com/epel).
- export LD_LIBRARY_PATH=/opt/rh/devtoolset-2/root/usr/lib:$LD_LIBRARY_PATH
- export PATH=/opt/rh/devtoolset-2/root/usr/bin:$PATH
- ln -sf /opt/rh/devtoolset-2/root/usr/bin/ld.bfd /opt/rh/devtoolset-2/root/usr/bin/ld
- for file in `find /opt/rh/devtoolset-2/root/usr/include/c++ -name c++config.h`; do
- echo '#define _GLIBCXX__PTHREADS' >> $file
- done
+ cat >$HOME/.rpmmacros <<MACROS
+ %build_icinga_org 1
+ MACROS
#### SLES 11
3. [Installation][Installation]
4. [Documentation][Documentation]
5. [Support][Support]
-6. [Development and Contributions][Development]
+6. [Contributing][Contributing]
## About
[community channels](https://www.icinga.com/community/get-involved/) for questions
or ask an Icinga partner for [professional support](https://www.icinga.com/services/support/).
-## Development
-
-The Git repository is located on [GitHub](https://github.com/Icinga/icinga2).
-
-Icinga 2 is written in C++ and can be built on Linux/Unix and Windows.
-Read more about development builds in the [INSTALL.md](INSTALL.md) file.
-
-### Contributing
+## Contributing
There are many ways to contribute to Icinga -- whether it be sending patches,
testing, reporting bugs, or reviewing and updating the documentation. Every
contribution is appreciated!
-Please read the [contributing section](https://www.icinga.com/community/get-involved/)
-first. Then you should have a look at the [roadmap](https://github.com/Icinga/icinga2/milestones)
-and remaining [open issues](https://github.com/Icinga/icinga2/issues).
-
-Pick issues you want to help resolve, fork the [repository on GitHub](https://github.com/Icinga/icinga2)
-and send a pull request with your changes. Thanks for your contribution!
-
-### Testing
-
-Basic unit test coverage is provided by running `make test` during package builds.
-Read the [INSTALL.md](INSTALL.md) file for more information about development builds.
-
-Snapshot packages from the laster development branch are available inside the
-[package repository](http://packages.icinga.com).
-
-You can help test-drive the latest Icinga 2 snapshot packages inside the
-[Icinga 2 Vagrant boxes](https://github.com/icinga/icinga-vagrant).
-
+Please continue reading in the [Contributing chapter](CONTRIBUTING.md).
+<!-- TOC URLs -->
[About]: #about
[License]: #license
[Installation]: #installation
[Documentation]: #documentation
[Support]: #support
-[Development]: #development
+[Contributing]: #contributing
Specify the release version.
- VERSION=2.6.2
+ VERSION=2.6.3
## Issues
* Test DB IDO with MySQL and PostgreSQL.
* Provision the vagrant boxes and test the release packages.
-* Test the [setup wizard](http://packages.icinga.com/windows/) inside a Windows VM.
+* Test the [setup wizard](https://packages.icinga.com/windows/) inside a Windows VM.
* Start a new docker container and install/run icinga2.
$ docker run -ti centos:latest bash
- # yum -y install http://packages.icinga.com/epel/7/release/noarch/icinga-rpm-release-7-1.el7.centos.noarch.rpm
+ # yum -y install https://packages.icinga.com/epel/7/release/noarch/icinga-rpm-release-7-1.el7.centos.noarch.rpm
# yum -y install icinga2
# icinga2 daemon -C
Install the created icinga2 package locally:
- choco install icinga2 -version 2.6.2 -fdv "%cd%" -source "'%cd%;https://chocolatey.org/api/v2/'"
+ choco install icinga2 -version 2.6.3 -fdv "%cd%" -source "'%cd%;https://chocolatey.org/api/v2/'"
Upload the package to [chocolatey](https://chocolatey.org/packages/upload).
* Create a new blog post on www.icinga.com/blog
* Send announcement mail to icinga-announce@lists.icinga.org
-* Social media: [Twitter](https://twitter.com/icinga), [Facebook](https://www.facebook.com/icinga), [G+](http://plus.google.com/+icinga), [Xing](https://www.xing.com/communities/groups/icinga-da4b-1060043), [LinkedIn](https://www.linkedin.com/groups/Icinga-1921830/about)
+* Social media: [Twitter](https://twitter.com/icinga), [Facebook](https://www.facebook.com/icinga), [G+](https://plus.google.com/+icinga), [Xing](https://www.xing.com/communities/groups/icinga-da4b-1060043), [LinkedIn](https://www.linkedin.com/groups/Icinga-1921830/about)
* Update IRC channel topic
# After the release
<metadata>\r
<!-- Read this before publishing packages to chocolatey.org: https://github.com/chocolatey/chocolatey/wiki/CreatePackages -->\r
<id>icinga2</id>\r
- <title>Icinga2</title>\r
+ <title>Icinga 2</title>\r
<version>${SPEC_VERSION}</version>\r
- <authors>2016 - The Icinga Project</authors>\r
+ <authors>The Icinga Project</authors>\r
<owners>Icinga Development Team</owners>\r
<summary>icinga2 - Monitoring Agent for Windows</summary>\r
<description>Icinga 2 is an open source monitoring platform which notifies users about host and service outages.</description>\r
<projectUrl>https://www.icinga.com/</projectUrl>\r
- <tags>icinga2 agent monitoring</tags>\r
+ <tags>icinga2 agent monitoring admin</tags>\r
<licenseUrl>https://www.icinga.com/resources/faq/</licenseUrl>\r
+ <releaseNotes>https://github.com/Icinga/icinga2/blob/master/ChangeLog</releaseNotes>\r
+ <docsUrl>https://docs.icinga.com/icinga2/</docsUrl>\r
+ <bugTrackerUrl>https://github.com/Icinga/icinga2/issues</bugTrackerUrl>\r
+ <packageSourceUrl>https://github.com/Icinga/icinga2</packageSourceUrl>\r
+ <projectSourceUrl>https://github.com/Icinga/icinga2</projectSourceUrl>\r
<requireLicenseAcceptance>false</requireLicenseAcceptance>\r
<iconUrl>https://www.icinga.com/wp-content/uploads/2015/05/icinga_icon_128x128.png</iconUrl>\r
</metadata>\r
testing, reporting bugs, or reviewing and updating the documentation. Every
contribution is appreciated!
-Please read the [contributing section](https://www.icinga.com/community/get-involved/)
-first. Then you should have a look at the [roadmap](https://github.com/Icinga/icinga2/milestones)
-and remaining [open issues](https://github.com/Icinga/icinga2/issues).
-
-Pick issues you want to help resolve, fork the [repository on GitHub](https://github.com/Icinga/icinga2)
-and send a pull request with your changes. Thanks for your contribution!
+Please continue reading in the [Contributing chapter](https://github.com/Icinga/icinga2/blob/master/CONTRIBUTING.md).
### <a id="development-info"></a> Icinga 2 Development
## <a id="whats-new"></a> What's New
+### What's New in Version 2.6.3
+
+#### Changes
+
+This is a bugfix release which addresses a number of bugs we've found since
+2.6.2 was released. It also contains a number of improvements for the Icinga
+documentation.
+
+#### Feature
+
+* Feature [4955](https://github.com/Icinga/icinga2/issues/4955) (Documentation): Review CheckCommand documentation including external URLs
+* Feature [5057](https://github.com/Icinga/icinga2/issues/5057) (Documentation): Update Security section in the Distributed Monitoring chapter
+* Feature [5055](https://github.com/Icinga/icinga2/issues/5055) (Documentation): mysql_socket attribute missing in the documentation for the mysql CheckCommand
+* Feature [5035](https://github.com/Icinga/icinga2/issues/5035) (Documentation): Docs: Typo in Distributed Monitoring chapter
+* Feature [5029](https://github.com/Icinga/icinga2/issues/5029) (Documentation): Advanced topics: Wrong acknowledgement notification filter
+* Feature [5030](https://github.com/Icinga/icinga2/issues/5030) (Documentation): Advanced topics: Mention the API and explain stick acks, fixed/flexible downtimes
+* Feature [3133](https://github.com/Icinga/icinga2/issues/3133) (Documentation): [dev.icinga.com #9583] Add practical examples for apply expressions
+* Feature [4996](https://github.com/Icinga/icinga2/issues/4996) (Documentation): documentation: mixed up host names in 6-distributed-monitoring.md
+* Feature [4980](https://github.com/Icinga/icinga2/issues/4980) (Documentation): Add OpenBSD and AlpineLinux package repositories to the documentation
+* Feature [4954](https://github.com/Icinga/icinga2/issues/4954) (Documentation): Add an example for /v1/actions/process-check-result which uses filter/type
+
+#### Bugfixes
+
+* Bug [5080](https://github.com/Icinga/icinga2/issues/5080) (IDO): Missing index use can cause icinga_downtimehistory queries to hang indefinitely
+* Bug [4603](https://github.com/Icinga/icinga2/issues/4603) (IDO): [dev.icinga.com #12597] With too many comments, Icinga reload process won't finish reconnecting to database
+* Bug [4989](https://github.com/Icinga/icinga2/issues/4989) (Check Execution): Icinga daemon runs with nice 5 after reload
+* Bug [4930](https://github.com/Icinga/icinga2/issues/4930) (Cluster): Change "Discarding 'config update object'" log messages to notice log level
+
### What's New in Version 2.6.2
#### Changes
---------------------|--------------
dns_lookup | **Optional.** The hostname or IP to query the DNS for. Defaults to "$host_name$".
dns_server | **Optional.** The DNS server to query. Defaults to the server configured in the OS.
+dns_query_type | **Optional.** The DNS record query type where TYPE =(A, AAAA, SRV, TXT, MX, ANY). The default query type is 'A' (IPv4 host entry)
dns_expected_answers | **Optional.** The answer(s) to look for. A hostname must end with a dot. Multiple answers must be defined as array.
dns_authoritative | **Optional.** Expect the server to send an authoritative answer.
dns_wtime | **Optional.** Return warning if elapsed time exceeds value.
### <a id="plugin-check-command-fping4"></a> fping4
The [check_fping](https://www.monitoring-plugins.org/doc/man/check_fping.html) plugin
-will use the `fping` command to ping the specified host for a fast check. Note that it is
+uses the `fping` command to ping the specified host for a fast check. Note that it is
necessary to set the suid flag on fping.
This CheckCommand expects an IPv4 address.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
-Name | Description
-----------------|--------------
-ldap_address | **Optional.** Host name, IP Address, or unix socket (must be an absolute path). Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
-ldap_port | **Optional.** Port number. Defaults to 389.
-ldap_attr | **Optional.** LDAP attribute to search for (default: "(objectclass=*)"
-ldap_base | **Required.** LDAP base (eg. ou=myunit,o=myorg,c=at).
-ldap_bind | **Optional.** LDAP bind DN (if required).
-ldap_pass | **Optional.** LDAP password (if required).
-ldap_starttls | **Optional.** Use STARTSSL mechanism introduced in protocol version 3.
-ldap_ssl | **Optional.** Use LDAPS (LDAP v2 SSL method). This also sets the default port to 636.
-ldap_v2 | **Optional.** Use LDAP protocol version 2 (enabled by default).
-ldap_v3 | **Optional.** Use LDAP protocol version 3 (disabled by default)
-ldap_warning | **Optional.** Response time to result in warning status (seconds).
-ldap_critical | **Optional.** Response time to result in critical status (seconds).
-ldap_timeout | **Optional.** Seconds before connection times out (default: 10).
-ldap_verbose | **Optional.** Show details for command-line debugging (disabled by default)
+Name | Description
+------------------------|--------------
+ldap_address | **Optional.** Host name, IP Address, or unix socket (must be an absolute path). Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+ldap_port | **Optional.** Port number. Defaults to 389.
+ldap_attr               | **Optional.** LDAP attribute to search for (default: "(objectclass=*)")
+ldap_base | **Required.** LDAP base (eg. ou=myunit,o=myorg,c=at).
+ldap_bind | **Optional.** LDAP bind DN (if required).
+ldap_pass | **Optional.** LDAP password (if required).
+ldap_starttls           | **Optional.** Use STARTTLS mechanism introduced in protocol version 3.
+ldap_ssl | **Optional.** Use LDAPS (LDAP v2 SSL method). This also sets the default port to 636.
+ldap_v2 | **Optional.** Use LDAP protocol version 2 (enabled by default).
+ldap_v3 | **Optional.** Use LDAP protocol version 3 (disabled by default)
+ldap_warning | **Optional.** Response time to result in warning status (seconds).
+ldap_critical | **Optional.** Response time to result in critical status (seconds).
+ldap_warning_entries | **Optional.** Number of found entries to result in warning status.
+ldap_critical_entries | **Optional.** Number of found entries to result in critical status.
+ldap_timeout | **Optional.** Seconds before connection times out (default: 10).
+ldap_verbose | **Optional.** Show details for command-line debugging (disabled by default)
### <a id="plugin-check-command-load"></a> load
------------------------|---------------------------------------------------------------
mysql_hostname | **Optional.** Host name, IP Address, or unix socket (must be an absolute path).
mysql_port | **Optional.** Port number (default: 3306).
+mysql_socket | **Optional.** Use the specified socket (has no effect if `mysql_hostname` is used).
mysql_ignore_auth | **Optional.** Ignore authentication failure and check for mysql connectivity only.
mysql_database | **Optional.** Check database with indicated name.
mysql_file | **Optional.** Read from the specified client options file.
### <a id="plugin-check-command-nrpe"></a> nrpe
-The `check_nrpe` plugin can be used to query an [NRPE](http://docs.icinga.com/latest/en/nrpe.html)
+The `check_nrpe` plugin can be used to query an [NRPE](https://docs.icinga.com/latest/en/nrpe.html)
server or [NSClient++](https://www.nsclient.org). **Note**: This plugin
is considered insecure/deprecated.
nrpe_arguments | **Optional.** Arguments that should be passed to the command. Multiple arguments must be defined as array.
nrpe_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
nrpe_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
+nrpe_version_2 | **Optional.** Use this if you want to connect using NRPE v2 protocol. Defaults to false.
### <a id="plugin-check-command-nscp"></a> nscp
Name | Description
------------------------------|--------------
ssl_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
-ssl_port | **Required.** The port that should be checked.
+ssl_port | **Optional.** The port that should be checked. Defaults to 443.
ssl_timeout | **Optional.** Timeout in seconds for the connect and handshake. The plugin default is 10 seconds.
-ssl_cert_valid_days_warn | **Optional.** Warning threshold for days before the certificate will expire. When used, ssl_cert_valid_days_critical must also be set.
+ssl_cert_valid_days_warn | **Optional.** Warning threshold for days before the certificate will expire. When used, the default for ssl_cert_valid_days_critical is 0.
ssl_cert_valid_days_critical | **Optional.** Critical threshold for days before the certificate will expire. When used, ssl_cert_valid_days_warn must also be set.
ssl_sni                      | **Optional.** The `server_name` that is sent to select the SSL certificate to check. Important if SNI is used. Defaults to "$ssl_address$".
Check command object for the `check_disk.exe` plugin.
Aggregates the free disk space of all volumes and mount points it can find, or the ones defined in `disk_win_path`. Ignores removable storage like flash drives and discs (CD, DVD etc.).
+> **Note**
+>
+> Percentage based thresholds can be used by adding a '%' to the threshold
+> value.
+
Custom attributes:
Name | Description
Name | Description
:-------------------|:------------
-update\_win\_warn | If set, returns warning when important updates are available
-update\_win\_crit | If set, return critical when important updates that require a reboot are available.
-update\_win\_reboot | Set to treat 'may need update' as 'definitely needs update'
+update\_win\_warn | **Optional**. If set, returns warning when important updates are available.
+update\_win\_crit | **Optional**. If set, return critical when important updates that require a reboot are available.
+update\_win\_reboot | **Optional**. Set to treat 'may need update' as 'definitely needs update'. Please note that this is true for almost every update and is therefore not recommended.
+
+
+In contrast to most other plugins, the values of check_update's custom attributes do not set thresholds, but just enable/disable the behaviour described in the table above.
+They can be enabled/disabled, for example, by setting them to "true" or "false"; "1" or "0" would also work.
+Thresholds will always be "1".
+
+> **Note**
+>
+> If they are enabled, performance data will be shown in the web interface.
+> If run without the optional parameters, the plugin will output critical if any important updates are available.
### <a id="windows-plugins-uptime-windows"></a> uptime-windows
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
-Name | Description
-------------------------|--------------
-snmp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
-snmp_nocrypt | **Optional.** Define SNMP encryption. If set, **snmp_v3** needs to be set. Defaults to false.
-snmp_community | **Optional.** The SNMP community. Defaults to "public".
-snmp_port | **Optional.** The SNMP port connection.
-snmp_v2 | **Optional.** SNMP version to 2c. Defaults to false.
-snmp_v3 | **Optional.** SNMP version to 3. Defaults to false.
-snmp_login | **Optional.** SNMP version 3 username. Defaults to "snmpuser".
-snmp_password | **Required.** SNMP version 3 password. No value defined as default.
-snmp_v3_use_privpass | **Optional.** Define to use SNMP version 3 priv password. Defaults to false.
-snmp_v3_use_authprotocol| **Optional.** Define to use SNMP version 3 authentication protocol. Defaults to false.
-snmp_authprotocol | **Optional.** SNMP version 3 authentication protocol. Defaults to "md5,des".
-snmp_privpass | **Required.** SNMP version 3 priv password. No value defined as default..
-snmp_warn | **Optional.** The warning threshold.
-snmp_crit | **Optional.** The critical threshold.
-snmp_process_name | **Optional.** Name of the process (regexp). No trailing slash!. Defaults to ".*".
-snmp_perf | **Optional.** Enable perfdata values. Defaults to true.
-snmp_timeout | **Optional.** The command timeout in seconds. Defaults to 5 seconds.
-snmp_process_use_params | **Optional.** Add process parameters to process name for regexp matching. Example: "named.*-t /var/named/chroot" will only select named process with this parameter. Defaults to false.
-
+Name | Description
+---------------------------|--------------
+snmp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+snmp_nocrypt | **Optional.** Define SNMP encryption. If set, **snmp_v3** needs to be set. Defaults to false.
+snmp_community | **Optional.** The SNMP community. Defaults to "public".
+snmp_port | **Optional.** The SNMP port connection.
+snmp_v2 | **Optional.** SNMP version to 2c. Defaults to false.
+snmp_v3 | **Optional.** SNMP version to 3. Defaults to false.
+snmp_login | **Optional.** SNMP version 3 username. Defaults to "snmpuser".
+snmp_password | **Required.** SNMP version 3 password. No value defined as default.
+snmp_v3_use_privpass | **Optional.** Define to use SNMP version 3 priv password. Defaults to false.
+snmp_v3_use_authprotocol | **Optional.** Define to use SNMP version 3 authentication protocol. Defaults to false.
+snmp_authprotocol | **Optional.** SNMP version 3 authentication protocol. Defaults to "md5,des".
+snmp_privpass              | **Required.** SNMP version 3 priv password. No value defined as default.
+snmp_warn | **Optional.** The warning threshold.
+snmp_crit | **Optional.** The critical threshold.
+snmp_process_name          | **Optional.** Name of the process (regexp). No trailing slash! Defaults to ".*".
+snmp_perf | **Optional.** Enable perfdata values. Defaults to true.
+snmp_timeout | **Optional.** The command timeout in seconds. Defaults to 5 seconds.
+snmp_process_use_params | **Optional.** Add process parameters to process name for regexp matching. Example: "named.*-t /var/named/chroot" will only select named process with this parameter. Defaults to false.
+snmp_process_mem_usage | **Optional.** Define to check memory usage for the process. Defaults to false.
+snmp_process_mem_threshold | **Optional.** Defines the warning and critical thresholds in Mb when snmp_process_mem_usage set to true. Example "512,1024". Defaults to "0,0".
+snmp_process_cpu_usage | **Optional.** Define to check CPU usage for the process. Defaults to false.
+snmp_process_cpu_threshold | **Optional.** Defines the warning and critical thresholds in % when snmp_process_cpu_usage set to true. If more than one CPU, value can be > 100% : 100%=1 CPU. Example "15,50". Defaults to "0,0".
## <a id="plugin-contrib"></a> Contributed Plugin Check Commands
is set to the path where the user installs custom plugins and can be enabled by
uncommenting the corresponding line in [icinga2.conf](4-configuring-icinga-2.md#icinga2-conf):
- include <plugin-contrib>
+```
+vim /etc/icinga2/icinga2.conf
+
+include <plugin-contrib>
+```
+
+This is enabled by default since Icinga 2 2.5.0.
### <a id="plugin-contrib-databases"></a> Databases
-All database plugins go in this category.
+This category contains plugins for various database servers.
#### <a id="plugin-contrib-command-db2_health"></a> db2_health
-The plugin `db2_health` utilises Perl DBD::DB2.
-For release tarballs and detailed documentation especially on the different modes and required permissions see [https://labs.consol.de](https://labs.consol.de/nagios/check_db2_health/). For development check [https://github.com](https://github.com/lausser/check_db2_health).
+The [check_db2_health](https://labs.consol.de/nagios/check_db2_health/) plugin
+uses the `DBD::DB2` Perl library to monitor a [DB2](https://www.ibm.com/support/knowledgecenter/SSEPGG_11.1.0/)
+database.
+
+The Git repository is located on [GitHub](https://github.com/lausser/check_db2_health).
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
db2_health_maxinactivity | **Optional.** Used for the maximum amount of time a certain event has not happened.
db2_health_mitigation | **Optional.** Classifies the severity of an offline tablespace.
db2_health_lookback | **Optional.** How many days in the past db2_health check should look back to calculate exitcode.
+db2_health_report | **Optional.** Report can be used to output only the bad news. Possible values are "short", "long", "html". Defaults to `short`.
db2_health_env_db2_home | **Required.** Specifies the location of the db2 client libraries as environment variable `DB2_HOME`. Defaults to "/opt/ibm/db2/V10.5".
db2_health_env_db2_version | **Optional.** Specifies the DB2 version as environment variable `DB2_VERSION`.
#### <a id="plugin-contrib-command-mssql_health"></a> mssql_health
-The plugin `mssql_health` utilises Perl DBD::Sybase based on FreeTDS to connect to MSSQL databases for monitoring.
-For release tarballs, detailed documentation especially on the different modes and scripts for creating a monitoring user see [https://labs.consol.de](https://labs.consol.de/nagios/check_mssql_health/). For development check [https://github.com](https://github.com/lausser/check_mssql_health).
+The [check_mssql_health](https://labs.consol.de/nagios/check_mssql_health/index.html) plugin
+uses the `DBD::Sybase` Perl library based on [FreeTDS](http://www.freetds.org/) to monitor a
+[MS SQL](https://www.microsoft.com/en-us/sql-server/) server.
+
+The Git repository is located on [GitHub](https://github.com/lausser/check_mssql_health).
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
mssql_health_units | **Optional.** This is used for a better output of mode=sql and for specifying thresholds for mode=tablespace-free. Possible values are "%", "KB", "MB" and "GB".
mssql_health_offlineok | **Optional.** Set this to true if offline databases are perfectly ok for you. Defaults to false.
mssql_health_commit | **Optional.** Set this to true to turn on autocommit for the dbd::sybase module. Defaults to false.
+mssql_health_notemp | **Optional.** Set this to true to ignore temporary databases/tablespaces. Defaults to false.
+mssql_health_nooffline | **Optional.** Set this to true to ignore offline databases. Defaults to false.
+mssql_health_lookback | **Optional.** The amount of time you want to look back when calculating average rates.
+mssql_health_report | **Optional.** Report can be used to output only the bad news. Possible values are "short", "long", "html". Defaults to `short`.
#### <a id="plugin-contrib-command-mysql_health"></a> mysql_health
-The plugin `mysql_health` utilises Perl DBD::MySQL to connect to MySQL databases for monitoring.
-For release tarballs and detailed documentation especially on the different modes and required permissions see [https://labs.consol.de](https://labs.consol.de/nagios/check_mysql_health/). For development check [https://github.com](https://github.com/lausser/check_mysql_health).
+The [check_mysql_health](https://labs.consol.de/nagios/check_mysql_health/index.html) plugin
+uses the `DBD::MySQL` Perl library to monitor a
+[MySQL](https://dev.mysql.com/downloads/mysql/) or [MariaDB](https://mariadb.org/about/) database.
+
+The Git repository is located on [GitHub](https://github.com/lausser/check_mysql_health).
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
#### <a id="plugin-contrib-command-oracle_health"></a> oracle_health
-The plugin `oracle_health` utilises Perl DBD::Oracle based on oracle-instantclient-sdk or sqlplus to connect to Oracle databases for monitoring.
-For release tarballs and detailed documentation especially on the different modes and required permissions see [https://labs.consol.de](https://labs.consol.de/nagios/check_oracle_health/). For development check [https://github.com](https://github.com/lausser/check_oracle_health).
+The [check_oracle_health](https://labs.consol.de/nagios/check_oracle_health/index.html) plugin
+uses the `DBD::Oracle` Perl library to monitor an [Oracle](https://www.oracle.com/database/) database.
+
+The Git repository is located on [GitHub](https://github.com/lausser/check_oracle_health).
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
#### <a id="plugin-contrib-command-postgres"></a> postgres
-The plugin `postgres` utilises the psql binary to connect to PostgreSQL databases for monitoring.
-For release tarballs and detailed documentation especially the different actions and required persmissions see [https://bucardo.org/wiki/Check_postgres](https://bucardo.org/wiki/Check_postgres). For development check [https://github.com](https://github.com/bucardo/check_postgres).
+The [check_postgres](https://bucardo.org/wiki/Check_postgres) plugin
+uses the `psql` binary to monitor a [PostgreSQL](https://www.postgresql.org/about/) database.
+
+The Git repository is located on [GitHub](https://github.com/bucardo/check_postgres).
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
postgres_query | **Optional.** Query for "custom_query" action.
postgres_valtype | **Optional.** Value type of query result for "custom_query".
postgres_reverse | **Optional.** If "postgres_reverse" is set, warning and critical values are reversed for "custom_query" action.
-postgres_tempdir | **Optional.** Specify directory for temporary files. The default directory is dependent on the OS. More details [here](http://perldoc.perl.org/File/Spec.html).
+postgres_tempdir | **Optional.** Specify directory for temporary files. The default directory is dependent on the OS. More details [here](https://perldoc.perl.org/File/Spec.html).
#### <a id="plugin-contrib-command-mongodb"></a> mongodb
-The plugin `mongodb` utilises Python PyMongo.
-For development check [https://github.com](https://github.com/mzupan/nagios-plugin-mongodb).
+The [check_mongodb.py](https://github.com/mzupan/nagios-plugin-mongodb) plugin
+uses the `pymongo` Python library to monitor a [MongoDB](https://docs.mongodb.com/manual/) instance.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
#### <a id="plugin-contrib-command-elasticsearch"></a> elasticsearch
-An [ElasticSearch](https://www.elastic.co/products/elasticsearch) availability
-and performance monitoring plugin available for download at [GitHub](https://github.com/anchor/nagios-plugin-elasticsearch).
-The plugin requires the HTTP API enabled on your ElasticSearch node.
+The [check_elasticsearch](https://github.com/anchor/nagios-plugin-elasticsearch) plugin
+uses the HTTP API to monitor an [Elasticsearch](https://www.elastic.co/products/elasticsearch) node.
+
+Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
Name | Description
-----------------------------|-------------------------------------------------------------------------------------------------------
#### <a id="plugin-contrib-command-redis"></a> redis
-The plugin `redis` can measure response time, hitrate, memory utilization, check replication sync and more. It can also test data in a specified key (if necessary, doing average or sum on range).
-It is provided by `William Leibzon` at [https://github.com](https://github.com/willixix/naglio-plugins/blob/master/check_redis.pl).
+The [check_redis.pl](https://github.com/willixix/naglio-plugins/blob/master/check_redis.pl) plugin
+uses the `Redis` Perl library to monitor a [Redis](https://redis.io/) instance. The plugin can
+measure response time, hitrate, memory utilization, check replication synchronization, etc. It is
+also possible to test data in a specified key and calculate averages or summaries on ranges.
+
+Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
Name | Description
-------------------------|--------------------------------------------------------------------------------------------------------------
### <a id="plugin-contrib-hardware"></a> Hardware
-This category includes all plugins for various hardware checks.
+This category includes all plugin check commands for various hardware checks.
#### <a id="plugin-contrib-command-hpasm"></a> hpasm
-The plugin [check_hpasm](https://labs.consol.de/de/nagios/check_hpasm/index.html) is a plugin to monitor HP hardware through the HP Insight Agent via SNMP.
+The [check_hpasm](https://labs.consol.de/de/nagios/check_hpasm/index.html) plugin
+monitors the hardware health of HP Proliant Servers, provided that the `hpasm`
+(HP Advanced Server Management) software is installed. It is also able to monitor
+the system health of HP Bladesystems and storage systems.
+
+The plugin can run in two different ways:
+
+1. Local execution using the `hpasmcli` command line tool.
+2. Remote SNMP query which invokes the HP Insight Tools on the remote node.
+
+You can either set or omit `hpasm_hostname` custom attribute and select the corresponding node.
+
+The `hpasm_remote` attribute enables the plugin to execute remote SNMP queries if set to `true`.
+For compatibility reasons this attribute uses `true` as default value, and ensures that
+specifying the `hpasm_hostname` always enables remote checks.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
hpasm_privprotocol | **Optional.** The private protocol for SNMPv3 (des\|aes\|aes128\|3des\|3desde).
hpasm_servertype | **Optional.** The type of the server: proliant (default) or bladesystem.
hpasm_eval-nics | **Optional.** Check network interfaces (and groups). Try it and report me whyt you think about it. I need to build up some know how on this subject. If you get an error and think, it is not justified for your configuration, please tell me about it. (alwasy send the output of "snmpwalk -On .... 1.3.6.1.4.1.232" and a description how you setup your nics and why it is correct opposed to the plugins error message.
+hpasm_remote | **Optional.** Run remote SNMP checks if enabled. Otherwise checks are executed locally using the `hpasmcli` binary. Defaults to `true`.
+
+#### <a id="plugin-contrib-command-adaptec-raid"></a> adaptec-raid
+
+The [check_adaptec_raid](https://github.com/thomas-krenn/check_adaptec_raid) plugin
+uses the `arcconf` binary to monitor Adaptec RAID controllers.
+
+Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------|-----------------------------------------------------------------------
+adaptec_controller_number | **Required.** Controller number to monitor.
+arcconf_path | **Required.** Path to the `arcconf` binary, e.g. "/sbin/arcconf".
+
+#### <a id="plugin-contrib-command-lsi-raid"></a> lsi-raid
+
+The [check_lsi_raid](https://github.com/thomas-krenn/check_lsi_raid) plugin
+uses the `storcli` binary to monitor MegaRAID RAID controllers.
+
+Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------|-----------------------------------------------------------------------
+lsi_controller_number | **Required.** Controller number to monitor.
+storcli_path | **Required.** Path to the `storcli` binary, e.g. "/usr/sbin/storcli".
+
+#### <a id="plugin-contrib-command-smart-attributes"></a> smart-attributes
+
+The [check_smart_attributes](https://github.com/thomas-krenn/check_smart_attributes) plugin
+uses the `smartctl` binary to monitor SMART values of SSDs and HDDs.
+
+Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------|-----------------------------------------------------------------------
+smart_attributes_config_path | **Required.** Path to the smart attributes config file (e.g. check_smartdb.json).
+smart_attributes_device | **Required.** Device name (e.g. /dev/sda) to monitor.
### <a id="plugin-contrib-icingacli"></a> IcingaCLI
#### <a id="plugin-contrib-icingacli-businessprocess"></a> Business Process
-This subcommand is provided by the [business process module](https://exchange.icinga.com/icinga/Business+Process) and executed as `icingacli-businessprocess`. The module is hosted by the Icinga project on its [project homepage](https://github.com/Icinga/icingaweb2-module-businessprocess).
+This subcommand is provided by the [business process module](https://exchange.icinga.com/icinga/Business+Process)
+and executed as `icingacli businessprocess` CLI command.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
------------------------------------------|-----------------------------------------------------------------------------------------
icingacli_businessprocess_process | **Required.** Business process to monitor.
icingacli_businessprocess_config | **Optional.** Configuration file containing your business process without file extension.
-icingacli_businessprocess_details | **Optional.** Get details for root cause analyses. Defaults to false.
+icingacli_businessprocess_details | **Optional.** Get details for root cause analysis. Defaults to false.
+icingacli_businessprocess_statetype | **Optional.** Define which state type to look at, `soft` or `hard`. Overrides the default value inside the businessprocess module, if configured.
### <a id="plugin-contrib-ipmi"></a> IPMI Devices
#### <a id="plugin-contrib-command-ipmi-sensor"></a> ipmi-sensor
-With the plugin `ipmi-sensor` provided by <a href="https://www.thomas-krenn.com/">Thomas-Krenn.AG</a> you can monitor sensor data for IPMI devices. See https://www.thomas-krenn.com/en/wiki/IPMI_Sensor_Monitoring_Plugin for installation and configuration instructions.
+The [check_ipmi_sensor](https://github.com/thomas-krenn/check_ipmi_sensor_v3) plugin
+uses the `ipmimonitoring` binary to monitor sensor data for IPMI devices. Please
+read the [documentation](https://www.thomas-krenn.com/en/wiki/IPMI_Sensor_Monitoring_Plugin)
+for installation and configuration details.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
ipmi_exclude_sensor | **Optional.** Exclude sensor based on IPMI sensor type. (Comma-separated)
ipmi_exclude_sel | **Optional.** Exclude SEL entries of specific sensor types. (comma-separated list).
ipmi_sensor_id | **Optional.** Include sensor matching ipmi_sensor_id.
-ipmi_protocal_lan_version | **Optional.** Change the protocol LAN version. Defaults to "LAN_2_0".
+ipmi_protocol_lan_version | **Optional.** Change the protocol LAN version. Defaults to "LAN_2_0".
ipmi_number_of_active_fans | **Optional.** Number of fans that should be active. Otherwise a WARNING state is returned.
ipmi_show_fru | **Optional.** Print the product serial number if it is available in the IPMI FRU data.
ipmi_no_sel_checking | **Optional.** Turn off system event log checking via ipmi-sel.
ipmi_verbose | **Optional.** Be Verbose multi line output, also with additional details for warnings.
ipmi_debug | **Optional.** Be Verbose debugging output, followed by normal multi line output.
+#### <a id="plugin-contrib-command-ipmi-alive"></a> ipmi-alive
+
+The `ipmi-alive` check command allows you to create a ping check for the IPMI Interface.
+Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------------------|-----------------------------------------------------------------------------------------------------
+ping_address | **Optional.** The address of the IPMI interface. Defaults to "$address$" if the IPMI interface's `address` attribute is set, "$address6$" otherwise.
+ping_wrta | **Optional.** The RTA warning threshold in milliseconds. Defaults to 5000.
+ping_wpl | **Optional.** The packet loss warning threshold in %. Defaults to 100.
+ping_crta | **Optional.** The RTA critical threshold in milliseconds. Defaults to 5000.
+ping_cpl | **Optional.** The packet loss critical threshold in %. Defaults to 100.
+ping_packets | **Optional.** The number of packets to send. Defaults to 1.
+ping_timeout | **Optional.** The plugin timeout in seconds. Defaults to 0 (no timeout).
### <a id="plugins-contrib-log-management"></a> Log Management
#### <a id="plugin-contrib-command-graphite"></a> graphite
-Check command object for the [check_graphite](https://github.com/obfuscurity/nagios-scripts) plugin.
+The [check_graphite](https://github.com/obfuscurity/nagios-scripts) plugin
+uses the `rest-client` Ruby library to monitor a [Graphite](https://graphiteapp.org) instance.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
#### <a id="plugin-contrib-command-interfacetable"></a> interfacetable
-The plugin `interfacetable` generates a html page containing information about the monitored node and all of its interfaces. The actively developed and maintained version is `interfacetable_v3t` provided by `Yannick Charton` on [http://www.tontonitch.com](http://www.tontonitch.com/tiki/tiki-index.php?page=Nagios+plugins+-+interfacetable_v3t) or [https://github.com](https://github.com/Tontonitch/interfacetable_v3t).
+The [check_interfacetable_v3t](http://www.tontonitch.com/tiki/tiki-index.php?page=Nagios+plugins+-+interfacetable_v3t) plugin
+generates a html page containing information about the monitored node and all of its interfaces.
+
+The Git repository is located on [GitHub](https://github.com/Tontonitch/interfacetable_v3t).
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
#### <a id="plugin-contrib-command-iftraffic"></a> iftraffic
-The plugin [check_iftraffic](https://exchange.icinga.com/exchange/iftraffic)
+The [check_iftraffic](https://exchange.icinga.com/exchange/iftraffic) plugin
checks the utilization of a given interface name using the SNMP protocol.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
#### <a id="plugin-contrib-command-iftraffic64"></a> iftraffic64
-The plugin [check_iftraffic64](https://exchange.icinga.com/exchange/iftraffic64)
+The [check_iftraffic64](https://exchange.icinga.com/exchange/iftraffic64) plugin
checks the utilization of a given interface name using the SNMP protocol.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
#### <a id="plugin-contrib-command-interfaces"></a> interfaces
-The plugin [check_interfaces](https://www.netways.org/projects/check-interfaces)
-Check interfaces and utilization.
+The [check_interfaces](https://git.netways.org/plugins/check_interfaces) plugin
+uses SNMP to monitor network interfaces and their utilization.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
#### <a id="plugin-contrib-command-nwc_health"></a> nwc_health
-The plugin [check_nwc_health](https://labs.consol.de/de/nagios/check_nwc_health/index.html)
-Check switches, router, there interfaces and utilization.
+The [check_nwc_health](https://labs.consol.de/de/nagios/check_nwc_health/index.html) plugin
+uses SNMP to monitor network components. The plugin is able to generate interface statistics,
+check hardware (CPU, memory, fan, power, etc.), monitor firewall policies, HSRP, load-balancer
+pools, processor and memory usage.
+
+Currently the following network components are supported: Cisco IOS, Cisco Nexus, Cisco ASA,
+Cisco PIX, F5 BIG-IP, CheckPoint Firewall1, Juniper NetScreen, HP Procurve, Nortel, Brocade 4100/4900,
+EMC DS 4700, EMC DS 24, Allied Telesyn. Blue Coat SG600, Cisco Wireless Lan Controller 5500,
+Brocade ICX6610-24-HPOE, Cisco UC Telefonzeugs, FOUNDRY-SN-AGENT-MIB, FRITZ!BOX 7390, FRITZ!DECT 200,
+Juniper IVE, Pulse-Gateway MAG4610, Cisco IronPort AsyncOS, Foundry, etc. A complete list can be
+found in the plugin [documentation](https://labs.consol.de/nagios/check_nwc_health/index.html).
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
nwc_health_units | **Optional.** One of %, B, KB, MB, GB, Bit, KBi, MBi, GBi. (used for e.g. mode interface-usage)
nwc_health_name2 | **Optional.** The secondary name of a component.
nwc_health_role | **Optional.** The role of this device in a hsrp group (active/standby/listen).
-nwc_health_report | **Optional.** Can be used to shorten the output.
+nwc_health_report | **Optional.** Can be used to shorten the output. Possible values are: 'long' (default), 'short' (to shorten if available), or 'html' (to produce some html outputs if available)
nwc_health_lookback | **Optional.** The amount of time you want to look back when calculating average rates. Use it for mode interface-errors or interface-usage. Without --lookback the time between two runs of check_nwc_health is the base for calculations. If you want your checkresult to be based for example on the past hour, use --lookback 3600.
nwc_health_warning | **Optional.** The warning threshold
nwc_health_critical | **Optional.** The critical threshold
nwc_health_warningx | **Optional.** The extended warning thresholds
nwc_health_criticalx | **Optional.** The extended critical thresholds
-nwc_health_mitigation | **Optional.** The parameter allows you to change a critical error to a warning.
+nwc_health_mitigation | **Optional.** The parameter allows you to change a critical error to a warning (1) or ok (0).
nwc_health_selectedperfdata | **Optional.** The parameter allows you to limit the list of performance data. It's a perl regexp. Only matching perfdata show up in the output.
nwc_health_morphperfdata | **Optional.** The parameter allows you to change performance data labels. It's a perl regexp and a substitution. --morphperfdata '(.*)ISATAP(.*)'='$1patasi$2'
nwc_health_negate | **Optional.** The parameter allows you to map exit levels, such as warning=critical.
### <a id="plugin-contrib-operating-system"></a> Operating System
-In this category you can find plugins for gathering information about your operating system or the system beneath like memory usage.
+This category contains plugins which gather details about your operating system
+or the guest system.
#### <a id="plugin-contrib-command-mem"></a> mem
-The plugin `mem` is used for gathering information about memory usage on linux and unix hosts. It is able to count cache memory as free when comparing it to the thresholds. It is provided by `Justin Ellison` on [https://github.com](https://github.com/justintime/nagios-plugins). For more details see the developers blog [http://sysadminsjourney.com](http://sysadminsjourney.com/content/2009/06/04/new-and-improved-checkmempl-nagios-plugin).
+The [check_mem.pl](https://github.com/justintime/nagios-plugins) plugin checks the
+memory usage on linux and unix hosts. It is able to count cache memory as free when
+compared to thresholds. More details can be found on [this blog entry](http://sysadminsjourney.com/content/2009/06/04/new-and-improved-checkmempl-nagios-plugin).
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
#### <a id="plugin-contrib-command-running_kernel"></a> running_kernel
-Check command object for the `check_running_kernel` plugin
-provided by the `nagios-plugin-contrib` package on Debian.
+The [check_running_kernel](https://packages.debian.org/stretch/nagios-plugins-contrib) plugin
+is provided by the `nagios-plugin-contrib` package on Debian/Ubuntu.
Custom attributes:
#### <a id="plugin-contrib-command-iostats"></a> iostats
-The plugin [check_iostats](https://github.com/dnsmichi/icinga-plugins/blob/master/scripts/check_iostats) is used to monitor I/O with `iostats` on a Linux host. The default thresholds are rather high so you can use a grapher for baselining before setting your own.
+The [check_iostats](https://github.com/dnsmichi/icinga-plugins/blob/master/scripts/check_iostats) plugin
+uses the `iostat` binary to monitor I/O on a Linux host. The default thresholds are rather high
+so you can use a grapher for baselining before setting your own.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
#### <a id="plugin-contrib-command-iostat"></a> iostat
-The plugin [check_iostat](https://github.com/dnsmichi/icinga-plugins/blob/master/scripts/check_iostat) is used to monitor I/O with `iostat` on a Linux host. The default thresholds are rather high so you can use a grapher for baselining before setting your own.
+The [check_iostat](https://github.com/dnsmichi/icinga-plugins/blob/master/scripts/check_iostat) plugin
+uses the `iostat` binary to monitor disk I/O on a Linux host. The default thresholds are rather high
+so you can use a grapher for baselining before setting your own.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
#### <a id="plugin-contrib-command-yum"></a> yum
-The plugin [check_yum](https://github.com/calestyo/check_yum) is used to check the YUM package
+The [check_yum](https://github.com/calestyo/check_yum) plugin checks the YUM package
management system for package updates.
The plugin requires the `yum-plugin-security` package to differentiate between security and normal updates.
#### <a id="plugins-contrib-command-glusterfs"></a> glusterfs
-The plugin [glusterfs](https://www.unixadm.org/software/nagios-stuff/checks/check_glusterfs) is used to check the GlusterFS storage health on the server.
+The [glusterfs](https://www.unixadm.org/software/nagios-stuff/checks/check_glusterfs) plugin
+is used to check the GlusterFS storage health on the server.
The plugin requires `sudo` permissions.
+Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
+
Name | Description
---------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
glusterfs_perfdata | **Optional.** Print perfdata of all or the specified volume.
#### <a id="plugin-contrib-command-esxi-hardware"></a> esxi_hardware
-The plugin `esxi_hardware` is a plugin to monitor hardware of ESXi servers through the vmware api and cim service. It is provided by `Claudio Kuenzler` on [http://www.claudiokuenzler.com](http://www.claudiokuenzler.com/nagios-plugins/check_esxi_hardware.php). For instruction on creating the required local user and workarounds for some hardware types have a look on his homepage.
+The [check_esxi_hardware.py](https://www.claudiokuenzler.com/nagios-plugins/check_esxi_hardware.php) plugin
+uses the [pywbem](https://pywbem.github.io/pywbem/) Python library to monitor the hardware of ESXi servers
+through the [VMWare API](https://www.vmware.com/support/pubs/sdk_pubs.html) and CIM service.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
Check commands for the [check_vmware_esx](https://github.com/BaldMansMojo/check_vmware_esx) plugin.
-##### <a id="plugin-contrib-vmware-esx-dc-volumes"></a> vmware-esx-dc-volumes
+**vmware-esx-dc-volumes**
Check command object for the `check_vmware_esx` plugin. Shows all datastore volumes info.
vmware_crit | **Optional.** The critical threshold for volumes. Defaults to "90%".
-##### <a id="plugin-contrib-vmware-esx-dc-runtime-info"></a> vmware-esx-dc-runtime-info
+**vmware-esx-dc-runtime-info**
Check command object for the `check_vmware_esx` plugin. Shows all runtime info for the datacenter/Vcenter.
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-dc-runtime-listvms"></a> vmware-esx-dc-runtime-listvms
+**vmware-esx-dc-runtime-listvms**
Check command object for the `check_vmware_esx` plugin. List of vmware machines and their power state. BEWARE!! In larger environments systems can cause trouble displaying the information needed due to the mass of data. Use **vmware_alertonly** to avoid this.
vmware_multiline | **Optional.** Multiline output in overview. This mean technically that a multiline output uses a HTML **\<br\>** for the GUI. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-dc-runtime-listhost"></a> vmware-esx-dc-runtime-listhost
+**vmware-esx-dc-runtime-listhost**
Check command object for the `check_vmware_esx` plugin. List of VMware ESX hosts and their power state.
vmware_multiline | **Optional.** Multiline output in overview. This mean technically that a multiline output uses a HTML **\<br\>** for the GUI. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-dc-runtime-listcluster"></a> vmware-esx-dc-runtime-listcluster
+**vmware-esx-dc-runtime-listcluster**
Check command object for the `check_vmware_esx` plugin. List of VMware clusters and their states.
vmware_multiline | **Optional.** Multiline output in overview. This mean technically that a multiline output uses a HTML **\<br\>** for the GUI. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-dc-runtime-issues"></a> vmware-esx-dc-runtime-issues
+**vmware-esx-dc-runtime-issues**
Check command object for the `check_vmware_esx` plugin. All issues for the host.
vmware_multiline | **Optional.** Multiline output in overview. This mean technically that a multiline output uses a HTML **\<br\>** for the GUI. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-dc-runtime-status"></a> vmware-esx-dc-runtime-status
+**vmware-esx-dc-runtime-status**
Check command object for the `check_vmware_esx` plugin. Overall object status (gray/green/red/yellow).
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-dc-runtime-tools"></a> vmware-esx-dc-runtime-tools
+**vmware-esx-dc-runtime-tools**
Check command object for the `check_vmware_esx` plugin. Vmware Tools status.
vmware_openvmtools | **Optional** Prevent CRITICAL state for installed and running Open VM Tools.
-##### <a id="plugin-contrib-vmware-esx-soap-host-check"></a> vmware-esx-soap-host-check
+**vmware-esx-soap-host-check**
Check command object for the `check_vmware_esx` plugin. Simple check to verify a successful connection to VMware SOAP API.
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-host-uptime"></a> vmware-esx-soap-host-uptime
+**vmware-esx-soap-host-uptime**
Check command object for the `check_vmware_esx` plugin. Displays uptime of the VMware host.
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-host-cpu"></a> vmware-esx-soap-host-cpu
+**vmware-esx-soap-host-cpu**
Check command object for the `check_vmware_esx` plugin. CPU usage in percentage.
vmware_crit | **Optional.** The critical threshold in percent. Defaults to "90%".
-##### <a id="plugin-contrib-vmware-esx-soap-host-cpu-ready"></a> vmware-esx-soap-host-cpu-ready
+**vmware-esx-soap-host-cpu-ready**
Check command object for the `check_vmware_esx` plugin. Percentage of time that the virtual machine was ready, but could not get scheduled to run on the physical CPU. CPU ready time is dependent on the number of virtual machines on the host and their CPU loads. High or growing ready time can be a hint at CPU bottlenecks.
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-host-cpu-wait"></a> vmware-esx-soap-host-cpu-wait
+**vmware-esx-soap-host-cpu-wait**
Check command object for the `check_vmware_esx` plugin. CPU time spent in wait state. The wait total includes time spent in the CPU idle, CPU swap wait, and CPU I/O wait states. High or growing wait time can be a hint at I/O bottlenecks.
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-host-cpu-usage"></a> vmware-esx-soap-host-cpu-usage
+**vmware-esx-soap-host-cpu-usage**
Check command object for the `check_vmware_esx` plugin. Actively used CPU of the host, as a percentage of the total available CPU. Active CPU is approximately equal to the ratio of the used CPU to the available CPU.
vmware_crit | **Optional.** The critical threshold in percent. Defaults to "90%".
-##### <a id="plugin-contrib-vmware-esx-soap-host-mem"></a> vmware-esx-soap-host-mem
+**vmware-esx-soap-host-mem**
Check command object for the `check_vmware_esx` plugin. All mem info(except overall and no thresholds).
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-host-mem-usage"></a> vmware-esx-soap-host-mem-usage
+**vmware-esx-soap-host-mem-usage**
Check command object for the `check_vmware_esx` plugin. Average mem usage in percentage.
vmware_crit | **Optional.** The critical threshold in percent. Defaults to "90%".
-##### <a id="plugin-contrib-vmware-esx-soap-host-mem-consumed"></a> vmware-esx-soap-host-mem-consumed
+**vmware-esx-soap-host-mem-consumed**
Check command object for the `check_vmware_esx` plugin. Amount of machine memory used on the host. Consumed memory includes memory used by the Service Console, the VMkernel vSphere services, plus the total consumed metrics for all running virtual machines in MB.
vmware_crit | **Optional.** The critical threshold in percent. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-mem-swapused"></a> vmware-esx-soap-host-mem-swapused
+**vmware-esx-soap-host-mem-swapused**
Check command object for the `check_vmware_esx` plugin. Amount of memory that is used by swap. Sum of memory swapped of all powered on VMs and vSphere services on the host in MB. In case of an error all VMs with their swap used will be displayed.
vmware_multiline | **Optional.** Multiline output in overview. This mean technically that a multiline output uses a HTML **\<br\>** for the GUI. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-mem-overhead"></a> vmware-esx-soap-host-mem-overhead
+**vmware-esx-soap-host-mem-overhead**
Check command object for the `check_vmware_esx` plugin. Additional mem used by VM Server in MB.
vmware_crit | **Optional.** The critical threshold in percent. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-mem-memctl"></a> vmware-esx-soap-host-mem-memctl
+**vmware-esx-soap-host-mem-memctl**
Check command object for the `check_vmware_esx` plugin. The sum of all vmmemctl values in MB for all powered-on virtual machines, plus vSphere services on the host. If the balloon target value is greater than the balloon value, the VMkernel inflates the balloon, causing more virtual machine memory to be reclaimed. If the balloon target value is less than the balloon value, the VMkernel deflates the balloon, which allows the virtual machine to consume additional memory if needed (used by VM memory control driver). In case of an error all VMs with their vmmemctl values will be displayed.
vmware_multiline | **Optional.** Multiline output in overview. This mean technically that a multiline output uses a HTML **\<br\>** for the GUI. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-net"></a> vmware-esx-soap-host-net
+**vmware-esx-soap-host-net**
Check command object for the `check_vmware_esx` plugin. Shows net info.
vmware_isregexp | **Optional.** Treat blacklist expression as regexp.
-##### <a id="plugin-contrib-vmware-esx-soap-host-net-usage"></a> vmware-esx-soap-host-net-usage
+**vmware-esx-soap-host-net-usage**
Check command object for the `check_vmware_esx` plugin. Overall network usage in KBps(Kilobytes per Second).
vmware_crit | **Optional.** The critical threshold in KBps(Kilobytes per Second). No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-net-receive"></a> vmware-esx-soap-host-net-receive
+**vmware-esx-soap-host-net-receive**
Check command object for the `check_vmware_esx` plugin. Data receive in KBps(Kilobytes per Second).
vmware_crit | **Optional.** The critical threshold in KBps(Kilobytes per Second). No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-net-send"></a> vmware-esx-soap-host-net-send
+**vmware-esx-soap-host-net-send**
Check command object for the `check_vmware_esx` plugin. Data send in KBps(Kilobytes per Second).
vmware_crit | **Optional.** The critical threshold in KBps(Kilobytes per Second). No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-net-nic"></a> vmware-esx-soap-host-net-nic
+**vmware-esx-soap-host-net-nic**
Check command object for the `check_vmware_esx` plugin. Check all active NICs.
vmware_isregexp | **Optional.** Treat blacklist expression as regexp.
-##### <a id="plugin-contrib-vmware-esx-soap-host-volumes"></a> vmware-esx-soap-host-volumes
+**vmware-esx-soap-host-volumes**
Check command object for the `check_vmware_esx` plugin. Shows all datastore volumes info.
vmware_spaceleft | **Optional.** This has to be used in conjunction with thresholds as mentioned above.
-##### <a id="plugin-contrib-vmware-esx-soap-host-io"></a> vmware-esx-soap-host-io
+**vmware-esx-soap-host-io**
Check command object for the `check_vmware_esx` plugin. Shows all disk io info. Without subselect no thresholds can be given. All I/O values are aggregated from historical intervals over the past 24 hours with a 5 minute sample rate.
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-host-io-aborted"></a> vmware-esx-soap-host-io-aborted
+**vmware-esx-soap-host-io-aborted**
Check command object for the `check_vmware_esx` plugin. Number of aborted SCSI commands.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-io-resets"></a> vmware-esx-soap-host-io-resets
+**vmware-esx-soap-host-io-resets**
Check command object for the `check_vmware_esx` plugin. Number of SCSI bus resets.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-io-read"></a> vmware-esx-soap-host-io-read
+**vmware-esx-soap-host-io-read**
Check command object for the `check_vmware_esx` plugin. Average number of kilobytes read from the disk each second.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-io-read-latency"></a> vmware-esx-soap-host-io-read-latency
+**vmware-esx-soap-host-io-read-latency**
Check command object for the `check_vmware_esx` plugin. Average amount of time (ms) to process a SCSI read command issued from the Guest OS to the virtual machine.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-io-write"></a> vmware-esx-soap-host-io-write
+**vmware-esx-soap-host-io-write**
Check command object for the `check_vmware_esx` plugin. Average number of kilobytes written to disk each second.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-io-write-latency"></a> vmware-esx-soap-host-io-write-latency
+**vmware-esx-soap-host-io-write-latency**
Check command object for the `check_vmware_esx` plugin. Average amount of time (ms) taken to process a SCSI write command issued by the Guest OS to the virtual machine.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-io-usage"></a> vmware-esx-soap-host-io-usage
+**vmware-esx-soap-host-io-usage**
Check command object for the `check_vmware_esx` plugin. Aggregated disk I/O rate. For hosts, this metric includes the rates for all virtual machines running on the host.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-io-kernel-latency"></a> vmware-esx-soap-host-io-kernel-latency
+**vmware-esx-soap-host-io-kernel-latency**
Check command object for the `check_vmware_esx` plugin. Average amount of time (ms) spent by VMkernel processing each SCSI command.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-io-device-latency"></a> vmware-esx-soap-host-io-device-latency
+**vmware-esx-soap-host-io-device-latency**
Check command object for the `check_vmware_esx` plugin. Average amount of time (ms) to complete a SCSI command from the physical device.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-io-queue-latency"></a> vmware-esx-soap-host-io-queue-latency
+**vmware-esx-soap-host-io-queue-latency**
Check command object for the `check_vmware_esx` plugin. Average amount of time (ms) spent in the VMkernel queue.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-io-total-latency"></a> vmware-esx-soap-host-io-total-latency
+**vmware-esx-soap-host-io-total-latency**
Check command object for the `check_vmware_esx` plugin. Average amount of time (ms) taken during the collection interval to process a SCSI command issued by the guest OS to the virtual machine. The sum of kernelWriteLatency and deviceWriteLatency.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-media"></a> vmware-esx-soap-host-media
+**vmware-esx-soap-host-media**
Check command object for the `check_vmware_esx` plugin. List vm's with attached host mounted media like cd,dvd or floppy drives. This is important for monitoring because a virtual machine with a mounted cd or dvd drive cannot be moved to another host.
vmware_multiline | **Optional.** Multiline output in overview. This mean technically that a multiline output uses a HTML **\<br\>** for the GUI. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-service"></a> vmware-esx-soap-host-service
+**vmware-esx-soap-host-service**
Check command object for the `check_vmware_esx` plugin. Shows host service info.
vmware_multiline | **Optional.** Multiline output in overview. This mean technically that a multiline output uses a HTML **\<br\>** for the GUI. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-runtime"></a> vmware-esx-soap-host-runtime
+**vmware-esx-soap-host-runtime**
Check command object for the `check_vmware_esx` plugin. Shows runtime info: VMs, overall status, connection state, health, storagehealth, temperature and sensor are represented as one value and without thresholds.
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-host-runtime-con"></a> vmware-esx-soap-host-runtime-con
+**vmware-esx-soap-host-runtime-con**
Check command object for the `check_vmware_esx` plugin. Shows connection state.
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-host-runtime-listvms"></a> vmware-esx-soap-host-runtime-listvms
+**vmware-esx-soap-host-runtime-listvms**
Check command object for the `check_vmware_esx` plugin. List of VMware machines and their status.
vmware_multiline | **Optional.** Multiline output in overview. This mean technically that a multiline output uses a HTML **\<br\>** for the GUI. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-runtime-status"></a> vmware-esx-soap-host-runtime-status
+**vmware-esx-soap-host-runtime-status**
Check command object for the `check_vmware_esx` plugin. Overall object status (gray/green/red/yellow).
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-host-runtime-health"></a> vmware-esx-soap-host-runtime-health
+**vmware-esx-soap-host-runtime-health**
Check command object for the `check_vmware_esx` plugin. Checks cpu/storage/memory/sensor status.
vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
-##### <a id="plugin-contrib-vmware-esx-soap-host-runtime-health-listsensors"></a> vmware-esx-soap-host-runtime-health-listsensors
+**vmware-esx-soap-host-runtime-health-listsensors**
Check command object for the `check_vmware_esx` plugin. List all available sensors(use for listing purpose only).
vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
-##### <a id="plugin-contrib-vmware-esx-soap-host-runtime-health-nostoragestatus"></a> vmware-esx-soap-host-runtime-health-nostoragestatus
+**vmware-esx-soap-host-runtime-health-nostoragestatus**
Check command object for the `check_vmware_esx` plugin. This is to avoid a double alarm if you use **vmware-esx-soap-host-runtime-health** and **vmware-esx-soap-host-runtime-storagehealth**.
vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
-##### <a id="plugin-contrib-vmware-esx-soap-host-runtime-storagehealth"></a> vmware-esx-soap-host-runtime-storagehealth
+**vmware-esx-soap-host-runtime-storagehealth**
Check command object for the `check_vmware_esx` plugin. Local storage status check.
vmware_multiline | **Optional.** Multiline output in overview. This mean technically that a multiline output uses a HTML **\<br\>** for the GUI. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-runtime-temp"></a> vmware-esx-soap-host-runtime-temp
+**vmware-esx-soap-host-runtime-temp**
Check command object for the `check_vmware_esx` plugin. Lists all temperature sensors.
vmware_multiline | **Optional.** Multiline output in overview. This mean technically that a multiline output uses a HTML **\<br\>** for the GUI. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-runtime-issues"></a> vmware-esx-soap-host-runtime-issues
+**vmware-esx-soap-host-runtime-issues**
Check command object for the `check_vmware_esx` plugin. Lists all configuration issues for the host.
vmware_multiline | **Optional.** Multiline output in overview. This mean technically that a multiline output uses a HTML **\<br\>** for the GUI. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-storage"></a> vmware-esx-soap-host-storage
+**vmware-esx-soap-host-storage**
Check command object for the `check_vmware_esx` plugin. Shows Host storage info.
vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
-##### <a id="plugin-contrib-vmware-esx-soap-host-storage-adapter"></a> vmware-esx-soap-host-storage-adapter
+**vmware-esx-soap-host-storage-adapter**
Check command object for the `check_vmware_esx` plugin. List host bus adapters.
vmware_multiline | **Optional.** Multiline output in overview. This mean technically that a multiline output uses a HTML **\<br\>** for the GUI. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-storage-lun"></a> vmware-esx-soap-host-storage-lun
+**vmware-esx-soap-host-storage-lun**
Check command object for the `check_vmware_esx` plugin. List SCSI logical units. The listing will include: LUN, canonical name of the disc, all of displayed name which is not part of the canonical name and status.
vmware_multiline | **Optional.** Multiline output in overview. This mean technically that a multiline output uses a HTML **\<br\>** for the GUI. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-host-storage-path"></a> vmware-esx-soap-host-storage-path
+**vmware-esx-soap-host-storage-path**
Check command object for the `check_vmware_esx` plugin. List multipaths and the associated paths.
vmware_include | **Optional.** Whitelist paths. No value defined as default.
vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
vmware_multiline | **Optional.** Multiline output in overview. This mean technically that a multiline output uses a HTML **\<br\>** for the GUI. No value defined as default.
+vmware_standbyok | **Optional.** For storage systems where a standby multipath is ok and not a warning. Defaults to false.
-##### <a id="plugin-contrib-vmware-esx-soap-vm-cpu"></a> vmware-esx-soap-vm-cpu
+**vmware-esx-soap-vm-cpu**
Check command object for the `check_vmware_esx` plugin. Shows all CPU usage info.
-##### <a id="plugin-contrib-vmware-esx-soap-vm-cpu-ready"></a> vmware-esx-soap-vm-cpu-ready
+**vmware-esx-soap-vm-cpu-ready**
Check command object for the `check_vmware_esx` plugin. Percentage of time that the virtual machine was ready, but could not get scheduled to run on the physical CPU.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-vm-cpu-wait"></a> vmware-esx-soap-vm-cpu-wait
+**vmware-esx-soap-vm-cpu-wait**
Check command object for the `check_vmware_esx` plugin. CPU time spent in wait state. The wait total includes time spent in the CPU idle, CPU swap wait, and CPU I/O wait states. High or growing wait time can be a hint at I/O bottlenecks.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-vm-cpu-usage"></a> vmware-esx-soap-vm-cpu-usage
+**vmware-esx-soap-vm-cpu-usage**
Check command object for the `check_vmware_esx` plugin. Amount of actively used virtual CPU, as a percentage of total available CPU. This is the host's view of the CPU usage, not the guest operating system view. It is the average CPU utilization over all available virtual CPUs in the virtual machine.
vmware_crit | **Optional.** Critical threshold in percent. Defaults to "90%".
-##### <a id="plugin-contrib-vmware-esx-soap-vm-mem"></a> vmware-esx-soap-vm-mem
+**vmware-esx-soap-vm-mem**
Check command object for the `check_vmware_esx` plugin. Shows all memory info, except overall.
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-vm-mem-usage"></a> vmware-esx-soap-vm-mem-usage
+**vmware-esx-soap-vm-mem-usage**
Check command object for the `check_vmware_esx` plugin. Average mem usage in percentage of configured virtual machine "physical" memory.
vmware_crit | **Optional.** Critical threshold in percent. Defaults to "90%".
-##### <a id="plugin-contrib-vmware-esx-soap-vm-mem-consumed"></a> vmware-esx-soap-vm-mem-consumed
+**vmware-esx-soap-vm-mem-consumed**
Check command object for the `check_vmware_esx` plugin. Amount of guest physical memory in MB consumed by the virtual machine for guest memory. Consumed memory does not include overhead memory. It includes shared memory and memory that might be reserved, but not actually used. Use this metric for charge-back purposes.<br>
**vm consumed memory = memory granted -- memory saved**
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-vm-mem-memctl"></a> vmware-esx-soap-vm-mem-memctl
+**vmware-esx-soap-vm-mem-memctl**
Check command object for the `check_vmware_esx` plugin. Amount of guest physical memory that is currently reclaimed from the virtual machine through ballooning. This is the amount of guest physical memory that has been allocated and pinned by the balloon driver.
-##### <a id="plugin-contrib-vmware-esx-soap-vm-net"></a> vmware-esx-soap-vm-net
+**vmware-esx-soap-vm-net**
Check command object for the `check_vmware_esx` plugin. Shows net info.
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-vm-net-usage"></a> vmware-esx-soap-vm-net-usage
+**vmware-esx-soap-vm-net-usage**
Check command object for the `check_vmware_esx` plugin. Overall network usage in KBps(Kilobytes per Second).
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-vm-net-receive"></a> vmware-esx-soap-vm-net-receive
+**vmware-esx-soap-vm-net-receive**
Check command object for the `check_vmware_esx` plugin. Receive in KBps(Kilobytes per Second).
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-vm-net-send"></a> vmware-esx-soap-vm-net-send
+**vmware-esx-soap-vm-net-send**
Check command object for the `check_vmware_esx` plugin. Send in KBps(Kilobytes per Second).
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-vm-io"></a> vmware-esx-soap-vm-io
+**vmware-esx-soap-vm-io**
Check command object for the `check_vmware_esx` plugin. Shows all disk io info. Without subselect no thresholds can be given. All I/O values are aggregated from historical intervals over the past 24 hours with a 5 minute sample rate.
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-vm-io-read"></a> vmware-esx-soap-vm-io-read
+**vmware-esx-soap-vm-io-read**
Check command object for the `check_vmware_esx` plugin. Average number of kilobytes read from the disk each second.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-vm-io-write"></a> vmware-esx-soap-vm-io-write
+**vmware-esx-soap-vm-io-write**
Check command object for the `check_vmware_esx` plugin. Average number of kilobytes written to disk each second.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-vm-io-usage"></a> vmware-esx-soap-vm-io-usage
+**vmware-esx-soap-vm-io-usage**
Check command object for the `check_vmware_esx` plugin. Aggregated disk I/O rate.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-vm-runtime"></a> vmware-esx-soap-vm-runtime
+**vmware-esx-soap-vm-runtime**
Check command object for the `check_vmware_esx` plugin. Shows virtual machine runtime info.
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-vm-runtime-con"></a> vmware-esx-soap-vm-runtime-con
+**vmware-esx-soap-vm-runtime-con**
Check command object for the `check_vmware_esx` plugin. Shows the connection state.
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-vm-runtime-powerstate"></a> vmware-esx-soap-vm-runtime-powerstate
+**vmware-esx-soap-vm-runtime-powerstate**
Check command object for the `check_vmware_esx` plugin. Shows virtual machine power state: poweredOn, poweredOff or suspended.
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-vm-runtime-status"></a> vmware-esx-soap-vm-runtime-status
+**vmware-esx-soap-vm-runtime-status**
Check command object for the `check_vmware_esx` plugin. Overall object status (gray/green/red/yellow).
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-vm-runtime-consoleconnections"></a> vmware-esx-soap-vm-runtime-consoleconnections
+**vmware-esx-soap-vm-runtime-consoleconnections**
Check command object for the `check_vmware_esx` plugin. Console connections to virtual machine.
vmware_crit | **Optional.** The critical threshold. No value defined as default.
-##### <a id="plugin-contrib-vmware-esx-soap-vm-runtime-gueststate"></a> vmware-esx-soap-vm-runtime-gueststate
+**vmware-esx-soap-vm-runtime-gueststate**
Check command object for the `check_vmware_esx` plugin. Guest OS status. Needs VMware Tools installed and running.
vmware_password | **Optional.** The username's password. No value defined as default.
vmware_authfile | **Optional.** Use auth file instead of username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
-##### <a id="plugin-contrib-vmware-esx-soap-vm-runtime-tools"></a> vmware-esx-soap-vm-runtime-tools
+**vmware-esx-soap-vm-runtime-tools**
Check command object for the `check_vmware_esx` plugin. Guest OS status. VMware tools status.
vmware_openvmtools | **Optional** Prevent CRITICAL state for installed and running Open VM Tools.
-##### <a id="plugin-contrib-vmware-esx-soap-vm-runtime-issues"></a> vmware-esx-soap-vm-runtime-issues
+**vmware-esx-soap-vm-runtime-issues**
Check command object for the `check_vmware_esx` plugin. All issues for the virtual machine.
This category includes all plugins for web-based checks.
-#### <a id="plugin-contrib-command-webinject"></a> webinject
+#### <a id="plugin-contrib-command-apache_status"></a> apache_status
-Check command object for the [check_webinject](http://www.webinject.org/manual.html) plugin.
+The [check_apache_status.pl](https://github.com/lbetz/check_apache_status) plugin
+uses the [/server-status](https://httpd.apache.org/docs/current/mod/mod_status.html)
+HTTP endpoint to monitor status metrics for the Apache webserver.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
Name | Description
-------------------------|--------------
-webinject_config_file | **Optional.** There is a configuration file named 'config.xml' that is used to store configuration settings for your project. You can use this to specify which test case files to run and to set some constants and settings to be used by WebInject.
-webinject_output | **Optional.** This option is followed by a directory name or a prefix to prepended to the output files. This is used to specify the location for writing output files (http.log, results.html, and results.xml). If a directory name is supplied (use either an absolute or relative path and make sure to add the trailing slash), all output files are written to this directory. If the trailing slash is ommitted, it is assumed to a prefix and this will be prepended to the output files. You may also use a combination of a directory and prefix.
-webinject_no_output | **Optional.** Suppresses all output to STDOUT except the results summary.
-webinject_timeout | **Optional.** The value [given in seconds] will be compared to the global time elapsed to run all the tests. If the tests have all been successful, but have taken more time than the 'globaltimeout' value, a warning message is sent back to Icinga.
-webinject_report_type | **Optional.** This setting is used to enable output formatting that is compatible for use with specific external programs. The available values you can set this to are: nagios, mrtg, external and standard.
-webinject_testcase_file | **Optional.** When you launch WebInject in console mode, you can optionally supply an argument for a testcase file to run. It will look for this file in the directory that webinject.pl resides in. If no filename is passed from the command line, it will look in config.xml for testcasefile declarations. If no files are specified, it will look for a default file named 'testcases.xml' in the current [webinject] directory. If none of these are found, the engine will stop and give you an error.
+------------------------|----------------------------------------------------------------------------------
+apache_status_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, `address6` otherwise.
+apache_status_port | **Optional.** the http port.
+apache_status_url | **Optional.** URL to use, instead of the default (http://`apache_status_address`/server-status).
+apache_status_ssl       | **Optional.** Set to use an SSL connection.
+apache_status_timeout   | **Optional.** Timeout in seconds.
+apache_status_warning | **Optional.** Warning threshold (number of open slots, busy workers and idle workers that will cause a WARNING) like ':20,50,:50'.
+apache_status_critical | **Optional.** Critical threshold (number of open slots, busy workers and idle workers that will cause a CRITICAL) like ':10,25,:20'.
+
+
+#### <a id="plugin-contrib-command-ssl_cert"></a> ssl_cert
+
+The [check_ssl_cert](https://github.com/matteocorti/check_ssl_cert) plugin
+uses the openssl binary (and optionally curl) to check an X.509 certificate.
+
+Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------|--------------
+ssl_cert_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+ssl_cert_port | **Optional.** TCP port number (default: 443).
+ssl_cert_file | **Optional.** Local file path. Works only if `ssl_cert_address` is set to "localhost".
+ssl_cert_warn | **Optional.** Minimum number of days a certificate has to be valid.
+ssl_cert_critical | **Optional.** Minimum number of days a certificate has to be valid to issue a critical status.
+ssl_cert_cn | **Optional.** Pattern to match the CN of the certificate.
+ssl_cert_issuer | **Optional.** Pattern to match the issuer of the certificate.
+ssl_cert_org | **Optional.** Pattern to match the organization of the certificate.
+ssl_cert_email | **Optional.** Pattern to match the email address contained in the certificate.
+ssl_cert_serial | **Optional.** Pattern to match the serial number.
+ssl_cert_match_host | **Optional.** Match CN with the host name.
+ssl_cert_selfsigned | **Optional.** Allow self-signed certificate.
+ssl_cert_sni | **Optional.** Sets the TLS SNI (Server Name Indication) extension.
+ssl_cert_timeout | **Optional.** Seconds before connection times out (default: 10)
+ssl_cert_protocol | **Optional.** Use the specific protocol {http,smtp,pop3,imap,ftp,xmpp,irc,ldap} (default: http).
+ssl_cert_clientcert | **Optional.** Use client certificate to authenticate.
+ssl_cert_clientpass | **Optional.** Set passphrase for client certificate.
+ssl_cert_rootcert         | **Optional.** Root certificate or directory to be used for certificate validation.
+ssl_cert_ignore_signature | **Optional.** Do not check if the certificate was signed with SHA1 or MD5.
+ssl_cert_ssl_version | **Optional.** Force specific SSL version out of {ssl2,ssl3,tls1,tls1_1,tls1_2}.
+ssl_cert_disable_ssl_versions | **Optional.** Disable specific SSL versions out of {ssl2,ssl3,tls1,tls1_1,tls1_2}. Multiple versions can be given as array.
+ssl_cert_cipher | **Optional.** Cipher selection: force {ecdsa,rsa} authentication.
+ssl_cert_ignore_expiration | **Optional.** Ignore expiration date.
+ssl_cert_ignore_ocsp | **Optional.** Do not check revocation with OCSP.
+
#### <a id="plugin-contrib-command-jmx4perl"></a> jmx4perl
-The plugin `jmx4perl` utilizes the api provided by the jolokia web application to query java message beans on an application server. It is part of the perl module provided by Roland Huß on [cpan](http://search.cpan.org/~roland/jmx4perl/) including a detailed [documentation](http://search.cpan.org/~roland/jmx4perl/scripts/check_jmx4perl) containing installation tutorial, security advices und usage examples.
+The [check_jmx4perl](http://search.cpan.org/~roland/jmx4perl/scripts/check_jmx4perl) plugin
+uses the HTTP API exposed by the [Jolokia](https://jolokia.org)
+web application and queries Java message beans on an application server. It is
+part of the `JMX::Jmx4Perl` Perl module which includes detailed
+[documentation](http://search.cpan.org/~roland/jmx4perl/scripts/check_jmx4perl).
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
jmx4perl_server | **Optional.** Symbolic name of server url to use, which needs to be configured in the configuration file.
jmx4perl_check | **Optional.** Name of a check configuration as defined in the configuration file, use array if you need arguments.
-#### <a id="plugin-contrib-command-squid"></a> squid
-Plugin for monitoring [Squid](https://exchange.icinga.com/exchange/check_squid).
+#### <a id="plugin-contrib-command-kdc"></a> kdc
+
+The [check_kdc](https://exchange.nagios.org/directory/Plugins/Security/check_kdc/details) plugin
+uses the Kerberos `kinit` binary to monitor Kerberos 5 KDC by acquiring a ticket.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
-Name | Description
-------------------------|----------------------------------------------------------------------------------
-squid_hostname | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
-squid_data | **Optional.** Data to fetch (default: Connections) available data: Connections Cache Resources Memory FileDescriptors.
-squid_port | **Optional.** Port number (default: 3128).
-squid_user | **Optional.** WWW user
-squid_password | **Optional.** WWW password
-squid_warning | **Optional.** Warning threshold. See http://nagiosplug.sourceforge.net/developer-guidelines.html#THRESHOLDFORMAT for the threshold format.
-squid_critical | **Optional.** Critical threshold. See http://nagiosplug.sourceforge.net/developer-guidelines.html#THRESHOLDFORMAT for the threshold format.
-squid_client | **Optional.** Path of squidclient (default: /usr/bin/squidclient).
-squid_timeout | **Optional.** Seconds before plugin times out (default: 15).
+Name | Description
+----------------|--------------------------------------------------------------------------
+kdc_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, `address6` otherwise.
+kdc_port | **Optional** Port on which KDC runs (default 88).
+kdc_principal | **Required** Principal name to authenticate as (including realm).
+kdc_keytab | **Required** Keytab file containing principal's key.
#### <a id="plugin-contrib-command-nginx_status"></a> nginx_status
-Plugin for monitoring [nginx_status](https://github.com/regilero/check_nginx_status).
+The [check_nginx_status.pl](https://github.com/regilero/check_nginx_status) plugin
+uses the [/nginx_status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html)
+HTTP endpoint which provides metrics for monitoring Nginx.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
-Name | Description
-------------------------|----------------------------------------------------------------------------------
-nginx_status_host_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, `address6` otherwise.
+Name | Description
+--------------------------------|----------------------------------------------------------------------------------
+nginx_status_host_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, `address6` otherwise.
nginx_status_port | **Optional.** the http port.
nginx_status_url | **Optional.** URL to use, instead of the default (http://`nginx_status_hostname`/nginx_status).
nginx_status_servername | **Optional.** ServerName to use if you specified an IP to match the good Virtualhost in your target
nginx_status_critical | **Optional.** Critical threshold (number of active connections, ReqPerSec or ConnPerSec that will cause a CRITICAL) like '20000,200,300'.
-#### <a id="plugin-contrib-command-apache_status"></a> apache_status
+#### <a id="plugin-contrib-command-rbl"></a> rbl
-Plugin for monitoring [apache_status](https://github.com/lbetz/check_apache_status).
+The [check_rbl](https://github.com/matteocorti/check_rbl) plugin
+uses the `Net::DNS` Perl library to check whether your SMTP server
+is blacklisted.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
-Name | Description
-------------------------|----------------------------------------------------------------------------------
-apache_status_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, `address6` otherwise.
-apache_status_port | **Optional.** the http port.
-apache_status_url | **Optional.** URL to use, instead of the default (http://`apache_status_address`/server-status).
-apache_status_ssl | **Optional.** set to use ssl connection
-apache_status_timeout | **Optional.** timeout in seconds
-apache_status_warning | **Optional.** Warning threshold (number of open slots, busy workers and idle workers that will cause a WARNING) like ':20,50,:50'.
-apache_status_critical | **Optional.** Critical threshold (number of open slots, busy workers and idle workers that will cause a CRITICAL) like ':10,25,:20'.
+Name | Description
+----------------|--------------------------------------------------------------------------
+rbl_hostname | **Optional.** The address or name of the SMTP server to check. Defaults to "$address$" if the host's `address` attribute is set, `address6` otherwise.
+rbl_server | **Required** List of RBL servers as an array.
+rbl_warning | **Optional** Number of blacklisting servers for a warning.
+rbl_critical | **Optional** Number of blacklisting servers for a critical.
+rbl_timeout     | **Optional** Seconds before plugin times out (default: 15).
-#### <a id="plugin-contrib-command-kdc"></a> kdc
+#### <a id="plugin-contrib-command-squid"></a> squid
-Plugin for monitoring [kdc](https://exchange.nagios.org/directory/Plugins/Security/check_kdc/details).
+The [check_squid](https://exchange.icinga.com/exchange/check_squid) plugin
+uses the `squidclient` binary to monitor a [Squid proxy](http://www.squid-cache.org).
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
-Name | Description
-----------------|--------------------------------------------------------------------------
-kdc_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, `address6` otherwise.
-kdc_port | **Optional** Port on which KDC runs (default 88).
-kdc_principal | **Required** Principal name to authenticate as (including realm).
-kdc_keytab | **Required** Keytab file containing principal's key.
+Name | Description
+------------------------|----------------------------------------------------------------------------------
+squid_hostname | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+squid_data | **Optional.** Data to fetch (default: Connections) available data: Connections Cache Resources Memory FileDescriptors.
+squid_port | **Optional.** Port number (default: 3128).
+squid_user | **Optional.** WWW user
+squid_password | **Optional.** WWW password
+squid_warning | **Optional.** Warning threshold. See http://nagiosplug.sourceforge.net/developer-guidelines.html#THRESHOLDFORMAT for the threshold format.
+squid_critical | **Optional.** Critical threshold. See http://nagiosplug.sourceforge.net/developer-guidelines.html#THRESHOLDFORMAT for the threshold format.
+squid_client | **Optional.** Path of squidclient (default: /usr/bin/squidclient).
+squid_timeout | **Optional.** Seconds before plugin times out (default: 15).
-#### <a id="plugin-contrib-command-rbl"></a> rbl
+#### <a id="plugin-contrib-command-webinject"></a> webinject
-Plugin for monitoring [rbl](https://github.com/matteocorti/check_rbl)
+The [check_webinject](https://labs.consol.de/de/nagios/check_webinject/index.html) plugin
+uses [WebInject](http://www.webinject.org/manual.html) to test web applications
+and web services in an automated fashion.
+It can be used to test individual system components that have HTTP interfaces
+(JSP, ASP, CGI, PHP, AJAX, Servlets, HTML Forms, XML/SOAP Web Services, REST, etc),
+and can be used as a test harness to create a suite of HTTP level automated functional,
+acceptance, and regression tests. A test harness allows you to run many test cases
+and collect/report your results. WebInject offers real-time results
+display and may also be used for monitoring system response times.
Custom attributes passed as [command parameters](3-monitoring-basics.md#command-passing-parameters):
-Name | Description
-----------------|--------------------------------------------------------------------------
-rbl_hostname | **Optional.** The address or name of the SMTP server to check. Defaults to "$address$" if the host's `address` attribute is set, `address6` otherwise.
-rbl_server | **Required** List of RBL servers as an array.
-rbl_warning | **Optional** Number of blacklisting servers for a warning.
-rbl_critical | **Optional** Number of blacklisting servers for a critical.
-tbl_timeout | **Optional** Seconds before plugin times out (default: 15).
+Name | Description
+------------------------|--------------
+webinject_config_file | **Optional.** There is a configuration file named 'config.xml' that is used to store configuration settings for your project. You can use this to specify which test case files to run and to set some constants and settings to be used by WebInject.
+webinject_output        | **Optional.** This option is followed by a directory name or a prefix to be prepended to the output files. This is used to specify the location for writing output files (http.log, results.html, and results.xml). If a directory name is supplied (use either an absolute or relative path and make sure to add the trailing slash), all output files are written to this directory. If the trailing slash is omitted, it is assumed to be a prefix and this will be prepended to the output files. You may also use a combination of a directory and prefix.
+webinject_no_output | **Optional.** Suppresses all output to STDOUT except the results summary.
+webinject_timeout | **Optional.** The value [given in seconds] will be compared to the global time elapsed to run all the tests. If the tests have all been successful, but have taken more time than the 'globaltimeout' value, a warning message is sent back to Icinga.
+webinject_report_type | **Optional.** This setting is used to enable output formatting that is compatible for use with specific external programs. The available values you can set this to are: nagios, mrtg, external and standard.
+webinject_testcase_file | **Optional.** When you launch WebInject in console mode, you can optionally supply an argument for a testcase file to run. It will look for this file in the directory that webinject.pl resides in. If no filename is passed from the command line, it will look in config.xml for testcasefile declarations. If no files are specified, it will look for a default file named 'testcases.xml' in the current [webinject] directory. If none of these are found, the engine will stop and give you an error.
+
Icinga 2 comes with a number of CLI commands which support bash autocompletion.
These CLI commands will allow you to use certain functionality
-provided by and around the Icinga 2 daemon.
+provided by and around Icinga 2.
Each CLI command provides its own help and usage information, so please
make sure to always run them with the `--help` parameter.
Bash Auto-Completion (pressing `<TAB>`) is provided only for the corresponding context.
-While `--config` will suggest and auto-complete files and directories on disk,
-`feature enable` will only suggest disabled features. Try it yourself.
+While `--config` suggests and auto-completes files and directories on disk,
+`feature enable` only suggests disabled features.
RPM and Debian packages install the bash completion files into
`/etc/bash_completion.d/icinga2`.
-You will need to install the `bash-completion` package if not already installed.
+You need to install the `bash-completion` package if not already installed.
RHEL/CentOS/Fedora:
By default the `icinga2` binary loads the `icinga` library. A different application type
can be specified with the `--app` command-line option.
+Note: This is not needed by the average Icinga user, only developers.
### Libraries
Instead of loading libraries using the [`library` config directive](17-language-reference.md#library)
you can also use the `--library` command-line option.
+Note: This is not needed by the average Icinga user, only developers.
### Constants
include <test.conf>
-This would cause Icinga 2 to search its include path for the configuration file
-`test.conf`. By default the installation path for the Icinga Template Library
+This causes Icinga 2 to search its include path for the configuration file
+`test.conf`. By default the installation path for the [Icinga Template Library](10-icinga-template-library.md#icinga-template-library)
is the only search directory.
Using the `--include` command-line option additional search directories can be
## <a id="cli-command-console"></a> CLI command: Console
-The CLI command `console` can be used to evaluate Icinga 2 config expressions, e.g. to test
-[functions](17-language-reference.md#functions).
+The CLI command `console` can be used to debug and evaluate Icinga 2 config expressions,
+e.g. to test [functions](17-language-reference.md#functions) in your local sandbox.
$ icinga2 console
- Icinga 2 (version: v2.4.0)
+ Icinga 2 (version: v2.6.0)
<1> => function test(name) {
<1> .. log("Hello " + name)
<1> .. }
null
<3> =>
+Further usage examples can be found in the [library reference](18-library-reference.md#library-reference) chapter.
On operating systems without the `libedit` library installed there is no
support for line-editing or a command history. However you can
$ rlwrap icinga2 console
-The `console` can be used to connect to a running Icinga 2 instance using
+The debug console can be used to connect to a running Icinga 2 instance using
the [REST API](12-icinga2-api.md#icinga2-api). [API permissions](12-icinga2-api.md#icinga2-api-permissions)
are required for executing config expressions and auto-completion.
> **Note**
-> The console does not currently support SSL certificate verification.
+>
+> The debug console does not currently support SSL certificate verification.
+>
+> Runtime modifications are not validated and might cause the Icinga 2
+> daemon to crash or behave in an unexpected way. Use these runtime changes
+> at your own risk and rather *inspect and debug objects read-only*.
You can specify the API URL using the `--connect` parameter.
Although the password can be specified there, process arguments on UNIX platforms are
usually visible to other users (e.g. through `ps`). In order to securely specify the
-user credentials the console supports two environment variables:
+user credentials the debug console supports two environment variables:
Environment variable | Description
---------------------|-------------
## <a id="cli-command-daemon"></a> CLI command: Daemon
The CLI command `daemon` provides the functionality to start/stop Icinga 2.
-Furthermore it provides the [configuration validation](11-cli-commands.md#config-validation).
+Furthermore it allows to run the [configuration validation](11-cli-commands.md#config-validation).
# icinga2 daemon --help
icinga2 - The Icinga 2 network monitoring daemon (version: v2.6.0)
### Config Files
-Using the `--config` option you can specify one or more configuration files.
+You can specify one or more configuration files with the `--config` option.
Configuration files are processed in the order they're specified on the command-line.
When no configuration file is specified and the `--no-config` is not used
### Config Validation
-The `--validate` option can be used to check if your configuration files
+The `--validate` option can be used to check if configuration files
contain errors. If any errors are found, the exit status is 1, otherwise 0
is returned. More details in the [configuration validation](11-cli-commands.md#config-validation) chapter.
## <a id="cli-command-object"></a> CLI command: Object
The `object` CLI command can be used to list all configuration objects and their
-attributes. The command also shows where each of the attributes was modified.
+attributes. The command also shows where each of the attributes was modified and as such
+provides debug information for further configuration problem analysis.
That way you can also identify which objects have been created from your [apply rules](17-language-reference.md#apply).
+Runtime modifications via the [REST API](12-icinga2-api.md#icinga2-api-config-objects)
+are not immediately updated. Furthermore there is a known issue with
+[group assign expressions](17-language-reference.md#group-assign) which are not reflected in the host object output.
+You need to restart Icinga 2 in order to update the `icinga2.debug` cache file.
+
More information can be found in the [troubleshooting](15-troubleshooting.md#list-configuration-objects) section.
# icinga2 object --help
### <a id="icinga2-api-requests"></a> Requests
Any tool capable of making HTTP requests can communicate with
-the API, for example [curl](http://curl.haxx.se).
+the API, for example [curl](https://curl.haxx.se/).
Requests are only allowed to use the HTTPS protocol so that
traffic remains encrypted.
In addition to these parameters a [filter](12-icinga2-api.md#icinga2-api-filters) should be provided.
-**Note**: Modified attributes do not trigger a re-evaluation of existing
-static [apply rules](3-monitoring-basics.md#using-apply) and [group assignments](3-monitoring-basics.md#group-assign-intro).
-Delete and re-create the objects if you require such changes.
-Furthermore you cannot modify templates which have already been resolved
-during [object creation](12-icinga2-api.md#icinga2-api-config-objects-create).
-
+> **Note**:
+>
+> Modified attributes do not trigger a re-evaluation of existing
+> static [apply rules](3-monitoring-basics.md#using-apply) and [group assignments](3-monitoring-basics.md#group-assign-intro).
+> Delete and re-create the objects if you require such changes.
+>
+> Furthermore you cannot modify templates which have already been resolved
+> during [object creation](12-icinga2-api.md#icinga2-api-config-objects-create).
+> There are attributes which can only be set for [PUT requests](12-icinga2-api.md#icinga2-api-config-objects-create) such as `groups`
+> or `zone`. A complete list of `no_user_modify` attributes can be fetched from the [types](12-icinga2-api.md#icinga2-api-types) URL endpoint.
If attributes are of the Dictionary type, you can also use the indexer format:
comment | string | **Required.** Comment text.
start\_time | timestamp | **Required.** Timestamp marking the beginning of the downtime.
end\_time | timestamp | **Required.** Timestamp marking the end of the downtime.
- duration | integer | **Required.** Duration of the downtime in seconds if `fixed` is set to false.
fixed | boolean | **Optional.** Defaults to `true`. If true, the downtime is `fixed` otherwise `flexible`. See [downtimes](8-advanced-topics.md#downtimes) for more information.
+ duration | integer | **Required for flexible downtimes.** Duration of the downtime in seconds if `fixed` is set to false.
trigger\_name | string | **Optional.** Sets the trigger for a triggered downtime. See [downtimes](8-advanced-topics.md#downtimes) for more information on triggered downtimes.
child\_options | integer | **Optional.** Schedule child downtimes. `0` does not do anything, `1` schedules child downtimes triggered by this downtime, `2` schedules non-triggered downtimes. Defaults to `0`.
The [API permission](12-icinga2-api.md#icinga2-api-permissions) `console` is required for executing
expressions.
+> **Note**
+>
+> Runtime modifications via `execute-script` calls are not validated and might cause the Icinga 2
+> daemon to crash or behave in an unexpected way. Use these runtime changes at your own risk.
+
If you specify a session identifier, the same script context can be reused for multiple requests. This allows you to, for example, set a local variable in a request and use that local variable in another request. Sessions automatically expire after a set period of inactivity (currently 30 minutes).
Example for fetching the command line from the local host's last check result:
There are a couple of existing clients which can be used with the Icinga 2 API:
-* [curl](http://curl.haxx.se) or any other HTTP client really
+* [curl](https://curl.haxx.se/) or any other HTTP client really
* [Icinga 2 console (CLI command)](12-icinga2-api.md#icinga2-api-clients-cli-console)
* [Icinga Studio](12-icinga2-api.md#icinga2-api-clients-icinga-studio)
* [Icinga Web 2 Director](https://www.icinga.com/products/icinga-web-2-modules/)
### <a id="icinga2-api-clients-cli-console"></a> Icinga 2 Console
-By default the [console CLI command](11-cli-commands.md#cli-command-console) evaluates expressions in a local interpreter, i.e. independently from your Icinga 2 daemon. Using the `--connect` parameter you can use the Icinga 2 console to evaluate expressions via the API.
+By default the [console CLI command](11-cli-commands.md#cli-command-console) evaluates
+expressions in a local interpreter, i.e. independently from your Icinga 2 daemon.
+Add the `--connect` parameter to debug and evaluate expressions via the API.
### <a id="icinga2-api-clients-programmatic-examples"></a> API Clients Programmatic Examples
### <a id="addons-graphing-pnp"></a> PNP
-[PNP](http://www.pnp4nagios.org) is a graphing addon.
+[PNP](https://www.pnp4nagios.org) is a graphing addon.
-[PNP](http://www.pnp4nagios.org) is an addon which adds a graphical representation of the performance data collected
+[PNP](https://www.pnp4nagios.org) is an addon which adds a graphical representation of the performance data collected
by the monitoring plugins. The data is stored as rrd (round robin database) files.
Use your distribution's package manager to install the `pnp4nagios` package.
If you're planning to use it, configure it to use the
-[bulk mode with npcd and npcdmod](http://docs.pnp4nagios.org/pnp-0.6/modes#bulk_mode_with_npcd_and_npcdmod)
+[bulk mode with npcd and npcdmod](https://docs.pnp4nagios.org/pnp-0.6/modes#bulk_mode_with_npcd_and_npcdmod)
in combination with Icinga 2's [PerfdataWriter](14-features.md#performance-data). NPCD collects the performance
data files which Icinga 2 generates.
### <a id="addons-graphing-graphite"></a> Graphite
-[Graphite](http://graphite.readthedocs.org/en/latest/) is a time-series database
+[Graphite](https://graphite.readthedocs.org/en/latest/) is a time-series database
storing collected metrics and making them available through restful apis
and web interfaces.
There are Graphite addons available for collecting the performance data files too (e.g. `Graphios`).
-A popular alternative frontend for Graphite is for example [Grafana](http://grafana.org).
+A popular alternative frontend for Graphite is for example [Grafana](https://grafana.org).
### <a id="addons-graphing-influxdb"></a> InfluxDB
# icinga2 feature enable influxdb
-A popular frontend for InfluxDB is for example [Grafana](http://grafana.org).
+A popular frontend for InfluxDB is for example [Grafana](https://grafana.org).
## <a id="addons-visualization"></a> Visualization
By using either [Livestatus](14-features.md#setting-up-livestatus) or
[DB IDO](14-features.md#db-ido) as a backend you can create your own network maps
-based on your monitoring configuration and status data using [NagVis](http://www.nagvis.org).
+based on your monitoring configuration and status data using [NagVis](https://www.nagvis.org).
The configuration in nagvis.ini.php should look like this for Livestatus for example:
### <a id="addons-visualization-thruk"></a> Thruk
-[Thruk](http://www.thruk.org) is an alternative web interface which can be used with Icinga 2
+[Thruk](https://www.thruk.org) is an alternative web interface which can be used with Icinga 2
and the [Livestatus](14-features.md#setting-up-livestatus) feature.
## <a id="log-monitoring"></a> Log Monitoring
Icinga 2 supports three different types of logging:
* File logging
-* Syslog (on *NIX-based operating systems)
+* Syslog (on Linux/UNIX)
* Console logging (`STDOUT` on tty)
You can enable additional loggers using the `icinga2 feature enable`
The IDO (Icinga Data Output) modules for Icinga 2 take care of exporting all
configuration and status information into a database. The IDO database is used
-by a number of projects including Icinga Web 1.x and 2.
+by Icinga Web 2.
Details on the installation can be found in the [Configuring DB IDO](2-getting-started.md#configuring-db-ido-mysql)
chapter. Details on the configuration can be found in the
A list of currently supported external commands can be found [here](23-appendix.md#external-commands-list-detail).
Detailed information on the commands and their required parameters can be found
-on the [Icinga 1.x documentation](http://docs.icinga.com/latest/en/extcommands2.html).
+on the [Icinga 1.x documentation](https://docs.icinga.com/latest/en/extcommands2.html).
## <a id="performance-data"></a> Performance Data
More configuration details can be found [here](9-object-types.md#objecttype-influxdbwriter).
-### <a id="gelfwriter"></a> GELF Writer
+### <a id="graylog-integration"></a> Graylog Integration
-The `Graylog Extended Log Format` (short: [GELF](http://www.graylog2.org/resources/gelf))
+#### <a id="gelfwriter"></a> GELF Writer
+
+The `Graylog Extended Log Format` (short: [GELF](http://docs.graylog.org/en/latest/pages/gelf.html))
can be used to send application logs directly to a TCP socket.
-While it has been specified by the [graylog2](http://www.graylog2.org/) project as their
-[input resource standard](http://www.graylog2.org/resources/gelf), other tools such as
-[Logstash](http://www.logstash.net) also support `GELF` as
-[input type](http://logstash.net/docs/latest/inputs/gelf).
+While it has been specified by the [Graylog](https://www.graylog.org) project as their
+[input resource standard](http://docs.graylog.org/en/latest/pages/sending_data.html), other tools such as
+[Logstash](https://www.elastic.co/products/logstash) also support `GELF` as
+[input type](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-gelf.html).
You can enable the feature using
* State changes
* Notifications
+### <a id="elastic-stack-integration"></a> Elastic Stack Integration
+
+[Icingabeat](https://github.com/icinga/icingabeat) is an Elastic Beat that fetches data
+from the Icinga 2 API and sends it either directly to Elasticsearch or Logstash.
+
+More integrations in development:
+* [Logstash output](https://github.com/Icinga/logstash-output-icinga) for the Icinga 2 API.
+* [Logstash Grok Pattern](https://github.com/Icinga/logstash-grok-pattern) for Icinga 2 logs.
+
+#### <a id="logstash-writer"></a> Logstash Writer
+
+[Logstash](https://www.elastic.co/products/logstash) receives
+and processes event messages sent by Icinga 2 and the [LogstashWriter](9-object-types.md#objecttype-logstashwriter)
+feature. As part of the Elastic Stack it allows you to
+process and modify the messages and forward them to [Elasticsearch](https://www.elastic.co/products/elasticsearch)
+as backend.
+
+Before proceeding with this integration guide please ensure
+that you have Logstash, Elasticsearch and Kibana up and running
+as part of the Elastic Stack.
+
+> **Note**
+>
+> The LogstashWriter feature has been tested with Elastic Stack 5.x and therefore Logstash 5.x.
+> Older versions are not supported.
+
+Logstash supports `TCP` and `UDP` as input socket type. You must
+further enable JSON support for input data processing. Logstash 5.x
+comes without any pre-installed plugins and requires you to install
+them separately.
+
+Example on CentOS 7 and UDP as socket type:
+
+```
+/usr/share/logstash/bin/logstash-plugin install logstash-input-udp
+/usr/share/logstash/bin/logstash-plugin install logstash-codec-json
+```
+
+Add the Icinga 2 input and set the output to your running Elasticsearch instance.
+You do not need to reload Logstash since version 5.x supports configuration changes
+without restart.
+
+This example uses port `5555`. You are allowed to use any available port (note it for later).
+
+```
+# vim /etc/logstash/conf.d/icinga2.conf
+
+input {
+ udp {
+ port => 5555
+ codec => "json"
+ }
+}
+output {
+ elasticsearch {
+ hosts => [ "localhost:9200" ]
+ }
+}
+```
+
+Modify the feature configuration and set the
+socket type, host and port attributes. The port must be the same
+as configured in your Logstash input, e.g. `5555`.
+
+```
+# vim /etc/icinga2/features-available/logstash.conf
+
+object LogstashWriter "logstash" {
+ host = "192.168.33.7"
+ port = 5555
+ socket_type = "udp"
+}
+```
+
+Enable the feature and restart Icinga 2.
+
+```
+# icinga2 feature enable logstash
+# systemctl restart icinga2
+```
+
+Open [Kibana](https://www.elastic.co/products/kibana) or your
+favorite Elasticsearch frontend and visualize the messages received
+from Icinga 2.
+
### <a id="opentsdb-writer"></a> OpenTSDB Writer
While there are some OpenTSDB collector scripts and daemons like tcollector available for
## <a id="setting-up-livestatus"></a> Livestatus
-The [MK Livestatus](http://mathias-kettner.de/checkmk_livestatus.html) project
+The [MK Livestatus](https://mathias-kettner.de/checkmk_livestatus.html) project
implements a query protocol that lets users query their Icinga instance for
status information. It can also be used to send commands.
# <a id="troubleshooting"></a> Icinga 2 Troubleshooting
-## <a id="troubleshooting-information-required"></a> Which information is required
+## <a id="troubleshooting-information-required"></a> Required Information
-* Run `icinga2 troubleshoot` to collect required troubleshooting information
-* Alternative, manual steps:
+Please ensure to provide any detail which may help reproduce and understand your issue.
+Whether you ask on the community channels or you create an issue at [GitHub](https://github.com/Icinga), make sure
+that others can follow your explanations. If necessary, draw a picture and attach it for
+better illustration. This is especially helpful if you are troubleshooting a distributed
+setup.
+
+We've reviewed many community questions and compiled this list. Please add your
+own findings and details.
+
+* Describe the expected behavior in your own words.
+* Describe the actual behavior in one or two sentences.
+* Ensure to provide general information such as:
+  * How was Icinga 2 installed (and from which repository, if applicable) and which distribution are you using
* `icinga2 --version`
* `icinga2 feature list`
- * `icinga2 daemon --validate`
- * Relevant output from your main and debug log ( `icinga2 object list --type='filelogger'` )
- * The newest Icinga 2 crash log if relevant
- * Your icinga2.conf and, if you run multiple Icinga 2 instances, your zones.conf
-* How was Icinga 2 installed (and which repository in case) and which distribution are you using
-* Provide complete configuration snippets explaining your problem in detail
-* If the check command failed, what's the output of your manual plugin tests?
-* In case of [debugging](20-development.md#development) Icinga 2, the full back traces and outputs
+ * `icinga2 daemon -C`
+ * [Icinga Web 2](https://www.icinga.com/products/icinga-web-2/) version (screenshot from System - About)
+ * [Icinga Web 2 modules](https://www.icinga.com/products/icinga-web-2-modules/) e.g. the Icinga Director (optional)
+* Configuration insights:
+ * Provide complete configuration snippets explaining your problem in detail
+ * Your [icinga2.conf](4-configuring-icinga-2.md#icinga2-conf) file
+ * If you run multiple Icinga 2 instances, the [zones.conf](4-configuring-icinga-2.md#zones-conf) file (or `icinga2 object list --type Endpoint` and `icinga2 object list --type Zone`) from all affected nodes.
+* Logs
+ * Relevant output from your main and [debug log](15-troubleshooting.md#troubleshooting-enable-debug-output) in `/var/log/icinga2`. Please add step-by-step explanations with timestamps if required.
+ * The newest Icinga 2 crash log if relevant, located in `/var/log/icinga2/crash`
+* Additional details
+ * If the check command failed, what's the output of your manual plugin tests?
+ * In case of [debugging](20-development.md#development) Icinga 2, the full back traces and outputs
+
+## <a id="troubleshooting-analyze-environment"></a> Analyze your Environment
+
+There are many components involved on a server running Icinga 2. When you
+analyze a problem, keep in mind that basic system administration knowledge
+is also key to identify bottlenecks and issues.
+
+> **Tip**
+>
+> [Monitor Icinga 2](8-advanced-topics.md#monitoring-icinga) and use the hints for further analysis.
+
+* Analyze the system's performance and identify bottlenecks and issues.
+* Collect details about all applications (e.g. Icinga 2, MySQL, Apache, Graphite, Elastic, etc.).
+* If data is exchanged via network (e.g. central MySQL cluster) ensure to monitor the bandwidth capabilities too.
+* Add graphs and screenshots to your issue description
+
+Install tools which help you to do so. Opinions differ, let us know if you have any additions here!
+
+### <a id="troubleshooting-analyze-environment-linux"></a> Analyze your Linux/Unix Environment
+
+[htop](https://hisham.hm/htop/) is a better replacement for `top` and helps to analyze processes
+interactively.
+
+```
+yum install htop
+apt-get install htop
+```
+
+If you are for example experiencing performance issues, open `htop` and take a screenshot.
+Add it to your question and/or bug report.
+
+Analyze disk I/O performance in Grafana, take a screenshot and obfuscate any sensitive details.
+Attach it when posting a question to the community channels.
+
+The [sysstat](https://github.com/sysstat/sysstat) package provides a number of tools to
+analyze the performance on Linux. On FreeBSD you could use `systat` for example.
+
+```
+yum install sysstat
+apt-get install sysstat
+```
+
+Example for `vmstat` (summary of memory, processes, etc.):
+
+```
+// summary
+vmstat -s
+// print timestamps, format in MB, stats every 1 second, 5 times
+vmstat -t -S M 1 5
+```
+
+Example for `iostat`:
+
+```
+watch -n 1 iostat
+```
+
+Example for `sar`:
+```
+sar //cpu
+sar -r //ram
+sar -q //load avg
+sar -b //I/O
+```
+
+`sysstat` also provides the `iostat` binary.
+
+If you are missing checks and metrics found in your analysis, add them to your monitoring!
+
+### <a id="troubleshooting-analyze-environment-windows"></a> Analyze your Windows Environment
+
+A good tip for Windows are the tools found inside the [Sysinternals Suite](https://technet.microsoft.com/en-us/sysinternals/bb842062.aspx).
+
+You can also start `perfmon` and analyze specific performance counters.
+Keep notes which could be important for your monitoring, and add service
+checks later on.
## <a id="troubleshooting-enable-debug-output"></a> Enable Debug Output
# icinga2 feature enable debuglog
# service icinga2 restart
-You can find the debug log file in `/var/log/icinga2/debug.log`.
+The debug log file can be found in `/var/log/icinga2/debug.log`.
Alternatively you may run Icinga 2 in the foreground with debugging enabled. Specify the console
log severity as an additional parameter argument to `-x`.
# /usr/sbin/icinga2 daemon -x notice
-The log level can be one of `critical`, `warning`, `information`, `notice`
+The [log severity](9-object-types.md#objecttype-filelogger) can be one of `critical`, `warning`, `information`, `notice`
and `debug`.
## <a id="list-configuration-objects"></a> List Configuration Objects
[2014-10-15 14:27:19 +0200] information/cli: Parsed 175 objects.
+Runtime modifications via the [REST API](12-icinga2-api.md#icinga2-api-config-objects)
+are not immediately updated. Furthermore there is a known issue with
+[group assign expressions](17-language-reference.md#group-assign) which are not reflected in the host object output.
+You need to restart Icinga 2 in order to update the `icinga2.debug` cache file.
+
+
## <a id="check-command-definitions"></a> Where are the check command definitions?
Icinga 2 features a number of built-in [check command definitions](10-icinga-template-library.md#plugin-check-commands) which are
-included using
+included with
include <itl>
include <plugins>
to fetch the checkable object, its check result and the executed shell command.
* Alternatively enable the [debug log](15-troubleshooting.md#troubleshooting-enable-debug-output) and look for the executed command.
-Example for a service object query using a [regex match]() on the name:
+Example for a service object query using a [regex match](18-library-reference.md#global-functions-regex)
+on the name:
$ curl -k -s -u root:icinga -H 'Accept: application/json' -H 'X-HTTP-Method-Override: GET' -X POST 'https://localhost:5665/v1/objects/services' \
-d '{ "filter": "regex(pattern, service.name)", "filter_vars": { "pattern": "^http" }, "attrs": [ "__name", "last_check_result" ] }' | python -m json.tool
$ curl -k -s -u root:icinga -X POST 'https://localhost:5665/v1/events?queue=debugchecks&types=CheckResult&filter=match%28%22random*%22,event.service%29'
+### <a id="late-check-results"></a> Late Check Results
+
+[Icinga Web 2](https://www.icinga.com/products/icinga-web-2/) provides
+a dashboard overview for `overdue checks`.
+
+The REST API provides the [status](12-icinga2-api.md#icinga2-api-status) URL endpoint with some generic metrics
+on Icinga and its features.
+
+ # curl -k -s -u root:icinga 'https://localhost:5665/v1/status' | python -m json.tool | less
+
+You can also calculate late check results via the REST API:
+
+* Fetch the `last_check` timestamp from each object
+* Compare the timestamp with the current time and add `check_interval` multiple times (change it to see which results are really late, like five times check_interval)
+
+You can use the [icinga2 console](11-cli-commands.md#cli-command-console) to connect to the instance, fetch all data
+and calculate the differences. More infos can be found in [this blogpost](https://www.icinga.com/2016/08/11/analyse-icinga-2-problems-using-the-console-api/).
+
+ # ICINGA2_API_USERNAME=root ICINGA2_API_PASSWORD=icinga icinga2 console --connect 'https://localhost:5665/'
+
+ <1> => var res = []; for (s in get_objects(Service).filter(s => s.last_check < get_time() - 2 * s.check_interval)) { res.add([s.__name, DateTime(s.last_check).to_string()]) }; res
+
+ [ [ "10807-host!10807-service", "2016-06-10 15:54:55 +0200" ], [ "mbmif.int.netways.de!disk /", "2016-01-26 16:32:29 +0100" ] ]
+
+Or if you are just interested in numbers, call [len](18-library-reference.md#array-len) on the result array `res`:
+
+ <2> => var res = []; for (s in get_objects(Service).filter(s => s.last_check < get_time() - 2 * s.check_interval)) { res.add([s.__name, DateTime(s.last_check).to_string()]) }; res.len()
+
+ 2.000000
+
+If you need to analyze that problem multiple times, just add the current formatted timestamp
+and repeat the commands.
+
+ <23> => DateTime(get_time()).to_string()
+
+ "2017-04-04 16:09:39 +0200"
+
+ <24> => var res = []; for (s in get_objects(Service).filter(s => s.last_check < get_time() - 2 * s.check_interval)) { res.add([s.__name, DateTime(s.last_check).to_string()]) }; res.len()
+
+ 8287.000000
+
+More details about the Icinga 2 DSL and its possibilities can be
+found in the [language](17-language-reference.md#language-reference) and [library](18-library-reference.md#library-reference) reference chapters.
+
+### <a id="late-check-results-distributed"></a> Late Check Results in Distributed Environments
+
+When it comes to a distributed HA setup, each node is responsible for a load-balanced amount of checks.
+Host and Service objects provide the attribute `paused`. If this is set to `false`, the current node
+actively attempts to schedule and execute checks. Otherwise the node does not feel responsible.
+
+ <3> => var res = {}; for (s in get_objects(Service).filter(s => s.last_check < get_time() - 2 * s.check_interval)) { res[s.paused] += 1 }; res
+ {
+ @false = 2.000000
+ @true = 1.000000
+ }
+
+You may ask why this analysis is important. Fair enough - if the numbers are not inverted in an HA zone
+with two members, this may give a hint that the cluster nodes are in a split-brain scenario, or you've
+found a bug in the cluster.
+
+
+If you are running a cluster setup where the master/satellite executes checks on the client via
+[top down command endpoint](6-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint) mode,
+you might want to know which zones are affected.
+
+This analysis assumes that clients which are not connected have the string `connected` in their
+service check result output and their state is `UNKNOWN`.
+
+ <4> => var res = {}; for (s in get_objects(Service)) { if (s.state==3) { if (match("*connected*", s.last_check_result.output)) { res[s.zone] += [s.host_name] } } }; for (k => v in res) { res[k] = len(v.unique()) }; res
+
+ {
+ Asia = 31.000000
+ Europe = 214.000000
+ USA = 207.000000
+ }
+
+The result set shows the configured zones and their affected hosts in a unique list. The output also just prints the numbers
+but you can adjust this by omitting the `len()` call inside the for loop.
+
## <a id="notifications-not-sent"></a> Notifications are not sent
-* Check the debug log to see if a notification is triggered.
+* Check the [debug log](15-troubleshooting.md#troubleshooting-enable-debug-output) to see if a notification is triggered.
* If yes, verify that all conditions are satisfied.
* Are any errors on the notification command execution logged?
+Please ensure to add these details with your own description
+to any question or issue posted to the community channels.
+
Verify the following configuration:
* Is the host/service `enable_notifications` attribute set, and if so, to which value?
-* Do the notification attributes `states`, `types`, `period` match the notification conditions?
-* Do the user attributes `states`, `types`, `period` match the notification conditions?
+* Do the [notification](9-object-types.md#objecttype-notification) attributes `states`, `types`, `period` match the notification conditions?
+* Do the [user](9-object-types.md#objecttype-user) attributes `states`, `types`, `period` match the notification conditions?
* Are there any notification `begin` and `end` times configured?
* Make sure the [notification](11-cli-commands.md#enable-features) feature is enabled.
* Does the referenced NotificationCommand work when executed as Icinga user on the shell?
* Are the feature attributes set correctly according to the documentation?
* Any errors on the logs?
+Look up the [object type](9-object-types.md#object-types) for the required feature and verify it is enabled:
+
+ # icinga2 object list --type <feature object type>
+
+Example for the `graphite` feature:
+
+ # icinga2 object list --type GraphiteWriter
+
## <a id="configuration-ignored"></a> Configuration is ignored
* Make sure that the line(s) are not [commented out](17-language-reference.md#comments) (starting with `//` or `#`, or
encapsulated by `/* ... */`).
* Is the configuration file included in [icinga2.conf](4-configuring-icinga-2.md#icinga2-conf)?
+Run the [configuration validation](11-cli-commands.md#config-validation) and add `notice` as log severity.
+Search for the file which should be included, e.g. using the `grep` CLI command.
+
+ # icinga2 daemon -C -x notice | grep command
+
## <a id="configuration-attribute-inheritance"></a> Configuration attributes are inherited from
Icinga 2 allows you to import templates using the [import](17-language-reference.md#template-imports) keyword. If these templates
contain additional attributes, your objects will automatically inherit them. You can override
or modify these attributes in the current object.
+The [object list](15-troubleshooting.md#list-configuration-objects) CLI command allows you to verify the attribute origin.
+
## <a id="configuration-value-dollar-sign"></a> Configuration Value with Single Dollar Sign
In case your configuration validation fails with a missing closing dollar sign error message, you
critical/config: Error: Validation failed for Object 'ping4' (Type: 'Service') at /etc/icinga2/zones.d/global-templates/windows.conf:24: Closing $ not found in macro format string 'top-syntax=${list}'.
+Correct the custom attribute value to
+
+ "top-syntax=$${list}"
## <a id="troubleshooting-cluster"></a> Cluster and Clients Troubleshooting
> **Note**
>
-> Some problems just exist due to wrong file permissions or packet filters applied. Make
+> Some problems just exist due to wrong file permissions or applied packet filters. Make
> sure to check these in the first place.
### <a id="troubleshooting-cluster-connection-errors"></a> Cluster Troubleshooting Connection Errors
-General connection errors normally lead you to one of the following problems:
+General connection errors could be one of the following problems:
-* Wrong network configuration
-* Packet loss on the connection
+* Incorrect network configuration
+* Packet loss
* Firewall rules preventing traffic
Use tools like `netstat`, `tcpdump`, `nmap`, etc. to make sure that the cluster communication
-happens (default port is `5665`).
+works (default port is `5665`).
# tcpdump -n port 5665 -i any
"Hello World!"
+#### <a id="string-literals-escape-sequences"></a> String Literals Escape Sequences
+
Certain characters need to be escaped. The following escape sequences
are supported:
These functions are globally available in [assign/ignore where expressions](3-monitoring-basics.md#using-apply-expressions),
[functions](17-language-reference.md#functions), [API filters](12-icinga2-api.md#icinga2-api-filters)
-and the [Icinga 2 console](11-cli-commands.md#cli-command-console).
+and the [Icinga 2 debug console](11-cli-commands.md#cli-command-console).
-You can use the [Icinga 2 console](11-cli-commands.md#cli-command-console)
+You can use the [Icinga 2 debug console](11-cli-commands.md#cli-command-console)
as a sandbox to test these functions before implementing
them in your scenarios.
true
<4> => typeof([ 1, 2, 3]) == Array
true
- <5> => typeof({ a = 2, b = 3}) == Dictionary
+ <5> => typeof({ a = 2, b = 3 }) == Dictionary
+ true
### <a id="global-functions-get_time"></a> get_time
Distribution | Repository
------------------------|---------------------------
- Debian | [Icinga Repository](http://packages.icinga.com/debian/), [debmon](https://debmon.org/packages/debmon-jessie/icinga2)
- Ubuntu | [Icinga Repository](http://packages.icinga.com/ubuntu/), [Icinga PPA](https://launchpad.net/~formorer/+archive/ubuntu/icinga)
- RHEL/CentOS | [Icinga Repository](http://packages.icinga.com/epel/)
- openSUSE | [Icinga Repository](http://packages.icinga.com/openSUSE/), [Server Monitoring Repository](https://build.opensuse.org/package/show/server:monitoring/icinga2)
- SLES | [Icinga Repository](http://packages.icinga.com/SUSE/)
- Gentoo | [Upstream](http://packages.gentoo.org/package/net-analyzer/icinga2)
- FreeBSD | [Upstream](http://www.freshports.org/net-mgmt/icinga2)
+ Debian | [Icinga Repository](https://packages.icinga.com/debian/), [debmon](https://debmon.org/packages/debmon-jessie/icinga2)
+ Ubuntu | [Icinga Repository](https://packages.icinga.com/ubuntu/), [Icinga PPA](https://launchpad.net/~formorer/+archive/ubuntu/icinga)
+ RHEL/CentOS | [Icinga Repository](https://packages.icinga.com/epel/)
+ openSUSE | [Icinga Repository](https://packages.icinga.com/openSUSE/), [Server Monitoring Repository](https://build.opensuse.org/package/show/server:monitoring/icinga2)
+ SLES | [Icinga Repository](https://packages.icinga.com/SUSE/)
+ Gentoo | [Upstream](https://packages.gentoo.org/package/net-analyzer/icinga2)
+ FreeBSD | [Upstream](https://www.freshports.org/net-mgmt/icinga2)
OpenBSD | [Upstream](http://ports.su/net/icinga/core2,-main)
ArchLinux | [Upstream](https://aur.archlinux.org/packages/icinga2)
AlpineLinux | [Upstream](https://pkgs.alpinelinux.org/package/edge/community/x86_64/icinga2)
Debian:
- # wget -O - http://packages.icinga.com/icinga.key | apt-key add -
- # echo 'deb http://packages.icinga.com/debian icinga-jessie main' >/etc/apt/sources.list.d/icinga.list
+ # wget -O - https://packages.icinga.com/icinga.key | apt-key add -
+ # echo 'deb https://packages.icinga.com/debian icinga-jessie main' >/etc/apt/sources.list.d/icinga.list
# apt-get update
Ubuntu:
- # wget -O - http://packages.icinga.com/icinga.key | apt-key add -
- # echo 'deb http://packages.icinga.com/ubuntu icinga-xenial main' >/etc/apt/sources.list.d/icinga.list
+ # wget -O - https://packages.icinga.com/icinga.key | apt-key add -
+ # echo 'deb https://packages.icinga.com/ubuntu icinga-xenial main' >/etc/apt/sources.list.d/icinga.list
# apt-get update
RHEL/CentOS 7:
RHEL/CentOS 5:
- rpm -i http://packages.icinga.com/epel/5/release/noarch/icinga-rpm-release-5-1.el5.centos.noarch.rpm
+ rpm -i https://packages.icinga.com/epel/5/release/noarch/icinga-rpm-release-5-1.el5.centos.noarch.rpm
Fedora 25:
SLES 11:
- # zypper ar http://packages.icinga.com/SUSE/ICINGA-release-11.repo
+ # zypper ar https://packages.icinga.com/SUSE/ICINGA-release-11.repo
# zypper ref
SLES 12:
- # zypper ar http://packages.icinga.com/SUSE/ICINGA-release.repo
+ # zypper ar https://packages.icinga.com/SUSE/ICINGA-release.repo
# zypper ref
openSUSE:
- # zypper ar http://packages.icinga.com/openSUSE/ICINGA-release.repo
+ # zypper ar https://packages.icinga.com/openSUSE/ICINGA-release.repo
# zypper ref
#### <a id="package-repositories-rhel-epel"></a> RHEL/CentOS EPEL Repository
The packages for RHEL/CentOS depend on other packages which are distributed
-as part of the [EPEL repository](http://fedoraproject.org/wiki/EPEL).
+as part of the [EPEL repository](https://fedoraproject.org/wiki/EPEL).
CentOS 7/6/5:
yum install epel-release
If you are using RHEL you need enable the `optional` repository and then install
-the [EPEL rpm package](http://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F).
+the [EPEL rpm package](https://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F).
#### <a id="package-repositories-sles-security"></a> SLES Security Repository
OS/Distribution | Package Name | Repository | Installation Path
-----------------------|--------------------|---------------------------|----------------------------
-RHEL/CentOS | nagios-plugins-all | [EPEL](http://fedoraproject.org/wiki/EPEL) |Â /usr/lib/nagios/plugins or /usr/lib64/nagios/plugins
+RHEL/CentOS | nagios-plugins-all | [EPEL](https://fedoraproject.org/wiki/EPEL) | /usr/lib/nagios/plugins or /usr/lib64/nagios/plugins
SLES/OpenSUSE | monitoring-plugins | [server:monitoring](https://build.opensuse.org/project/repositories/server:monitoring) | /usr/lib/nagios/plugins
Debian/Ubuntu | nagios-plugins | - | /usr/lib/nagios/plugins
FreeBSD | monitoring-plugins | - | /usr/local/libexec/nagios
-OS X | nagios-plugins | [MacPorts](http://www.macports.org), [Homebrew](http://brew.sh) | /opt/local/libexec or /usr/local/sbin
+OS X | nagios-plugins | [MacPorts](https://www.macports.org), [Homebrew](https://brew.sh) | /opt/local/libexec or /usr/local/sbin
The recommended way of installing these standard plugins is to use your
distribution's package manager.
# yum install nagios-plugins-all
The packages for RHEL/CentOS depend on other packages which are distributed
-as part of the [EPEL repository](http://fedoraproject.org/wiki/EPEL). Please
+as part of the [EPEL repository](https://fedoraproject.org/wiki/EPEL). Please
make sure to enable this repository by following
-[these instructions](http://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F).
+[these instructions](https://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F).
Fedora:
SELinux is a mandatory access control (MAC) system on Linux which adds a fine-grained permission system for access to all system resources such as files, devices, networks and inter-process communication.
-The most important questions are answered briefly in the [FAQ of the SELinux Project](http://selinuxproject.org/page/FAQ). For more details on SELinux and how to actually use and administrate it on your system have a look at [Red Hat Enterprise Linux 7 - SELinux User's and Administrator's Guide](https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/SELinux_Users_and_Administrators_Guide/index.html). For a simplified (and funny) introduction download the [SELinux Coloring Book](https://github.com/mairin/selinux-coloring-book).
+The most important questions are answered briefly in the [FAQ of the SELinux Project](https://selinuxproject.org/page/FAQ). For more details on SELinux and how to actually use and administrate it on your system have a look at [Red Hat Enterprise Linux 7 - SELinux User's and Administrator's Guide](https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/SELinux_Users_and_Administrators_Guide/index.html). For a simplified (and funny) introduction download the [SELinux Coloring Book](https://github.com/mairin/selinux-coloring-book).
This documentation will use a format similar to the SELinux User's and Administrator's Guide.
Download and install a plugin, for example check_mysql_health.
- # wget http://labs.consol.de/download/shinken-nagios-plugins/check_mysql_health-2.1.9.2.tar.gz
+ # wget https://labs.consol.de/download/shinken-nagios-plugins/check_mysql_health-2.1.9.2.tar.gz
# tar xvzf check_mysql_health-2.1.9.2.tar.gz
# cd check_mysql_health-2.1.9.2/
# ./configure --libexecdir /usr/lib64/nagios/plugins
provided separately.
There is no output length restriction as known from Icinga 1.x using an
-[8KB static buffer](http://docs.icinga.com/latest/en/pluginapi.html#outputlengthrestrictions).
+[8KB static buffer](https://docs.icinga.com/latest/en/pluginapi.html#outputlengthrestrictions).
The `StatusDataWriter`, `IdoMysqlConnection` and `LivestatusListener` types
split the raw output into `output` (first line) and `long_output` (remaining
## <a id="external-commands-list-detail"></a> External Commands List
-Additional details can be found in the [Icinga 1.x Documentation](http://docs.icinga.com/latest/en/extcommands2.html)
+Additional details can be found in the [Icinga 1.x Documentation](https://docs.icinga.com/latest/en/extcommands2.html)
Command name | Parameters | Description
------------------------------------------|-----------------------------------|--------------------------
### <a id="schema-db-ido"></a> DB IDO Schema
There is a detailed documentation for the Icinga IDOUtils 1.x
-database schema available on [http://docs.icinga.com/latest/en/db_model.html]
+database schema available at <https://docs.icinga.com/latest/en/db_model.html>
#### <a id="schema-db-ido-extensions"></a> DB IDO Schema Extensions
#### <a id="schema-livestatus-hostsbygroup-table-attributes"></a> Livestatus Hostsbygroup Table Attributes
-All (hosts)[19-appendix.md#schema-livestatus-hosts-table-attributes] table attributes grouped with
-the (hostgroups)[19-appendix.md#schema-livestatus-hostgroups-table-attributes] table prefixed with `hostgroup_`.
+All [hosts](19-appendix.md#schema-livestatus-hosts-table-attributes) table attributes grouped with
+the [hostgroups](19-appendix.md#schema-livestatus-hostgroups-table-attributes) table prefixed with `hostgroup_`.
#### <a id="schema-livestatus-servicesbygroup-table-attributes"></a> Livestatus Servicesbygroup Table Attributes
-All (services)[19-appendix.md#schema-livestatus-services-table-attributes] table attributes grouped with
-the (servicegroups)[19-appendix.md#schema-livestatus-servicegroups-table-attributes] table prefixed with `servicegroup_`.
+All [services](19-appendix.md#schema-livestatus-services-table-attributes) table attributes grouped with
+the [servicegroups](19-appendix.md#schema-livestatus-servicegroups-table-attributes) table prefixed with `servicegroup_`.
#### <a id="schema-livestatus-servicesbyhostgroup-table-attributes"></a> Livestatus Servicesbyhostgroup Table Attributes
-All (services)[19-appendix.md#schema-livestatus-services-table-attributes] table attributes grouped with
-the (hostgroups)[19-appendix.md#schema-livestatus-hostgroups-table-attributes] table prefixed with `hostgroup_`.
+All [services](19-appendix.md#schema-livestatus-services-table-attributes) table attributes grouped with
+the [hostgroups](19-appendix.md#schema-livestatus-hostgroups-table-attributes) table prefixed with `hostgroup_`.
## <a id="using-apply"></a> Apply Rules
-Instead of assigning each object ([Service](9-object-types.md#objecttype-service),
+Several object types require an object relation, e.g. [Service](9-object-types.md#objecttype-service),
[Notification](9-object-types.md#objecttype-notification), [Dependency](9-object-types.md#objecttype-dependency),
-[ScheduledDowntime](9-object-types.md#objecttype-scheduleddowntime))
-based on attribute identifiers for example `host_name` objects can be [applied](17-language-reference.md#apply).
+[ScheduledDowntime](9-object-types.md#objecttype-scheduleddowntime) objects.
+If you for example create a service object, you have to specify the [host_name](9-object-types.md#objecttype-service)
+attribute and reference an existing host object.
-Before you start using the apply rules keep the following in mind:
+ object Service "ping4" {
+ check_command = "ping4"
+ host_name = "icinga2-client1.localdomain"
+ }
+
+This isn't convenient when managing a huge set of configuration objects which could
+[match](3-monitoring-basics.md#using-apply-expressions) on a common pattern.
+
+Instead you want to use **[apply](17-language-reference.md#apply) rules**.
+
+If you want basic monitoring for all your hosts, add a `ping4` service apply rule
+for all hosts which have the `address` attribute specified. Just one rule for 1000 hosts
+instead of 1000 service objects. Apply rules will automatically generate them for you.
+
+ apply Service "ping4" {
+ check_command = "ping4"
+ assign where host.address
+ }
+
+More explanations on assign where expressions can be found [here](3-monitoring-basics.md#using-apply-expressions).
+
+Before you start with apply rules keep the following in mind:
* Define the best match.
* A set of unique [custom attributes](3-monitoring-basics.md#custom-attributes) for these hosts/services?
- * Or [group](3-monitoring-basics.md#groups) memberships, e.g. a host being a member of a hostgroup, applying services to it?
+ * Or [group](3-monitoring-basics.md#groups) memberships, e.g. a host being a member of a hostgroup which should have a service set?
* A generic pattern [match](18-library-reference.md#global-functions-match) on the host/service name?
* [Multiple expressions combined](3-monitoring-basics.md#using-apply-expressions) with `&&` or `||` [operators](17-language-reference.md#expression-operators)
* All expressions must return a boolean value (an empty string is equal to `false` e.g.)
-> **Note**
->
-> You can set/override object attributes in apply rules using the respectively available
-> objects in that scope (host and/or service objects).
-
-[Custom attributes](3-monitoring-basics.md#custom-attributes) can also store nested dictionaries and arrays. That way you can use them
-for not only matching for their existance or values in apply expressions, but also assign
-("inherit") their values into the generated objected from apply rules.
+More specific object type requirements are described in these chapters:
* [Apply services to hosts](3-monitoring-basics.md#using-apply-services)
* [Apply notifications to hosts and services](3-monitoring-basics.md#using-apply-notifications)
* [Apply dependencies to hosts and services](3-monitoring-basics.md#using-apply-dependencies)
* [Apply scheduled downtimes to hosts and services](3-monitoring-basics.md#using-apply-scheduledowntimes)
-A more advanced example is using [apply with for loops on arrays or
-dictionaries](3-monitoring-basics.md#using-apply-for) for example provided by
+You can set/override object attributes in apply rules using the respectively available
+objects in that scope (host and/or service objects).
+
+ vars.application_type = host.vars.application_type
+
+[Custom attributes](3-monitoring-basics.md#custom-attributes) can also store nested dictionaries and arrays. That way you can use them
+for not only matching for their existence or values in apply expressions, but also assign
+("inherit") their values into the generated object from apply rules.
+
+A more advanced example is to use [apply rules with for loops on arrays or
+dictionaries](3-monitoring-basics.md#using-apply-for) provided by
[custom attributes](3-monitoring-basics.md#custom-attributes) or groups.
> **Tip**
you want to be able to add more than one assign/ignore where expression which matches
a specific condition. To achieve this you can use the logical `and` and `or` operators.
+#### <a id="using-apply-expressions-examples"></a> Apply Rules Expressions Examples
+
+Assign a service to a specific host in a host group [array](18-library-reference.md#array-type) using the [in operator](17-language-reference.md#expression-operators):
+
+ assign where "hostgroup-dev" in host.groups
+
+Assign an object when a custom attribute is [equal](17-language-reference.md#expression-operators) to a value:
+
+ assign where host.vars.application_type == "database"
+
+ assign where service.vars.sms_notify == true
+
+Assign an object if a dictionary [contains](18-library-reference.md#dictionary-contains) a given key:
+
+ assign where host.vars.app_dict.contains("app")
+
+Match the host name either by using a [case insensitive match](18-library-reference.md#global-functions-match):
+
+ assign where match("webserver*", host.name)
+
+Match the host name by using a [regular expression](18-library-reference.md#global-functions-regex). Please note the [escaped](17-language-reference.md#string-literals-escape-sequences) backslash character:
+
+    assign where regex("^webserver-\\d+", host.name)
+
[Match](18-library-reference.md#global-functions-match) all `*mysql*` patterns in the host name and (`&&`) custom attribute `prod_mysql_db`
matches the `db-*` pattern. All hosts with the custom attribute `test_server` set to `true`
assign where host.address && host.vars.os == "Linux"
}
-
Other detailed examples are used in their respective chapters, for example
[apply services with custom command arguments](3-monitoring-basics.md#command-passing-parameters).
/* Calculate some additional object attributes after populating the `vars` dictionary */
    notes = "Interface check for " + interface_name + " (units: '" + interface_config.iftraffic_units + "') in VLAN '" + vars.vlan + "' with QoS '" + vars.qos + "'"
- notes_url = "http://foreman.company.com/hosts/" + host.name
+ notes_url = "https://foreman.company.com/hosts/" + host.name
action_url = "http://snmp.checker.company.com/" + host.name + "/if-" + interface_name
}
notes = "Support contract: " + vars.support_contract + " for Customer " + vars.customer_name + " (" + vars.customer_id + ")."
- notes_url = "http://foreman.company.com/hosts/" + host.name
+ notes_url = "https://foreman.company.com/hosts/" + host.name
action_url = "http://snmp.checker.company.com/" + host.name + "/" + vars.customer_id
}
## <a id="dependencies"></a> Dependencies
Icinga 2 uses host and service [Dependency](9-object-types.md#objecttype-dependency) objects
-for determing their network reachability.
+for determining their network reachability.
A service can depend on a host, and vice versa. A service has an implicit
dependency (parent) to its host. A host to host dependency acts implicitly
The configuration files which are automatically created when installing the Icinga 2 packages
are a good way to start with Icinga 2.
-If you're interested in a detailed explanation of each language feature used in those
-configuration files, you can find more information in the [Language Reference](17-language-reference.md#language-reference)
-chapter.
+The [Language Reference](17-language-reference.md#language-reference) chapter explains details
+on value types (string, number, dictionaries, etc.) and the general configuration syntax.
## <a id="configuration-best-practice"></a> Configuration Best Practice
There are many ways of creating Icinga 2 configuration objects:
* Manually with your preferred editor, for example vi(m), nano, notepad, etc.
+* A configuration tool for Icinga 2, e.g. the [Icinga Director](https://github.com/Icinga/icingaweb2-module-director)
* Generated by a [configuration management tool](13-addons.md#configuration-tools) such as Puppet, Chef, Ansible, etc.
-* A configuration addon for Icinga 2 ([Icinga Director](https://github.com/Icinga/icingaweb2-module-director))
* A custom exporter script from your CMDB or inventory tool
-* your own.
+* etc.
-In order to find the best strategy for your own configuration, ask yourself the following questions:
+Find the best strategy for your own configuration and ask yourself the following questions:
* Do your hosts share a common group of services (for example linux hosts with disk, load, etc. checks)?
* Only a small set of users receives notifications and escalations for all hosts/services?
Then you should look for the object specific configuration setting `host_name` etc. accordingly.
-Finding the best files and directory tree for your configuration is up to you. Make sure that
-the [icinga2.conf](4-configuring-icinga-2.md#icinga2-conf) configuration file includes them,
-and then think about:
+You decide on the "best" layout for configuration files and directories. Ensure that
+the [icinga2.conf](4-configuring-icinga-2.md#icinga2-conf) configuration file includes them.
+
+Consider these ideas:
-* tree-based on locations, hostgroups, specific host attributes with sub levels of directories.
+* tree-based on locations, host groups, specific host attributes with sub levels of directories.
* flat `hosts.conf`, `services.conf`, etc. files for rule based configuration.
* generated configuration with one file per host and a global configuration for groups, users, etc.
* one big file generated from an external application (probably a bad idea for maintaining changes).
take the configuration location into account too. Everything configured on the master, synced to all other
nodes? Or any specific local configuration (e.g. health checks)?
-TODO
+There is a detailed chapter on [distributed monitoring scenarios](6-distributed-monitoring.md#distributed-monitoring-scenarios).
+Please make sure to read the [introduction](6-distributed-monitoring.md#distributed-monitoring) first.
If you happen to have further questions, do not hesitate to join the
[community support channels](https://www.icinga.com/community/get-involved/)
and ask community members for their experience and best practices.
+## <a id="your-configuration"></a> Your Configuration
+
+If you prefer to organize your own local object tree, you can also remove
+`include_recursive "conf.d"` from your icinga2.conf file.
+
+Create a new configuration directory, e.g. `objects.d` and include it
+in your icinga2.conf file.
+
+ [root@icinga2-master1.localdomain /]# mkdir -p /etc/icinga2/objects.d
+
+ [root@icinga2-master1.localdomain /]# vim /etc/icinga2/icinga2.conf
+
+ /* Local object configuration on our master instance. */
+ include_recursive "objects.d"
+
+This approach is used by the [Icinga 2 Puppet module](https://github.com/Icinga/puppet-icinga2).
+
+If you plan to set up a distributed environment with HA clusters and clients, please refer to [this chapter](6-distributed-monitoring.md#distributed-monitoring-top-down)
+for examples with `zones.d` as configuration directory.
+
## <a id="configuring-icinga2-overview"></a> Configuration Overview
### <a id="icinga2-conf"></a> icinga2.conf
and their generated configuration described in
[this chapter](6-distributed-monitoring.md#distributed-monitoring-bottom-up).
+**Note**: This has been DEPRECATED in Icinga 2 v2.6 and is **not** required for
+satellites and clients using the [top down approach](6-distributed-monitoring.md#distributed-monitoring-top-down).
+You can safely disable/remove it.
+
/**
* Although in theory you could define all your objects in this file
/* The directory which contains the plugins from the Monitoring Plugins project. */
const PluginDir = "/usr/lib64/nagios/plugins"
-
/* The directory which contains the Manubulon plugins.
* Check the documentation, chapter "SNMP Manubulon Plugin Check Commands", for details.
*/
The `ZoneName` and `TicketSalt` constants are required for remote client
and distributed setups only.
+### <a id="zones-conf"></a> zones.conf
+
+This file can be used to specify the required [Zone](9-object-types.md#objecttype-zone)
+and [Endpoint](9-object-types.md#objecttype-endpoint) configuration object for
+[distributed monitoring](6-distributed-monitoring.md#distributed-monitoring).
+
+By default the `NodeName` and `ZoneName` [constants](4-configuring-icinga-2.md#constants-conf) will be used.
+
+It also contains several [global zones](6-distributed-monitoring.md#distributed-monitoring-global-zone-config-sync)
+for distributed monitoring environments.
+
+Please ensure to modify this configuration with real names, i.e. use the FQDN
+mentioned in [this chapter](6-distributed-monitoring.md#distributed-monitoring-conventions)
+for your `Zone` and `Endpoint` object names.
+
### <a id="conf-d"></a> The conf.d Directory
-This directory contains example configuration which should help you get started
+This directory contains **example configuration** which should help you get started
with monitoring the local host and its services. It is included in the
[icinga2.conf](4-configuring-icinga-2.md#icinga2-conf) configuration file by default.
Just keep in mind to include the main directories in the
[icinga2.conf](4-configuring-icinga-2.md#icinga2-conf) file.
-You are certainly not bound to it. Remove it if you prefer your own
-way of deploying Icinga 2 configuration.
+> **Note**
+>
+> You can remove the include directive in [icinga2.conf](4-configuring-icinga-2.md#icinga2-conf)
+> if you prefer your own way of deploying Icinga 2 configuration.
Further details on configuration best practice and how to build your
own strategy is described in [this chapter](4-configuring-icinga-2.md#configuration-best-practice).
* [disk](10-icinga-template-library.md#plugin-check-command-disk)
* [mem](10-icinga-template-library.md#plugin-contrib-command-mem), [swap](10-icinga-template-library.md#plugin-check-command-swap)
+* [procs](10-icinga-template-library.md#plugin-check-command-processes)
+* [users](10-icinga-template-library.md#plugin-check-command-users)
* [running_kernel](10-icinga-template-library.md#plugin-contrib-command-running_kernel)
* package management: [apt](10-icinga-template-library.md#plugin-check-command-apt), [yum](10-icinga-template-library.md#plugin-contrib-command-yum), etc.
* [ssh](10-icinga-template-library.md#plugin-check-command-ssh)
* [VMware](10-icinga-template-library.md#plugin-contrib-vmware)
**Tip**: If you are encountering timeouts using the VMware Perl SDK,
-check [this blog entry](http://www.claudiokuenzler.com/blog/650/slow-vmware-perl-sdk-soap-request-error-libwww-version).
+check [this blog entry](https://www.claudiokuenzler.com/blog/650/slow-vmware-perl-sdk-soap-request-error-libwww-version).
### <a id="service-monitoring-sap"></a> SAP Monitoring
* [smtp](10-icinga-template-library.md#plugin-check-command-smtp), [ssmtp](10-icinga-template-library.md#plugin-check-command-ssmtp)
* [imap](10-icinga-template-library.md#plugin-check-command-imap), [simap](10-icinga-template-library.md#plugin-check-command-simap)
* [pop](10-icinga-template-library.md#plugin-check-command-pop), [spop](10-icinga-template-library.md#plugin-check-command-spop)
+* [mailq](10-icinga-template-library.md#plugin-check-command-mailq)
### <a id="service-monitoring-hardware"></a> Hardware Monitoring
additional security:
* SSL certificates are mandatory for communication between nodes. The CLI commands
-help you create those certs automatically.
+help you create those certificates.
* Child zones only receive updates (check results, commands, etc.) for their configured objects.
+* Child zones are not allowed to push configuration updates to parent zones.
* Zones cannot interfere with other zones and influence each other. Each checkable host or service object is assigned to **one zone** only.
* All nodes in a zone trust each other.
* [Config sync](6-distributed-monitoring.md#distributed-monitoring-top-down-config-sync) and [remote command endpoint execution](6-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint) is disabled by default.
-The underlying protocol is using JSON-RPC events sent over TLS secured
-connections. In case you are interested in specific details, please
-check the source code.
+The underlying protocol uses JSON-RPC event notifications exchanged by nodes.
+The connection is secured by TLS. The message protocol uses an internal API,
+and as such message types and names may change internally and are not documented.
## <a id="distributed-monitoring-setup-master"></a> Master Setup
### <a id="distributed-monitoring-setup-client-windows"></a> Client/Satellite Windows Setup
-Download the MSI-Installer package from [http://packages.icinga.com/windows/](http://packages.icinga.com/windows/).
+Download the MSI-Installer package from [https://packages.icinga.com/windows/](https://packages.icinga.com/windows/).
Requirements:
* Windows Vista/Server 2008 or higher
-* [Microsoft .NET Framework 2.0](http://www.microsoft.com/de-de/download/details.aspx?id=1639)
+* [Microsoft .NET Framework 2.0](https://www.microsoft.com/de-de/download/details.aspx?id=1639)
-The installer package includes the [NSClient++](http://www.nsclient.org/) package
+The installer package includes the [NSClient++](https://www.nsclient.org/) package
so that Icinga 2 can use its built-in plugins. You can find more details in
[this chapter](6-distributed-monitoring.md#distributed-monitoring-windows-nscp).
The Windows package also installs native [monitoring plugin binaries](6-distributed-monitoring.md#distributed-monitoring-windows-plugins)
If you specify the `host` attribute in the `icinga2-master1.localdomain` endpoint object,
the client will actively try to connect to the master node. Since we've specified the client
endpoint's attribute on the master node already, we don't want the clients to connect to the
-master. Choose one connection direction.
+master. **Choose one [connection direction](6-distributed-monitoring.md#distributed-monitoring-advanced-hints-connection-direction).**
[root@icinga2-client1.localdomain /]# vim /etc/icinga2/zones.conf
If you specify the `host` attribute in the `icinga2-master1.localdomain` and `icinga2-master2.localdomain`
endpoint objects, the client will actively try to connect to the master node. Since we've specified the client
endpoint's attribute on the master node already, we don't want the clients to connect to the
-master nodes. Choose one connection direction.
+master nodes. **Choose one [connection direction](6-distributed-monitoring.md#distributed-monitoring-advanced-hints-connection-direction).**
[root@icinga2-client1.localdomain /]# vim /etc/icinga2/zones.conf
![Icinga 2 Distributed Master and Satellites with Clients](images/distributed-monitoring/icinga2_distributed_scenarios_master_satellite_client.png)
This scenario combines everything you've learned so far: High-availability masters,
-satellites receiving their config from the master zone, and clients checked via command
+satellites receiving their configuration from the master zone, and clients checked via command
endpoint from the satellite zones.
**Tip**: It can get complicated, so grab a pen and paper and bring your thoughts to life.
Overview:
-* `icinga2-master1.localdomain` is the config master master node.
+* `icinga2-master1.localdomain` is the configuration master node.
* `icinga2-master2.localdomain` is the secondary master node without configuration in `zones.d`.
* `icinga2-satellite1.localdomain` and `icinga2-satellite2.localdomain` are satellite nodes in a `master` child zone.
* `icinga2-client1.localdomain` and `icinga2-client2.localdomain` are two child nodes as clients.
The zone hierarchy can look like this. We'll define only the directly connected zones here.
You can safely deploy this configuration onto all master and satellite zone
-members. You should keep in mind to control the endpoint connection direction
+members. You should keep in mind to control the endpoint [connection direction](6-distributed-monitoring.md#distributed-monitoring-advanced-hints-connection-direction)
using the `host` attribute.
[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.conf
global = true
}
-**Note**: The master nodes do not need to know about the indirectly connected clients.
-Since we want to use command endpoint check configuration,
+Repeat the configuration step for `icinga2-master2.localdomain`, `icinga2-satellite1.localdomain`
+and `icinga2-satellite2.localdomain`.
+
+Since we want to use [top down command endpoint](6-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint) checks,
we must configure the client endpoint and zone objects.
-In order to minimize the effort, we'll sync the client zone and endpoint config to the
+In order to minimize the effort, we'll sync the client zone and endpoint configuration to the
satellites where the connection information is needed as well.
[root@icinga2-master1.localdomain /]# mkdir -p /etc/icinga2/zones.d/{master,satellite,global-templates}
If you specify the `host` attribute in the `icinga2-satellite1.localdomain` and `icinga2-satellite2.localdomain`
endpoint objects, the client node will actively try to connect to the satellite node. Since we've specified the client
endpoint's attribute on the satellite node already, we don't want the client node to connect to the
-satellite nodes. Choose one connection direction.
+satellite nodes. **Choose one [connection direction](6-distributed-monitoring.md#distributed-monitoring-advanced-hints-connection-direction).**
+
+Example for `icinga2-client1.localdomain`:
[root@icinga2-client1.localdomain /]# vim /etc/icinga2/zones.conf
global = true
}
+Example for `icinga2-client2.localdomain`:
+
[root@icinga2-client2.localdomain /]# vim /etc/icinga2/zones.conf
object Endpoint "icinga2-satellite1.localdomain" {
zone and endpoint configuration for the clients.
[root@icinga2-master1.localdomain /]# cd /etc/icinga2/zones.d/satellite
+
+Add the host object configuration for the `icinga2-client1.localdomain` client. You should
+have created the configuration file in the previous steps and it should contain the endpoint
+and zone object configuration already.
+
[root@icinga2-master1.localdomain /etc/icinga2/zones.d/satellite]# vim icinga2-client1.localdomain.conf
object Host "icinga2-client1.localdomain" {
vars.client_endpoint = name //follows the convention that host name == endpoint name
}
+Add the host object configuration to the `icinga2-client2.localdomain` client configuration file:
+
[root@icinga2-master1.localdomain /etc/icinga2/zones.d/satellite]# vim icinga2-client2.localdomain.conf
object Host "icinga2-client2.localdomain" {
vars.client_endpoint = name //follows the convention that host name == endpoint name
}
-Add a service which is executed on the satellite nodes (e.g. `ping4`). Pin the apply rule to the `satellite` zone only.
+Add a service object which is executed on the satellite nodes (e.g. `ping4`). Pin the apply rule to the `satellite` zone only.
[root@icinga2-master1.localdomain /etc/icinga2/zones.d/satellite]# vim services.conf
disconnected and then reconnect.
This functionality is not needed when a master/satellite node is sending check
-execution events to a client which is purely configured for [command endpoint](distributed-monitoring-top-down-command-endpoint)
+execution events to a client which is purely configured for [command endpoint](6-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint)
checks only.
The [Endpoint](9-object-types.md#objecttype-endpoint) object attribute `log_duration` can
## <a id="agent-based-checks-nsclient"></a> NSClient++
-[NSClient++](http://nsclient.org) works on both Windows and Linux platforms and is well
+[NSClient++](https://nsclient.org/) works on both Windows and Linux platforms and is well
known for its magnificent Windows support. There are alternatives like the WMI interface,
but using `NSClient++` will allow you to run local scripts similar to check plugins fetching
the required output and performance counters.
vars.nscp_crit = 80
}
-For details on the `NSClient++` configuration please refer to the [official documentation](http://www.nsclient.org/nscp/wiki/doc/configuration/0.4.x).
+For details on the `NSClient++` configuration please refer to the [official documentation](https://docs.nsclient.org/).
## <a id="agent-based-checks-nsca-ng"></a> NSCA-NG
## <a id="agent-based-checks-nrpe"></a> NRPE
-[NRPE](http://docs.icinga.com/latest/en/nrpe.html) runs as daemon on the remote client including
+[NRPE](https://docs.icinga.com/latest/en/nrpe.html) runs as daemon on the remote client including
the required plugins and command definitions.
Icinga 2 calls the `check_nrpe` plugin binary in order to query the configured command on the
remote client.
}
}
-## <a id="advanced-use-of-apply-rules"></a> Advanced Use of Apply Rules
+## <a id="check-result-freshness"></a> Check Result Freshness
+
+In Icinga 2 active check freshness is enabled by default. It is determined by the
+`check_interval` attribute and no incoming check results in that period of time.
+
+ threshold = last check execution time + check interval
+
+Passive check freshness is calculated from the `check_interval` attribute if set.
+
+ threshold = last check result time + check interval
+
+If the freshness check fails (no check result arrived in time), a new check
+defined by the `check_command` attribute is executed.
+
+
+## <a id="check-flapping"></a> Check Flapping
+
+Icinga 2 supports optional detection of hosts and services that are "flapping".
+
+Flapping occurs when a service or host changes state too frequently, resulting
+in a storm of problem and recovery notifications. Flapping can be the source of
+configuration problems (i.e. thresholds set too low), troublesome services,
+or real network problems.
+
+Flapping detection can be enabled or disabled using the `enable_flapping` attribute.
+The `flapping_threshold` attribute allows you to specify the percentage of state changes
+at which a [host](9-object-types.md#objecttype-host) or [service](9-object-types.md#objecttype-service) is considered to flap.
+
+Note: There are known issues with flapping detection. Please refrain from enabling
+flapping until [#4982](https://github.com/Icinga/icinga2/issues/4982) is fixed.
+
+## <a id="volatile-services"></a> Volatile Services
+
+By default all services remain in a non-volatile state. When a problem
+occurs, the `SOFT` state applies and once `max_check_attempts` attribute
+is reached with the check counter, a `HARD` state transition happens.
+Notifications are only triggered by `HARD` state changes and are then
+re-sent defined by the `interval` attribute.
+
+It may be reasonable to have a volatile service which stays in a `HARD`
+state type if the service stays in a `NOT-OK` state. That way each
+service recheck will automatically trigger a notification unless the
+service is acknowledged or in a scheduled downtime.
+
+## <a id="monitoring-icinga"></a> Monitoring Icinga 2
+
+Why should you do that? Icinga and its components run like any other
+service application on your server. There are predictable issues
+such as "disk space is running low" and your monitoring suffers from just
+that.
+
+You would also like to ensure that features and backends are running
+and storing required data. Be it the database backend where Icinga Web 2
+presents fancy dashboards, forwarded metrics to Graphite or InfluxDB or
+the entire distributed setup.
+
+This list isn't complete but should help with your own setup.
+Windows client specific checks are highlighted.
+
+Type | Description | Plugins and CheckCommands
+----------------|-------------------------------|-----------------------------------------------------
+System | Filesystem | [disk](10-icinga-template-library.md#plugin-check-command-disk), [disk-windows](10-icinga-template-library.md#windows-plugins) (Windows Client)
+System | Memory, Swap | [mem](10-icinga-template-library.md#plugin-contrib-command-mem), [swap](10-icinga-template-library.md#plugin-check-command-swap), [memory](10-icinga-template-library.md#windows-plugins) (Windows Client)
+System | Hardware | [hpasm](10-icinga-template-library.md#plugin-contrib-command-hpasm), [ipmi-sensor](10-icinga-template-library.md#plugin-contrib-command-ipmi-sensor)
+System | Virtualization | [VMware](10-icinga-template-library.md#plugin-contrib-vmware), [esxi_hardware](10-icinga-template-library.md#plugin-contrib-command-esxi-hardware)
+System | Processes | [procs](10-icinga-template-library.md#plugin-check-command-processes), [service-windows](10-icinga-template-library.md#windows-plugins) (Windows Client)
+System | System Activity Reports | [check_sar_perf](https://github.com/dnsmichi/icinga-plugins/blob/master/scripts/check_sar_perf.py)
+System | I/O | [iostat](10-icinga-template-library.md#plugin-contrib-command-iostat)
+System | Network interfaces | [nwc_health](10-icinga-template-library.md#plugin-contrib-command-nwc_health), [interfaces](10-icinga-template-library.md#plugin-contrib-command-interfaces)
+System | Users | [users](10-icinga-template-library.md#plugin-check-command-users), [users-windows](10-icinga-template-library.md#windows-plugins) (Windows Client)
+System | Logs | Forward them to [Elastic Stack](14-features.md#elastic-stack-integration) or [Graylog](14-features.md#graylog-integration) and add your own alerts.
+System | NTP | [ntp_time](10-icinga-template-library.md#plugin-check-command-ntp-time)
+System | Updates | [apt](10-icinga-template-library.md#plugin-check-command-apt), [yum](10-icinga-template-library.md#plugin-contrib-command-yum)
+Icinga | Status & Stats | [icinga](10-icinga-template-library.md#itl-icinga) (more below)
+Icinga | Cluster & Clients | [health checks](6-distributed-monitoring.md#distributed-monitoring-health-checks)
+Database | MySQL | [mysql_health](10-icinga-template-library.md#plugin-contrib-command-mysql_health)
+Database | PostgreSQL | [postgres](10-icinga-template-library.md#plugin-contrib-command-postgres)
+Database | Housekeeping | Check the database size and growth and analyse metrics to examine trends.
+Database | DB IDO | [ido](10-icinga-template-library.md#itl-icinga-ido) (more below)
+Webserver | Apache2, Nginx, etc. | [http](10-icinga-template-library.md#plugin-check-command-http), [apache_status](10-icinga-template-library.md#plugin-contrib-command-apache_status), [nginx_status](10-icinga-template-library.md#plugin-contrib-command-nginx_status)
+Webserver | Certificates | [http](10-icinga-template-library.md#plugin-check-command-http)
+Webserver | Authorization | [http](10-icinga-template-library.md#plugin-check-command-http)
+Notifications | Mail (queue) | [smtp](10-icinga-template-library.md#plugin-check-command-smtp), [mailq](10-icinga-template-library.md#plugin-check-command-mailq)
+Notifications | SMS (GSM modem) | [check_sms3_status](https://exchange.icinga.com/netways/check_sms3status)
+Notifications | Messengers, Cloud services | XMPP, Twitter, IRC, Telegram, PagerDuty, VictorOps, etc.
+Metrics | PNP, RRDTool | [check_pnp_rrds](https://github.com/lingej/pnp4nagios/tree/master/scripts) checks for stale RRD files.
+Metrics | Graphite | [graphite](10-icinga-template-library.md#plugin-contrib-command-graphite)
+Metrics | InfluxDB | [check_influxdb](https://exchange.icinga.com/Mikanoshi/InfluxDB+data+monitoring+plugin)
+Metrics | Elastic Stack | [elasticsearch](10-icinga-template-library.md#plugin-contrib-command-elasticsearch), [Elastic Stack integration](14-features.md#elastic-stack-integration)
+Metrics | Graylog | [Graylog integration](14-features.md#graylog-integration)
+
+
+The [icinga](10-icinga-template-library.md#itl-icinga) CheckCommand provides metrics for the runtime stats of
+Icinga 2. You can forward them to your preferred graphing solution.
+If you require more metrics you can also query the [REST API](12-icinga2-api.md#icinga2-api) and write
+your own custom check plugin. Or you keep using the built-in [object accessor functions](8-advanced-topics.md#access-object-attributes-at-runtime)
+to calculate stats in-memory.
+
+There is a built-in [ido](10-icinga-template-library.md#itl-icinga-ido) check available for DB IDO MySQL/PostgreSQL
+which provides additional metrics for the IDO database.
+
+```
+apply Service "ido-mysql" {
+ check_command = "ido"
+
+ vars.ido_type = "IdoMysqlConnection"
+ vars.ido_name = "ido-mysql" //the name defined in /etc/icinga2/features-enabled/ido-mysql.conf
+
+ assign where match("master*.localdomain", host.name)
+}
+```
+
+More specific database queries can be found in the [DB IDO](14-features.md#db-ido) chapter.
+
+Distributed setups should include specific [health checks](6-distributed-monitoring.md#distributed-monitoring-health-checks).
+You might also want to add additional checks for SSL certificate expiration.
+
+
+## <a id="advanced-configuration-hints"></a> Advanced Configuration Hints
+
+### <a id="advanced-use-of-apply-rules"></a> Advanced Use of Apply Rules
[Apply rules](3-monitoring-basics.md#using-apply) can be used to create a rule set which is
entirely based on host objects and their attributes.
In addition to defining check parameters this way, you can also enrich the `display_name`
attribute with more details. This will be shown in in Icinga Web 2 for example.
-## <a id="use-functions-object-config"></a> Use Functions in Object Configuration
+### <a id="use-functions-object-config"></a> Use Functions in Object Configuration
There is a limited scope where functions can be used as object attributes such as:
* Use the `icinga2 console` to test basic functionality (e.g. iterating over a dictionary)
* Build them step-by-step. You can always refactor your code later on.
-### <a id="use-functions-command-arguments-setif"></a> Use Functions in Command Arguments set_if
+#### <a id="use-functions-command-arguments-setif"></a> Use Functions in Command Arguments set_if
The `set_if` attribute inside the command arguments definition in the
[CheckCommand object definition](9-object-types.md#objecttype-checkcommand) is primarily used to
}
-### <a id="use-functions-command-attribute"></a> Use Functions as Command Attribute
+#### <a id="use-functions-command-attribute"></a> Use Functions as Command Attribute
This comes in handy for [NotificationCommands](9-object-types.md#objecttype-notificationcommand)
or [EventCommands](9-object-types.md#objecttype-eventcommand) which does not require
}
}
-### <a id="custom-functions-as-attribute"></a> Use Custom Functions as Attribute
+#### <a id="custom-functions-as-attribute"></a> Use Custom Functions as Attribute
To use custom functions as attributes, the function must be defined in a
slightly unexpected way. The following example shows how to assign values
assign where true
}
-### <a id="use-functions-assign-where"></a> Use Functions in Assign Where Expressions
+#### <a id="use-functions-assign-where"></a> Use Functions in Assign Where Expressions
If a simple expression for matching a name or checking if an item
exists in an array or dictionary does not fit, you should consider
assign where check_app_type(host, "ABAP")
}
-## <a id="access-object-attributes-at-runtime"></a> Access Object Attributes at Runtime
+### <a id="access-object-attributes-at-runtime"></a> Access Object Attributes at Runtime
The [Object Accessor Functions](18-library-reference.md#object-accessor-functions)
can be used to retrieve references to other objects by name.
}
-## <a id="check-result-freshness"></a> Check Result Freshness
-
-In Icinga 2 active check freshness is enabled by default. It is determined by the
-`check_interval` attribute and no incoming check results in that period of time.
-
- threshold = last check execution time + check interval
-
-Passive check freshness is calculated from the `check_interval` attribute if set.
-
- threshold = last check result time + check interval
-
-If the freshness checks are invalid, a new check is executed defined by the
-`check_command` attribute.
-
-
-## <a id="check-flapping"></a> Check Flapping
-
-The flapping algorithm used in Icinga 2 does not store the past states but
-calculates the flapping threshold from a single value based on counters and
-half-life values. Icinga 2 compares the value with a single flapping threshold
-configuration attribute named `flapping_threshold`.
-
-Flapping detection can be enabled or disabled using the `enable_flapping` attribute.
-
-
-## <a id="volatile-services"></a> Volatile Services
-
-By default all services remain in a non-volatile state. When a problem
-occurs, the `SOFT` state applies and once `max_check_attempts` attribute
-is reached with the check counter, a `HARD` state transition happens.
-Notifications are only triggered by `HARD` state changes and are then
-re-sent defined by the `interval` attribute.
-
-It may be reasonable to have a volatile service which stays in a `HARD`
-state type if the service stays in a `NOT-OK` state. That way each
-service recheck will automatically trigger a notification unless the
-service is acknowledged or in a scheduled downtime.
DbCatComment | Comments | Icinga Web 2
DbCatDowntime | Downtimes | Icinga Web 2
DbCatEventHandler | Event handler data | Icinga Web 2
- DbCatExternalCommand | External commands | Icinga Web 2
+ DbCatExternalCommand | External commands | --
DbCatFlapping | Flap detection data | Icinga Web 2
DbCatCheck | Check results | --
DbCatLog | Log messages | --
DbCatRetention | Retention data | Icinga Web 2
DbCatStateHistory | Historical state data | Icinga Web 2
+The default value for `categories` includes everything required
+by Icinga Web 2 in the table above.
+
In addition to the category flags listed above the `DbCatEverything`
flag may be used as a shortcut for listing all flags.
> `DbCatProgramStatus | DbCatState` was deprecated in 2.5 and will
> be removed in future versions.
-External interfaces like Icinga Web 2 require everything except `DbCatCheck`
-and `DbCatLog` which is the default value if `categories` is not set.
-
## <a id="objecttype-idopgsqlconnection"></a> IdoPgSqlConnection
IDO database adapter for PostgreSQL.
DbCatComment | Comments | Icinga Web 2
DbCatDowntime | Downtimes | Icinga Web 2
DbCatEventHandler | Event handler data | Icinga Web 2
- DbCatExternalCommand | External commands | Icinga Web 2
+ DbCatExternalCommand | External commands | --
DbCatFlapping | Flap detection data | Icinga Web 2
DbCatCheck | Check results | --
DbCatLog | Log messages | --
DbCatRetention | Retention data | Icinga Web 2
DbCatStateHistory | Historical state data | Icinga Web 2
+The default value for `categories` includes everything required
+by Icinga Web 2 in the table above.
+
In addition to the category flags listed above the `DbCatEverything`
flag may be used as a shortcut for listing all flags.
> `DbCatProgramStatus | DbCatState` was deprecated in 2.5 and will
> be removed in future versions.
-External interfaces like Icinga Web 2 require everything except `DbCatCheck`
-and `DbCatLog` which is the default value if `categories` is not set.
-
## <a id="objecttype-influxdbwriter"></a> InfluxdbWriter
host = "127.0.0.1"
port = 8086
database = "icinga2"
+
+ flush_threshold = 1024
+ flush_interval = 10s
+
host_template = {
measurement = "$host.check_command$"
tags = {
flush_threshold | **Optional.** How many data points to buffer before forcing a transfer to InfluxDB. Defaults to `1024`.
socket_timeout | **Optional.** How long to wait for InfluxDB to respond. Defaults to `5s`.
+Note: If `flush_threshold` is set too low, this will always force the feature to flush all data
+to InfluxDB. Experiment with the setting, if you are processing more than 1024 metrics per second
+or similar.
+
### <a id="objecttype-influxdbwriter-instance-tags"></a> Instance Tagging
Consider the following service check:
>
> UNIX sockets are not supported on Windows.
+## <a id="objecttype-logstashwriter"></a> LogstashWriter
+
+Writes Icinga 2 event messages to [Logstash](14-features.md#logstash-writer).
+
+Example:
+
+```
+library "perfdata"
+
+object LogstashWriter "logstash" {
+ host = "192.168.33.7"
+ port = 5555
+ socket_type = "udp"
+}
+```
+
+Configuration Attributes:
+
+ Name |Description
+ ----------------------|----------------------
+ host |**Optional.** Logstash receiver host address. Defaults to `127.0.0.1`.
+ port |**Optional.** Logstash receiver port. Defaults to `9201`.
+ socket_type |**Optional.** Socket type. Can be either `udp` or `tcp`. Defaults to `udp`.
+ source |**Optional.** Source name for this instance. Defaults to `icinga2`.
+
## <a id="objecttype-notification"></a> Notification
//host = "127.0.0.1"
//port = 8086
//database = "icinga2"
+ //flush_threshold = 1024
+ //flush_interval = 10s
//host_template = {
// measurement = "$host.check_command$"
// tags = {
--- /dev/null
+/**
+ * The LogstashWriter type writes Icinga 2 event messages
+ * to a TCP or UDP socket.
+ */
+
+library "perfdata"
+
+object LogstashWriter "logstash" {
+ //host = "127.0.0.1"
+ //port = 9201
+ //socket_type = "udp"
+}
/**
* The redis library implements functionality for putting Icinga
* event data into a redis database.
+ *
+ * NOTE: This is experimental and may change without further notice.
*/
library "redis"
#
### BEGIN INIT INFO
# Provides: icinga2
-# Required-Start: $remote_fs $syslog
-# Required-Stop: $remote_fs $syslog
+# Required-Start: $remote_fs $syslog $network
+# Required-Stop: $remote_fs $syslog $network
# Should-Start: mysql postgresql
# Should-Stop: mysql postgresql
# Default-Start: 2 3 5
[Unit]
Description=Icinga host/service/network monitoring system
-After=syslog.target network.target postgresql.service mariadb.service carbon-cache.service
+After=syslog.target network-online.target postgresql.service mariadb.service carbon-cache.service carbon-relay.service
[Service]
Type=forking
Summary: Network monitoring application
Name: icinga2
-Version: 2.6.2
+Version: 2.6.3
Release: %{revision}%{?dist}
License: GPL-2.0+
Group: Applications/System
BuildRequires: libstdc++48-devel
BuildRequires: libopenssl1-devel
%else
+%if "%{_vendor}" == "redhat" && (0%{?el5} || 0%{?rhel} == 5 || "%{?dist}" == ".el5" || 0%{?el6} || 0%{?rhel} == 6 || "%{?dist}" == ".el6")
+# Requires devtoolset-2 scl
+BuildRequires: devtoolset-2-gcc-c++
+BuildRequires: devtoolset-2-libstdc++-devel
+%define scl_enable scl enable devtoolset-2 --
+%else
BuildRequires: gcc-c++
BuildRequires: libstdc++-devel
+%endif
BuildRequires: openssl-devel
%endif
BuildRequires: cmake
# sles 11 sp3 requires packages.icinga.com
BuildRequires: boost153-devel
%else
-BuildRequires: boost-devel >= 1.41
+%if (0%{?el5} || 0%{?rhel} == 5 || "%{?dist}" == ".el5" || 0%{?el6} || 0%{?rhel} == 6 || "%{?dist}" == ".el6")
+# Requires EPEL repository
+BuildRequires: boost148-devel >= 1.48
+%else
+BuildRequires: boost-devel >= 1.48
+%endif
%endif
%endif
Requires: selinux-policy >= %{_selinux_policy_version}
%endif
Requires: %{name} = %{version}-%{release}
-Requires(post): /usr/sbin/semodule, /sbin/restorecon
-Requires(postun): /usr/sbin/semodule, /sbin/restorecon
+Requires(post): policycoreutils-python
+Requires(postun): policycoreutils-python
%description selinux
SELinux policy module supporting icinga2
%endif
%if "%{_vendor}" == "redhat"
%if 0%{?el5} || 0%{?rhel} == 5 || "%{?dist}" == ".el5" || 0%{?el6} || 0%{?rhel} == 6 || "%{?dist}" == ".el6"
+%if 0%{?build_icinga_org}
# Boost_VERSION 1.41.0 vs 101400 - disable build tests
# details in https://dev.icinga.com/issues/5033
-CMAKE_OPTS="$CMAKE_OPTS -DBOOST_LIBRARYDIR=/usr/lib/boost153 \
+CMAKE_OPTS="$CMAKE_OPTS -DBOOST_LIBRARYDIR=%{_libdir}/boost153 \
-DBOOST_INCLUDEDIR=/usr/include/boost153 \
- -DBoost_ADDITIONAL_VERSIONS='1.53;1.53.0' \
+ -DBoost_ADDITIONAL_VERSIONS='1.53;1.53.0'"
+%else
+CMAKE_OPTS="$CMAKE_OPTS -DBOOST_LIBRARYDIR=%{_libdir}/boost148 \
+ -DBOOST_INCLUDEDIR=/usr/include/boost148 \
+ -DBoost_ADDITIONAL_VERSIONS='1.48;1.48.0'"
+%endif
+CMAKE_OPTS="$CMAKE_OPTS \
-DBoost_NO_SYSTEM_PATHS=TRUE \
-DBUILD_TESTING=FALSE \
-DBoost_NO_BOOST_CMAKE=TRUE"
%endif
-%if 0%{?el6} || 0%{?rhel} == 6 || "%{?dist}" == ".el6"
-CMAKE_OPTS="$CMAKE_OPTS -DBUILD_TESTING=FALSE"
-%endif
%endif
%if "%{_vendor}" != "suse"
CMAKE_OPTS="$CMAKE_OPTS -DICINGA2_PLUGINDIR=%{_libdir}/nagios/plugins"
%else
%if 0%{?suse_version} < 1310
-CMAKE_OPTS="$CMAKE_OPTS -DBOOST_LIBRARYDIR=/usr/lib/boost153 \
+CMAKE_OPTS="$CMAKE_OPTS -DBOOST_LIBRARYDIR=%{_libdir}/boost153 \
-DBOOST_INCLUDEDIR=/usr/include/boost153 \
-DBoost_ADDITIONAL_VERSIONS='1.53;1.53.0' \
-DBoost_NO_SYSTEM_PATHS=TRUE \
CMAKE_OPTS="$CMAKE_OPTS -DUSE_SYSTEMD=ON"
%endif
-cmake $CMAKE_OPTS -DCMAKE_C_FLAGS:STRING="%{optflags} %{?march_flag}" -DCMAKE_CXX_FLAGS:STRING="%{optflags} %{?march_flag}" .
+%if "%{_vendor}" == "suse" && 0%{?suse_version} < 1210
+# from package gcc48-c++
+export CC=gcc-4.8
+export CXX=g++-4.8
+%endif
+
+%{?scl_enable} cmake $CMAKE_OPTS -DCMAKE_C_FLAGS:STRING="%{optflags} %{?march_flag}" -DCMAKE_CXX_FLAGS:STRING="%{optflags} %{?march_flag}" .
make %{?_smp_mflags}
}
-/**
+/**
* Memory and swap usage on Linux given by Net-snmp
* Memory usage on cisco routers or Pix
* For other systems use check_snmp_storage.pl
}
-/**
+/**
* snmp network interfaces
* Url reference: http://nagios.manubulon.com/snmp_int.html
*/
}
-/**
+/**
* snmp process
* Url reference: http://nagios.manubulon.com/snmp_process.html
*/
"-A" = {
set_if = "$snmp_process_use_params$"
}
+ "-m" = {
+ description = "Checks memory usage. Values warning and critical in Mb eg. 512,1024"
+ set_if = "$snmp_process_mem_usage$"
+ value = "$snmp_process_mem_threshold$"
+ }
+ "-u" = {
+ description = "Checks CPU usage. Values warning and critical in % (value can be > 100% : 100%=1 CPU) eg. 15,50"
+ set_if = "$snmp_process_cpu_usage$"
+ value = "$snmp_process_cpu_threshold$"
+ }
}
vars.snmp_process_name = ".*"
vars.snmp_crit = 0
vars.snmp_perf = true
vars.snmp_process_use_params = false
+ vars.snmp_process_mem_usage = false
+ vars.snmp_process_mem_threshold = "0,0"
+ vars.snmp_process_cpu_usage = false
+ vars.snmp_process_cpu_threshold = "0,0"
}
-
command = [ PluginDir + "/check_tcp" ]
arguments = {
- "-H" = "$ssl_address$"
- "-p" = "$ssl_port$"
+ "-H" = {
+ value = "$ssl_address$"
+ description = "Host address"
+ }
+ "-p" = {
+ value = "$ssl_port$"
+ description ="TCP port (default: 443)"
+ }
"--ssl" = { }
- "--timeout" = "$ssl_timeout$"
- "-D" = "$ssl_cert_valid_days_warn$,$ssl_cert_valid_days_critical$"
+ "-t" = {
+ value = "$ssl_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-D" = {{
+ var days_warn = macro("$ssl_cert_valid_days_warn$")
+ var days_critical = macro("$ssl_cert_valid_days_critical$")
+ if (days_warn) {
+ if (days_critical) {
+ return days_warn + "," + days_critical
+ } else {
+ return days_warn
+ }
+ }
+ }}
"-N" = "$ssl_sni$"
}
vars.ssl_address = "$check_address$"
+ vars.ssl_port = 443
+ vars.ssl_cert_valid_days_warn = false
+ vars.ssl_cert_valid_days_critical = false
vars.ssl_sni = "$ssl_address$"
}
+
object CheckCommand "udp" {
import "ipv4-or-ipv6"
value = "$dns_server$"
description = "Optional DNS server you want to use for the lookup."
}
+ "-q" = {
+ value = "$dns_query_type$"
+ description = "Optional DNS record query type where TYPE =(A, AAAA, SRV, TXT, MX, ANY). The default query type is 'A' (IPv4 host entry)"
+ }
"-a" = {
value = "$dns_expected_answers$"
description = "Optional ip address or host you expect the DNS server to return. Host must end with a dot (.). This option can be repeated multiple times (Returns OK if any value match). If multiple addresses are returned at once, you have to match the whole string of addresses separated with commas (sorted alphabetically)."
}
vars.dns_lookup = "$host.name$"
+ vars.dns_query_type = "A"
vars.dns_timeout = 10
}
set_if = "$nrpe_ipv6$"
description = "Use IPv6 connection"
}
+ "-2" = {
+ set_if = "$nrpe_version_2$"
+ description = "Use this if you want to connect to NRPE v2"
+ }
}
vars.nrpe_address = "$check_address$"
vars.nrpe_timeout_unknown = false
vars.check_ipv4 = "$nrpe_ipv4$"
vars.check_ipv6 = "$nrpe_ipv6$"
-
+ vars.nrpe_version_2 = false
timeout = 5m
}
value = "$ldap_critical$"
description = "Response time to result in critical status (seconds)"
}
+ "-W" = {
+ value = "$ldap_warning_entries$"
+ description = "Number of found entries to result in warning status (optional)"
+ }
+ "-C" = {
+ value = "$ldap_critical_entries$"
+ description = "Number of found entries to result in critical status (optional)"
+ }
"-t" = {
value = "$ldap_timeout$"
description = "Seconds before connection times out (default: 10)"
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
install(
- FILES databases.conf icingacli.conf ipmi.conf logmanagement.conf network-components.conf operating-system.conf virtualization.conf vmware.conf storage.conf web.conf hardware.conf network-services.conf metrics.conf
+ FILES databases.conf hardware.conf icingacli.conf ipmi.conf logmanagement.conf metrics.conf network-components.conf network-services.conf operating-system.conf raid-controller.conf smart-attributes.conf storage.conf virtualization.conf vmware.conf web.conf
DESTINATION ${CMAKE_INSTALL_DATADIR}/icinga2/include/plugins-contrib.d
)
set_if = "$mssql_health_commit$"
description = "turns on autocommit for the dbd::sybase module"
}
+ "--notemp" = {
+ set_if = "$mssql_health_notemp$"
+ description = "Ignore temporary databases/tablespaces"
+ }
+ "--nooffline" = {
+ set_if = "$mssql_health_nooffline$"
+ description = "Skip the offline databases"
+ }
+ "--lookback" = {
+ value = "$mssql_health_lookback$"
+ description = "The amount of time you want to look back when calculating average rates"
+ }
+ "--report" = {
+ value = "$mssql_health_report$"
+ description = "Report can be used to output only the bad news (short,long,html)"
+ }
}
vars.mssql_health_regexp = false
vars.mssql_health_offlineok = false
vars.mssql_health_commit = false
+ vars.mssql_health_notemp = false
+ vars.mssql_health_nooffline = false
+ vars.mssql_health_report = "short"
}
object CheckCommand "mysql_health" {
value = "$db2_health_lookback$"
description = "How many days iin the past db2_health check should look back to calculate exitcode."
}
-
+ "--report" = {
+ value = "$db2_health_report$"
+ description = "Report can be used to output only the bad news (short,long,html)"
+ }
}
env = {
vars.db2_health_regexp = false
vars.db2_health_hostname = "$check_address$"
+ vars.db2_health_report = "short"
vars.db2_health_env_db2_home = "/opt/ibm/db2/V10.5"
vars.db2_health_env_db2_version = "10.5"
arguments = {
"--hostname" = {
value = "$hpasm_hostname$"
+ set_if = "$hpasm_remote$"
description = "Hostname or IP-address of the server (SNMP mode only)"
- required = true
}
"--community" = {
value = "$hpasm_community$"
description = "Check network interfaces (and groups). Try it and report me whyt you think about it. I need to build up some know how on this subject. If get an error and you think, it is not justified for your configuration, please tell me about it. (alwasy send the output of \"snmpwalk -On .... 1.3.6.1.4.1.232\" and a description how you setup your nics and why it is correct opposed to the plugins error message"
}
}
-
+ vars.hpasm_remote = true
vars.hpasm_hostname = "$check_address$"
}
object CheckCommand "icingacli-businessprocess" {
import "icingacli"
- command += [ "businessprocess", "check", "process" ]
+ command += [ "businessprocess", "process", "check" ]
arguments = {
"--config" = {
}
"--details" = {
set_if = "$icingacli_businessprocess_details$"
- description = "Get details for root cause analyses"
+ description = "Get details for root cause analysis"
+ }
+ "--state-type" = {
+ value = "$icingacli_businessprocess_statetype$"
+ description = "Define which state type to look at. Could be either soft or hard, overrides an eventually configured default"
}
"--process" = {
value = "$icingacli_businessprocess_process$"
description = "Business process to monitor"
skip_key = true
required = true
+ order = -1
}
}
vars.icingacli_businessprocess_details = false
}
+
description = "Turn off system event log checking via ipmi-sel"
}
"-D" = {
- value = "$ipmi_protocal_lan_version$"
+ value = "$ipmi_protocol_lan_version$"
description = "Change the protocol LAN version (default: LAN_2_0)"
}
"-fc" = {
}
vars.ipmi_address = "$check_address$"
- vars.ipmi_protocal_lan_version = "LAN_2_0"
+ vars.ipmi_protocol_lan_version = "LAN_2_0"
+}
+
+/*
+ * Icinga2 CheckCommand definition for an IPMI interface ping check
+ */
+
+/* Relaxed ping thresholds shared by IPMI alive checks: warn/crit at a
+ * 5000 ms round-trip average or 100% packet loss, using a single probe
+ * packet. Consumed by the "ping" CheckCommand this template is combined
+ * with (see "ipmi-alive" below). */
+template CheckCommand "ipmi-alive-common" {
+	vars.ping_wrta = 5000.0
+	vars.ping_wpl = 100
+
+	vars.ping_crta = 5000.0
+	vars.ping_cpl = 100
+
+	vars.ping_packets = 1
+}
+/* Ping check for an IPMI management interface: the standard "ping"
+ * command with the threshold defaults from "ipmi-alive-common". */
+object CheckCommand "ipmi-alive" {
+	import "ping"
+	import "ipmi-alive-common"
}
description = "The role of this device in a hsrp group (active/standby/listen)"
}
"--report" = {
- set_if = "$nwc_health_report$"
- description = "Can be used to shorten the output"
+ value = "$nwc_health_report$"
+ description = "Can be used to shorten the output. Possible values are: 'long' (default), 'short' (to shorten if available), or 'html' (to produce some html outputs if available)"
}
"--lookback" = {
value = "$nwc_health_lookback$"
description = "The extended critical thresholds"
}
"--mitigation" = {
- set_if = "$nwc_health_mitigation$"
+ value = "$nwc_health_mitigation$"
description = "The parameter allows you to change a critical error to a warning."
}
"--selectedperfdata" = {
--- /dev/null
+/*
+ * Icinga2 CheckCommand definitions to monitor RAID controllers from Adaptec and Broadcom using
+ * the Adaptec RAID Monitoring Plugin and the LSI RAID Monitoring Plugin
+ */
+
+/* Checks an Adaptec RAID controller through the arcconf CLI. */
+object CheckCommand "adaptec-raid" {
+	import "plugin-check-command"
+
+	command = [ PluginDir + "/check_adaptec_raid" ]
+
+	arguments = {
+		"-C" = {
+			required = true
+			value = "$adaptec_controller_number$"
+			description = "Insert the controller number to be checked."
+		}
+		"-p" = {
+			required = true
+			value = "$arcconf_path$"
+			description = "Insert the path to arcconf (e.g. /sbin/arcconf)."
+		}
+	}
+
+	/* Default arcconf location; override vars.arcconf_path if installed elsewhere. */
+	vars.arcconf_path = "/sbin/arcconf"
+}
+
+/* Checks a Broadcom/LSI RAID controller through the storcli CLI. */
+object CheckCommand "lsi-raid" {
+	import "plugin-check-command"
+
+	command = [ PluginDir + "/check_lsi_raid" ]
+
+	arguments = {
+		"-C" = {
+			required = true
+			value = "$lsi_controller_number$"
+			description = "Insert the controller number to be checked."
+		}
+		"-p" = {
+			required = true
+			value = "$storcli_path$"
+			description = "Insert the path to storcli (e.g. /usr/sbin/storcli)."
+		}
+	}
+
+	/* Default storcli location; override vars.storcli_path if installed elsewhere. */
+	vars.storcli_path = "/usr/sbin/storcli"
+}
--- /dev/null
+/*
+ * Icinga2 CheckCommand definition for the SMART Attributes Monitoring Plugin
+ */
+
+/* Checks S.M.A.R.T. attributes of a disk device via check_smart_attributes. */
+object CheckCommand "smart-attributes" {
+	import "plugin-check-command"
+
+	command = [ PluginDir + "/check_smart_attributes" ]
+
+	arguments = {
+		"-dbj" = {
+			required = true
+			value = "$smart_attributes_config_path$"
+			description = "Path to the smart attributes config file (e.g. check_smartdb.json)"
+		}
+		"-d" = {
+			required = true
+			value = "$smart_attributes_device$"
+			description = "Insert the device name (e.g. /dev/sda) to monitor"
+		}
+	}
+
+	/* Default JSON database below SysconfDir; override
+	 * vars.smart_attributes_config_path if the file lives elsewhere. */
+	vars.smart_attributes_config_path = SysconfDir + "/icinga2/plugins-config/check_smartdb.json"
"--multiline" = {
set_if = "$vmware_multiline$"
}
+ "--standbyok" = {
+ set_if = "$vmware_standbyok$"
+ }
}
}
vars.apache_status_ssl = false
}
+/* Checks X.509 certificate validity (expiry, CN/issuer/org patterns,
+ * allowed SSL/TLS versions) on a remote service using check_ssl_cert. */
+object CheckCommand "ssl_cert" {
+	import "ipv4-or-ipv6"
+
+	command = [ PluginContribDir + "/check_ssl_cert" ]
+
+	arguments = {
+		"-H" = {
+			value = "$ssl_cert_address$"
+			description = "The host's address"
+			required = true
+		}
+		"-p" = {
+			value = "$ssl_cert_port$"
+			description = "TCP port number (default: 443)"
+		}
+		"-f" = {
+			value = "$ssl_cert_file$"
+			description = "Local file path (works with -H localhost only)"
+		}
+		"-w" = {
+			value = "$ssl_cert_warn$"
+			description = "Minimum number of days a certificate has to be valid"
+		}
+		"-c" = {
+			value = "$ssl_cert_critical$"
+			description = "Minimum number of days a certificate has to be valid to issue a critical status"
+		}
+		"-n" = {
+			value = "$ssl_cert_cn$"
+			description = "Pattern to match the CN of the certificate"
+		}
+		"-i" = {
+			value = "$ssl_cert_issuer$"
+			description = "Pattern to match the issuer of the certificate"
+		}
+		"-o" = {
+			value = "$ssl_cert_org$"
+			description = "Pattern to match the organization of the certificate"
+		}
+		"-e" = {
+			value = "$ssl_cert_email$"
+			description = "Pattern to match the email address contained in the certificate"
+		}
+		"-N" = {
+			set_if = "$ssl_cert_match_host$"
+			description = "Match CN with the host name"
+		}
+		"--serial" = {
+			value = "$ssl_cert_serial$"
+			description = "Pattern to match the serial number"
+		}
+		"-s" = {
+			set_if = "$ssl_cert_selfsigned$"
+			description = "Allow self-signed certificate"
+		}
+		"--sni" = {
+			value = "$ssl_cert_sni$"
+			description = "Sets the TLS SNI (Server Name Indication) extension"
+		}
+		"-t" = {
+			value = "$ssl_cert_timeout$"
+			description = "Seconds before connection times out (default: 10)"
+		}
+		"-P" = {
+			value = "$ssl_cert_protocol$"
+			description = "Use the specific protocol {http|smtp|pop3|imap|ftp|xmpp|irc|ldap} (default: http)"
+		}
+		/* NOTE(review): macro names "$ssl_cert_clientssl_cert$" and
+		 * "$ssl_cert_rootssl_cert$" look like a search/replace artifact,
+		 * but they are kept as-is for config compatibility. */
+		"-C" = {
+			value = "$ssl_cert_clientssl_cert$"
+			description = "Use client certificate to authenticate"
+		}
+		"--clientpass" = {
+			value = "$ssl_cert_clientpass$"
+			description = "Set passphrase for client certificate"
+		}
+		"-r" = {
+			value = "$ssl_cert_rootssl_cert$"
+			description = "Root certificate or directory to be used for certificate validation"
+		}
+		/* Force exactly one SSL/TLS version via $ssl_cert_ssl_version$. */
+		"--ssl2" = {
+			set_if = {{
+				return macro("$ssl_cert_ssl_version$") == "ssl2"
+			}}
+		}
+		"--ssl3" = {
+			set_if = {{
+				return macro("$ssl_cert_ssl_version$") == "ssl3"
+			}}
+		}
+		"--tls1" = {
+			set_if = {{
+				return macro("$ssl_cert_ssl_version$") == "tls1"
+			}}
+		}
+		"--tls1_1" = {
+			set_if = {{
+				return macro("$ssl_cert_ssl_version$") == "tls1_1"
+			}}
+		}
+		"--tls1_2" = {
+			set_if = {{
+				return macro("$ssl_cert_ssl_version$") == "tls1_2"
+			}}
+		}
+		/* $ssl_cert_disable_ssl_versions$ accepts a string or an array of
+		 * version names; each matching --no_* switch is emitted. */
+		"--no_ssl2" = {
+			set_if = {{
+				var disable_versions = macro("$ssl_cert_disable_ssl_versions$")
+				if (typeof(disable_versions) == String) {
+					disable_versions = [ disable_versions ]
+				}
+				return "ssl2" in disable_versions
+			}}
+		}
+		"--no_ssl3" = {
+			set_if = {{
+				var disable_versions = macro("$ssl_cert_disable_ssl_versions$")
+				if (typeof(disable_versions) == String) {
+					disable_versions = [ disable_versions ]
+				}
+				return "ssl3" in disable_versions
+			}}
+		}
+		"--no_tls1" = {
+			set_if = {{
+				var disable_versions = macro("$ssl_cert_disable_ssl_versions$")
+				if (typeof(disable_versions) == String) {
+					disable_versions = [ disable_versions ]
+				}
+				return "tls1" in disable_versions
+			}}
+		}
+		"--no_tls1_1" = {
+			set_if = {{
+				var disable_versions = macro("$ssl_cert_disable_ssl_versions$")
+				if (typeof(disable_versions) == String) {
+					disable_versions = [ disable_versions ]
+				}
+				return "tls1_1" in disable_versions
+			}}
+		}
+		"--no_tls1_2" = {
+			set_if = {{
+				var disable_versions = macro("$ssl_cert_disable_ssl_versions$")
+				if (typeof(disable_versions) == String) {
+					disable_versions = [ disable_versions ]
+				}
+				return "tls1_2" in disable_versions
+			}}
+		}
+		"--ecdsa" = {
+			set_if = {{
+				return macro("$ssl_cert_cipher$") == "ecdsa"
+			}}
+			description = "Cipher selection: force ECDSA authentication"
+		}
+		"--rsa" = {
+			set_if = {{
+				return macro("$ssl_cert_cipher$") == "rsa"
+			}}
+			description = "Cipher selection: force RSA authentication"
+		}
+		"--ignore-sig-alg" = {
+			set_if = "$ssl_cert_ignore_signature$"
+			/* Fixed typo: "od" -> "or". */
+			description = "Do not check if the certificate was signed with SHA1 or MD5"
+		}
+		"--ignore-exp" = {
+			set_if = "$ssl_cert_ignore_expiration$"
+			description = "Ignore expiration date"
+		}
+		"--ignore-ocsp" = {
+			set_if = "$ssl_cert_ignore_ocsp$"
+			description = "Do not check revocation with OCSP"
+		}
+	}
+
+	vars.ssl_cert_address = "$check_address$"
+	vars.ssl_cert_port = 443
+	vars.ssl_cert_timeout = 10
+}
+
function.cpp function.thpp function-script.cpp functionwrapper.cpp scriptglobal.cpp
scriptutils.cpp serializer.cpp socket.cpp socketevents.cpp socketevents-epoll.cpp socketevents-poll.cpp stacktrace.cpp
statsfunction.cpp stdiostream.cpp stream.cpp streamlogger.cpp streamlogger.thpp string.cpp string-script.cpp
- sysloglogger.cpp sysloglogger.thpp tcpsocket.cpp threadpool.cpp timer.cpp
+ sysloglogger.cpp sysloglogger.thpp tcpsocket.cpp udpsocket.cpp threadpool.cpp timer.cpp
tlsstream.cpp tlsutility.cpp type.cpp typetype-script.cpp unixsocket.cpp utility.cpp value.cpp
value-operators.cpp workqueue.cpp
)
Log(LogCritical, "Application", "Cannot update PID file. Aborting restart operation.");
return;
}
+
+ Log(LogDebug, "Application")
+ << "Keeping pid '" << m_ReloadProcess << "' open.";
+
ClosePidFile(false);
} else
ClosePidFile(true);
if (!prototype) {
prototype = new Dictionary();
- prototype->Set("len", new Function("Array#len", WrapFunction(ArrayLen), true));
- prototype->Set("set", new Function("Array#set", WrapFunction(ArraySet)));
- prototype->Set("get", new Function("Array#get", WrapFunction(ArrayGet)));
- prototype->Set("add", new Function("Array#add", WrapFunction(ArrayAdd)));
- prototype->Set("remove", new Function("Array#remove", WrapFunction(ArrayRemove)));
- prototype->Set("contains", new Function("Array#contains", WrapFunction(ArrayContains), true));
+ prototype->Set("len", new Function("Array#len", WrapFunction(ArrayLen), {}, true));
+ prototype->Set("set", new Function("Array#set", WrapFunction(ArraySet), { "index", "value" }));
+ prototype->Set("get", new Function("Array#get", WrapFunction(ArrayGet), { "index" }));
+ prototype->Set("add", new Function("Array#add", WrapFunction(ArrayAdd), { "value" }));
+ prototype->Set("remove", new Function("Array#remove", WrapFunction(ArrayRemove), { "index" }));
+ prototype->Set("contains", new Function("Array#contains", WrapFunction(ArrayContains), { "value" }, true));
prototype->Set("clear", new Function("Array#clear", WrapFunction(ArrayClear)));
- prototype->Set("sort", new Function("Array#sort", WrapFunction(ArraySort), true));
- prototype->Set("shallow_clone", new Function("Array#shallow_clone", WrapFunction(ArrayShallowClone), true));
- prototype->Set("join", new Function("Array#join", WrapFunction(ArrayJoin), true));
- prototype->Set("reverse", new Function("Array#reverse", WrapFunction(ArrayReverse), true));
- prototype->Set("map", new Function("Array#map", WrapFunction(ArrayMap), true));
- prototype->Set("reduce", new Function("Array#reduce", WrapFunction(ArrayReduce), true));
- prototype->Set("filter", new Function("Array#filter", WrapFunction(ArrayFilter), true));
- prototype->Set("unique", new Function("Array#unique", WrapFunction(ArrayUnique), true));
+ prototype->Set("sort", new Function("Array#sort", WrapFunction(ArraySort), { "less_cmp" }, true));
+ prototype->Set("shallow_clone", new Function("Array#shallow_clone", WrapFunction(ArrayShallowClone), {}, true));
+ prototype->Set("join", new Function("Array#join", WrapFunction(ArrayJoin), { "separator" }, true));
+ prototype->Set("reverse", new Function("Array#reverse", WrapFunction(ArrayReverse), {}, true));
+ prototype->Set("map", new Function("Array#map", WrapFunction(ArrayMap), { "func" }, true));
+ prototype->Set("reduce", new Function("Array#reduce", WrapFunction(ArrayReduce), { "reduce" }, true));
+ prototype->Set("filter", new Function("Array#filter", WrapFunction(ArrayFilter), { "func" }, true));
+ prototype->Set("unique", new Function("Array#unique", WrapFunction(ArrayUnique), {}, true));
}
return prototype;
if (!prototype) {
prototype = new Dictionary();
- prototype->Set("to_string", new Function("Boolean#to_string", WrapFunction(BooleanToString), true));
+ prototype->Set("to_string", new Function("Boolean#to_string", WrapFunction(BooleanToString), {}, true));
}
return prototype;
if (!prototype) {
prototype = new Dictionary();
- prototype->Set("modify_attribute", new Function("ConfigObject#modify_attribute", WrapFunction(ConfigObjectModifyAttribute), false));
- prototype->Set("restore_attribute", new Function("ConfigObject#restore_attribute", WrapFunction(ConfigObjectRestoreAttribute), false));
+ prototype->Set("modify_attribute", new Function("ConfigObject#modify_attribute", WrapFunction(ConfigObjectModifyAttribute), { "attr", "value" }, false));
+ prototype->Set("restore_attribute", new Function("ConfigObject#restore_attribute", WrapFunction(ConfigObjectRestoreAttribute), { "attr", "value" }, false));
}
return prototype;
SetStartCalled(true);
}
+/**
+ * Flags the object as active before Start() runs.
+ *
+ * Split out of Activate() so the active flag can be set separately from
+ * starting the object. Must be called at most once per object: the ASSERT
+ * fires if the object is already active.
+ */
+void ConfigObject::PreActivate(void)
+{
+	CONTEXT("Setting 'active' to true for object '" + GetName() + "' of type '" + GetReflectionType()->GetName() + "'");
+
+	/* Activating twice indicates a logic error in the caller. */
+	ASSERT(!IsActive());
+	SetActive(true, true);
+}
+
void ConfigObject::Activate(bool runtimeCreated)
{
CONTEXT("Activating object '" + GetName() + "' of type '" + GetReflectionType()->GetName() + "'");
Start(runtimeCreated);
ASSERT(GetStartCalled());
- ASSERT(!IsActive());
- SetActive(true, true);
if (GetHAMode() == HARunEverywhere)
SetAuthority(true);
return m_Zone;
}
+/**
+ * Returns the location in the configuration where this object was defined,
+ * taken from the object's DebugInfo.
+ *
+ * @returns A dictionary with the keys "path", "first_line", "first_column",
+ *          "last_line" and "last_column".
+ */
+Dictionary::Ptr ConfigObject::GetSourceLocation(void) const
+{
+	DebugInfo di = GetDebugInfo();
+
+	Dictionary::Ptr result = new Dictionary();
+	result->Set("path", di.Path);
+	result->Set("first_line", di.FirstLine);
+	result->Set("first_column", di.FirstColumn);
+	result->Set("last_line", di.LastLine);
+	result->Set("last_column", di.LastColumn);
+	return result;
+}
+
NameComposer::~NameComposer(void)
{ }
void Register(void);
void Unregister(void);
+ void PreActivate(void);
void Activate(bool runtimeCreated = false);
void Deactivate(bool runtimeRemoved = false);
void SetAuthority(bool authority);
virtual void OnAllConfigLoaded(void);
virtual void OnStateLoaded(void);
+ virtual Dictionary::Ptr GetSourceLocation(void) const override;
+
template<typename T>
static intrusive_ptr<T> GetObject(const String& name)
{
return m_ShortName;
}}}
};
- [config] name(Zone) zone (ZoneName);
+ [config, no_user_modify] name(Zone) zone (ZoneName);
[config, no_user_modify] String package;
[config, get_protected, no_user_modify] Array::Ptr templates;
+ [config, no_storage, no_user_modify] Dictionary::Ptr source_location {
+ get;
+ };
[get_protected, no_user_modify] bool active;
[get_protected, no_user_modify] bool paused {
default {{{ return true; }}}
if (!prototype) {
prototype = new Dictionary();
- prototype->Set("format", new Function("DateTime#format", WrapFunction(DateTimeFormat)));
+ prototype->Set("format", new Function("DateTime#format", WrapFunction(DateTimeFormat), { "format" }));
}
return prototype;
if (!prototype) {
prototype = new Dictionary();
- prototype->Set("len", new Function("Dictionary#len", WrapFunction(DictionaryLen), true));
- prototype->Set("set", new Function("Dictionary#set", WrapFunction(DictionarySet)));
- prototype->Set("get", new Function("Dictionary#get", WrapFunction(DictionaryGet)));
- prototype->Set("remove", new Function("Dictionary#remove", WrapFunction(DictionaryRemove)));
- prototype->Set("contains", new Function("Dictionary#contains", WrapFunction(DictionaryContains), true));
- prototype->Set("shallow_clone", new Function("Dictionary#shallow_clone", WrapFunction(DictionaryShallowClone), true));
- prototype->Set("keys", new Function("Dictionary#keys", WrapFunction(DictionaryKeys), true));
+ prototype->Set("len", new Function("Dictionary#len", WrapFunction(DictionaryLen), {}, true));
+ prototype->Set("set", new Function("Dictionary#set", WrapFunction(DictionarySet), { "key", "value" }));
+ prototype->Set("get", new Function("Dictionary#get", WrapFunction(DictionaryGet), { "key" }));
+ prototype->Set("remove", new Function("Dictionary#remove", WrapFunction(DictionaryRemove), { "key" }));
+ prototype->Set("contains", new Function("Dictionary#contains", WrapFunction(DictionaryContains), { "key" }, true));
+ prototype->Set("shallow_clone", new Function("Dictionary#shallow_clone", WrapFunction(DictionaryShallowClone), {}, true));
+ prototype->Set("keys", new Function("Dictionary#keys", WrapFunction(DictionaryKeys), {}, true));
}
return prototype;
return it->second;
}
+
/**
* Retrieves a value from a dictionary.
*
#include "base/function.hpp"
#include "base/function.tcpp"
+#include "base/array.hpp"
#include "base/scriptframe.hpp"
using namespace icinga;
REGISTER_TYPE_WITH_PROTOTYPE(Function, Function::GetPrototype());
-Function::Function(const String& name, const Callback& function, bool side_effect_free, bool deprecated)
+Function::Function(const String& name, const Callback& function, const std::vector<String>& args,
+ bool side_effect_free, bool deprecated)
: m_Callback(function)
{
SetName(name, true);
SetSideEffectFree(side_effect_free, true);
SetDeprecated(deprecated, true);
+ SetArguments(Array::FromVector(args), true);
}
Value Function::Invoke(const std::vector<Value>& arguments)
typedef boost::function<Value (const std::vector<Value>& arguments)> Callback;
- Function(const String& name, const Callback& function, bool side_effect_free = false, bool deprecated = false);
+ Function(const String& name, const Callback& function, const std::vector<String>& args = std::vector<String>(),
+ bool side_effect_free = false, bool deprecated = false);
Value Invoke(const std::vector<Value>& arguments = std::vector<Value>());
Value Invoke(const Value& otherThis, const std::vector<Value>& arguments = std::vector<Value>());
Callback m_Callback;
};
-#define REGISTER_SCRIPTFUNCTION_NS(ns, name, callback) \
+#define REGISTER_SCRIPTFUNCTION_NS(ns, name, callback, args) \
INITIALIZE_ONCE_WITH_PRIORITY([]() { \
- Function::Ptr sf = new icinga::Function(#ns "#" #name, WrapFunction(callback), false); \
+ Function::Ptr sf = new icinga::Function(#ns "#" #name, WrapFunction(callback), String(args).Split(":"), false); \
ScriptGlobal::Set(#ns "." #name, sf); \
}, 10)
-#define REGISTER_SCRIPTFUNCTION_NS_PREFIX(ns, name, callback) \
+#define REGISTER_SCRIPTFUNCTION_NS_PREFIX(ns, name, callback, args) \
INITIALIZE_ONCE_WITH_PRIORITY([]() { \
- Function::Ptr sf = new icinga::Function(#ns "#" #name, WrapFunction(callback), false); \
+ Function::Ptr sf = new icinga::Function(#ns "#" #name, WrapFunction(callback), String(args).Split(":"), false); \
ScriptGlobal::Set(#ns "." #name, sf); \
- Function::Ptr dsf = new icinga::Function("Deprecated#__" #name " (deprecated)", WrapFunction(callback), false, true); \
+ Function::Ptr dsf = new icinga::Function("Deprecated#__" #name " (deprecated)", WrapFunction(callback), String(args).Split(":"), false, true); \
ScriptGlobal::Set("Deprecated.__" #name, dsf); \
}, 10)
-#define REGISTER_SCRIPTFUNCTION_NS_DEPRECATED(ns, name, callback) \
+#define REGISTER_SCRIPTFUNCTION_NS_DEPRECATED(ns, name, callback, args) \
INITIALIZE_ONCE_WITH_PRIORITY([]() { \
- Function::Ptr sf = new icinga::Function(#ns "#" #name, WrapFunction(callback), false); \
+ Function::Ptr sf = new icinga::Function(#ns "#" #name, WrapFunction(callback), String(args).Split(":"), false); \
ScriptGlobal::Set(#ns "." #name, sf); \
- Function::Ptr dsf = new icinga::Function("Deprecated#" #name " (deprecated)", WrapFunction(callback), false, true); \
+ Function::Ptr dsf = new icinga::Function("Deprecated#" #name " (deprecated)", WrapFunction(callback), String(args).Split(":"), false, true); \
ScriptGlobal::Set("Deprecated." #name, dsf); \
}, 10)
-#define REGISTER_SAFE_SCRIPTFUNCTION_NS(ns, name, callback) \
+#define REGISTER_SAFE_SCRIPTFUNCTION_NS(ns, name, callback, args) \
INITIALIZE_ONCE_WITH_PRIORITY([]() { \
- Function::Ptr sf = new icinga::Function(#ns "#" #name, WrapFunction(callback), true); \
+ Function::Ptr sf = new icinga::Function(#ns "#" #name, WrapFunction(callback), String(args).Split(":"), true); \
ScriptGlobal::Set(#ns "." #name, sf); \
}, 10)
-#define REGISTER_SAFE_SCRIPTFUNCTION_NS_PREFIX(ns, name, callback) \
+#define REGISTER_SAFE_SCRIPTFUNCTION_NS_PREFIX(ns, name, callback, args) \
INITIALIZE_ONCE_WITH_PRIORITY([]() { \
- Function::Ptr sf = new icinga::Function(#ns "#" #name, WrapFunction(callback), true); \
+ Function::Ptr sf = new icinga::Function(#ns "#" #name, WrapFunction(callback), String(args).Split(":"), true); \
ScriptGlobal::Set(#ns "." #name, sf); \
- Function::Ptr dsf = new icinga::Function("Deprecated#__" #name " (deprecated)", WrapFunction(callback), true, true); \
+ Function::Ptr dsf = new icinga::Function("Deprecated#__" #name " (deprecated)", WrapFunction(callback), String(args).Split(":"), true, true); \
ScriptGlobal::Set("Deprecated.__" #name, dsf); \
}, 10)
-#define REGISTER_SAFE_SCRIPTFUNCTION_NS_DEPRECATED(ns, name, callback) \
+#define REGISTER_SAFE_SCRIPTFUNCTION_NS_DEPRECATED(ns, name, callback, args) \
INITIALIZE_ONCE_WITH_PRIORITY([]() { \
- Function::Ptr sf = new icinga::Function(#ns "#" #name, WrapFunction(callback), true); \
+ Function::Ptr sf = new icinga::Function(#ns "#" #name, WrapFunction(callback), String(args).Split(":"), true); \
ScriptGlobal::Set(#ns "." #name, sf); \
- Function::Ptr dsf = new icinga::Function("Deprecated#" #name " (deprecated)", WrapFunction(callback), true, true); \
+ Function::Ptr dsf = new icinga::Function("Deprecated#" #name " (deprecated)", WrapFunction(callback), String(args).Split(":"), true, true); \
ScriptGlobal::Set("Deprecated." #name, dsf); \
}, 10)
abstract class Function
{
- String "name";
- bool side_effect_free;
- bool deprecated;
+ [config] String "name";
+ [config] bool side_effect_free;
+ [config] bool deprecated;
+ [config] Array::Ptr arguments;
};
}
Dictionary::Ptr jsonObj = new Dictionary();
/* Methods */
- jsonObj->Set("encode", new Function("Json#encode", WrapFunction(JsonEncodeShim), true));
- jsonObj->Set("decode", new Function("Json#decode", WrapFunction(JsonDecode), true));
+ jsonObj->Set("encode", new Function("Json#encode", WrapFunction(JsonEncodeShim), { "value" }, true));
+ jsonObj->Set("decode", new Function("Json#decode", WrapFunction(JsonDecode), { "value" }, true));
ScriptGlobal::Set("Json", jsonObj);
});
mathObj->Set("SQRT2", 1.41421356237309504880);
/* Methods */
- mathObj->Set("abs", new Function("Math#abs", WrapFunction(MathAbs), true));
- mathObj->Set("acos", new Function("Math#acos", WrapFunction(MathAcos), true));
- mathObj->Set("asin", new Function("Math#asin", WrapFunction(MathAsin), true));
- mathObj->Set("atan", new Function("Math#atan", WrapFunction(MathAtan), true));
- mathObj->Set("atan2", new Function("Math#atan2", WrapFunction(MathAtan2), true));
- mathObj->Set("ceil", new Function("Math#ceil", WrapFunction(MathCeil), true));
- mathObj->Set("cos", new Function("Math#cos", WrapFunction(MathCos), true));
- mathObj->Set("exp", new Function("Math#exp", WrapFunction(MathExp), true));
- mathObj->Set("floor", new Function("Math#floor", WrapFunction(MathFloor), true));
- mathObj->Set("log", new Function("Math#log", WrapFunction(MathLog), true));
- mathObj->Set("max", new Function("Math#max", WrapFunction(MathMax), true));
- mathObj->Set("min", new Function("Math#min", WrapFunction(MathMin), true));
- mathObj->Set("pow", new Function("Math#pow", WrapFunction(MathPow), true));
- mathObj->Set("random", new Function("Math#random", WrapFunction(MathRandom), true));
- mathObj->Set("round", new Function("Math#round", WrapFunction(MathRound), true));
- mathObj->Set("sin", new Function("Math#sin", WrapFunction(MathSin), true));
- mathObj->Set("sqrt", new Function("Math#sqrt", WrapFunction(MathSqrt), true));
- mathObj->Set("tan", new Function("Math#tan", WrapFunction(MathTan), true));
- mathObj->Set("isnan", new Function("Math#isnan", WrapFunction(MathIsnan), true));
- mathObj->Set("isinf", new Function("Math#isinf", WrapFunction(MathIsinf), true));
- mathObj->Set("sign", new Function("Math#sign", WrapFunction(MathSign), true));
+ mathObj->Set("abs", new Function("Math#abs", WrapFunction(MathAbs), { "x" }, true));
+ mathObj->Set("acos", new Function("Math#acos", WrapFunction(MathAcos), { "x" }, true));
+ mathObj->Set("asin", new Function("Math#asin", WrapFunction(MathAsin), { "x" }, true));
+ mathObj->Set("atan", new Function("Math#atan", WrapFunction(MathAtan), { "x" }, true));
+ mathObj->Set("atan2", new Function("Math#atan2", WrapFunction(MathAtan2), { "x", "y" }, true));
+ mathObj->Set("ceil", new Function("Math#ceil", WrapFunction(MathCeil), { "x" }, true));
+ mathObj->Set("cos", new Function("Math#cos", WrapFunction(MathCos), { "x" }, true));
+ mathObj->Set("exp", new Function("Math#exp", WrapFunction(MathExp), { "x" }, true));
+ mathObj->Set("floor", new Function("Math#floor", WrapFunction(MathFloor), { "x" }, true));
+ mathObj->Set("log", new Function("Math#log", WrapFunction(MathLog), { "x" }, true));
+ mathObj->Set("max", new Function("Math#max", WrapFunction(MathMax), {}, true));
+ mathObj->Set("min", new Function("Math#min", WrapFunction(MathMin), {}, true));
+ mathObj->Set("pow", new Function("Math#pow", WrapFunction(MathPow), { "x", "y" }, true));
+ mathObj->Set("random", new Function("Math#random", WrapFunction(MathRandom), {}, true));
+ mathObj->Set("round", new Function("Math#round", WrapFunction(MathRound), { "x" }, true));
+ mathObj->Set("sin", new Function("Math#sin", WrapFunction(MathSin), { "x" }, true));
+ mathObj->Set("sqrt", new Function("Math#sqrt", WrapFunction(MathSqrt), { "x" }, true));
+ mathObj->Set("tan", new Function("Math#tan", WrapFunction(MathTan), { "x" }, true));
+ mathObj->Set("isnan", new Function("Math#isnan", WrapFunction(MathIsnan), { "x" }, true));
+ mathObj->Set("isinf", new Function("Math#isinf", WrapFunction(MathIsinf), { "x" }, true));
+ mathObj->Set("sign", new Function("Math#sign", WrapFunction(MathSign), { "x" }, true));
ScriptGlobal::Set("Math", mathObj);
});
/**
* Helper functions for reading/writing messages in the netstring format.
*
- * @see http://cr.yp.to/proto/netstrings.txt
+ * @see https://cr.yp.to/proto/netstrings.txt
*
* @ingroup base
*/
if (!prototype) {
prototype = new Dictionary();
- prototype->Set("to_string", new Function("Number#to_string", WrapFunction(NumberToString), true));
+ prototype->Set("to_string", new Function("Number#to_string", WrapFunction(NumberToString), {}, true));
}
return prototype;
if (!prototype) {
prototype = new Dictionary();
- prototype->Set("to_string", new Function("Object#to_string", WrapFunction(ObjectToString), true));
- prototype->Set("notify_attribute", new Function("Object#notify_attribute", WrapFunction(ObjectNotifyAttribute), false));
- prototype->Set("clone", new Function("Object#clone", WrapFunction(ObjectClone), true));
+ prototype->Set("to_string", new Function("Object#to_string", WrapFunction(ObjectToString), {}, true));
+ prototype->Set("notify_attribute", new Function("Object#notify_attribute", WrapFunction(ObjectNotifyAttribute), { "attribute" }, false));
+ prototype->Set("clone", new Function("Object#clone", WrapFunction(ObjectClone), {}, true));
}
return prototype;
using namespace icinga;
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, regex, &ScriptUtils::Regex);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, match, &Utility::Match);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, cidr_match, &Utility::CidrMatch);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, len, &ScriptUtils::Len);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, union, &ScriptUtils::Union);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, intersection, &ScriptUtils::Intersection);
-REGISTER_SCRIPTFUNCTION_NS(System, log, &ScriptUtils::Log);
-REGISTER_SCRIPTFUNCTION_NS(System, range, &ScriptUtils::Range);
-REGISTER_SCRIPTFUNCTION_NS(System, exit, &Application::Exit);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, typeof, &ScriptUtils::TypeOf);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, keys, &ScriptUtils::Keys);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, random, &Utility::Random);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, get_object, &ScriptUtils::GetObject);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, get_objects, &ScriptUtils::GetObjects);
-REGISTER_SCRIPTFUNCTION_NS(System, assert, &ScriptUtils::Assert);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, string, &ScriptUtils::CastString);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, number, &ScriptUtils::CastNumber);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, bool, &ScriptUtils::CastBool);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, get_time, &Utility::GetTime);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, basename, &Utility::BaseName);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, dirname, &Utility::DirName);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, msi_get_component_path, &ScriptUtils::MsiGetComponentPathShim);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, track_parents, &ScriptUtils::TrackParents);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, escape_shell_cmd, &Utility::EscapeShellCmd);
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, escape_shell_arg, &Utility::EscapeShellArg);
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, regex, &ScriptUtils::Regex, "pattern:text");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, match, &Utility::Match, "pattern:text");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, cidr_match, &Utility::CidrMatch, "pattern:ip");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, len, &ScriptUtils::Len, "value");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, union, &ScriptUtils::Union, "");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, intersection, &ScriptUtils::Intersection, "");
+REGISTER_SCRIPTFUNCTION_NS(System, log, &ScriptUtils::Log, "severity:facility:value");
+REGISTER_SCRIPTFUNCTION_NS(System, range, &ScriptUtils::Range, "start:end:increment");
+REGISTER_SCRIPTFUNCTION_NS(System, exit, &Application::Exit, "status");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, typeof, &ScriptUtils::TypeOf, "value");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, keys, &ScriptUtils::Keys, "value");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, random, &Utility::Random, "");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, get_object, &ScriptUtils::GetObject, "type:name");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, get_objects, &ScriptUtils::GetObjects, "type");
+REGISTER_SCRIPTFUNCTION_NS(System, assert, &ScriptUtils::Assert, "value");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, string, &ScriptUtils::CastString, "value");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, number, &ScriptUtils::CastNumber, "value");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, bool, &ScriptUtils::CastBool, "value");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, get_time, &Utility::GetTime, "");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, basename, &Utility::BaseName, "path");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, dirname, &Utility::DirName, "path");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, msi_get_component_path, &ScriptUtils::MsiGetComponentPathShim, "component");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, track_parents, &ScriptUtils::TrackParents, "child");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, escape_shell_cmd, &Utility::EscapeShellCmd, "cmd");
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, escape_shell_arg, &Utility::EscapeShellArg, "arg");
#ifdef _WIN32
-REGISTER_SAFE_SCRIPTFUNCTION_NS(System, escape_create_process_arg, &Utility::EscapeCreateProcessArg);
+REGISTER_SAFE_SCRIPTFUNCTION_NS(System, escape_create_process_arg, &Utility::EscapeCreateProcessArg, "arg");
#endif /* _WIN32 */
-REGISTER_SCRIPTFUNCTION_NS(System, ptr, &ScriptUtils::Ptr);
-REGISTER_SCRIPTFUNCTION_NS(System, sleep, &Utility::Sleep);
+REGISTER_SCRIPTFUNCTION_NS(System, ptr, &ScriptUtils::Ptr, "object");
+REGISTER_SCRIPTFUNCTION_NS(System, sleep, &Utility::Sleep, "interval");
String ScriptUtils::CastString(const Value& value)
{
#include <socketpair.h>
#ifndef _WIN32
-# include <poll.h>
+#include <poll.h>
#endif /* _WIN32 */
using namespace icinga;
* Constructor for the Socket class.
*/
Socket::Socket(void)
- : m_FD(INVALID_SOCKET)
+ : m_FD(INVALID_SOCKET), m_SocketType(SOCK_STREAM), m_Protocol(IPPROTO_TCP)
+{ }
+
+/**
+ * Constructor for the Socket class.
+ */
+Socket::Socket(int socketType, int protocol)
+ : m_FD(INVALID_SOCKET), m_SocketType(socketType), m_Protocol(protocol)
{ }
/**
* Constructor for the Socket class.
*/
Socket::Socket(SOCKET fd)
- : m_FD(INVALID_SOCKET)
+ : m_FD(INVALID_SOCKET)
{
SetFD(fd);
}
*/
Socket::Ptr Socket::Accept(void)
{
+ if (m_Protocol == IPPROTO_UDP)
+ BOOST_THROW_EXCEPTION(std::runtime_error("Accept cannot be used for UDP sockets."));
+
int fd;
sockaddr_storage addr;
socklen_t addrlen = sizeof(addr);
<< boost::errinfo_errno(errno));
}
+/**
+ * Creates a socket and connects to the specified node and service.
+ *
+ * Iterates over all addresses returned by getaddrinfo() and uses the
+ * first one that can be connected. Throws socket_error on failure.
+ *
+ * @param node The node.
+ * @param service The service.
+ */
+void Socket::Connect(const String& node, const String& service)
+{
+	addrinfo hints;
+	addrinfo *result;
+
+	/* Initialized so the error path below is well-defined even when
+	 * getaddrinfo() succeeds with an empty result list and the loop
+	 * body never runs. */
+	int error = 0;
+	const char *func = "getaddrinfo";
+
+	memset(&hints, 0, sizeof(hints));
+	hints.ai_family = AF_UNSPEC;
+	hints.ai_socktype = m_SocketType;
+	hints.ai_protocol = m_Protocol;
+
+	int rc = getaddrinfo(node.CStr(), service.CStr(), &hints, &result);
+
+	if (rc != 0) {
+		Log(LogCritical, "Socket")
+		    << "getaddrinfo() failed with error code " << rc << ", \"" << gai_strerror(rc) << "\"";
+
+		BOOST_THROW_EXCEPTION(socket_error()
+		    << boost::errinfo_api_function("getaddrinfo")
+		    << errinfo_getaddrinfo_error(rc));
+	}
+
+	int fd = INVALID_SOCKET;
+
+	for (addrinfo *info = result; info != NULL; info = info->ai_next) {
+		fd = socket(info->ai_family, info->ai_socktype, info->ai_protocol);
+
+		if (fd == INVALID_SOCKET) {
+#ifdef _WIN32
+			error = WSAGetLastError();
+#else /* _WIN32 */
+			error = errno;
+#endif /* _WIN32 */
+			func = "socket";
+
+			continue;
+		}
+
+		/* Preserve the keep-alive behavior of the former TcpSocket::Connect();
+		 * only meaningful for stream sockets and non-fatal on failure. */
+		const int optTrue = 1;
+		if (m_SocketType == SOCK_STREAM &&
+		    setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, reinterpret_cast<const char *>(&optTrue), sizeof(optTrue)) != 0) {
+#ifdef _WIN32
+			error = WSAGetLastError();
+#else /* _WIN32 */
+			error = errno;
+#endif /* _WIN32 */
+			Log(LogWarning, "Socket")
+			    << "setsockopt() unable to enable TCP keep-alives with error code " << error;
+		}
+
+		rc = connect(fd, info->ai_addr, info->ai_addrlen);
+
+		if (rc < 0) {
+#ifdef _WIN32
+			error = WSAGetLastError();
+#else /* _WIN32 */
+			error = errno;
+#endif /* _WIN32 */
+			func = "connect";
+
+			closesocket(fd);
+
+			continue;
+		}
+
+		SetFD(fd);
+
+		break;
+	}
+
+	freeaddrinfo(result);
+
+	/* No candidate address could be connected: report the last error seen. */
+	if (GetFD() == INVALID_SOCKET) {
+		Log(LogCritical, "Socket")
+		    << "Invalid socket: " << Utility::FormatErrorNumber(error);
+
+#ifndef _WIN32
+		BOOST_THROW_EXCEPTION(socket_error()
+		    << boost::errinfo_api_function(func)
+		    << boost::errinfo_errno(error));
+#else /* _WIN32 */
+		BOOST_THROW_EXCEPTION(socket_error()
+		    << boost::errinfo_api_function(func)
+		    << errinfo_win32_error(error));
+#endif /* _WIN32 */
+	}
+}
size_t Write(const void *buffer, size_t size);
void Listen(void);
+ void Connect(const String& node, const String& service);
Socket::Ptr Accept(void);
bool Poll(bool read, bool write, struct timeval *timeout = NULL);
static void SocketPair(SOCKET s[2]);
protected:
+ Socket(int socketType, int protocol);
+
void SetFD(SOCKET fd);
int GetError(void) const;
-
mutable boost::mutex m_SocketMutex;
private:
SOCKET m_FD; /**< The socket descriptor. */
+ int m_SocketType;
+ int m_Protocol;
static String GetAddressFromSockaddr(sockaddr *address, socklen_t len);
};
if (!prototype) {
prototype = new Dictionary();
- prototype->Set("len", new Function("String#len", WrapFunction(StringLen), true));
- prototype->Set("to_string", new Function("String#to_string", WrapFunction(StringToString), true));
- prototype->Set("substr", new Function("String#substr", WrapFunction(StringSubstr), true));
- prototype->Set("upper", new Function("String#upper", WrapFunction(StringUpper), true));
- prototype->Set("lower", new Function("String#lower", WrapFunction(StringLower), true));
- prototype->Set("split", new Function("String#split", WrapFunction(StringSplit), true));
- prototype->Set("find", new Function("String#find", WrapFunction(StringFind), true));
- prototype->Set("contains", new Function("String#contains", WrapFunction(StringContains), true));
- prototype->Set("replace", new Function("String#replace", WrapFunction(StringReplace), true));
- prototype->Set("reverse", new Function("String#reverse", WrapFunction(StringReverse), true));
- prototype->Set("trim", new Function("String#trim", WrapFunction(StringTrim), true));
+ prototype->Set("len", new Function("String#len", WrapFunction(StringLen), {}, true));
+ prototype->Set("to_string", new Function("String#to_string", WrapFunction(StringToString), {}, true));
+ prototype->Set("substr", new Function("String#substr", WrapFunction(StringSubstr), { "start", "len" }, true));
+ prototype->Set("upper", new Function("String#upper", WrapFunction(StringUpper), {}, true));
+ prototype->Set("lower", new Function("String#lower", WrapFunction(StringLower), {}, true));
+ prototype->Set("split", new Function("String#split", WrapFunction(StringSplit), { "delims" }, true));
+ prototype->Set("find", new Function("String#find", WrapFunction(StringFind), { "str", "start" }, true));
+ prototype->Set("contains", new Function("String#contains", WrapFunction(StringContains), { "str" }, true));
+ prototype->Set("replace", new Function("String#replace", WrapFunction(StringReplace), { "search", "replacement" }, true));
+ prototype->Set("reverse", new Function("String#reverse", WrapFunction(StringReverse), {}, true));
+ prototype->Set("trim", new Function("String#trim", WrapFunction(StringTrim), {}, true));
}
return prototype;
#include "base/object.hpp"
#include <boost/algorithm/string/case_conv.hpp>
#include <boost/algorithm/string/trim.hpp>
+#include <boost/algorithm/string/split.hpp>
#include <boost/range/iterator.hpp>
#include <string.h>
#include <functional>
return m_Data.substr(first, len);
}
+	/**
+	 * Splits the string on each character contained in 'separators'.
+	 *
+	 * @param separators A set of single separator characters (semantics of
+	 *                   boost::is_any_of(), not a multi-character delimiter).
+	 * @returns A vector containing the resulting tokens.
+	 */
+	inline std::vector<String> Split(const char *separators) const
+	{
+		std::vector<String> result;
+		boost::algorithm::split(result, m_Data, boost::is_any_of(separators));
+		return result;
+	}
+
inline void Replace(SizeType first, SizeType second, const String& str)
{
m_Data.replace(first, second, str);
using namespace icinga;
+/**
+ * Constructor for the TcpSocket class.
+ */
+TcpSocket::TcpSocket(void)
+ : Socket(SOCK_STREAM, IPPROTO_TCP)
+{ }
+
/**
* Creates a socket and binds it to the specified service.
*
#endif /* _WIN32 */
}
}
-
-/**
- * Creates a socket and connects to the specified node and service.
- *
- * @param node The node.
- * @param service The service.
- */
-void TcpSocket::Connect(const String& node, const String& service)
-{
- addrinfo hints;
- addrinfo *result;
- int error;
- const char *func;
-
- memset(&hints, 0, sizeof(hints));
- hints.ai_family = AF_UNSPEC;
- hints.ai_socktype = SOCK_STREAM;
- hints.ai_protocol = IPPROTO_TCP;
-
- int rc = getaddrinfo(node.CStr(), service.CStr(), &hints, &result);
-
- if (rc != 0) {
- Log(LogCritical, "TcpSocket")
- << "getaddrinfo() failed with error code " << rc << ", \"" << gai_strerror(rc) << "\"";
-
- BOOST_THROW_EXCEPTION(socket_error()
- << boost::errinfo_api_function("getaddrinfo")
- << errinfo_getaddrinfo_error(rc));
- }
-
- int fd = INVALID_SOCKET;
-
- for (addrinfo *info = result; info != NULL; info = info->ai_next) {
- fd = socket(info->ai_family, info->ai_socktype, info->ai_protocol);
-
- if (fd == INVALID_SOCKET) {
-#ifdef _WIN32
- error = WSAGetLastError();
-#else /* _WIN32 */
- error = errno;
-#endif /* _WIN32 */
- func = "socket";
-
- continue;
- }
-
- const int optTrue = 1;
- if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, reinterpret_cast<const char *>(&optTrue), sizeof(optTrue)) != 0) {
-#ifdef _WIN32
- error = WSAGetLastError();
-#else /* _WIN32 */
- error = errno;
-#endif /* _WIN32 */
- Log(LogWarning, "TcpSocket")
- << "setsockopt() unable to enable TCP keep-alives with error code " << rc;
- }
-
- rc = connect(fd, info->ai_addr, info->ai_addrlen);
-
- if (rc < 0) {
-#ifdef _WIN32
- error = WSAGetLastError();
-#else /* _WIN32 */
- error = errno;
-#endif /* _WIN32 */
- func = "connect";
-
- closesocket(fd);
-
- continue;
- }
-
- SetFD(fd);
-
- break;
- }
-
- freeaddrinfo(result);
-
- if (GetFD() == INVALID_SOCKET) {
- Log(LogCritical, "TcpSocket")
- << "Invalid socket: " << Utility::FormatErrorNumber(error);
-
-#ifndef _WIN32
- BOOST_THROW_EXCEPTION(socket_error()
- << boost::errinfo_api_function(func)
- << boost::errinfo_errno(error));
-#else /* _WIN32 */
- BOOST_THROW_EXCEPTION(socket_error()
- << boost::errinfo_api_function(func)
- << errinfo_win32_error(error));
-#endif /* _WIN32 */
- }
-}
public:
DECLARE_PTR_TYPEDEFS(TcpSocket);
+ TcpSocket(void);
+
void Bind(const String& service, int family);
void Bind(const String& node, const String& service, int family);
-
- void Connect(const String& node, const String& service);
};
}
return output;
}
+/**
+ * Computes the SHA1 checksum of the given string.
+ *
+ * @param s The input string.
+ * @param binary Whether to return the raw 20-byte digest (true) or a
+ *               lower-case hex representation (false).
+ * @returns The SHA1 digest. Throws openssl_error on failure.
+ */
+String SHA1(const String& s, bool binary)
+{
+	/* NOTE(review): ERR_error_string() expects a buffer of at least 256 bytes;
+	 * errbuf is 120 bytes here, matching the existing SHA256() — confirm this
+	 * is safe or switch both to ERR_error_string_n(). */
+	char errbuf[120];
+	SHA_CTX context;
+	unsigned char digest[SHA_DIGEST_LENGTH];
+
+	if (!SHA1_Init(&context)) {
+		Log(LogCritical, "SSL")
+		    << "Error on SHA Init: " << ERR_peek_error() << ", \"" << ERR_error_string(ERR_peek_error(), errbuf) << "\"";
+		BOOST_THROW_EXCEPTION(openssl_error()
+		    << boost::errinfo_api_function("SHA1_Init")
+		    << errinfo_openssl_error(ERR_peek_error()));
+	}
+
+	if (!SHA1_Update(&context, (unsigned char*)s.CStr(), s.GetLength())) {
+		Log(LogCritical, "SSL")
+		    << "Error on SHA Update: " << ERR_peek_error() << ", \"" << ERR_error_string(ERR_peek_error(), errbuf) << "\"";
+		BOOST_THROW_EXCEPTION(openssl_error()
+		    << boost::errinfo_api_function("SHA1_Update")
+		    << errinfo_openssl_error(ERR_peek_error()));
+	}
+
+	if (!SHA1_Final(digest, &context)) {
+		Log(LogCritical, "SSL")
+		    << "Error on SHA Final: " << ERR_peek_error() << ", \"" << ERR_error_string(ERR_peek_error(), errbuf) << "\"";
+		BOOST_THROW_EXCEPTION(openssl_error()
+		    << boost::errinfo_api_function("SHA1_Final")
+		    << errinfo_openssl_error(ERR_peek_error()));
+	}
+
+	if (binary)
+		return String(reinterpret_cast<const char*>(digest), reinterpret_cast<const char *>(digest + SHA_DIGEST_LENGTH));
+
+	/* Hex-encode the digest: two characters per byte plus the NUL terminator. */
+	char output[SHA_DIGEST_LENGTH*2+1];
+	for (int i = 0; i < 20; i++)
+		sprintf(output + 2 * i, "%02x", digest[i]);
+
+	return output;
+}
+
String SHA256(const String& s)
{
char errbuf[120];
String I2_BASE_API CertificateToString(const boost::shared_ptr<X509>& cert);
boost::shared_ptr<X509> I2_BASE_API CreateCertIcingaCA(EVP_PKEY *pubkey, X509_NAME *subject);
String I2_BASE_API PBKDF2_SHA1(const String& password, const String& salt, int iterations);
-String I2_BASE_API SHA1(const String& s);
+String I2_BASE_API SHA1(const String& s, bool binary = false);
String I2_BASE_API SHA256(const String& s);
String I2_BASE_API RandomString(int length);
if (!prototype) {
prototype = new Dictionary();
- prototype->Set("register_attribute_handler", new Function("Type#register_attribute_handler", WrapFunction(TypeRegisterAttributeHandler), false));
+ prototype->Set("register_attribute_handler", new Function("Type#register_attribute_handler", WrapFunction(TypeRegisterAttributeHandler), { "field", "callback" }, false));
}
return prototype;
--- /dev/null
+/******************************************************************************
+ * Icinga 2 *
+ * Copyright (C) 2012-2016 Icinga Development Team (https://www.icinga.org/) *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the Free Software Foundation *
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. *
+ ******************************************************************************/
+
+#include "base/udpsocket.hpp"
+#include "base/logger.hpp"
+#include "base/utility.hpp"
+#include "base/exception.hpp"
+#include <boost/exception/errinfo_api_function.hpp>
+#include <boost/exception/errinfo_errno.hpp>
+#include <iostream>
+
+using namespace icinga;
+
+/**
+ * Constructor for the UdpSocket class.
+ */
+UdpSocket::UdpSocket(void)
+ : Socket(SOCK_DGRAM, IPPROTO_UDP)
+{ }
+
--- /dev/null
+/******************************************************************************
+ * Icinga 2 *
+ * Copyright (C) 2012-2017 Icinga Development Team (https://www.icinga.com/) *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the Free Software Foundation *
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. *
+ ******************************************************************************/
+
+#ifndef UDPSOCKET_H
+#define UDPSOCKET_H
+
+#include "base/i2-base.hpp"
+#include "base/socket.hpp"
+
+namespace icinga
+{
+
+/**
+ * A UDP socket.
+ *
+ * @ingroup base
+ */
+class I2_BASE_API UdpSocket : public Socket
+{
+public:
+ DECLARE_PTR_TYPEDEFS(UdpSocket);
+
+ UdpSocket(void);
+};
+
+}
+
+#endif /* UDPSOCKET_H */
+
upq.SetName("DaemonCommand::Run");
// activate config only after daemonization: it starts threads and that is not compatible with fork()
- if (!ConfigItem::ActivateItems(upq, newItems)) {
+ if (!ConfigItem::ActivateItems(upq, newItems, false, false, true)) {
Log(LogCritical, "cli", "Error activating configuration.");
return EXIT_FAILURE;
}
result->SetOutput(co.first);
result->SetPerformanceData(PluginUtility::SplitPerfdata(co.second));
result->SetState(PluginUtility::ExitStatusToState(Convert::ToLong(attrs["return_code"])));
- result->SetExecutionStart(Convert::ToDouble(attrs["start_time"]));
- result->SetExecutionEnd(Convert::ToDouble(attrs["finish_time"]));
+
+ if (attrs.find("start_time") != attrs.end())
+ result->SetExecutionStart(Convert::ToDouble(attrs["start_time"]));
+ else
+ result->SetExecutionStart(Utility::GetTime());
+
+ if (attrs.find("finish_time") != attrs.end())
+ result->SetExecutionEnd(Convert::ToDouble(attrs["finish_time"]));
+ else
+ result->SetExecutionEnd(result->GetExecutionStart());
checkable->ProcessCheckResult(result);
/**
* Hint: The reason why we're using "\n" rather than std::endl is because
* std::endl also _flushes_ the output stream which severely degrades
- * performance (see http://gcc.gnu.org/onlinedocs/libstdc++/manual/bk01pt11ch25s02.html).
+ * performance (see https://stackoverflow.com/questions/213907/c-stdendl-vs-n).
*/
/**
{
String upath;
- if (search || (path.GetLength() > 0 && path[0] == '/'))
+ if (search || (IsAbsolutePath(path)))
upath = path;
else
upath = relativeBase + "/" + path;
{
String ppath;
- if (path.GetLength() > 0 && path[0] == '/')
+ if (IsAbsolutePath(path))
ppath = path;
else
ppath = relativeBase + "/" + path;
String ppath;
- if (path.GetLength() > 0 && path[0] == '/')
+ if (IsAbsolutePath(path))
ppath = path;
else
ppath = relativeBase + "/" + path;
String ppath;
String newRelativeBase = relativeBase;
- if (path.GetLength() > 0 && path[0] == '/')
+ if (IsAbsolutePath(path))
ppath = path;
else {
ppath = relativeBase + "/" + path;
return !empty;
}
+
+/**
+ * Checks whether the given path is absolute.
+ *
+ * @param path The path to check.
+ * @returns true if the path is absolute, false otherwise.
+ */
+bool ConfigCompiler::IsAbsolutePath(const String& path)
+{
+#ifndef _WIN32
+	return (path.GetLength() > 0 && path[0] == '/');
+#else /* _WIN32 */
+	/* NOTE(review): PathIsRelative() requires <shlwapi.h> and linking against
+	 * shlwapi.lib — confirm these are provided elsewhere in the build. */
+	return !PathIsRelative(path.CStr());
+#endif /* _WIN32 */
+}
+
static void HandleIncludeZone(const String& relativeBase, const String& tag, const String& path, const String& pattern, const String& package, std::vector<Expression *>& expressions);
+ static bool IsAbsolutePath(const String& path);
+
public:
bool m_Eof;
int m_OpenBraces;
#include "config/configcompilercontext.hpp"
#include "config/applyrule.hpp"
#include "config/objectrule.hpp"
+#include "config/configcompiler.hpp"
#include "base/application.hpp"
#include "base/configtype.hpp"
#include "base/objectlock.hpp"
ConfigItem::ItemList ConfigItem::m_UnnamedItems;
ConfigItem::IgnoredItemList ConfigItem::m_IgnoredItems;
-REGISTER_SCRIPTFUNCTION_NS(Internal, run_with_activation_context, &ConfigItem::RunWithActivationContext);
+REGISTER_SCRIPTFUNCTION_NS(Internal, run_with_activation_context, &ConfigItem::RunWithActivationContext, "func");
/**
* Constructor for the ConfigItem class.
return true;
}
-bool ConfigItem::ActivateItems(WorkQueue& upq, const std::vector<ConfigItem::Ptr>& newItems, bool runtimeCreated, bool silent)
+bool ConfigItem::ActivateItems(WorkQueue& upq, const std::vector<ConfigItem::Ptr>& newItems, bool runtimeCreated, bool silent, bool withModAttrs)
{
static boost::mutex mtx;
boost::mutex::scoped_lock lock(mtx);
- if (!silent)
- Log(LogInformation, "ConfigItem", "Triggering Start signal for config items");
+ if (withModAttrs) {
+ /* restore modified attributes */
+ if (Utility::PathExists(Application::GetModAttrPath())) {
+ Expression *expression = ConfigCompiler::CompileFile(Application::GetModAttrPath());
+
+ if (expression) {
+ try {
+ ScriptFrame frame;
+ expression->Evaluate(frame);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "config", DiagnosticInformation(ex));
+ }
+ }
+
+ delete expression;
+ }
+ }
for (const ConfigItem::Ptr& item : newItems) {
if (!item->m_Object)
if (object->IsActive())
continue;
+#ifdef I2_DEBUG
+ Log(LogDebug, "ConfigItem")
+ << "Setting 'active' to true for object '" << object->GetName() << "' of type '" << object->GetReflectionType()->GetName() << "'";
+#endif /* I2_DEBUG */
+ upq.Enqueue(boost::bind(&ConfigObject::PreActivate, object));
+ }
+
+ upq.Join();
+
+ if (upq.HasExceptions()) {
+ upq.ReportExceptions("ConfigItem");
+ return false;
+ }
+
+ if (!silent)
+ Log(LogInformation, "ConfigItem", "Triggering Start signal for config items");
+
+ for (const ConfigItem::Ptr& item : newItems) {
+ if (!item->m_Object)
+ continue;
+
+ ConfigObject::Ptr object = item->m_Object;
+
#ifdef I2_DEBUG
Log(LogDebug, "ConfigItem")
<< "Activating object '" << object->GetName() << "' of type '" << object->GetReflectionType()->GetName() << "'";
const String& name);
static bool CommitItems(const ActivationContext::Ptr& context, WorkQueue& upq, std::vector<ConfigItem::Ptr>& newItems, bool silent = false);
- static bool ActivateItems(WorkQueue& upq, const std::vector<ConfigItem::Ptr>& newItems, bool runtimeCreated = false, bool silent = false);
+ static bool ActivateItems(WorkQueue& upq, const std::vector<ConfigItem::Ptr>& newItems, bool runtimeCreated = false, bool silent = false, bool withModAttrs = false);
static bool RunWithActivationContext(const Function::Ptr& function);
std::map<String, Expression *> *closedVars, const boost::shared_ptr<Expression>& expression)
{
return new Function(name, boost::bind(&FunctionWrapper, _1, args,
- EvaluateClosedVars(frame, closedVars), expression));
+ EvaluateClosedVars(frame, closedVars), expression), args);
}
static inline Value NewApply(ScriptFrame& frame, const String& type, const String& target, const String& name, const boost::shared_ptr<Expression>& filter,
cat->Add("DbCatComment");
cat->Add("DbCatDowntime");
cat->Add("DbCatEventHandler");
- cat->Add("DbCatExternalCommand");
cat->Add("DbCatFlapping");
cat->Add("DbCatNotification");
cat->Add("DbCatProgramStatus");
query3.Fields = fields3;
query3.WhereCriteria = new Dictionary();
+ query3.WhereCriteria->Set("object_id", checkable);
query3.WhereCriteria->Set("internal_downtime_id", downtime->GetLegacyId());
query3.WhereCriteria->Set("entry_time", DbValue::FromTimestamp(downtime->GetEntryTime()));
query3.WhereCriteria->Set("scheduled_start_time", DbValue::FromTimestamp(downtime->GetStartTime()));
using namespace icinga;
-REGISTER_SCRIPTFUNCTION_NS(Internal, IdoCheck, &IdoCheckTask::ScriptFunc);
+REGISTER_SCRIPTFUNCTION_NS(Internal, IdoCheck, &IdoCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
void IdoCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
/* connection */
if (!mysql_init(&m_Connection)) {
Log(LogCritical, "IdoMysqlConnection")
- << "mysql_init() failed: \"" << mysql_error(&m_Connection) << "\"";
+ << "mysql_init() failed: out of memory";
BOOST_THROW_EXCEPTION(std::bad_alloc());
}
Log(LogCritical, "IdoMysqlConnection", "Schema does not provide any valid version! Verify your schema installation.");
- Application::Exit(EXIT_FAILURE);
+ BOOST_THROW_EXCEPTION(std::runtime_error("Invalid schema."));
}
DiscardRows(result);
<< IDO_COMPAT_SCHEMA_VERSION << "' (or newer)! Please check the upgrade documentation at "
<< "https://docs.icinga.com/icinga2/latest/doc/module/icinga2/chapter/upgrading-icinga-2#upgrading-mysql-db";
- Application::Exit(EXIT_FAILURE);
+ BOOST_THROW_EXCEPTION(std::runtime_error("Schema version mismatch."));
}
String instanceName = GetInstanceName();
-- -----------------------------------------
-- Copyright (c) 2014 Icinga Development Team (https://www.icinga.com)
--
--- Please check http://docs.icinga.com for upgrading information!
+-- Please check https://docs.icinga.com for upgrading information!
-- -----------------------------------------
UPDATE icinga_objects SET name2 = NULL WHERE name2 = '';
-- -----------------------------------------
-- Copyright (c) 2014 Icinga Development Team (https://www.icinga.com)
--
--- Please check http://docs.icinga.com for upgrading information!
+-- Please check https://docs.icinga.com for upgrading information!
-- -----------------------------------------
ALTER TABLE `icinga_programstatus` ADD COLUMN `endpoint_name` varchar(255) character set latin1 collate latin1_general_cs default NULL;
-- -----------------------------------------
-- Copyright (c) 2014 Icinga Development Team (https://www.icinga.com)
--
--- Please check http://docs.icinga.com for upgrading information!
+-- Please check https://docs.icinga.com for upgrading information!
-- -----------------------------------------
ALTER TABLE `icinga_programstatus` ADD COLUMN `program_version` varchar(64) character set latin1 collate latin1_general_cs default NULL;
-- -----------------------------------------
-- Copyright (c) 2015 Icinga Development Team (https://www.icinga.com)
--
--- Please check http://docs.icinga.com for upgrading information!
+-- Please check https://docs.icinga.com for upgrading information!
-- -----------------------------------------
-- -----------------------------------------
-- -----------------------------------------
-- Copyright (c) 2015 Icinga Development Team (https://www.icinga.com)
--
--- Please check http://docs.icinga.com for upgrading information!
+-- Please check https://docs.icinga.com for upgrading information!
-- -----------------------------------------
-- -----------------------------------------
-- -----------------------------------------
-- Copyright (c) 2016 Icinga Development Team (https://www.icinga.com)
--
--- Please check http://docs.icinga.com for upgrading information!
+-- Please check https://docs.icinga.com for upgrading information!
-- -----------------------------------------
SET SQL_MODE="NO_AUTO_VALUE_ON_ZERO";
-- -----------------------------------------
-- Copyright (c) 2016 Icinga Development Team (https://www.icinga.com)
--
--- Please check http://docs.icinga.com for upgrading information!
+-- Please check https://docs.icinga.com for upgrading information!
-- -----------------------------------------
-- -----------------------------------------
Log(LogCritical, "IdoPgsqlConnection", "Schema does not provide any valid version! Verify your schema installation.");
- Application::Exit(EXIT_FAILURE);
+ BOOST_THROW_EXCEPTION(std::runtime_error("Invalid schema."));
}
String version = row->Get("version");
<< IDO_COMPAT_SCHEMA_VERSION << "' (or newer)! Please check the upgrade documentation at "
<< "https://docs.icinga.com/icinga2/latest/doc/module/icinga2/chapter/upgrading-icinga-2#upgrading-postgresql-db";
- Application::Exit(EXIT_FAILURE);
+ BOOST_THROW_EXCEPTION(std::runtime_error("Schema version mismatch."));
}
String instanceName = GetInstanceName();
-- -----------------------------------------
-- Copyright (c) 2014 Icinga Development Team (https://www.icinga.com)
--
--- Please check http://docs.icinga.com for upgrading information!
+-- Please check https://docs.icinga.com for upgrading information!
-- -----------------------------------------
UPDATE icinga_objects SET name2 = NULL WHERE name2 = '';
-- -----------------------------------------
-- Copyright (c) 2014 Icinga Development Team (https://www.icinga.com)
--
--- Please check http://docs.icinga.com for upgrading information!
+-- Please check https://docs.icinga.com for upgrading information!
-- -----------------------------------------
ALTER TABLE icinga_programstatus ADD COLUMN endpoint_name TEXT default NULL;
-- -----------------------------------------
-- Copyright (c) 2014 Icinga Development Team (https://www.icinga.com)
--
--- Please check http://docs.icinga.com for upgrading information!
+-- Please check https://docs.icinga.com for upgrading information!
-- -----------------------------------------
ALTER TABLE icinga_programstatus ADD COLUMN program_version TEXT default NULL;
-- -----------------------------------------
-- Copyright (c) 2015 Icinga Development Team (https://www.icinga.com)
--
--- Please check http://docs.icinga.com for upgrading information!
+-- Please check https://docs.icinga.com for upgrading information!
-- -----------------------------------------
-- -----------------------------------------
-- -----------------------------------------
-- Copyright (c) 2015 Icinga Development Team (https://www.icinga.com)
--
--- Please check http://docs.icinga.com for upgrading information!
+-- Please check https://docs.icinga.com for upgrading information!
-- -----------------------------------------
-- -----------------------------------------
-- -----------------------------------------
-- Copyright (c) 2016 Icinga Development Team (https://www.icinga.com)
--
--- Please check http://docs.icinga.com for upgrading information!
+-- Please check https://docs.icinga.com for upgrading information!
-- -----------------------------------------
-- -----------------------------------------
-- -----------------------------------------
-- Copyright (c) 2016 Icinga Development Team (https://www.icinga.com)
--
--- Please check http://docs.icinga.com for upgrading information!
+-- Please check https://docs.icinga.com for upgrading information!
-- -----------------------------------------
-- -----------------------------------------
if (!prototype) {
prototype = new Dictionary();
- prototype->Set("process_check_result", new Function("Checkable#process_check_result", WrapFunction(CheckableProcessCheckResult), false));
+ prototype->Set("process_check_result", new Function("Checkable#process_check_result", WrapFunction(CheckableProcessCheckResult), { "cr" }, false));
}
return prototype;
return avalue;
}
-bool Checkable::IsAcknowledged(void)
+bool Checkable::IsAcknowledged(void) const
{
- return GetAcknowledgement() != AcknowledgementNone;
+ return const_cast<Checkable *>(this)->GetAcknowledgement() != AcknowledgementNone;
}
void Checkable::AcknowledgeProblem(const String& author, const String& comment, AcknowledgementType type, bool notify, double expiry, const MessageOrigin::Ptr& origin)
return Endpoint::GetByName(GetCommandEndpointRaw());
}
+int Checkable::GetSeverity(void) const
+{
+ /* overridden in Host/Service class. */
+ return 0;
+}
+
void Checkable::NotifyFixedDowntimeStart(const Downtime::Ptr& downtime)
{
if (!downtime->GetFixed())
CheckableService
};
+/**
+ * Severity Flags
+ *
+ * @ingroup icinga
+ */
+enum SeverityFlag
+{
+ SeverityFlagDowntime = 1,
+ SeverityFlagAcknowledgement = 2,
+ SeverityFlagUnhandled = 8,
+ SeverityFlagPending = 16,
+ SeverityFlagWarning = 32,
+ SeverityFlagUnknown = 64,
+ SeverityFlagCritical = 128,
+};
+
class CheckCommand;
class EventCommand;
class Dependency;
void AcknowledgeProblem(const String& author, const String& comment, AcknowledgementType type, bool notify = true, double expiry = 0, const MessageOrigin::Ptr& origin = MessageOrigin::Ptr());
void ClearAcknowledgement(const MessageOrigin::Ptr& origin = MessageOrigin::Ptr());
+ virtual int GetSeverity(void) const override;
+
/* Checks */
intrusive_ptr<CheckCommand> GetCheckCommand(void) const;
TimePeriod::Ptr GetCheckPeriod(void) const;
void RemoveAllDowntimes(void);
void TriggerDowntimes(void);
bool IsInDowntime(void) const;
- bool IsAcknowledged(void);
+ bool IsAcknowledged(void) const;
std::set<Downtime::Ptr> GetDowntimes(void) const;
void RegisterDowntime(const Downtime::Ptr& downtime);
default {{{ return Application::GetStartTime(); }}}
};
[state] Timestamp last_state_unreachable;
+
+ [no_storage] int severity {
+ get;
+ };
+
[state] bool force_next_check;
[state] int acknowledgement (AcknowledgementRaw) {
default {{{ return AcknowledgementNone; }}}
{
Comment::Ptr comment = Comment::GetByName(id);
- if (!comment)
+ if (!comment || comment->GetPackage() != "_api")
return;
Log(LogNotice, "Comment")
{
Downtime::Ptr downtime = Downtime::GetByName(id);
- if (!downtime)
+ if (!downtime || downtime->GetPackage() != "_api")
return;
String config_owner = downtime->GetConfigOwner();
Log(LogNotice, "Downtime")
<< "Removed downtime '" << downtime->GetName() << "' from object '" << downtime->GetCheckable()->GetName() << "'.";
- if (downtime->GetPackage() != "_api")
- return;
-
Array::Ptr errors = new Array();
if (!ConfigObjectUtility::DeleteObject(downtime, false, errors)) {
return CalculateState(GetLastHardStateRaw());
}
+/* keep in sync with Service::GetSeverity() */
+/**
+ * Computes the severity bitmask for this host from its check state and
+ * handled-ness (downtime/acknowledgement), using the SeverityFlag* values.
+ *
+ * @returns The combined SeverityFlag bitmask.
+ */
+int Host::GetSeverity(void) const
+{
+	int severity = 0;
+
+	/* Lock the object while reading the raw state attributes. */
+	ObjectLock olock(this);
+	ServiceState state = GetStateRaw();
+
+	/* OK/Warning = Up, Critical/Unknown = Down.
+	 * Note: Up-ish states (OK/Warning) intentionally add no state flag here. */
+	if (!HasBeenChecked())
+		severity |= SeverityFlagPending;
+	else if (state == ServiceUnknown)
+		severity |= SeverityFlagCritical;
+	else if (state == ServiceCritical)
+		severity |= SeverityFlagCritical;
+
+	/* A problem in downtime or acknowledged counts as handled;
+	 * anything else is unhandled. */
+	if (IsInDowntime())
+		severity |= SeverityFlagDowntime;
+	else if (IsAcknowledged())
+		severity |= SeverityFlagAcknowledgement;
+	else
+		severity |= SeverityFlagUnhandled;
+
+	olock.Unlock();
+
+	return severity;
+}
+
bool Host::IsStateOK(ServiceState state)
{
return Host::CalculateState(state) == HostUp;
virtual HostState GetState(void) const override;
virtual HostState GetLastState(void) const override;
virtual HostState GetLastHardState(void) const override;
+ virtual int GetSeverity(void) const override;
virtual bool IsStateOK(ServiceState state) override;
virtual void SaveLastState(ServiceState state, double timestamp) override;
l_RetentionTimer->OnTimerExpired.connect(boost::bind(&IcingaApplication::DumpProgramState, this));
l_RetentionTimer->Start();
- /* restore modified attributes */
- if (Utility::PathExists(GetModAttrPath())) {
- Expression *expression = ConfigCompiler::CompileFile(GetModAttrPath());
-
- if (expression) {
- try {
- ScriptFrame frame;
- expression->Evaluate(frame);
- } catch (const std::exception& ex) {
- Log(LogCritical, "config", DiagnosticInformation(ex));
- }
- }
-
- delete expression;
- }
-
RunEventLoop();
Log(LogInformation, "IcingaApplication", "Icinga has shut down.");
using namespace icinga;
-REGISTER_SCRIPTFUNCTION_NS(Internal, LegacyTimePeriod, &LegacyTimePeriod::ScriptFunc);
+REGISTER_SCRIPTFUNCTION_NS(Internal, LegacyTimePeriod, &LegacyTimePeriod::ScriptFunc, "tp:begin:end");
bool LegacyTimePeriod::IsInTimeRange(tm *begin, tm *end, int stride, tm *reference)
{
resolvers_this->Set("macro", new Function("macro (temporary)", boost::bind(&MacroProcessor::InternalResolveMacrosShim,
_1, boost::cref(resolvers), cr, MacroProcessor::EscapeCallback(), resolvedMacros, useResolvedMacros,
- recursionLevel + 1)));
+ recursionLevel + 1), { "str" }));
resolvers_this->Set("resolve_arguments", new Function("resolve_arguments (temporary)", boost::bind(&MacroProcessor::InternalResolveArgumentsShim,
_1, boost::cref(resolvers), cr, resolvedMacros, useResolvedMacros,
recursionLevel + 1)));
using namespace icinga;
-REGISTER_SCRIPTFUNCTION_NS(System, get_host, &Host::GetByName);
-REGISTER_SCRIPTFUNCTION_NS(System, get_service, &ObjectUtils::GetService);
-REGISTER_SCRIPTFUNCTION_NS(System, get_user, &User::GetByName);
-REGISTER_SCRIPTFUNCTION_NS(System, get_check_command, &CheckCommand::GetByName);
-REGISTER_SCRIPTFUNCTION_NS(System, get_event_command, &EventCommand::GetByName);
-REGISTER_SCRIPTFUNCTION_NS(System, get_notification_command, &NotificationCommand::GetByName);
-REGISTER_SCRIPTFUNCTION_NS(System, get_host_group, &HostGroup::GetByName);
-REGISTER_SCRIPTFUNCTION_NS(System, get_service_group, &ServiceGroup::GetByName);
-REGISTER_SCRIPTFUNCTION_NS(System, get_user_group, &UserGroup::GetByName);
-REGISTER_SCRIPTFUNCTION_NS(System, get_time_period, &TimePeriod::GetByName);
+REGISTER_SCRIPTFUNCTION_NS(System, get_host, &Host::GetByName, "name");
+REGISTER_SCRIPTFUNCTION_NS(System, get_service, &ObjectUtils::GetService, "host:name");
+REGISTER_SCRIPTFUNCTION_NS(System, get_user, &User::GetByName, "name");
+REGISTER_SCRIPTFUNCTION_NS(System, get_check_command, &CheckCommand::GetByName, "name");
+REGISTER_SCRIPTFUNCTION_NS(System, get_event_command, &EventCommand::GetByName, "name");
+REGISTER_SCRIPTFUNCTION_NS(System, get_notification_command, &NotificationCommand::GetByName, "name");
+REGISTER_SCRIPTFUNCTION_NS(System, get_host_group, &HostGroup::GetByName, "name");
+REGISTER_SCRIPTFUNCTION_NS(System, get_service_group, &ServiceGroup::GetByName, "name");
+REGISTER_SCRIPTFUNCTION_NS(System, get_user_group, &UserGroup::GetByName, "name");
+REGISTER_SCRIPTFUNCTION_NS(System, get_time_period, &TimePeriod::GetByName, "name");
Service::Ptr ObjectUtils::GetService(const String& host, const String& name)
{
using namespace icinga;
REGISTER_TYPE(PerfdataValue);
-REGISTER_SCRIPTFUNCTION_NS(System, parse_performance_data, PerfdataValue::Parse);
+REGISTER_SCRIPTFUNCTION_NS(System, parse_performance_data, PerfdataValue::Parse, "perfdata");
PerfdataValue::PerfdataValue(void)
{ }
void ScheduledDowntime::ValidateRanges(const Dictionary::Ptr& value, const ValidationUtils& utils)
{
+ ObjectImpl<ScheduledDowntime>::ValidateRanges(value, utils);
+
if (!value)
return;
return m_Host;
}
+/* keep in sync with Host::GetSeverity() */
+int Service::GetSeverity(void) const
+{
+ int severity = 0;
+
+ ObjectLock olock(this);
+ ServiceState state = GetStateRaw();
+
+ if (!HasBeenChecked())
+ severity |= SeverityFlagPending;
+ else if (state == ServiceWarning)
+ severity |= SeverityFlagWarning;
+ else if (state == ServiceUnknown)
+ severity |= SeverityFlagUnknown;
+ else if (state == ServiceCritical)
+ severity |= SeverityFlagCritical;
+
+ /* TODO: Add host reachability and handled */
+ if (IsInDowntime())
+ severity |= SeverityFlagDowntime;
+ else if (IsAcknowledged())
+ severity |= SeverityFlagAcknowledgement;
+ else
+ severity |= SeverityFlagUnhandled;
+
+ olock.Unlock();
+
+ return severity;
+}
+
bool Service::IsStateOK(ServiceState state)
{
return state == ServiceOK;
static Service::Ptr GetByNamePair(const String& hostName, const String& serviceName);
virtual Host::Ptr GetHost(void) const override;
+ virtual int GetSeverity(void) const override;
virtual bool ResolveMacro(const String& macro, const CheckResult::Ptr& cr, Value *result) const override;
using namespace icinga;
-REGISTER_SCRIPTFUNCTION_NS(Internal, ClrCheck, &ClrCheckTask::ScriptFunc);
+REGISTER_SCRIPTFUNCTION_NS(Internal, ClrCheck, &ClrCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
static boost::once_flag l_OnceFlag = BOOST_ONCE_INIT;
using namespace icinga;
-REGISTER_SCRIPTFUNCTION_NS(Internal, ClusterCheck, &ClusterCheckTask::ScriptFunc);
+REGISTER_SCRIPTFUNCTION_NS(Internal, ClusterCheck, &ClusterCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
void ClusterCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
using namespace icinga;
-REGISTER_SCRIPTFUNCTION_NS(Internal, ClusterZoneCheck, &ClusterZoneCheckTask::ScriptFunc);
+REGISTER_SCRIPTFUNCTION_NS(Internal, ClusterZoneCheck, &ClusterZoneCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
void ClusterZoneCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
using namespace icinga;
-REGISTER_SCRIPTFUNCTION_NS(Internal, ExceptionCheck, &ExceptionCheckTask::ScriptFunc);
+REGISTER_SCRIPTFUNCTION_NS(Internal, ExceptionCheck, &ExceptionCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
void ExceptionCheckTask::ScriptFunc(const Checkable::Ptr& service, const CheckResult::Ptr& cr,
const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
using namespace icinga;
-REGISTER_SCRIPTFUNCTION_NS(Internal, IcingaCheck, &IcingaCheckTask::ScriptFunc);
+REGISTER_SCRIPTFUNCTION_NS(Internal, IcingaCheck, &IcingaCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
void IcingaCheckTask::ScriptFunc(const Checkable::Ptr& service, const CheckResult::Ptr& cr,
const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
using namespace icinga;
-REGISTER_SCRIPTFUNCTION_NS(Internal, NullCheck, &NullCheckTask::ScriptFunc);
+REGISTER_SCRIPTFUNCTION_NS(Internal, NullCheck, &NullCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
void NullCheckTask::ScriptFunc(const Checkable::Ptr& service, const CheckResult::Ptr& cr,
const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
using namespace icinga;
-REGISTER_SCRIPTFUNCTION_NS(Internal, NullEvent, &NullEventTask::ScriptFunc);
+REGISTER_SCRIPTFUNCTION_NS(Internal, NullEvent, &NullEventTask::ScriptFunc, "checkable:resolvedMacros:useResolvedMacros");
void NullEventTask::ScriptFunc(const Checkable::Ptr&, const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
{ }
using namespace icinga;
-REGISTER_SCRIPTFUNCTION_NS(Internal, PluginCheck, &PluginCheckTask::ScriptFunc);
+REGISTER_SCRIPTFUNCTION_NS(Internal, PluginCheck, &PluginCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
void PluginCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
using namespace icinga;
-REGISTER_SCRIPTFUNCTION_NS(Internal, PluginEvent, &PluginEventTask::ScriptFunc);
+REGISTER_SCRIPTFUNCTION_NS(Internal, PluginEvent, &PluginEventTask::ScriptFunc, "checkable:resolvedMacros:useResolvedMacros");
void PluginEventTask::ScriptFunc(const Checkable::Ptr& checkable,
const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
using namespace icinga;
-REGISTER_SCRIPTFUNCTION_NS(Internal, PluginNotification, &PluginNotificationTask::ScriptFunc);
+REGISTER_SCRIPTFUNCTION_NS(Internal, PluginNotification, &PluginNotificationTask::ScriptFunc, "notification:user:cr:itype:author:comment:resolvedMacros:useResolvedMacros");
void PluginNotificationTask::ScriptFunc(const Notification::Ptr& notification,
const User::Ptr& user, const CheckResult::Ptr& cr, int itype,
using namespace icinga;
-REGISTER_SCRIPTFUNCTION_NS(Internal, RandomCheck, &RandomCheckTask::ScriptFunc);
+REGISTER_SCRIPTFUNCTION_NS(Internal, RandomCheck, &RandomCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
void RandomCheckTask::ScriptFunc(const Checkable::Ptr& service, const CheckResult::Ptr& cr,
const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
using namespace icinga;
-REGISTER_SCRIPTFUNCTION_NS(Internal, EmptyTimePeriod, &TimePeriodTask::EmptyTimePeriodUpdate);
-REGISTER_SCRIPTFUNCTION_NS(Internal, EvenMinutesTimePeriod, &TimePeriodTask::EvenMinutesTimePeriodUpdate);
+REGISTER_SCRIPTFUNCTION_NS(Internal, EmptyTimePeriod, &TimePeriodTask::EmptyTimePeriodUpdate, "tp:begin:end");
+REGISTER_SCRIPTFUNCTION_NS(Internal, EvenMinutesTimePeriod, &TimePeriodTask::EvenMinutesTimePeriodUpdate, "tp:begin:end");
Array::Ptr TimePeriodTask::EmptyTimePeriodUpdate(const TimePeriod::Ptr&, double, double)
{
mkclass_target(gelfwriter.ti gelfwriter.tcpp gelfwriter.thpp)
mkclass_target(graphitewriter.ti graphitewriter.tcpp graphitewriter.thpp)
+mkclass_target(logstashwriter.ti logstashwriter.tcpp logstashwriter.thpp)
mkclass_target(influxdbwriter.ti influxdbwriter.tcpp influxdbwriter.thpp)
mkclass_target(opentsdbwriter.ti opentsdbwriter.tcpp opentsdbwriter.thpp)
mkclass_target(perfdatawriter.ti perfdatawriter.tcpp perfdatawriter.thpp)
set(perfdata_SOURCES
- gelfwriter.cpp gelfwriter.thpp graphitewriter.cpp graphitewriter.thpp influxdbwriter.cpp influxdbwriter.thpp opentsdbwriter.cpp opentsdbwriter.thpp perfdatawriter.cpp perfdatawriter.thpp
+ gelfwriter.cpp gelfwriter.thpp graphitewriter.cpp graphitewriter.thpp logstashwriter.cpp logstashwriter.thpp influxdbwriter.cpp influxdbwriter.thpp opentsdbwriter.cpp opentsdbwriter.thpp perfdatawriter.cpp perfdatawriter.thpp
)
if(ICINGA2_UNITY_BUILD)
${CMAKE_INSTALL_SYSCONFDIR}/icinga2/features-available
)
+install_if_not_exists(
+ ${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/logstash.conf
+ ${CMAKE_INSTALL_SYSCONFDIR}/icinga2/features-available
+)
+
install_if_not_exists(
${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/influxdb.conf
${CMAKE_INSTALL_SYSCONFDIR}/icinga2/features-available
}
#endif /* GELFWRITER_H */
+
#include "icinga/service.hpp"
#include "icinga/macroprocessor.hpp"
#include "icinga/icingaapplication.hpp"
-#include "icinga/compatutility.hpp"
#include "icinga/perfdatavalue.hpp"
#include "icinga/checkcommand.hpp"
#include "base/tcpsocket.hpp"
#include "base/logger.hpp"
#include "base/convert.hpp"
#include "base/utility.hpp"
-#include "base/application.hpp"
#include "base/stream.hpp"
#include "base/networkstream.hpp"
#include "base/exception.hpp"
REGISTER_STATSFUNCTION(InfluxdbWriter, &InfluxdbWriter::StatsFunc);
+//TODO: Evaluate whether multiple WQ threads and InfluxDB connections are possible. 10 threads will hog InfluxDB in large scale environments.
+InfluxdbWriter::InfluxdbWriter(void)
+ : m_WorkQueue(10000000, 1), m_TaskStats(15 * 60), m_PendingTasks(0), m_PendingTasksTimestamp(0)
+{ }
+
+void InfluxdbWriter::OnConfigLoaded(void)
+{
+ ObjectImpl<InfluxdbWriter>::OnConfigLoaded();
+
+ m_WorkQueue.SetName("InfluxdbWriter, " + GetName());
+}
+
void InfluxdbWriter::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr&)
{
Dictionary::Ptr nodes = new Dictionary();
for (const InfluxdbWriter::Ptr& influxdbwriter : ConfigType::GetObjectsByType<InfluxdbWriter>()) {
- nodes->Set(influxdbwriter->GetName(), 1); //add more stats
+ size_t workQueueItems = influxdbwriter->m_WorkQueue.GetLength();
+ size_t dataBufferItems = influxdbwriter->m_DataBuffer.size();
+
+ //TODO: Collect more stats
+ Dictionary::Ptr stats = new Dictionary();
+ stats->Set("work_queue_items", workQueueItems);
+ stats->Set("data_buffer_items", dataBufferItems);
+
+ nodes->Set(influxdbwriter->GetName(), stats);
}
status->Set("influxdbwriter", nodes);
void InfluxdbWriter::Start(bool runtimeCreated)
{
- m_DataBuffer = new Array();
-
ObjectImpl<InfluxdbWriter>::Start(runtimeCreated);
Log(LogInformation, "InfluxdbWriter")
<< "'" << GetName() << "' started.";
+ /* Register exception handler for WQ tasks. */
+ m_WorkQueue.SetExceptionCallback(boost::bind(&InfluxdbWriter::ExceptionHandler, this, _1));
+
+ /* Setup timer for periodically flushing m_DataBuffer */
m_FlushTimer = new Timer();
m_FlushTimer->SetInterval(GetFlushInterval());
m_FlushTimer->OnTimerExpired.connect(boost::bind(&InfluxdbWriter::FlushTimeout, this));
m_FlushTimer->Start();
m_FlushTimer->Reschedule(0);
+ /* Timer for updating and logging work queue stats */
+ m_StatsLoggerTimer = new Timer();
+ m_StatsLoggerTimer->SetInterval(60); // don't be too noisy
+ m_StatsLoggerTimer->OnTimerExpired.connect(boost::bind(&InfluxdbWriter::StatsLoggerTimerHandler, this));
+ m_StatsLoggerTimer->Start();
+
+ /* Register for new metrics. */
Service::OnNewCheckResult.connect(boost::bind(&InfluxdbWriter::CheckResultHandler, this, _1, _2));
}
Log(LogInformation, "InfluxdbWriter")
<< "'" << GetName() << "' stopped.";
+ m_WorkQueue.Join();
+
ObjectImpl<InfluxdbWriter>::Stop(runtimeRemoved);
}
+void InfluxdbWriter::AssertOnWorkQueue(void)
+{
+ ASSERT(m_WorkQueue.IsWorkerThread());
+}
+
+void InfluxdbWriter::ExceptionHandler(boost::exception_ptr exp)
+{
+ Log(LogCritical, "InfluxdbWriter", "Exception during InfluxDB operation: Verify that your backend is operational!");
+
+ Log(LogDebug, "InfluxdbWriter")
+ << "Exception during InfluxDB operation: " << DiagnosticInformation(exp);
+
+ //TODO: Close the connection, if we keep it open.
+}
+
+void InfluxdbWriter::StatsLoggerTimerHandler(void)
+{
+ int pending = m_WorkQueue.GetLength();
+
+ double now = Utility::GetTime();
+ double gradient = (pending - m_PendingTasks) / (now - m_PendingTasksTimestamp);
+ double timeToZero = pending / gradient;
+
+ String timeInfo;
+
+ if (pending > GetTaskCount(5)) {
+ timeInfo = " empty in ";
+ if (timeToZero < 0)
+ timeInfo += "infinite time, your backend isn't able to keep up";
+ else
+ timeInfo += Utility::FormatDuration(timeToZero);
+ }
+
+ m_PendingTasks = pending;
+ m_PendingTasksTimestamp = now;
+
+ Log(LogInformation, "InfluxdbWriter")
+ << "Work queue items: " << pending
+ << ", rate: " << std::setw(2) << GetTaskCount(60) / 60.0 << "/s"
+ << " (" << GetTaskCount(60) << "/min " << GetTaskCount(60 * 5) << "/5min " << GetTaskCount(60 * 15) << "/15min);"
+ << timeInfo;
+}
+
Stream::Ptr InfluxdbWriter::Connect(TcpSocket::Ptr& socket)
{
socket = new TcpSocket();
try {
socket->Connect(GetHost(), GetPort());
- } catch (std::exception&) {
+ } catch (const std::exception& ex) {
Log(LogWarning, "InfluxdbWriter")
<< "Can't connect to InfluxDB on host '" << GetHost() << "' port '" << GetPort() << "'.";
- return Stream::Ptr();
+ throw ex;
}
if (GetSslEnable()) {
- boost::shared_ptr<SSL_CTX> ssl_context;
+ boost::shared_ptr<SSL_CTX> sslContext;
try {
- ssl_context = MakeSSLContext(GetSslCert(), GetSslKey(), GetSslCaCert());
- } catch (std::exception&) {
+ sslContext = MakeSSLContext(GetSslCert(), GetSslKey(), GetSslCaCert());
+ } catch (const std::exception& ex) {
Log(LogWarning, "InfluxdbWriter")
<< "Unable to create SSL context.";
- return Stream::Ptr();
+ throw ex;
}
- TlsStream::Ptr tls_stream = new TlsStream(socket, GetHost(), RoleClient, ssl_context);
+ TlsStream::Ptr tlsStream = new TlsStream(socket, GetHost(), RoleClient, sslContext);
try {
- tls_stream->Handshake();
- } catch (std::exception&) {
+ tlsStream->Handshake();
+ } catch (const std::exception& ex) {
Log(LogWarning, "InfluxdbWriter")
<< "TLS handshake with host '" << GetHost() << "' failed.";
- return Stream::Ptr();
+ throw ex;
}
- return tls_stream;
+ return tlsStream;
} else {
return new NetworkStream(socket);
}
// Prevent missing macros from warning; will return an empty value
// which will be filtered out in SendMetric()
String missing_macro;
- tags->Set(pair.first, MacroProcessor::ResolveMacros(pair.second, resolvers, cr, &missing_macro));
+ tags->Set(pair.first, MacroProcessor::ResolveMacros(pair.second, resolvers, cr, &missing_macro));
}
}
SendPerfdata(tmpl, checkable, cr, ts);
}
-String InfluxdbWriter::FormatInteger(const int val)
+String InfluxdbWriter::FormatInteger(int val)
{
return Convert::ToString(val) + "i";
}
-String InfluxdbWriter::FormatBoolean(const bool val)
+String InfluxdbWriter::FormatBoolean(bool val)
{
- return val ? "true" : "false";
+ return String(val);
}
void InfluxdbWriter::SendPerfdata(const Dictionary::Ptr& tmpl, const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, double ts)
String InfluxdbWriter::EscapeField(const String& str)
{
+ //TODO: Evaluate whether boost::regex is really needed here.
+
// Handle integers
boost::regex integer("-?\\d+i");
if (boost::regex_match(str.GetData(), integer)) {
msgbuf << " ";
- bool first = true;
- ObjectLock fieldLock(fields);
- for (const Dictionary::Pair& pair : fields) {
- if (first)
- first = false;
- else
- msgbuf << ",";
- msgbuf << EscapeKey(pair.first) << "=" << EscapeField(pair.second);
+ {
+ bool first = true;
+
+ ObjectLock fieldLock(fields);
+ for (const Dictionary::Pair& pair : fields) {
+ if (first)
+ first = false;
+ else
+ msgbuf << ",";
+
+ msgbuf << EscapeKey(pair.first) << "=" << EscapeField(pair.second);
+ }
}
msgbuf << " " << static_cast<unsigned long>(ts);
Log(LogDebug, "InfluxdbWriter")
- << "Add to metric list:'" << msgbuf.str() << "'.";
+ << "Add to metric list: '" << msgbuf.str() << "'.";
// Atomically buffer the data point
- ObjectLock olock(m_DataBuffer);
- m_DataBuffer->Add(String(msgbuf.str()));
+ boost::mutex::scoped_lock lock(m_DataBufferMutex);
+ m_DataBuffer.push_back(String(msgbuf.str()));
// Flush if we've buffered too much to prevent excessive memory use
- if (static_cast<int>(m_DataBuffer->GetLength()) >= GetFlushThreshold()) {
+ if (static_cast<int>(m_DataBuffer.size()) >= GetFlushThreshold()) {
Log(LogDebug, "InfluxdbWriter")
- << "Data buffer overflow writing " << m_DataBuffer->GetLength() << " data points";
+ << "Data buffer overflow writing " << m_DataBuffer.size() << " data points";
Flush();
}
}
{
// Prevent new data points from being added to the array, there is a
// race condition where they could disappear
- ObjectLock olock(m_DataBuffer);
+ boost::mutex::scoped_lock lock(m_DataBufferMutex);
// Flush if there are any data available
- if (m_DataBuffer->GetLength() > 0) {
+ if (m_DataBuffer.size() > 0) {
Log(LogDebug, "InfluxdbWriter")
- << "Timer expired writing " << m_DataBuffer->GetLength() << " data points";
+ << "Timer expired writing " << m_DataBuffer.size() << " data points";
Flush();
}
}
void InfluxdbWriter::Flush(void)
{
+ // Ensure you hold a lock against m_DataBuffer so that things
+ // don't go missing after creating the body and clearing the buffer
+ String body = boost::algorithm::join(m_DataBuffer, "\n");
+ m_DataBuffer.clear();
+
+ // Asynchronously flush the metric body to InfluxDB
+ m_WorkQueue.Enqueue(boost::bind(&InfluxdbWriter::FlushHandler, this, body));
+}
+
+void InfluxdbWriter::FlushHandler(const String& body)
+{
+ AssertOnWorkQueue();
+
TcpSocket::Ptr socket;
Stream::Ptr stream = Connect(socket);
- // Unable to connect, play it safe and lose the data points
- // to avoid a memory leak
- if (!stream.get()) {
- m_DataBuffer->Clear();
+ if (!stream)
return;
- }
+
+ IncreaseTaskCount();
Url::Ptr url = new Url();
url->SetScheme(GetSslEnable() ? "https" : "http");
if (!GetPassword().IsEmpty())
url->AddQueryElement("p", GetPassword());
- // Ensure you hold a lock against m_DataBuffer so that things
- // don't go missing after creating the body and clearing the buffer
- String body = Utility::Join(m_DataBuffer, '\n', false);
- m_DataBuffer->Clear();
-
HttpRequest req(stream);
req.RequestMethod = "POST";
req.RequestUrl = url;
try {
req.WriteBody(body.CStr(), body.GetLength());
req.Finish();
- } catch (const std::exception&) {
+ } catch (const std::exception& ex) {
Log(LogWarning, "InfluxdbWriter")
<< "Cannot write to TCP socket on host '" << GetHost() << "' port '" << GetPort() << "'.";
- return;
+ throw ex;
}
+ //TODO: Evaluate whether waiting for the result makes sense here. KeepAlive and close are options.
HttpResponse resp(stream, req);
StreamReadContext context;
struct timeval timeout = { GetSocketTimeout(), 0 };
+
if (!socket->Poll(true, false, &timeout)) {
Log(LogWarning, "InfluxdbWriter")
<< "Response timeout of TCP socket from host '" << GetHost() << "' port '" << GetPort() << "'.";
try {
resp.Parse(context, true);
- } catch (const std::exception&) {
+ } catch (const std::exception& ex) {
Log(LogWarning, "InfluxdbWriter")
<< "Cannot read from TCP socket from host '" << GetHost() << "' port '" << GetPort() << "'.";
- return;
+ throw ex;
}
if (resp.StatusCode != 204) {
}
}
+void InfluxdbWriter::IncreaseTaskCount(void)
+{
+ double now = Utility::GetTime();
+
+ boost::mutex::scoped_lock lock(m_StatsMutex);
+ m_TaskStats.InsertValue(now, 1);
+}
+
+int InfluxdbWriter::GetTaskCount(RingBuffer::SizeType span) const
+{
+ boost::mutex::scoped_lock lock(m_StatsMutex);
+ return m_TaskStats.GetValues(span);
+}
+
void InfluxdbWriter::ValidateHostTemplate(const Dictionary::Ptr& value, const ValidationUtils& utils)
{
ObjectImpl<InfluxdbWriter>::ValidateHostTemplate(value, utils);
#include "base/configobject.hpp"
#include "base/tcpsocket.hpp"
#include "base/timer.hpp"
+#include "base/ringbuffer.hpp"
+#include "base/workqueue.hpp"
+#include <boost/thread/mutex.hpp>
#include <fstream>
namespace icinga
DECLARE_OBJECT(InfluxdbWriter);
DECLARE_OBJECTNAME(InfluxdbWriter);
+ InfluxdbWriter(void);
+
static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+ int GetTaskCount(RingBuffer::SizeType span) const;
+
virtual void ValidateHostTemplate(const Dictionary::Ptr& value, const ValidationUtils& utils) override;
virtual void ValidateServiceTemplate(const Dictionary::Ptr& value, const ValidationUtils& utils) override;
protected:
+ virtual void OnConfigLoaded(void) override;
virtual void Start(bool runtimeCreated) override;
virtual void Stop(bool runtimeRemoved) override;
+ void IncreaseTaskCount(void);
+
private:
+ WorkQueue m_WorkQueue;
Timer::Ptr m_FlushTimer;
- Array::Ptr m_DataBuffer;
+ std::vector<String> m_DataBuffer;
+ boost::mutex m_DataBufferMutex;
+
+ mutable boost::mutex m_StatsMutex;
+ RingBuffer m_TaskStats;
+ int m_PendingTasks;
+ double m_PendingTasksTimestamp;
+
+ Timer::Ptr m_StatsLoggerTimer;
+ void StatsLoggerTimerHandler(void);
void CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr);
void SendPerfdata(const Dictionary::Ptr& tmpl, const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, double ts);
void FlushTimeout(void);
void Flush(void);
- static String FormatInteger(const int val);
- static String FormatBoolean(const bool val);
+ void FlushHandler(const String& body);
+
+ static String FormatInteger(int val);
+ static String FormatBoolean(bool val);
static String EscapeKey(const String& str);
static String EscapeField(const String& str);
Stream::Ptr Connect(TcpSocket::Ptr& socket);
+
+ void AssertOnWorkQueue(void);
+
+ void ExceptionHandler(boost::exception_ptr exp);
};
}
--- /dev/null
+/******************************************************************************
+ * Icinga 2 *
+ * Copyright (C) 2012-2017 Icinga Development Team (https://www.icinga.com/) *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the Free Software Foundation *
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. *
+ ******************************************************************************/
+
+#include "perfdata/logstashwriter.hpp"
+#include "perfdata/logstashwriter.tcpp"
+#include "icinga/service.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "icinga/compatutility.hpp"
+#include "icinga/perfdatavalue.hpp"
+#include "icinga/notification.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/utility.hpp"
+#include "base/stream.hpp"
+#include "base/networkstream.hpp"
+#include "base/json.hpp"
+#include "base/context.hpp"
+#include <boost/foreach.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <string>
+
+using namespace icinga;
+
+REGISTER_TYPE(LogstashWriter);
+
+void LogstashWriter::Start(bool runtimeCreated)
+{
+ ObjectImpl<LogstashWriter>::Start(runtimeCreated);
+
+ m_ReconnectTimer = new Timer();
+ m_ReconnectTimer->SetInterval(10);
+ m_ReconnectTimer->OnTimerExpired.connect(boost::bind(&LogstashWriter::ReconnectTimerHandler, this));
+ m_ReconnectTimer->Start();
+ m_ReconnectTimer->Reschedule(0);
+
+ // Send check results
+ Service::OnNewCheckResult.connect(boost::bind(&LogstashWriter::CheckResultHandler, this, _1, _2));
+ // Send notifications
+ Service::OnNotificationSentToUser.connect(boost::bind(&LogstashWriter::NotificationToUserHandler, this, _1, _2, _3, _4, _5, _6, _7, _8));
+ // Send state change
+ Service::OnStateChange.connect(boost::bind(&LogstashWriter::StateChangeHandler, this, _1, _2, _3));
+}
+
+void LogstashWriter::ReconnectTimerHandler(void)
+{
+ if (m_Stream)
+ return;
+
+ Socket::Ptr socket;
+
+ if (GetSocketType() == "tcp")
+ socket = new TcpSocket();
+ else
+ socket = new UdpSocket();
+
+ Log(LogNotice, "LogstashWriter")
+ << "Reconnecting to Logstash endpoint '" << GetHost() << "' port '" << GetPort() << "'.";
+
+ try {
+ socket->Connect(GetHost(), GetPort());
+ } catch (const std::exception&) {
+ Log(LogCritical, "LogstashWriter")
+ << "Can't connect to Logstash endpoint '" << GetHost() << "' port '" << GetPort() << "'.";
+ return;
+ }
+
+ m_Stream = new NetworkStream(socket);
+}
+
+void LogstashWriter::CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr)
+{
+ CONTEXT("LOGSTASH Processing check result for '" + checkable->GetName() + "'");
+
+ Log(LogDebug, "LogstashWriter")
+ << "Processing check result for '" << checkable->GetName() << "'";
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr fields = new Dictionary();
+
+ if (service) {
+ fields->Set("service_name", service->GetShortName());
+ fields->Set("service_state", Service::StateToString(service->GetState()));
+ fields->Set("last_state", service->GetLastState());
+ fields->Set("last_hard_state", service->GetLastHardState());
+ } else {
+ fields->Set("last_state", host->GetLastState());
+ fields->Set("last_hard_state", host->GetLastHardState());
+ }
+
+ fields->Set("host_name", host->GetName());
+ fields->Set("type", "CheckResult");
+ fields->Set("state", service ? Service::StateToString(service->GetState()) : Host::StateToString(host->GetState()));
+
+ fields->Set("current_check_attempt", checkable->GetCheckAttempt());
+ fields->Set("max_check_attempts", checkable->GetMaxCheckAttempts());
+
+ fields->Set("latency", cr->CalculateLatency());
+ fields->Set("execution_time", cr->CalculateExecutionTime());
+ fields->Set("reachable", checkable->IsReachable());
+
+ double ts = Utility::GetTime();
+
+ if (cr) {
+ fields->Set("plugin_output", cr->GetOutput());
+ fields->Set("check_source", cr->GetCheckSource());
+ ts = cr->GetExecutionEnd();
+ }
+
+ Array::Ptr perfdata = cr->GetPerformanceData();
+
+ if (perfdata) {
+ Dictionary::Ptr perfdataItems = new Dictionary();
+
+ ObjectLock olock(perfdata);
+ for (const Value& val : perfdata) {
+ PerfdataValue::Ptr pdv;
+
+ if (val.IsObjectType<PerfdataValue>())
+ pdv = val;
+ else {
+ try {
+ pdv = PerfdataValue::Parse(val);
+ } catch (const std::exception&) {
+ Log(LogWarning, "LogstashWriter")
+ << "Ignoring invalid perfdata value: '" << val << "' for object '"
+ << checkable->GetName() << "'.";
+ continue;
+ }
+ }
+
+ Dictionary::Ptr perfdataItem = new Dictionary();
+ perfdataItem->Set("value", pdv->GetValue());
+
+ if (pdv->GetMin())
+ perfdataItem->Set("min", pdv->GetMin());
+ if (pdv->GetMax())
+ perfdataItem->Set("max", pdv->GetMax());
+ if (pdv->GetWarn())
+ perfdataItem->Set("warn", pdv->GetWarn());
+ if (pdv->GetCrit())
+ perfdataItem->Set("crit", pdv->GetCrit());
+
+ String escaped_key = EscapeMetricLabel(pdv->GetLabel());
+
+ perfdataItems->Set(escaped_key, perfdataItem);
+ }
+
+ fields->Set("performance_data", perfdataItems);
+ }
+
+ SendLogMessage(ComposeLogstashMessage(fields, GetSource(), ts));
+}
+
+
+void LogstashWriter::NotificationToUserHandler(const Notification::Ptr& notification, const Checkable::Ptr& checkable,
+ const User::Ptr& user, NotificationType notification_type, CheckResult::Ptr const& cr,
+ const String& author, const String& comment_text, const String& command_name)
+{
+ CONTEXT("Logstash Processing notification to all users '" + checkable->GetName() + "'");
+
+ Log(LogDebug, "LogstashWriter")
+ << "Processing notification for '" << checkable->GetName() << "'";
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ String notification_type_str = Notification::NotificationTypeToString(notification_type);
+
+ String author_comment = "";
+
+ if (notification_type == NotificationCustom || notification_type == NotificationAcknowledgement) {
+ author_comment = author + ";" + comment_text;
+ }
+
+ double ts = Utility::GetTime();
+
+ Dictionary::Ptr fields = new Dictionary();
+
+ if (service) {
+ fields->Set("type", "SERVICE NOTIFICATION");
+ fields->Set("service_name", service->GetShortName());
+ } else {
+ fields->Set("type", "HOST NOTIFICATION");
+ }
+
+ if (cr) {
+ fields->Set("plugin_output", cr->GetOutput());
+ ts = cr->GetExecutionEnd();
+ }
+
+ fields->Set("state", service ? Service::StateToString(service->GetState()) : Host::StateToString(host->GetState()));
+
+ fields->Set("host_name", host->GetName());
+ fields->Set("command", command_name);
+ fields->Set("notification_type", notification_type_str);
+ fields->Set("comment", author_comment);
+
+ SendLogMessage(ComposeLogstashMessage(fields, GetSource(), ts));
+}
+
+void LogstashWriter::StateChangeHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, StateType type)
+{
+ CONTEXT("Logstash Processing state change '" + checkable->GetName() + "'");
+
+ Log(LogDebug, "LogstashWriter")
+ << "Processing state change for '" << checkable->GetName() << "'";
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr fields = new Dictionary();
+
+ fields->Set("state", service ? Service::StateToString(service->GetState()) : Host::StateToString(host->GetState()));
+ fields->Set("type", "StateChange");
+ fields->Set("current_check_attempt", checkable->GetCheckAttempt());
+ fields->Set("max_check_attempts", checkable->GetMaxCheckAttempts());
+ fields->Set("hostname", host->GetName());
+
+ if (service) {
+ fields->Set("service_name", service->GetShortName());
+ fields->Set("service_state", Service::StateToString(service->GetState()));
+ fields->Set("last_state", service->GetLastState());
+ fields->Set("last_hard_state", service->GetLastHardState());
+ } else {
+ fields->Set("last_state", host->GetLastState());
+ fields->Set("last_hard_state", host->GetLastHardState());
+ }
+
+ double ts = Utility::GetTime();
+
+ if (cr) {
+ fields->Set("plugin_output", cr->GetOutput());
+ fields->Set("check_source", cr->GetCheckSource());
+ ts = cr->GetExecutionEnd();
+ }
+
+ SendLogMessage(ComposeLogstashMessage(fields, GetSource(), ts));
+}
+
+String LogstashWriter::ComposeLogstashMessage(const Dictionary::Ptr& fields, const String& source, double ts)
+{
+ fields->Set("version", "1.1");
+ fields->Set("host", source);
+ fields->Set("timestamp", ts);
+
+ return JsonEncode(fields) + "\n";
+}
+
+void LogstashWriter::SendLogMessage(const String& message)
+{
+ /* Writes one serialized event to the Logstash socket. On failure the stream is
+ * dropped so the reconnect timer can re-establish the connection.
+ *
+ * Fix: the caught exception was silently discarded; its message is now logged
+ * so connection failures can actually be diagnosed. */
+ ObjectLock olock(this);
+
+ if (!m_Stream)
+ return;
+
+ try {
+ m_Stream->Write(&message[0], message.GetLength());
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "LogstashWriter")
+ << "Cannot write to " << GetSocketType()
+ << " socket on host '" << GetHost() << "' port '" << GetPort() << "': " << ex.what();
+
+ m_Stream.reset();
+ }
+}
+
+String LogstashWriter::EscapeMetricLabel(const String& str)
+{
+ /* Sanitizes a perfdata label for use as a field name: spaces, dots and backslashes
+ * become underscores. Replacement order matters: "." is mapped to "_" before "::"
+ * is mapped to ".", so any dot in the result can only originate from a "::". */
+ String result = str;
+
+ boost::replace_all(result, " ", "_");
+ boost::replace_all(result, ".", "_");
+ boost::replace_all(result, "\\", "_");
+ boost::replace_all(result, "::", ".");
+
+ return result;
+}
+
+void LogstashWriter::ValidateSocketType(const String& value, const ValidationUtils& utils)
+{
+ /* Config validator: only "udp" and "tcp" are accepted for the socket_type attribute. */
+ ObjectImpl<LogstashWriter>::ValidateSocketType(value, utils);
+
+ if (value != "udp" && value != "tcp")
+ BOOST_THROW_EXCEPTION(ValidationError(this, boost::assign::list_of("socket_type"), "Socket type '" + value + "' is invalid."));
+}
--- /dev/null
+/******************************************************************************
+ * Icinga 2 *
+ * Copyright (C) 2012-2017 Icinga Development Team (https://www.icinga.com/) *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the Free Software Foundation *
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. *
+ ******************************************************************************/
+
+#ifndef LOGSTASHWRITER_H
+#define LOGSTASHWRITER_H
+
+#include "perfdata/logstashwriter.thpp"
+#include "icinga/service.hpp"
+#include "base/configobject.hpp"
+#include "base/tcpsocket.hpp"
+#include "base/udpsocket.hpp"
+#include "base/timer.hpp"
+#include <fstream>
+#include <string>
+
+namespace icinga
+{
+
+/**
+ * An Icinga logstash writer.
+ *
+ * @ingroup perfdata
+ */
+class LogstashWriter : public ObjectImpl<LogstashWriter>
+{
+
+public:
+ DECLARE_OBJECT(LogstashWriter);
+ DECLARE_OBJECTNAME(LogstashWriter);
+
+ /* Restricts the socket_type config attribute to "udp"/"tcp". */
+ virtual void ValidateSocketType(const String& value, const ValidationUtils& utils) override;
+
+protected:
+ virtual void Start(bool runtimeCreated) override;
+
+private:
+ /* Connection to the Logstash endpoint; reset on write errors until reconnect. */
+ Stream::Ptr m_Stream;
+
+ /* Periodically re-establishes m_Stream after connection loss. */
+ Timer::Ptr m_ReconnectTimer;
+
+ /* Event handlers translating Icinga events into Logstash messages. */
+ void CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr);
+ void NotificationToUserHandler(const Notification::Ptr& notification, const Checkable::Ptr& checkable,
+ const User::Ptr& user, NotificationType notification_type, CheckResult::Ptr const& cr,
+ const String& author, const String& comment_text, const String& command_name);
+ void StateChangeHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, StateType type);
+ void SendLogMessage(const String& message);
+ String ComposeLogstashMessage(const Dictionary::Ptr& fields, const String& source, double ts);
+
+ /* Sanitizes perfdata labels for use as JSON field names. */
+ static String EscapeMetricLabel(const String& str);
+
+ void ReconnectTimerHandler(void);
+};
+
+}
+
+#endif /* LOGSTASHWRITER_H */
--- /dev/null
+/******************************************************************************
+ * Icinga 2 *
+ * Copyright (C) 2012-2016 Icinga Development Team (https://www.icinga.org/) *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the Free Software Foundation *
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. *
+ ******************************************************************************/
+
+#include "base/configobject.hpp"
+
+library perfdata;
+
+namespace icinga
+{
+
+class LogstashWriter : ConfigObject
+{
+ /* Logstash endpoint address. */
+ [config] String host {
+ default {{{ return "127.0.0.1"; }}}
+ };
+
+ /* Logstash endpoint port. */
+ [config] String port {
+ default {{{ return "9201"; }}}
+ };
+
+ /* Transport protocol: "udp" or "tcp" (enforced by ValidateSocketType()). */
+ [config] String socket_type {
+ default {{{ return "udp"; }}}
+ };
+
+ /* Value used for the "host" field in emitted messages. */
+ [config] String source {
+ default {{{ return "icinga2"; }}}
+ };
+};
+
+}
mkclass_target(rediswriter.ti rediswriter.tcpp rediswriter.thpp)
set(redis_SOURCES
- rediswriter.cpp rediswriter.thpp
+ rediswriter.cpp rediswriter-status.cpp rediswriter-utility.cpp rediswriter.thpp
)
if(ICINGA2_UNITY_BUILD)
--- /dev/null
+/******************************************************************************
+ * Icinga 2 *
+ * Copyright (C) 2012-2017 Icinga Development Team (https://www.icinga.com/) *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the Free Software Foundation *
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. *
+ ******************************************************************************/
+
+#include "redis/rediswriter.hpp"
+#include "icinga/customvarobject.hpp"
+#include "icinga/host.hpp"
+#include "icinga/service.hpp"
+#include "base/json.hpp"
+#include "base/logger.hpp"
+#include "base/serializer.hpp"
+#include "base/tlsutility.hpp"
+#include "base/initialize.hpp"
+
+using namespace icinga;
+
+/*
+- icinga:config:<type> as hash
+key: sha1 checksum(name)
+value: JsonEncode(Serialize(object, FAConfig)) + config_checksum
+
+Diff between calculated config_checksum and Redis json config_checksum
+Alternative: Replace into.
+
+
+- icinga:status:<type> as hash
+key: sha1 checksum(name)
+value: JsonEncode(Serialize(object, FAState))
+*/
+
+INITIALIZE_ONCE(&RedisWriter::ConfigStaticInitialize);
+
+void RedisWriter::ConfigStaticInitialize(void)
+{
+ /* Wires up global config-object signals once at startup (via INITIALIZE_ONCE);
+ * each static handler fans out to every configured RedisWriter instance. */
+ /* triggered in ProcessCheckResult(), requires UpdateNextCheck() to be called before */
+ ConfigObject::OnStateChanged.connect(boost::bind(&RedisWriter::StateChangedHandler, _1));
+ CustomVarObject::OnVarsChanged.connect(boost::bind(&RedisWriter::VarsChangedHandler, _1));
+
+ /* triggered on create, update and delete objects */
+ ConfigObject::OnActiveChanged.connect(boost::bind(&RedisWriter::VersionChangedHandler, _1));
+ ConfigObject::OnVersionChanged.connect(boost::bind(&RedisWriter::VersionChangedHandler, _1));
+}
+
+void RedisWriter::UpdateAllConfigObjects(void)
+{
+ /* Dumps config and status of every config object into Redis inside a single
+ * MULTI/EXEC transaction, publishing a per-type "dump finished" event. */
+ AssertOnWorkQueue();
+
+ double startTime = Utility::GetTime();
+
+ //TODO: "Publish" the config dump by adding another event, globally or by object
+ ExecuteQuery({ "MULTI" });
+
+ for (const Type::Ptr& type : Type::GetAllTypes()) {
+ if (!ConfigObject::TypeInstance->IsAssignableFrom(type))
+ continue;
+
+ String typeName = type->GetName();
+
+ /* replace into aka delete insert is faster than a full diff */
+ ExecuteQuery({ "DEL", "icinga:config:" + typeName, "icinga:config:" + typeName + ":checksum", "icinga:status:" + typeName });
+
+ /* fetch all objects and dump them */
+ ConfigType *ctype = dynamic_cast<ConfigType *>(type.get());
+ VERIFY(ctype);
+
+ for (const ConfigObject::Ptr& object : ctype->GetObjects()) {
+ SendConfigUpdate(object, typeName);
+ SendStatusUpdate(object, typeName);
+ }
+
+ /* publish config type dump finished */
+ ExecuteQuery({ "PUBLISH", "icinga:config:dump", typeName });
+ }
+
+ ExecuteQuery({ "EXEC" });
+
+ Log(LogInformation, "RedisWriter")
+ << "Initial config/status dump finished in " << Utility::GetTime() - startTime << " seconds.";
+}
+
+void RedisWriter::SendConfigUpdate(const ConfigObject::Ptr& object, const String& typeName, bool runtimeUpdate)
+{
+ /* Dumps an object's FAConfig attributes plus a checksum record into Redis.
+ * For runtime updates the change is additionally published on icinga:config:update. */
+ AssertOnWorkQueue();
+
+ /* during startup we might send duplicated object config, ignore them without any connection */
+ if (!m_Context)
+ return;
+
+ /* TODO: This isn't essentially correct as we don't keep track of config objects ourselves. This would avoid duplicated config updates at startup.
+ if (!runtimeUpdate && m_ConfigDumpInProgress)
+ return;
+ */
+
+ /* Serialize config object attributes */
+ Dictionary::Ptr objectAttrs = SerializeObjectAttrs(object, FAConfig);
+
+ String jsonBody = JsonEncode(objectAttrs);
+
+ String objectName = object->GetName();
+
+ ExecuteQuery({ "HSET", "icinga:config:" + typeName, objectName, jsonBody });
+
+ /* check sums */
+ /* NOTE(review): the example below says ":checksums" (plural) but the code writes ":checksum" -- confirm consumers. */
+ /* hset icinga:config:Host:checksums localhost { "name_checksum": "...", "properties_checksum": "...", "groups_checksum": "...", "vars_checksum": null } */
+ Dictionary::Ptr checkSum = new Dictionary();
+
+ checkSum->Set("name_checksum", CalculateCheckSumString(object->GetName()));
+
+ // TODO: move this elsewhere
+ Checkable::Ptr checkable = dynamic_pointer_cast<Checkable>(object);
+
+ if (checkable) {
+ Host::Ptr host;
+ Service::Ptr service;
+
+ tie(host, service) = GetHostService(checkable);
+
+ if (service)
+ checkSum->Set("groups_checksum", CalculateCheckSumGroups(service->GetGroups()));
+ else
+ checkSum->Set("groups_checksum", CalculateCheckSumGroups(host->GetGroups()));
+ }
+
+ checkSum->Set("properties_checksum", CalculateCheckSumProperties(object));
+ checkSum->Set("vars_checksum", CalculateCheckSumVars(object));
+
+ String checkSumBody = JsonEncode(checkSum);
+
+ ExecuteQuery({ "HSET", "icinga:config:" + typeName + ":checksum", objectName, checkSumBody });
+
+ /* publish runtime updated objects immediately */
+ if (!runtimeUpdate)
+ return;
+
+ /*
+ PUBLISH "icinga:config:dump" "Host"
+ PUBLISH "icinga:config:update" "Host:__name!checksumBody"
+ PUBLISH "icinga:config:delete" "Host:__name"
+ */
+
+ ExecuteQuery({ "PUBLISH", "icinga:config:update", typeName + ":" + objectName + "!" + checkSumBody });
+}
+
+void RedisWriter::SendConfigDelete(const ConfigObject::Ptr& object, const String& typeName)
+{
+ /* Removes an object's config, checksum and status entries from Redis and
+ * publishes the deletion on the icinga:config:delete channel. */
+ AssertOnWorkQueue();
+
+ /* during startup we might send duplicated object config, ignore them without any connection */
+ if (!m_Context)
+ return;
+
+ String objectName = object->GetName();
+
+ ExecuteQuery({ "HDEL", "icinga:config:" + typeName, objectName });
+ ExecuteQuery({ "HDEL", "icinga:config:" + typeName + ":checksum", objectName });
+ ExecuteQuery({ "HDEL", "icinga:status:" + typeName, objectName });
+
+ /*
+ PUBLISH "icinga:config:dump" "Host"
+ PUBLISH "icinga:config:update" "Host:__name!checksumBody"
+ PUBLISH "icinga:config:delete" "Host:__name"
+ */
+
+ ExecuteQuery({ "PUBLISH", "icinga:config:delete", typeName + ":" + objectName });
+}
+
+void RedisWriter::SendStatusUpdate(const ConfigObject::Ptr& object, const String& typeName)
+{
+ /* Dumps the volatile state (FAState attributes) of a config object into
+ * icinga:status:<type>, plus a dedicated Icinga DB state record for checkables
+ * ("icinga:hoststate.*" / "icinga:servicestate.*" keys consumed by Icinga Web 2).
+ *
+ * Fix: "schedule_end" was populated from cr->GetScheduleStart() (copy-paste);
+ * it now uses cr->GetScheduleEnd(). */
+ AssertOnWorkQueue();
+
+ /* during startup we might receive check results, ignore them without any connection */
+ if (!m_Context)
+ return;
+
+ /* Serialize state object attributes */
+ Dictionary::Ptr objectAttrs = SerializeObjectAttrs(object, FAState);
+
+ String jsonBody = JsonEncode(objectAttrs);
+
+ String objectName = object->GetName();
+
+ ExecuteQuery({ "HSET", "icinga:status:" + typeName, objectName, jsonBody });
+
+ /* Icinga DB part for Icinga Web 2 */
+ Checkable::Ptr checkable = dynamic_pointer_cast<Checkable>(object);
+
+ if (checkable) {
+ Dictionary::Ptr attrs = new Dictionary();
+ String tableName;
+ String objectCheckSum = CalculateCheckSumString(objectName, true); //store binary checksum here
+
+ Host::Ptr host;
+ Service::Ptr service;
+
+ tie(host, service) = GetHostService(checkable);
+
+ if (service) {
+ tableName = "servicestate";
+ attrs->Set("service_checksum", objectCheckSum);
+ attrs->Set("host_checksum", CalculateCheckSumString(host->GetName(), true));
+ } else {
+ tableName = "hoststate";
+ attrs->Set("host_checksum", objectCheckSum);
+ }
+
+ attrs->Set("last_check", checkable->GetLastCheck());
+ attrs->Set("next_check", checkable->GetNextCheck());
+
+ attrs->Set("severity", checkable->GetSeverity());
+
+ /* Key layout: "icinga:<hoststate|servicestate>.<binary SHA1 of the object name>"
+ * holding a JSON document with the command, execution/schedule times, exit
+ * status, output and performance data of the last check result. */
+
+ CheckResult::Ptr cr = checkable->GetLastCheckResult();
+
+ if (cr) {
+ attrs->Set("command", JsonEncode(cr->GetCommand()));
+ attrs->Set("execution_start", cr->GetExecutionStart());
+ attrs->Set("execution_end", cr->GetExecutionEnd());
+ attrs->Set("schedule_start", cr->GetScheduleStart());
+ attrs->Set("schedule_end", cr->GetScheduleEnd());
+ attrs->Set("exit_status", cr->GetExitStatus());
+ attrs->Set("output", cr->GetOutput());
+ attrs->Set("performance_data", JsonEncode(cr->GetPerformanceData()));
+ }
+
+ String jsonAttrs = JsonEncode(attrs);
+ String key = "icinga:" + tableName + "." + objectCheckSum;
+ ExecuteQuery({ "SET", key, jsonAttrs });
+
+ /* expire in check_interval * attempts + timeout + some more seconds */
+ double expireTime = checkable->GetCheckInterval() * checkable->GetMaxCheckAttempts() + 60;
+ ExecuteQuery({ "EXPIRE", key, String(expireTime) });
+ }
+}
+
+void RedisWriter::StateChangedHandler(const ConfigObject::Ptr& object)
+{
+ /* Static signal handler: enqueue a status update on every RedisWriter's work
+ * queue. Binding the smart pointer keeps each writer alive until the task runs. */
+ Type::Ptr type = object->GetReflectionType();
+
+ for (const RedisWriter::Ptr& rw : ConfigType::GetObjectsByType<RedisWriter>()) {
+ rw->m_WorkQueue.Enqueue(boost::bind(&RedisWriter::SendStatusUpdate, rw, object, type->GetName()));
+ }
+}
+
+void RedisWriter::VarsChangedHandler(const ConfigObject::Ptr& object)
+{
+ /* Static signal handler: custom-var changes trigger a runtime config update
+ * (runtimeUpdate = true publishes the refreshed checksums immediately). */
+ Type::Ptr type = object->GetReflectionType();
+
+ for (const RedisWriter::Ptr& rw : ConfigType::GetObjectsByType<RedisWriter>()) {
+ rw->m_WorkQueue.Enqueue(boost::bind(&RedisWriter::SendConfigUpdate, rw, object, type->GetName(), true));
+ }
+}
+
+void RedisWriter::VersionChangedHandler(const ConfigObject::Ptr& object)
+{
+ /* Static signal handler for object creation, update and deletion.
+ *
+ * Fix: bind the smart pointer (rw), not rw.get() -- a raw pointer does not
+ * keep the writer alive until the queued task executes, risking a
+ * use-after-free during shutdown. This also makes the bindings consistent
+ * with StateChangedHandler() and VarsChangedHandler(). */
+ Type::Ptr type = object->GetReflectionType();
+
+ if (object->IsActive()) {
+ /* Create or update the object config */
+ for (const RedisWriter::Ptr& rw : ConfigType::GetObjectsByType<RedisWriter>()) {
+ rw->m_WorkQueue.Enqueue(boost::bind(&RedisWriter::SendConfigUpdate, rw, object, type->GetName(), true));
+ }
+ } else if (!object->IsActive() && object->GetExtension("ConfigObjectDeleted")) { /* same as in apilistener-configsync.cpp */
+ /* Delete object config */
+ for (const RedisWriter::Ptr& rw : ConfigType::GetObjectsByType<RedisWriter>()) {
+ rw->m_WorkQueue.Enqueue(boost::bind(&RedisWriter::SendConfigDelete, rw, object, type->GetName()));
+ }
+ }
+}
--- /dev/null
+/******************************************************************************
+ * Icinga 2 *
+ * Copyright (C) 2012-2017 Icinga Development Team (https://www.icinga.com/) *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the Free Software Foundation *
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. *
+ ******************************************************************************/
+
+#include "redis/rediswriter.hpp"
+#include "icinga/customvarobject.hpp"
+#include "base/json.hpp"
+#include "base/logger.hpp"
+#include "base/serializer.hpp"
+#include "base/tlsutility.hpp"
+#include "base/initialize.hpp"
+
+using namespace icinga;
+
+String RedisWriter::FormatCheckSumBinary(const String& str)
+{
+ /* Converts a 20-byte binary SHA1 digest into its 40-character hex representation.
+ * NOTE(review): assumes str holds at least 20 bytes -- callers pass SHA1(..., true).
+ *
+ * Fix: cast each byte to unsigned char. With a plain (signed) char, a byte
+ * >= 0x80 is sign-extended to a negative int and "%02x" prints 8 hex digits
+ * ("ffffffxx"), overflowing the 41-byte output buffer. */
+ char output[20*2+1];
+ for (int i = 0; i < 20; i++)
+ sprintf(output + 2 * i, "%02x", static_cast<unsigned char>(str[i]));
+
+ return output;
+}
+
+String RedisWriter::CalculateCheckSumString(const String& str, bool binary)
+{
+ /* SHA1 of a plain string; binary selects the raw digest vs. hex representation. */
+ return SHA1(str, binary);
+}
+
+String RedisWriter::CalculateCheckSumGroups(const Array::Ptr& groups, bool binary)
+{
+ /* Order-dependent checksum over group names: concatenate each group's binary
+ * SHA1, then hash the concatenation. NOTE(review): assumes a non-null groups
+ * array -- current callers obtain it via Get*Groups(); verify that contract. */
+ String output;
+
+ ObjectLock olock(groups);
+
+ for (const String& group : groups) {
+ output += SHA1(group, true); //binary checksum required here
+ }
+
+ return SHA1(output, binary);
+}
+
+String RedisWriter::CalculateCheckSumProperties(const ConfigObject::Ptr& object, bool binary)
+{
+ /* Checksum over the object's serialized FAConfig attributes (see HashValue()). */
+ //TODO: consider precision of 6 for double values; use specific config fields for hashing?
+ return HashValue(object, binary);
+}
+
+String RedisWriter::CalculateCheckSumVars(const ConfigObject::Ptr& object, bool binary)
+{
+ /* Checksum over an object's custom variables. Objects that carry no vars (or
+ * cannot carry vars at all) hash the Empty value so the result stays stable. */
+ CustomVarObject::Ptr customVarObject = dynamic_pointer_cast<CustomVarObject>(object);
+
+ if (!customVarObject)
+ return HashValue(Empty, binary);
+
+ Dictionary::Ptr vars = customVarObject->GetVars();
+
+ if (!vars)
+ return HashValue(Empty, binary);
+
+ return HashValue(vars, binary);
+}
+
+String RedisWriter::HashValue(const Value& value, bool binary)
+{
+ /* SHA1 over the JSON serialization of a value. Config objects are reduced to
+ * their FAConfig attributes first so the hash only covers configuration. */
+ Value temp;
+
+ Type::Ptr type = value.GetReflectionType();
+
+ if (ConfigObject::TypeInstance->IsAssignableFrom(type))
+ temp = Serialize(value, FAConfig);
+ else
+ temp = value;
+
+ return SHA1(JsonEncode(temp), binary);
+}
+
+Dictionary::Ptr RedisWriter::SerializeObjectAttrs(const Object::Ptr& object, int fieldType)
+{
+ /* Serializes every field of the object matching fieldType (FAConfig or FAState)
+ * into a dictionary keyed by field name.
+ *
+ * Improvement: the visibility filters now run before object->GetField(), so
+ * values of hidden fields are no longer fetched needlessly. */
+ Type::Ptr type = object->GetReflectionType();
+
+ Dictionary::Ptr resultAttrs = new Dictionary();
+
+ for (int fid = 0; fid < type->GetFieldCount(); fid++) {
+ Field field = type->GetFieldInfo(fid);
+
+ if ((field.Attributes & fieldType) == 0)
+ continue;
+
+ /* hide attributes which shouldn't be user-visible */
+ if (field.Attributes & FANoUserView)
+ continue;
+
+ /* hide internal navigation fields */
+ if (field.Attributes & FANavigation && !(field.Attributes & (FAConfig | FAState)))
+ continue;
+
+ Value val = object->GetField(fid);
+ Value sval = Serialize(val);
+ resultAttrs->Set(field.Name, sval);
+ }
+
+ return resultAttrs;
+}
+
#include "redis/rediswriter.tcpp"
#include "remote/eventqueue.hpp"
#include "base/json.hpp"
+#include "base/statsfunction.hpp"
using namespace icinga;
REGISTER_TYPE(RedisWriter);
+RedisWriter::RedisWriter(void)
+ : m_Context(NULL) /* no Redis connection until the reconnect timer establishes one */
+{ }
+
/**
* Starts the component.
*/
Log(LogInformation, "RedisWriter")
<< "'" << GetName() << "' started.";
- boost::thread thread(boost::bind(&RedisWriter::ConnectionThreadProc, this));
+ m_ConfigDumpInProgress = false;
+
+ m_WorkQueue.SetExceptionCallback(boost::bind(&RedisWriter::ExceptionHandler, this, _1));
+
+ m_ReconnectTimer = new Timer();
+ m_ReconnectTimer->SetInterval(15);
+ m_ReconnectTimer->OnTimerExpired.connect(boost::bind(&RedisWriter::ReconnectTimerHandler, this));
+ m_ReconnectTimer->Start();
+ m_ReconnectTimer->Reschedule(0);
+
+ m_SubscriptionTimer = new Timer();
+ m_SubscriptionTimer->SetInterval(15);
+ m_SubscriptionTimer->OnTimerExpired.connect(boost::bind(&RedisWriter::UpdateSubscriptionsTimerHandler, this));
+ m_SubscriptionTimer->Start();
+
+ m_StatsTimer = new Timer();
+ m_StatsTimer->SetInterval(10);
+ m_StatsTimer->OnTimerExpired.connect(boost::bind(&RedisWriter::PublishStatsTimerHandler, this));
+ m_StatsTimer->Start();
+
+ boost::thread thread(boost::bind(&RedisWriter::HandleEvents, this));
thread.detach();
}
-void RedisWriter::ConnectionThreadProc(void)
+void RedisWriter::ExceptionHandler(boost::exception_ptr exp)
+{
+ /* Work-queue exception callback: log the failure and drop the connection so
+ * the reconnect timer can establish a fresh context on its next tick. */
+ Log(LogCritical, "RedisWriter", "Exception during redis query. Verify that Redis is operational.");
+
+ Log(LogDebug, "RedisWriter")
+ << "Exception during redis operation: " << DiagnosticInformation(exp);
+
+ if (m_Context) {
+ redisFree(m_Context);
+ m_Context = NULL;
+ }
+}
+
+void RedisWriter::ReconnectTimerHandler(void)
{
+ m_WorkQueue.Enqueue(boost::bind(&RedisWriter::TryToReconnect, this));
+}
+
+void RedisWriter::TryToReconnect(void)
+{
+ AssertOnWorkQueue();
+
+ if (m_Context)
+ return;
+
String path = GetPath();
String host = GetHost();
Log(LogWarning, "RedisWriter", "Connection error: ")
<< m_Context->errstr;
}
+
+ if (m_Context) {
+ redisFree(m_Context);
+ m_Context = NULL;
+ }
+
+ return;
}
- for (;;) {
- String password = GetPassword();
+ String password = GetPassword();
+
+ /* TODO: exception is fired but terminates reconnect silently.
+ * Error case: Password does not match, or even: "Client sent AUTH, but no password is set" which also results in an error.
+ */
+ if (!password.IsEmpty())
+ ExecuteQuery({ "AUTH", password });
+
+ int dbIndex = GetDbIndex();
- if (!password.IsEmpty()) {
- redisReply *reply = reinterpret_cast<redisReply *>(redisCommand(m_Context, "AUTH %s", password.CStr()));
+ if (dbIndex != 0)
+ ExecuteQuery({ "SELECT", Convert::ToString(dbIndex) });
- if (reply->type == REDIS_REPLY_STATUS || reply->type == REDIS_REPLY_ERROR) {
- Log(LogInformation, "RedisWriter")
- << "AUTH: " << reply->str;
- }
+ /* Config dump */
+ m_ConfigDumpInProgress = true;
- freeReplyObject(reply);
+ UpdateAllConfigObjects();
+
+ m_ConfigDumpInProgress = false;
+}
+
+void RedisWriter::UpdateSubscriptionsTimerHandler(void)
+{
+ /* Timer callback: defer the subscription scan onto the work queue thread. */
+ m_WorkQueue.Enqueue(boost::bind(&RedisWriter::UpdateSubscriptions, this));
+}
+
+void RedisWriter::UpdateSubscriptions(void)
+{
+ AssertOnWorkQueue();
+
+ if (!m_Context)
+ return;
+
+ Log(LogInformation, "RedisWriter", "Updating Redis subscriptions");
+
+ std::map<String, String> subscriptions;
+ long long cursor = 0;
+
+ do {
+ boost::shared_ptr<redisReply> reply = ExecuteQuery({ "SCAN", Convert::ToString(cursor), "MATCH", "icinga:subscription:*", "COUNT", "1000" });
+
+ VERIFY(reply->type == REDIS_REPLY_ARRAY);
+ VERIFY(reply->elements % 2 == 0);
+
+ redisReply *cursorReply = reply->element[0];
+ cursor = Convert::ToLong(cursorReply->str);
+
+ redisReply *keysReply = reply->element[1];
+
+ for (size_t i = 0; i < keysReply->elements; i++) {
+ redisReply *keyReply = keysReply->element[i];
+ VERIFY(keyReply->type == REDIS_REPLY_STRING);
+
+ boost::shared_ptr<redisReply> vreply = ExecuteQuery({ "GET", keyReply->str });
+
+ subscriptions[keyReply->str] = vreply->str;
}
+ } while (cursor != 0);
+
+ m_Subscriptions.clear();
+
+ for (const std::pair<String, String>& kv : subscriptions) {
+ const String& key = kv.first.SubStr(20); /* removes the "icinga:subscription: prefix */
+ const String& value = kv.second;
+
+ try {
+ Dictionary::Ptr subscriptionInfo = JsonDecode(value);
+
+ Log(LogInformation, "RedisWriter")
+ << "Subscriber Info - Key: " << key << " Value: " << Value(subscriptionInfo);
- HandleEvents();
+ RedisSubscriptionInfo rsi;
- for (;;) {
- Log(LogInformation, "RedisWriter", "Trying to reconnect to redis server");
+ Array::Ptr types = subscriptionInfo->Get("types");
- if (redisReconnect(m_Context) == REDIS_OK) {
- Log(LogInformation, "RedisWriter", "Connection to redis server was reestablished");
- break;
- }
+ if (types)
+ rsi.EventTypes = types->ToSet<String>();
- Log(LogInformation, "RedisWriter", "Unable to reconnect to redis server: Waiting for next attempt");
+ m_Subscriptions[key] = rsi;
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "RedisWriter")
+ << "Invalid Redis subscriber info for subscriber '" << key << "': " << DiagnosticInformation(ex);
- Utility::Sleep(15);
+ continue;
}
+ //TODO
}
+
+ Log(LogInformation, "RedisWriter")
+ << "Current Redis event subscriptions: " << m_Subscriptions.size();
+}
+
+void RedisWriter::PublishStatsTimerHandler(void)
+{
+ /* Timer callback: defer stats publishing onto the work queue thread. */
+ m_WorkQueue.Enqueue(boost::bind(&RedisWriter::PublishStats, this));
+}
+
+void RedisWriter::PublishStats(void)
+{
+ /* Publishes the CIB status summary as JSON on the icinga:stats channel. */
+ AssertOnWorkQueue();
+
+ if (!m_Context)
+ return;
+
+ //TODO: Figure out if more stats can be useful here.
+ /* NOTE(review): assumes the "CIB" stats function is always registered;
+ * a null func would crash on Invoke() -- confirm registration order. */
+ StatsFunction::Ptr func = StatsFunctionRegistry::GetInstance()->GetItem("CIB");
+ Dictionary::Ptr status = new Dictionary();
+ Array::Ptr perfdata = new Array();
+ func->Invoke(status, perfdata);
+ String jsonStats = JsonEncode(status);
+
+ ExecuteQuery({ "PUBLISH", "icinga:stats", jsonStats });
+}
void RedisWriter::HandleEvents(void)
queue->AddClient(this);
for (;;) {
- Dictionary::Ptr result = queue->WaitForEvent(this);
+ Dictionary::Ptr event = queue->WaitForEvent(this);
- if (!result)
+ if (!event)
continue;
- String body = JsonEncode(result);
+ m_WorkQueue.Enqueue(boost::bind(&RedisWriter::HandleEvent, this, event));
+ }
- redisReply *reply = reinterpret_cast<redisReply *>(redisCommand(m_Context, "LPUSH icinga:events %s", body.CStr()));
+ queue->RemoveClient(this);
+ EventQueue::UnregisterIfUnused(queueName, queue);
+}
- if (!reply)
- break;
+void RedisWriter::HandleEvent(const Dictionary::Ptr& event)
+{
+ AssertOnWorkQueue();
- if (reply->type == REDIS_REPLY_STATUS || reply->type == REDIS_REPLY_ERROR) {
- Log(LogInformation, "RedisWriter")
- << "LPUSH icinga:events: " << reply->str;
- }
+ if (!m_Context)
+ return;
- if (reply->type == REDIS_REPLY_ERROR) {
- freeReplyObject(reply);
- break;
- }
+ for (const std::pair<String, RedisSubscriptionInfo>& kv : m_Subscriptions) {
+ const auto& name = kv.first;
+ const auto& rsi = kv.second;
- freeReplyObject(reply);
- }
+ if (rsi.EventTypes.find(event->Get("type")) == rsi.EventTypes.end())
+ continue;
- queue->RemoveClient(this);
- EventQueue::UnregisterIfUnused(queueName, queue);
+ String body = JsonEncode(event);
+
+ ExecuteQuery({ "LPUSH", "icinga:event:" + name, body });
+ }
}
void RedisWriter::Stop(bool runtimeRemoved)
ObjectImpl<RedisWriter>::Stop(runtimeRemoved);
}
+
+void RedisWriter::AssertOnWorkQueue(void)
+{
+ /* Debug guard: all Redis I/O must happen on the writer's work queue thread. */
+ ASSERT(m_WorkQueue.IsWorkerThread());
+}
+
+boost::shared_ptr<redisReply> RedisWriter::ExecuteQuery(const std::vector<String>& query)
+{
+ /* Executes one Redis command built from the argument vector and returns the
+ * reply. Throws redis_error on connection failures or error replies.
+ *
+ * Fixes:
+ * - a NULL reply (hiredis connection/OOM error) was dereferenced at
+ * reply->type; it is now detected and reported as redis_error
+ * - the reply was released by the shared_ptr default deleter (operator
+ * delete); hiredis replies must be freed with freeReplyObject(), which is
+ * now attached as the custom deleter
+ * - manual new[]/delete[] replaced by std::vector */
+ std::vector<const char *> argv(query.size());
+ std::vector<size_t> argvlen(query.size());
+
+ for (std::vector<String>::size_type i = 0; i < query.size(); i++) {
+ argv[i] = query[i].CStr();
+ argvlen[i] = query[i].GetLength();
+ }
+
+ redisReply *reply = reinterpret_cast<redisReply *>(redisCommandArgv(m_Context, query.size(), &argv[0], &argvlen[0]));
+
+ if (!reply) {
+ /* hiredis signals connection-level errors with a NULL reply; details live in m_Context->errstr */
+ BOOST_THROW_EXCEPTION(
+ redis_error()
+ << errinfo_message(m_Context ? m_Context->errstr : "Redis connection lost")
+ << errinfo_redis_query(Utility::Join(Array::FromVector(query), ' ', false))
+ );
+ }
+
+ if (reply->type == REDIS_REPLY_ERROR) {
+ Log(LogCritical, "RedisWriter")
+ << "Redis query failed: " << reply->str;
+
+ String msg = reply->str;
+
+ freeReplyObject(reply);
+
+ BOOST_THROW_EXCEPTION(
+ redis_error()
+ << errinfo_message(msg)
+ << errinfo_redis_query(Utility::Join(Array::FromVector(query), ' ', false))
+ );
+ }
+
+ return boost::shared_ptr<redisReply>(reply, freeReplyObject);
+}
#include "redis/rediswriter.thpp"
#include "remote/messageorigin.hpp"
#include "base/timer.hpp"
+#include "base/workqueue.hpp"
#include <hiredis/hiredis.h>
namespace icinga
{
+struct RedisSubscriptionInfo
+{
+ /* Event type names a Redis subscriber has registered interest in. */
+ std::set<String> EventTypes;
+};
+
/**
* @ingroup redis
*/
DECLARE_OBJECT(RedisWriter);
DECLARE_OBJECTNAME(RedisWriter);
+ RedisWriter(void);
+
+ static void ConfigStaticInitialize(void);
+
virtual void Start(bool runtimeCreated) override;
virtual void Stop(bool runtimeRemoved) override;
private:
- void ConnectionThreadProc(void);
+ void ReconnectTimerHandler(void);
+ void TryToReconnect(void);
void HandleEvents(void);
+ void HandleEvent(const Dictionary::Ptr& event);
+
+ void UpdateSubscriptionsTimerHandler(void);
+ void UpdateSubscriptions(void);
+ void PublishStatsTimerHandler(void);
+ void PublishStats(void);
+
+ /* config & status dump */
+ void UpdateAllConfigObjects(void);
+ void SendConfigUpdate(const ConfigObject::Ptr& object, const String& typeName, bool runtimeUpdate = false);
+ void SendConfigDelete(const ConfigObject::Ptr& object, const String& typeName);
+ void SendStatusUpdate(const ConfigObject::Ptr& object, const String& typeName);
+
+ /* utilities */
+ static String FormatCheckSumBinary(const String& str);
+ static String CalculateCheckSumString(const String& str, bool binary = false);
+ static String CalculateCheckSumGroups(const Array::Ptr& groups, bool binary = false);
+ static String CalculateCheckSumProperties(const ConfigObject::Ptr& object, bool binary = false);
+ static String CalculateCheckSumVars(const ConfigObject::Ptr& object, bool binary = false);
+
+ static String HashValue(const Value& value, bool binary = false);
+ static Dictionary::Ptr SerializeObjectAttrs(const Object::Ptr& object, int fieldType);
+
+ static void StateChangedHandler(const ConfigObject::Ptr& object);
+ static void VarsChangedHandler(const ConfigObject::Ptr& object);
+ static void VersionChangedHandler(const ConfigObject::Ptr& object);
+
+ void AssertOnWorkQueue(void);
+
+ void ExceptionHandler(boost::exception_ptr exp);
+
+ boost::shared_ptr<redisReply> ExecuteQuery(const std::vector<String>& query);
+
+ Timer::Ptr m_StatsTimer;
+ Timer::Ptr m_ReconnectTimer;
+ Timer::Ptr m_SubscriptionTimer;
+ WorkQueue m_WorkQueue;
redisContext *m_Context;
+ std::map<String, RedisSubscriptionInfo> m_Subscriptions;
+ bool m_ConfigDumpInProgress;
};
+/* Exception type thrown for failed Redis commands. */
+struct redis_error : virtual std::exception, virtual boost::exception { };
+
+/* boost::error_info tag carrying the offending Redis command text. */
+struct errinfo_redis_query_;
+typedef boost::error_info<struct errinfo_redis_query_, std::string> errinfo_redis_query;
+
}
#endif /* REDISWRITER_H */
};
[config] String path;
[config] String password;
+ [config] int db_index;
};
}
CONTEXT("Creating config update for file '" + file + "'");
Log(LogNotice, "ApiListener")
- << "Creating config update for file '" << file << "'";
+ << "Creating config update for file '" << file << "'.";
std::ifstream fp(file.CStr(), std::ifstream::binary);
if (!fp)
double oldTimestamp;
- if (!oldConfig->Contains(".timestamp"))
+ if (!oldConfig->Contains("/.timestamp"))
oldTimestamp = 0;
else
- oldTimestamp = oldConfig->Get(".timestamp");
+ oldTimestamp = oldConfig->Get("/.timestamp");
double newTimestamp;
- if (!newConfig->Contains(".timestamp"))
+ if (!newConfig->Contains("/.timestamp"))
newTimestamp = Utility::GetTime();
else
- newTimestamp = newConfig->Get(".timestamp");
+ newTimestamp = newConfig->Get("/.timestamp");
- /* skip update if our config is newer */
- if (oldTimestamp >= newTimestamp)
+ /* skip update if our configuration files are more recent */
+ if (oldTimestamp >= newTimestamp) {
+ Log(LogInformation, "ApiListener")
+ << "Cannot apply configuration file update for path '" << configDir << "'. Current timestamp '"
+ << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", oldTimestamp) << "' ("
+ << std::fixed << std::setprecision(6) << oldTimestamp
+ << ") is more recent than received timestamp '"
+ << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", newTimestamp) << "' ("
+ << newTimestamp << ").";
return false;
+ }
+
+ Log(LogInformation, "ApiListener")
+ << "Applying configuration file update for path '" << configDir << "'. Received timestamp '"
+ << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", newTimestamp) << "' ("
+ << std::fixed << std::setprecision(6) << newTimestamp
+ << ") is more recent than current timestamp '"
+ << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", oldTimestamp) << "' ("
+ << oldTimestamp << ").";
{
ObjectLock olock(newConfig);
return Empty;
}
+ Log(LogInformation, "ApiListener")
+ << "Applying config update from endpoint '" << origin->FromClient->GetEndpoint()->GetName() << "' of zone '"
+ << origin->FromZone->GetName() << "'";
+
Dictionary::Ptr updateV1 = params->Get("update");
Dictionary::Ptr updateV2 = params->Get("update_v2");
{
std::vector<Object::Ptr> parents = DependencyGraph::GetParents(object);
+ Type::Ptr type = object->GetReflectionType();
+
if (!parents.empty() && !cascade) {
if (errors)
- errors->Add("Object cannot be deleted because other objects depend on it. "
+ errors->Add("Object '" + object->GetName() + "' of type '" + type->GetName() +
+ "' cannot be deleted because other objects depend on it. "
"Use cascading delete to delete it anyway.");
return false;
DeleteObjectHelper(parentObj, cascade, errors);
}
- Type::Ptr type = object->GetReflectionType();
-
ConfigItem::Ptr item = ConfigItem::GetByTypeAndName(type->GetName(), object->GetName());
try {
#include "remote/createobjecthandler.hpp"
#include "remote/configobjectutility.hpp"
#include "remote/httputility.hpp"
+#include "remote/jsonrpcconnection.hpp"
#include "remote/filterutility.hpp"
#include "remote/apiaction.hpp"
#include "remote/zone.hpp"
result1->Set("user", user->GetName());
result1->Set("permissions", Array::FromVector(permInfo));
result1->Set("version", Application::GetAppVersion());
- result1->Set("info", "More information about API requests is available in the documentation at http://docs.icinga.com/icinga2/latest.");
+ result1->Set("info", "More information about API requests is available in the documentation at https://docs.icinga.com/icinga2/latest.");
Array::Ptr results = new Array();
results->Add(result1);
} else
body += "Your user does not have any permissions.</p>";
- body += "<p>More information about API requests is available in the <a href=\"http://docs.icinga.com/icinga2/latest\" target=\"_blank\">documentation</a>.</p></html>";
+ body += "<p>More information about API requests is available in the <a href=\"https://docs.icinga.com/icinga2/latest\" target=\"_blank\">documentation</a>.</p></html>";
response.WriteBody(body.CStr(), body.GetLength());
}
return true;
}
- Dictionary::Ptr attrs = params->Get("attrs");
+ Value attrsVal = params->Get("attrs");
+
+ if (attrsVal.GetReflectionType() != Dictionary::TypeInstance) {
+ HttpUtility::SendJsonError(response, 400,
+ "Invalid type for 'attrs' attribute specified. Dictionary type is required.", Empty);
+ return true;
+ }
+
+ Dictionary::Ptr attrs = attrsVal;
Array::Ptr results = new Array();
Dictionary::Ptr resultAttrs = new Dictionary();
- for (int& fid : fids)
+ for (int fid : fids)
{
Field field = type->GetFieldInfo(fid);
if (response.dropped == printInfo.num) {
std::wcout << L"PING CRITICAL ALL CONNECTIONS DROPPED | " << perf.str() << '\n';
- return 3;
+ return 2;
}
switch (state) {
*/
criteria = SysAllocString(CRITERIA);
- // http://msdn.microsoft.com/en-us/library/windows/desktop/aa386526%28v=vs.85%29.aspx
- // http://msdn.microsoft.com/en-us/library/ff357803%28v=vs.85%29.aspx
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/aa386526%28v=vs.85%29.aspx
+ // https://msdn.microsoft.com/en-us/library/ff357803%28v=vs.85%29.aspx
if (debug)
std::wcout << L"Querrying updates from server" << '\n';
add_subdirectory(execvpe)
endif()
-add_subdirectory(socketpair)
-add_subdirectory(hiredis)
+if(ICINGA2_WITH_REDIS)
+ add_subdirectory(hiredis)
+endif()
+
+add_subdirectory(socketpair)
\ No newline at end of file
- gcc
- clang
-os:
- - linux
- - osx
-
-before_script:
- - if [ "$TRAVIS_OS_NAME" == "osx" ] ; then brew update; brew install redis; fi
-
addons:
apt:
packages:
- TARGET="32bit" TARGET_VARS="32bit-vars" CFLAGS="-Werror"
- TARGET="32bit" TARGET_VARS="32bit-vars" PRE="valgrind --track-origins=yes --leak-check=full"
-matrix:
- exclude:
- - os: osx
- env: PRE="valgrind --track-origins=yes --leak-check=full"
-
- - os: osx
- env: TARGET="32bit" TARGET_VARS="32bit-vars" PRE="valgrind --track-origins=yes --leak-check=full"
-
script: make $TARGET CFLAGS="$CFLAGS" && make check PRE="$PRE" && make $TARGET_VARS hiredis-example
-### 1.0.0 (unreleased)
-
-**Fixes**:
-
-* Catch a buffer overflow when formatting the error message
-* Import latest upstream sds. This breaks applications that are linked against the old hiredis v0.13
-* Fix warnings, when compiled with -Wshadow
-* Make hiredis compile in Cygwin on Windows, now CI-tested
-
-**BREAKING CHANGES**:
-
-* Change `redisReply.len` to `size_t`, as it denotes the the size of a string
-
-User code should compare this to `size_t` values as well.
-If it was used to compare to other values, casting might be necessary or can be removed, if casting was applied before.
-
-* Remove backwards compatibility macro's
-
-This removes the following old function aliases, use the new name now:
-
-| Old | New |
-| --------------------------- | ---------------------- |
-| redisReplyReaderCreate | redisReaderCreate |
-| redisReplyReaderCreate | redisReaderCreate |
-| redisReplyReaderFree | redisReaderFree |
-| redisReplyReaderFeed | redisReaderFeed |
-| redisReplyReaderGetReply | redisReaderGetReply |
-| redisReplyReaderSetPrivdata | redisReaderSetPrivdata |
-| redisReplyReaderGetObject | redisReaderGetObject |
-| redisReplyReaderGetError | redisReaderGetError |
-
-* The `DEBUG` variable in the Makefile was renamed to `DEBUG_FLAGS`
-
-Previously it broke some builds for people that had `DEBUG` set to some arbitrary value,
-due to debugging other software.
-By renaming we avoid unintentional name clashes.
-
-Simply rename `DEBUG` to `DEBUG_FLAGS` in your environment to make it working again.
-
### 0.13.3 (2015-09-16)
* Revert "Clear `REDIS_CONNECTED` flag when connection is closed".
CXX:=$(shell sh -c 'type $(CXX) >/dev/null 2>/dev/null && echo $(CXX) || echo g++')
OPTIMIZATION?=-O3
WARNINGS=-Wall -W -Wstrict-prototypes -Wwrite-strings
-DEBUG_FLAGS?= -g -ggdb
-REAL_CFLAGS=$(OPTIMIZATION) -fPIC $(CFLAGS) $(WARNINGS) $(DEBUG_FLAGS) $(ARCH)
+DEBUG?= -g -ggdb
+REAL_CFLAGS=$(OPTIMIZATION) -fPIC $(CFLAGS) $(WARNINGS) $(DEBUG) $(ARCH)
REAL_LDFLAGS=$(LDFLAGS) $(ARCH)
DYLIBSUFFIX=so
[![Build Status](https://travis-ci.org/redis/hiredis.png)](https://travis-ci.org/redis/hiredis)
-**This Readme reflects the latest changed in the master branch. See [v0.13.3](https://github.com/redis/hiredis/tree/v0.13.3) for the Readme and documentation for the latest release.**
-
# HIREDIS
Hiredis is a minimalistic C client library for the [Redis](http://redis.io/) database.
The library comes with multiple APIs. There is the
*synchronous API*, the *asynchronous API* and the *reply parsing API*.
-## Upgrading to `1.0.0`
-
-Version 1.0.0 marks a stable release of hiredis.
-It includes some minor breaking changes, mostly to make the exposed API more uniform and self-explanatory.
-It also bundles the updated `sds` library, to sync up with upstream and Redis.
-For most applications a recompile against the new hiredis should be enough.
-For code changes see the [Changelog](CHANGELOG.md).
-
-## Upgrading from `<0.9.0`
+## UPGRADING
Version 0.9.0 is a major overhaul of hiredis in every aspect. However, upgrading existing
code using hiredis should not be a big pain. The key thing to keep in mind when
check the `err` field to see if establishing the connection was successful:
```c
redisContext *c = redisConnect("127.0.0.1", 6379);
-if (c == NULL || c->err) {
- if (c) {
- printf("Error: %s\n", c->errstr);
- // handle error
- } else {
- printf("Can't allocate redis context\n");
- }
+if (c != NULL && c->err) {
+ printf("Error: %s\n", c->errstr);
+ // handle error
}
```
-*Note: A `redisContext` is not thread-safe.*
-
### Sending commands
There are several ways to issue commands to Redis. The first that will be introduced is
should be checked after creation to see if there were errors creating the connection.
Because the connection that will be created is non-blocking, the kernel is not able to
instantly return if the specified host and port is able to accept a connection.
-
-*Note: A `redisAsyncContext` is not thread-safe.*
-
```c
redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379);
if (c->err) {
#ifndef __HIREDIS_LIBEVENT_H__
#define __HIREDIS_LIBEVENT_H__
-#include <event2/event.h>
+#include <event.h>
#include "../hiredis.h"
#include "../async.h"
typedef struct redisLibeventEvents {
redisAsyncContext *context;
- struct event *rev, *wev;
+ struct event rev, wev;
} redisLibeventEvents;
static void redisLibeventReadEvent(int fd, short event, void *arg) {
static void redisLibeventAddRead(void *privdata) {
redisLibeventEvents *e = (redisLibeventEvents*)privdata;
- event_add(e->rev,NULL);
+ event_add(&e->rev,NULL);
}
static void redisLibeventDelRead(void *privdata) {
redisLibeventEvents *e = (redisLibeventEvents*)privdata;
- event_del(e->rev);
+ event_del(&e->rev);
}
static void redisLibeventAddWrite(void *privdata) {
redisLibeventEvents *e = (redisLibeventEvents*)privdata;
- event_add(e->wev,NULL);
+ event_add(&e->wev,NULL);
}
static void redisLibeventDelWrite(void *privdata) {
redisLibeventEvents *e = (redisLibeventEvents*)privdata;
- event_del(e->wev);
+ event_del(&e->wev);
}
static void redisLibeventCleanup(void *privdata) {
redisLibeventEvents *e = (redisLibeventEvents*)privdata;
- event_del(e->rev);
- event_del(e->wev);
+ event_del(&e->rev);
+ event_del(&e->wev);
free(e);
}
ac->ev.data = e;
/* Initialize and install read/write events */
- e->rev = event_new(base, c->fd, EV_READ, redisLibeventReadEvent, e);
- e->wev = event_new(base, c->fd, EV_WRITE, redisLibeventWriteEvent, e);
- event_add(e->rev, NULL);
- event_add(e->wev, NULL);
+ event_set(&e->rev,c->fd,EV_READ,redisLibeventReadEvent,e);
+ event_set(&e->wev,c->fd,EV_WRITE,redisLibeventWriteEvent,e);
+ event_base_set(base,&e->rev);
+ event_base_set(base,&e->wev);
return REDIS_OK;
}
#endif
return;
}
- if (p->context != NULL && (events & UV_READABLE)) {
+ if (events & UV_READABLE) {
redisAsyncHandleRead(p->context);
}
- if (p->context != NULL && (events & UV_WRITABLE)) {
+ if (events & UV_WRITABLE) {
redisAsyncHandleWrite(p->context);
}
}
static void redisLibuvCleanup(void *privdata) {
redisLibuvEvents* p = (redisLibuvEvents*)privdata;
- p->context = NULL; // indicate that context might no longer exist
uv_close((uv_handle_t*)&p->handle, on_close);
}
+++ /dev/null
-# Appveyor configuration file for CI build of hiredis on Windows (under Cygwin)
-environment:
- matrix:
- - CYG_ROOT: C:\cygwin64
- CYG_SETUP: setup-x86_64.exe
- CYG_MIRROR: http://cygwin.mirror.constant.com
- CYG_CACHE: C:\cygwin64\var\cache\setup
- CYG_BASH: C:\cygwin64\bin\bash
- CC: gcc
- - CYG_ROOT: C:\cygwin
- CYG_SETUP: setup-x86.exe
- CYG_MIRROR: http://cygwin.mirror.constant.com
- CYG_CACHE: C:\cygwin\var\cache\setup
- CYG_BASH: C:\cygwin\bin\bash
- CC: gcc
- TARGET: 32bit
- TARGET_VARS: 32bit-vars
-
-# Cache Cygwin files to speed up build
-cache:
- - '%CYG_CACHE%'
-clone_depth: 1
-
-# Attempt to ensure we don't try to convert line endings to Win32 CRLF as this will cause build to fail
-init:
- - git config --global core.autocrlf input
-
-# Install needed build dependencies
-install:
- - ps: 'Start-FileDownload "http://cygwin.com/$env:CYG_SETUP" -FileName "$env:CYG_SETUP"'
- - '%CYG_SETUP% --quiet-mode --no-shortcuts --only-site --root "%CYG_ROOT%" --site "%CYG_MIRROR%" --local-package-dir "%CYG_CACHE%" --packages automake,bison,gcc-core,libtool,make,gettext-devel,gettext,intltool,pkg-config,clang,llvm > NUL 2>&1'
- - '%CYG_BASH% -lc "cygcheck -dc cygwin"'
-
-build_script:
- - 'echo building...'
- - '%CYG_BASH% -lc "cd $APPVEYOR_BUILD_FOLDER; exec 0</dev/null; make LDFLAGS=$LDFLAGS CC=$CC $TARGET CFLAGS=$CFLAGS && make LDFLAGS=$LDFLAGS CC=$CC $TARGET_VARS hiredis-example"'
}
/* Internal helper function to detect socket status the first time a read or
- * write event fires. When connecting was not successful, the connect callback
+ * write event fires. When connecting was not succesful, the connect callback
* is called with a REDIS_ERR status and the context is free'd. */
static int __redisAsyncHandleConnect(redisAsyncContext *ac) {
redisContext *c = &(ac->c);
dictEntry *entry, auxentry;
/* Try to add the element. If the key
- * does not exists dictAdd will succeed. */
+ * does not exists dictAdd will suceed. */
if (dictAdd(ht, key, val) == DICT_OK)
return 1;
/* It already exists, get the entry */
/* Expand the hash table if needed */
static int _dictExpandIfNeeded(dict *ht) {
- /* If the hash table is empty expand it to the initial size,
+ /* If the hash table is empty expand it to the intial size,
* if the table is "full" dobule its size. */
if (ht->size == 0)
return dictExpand(ht, DICT_HT_INITIAL_SIZE);
for (j = 0; j < 10; j++) {
char buf[64];
- snprintf(buf,64,"%u",j);
+ snprintf(buf,64,"%d",j);
reply = redisCommand(c,"LPUSH mylist element-%s", buf);
freeReplyObject(reply);
}
#define _DEFAULT_SOURCE
#endif
-#if defined(__CYGWIN__)
-#include <sys/cdefs.h>
-#endif
-
#if defined(__sun__)
#define _POSIX_C_SOURCE 200112L
-#else
-#if !(defined(__APPLE__) && defined(__MACH__))
+#elif defined(__linux__) || defined(__OpenBSD__) || defined(__NetBSD__)
#define _XOPEN_SOURCE 600
-#endif
+#else
+#define _XOPEN_SOURCE
#endif
-#if defined(__APPLE__) && defined(__MACH__)
+#if __APPLE__ && __MACH__
#define _OSX
#endif
cmd = sdscatfmt(cmd, "*%i\r\n", argc);
for (j=0; j < argc; j++) {
len = argvlen ? argvlen[j] : strlen(argv[j]);
- cmd = sdscatfmt(cmd, "$%u\r\n", len);
+ cmd = sdscatfmt(cmd, "$%T\r\n", len);
cmd = sdscatlen(cmd, argv[j], len);
cmd = sdscatlen(cmd, "\r\n", sizeof("\r\n")-1);
}
/* Write the output buffer to the socket.
*
* Returns REDIS_OK when the buffer is empty, or (a part of) the buffer was
- * successfully written to the socket. When the buffer is empty after the
+ * succesfully written to the socket. When the buffer is empty after the
* write operation, "done" is set to 1 (if given).
*
- * Returns REDIS_ERR if an error occurred trying to write and sets
+ * Returns REDIS_ERR if an error occured trying to write and sets
* c->errstr to hold the appropriate error string.
*/
int redisBufferWrite(redisContext *c, int *done) {
* context is non-blocking, the "reply" pointer will not be used and the
* command is simply appended to the write buffer.
*
- * Returns the reply when a reply was successfully retrieved. Returns NULL
+ * Returns the reply when a reply was succesfully retrieved. Returns NULL
* otherwise. When NULL is returned in a blocking context, the error field
* in the context will be set.
*/
* then GNU strerror_r returned an internal static buffer and we \
* need to copy the result into our private buffer. */ \
if (err_str != (buf)) { \
- strncpy((buf), err_str, ((len) - 1)); \
- buf[(len)-1] = '\0'; \
+ buf[(len)] = '\0'; \
+ strncat((buf), err_str, ((len) - 1)); \
} \
} while (0)
#endif
typedef struct redisReply {
int type; /* REDIS_REPLY_* */
long long integer; /* The integer when type is REDIS_REPLY_INTEGER */
- size_t len; /* Length of string */
+ int len; /* Length of string */
char *str; /* Used for both REDIS_REPLY_ERROR and REDIS_REPLY_STRING */
size_t elements; /* number of elements, for REDIS_REPLY_ARRAY */
struct redisReply **element; /* elements vector for REDIS_REPLY_ARRAY */
enum redisConnectionType {
REDIS_CONN_TCP,
- REDIS_CONN_UNIX
+ REDIS_CONN_UNIX,
};
/* Context for a connection to Redis */
* host, ip (or path), timeout and bind address are reused,
* flags are used unmodified from the existing context.
*
- * Returns REDIS_OK on successful connect or REDIS_ERR otherwise.
+ * Returns REDIS_OK on successfull connect or REDIS_ERR otherwise.
*/
int redisReconnect(redisContext *c);
}
static void __redisSetErrorFromErrno(redisContext *c, int type, const char *prefix) {
- int errorno = errno; /* snprintf() may change errno */
char buf[128] = { 0 };
size_t len = 0;
if (prefix != NULL)
len = snprintf(buf,sizeof(buf),"%s: ",prefix);
- __redis_strerror_r(errorno, (char *)(buf + len), sizeof(buf) - len);
+ __redis_strerror_r(errno, (char *)(buf + len), sizeof(buf) - len);
__redisSetError(c,type,buf);
}
#define __MAX_MSEC (((LONG_MAX) - 999) / 1000)
-static int redisContextTimeoutMsec(redisContext *c, long *result)
-{
- const struct timeval *timeout = c->timeout;
- long msec = -1;
+static int redisContextWaitReady(redisContext *c, const struct timeval *timeout) {
+ struct pollfd wfd[1];
+ long msec;
+
+ msec = -1;
+ wfd[0].fd = c->fd;
+ wfd[0].events = POLLOUT;
/* Only use timeout when not NULL. */
if (timeout != NULL) {
if (timeout->tv_usec > 1000000 || timeout->tv_sec > __MAX_MSEC) {
- *result = msec;
+ __redisSetErrorFromErrno(c, REDIS_ERR_IO, NULL);
+ redisContextCloseFd(c);
return REDIS_ERR;
}
}
}
- *result = msec;
- return REDIS_OK;
-}
-
-static int redisContextWaitReady(redisContext *c, long msec) {
- struct pollfd wfd[1];
-
- wfd[0].fd = c->fd;
- wfd[0].events = POLLOUT;
-
if (errno == EINPROGRESS) {
int res;
int blocking = (c->flags & REDIS_BLOCK);
int reuseaddr = (c->flags & REDIS_REUSEADDR);
int reuses = 0;
- long timeout_msec = -1;
- servinfo = NULL;
c->connection_type = REDIS_CONN_TCP;
c->tcp.port = port;
c->timeout = NULL;
}
- if (redisContextTimeoutMsec(c, &timeout_msec) != REDIS_OK) {
- __redisSetError(c, REDIS_ERR_IO, "Invalid timeout specified");
- goto error;
- }
-
if (source_addr == NULL) {
free(c->tcp.source_addr);
c->tcp.source_addr = NULL;
if (++reuses >= REDIS_CONNECT_RETRIES) {
goto error;
} else {
- redisContextCloseFd(c);
goto addrretry;
}
} else {
- if (redisContextWaitReady(c,timeout_msec) != REDIS_OK)
+ if (redisContextWaitReady(c,c->timeout) != REDIS_OK)
goto error;
}
}
int redisContextConnectUnix(redisContext *c, const char *path, const struct timeval *timeout) {
int blocking = (c->flags & REDIS_BLOCK);
struct sockaddr_un sa;
- long timeout_msec = -1;
if (redisCreateSocket(c,AF_LOCAL) < 0)
return REDIS_ERR;
c->timeout = NULL;
}
- if (redisContextTimeoutMsec(c,&timeout_msec) != REDIS_OK)
- return REDIS_ERR;
-
sa.sun_family = AF_LOCAL;
strncpy(sa.sun_path,path,sizeof(sa.sun_path)-1);
if (connect(c->fd, (struct sockaddr*)&sa, sizeof(sa)) == -1) {
if (errno == EINPROGRESS && !blocking) {
/* This is ok. */
} else {
- if (redisContextWaitReady(c,timeout_msec) != REDIS_OK)
+ if (redisContextWaitReady(c,c->timeout) != REDIS_OK)
return REDIS_ERR;
}
}
* might not have a trailing NULL character. */
while (pos < _len) {
while(pos < _len && s[pos] != '\r') pos++;
- if (pos==_len) {
+ if (s[pos] != '\r') {
/* Not found. */
return NULL;
} else {
#define REDIS_OK 0
/* When an error occurs, the err flag in a context is set to hold the type of
- * error that occurred. REDIS_ERR_IO means there was an I/O error and you
+ * error that occured. REDIS_ERR_IO means there was an I/O error and you
* should use the "errno" variable to find out what is wrong.
* For other values, the "errstr" field will hold a description. */
#define REDIS_ERR_IO 1 /* Error in read or write */
int redisReaderFeed(redisReader *r, const char *buf, size_t len);
int redisReaderGetReply(redisReader *r, void **reply);
-#define redisReaderSetPrivdata(_r, _p) (int)(((redisReader*)(_r))->privdata = (_p))
-#define redisReaderGetObject(_r) (((redisReader*)(_r))->reply)
-#define redisReaderGetError(_r) (((redisReader*)(_r))->errstr)
+/* Backwards compatibility, can be removed on big version bump. */
+#define redisReplyReaderCreate redisReaderCreate
+#define redisReplyReaderFree redisReaderFree
+#define redisReplyReaderFeed redisReaderFeed
+#define redisReplyReaderGetReply redisReaderGetReply
+#define redisReplyReaderSetPrivdata(_r, _p) (int)(((redisReader*)(_r))->privdata = (_p))
+#define redisReplyReaderGetObject(_r) (((redisReader*)(_r))->reply)
+#define redisReplyReaderGetError(_r) (((redisReader*)(_r))->errstr)
#ifdef __cplusplus
}
-/* SDSLib 2.0 -- A C dynamic strings library
+/* SDS (Simple Dynamic Strings), A C dynamic strings library.
*
- * Copyright (c) 2006-2015, Salvatore Sanfilippo <antirez at gmail dot com>
- * Copyright (c) 2015, Oran Agra
- * Copyright (c) 2015, Redis Labs, Inc
+ * Copyright (c) 2006-2014, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <string.h>
#include <ctype.h>
#include <assert.h>
-#include "sds.h"
-#include "sdsalloc.h"
-
-static inline int sdsHdrSize(char type) {
- switch(type&SDS_TYPE_MASK) {
- case SDS_TYPE_5:
- return sizeof(struct sdshdr5);
- case SDS_TYPE_8:
- return sizeof(struct sdshdr8);
- case SDS_TYPE_16:
- return sizeof(struct sdshdr16);
- case SDS_TYPE_32:
- return sizeof(struct sdshdr32);
- case SDS_TYPE_64:
- return sizeof(struct sdshdr64);
- }
- return 0;
-}
-static inline char sdsReqType(size_t string_size) {
- if (string_size < 32)
- return SDS_TYPE_5;
- if (string_size < 0xff)
- return SDS_TYPE_8;
- if (string_size < 0xffff)
- return SDS_TYPE_16;
- if (string_size < 0xffffffff)
- return SDS_TYPE_32;
- return SDS_TYPE_64;
-}
+#include "sds.h"
/* Create a new sds string with the content specified by the 'init' pointer
* and 'initlen'.
* The string is always null-termined (all the sds strings are, always) so
* even if you create an sds string with:
*
- * mystring = sdsnewlen("abc",3);
+ * mystring = sdsnewlen("abc",3");
*
* You can print the string with printf() as there is an implicit \0 at the
* end of the string. However the string is binary safe and can contain
* \0 characters in the middle, as the length is stored in the sds header. */
sds sdsnewlen(const void *init, size_t initlen) {
- void *sh;
- sds s;
- char type = sdsReqType(initlen);
- /* Empty strings are usually created in order to append. Use type 8
- * since type 5 is not good at this. */
- if (type == SDS_TYPE_5 && initlen == 0) type = SDS_TYPE_8;
- int hdrlen = sdsHdrSize(type);
- unsigned char *fp; /* flags pointer. */
-
- sh = s_malloc(hdrlen+initlen+1);
- if (sh == NULL) return NULL;
- if (!init)
- memset(sh, 0, hdrlen+initlen+1);
- s = (char*)sh+hdrlen;
- fp = ((unsigned char*)s)-1;
- switch(type) {
- case SDS_TYPE_5: {
- *fp = type | (initlen << SDS_TYPE_BITS);
- break;
- }
- case SDS_TYPE_8: {
- SDS_HDR_VAR(8,s);
- sh->len = initlen;
- sh->alloc = initlen;
- *fp = type;
- break;
- }
- case SDS_TYPE_16: {
- SDS_HDR_VAR(16,s);
- sh->len = initlen;
- sh->alloc = initlen;
- *fp = type;
- break;
- }
- case SDS_TYPE_32: {
- SDS_HDR_VAR(32,s);
- sh->len = initlen;
- sh->alloc = initlen;
- *fp = type;
- break;
- }
- case SDS_TYPE_64: {
- SDS_HDR_VAR(64,s);
- sh->len = initlen;
- sh->alloc = initlen;
- *fp = type;
- break;
- }
+ struct sdshdr *sh;
+
+ if (init) {
+ sh = malloc(sizeof *sh+initlen+1);
+ } else {
+ sh = calloc(sizeof *sh+initlen+1,1);
}
+ if (sh == NULL) return NULL;
+ sh->len = initlen;
+ sh->free = 0;
if (initlen && init)
- memcpy(s, init, initlen);
- s[initlen] = '\0';
- return s;
+ memcpy(sh->buf, init, initlen);
+ sh->buf[initlen] = '\0';
+ return (char*)sh->buf;
}
/* Create an empty (zero length) sds string. Even in this case the string
return sdsnewlen("",0);
}
-/* Create a new sds string starting from a null terminated C string. */
+/* Create a new sds string starting from a null termined C string. */
sds sdsnew(const char *init) {
size_t initlen = (init == NULL) ? 0 : strlen(init);
return sdsnewlen(init, initlen);
/* Free an sds string. No operation is performed if 's' is NULL. */
void sdsfree(sds s) {
if (s == NULL) return;
- s_free((char*)s-sdsHdrSize(s[-1]));
+ free(s-sizeof(struct sdshdr));
}
/* Set the sds string length to the length as obtained with strlen(), so
* the output will be "6" as the string was modified but the logical length
* remains 6 bytes. */
void sdsupdatelen(sds s) {
+ struct sdshdr *sh = (void*) (s-sizeof *sh);
int reallen = strlen(s);
- sdssetlen(s, reallen);
+ sh->free += (sh->len-reallen);
+ sh->len = reallen;
}
-/* Modify an sds string in-place to make it empty (zero length).
+/* Modify an sds string on-place to make it empty (zero length).
* However all the existing buffer is not discarded but set as free space
* so that next append operations will not require allocations up to the
* number of bytes previously available. */
void sdsclear(sds s) {
- sdssetlen(s, 0);
- s[0] = '\0';
+ struct sdshdr *sh = (void*) (s-sizeof *sh);
+ sh->free += sh->len;
+ sh->len = 0;
+ sh->buf[0] = '\0';
}
/* Enlarge the free space at the end of the sds string so that the caller
* Note: this does not change the *length* of the sds string as returned
* by sdslen(), but only the free buffer space we have. */
sds sdsMakeRoomFor(sds s, size_t addlen) {
- void *sh, *newsh;
- size_t avail = sdsavail(s);
+ struct sdshdr *sh, *newsh;
+ size_t free = sdsavail(s);
size_t len, newlen;
- char type, oldtype = s[-1] & SDS_TYPE_MASK;
- int hdrlen;
-
- /* Return ASAP if there is enough space left. */
- if (avail >= addlen) return s;
+ if (free >= addlen) return s;
len = sdslen(s);
- sh = (char*)s-sdsHdrSize(oldtype);
+ sh = (void*) (s-sizeof *sh);
newlen = (len+addlen);
if (newlen < SDS_MAX_PREALLOC)
newlen *= 2;
else
newlen += SDS_MAX_PREALLOC;
+ newsh = realloc(sh, sizeof *newsh+newlen+1);
+ if (newsh == NULL) return NULL;
- type = sdsReqType(newlen);
-
- /* Don't use type 5: the user is appending to the string and type 5 is
- * not able to remember empty space, so sdsMakeRoomFor() must be called
- * at every appending operation. */
- if (type == SDS_TYPE_5) type = SDS_TYPE_8;
-
- hdrlen = sdsHdrSize(type);
- if (oldtype==type) {
- newsh = s_realloc(sh, hdrlen+newlen+1);
- if (newsh == NULL) return NULL;
- s = (char*)newsh+hdrlen;
- } else {
- /* Since the header size changes, need to move the string forward,
- * and can't use realloc */
- newsh = s_malloc(hdrlen+newlen+1);
- if (newsh == NULL) return NULL;
- memcpy((char*)newsh+hdrlen, s, len+1);
- s_free(sh);
- s = (char*)newsh+hdrlen;
- s[-1] = type;
- sdssetlen(s, len);
- }
- sdssetalloc(s, newlen);
- return s;
+ newsh->free = newlen - len;
+ return newsh->buf;
}
/* Reallocate the sds string so that it has no free space at the end. The
* After the call, the passed sds string is no longer valid and all the
* references must be substituted with the new pointer returned by the call. */
sds sdsRemoveFreeSpace(sds s) {
- void *sh, *newsh;
- char type, oldtype = s[-1] & SDS_TYPE_MASK;
- int hdrlen;
- size_t len = sdslen(s);
- sh = (char*)s-sdsHdrSize(oldtype);
-
- type = sdsReqType(len);
- hdrlen = sdsHdrSize(type);
- if (oldtype==type) {
- newsh = s_realloc(sh, hdrlen+len+1);
- if (newsh == NULL) return NULL;
- s = (char*)newsh+hdrlen;
- } else {
- newsh = s_malloc(hdrlen+len+1);
- if (newsh == NULL) return NULL;
- memcpy((char*)newsh+hdrlen, s, len+1);
- s_free(sh);
- s = (char*)newsh+hdrlen;
- s[-1] = type;
- sdssetlen(s, len);
- }
- sdssetalloc(s, len);
- return s;
+ struct sdshdr *sh;
+
+ sh = (void*) (s-sizeof *sh);
+ sh = realloc(sh, sizeof *sh+sh->len+1);
+ sh->free = 0;
+ return sh->buf;
}
/* Return the total size of the allocation of the specifed sds string,
* 4) The implicit null term.
*/
size_t sdsAllocSize(sds s) {
- size_t alloc = sdsalloc(s);
- return sdsHdrSize(s[-1])+alloc+1;
-}
+ struct sdshdr *sh = (void*) (s-sizeof *sh);
-/* Return the pointer of the actual SDS allocation (normally SDS strings
- * are referenced by the start of the string buffer). */
-void *sdsAllocPtr(sds s) {
- return (void*) (s-sdsHdrSize(s[-1]));
+ return sizeof(*sh)+sh->len+sh->free+1;
}
/* Increment the sds length and decrements the left free space at the
* sdsIncrLen(s, nread);
*/
void sdsIncrLen(sds s, int incr) {
- unsigned char flags = s[-1];
- size_t len;
- switch(flags&SDS_TYPE_MASK) {
- case SDS_TYPE_5: {
- unsigned char *fp = ((unsigned char*)s)-1;
- unsigned char oldlen = SDS_TYPE_5_LEN(flags);
- assert((incr > 0 && oldlen+incr < 32) || (incr < 0 && oldlen >= (unsigned int)(-incr)));
- *fp = SDS_TYPE_5 | ((oldlen+incr) << SDS_TYPE_BITS);
- len = oldlen+incr;
- break;
- }
- case SDS_TYPE_8: {
- SDS_HDR_VAR(8,s);
- assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr)));
- len = (sh->len += incr);
- break;
- }
- case SDS_TYPE_16: {
- SDS_HDR_VAR(16,s);
- assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr)));
- len = (sh->len += incr);
- break;
- }
- case SDS_TYPE_32: {
- SDS_HDR_VAR(32,s);
- assert((incr >= 0 && sh->alloc-sh->len >= (unsigned int)incr) || (incr < 0 && sh->len >= (unsigned int)(-incr)));
- len = (sh->len += incr);
- break;
- }
- case SDS_TYPE_64: {
- SDS_HDR_VAR(64,s);
- assert((incr >= 0 && sh->alloc-sh->len >= (uint64_t)incr) || (incr < 0 && sh->len >= (uint64_t)(-incr)));
- len = (sh->len += incr);
- break;
- }
- default: len = 0; /* Just to avoid compilation warnings. */
- }
- s[len] = '\0';
+ struct sdshdr *sh = (void*) (s-sizeof *sh);
+
+ assert(sh->free >= incr);
+ sh->len += incr;
+ sh->free -= incr;
+ assert(sh->free >= 0);
+ s[sh->len] = '\0';
}
/* Grow the sds to have the specified length. Bytes that were not part of
* if the specified length is smaller than the current length, no operation
* is performed. */
sds sdsgrowzero(sds s, size_t len) {
- size_t curlen = sdslen(s);
+ struct sdshdr *sh = (void*) (s-sizeof *sh);
+ size_t totlen, curlen = sh->len;
if (len <= curlen) return s;
s = sdsMakeRoomFor(s,len-curlen);
if (s == NULL) return NULL;
/* Make sure added region doesn't contain garbage */
+ sh = (void*)(s-sizeof *sh);
memset(s+curlen,0,(len-curlen+1)); /* also set trailing \0 byte */
- sdssetlen(s, len);
+ totlen = sh->len+sh->free;
+ sh->len = len;
+ sh->free = totlen-sh->len;
return s;
}
* After the call, the passed sds string is no longer valid and all the
* references must be substituted with the new pointer returned by the call. */
sds sdscatlen(sds s, const void *t, size_t len) {
+ struct sdshdr *sh;
size_t curlen = sdslen(s);
s = sdsMakeRoomFor(s,len);
if (s == NULL) return NULL;
+ sh = (void*) (s-sizeof *sh);
memcpy(s+curlen, t, len);
- sdssetlen(s, curlen+len);
+ sh->len = curlen+len;
+ sh->free = sh->free-len;
s[curlen+len] = '\0';
return s;
}
/* Destructively modify the sds string 's' to hold the specified binary
* safe string pointed by 't' of length 'len' bytes. */
sds sdscpylen(sds s, const char *t, size_t len) {
- if (sdsalloc(s) < len) {
- s = sdsMakeRoomFor(s,len-sdslen(s));
+ struct sdshdr *sh = (void*) (s-sizeof *sh);
+ size_t totlen = sh->free+sh->len;
+
+ if (totlen < len) {
+ s = sdsMakeRoomFor(s,len-sh->len);
if (s == NULL) return NULL;
+ sh = (void*) (s-sizeof *sh);
+ totlen = sh->free+sh->len;
}
memcpy(s, t, len);
s[len] = '\0';
- sdssetlen(s, len);
+ sh->len = len;
+ sh->free = totlen-len;
return s;
}
* conversion. 's' must point to a string with room for at least
* SDS_LLSTR_SIZE bytes.
*
- * The function returns the length of the null-terminated string
+ * The function returns the lenght of the null-terminated string
* representation stored at 's'. */
#define SDS_LLSTR_SIZE 21
int sdsll2str(char *s, long long value) {
return l;
}
-/* Create an sds string from a long long value. It is much faster than:
- *
- * sdscatprintf(sdsempty(),"%lld\n", value);
- */
-sds sdsfromlonglong(long long value) {
- char buf[SDS_LLSTR_SIZE];
- int len = sdsll2str(buf,value);
-
- return sdsnewlen(buf,len);
-}
-
-/* Like sdscatprintf() but gets va_list instead of being variadic. */
+/* Like sdscatpritf() but gets va_list instead of being variadic. */
sds sdscatvprintf(sds s, const char *fmt, va_list ap) {
va_list cpy;
- char staticbuf[1024], *buf = staticbuf, *t;
- size_t buflen = strlen(fmt)*2;
+ char *buf, *t;
+ size_t buflen = 16;
- /* We try to start using a static buffer for speed.
- * If not possible we revert to heap allocation. */
- if (buflen > sizeof(staticbuf)) {
- buf = s_malloc(buflen);
- if (buf == NULL) return NULL;
- } else {
- buflen = sizeof(staticbuf);
- }
-
- /* Try with buffers two times bigger every time we fail to
- * fit the string in the current buffer size. */
while(1) {
+ buf = malloc(buflen);
+ if (buf == NULL) return NULL;
buf[buflen-2] = '\0';
va_copy(cpy,ap);
vsnprintf(buf, buflen, fmt, cpy);
- va_end(cpy);
if (buf[buflen-2] != '\0') {
- if (buf != staticbuf) s_free(buf);
+ free(buf);
buflen *= 2;
- buf = s_malloc(buflen);
- if (buf == NULL) return NULL;
continue;
}
break;
}
-
- /* Finally concat the obtained string to the SDS string and return it. */
t = sdscat(s, buf);
- if (buf != staticbuf) s_free(buf);
+ free(buf);
return t;
}
* Example:
*
* s = sdsnew("Sum is: ");
- * s = sdscatprintf(s,"%d+%d = %d",a,b,a+b).
+ * s = sdscatprintf(s,"%d+%d = %d",a,b,a+b);
*
* Often you need to create a string from scratch with the printf-alike
* format. When this is the need, just use sdsempty() as the target string:
* %I - 64 bit signed integer (long long, int64_t)
* %u - unsigned int
* %U - 64 bit unsigned integer (unsigned long long, uint64_t)
+ * %T - A size_t variable.
* %% - Verbatim "%" character.
*/
sds sdscatfmt(sds s, char const *fmt, ...) {
+ struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr)));
+ size_t initlen = sdslen(s);
const char *f = fmt;
int i;
va_list ap;
va_start(ap,fmt);
- i = sdslen(s); /* Position of the next byte to write to dest str. */
+ f = fmt; /* Next format specifier byte to process. */
+ i = initlen; /* Position of the next byte to write to dest str. */
while(*f) {
char next, *str;
- size_t l;
+ int l;
long long num;
unsigned long long unum;
/* Make sure there is always space for at least 1 char. */
- if (sdsavail(s)==0) {
+ if (sh->free == 0) {
s = sdsMakeRoomFor(s,1);
+ sh = (void*) (s-(sizeof(struct sdshdr)));
}
switch(*f) {
case 'S':
str = va_arg(ap,char*);
l = (next == 's') ? strlen(str) : sdslen(str);
- if (sdsavail(s) < l) {
+ if (sh->free < l) {
s = sdsMakeRoomFor(s,l);
+ sh = (void*) (s-(sizeof(struct sdshdr)));
}
memcpy(s+i,str,l);
- sdsinclen(s,l);
+ sh->len += l;
+ sh->free -= l;
i += l;
break;
case 'i':
{
char buf[SDS_LLSTR_SIZE];
l = sdsll2str(buf,num);
- if (sdsavail(s) < l) {
+ if (sh->free < l) {
s = sdsMakeRoomFor(s,l);
+ sh = (void*) (s-(sizeof(struct sdshdr)));
}
memcpy(s+i,buf,l);
- sdsinclen(s,l);
+ sh->len += l;
+ sh->free -= l;
i += l;
}
break;
case 'u':
case 'U':
+ case 'T':
if (next == 'u')
unum = va_arg(ap,unsigned int);
- else
+ else if(next == 'U')
unum = va_arg(ap,unsigned long long);
+ else
+ unum = (unsigned long long)va_arg(ap,size_t);
{
char buf[SDS_LLSTR_SIZE];
l = sdsull2str(buf,unum);
- if (sdsavail(s) < l) {
+ if (sh->free < l) {
s = sdsMakeRoomFor(s,l);
+ sh = (void*) (s-(sizeof(struct sdshdr)));
}
memcpy(s+i,buf,l);
- sdsinclen(s,l);
+ sh->len += l;
+ sh->free -= l;
i += l;
}
break;
default: /* Handle %% and generally %<unknown>. */
s[i++] = next;
- sdsinclen(s,1);
+ sh->len += 1;
+ sh->free -= 1;
break;
}
break;
default:
s[i++] = *f;
- sdsinclen(s,1);
+ sh->len += 1;
+ sh->free -= 1;
break;
}
f++;
return s;
}
+
/* Remove the part of the string from left and from right composed just of
* contiguous characters found in 'cset', that is a null terminted C string.
*
* Example:
*
* s = sdsnew("AA...AA.a.aa.aHelloWorld :::");
- * s = sdstrim(s,"Aa. :");
+ * s = sdstrim(s,"A. :");
* printf("%s\n", s);
*
* Output will be just "Hello World".
*/
-sds sdstrim(sds s, const char *cset) {
+void sdstrim(sds s, const char *cset) {
+ struct sdshdr *sh = (void*) (s-sizeof *sh);
char *start, *end, *sp, *ep;
size_t len;
sp = start = s;
ep = end = s+sdslen(s)-1;
while(sp <= end && strchr(cset, *sp)) sp++;
- while(ep > sp && strchr(cset, *ep)) ep--;
+ while(ep > start && strchr(cset, *ep)) ep--;
len = (sp > ep) ? 0 : ((ep-sp)+1);
- if (s != sp) memmove(s, sp, len);
- s[len] = '\0';
- sdssetlen(s,len);
- return s;
+ if (sh->buf != sp) memmove(sh->buf, sp, len);
+ sh->buf[len] = '\0';
+ sh->free = sh->free+(sh->len-len);
+ sh->len = len;
}
/* Turn the string into a smaller (or equal) string containing only the
* sdsrange(s,1,-1); => "ello World"
*/
void sdsrange(sds s, int start, int end) {
+ struct sdshdr *sh = (void*) (s-sizeof *sh);
size_t newlen, len = sdslen(s);
if (len == 0) return;
} else {
start = 0;
}
- if (start && newlen) memmove(s, s+start, newlen);
- s[newlen] = 0;
- sdssetlen(s,newlen);
+ if (start && newlen) memmove(sh->buf, sh->buf+start, newlen);
+ sh->buf[newlen] = 0;
+ sh->free = sh->free+(sh->len-newlen);
+ sh->len = newlen;
}
/* Apply tolower() to every character of the sds string 's'. */
*
* Return value:
*
- * positive if s1 > s2.
- * negative if s1 < s2.
+ * 1 if s1 > s2.
+ * -1 if s1 < s2.
* 0 if s1 and s2 are exactly the same binary string.
*
* If two strings share exactly the same prefix, but one of the two has
if (seplen < 1 || len < 0) return NULL;
- tokens = s_malloc(sizeof(sds)*slots);
+ tokens = malloc(sizeof(sds)*slots);
if (tokens == NULL) return NULL;
if (len == 0) {
sds *newtokens;
slots *= 2;
- newtokens = s_realloc(tokens,sizeof(sds)*slots);
+ newtokens = realloc(tokens,sizeof(sds)*slots);
if (newtokens == NULL) goto cleanup;
tokens = newtokens;
}
{
int i;
for (i = 0; i < elements; i++) sdsfree(tokens[i]);
- s_free(tokens);
+ free(tokens);
*count = 0;
return NULL;
}
if (!tokens) return;
while(count--)
sdsfree(tokens[count]);
- s_free(tokens);
+ free(tokens);
+}
+
+/* Create an sds string from a long long value. It is much faster than:
+ *
+ * sdscatprintf(sdsempty(),"%lld\n", value);
+ */
+sds sdsfromlonglong(long long value) {
+ char buf[32], *p;
+ unsigned long long v;
+
+ v = (value < 0) ? -value : value;
+ p = buf+31; /* point to the last character */
+ do {
+ *p-- = '0'+(v%10);
+ v /= 10;
+ } while(v);
+ if (value < 0) *p-- = '-';
+ p++;
+ return sdsnewlen(p,32-(p-buf));
}
/* Append to the sds string "s" an escaped string representation where
if (*p) p++;
}
/* add the token to the vector */
- vector = s_realloc(vector,((*argc)+1)*sizeof(char*));
+ vector = realloc(vector,((*argc)+1)*sizeof(char*));
vector[*argc] = current;
(*argc)++;
current = NULL;
} else {
/* Even on empty input string return something not NULL. */
- if (vector == NULL) vector = s_malloc(sizeof(void*));
+ if (vector == NULL) vector = malloc(sizeof(void*));
return vector;
}
}
err:
while((*argc)--)
sdsfree(vector[*argc]);
- s_free(vector);
+ free(vector);
if (current) sdsfree(current);
*argc = 0;
return NULL;
/* Join an array of C strings using the specified separator (also a C string).
* Returns the result as an sds string. */
-sds sdsjoin(char **argv, int argc, char *sep) {
+sds sdsjoin(char **argv, int argc, char *sep, size_t seplen) {
sds join = sdsempty();
int j;
for (j = 0; j < argc; j++) {
join = sdscat(join, argv[j]);
- if (j != argc-1) join = sdscat(join,sep);
+ if (j != argc-1) join = sdscatlen(join,sep,seplen);
}
return join;
}
return join;
}
-/* Wrappers to the allocators used by SDS. Note that SDS will actually
- * just use the macros defined into sdsalloc.h in order to avoid to pay
- * the overhead of function calls. Here we define these wrappers only for
- * the programs SDS is linked to, if they want to touch the SDS internals
- * even if they use a different allocator. */
-void *sds_malloc(size_t size) { return s_malloc(size); }
-void *sds_realloc(void *ptr, size_t size) { return s_realloc(ptr,size); }
-void sds_free(void *ptr) { s_free(ptr); }
-
-#if defined(SDS_TEST_MAIN)
+#ifdef SDS_TEST_MAIN
#include <stdio.h>
#include "testhelp.h"
-#include "limits.h"
-#define UNUSED(x) (void)(x)
-int sdsTest(void) {
+int main(void) {
{
+ struct sdshdr *sh;
sds x = sdsnew("foo"), y;
test_cond("Create a string and obtain the length",
sdsfree(x);
x = sdscatprintf(sdsempty(),"%d",123);
test_cond("sdscatprintf() seems working in the base case",
- sdslen(x) == 3 && memcmp(x,"123\0",4) == 0)
-
- sdsfree(x);
- x = sdsnew("--");
- x = sdscatfmt(x, "Hello %s World %I,%I--", "Hi!", LLONG_MIN,LLONG_MAX);
- test_cond("sdscatfmt() seems working in the base case",
- sdslen(x) == 60 &&
- memcmp(x,"--Hello Hi! World -9223372036854775808,"
- "9223372036854775807--",60) == 0)
- printf("[%s]\n",x);
-
- sdsfree(x);
- x = sdsnew("--");
- x = sdscatfmt(x, "%u,%U--", UINT_MAX, ULLONG_MAX);
- test_cond("sdscatfmt() seems working with unsigned numbers",
- sdslen(x) == 35 &&
- memcmp(x,"--4294967295,18446744073709551615--",35) == 0)
-
- sdsfree(x);
- x = sdsnew(" x ");
- sdstrim(x," x");
- test_cond("sdstrim() works when all chars match",
- sdslen(x) == 0)
-
- sdsfree(x);
- x = sdsnew(" x ");
- sdstrim(x," ");
- test_cond("sdstrim() works when a single char remains",
- sdslen(x) == 1 && x[0] == 'x')
+ sdslen(x) == 3 && memcmp(x,"123\0",4) ==0)
sdsfree(x);
x = sdsnew("xxciaoyyy");
memcmp(y,"\"\\a\\n\\x00foo\\r\"",15) == 0)
{
- unsigned int oldfree;
- char *p;
- int step = 10, j, i;
+ int oldfree;
sdsfree(x);
- sdsfree(y);
x = sdsnew("0");
- test_cond("sdsnew() free/len buffers", sdslen(x) == 1 && sdsavail(x) == 0);
-
- /* Run the test a few times in order to hit the first two
- * SDS header types. */
- for (i = 0; i < 10; i++) {
- int oldlen = sdslen(x);
- x = sdsMakeRoomFor(x,step);
- int type = x[-1]&SDS_TYPE_MASK;
-
- test_cond("sdsMakeRoomFor() len", sdslen(x) == oldlen);
- if (type != SDS_TYPE_5) {
- test_cond("sdsMakeRoomFor() free", sdsavail(x) >= step);
- oldfree = sdsavail(x);
- }
- p = x+oldlen;
- for (j = 0; j < step; j++) {
- p[j] = 'A'+j;
- }
- sdsIncrLen(x,step);
- }
- test_cond("sdsMakeRoomFor() content",
- memcmp("0ABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJ",x,101) == 0);
- test_cond("sdsMakeRoomFor() final length",sdslen(x)==101);
-
- sdsfree(x);
+ sh = (void*) (x-(sizeof(struct sdshdr)));
+ test_cond("sdsnew() free/len buffers", sh->len == 1 && sh->free == 0);
+ x = sdsMakeRoomFor(x,1);
+ sh = (void*) (x-(sizeof(struct sdshdr)));
+ test_cond("sdsMakeRoomFor()", sh->len == 1 && sh->free > 0);
+ oldfree = sh->free;
+ x[1] = '1';
+ sdsIncrLen(x,1);
+ test_cond("sdsIncrLen() -- content", x[0] == '0' && x[1] == '1');
+ test_cond("sdsIncrLen() -- len", sh->len == 2);
+ test_cond("sdsIncrLen() -- free", sh->free == oldfree-1);
}
}
test_report()
return 0;
}
#endif
-
-#ifdef SDS_TEST_MAIN
-int main(void) {
- return sdsTest();
-}
-#endif
-/* SDSLib 2.0 -- A C dynamic strings library
+/* SDS (Simple Dynamic Strings), A C dynamic strings library.
*
- * Copyright (c) 2006-2015, Salvatore Sanfilippo <antirez at gmail dot com>
- * Copyright (c) 2015, Oran Agra
- * Copyright (c) 2015, Redis Labs, Inc
+ * Copyright (c) 2006-2014, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <sys/types.h>
#include <stdarg.h>
-#include <stdint.h>
+#ifdef _MSC_VER
+#include "win32.h"
+#endif
typedef char *sds;
-/* Note: sdshdr5 is never used, we just access the flags byte directly.
- * However is here to document the layout of type 5 SDS strings. */
-struct __attribute__ ((__packed__)) sdshdr5 {
- unsigned char flags; /* 3 lsb of type, and 5 msb of string length */
- char buf[];
-};
-struct __attribute__ ((__packed__)) sdshdr8 {
- uint8_t len; /* used */
- uint8_t alloc; /* excluding the header and null terminator */
- unsigned char flags; /* 3 lsb of type, 5 unused bits */
- char buf[];
-};
-struct __attribute__ ((__packed__)) sdshdr16 {
- uint16_t len; /* used */
- uint16_t alloc; /* excluding the header and null terminator */
- unsigned char flags; /* 3 lsb of type, 5 unused bits */
- char buf[];
-};
-struct __attribute__ ((__packed__)) sdshdr32 {
- uint32_t len; /* used */
- uint32_t alloc; /* excluding the header and null terminator */
- unsigned char flags; /* 3 lsb of type, 5 unused bits */
- char buf[];
-};
-struct __attribute__ ((__packed__)) sdshdr64 {
- uint64_t len; /* used */
- uint64_t alloc; /* excluding the header and null terminator */
- unsigned char flags; /* 3 lsb of type, 5 unused bits */
+struct sdshdr {
+ int len;
+ int free;
char buf[];
};
-#define SDS_TYPE_5 0
-#define SDS_TYPE_8 1
-#define SDS_TYPE_16 2
-#define SDS_TYPE_32 3
-#define SDS_TYPE_64 4
-#define SDS_TYPE_MASK 7
-#define SDS_TYPE_BITS 3
-#define SDS_HDR_VAR(T,s) struct sdshdr##T *sh = (struct sdshdr##T *)((s)-(sizeof(struct sdshdr##T)));
-#define SDS_HDR(T,s) ((struct sdshdr##T *)((s)-(sizeof(struct sdshdr##T))))
-#define SDS_TYPE_5_LEN(f) ((f)>>SDS_TYPE_BITS)
-
static inline size_t sdslen(const sds s) {
- unsigned char flags = s[-1];
- switch(flags&SDS_TYPE_MASK) {
- case SDS_TYPE_5:
- return SDS_TYPE_5_LEN(flags);
- case SDS_TYPE_8:
- return SDS_HDR(8,s)->len;
- case SDS_TYPE_16:
- return SDS_HDR(16,s)->len;
- case SDS_TYPE_32:
- return SDS_HDR(32,s)->len;
- case SDS_TYPE_64:
- return SDS_HDR(64,s)->len;
- }
- return 0;
+ struct sdshdr *sh = (struct sdshdr *)(s-sizeof *sh);
+ return sh->len;
}
static inline size_t sdsavail(const sds s) {
- unsigned char flags = s[-1];
- switch(flags&SDS_TYPE_MASK) {
- case SDS_TYPE_5: {
- return 0;
- }
- case SDS_TYPE_8: {
- SDS_HDR_VAR(8,s);
- return sh->alloc - sh->len;
- }
- case SDS_TYPE_16: {
- SDS_HDR_VAR(16,s);
- return sh->alloc - sh->len;
- }
- case SDS_TYPE_32: {
- SDS_HDR_VAR(32,s);
- return sh->alloc - sh->len;
- }
- case SDS_TYPE_64: {
- SDS_HDR_VAR(64,s);
- return sh->alloc - sh->len;
- }
- }
- return 0;
-}
-
-static inline void sdssetlen(sds s, size_t newlen) {
- unsigned char flags = s[-1];
- switch(flags&SDS_TYPE_MASK) {
- case SDS_TYPE_5:
- {
- unsigned char *fp = ((unsigned char*)s)-1;
- *fp = SDS_TYPE_5 | (newlen << SDS_TYPE_BITS);
- }
- break;
- case SDS_TYPE_8:
- SDS_HDR(8,s)->len = newlen;
- break;
- case SDS_TYPE_16:
- SDS_HDR(16,s)->len = newlen;
- break;
- case SDS_TYPE_32:
- SDS_HDR(32,s)->len = newlen;
- break;
- case SDS_TYPE_64:
- SDS_HDR(64,s)->len = newlen;
- break;
- }
-}
-
-static inline void sdsinclen(sds s, size_t inc) {
- unsigned char flags = s[-1];
- switch(flags&SDS_TYPE_MASK) {
- case SDS_TYPE_5:
- {
- unsigned char *fp = ((unsigned char*)s)-1;
- unsigned char newlen = SDS_TYPE_5_LEN(flags)+inc;
- *fp = SDS_TYPE_5 | (newlen << SDS_TYPE_BITS);
- }
- break;
- case SDS_TYPE_8:
- SDS_HDR(8,s)->len += inc;
- break;
- case SDS_TYPE_16:
- SDS_HDR(16,s)->len += inc;
- break;
- case SDS_TYPE_32:
- SDS_HDR(32,s)->len += inc;
- break;
- case SDS_TYPE_64:
- SDS_HDR(64,s)->len += inc;
- break;
- }
-}
-
-/* sdsalloc() = sdsavail() + sdslen() */
-static inline size_t sdsalloc(const sds s) {
- unsigned char flags = s[-1];
- switch(flags&SDS_TYPE_MASK) {
- case SDS_TYPE_5:
- return SDS_TYPE_5_LEN(flags);
- case SDS_TYPE_8:
- return SDS_HDR(8,s)->alloc;
- case SDS_TYPE_16:
- return SDS_HDR(16,s)->alloc;
- case SDS_TYPE_32:
- return SDS_HDR(32,s)->alloc;
- case SDS_TYPE_64:
- return SDS_HDR(64,s)->alloc;
- }
- return 0;
-}
-
-static inline void sdssetalloc(sds s, size_t newlen) {
- unsigned char flags = s[-1];
- switch(flags&SDS_TYPE_MASK) {
- case SDS_TYPE_5:
- /* Nothing to do, this type has no total allocation info. */
- break;
- case SDS_TYPE_8:
- SDS_HDR(8,s)->alloc = newlen;
- break;
- case SDS_TYPE_16:
- SDS_HDR(16,s)->alloc = newlen;
- break;
- case SDS_TYPE_32:
- SDS_HDR(32,s)->alloc = newlen;
- break;
- case SDS_TYPE_64:
- SDS_HDR(64,s)->alloc = newlen;
- break;
- }
+ struct sdshdr *sh = (struct sdshdr *)(s-sizeof *sh);
+ return sh->free;
}
sds sdsnewlen(const void *init, size_t initlen);
sds sdsnew(const char *init);
sds sdsempty(void);
+size_t sdslen(const sds s);
sds sdsdup(const sds s);
void sdsfree(sds s);
+size_t sdsavail(const sds s);
sds sdsgrowzero(sds s, size_t len);
sds sdscatlen(sds s, const void *t, size_t len);
sds sdscat(sds s, const char *t);
#endif
sds sdscatfmt(sds s, char const *fmt, ...);
-sds sdstrim(sds s, const char *cset);
+void sdstrim(sds s, const char *cset);
void sdsrange(sds s, int start, int end);
void sdsupdatelen(sds s);
void sdsclear(sds s);
sds sdscatrepr(sds s, const char *p, size_t len);
sds *sdssplitargs(const char *line, int *argc);
sds sdsmapchars(sds s, const char *from, const char *to, size_t setlen);
-sds sdsjoin(char **argv, int argc, char *sep);
+sds sdsjoin(char **argv, int argc, char *sep, size_t seplen);
sds sdsjoinsds(sds *argv, int argc, const char *sep, size_t seplen);
/* Low level functions exposed to the user API */
void sdsIncrLen(sds s, int incr);
sds sdsRemoveFreeSpace(sds s);
size_t sdsAllocSize(sds s);
-void *sdsAllocPtr(sds s);
-
-/* Export the allocator used by SDS to the program using SDS.
- * Sometimes the program SDS is linked to, may use a different set of
- * allocators, but may want to allocate or free things that SDS will
- * respectively free or allocate. */
-void *sds_malloc(size_t size);
-void *sds_realloc(void *ptr, size_t size);
-void sds_free(void *ptr);
-
-#ifdef REDIS_TEST
-int sdsTest(int argc, char *argv[]);
-#endif
#endif
+++ /dev/null
-/* SDSLib 2.0 -- A C dynamic strings library
- *
- * Copyright (c) 2006-2015, Salvatore Sanfilippo <antirez at gmail dot com>
- * Copyright (c) 2015, Oran Agra
- * Copyright (c) 2015, Redis Labs, Inc
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Redis nor the names of its contributors may be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/* SDS allocator selection.
- *
- * This file is used in order to change the SDS allocator at compile time.
- * Just define the following defines to what you want to use. Also add
- * the include of your alternate allocator if needed (not needed in order
- * to use the default libc allocator). */
-
-#define s_malloc malloc
-#define s_realloc realloc
-#define s_free free
struct {
const char *path;
- } unix_sock;
+ } unix;
};
/* The following lines make up our testing "framework" :) */
if (config.type == CONN_TCP) {
c = redisConnect(config.tcp.host, config.tcp.port);
} else if (config.type == CONN_UNIX) {
- c = redisConnectUnix(config.unix_sock.path);
+ c = redisConnectUnix(config.unix.path);
} else if (config.type == CONN_FD) {
/* Create a dummy connection just to get an fd to inherit */
- redisContext *dummy_ctx = redisConnectUnix(config.unix_sock.path);
+ redisContext *dummy_ctx = redisConnectUnix(config.unix.path);
if (dummy_ctx) {
int fd = disconnect(dummy_ctx, 1);
printf("Connecting to inherited fd %d\n", fd);
test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$7\r\nfoo\0xxx\r\n$3\r\nbar\r\n",len) == 0 &&
len == 4+4+(3+2)+4+(7+2)+4+(3+2));
free(cmd);
-
- sds sds_cmd;
-
- sds_cmd = sdsempty();
- test("Format command into sds by passing argc/argv without lengths: ");
- len = redisFormatSdsCommandArgv(&sds_cmd,argc,argv,NULL);
- test_cond(strncmp(sds_cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n",len) == 0 &&
- len == 4+4+(3+2)+4+(3+2)+4+(3+2));
- sdsfree(sds_cmd);
-
- sds_cmd = sdsempty();
- test("Format command into sds by passing argc/argv with lengths: ");
- len = redisFormatSdsCommandArgv(&sds_cmd,argc,argv,lens);
- test_cond(strncmp(sds_cmd,"*3\r\n$3\r\nSET\r\n$7\r\nfoo\0xxx\r\n$3\r\nbar\r\n",len) == 0 &&
- len == 4+4+(3+2)+4+(7+2)+4+(3+2));
- sdsfree(sds_cmd);
}
static void test_append_formatted_commands(struct config config) {
}
static void test_free_null(void) {
- void *redisCtx = NULL;
+ void *redisContext = NULL;
void *reply = NULL;
test("Don't fail when redisFree is passed a NULL value: ");
- redisFree(redisCtx);
- test_cond(redisCtx == NULL);
+ redisFree(redisContext);
+ test_cond(redisContext == NULL);
test("Don't fail when freeReplyObject is passed a NULL value: ");
freeReplyObject(reply);
strcmp(c->errstr,"Connection refused") == 0);
redisFree(c);
- test("Returns error when the unix_sock socket path doesn't accept connections: ");
+ test("Returns error when the unix socket path doesn't accept connections: ");
c = redisConnectUnix((char*)"/tmp/idontexist.sock");
test_cond(c->err == REDIS_ERR_IO); /* Don't care about the message... */
redisFree(c);
test("Reconnect properly uses owned parameters: ");
config.tcp.host = "foo";
- config.unix_sock.path = "foo";
+ config.unix.path = "foo";
redisReconnect(c);
reply = redisCommand(c, "PING");
test_cond(reply != NULL && reply->type == REDIS_REPLY_STATUS && strcmp(reply->str, "PONG") == 0);
c = redisConnectWithTimeout(config.tcp.host, config.tcp.port, config.tcp.timeout);
- test_cond(c->err == REDIS_ERR_IO && strcmp(c->errstr, "Invalid timeout specified") == 0);
+ test_cond(c->err == REDIS_ERR_IO);
redisFree(c);
test("Set error when an invalid timeout sec value is given to redisConnectWithTimeout: ");
c = redisConnectWithTimeout(config.tcp.host, config.tcp.port, config.tcp.timeout);
- test_cond(c->err == REDIS_ERR_IO && strcmp(c->errstr, "Invalid timeout specified") == 0);
+ test_cond(c->err == REDIS_ERR_IO);
redisFree(c);
}
.host = "127.0.0.1",
.port = 6379
},
- .unix_sock = {
+ .unix = {
.path = "/tmp/redis.sock"
}
};
cfg.tcp.port = atoi(argv[0]);
} else if (argc >= 2 && !strcmp(argv[0],"-s")) {
argv++; argc--;
- cfg.unix_sock.path = argv[0];
+ cfg.unix.path = argv[0];
} else if (argc >= 1 && !strcmp(argv[0],"--skip-throughput")) {
throughput = 0;
} else if (argc >= 1 && !strcmp(argv[0],"--skip-inherit-fd")) {
test_append_formatted_commands(cfg);
if (throughput) test_throughput(cfg);
- printf("\nTesting against Unix socket connection (%s):\n", cfg.unix_sock.path);
+ printf("\nTesting against Unix socket connection (%s):\n", cfg.unix.path);
cfg.type = CONN_UNIX;
test_blocking_connection(cfg);
test_blocking_connection_timeouts(cfg);
if (throughput) test_throughput(cfg);
if (test_inherit_fd) {
- printf("\nTesting against inherited fd (%s):\n", cfg.unix_sock.path);
+ printf("\nTesting against inherited fd (%s):\n", cfg.unix.path);
cfg.type = CONN_FD;
test_blocking_connection(cfg);
}
$ vim ~/.gdbinit
-More details in the [troubleshooting debug documentation](http://docs.icinga.com/icinga2/latest/doc/module/icinga2/chapter/troubleshooting#debug).
+More details in the [troubleshooting debug documentation](https://docs.icinga.com/icinga2/latest/doc/module/icinga2/chapter/troubleshooting#debug).