mirror of https://github.com/wazuh/wazuh-docker.git
synced 2025-10-23 04:51:57 +00:00

Compare commits: 05fcfd5652 ... 2.1.0_5.5.

4 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 9f192202fd |  |
|  | d8cd0ba7d0 |  |
|  | 349213bac5 |  |
|  | 8547b3b45a |  |
.env
@@ -1,6 +0,0 @@
WAZUH_VERSION=4.14.0
WAZUH_IMAGE_VERSION=4.14.0
WAZUH_TAG_REVISION=1
FILEBEAT_TEMPLATE_BRANCH=4.14.0
WAZUH_FILEBEAT_MODULE=wazuh-filebeat-0.4.tar.gz
WAZUH_UI_REVISION=1
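The CI workflows further down consume these variables by appending the file to $GITHUB_ENV; outside of Actions the same values can be exported into an interactive shell. A minimal sketch, assuming the commands run from the repository root (the set -a / set +a pattern is an illustration, not part of the repository scripts):

# Export every assignment in .env into the current shell, mirroring the
# workflows' "cat .env > $GITHUB_ENV" step for local use.
set -a
. ./.env        # defines WAZUH_VERSION, WAZUH_IMAGE_VERSION, WAZUH_TAG_REVISION, ...
set +a
echo "Building images for Wazuh ${WAZUH_VERSION} (revision ${WAZUH_TAG_REVISION})"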
.github/.goss.yaml
@@ -1,103 +0,0 @@
file:
  /etc/filebeat/filebeat.yml:
    exists: true
    mode: "0644"
    owner: root
    group: root
    filetype: file
    contains: []
  /var/ossec/bin/wazuh-control:
    exists: true
    mode: "0750"
    owner: root
    group: root
    filetype: file
    contains: []
  /var/ossec/etc/lists/audit-keys:
    exists: true
    mode: "0660"
    owner: wazuh
    group: wazuh
    filetype: file
    contains: []
  /var/ossec/etc/ossec.conf:
    exists: true
    mode: "0660"
    owner: root
    group: wazuh
    filetype: file
    contains: []
  /var/ossec/etc/rules/local_rules.xml:
    exists: true
    mode: "0660"
    owner: wazuh
    group: wazuh
    filetype: file
    contains: []
  /var/ossec/etc/sslmanager.cert:
    exists: true
    mode: "0640"
    owner: root
    group: root
    filetype: file
    contains: []
  /var/ossec/etc/sslmanager.key:
    exists: true
    mode: "0640"
    owner: root
    group: root
    filetype: file
    contains: []
package:
  filebeat:
    installed: true
    versions:
      - 7.10.2
  wazuh-manager:
    installed: true
    versions:
      - 4.14.0
port:
  tcp:1514:
    listening: true
    ip:
      - 0.0.0.0
  tcp:1515:
    listening: true
    ip:
      - 0.0.0.0
  tcp:55000:
    listening: true
    ip:
      - 0.0.0.0
process:
  filebeat:
    running: true
  wazuh-analysisd:
    running: true
  wazuh-authd:
    running: true
  wazuh-execd:
    running: true
  wazuh-monitord:
    running: true
  wazuh-remoted:
    running: true
  wazuh-syscheckd:
    running: true
  s6-supervise:
    running: true
  wazuh-db:
    running: true
  wazuh-modulesd:
    running: true
user:
  wazuh:
    exists: true
    groups:
      - wazuh
    home: /var/ossec
    shell: /sbin/nologin
group:
  wazuh:
    exists: true
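The pipeline in .github/workflows/push.yml runs this file with dgoss against the freshly built manager image. A rough local equivalent, assuming dgoss is installed and the image tag matches WAZUH_IMAGE_VERSION from .env:

# Validate a running wazuh-manager container against .github/.goss.yaml,
# as the "Execute Goss tests (wazuh-manager)" step does below.
export GOSS_FILE=.github/.goss.yaml
export GOSS_SLEEP=30   # give the s6-supervised services time to start before asserting
dgoss run wazuh/wazuh-manager:4.14.0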
.github/free-disk-space/action.yml
@@ -1,245 +0,0 @@
name: "Free Disk Space (Ubuntu)"
description: "A configurable GitHub Action to free up disk space on an Ubuntu GitHub Actions runner."

# Thanks @jlumbroso for the action code https://github.com/jlumbroso/free-disk-space/
# See: https://docs.github.com/en/actions/creating-actions/metadata-syntax-for-github-actions#branding

inputs:
  android:
    description: "Remove Android runtime"
    required: false
    default: "true"
  dotnet:
    description: "Remove .NET runtime"
    required: false
    default: "true"
  haskell:
    description: "Remove Haskell runtime"
    required: false
    default: "true"

  # option inspired by:
  # https://github.com/apache/flink/blob/master/tools/azure-pipelines/free_disk_space.sh
  large-packages:
    description: "Remove large packages"
    required: false
    default: "true"

  docker-images:
    description: "Remove Docker images"
    required: false
    default: "true"

  # option inspired by:
  # https://github.com/actions/virtual-environments/issues/2875#issuecomment-1163392159
  tool-cache:
    description: "Remove image tool cache"
    required: false
    default: "false"

  swap-storage:
    description: "Remove swap storage"
    required: false
    default: "true"

runs:
  using: "composite"
  steps:
    - shell: bash
      run: |

        # ======
        # MACROS
        # ======

        # macro to print a line of equals
        # (silly but works)
        printSeparationLine() {
          str=${1:=}
          num=${2:-80}
          counter=1
          output=""
          while [ $counter -le $num ]
          do
            output="${output}${str}"
            counter=$((counter+1))
          done
          echo "${output}"
        }

        # macro to compute available space
        # REF: https://unix.stackexchange.com/a/42049/60849
        # REF: https://stackoverflow.com/a/450821/408734
        getAvailableSpace() { echo $(df -a $1 | awk 'NR > 1 {avail+=$4} END {print avail}'); }

        # macro to make Kb human readable (assume the input is Kb)
        # REF: https://unix.stackexchange.com/a/44087/60849
        formatByteCount() { echo $(numfmt --to=iec-i --suffix=B --padding=7 $1'000'); }

        # macro to output saved space
        printSavedSpace() {
          saved=${1}
          title=${2:-}

          echo ""
          printSeparationLine '*' 80
          if [ ! -z "${title}" ]; then
            echo "=> ${title}: Saved $(formatByteCount $saved)"
          else
            echo "=> Saved $(formatByteCount $saved)"
          fi
          printSeparationLine '*' 80
          echo ""
        }

        # macro to print output of df with caption
        printDH() {
          caption=${1:-}

          printSeparationLine '=' 80
          echo "${caption}"
          echo ""
          echo "$ df -h /"
          echo ""
          df -h /
          echo "$ df -a /"
          echo ""
          df -a /
          echo "$ df -a"
          echo ""
          df -a
          printSeparationLine '=' 80
        }


        # ======
        # SCRIPT
        # ======

        # Display initial disk space stats

        AVAILABLE_INITIAL=$(getAvailableSpace)
        AVAILABLE_ROOT_INITIAL=$(getAvailableSpace '/')

        printDH "BEFORE CLEAN-UP:"
        echo ""


        # Option: Remove Android library

        if [[ ${{ inputs.android }} == 'true' ]]; then
          BEFORE=$(getAvailableSpace)

          sudo rm -rf /usr/local/lib/android || true

          AFTER=$(getAvailableSpace)
          SAVED=$((AFTER-BEFORE))
          printSavedSpace $SAVED "Android library"
        fi

        # Option: Remove .NET runtime

        if [[ ${{ inputs.dotnet }} == 'true' ]]; then
          BEFORE=$(getAvailableSpace)

          # https://github.community/t/bigger-github-hosted-runners-disk-space/17267/11
          sudo rm -rf /usr/share/dotnet || true

          AFTER=$(getAvailableSpace)
          SAVED=$((AFTER-BEFORE))
          printSavedSpace $SAVED ".NET runtime"
        fi

        # Option: Remove Haskell runtime

        if [[ ${{ inputs.haskell }} == 'true' ]]; then
          BEFORE=$(getAvailableSpace)

          sudo rm -rf /opt/ghc || true
          sudo rm -rf /usr/local/.ghcup || true

          AFTER=$(getAvailableSpace)
          SAVED=$((AFTER-BEFORE))
          printSavedSpace $SAVED "Haskell runtime"
        fi

        # Option: Remove large packages
        # REF: https://github.com/apache/flink/blob/master/tools/azure-pipelines/free_disk_space.sh

        if [[ ${{ inputs.large-packages }} == 'true' ]]; then
          BEFORE=$(getAvailableSpace)

          sudo apt-get remove -y '^aspnetcore-.*' || echo "::warning::The command [sudo apt-get remove -y '^aspnetcore-.*'] failed to complete successfully. Proceeding..."
          sudo apt-get remove -y '^dotnet-.*' --fix-missing || echo "::warning::The command [sudo apt-get remove -y '^dotnet-.*' --fix-missing] failed to complete successfully. Proceeding..."
          sudo apt-get remove -y '^llvm-.*' --fix-missing || echo "::warning::The command [sudo apt-get remove -y '^llvm-.*' --fix-missing] failed to complete successfully. Proceeding..."
          sudo apt-get remove -y 'php.*' --fix-missing || echo "::warning::The command [sudo apt-get remove -y 'php.*' --fix-missing] failed to complete successfully. Proceeding..."
          sudo apt-get remove -y '^mongodb-.*' --fix-missing || echo "::warning::The command [sudo apt-get remove -y '^mongodb-.*' --fix-missing] failed to complete successfully. Proceeding..."
          sudo apt-get remove -y '^mysql-.*' --fix-missing || echo "::warning::The command [sudo apt-get remove -y '^mysql-.*' --fix-missing] failed to complete successfully. Proceeding..."
          sudo apt-get remove -y azure-cli google-chrome-stable firefox powershell mono-devel libgl1-mesa-dri --fix-missing || echo "::warning::The command [sudo apt-get remove -y azure-cli google-chrome-stable firefox powershell mono-devel libgl1-mesa-dri --fix-missing] failed to complete successfully. Proceeding..."
          sudo apt-get remove -y google-cloud-sdk --fix-missing || echo "::debug::The command [sudo apt-get remove -y google-cloud-sdk --fix-missing] failed to complete successfully. Proceeding..."
          sudo apt-get remove -y google-cloud-cli --fix-missing || echo "::debug::The command [sudo apt-get remove -y google-cloud-cli --fix-missing] failed to complete successfully. Proceeding..."
          sudo apt-get autoremove -y || echo "::warning::The command [sudo apt-get autoremove -y] failed to complete successfully. Proceeding..."
          sudo apt-get clean || echo "::warning::The command [sudo apt-get clean] failed to complete successfully. Proceeding..."

          AFTER=$(getAvailableSpace)
          SAVED=$((AFTER-BEFORE))
          printSavedSpace $SAVED "Large misc. packages"
        fi

        # Option: Remove Docker images

        if [[ ${{ inputs.docker-images }} == 'true' ]]; then
          BEFORE=$(getAvailableSpace)

          sudo docker image prune --all --force || true

          AFTER=$(getAvailableSpace)
          SAVED=$((AFTER-BEFORE))
          printSavedSpace $SAVED "Docker images"
        fi

        # Option: Remove tool cache
        # REF: https://github.com/actions/virtual-environments/issues/2875#issuecomment-1163392159

        if [[ ${{ inputs.tool-cache }} == 'true' ]]; then
          BEFORE=$(getAvailableSpace)

          sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true

          AFTER=$(getAvailableSpace)
          SAVED=$((AFTER-BEFORE))
          printSavedSpace $SAVED "Tool cache"
        fi

        # Option: Remove Swap storage

        if [[ ${{ inputs.swap-storage }} == 'true' ]]; then
          BEFORE=$(getAvailableSpace)

          sudo swapoff -a || true
          sudo rm -f /mnt/swapfile || true
          free -h

          AFTER=$(getAvailableSpace)
          SAVED=$((AFTER-BEFORE))
          printSavedSpace $SAVED "Swap storage"
        fi


        # Output saved space statistic

        AVAILABLE_END=$(getAvailableSpace)
        AVAILABLE_ROOT_END=$(getAvailableSpace '/')

        echo ""
        printDH "AFTER CLEAN-UP:"

        echo ""
        echo ""

        echo "/dev/root:"
        printSavedSpace $((AVAILABLE_ROOT_END - AVAILABLE_ROOT_INITIAL))
        echo "overall:"
        printSavedSpace $((AVAILABLE_END - AVAILABLE_INITIAL))
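Each option in the action follows the same measure, remove, measure-again pattern. A standalone sketch of that pattern for a single path, reusing the df-based helper defined above (the numfmt conversion assumes df reports 1K blocks):

#!/usr/bin/env bash
# Report how much space one cleanup step frees, in human-readable units.
getAvailableSpace() { df -a "$1" | awk 'NR > 1 {avail+=$4} END {print avail}'; }

BEFORE=$(getAvailableSpace /)
sudo rm -rf /usr/local/lib/android || true      # same removal as the "android" option
AFTER=$(getAvailableSpace /)
echo "Saved $(numfmt --to=iec-i --suffix=B $(( (AFTER - BEFORE) * 1024 )))"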
.github/multi-node-filebeat-check.sh
@@ -1,39 +0,0 @@
COMMAND_TO_EXECUTE="filebeat test output"

MASTER_CONTAINERS=$(docker ps --format '{{.Names}}' | grep -E 'master')

if [ -z "$MASTER_CONTAINERS" ]; then
  echo "No containers were found with 'master' in their name."
else
  for MASTER_CONTAINERS in $MASTER_CONTAINERS; do
    FILEBEAT_OUTPUT=$(docker exec "$MASTER_CONTAINERS" $COMMAND_TO_EXECUTE)
    FILEBEAT_STATUS=$(echo "${FILEBEAT_OUTPUT}" | grep -c OK)
    if [[ $FILEBEAT_STATUS -eq 7 ]]; then
      echo "No errors in filebeat"
      echo "${FILEBEAT_OUTPUT}"
    else
      echo "Errors in filebeat"
      echo "${FILEBEAT_OUTPUT}"
      exit 1
    fi
  done
fi

MASTER_CONTAINERS=$(docker ps --format '{{.Names}}' | grep -E 'worker')

if [ -z "$MASTER_CONTAINERS" ]; then
  echo "No containers were found with 'worker' in their name."
else
  for MASTER_CONTAINERS in $MASTER_CONTAINERS; do
    FILEBEAT_OUTPUT=$(docker exec "$MASTER_CONTAINERS" $COMMAND_TO_EXECUTE)
    FILEBEAT_STATUS=$(echo "${FILEBEAT_OUTPUT}" | grep -c OK)
    if [[ $FILEBEAT_STATUS -eq 7 ]]; then
      echo "No errors in filebeat"
      echo "${FILEBEAT_OUTPUT}"
    else
      echo "Errors in filebeat"
      echo "${FILEBEAT_OUTPUT}"
      exit 1
    fi
  done
fi
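The master and worker loops above are identical apart from the name filter. A de-duplicated sketch of the same check; the check_filebeat function name is illustrative and not part of the repository:

#!/usr/bin/env bash
# Run "filebeat test output" in every container whose name matches a pattern
# and fail if any of them reports fewer than 7 "OK" lines.
check_filebeat() {
  pattern=$1
  containers=$(docker ps --format '{{.Names}}' | grep -E "$pattern")
  if [ -z "$containers" ]; then
    echo "No containers were found with '$pattern' in their name."
    return 0
  fi
  for container in $containers; do
    output=$(docker exec "$container" filebeat test output)
    if [ "$(echo "$output" | grep -c OK)" -eq 7 ]; then
      echo "No errors in filebeat ($container)"
    else
      echo "Errors in filebeat ($container)"
      echo "$output"
      return 1
    fi
  done
}

check_filebeat master && check_filebeat worker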
.github/multi-node-log-check.sh
@@ -1,16 +0,0 @@
log1=$(docker exec multi-node_wazuh.master_1 sh -c 'cat /var/ossec/logs/ossec.log' | grep -P "ERR|WARN|CRIT")
if [[ -z "$log1" ]]; then
  echo "No errors in master ossec.log"
else
  echo "Errors in master ossec.log:"
  echo "${log1}"
  exit 1
fi
log2=$(docker exec multi-node_wazuh.worker_1 sh -c 'cat /var/ossec/logs/ossec.log' | grep -P "ERR|WARN|CRIT")
if [[ -z "${log2}" ]]; then
  echo "No errors in worker ossec.log"
else
  echo "Errors in worker ossec.log:"
  echo "${log2}"
  exit 1
fi
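The same check generalizes to any number of cluster nodes by looping over container names. A hedged sketch; the container list matches the two nodes used above:

#!/usr/bin/env bash
# Fail if any listed container has ERR/WARN/CRIT lines in its ossec.log.
for container in multi-node_wazuh.master_1 multi-node_wazuh.worker_1; do
  # grep exits non-zero when nothing matches, which is the "clean log" case here
  errors=$(docker exec "$container" cat /var/ossec/logs/ossec.log | grep -P "ERR|WARN|CRIT" || true)
  if [ -z "$errors" ]; then
    echo "No errors in $container ossec.log"
  else
    echo "Errors in $container ossec.log:"
    echo "$errors"
    exit 1
  fi
done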
.github/single-node-filebeat-check.sh
@@ -1,20 +0,0 @@
COMMAND_TO_EXECUTE="filebeat test output"

MASTER_CONTAINERS=$(docker ps --format '{{.Names}}' | grep -E 'manager')

if [ -z "$MASTER_CONTAINERS" ]; then
  echo "No containers were found with 'manager' in their name."
else
  for MASTER_CONTAINERS in $MASTER_CONTAINERS; do
    FILEBEAT_OUTPUT=$(docker exec "$MASTER_CONTAINERS" $COMMAND_TO_EXECUTE)
    FILEBEAT_STATUS=$(echo "${FILEBEAT_OUTPUT}" | grep -c OK)
    if [[ $FILEBEAT_STATUS -eq 7 ]]; then
      echo "No errors in filebeat"
      echo "${FILEBEAT_OUTPUT}"
    else
      echo "Errors in filebeat"
      echo "${FILEBEAT_OUTPUT}"
      exit 1
    fi
  done
fi
.github/single-node-log-check.sh
@@ -1,8 +0,0 @@
log=$(docker exec single-node_wazuh.manager_1 sh -c 'cat /var/ossec/logs/ossec.log' | grep -P "ERR|WARN|CRIT")
if [[ -z "$log" ]]; then
  echo "No errors in ossec.log"
else
  echo "Errors in ossec.log:"
  echo "${log}"
  exit 1
fi
.github/workflows/4_bumper_repository.yml
@@ -1,142 +0,0 @@
name: Repository bumper
run-name: Bump ${{ github.ref_name }} (${{ inputs.id }})

on:
  workflow_dispatch:
    inputs:
      version:
        description: 'Target version (e.g. 1.2.3)'
        default: ''
        required: false
        type: string
      stage:
        description: 'Version stage (e.g. alpha0)'
        default: ''
        required: false
        type: string
      tag:
        description: 'Change branches references to tag-like references (e.g. v4.12.0-alpha7)'
        default: false
        required: false
        type: boolean
      issue-link:
        description: 'Issue link in format https://github.com/wazuh/<REPO>/issues/<ISSUE-NUMBER>'
        required: true
        type: string
      id:
        description: 'Optional identifier for the run'
        required: false
        type: string

jobs:
  bump:
    name: Repository bumper
    runs-on: ubuntu-22.04
    permissions:
      contents: write
      pull-requests: write

    env:
      CI_COMMIT_AUTHOR: wazuhci
      CI_COMMIT_EMAIL: 22834044+wazuhci@users.noreply.github.com
      CI_GPG_PRIVATE_KEY: ${{ secrets.CI_WAZUHCI_GPG_PRIVATE }}
      GH_TOKEN: ${{ secrets.CI_WAZUHCI_BUMPER_TOKEN }}
      BUMP_SCRIPT_PATH: tools/repository_bumper.sh
      BUMP_LOG_PATH: tools

    steps:
      - name: Dump event payload
        run: |
          cat $GITHUB_EVENT_PATH | jq '.inputs'

      - name: Set up GPG key
        id: signing_setup
        run: |
          echo "${{ env.CI_GPG_PRIVATE_KEY }}" | gpg --batch --import
          KEY_ID=$(gpg --list-secret-keys --with-colons | awk -F: '/^sec/ {print $5; exit}')
          echo "gpg_key_id=$KEY_ID" >> $GITHUB_OUTPUT

      - name: Set up git
        run: |
          git config --global user.name "${{ env.CI_COMMIT_AUTHOR }}"
          git config --global user.email "${{ env.CI_COMMIT_EMAIL }}"
          git config --global commit.gpgsign true
          git config --global user.signingkey "${{ steps.signing_setup.outputs.gpg_key_id }}"
          echo "use-agent" >> ~/.gnupg/gpg.conf
          echo "pinentry-mode loopback" >> ~/.gnupg/gpg.conf
          echo "allow-loopback-pinentry" >> ~/.gnupg/gpg-agent.conf
          echo RELOADAGENT | gpg-connect-agent
          export DEBIAN_FRONTEND=noninteractive
          export GPG_TTY=$(tty)

      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          # Using workflow-specific GITHUB_TOKEN because currently CI_WAZUHCI_BUMPER_TOKEN
          # doesn't have all the necessary permissions
          token: ${{ env.GH_TOKEN }}

      - name: Determine branch name
        id: vars
        env:
          VERSION: ${{ inputs.version }}
          STAGE: ${{ inputs.stage }}
          TAG: ${{ inputs.tag }}
        run: |
          script_params=""
          version=${{ env.VERSION }}
          stage=${{ env.STAGE }}
          tag=${{ env.TAG }}

          # Both version and stage provided
          if [[ -n "$version" && -n "$stage" && "$tag" != "true" ]]; then
            script_params="--version ${version} --stage ${stage}"
          elif [[ -n "$version" && -n "$stage" && "$tag" == "true" ]]; then
            script_params="--version ${version} --stage ${stage} --tag ${tag}"
          fi

          issue_number=$(echo "${{ inputs.issue-link }}" | awk -F'/' '{print $NF}')
          BRANCH_NAME="enhancement/wqa${issue_number}-bump-${{ github.ref_name }}"
          echo "branch_name=$BRANCH_NAME" >> $GITHUB_OUTPUT
          echo "script_params=${script_params}" >> $GITHUB_OUTPUT

      - name: Create and switch to bump branch
        run: |
          git checkout -b ${{ steps.vars.outputs.branch_name }}

      - name: Make version bump changes
        run: |
          echo "Running bump script"
          bash ${{ env.BUMP_SCRIPT_PATH }} ${{ steps.vars.outputs.script_params }}

      - name: Commit and push changes
        run: |
          git add .
          git commit -m "feat: bump ${{ github.ref_name }}"
          git push origin ${{ steps.vars.outputs.branch_name }}

      - name: Create pull request
        id: create_pr
        run: |
          gh auth setup-git
          PR_URL=$(gh pr create \
            --title "Bump ${{ github.ref_name }} branch" \
            --body "Issue: ${{ inputs.issue-link }}" \
            --base ${{ github.ref_name }} \
            --head ${{ steps.vars.outputs.branch_name }})

          echo "Pull request created: ${PR_URL}"
          echo "pull_request_url=${PR_URL}" >> $GITHUB_OUTPUT

      - name: Merge pull request
        run: |
          # Any checks for the PR are bypassed since the branch is expected to be functional (i.e. the bump process does not introduce any bugs)
          gh pr merge "${{ steps.create_pr.outputs.pull_request_url }}" --merge --admin

      - name: Show logs
        run: |
          echo "Bump complete."
          echo "Branch: ${{ steps.vars.outputs.branch_name }}"
          echo "PR: ${{ steps.create_pr.outputs.pull_request_url }}"
          echo "Bumper scripts logs:"
          cat ${BUMP_LOG_PATH}/repository_bumper*log
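The workflow only triggers on workflow_dispatch, so it is normally started from the Actions UI or the GitHub CLI. An illustrative dispatch from the CLI; the branch, version, stage and issue number are placeholders, not values taken from the repository:

# Dispatch the repository bumper against a release branch.
# Requires an authenticated gh CLI with access to wazuh/wazuh-docker.
gh workflow run 4_bumper_repository.yml \
  --ref 4.14.0 \
  -f version=4.14.0 \
  -f stage=alpha0 \
  -f tag=false \
  -f issue-link=https://github.com/wazuh/wazuh-docker/issues/0000 \
  -f id=manual-run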
.github/workflows/Procedure_push_docker_images.yml
@@ -1,159 +0,0 @@
run-name: Launch Push Docker Images - ${{ inputs.id }}
name: Push Docker Images

on:
  workflow_dispatch:
    inputs:
      image_tag:
        description: 'Docker image tag'
        default: '4.14.0'
        required: true
      docker_reference:
        description: 'wazuh-docker reference'
        required: true
      products:
        description: 'Comma-separated list of the image names to build and push'
        default: 'wazuh-manager,wazuh-dashboard,wazuh-indexer,wazuh-agent'
        required: true
      filebeat_module_version:
        description: 'Filebeat module version'
        default: '0.4'
        required: true
      revision:
        description: 'Package revision'
        default: '1'
        required: true
      push_images:
        description: 'Push images'
        type: boolean
        default: true
        required: true
      id:
        description: "ID used to identify the workflow uniquely."
        type: string
        required: false
      dev:
        description: "Add tag suffix '-dev' to the image tag ?"
        type: boolean
        default: true
        required: false
  workflow_call:
    inputs:
      image_tag:
        description: 'Docker image tag'
        default: '4.14.0'
        required: true
        type: string
      docker_reference:
        description: 'wazuh-docker reference'
        required: false
        type: string
      products:
        description: 'Comma-separated list of the image names to build and push'
        default: 'wazuh-manager,wazuh-dashboard,wazuh-indexer,wazuh-agent'
        required: true
        type: string
      filebeat_module_version:
        description: 'Filebeat module version'
        default: '0.4'
        required: true
        type: string
      revision:
        description: 'Package revision'
        default: '1'
        required: true
        type: string
      push_images:
        description: 'Push images'
        type: boolean
        default: true
        required: true
      id:
        description: "ID used to identify the workflow uniquely."
        type: string
        required: false
      dev:
        description: "Add tag suffix '-dev' to the image tag ?"
        type: boolean
        default: false
        required: false

jobs:
  build-and-push:
    runs-on: ubuntu-22.04

    steps:
      - name: Print inputs
        run: |
          echo "---------------------------------------------"
          echo "Running Procedure_push_docker_images workflow"
          echo "---------------------------------------------"
          echo "* BRANCH: ${{ github.ref }}"
          echo "* COMMIT: ${{ github.sha }}"
          echo "---------------------------------------------"
          echo "Inputs provided:"
          echo "---------------------------------------------"
          echo "* id: ${{ inputs.id }}"
          echo "* image_tag: ${{ inputs.image_tag }}"
          echo "* docker_reference: ${{ inputs.docker_reference }}"
          echo "* products: ${{ inputs.products }}"
          echo "* filebeat_module_version: ${{ inputs.filebeat_module_version }}"
          echo "* revision: ${{ inputs.revision }}"
          echo "* push_images: ${{ inputs.push_images }}"
          echo "* dev: ${{ inputs.dev }}"
          echo "---------------------------------------------"

      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ inputs.docker_reference }}

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}

      - name: Build Wazuh images
        run: |
          IMAGE_TAG=${{ inputs.image_tag }}
          FILEBEAT_MODULE_VERSION=${{ inputs.filebeat_module_version }}
          REVISION=${{ inputs.revision }}

          if [[ "$IMAGE_TAG" == *"-"* ]]; then
            IFS='-' read -r -a tokens <<< "$IMAGE_TAG"
            if [ -z "${tokens[1]}" ]; then
              echo "Invalid image tag: $IMAGE_TAG"
              exit 1
            fi
            DEV_STAGE=${tokens[1]}
            WAZUH_VER=${tokens[0]}
            ./build-docker-images/build-images.sh -v $WAZUH_VER -r $REVISION -d $DEV_STAGE -f $FILEBEAT_MODULE_VERSION
          else
            ./build-docker-images/build-images.sh -v $IMAGE_TAG -r $REVISION -f $FILEBEAT_MODULE_VERSION
          fi

          # Save .env file (generated by build-images.sh) contents to $GITHUB_ENV
          ENV_FILE_PATH=".env"

          if [ -f $ENV_FILE_PATH ]; then
            while IFS= read -r line || [ -n "$line" ]; do
              echo "$line" >> $GITHUB_ENV
            done < $ENV_FILE_PATH
          else
            echo "The environment file $ENV_FILE_PATH does not exist!"
            exit 1
          fi

      - name: Tag and Push Wazuh images
        if: ${{ inputs.push_images }}
        run: |
          IMAGE_TAG="${{ inputs.image_tag }}$( [ "${{ inputs.dev }}" == "true" ] && echo '-dev' || true )"
          IMAGE_NAMES=${{ inputs.products }}
          IFS=',' read -r -a images <<< "$IMAGE_NAMES"
          for image in "${images[@]}"; do
            echo "Tagging and pushing wazuh/$image:${WAZUH_VERSION} to wazuh/$image:$IMAGE_TAG"
            docker tag wazuh/$image:${WAZUH_VERSION} wazuh/$image:$IMAGE_TAG
            echo "Pushing wazuh/$image:$IMAGE_TAG ..."
            docker push wazuh/$image:$IMAGE_TAG
          done
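The build step splits a pre-release tag such as 4.14.0-alpha1 into a version and a stage before calling build-images.sh. A standalone sketch of that parsing, with an illustrative tag value:

#!/usr/bin/env bash
# Split "<version>-<stage>" image tags the same way the build step above does.
IMAGE_TAG="4.14.0-alpha1"

if [[ "$IMAGE_TAG" == *"-"* ]]; then
  IFS='-' read -r -a tokens <<< "$IMAGE_TAG"
  WAZUH_VER=${tokens[0]}     # 4.14.0
  DEV_STAGE=${tokens[1]}     # alpha1
  echo "version=$WAZUH_VER stage=$DEV_STAGE"
else
  echo "version=$IMAGE_TAG (no stage suffix)"
fi

build-images.sh is then invoked with the -d option only when a stage suffix is present, exactly as in the step above.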
.github/workflows/push.yml
@@ -1,368 +0,0 @@
name: Wazuh Docker pipeline

on: [pull_request]

jobs:
  build-docker-images:
    runs-on: ubuntu-22.04
    steps:

      - name: Check out code
        uses: actions/checkout@v4

      - name: Build Wazuh images
        run: build-docker-images/build-images.sh

      - name: Create environment variables
        run: cat .env > $GITHUB_ENV

      - name: Create backup Docker images
        run: |
          mkdir -p /home/runner/work/wazuh-docker/wazuh-docker/docker-images/
          docker save wazuh/wazuh-manager:${{env.WAZUH_IMAGE_VERSION}} -o /home/runner/work/wazuh-docker/wazuh-docker/docker-images/wazuh-manager.tar
          docker save wazuh/wazuh-indexer:${{env.WAZUH_IMAGE_VERSION}} -o /home/runner/work/wazuh-docker/wazuh-docker/docker-images/wazuh-indexer.tar
          docker save wazuh/wazuh-dashboard:${{env.WAZUH_IMAGE_VERSION}} -o /home/runner/work/wazuh-docker/wazuh-docker/docker-images/wazuh-dashboard.tar
          docker save wazuh/wazuh-agent:${{env.WAZUH_IMAGE_VERSION}} -o /home/runner/work/wazuh-docker/wazuh-docker/docker-images/wazuh-agent.tar

      - name: Temporarily save Wazuh manager Docker image
        uses: actions/upload-artifact@v4
        with:
          name: docker-artifact-manager
          path: /home/runner/work/wazuh-docker/wazuh-docker/docker-images/wazuh-manager.tar
          retention-days: 1

      - name: Temporarily save Wazuh indexer Docker image
        uses: actions/upload-artifact@v4
        with:
          name: docker-artifact-indexer
          path: /home/runner/work/wazuh-docker/wazuh-docker/docker-images/wazuh-indexer.tar
          retention-days: 1

      - name: Temporarily save Wazuh dashboard Docker image
        uses: actions/upload-artifact@v4
        with:
          name: docker-artifact-dashboard
          path: /home/runner/work/wazuh-docker/wazuh-docker/docker-images/wazuh-dashboard.tar
          retention-days: 1

      - name: Temporarily save Wazuh agent Docker image
        uses: actions/upload-artifact@v4
        with:
          name: docker-artifact-agent
          path: /home/runner/work/wazuh-docker/wazuh-docker/docker-images/wazuh-agent.tar
          retention-days: 1

      - name: Install Goss
        uses: e1himself/goss-installation-action@v1.0.3
        with:
          version: v0.3.16

      - name: Execute Goss tests (wazuh-manager)
        run: dgoss run wazuh/wazuh-manager:${{env.WAZUH_IMAGE_VERSION}}
        env:
          GOSS_SLEEP: 30
          GOSS_FILE: .github/.goss.yaml

  check-single-node:
    runs-on: ubuntu-22.04
    needs: build-docker-images
    steps:

      - name: Check out code
        uses: actions/checkout@v4

      - name: Create environment variables
        run: cat .env > $GITHUB_ENV

      - name: Retrieve saved Wazuh indexer Docker image
        uses: actions/download-artifact@v4
        with:
          name: docker-artifact-indexer

      - name: Retrieve saved Wazuh manager Docker image
        uses: actions/download-artifact@v4
        with:
          name: docker-artifact-manager

      - name: Retrieve saved Wazuh dashboard Docker image
        uses: actions/download-artifact@v4
        with:
          name: docker-artifact-dashboard

      - name: Retrieve saved Wazuh agent Docker image
        uses: actions/download-artifact@v4
        with:
          name: docker-artifact-agent

      - name: Docker load
        run: |
          docker load --input ./wazuh-indexer.tar
          docker load --input ./wazuh-dashboard.tar
          docker load --input ./wazuh-manager.tar
          docker load --input ./wazuh-agent.tar

      - name: Create single node certificates
        run: docker compose -f single-node/generate-indexer-certs.yml run --rm generator

      - name: Start single node stack
        run: docker compose -f single-node/docker-compose.yml up -d

      - name: Check Wazuh indexer start
        run: |
          sleep 60
          status_green="`curl -XGET "https://0.0.0.0:9200/_cluster/health" -u admin:SecretPassword -k -s | grep green | wc -l`"
          if [[ $status_green -eq 1 ]]; then
            curl -XGET "https://0.0.0.0:9200/_cluster/health" -u admin:SecretPassword -k -s
          else
            curl -XGET "https://0.0.0.0:9200/_cluster/health" -u admin:SecretPassword -k -s
            exit 1
          fi
          status_index="`curl -XGET "https://0.0.0.0:9200/_cat/indices" -u admin:SecretPassword -k -s | wc -l`"
          status_index_green="`curl -XGET "https://0.0.0.0:9200/_cat/indices" -u admin:SecretPassword -k -s | grep "green" | wc -l`"
          if [[ $status_index_green -eq $status_index ]]; then
            curl -XGET "https://0.0.0.0:9200/_cat/indices" -u admin:SecretPassword -k -s
          else
            curl -XGET "https://0.0.0.0:9200/_cat/indices" -u admin:SecretPassword -k -s
            exit 1
          fi

      - name: Check Wazuh indexer nodes
        run: |
          nodes="`curl -XGET "https://0.0.0.0:9200/_cat/nodes" -u admin:SecretPassword -k -s | grep -E "indexer" | wc -l`"
          if [[ $nodes -eq 1 ]]; then
            echo "Wazuh indexer nodes: ${nodes}"
          else
            echo "Wazuh indexer nodes: ${nodes}"
            exit 1
          fi

      - name: Check documents into wazuh-alerts index
        run: |
          sleep 120
          docs="`curl -XGET "https://0.0.0.0:9200/wazuh-alerts*/_count" -u admin:SecretPassword -k -s | jq -r ".count"`"
          if [[ $docs -gt 0 ]]; then
            echo "wazuh-alerts index documents: ${docs}"
          else
            echo "wazuh-alerts index documents: ${docs}"
            exit 1
          fi

      - name: Check Wazuh templates
        run: |
          qty_templates="`curl -XGET "https://0.0.0.0:9200/_cat/templates" -u admin:SecretPassword -k -s | grep -P "wazuh|wazuh-agent|wazuh-statistics" | wc -l`"
          templates="`curl -XGET "https://0.0.0.0:9200/_cat/templates" -u admin:SecretPassword -k -s | grep -P "wazuh|wazuh-agent|wazuh-statistics"`"
          if [[ $qty_templates -gt 3 ]]; then
            echo "wazuh templates:"
            echo "${templates}"
          else
            echo "wazuh templates:"
            echo "${templates}"
            exit 1
          fi

      - name: Check Wazuh manager start
        run: |
          services="`curl -k -s -X GET "https://0.0.0.0:55000/manager/status?pretty=true" -H "Authorization: Bearer ${{env.TOKEN}}" | jq -r .data.affected_items | grep running | wc -l`"
          if [[ $services -gt 9 ]]; then
            echo "Wazuh Manager Services: ${services}"
            echo "OK"
          else
            echo "Wazuh Manager Services: ${services}"
            curl -k -X GET "https://0.0.0.0:55000/manager/status?pretty=true" -H "Authorization: Bearer ${{env.TOKEN}}" | jq -r .data.affected_items
            exit 1
          fi
        env:
          TOKEN: $(curl -s -u wazuh-wui:MyS3cr37P450r.*- -k -X GET "https://0.0.0.0:55000/security/user/authenticate?raw=true")

      - name: Check filebeat output
        run: ./.github/single-node-filebeat-check.sh

      - name: Check Wazuh dashboard service URL
        run: |
          status=$(curl -XGET --silent https://0.0.0.0:443/app/status -k -u admin:SecretPassword -I -s | grep -E "^HTTP" | awk '{print $2}')
          if [[ $status -eq 200 ]]; then
            echo "Wazuh dashboard status: ${status}"
          else
            echo "Wazuh dashboard status: ${status}"
            exit 1
          fi

      - name: Modify Docker endpoint into Wazuh agent docker-compose.yml file
        run: sed -i "s/<WAZUH_MANAGER_IP>/$(ip addr show docker0 | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1)/g" wazuh-agent/docker-compose.yml

      - name: Start Wazuh agent
        run: docker compose -f wazuh-agent/docker-compose.yml up -d

      - name: Check Wazuh agent enrollment
        run: |
          sleep 20
          curl -k -s -X GET "https://localhost:55000/agents?pretty=true" -H "Authorization: Bearer ${{env.TOKEN}}"
        env:
          TOKEN: $(curl -s -u wazuh-wui:MyS3cr37P450r.*- -k -X GET "https://0.0.0.0:55000/security/user/authenticate?raw=true")

      - name: Check errors in ossec.log for Wazuh manager
        run: ./.github/single-node-log-check.sh

  check-multi-node:
    runs-on: ubuntu-22.04
    needs: build-docker-images
    steps:

      - name: Check out code
        uses: actions/checkout@v4

      - name: Create environment variables
        run: cat .env > $GITHUB_ENV

      - name: free disk space
        uses: ./.github/free-disk-space

      - name: Retrieve saved Wazuh dashboard Docker image
        uses: actions/download-artifact@v4
        with:
          name: docker-artifact-dashboard

      - name: Retrieve saved Wazuh manager Docker image
        uses: actions/download-artifact@v4
        with:
          name: docker-artifact-manager

      - name: Retrieve saved Wazuh indexer Docker image
        uses: actions/download-artifact@v4
        with:
          name: docker-artifact-indexer

      - name: Retrieve saved Wazuh agent Docker image
        uses: actions/download-artifact@v4
        with:
          name: docker-artifact-agent

      - name: Docker load
        run: |
          docker load --input ./wazuh-manager.tar
          docker load --input ./wazuh-indexer.tar
          docker load --input ./wazuh-dashboard.tar
          docker load --input ./wazuh-agent.tar
          rm -rf wazuh-manager.tar wazuh-indexer.tar wazuh-dashboard.tar wazuh-agent.tar

      - name: Create multi node certificates
        run: docker compose -f multi-node/generate-indexer-certs.yml run --rm generator

      - name: Start multi node stack
        run: docker compose -f multi-node/docker-compose.yml up -d

      - name: Check Wazuh indexer start
        run: |
          until [[ `curl -XGET "https://0.0.0.0:9200/_cluster/health" -u admin:SecretPassword -k -s | grep green | wc -l` -eq 1 ]]
          do
            echo 'Waiting for Wazuh indexer start'
            free -m
            df -h
            sleep 120
          done
          status_green="`curl -XGET "https://0.0.0.0:9200/_cluster/health" -u admin:SecretPassword -k -s | grep green | wc -l`"
          if [[ $status_green -eq 1 ]]; then
            curl -XGET "https://0.0.0.0:9200/_cluster/health" -u admin:SecretPassword -k -s
          else
            curl -XGET "https://0.0.0.0:9200/_cluster/health" -u admin:SecretPassword -k -s
            exit 1
          fi
          status_index="`curl -XGET "https://0.0.0.0:9200/_cat/indices" -u admin:SecretPassword -k -s | wc -l`"
          status_index_green="`curl -XGET "https://0.0.0.0:9200/_cat/indices" -u admin:SecretPassword -k -s | grep -E "green" | wc -l`"
          if [[ $status_index_green -eq $status_index ]]; then
            curl -XGET "https://0.0.0.0:9200/_cat/indices" -u admin:SecretPassword -k -s
          else
            curl -XGET "https://0.0.0.0:9200/_cat/indices" -u admin:SecretPassword -k -s
            exit 1
          fi

      - name: Check Wazuh indexer nodes
        run: |
          nodes="`curl -XGET "https://0.0.0.0:9200/_cat/nodes" -u admin:SecretPassword -k -s | grep -E "indexer" | wc -l`"
          if [[ $nodes -eq 3 ]]; then
            echo "Wazuh indexer nodes: ${nodes}"
          else
            echo "Wazuh indexer nodes: ${nodes}"
            exit 1
          fi

      - name: Check documents into wazuh-alerts index
        run: |
          until [[ $(curl -XGET "https://0.0.0.0:9200/wazuh-alerts*/_count" -u admin:SecretPassword -k -s | jq -r ".count") -gt 0 ]]
          do
            echo 'Waiting for Wazuh indexer events'
            free -m
            df -h
            sleep 10
          done
          docs="`curl -XGET "https://0.0.0.0:9200/wazuh-alerts*/_count" -u admin:SecretPassword -k -s | jq -r ".count"`"
          if [[ $docs -gt 0 ]]; then
            echo "wazuh-alerts index documents: ${docs}"
          else
            echo "wazuh-alerts index documents: ${docs}"
            exit 1
          fi

      - name: Check Wazuh templates
        run: |
          qty_templates="`curl -XGET "https://0.0.0.0:9200/_cat/templates" -u admin:SecretPassword -k -s | grep "wazuh" | wc -l`"
          templates="`curl -XGET "https://0.0.0.0:9200/_cat/templates" -u admin:SecretPassword -k -s | grep "wazuh"`"
          if [[ $qty_templates -gt 3 ]]; then
            echo "wazuh templates:"
            echo "${templates}"
          else
            echo "wazuh templates:"
            echo "${templates}"
            exit 1
          fi

      - name: Check Wazuh manager start
        run: |
          services="`curl -k -s -X GET "https://0.0.0.0:55000/manager/status?pretty=true" -H "Authorization: Bearer ${{env.TOKEN}}" | jq -r .data.affected_items | grep running | wc -l`"
          if [[ $services -gt 10 ]]; then
            echo "Wazuh Manager Services: ${services}"
            echo "OK"
          else
            echo "Wazuh Manager Services: ${services}"
            curl -k -s -X GET "https://0.0.0.0:55000/manager/status?pretty=true" -H "Authorization: Bearer ${{env.TOKEN}}" | jq -r .data.affected_items
            exit 1
          fi
          nodes=$(curl -k -s -X GET "https://0.0.0.0:55000/cluster/nodes" -H "Authorization: Bearer ${{env.TOKEN}}" | jq -r ".data.affected_items[].name" | wc -l)
          if [[ $nodes -eq 2 ]]; then
            echo "Wazuh manager nodes: ${nodes}"
          else
            echo "Wazuh manager nodes: ${nodes}"
            exit 1
          fi
        env:
          TOKEN: $(curl -s -u wazuh-wui:MyS3cr37P450r.*- -k -X GET "https://0.0.0.0:55000/security/user/authenticate?raw=true")

      - name: Check filebeat output
        run: ./.github/multi-node-filebeat-check.sh

      - name: Check Wazuh dashboard service URL
        run: |
          status=$(curl -XGET --silent https://0.0.0.0:443/app/status -k -u admin:SecretPassword -I | grep -E "^HTTP" | awk '{print $2}')
          if [[ $status -eq 200 ]]; then
            echo "Wazuh dashboard status: ${status}"
          else
            echo "Wazuh dashboard status: ${status}"
            exit 1
          fi

      - name: Modify Docker endpoint into Wazuh agent docker-compose.yml file
        run: sed -i "s/<WAZUH_MANAGER_IP>/$(ip addr show docker0 | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1)/g" wazuh-agent/docker-compose.yml

      - name: Start Wazuh agent
        run: docker compose -f wazuh-agent/docker-compose.yml up -d

      - name: Check Wazuh agent enrollment
        run: |
          sleep 20
          curl -k -s -X GET "https://localhost:55000/agents?pretty=true" -H "Authorization: Bearer ${{env.TOKEN}}"
        env:
          TOKEN: $(curl -s -u wazuh-wui:MyS3cr37P450r.*- -k -X GET "https://0.0.0.0:55000/security/user/authenticate?raw=true")

      - name: Check errors in ossec.log for Wazuh manager
        run: ./.github/multi-node-log-check.sh
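Several steps above authenticate against the Wazuh API with the default wazuh-wui credentials and then count running manager services. A condensed local sketch of that health check, reusing the same endpoints and credentials as the workflow:

# Fetch a Wazuh API token and verify that the manager services are running,
# condensing the "Check Wazuh manager start" step.
TOKEN=$(curl -s -u wazuh-wui:MyS3cr37P450r.*- -k -X GET \
  "https://0.0.0.0:55000/security/user/authenticate?raw=true")
services=$(curl -k -s -X GET "https://0.0.0.0:55000/manager/status?pretty=true" \
  -H "Authorization: Bearer ${TOKEN}" | jq -r .data.affected_items | grep -c running)
echo "Wazuh manager services running: ${services}"
[ "$services" -gt 9 ] || exit 1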
.github/workflows/trivy-dashboard.yml
@@ -1,76 +0,0 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

name: Trivy scan Wazuh dashboard

on:
  release:
    types:
      - published
  pull_request:
    branches:
      - main
  schedule:
    - cron: '34 2 * * 1'
  workflow_dispatch:

permissions:
  contents: read

jobs:
  build:
    permissions:
      contents: read # for actions/checkout to fetch code
      security-events: write # for github/codeql-action/upload-sarif to upload SARIF results

    name: Build images and upload Trivy results
    runs-on: "ubuntu-22.04"
    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Installing dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y jq

      - name: Checkout latest tag
        run: |
          latest=$(curl -s "https://api.github.com/repos/wazuh/wazuh-docker/releases/latest" | jq -r '.tag_name')
          git fetch origin
          git checkout $latest

      - name: Build Wazuh images
        run: build-docker-images/build-images.sh

      - name: Create environment variables
        run: |
          cat .env > $GITHUB_ENV
          echo "GITHUB_REF_NAME="${GITHUB_REF_NAME%/*} >> $GITHUB_ENV

      - name: Run Trivy vulnerability scanner for Wazuh dashboard
        uses: aquasecurity/trivy-action@2a2157eb22c08c9a1fac99263430307b8d1bc7a2
        with:
          image-ref: 'wazuh/wazuh-dashboard:${{env.WAZUH_IMAGE_VERSION}}'
          format: 'template'
          template: '@/contrib/sarif.tpl'
          output: 'trivy-results-dashboard.sarif'
          severity: 'LOW,MEDIUM,CRITICAL,HIGH'

      - name: Upload Trivy scan results to GitHub Security tab
        uses: github/codeql-action/upload-sarif@v2
        with:
          sarif_file: 'trivy-results-dashboard.sarif'

      - name: Slack notification
        uses: rtCamp/action-slack-notify@v2
        env:
          SLACK_CHANNEL: cicd-monitoring
          SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
          #SLACK_ICON: https://github.com/rtCamp.png?size=48
          SLACK_MESSAGE: "Check the results: https://github.com/wazuh/wazuh-docker/security/code-scanning?query=is%3Aopen+branch%3A${{ env.GITHUB_REF_NAME }}"
          SLACK_TITLE: Wazuh docker Trivy vulnerability scan finished.
          SLACK_USERNAME: github_actions
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
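An approximate local equivalent of the scan step, printing a plain table instead of the SARIF report the workflow uploads (assumes the Trivy CLI is installed and the image was built locally; the tag is illustrative):

# Scan the locally built dashboard image with the same severity filter
# the workflow passes to trivy-action.
trivy image --severity LOW,MEDIUM,HIGH,CRITICAL wazuh/wazuh-dashboard:4.14.0

The trivy-indexer.yml and trivy-manager.yml workflows below follow the same pattern, so the same local check applies to wazuh/wazuh-indexer and wazuh/wazuh-manager.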
.github/workflows/trivy-indexer.yml
@@ -1,76 +0,0 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

name: Trivy scan Wazuh indexer

on:
  release:
    types:
      - published
  pull_request:
    branches:
      - main
  schedule:
    - cron: '34 2 * * 1'
  workflow_dispatch:

permissions:
  contents: read

jobs:
  build:
    permissions:
      contents: read # for actions/checkout to fetch code
      security-events: write # for github/codeql-action/upload-sarif to upload SARIF results

    name: Build images and upload Trivy results
    runs-on: "ubuntu-22.04"
    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Installing dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y jq

      - name: Checkout latest tag
        run: |
          latest=$(curl -s "https://api.github.com/repos/wazuh/wazuh-docker/releases/latest" | jq -r '.tag_name')
          git fetch origin
          git checkout $latest

      - name: Build Wazuh images
        run: build-docker-images/build-images.sh

      - name: Create environment variables
        run: |
          cat .env > $GITHUB_ENV
          echo "GITHUB_REF_NAME="${GITHUB_REF_NAME%/*} >> $GITHUB_ENV

      - name: Run Trivy vulnerability scanner for Wazuh indexer
        uses: aquasecurity/trivy-action@2a2157eb22c08c9a1fac99263430307b8d1bc7a2
        with:
          image-ref: 'wazuh/wazuh-indexer:${{env.WAZUH_IMAGE_VERSION}}'
          format: 'template'
          template: '@/contrib/sarif.tpl'
          output: 'trivy-results-indexer.sarif'
          severity: 'LOW,MEDIUM,CRITICAL,HIGH'

      - name: Upload Trivy scan results to GitHub Security tab
        uses: github/codeql-action/upload-sarif@v2
        with:
          sarif_file: 'trivy-results-indexer.sarif'

      - name: Slack notification
        uses: rtCamp/action-slack-notify@v2
        env:
          SLACK_CHANNEL: cicd-monitoring
          SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
          #SLACK_ICON: https://github.com/rtCamp.png?size=48
          SLACK_MESSAGE: "Check the results: https://github.com/wazuh/wazuh-docker/security/code-scanning?query=is%3Aopen+branch%3A${{ env.GITHUB_REF_NAME }}"
          SLACK_TITLE: Wazuh docker Trivy vulnerability scan finished.
          SLACK_USERNAME: github_actions
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
.github/workflows/trivy-manager.yml
@@ -1,76 +0,0 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

name: Trivy scan Wazuh manager

on:
  release:
    types:
      - published
  pull_request:
    branches:
      - main
  schedule:
    - cron: '34 2 * * 1'
  workflow_dispatch:

permissions:
  contents: read

jobs:
  build:
    permissions:
      contents: read # for actions/checkout to fetch code
      security-events: write # for github/codeql-action/upload-sarif to upload SARIF results

    name: Build images and upload Trivy results
    runs-on: "ubuntu-22.04"
    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Installing dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y jq

      - name: Checkout latest tag
        run: |
          latest=$(curl -s "https://api.github.com/repos/wazuh/wazuh-docker/releases/latest" | jq -r '.tag_name')
          git fetch origin
          git checkout $latest

      - name: Build Wazuh images
        run: build-docker-images/build-images.sh

      - name: Create environment variables
        run: |
          cat .env > $GITHUB_ENV
          echo "GITHUB_REF_NAME="${GITHUB_REF_NAME%/*} >> $GITHUB_ENV

      - name: Run Trivy vulnerability scanner for Wazuh manager
        uses: aquasecurity/trivy-action@2a2157eb22c08c9a1fac99263430307b8d1bc7a2
        with:
          image-ref: 'wazuh/wazuh-manager:${{env.WAZUH_IMAGE_VERSION}}'
          format: 'template'
          template: '@/contrib/sarif.tpl'
          output: 'trivy-results-manager.sarif'
          severity: 'LOW,MEDIUM,CRITICAL,HIGH'

      - name: Upload Trivy scan results to GitHub Security tab
        uses: github/codeql-action/upload-sarif@v2
        with:
          sarif_file: 'trivy-results-manager.sarif'

      - name: Slack notification
        uses: rtCamp/action-slack-notify@v2
        env:
          SLACK_CHANNEL: cicd-monitoring
          SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
          #SLACK_ICON: https://github.com/rtCamp.png?size=48
          SLACK_MESSAGE: "Check the results: https://github.com/wazuh/wazuh-docker/security/code-scanning?query=is%3Aopen+branch%3A${{ env.GITHUB_REF_NAME }}"
          SLACK_TITLE: Wazuh docker Trivy vulnerability scan finished.
          SLACK_USERNAME: github_actions
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
.gitignore
@@ -1,5 +0,0 @@
single-node/config/wazuh_indexer_ssl_certs/*.pem
single-node/config/wazuh_indexer_ssl_certs/*.key
multi-node/config/wazuh_indexer_ssl_certs/*.pem
multi-node/config/wazuh_indexer_ssl_certs/*.key
*.log
CHANGELOG.md
@@ -1,812 +0,0 @@
# Change Log
All notable changes to this project will be documented in this file.

## [4.14.0]

### Added

- None

### Changed

- Change filebeat install method ([#2020](https://github.com/wazuh/wazuh-docker/pull/2020))
- Remove dashboard chat setting ([#2021](https://github.com/wazuh/wazuh-docker/pull/2021))
- Rollback data source setting ([#1999](https://github.com/wazuh/wazuh-docker/pull/1999))
- Dashboard settings added ([#1998](https://github.com/wazuh/wazuh-docker/pull/1998))
- Add filebeat config file in the PERMANENT_DATA_EXCP list ([#1898](https://github.com/wazuh/wazuh-docker/pull/1898))
- Change validation of existing certs tool in S3 buckets ([#1880](https://github.com/wazuh/wazuh-docker/pull/1880))

### Fixed

- Change Wazuh indexer directory owner ([#2029](https://github.com/wazuh/wazuh-docker/pull/2029))
- Double the amount of space consumed in Wazuh Indexer ([#1953](https://github.com/wazuh/wazuh-docker/pull/1953))
- Fix config directory for opensearch_security plugin work ([#1951](https://github.com/wazuh/wazuh-docker/pull/1951))
- Update Dockerfile to copy opensearch-security files ([#1928](https://github.com/wazuh/wazuh-docker/pull/1928))

### Deleted

- None

## [4.13.1]

### Added

- None

### Changed

- None

### Fixed

- None

### Deleted

- None

## [4.13.0]

### Added

- Add opensearch_dashboard.yml parameters. ([#1985](https://github.com/wazuh/wazuh-docker/pull/1985))
- Set right ownership for malicious-ioc files on container start ([#1926](https://github.com/wazuh/wazuh-docker/pull/1926))
- Delete services statement in wazuh agent deployment. ([#1925](https://github.com/wazuh/wazuh-docker/pull/1925))
- Add permanent_data exceptions. ([#1890](https://github.com/wazuh/wazuh-docker/pull/1890))
- Integrate bumper script via GitHub action. ([#1863](https://github.com/wazuh/wazuh-docker/pull/1863))
- Add missing malicious-ioc ruleset lists ([#1870](https://github.com/wazuh/wazuh-docker/pull/1870))
- Added repository_bumper script. ([#1781](https://github.com/wazuh/wazuh-docker/pull/1781))
- Fix Warning message when migrating Docker compose v2 ([#1828](https://github.com/wazuh/wazuh-docker/pull/1828))
- Add technical documentation ([#1822](https://github.com/wazuh/wazuh-docker/pull/1822))
- Add wazuh agent test and push ([#1817](https://github.com/wazuh/wazuh-docker/pull/1817))
- Add Wazuh agent image build and deploy ([#1816](https://github.com/wazuh/wazuh-docker/pull/1816))

### Changed

- Syscollector configuration change ([#1994](https://github.com/wazuh/wazuh-docker/pull/1994))
- Modify wazuh-keystore use ([#1750](https://github.com/wazuh/wazuh-docker/pull/1750)) \- (wazuh-keystore)

### Fixed

- Add wazuh-template.json into permanent data exception ([#1968](https://github.com/wazuh/wazuh-docker/pull/1968))

### Deleted

- Remove default docker reference version from workflow ([#1761](https://github.com/wazuh/wazuh-docker/pull/1761))
- Remove 'stable' branch occurrences ([#1757](https://github.com/wazuh/wazuh-docker/pull/1757))

## [4.12.0]

### Added

- None

### Changed

- Change VERSION file format ([#1728](https://github.com/wazuh/wazuh-docker/pull/1728)) \- (VERSION file)
- Change Ubuntu version used in workflows ([#1662](https://github.com/wazuh/wazuh-docker/pull/1662)) \- (Docker workflows)

### Fixed

- Fix permanent data scripts ([#1603](https://github.com/wazuh/wazuh-docker/pull/1603))

### Deleted

- None

## [4.11.2]

### Added

- None

### Changed

- None

### Fixed

- None

### Deleted

- None

## [4.11.1]

### Added

- None

### Changed

- None

### Fixed

- None

### Deleted

- None

## [4.11.0]

### Added

- None

### Changed

- None

### Fixed

- Change the cleaning disk step ([#1663](https://github.com/wazuh/wazuh-docker/pull/1663))

### Deleted

- None

## [4.10.1]

### Added

- None

### Changed

- None

### Fixed

- None

### Deleted

- None

## [4.10.0]

### Added

- Improve the push docker images workflow ([#1551](https://github.com/wazuh/wazuh-docker/pull/1551))
- Update the Procedure push docker images workflow file ([#1524](https://github.com/wazuh/wazuh-docker/pull/1524))
- Add the push_docker_images procedure workflow file ([#1518](https://github.com/wazuh/wazuh-docker/pull/1518))

### Changed

- None

### Fixed

- Add unset capabilities. ([#1619](https://github.com/wazuh/wazuh-docker/pull/1619))
- Removed references to module enabling because they are now enabled by default. ([#1416](https://github.com/wazuh/wazuh-docker/pull/1416))

### Deleted

- None

## [4.9.2]

### Added

- Update Wazuh to version [4.9.2](https://github.com/wazuh/wazuh/blob/v4.9.2/CHANGELOG.md#v492)

## [4.9.1]

### Added

- None

### Changed

- None

### Fixed

- Fix typos into Wazuh manager entrypoint ([#1569](https://github.com/wazuh/wazuh-docker/pull/1569))

### Deleted

- None

## Wazuh Docker v4.9.0
### Added

- Update Wazuh to version [4.9.0](https://github.com/wazuh/wazuh/blob/v4.9.0/CHANGELOG.md#v490)

## Wazuh Docker v4.8.2
### Added

- Update Wazuh to version [4.8.2](https://github.com/wazuh/wazuh/blob/v4.8.2/CHANGELOG.md#v482)

## Wazuh Docker v4.8.1
### Added

- Update Wazuh to version [4.8.1](https://github.com/wazuh/wazuh/blob/v4.8.1/CHANGELOG.md#v481)

## Wazuh Docker v4.8.0
### Added

- Update Wazuh to version [4.8.0](https://github.com/wazuh/wazuh/blob/v4.8.0/CHANGELOG.md#v480)

## Wazuh Docker v4.7.5
### Added

- Update Wazuh to version [4.7.5](https://github.com/wazuh/wazuh/blob/v4.7.5/CHANGELOG.md#v475)

## Wazuh Docker v4.7.4
### Added

- Update Wazuh to version [4.7.4](https://github.com/wazuh/wazuh/blob/v4.7.4/CHANGELOG.md#v474)

## Wazuh Docker v4.7.3
### Added

- Update Wazuh to version [4.7.3](https://github.com/wazuh/wazuh/blob/v4.7.3/CHANGELOG.md#v473)

## Wazuh Docker v4.7.2
### Added

- Update Wazuh to version [4.7.2](https://github.com/wazuh/wazuh/blob/v4.7.2/CHANGELOG.md#v472)

## Wazuh Docker v4.7.1
### Added

- Update Wazuh to version [4.7.1](https://github.com/wazuh/wazuh/blob/v4.7.1/CHANGELOG.md#v471)

## Wazuh Docker v4.7.0
### Added

- Update Wazuh to version [4.7.0](https://github.com/wazuh/wazuh/blob/v4.7.0/CHANGELOG.md#v470)

## Wazuh Docker v4.6.0
### Added

- Update Wazuh to version [4.6.0](https://github.com/wazuh/wazuh/blob/v4.6.0/CHANGELOG.md#v460)

## Wazuh Docker v4.5.4
### Added

- Update Wazuh to version [4.5.4](https://github.com/wazuh/wazuh/blob/v4.5.4/CHANGELOG.md#v454)

## Wazuh Docker v4.5.3
### Added

- Update Wazuh to version [4.5.3](https://github.com/wazuh/wazuh/blob/v4.5.3/CHANGELOG.md#v453)

## Wazuh Docker v4.5.2
### Added

- Update Wazuh to version [4.5.2](https://github.com/wazuh/wazuh/blob/v4.5.2/CHANGELOG.md#v452)

## Wazuh Docker v4.5.1
### Added

- Update Wazuh to version [4.5.1](https://github.com/wazuh/wazuh/blob/v4.5.1/CHANGELOG.md#v451)

## Wazuh Docker v4.5.0
### Added

- Update Wazuh to version [4.5.0](https://github.com/wazuh/wazuh/blob/v4.5.0/CHANGELOG.md#v450)

## Wazuh Docker v4.4.5
### Added

- Update Wazuh to version [4.4.5](https://github.com/wazuh/wazuh/blob/v4.4.5/CHANGELOG.md#v445)

## Wazuh Docker v4.4.4
### Added

- Update Wazuh to version [4.4.4](https://github.com/wazuh/wazuh/blob/v4.4.4/CHANGELOG.md#v444)

## Wazuh Docker v4.4.3
### Added

- Update Wazuh to version [4.4.3](https://github.com/wazuh/wazuh/blob/v4.4.3/CHANGELOG.md#v443)

## Wazuh Docker v4.4.2
### Added

- Update Wazuh to version [4.4.2](https://github.com/wazuh/wazuh/blob/v4.4.2/CHANGELOG.md#v442)

## Wazuh Docker v4.4.1
### Added

- Update Wazuh to version [4.4.1](https://github.com/wazuh/wazuh/blob/v4.4.1/CHANGELOG.md#v441)

## Wazuh Docker v4.4.0
### Added

- Update Wazuh to version [4.4.0](https://github.com/wazuh/wazuh/blob/v4.4.0/CHANGELOG.md#v440)

## Wazuh Docker v4.3.11
### Added

- Update Wazuh to version [4.3.11](https://github.com/wazuh/wazuh/blob/v4.3.11/CHANGELOG.md#v4311)

## Wazuh Docker v4.3.10
### Added

- Update Wazuh to version [4.3.10](https://github.com/wazuh/wazuh/blob/v4.3.10/CHANGELOG.md#v4310)

## Wazuh Docker v4.3.9
### Added

- Update Wazuh to version [4.3.9](https://github.com/wazuh/wazuh/blob/v4.3.9/CHANGELOG.md#v439)

## Wazuh Docker v4.3.8
### Added

- Update Wazuh to version [4.3.8](https://github.com/wazuh/wazuh/blob/v4.3.8/CHANGELOG.md#v438)

## Wazuh Docker v4.3.7
### Added

- Update Wazuh to version [4.3.7](https://github.com/wazuh/wazuh/blob/v4.3.7/CHANGELOG.md#v437)

## Wazuh Docker v4.3.6
### Added

- Update Wazuh to version [4.3.6](https://github.com/wazuh/wazuh/blob/v4.3.6/CHANGELOG.md#v436)

## Wazuh Docker v4.3.5
### Added

- Update Wazuh to version [4.3.5](https://github.com/wazuh/wazuh/blob/v4.3.5/CHANGELOG.md#v435)

## Wazuh Docker v4.3.4
### Added

- Update Wazuh to version [4.3.4](https://github.com/wazuh/wazuh/blob/v4.3.4/CHANGELOG.md#v434)

## Wazuh Docker v4.3.3
### Added

- Update Wazuh to version [4.3.3](https://github.com/wazuh/wazuh/blob/v4.3.3/CHANGELOG.md#v433)

## Wazuh Docker v4.3.2
### Added

- Update Wazuh to version [4.3.2](https://github.com/wazuh/wazuh/blob/v4.3.2/CHANGELOG.md#v432)

## Wazuh Docker v4.3.1
### Added

- Update Wazuh to version [4.3.1](https://github.com/wazuh/wazuh/blob/v4.3.1/CHANGELOG.md#v431)

## Wazuh Docker v4.3.0
### Added

- Update Wazuh to version [4.3.0](https://github.com/wazuh/wazuh/blob/v4.3.0/CHANGELOG.md#v430)
|
||||
|
||||
## Wazuh Docker v4.2.7
|
||||
### Added
|
||||
|
||||
- Update Wazuh to version [4.2.7](https://github.com/wazuh/wazuh/blob/v4.2.7/CHANGELOG.md#v427)
|
||||
|
||||
## Wazuh Docker v4.2.6
|
||||
### Added
|
||||
|
||||
- Update Wazuh to version [4.2.6](https://github.com/wazuh/wazuh/blob/v4.2.6/CHANGELOG.md#v426)
|
||||
|
||||
## Wazuh Docker v4.2.5
|
||||
### Added
|
||||
|
||||
- Update Wazuh to version [4.2.5](https://github.com/wazuh/wazuh/blob/v4.2.5/CHANGELOG.md#v425)
|
||||
|
||||
## Wazuh Docker v4.2.4
|
||||
### Added
|
||||
|
||||
- Update Wazuh to version [4.2.4](https://github.com/wazuh/wazuh/blob/v4.2.4/CHANGELOG.md#v424)
|
||||
|
||||
## Wazuh Docker v4.2.3
|
||||
### Added
|
||||
|
||||
- Update Wazuh to version [4.2.3](https://github.com/wazuh/wazuh/blob/v4.2.3/CHANGELOG.md#v423)
|
||||
|
||||
## Wazuh Docker v4.2.2
|
||||
### Added
|
||||
|
||||
- Update Wazuh to version [4.2.2](https://github.com/wazuh/wazuh/blob/v4.2.2/CHANGELOG.md#v422)
|
||||
|
||||
## Wazuh Docker v4.2.1
|
||||
### Added
|
||||
|
||||
- Update Wazuh to version [4.2.1](https://github.com/wazuh/wazuh/blob/v4.2.1/CHANGELOG.md#v421)
|
||||
|
||||
## Wazuh Docker v4.2.0
|
||||
### Added
|
||||
|
||||
- Update Wazuh to version [4.2.0](https://github.com/wazuh/wazuh/blob/v4.2.0/CHANGELOG.md#v420)
|
||||
|
||||
## Wazuh Docker v4.1.5
|
||||
### Added
|
||||
|
||||
- Update Wazuh to version [4.1.5](https://github.com/wazuh/wazuh/blob/v4.1.5/CHANGELOG.md#v415)
|
||||
- Update ODFE compatibility to version 1.13.2
|
||||
|
||||
## Wazuh Docker v4.1.4
|
||||
### Added
|
||||
|
||||
- Update Wazuh to version [4.1.4](https://github.com/wazuh/wazuh/blob/v4.1.4/CHANGELOG.md#v414)
|
||||
|
||||
## Wazuh Docker v4.1.3
|
||||
### Added
|
||||
|
||||
- Update Wazuh to version [4.1.3](https://github.com/wazuh/wazuh/blob/v4.1.3/CHANGELOG.md#v413)
|
||||
|
||||
## Wazuh Docker v4.1.2
|
||||
### Added
|
||||
|
||||
- Update Wazuh to version [4.1.2](https://github.com/wazuh/wazuh/blob/v4.1.2/CHANGELOG.md#v412)
|
||||
|
||||
## Wazuh Docker v4.1.1
|
||||
### Added
|
||||
|
||||
- Update Wazuh to version [4.1.1](https://github.com/wazuh/wazuh/blob/v4.1.1/CHANGELOG.md#v411)
|
||||
|
||||
## Wazuh Docker v4.1.0
|
||||
### Added
|
||||
|
||||
- Update Wazuh to version [4.1.0](https://github.com/wazuh/wazuh/blob/v4.1.0/CHANGELOG.md#v410)
|
||||
- Update ODFE compatibility to version 1.12.0
|
||||
- Add support for Elasticsearch (xpack) images once again (7.10.2) ([@xr09](https://github.com/xr09)) [#409](https://github.com/wazuh/wazuh-docker/pull/409)
|
||||
- Re-enable entrypoint scripts ([@xr09](https://github.com/xr09)) [#435](https://github.com/wazuh/wazuh-docker/pull/435)
|
||||
- Add Goss binary for healthchecks ([@xr09](https://github.com/xr09)) [#441](https://github.com/wazuh/wazuh-docker/pull/441)
|
||||
- Update s6-overlay to latest version
|
||||
|
||||
## Wazuh Docker v4.0.4_1.11.0
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version [4.0.4](https://github.com/wazuh/wazuh/blob/v4.0.4/CHANGELOG.md#v404)
|
||||
|
||||
|
||||
## Wazuh Docker v4.0.3_1.11.0
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 4.0.3
|
||||
|
||||
|
||||
## Wazuh Docker v4.0.2_1.11.0
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 4.0.2
|
||||
|
||||
## Wazuh Docker v4.0.1_1.11.0
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 4.0.1
|
||||
- Opendistro 1.11.0 compatibility
|
||||
- Re-enabled dumping ossec.log to stdout
|
||||
|
||||
## Wazuh Docker v4.0.0_1.10.1
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 4.0.0
|
||||
- Updating Wazuh cluster key dynamically ([@1stOfHisGame](https://github.com/1stOfHisGame)) [#393](https://github.com/wazuh/wazuh-docker/pull/393)
|
||||
- Switched to CentOS 7 for base image ([@xr09](https://github.com/xr09)) [#259](https://github.com/wazuh/wazuh-docker/issues/259)
|
||||
- Using s6-overlay for process management ([@xr09](https://github.com/xr09)) [#274](https://github.com/wazuh/wazuh-docker/issues/274)
|
||||
- Allow the creation of custom API users ([@xr09](https://github.com/xr09)) [#395](https://github.com/wazuh/wazuh-docker/issues/395)
|
||||
- OpenDistro support ([@xr09](https://github.com/xr09)) [#373](https://github.com/wazuh/wazuh-docker/pull/373)
|
||||
|
||||
|
||||
### Changed
|
||||
|
||||
- Removal of Elastic images
|
||||
|
||||
|
||||
## Wazuh Docker v3.13.2_7.9.1
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.13.2_7.9.1
|
||||
- Add CLUSTER_NETWORK_HOST environment variable ([@jfut](https://github.com/jfut)) [#372](https://github.com/wazuh/wazuh-docker/pull/372)
|
||||
|
||||
### Fixed
|
||||
|
||||
- Too many redirects when running on port 80 ([@chowmean](https://github.com/chowmean)) [#377](https://github.com/wazuh/wazuh-docker/pull/377)
|
||||
- Move Filebeat installation to build stage ([@xr09](https://github.com/xr09)) [#378](https://github.com/wazuh/wazuh-docker/pull/378)
|
||||
|
||||
|
||||
## Wazuh Docker v3.13.1_7.8.0
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.13.1_7.8.0
|
||||
|
||||
|
||||
## Wazuh Docker v3.13.0_7.7.1
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.13.0_7.7.1
|
||||
|
||||
### Fixed
|
||||
|
||||
- Save agentless state ([@xr09](https://github.com/xr09)) [#350](https://github.com/wazuh/wazuh-docker/pull/350)
|
||||
- Use HTTP credentials for service check when required ([@xr09](https://github.com/xr09)) [#356](https://github.com/wazuh/wazuh-docker/pull/356)
|
||||
|
||||
## Wazuh Docker v3.12.3_7.6.2
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.12.3_7.6.2
|
||||
|
||||
|
||||
## Wazuh Docker v3.12.2_7.6.2
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.12.2_7.6.2
|
||||
|
||||
## Wazuh Docker v3.12.1_7.6.2
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.12.1_7.6.2
|
||||
|
||||
### Fixed
|
||||
|
||||
- Agent timestamp not being properly saved ([@xr09](https://github.com/xr09)) [#323](https://github.com/wazuh/wazuh-docker/pull/323)
|
||||
|
||||
|
||||
## Wazuh Docker v3.12.0_7.6.1
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.12.0_7.6.1
|
||||
|
||||
|
||||
## Wazuh Docker v3.11.4_7.6.1
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.11.4_7.6.1
|
||||
|
||||
- Enable HTTP v2 on nginx ([@xr09](https://github.com/xr09)) [#308](https://github.com/wazuh/wazuh-docker/pull/308)
|
||||
|
||||
### Fixed
|
||||
|
||||
- Updated NGINX config syntax ([@xr09](https://github.com/xr09)) [#303](https://github.com/wazuh/wazuh-docker/pull/303)
|
||||
|
||||
|
||||
## Wazuh Docker v3.11.3_7.5.2
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.11.3_7.5.2
|
||||
|
||||
## Wazuh Docker v3.11.2_7.5.1
|
||||
|
||||
### Added
|
||||
|
||||
- Bumped Node.js to version 10 ([@xr09](https://github.com/xr09)) [#8615cd4](https://github.com/wazuh/wazuh-docker/commit/8615cd4d2152601e55becc7c3675360938e74b6a)
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fix S3 Plugin ([@AnthonySendra](https://github.com/AnthonySendra)) [#293](https://github.com/wazuh/wazuh-docker/pull/293)
|
||||
|
||||
## Wazuh Docker v3.11.1_7.5.1
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.11.1_7.5.1
|
||||
- Filebeat configuration file updated to latest version ([@manuasir](https://github.com/manuasir)) [#271](https://github.com/wazuh/wazuh-docker/pull/271)
|
||||
- Allow using the hostname as node_name for managers ([@JPLachance](https://github.com/JPLachance)) [#261](https://github.com/wazuh/wazuh-docker/pull/261)
|
||||
|
||||
## Wazuh Docker v3.11.0_7.5.1
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.11.0_7.5.1
|
||||
|
||||
## Wazuh Docker v3.10.2_7.5.0
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.10.2_7.5.0
|
||||
|
||||
## Wazuh Docker v3.10.2_7.3.2
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.10.2_7.3.2
|
||||
|
||||
## Wazuh Docker v3.10.0_7.3.2
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.10.0_7.3.2
|
||||
|
||||
## Wazuh Docker v3.9.5_7.2.1
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.9.5_7.2.1
|
||||
|
||||
## Wazuh Docker v3.9.4_7.2.0
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.9.4_7.2.0
|
||||
- Implemented Wazuh Filebeat Module ([jm404](https://www.github.com/jm404)) [#2a77c6a](https://github.com/wazuh/wazuh-docker/commit/2a77c6a6e6bf78f2492adeedbade7a507d9974b2)
|
||||
|
||||
## Wazuh Docker v3.9.3_7.2.0
|
||||
|
||||
### Fixed
|
||||
- Wazuh-docker reinserts cluster settings after resuming containers ([@manuasir](https://github.com/manuasir)) [#213](https://github.com/wazuh/wazuh-docker/pull/213)
|
||||
|
||||
## Wazuh Docker v3.9.2_7.1.1
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.9.2_7.1.1
|
||||
|
||||
## Wazuh Docker v3.9.2_6.8.0
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.9.2_6.8.0
|
||||
|
||||
## Wazuh Docker v3.9.1_7.1.0
|
||||
|
||||
### Added
|
||||
|
||||
- Support for Elastic v7.1.0
|
||||
- New environment variables for Kibana ([@manuasir](https://github.com/manuasir)) [#22ad43](https://github.com/wazuh/wazuh-docker/commit/22ad4360f548e54bb0c5e929f8c84a186ad2ab88)
|
||||
|
||||
## Wazuh Docker v3.9.1_6.8.0
|
||||
|
||||
### Added
|
||||
|
||||
- Update to Wazuh version 3.9.1_6.8.0 ([#181](https://github.com/wazuh/wazuh-docker/pull/181))
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fixed `ELASTICSEARCH_KIBANA_IP` environment variable ([@manuasir](https://github.com/manuasir)) ([#181](https://github.com/wazuh/wazuh-docker/pull/181))
|
||||
|
||||
## Wazuh Docker v3.9.0_6.7.2
|
||||
|
||||
### Changed
|
||||
|
||||
- Update Elastic Stack version to 6.7.2.
|
||||
|
||||
## Wazuh Docker v3.9.0_6.7.1
|
||||
|
||||
### Added
|
||||
|
||||
- Support for xPACK authorized requests ([@manuasir](https://github.com/manuasir)) ([#119](https://github.com/wazuh/wazuh-docker/pull/119))
|
||||
- Add Elasticsearch cluster configuration ([@SitoRBJ](https://github.com/SitoRBJ)). ([#146](https://github.com/wazuh/wazuh-docker/pull/146))
|
||||
- Add Elasticsearch cluster configuration ([@Phandora](https://github.com/Phandora)) ([#140](https://github.com/wazuh/wazuh-docker/pull/140))
|
||||
- Setting Nginx to support several user/passwords in Kibana ([@toniMR](https://github.com/toniMR)) ([#136](https://github.com/wazuh/wazuh-docker/pull/136))
|
||||
|
||||
|
||||
### Changed
|
||||
|
||||
- Use LS_JAVA_OPTS instead of old LS_HEAP_SIZE ([@ruffy91](https://github.com/ruffy91)) ([#139](https://github.com/wazuh/wazuh-docker/pull/139))
|
||||
- Changing the original Wazuh docker image to allow adding code in the entrypoint ([@Phandora](https://github.com/phandora)) ([#151](https://github.com/wazuh/wazuh-docker/pull/151))
|
||||
|
||||
### Removed
|
||||
|
||||
- Removing files from Wazuh image ([@Phandora](https://github.com/phandora)) ([#153](https://github.com/wazuh/wazuh-docker/pull/153))
|
||||
|
||||
## Wazuh Docker v3.8.2_6.7.0
|
||||
|
||||
### Changed
|
||||
|
||||
- Update Elastic Stack version to 6.7.0. ([#144](https://github.com/wazuh/wazuh-docker/pull/144))
|
||||
|
||||
## Wazuh Docker v3.8.2_6.6.2
|
||||
|
||||
### Changed
|
||||
|
||||
- Update Elastic Stack version to 6.6.2. ([#130](https://github.com/wazuh/wazuh-docker/pull/130))
|
||||
|
||||
## Wazuh Docker v3.8.2_6.6.1
|
||||
|
||||
### Changed
|
||||
|
||||
- Update Elastic Stack version to 6.6.1. ([#129](https://github.com/wazuh/wazuh-docker/pull/129))
|
||||
|
||||
## Wazuh Docker v3.8.2_6.5.4
|
||||
|
||||
### Added
|
||||
|
||||
- Add Wazuh-Elasticsearch. ([#106](https://github.com/wazuh/wazuh-docker/pull/106))
|
||||
- Store Filebeat _/var/lib/filebeat/registry._ ([#109](https://github.com/wazuh/wazuh-docker/pull/109))
|
||||
- Adding the option to disable some xpack features. ([#111](https://github.com/wazuh/wazuh-docker/pull/111))
|
||||
- Wazuh-Kibana customizable at plugin level. ([#117](https://github.com/wazuh/wazuh-docker/pull/117))
|
||||
- Adding env variables for alerts data flow. ([#118](https://github.com/wazuh/wazuh-docker/pull/118))
|
||||
- New Logstash entrypoint added. ([#135](https://github.com/wazuh/wazuh-docker/pull/135/files))
|
||||
- Welcome screen management. ([#133](https://github.com/wazuh/wazuh-docker/pull/133))
|
||||
|
||||
### Changed
|
||||
|
||||
- Update to Wazuh version 3.8.2. ([#105](https://github.com/wazuh/wazuh-docker/pull/105))
|
||||
|
||||
### Removed
|
||||
|
||||
- Remove alerts created in build time. ([#137](https://github.com/wazuh/wazuh-docker/pull/137))
|
||||
|
||||
|
||||
## Wazuh Docker v3.8.1_6.5.4
|
||||
|
||||
### Changed
|
||||
- Update to Wazuh version 3.8.1. ([#102](https://github.com/wazuh/wazuh-docker/pull/102))
|
||||
|
||||
## Wazuh Docker v3.8.0_6.5.4
|
||||
|
||||
### Changed
|
||||
|
||||
- Upgrade version 3.8.0_6.5.4. ([#97](https://github.com/wazuh/wazuh-docker/pull/97))
|
||||
|
||||
### Removed
|
||||
|
||||
- Remove cluster.py work around. ([#99](https://github.com/wazuh/wazuh-docker/pull/99))
|
||||
|
||||
## Wazuh Docker v3.7.2_6.5.4
|
||||
|
||||
### Added
|
||||
|
||||
- Improvements to Kibana settings added. ([#91](https://github.com/wazuh/wazuh-docker/pull/91))
|
||||
- Add Kibana environmental variables for Wazuh APP config.yml. ([#89](https://github.com/wazuh/wazuh-docker/pull/89))
|
||||
|
||||
### Changed
|
||||
|
||||
- Update Elastic Stack version to 6.5.4. ([#82](https://github.com/wazuh/wazuh-docker/pull/82))
|
||||
- Add env credentials for nginx. ([#86](https://github.com/wazuh/wazuh-docker/pull/86))
|
||||
- Improve filebeat configuration ([#88](https://github.com/wazuh/wazuh-docker/pull/88))
|
||||
|
||||
### Fixed
|
||||
|
||||
- Temporary fix for Wazuh cluster master node in Kubernetes. ([#84](https://github.com/wazuh/wazuh-docker/pull/84))
|
||||
|
||||
## Wazuh Docker v3.7.2_6.5.3
|
||||
|
||||
### Changed
|
||||
|
||||
- Erasing temporary fix for AWS integration. ([#81](https://github.com/wazuh/wazuh-docker/pull/81))
|
||||
|
||||
### Fixed
|
||||
|
||||
- Upgrading errors due to wrong files. ([#80](https://github.com/wazuh/wazuh-docker/pull/80))
|
||||
|
||||
|
||||
## Wazuh Docker v3.7.0_6.5.0
|
||||
|
||||
### Changed
|
||||
|
||||
- Adapt to Elastic stack 6.5.0.
|
||||
|
||||
## Wazuh Docker v3.7.0_6.4.3
|
||||
|
||||
### Added
|
||||
|
||||
- Allow custom scripts or commands before service start ([#58](https://github.com/wazuh/wazuh-docker/pull/58))
|
||||
- Added description for wazuh-nginx ([#59](https://github.com/wazuh/wazuh-docker/pull/59))
|
||||
- Added license file to match https://github.com/wazuh/wazuh LICENSE ([#60](https://github.com/wazuh/wazuh-docker/pull/60))
|
||||
- Added SMTP packages ([#67](https://github.com/wazuh/wazuh-docker/pull/67))
|
||||
|
||||
### Changed
|
||||
|
||||
- Increased proxy buffer for NGINX Kibana ([#51](https://github.com/wazuh/wazuh-docker/pull/51))
|
||||
- Updated logstash config to remove deprecation warnings ([#55](https://github.com/wazuh/wazuh-docker/pull/55))
|
||||
- Set ossec user's home path ([#61](https://github.com/wazuh/wazuh-docker/pull/61))
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fixed a bug that prevented the API from starting when the Wazuh manager was updated. Changed the files that are stored in the volume. ([#65](https://github.com/wazuh/wazuh-docker/pull/65))
|
||||
- Fixed script reference ([#62](https://github.com/wazuh/wazuh-docker/pull/62/files))
|
||||
|
||||
## Wazuh Docker v3.6.1_6.4.3
|
||||
|
||||
Wazuh-Docker starting point.
|
475
LICENSE
475
LICENSE
@@ -1,475 +0,0 @@
|
||||
|
||||
Portions Copyright (C) 2017, Wazuh Inc.
|
||||
Based on work Copyright (C) 2003 - 2013 Trend Micro, Inc.
|
||||
|
||||
This program is a free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License (version 2) as
|
||||
published by the FSF - Free Software Foundation.
|
||||
|
||||
In addition, certain source files in this program permit linking with the
|
||||
OpenSSL library (http://www.openssl.org), which otherwise wouldn't be allowed
|
||||
under the GPL. For purposes of identifying OpenSSL, most source files giving
|
||||
this permission limit it to versions of OpenSSL having a license identical to
|
||||
that listed in this file (see section "OpenSSL LICENSE" below). It is not
|
||||
necessary for the copyright years to match between this file and the OpenSSL
|
||||
version in question. However, note that because this file is an extension of
|
||||
the license statements of these source files, this file may not be changed
|
||||
except with permission from all copyright holders of source files in this
|
||||
program which reference this file.
|
||||
|
||||
Note that this license applies to the source code, as well as
|
||||
decoders, rules and any other data file included with OSSEC (unless
|
||||
otherwise specified).
|
||||
|
||||
For the purpose of this license, we consider an application to constitute a
|
||||
"derivative work" or a work based on this program if it does any of the
|
||||
following (list not exclusive):
|
||||
|
||||
* Integrates source code/data files from OSSEC.
|
||||
* Includes OSSEC copyrighted material.
|
||||
* Includes/integrates OSSEC into a proprietary executable installer.
|
||||
* Links to a library or executes a program that does any of the above.
|
||||
|
||||
This list is not exclusive, but just a clarification of our interpretation
|
||||
of derived works. These restrictions only apply if you actually redistribute
|
||||
OSSEC (or parts of it).
|
||||
|
||||
We don't consider these to be added restrictions on top of the GPL,
|
||||
but just a clarification of how we interpret "derived works" as it
|
||||
applies to OSSEC. This is similar to the way Linus Torvalds has
|
||||
announced his interpretation of how "derived works" applies to Linux kernel
|
||||
modules. Our interpretation refers only to OSSEC - we don't speak
|
||||
for any other GPL products.
|
||||
|
||||
* As a special exception, the copyright holders give
|
||||
* permission to link the code of portions of this program with the
|
||||
* OpenSSL library under certain conditions as described in each
|
||||
* individual source file, and distribute linked combinations
|
||||
* including the two.
|
||||
* You must obey the GNU General Public License in all respects
|
||||
* for all of the code used other than OpenSSL. If you modify
|
||||
* file(s) with this exception, you may extend this exception to your
|
||||
* version of the file(s), but you are not obligated to do so. If you
|
||||
* do not wish to do so, delete this exception statement from your
|
||||
* version. If you delete this exception statement from all source
|
||||
* files in the program, then also delete it here.
|
||||
|
||||
OSSEC HIDS is distributed in the hope that it will be useful, but WITHOUT
|
||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
FITNESS FOR A PARTICULAR PURPOSE.
|
||||
See the GNU General Public License Version 2 below for more details.
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 2, June 1991
|
||||
|
||||
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The licenses for most software are designed to take away your
|
||||
freedom to share and change it. By contrast, the GNU General Public
|
||||
License is intended to guarantee your freedom to share and change free
|
||||
software--to make sure the software is free for all its users. This
|
||||
General Public License applies to most of the Free Software
|
||||
Foundation's software and to any other program whose authors commit to
|
||||
using it. (Some other Free Software Foundation software is covered by
|
||||
the GNU Lesser General Public License instead.) You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
this service if you wish), that you receive source code or can get it
|
||||
if you want it, that you can change the software or use pieces of it
|
||||
in new free programs; and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to make restrictions that forbid
|
||||
anyone to deny you these rights or to ask you to surrender the rights.
|
||||
These restrictions translate to certain responsibilities for you if you
|
||||
distribute copies of the software, or if you modify it.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must give the recipients all the rights that
|
||||
you have. You must make sure that they, too, receive or can get the
|
||||
source code. And you must show them these terms so they know their
|
||||
rights.
|
||||
|
||||
We protect your rights with two steps: (1) copyright the software, and
|
||||
(2) offer you this license which gives you legal permission to copy,
|
||||
distribute and/or modify the software.
|
||||
|
||||
Also, for each author's protection and ours, we want to make certain
|
||||
that everyone understands that there is no warranty for this free
|
||||
software. If the software is modified by someone else and passed on, we
|
||||
want its recipients to know that what they have is not the original, so
|
||||
that any problems introduced by others will not reflect on the original
|
||||
authors' reputations.
|
||||
|
||||
Finally, any free program is threatened constantly by software
|
||||
patents. We wish to avoid the danger that redistributors of a free
|
||||
program will individually obtain patent licenses, in effect making the
|
||||
program proprietary. To prevent this, we have made it clear that any
|
||||
patent must be licensed for everyone's free use or not licensed at all.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
0. This License applies to any program or other work which contains
|
||||
a notice placed by the copyright holder saying it may be distributed
|
||||
under the terms of this General Public License. The "Program", below,
|
||||
refers to any such program or work, and a "work based on the Program"
|
||||
means either the Program or any derivative work under copyright law:
|
||||
that is to say, a work containing the Program or a portion of it,
|
||||
either verbatim or with modifications and/or translated into another
|
||||
language. (Hereinafter, translation is included without limitation in
|
||||
the term "modification".) Each licensee is addressed as "you".
|
||||
|
||||
Activities other than copying, distribution and modification are not
|
||||
covered by this License; they are outside its scope. The act of
|
||||
running the Program is not restricted, and the output from the Program
|
||||
is covered only if its contents constitute a work based on the
|
||||
Program (independent of having been made by running the Program).
|
||||
Whether that is true depends on what the Program does.
|
||||
|
||||
1. You may copy and distribute verbatim copies of the Program's
|
||||
source code as you receive it, in any medium, provided that you
|
||||
conspicuously and appropriately publish on each copy an appropriate
|
||||
copyright notice and disclaimer of warranty; keep intact all the
|
||||
notices that refer to this License and to the absence of any warranty;
|
||||
and give any other recipients of the Program a copy of this License
|
||||
along with the Program.
|
||||
|
||||
You may charge a fee for the physical act of transferring a copy, and
|
||||
you may at your option offer warranty protection in exchange for a fee.
|
||||
|
||||
2. You may modify your copy or copies of the Program or any portion
|
||||
of it, thus forming a work based on the Program, and copy and
|
||||
distribute such modifications or work under the terms of Section 1
|
||||
above, provided that you also meet all of these conditions:
|
||||
|
||||
a) You must cause the modified files to carry prominent notices
|
||||
stating that you changed the files and the date of any change.
|
||||
|
||||
b) You must cause any work that you distribute or publish, that in
|
||||
whole or in part contains or is derived from the Program or any
|
||||
part thereof, to be licensed as a whole at no charge to all third
|
||||
parties under the terms of this License.
|
||||
|
||||
c) If the modified program normally reads commands interactively
|
||||
when run, you must cause it, when started running for such
|
||||
interactive use in the most ordinary way, to print or display an
|
||||
announcement including an appropriate copyright notice and a
|
||||
notice that there is no warranty (or else, saying that you provide
|
||||
a warranty) and that users may redistribute the program under
|
||||
these conditions, and telling the user how to view a copy of this
|
||||
License. (Exception: if the Program itself is interactive but
|
||||
does not normally print such an announcement, your work based on
|
||||
the Program is not required to print an announcement.)
|
||||
|
||||
These requirements apply to the modified work as a whole. If
|
||||
identifiable sections of that work are not derived from the Program,
|
||||
and can be reasonably considered independent and separate works in
|
||||
themselves, then this License, and its terms, do not apply to those
|
||||
sections when you distribute them as separate works. But when you
|
||||
distribute the same sections as part of a whole which is a work based
|
||||
on the Program, the distribution of the whole must be on the terms of
|
||||
this License, whose permissions for other licensees extend to the
|
||||
entire whole, and thus to each and every part regardless of who wrote it.
|
||||
|
||||
Thus, it is not the intent of this section to claim rights or contest
|
||||
your rights to work written entirely by you; rather, the intent is to
|
||||
exercise the right to control the distribution of derivative or
|
||||
collective works based on the Program.
|
||||
|
||||
In addition, mere aggregation of another work not based on the Program
|
||||
with the Program (or with a work based on the Program) on a volume of
|
||||
a storage or distribution medium does not bring the other work under
|
||||
the scope of this License.
|
||||
|
||||
3. You may copy and distribute the Program (or a work based on it,
|
||||
under Section 2) in object code or executable form under the terms of
|
||||
Sections 1 and 2 above provided that you also do one of the following:
|
||||
|
||||
a) Accompany it with the complete corresponding machine-readable
|
||||
source code, which must be distributed under the terms of Sections
|
||||
1 and 2 above on a medium customarily used for software interchange; or,
|
||||
|
||||
b) Accompany it with a written offer, valid for at least three
|
||||
years, to give any third party, for a charge no more than your
|
||||
cost of physically performing source distribution, a complete
|
||||
machine-readable copy of the corresponding source code, to be
|
||||
distributed under the terms of Sections 1 and 2 above on a medium
|
||||
customarily used for software interchange; or,
|
||||
|
||||
c) Accompany it with the information you received as to the offer
|
||||
to distribute corresponding source code. (This alternative is
|
||||
allowed only for noncommercial distribution and only if you
|
||||
received the program in object code or executable form with such
|
||||
an offer, in accord with Subsection b above.)
|
||||
|
||||
The source code for a work means the preferred form of the work for
|
||||
making modifications to it. For an executable work, complete source
|
||||
code means all the source code for all modules it contains, plus any
|
||||
associated interface definition files, plus the scripts used to
|
||||
control compilation and installation of the executable. However, as a
|
||||
special exception, the source code distributed need not include
|
||||
anything that is normally distributed (in either source or binary
|
||||
form) with the major components (compiler, kernel, and so on) of the
|
||||
operating system on which the executable runs, unless that component
|
||||
itself accompanies the executable.
|
||||
|
||||
If distribution of executable or object code is made by offering
|
||||
access to copy from a designated place, then offering equivalent
|
||||
access to copy the source code from the same place counts as
|
||||
distribution of the source code, even though third parties are not
|
||||
compelled to copy the source along with the object code.
|
||||
|
||||
4. You may not copy, modify, sublicense, or distribute the Program
|
||||
except as expressly provided under this License. Any attempt
|
||||
otherwise to copy, modify, sublicense or distribute the Program is
|
||||
void, and will automatically terminate your rights under this License.
|
||||
However, parties who have received copies, or rights, from you under
|
||||
this License will not have their licenses terminated so long as such
|
||||
parties remain in full compliance.
|
||||
|
||||
5. You are not required to accept this License, since you have not
|
||||
signed it. However, nothing else grants you permission to modify or
|
||||
distribute the Program or its derivative works. These actions are
|
||||
prohibited by law if you do not accept this License. Therefore, by
|
||||
modifying or distributing the Program (or any work based on the
|
||||
Program), you indicate your acceptance of this License to do so, and
|
||||
all its terms and conditions for copying, distributing or modifying
|
||||
the Program or works based on it.
|
||||
|
||||
6. Each time you redistribute the Program (or any work based on the
|
||||
Program), the recipient automatically receives a license from the
|
||||
original licensor to copy, distribute or modify the Program subject to
|
||||
these terms and conditions. You may not impose any further
|
||||
restrictions on the recipients' exercise of the rights granted herein.
|
||||
You are not responsible for enforcing compliance by third parties to
|
||||
this License.
|
||||
|
||||
7. If, as a consequence of a court judgment or allegation of patent
|
||||
infringement or for any other reason (not limited to patent issues),
|
||||
conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot
|
||||
distribute so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you
|
||||
may not distribute the Program at all. For example, if a patent
|
||||
license would not permit royalty-free redistribution of the Program by
|
||||
all those who receive copies directly or indirectly through you, then
|
||||
the only way you could satisfy both it and this License would be to
|
||||
refrain entirely from distribution of the Program.
|
||||
|
||||
If any portion of this section is held invalid or unenforceable under
|
||||
any particular circumstance, the balance of the section is intended to
|
||||
apply and the section as a whole is intended to apply in other
|
||||
circumstances.
|
||||
|
||||
It is not the purpose of this section to induce you to infringe any
|
||||
patents or other property right claims or to contest validity of any
|
||||
such claims; this section has the sole purpose of protecting the
|
||||
integrity of the free software distribution system, which is
|
||||
implemented by public license practices. Many people have made
|
||||
generous contributions to the wide range of software distributed
|
||||
through that system in reliance on consistent application of that
|
||||
system; it is up to the author/donor to decide if he or she is willing
|
||||
to distribute software through any other system and a licensee cannot
|
||||
impose that choice.
|
||||
|
||||
This section is intended to make thoroughly clear what is believed to
|
||||
be a consequence of the rest of this License.
|
||||
|
||||
8. If the distribution and/or use of the Program is restricted in
|
||||
certain countries either by patents or by copyrighted interfaces, the
|
||||
original copyright holder who places the Program under this License
|
||||
may add an explicit geographical distribution limitation excluding
|
||||
those countries, so that distribution is permitted only in or among
|
||||
countries not thus excluded. In such case, this License incorporates
|
||||
the limitation as if written in the body of this License.
|
||||
|
||||
9. The Free Software Foundation may publish revised and/or new versions
|
||||
of the General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the Program
|
||||
specifies a version number of this License which applies to it and "any
|
||||
later version", you have the option of following the terms and conditions
|
||||
either of that version or of any later version published by the Free
|
||||
Software Foundation. If the Program does not specify a version number of
|
||||
this License, you may choose any version ever published by the Free Software
|
||||
Foundation.
|
||||
|
||||
10. If you wish to incorporate parts of the Program into other free
|
||||
programs whose distribution conditions are different, write to the author
|
||||
to ask for permission. For software which is copyrighted by the Free
|
||||
Software Foundation, write to the Free Software Foundation; we sometimes
|
||||
make exceptions for this. Our decision will be guided by the two goals
|
||||
of preserving the free status of all derivatives of our free software and
|
||||
of promoting the sharing and reuse of software generally.
|
||||
|
||||
NO WARRANTY
|
||||
|
||||
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
|
||||
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
|
||||
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
|
||||
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
|
||||
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
|
||||
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
|
||||
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
|
||||
REPAIR OR CORRECTION.
|
||||
|
||||
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
|
||||
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
|
||||
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
|
||||
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
|
||||
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
|
||||
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
|
||||
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGES.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
|
||||
OpenSSL License
|
||||
---------------
|
||||
|
||||
LICENSE ISSUES
|
||||
==============
|
||||
|
||||
The OpenSSL toolkit stays under a dual license, i.e. both the conditions of
|
||||
the OpenSSL License and the original SSLeay license apply to the toolkit.
|
||||
See below for the actual license texts. Actually both licenses are BSD-style
|
||||
Open Source licenses. In case of any license issues related to OpenSSL
|
||||
please contact openssl-core@openssl.org.
|
||||
|
||||
OpenSSL License
|
||||
---------------
|
||||
|
||||
/* ====================================================================
|
||||
* Copyright (c) 1998-2001 The OpenSSL Project. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
*
|
||||
* 3. All advertising materials mentioning features or use of this
|
||||
* software must display the following acknowledgment:
|
||||
* "This product includes software developed by the OpenSSL Project
|
||||
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
|
||||
*
|
||||
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
|
||||
* endorse or promote products derived from this software without
|
||||
* prior written permission. For written permission, please contact
|
||||
* openssl-core@openssl.org.
|
||||
*
|
||||
* 5. Products derived from this software may not be called "OpenSSL"
|
||||
* nor may "OpenSSL" appear in their names without prior written
|
||||
* permission of the OpenSSL Project.
|
||||
*
|
||||
* 6. Redistributions of any form whatsoever must retain the following
|
||||
* acknowledgment:
|
||||
* "This product includes software developed by the OpenSSL Project
|
||||
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
|
||||
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
|
||||
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
|
||||
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
|
||||
* OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
* ====================================================================
|
||||
*
|
||||
* This product includes cryptographic software written by Eric Young
|
||||
* (eay@cryptsoft.com). This product includes software written by Tim
|
||||
* Hudson (tjh@cryptsoft.com).
|
||||
*
|
||||
*/
|
||||
|
||||
Original SSLeay License
|
||||
-----------------------
|
||||
|
||||
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
|
||||
* All rights reserved.
|
||||
*
|
||||
* This package is an SSL implementation written
|
||||
* by Eric Young (eay@cryptsoft.com).
|
||||
* The implementation was written so as to conform with Netscapes SSL.
|
||||
*
|
||||
* This library is free for commercial and non-commercial use as long as
|
||||
* the following conditions are aheared to. The following conditions
|
||||
* apply to all code found in this distribution, be it the RC4, RSA,
|
||||
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
|
||||
* included with this distribution is covered by the same copyright terms
|
||||
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
|
||||
*
|
||||
* Copyright remains Eric Young's, and as such any Copyright notices in
|
||||
* the code are not to be removed.
|
||||
* If this package is used in a product, Eric Young should be given attribution
|
||||
* as the author of the parts of the library used.
|
||||
* This can be in the form of a textual message at program startup or
|
||||
* in documentation (online or textual) provided with the package.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. All advertising materials mentioning features or use of this software
|
||||
* must display the following acknowledgement:
|
||||
* "This product includes cryptographic software written by
|
||||
* Eric Young (eay@cryptsoft.com)"
|
||||
* The word 'cryptographic' can be left out if the routines from the library
|
||||
* being used are not cryptographic related :-).
|
||||
* 4. If you include any Windows specific code (or a derivative thereof) from
|
||||
* the apps directory (application code) you must include an acknowledgement:
|
||||
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* The licence and distribution terms for any publically available version or
|
||||
* derivative of this code cannot be changed. i.e. this code cannot simply be
|
||||
* copied and put under another distribution licence
|
||||
* [including the GNU Public Licence.]
|
||||
*/
|
56
README.md
56
README.md
@@ -1,58 +1,42 @@
|
||||
# Wazuh containers for Docker
|
||||
|
||||
[](https://wazuh.com/community/join-us-on-slack/)
|
||||
[](https://groups.google.com/forum/#!forum/wazuh)
|
||||
In this repository you will find the containers to run:
|
||||
|
||||
## Description
|
||||
* wazuh: It runs the Wazuh manager, Wazuh API and Filebeat (for integration with Elastic Stack)
|
||||
* wazuh-logstash: It is used to receive alerts generated by the manager and feed Elasticsearch using an alerts template
|
||||
* wazuh-kibana: Provides a web user interface to browse through alert data. It includes the Wazuh plugin for Kibana, which allows you to visualize agent configuration and status.
|
||||
|
||||
The `wazuh/wazuh-docker` repository provides resources to deploy the Wazuh cybersecurity platform using Docker containers. This setup enables easy installation and orchestration of the full Wazuh stack, including the Wazuh server, dashboard (based on OpenSearch Dashboards), and OpenSearch for indexing and search.
|
||||
In addition, a docker-compose file is provided to launch the containers mentioned above. It also launches an Elasticsearch container (working as a single-node cluster) using Elastic Stack Docker images.
|
||||
|
||||
## Capabilities
|
||||
## Current release
|
||||
|
||||
- Full deployment of the Wazuh stack using Docker.
|
||||
- `docker compose` support for orchestration (see the sketch after this list).
|
||||
- Scalable architecture with multi-node support.
|
||||
- Data persistence through configurable volumes.
|
||||
- Ready-to-use configurations for production or testing environments.
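As a quick orientation for the capabilities above, the following is a minimal sketch of how such a deployment is usually launched with `docker compose`. The `single-node/` and `multi-node/` directory names are assumptions about the repository layout, not something stated here; check your checkout and the official documentation for the exact paths.

```
# Minimal sketch, assuming the repository ships single-node/ and multi-node/
# deployment directories with their own docker-compose.yml files.

# Single-node deployment (indexer, server and dashboard on one host):
cd single-node
docker compose up -d     # data persists in the named volumes defined by the compose file

# Multi-node deployment (clustered Wazuh and indexer nodes):
cd ../multi-node
docker compose up -d

# Stop the stack while keeping the volumes, and therefore the indexed data:
docker compose down
```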
|
||||
Containers are currently tested on Wazuh version 2.0 and Elastic Stack version 5.5.2. We will do our best to keep this repository updated to the latest versions of both Wazuh and Elastic Stack.
|
||||
|
||||
## Branch Convention
|
||||
## Installation notes
|
||||
|
||||
- `main`: Developing and testing of new features.
|
||||
- `X.Y.Z`: Version-specific branches (e.g., `4.14.0`, `4.13.0`, etc.).
|
||||
To run all Docker instances, you can simply run ``docker-compose up`` from the directory containing the docker-compose.yml file; a short example session is sketched after the notes below. The following is part of the expected behavior when setting up the system:
|
||||
|
||||
## Documentation
|
||||
* Both wazuh-kibana and wazuh-logstash containers will run multiple queries against the Elasticsearch API using curl to learn when Elasticsearch is up. It is expected to see several ``Failed to connect to elasticsearch port 9200`` log messages until Elasticsearch is started. Then the setup process will continue normally.
|
||||
* The Kibana container can take a few minutes to install the Wazuh plugin; this takes place after ``Optimizing and caching browser bundles...`` is printed out.
|
||||
* It is recommended to set Docker host preferences to give at least 4 GB of memory per container (this doesn't necessarily mean they will all use it, but Elasticsearch requires that much to work properly).
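For the legacy Elastic-based setup described in these notes, a short session might look like the sketch below. The compose command and the Kibana port 5601 come from this README; the `-d` flag, the placeholder path, and the log-following command are ordinary docker-compose usage added for illustration.

```
# Minimal sketch of the workflow described above (legacy docker-compose v1 CLI).
cd /path/to/wazuh-docker     # directory containing docker-compose.yml
docker-compose up -d         # starts wazuh, wazuh-logstash, wazuh-kibana and Elasticsearch

# Follow the startup logs; "Failed to connect to elasticsearch port 9200"
# messages are expected until Elasticsearch is up.
docker-compose logs -f

# After Kibana finishes "Optimizing and caching browser bundles...",
# browse to http://127.0.0.1:5601
```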
|
||||
|
||||
Official documentation is available at:
|
||||
Once installed, you can browse the interface at http://127.0.0.1:5601
|
||||
|
||||
[https://documentation.wazuh.com/current/deployment-options/docker/index.html](https://documentation.wazuh.com/current/deployment-options/docker/index.html)
|
||||
## More documentation
|
||||
|
||||
You can also explore internal documentation in the [`docs`](https://github.com/wazuh/wazuh-docker/tree/main/docs) folder of this repository.
|
||||
* [Wazuh full documentation](http://documentation.wazuh.com)
|
||||
* [Wazuh documentation for Docker](https://documentation.wazuh.com/current/docker/index.html)
|
||||
* [Docker hub](https://hub.docker.com/u/wazuh)
|
||||
|
||||
## Get Involved
|
||||
|
||||
- **Fork the repository** and create your own branches to add features or fix bugs.
|
||||
- **Open issues** to report bugs or request features.
|
||||
- **Submit pull requests** following the contributing guidelines.
|
||||
- Participate in [discussions](https://github.com/wazuh/wazuh-docker/discussions) if available.
|
||||
|
||||
## Authors / Maintainers
|
||||
## Credits
|
||||
|
||||
These Docker containers are based on:
|
||||
|
||||
* "deviantony" dockerfiles which can be found at [https://github.com/deviantony/docker-elk](https://github.com/deviantony/docker-elk)
|
||||
* "xetus-oss" dockerfiles, which can be found at [https://github.com/xetus-oss/docker-ossec-server](https://github.com/xetus-oss/docker-ossec-server)
|
||||
|
||||
This project is maintained by the [Wazuh](https://wazuh.com) team, with active contributions from the community.
|
||||
|
||||
|
||||
See the full list of contributors at:
|
||||
[https://github.com/wazuh/wazuh-docker/graphs/contributors](https://github.com/wazuh/wazuh-docker/graphs/contributors)
|
||||
|
||||
We thank them and everyone else who has contributed to this project.
|
||||
|
||||
## License and copyright
|
||||
|
||||
Wazuh Docker Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
|
||||
## Web references
|
||||
## Wazuh official website
|
||||
|
||||
[Wazuh website](http://wazuh.com)
|
||||
|
45
SECURITY.md
45
SECURITY.md
@@ -1,45 +0,0 @@
|
||||
# Wazuh Open Source Project Security Policy
|
||||
|
||||
Version: 2023-06-12
|
||||
|
||||
## Introduction
|
||||
This document outlines the Security Policy for Wazuh's open source projects. It emphasizes our commitment to maintain a secure environment for our users and contributors, and reflects our belief in the power of collaboration to identify and resolve security vulnerabilities.
|
||||
|
||||
## Scope
|
||||
This policy applies to all open source projects developed, maintained, or hosted by Wazuh.
|
||||
|
||||
## Reporting Security Vulnerabilities
|
||||
If you believe you've discovered a potential security vulnerability in one of our open source projects, we strongly encourage you to report it to us responsibly.
|
||||
|
||||
Please submit your findings as security advisories under the "Security" tab in the relevant GitHub repository. Alternatively, you may send the details of your findings to [security@wazuh.com](mailto:security@wazuh.com).
|
||||
|
||||
## Vulnerability Disclosure Policy
|
||||
Upon receiving a report of a potential vulnerability, our team will initiate an investigation. If the reported issue is confirmed as a vulnerability, we will take the following steps:
|
||||
|
||||
1. Acknowledgment: We will acknowledge the receipt of your vulnerability report and begin our investigation.
|
||||
2. Validation: We will validate the issue and work on reproducing it in our environment.
|
||||
3. Remediation: We will work on a fix and thoroughly test it.
|
||||
4. Release & Disclosure: After 90 days from the discovery of the vulnerability, or as soon as a fix is ready and thoroughly tested (whichever comes first), we will release a security update for the affected project. We will also publicly disclose the vulnerability by publishing a CVE (Common Vulnerabilities and Exposures) and acknowledging the discovering party.
|
||||
5. Exceptions: In order to preserve the security of the Wazuh community at large, we might extend the disclosure period to allow users to patch their deployments.
|
||||
|
||||
This 90-day period allows for end-users to update their systems and minimizes the risk of widespread exploitation of the vulnerability.
|
||||
|
||||
## Automatic Scanning
|
||||
We leverage GitHub Actions to perform automated scans of our supply chain. These scans assist us in identifying vulnerabilities and outdated dependencies in a proactive and timely manner.
|
||||
|
||||
## Credit
|
||||
We believe in giving credit where credit is due. If you report a security vulnerability to us, and we determine that it is a valid vulnerability, we will publicly credit you for the discovery when we disclose the vulnerability. If you wish to remain anonymous, please indicate so in your initial report.
|
||||
|
||||
We do appreciate and encourage feedback from our community, but currently we do not have a bounty program. We might start bounty programs in the future.
|
||||
|
||||
## Compliance with this Policy
|
||||
We consider the discovery and reporting of security vulnerabilities an important public service. We encourage responsible reporting of any vulnerabilities that may be found in our site or applications.
|
||||
|
||||
Furthermore, we will not take legal action against, or suspend or terminate access to the site or services of, those who discover and report security vulnerabilities in accordance with this policy.
|
||||
|
||||
We ask that all users and contributors respect this policy and the security of our community's users by disclosing vulnerabilities to us in accordance with this policy.
|
||||
|
||||
## Changes to this Security Policy
|
||||
This policy may be revised from time to time. Each version of the policy will be identified at the top of the page by its effective date.
|
||||
|
||||
If you have any questions about this Security Policy, please contact us at [security@wazuh.com](mailto:security@wazuh.com)
|
@@ -1,4 +0,0 @@
|
||||
{
|
||||
"version": "4.14.0",
|
||||
"stage": "rc1"
|
||||
}
|
@@ -1,32 +0,0 @@
|
||||
# Wazuh Docker Image Builder
|
||||
|
||||
The images for the Wazuh stack deployment in Docker are built with the build-images.sh script, which uses the build-images.yml Docker Compose file.
|
||||
|
||||
To execute the process, run the following from the root of the wazuh-docker repository:
|
||||
|
||||
```
|
||||
$ build-docker-images/build-images.sh
|
||||
```
|
||||
|
||||
This script initializes the environment variables needed to build each of the images.
|
||||
|
||||
The script also allows you to build images for other Wazuh versions; to do this, use the -v or --version argument:
|
||||
|
||||
```
|
||||
$ build-docker-images/build-images.sh -v 4.14.0
|
||||
```
|
||||
|
||||
To list all the available script options, use the -h or --help option:
|
||||
|
||||
```
|
||||
$ build-docker-images/build-images.sh -h
|
||||
|
||||
Usage: build-docker-images/build-images.sh [OPTIONS]
|
||||
|
||||
-d, --dev <ref> [Optional] Set the development stage you want to build, example rc1 or beta1, not used by default.
|
||||
-f, --filebeat-module <ref> [Optional] Set Filebeat module version. By default 0.4.
|
||||
-r, --revision <rev> [Optional] Package revision. By default 1
|
||||
-v, --version <ver> [Optional] Set the Wazuh version to build. By default, 4.14.0.
|
||||
-h, --help Show this help.
|
||||
|
||||
```
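
Options can be combined; as a hypothetical example, building a release-candidate image set with a custom package revision might look like:

```
$ build-docker-images/build-images.sh -v 4.14.0 -d rc1 -r 2
```

When -d is given, the script points the Filebeat template branch at the matching pre-release tag before invoking the Compose build.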
|
@@ -1,139 +0,0 @@
|
||||
WAZUH_IMAGE_VERSION=4.14.0
|
||||
WAZUH_VERSION=$(echo $WAZUH_IMAGE_VERSION | sed -e 's/\.//g')
|
||||
WAZUH_TAG_REVISION=1
|
||||
WAZUH_CURRENT_VERSION=$(curl --silent https://api.github.com/repos/wazuh/wazuh/releases/latest | grep '["]tag_name["]:' | sed -E 's/.*\"([^\"]+)\".*/\1/' | cut -c 2- | sed -e 's/\.//g')
|
||||
IMAGE_VERSION=${WAZUH_IMAGE_VERSION}
|
||||
|
||||
# Wazuh package generator
|
||||
# Copyright (C) 2023, Wazuh Inc.
|
||||
#
|
||||
# This program is a free software; you can redistribute it
|
||||
# and/or modify it under the terms of the GNU General Public
|
||||
# License (version 2) as published by the FSF - Free Software
|
||||
# Foundation.
|
||||
|
||||
WAZUH_IMAGE_VERSION="4.14.0"
|
||||
WAZUH_TAG_REVISION="1"
|
||||
WAZUH_DEV_STAGE=""
|
||||
FILEBEAT_MODULE_VERSION="0.4"
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
trap ctrl_c INT
|
||||
|
||||
clean() {
|
||||
exit_code=$1
|
||||
|
||||
exit ${exit_code}
|
||||
}
|
||||
|
||||
ctrl_c() {
|
||||
clean 1
|
||||
}
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
|
||||
build() {
|
||||
|
||||
WAZUH_VERSION="$(echo $WAZUH_IMAGE_VERSION | sed -e 's/\.//g')"
|
||||
FILEBEAT_TEMPLATE_BRANCH="${WAZUH_IMAGE_VERSION}"
|
||||
WAZUH_FILEBEAT_MODULE="wazuh-filebeat-${FILEBEAT_MODULE_VERSION}.tar.gz"
|
||||
WAZUH_UI_REVISION="${WAZUH_TAG_REVISION}"
|
||||
|
||||
if [ "${WAZUH_DEV_STAGE}" ];then
|
||||
FILEBEAT_TEMPLATE_BRANCH="v${FILEBEAT_TEMPLATE_BRANCH}-${WAZUH_DEV_STAGE,,}"
|
||||
if ! curl --output /dev/null --silent --head --fail "https://github.com/wazuh/wazuh/tree/${FILEBEAT_TEMPLATE_BRANCH}"; then
|
||||
echo "The indicated branch does not exist in the wazuh/wazuh repository: ${FILEBEAT_TEMPLATE_BRANCH}"
|
||||
clean 1
|
||||
fi
|
||||
else
|
||||
if curl --output /dev/null --silent --head --fail "https://github.com/wazuh/wazuh/tree/v${FILEBEAT_TEMPLATE_BRANCH}"; then
|
||||
FILEBEAT_TEMPLATE_BRANCH="v${FILEBEAT_TEMPLATE_BRANCH}"
|
||||
elif curl --output /dev/null --silent --head --fail "https://github.com/wazuh/wazuh/tree/${FILEBEAT_TEMPLATE_BRANCH}"; then
|
||||
FILEBEAT_TEMPLATE_BRANCH="${FILEBEAT_TEMPLATE_BRANCH}"
|
||||
else
|
||||
echo "The indicated branch does not exist in the wazuh/wazuh repository: ${FILEBEAT_TEMPLATE_BRANCH}"
|
||||
clean 1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo WAZUH_VERSION=$WAZUH_IMAGE_VERSION > .env
|
||||
echo WAZUH_IMAGE_VERSION=$WAZUH_IMAGE_VERSION >> .env
|
||||
echo WAZUH_TAG_REVISION=$WAZUH_TAG_REVISION >> .env
|
||||
echo FILEBEAT_TEMPLATE_BRANCH=$FILEBEAT_TEMPLATE_BRANCH >> .env
|
||||
echo WAZUH_FILEBEAT_MODULE=$WAZUH_FILEBEAT_MODULE >> .env
|
||||
echo WAZUH_UI_REVISION=$WAZUH_UI_REVISION >> .env
|
||||
|
||||
docker compose -f build-docker-images/build-images.yml --env-file .env build --no-cache || clean 1
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
help() {
|
||||
echo
|
||||
echo "Usage: $0 [OPTIONS]"
|
||||
echo
|
||||
echo " -d, --dev <ref> [Optional] Set the development stage you want to build, example rc1 or beta1, not used by default."
|
||||
echo " -f, --filebeat-module <ref> [Optional] Set Filebeat module version. By default ${FILEBEAT_MODULE_VERSION}."
|
||||
echo " -r, --revision <rev> [Optional] Package revision. By default ${WAZUH_TAG_REVISION}"
|
||||
echo " -v, --version <ver> [Optional] Set the Wazuh version should be builded. By default, ${WAZUH_IMAGE_VERSION}."
|
||||
echo " -h, --help Show this help."
|
||||
echo
|
||||
exit $1
|
||||
}
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
main() {
|
||||
while [ -n "${1}" ]
|
||||
do
|
||||
case "${1}" in
|
||||
"-h"|"--help")
|
||||
help 0
|
||||
;;
|
||||
"-d"|"--dev")
|
||||
if [ -n "${2}" ]; then
|
||||
WAZUH_DEV_STAGE="${2}"
|
||||
shift 2
|
||||
else
|
||||
help 1
|
||||
fi
|
||||
;;
|
||||
"-f"|"--filebeat-module")
|
||||
if [ -n "${2}" ]; then
|
||||
FILEBEAT_MODULE_VERSION="${2}"
|
||||
shift 2
|
||||
else
|
||||
help 1
|
||||
fi
|
||||
;;
|
||||
"-r"|"--revision")
|
||||
if [ -n "${2}" ]; then
|
||||
WAZUH_TAG_REVISION="${2}"
|
||||
shift 2
|
||||
else
|
||||
help 1
|
||||
fi
|
||||
;;
|
||||
"-v"|"--version")
|
||||
if [ -n "$2" ]; then
|
||||
WAZUH_IMAGE_VERSION="$2"
|
||||
shift 2
|
||||
else
|
||||
help 1
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
help 1
|
||||
esac
|
||||
done
|
||||
|
||||
build || clean 1
|
||||
|
||||
clean 0
|
||||
}
|
||||
|
||||
main "$@"
|
@@ -1,102 +0,0 @@
|
||||
# Wazuh App Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
services:
|
||||
wazuh.manager:
|
||||
build:
|
||||
context: wazuh-manager/
|
||||
args:
|
||||
WAZUH_VERSION: ${WAZUH_VERSION}
|
||||
WAZUH_TAG_REVISION: ${WAZUH_TAG_REVISION}
|
||||
FILEBEAT_TEMPLATE_BRANCH: ${FILEBEAT_TEMPLATE_BRANCH}
|
||||
WAZUH_FILEBEAT_MODULE: ${WAZUH_FILEBEAT_MODULE}
|
||||
image: wazuh/wazuh-manager:${WAZUH_IMAGE_VERSION}
|
||||
hostname: wazuh.manager
|
||||
restart: always
|
||||
ports:
|
||||
- "1514:1514"
|
||||
- "1515:1515"
|
||||
- "514:514/udp"
|
||||
- "55000:55000"
|
||||
environment:
|
||||
- INDEXER_URL=https://wazuh.indexer:9200
|
||||
- INDEXER_USERNAME=admin
|
||||
- INDEXER_PASSWORD=admin
|
||||
- FILEBEAT_SSL_VERIFICATION_MODE=none
|
||||
volumes:
|
||||
- wazuh_api_configuration:/var/ossec/api/configuration
|
||||
- wazuh_etc:/var/ossec/etc
|
||||
- wazuh_logs:/var/ossec/logs
|
||||
- wazuh_queue:/var/ossec/queue
|
||||
- wazuh_var_multigroups:/var/ossec/var/multigroups
|
||||
- wazuh_integrations:/var/ossec/integrations
|
||||
- wazuh_active_response:/var/ossec/active-response/bin
|
||||
- wazuh_agentless:/var/ossec/agentless
|
||||
- wazuh_wodles:/var/ossec/wodles
|
||||
- filebeat_etc:/etc/filebeat
|
||||
- filebeat_var:/var/lib/filebeat
|
||||
|
||||
wazuh.agent:
|
||||
build:
|
||||
context: wazuh-agent/
|
||||
args:
|
||||
WAZUH_VERSION: ${WAZUH_VERSION}
|
||||
WAZUH_TAG_REVISION: ${WAZUH_TAG_REVISION}
|
||||
image: wazuh/wazuh-agent:${WAZUH_IMAGE_VERSION}
|
||||
hostname: wazuh.agent
|
||||
restart: always
|
||||
|
||||
wazuh.indexer:
|
||||
build:
|
||||
context: wazuh-indexer/
|
||||
args:
|
||||
WAZUH_VERSION: ${WAZUH_VERSION}
|
||||
WAZUH_TAG_REVISION: ${WAZUH_TAG_REVISION}
|
||||
image: wazuh/wazuh-indexer:${WAZUH_IMAGE_VERSION}
|
||||
hostname: wazuh.indexer
|
||||
restart: always
|
||||
ports:
|
||||
- "9200:9200"
|
||||
environment:
|
||||
- "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m"
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: -1
|
||||
hard: -1
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
|
||||
wazuh.dashboard:
|
||||
build:
|
||||
context: wazuh-dashboard/
|
||||
args:
|
||||
WAZUH_VERSION: ${WAZUH_VERSION}
|
||||
WAZUH_TAG_REVISION: ${WAZUH_TAG_REVISION}
|
||||
WAZUH_UI_REVISION: ${WAZUH_UI_REVISION}
|
||||
image: wazuh/wazuh-dashboard:${WAZUH_IMAGE_VERSION}
|
||||
hostname: wazuh.dashboard
|
||||
restart: always
|
||||
ports:
|
||||
- 443:443
|
||||
environment:
|
||||
- INDEXER_USERNAME=admin
|
||||
- INDEXER_PASSWORD=admin
|
||||
- SERVER_SSL_ENABLED=false
|
||||
- WAZUH_API_URL=https://wazuh.manager
|
||||
depends_on:
|
||||
- wazuh.indexer
|
||||
links:
|
||||
- wazuh.indexer:wazuh.indexer
|
||||
- wazuh.manager:wazuh.manager
|
||||
|
||||
volumes:
|
||||
wazuh_api_configuration:
|
||||
wazuh_etc:
|
||||
wazuh_logs:
|
||||
wazuh_queue:
|
||||
wazuh_var_multigroups:
|
||||
wazuh_integrations:
|
||||
wazuh_active_response:
|
||||
wazuh_agentless:
|
||||
wazuh_wodles:
|
||||
filebeat_etc:
|
||||
filebeat_var:
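
The Compose file above is what build-images.sh invokes with docker compose build after it writes the .env file. Once that .env exists, a single image can also be rebuilt on its own, for example (service name taken from the file above):

```
$ docker compose -f build-docker-images/build-images.yml --env-file .env build wazuh.manager
```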
|
@@ -1,36 +0,0 @@
|
||||
# Wazuh Docker Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
FROM amazonlinux:2023
|
||||
|
||||
RUN rm /bin/sh && ln -s /bin/bash /bin/sh
|
||||
|
||||
ARG WAZUH_VERSION
|
||||
ARG WAZUH_TAG_REVISION
|
||||
ARG S6_VERSION="v2.2.0.3"
|
||||
ARG WAZUH_MANAGER='CHANGE_MANAGER_IP'
|
||||
ARG WAZUH_MANAGER_PORT='CHANGE_MANAGER_PORT'
|
||||
ARG WAZUH_REGISTRATION_SERVER='CHANGE_ENROLL_IP'
|
||||
ARG WAZUH_REGISTRATION_PORT='CHANGE_ENROLL_PORT'
|
||||
ARG WAZUH_AGENT_NAME='CHANGEE_AGENT_NAME'
|
||||
|
||||
COPY config/check_repository.sh /
|
||||
|
||||
RUN yum install curl-minimal tar gzip procps -y &&\
|
||||
yum clean all
|
||||
|
||||
RUN chmod 775 /check_repository.sh
|
||||
RUN source /check_repository.sh
|
||||
|
||||
RUN yum install wazuh-agent-${WAZUH_VERSION}-${WAZUH_TAG_REVISION} -y && \
|
||||
yum clean all && \
|
||||
sed -i '/<authorization_pass_path>/d' /var/ossec/etc/ossec.conf && \
|
||||
curl --fail --silent -L https://github.com/just-containers/s6-overlay/releases/download/${S6_VERSION}/s6-overlay-amd64.tar.gz \
|
||||
-o /tmp/s6-overlay-amd64.tar.gz && \
|
||||
tar xzf /tmp/s6-overlay-amd64.tar.gz -C / --exclude="./bin" && \
|
||||
tar xzf /tmp/s6-overlay-amd64.tar.gz -C /usr ./bin && \
|
||||
rm /tmp/s6-overlay-amd64.tar.gz
|
||||
|
||||
COPY config/etc/ /etc/
|
||||
|
||||
RUN rm /etc/yum.repos.d/wazuh.repo
|
||||
|
||||
ENTRYPOINT [ "/init" ]
|
@@ -1,15 +0,0 @@
|
||||
## variables
|
||||
APT_KEY=https://packages-dev.wazuh.com/key/GPG-KEY-WAZUH
|
||||
GPG_SIGN="gpgcheck=1\ngpgkey=${APT_KEY}]"
|
||||
REPOSITORY="[wazuh]\n${GPG_SIGN}\nenabled=1\nname=EL-\$releasever - Wazuh\nbaseurl=https://packages-dev.wazuh.com/pre-release/yum/\nprotect=1"
|
||||
WAZUH_TAG=$(curl --silent https://api.github.com/repos/wazuh/wazuh/git/refs/tags | grep '["]ref["]:' | sed -E 's/.*\"([^\"]+)\".*/\1/' | cut -c 11- | grep ^v${WAZUH_VERSION}$)
|
||||
|
||||
## check tag to use the correct repository
|
||||
if [[ -n "${WAZUH_TAG}" ]]; then
|
||||
APT_KEY=https://packages.wazuh.com/key/GPG-KEY-WAZUH
|
||||
GPG_SIGN="gpgcheck=1\ngpgkey=${APT_KEY}]"
|
||||
REPOSITORY="[wazuh]\n${GPG_SIGN}\nenabled=1\nname=EL-\$releasever - Wazuh\nbaseurl=https://packages.wazuh.com/4.x/yum/\nprotect=1"
|
||||
fi
|
||||
|
||||
rpm --import "${APT_KEY}"
|
||||
echo -e "${REPOSITORY}" | tee /etc/yum.repos.d/wazuh.repo
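
For a released version (a matching vX.Y.Z tag exists upstream), the /etc/yum.repos.d/wazuh.repo written by this script would look roughly like the sketch below; the pre-release branch only swaps in the packages-dev.wazuh.com URLs:

```
[wazuh]
gpgcheck=1
gpgkey=https://packages.wazuh.com/key/GPG-KEY-WAZUH
enabled=1
name=EL-$releasever - Wazuh
baseurl=https://packages.wazuh.com/4.x/yum/
protect=1
```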
|
@@ -1,90 +0,0 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
# Wazuh App Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
|
||||
WAZUH_INSTALL_PATH=/var/ossec
|
||||
WAZUH_CONFIG_MOUNT=/wazuh-config-mount
|
||||
WAZUH_MANAGER_SERVER=$WAZUH_MANAGER_SERVER
|
||||
WAZUH_MANAGER_PORT=${WAZUH_MANAGER_PORT:-"1514"}
|
||||
WAZUH_REGISTRATION_SERVER=${WAZUH_REGISTRATION_SERVER:-$WAZUH_MANAGER_SERVER}
|
||||
WAZUH_REGISTRATION_PORT=${WAZUH_REGISTRATION_PORT:-"1515"}
|
||||
WAZUH_REGISTRATION_PASSWORD=$WAZUH_REGISTRATION_PASSWORD
|
||||
WAZUH_AGENT_NAME=${WAZUH_AGENT_NAME:-"wazuh-agent-$HOSTNAME"}
|
||||
|
||||
##############################################################################
|
||||
# Aux functions
|
||||
##############################################################################
|
||||
print() {
|
||||
echo -e $1
|
||||
}
|
||||
|
||||
error_and_exit() {
|
||||
echo "Error executing command: '$1'."
|
||||
echo 'Exiting.'
|
||||
exit 1
|
||||
}
|
||||
|
||||
exec_cmd() {
|
||||
eval $1 > /dev/null 2>&1 || error_and_exit "$1"
|
||||
}
|
||||
|
||||
exec_cmd_stdout() {
|
||||
eval $1 2>&1 || error_and_exit "$1"
|
||||
}
|
||||
|
||||
##############################################################################
|
||||
# Copy all files from $WAZUH_CONFIG_MOUNT to $WAZUH_INSTALL_PATH and respect
|
||||
# destination files permissions
|
||||
#
|
||||
# For example, to mount the file /var/ossec/etc/ossec.conf, mount it at
|
||||
# $WAZUH_CONFIG_MOUNT/etc/ossec.conf in your container and this code will
|
||||
# replace the ossec.conf file in /var/ossec/etc with yours.
|
||||
##############################################################################
|
||||
|
||||
mount_files() {
|
||||
if [ -e "$WAZUH_CONFIG_MOUNT" ]
|
||||
then
|
||||
print "Identified Wazuh configuration files to mount..."
|
||||
exec_cmd_stdout "cp --verbose -r $WAZUH_CONFIG_MOUNT/* $WAZUH_INSTALL_PATH"
|
||||
else
|
||||
print "No Wazuh configuration files to mount..."
|
||||
fi
|
||||
}
|
||||
|
||||
##############################################################################
|
||||
# Allow users to set the manager ip and port, enrollment ip and port and
|
||||
# enroll dynamically on container start.
|
||||
#
|
||||
# To use this:
|
||||
# 1. Create your own ossec.conf file
|
||||
# 2. In your ossec.conf file, use the <agent> configuration
|
||||
# 3. Mount your custom ossec.conf file at $WAZUH_CONFIG_MOUNT/etc/ossec.conf
|
||||
##############################################################################
|
||||
|
||||
set_manager_conn() {
|
||||
echo "ossec.conf configuration"
|
||||
sed -i "s#<address>CHANGE_MANAGER_IP</address>#<address>$WAZUH_MANAGER_SERVER</address>#g" ${WAZUH_INSTALL_PATH}/etc/ossec.conf
|
||||
sed -i "s#<port>CHANGE_MANAGER_PORT</port>#<port>$WAZUH_MANAGER_PORT</port>#g" ${WAZUH_INSTALL_PATH}/etc/ossec.conf
|
||||
sed -i "s#<manager_address>CHANGE_ENROLL_IP</manager_address>#<manager_address>$WAZUH_REGISTRATION_SERVER</manager_address>#g" ${WAZUH_INSTALL_PATH}/etc/ossec.conf
|
||||
sed -i "s#<port>CHANGE_ENROLL_PORT</port>#<port>$WAZUH_REGISTRATION_PORT</port>#g" ${WAZUH_INSTALL_PATH}/etc/ossec.conf
|
||||
sed -i "s#<agent_name>CHANGEE_AGENT_NAME</agent_name>#<agent_name>$WAZUH_AGENT_NAME</agent_name>#g" ${WAZUH_INSTALL_PATH}/etc/ossec.conf
|
||||
[ -n "$WAZUH_REGISTRATION_PASSWORD" ] && \
|
||||
echo "$WAZUH_REGISTRATION_PASSWORD" > ${WAZUH_INSTALL_PATH}/etc/authd.pass && \
|
||||
chown root:wazuh ${WAZUH_INSTALL_PATH}/etc/authd.pass && \
|
||||
chmod 640 ${WAZUH_INSTALL_PATH}/etc/authd.pass
|
||||
}
|
||||
|
||||
##############################################################################
|
||||
# Main function
|
||||
##############################################################################
|
||||
|
||||
main() {
|
||||
|
||||
# Mount selected files (WAZUH_CONFIG_MOUNT) to container
|
||||
mount_files
|
||||
|
||||
# Configure agent variables
|
||||
set_manager_conn
|
||||
|
||||
}
|
||||
|
||||
main
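
In practice these variables are supplied when the agent container is started. A minimal sketch (image tag, password and agent name are assumptions to adjust for your deployment):

```
docker run -d --name wazuh-agent \
  -e WAZUH_MANAGER_SERVER=wazuh.manager \
  -e WAZUH_REGISTRATION_PASSWORD=MySecretPassword \
  -e WAZUH_AGENT_NAME=docker-agent-01 \
  -v $(pwd)/ossec.conf:/wazuh-config-mount/etc/ossec.conf \
  wazuh/wazuh-agent:4.14.0
```

mount_files copies the mounted ossec.conf over /var/ossec/etc/ossec.conf before set_manager_conn substitutes the CHANGE_* placeholders.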
|
@@ -1,44 +0,0 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
|
||||
##############################################################################
|
||||
# Migration sequence
|
||||
# Detect if there is a mounted volume on /wazuh-migration and copy the data
|
||||
# to /var/ossec, finally it will create a flag ".migration-completed" inside
|
||||
# the mounted volume
|
||||
##############################################################################
|
||||
|
||||
function __colortext()
|
||||
{
|
||||
echo -e " \e[1;$2m$1\e[0m"
|
||||
}
|
||||
|
||||
function echogreen()
|
||||
{
|
||||
echo $(__colortext "$1" "32")
|
||||
}
|
||||
|
||||
function echoyellow()
|
||||
{
|
||||
echo $(__colortext "$1" "33")
|
||||
}
|
||||
|
||||
function echored()
|
||||
{
|
||||
echo $(__colortext "$1" "31")
|
||||
}
|
||||
|
||||
function_entrypoint_scripts() {
|
||||
# Run every .sh script located in the /entrypoint-scripts folder, in sorted order
|
||||
if [ -d "/entrypoint-scripts/" ]
|
||||
then
|
||||
for script in `ls /entrypoint-scripts/*.sh | sort -n`; do
|
||||
bash "$script"
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
# run entrypoint scripts
|
||||
function_entrypoint_scripts
|
||||
|
||||
# Start Wazuh
|
||||
/var/ossec/bin/wazuh-control start
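
The /entrypoint-scripts hook allows site-specific setup to run before Wazuh starts; a sketch of wiring one in from a Compose service definition (file name and path are illustrative):

```
    volumes:
      - ./custom-init.sh:/entrypoint-scripts/00-custom-init.sh
```

Any *.sh file found there is executed with bash on the next container start.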
|
@@ -1,4 +0,0 @@
|
||||
#!/usr/bin/with-contenv sh
|
||||
|
||||
# dumping ossec.log to standard output
|
||||
exec tail -F /var/ossec/logs/ossec.log
|
@@ -1,97 +0,0 @@
|
||||
# Wazuh Docker Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
FROM amazonlinux:2023 AS builder
|
||||
|
||||
ARG WAZUH_VERSION
|
||||
ARG WAZUH_TAG_REVISION
|
||||
ARG WAZUH_UI_REVISION
|
||||
ARG INSTALL_DIR=/usr/share/wazuh-dashboard
|
||||
|
||||
# Update and install dependencies
|
||||
RUN yum install curl-minimal libcap openssl -y
|
||||
|
||||
COPY config/check_repository.sh /
|
||||
RUN chmod 775 /check_repository.sh && \
|
||||
source /check_repository.sh
|
||||
|
||||
RUN yum install wazuh-dashboard-${WAZUH_VERSION}-${WAZUH_TAG_REVISION} -y && \
|
||||
yum clean all
|
||||
|
||||
# Create and set permissions to data directories
|
||||
RUN mkdir -p $INSTALL_DIR/data/wazuh && chmod -R 775 $INSTALL_DIR/data/wazuh
|
||||
RUN mkdir -p $INSTALL_DIR/data/wazuh/config && chmod -R 775 $INSTALL_DIR/data/wazuh/config
|
||||
RUN mkdir -p $INSTALL_DIR/data/wazuh/logs && chmod -R 775 $INSTALL_DIR/data/wazuh/logs
|
||||
COPY config/wazuh.yml $INSTALL_DIR/data/wazuh/config/
|
||||
RUN setcap 'cap_net_bind_service=-ep' /usr/share/wazuh-dashboard/node/bin/node
|
||||
RUN setcap 'cap_net_bind_service=-ep' /usr/share/wazuh-dashboard/node/fallback/bin/node
|
||||
|
||||
# Generate certificates
|
||||
COPY config/config.sh .
|
||||
COPY config/config.yml /
|
||||
RUN bash config.sh
|
||||
|
||||
################################################################################
|
||||
# Build stage 1 (the current Wazuh dashboard image):
|
||||
#
|
||||
# Copy wazuh-dashboard from stage 0
|
||||
# Add entrypoint
|
||||
# Add wazuh_app_config
|
||||
################################################################################
|
||||
FROM amazonlinux:2023
|
||||
|
||||
# Set environment variables
|
||||
ENV USER="wazuh-dashboard" \
|
||||
GROUP="wazuh-dashboard" \
|
||||
NAME="wazuh-dashboard" \
|
||||
INSTALL_DIR="/usr/share/wazuh-dashboard"
|
||||
|
||||
# Set Wazuh app variables
|
||||
ENV PATTERN="" \
|
||||
CHECKS_PATTERN="" \
|
||||
CHECKS_TEMPLATE="" \
|
||||
CHECKS_API="" \
|
||||
CHECKS_SETUP="" \
|
||||
APP_TIMEOUT="" \
|
||||
API_SELECTOR="" \
|
||||
IP_SELECTOR="" \
|
||||
IP_IGNORE="" \
|
||||
WAZUH_MONITORING_ENABLED="" \
|
||||
WAZUH_MONITORING_FREQUENCY="" \
|
||||
WAZUH_MONITORING_SHARDS="" \
|
||||
WAZUH_MONITORING_REPLICAS=""
|
||||
|
||||
# Update and install dependencies
|
||||
RUN yum install shadow-utils -y
|
||||
|
||||
# Create wazuh-dashboard user and group
|
||||
RUN getent group $GROUP || groupadd -r -g 1000 $GROUP
|
||||
RUN useradd --system \
|
||||
--uid 1000 \
|
||||
--no-create-home \
|
||||
--home-dir $INSTALL_DIR \
|
||||
--gid $GROUP \
|
||||
--shell /sbin/nologin \
|
||||
--comment "$USER user" \
|
||||
$USER
|
||||
|
||||
# Copy and set permissions to scripts
|
||||
COPY config/entrypoint.sh /
|
||||
COPY config/wazuh_app_config.sh /
|
||||
RUN chmod 700 /entrypoint.sh
|
||||
RUN chmod 700 /wazuh_app_config.sh
|
||||
RUN chown 1000:1000 /*.sh
|
||||
|
||||
# Copy Install dir from builder to current image
|
||||
COPY --from=builder --chown=1000:1000 $INSTALL_DIR $INSTALL_DIR
|
||||
|
||||
# Create custom directory
|
||||
RUN mkdir -p /usr/share/wazuh-dashboard/plugins/wazuh/public/assets/custom
|
||||
RUN chown 1000:1000 /usr/share/wazuh-dashboard/plugins/wazuh/public/assets/custom
|
||||
|
||||
# Set workdir and user
|
||||
WORKDIR $INSTALL_DIR
|
||||
USER wazuh-dashboard
|
||||
|
||||
# Services ports
|
||||
EXPOSE 443
|
||||
|
||||
ENTRYPOINT [ "/entrypoint.sh" ]
|
@@ -1,15 +0,0 @@
|
||||
## variables
|
||||
APT_KEY=https://packages-dev.wazuh.com/key/GPG-KEY-WAZUH
|
||||
GPG_SIGN="gpgcheck=1\ngpgkey=${APT_KEY}]"
|
||||
REPOSITORY="[wazuh]\n${GPG_SIGN}\nenabled=1\nname=EL-\$releasever - Wazuh\nbaseurl=https://packages-dev.wazuh.com/pre-release/yum/\nprotect=1"
|
||||
WAZUH_TAG=$(curl --silent https://api.github.com/repos/wazuh/wazuh/git/refs/tags | grep '["]ref["]:' | sed -E 's/.*\"([^\"]+)\".*/\1/' | cut -c 11- | grep ^v${WAZUH_VERSION}$)
|
||||
|
||||
## check tag to use the correct repository
|
||||
if [[ -n "${WAZUH_TAG}" ]]; then
|
||||
APT_KEY=https://packages.wazuh.com/key/GPG-KEY-WAZUH
|
||||
GPG_SIGN="gpgcheck=1\ngpgkey=${APT_KEY}]"
|
||||
REPOSITORY="[wazuh]\n${GPG_SIGN}\nenabled=1\nname=EL-\$releasever - Wazuh\nbaseurl=https://packages.wazuh.com/4.x/yum/\nprotect=1"
|
||||
fi
|
||||
|
||||
rpm --import "${APT_KEY}"
|
||||
echo -e "${REPOSITORY}" | tee /etc/yum.repos.d/wazuh.repo
|
@@ -1,42 +0,0 @@
|
||||
# Wazuh Docker Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
# This has to be exported to make some magic below work.
|
||||
export DH_OPTIONS
|
||||
|
||||
export NAME=wazuh-dashboard
|
||||
export TARGET_DIR=${CURDIR}/debian/${NAME}
|
||||
export INSTALLATION_DIR=/usr/share/${NAME}
|
||||
export CONFIG_DIR=${INSTALLATION_DIR}/config
|
||||
|
||||
## Variables
|
||||
CERT_TOOL=wazuh-certs-tool.sh
|
||||
PACKAGES_URL=https://packages.wazuh.com/4.14/
|
||||
PACKAGES_DEV_URL=https://packages-dev.wazuh.com/4.14/
|
||||
|
||||
## Check if the cert tool exists in S3 buckets
|
||||
CERT_TOOL_PACKAGES=$(curl --silent -I $PACKAGES_URL$CERT_TOOL | grep -E "^HTTP" | awk '{print $2}')
|
||||
CERT_TOOL_PACKAGES_DEV=$(curl --silent -I $PACKAGES_DEV_URL$CERT_TOOL | grep -E "^HTTP" | awk '{print $2}')
|
||||
|
||||
## If cert tool exists in some bucket, download it, if not exit 1
|
||||
if [ "$CERT_TOOL_PACKAGES" = "200" ]; then
|
||||
curl -o $CERT_TOOL $PACKAGES_URL$CERT_TOOL
|
||||
echo "Cert tool exists in Packages bucket"
|
||||
elif [ "$CERT_TOOL_PACKAGES_DEV" = "200" ]; then
|
||||
curl -o $CERT_TOOL $PACKAGES_DEV_URL$CERT_TOOL
|
||||
echo "Cert tool exists in Packages-dev bucket"
|
||||
else
|
||||
echo "Cert tool does not exist in any bucket"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
chmod 755 $CERT_TOOL && bash /$CERT_TOOL -A
|
||||
|
||||
# Create certs directory
|
||||
mkdir -p ${CONFIG_DIR}/certs
|
||||
|
||||
# Copy Wazuh dashboard certs to install config dir
|
||||
cp /wazuh-certificates/demo.dashboard.pem ${CONFIG_DIR}/certs/dashboard.pem
|
||||
cp /wazuh-certificates/demo.dashboard-key.pem ${CONFIG_DIR}/certs/dashboard-key.pem
|
||||
cp /wazuh-certificates/root-ca.pem ${CONFIG_DIR}/certs/root-ca.pem
|
||||
|
||||
chmod -R 500 ${CONFIG_DIR}/certs
|
||||
chmod -R 400 ${CONFIG_DIR}/certs/*
|
@@ -1,5 +0,0 @@
|
||||
nodes:
|
||||
# Wazuh dashboard server nodes
|
||||
dashboard:
|
||||
- name: demo.dashboard
|
||||
ip: demo.dashboard
|
@@ -1,20 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Wazuh Docker Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
|
||||
INSTALL_DIR=/usr/share/wazuh-dashboard
|
||||
DASHBOARD_USERNAME="${DASHBOARD_USERNAME:-kibanaserver}"
|
||||
DASHBOARD_PASSWORD="${DASHBOARD_PASSWORD:-kibanaserver}"
|
||||
|
||||
# Create and configure Wazuh dashboard keystore
|
||||
|
||||
yes | $INSTALL_DIR/bin/opensearch-dashboards-keystore create --allow-root && \
|
||||
echo $DASHBOARD_USERNAME | $INSTALL_DIR/bin/opensearch-dashboards-keystore add opensearch.username --stdin --allow-root && \
|
||||
echo $DASHBOARD_PASSWORD | $INSTALL_DIR/bin/opensearch-dashboards-keystore add opensearch.password --stdin --allow-root
|
||||
|
||||
##############################################################################
|
||||
# Start Wazuh dashboard
|
||||
##############################################################################
|
||||
|
||||
/wazuh_app_config.sh $WAZUH_UI_REVISION
|
||||
|
||||
/usr/share/wazuh-dashboard/bin/opensearch-dashboards -c /usr/share/wazuh-dashboard/config/opensearch_dashboards.yml
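
Since DASHBOARD_USERNAME and DASHBOARD_PASSWORD default to kibanaserver/kibanaserver, they would normally be overridden at run time; a minimal sketch (image tag and values are placeholders):

```
docker run -d \
  -e DASHBOARD_USERNAME=kibanaserver \
  -e DASHBOARD_PASSWORD=AStrongerPassword \
  -e WAZUH_API_URL=https://wazuh.manager \
  wazuh/wazuh-dashboard:4.14.0
```

The first two are written into the OpenSearch Dashboards keystore created above; WAZUH_API_URL is consumed later by wazuh_app_config.sh.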
|
@@ -1,155 +0,0 @@
|
||||
---
|
||||
#
|
||||
# Wazuh app - App configuration file
|
||||
# Copyright (C) 2017, Wazuh Inc.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Find more information about this on the LICENSE file.
|
||||
#
|
||||
# ======================== Wazuh app configuration file ========================
|
||||
#
|
||||
# Please check the documentation for more information on configuration options:
|
||||
# https://documentation.wazuh.com/current/installation-guide/index.html
|
||||
#
|
||||
# Also, you can check our repository:
|
||||
# https://github.com/wazuh/wazuh-dashboard-plugins
|
||||
#
|
||||
# ------------------------------- Index patterns -------------------------------
|
||||
#
|
||||
# Default index pattern to use.
|
||||
#pattern: wazuh-alerts-*
|
||||
#
|
||||
# ----------------------------------- Checks -----------------------------------
|
||||
#
|
||||
# Defines which checks must be considered by the healthcheck
|
||||
# step once the Wazuh app starts. Values must be true or false.
|
||||
#checks.pattern : true
|
||||
#checks.template: true
|
||||
#checks.api : true
|
||||
#checks.setup : true
|
||||
#checks.metaFields: true
|
||||
#
|
||||
# --------------------------------- Extensions ---------------------------------
|
||||
#
|
||||
# Defines which extensions should be activated when you add a new API entry.
|
||||
# You can change them after Wazuh app starts.
|
||||
# Values must be true or false.
|
||||
#extensions.pci : true
|
||||
#extensions.gdpr : true
|
||||
#extensions.hipaa : true
|
||||
#extensions.nist : true
|
||||
#extensions.tsc : true
|
||||
#extensions.audit : true
|
||||
#extensions.oscap : false
|
||||
#extensions.ciscat : false
|
||||
#extensions.aws : false
|
||||
#extensions.gcp : false
|
||||
#extensions.virustotal: false
|
||||
#extensions.osquery : false
|
||||
#extensions.docker : false
|
||||
#
|
||||
# ---------------------------------- Time out ----------------------------------
|
||||
#
|
||||
# Defines maximum timeout to be used on the Wazuh app requests.
|
||||
# It will be ignored if it is below 1500.
|
||||
# It means milliseconds before we consider a request as failed.
|
||||
# Default: 20000
|
||||
#timeout: 20000
|
||||
#
|
||||
# -------------------------------- API selector --------------------------------
|
||||
#
|
||||
# Defines if the user is allowed to change the selected
|
||||
# API directly from the Wazuh app top menu.
|
||||
# Default: true
|
||||
#api.selector: true
|
||||
#
|
||||
# --------------------------- Index pattern selector ---------------------------
|
||||
#
|
||||
# Defines if the user is allowed to change the selected
|
||||
# index pattern directly from the Wazuh app top menu.
|
||||
# Default: true
|
||||
#ip.selector: true
|
||||
#
|
||||
# List of index patterns to be ignored
|
||||
#ip.ignore: []
|
||||
#
|
||||
# ------------------------------ wazuh-monitoring ------------------------------
|
||||
#
|
||||
# Custom setting to enable/disable wazuh-monitoring indices.
|
||||
# Values: true, false, worker
|
||||
# If worker is given as value, the app will show the Agents status
|
||||
# visualization but won't insert data on wazuh-monitoring indices.
|
||||
# Default: true
|
||||
#wazuh.monitoring.enabled: true
|
||||
#
|
||||
# Custom setting to set the frequency for wazuh-monitoring indices cron task.
|
||||
# Default: 900 (s)
|
||||
#wazuh.monitoring.frequency: 900
|
||||
#
|
||||
# Configure wazuh-monitoring-* indices shards and replicas.
|
||||
#wazuh.monitoring.shards: 2
|
||||
#wazuh.monitoring.replicas: 0
|
||||
#
|
||||
# Configure wazuh-monitoring-* indices custom creation interval.
|
||||
# Values: h (hourly), d (daily), w (weekly), m (monthly)
|
||||
# Default: d
|
||||
#wazuh.monitoring.creation: d
|
||||
#
|
||||
# Default index pattern to use for Wazuh monitoring
|
||||
#wazuh.monitoring.pattern: wazuh-monitoring-*
|
||||
#
|
||||
# --------------------------------- wazuh-cron ----------------------------------
|
||||
#
|
||||
# Customize the index prefix of predefined jobs
|
||||
# This change is not retroactive; if you change it, new indices will be created
|
||||
# cron.prefix: test
|
||||
#
|
||||
# ------------------------------ wazuh-statistics -------------------------------
|
||||
#
|
||||
# Custom setting to enable/disable statistics tasks.
|
||||
#cron.statistics.status: true
|
||||
#
|
||||
# Enter the ID of the APIs you want to save data from, leave this empty to run
|
||||
# the task on all configured APIs
|
||||
#cron.statistics.apis: []
|
||||
#
|
||||
# Define the frequency of task execution using cron schedule expressions
|
||||
#cron.statistics.interval: 0 0 * * * *
|
||||
#
|
||||
# Define the name of the index in which the documents are to be saved.
|
||||
#cron.statistics.index.name: statistics
|
||||
#
|
||||
# Define the interval in which the index will be created
|
||||
#cron.statistics.index.creation: w
|
||||
#
|
||||
# ------------------------------- App privileges --------------------------------
|
||||
#admin: true
|
||||
#
|
||||
# ---------------------------- Hide manager alerts ------------------------------
|
||||
# Hide the alerts of the manager in all dashboards and discover
|
||||
#hideManagerAlerts: false
|
||||
#
|
||||
# ------------------------------- App logging level -----------------------------
|
||||
# Set the logging level for the Wazuh App log files.
|
||||
# Default value: info
|
||||
# Allowed values: info, debug
|
||||
#logs.level: info
|
||||
#
|
||||
# -------------------------------- Enrollment DNS -------------------------------
|
||||
# Set the variable WAZUH_REGISTRATION_SERVER in agents deployment.
|
||||
# Default value: ''
|
||||
#enrollment.dns: ''
|
||||
#
|
||||
#-------------------------------- API entries -----------------------------------
|
||||
#The following configuration is the default structure to define an API entry.
|
||||
#
|
||||
#hosts:
|
||||
# - <id>:
|
||||
# url: http(s)://<url>
|
||||
# port: <port>
|
||||
# username: <username>
|
||||
# password: <password>
|
@@ -1,52 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Wazuh Docker Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
|
||||
wazuh_url="${WAZUH_API_URL:-https://wazuh}"
|
||||
wazuh_port="${API_PORT:-55000}"
|
||||
api_username="${API_USERNAME:-wazuh-wui}"
|
||||
api_password="${API_PASSWORD:-wazuh-wui}"
|
||||
api_run_as="${RUN_AS:-false}"
|
||||
|
||||
dashboard_config_file="/usr/share/wazuh-dashboard/data/wazuh/config/wazuh.yml"
|
||||
|
||||
declare -A CONFIG_MAP=(
|
||||
[pattern]=$PATTERN
|
||||
[checks.pattern]=$CHECKS_PATTERN
|
||||
[checks.template]=$CHECKS_TEMPLATE
|
||||
[checks.api]=$CHECKS_API
|
||||
[checks.setup]=$CHECKS_SETUP
|
||||
[timeout]=$APP_TIMEOUT
|
||||
[api.selector]=$API_SELECTOR
|
||||
[ip.selector]=$IP_SELECTOR
|
||||
[ip.ignore]=$IP_IGNORE
|
||||
[wazuh.monitoring.enabled]=$WAZUH_MONITORING_ENABLED
|
||||
[wazuh.monitoring.frequency]=$WAZUH_MONITORING_FREQUENCY
|
||||
[wazuh.monitoring.shards]=$WAZUH_MONITORING_SHARDS
|
||||
[wazuh.monitoring.replicas]=$WAZUH_MONITORING_REPLICAS
|
||||
)
|
||||
|
||||
for i in "${!CONFIG_MAP[@]}"
|
||||
do
|
||||
if [ "${CONFIG_MAP[$i]}" != "" ]; then
|
||||
sed -i 's/.*#'"$i"'.*/'"$i"': '"${CONFIG_MAP[$i]}"'/' $dashboard_config_file
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
grep -q 1513629884013 $dashboard_config_file
|
||||
_config_exists=$?
|
||||
|
||||
if [[ $_config_exists -ne 0 ]]; then
|
||||
cat << EOF >> $dashboard_config_file
|
||||
hosts:
|
||||
- 1513629884013:
|
||||
url: $wazuh_url
|
||||
port: $wazuh_port
|
||||
username: $api_username
|
||||
password: $api_password
|
||||
run_as: $api_run_as
|
||||
EOF
|
||||
else
|
||||
echo "Wazuh APP already configured"
|
||||
fi
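
Because of the CONFIG_MAP loop above, any of those settings can be tuned purely through container environment variables; for example (assumed overrides, expressed as a Compose fragment):

```
    environment:
      - WAZUH_MONITORING_ENABLED=false
      - APP_TIMEOUT=30000
```

On start-up the commented #wazuh.monitoring.enabled and #timeout lines in wazuh.yml are rewritten as wazuh.monitoring.enabled: false and timeout: 30000.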
|
||||
|
@@ -1,93 +0,0 @@
|
||||
# Wazuh Docker Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
FROM amazonlinux:2023 AS builder
|
||||
|
||||
ARG WAZUH_VERSION
|
||||
ARG WAZUH_TAG_REVISION
|
||||
|
||||
RUN yum install curl-minimal openssl xz tar findutils shadow-utils -y
|
||||
|
||||
COPY config/check_repository.sh /
|
||||
RUN chmod 775 /check_repository.sh && \
|
||||
source /check_repository.sh
|
||||
|
||||
RUN yum install wazuh-indexer-${WAZUH_VERSION}-${WAZUH_TAG_REVISION} -y && \
|
||||
yum clean all
|
||||
|
||||
COPY config/opensearch.yml /
|
||||
|
||||
COPY config/config.sh .
|
||||
|
||||
COPY config/config.yml /
|
||||
|
||||
COPY config/action_groups.yml /
|
||||
|
||||
COPY config/internal_users.yml /
|
||||
|
||||
COPY config/roles_mapping.yml /
|
||||
|
||||
COPY config/roles.yml /
|
||||
|
||||
RUN bash config.sh
|
||||
|
||||
################################################################################
|
||||
# Build stage 1 (the actual Wazuh indexer image):
|
||||
#
|
||||
# Copy wazuh-indexer from stage 0
|
||||
# Add entrypoint
|
||||
|
||||
################################################################################
|
||||
FROM amazonlinux:2023
|
||||
|
||||
ENV USER="wazuh-indexer" \
|
||||
GROUP="wazuh-indexer" \
|
||||
NAME="wazuh-indexer" \
|
||||
INSTALL_DIR="/usr/share/wazuh-indexer"
|
||||
|
||||
RUN yum install curl-minimal shadow-utils findutils hostname -y
|
||||
|
||||
RUN getent group $GROUP || groupadd -r -g 1000 $GROUP
|
||||
|
||||
RUN useradd --system \
|
||||
--uid 1000 \
|
||||
--no-create-home \
|
||||
--home-dir $INSTALL_DIR \
|
||||
--gid $GROUP \
|
||||
--shell /sbin/nologin \
|
||||
--comment "$USER user" \
|
||||
$USER
|
||||
|
||||
WORKDIR $INSTALL_DIR
|
||||
|
||||
COPY config/entrypoint.sh /
|
||||
|
||||
COPY config/securityadmin.sh /
|
||||
|
||||
RUN chmod 700 /entrypoint.sh && chmod 700 /securityadmin.sh && \
|
||||
mkdir -p /usr/share/wazuh-indexer && \
|
||||
chown 1000:1000 /usr/share/wazuh-indexer && \
|
||||
chown 1000:1000 /*.sh
|
||||
|
||||
COPY --from=builder --chown=1000:1000 /usr/share/wazuh-indexer /usr/share/wazuh-indexer
|
||||
COPY --from=builder --chown=1000:1000 /etc/wazuh-indexer /usr/share/wazuh-indexer/config
|
||||
COPY --from=builder --chown=1000:1000 /debian/wazuh-indexer/usr/share/wazuh-indexer /usr/share/wazuh-indexer
|
||||
COPY --from=builder --chown=0:0 /debian/wazuh-indexer/usr/lib/systemd /usr/lib/systemd
|
||||
COPY --from=builder --chown=0:0 /debian/wazuh-indexer/usr/lib/sysctl.d /usr/lib/sysctl.d
|
||||
COPY --from=builder --chown=0:0 /debian/wazuh-indexer/usr/lib/tmpfiles.d /usr/lib/tmpfiles.d
|
||||
|
||||
RUN mkdir -p /var/lib/wazuh-indexer && chown 1000:1000 /var/lib/wazuh-indexer && \
|
||||
mkdir -p /usr/share/wazuh-indexer/logs && chown 1000:1000 /usr/share/wazuh-indexer/logs && \
|
||||
mkdir -p /run/wazuh-indexer && chown 1000:1000 /run/wazuh-indexer && \
|
||||
mkdir -p /var/log/wazuh-indexer && chown 1000:1000 /var/log/wazuh-indexer && \
|
||||
chmod 700 /usr/share/wazuh-indexer && \
|
||||
chmod 700 /usr/share/wazuh-indexer/config && \
|
||||
chmod 600 /usr/share/wazuh-indexer/config/jvm.options && \
|
||||
chmod 600 /usr/share/wazuh-indexer/config/opensearch.yml
|
||||
|
||||
USER wazuh-indexer
|
||||
|
||||
# Services ports
|
||||
EXPOSE 9200
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
# Dummy overridable parameter parsed by entrypoint
|
||||
CMD ["opensearchwrapper"]
|
@@ -1,12 +0,0 @@
|
||||
---
|
||||
_meta:
|
||||
type: "actiongroups"
|
||||
config_version: 2
|
||||
|
||||
# ISM API permissions group
|
||||
manage_ism:
|
||||
reserved: true
|
||||
hidden: false
|
||||
allowed_actions:
|
||||
- "cluster:admin/opendistro/ism/*"
|
||||
static: false
|
@@ -1,15 +0,0 @@
|
||||
## variables
|
||||
APT_KEY=https://packages-dev.wazuh.com/key/GPG-KEY-WAZUH
|
||||
GPG_SIGN="gpgcheck=1\ngpgkey=${APT_KEY}]"
|
||||
REPOSITORY="[wazuh]\n${GPG_SIGN}\nenabled=1\nname=EL-\$releasever - Wazuh\nbaseurl=https://packages-dev.wazuh.com/pre-release/yum/\nprotect=1"
|
||||
WAZUH_TAG=$(curl --silent https://api.github.com/repos/wazuh/wazuh/git/refs/tags | grep '["]ref["]:' | sed -E 's/.*\"([^\"]+)\".*/\1/' | cut -c 11- | grep ^v${WAZUH_VERSION}$)
|
||||
|
||||
## check tag to use the correct repository
|
||||
if [[ -n "${WAZUH_TAG}" ]]; then
|
||||
APT_KEY=https://packages.wazuh.com/key/GPG-KEY-WAZUH
|
||||
GPG_SIGN="gpgcheck=1\ngpgkey=${APT_KEY}]"
|
||||
REPOSITORY="[wazuh]\n${GPG_SIGN}\nenabled=1\nname=EL-\$releasever - Wazuh\nbaseurl=https://packages.wazuh.com/4.x/yum/\nprotect=1"
|
||||
fi
|
||||
|
||||
rpm --import "${APT_KEY}"
|
||||
echo -e "${REPOSITORY}" | tee /etc/yum.repos.d/wazuh.repo
|
@@ -1,102 +0,0 @@
|
||||
# Wazuh Docker Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
# This has to be exported to make some magic below work.
|
||||
export DH_OPTIONS
|
||||
|
||||
export NAME=wazuh-indexer
|
||||
export TARGET_DIR=${CURDIR}/debian/${NAME}
|
||||
|
||||
# Package build options
|
||||
export USER=${NAME}
|
||||
export GROUP=${NAME}
|
||||
export VERSION=${WAZUH_VERSION}-${WAZUH_TAG_REVISION}
|
||||
export LOG_DIR=/var/log/${NAME}
|
||||
export LIB_DIR=/var/lib/${NAME}
|
||||
export PID_DIR=/run/${NAME}
|
||||
export INSTALLATION_DIR=/usr/share/${NAME}
|
||||
export CONFIG_DIR=${INSTALLATION_DIR}/config
|
||||
export BASE_DIR=${NAME}-*
|
||||
export INDEXER_FILE=wazuh-indexer-base.tar.xz
|
||||
export BASE_FILE=wazuh-indexer-base-${VERSION}-linux-x64.tar.xz
|
||||
export REPO_DIR=/unattended_installer
|
||||
|
||||
## Variables
|
||||
CERT_TOOL=wazuh-certs-tool.sh
|
||||
PASSWORD_TOOL=wazuh-passwords-tool.sh
|
||||
PACKAGES_URL=https://packages.wazuh.com/4.14/
|
||||
PACKAGES_DEV_URL=https://packages-dev.wazuh.com/4.14/
|
||||
|
||||
## Check if the cert tool exists in S3 buckets
|
||||
CERT_TOOL_PACKAGES=$(curl --silent -I $PACKAGES_URL$CERT_TOOL | grep -E "^HTTP" | awk '{print $2}')
|
||||
CERT_TOOL_PACKAGES_DEV=$(curl --silent -I $PACKAGES_DEV_URL$CERT_TOOL | grep -E "^HTTP" | awk '{print $2}')
|
||||
|
||||
## If cert tool exists in some bucket, download it, if not exit 1
|
||||
if [ "$CERT_TOOL_PACKAGES" = "200" ]; then
|
||||
curl -o $CERT_TOOL $PACKAGES_URL$CERT_TOOL
|
||||
echo "Cert tool exists in Packages bucket"
|
||||
elif [ "$CERT_TOOL_PACKAGES_DEV" = "200" ]; then
|
||||
curl -o $CERT_TOOL $PACKAGES_DEV_URL$CERT_TOOL
|
||||
echo "Cert tool exists in Packages-dev bucket"
|
||||
else
|
||||
echo "Cert tool does not exist in any bucket"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
## Check if the password tool exists in S3 buckets
|
||||
PASSWORD_TOOL_PACKAGES=$(curl --silent -I $PACKAGES_URL$PASSWORD_TOOL | grep -E "^HTTP" | awk '{print $2}')
|
||||
PASSWORD_TOOL_PACKAGES_DEV=$(curl --silent -I $PACKAGES_DEV_URL$PASSWORD_TOOL | grep -E "^HTTP" | awk '{print $2}')
|
||||
|
||||
## If password tool exists in some bucket, download it, if not exit 1
|
||||
if [ "$PASSWORD_TOOL_PACKAGES" = "200" ]; then
|
||||
curl -o $PASSWORD_TOOL $PACKAGES_URL$PASSWORD_TOOL
|
||||
echo "Password tool exists in Packages bucket"
|
||||
elif [ "$PASSWORD_TOOL_PACKAGES_DEV" = "200" ]; then
|
||||
curl -o $PASSWORD_TOOL $PACKAGES_DEV_URL$PASSWORD_TOOL
|
||||
echo "Password tool exists in Packages-dev bucket"
|
||||
else
|
||||
echo "Password tool does not exist in any bucket"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
chmod 755 $CERT_TOOL && bash /$CERT_TOOL -A
|
||||
|
||||
# copy to target
|
||||
mkdir -p ${TARGET_DIR}${INSTALLATION_DIR}
|
||||
mkdir -p ${TARGET_DIR}${INSTALLATION_DIR}/opensearch-security/
|
||||
mkdir -p ${TARGET_DIR}${CONFIG_DIR}
|
||||
mkdir -p ${TARGET_DIR}${LIB_DIR}
|
||||
mkdir -p ${TARGET_DIR}${LOG_DIR}
|
||||
mkdir -p ${TARGET_DIR}/etc/init.d
|
||||
mkdir -p ${TARGET_DIR}/etc/default
|
||||
mkdir -p ${TARGET_DIR}/usr/lib/tmpfiles.d
|
||||
mkdir -p ${TARGET_DIR}/usr/lib/sysctl.d
|
||||
mkdir -p ${TARGET_DIR}/usr/lib/systemd/system
|
||||
mkdir -p ${TARGET_DIR}${CONFIG_DIR}/certs
|
||||
# Copy Wazuh's config files for the security plugin
|
||||
cp -pr /roles_mapping.yml ${TARGET_DIR}${INSTALLATION_DIR}/opensearch-security/
|
||||
cp -pr /roles.yml ${TARGET_DIR}${INSTALLATION_DIR}/opensearch-security/
|
||||
cp -pr /action_groups.yml ${TARGET_DIR}${INSTALLATION_DIR}/opensearch-security/
|
||||
cp -pr /internal_users.yml ${TARGET_DIR}${INSTALLATION_DIR}/opensearch-security/
|
||||
cp -pr /opensearch.yml ${TARGET_DIR}${CONFIG_DIR}
|
||||
# Copy Wazuh indexer's certificates
|
||||
cp -pr /wazuh-certificates/demo.indexer.pem ${TARGET_DIR}${CONFIG_DIR}/certs/indexer.pem
|
||||
cp -pr /wazuh-certificates/demo.indexer-key.pem ${TARGET_DIR}${CONFIG_DIR}/certs/indexer-key.pem
|
||||
cp -pr /wazuh-certificates/root-ca.key ${TARGET_DIR}${CONFIG_DIR}/certs/root-ca.key
|
||||
cp -pr /wazuh-certificates/root-ca.pem ${TARGET_DIR}${CONFIG_DIR}/certs/root-ca.pem
|
||||
cp -pr /wazuh-certificates/admin.pem ${TARGET_DIR}${CONFIG_DIR}/certs/admin.pem
|
||||
cp -pr /wazuh-certificates/admin-key.pem ${TARGET_DIR}${CONFIG_DIR}/certs/admin-key.pem
|
||||
|
||||
# Delete xms and xmx parameters in jvm.options
|
||||
sed '/-Xms/d' -i /etc/wazuh-indexer/jvm.options
|
||||
sed '/-Xmx/d' -i /etc/wazuh-indexer/jvm.options
|
||||
sed -i 's/-Djava.security.policy=file:\/\/\/etc\/wazuh-indexer\/opensearch-performance-analyzer\/opensearch_security.policy/-Djava.security.policy=file:\/\/\/usr\/share\/wazuh-indexer\/opensearch-performance-analyzer\/opensearch_security.policy/g' /etc/wazuh-indexer/jvm.options
|
||||
|
||||
|
||||
chmod -R 500 ${TARGET_DIR}${CONFIG_DIR}/certs
|
||||
chmod -R 400 ${TARGET_DIR}${CONFIG_DIR}/certs/*
|
||||
|
||||
find ${TARGET_DIR} -type d -exec chmod 750 {} \;
|
||||
find ${TARGET_DIR} -type f -perm 644 -exec chmod 640 {} \;
|
||||
find ${TARGET_DIR} -type f -perm 664 -exec chmod 660 {} \;
|
||||
find ${TARGET_DIR} -type f -perm 755 -exec chmod 750 {} \;
|
||||
find ${TARGET_DIR} -type f -perm 744 -exec chmod 740 {} \;
|
@@ -1,5 +0,0 @@
|
||||
nodes:
|
||||
# Wazuh indexer server nodes
|
||||
indexer:
|
||||
- name: demo.indexer
|
||||
ip: demo.indexer
|
@@ -1,93 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# Wazuh Docker Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
set -e
|
||||
|
||||
umask 0002
|
||||
|
||||
export USER=wazuh-indexer
|
||||
export INSTALLATION_DIR=/usr/share/wazuh-indexer
|
||||
export OPENSEARCH_PATH_CONF=${INSTALLATION_DIR}/config
|
||||
export JAVA_HOME=${INSTALLATION_DIR}/jdk
|
||||
export DISCOVERY=$(grep -oP "(?<=discovery.type: ).*" ${OPENSEARCH_PATH_CONF}/opensearch.yml)
|
||||
export CACERT=$(grep -oP "(?<=plugins.security.ssl.transport.pemtrustedcas_filepath: ).*" ${OPENSEARCH_PATH_CONF}/opensearch.yml)
|
||||
export CERT="${OPENSEARCH_PATH_CONF}/certs/admin.pem"
|
||||
export KEY="${OPENSEARCH_PATH_CONF}/certs/admin-key.pem"
|
||||
|
||||
run_as_other_user_if_needed() {
|
||||
if [[ "$(id -u)" == "0" ]]; then
|
||||
# If running as root, drop to specified UID and run command
|
||||
exec chroot --userspec=1000:0 / "${@}"
|
||||
else
|
||||
# Either we are running in Openshift with random uid and are a member of the root group
|
||||
# or with a custom --user
|
||||
exec "${@}"
|
||||
fi
|
||||
}
|
||||
|
||||
# Allow the user to specify a custom CMD, maybe bin/opensearch itself
|
||||
# for example to directly specify `-E` style parameters for opensearch on k8s
|
||||
# or simply to run /bin/bash to check the image
|
||||
if [[ "$1" != "opensearchwrapper" ]]; then
|
||||
if [[ "$(id -u)" == "0" && $(basename "$1") == "opensearch" ]]; then
|
||||
# Rewrite CMD args to replace $1 with `opensearch` explicitly,
|
||||
# Without this, user could specify `opensearch -E x.y=z` but
|
||||
# `bin/opensearch -E x.y=z` would not work.
|
||||
set -- "opensearch" "${@:2}"
|
||||
# Use chroot to switch to UID 1000 / GID 0
|
||||
exec chroot --userspec=1000:0 / "$@"
|
||||
else
|
||||
# User probably wants to run something else, like /bin/bash, with another uid forced (Openshift?)
|
||||
exec "$@"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Allow environment variables to be set by creating a file with the
|
||||
# contents, and setting an environment variable with the suffix _FILE to
|
||||
# point to it. This can be used to provide secrets to a container, without
|
||||
# the values being specified explicitly when running the container.
|
||||
#
|
||||
# This is also sourced in opensearch-env, and is only needed here
|
||||
# as well because we use INDEXER_PASSWORD below. Sourcing this script
|
||||
# is idempotent.
|
||||
source /usr/share/wazuh-indexer/bin/opensearch-env-from-file
|
||||
|
||||
if [[ -f bin/opensearch-users ]]; then
|
||||
# Check for the INDEXER_PASSWORD environment variable to set the
|
||||
# bootstrap password for Security.
|
||||
#
|
||||
# This is only required for the first node in a cluster with Security
|
||||
# enabled, but we have no way of knowing which node we are yet. We'll just
|
||||
# honor the variable if it's present.
|
||||
if [[ -n "$INDEXER_PASSWORD" ]]; then
|
||||
[[ -f /usr/share/wazuh-indexer/opensearch.keystore ]] || (run_as_other_user_if_needed opensearch-keystore create)
|
||||
if ! (run_as_other_user_if_needed opensearch-keystore has-passwd --silent) ; then
|
||||
# keystore is unencrypted
|
||||
if ! (run_as_other_user_if_needed opensearch-keystore list | grep -q '^bootstrap.password$'); then
|
||||
(run_as_other_user_if_needed echo "$INDEXER_PASSWORD" | opensearch-keystore add -x 'bootstrap.password')
|
||||
fi
|
||||
else
|
||||
# keystore requires password
|
||||
if ! (run_as_other_user_if_needed echo "$KEYSTORE_PASSWORD" \
|
||||
| opensearch-keystore list | grep -q '^bootstrap.password$') ; then
|
||||
COMMANDS="$(printf "%s\n%s" "$KEYSTORE_PASSWORD" "$INDEXER_PASSWORD")"
|
||||
(run_as_other_user_if_needed echo "$COMMANDS" | opensearch-keystore add -x 'bootstrap.password')
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "$(id -u)" == "0" ]]; then
|
||||
# If requested and running as root, mutate the ownership of bind-mounts
|
||||
if [[ -n "$TAKE_FILE_OWNERSHIP" ]]; then
|
||||
chown -R 1000:0 /usr/share/wazuh-indexer/{data,logs}
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
#if [[ "$DISCOVERY" == "single-node" ]] && [[ ! -f "/var/lib/wazuh-indexer/.flag" ]]; then
|
||||
# run securityadmin.sh for single node with CACERT, CERT and KEY parameter
|
||||
# nohup /securityadmin.sh &
|
||||
# touch "/var/lib/wazuh-indexer/.flag"
|
||||
#fi
|
||||
|
||||
run_as_other_user_if_needed /usr/share/wazuh-indexer/bin/opensearch <<<"$KEYSTORE_PASSWORD"
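
The opensearch-env-from-file hook sourced above means the bootstrap password does not have to be passed as a plain environment variable; a sketch using a mounted secret file (file name and path are assumptions):

```
docker run -d \
  -v $(pwd)/indexer_password.txt:/run/secrets/indexer_password:ro \
  -e INDEXER_PASSWORD_FILE=/run/secrets/indexer_password \
  wazuh/wazuh-indexer:4.14.0
```

At start-up the value is read from the file and exported as INDEXER_PASSWORD before the keystore bootstrap logic above runs.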
|
@@ -1,74 +0,0 @@
|
||||
---
|
||||
# This is the internal user database
|
||||
# The hash value is a bcrypt hash and can be generated with plugin/tools/hash.sh
|
||||
|
||||
_meta:
|
||||
type: "internalusers"
|
||||
config_version: 2
|
||||
|
||||
# Define your internal users here
|
||||
|
||||
## Demo users
|
||||
|
||||
admin:
|
||||
hash: "$2a$12$VcCDgh2NDk07JGN0rjGbM.Ad41qVR/YFJcgHp0UGns5JDymv..TOG"
|
||||
reserved: true
|
||||
backend_roles:
|
||||
- "admin"
|
||||
description: "Demo admin user"
|
||||
|
||||
kibanaserver:
|
||||
hash: "$2a$12$4AcgAt3xwOWadA5s5blL6ev39OXDNhmOesEoo33eZtrq2N0YrU3H."
|
||||
reserved: true
|
||||
description: "Demo kibanaserver user"
|
||||
|
||||
kibanaro:
|
||||
hash: "$2a$12$JJSXNfTowz7Uu5ttXfeYpeYE0arACvcwlPBStB1F.MI7f0U9Z4DGC"
|
||||
reserved: false
|
||||
backend_roles:
|
||||
- "kibanauser"
|
||||
- "readall"
|
||||
attributes:
|
||||
attribute1: "value1"
|
||||
attribute2: "value2"
|
||||
attribute3: "value3"
|
||||
description: "Demo kibanaro user"
|
||||
|
||||
logstash:
|
||||
hash: "$2a$12$u1ShR4l4uBS3Uv59Pa2y5.1uQuZBrZtmNfqB3iM/.jL0XoV9sghS2"
|
||||
reserved: false
|
||||
backend_roles:
|
||||
- "logstash"
|
||||
description: "Demo logstash user"
|
||||
|
||||
readall:
|
||||
hash: "$2a$12$ae4ycwzwvLtZxwZ82RmiEunBbIPiAmGZduBAjKN0TXdwQFtCwARz2"
|
||||
reserved: false
|
||||
backend_roles:
|
||||
- "readall"
|
||||
description: "Demo readall user"
|
||||
|
||||
snapshotrestore:
|
||||
hash: "$2y$12$DpwmetHKwgYnorbgdvORCenv4NAK8cPUg8AI6pxLCuWf/ALc0.v7W"
|
||||
reserved: false
|
||||
backend_roles:
|
||||
- "snapshotrestore"
|
||||
description: "Demo snapshotrestore user"
|
||||
|
||||
wazuh_admin:
|
||||
hash: "$2y$12$d2awHiOYvZjI88VfsDON.u6buoBol0gYPJEgdG1ArKVE0OMxViFfu"
|
||||
reserved: true
|
||||
hidden: false
|
||||
backend_roles: []
|
||||
attributes: {}
|
||||
opendistro_security_roles: []
|
||||
static: false
|
||||
|
||||
wazuh_user:
|
||||
hash: "$2y$12$BQixeoQdRubZdVf/7sq1suHwiVRnSst1.lPI2M0.GPZms4bq2D9vO"
|
||||
reserved: true
|
||||
hidden: false
|
||||
backend_roles: []
|
||||
attributes: {}
|
||||
opendistro_security_roles: []
|
||||
static: false
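
These bcrypt hashes are demo values only. Replacing one means generating a new hash with the security plugin's hash tool (the path below is assumed from where securityadmin.sh lives in this image) and pasting the output into the corresponding hash: field:

```
bash /usr/share/wazuh-indexer/plugins/opensearch-security/tools/hash.sh -p 'NewPassword'
```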
|
@@ -1,27 +0,0 @@
|
||||
network.host: "0.0.0.0"
|
||||
node.name: "wazuh.indexer"
|
||||
cluster.name: "wazuh-cluster"
|
||||
path.data: /var/lib/wazuh-indexer
|
||||
path.logs: /var/log/wazuh-indexer
|
||||
discovery.type: single-node
|
||||
compatibility.override_main_response_version: true
|
||||
plugins.security.ssl.http.pemcert_filepath: /usr/share/wazuh-indexer/config/certs/indexer.pem
|
||||
plugins.security.ssl.http.pemkey_filepath: /usr/share/wazuh-indexer/config/certs/indexer-key.pem
|
||||
plugins.security.ssl.http.pemtrustedcas_filepath: /usr/share/wazuh-indexer/config/certs/root-ca.pem
|
||||
plugins.security.ssl.transport.pemcert_filepath: /usr/share/wazuh-indexer/config/certs/indexer.pem
|
||||
plugins.security.ssl.transport.pemkey_filepath: /usr/share/wazuh-indexer/config/certs/indexer-key.pem
|
||||
plugins.security.ssl.transport.pemtrustedcas_filepath: /usr/share/wazuh-indexer/config/certs/root-ca.pem
|
||||
plugins.security.ssl.http.enabled: true
|
||||
plugins.security.ssl.transport.enforce_hostname_verification: false
|
||||
plugins.security.ssl.transport.resolve_hostname: false
|
||||
plugins.security.authcz.admin_dn:
|
||||
- "CN=admin,OU=Wazuh,O=Wazuh,L=California,C=US"
|
||||
plugins.security.check_snapshot_restore_write_privileges: true
|
||||
plugins.security.enable_snapshot_restore_privilege: true
|
||||
plugins.security.nodes_dn:
|
||||
- "CN=demo.indexer,OU=Wazuh,O=Wazuh,L=California,C=US"
|
||||
plugins.security.restapi.roles_enabled:
|
||||
- "all_access"
|
||||
- "security_rest_api_access"
|
||||
plugins.security.system_indices.enabled: true
|
||||
plugins.security.system_indices.indices: [".opendistro-alerting-config", ".opendistro-alerting-alert*", ".opendistro-anomaly-results*", ".opendistro-anomaly-detector*", ".opendistro-anomaly-checkpoints", ".opendistro-anomaly-detection-state", ".opendistro-reports-*", ".opendistro-notifications-*", ".opendistro-notebooks", ".opensearch-observability", ".opendistro-asynchronous-search-response*", ".replication-metadata-store"]
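
With this configuration the indexer answers only over HTTPS on port 9200; a quick health probe from the Docker host might look like (assuming the default admin/admin credentials from the Compose file):

```
curl -k -u admin:admin "https://localhost:9200/_cluster/health?pretty"
```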
|
@@ -1,171 +0,0 @@
|
||||
_meta:
|
||||
type: "roles"
|
||||
config_version: 2
|
||||
|
||||
# Restrict users so they can only view visualization and dashboards on kibana
|
||||
kibana_read_only:
|
||||
reserved: true
|
||||
|
||||
# The security REST API access role is used to assign specific users access to change the security settings through the REST API.
|
||||
security_rest_api_access:
|
||||
reserved: true
|
||||
|
||||
# Allows users to view monitors, destinations and alerts
|
||||
alerting_read_access:
|
||||
reserved: true
|
||||
cluster_permissions:
|
||||
- 'cluster:admin/opendistro/alerting/alerts/get'
|
||||
- 'cluster:admin/opendistro/alerting/destination/get'
|
||||
- 'cluster:admin/opendistro/alerting/monitor/get'
|
||||
- 'cluster:admin/opendistro/alerting/monitor/search'
|
||||
|
||||
# Allows users to view and acknowledge alerts
|
||||
alerting_ack_alerts:
|
||||
reserved: true
|
||||
cluster_permissions:
|
||||
- 'cluster:admin/opendistro/alerting/alerts/*'
|
||||
|
||||
# Allows users to use all alerting functionality
|
||||
alerting_full_access:
|
||||
reserved: true
|
||||
cluster_permissions:
|
||||
- 'cluster_monitor'
|
||||
- 'cluster:admin/opendistro/alerting/*'
|
||||
index_permissions:
|
||||
- index_patterns:
|
||||
- '*'
|
||||
allowed_actions:
|
||||
- 'indices_monitor'
|
||||
- 'indices:admin/aliases/get'
|
||||
- 'indices:admin/mappings/get'
|
||||
|
||||
# Allow users to read Anomaly Detection detectors and results
|
||||
anomaly_read_access:
|
||||
reserved: true
|
||||
cluster_permissions:
|
||||
- 'cluster:admin/opendistro/ad/detector/info'
|
||||
- 'cluster:admin/opendistro/ad/detector/search'
|
||||
- 'cluster:admin/opendistro/ad/detectors/get'
|
||||
- 'cluster:admin/opendistro/ad/result/search'
|
||||
- 'cluster:admin/opendistro/ad/tasks/search'
|
||||
|
||||
# Allows users to use all Anomaly Detection functionality
|
||||
anomaly_full_access:
|
||||
reserved: true
|
||||
cluster_permissions:
|
||||
- 'cluster_monitor'
|
||||
- 'cluster:admin/opendistro/ad/*'
|
||||
index_permissions:
|
||||
- index_patterns:
|
||||
- '*'
|
||||
allowed_actions:
|
||||
- 'indices_monitor'
|
||||
- 'indices:admin/aliases/get'
|
||||
- 'indices:admin/mappings/get'
|
||||
|
||||
# Allows users to read Notebooks
|
||||
notebooks_read_access:
|
||||
reserved: true
|
||||
cluster_permissions:
|
||||
- 'cluster:admin/opendistro/notebooks/list'
|
||||
- 'cluster:admin/opendistro/notebooks/get'
|
||||
|
||||
# Allows users to use all Notebooks functionality
|
||||
notebooks_full_access:
|
||||
reserved: true
|
||||
cluster_permissions:
|
||||
- 'cluster:admin/opendistro/notebooks/create'
|
||||
- 'cluster:admin/opendistro/notebooks/update'
|
||||
- 'cluster:admin/opendistro/notebooks/delete'
|
||||
- 'cluster:admin/opendistro/notebooks/get'
|
||||
- 'cluster:admin/opendistro/notebooks/list'
|
||||
|
||||
# Allows users to read and download Reports
|
||||
reports_instances_read_access:
|
||||
reserved: true
|
||||
cluster_permissions:
|
||||
- 'cluster:admin/opendistro/reports/instance/list'
|
||||
- 'cluster:admin/opendistro/reports/instance/get'
|
||||
- 'cluster:admin/opendistro/reports/menu/download'
|
||||
|
||||
# Allows users to read and download Reports and Report-definitions
|
||||
reports_read_access:
|
||||
reserved: true
|
||||
cluster_permissions:
|
||||
- 'cluster:admin/opendistro/reports/definition/get'
|
||||
- 'cluster:admin/opendistro/reports/definition/list'
|
||||
- 'cluster:admin/opendistro/reports/instance/list'
|
||||
- 'cluster:admin/opendistro/reports/instance/get'
|
||||
- 'cluster:admin/opendistro/reports/menu/download'
|
||||
|
||||
# Allows users to use all Reports functionality
|
||||
reports_full_access:
|
||||
reserved: true
|
||||
cluster_permissions:
|
||||
- 'cluster:admin/opendistro/reports/definition/create'
|
||||
- 'cluster:admin/opendistro/reports/definition/update'
|
||||
- 'cluster:admin/opendistro/reports/definition/on_demand'
|
||||
- 'cluster:admin/opendistro/reports/definition/delete'
|
||||
- 'cluster:admin/opendistro/reports/definition/get'
|
||||
- 'cluster:admin/opendistro/reports/definition/list'
|
||||
- 'cluster:admin/opendistro/reports/instance/list'
|
||||
- 'cluster:admin/opendistro/reports/instance/get'
|
||||
- 'cluster:admin/opendistro/reports/menu/download'
|
||||
|
||||
# Allows users to use all asynchronous-search functionality
|
||||
asynchronous_search_full_access:
|
||||
reserved: true
|
||||
cluster_permissions:
|
||||
- 'cluster:admin/opendistro/asynchronous_search/*'
|
||||
index_permissions:
|
||||
- index_patterns:
|
||||
- '*'
|
||||
allowed_actions:
|
||||
- 'indices:data/read/search*'
|
||||
|
||||
# Allows users to read stored asynchronous-search results
|
||||
asynchronous_search_read_access:
|
||||
reserved: true
|
||||
cluster_permissions:
|
||||
- 'cluster:admin/opendistro/asynchronous_search/get'
|
||||
|
||||
wazuh_ui_user:
|
||||
reserved: true
|
||||
hidden: false
|
||||
cluster_permissions: []
|
||||
index_permissions:
|
||||
- index_patterns:
|
||||
- "wazuh-*"
|
||||
dls: ""
|
||||
fls: []
|
||||
masked_fields: []
|
||||
allowed_actions:
|
||||
- "read"
|
||||
tenant_permissions: []
|
||||
static: false
|
||||
|
||||
wazuh_ui_admin:
|
||||
reserved: true
|
||||
hidden: false
|
||||
cluster_permissions: []
|
||||
index_permissions:
|
||||
- index_patterns:
|
||||
- "wazuh-*"
|
||||
dls: ""
|
||||
fls: []
|
||||
masked_fields: []
|
||||
allowed_actions:
|
||||
- "read"
|
||||
- "delete"
|
||||
- "manage"
|
||||
- "index"
|
||||
tenant_permissions: []
|
||||
static: false
|
||||
|
||||
# ISM API permissions role
|
||||
manage_ism:
|
||||
reserved: true
|
||||
hidden: false
|
||||
cluster_permissions:
|
||||
- "manage_ism"
|
||||
static: false
|
@@ -1,78 +0,0 @@
|
||||
---
|
||||
# In this file users, backendroles and hosts can be mapped to Wazuh indexer Security roles.
|
||||
# Permissions for Wazuh indexer roles are configured in roles.yml
|
||||
|
||||
_meta:
|
||||
type: "rolesmapping"
|
||||
config_version: 2
|
||||
|
||||
# Define your roles mapping here
|
||||
|
||||
## Demo roles mapping
|
||||
|
||||
all_access:
|
||||
reserved: false
|
||||
backend_roles:
|
||||
- "admin"
|
||||
description: "Maps admin to all_access"
|
||||
|
||||
own_index:
|
||||
reserved: false
|
||||
users:
|
||||
- "*"
|
||||
description: "Allow full access to an index named like the username"
|
||||
|
||||
logstash:
|
||||
reserved: false
|
||||
backend_roles:
|
||||
- "logstash"
|
||||
|
||||
kibana_user:
|
||||
reserved: false
|
||||
backend_roles:
|
||||
- "kibanauser"
|
||||
users:
|
||||
- "wazuh_user"
|
||||
- "wazuh_admin"
|
||||
description: "Maps kibanauser to kibana_user"
|
||||
|
||||
readall:
|
||||
reserved: false
|
||||
backend_roles:
|
||||
- "readall"
|
||||
|
||||
manage_snapshots:
|
||||
reserved: false
|
||||
backend_roles:
|
||||
- "snapshotrestore"
|
||||
|
||||
kibana_server:
|
||||
reserved: true
|
||||
users:
|
||||
- "kibanaserver"
|
||||
|
||||
wazuh_ui_admin:
|
||||
reserved: true
|
||||
hidden: false
|
||||
backend_roles: []
|
||||
hosts: []
|
||||
users:
|
||||
- "wazuh_admin"
|
||||
- "kibanaserver"
|
||||
and_backend_roles: []
|
||||
|
||||
wazuh_ui_user:
|
||||
reserved: true
|
||||
hidden: false
|
||||
backend_roles: []
|
||||
hosts: []
|
||||
users:
|
||||
- "wazuh_user"
|
||||
and_backend_roles: []
|
||||
|
||||
# ISM API permissions role mapping
|
||||
manage_ism:
|
||||
reserved: true
|
||||
hidden: false
|
||||
users:
|
||||
- "kibanaserver"
|
@@ -1,3 +0,0 @@
|
||||
# Wazuh Docker Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
sleep 30
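# Note: CACERT, CERT and KEY must already be exported when this script runs.
# A minimal sketch (paths are illustrative, not taken from this repository):
#   export CACERT=/usr/share/wazuh-indexer/certs/root-ca.pem
#   export CERT=/usr/share/wazuh-indexer/certs/admin.pem
#   export KEY=/usr/share/wazuh-indexer/certs/admin-key.pem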
|
||||
bash /usr/share/wazuh-indexer/plugins/opensearch-security/tools/securityadmin.sh -cd /usr/share/wazuh-indexer/opensearch-security/ -nhnv -cacert $CACERT -cert $CERT -key $KEY -p 9200 -icl
|
@@ -1,69 +0,0 @@
|
||||
# Wazuh Docker Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
FROM amazonlinux:2023
|
||||
|
||||
RUN rm /bin/sh && ln -s /bin/bash /bin/sh
|
||||
|
||||
ARG WAZUH_VERSION
|
||||
ARG WAZUH_TAG_REVISION
|
||||
ARG FILEBEAT_TEMPLATE_BRANCH
|
||||
ARG FILEBEAT_CHANNEL=filebeat-oss
|
||||
ARG FILEBEAT_VERSION=7.10.2
|
||||
ARG FILEBEAT_REVISION=2
|
||||
ARG WAZUH_FILEBEAT_MODULE
|
||||
ARG S6_VERSION="v2.2.0.3"
|
||||
|
||||
RUN yum install curl-minimal xz gnupg tar gzip openssl findutils procps -y &&\
|
||||
yum clean all
|
||||
|
||||
COPY config/check_repository.sh /
|
||||
COPY config/filebeat_module.sh /
|
||||
COPY config/permanent_data.env config/permanent_data.sh /
|
||||
|
||||
RUN chmod 775 /check_repository.sh
|
||||
RUN source /check_repository.sh
|
||||
|
||||
RUN yum install wazuh-manager-${WAZUH_VERSION}-${WAZUH_TAG_REVISION} -y && \
|
||||
yum clean all && \
|
||||
chmod 775 /filebeat_module.sh && \
|
||||
source /filebeat_module.sh && \
|
||||
rm /filebeat_module.sh && \
|
||||
curl --fail --silent -L https://github.com/just-containers/s6-overlay/releases/download/${S6_VERSION}/s6-overlay-amd64.tar.gz \
|
||||
-o /tmp/s6-overlay-amd64.tar.gz && \
|
||||
tar xzf /tmp/s6-overlay-amd64.tar.gz -C / --exclude="./bin" && \
|
||||
tar xzf /tmp/s6-overlay-amd64.tar.gz -C /usr ./bin && \
|
||||
rm /tmp/s6-overlay-amd64.tar.gz
|
||||
|
||||
COPY config/etc/ /etc/
|
||||
COPY --chown=root:wazuh config/create_user.py /var/ossec/framework/scripts/create_user.py
|
||||
|
||||
COPY config/filebeat.yml /etc/filebeat/
|
||||
|
||||
RUN chmod go-w /etc/filebeat/filebeat.yml
|
||||
|
||||
ADD https://raw.githubusercontent.com/wazuh/wazuh/$FILEBEAT_TEMPLATE_BRANCH/extensions/elasticsearch/7.x/wazuh-template.json /etc/filebeat
|
||||
RUN chmod go-w /etc/filebeat/wazuh-template.json
|
||||
|
||||
# Prepare permanent data
|
||||
# Sync calls are due to https://github.com/docker/docker/issues/9547
|
||||
|
||||
# Create mount directories to keep their permissions
|
||||
|
||||
RUN mkdir -p /var/ossec/var/multigroups && \
|
||||
chown root:wazuh /var/ossec/var/multigroups && \
|
||||
chmod 770 /var/ossec/var/multigroups && \
|
||||
mkdir -p /var/ossec/agentless && \
|
||||
chown root:wazuh /var/ossec/agentless && \
|
||||
chmod 770 /var/ossec/agentless && \
|
||||
mkdir -p /var/ossec/active-response/bin && \
|
||||
chown root:wazuh /var/ossec/active-response/bin && \
|
||||
chmod 770 /var/ossec/active-response/bin && \
|
||||
chmod 755 /permanent_data.sh && \
|
||||
sync && /permanent_data.sh && \
|
||||
sync && rm /permanent_data.sh
|
||||
|
||||
RUN rm /etc/yum.repos.d/wazuh.repo
|
||||
|
||||
# Services ports
|
||||
EXPOSE 55000/tcp 1514/tcp 1515/tcp 514/udp 1516/tcp
|
||||
|
||||
ENTRYPOINT [ "/init" ]
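# Illustrative build invocation (values are placeholders; the repository's
# build-images.sh normally supplies these build arguments):
#   docker build \
#     --build-arg WAZUH_VERSION=4.14.0 \
#     --build-arg WAZUH_TAG_REVISION=1 \
#     --build-arg FILEBEAT_TEMPLATE_BRANCH=4.14.0 \
#     --build-arg WAZUH_FILEBEAT_MODULE=wazuh-filebeat-0.4.tar.gz \
#     -t wazuh/wazuh-manager:4.14.0 .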
|
@@ -1,15 +0,0 @@
|
||||
## variables
|
||||
APT_KEY=https://packages-dev.wazuh.com/key/GPG-KEY-WAZUH
|
||||
GPG_SIGN="gpgcheck=1\ngpgkey=${APT_KEY}"
|
||||
REPOSITORY="[wazuh]\n${GPG_SIGN}\nenabled=1\nname=EL-\$releasever - Wazuh\nbaseurl=https://packages-dev.wazuh.com/pre-release/yum/\nprotect=1"
|
||||
WAZUH_TAG=$(curl --silent https://api.github.com/repos/wazuh/wazuh/git/refs/tags | grep '["]ref["]:' | sed -E 's/.*\"([^\"]+)\".*/\1/' | cut -c 11- | grep ^v${WAZUH_VERSION}$)
|
||||
|
||||
## check tag to use the correct repository
|
||||
if [[ -n "${WAZUH_TAG}" ]]; then
|
||||
APT_KEY=https://packages.wazuh.com/key/GPG-KEY-WAZUH
|
||||
GPG_SIGN="gpgcheck=1\ngpgkey=${APT_KEY}"
|
||||
REPOSITORY="[wazuh]\n${GPG_SIGN}\nenabled=1\nname=EL-\$releasever - Wazuh\nbaseurl=https://packages.wazuh.com/4.x/yum/\nprotect=1"
|
||||
fi
|
||||
|
||||
rpm --import "${APT_KEY}"
|
||||
echo -e "${REPOSITORY}" | tee /etc/yum.repos.d/wazuh.repo
|
@@ -1,102 +0,0 @@
|
||||
import logging
|
||||
import sys
|
||||
import json
|
||||
import random
|
||||
import string
|
||||
import os
|
||||
|
||||
# Set framework path
|
||||
sys.path.append(os.path.dirname(sys.argv[0]) + "/../framework")
|
||||
|
||||
USER_FILE_PATH = "/var/ossec/api/configuration/admin.json"
|
||||
SPECIAL_CHARS = "@$!%*?&-_"
|
||||
|
||||
|
||||
try:
|
||||
from wazuh.rbac.orm import check_database_integrity
|
||||
from wazuh.security import (
|
||||
create_user,
|
||||
get_users,
|
||||
get_roles,
|
||||
set_user_role,
|
||||
update_user,
|
||||
)
|
||||
except ModuleNotFoundError as e:
|
||||
logging.error("No module 'wazuh' found.")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def read_user_file(path=USER_FILE_PATH):
|
||||
with open(path) as user_file:
|
||||
data = json.load(user_file)
|
||||
return data["username"], data["password"]
|
||||
|
||||
|
||||
def db_users():
|
||||
users_result = get_users()
|
||||
return {user["username"]: user["id"] for user in users_result.affected_items}
|
||||
|
||||
|
||||
def db_roles():
|
||||
roles_result = get_roles()
|
||||
return {role["name"]: role["id"] for role in roles_result.affected_items}
|
||||
|
||||
def disable_user(uid):
|
||||
random_pass = "".join(
|
||||
random.choices(
|
||||
string.ascii_uppercase
|
||||
+ string.ascii_lowercase
|
||||
+ string.digits
|
||||
+ SPECIAL_CHARS,
|
||||
k=8,
|
||||
)
|
||||
)
|
||||
# ensure at least one character from each group is present
|
||||
random_pass = random_pass + ''.join([random.choice(chars) for chars in [string.ascii_lowercase, string.digits, string.ascii_uppercase, SPECIAL_CHARS]])
|
||||
random_pass = ''.join(random.sample(random_pass,len(random_pass)))
|
||||
update_user(
|
||||
user_id=[
|
||||
str(uid),
|
||||
],
|
||||
password=random_pass,
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if not os.path.exists(USER_FILE_PATH):
|
||||
# abort if no user file detected
|
||||
sys.exit(0)
|
||||
username, password = read_user_file()
|
||||
|
||||
# create RBAC database
|
||||
check_database_integrity()
|
||||
|
||||
initial_users = db_users()
|
||||
if username not in initial_users:
|
||||
# create a new user
|
||||
create_user(username=username, password=password)
|
||||
users = db_users()
|
||||
uid = users[username]
|
||||
roles = db_roles()
|
||||
rid = roles["administrator"]
|
||||
set_user_role(
|
||||
user_id=[
|
||||
str(uid),
|
||||
],
|
||||
role_ids=[
|
||||
str(rid),
|
||||
],
|
||||
)
|
||||
else:
|
||||
# modify an existing user ("wazuh" or "wazuh-wui")
|
||||
uid = initial_users[username]
|
||||
update_user(
|
||||
user_id=[
|
||||
str(uid),
|
||||
],
|
||||
password=password,
|
||||
)
|
||||
# disable unused default users
|
||||
for def_user in ['wazuh', 'wazuh-wui']:
|
||||
if def_user != username:
|
||||
disable_user(initial_users[def_user])
|
@@ -1,234 +0,0 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
# Wazuh App Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
|
||||
# Variables
|
||||
source /permanent_data.env
|
||||
|
||||
WAZUH_INSTALL_PATH=/var/ossec
|
||||
WAZUH_CONFIG_MOUNT=/wazuh-config-mount
|
||||
AUTO_ENROLLMENT_ENABLED=${AUTO_ENROLLMENT_ENABLED:-true}
|
||||
|
||||
|
||||
##############################################################################
|
||||
# Aux functions
|
||||
##############################################################################
|
||||
print() {
|
||||
echo -e $1
|
||||
}
|
||||
|
||||
error_and_exit() {
|
||||
echo "Error executing command: '$1'."
|
||||
echo 'Exiting.'
|
||||
exit 1
|
||||
}
|
||||
|
||||
exec_cmd() {
|
||||
eval $1 > /dev/null 2>&1 || error_and_exit "$1"
|
||||
}
|
||||
|
||||
exec_cmd_stdout() {
|
||||
eval $1 2>&1 || error_and_exit "$1"
|
||||
}
|
||||
|
||||
|
||||
##############################################################################
|
||||
# This function will attempt to mount every directory in PERMANENT_DATA
|
||||
# into the respective path.
|
||||
# If the path is empty, the permanent data volume is also empty, so a backup
|
||||
# will be copied into it. Otherwise it will not be copied because there is
|
||||
# already data inside the volume for the specified path.
|
||||
##############################################################################
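# A minimal sketch of the intended usage (volume names are illustrative): each
# directory listed in PERMANENT_DATA is typically backed by a named Docker
# volume in the compose file, e.g.
#   volumes:
#     - wazuh_etc:/var/ossec/etc
#     - wazuh_queue:/var/ossec/queue
# so the backup copy below only runs the first time an empty volume is attached.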
|
||||
|
||||
mount_permanent_data() {
|
||||
for permanent_dir in "${PERMANENT_DATA[@]}"; do
|
||||
data_tmp="${WAZUH_INSTALL_PATH}/data_tmp/permanent${permanent_dir}/"
|
||||
print ${data_tmp}
|
||||
# Check if the path is not empty
|
||||
if find ${permanent_dir} -mindepth 1 | read; then
|
||||
print "The path ${permanent_dir} is already mounted"
|
||||
else
|
||||
print "Installing ${permanent_dir}"
|
||||
exec_cmd "cp -ar ${data_tmp}. ${permanent_dir}"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
##############################################################################
|
||||
# This function will replace from the permanent data volume every file
|
||||
# contained in PERMANENT_DATA_EXCP
|
||||
# Some files, such as 'internal_options.conf', are saved as permanent data, but
|
||||
# they must be updated to work properly if the Wazuh version changes.
|
||||
##############################################################################
|
||||
|
||||
apply_exclusion_data() {
|
||||
for exclusion_file in "${PERMANENT_DATA_EXCP[@]}"; do
|
||||
if [ -e ${WAZUH_INSTALL_PATH}/data_tmp/exclusion/${exclusion_file} ]
|
||||
then
|
||||
DIR=$(dirname "${exclusion_file}")
|
||||
if [ ! -e ${DIR} ]
|
||||
then
|
||||
mkdir -p ${DIR}
|
||||
fi
|
||||
|
||||
print "Updating ${exclusion_file}"
|
||||
exec_cmd "cp -p ${WAZUH_INSTALL_PATH}/data_tmp/exclusion/${exclusion_file} ${exclusion_file}"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
##############################################################################
|
||||
# This function will rename in the permanent data volume every file
|
||||
# contained in PERMANENT_DATA_MOVE
|
||||
##############################################################################
|
||||
|
||||
move_data_files() {
|
||||
for mov_file in "${PERMANENT_DATA_MOVE[@]}"; do
|
||||
file_split=( $mov_file )
|
||||
if [ -e ${file_split[0]} ]
|
||||
then
|
||||
print "moving ${mov_file}"
|
||||
exec_cmd "mv -f ${mov_file}"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
##############################################################################
|
||||
# This function will delete from the permanent data volume every file
|
||||
# contained in PERMANENT_DATA_DEL
|
||||
##############################################################################
|
||||
|
||||
remove_data_files() {
|
||||
for del_file in "${PERMANENT_DATA_DEL[@]}"; do
|
||||
if [ -e ${del_file} ]
|
||||
then
|
||||
print "Removing ${del_file}"
|
||||
exec_cmd "rm -f ${del_file}"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
##############################################################################
|
||||
# Create certificates: Manager
|
||||
##############################################################################
|
||||
|
||||
create_ossec_key_cert() {
|
||||
print "Creating wazuh-authd key and cert"
|
||||
exec_cmd "openssl genrsa -out ${WAZUH_INSTALL_PATH}/etc/sslmanager.key 4096"
|
||||
exec_cmd "openssl req -new -x509 -key ${WAZUH_INSTALL_PATH}/etc/sslmanager.key -out ${WAZUH_INSTALL_PATH}/etc/sslmanager.cert -days 3650 -subj /CN=${HOSTNAME}/"
|
||||
}
|
||||
|
||||
##############################################################################
|
||||
# Copy all files from $WAZUH_CONFIG_MOUNT to $WAZUH_INSTALL_PATH and respect
|
||||
# destination files permissions
|
||||
#
|
||||
# For example, to mount the file /var/ossec/data/etc/ossec.conf, mount it at
|
||||
# $WAZUH_CONFIG_MOUNT/etc/ossec.conf in your container and this code will
|
||||
# replace the ossec.conf file in /var/ossec/data/etc with yours.
|
||||
##############################################################################
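# Illustrative example (host path and image tag are assumptions): bind-mount a
# custom configuration file under the mount point and it is copied on start:
#   docker run -d \
#     -v /my/custom/ossec.conf:/wazuh-config-mount/etc/ossec.conf \
#     wazuh/wazuh-manager:4.14.0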
|
||||
|
||||
mount_files() {
|
||||
if [ -e "$WAZUH_CONFIG_MOUNT" ]
|
||||
then
|
||||
print "Identified Wazuh configuration files to mount..."
|
||||
exec_cmd_stdout "cp --verbose -r $WAZUH_CONFIG_MOUNT/* $WAZUH_INSTALL_PATH"
|
||||
else
|
||||
print "No Wazuh configuration files to mount..."
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
##############################################################################
|
||||
# Allow users to set the container hostname as <node_name> dynamically on
|
||||
# container start.
|
||||
#
|
||||
# To use this:
|
||||
# 1. Create your own ossec.conf file
|
||||
# 2. In your ossec.conf file, set to_be_replaced_by_hostname as your node_name
|
||||
# 3. Mount your custom ossec.conf file at $WAZUH_CONFIG_MOUNT/etc/ossec.conf
|
||||
##############################################################################
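# Illustrative ossec.conf snippet for step 2 (only the relevant element shown):
#   <cluster>
#     <node_name>to_be_replaced_by_hostname</node_name>
#   </cluster>
# The sed below then rewrites the placeholder with the container's ${HOSTNAME}.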
|
||||
|
||||
set_custom_hostname() {
|
||||
sed -i 's/<node_name>to_be_replaced_by_hostname<\/node_name>/<node_name>'"${HOSTNAME}"'<\/node_name>/g' ${WAZUH_INSTALL_PATH}/etc/ossec.conf
|
||||
}
|
||||
|
||||
##############################################################################
|
||||
# Allow users to set the container cluster key dynamically on
|
||||
# container start.
|
||||
#
|
||||
# To use this:
|
||||
# 1. Create your own ossec.conf file
|
||||
# 2. In your ossec.conf file, set to_be_replaced_by_cluster_key as your key
|
||||
# 3. Mount your custom ossec.conf file at $WAZUH_CONFIG_MOUNT/etc/ossec.conf
|
||||
##############################################################################
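# Illustrative usage (the key value is a placeholder): export WAZUH_CLUSTER_KEY
# on container start so the sed below can fill in the placeholder, e.g.
#   docker run -e WAZUH_CLUSTER_KEY=c98b62a9b6169ac5f67dae55ae4a9088 ...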
|
||||
|
||||
set_custom_cluster_key() {
|
||||
sed -i 's/<key>to_be_replaced_by_cluster_key<\/key>/<key>'"${WAZUH_CLUSTER_KEY}"'<\/key>/g' ${WAZUH_INSTALL_PATH}/etc/ossec.conf
|
||||
}
|
||||
|
||||
##############################################################################
|
||||
# Set correct ownership for Wazuh related directories
|
||||
# on container start.
|
||||
##############################################################################
|
||||
|
||||
configure_permissions() {
|
||||
chown -R wazuh:wazuh /var/ossec/queue/rids
|
||||
chown -R wazuh:wazuh /var/ossec/etc/lists
|
||||
}
|
||||
|
||||
##############################################################################
|
||||
# Change any ossec user/group to wazuh user/group
|
||||
##############################################################################
|
||||
|
||||
set_correct_permOwner() {
|
||||
find / -group 997 -exec chown :999 {} +;
|
||||
find / -group 101 -exec chown :999 {} +;
|
||||
find / -user 101 -exec chown 999 {} +;
|
||||
}
|
||||
|
||||
##############################################################################
|
||||
# Main function
|
||||
##############################################################################
|
||||
|
||||
main() {
|
||||
# Mount permanent data (i.e. ossec.conf)
|
||||
mount_permanent_data
|
||||
|
||||
# Restore files stored in permanent data that are not permanent (i.e. internal_options.conf)
|
||||
apply_exclusion_data
|
||||
|
||||
# Apply correct permission and ownership
|
||||
set_correct_permOwner
|
||||
|
||||
# Rename files stored in permanent data (i.e. queue/ossec)
|
||||
move_data_files
|
||||
|
||||
# Remove some files in permanent_data (i.e. .template.db)
|
||||
remove_data_files
|
||||
|
||||
# Generate wazuh-authd certs if AUTO_ENROLLMENT_ENABLED is true and does not exist
|
||||
if [ "$AUTO_ENROLLMENT_ENABLED" == true ]
|
||||
then
|
||||
if [ ! -e ${WAZUH_INSTALL_PATH}/etc/sslmanager.key ]
|
||||
then
|
||||
create_ossec_key_cert
|
||||
fi
|
||||
fi
|
||||
|
||||
# Mount selected files (WAZUH_CONFIG_MOUNT) to container
|
||||
mount_files
|
||||
|
||||
# Allow setting custom hostname
|
||||
set_custom_hostname
|
||||
|
||||
# Allow setting custom cluster key
|
||||
set_custom_cluster_key
|
||||
|
||||
# Delete temporary data folder
|
||||
rm -rf ${WAZUH_INSTALL_PATH}/data_tmp
|
||||
|
||||
# Set correct ownership for Wazuh related directories
|
||||
configure_permissions
|
||||
}
|
||||
|
||||
main
|
@@ -1,51 +0,0 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
# Wazuh App Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
|
||||
set -e
|
||||
|
||||
if [ "$INDEXER_URL" != "" ]; then
|
||||
>&2 echo "Customize Elasticsearch output IP"
|
||||
sed -i "s|hosts:.*|hosts: ['$INDEXER_URL']|g" /etc/filebeat/filebeat.yml
|
||||
fi
|
||||
|
||||
# Configure filebeat.yml security settings
|
||||
|
||||
if [ "$INDEXER_USERNAME" != "" ]; then
|
||||
>&2 echo "Configuring username."
|
||||
sed -i "s|#username:.*|username:|g" /etc/filebeat/filebeat.yml
|
||||
sed -i "s|username:.*|username: '$INDEXER_USERNAME'|g" /etc/filebeat/filebeat.yml
|
||||
fi
|
||||
|
||||
if [ "$INDEXER_PASSWORD" != "" ]; then
|
||||
>&2 echo "Configuring password."
|
||||
sed -i "s|#password:.*|password:|g" /etc/filebeat/filebeat.yml
|
||||
sed -i "s|password:.*|password: '$INDEXER_PASSWORD'|g" /etc/filebeat/filebeat.yml
|
||||
fi
|
||||
|
||||
if [ "$FILEBEAT_SSL_VERIFICATION_MODE" != "" ]; then
|
||||
>&2 echo "Configuring SSL verification mode."
|
||||
sed -i "s|#ssl.verification_mode:.*|ssl.verification_mode:|g" /etc/filebeat/filebeat.yml
|
||||
sed -i "s|ssl.verification_mode:.*|ssl.verification_mode: '$FILEBEAT_SSL_VERIFICATION_MODE'|g" /etc/filebeat/filebeat.yml
|
||||
fi
|
||||
|
||||
if [ "$SSL_CERTIFICATE_AUTHORITIES" != "" ]; then
|
||||
>&2 echo "Configuring Certificate Authorities."
|
||||
sed -i "s|#ssl.certificate_authorities:.*|ssl.certificate_authorities:|g" /etc/filebeat/filebeat.yml
|
||||
sed -i "s|ssl.certificate_authorities:.*|ssl.certificate_authorities: ['$SSL_CERTIFICATE_AUTHORITIES']|g" /etc/filebeat/filebeat.yml
|
||||
fi
|
||||
|
||||
if [ "$SSL_CERTIFICATE" != "" ]; then
|
||||
>&2 echo "Configuring SSL Certificate."
|
||||
sed -i "s|#ssl.certificate:.*|ssl.certificate:|g" /etc/filebeat/filebeat.yml
|
||||
sed -i "s|ssl.certificate:.*|ssl.certificate: '$SSL_CERTIFICATE'|g" /etc/filebeat/filebeat.yml
|
||||
fi
|
||||
|
||||
if [ "$SSL_KEY" != "" ]; then
|
||||
>&2 echo "Configuring SSL Key."
|
||||
sed -i "s|#ssl.key:.*|ssl.key:|g" /etc/filebeat/filebeat.yml
|
||||
sed -i "s|ssl.key:.*|ssl.key: '$SSL_KEY'|g" /etc/filebeat/filebeat.yml
|
||||
fi
|
||||
|
||||
|
||||
chmod go-w /etc/filebeat/filebeat.yml || true
|
||||
chown root: /etc/filebeat/filebeat.yml || true
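# Illustrative environment for this init step (values are placeholders); these
# variables are usually set in the compose file of the deployment:
#   INDEXER_URL=https://wazuh.indexer:9200
#   INDEXER_USERNAME=admin
#   INDEXER_PASSWORD=SecretPassword
#   FILEBEAT_SSL_VERIFICATION_MODE=full
#   SSL_CERTIFICATE_AUTHORITIES=/etc/ssl/root-ca.pem
#   SSL_CERTIFICATE=/etc/ssl/filebeat.pem
#   SSL_KEY=/etc/ssl/filebeat.key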
|
@@ -1,136 +0,0 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
|
||||
##############################################################################
|
||||
# Migration sequence
|
||||
# Detect if there is a mounted volume on /wazuh-migration and copy the data
|
||||
# to /var/ossec, finally it will create a flag ".migration-completed" inside
|
||||
# the mounted volume
|
||||
##############################################################################
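# Illustrative usage (volume name is an assumption): attach the old data volume
# at the expected mount point when starting the manager container, e.g.
#   docker run -v wazuh_old_data:/wazuh-migration ...
# The flag file prevents the copy from running again on later restarts.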
|
||||
|
||||
function __colortext()
|
||||
{
|
||||
echo -e " \e[1;$2m$1\e[0m"
|
||||
}
|
||||
|
||||
function echogreen()
|
||||
{
|
||||
echo $(__colortext "$1" "32")
|
||||
}
|
||||
|
||||
function echoyellow()
|
||||
{
|
||||
echo $(__colortext "$1" "33")
|
||||
}
|
||||
|
||||
function echored()
|
||||
{
|
||||
echo $(__colortext "$1" "31")
|
||||
}
|
||||
|
||||
function_wazuh_migration(){
|
||||
if [ -d "/wazuh-migration" ]; then
|
||||
if [ ! -e /wazuh-migration/.migration-completed ]; then
|
||||
if [ ! -e /wazuh-migration/global.db ]; then
|
||||
echoyellow "The volume mounted on /wazuh-migration does not contain all the correct files."
|
||||
return
|
||||
fi
|
||||
|
||||
\cp -f /wazuh-migration/data/etc/ossec.conf /var/ossec/etc/ossec.conf
|
||||
chown root:wazuh /var/ossec/etc/ossec.conf
|
||||
chmod 640 /var/ossec/etc/ossec.conf
|
||||
|
||||
\cp -f /wazuh-migration/data/etc/client.keys /var/ossec/etc/client.keys
|
||||
chown wazuh:wazuh /var/ossec/etc/client.keys
|
||||
chmod 640 /var/ossec/etc/client.keys
|
||||
|
||||
\cp -f /wazuh-migration/data/etc/sslmanager.cert /var/ossec/etc/sslmanager.cert
|
||||
\cp -f /wazuh-migration/data/etc/sslmanager.key /var/ossec/etc/sslmanager.key
|
||||
chown root:root /var/ossec/etc/sslmanager.cert /var/ossec/etc/sslmanager.key
|
||||
chmod 640 /var/ossec/etc/sslmanager.cert /var/ossec/etc/sslmanager.key
|
||||
|
||||
\cp -f /wazuh-migration/data/etc/shared/default/agent.conf /var/ossec/etc/shared/default/agent.conf
|
||||
chown wazuh:wazuh /var/ossec/etc/shared/default/agent.conf
|
||||
chmod 660 /var/ossec/etc/shared/default/agent.conf
|
||||
|
||||
\cp -f /wazuh-migration/data/etc/decoders/* /var/ossec/etc/decoders/
|
||||
chown wazuh:wazuh /var/ossec/etc/decoders/*
|
||||
chmod 660 /var/ossec/etc/decoders/*
|
||||
|
||||
\cp -f /wazuh-migration/data/etc/rules/* /var/ossec/etc/rules/
|
||||
chown wazuh:wazuh /var/ossec/etc/rules/*
|
||||
chmod 660 /var/ossec/etc/rules/*
|
||||
|
||||
if [ -e /wazuh-migration/data/agentless/.passlist ]; then
|
||||
\cp -f /wazuh-migration/data/agentless/.passlist /var/ossec/agentless/.passlist
|
||||
chown root:wazuh /var/ossec/agentless/.passlist
|
||||
chmod 640 /var/ossec/agentless/.passlist
|
||||
fi
|
||||
|
||||
\cp -f /wazuh-migration/global.db /var/ossec/queue/db/global.db
|
||||
chown wazuh:wazuh /var/ossec/queue/db/global.db
|
||||
chmod 640 /var/ossec/queue/db/global.db
|
||||
|
||||
# mark volume as migrated
|
||||
touch /wazuh-migration/.migration-completed
|
||||
|
||||
echogreen "Migration completed succesfully"
|
||||
else
|
||||
echoyellow "This volume has already been migrated. You may proceed and remove it from the mount point (/wazuh-migration)"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function_create_custom_user() {
|
||||
if [[ ! -z $API_USERNAME ]] && [[ ! -z $API_PASSWORD ]]; then
|
||||
cat << EOF > /var/ossec/api/configuration/admin.json
|
||||
{
|
||||
"username": "$API_USERNAME",
|
||||
"password": "$API_PASSWORD"
|
||||
}
|
||||
EOF
|
||||
|
||||
# create or customize API user
|
||||
if /var/ossec/framework/python/bin/python3 /var/ossec/framework/scripts/create_user.py; then
|
||||
# remove json if exit code is 0
|
||||
rm /var/ossec/api/configuration/admin.json
|
||||
else
|
||||
echored "There was an error configuring the API user"
|
||||
# terminate container to avoid unpredictable behavior
|
||||
exec s6-svscanctl -t /var/run/s6/services
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
}
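# Illustrative usage (credentials are placeholders): pass the desired API user
# on container start and the helper above creates or updates it, e.g.
#   docker run -e API_USERNAME=custom-admin -e API_PASSWORD='SecretPassword1!' ...
# The password should satisfy the API policy (upper/lower case, digit, symbol).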
|
||||
|
||||
function_entrypoint_scripts() {
|
||||
# Run every .sh script located in the entrypoint-scripts folder, in lexicographical order
|
||||
if [ -d "/entrypoint-scripts/" ]
|
||||
then
|
||||
for script in `ls /entrypoint-scripts/*.sh | sort -n`; do
|
||||
bash "$script"
|
||||
done
|
||||
fi
|
||||
}
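# Illustrative usage (host path is an assumption): mount custom hooks so they
# run before Wazuh starts, e.g.
#   docker run -v ./custom-hooks:/entrypoint-scripts ...
# with scripts named 10-first.sh, 20-second.sh, and so on.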
|
||||
|
||||
function_configure_vulnerability_detection() {
|
||||
if [ "$INDEXER_PASSWORD" != "" ]; then
|
||||
>&2 echo "Configuring indexer keystore credentials."
|
||||
echo "$INDEXER_USERNAME" | /var/ossec/bin/wazuh-keystore -f indexer -k username
|
||||
echo "$INDEXER_PASSWORD" | /var/ossec/bin/wazuh-keystore -f indexer -k password
|
||||
fi
|
||||
}
|
||||
|
||||
# Migrate data from /wazuh-migration volume
|
||||
function_wazuh_migration
|
||||
|
||||
# create API custom user
|
||||
function_create_custom_user
|
||||
|
||||
# configure Vulnerability Detection
|
||||
function_configure_vulnerability_detection
|
||||
|
||||
# run entrypoint scripts
|
||||
function_entrypoint_scripts
|
||||
|
||||
# Start Wazuh
|
||||
/var/ossec/bin/wazuh-control start
|
@@ -1,6 +0,0 @@
|
||||
#!/usr/bin/env sh
|
||||
echo >&2 "Filebeat exited. code=${1}"
|
||||
|
||||
# terminate other services to exit from the container
|
||||
exec s6-svscanctl -t /var/run/s6/services
|
||||
|
@@ -1,4 +0,0 @@
|
||||
#!/usr/bin/with-contenv sh
|
||||
echo >&2 "starting Filebeat"
|
||||
|
||||
exec /usr/share/filebeat/bin/filebeat -e -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat
|
@@ -1,4 +0,0 @@
|
||||
#!/usr/bin/with-contenv sh
|
||||
|
||||
# dumping ossec.log to standard output
|
||||
exec tail -F /var/ossec/logs/ossec.log
|
@@ -1,31 +0,0 @@
|
||||
|
||||
# Wazuh - Filebeat configuration file
|
||||
filebeat.modules:
|
||||
- module: wazuh
|
||||
alerts:
|
||||
enabled: true
|
||||
archives:
|
||||
enabled: false
|
||||
|
||||
setup.template.json.enabled: true
|
||||
setup.template.overwrite: true
|
||||
setup.template.json.path: '/etc/filebeat/wazuh-template.json'
|
||||
setup.template.json.name: 'wazuh'
|
||||
setup.ilm.enabled: false
|
||||
output.elasticsearch:
|
||||
hosts: ['https://wazuh.indexer:9200']
|
||||
#username:
|
||||
#password:
|
||||
#ssl.verification_mode:
|
||||
#ssl.certificate_authorities:
|
||||
#ssl.certificate:
|
||||
#ssl.key:
|
||||
|
||||
logging.metrics.enabled: false
|
||||
|
||||
seccomp:
|
||||
default_action: allow
|
||||
syscalls:
|
||||
- action: allow
|
||||
names:
|
||||
- rseq
|
@@ -1,11 +0,0 @@
|
||||
## variables
|
||||
REPOSITORY="packages-dev.wazuh.com/pre-release"
|
||||
WAZUH_TAG=$(curl --silent https://api.github.com/repos/wazuh/wazuh/git/refs/tags | grep '["]ref["]:' | sed -E 's/.*\"([^\"]+)\".*/\1/' | cut -c 11- | grep ^v${WAZUH_VERSION}$)
|
||||
|
||||
## check tag to use the correct repository
|
||||
if [[ -n "${WAZUH_TAG}" ]]; then
|
||||
REPOSITORY="packages.wazuh.com/4.x"
|
||||
fi
|
||||
|
||||
yum install filebeat-${FILEBEAT_VERSION}-${FILEBEAT_REVISION} -y && \
|
||||
curl -s https://${REPOSITORY}/filebeat/${WAZUH_FILEBEAT_MODULE} | tar -xvz -C /usr/share/filebeat/module
|
@@ -1,115 +0,0 @@
|
||||
# Permanent data mounted in volumes
|
||||
i=0
|
||||
PERMANENT_DATA[((i++))]="/var/ossec/api/configuration"
|
||||
PERMANENT_DATA[((i++))]="/var/ossec/etc"
|
||||
PERMANENT_DATA[((i++))]="/var/ossec/logs"
|
||||
PERMANENT_DATA[((i++))]="/var/ossec/queue"
|
||||
PERMANENT_DATA[((i++))]="/var/ossec/agentless"
|
||||
PERMANENT_DATA[((i++))]="/var/ossec/var/multigroups"
|
||||
PERMANENT_DATA[((i++))]="/var/ossec/integrations"
|
||||
PERMANENT_DATA[((i++))]="/var/ossec/active-response/bin"
|
||||
PERMANENT_DATA[((i++))]="/var/ossec/wodles"
|
||||
PERMANENT_DATA[((i++))]="/etc/filebeat"
|
||||
|
||||
export PERMANENT_DATA
|
||||
|
||||
# Files mounted in a volume that should not be permanent
|
||||
i=0
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/etc/internal_options.conf"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/integrations/slack"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/integrations/slack.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/integrations/virustotal"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/integrations/virustotal.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/integrations/shuffle"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/integrations/shuffle.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/integrations/pagerduty"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/integrations/pagerduty.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/integrations/maltiverse"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/integrations/maltiverse.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/active-response/bin/default-firewall-drop"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/active-response/bin/disable-account"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/active-response/bin/firewalld-drop"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/active-response/bin/firewall-drop"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/active-response/bin/host-deny"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/active-response/bin/ip-customblock"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/active-response/bin/ipfw"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/active-response/bin/kaspersky.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/active-response/bin/kaspersky"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/active-response/bin/npf"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/active-response/bin/wazuh-slack"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/active-response/bin/pf"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/active-response/bin/restart-wazuh"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/active-response/bin/restart.sh"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/active-response/bin/route-null"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/agentless/sshlogin.exp"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/agentless/ssh_pixconfig_diff"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/agentless/ssh_asa-fwsmconfig_diff"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/agentless/ssh_integrity_check_bsd"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/agentless/main.exp"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/agentless/su.exp"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/agentless/ssh_integrity_check_linux"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/agentless/register_host.sh"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/agentless/ssh_generic_diff"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/agentless/ssh_foundry_diff"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/agentless/ssh_nopass.exp"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/agentless/ssh.exp"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/utils.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/aws-s3"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/aws-s3.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/__init__.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/aws_tools.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/wazuh_integration.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/buckets_s3/__init__.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/buckets_s3/aws_bucket.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/buckets_s3/cloudtrail.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/buckets_s3/config.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/buckets_s3/guardduty.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/buckets_s3/load_balancers.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/buckets_s3/server_access.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/buckets_s3/umbrella.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/buckets_s3/vpcflow.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/buckets_s3/waf.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/services/__init__.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/services/aws_service.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/services/cloudwatchlogs.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/services/inspector.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/subscribers/__init__.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/subscribers/s3_log_handler.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/subscribers/sqs_message_processor.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/aws/subscribers/sqs_queue.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/azure/azure-logs"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/azure/azure-logs.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/azure/db/orm.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/azure/db/utils.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/azure/db/__init__.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/azure/azure_utils.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/azure/azure_services/__init__.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/azure/azure_services/analytics.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/azure/azure_services/graph.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/azure/azure_services/storage.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/docker/DockerListener"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/docker/DockerListener.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/gcloud/gcloud"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/gcloud/gcloud.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/gcloud/integration.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/gcloud/tools.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/gcloud/exceptions.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/gcloud/buckets/bucket.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/gcloud/buckets/access_logs.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/wodles/gcloud/pubsub/subscriber.py"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/etc/lists/malicious-ioc/malicious-ip"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/etc/lists/malicious-ioc/malicious-domains"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/var/ossec/etc/lists/malicious-ioc/malware-hashes"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/etc/filebeat/wazuh-template.json"
|
||||
PERMANENT_DATA_EXCP[((i++))]="/etc/filebeat/filebeat.yml"
|
||||
export PERMANENT_DATA_EXCP
|
||||
|
||||
# Files mounted in a volume that should be deleted
|
||||
i=0
|
||||
PERMANENT_DATA_DEL[((i++))]="/var/ossec/queue/db/.template.db"
|
||||
export PERMANENT_DATA_DEL
|
||||
|
||||
i=0
|
||||
PERMANENT_DATA_MOVE[((i++))]="/var/ossec/logs/ossec /var/ossec/logs/wazuh"
|
||||
PERMANENT_DATA_MOVE[((i++))]="/var/ossec/queue/ossec /var/ossec/queue/sockets"
|
||||
export PERMANENT_DATA_MOVE
|
@@ -1,36 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Wazuh App Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
|
||||
# Variables
|
||||
source /permanent_data.env
|
||||
|
||||
WAZUH_INSTALL_PATH=/var/ossec
|
||||
DATA_TMP_PATH=${WAZUH_INSTALL_PATH}/data_tmp
|
||||
mkdir ${DATA_TMP_PATH}
|
||||
|
||||
# Move exclusion files to EXCLUSION_PATH
|
||||
EXCLUSION_PATH=${DATA_TMP_PATH}/exclusion
|
||||
mkdir ${EXCLUSION_PATH}
|
||||
|
||||
for exclusion_file in "${PERMANENT_DATA_EXCP[@]}"; do
|
||||
# Create the directory for the exclusion file if it does not exist
|
||||
DIR=$(dirname "${exclusion_file}")
|
||||
if [ ! -e ${EXCLUSION_PATH}/${DIR} ]
|
||||
then
|
||||
mkdir -p ${EXCLUSION_PATH}/${DIR}
|
||||
fi
|
||||
|
||||
mv ${exclusion_file} ${EXCLUSION_PATH}/${exclusion_file}
|
||||
done
|
||||
|
||||
# Move permanent files to PERMANENT_PATH
|
||||
PERMANENT_PATH=${DATA_TMP_PATH}/permanent
|
||||
mkdir ${PERMANENT_PATH}
|
||||
|
||||
for permanent_dir in "${PERMANENT_DATA[@]}"; do
|
||||
# Create the directory for the permanent file if it does not exist
|
||||
DIR=$(dirname "${permanent_dir}")
|
||||
mkdir -p ${PERMANENT_PATH}${DIR}
|
||||
cp -ar ${permanent_dir} ${PERMANENT_PATH}${DIR}
|
||||
|
||||
done
|
74
docker-compose.yml
Normal file
74
docker-compose.yml
Normal file
@@ -0,0 +1,74 @@
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
wazuh:
|
||||
image: wazuh/wazuh
|
||||
hostname: wazuh-manager
|
||||
restart: always
|
||||
ports:
|
||||
- "1514:1514/udp"
|
||||
- "1515:1515"
|
||||
- "514:514/udp"
|
||||
- "55000:55000"
|
||||
networks:
|
||||
- docker_elk
|
||||
# volumes:
|
||||
# - my-path:/var/ossec/data
|
||||
# - my-path:/etc/postfix
|
||||
depends_on:
|
||||
- elasticsearch
|
||||
logstash:
|
||||
image: wazuh/wazuh-logstash
|
||||
hostname: logstash
|
||||
restart: always
|
||||
command: -f /etc/logstash/conf.d/
|
||||
# volumes:
|
||||
# - my-path:/etc/logstash/conf.d
|
||||
links:
|
||||
- kibana
|
||||
- elasticsearch:elasticsearch
|
||||
ports:
|
||||
- "5000:5000"
|
||||
networks:
|
||||
- docker_elk
|
||||
depends_on:
|
||||
- elasticsearch
|
||||
environment:
|
||||
- LS_HEAP_SIZE=2048m
|
||||
elasticsearch:
|
||||
image: elasticsearch:5.5.2
|
||||
hostname: elasticsearch
|
||||
restart: always
|
||||
command: elasticsearch -E node.name="node-1" -E cluster.name="wazuh" -E network.host=0.0.0.0
|
||||
ports:
|
||||
- "9200:9200"
|
||||
- "9300:9300"
|
||||
environment:
|
||||
ES_JAVA_OPTS: "-Xms2g -Xmx2g"
|
||||
# volumes:
|
||||
# - my-path:/usr/share/elasticsearch/data
|
||||
networks:
|
||||
- docker_elk
|
||||
kibana:
|
||||
image: wazuh/wazuh-kibana
|
||||
hostname: kibana
|
||||
restart: always
|
||||
ports:
|
||||
- "5601:5601"
|
||||
networks:
|
||||
- docker_elk
|
||||
depends_on:
|
||||
- elasticsearch
|
||||
links:
|
||||
- elasticsearch:elasticsearch
|
||||
- wazuh
|
||||
entrypoint: sh wait-for-it.sh elasticsearch
|
||||
# environment:
|
||||
# - "WAZUH_KIBANA_PLUGIN_URL=http://your.repo/wazuhapp-2.1.0-5.5.1.zip"
|
||||
|
||||
networks:
|
||||
docker_elk:
|
||||
driver: bridge
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.25.0.0/24
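A minimal sketch of bringing up this legacy stack (Compose v2 syntax, assuming the docker-compose binary is installed):

    docker-compose up -d          # start wazuh, logstash, elasticsearch and kibana
    docker-compose logs -f wazuh  # follow the manager logs to confirm startup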
|
1
docs/.gitignore
vendored
1
docs/.gitignore
vendored
@@ -1 +0,0 @@
|
||||
book
|
212
docs/README.md
212
docs/README.md
@@ -1,212 +0,0 @@
|
||||
# Wazuh containers for Docker
|
||||
|
||||
[](https://wazuh.com/community/join-us-on-slack/)
|
||||
[](https://groups.google.com/forum/#!forum/wazuh)
|
||||
[](https://documentation.wazuh.com)
|
||||
[](https://wazuh.com)
|
||||
|
||||
In this repository you will find the containers to run:
|
||||
|
||||
* Wazuh manager: it runs the Wazuh manager, Wazuh API and Filebeat OSS
|
||||
* Wazuh dashboard: provides a web user interface to browse through alert data and allows you to visualize the agents' configuration and status.
|
||||
* Wazuh indexer: Wazuh indexer container (working as a single-node cluster or as a multi-node cluster). **Be sure to increase the `vm.max_map_count` setting, as detailed in the [Wazuh documentation](https://documentation.wazuh.com/current/docker/wazuh-container.html#increase-max-map-count-on-your-host-linux).**
|
||||
* Wazuh agent: it runs the Wazuh agent services. Current functionality is limited.
|
||||
|
||||
The folder `build-docker-images` contains a README explaining how to build the Wazuh images and the necessary assets.
|
||||
The folder `indexer-certs-creator` contains a README explaining how to create the certificates creator tool and the necessary assets.
|
||||
The folder `single-node` contains a README explaining how to run a Wazuh environment with one Wazuh manager, one Wazuh indexer, and one Wazuh dashboard.
|
||||
The folder `multi-node` contains a README explaining how to run a Wazuh environment with two Wazuh managers, three Wazuh indexers, and one Wazuh dashboard.
|
||||
The folder `wazuh-agent` contains a README explaining how to run a container with Wazuh agent.
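As a quick orientation for the `single-node` and `multi-node` folders above, a typical single-node bring-up looks like this (a minimal sketch; the authoritative steps are in that folder's README, and the certificate generator service name is assumed to be `generator`):

```bash
cd single-node
# One-off step: generate the self-signed certificates used by the Wazuh indexer.
docker compose -f generate-indexer-certs.yml run --rm generator
# Start the Wazuh manager, indexer, and dashboard in the background.
docker compose up -d
```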
|
||||
|
||||
## Documentation
|
||||
|
||||
* [Wazuh full documentation](http://documentation.wazuh.com)
|
||||
* [Wazuh documentation for Docker](https://documentation.wazuh.com/current/docker/index.html)
|
||||
* [Docker Hub](https://hub.docker.com/u/wazuh)
|
||||
|
||||
## Directory structure
|
||||
|
||||
├── build-docker-images
|
||||
│ ├── build-images.sh
|
||||
│ ├── build-images.yml
|
||||
│ ├── README.md
|
||||
│ ├── wazuh-agent
|
||||
│ │ ├── config
|
||||
│ │ │ ├── check_repository.sh
|
||||
│ │ │ └── etc
|
||||
│ │ │ ├── cont-init.d
|
||||
│ │ │ │ ├── 0-wazuh-init
|
||||
│ │ │ │ └── 1-agent
|
||||
│ │ │ └── services.d
|
||||
│ │ │ └── ossec-logs
|
||||
│ │ │ └── run
|
||||
│ │ └── Dockerfile
|
||||
│ ├── wazuh-dashboard
|
||||
│ │ ├── config
|
||||
│ │ │ ├── check_repository.sh
|
||||
│ │ │ ├── config.sh
|
||||
│ │ │ ├── config.yml
|
||||
│ │ │ ├── entrypoint.sh
|
||||
│ │ │ ├── wazuh_app_config.sh
|
||||
│ │ │ └── wazuh.yml
|
||||
│ │ └── Dockerfile
|
||||
│ ├── wazuh-indexer
|
||||
│ │ ├── config
|
||||
│ │ │ ├── action_groups.yml
|
||||
│ │ │ ├── check_repository.sh
|
||||
│ │ │ ├── config.sh
|
||||
│ │ │ ├── config.yml
|
||||
│ │ │ ├── entrypoint.sh
|
||||
│ │ │ ├── internal_users.yml
|
||||
│ │ │ ├── opensearch.yml
|
||||
│ │ │ ├── roles_mapping.yml
|
||||
│ │ │ ├── roles.yml
|
||||
│ │ │ └── securityadmin.sh
|
||||
│ │ └── Dockerfile
|
||||
│ └── wazuh-manager
|
||||
│ ├── config
|
||||
│ │ ├── check_repository.sh
|
||||
│ │ ├── create_user.py
|
||||
│ │ ├── etc
|
||||
│ │ │ ├── cont-init.d
|
||||
│ │ │ │ ├── 0-wazuh-init
|
||||
│ │ │ │ ├── 1-config-filebeat
|
||||
│ │ │ │ └── 2-manager
|
||||
│ │ │ └── services.d
|
||||
│ │ │ ├── filebeat
|
||||
│ │ │ │ ├── finish
|
||||
│ │ │ │ └── run
|
||||
│ │ │ └── ossec-logs
|
||||
│ │ │ └── run
|
||||
│ │ ├── filebeat_module.sh
|
||||
│ │ ├── filebeat.yml
|
||||
│ │ ├── permanent_data.env
|
||||
│ │ └── permanent_data.sh
|
||||
│ └── Dockerfile
|
||||
├── CHANGELOG.md
|
||||
├── docs
|
||||
│ ├── book.toml
|
||||
│ ├── build.sh
|
||||
│ ├── dev
|
||||
│ │ ├── build-image.md
|
||||
│ │ ├── README.md
|
||||
│ │ ├── run-tests.md
|
||||
│ │ └── setup.md
|
||||
│ ├── README.md
|
||||
│ ├── ref
|
||||
│ │ ├── configuration
|
||||
│ │ │ ├── configuration-files.md
|
||||
│ │ │ ├── environment-variables.md
|
||||
│ │ │ └── README.md
|
||||
│ │ ├── getting-started
|
||||
│ │ │ ├── deployment
|
||||
│ │ │ │ ├── multi-node.md
|
||||
│ │ │ │ ├── README.md
|
||||
│ │ │ │ ├── single-node.md
|
||||
│ │ │ │ └── wazuh-agent.md
|
||||
│ │ │ ├── README.md
|
||||
│ │ │ └── requirements.md
|
||||
│ │ ├── glossary.md
|
||||
│ │ ├── Introduction
|
||||
│ │ │ ├── compatibility.md
|
||||
│ │ │ ├── description.md
|
||||
│ │ │ └── README.md
|
||||
│ │ ├── README.md
|
||||
│ │ └── upgrade.md
|
||||
│ ├── server.sh
|
||||
│ └── SUMMARY.md
|
||||
├── indexer-certs-creator
|
||||
│ ├── config
|
||||
│ │ └── entrypoint.sh
|
||||
│ ├── Dockerfile
|
||||
│ └── README.md
|
||||
├── LICENSE
|
||||
├── multi-node
|
||||
│ ├── config
|
||||
│ │ ├── certs.yml
|
||||
│ │ ├── nginx
|
||||
│ │ │ └── nginx.conf
|
||||
│ │ ├── wazuh_cluster
|
||||
│ │ │ ├── wazuh_manager.conf
|
||||
│ │ │ └── wazuh_worker.conf
|
||||
│ │ ├── wazuh_dashboard
|
||||
│ │ │ ├── opensearch_dashboards.yml
|
||||
│ │ │ └── wazuh.yml
|
||||
│ │ └── wazuh_indexer
|
||||
│ │ ├── internal_users.yml
|
||||
│ │ ├── wazuh1.indexer.yml
|
||||
│ │ ├── wazuh2.indexer.yml
|
||||
│ │ └── wazuh3.indexer.yml
|
||||
│ ├── docker-compose.yml
|
||||
│ ├── generate-indexer-certs.yml
|
||||
│ ├── Migration-to-Wazuh-4.4.md
|
||||
│ ├── README.md
|
||||
│ └── volume-migrator.sh
|
||||
├── README.md
|
||||
├── SECURITY.md
|
||||
├── single-node
|
||||
│ ├── config
|
||||
│ │ ├── certs.yml
|
||||
│ │ ├── wazuh_cluster
|
||||
│ │ │ └── wazuh_manager.conf
|
||||
│ │ ├── wazuh_dashboard
|
||||
│ │ │ ├── opensearch_dashboards.yml
|
||||
│ │ │ └── wazuh.yml
|
||||
│ │ ├── wazuh_indexer
|
||||
│ │ │ ├── internal_users.yml
|
||||
│ │ │ └── wazuh.indexer.yml
|
||||
│ │ └── wazuh_indexer_ssl_certs
|
||||
│ ├── docker-compose.yml
|
||||
│ ├── generate-indexer-certs.yml
|
||||
│ └── README.md
|
||||
├── VERSION.json
|
||||
└── wazuh-agent
|
||||
├── config
|
||||
│ └── wazuh-agent-conf
|
||||
└── docker-compose.yml
|
||||
|
||||
## Branches
|
||||
|
||||
* The `main` branch contains the latest code; be aware of possible bugs on this branch.
|
||||
|
||||
## Compatibility Matrix
|
||||
|
||||
| Wazuh version | ODFE | XPACK |
|
||||
|---------------|---------|--------|
|
||||
| v4.3.0+ | | |
|
||||
| v4.2.7 | 1.13.2 | 7.11.2 |
|
||||
| v4.2.6 | 1.13.2 | 7.11.2 |
|
||||
| v4.2.5 | 1.13.2 | 7.11.2 |
|
||||
| v4.2.4 | 1.13.2 | 7.11.2 |
|
||||
| v4.2.3 | 1.13.2 | 7.11.2 |
|
||||
| v4.2.2 | 1.13.2 | 7.11.2 |
|
||||
| v4.2.1 | 1.13.2 | 7.11.2 |
|
||||
| v4.2.0 | 1.13.2 | 7.10.2 |
|
||||
| v4.1.5 | 1.13.2 | 7.10.2 |
|
||||
| v4.1.4 | 1.12.0 | 7.10.2 |
|
||||
| v4.1.3 | 1.12.0 | 7.10.2 |
|
||||
| v4.1.2 | 1.12.0 | 7.10.2 |
|
||||
| v4.1.1 | 1.12.0 | 7.10.2 |
|
||||
| v4.1.0 | 1.12.0 | 7.10.2 |
|
||||
| v4.0.4 | 1.11.0 | |
|
||||
| v4.0.3 | 1.11.0 | |
|
||||
| v4.0.2 | 1.11.0 | |
|
||||
| v4.0.1 | 1.11.0 | |
|
||||
| v4.0.0 | 1.10.1 | |
|
||||
|
||||
## Credits and Thank you
|
||||
|
||||
These Docker containers are based on:
|
||||
|
||||
* "deviantony" dockerfiles which can be found at [https://github.com/deviantony/docker-elk](https://github.com/deviantony/docker-elk)
|
||||
* "xetus-oss" dockerfiles, which can be found at [https://github.com/xetus-oss/docker-ossec-server](https://github.com/xetus-oss/docker-ossec-server)
|
||||
|
||||
We thank them and everyone else who has contributed to this project.
|
||||
|
||||
## License and copyright
|
||||
|
||||
Wazuh Docker Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
|
||||
## Web references
|
||||
|
||||
[Wazuh website](http://wazuh.com)
|
@@ -1,26 +0,0 @@
|
||||
# Summary
|
||||
|
||||
- [Introduction](README.md)
|
||||
|
||||
# Development Guide
|
||||
|
||||
- [Introduction](dev/introduction.md)
|
||||
- [Setup Environment](dev/setup.md)
|
||||
- [Build Image](dev/build-image.md)
|
||||
- [Run Tests](dev/run-tests.md)
|
||||
|
||||
# Reference Manual
|
||||
|
||||
- [Introduction](ref/Introduction/introduction.md)
|
||||
- [Description](ref/Introduction/description.md)
|
||||
- [Getting Started](ref/getting-started/getting-started.md)
|
||||
- [Requirements](ref/getting-started/requirements.md)
|
||||
- [Deployment](ref/getting-started/deployment/deployment.md)
|
||||
- [Single Node Wazuh Stack](ref/getting-started/deployment/single-node.md)
|
||||
- [Multi Node Wazuh Stack](ref/getting-started/deployment/multi-node.md)
|
||||
- [Wazuh Agent](ref/getting-started/deployment/wazuh-agent.md)
|
||||
- [Configuration](ref/configuration/configuration.md)
|
||||
- [Environment Variables](ref/configuration/environment-variables.md)
|
||||
- [Configuration files](ref/configuration/configuration-files.md)
|
||||
- [Upgrade](ref/upgrade.md)
|
||||
- [Glossary](ref/glossary.md)
|
@@ -1,7 +0,0 @@
|
||||
[book]
|
||||
title = "Wazuh Docker Documentation"
|
||||
description = "Technical documentation for Wazuh Docker deployment."
|
||||
authors = ["Victor Erenu"]
|
||||
multilingual = false
|
||||
src = "."
|
||||
language = "en"
|
@@ -1,3 +0,0 @@
|
||||
#! /bin/sh
|
||||
|
||||
mdbook build
|
@@ -1,32 +0,0 @@
|
||||
# Wazuh Docker Image Builder
|
||||
|
||||
The images for the Wazuh stack deployment in Docker are built with the `build-images.sh` script together with `build-images.yml`.
|
||||
|
||||
To run the build process, execute the following from the root of the wazuh-docker repository:
|
||||
|
||||
```
|
||||
$ build-docker-images/build-images.sh
|
||||
```
|
||||
|
||||
This script initializes the environment variables needed to build each of the images.
|
||||
|
||||
The script also allows you to build images for other Wazuh versions. To do this, use the `-v` or `--version` argument:
|
||||
|
||||
```
|
||||
$ build-docker-images/build-images.sh -v 4.14.0
|
||||
```
|
||||
|
||||
To get all the available script options use the -h or --help option:
|
||||
|
||||
```
|
||||
$ build-docker-images/build-images.sh -h
|
||||
|
||||
Usage: build-docker-images/build-images.sh [OPTIONS]
|
||||
|
||||
-d, --dev <ref> [Optional] Set the development stage you want to build, example rc1 or beta1, not used by default.
|
||||
-f, --filebeat-module <ref> [Optional] Set Filebeat module version. By default 0.4.
|
||||
-r, --revision <rev> [Optional] Package revision. By default 1
|
||||
-v, --version <ver> [Optional] Set the Wazuh version to be built. By default, 4.14.0.
|
||||
-h, --help Show this help.
|
||||
|
||||
```
|
@@ -1,40 +0,0 @@
|
||||
# Development Guide - Introduction
|
||||
|
||||
Welcome to the Development Guide for Wazuh-docker version 4.14.0. This guide is intended for developers, contributors, and advanced users who wish to understand the development aspects of the Wazuh-Docker project, build custom Docker images, or contribute to its development.
|
||||
|
||||
## Purpose of This Guide
|
||||
|
||||
The primary goals of this guide are:
|
||||
|
||||
- To provide a clear understanding of the development environment setup.
|
||||
- To outline the process for building Wazuh Docker images from source.
|
||||
- To explain how to run tests to ensure the integrity and functionality of the images.
|
||||
- To offer insights into the project structure and contribution guidelines (though detailed contribution guidelines are typically found in `CONTRIBUTING.md` in the repository).
|
||||
|
||||
## Who Should Use This Guide?
|
||||
|
||||
This guide is for you if you want to:
|
||||
|
||||
- Modify existing Wazuh Docker images.
|
||||
- Build Wazuh Docker images for a specific Wazuh version or with custom configurations.
|
||||
- Understand the build process and scripts used in this project.
|
||||
- Contribute code, features, or bug fixes to the Wazuh-Docker repository.
|
||||
|
||||
## What This Guide Covers
|
||||
|
||||
This guide is organized into the following sections:
|
||||
|
||||
- **[Setup Environment](setup.md)**: Instructions on how to prepare your local machine for Wazuh-Docker development, including necessary tools and dependencies.
|
||||
- **[Build Image](build-image.md)**: Step-by-step procedures for building the various Wazuh Docker images (Wazuh manager, Wazuh indexer, Wazuh dashboard).
|
||||
- **[Run Tests](run-tests.md)**: Information on how to execute automated tests to validate the built images and configurations.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before you begin, it's assumed that you have a basic understanding of:
|
||||
|
||||
- Docker and Docker Compose.
|
||||
- Linux command-line interface.
|
||||
- Version control systems like Git.
|
||||
- The Wazuh platform and its components.
|
||||
|
||||
We encourage you to explore the Wazuh-Docker repository and familiarize yourself with its structure. If you plan to contribute, please also review the project's contribution guidelines.
|
@@ -1,28 +0,0 @@
|
||||
# Pull Request Test Execution
|
||||
|
||||
This repository includes automated tests designed to validate the correct deployment of Wazuh using Docker. These tests are executed on every pull request (PR) to ensure the integrity and stability of the system when changes are introduced.
|
||||
|
||||
## Purpose
|
||||
|
||||
The main objective of the tests is to verify that the Wazuh Docker environment can be successfully deployed and that all its core components (Wazuh Manager, Indexer, Dashboard, and Agents) operate as expected after any modification in the codebase.
|
||||
|
||||
## When Tests Run
|
||||
|
||||
- Tests are automatically triggered on every pull request (PR) opened against the repository.
|
||||
- They also run when changes are pushed to an existing PR.
|
||||
|
||||
## What Is Tested
|
||||
|
||||
The tests aim to ensure:
|
||||
- Successful build and startup of all Docker containers.
|
||||
- Proper communication between components (e.g., Manager ↔ Indexer, Dashboard ↔ API).
|
||||
- No critical errors appear in the logs.
|
||||
- Key services are healthy and accessible.
|
||||
|
||||
## Benefits
|
||||
|
||||
- Reduces the risk of breaking the deployment flow.
|
||||
- Ensures system consistency during feature development and refactoring.
|
||||
- Provides early feedback on integration issues before merging.
|
||||
|
||||
---
|
@@ -1,55 +0,0 @@
|
||||
# Development Guide - Setup Environment
|
||||
|
||||
This section outlines the steps required to set up your local development environment for working with the Wazuh-Docker project (version 4.14.0). A proper setup is crucial for building images, running tests, and contributing effectively.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before you begin, ensure your system meets the following requirements:
|
||||
|
||||
1. **Operating System**:
|
||||
* A Linux-based distribution is recommended (e.g., Ubuntu, RedHat).
|
||||
* macOS or Windows with WSL 2 can also be used, but some scripts might require adjustments.
|
||||
|
||||
2. **Docker and Docker Compose**:
|
||||
* **Docker Engine**: Install the latest stable version of Docker Engine. Refer to the [official Docker documentation](https://docs.docker.com/engine/install/) for installation instructions specific to your OS.
|
||||
|
||||
3. **Git**:
|
||||
* Install Git for cloning the repository and managing versions. Most systems have Git pre-installed. If not, visit [https://git-scm.com/downloads](https://git-scm.com/downloads).
|
||||
|
||||
4. **Sufficient System Resources**:
|
||||
* **RAM**: At least 8GB of RAM is recommended, especially if you plan to run multiple Wazuh components locally. 16GB or more is ideal.
|
||||
* **CPU**: A multi-core processor (2+ cores) is recommended.
|
||||
* **Disk Space**: Ensure you have sufficient disk space (at least 20-30GB) for Docker images, containers, and Wazuh data.
|
||||
|
||||
## Setting Up the Environment
|
||||
|
||||
Follow these steps to prepare your development environment:
|
||||
|
||||
1. **Clone the Repository**:
|
||||
Clone the `wazuh-docker` repository from GitHub. It's important to check out the specific branch you intend to work with, in this case, `4.14.0`.
|
||||
|
||||
```bash
|
||||
git clone https://github.com/wazuh/wazuh-docker.git
|
||||
cd wazuh-docker
|
||||
git checkout v4.14.0
|
||||
```
|
||||
|
||||
2. **Verify Docker Installation**:
|
||||
Ensure Docker is running and accessible by your user (you might need to add your user to the `docker` group or use `sudo`).
|
||||
|
||||
```bash
|
||||
docker --version
|
||||
docker info
|
||||
```
|
||||
These commands should output the versions of Docker and information about your Docker setup without errors.
|
||||
|
||||
3. **Review Project Structure**:
|
||||
Familiarize yourself with the directory structure of the cloned repository. Key directories often include:
|
||||
* `build-docker-images/wazuh-manager/`: Dockerfile and related files for the Wazuh manager.
|
||||
* `build-docker-images/wazuh-indexer/`: Dockerfile and related files for the Wazuh indexer.
|
||||
* `build-docker-images/wazuh-dashboard/`: Dockerfile and related files for the Wazuh dashboard.
|
||||
* `build-docker-images/wazuh-agent/` : Dockerfile and related files for Wazuh agents.
|
||||
* `single-node/` : Compose and configuration files for Wazuh deployment with 1 container of each Wazuh component.
|
||||
* `multi-node/` : Compose and configuration files for Wazuh deployment with 1 container of Wazuh dashboard, 2 containers of Wazuh manager (1 master and 1 worker), and 3 containers of Wazuh indexer.
|
||||
* `wazuh-agent/` : Compose and configuration files for Wazuh agent deployment.
|
||||
|
@@ -1,45 +0,0 @@
|
||||
# Reference Manual - Description
|
||||
|
||||
This section provides a detailed description of Wazuh-docker (version 4.14.0), its components, and its architecture when deployed using Docker containers. Understanding these aspects is key to effectively deploying and managing your Wazuh environment.
|
||||
|
||||
## What is Wazuh?
|
||||
|
||||
Wazuh is a free, open-source, and enterprise-ready security monitoring solution for threat detection, integrity monitoring, incident response, and compliance. It consists of several key components that work together to provide comprehensive security visibility.
|
||||
|
||||
## What is Wazuh-docker?
|
||||
|
||||
Wazuh-docker is a project that provides Docker images and `docker compose` configurations to simplify the deployment and management of the Wazuh platform. By containerizing Wazuh components, Wazuh-docker offers:
|
||||
|
||||
- **Rapid Deployment**: Quickly set up a full Wazuh environment.
|
||||
- **Consistency**: Ensures that Wazuh runs the same way across different environments.
|
||||
- **Scalability**: Easier to scale components as needed (especially with orchestrators like Kubernetes, though this documentation primarily focuses on Docker Compose).
|
||||
- **Isolation**: Components run in isolated containers, reducing conflicts.
|
||||
- **Portability**: Run Wazuh on any Linux system that supports Docker.
|
||||
|
||||
## Core Components in Wazuh-Docker
|
||||
|
||||
The Wazuh-Docker project typically provides images for the following core Wazuh components, adapted for version 4.14.0:
|
||||
|
||||
1. **Wazuh Manager**:
|
||||
- The central component that collects and analyzes data from deployed Wazuh agents.
|
||||
- It performs log analysis, file integrity checking, rootkit detection, real-time alerting, and active response.
|
||||
- In a Docker deployment, the Wazuh manager runs in its own container. It exposes ports for agent communication and API access.
|
||||
|
||||
2. **Wazuh Indexer**:
|
||||
- A highly scalable, full-text search and analytics engine.
|
||||
- Based on OpenSearch (or historically Elasticsearch), it stores and indexes alerts and monitoring data generated by the Wazuh manager.
|
||||
- The Wazuh indexer container provides the data persistence layer for Wazuh alerts and events. For version 4.14.0, this is typically an OpenSearch-based component.
|
||||
|
||||
3. **Wazuh Dashboard**:
|
||||
- A flexible visualization tool based on OpenSearch Dashboards (or historically Kibana).
|
||||
- It provides a web interface for querying, visualizing, and analyzing Wazuh data stored in the Wazuh indexer.
|
||||
- Users can explore security events, manage agent configurations (via the Wazuh plugin), and generate reports.
|
||||
|
||||
## Key Features of Wazuh-Docker Deployments
|
||||
|
||||
- **Docker Compose**: Most deployments are orchestrated using `docker-compose.yml` files, which define the services, networks, volumes, and configurations for the Wazuh stack.
|
||||
- **Persistent Data**: Docker volumes are used to persist critical data, such as Wazuh manager configurations, agent keys, Wazuh indexer data, and Wazuh dashboard settings, even if containers are stopped or recreated.
|
||||
- **Networking**: Docker networks are configured to allow communication between the Wazuh components.
|
||||
- **Environment Variables**: Configuration of containers is often managed through environment variables passed at runtime.
|
||||
|
||||
Understanding this architecture and the role of each component is fundamental for successful deployment, troubleshooting, and scaling of your Wazuh environment using Wazuh-Docker.
|
@@ -1,47 +0,0 @@
|
||||
# Reference Manual - Introduction
|
||||
|
||||
Welcome to the Reference Manual for Wazuh-Docker, version 4.14.0. This manual provides comprehensive information about deploying, configuring, and managing your Wazuh environment using Docker.
|
||||
|
||||
## Purpose of This Manual
|
||||
|
||||
This Reference Manual is designed to be your go-to resource for understanding the intricacies of Wazuh-Docker. It aims to cover:
|
||||
|
||||
- The core concepts and architecture of Wazuh when deployed with Docker.
|
||||
- Step-by-step guidance for getting started, from requirements to various deployment scenarios.
|
||||
- Detailed explanations of configuration options, including environment variables and persistent data management.
|
||||
- Procedures for common operational tasks like upgrading your deployment.
|
||||
- A glossary of terms to help you understand Wazuh and Docker-specific terminology.
|
||||
|
||||
## Who Should Use This Manual?
|
||||
|
||||
This manual is intended for:
|
||||
|
||||
- **System Administrators** responsible for deploying and maintaining Wazuh.
|
||||
- **Security Analysts** who use Wazuh and need to understand its Dockerized deployment.
|
||||
- **DevOps Engineers** integrating Wazuh into their CI/CD pipelines or containerized infrastructure.
|
||||
- Anyone seeking detailed technical information about Wazuh-Docker.
|
||||
|
||||
## How This Manual is Organized
|
||||
|
||||
This manual is structured to help you find information efficiently:
|
||||
|
||||
- **[Description](description.md)**: Provides a detailed overview of Wazuh-Docker, its components, and how they work together in a containerized setup.
|
||||
- **[Getting Started](getting-started/getting-started.md)**: Guides you through the initial setup, from prerequisites to deploying your first Wazuh stack.
|
||||
- **[Requirements](getting-started/requirements.md)**: Lists the necessary hardware and software.
|
||||
- **[Deployment](getting-started/deployment/README.md)**: Offers instructions for different deployment models:
|
||||
- [Single Node Wazuh Stack](getting-started/deployment/single-node.md)
|
||||
- [Multi Node Wazuh Stack](getting-started/deployment/multi-node.md)
|
||||
- [Wazuh Agent](getting-started/deployment/wazuh-agent.md)
|
||||
- **[Configuration](configuration/configuration.md)**: Explains how to customize your Wazuh-Docker deployment.
|
||||
- [Environment Variables](configuration/environment-variables.md)
|
||||
- [Configuration Files](configuration/configuration-files.md)
|
||||
- **[Upgrade](upgrade.md)**: Provides instructions for upgrading your Wazuh-Docker deployment to a newer version.
|
||||
- **[Glossary](glossary.md)**: Defines key terms and concepts.
|
||||
|
||||
## Using This Manual
|
||||
|
||||
- If you are new to Wazuh-docker, we recommend starting with the [Description](description.md) and then proceeding to the [Getting Started](getting-started/getting-started.md) section.
|
||||
- If you need to customize your deployment, refer to the [Configuration](configuration/configuration.md) section.
|
||||
- For specific terms or concepts, consult the [Glossary](glossary.md).
|
||||
|
||||
This manual refers to version 4.14.0 of Wazuh-Docker. Ensure you are using the documentation that corresponds to your deployed version.
|
@@ -1,32 +0,0 @@
|
||||
# Configuration files
|
||||
|
||||
### 1. Wazuh Manager Configuration
|
||||
|
||||
* **`ossec.conf`**: The main configuration file for the Wazuh manager. It controls rules, decoders, agent enrollment, active responses, integrations, clustering, and more.
|
||||
* **Customization**: Mount a custom `ossec.conf` or specific configuration snippets (e.g., local rules in `local_rules.xml`) into the manager container under `/wazuh-mount-point/`; files placed there are copied into `/var/ossec` at startup (e.g., to customize `/var/ossec/etc/ossec.conf`, mount your file at `/wazuh-mount-point/etc/ossec.conf`), as shown in the sketch below.
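A minimal sketch of such a mount, assuming a service named `wazuh.manager` and host-side files kept next to your compose file (both names are illustrative):

```yaml
services:
  wazuh.manager:
    volumes:
      # Copied to /var/ossec/etc/ossec.conf when the container starts
      - ./config/ossec.conf:/wazuh-mount-point/etc/ossec.conf
      # Copied to /var/ossec/etc/rules/local_rules.xml when the container starts
      - ./config/local_rules.xml:/wazuh-mount-point/etc/rules/local_rules.xml
```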
|
||||
|
||||
### 2. Wazuh Indexer Configuration
|
||||
|
||||
* **`opensearch.yml`**: The primary configuration file for OpenSearch. Controls cluster settings, network binding, path settings, discovery, memory allocation, etc.
|
||||
* **Customization**: Mount a custom `opensearch.yml` into the indexer container(s) at `/usr/share/wazuh-indexer/config/opensearch.yml`.
|
||||
* **JVM Settings (`jvm.options`)**: Manages Java Virtual Machine settings, especially heap size (`-Xms`, `-Xmx`). Critical for performance and stability.
|
||||
* **Customization**: Mount a custom `jvm.options` file or set the `OPENSEARCH_JAVA_OPTS` environment variable, as in the example below.
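For example, the heap size can be pinned through the environment variable described later in this manual (the service name is illustrative):

```yaml
services:
  wazuh.indexer:
    environment:
      # 1 GB JVM heap; size this to roughly half of the memory reserved for the indexer
      - "OPENSEARCH_JAVA_OPTS=-Xms1g -Xmx1g"
```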
|
||||
|
||||
### 3. Wazuh Dashboard (OpenSearch Dashboards) Configuration
|
||||
|
||||
* **`opensearch_dashboards.yml`**: The main configuration file for OpenSearch Dashboards. Controls server host/port, OpenSearch connection URL, SSL settings, and Wazuh plugin settings.
|
||||
* **Customization**: Mount a custom `opensearch_dashboards.yml` into the dashboard container at `/usr/share/wazuh-dashboard/config/opensearch_dashboards.yml` and a custom `wazuh.yml` at `/usr/share/wazuh-dashboard/data/wazuh/config/wazuh.yml`, as illustrated below.
|
||||
* **Wazuh Plugin Settings**: The Wazuh plugin for the dashboard has its own configuration, often within `opensearch_dashboards.yml` or managed through environment variables, specifying the Wazuh API URL and credentials.
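An illustrative pair of mounts for these files (the host-side paths are assumptions; the container paths are the ones given above):

```yaml
services:
  wazuh.dashboard:
    volumes:
      # Main OpenSearch Dashboards configuration
      - ./config/opensearch_dashboards.yml:/usr/share/wazuh-dashboard/config/opensearch_dashboards.yml
      # Wazuh plugin configuration (Wazuh API connection settings)
      - ./config/wazuh.yml:/usr/share/wazuh-dashboard/data/wazuh/config/wazuh.yml
```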
|
||||
|
||||
## Applying Configuration Changes
|
||||
|
||||
1. **Modify `docker-compose.yml`**:
|
||||
* For changes to environment variables, port mappings, or volume mounts.
|
||||
* After changes, you typically need to stop and restart the containers:
|
||||
```bash
|
||||
docker compose down
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
|
||||
Consult the official Wazuh documentation for version 4.14.0 for detailed information on all possible configuration parameters for each component.
|
@@ -1,28 +0,0 @@
|
||||
# Reference Manual - Configuration
|
||||
|
||||
This section details how to configure your Wazuh-Docker deployment (version 4.14.0). Proper configuration is key to tailoring the Wazuh stack to your specific needs, managing data persistence, and integrating with your environment.
|
||||
|
||||
## Overview of Configuration Methods
|
||||
|
||||
Configuring Wazuh components within a Docker environment typically involves several methods:
|
||||
|
||||
1. **[Environment Variables](environment-variables.md)**:
|
||||
* Many container settings are controlled by passing environment variables at runtime (e.g., via the `docker-compose.yml` file or `docker run` commands).
|
||||
* These are often used for setting up initial passwords, component versions, cluster names, or basic operational parameters.
|
||||
|
||||
2. **[Configuration Files](configuration-files.md)**:
|
||||
* Core Wazuh components (manager, indexer, dashboard) rely on their traditional configuration files (e.g., `ossec.conf`, `opensearch.yml`, `opensearch_dashboards.yml`).
|
||||
* To customize these, you typically mount your custom configuration files into the containers, replacing or supplementing the defaults. This is managed using Docker volumes in your `docker-compose.yml`.
|
||||
|
||||
3. **Docker Compose File (`docker-compose.yml`)**:
|
||||
* The `docker-compose.yml` file itself is a primary configuration tool. It defines:
|
||||
* Which services (containers) to run.
|
||||
* The Docker images to use.
|
||||
* Port mappings.
|
||||
* Volume mounts for persistent data and custom configurations.
|
||||
* Network configurations.
|
||||
* Resource limits (CPU, memory).
|
||||
* Dependencies between services.
|
||||
|
||||
4. **Persistent Data Volumes**:
|
||||
* Configuration related to data storage (e.g., paths for Wazuh Indexer data, Wazuh manager logs and agent keys) is managed through Docker volumes. Persisting these volumes ensures your data and critical configurations survive container restarts or recreations.
|
@@ -1,116 +0,0 @@
|
||||
# Environment Variables in Wazuh Docker Deployment
|
||||
|
||||
This document outlines the environment variables applicable to the Wazuh Docker deployment, covering the Wazuh Manager, Indexer, Dashboard, and Agent components. It also explains how to override configuration settings using environment variables.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Wazuh Manager](#wazuh-manager)
|
||||
- [Wazuh Indexer](#wazuh-indexer)
|
||||
- [Wazuh Dashboard](#wazuh-dashboard)
|
||||
- [Wazuh Agent](#wazuh-agent)
|
||||
- [Overriding Configuration Files with Environment Variables](#overriding-configuration-files-with-environment-variables)
|
||||
|
||||
---
|
||||
|
||||
## Wazuh Manager
|
||||
|
||||
The Wazuh Manager container accepts the following environment variables, which can be set in the `docker-compose.yml` file under the `environment` section:
|
||||
|
||||
```yaml
|
||||
environment:
|
||||
- INDEXER_USERNAME=admin
|
||||
- INDEXER_PASSWORD=SecretPassword
|
||||
- WAZUH_API_URL=https://wazuh.manager
|
||||
- DASHBOARD_USERNAME=kibanaserver
|
||||
- DASHBOARD_PASSWORD=kibanaserver
|
||||
- API_USERNAME=wazuh-wui
|
||||
- API_PASSWORD=MyS3cr37P450r.*-
|
||||
```
|
||||
|
||||
**Variable Descriptions:**
|
||||
|
||||
- `INDEXER_USERNAME` / `INDEXER_PASSWORD`: Credentials for accessing the Wazuh Indexer as the `admin` user or another user with equivalent permissions.
|
||||
- `WAZUH_API_URL`: URL of the Wazuh API, used by other services for communication.
|
||||
- `DASHBOARD_USERNAME` / `DASHBOARD_PASSWORD`: Credentials for the Wazuh Dashboard to authenticate with the Indexer.
|
||||
- `API_USERNAME` / `API_PASSWORD`: Credentials for the Wazuh API user, utilized by the Dashboard for API interactions.
|
||||
|
||||
---
|
||||
|
||||
## Wazuh Indexer
|
||||
|
||||
The Wazuh Indexer services (`single-node` and `multi-node`) use the following environment variable:
|
||||
|
||||
```yaml
|
||||
environment:
|
||||
- "OPENSEARCH_JAVA_OPTS=-Xms1g -Xmx1g"
|
||||
```
|
||||
|
||||
**Variable Descriptions:**
|
||||
|
||||
- `OPENSEARCH_JAVA_OPTS`: Sets JVM heap size and other Java options.
|
||||
|
||||
---
|
||||
|
||||
## Wazuh Dashboard
|
||||
|
||||
The Wazuh Dashboard container accepts the following environment variables, which should be set in the `docker-compose.yml` file:
|
||||
|
||||
```yaml
|
||||
environment:
|
||||
- INDEXER_USERNAME=admin
|
||||
- INDEXER_PASSWORD=SecretPassword
|
||||
- WAZUH_API_URL=https://wazuh.manager
|
||||
- DASHBOARD_USERNAME=kibanaserver
|
||||
- DASHBOARD_PASSWORD=kibanaserver
|
||||
- API_USERNAME=wazuh-wui
|
||||
- API_PASSWORD=MyS3cr37P450r.*-
|
||||
```
|
||||
|
||||
**Variable Descriptions:**
|
||||
|
||||
- `INDEXER_USERNAME` / `INDEXER_PASSWORD`: Credentials used by the Dashboard to authenticate with the Wazuh Indexer.
|
||||
- `WAZUH_API_URL`: Base URL of the Wazuh API, used for querying and visualizing security data.
|
||||
- `DASHBOARD_USERNAME` / `DASHBOARD_PASSWORD`: User credentials for the Dashboard interface.
|
||||
- `API_USERNAME` / `API_PASSWORD`: API user credentials for authenticating Wazuh API requests initiated by the Dashboard.
|
||||
|
||||
These variables are critical for enabling communication between the Wazuh Dashboard, the Wazuh Indexer, and the Wazuh API.
|
||||
|
||||
---
|
||||
|
||||
## Wazuh Agent
|
||||
|
||||
The Wazuh Agent container uses the following environment variables to dynamically update the `ossec.conf` configuration file at runtime:
|
||||
|
||||
```yaml
|
||||
environment:
|
||||
- WAZUH_MANAGER_SERVER=wazuh.manager
|
||||
- WAZUH_MANAGER_PORT=1514
|
||||
- WAZUH_REGISTRATION_SERVER=wazuh.manager
|
||||
- WAZUH_REGISTRATION_PORT=1515
|
||||
- WAZUH_AGENT_NAME=my-agent
|
||||
- WAZUH_REGISTRATION_PASSWORD=StrongPassword
|
||||
```
|
||||
|
||||
These variables are consumed by the `set_manager_conn()` function in the entrypoint script, which replaces the placeholder values in `ossec.conf` and sets the enrollment password; you can inspect the result as shown below.
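To confirm the substitution worked, you can inspect the rendered file inside the running container; the container name below is an assumption, so check `docker compose ps` for the actual one in your deployment:

```bash
# Print the <client> block of the agent configuration, which should now
# reference your manager address and ports instead of placeholders
docker exec -it wazuh-agent grep -A 6 "<client>" /var/ossec/etc/ossec.conf
```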
|
||||
|
||||
---
|
||||
|
||||
## Overriding Configuration Files with Environment Variables
|
||||
|
||||
To override configuration values from files such as `opensearch.yml` and `opensearch_dashboards.yml` using environment variables:
|
||||
|
||||
1. Convert the configuration key to uppercase.
|
||||
2. Replace any dots (`.`) in the key with underscores (`_`).
|
||||
3. Assign the corresponding value.
|
||||
|
||||
### Examples:
|
||||
|
||||
| YAML Key | Environment Variable |
|
||||
|-----------------------------------------|--------------------------------------------|
|
||||
| `discovery.type: single-node` | `DISCOVERY_TYPE=single-node` |
|
||||
| `opensearch.hosts: https://url:9200` | `OPENSEARCH_HOSTS=https://url:9200` |
|
||||
| `server.port: 5601` | `SERVER_PORT=5601` |
|
||||
|
||||
This approach allows you to configure the services dynamically via Docker without modifying internal files.
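For instance, applied in a compose file (the service names and the indexer hostname are illustrative):

```yaml
services:
  wazuh.indexer:
    environment:
      # Overrides discovery.type in opensearch.yml
      - DISCOVERY_TYPE=single-node
  wazuh.dashboard:
    environment:
      # Override server.port and opensearch.hosts in opensearch_dashboards.yml
      - SERVER_PORT=5601
      - OPENSEARCH_HOSTS=https://wazuh.indexer:9200
```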
|
||||
|
||||
---
|
@@ -1,46 +0,0 @@
|
||||
# Reference Manual - Deployment
|
||||
|
||||
This section provides detailed instructions for deploying Wazuh-Docker (version 4.14.0) in various configurations. Choose the deployment model that best suits your needs, from simple single-node setups for testing to more robust multi-node configurations for production environments.
|
||||
|
||||
## Overview of Deployment Options
|
||||
|
||||
Wazuh-Docker offers flexibility in how you can deploy the Wazuh stack. The primary methods covered in this documentation are:
|
||||
|
||||
1. **[Single Node Wazuh Stack](single-node.md)**:
|
||||
* **Description**: Deploys all core Wazuh components (Wazuh manager, Wazuh indexer, Wazuh dashboard) as Docker containers on a single host machine.
|
||||
* **Use Cases**: Ideal for development, testing, demonstrations, proof-of-concepts, and small-scale production environments where simplicity is prioritized and high availability is not a critical concern.
|
||||
* **Pros**: Easiest and quickest to set up.
|
||||
* **Cons**: Single point of failure; limited scalability compared to multi-node.
|
||||
|
||||
2. **[Multi Node Wazuh Stack](multi-node.md)**:
|
||||
* **Description**: This typically refers to deploying a Wazuh Indexer cluster and potentially multiple Wazuh managers for improved scalability and resilience. While true multi-host orchestration often uses tools like Kubernetes, this section may cover configurations achievable with Docker Compose, possibly across multiple Docker hosts or with clustered services on a single powerful host.
|
||||
* **Use Cases**: Production environments requiring higher availability, data redundancy (for Wazuh Indexer), and the ability to handle a larger number of agents.
|
||||
* **Pros**: Improved fault tolerance (for clustered components like the Indexer), better performance distribution.
|
||||
* **Cons**: More complex to set up and manage than a single-node deployment.
|
||||
|
||||
## Before You Begin Deployment
|
||||
|
||||
Ensure you have:
|
||||
|
||||
- Met all the [System Requirements](ref/getting-started/requirements.md).
|
||||
- Installed Docker and Docker Compose on your host(s).
|
||||
- Cloned the `wazuh-docker` repository (version `4.14.0`) or downloaded the necessary deployment files.
|
||||
```bash
|
||||
git clone https://github.com/wazuh/wazuh-docker.git
|
||||
cd wazuh-docker
|
||||
git checkout v4.14.0
|
||||
```
|
||||
- Made a backup of any existing Wazuh data if you are migrating or upgrading.
|
||||
|
||||
## Choosing the Right Deployment
|
||||
|
||||
Consider the following factors when choosing a deployment model:
|
||||
|
||||
- **Scale**: How many agents do you plan to connect?
|
||||
- **Availability**: What are your uptime requirements?
|
||||
- **Resources**: What hardware resources (CPU, RAM, disk) are available?
|
||||
- **Complexity**: What is your team's familiarity with Docker and distributed systems?
|
||||
|
||||
For most new users, starting with the [Single Node Wazuh Stack](single-node.md) is recommended to familiarize themselves with Wazuh-Docker. You can then explore more complex setups as your needs grow.
|
||||
|
||||
Navigate to the specific deployment guide linked above for detailed, step-by-step instructions.
|
@@ -1,34 +0,0 @@
|
||||
# Wazuh Docker Deployment
|
||||
|
||||
## Deploying Wazuh Docker in a Multi-Node Configuration
|
||||
|
||||
This deployment utilizes the `multi-node/docker-compose.yml` file, which defines a cluster setup with two Wazuh manager containers, three Wazuh indexer containers, and one Wazuh dashboard container. Follow these steps to deploy this configuration:
|
||||
|
||||
1. Navigate to the `multi-node` directory within your repository:
|
||||
```bash
|
||||
cd multi-node
|
||||
```
|
||||
|
||||
2. Increase `vm.max_map_count` on each Docker host that will run a Wazuh Indexer container (Linux). This setting is crucial for Wazuh Indexer to operate correctly. This command requires root permissions:
|
||||
```bash
|
||||
sudo sysctl -w vm.max_map_count=262144
|
||||
```
|
||||
**Note:** This change is temporary and will revert upon reboot. To make it permanent on each relevant host, you'll need to edit the `/etc/sysctl.conf` file, add `vm.max_map_count=262144`, and then apply the change with `sudo sysctl -p`.
|
||||
|
||||
3. Run the script to generate the necessary certificates for the Wazuh Stack. This ensures secure communication between the nodes:
|
||||
```bash
|
||||
docker compose -f generate-indexer-certs.yml run --rm generator
|
||||
```
|
||||
|
||||
4. Start the Wazuh environment using `docker compose`:
|
||||
|
||||
* To run in the foreground (logs will be displayed in your current terminal; press `Ctrl+C` to stop):
|
||||
```bash
|
||||
docker compose up
|
||||
```
|
||||
* To run in the background (detached mode, allowing the containers to run independently of your terminal):
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
Please allow some time for the environment to initialize, especially on the first run. A multi-node setup can take a few minutes (depending on your host resources and network) as the Wazuh Indexer cluster forms, and the necessary indexes and index patterns are generated.
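Once the services report as running, a quick health check of the indexer cluster might look like this (the `admin` credentials and the published port 9200 follow the defaults shown elsewhere in this manual; adjust them to your setup):

```bash
# List the services defined in the compose file and their current state
docker compose ps
# Query cluster health through the published indexer port; -k skips verification
# of the self-signed certificate generated in the previous step
curl -k -u admin:SecretPassword "https://localhost:9200/_cluster/health?pretty"
```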
|
@@ -1,35 +0,0 @@
|
||||
# Wazuh Docker Deployment
|
||||
|
||||
## Deploying Wazuh Docker in a Single-Node Configuration
|
||||
|
||||
This deployment uses the `single-node/docker-compose.yml` file, which defines a setup with one Wazuh manager container, one Wazuh indexer container, and one Wazuh dashboard container. Follow these steps to deploy it:
|
||||
|
||||
1. Navigate to the `single-node` directory within your repository:
|
||||
```bash
|
||||
cd single-node
|
||||
```
|
||||
|
||||
2. Increase `vm.max_map_count` on each Docker host that will run a Wazuh Indexer container (Linux). This setting is crucial for Wazuh Indexer to operate correctly. This command requires root permissions:
|
||||
```bash
|
||||
sudo sysctl -w vm.max_map_count=262144
|
||||
```
|
||||
**Note:** This change is temporary and will revert upon reboot. To make it permanent, you'll need to edit the `/etc/sysctl.conf` file and add `vm.max_map_count=262144`, then apply with `sudo sysctl -p`.
|
||||
|
||||
3. Run the script to generate the necessary certificates for the Wazuh Stack. This ensures secure communication between the nodes:
|
||||
```bash
|
||||
docker compose -f generate-indexer-certs.yml run --rm generator
|
||||
```
|
||||
|
||||
4. Start the Wazuh environment using `docker compose`:
|
||||
|
||||
* To run in the foreground (logs will be displayed in your current terminal; press `Ctrl+C` to stop):
|
||||
```bash
|
||||
docker compose up
|
||||
```
|
||||
* To run in the background (detached mode, allowing the containers to run independently of your terminal):
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
Please allow some time for the environment to initialize, especially on the first run. It can take approximately a minute or two (depending on your host's resources) as the Wazuh Indexer starts up and generates the necessary indexes and index patterns.
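A simple way to confirm the stack is up (the dashboard port mapping below is an assumption based on the default compose file; adjust it if you remapped the port):

```bash
# All three containers should show a running state
docker compose ps
# The dashboard is served over HTTPS; -k skips the self-signed certificate check
curl -k -I https://localhost
```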
|
||||
|
@@ -1,36 +0,0 @@
|
||||
# Wazuh Docker Deployment
|
||||
|
||||
## Deploying the Wazuh Agent
|
||||
|
||||
Follow these steps to deploy the Wazuh agent using Docker.
|
||||
|
||||
1. Navigate to the `wazuh-agent` directory within your repository:
|
||||
```bash
|
||||
cd wazuh-agent
|
||||
```
|
||||
|
||||
2. Edit the `docker-compose.yml` file. You need to update the `WAZUH_MANAGER_SERVER` environment variable with the IP address or hostname of your Wazuh manager.
|
||||
|
||||
Locate the `environment` section for the agent service and update it as follows:
|
||||
```yaml
|
||||
# Inside your docker-compose.yml file
|
||||
# services:
|
||||
# wazuh-agent:
|
||||
# ...
|
||||
environment:
|
||||
- WAZUH_MANAGER_SERVER=<YOUR_WAZUH_MANAGER_IP_OR_HOSTNAME>
|
||||
# ...
|
||||
```
|
||||
**Note:** Replace `<YOUR_WAZUH_MANAGER_IP_OR_HOSTNAME>` with the actual IP address or hostname of your Wazuh manager.
|
||||
|
||||
3. Start the environment using `docker compose`:
|
||||
|
||||
* To run in the foreground (logs will be displayed in your current terminal, and you can stop it with `Ctrl+C`):
|
||||
```bash
|
||||
docker compose up
|
||||
```
|
||||
|
||||
* To run in the background (detached mode, allowing the container to run independently of your terminal):
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
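After starting the agent, you can follow its logs to confirm it enrolls and connects to the manager (exact log messages vary by version, but a successful start shows no repeated connection errors):

```bash
# Stream the agent container logs; press Ctrl+C to stop following
docker compose logs -f
```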
|
@@ -1,58 +0,0 @@
|
||||
# Reference Manual - Getting Started
|
||||
|
||||
This section guides you through the initial steps to get your Wazuh-docker (version 4.14.0) environment up and running. We will cover the prerequisites and point you to the deployment instructions.
|
||||
|
||||
## Overview
|
||||
|
||||
Getting started with Wazuh-Docker involves the following general steps:
|
||||
|
||||
1. **Understanding Requirements**: Ensuring your system meets the necessary hardware and software prerequisites.
|
||||
2. **Choosing a Deployment Type**: Deciding whether a single-node or multi-node deployment is suitable for your needs.
|
||||
3. **Setting up Docker**: Installing Docker and Docker Compose if you haven't already.
|
||||
4. **Obtaining Wazuh-Docker Files**: Cloning the `wazuh-docker` repository or downloading the necessary `docker-compose.yml` and configuration files.
|
||||
5. **Deploying the Stack**: Running `docker compose up` to launch the Wazuh components.
|
||||
6. **Initial Configuration & Verification**: Performing any initial setup steps and verifying that all components are working correctly.
|
||||
7. **Deploying Wazuh Agents**: Installing and configuring Wazuh agents on the endpoints you want to monitor and connecting them to your Wazuh manager.
|
||||
|
||||
## Before You Begin
|
||||
|
||||
Before diving into the deployment, please ensure you have reviewed:
|
||||
|
||||
- The [Description](ref/Introduction/description.md) of Wazuh-docker to understand the components and architecture.
|
||||
- The [Requirements](ref/getting-started/requirements.md) to confirm your environment is suitable.
|
||||
|
||||
## Steps to Get Started
|
||||
|
||||
1. **Meet the [Requirements](requirements.md)**:
|
||||
Verify that your host system has sufficient RAM, CPU, and disk space. Ensure Docker and Docker Compose are installed and functioning correctly.
|
||||
|
||||
2. **Obtain Wazuh-docker Configuration**:
|
||||
You'll need the Docker Compose files and any associated configuration files from the `wazuh-docker` repository for version 4.14.0.
|
||||
```bash
|
||||
git clone https://github.com/wazuh/wazuh-docker.git
|
||||
cd wazuh-docker
|
||||
git checkout v4.14.0
|
||||
# Navigate to the specific docker-compose directory, e.g., single-node or multi-node
|
||||
# cd docker-compose/single-node/ (example path)
|
||||
```
|
||||
Alternatively, you might download specific `docker-compose.yml` files if provided as part of a release package.
|
||||
|
||||
3. **Choose Your [Deployment Strategy](deployment/deployment.md)**:
|
||||
Wazuh-docker supports different deployment models. Select the one that best fits your use case:
|
||||
* **[Single Node Wazuh Stack](deployment/single-node.md)**: Ideal for testing, small environments, or proof-of-concept deployments. All main components (Wazuh manager, Wazuh indexer, Wazuh dashboard) run on a single Docker host.
|
||||
* **[Multi Node Wazuh Stack](deployment/multi-node.md)**: Suitable for production environments requiring high availability and scalability. Components might be distributed across multiple hosts or configured in a clustered mode. (Note: True multi-host orchestration often involves Kubernetes, but multi-node within Docker Compose typically refers to clustered Wazuh Indexer/Manager setups on one or more Docker hosts managed carefully).
|
||||
* **[Wazuh Agent Deployment](deployment/wazuh-agent.md)**: Instructions for deploying Wazuh agents on your endpoints and connecting them to the Wazuh manager running in Docker.
|
||||
|
||||
4. **Follow Deployment Instructions**:
|
||||
Once you've chosen a deployment strategy, follow the detailed instructions provided in the respective sections linked above. This will typically involve:
|
||||
* Configuring environment variables (if necessary).
|
||||
* Initializing persistent volumes.
|
||||
* Starting the services.
|
||||
|
||||
5. **Post-Deployment**:
|
||||
After the stack is running:
|
||||
* Access the Wazuh Dashboard via your web browser.
|
||||
* Verify that all services are healthy.
|
||||
* Begin enrolling Wazuh agents.
|
||||
|
||||
This Getting Started guide provides a high-level overview. For detailed, step-by-step instructions, please refer to the specific pages linked within this section.
|
@@ -1,73 +0,0 @@
|
||||
# Reference Manual - Requirements
|
||||
|
||||
Before deploying Wazuh-Docker (version 4.14.0), it's essential to ensure your environment meets the necessary hardware and software requirements. Meeting these prerequisites will help ensure a stable and performant Wazuh deployment.
|
||||
|
||||
## Host System Requirements
|
||||
|
||||
These are general recommendations. Actual needs may vary based on the number of agents, data volume, and usage patterns.
|
||||
|
||||
### Hardware:
|
||||
|
||||
* **CPU**:
|
||||
* **Minimum**: 2 CPU cores.
|
||||
* **Recommended**: 4 CPU cores or more, especially for production environments or deployments with a significant number of agents.
|
||||
* **RAM**:
|
||||
* **Minimum (Single-Node Test/Small Environment)**: 4 GB RAM. This is a tight minimum; 6 GB is safer.
|
||||
* Wazuh Indexer (OpenSearch): Typically requires at least 1 GB RAM allocated to its JVM heap.
|
||||
* Wazuh Manager: Resource usage depends on the number of agents.
|
||||
* Wazuh Dashboard (OpenSearch Dashboards): Also consumes memory.
|
||||
* **Recommended (Production/Multiple Agents)**: 8 GB RAM or more.
|
||||
* **Disk Space**:
|
||||
* **Minimum**: 50 GB of free disk space.
|
||||
* **Recommended**: 100 GB or more, particularly for the Wazuh Indexer data. Disk space requirements will grow over time as more data is collected and indexed.
|
||||
* **Disk Type**: SSDs (Solid State Drives) are highly recommended for the Wazuh Indexer data volumes for optimal performance.
|
||||
* **Network**:
|
||||
* A stable network connection with sufficient bandwidth, especially if agents are reporting from remote locations.
|
||||
|
||||
### Software:
|
||||
|
||||
* **Operating System**:
|
||||
* A 64-bit Linux distribution is preferred (e.g., Ubuntu, CentOS, RHEL, Debian).
|
||||
* **Docker Engine**:
|
||||
* Version `20.10.0` or newer.
|
||||
* Install Docker by following the official instructions: [Install Docker Engine](https://docs.docker.com/engine/install/).
|
||||
* **Git Client**:
|
||||
* Required for cloning the `wazuh-docker` repository.
|
||||
* **Web Browser**:
|
||||
* A modern web browser (e.g., Chrome, Firefox, Edge, Safari) for accessing the Wazuh Dashboard.
|
||||
* **`vm.max_map_count` (Linux Hosts for Wazuh Indexer/OpenSearch)**:
|
||||
* The Wazuh Indexer (OpenSearch) requires a higher `vm.max_map_count` setting than the default on most Linux systems.
|
||||
* Set it permanently:
|
||||
1. Edit `/etc/sysctl.conf` and add/modify the line:
|
||||
```
|
||||
vm.max_map_count=262144
|
||||
```
|
||||
2. Apply the change without rebooting:
|
||||
```bash
|
||||
sudo sysctl -p
|
||||
```
|
||||
* This is crucial for the stability of the Wazuh Indexer; you can verify the active value as shown below.
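To confirm the setting is active:

```bash
# Should print: vm.max_map_count = 262144
sysctl vm.max_map_count
```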
|
||||
|
||||
## Network Ports
|
||||
|
||||
Ensure that the necessary network ports are open and available on the Docker host and any firewalls:
|
||||
|
||||
* **Wazuh Manager**:
|
||||
* `1514/UDP`: For agent communication (syslog).
|
||||
* `1514/TCP`: For agent communication (if using TCP).
|
||||
* `1515/TCP`: For agent enrollment.
|
||||
* `55000/TCP`: For Wazuh API (default).
|
||||
* **Wazuh Indexer**:
|
||||
* `9200/TCP`: For HTTP REST API.
|
||||
* `9300/TCP`: For inter-node communication (if clustered).
|
||||
* **Wazuh Dashboard**:
|
||||
* `5601/TCP` (or `443/TCP` if HTTPS is configured via a reverse proxy): For web access.
|
||||
|
||||
Port mappings in `docker-compose.yml` expose these container ports on the host; adjust the host-side ports if the defaults conflict with other services. An illustrative mapping is sketched below.
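An illustrative set of mappings (not necessarily identical to the shipped compose files; the host-side ports can be changed freely):

```yaml
services:
  wazuh.manager:
    ports:
      - "1514:1514"     # Agent communication
      - "1515:1515"     # Agent enrollment
      - "55000:55000"   # Wazuh API
  wazuh.indexer:
    ports:
      - "9200:9200"     # Indexer REST API
  wazuh.dashboard:
    ports:
      - "443:5601"      # Web UI published on host port 443
```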
|
||||
|
||||
## Important Considerations
|
||||
|
||||
* **Production Environments**: For production, it's highly recommended to follow best practices for securing Docker and your host system. Consider using a multi-node setup for resilience.
|
||||
* **Resource Allocation**: Monitor resource usage after deployment and adjust allocations (CPU, RAM for Docker, JVM heap for Wazuh Indexer) as necessary.
|
||||
|
||||
Meeting these requirements will pave the way for a smoother deployment and a more stable Wazuh-Docker experience.
|
@@ -1,89 +0,0 @@
|
||||
# Reference Manual - Glossary
|
||||
|
||||
This glossary defines key terms and concepts related to Wazuh, Docker, and their use together in the Wazuh-Docker project (version 4.14.0).
|
||||
|
||||
---
|
||||
|
||||
**A**
|
||||
|
||||
- **Active Response**: A Wazuh capability that allows automatic actions to be taken on an agent or manager in response to specific triggers or alerts (e.g., blocking an IP address, stopping a process).
|
||||
- **Agent (Wazuh Agent)**: Software installed on monitored endpoints (servers, workstations, cloud instances) that collects security data (logs, file integrity, configuration assessments, etc.) and forwards it to the Wazuh Manager.
|
||||
- **Alert**: A notification generated by the Wazuh Manager when an event or a series of events matches a predefined rule, indicating a potential security issue, misconfiguration, or policy violation.
|
||||
- **API (Wazuh API)**: An application programming interface provided by the Wazuh Manager that allows for programmatic interaction with the Wazuh system, such as managing agents, retrieving alerts, updating rulesets, and checking system health.
|
||||
|
||||
**C**
|
||||
|
||||
- **CDB List (Constant DataBase List)**: Key-value pair files used by Wazuh rules for fast lookups. Useful for whitelisting, blacklisting, or correlating events with known indicators.
|
||||
- **Cluster**:
|
||||
- **Wazuh Indexer Cluster (OpenSearch/Elasticsearch Cluster)**: A group of interconnected Wazuh Indexer nodes that work together to store, index, and search data, providing scalability and high availability.
|
||||
- **Wazuh Manager Cluster**: A group of Wazuh managers working together to provide load balancing and high availability for agent connections and event processing.
|
||||
- **Container (Docker Container)**: A lightweight, standalone, executable package of software that includes everything needed to run it: code, runtime, system tools, system libraries, and settings. Wazuh-Docker runs each Wazuh component (manager, indexer, dashboard) in its own container.
|
||||
- **Containerization**: The process of packaging an application and its dependencies into a container.
|
||||
|
||||
**D**
|
||||
|
||||
- **Dashboard (Wazuh Dashboard / OpenSearch Dashboards / Kibana)**: A web-based visualization tool used to explore, analyze, and visualize data stored in the Wazuh Indexer. It provides dashboards, visualizations, and a query interface for security events and alerts. For Wazuh 4.14.0, this is typically OpenSearch Dashboards.
|
||||
- **Decoder**: A component in the Wazuh Manager that parses and extracts relevant information (fields) from raw log messages or event data.
|
||||
- **Docker**: An open platform for developing, shipping, and running applications inside containers.
|
||||
- **Docker Compose**: A tool for defining and running multi-container Docker applications. It uses a YAML file (`docker-compose.yml`) to configure the application's services, networks, and volumes.
|
||||
- **Dockerfile**: A text document that contains all the commands a user could call on the command line to assemble an image. Docker can build images automatically by reading the instructions from a Dockerfile.
|
||||
- **Docker Hub**: A cloud-based registry service that links to code repositories, builds and tests your images, stores manually pushed images, and lets you deploy images to your hosts. Wazuh Docker images are often hosted here.
|
||||
- **Docker Image**: A read-only template with instructions for creating a Docker container. Images are used to instantiate containers.
|
||||
- **Docker Volume**: A mechanism for persisting data generated by and used by Docker containers. Volumes are managed by Docker and are stored on the host filesystem, separate from the container's lifecycle. Essential for storing Wazuh data, configurations, and logs.
|
||||
|
||||
**E**
|
||||
|
||||
- **Endpoint**: Any device (server, desktop, laptop, virtual machine, cloud instance) that is monitored by a Wazuh agent.
|
||||
- **Environment Variable**: A variable whose value is set outside the program, typically by the operating system or a container runtime, and can be accessed by the program to modify its behavior. Used extensively in Wazuh-Docker for configuration.
|
||||
|
||||
**F**
|
||||
|
||||
- **File Integrity Monitoring (FIM)**: A Wazuh capability that monitors files and directories for changes, additions, or deletions, helping to detect unauthorized modifications.
|
||||
|
||||
**I**
|
||||
|
||||
- **Indexer (Wazuh Indexer / OpenSearch / Elasticsearch)**: The component responsible for storing, indexing, and making searchable the alerts and event data generated by the Wazuh Manager. For Wazuh 4.14.0, this is typically OpenSearch.
|
||||
|
||||
**L**
|
||||
|
||||
- **Log Analysis**: A core function of the Wazuh Manager, involving the collection, normalization, parsing, and analysis of log data from various sources.
|
||||
|
||||
**M**
|
||||
|
||||
- **Manager (Wazuh Manager)**: The central component of the Wazuh platform. It collects data from agents, analyzes it using rules and decoders, generates alerts, and manages agents.
|
||||
|
||||
**N**
|
||||
|
||||
- **Node**:
|
||||
- **Wazuh Indexer Node**: A single instance of a Wazuh Indexer (OpenSearch/Elasticsearch) process, typically running in a container. Multiple nodes can form a cluster.
|
||||
- **Wazuh Manager Node**: A single instance of a Wazuh manager, which can operate standalone or as part of a manager cluster.
|
||||
|
||||
**O**
|
||||
|
||||
- **`ossec.conf`**: The main configuration file for the Wazuh Manager and Wazuh Agent.
|
||||
|
||||
**R**
|
||||
|
||||
- **Rule**: A set of conditions defined in the Wazuh Manager that, when met by an event or a sequence of events, trigger an alert.
|
||||
- **Ruleset**: The collection of all rules and decoders used by the Wazuh Manager.
|
||||
|
||||
**S**
|
||||
|
||||
- **Scalability**: The ability of the system to handle a growing amount of work by adding resources. In Wazuh-Docker, this can refer to scaling the number of agents, or the capacity of the indexer/manager cluster.
|
||||
- **Security Information and Event Management (SIEM)**: A field of computer security that combines security information management (SIM) and security event management (SEM) to provide real-time analysis of security alerts generated by applications and network hardware. Wazuh is a SIEM solution.
|
||||
- **Service (Docker Compose Service)**: A definition of a container within a `docker-compose.yml` file, including its image, ports, volumes, environment variables, etc.
|
||||
|
||||
**V**
|
||||
|
||||
- **Volume (Docker Volume)**: See Docker Volume.
|
||||
|
||||
**W**
|
||||
|
||||
- **Wazuh**: An open-source security platform that provides threat prevention, detection, and response.
|
||||
- **Wazuh API**: See API.
|
||||
- **Wazuh Dashboard**: See Dashboard.
|
||||
- **Wazuh Indexer**: See Indexer.
|
||||
- **Wazuh Manager**: See Manager.
|
||||
|
||||
---
|
||||
This glossary provides a starting point. For more detailed definitions or terms not listed here, please refer to the official Wazuh and Docker documentation.
|
@@ -1,10 +0,0 @@
|
||||
# Upgrading Wazuh in Docker
|
||||
|
||||
To upgrade your Wazuh deployment when using Docker, we recommend following the official Wazuh documentation. It contains the most accurate and up-to-date information for upgrading from previous versions to the current one.
|
||||
|
||||
> 📘 Please refer to the official guide:
|
||||
> [Upgrading Wazuh Docker](https://documentation.wazuh.com/current/deployment-options/docker/upgrading-wazuh-docker.html)
|
||||
|
||||
This external guide provides detailed upgrade instructions that cover multiple scenarios and configurations.
|
||||
|
||||
Following the official documentation ensures a smoother and safer upgrade process, with fewer risks of data loss or configuration issues.
|
@@ -1,3 +0,0 @@
|
||||
#! /bin/sh
|
||||
|
||||
mdbook serve
|
BIN
images/image-1.png
Normal file
BIN
images/image-1.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 81 KiB |
BIN
images/image-2.png
Normal file
BIN
images/image-2.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 86 KiB |
@@ -1,12 +0,0 @@
|
||||
# Wazuh Docker Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
FROM ubuntu:focal
|
||||
|
||||
RUN apt-get update && apt-get install openssl curl -y
|
||||
|
||||
WORKDIR /
|
||||
|
||||
COPY config/entrypoint.sh /
|
||||
|
||||
RUN chmod 700 /entrypoint.sh
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
@@ -1,9 +0,0 @@
|
||||
# Certificate creation image build
|
||||
|
||||
The Dockerfile hosted in this directory is used to build the certificate generation image that bootstraps Wazuh's single-node and multi-node stacks.
|
||||
|
||||
To create the image, the following command must be executed:
|
||||
|
||||
```
|
||||
$ docker build -t wazuh/wazuh-certs-generator:0.0.2 .
|
||||
```
|
@@ -1,62 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Wazuh Docker Copyright (C) 2017, Wazuh Inc. (License GPLv2)
|
||||
|
||||
##############################################################################
|
||||
# Downloading Cert Gen Tool
|
||||
##############################################################################
|
||||
|
||||
## Variables
|
||||
CERT_TOOL=wazuh-certs-tool.sh
|
||||
PASSWORD_TOOL=wazuh-passwords-tool.sh
|
||||
PACKAGES_URL=https://packages.wazuh.com/4.14/
|
||||
PACKAGES_DEV_URL=https://packages-dev.wazuh.com/4.14/
|
||||
|
||||
## Check if the cert tool exists in S3 buckets
|
||||
CERT_TOOL_PACKAGES=$(curl --silent --head --location --output /dev/null --write-out "%{http_code}" "$PACKAGES_URL$CERT_TOOL")
|
||||
CERT_TOOL_PACKAGES_DEV=$(curl --silent --head --location --output /dev/null --write-out "%{http_code}" "$PACKAGES_DEV_URL$CERT_TOOL")
|
||||
|
||||
## If cert tool exists in some bucket, download it, if not exit 1
|
||||
if [ "$CERT_TOOL_PACKAGES" = "200" ]; then
|
||||
curl -o $CERT_TOOL $PACKAGES_URL$CERT_TOOL -s
|
||||
echo "The tool to create the certificates exists in the in Packages bucket"
|
||||
elif [ "$CERT_TOOL_PACKAGES_DEV" = "200" ]; then
|
||||
curl -o $CERT_TOOL $PACKAGES_DEV_URL$CERT_TOOL -s
|
||||
echo "The tool to create the certificates exists in Packages-dev bucket"
|
||||
else
|
||||
echo "The tool to create the certificates does not exist in any bucket"
|
||||
echo "ERROR: certificates were not created"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cp /config/certs.yml /config.yml
|
||||
|
||||
chmod 700 /$CERT_TOOL
|
||||
|
||||
##############################################################################
|
||||
# Creating Cluster certificates
|
||||
##############################################################################
|
||||
|
||||
## Execute the cert tool and parse config.yml to set UID permissions
|
||||
source /$CERT_TOOL -A
|
||||
nodes_server=$( cert_parseYaml /config.yml | grep -E "nodes[_]+server[_]+[0-9]+=" | sed -e 's/nodes__server__[0-9]=//' | sed 's/"//g' )
|
||||
node_names=($nodes_server)
|
||||
|
||||
echo "Moving created certificates to the destination directory"
|
||||
cp /wazuh-certificates/* /certificates/
|
||||
echo "Changing certificate permissions"
|
||||
chmod -R 500 /certificates
|
||||
chmod -R 400 /certificates/*
|
||||
echo "Setting UID indexer and dashboard"
|
||||
chown 1000:1000 /certificates/*
|
||||
echo "Setting UID for wazuh manager and worker"
|
||||
cp /certificates/root-ca.pem /certificates/root-ca-manager.pem
|
||||
cp /certificates/root-ca.key /certificates/root-ca-manager.key
|
||||
chown 999:999 /certificates/root-ca-manager.pem
|
||||
chown 999:999 /certificates/root-ca-manager.key
|
||||
|
||||
for i in ${node_names[@]};
|
||||
do
|
||||
chown 999:999 "/certificates/${i}.pem"
|
||||
chown 999:999 "/certificates/${i}-key.pem"
|
||||
done
|
||||
|
7
kibana/Dockerfile
Normal file
7
kibana/Dockerfile
Normal file
@@ -0,0 +1,7 @@
|
||||
FROM kibana:5.5.2
|
||||
|
||||
RUN apt-get update && apt-get install -y curl
|
||||
|
||||
COPY ./config/kibana.yml /opt/kibana/config/kibana.yml
|
||||
|
||||
COPY config/wait-for-it.sh /
|
92
kibana/config/kibana.yml
Normal file
92
kibana/config/kibana.yml
Normal file
@@ -0,0 +1,92 @@
|
||||
# Kibana is served by a back end server. This setting specifies the port to use.
|
||||
server.port: 5601
|
||||
|
||||
# This setting specifies the IP address of the back end server.
|
||||
server.host: "0.0.0.0"
|
||||
|
||||
# Enables you to specify a path to mount Kibana at if you are running behind a proxy. This setting
|
||||
# cannot end in a slash.
|
||||
# server.basePath: ""
|
||||
|
||||
# The maximum payload size in bytes for incoming server requests.
|
||||
# server.maxPayloadBytes: 1048576
|
||||
|
||||
# The Kibana server's name. This is used for display purposes.
|
||||
# server.name: "your-hostname"
|
||||
|
||||
# The URL of the Elasticsearch instance to use for all your queries.
|
||||
elasticsearch.url: "http://elasticsearch:9200"
|
||||
|
||||
# When this setting’s value is true Kibana uses the hostname specified in the server.host
|
||||
# setting. When the value of this setting is false, Kibana uses the hostname of the host
|
||||
# that connects to this Kibana instance.
|
||||
# elasticsearch.preserveHost: true
|
||||
|
||||
# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
|
||||
# dashboards. Kibana creates a new index if the index doesn’t already exist.
|
||||
# kibana.index: ".kibana"
|
||||
|
||||
# The default application to load.
|
||||
# kibana.defaultAppId: "discover"
|
||||
|
||||
# If your Elasticsearch is protected with basic authentication, these settings provide
|
||||
# the username and password that the Kibana server uses to perform maintenance on the Kibana
|
||||
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
|
||||
# is proxied through the Kibana server.
|
||||
# elasticsearch.username: "user"
|
||||
# elasticsearch.password: "pass"
|
||||
|
||||
# Paths to the PEM-format SSL certificate and SSL key files, respectively. These
|
||||
# files enable SSL for outgoing requests from the Kibana server to the browser.
|
||||
# server.ssl.cert: /path/to/your/server.crt
|
||||
# server.ssl.key: /path/to/your/server.key
|
||||
|
||||
# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
|
||||
# These files validate that your Elasticsearch backend uses the same key files.
|
||||
# elasticsearch.ssl.cert: /path/to/your/client.crt
|
||||
# elasticsearch.ssl.key: /path/to/your/client.key
|
||||
|
||||
# Optional setting that enables you to specify a path to the PEM file for the certificate
|
||||
# authority for your Elasticsearch instance.
|
||||
# elasticsearch.ssl.ca: /path/to/your/CA.pem
|
||||
|
||||
# To disregard the validity of SSL certificates, change this setting’s value to false.
|
||||
# elasticsearch.ssl.verify: true
|
||||
|
||||
# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
|
||||
# the elasticsearch.requestTimeout setting.
|
||||
# elasticsearch.pingTimeout: 1500
|
||||
|
||||
# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
|
||||
# must be a positive integer.
|
||||
# elasticsearch.requestTimeout: 30000
|
||||
|
||||
# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
|
||||
# headers, set this value to [] (an empty list).
|
||||
# elasticsearch.requestHeadersWhitelist: [ authorization ]
|
||||
|
||||
# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
|
||||
# elasticsearch.shardTimeout: 0
|
||||
|
||||
# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying.
|
||||
# elasticsearch.startupTimeout: 5000
|
||||
|
||||
# Specifies the path where Kibana creates the process ID file.
|
||||
# pid.file: /var/run/kibana.pid
|
||||
|
||||
# Enables you specify a file where Kibana stores log output.
|
||||
# logging.dest: stdout
|
||||
|
||||
# Set the value of this setting to true to suppress all logging output.
|
||||
# logging.silent: false
|
||||
|
||||
# Set the value of this setting to true to suppress all logging output other than error messages.
|
||||
logging.quiet: true
|
||||
|
||||
# Set the value of this setting to true to log all events, including system usage information
|
||||
# and all requests.
|
||||
# logging.verbose: false
|
||||
|
||||
# Set the interval in milliseconds to sample system and process performance
|
||||
# metrics. Minimum is 100ms. Defaults to 10000.
|
||||
# ops.interval: 10000
|
58
kibana/config/wait-for-it.sh
Normal file
58
kibana/config/wait-for-it.sh
Normal file
@@ -0,0 +1,58 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
host="$1"
|
||||
shift
|
||||
cmd="kibana"
|
||||
WAZUH_KIBANA_PLUGIN_URL=${WAZUH_KIBANA_PLUGIN_URL:-https://packages.wazuh.com/wazuhapp/wazuhapp-2.1.0_5.5.2.zip}
|
||||
|
||||
until curl -XGET $host:9200; do
|
||||
>&2 echo "Elastic is unavailable - sleeping"
|
||||
sleep 1
|
||||
done
|
||||
|
||||
sleep 30
|
||||
|
||||
>&2 echo "Elastic is up - executing command"
|
||||
|
||||
if /usr/share/kibana/bin/kibana-plugin list | grep wazuh; then
|
||||
echo "Wazuh APP already installed"
|
||||
else
|
||||
/usr/share/kibana/bin/kibana-plugin install ${WAZUH_KIBANA_PLUGIN_URL}
|
||||
fi
|
||||
|
||||
sleep 30
|
||||
|
||||
echo "Configuring defaultIndex to wazuh-alerts-*"
|
||||
|
||||
curl -s -XPUT http://$host:9200/.kibana/config/5.5.2 -d '{"defaultIndex" : "wazuh-alerts-*"}' > /dev/null
|
||||
|
||||
sleep 30
|
||||
|
||||
echo "Setting API credentials into Wazuh APP"
|
||||
|
||||
CONFIG_CODE=$(curl -s -o /dev/null -w "%{http_code}" -XGET http://$host:9200/.wazuh/wazuh-configuration/apiconfig)
|
||||
if [ "x$CONFIG_CODE" = "x404" ]; then
|
||||
curl -s -XPOST http://$host:9200/.wazuh/wazuh-configuration/apiconfig -H 'Content-Type: application/json' -d'
|
||||
{
|
||||
"api_user": "foo",
|
||||
"api_password": "YmFy",
|
||||
"url": "http://wazuh",
|
||||
"api_port": "55000",
|
||||
"insecure": "true",
|
||||
"component": "API",
|
||||
"active": "true",
|
||||
"manager": "wazuh-manager",
|
||||
"extensions": {
|
||||
"oscap": true,
|
||||
"audit": true,
|
||||
"pci": true
|
||||
}
|
||||
}
|
||||
' > /dev/null
|
||||
else
|
||||
echo "Wazuh APP already configured"
|
||||
fi
|
||||
|
||||
exec $cmd
|
12
logstash/Dockerfile
Normal file
12
logstash/Dockerfile
Normal file
@@ -0,0 +1,12 @@
|
||||
FROM logstash:5.5.2
|
||||
|
||||
RUN apt-get update
|
||||
|
||||
COPY config/logstash.conf /etc/logstash/conf.d/logstash.conf
|
||||
COPY config/wazuh-elastic5-template.json /etc/logstash/wazuh-elastic5-template.json
|
||||
|
||||
|
||||
ADD config/run.sh /tmp/run.sh
|
||||
RUN chmod 755 /tmp/run.sh
|
||||
|
||||
ENTRYPOINT ["/tmp/run.sh"]
|
43
logstash/config/logstash.conf
Normal file
43
logstash/config/logstash.conf
Normal file
@@ -0,0 +1,43 @@
|
||||
# Wazuh - Logstash configuration file
|
||||
## Remote Wazuh Manager - Filebeat input
|
||||
input {
|
||||
beats {
|
||||
port => 5000
|
||||
codec => "json_lines"
|
||||
# ssl => true
|
||||
# ssl_certificate => "/etc/logstash/logstash.crt"
|
||||
# ssl_key => "/etc/logstash/logstash.key"
|
||||
}
|
||||
}
|
||||
## Local Wazuh Manager - JSON file input
|
||||
#input {
|
||||
# file {
|
||||
# type => "wazuh-alerts"
|
||||
# path => "/var/ossec/logs/alerts/alerts.json"
|
||||
# codec => "json"
|
||||
# }
|
||||
#}
|
||||
filter {
|
||||
geoip {
|
||||
source => "srcip"
|
||||
target => "GeoLocation"
|
||||
fields => ["city_name", "continent_code", "country_code2", "country_name", "region_name", "location"]
|
||||
}
|
||||
date {
|
||||
match => ["timestamp", "ISO8601"]
|
||||
target => "@timestamp"
|
||||
}
|
||||
mutate {
|
||||
remove_field => [ "timestamp", "beat", "fields", "input_type", "tags", "count", "@version", "log", "offset", "type"]
|
||||
}
|
||||
}
|
||||
output {
|
||||
elasticsearch {
|
||||
hosts => ["elasticsearch:9200"]
|
||||
index => "wazuh-alerts-%{+YYYY.MM.dd}"
|
||||
document_type => "wazuh"
|
||||
template => "/etc/logstash/wazuh-elastic5-template.json"
|
||||
template_name => "wazuh"
|
||||
template_overwrite => true
|
||||
}
|
||||
}
|
31
logstash/config/run.sh
Normal file
31
logstash/config/run.sh
Normal file
@@ -0,0 +1,31 @@
|
||||
#!/bin/bash
|
||||
|
||||
#
|
||||
# OSSEC container bootstrap. See the README for information of the environment
|
||||
# variables expected by this script.
|
||||
#
|
||||
|
||||
#
|
||||
|
||||
#
|
||||
# Apply Templates
|
||||
#
|
||||
|
||||
set -e
|
||||
host="elasticsearch"
|
||||
until curl -XGET $host:9200; do
|
||||
>&2 echo "Elastic is unavailable - sleeping"
|
||||
sleep 1
|
||||
done
|
||||
|
||||
# Add logstash as command if needed
|
||||
if [ "${1:0:1}" = '-' ]; then
|
||||
set -- logstash "$@"
|
||||
fi
|
||||
|
||||
# Run as user "logstash" if the command is "logstash"
|
||||
if [ "$1" = 'logstash' ]; then
|
||||
set -- gosu logstash "$@"
|
||||
fi
|
||||
|
||||
exec "$@"
|
620
logstash/config/wazuh-elastic5-template.json
Normal file
620
logstash/config/wazuh-elastic5-template.json
Normal file
@@ -0,0 +1,620 @@
|
||||
{
|
||||
"order": 0,
|
||||
"template": "wazuh*",
|
||||
"settings": {
|
||||
"index.refresh_interval": "5s"
|
||||
},
|
||||
"mappings": {
|
||||
"wazuh": {
|
||||
"dynamic_templates": [
|
||||
{
|
||||
"string_as_keyword": {
|
||||
"match_mapping_type": "string",
|
||||
"mapping": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"@timestamp": {
|
||||
"type": "date",
|
||||
"format": "dateOptionalTime"
|
||||
},
|
||||
"@version": {
|
||||
"type": "text"
|
||||
},
|
||||
"agent": {
|
||||
"properties": {
|
||||
"ip": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"id": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"name": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
}
|
||||
}
|
||||
},
|
||||
"manager": {
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
}
|
||||
}
|
||||
},
|
||||
"dstuser": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"AlertsFile": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"full_log": {
|
||||
"type": "text"
|
||||
},
|
||||
"previous_log": {
|
||||
"type": "text"
|
||||
},
|
||||
"GeoLocation": {
|
||||
"properties": {
|
||||
"area_code": {
|
||||
"type": "long"
|
||||
},
|
||||
"city_name": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"continent_code": {
|
||||
"type": "text"
|
||||
},
|
||||
"coordinates": {
|
||||
"type": "double"
|
||||
},
|
||||
"country_code2": {
|
||||
"type": "text"
|
||||
},
|
||||
"country_code3": {
|
||||
"type": "text"
|
||||
},
|
||||
"country_name": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"dma_code": {
|
||||
"type": "long"
|
||||
},
|
||||
"ip": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"latitude": {
|
||||
"type": "double"
|
||||
},
|
||||
"location": {
|
||||
"type": "geo_point"
|
||||
},
|
||||
"longitude": {
|
||||
"type": "double"
|
||||
},
|
||||
"postal_code": {
|
||||
"type": "keyword"
|
||||
},
|
||||
"real_region_name": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"region_name": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"timezone": {
|
||||
"type": "text"
|
||||
}
|
||||
}
|
||||
},
|
||||
"host": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"syscheck": {
|
||||
"properties": {
|
||||
"path": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"sha1_before": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"sha1_after": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"uid_before": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"uid_after": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"gid_before": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"gid_after": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"perm_before": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"perm_after": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"md5_after": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"md5_before": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"gname_after": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"gname_before": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"inode_after": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"inode_before": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"mtime_after": {
|
||||
"type": "date",
|
||||
"format": "dateOptionalTime",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"mtime_before": {
|
||||
"type": "date",
|
||||
"format": "dateOptionalTime",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"uname_after": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"uname_before": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"size_before": {
|
||||
"type": "long",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"size_after": {
|
||||
"type": "long",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"diff": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"event": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
}
|
||||
}
|
||||
},
|
||||
"location": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"message": {
|
||||
"type": "text"
|
||||
},
|
||||
"offset": {
|
||||
"type": "keyword"
|
||||
},
|
||||
"rule": {
|
||||
"properties": {
|
||||
"description": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"groups": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"level": {
|
||||
"type": "long",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"id": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"cve": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"info": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"frequency": {
|
||||
"type": "long",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"firedtimes": {
|
||||
"type": "long",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"cis": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"pci_dss": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
}
|
||||
}
|
||||
},
|
||||
"decoder": {
|
||||
"properties": {
|
||||
"parent": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"name": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"ftscomment": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"fts": {
|
||||
"type": "long",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"accumulate": {
|
||||
"type": "long",
|
||||
"doc_values": "true"
|
||||
}
|
||||
}
|
||||
},
|
||||
"srcip": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"protocol": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"action": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"dstip": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"dstport": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"srcuser": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"program_name": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"id": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"status": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"command": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"url": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"data": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"system_name": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"type": {
|
||||
"type": "text"
|
||||
},
|
||||
"title": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"oscap": {
|
||||
"properties": {
|
||||
"check.title": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"check.id": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"check.result": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"check.severity": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"check.description": {
|
||||
"type": "text"
|
||||
},
|
||||
"check.rationale": {
|
||||
"type": "text"
|
||||
},
|
||||
"check.references": {
|
||||
"type": "text"
|
||||
},
|
||||
"check.identifiers": {
|
||||
"type": "text"
|
||||
},
|
||||
"check.oval.id": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"scan.id": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"scan.content": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"scan.benchmark.id": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"scan.profile.title": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"scan.profile.id": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"scan.score": {
|
||||
"type": "double",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"scan.return_code": {
|
||||
"type": "long",
|
||||
"doc_values": "true"
|
||||
}
|
||||
}
|
||||
},
|
||||
"audit": {
|
||||
"properties": {
|
||||
"type": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"id": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"syscall": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"exit": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"ppid": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"pid": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"auid": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"uid": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"gid": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"euid": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"suid": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"fsuid": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"egid": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"sgid": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"fsgid": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"tty": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"session": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"command": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"exe": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"key": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"cwd": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"directory.name": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"directory.inode": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"directory.mode": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"file.name": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"file.inode": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"file.mode": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"acct": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"dev": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"enforcing": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"list": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"old-auid": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"old-ses": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"old_enforcing": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"old_prom": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"op": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"prom": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"res": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"srcip": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"subj": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
},
|
||||
"success": {
|
||||
"type": "keyword",
|
||||
"doc_values": "true"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"agent": {
|
||||
"properties": {
|
||||
"@timestamp": {
|
||||
"type": "date",
|
||||
"format": "dateOptionalTime"
|
||||
},
|
||||
"status": {
|
||||
"type": "keyword"
|
||||
},
|
||||
"ip": {
|
||||
"type": "keyword"
|
||||
},
|
||||
"host": {
|
||||
"type": "keyword"
|
||||
},
|
||||
"name": {
|
||||
"type": "keyword"
|
||||
},
|
||||
"id": {
|
||||
"type": "keyword"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,361 +0,0 @@
|
||||
# Opendistro data migration to Wazuh indexer on Docker

This procedure explains how to migrate Opendistro data to the Wazuh indexer in Docker production deployments.
The example migrates from v4.2 to v4.4.

## Procedure

Assuming that you have a v4.2 production deployment, perform the following steps.

**1. Stop the 4.2 environment**

`docker-compose -f production-cluster.yml stop`

**2. List the Elasticsearch volumes**

`docker volume ls --filter name='wazuh-docker_elastic-data'`

**3. Inspect an Elasticsearch volume**

`docker volume inspect wazuh-docker_elastic-data-1`

**4. Spin down the 4.2 environment**

`docker-compose -f production-cluster.yml down`

**Steps 5 and 6 can also be done with the volume-migrator.sh script, specifying the Docker Compose version and the project name as parameters.**

Ex: $ multi-node/volume-migrator.sh 1.25.0 multi-node

**5. Run the volume create commands:** create the new indexer and Wazuh manager volumes using the `com.docker.compose.version` label value obtained from the previous command (one way to read it is sketched below).
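The label values needed for the commands below can be read straight from the volume metadata. A minimal sketch, assuming the volume names listed in step 2; the `--format` Go template is a generic Docker CLI feature, not something specific to this repository:

```
# Print the Compose project and version labels recorded on an existing volume
docker volume inspect \
  --format 'project={{ index .Labels "com.docker.compose.project" }} version={{ index .Labels "com.docker.compose.version" }}' \
  wazuh-docker_elastic-data-1
```

The create commands for all of the required volumes follow.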
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=wazuh-indexer-data-1 \
|
||||
multi-node_wazuh-indexer-data-1
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=wazuh-indexer-data-2 \
|
||||
multi-node_wazuh-indexer-data-2
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=wazuh-indexer-data-3 \
|
||||
multi-node_wazuh-indexer-data-3
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=master_wazuh_api_configuration \
|
||||
multi-node_master_wazuh_api_configuration
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=master_wazuh_etc \
|
||||
multi-node_docker_wazuh_etc
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=master-wazuh-logs \
|
||||
multi-node_master-wazuh-logs
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=master-wazuh-queue \
|
||||
multi-node_master-wazuh-queue
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=master-wazuh-var-multigroups \
|
||||
multi-node_master-wazuh-var-multigroups
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=master-wazuh-integrations \
|
||||
multi-node_master-wazuh-integrations
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=master-wazuh-active-response \
|
||||
multi-node_master-wazuh-active-response
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=master-wazuh-agentless \
|
||||
multi-node_master-wazuh-agentless
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=master-wazuh-wodles \
|
||||
multi-node_master-wazuh-wodles
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=master-filebeat-etc \
|
||||
multi-node_master-filebeat-etc
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=master-filebeat-var \
|
||||
multi-node_master-filebeat-var
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=worker_wazuh_api_configuration \
|
||||
multi-node_worker_wazuh_api_configuration
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=worker_wazuh_etc \
|
||||
multi-node_worker-wazuh-etc
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=worker-wazuh-logs \
|
||||
multi-node_worker-wazuh-logs
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=worker-wazuh-queue \
|
||||
multi-node_worker-wazuh-queue
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=worker-wazuh-var-multigroups \
|
||||
multi-node_worker-wazuh-var-multigroups
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=worker-wazuh-integrations \
|
||||
multi-node_worker-wazuh-integrations
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=worker-wazuh-active-response \
|
||||
multi-node_worker-wazuh-active-response
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=worker-wazuh-agentless \
|
||||
multi-node_worker-wazuh-agentless
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=worker-wazuh-wodles \
|
||||
multi-node_worker-wazuh-wodles
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=worker-filebeat-etc \
|
||||
multi-node_worker-filebeat-etc
|
||||
```
|
||||
```
|
||||
docker volume create \
|
||||
--label com.docker.compose.project=multi-node \
|
||||
--label com.docker.compose.version=1.25.0 \
|
||||
--label com.docker.compose.volume=worker-filebeat-var \
|
||||
multi-node_worker-filebeat-var
|
||||
```
|
||||
**6. Copy the volume content:** copy the old Elasticsearch data to the new Wazuh indexer volumes, and the old Wazuh manager content to the new manager volumes.
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_elastic-data-1:/from \
|
||||
-v multi-node_wazuh-indexer-data-1:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_elastic-data-2:/from \
|
||||
-v multi-node_wazuh-indexer-data-2:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_elastic-data-3:/from \
|
||||
-v multi-node_wazuh-indexer-data-3:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_ossec-api-configuration:/from \
|
||||
-v multi-node_master-wazuh-api-configuration:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_ossec-etc:/from \
|
||||
-v multi-node_master-wazuh-etc:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_ossec-logs:/from \
|
||||
-v multi-node_master-wazuh-logs:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_ossec-queue:/from \
|
||||
-v multi-node_master-wazuh-queue:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_ossec-var-multigroups:/from \
|
||||
-v multi-node_master-wazuh-var-multigroups:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_ossec-integrations:/from \
|
||||
-v multi-node_master-wazuh-integrations:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_ossec-active-response:/from \
|
||||
-v multi-node_master-wazuh-active-response:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_ossec-agentless:/from \
|
||||
-v multi-node_master-wazuh-agentless:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_ossec-wodles:/from \
|
||||
-v multi-node_master-wazuh-wodles:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_filebeat-etc:/from \
|
||||
-v multi-node_master-filebeat-etc:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_filebeat-var:/from \
|
||||
-v multi-node_master-filebeat-var:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_worker-ossec-api-configuration:/from \
|
||||
-v multi-node_worker-wazuh-api-configuration:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_worker-ossec-etc:/from \
|
||||
-v multi-node_worker-wazuh-etc:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_worker-ossec-logs:/from \
|
||||
-v multi-node_worker-wazuh-logs:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_worker-ossec-queue:/from \
|
||||
-v multi-node_worker-wazuh-queue:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_worker-ossec-var-multigroups:/from \
|
||||
-v multi-node_worker-wazuh-var-multigroups:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_worker-ossec-integrations:/from \
|
||||
-v multi-node_worker-wazuh-integrations:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_worker-ossec-active-response:/from \
|
||||
-v multi-node_worker-wazuh-active-response:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_worker-ossec-agentless:/from \
|
||||
-v multi-node_worker-wazuh-agentless:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_worker-ossec-wodles:/from \
|
||||
-v multi-node_worker-wazuh-wodles:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_worker-filebeat-etc:/from \
|
||||
-v multi-node_worker-filebeat-etc:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
```
|
||||
docker container run --rm -it \
|
||||
-v wazuh-docker_worker-filebeat-var:/from \
|
||||
-v multi-node_worker-filebeat-var:/to \
|
||||
alpine ash -c "cd /from ; cp -avp . /to"
|
||||
```
|
||||
|
||||
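Before starting the new environment, it may be worth spot-checking that the data actually landed in the new volumes. A minimal sketch, reusing one of the destination volume names created in step 5:

```
docker container run --rm -it \
  -v multi-node_wazuh-indexer-data-1:/to \
  alpine ash -c "du -sh /to && ls /to"
```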
**7. Start the 4.4 environment.**

```
git checkout 4.4
cd multi-node
docker-compose -f generate-indexer-certs.yml run --rm generator
docker-compose up -d
```

**8. Check access to the Wazuh dashboard**: open the Wazuh dashboard in a web browser and verify that the migrated data is displayed.
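As an additional check, the migrated alert indices can be listed through the indexer API. A minimal sketch, assuming the default demo credentials and that port 9200 is published on the Docker host as in the multi-node compose file:

```
curl -k -u admin:SecretPassword "https://localhost:9200/_cat/indices/wazuh-alerts-*?v"
```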
|
@@ -1,26 +0,0 @@
|
||||
# Deploy Wazuh Docker in multi-node configuration

This deployment is defined in the `docker-compose.yml` file with two Wazuh manager containers, three Wazuh indexer containers, and one Wazuh dashboard container. It can be deployed by following these steps:

1) Increase max_map_count on your host (Linux). This command must be run with root permissions:
```
$ sysctl -w vm.max_map_count=262144
```
2) Run the certificate creation script:
```
$ docker compose -f generate-indexer-certs.yml run --rm generator
```
3) Start the environment with Docker Compose:

- In the foreground:
```
$ docker compose up
```

- In the background:
```
$ docker compose up -d
```

On the first start the environment takes about one minute to come up (depending on your Docker host), since the Wazuh indexer has to initialize and the indices and index patterns must be generated. One way to follow the startup is sketched below.
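A minimal sketch for following the startup, assuming the service names in `docker-compose.yml` match the node names used in this directory's `config.yml`:

```
# Show container status for the whole stack
docker compose ps

# Follow the logs of the first indexer node (service name assumed from config.yml)
docker compose logs -f wazuh1.indexer
```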
|
@@ -1,24 +0,0 @@
|
||||
nodes:
  # Wazuh indexer server nodes
  indexer:
    - name: wazuh1.indexer
      ip: wazuh1.indexer
    - name: wazuh2.indexer
      ip: wazuh2.indexer
    - name: wazuh3.indexer
      ip: wazuh3.indexer

  # Wazuh server nodes
  # Use node_type only with more than one Wazuh manager
  server:
    - name: wazuh.master
      ip: wazuh.master
      node_type: master
    - name: wazuh.worker
      ip: wazuh.worker
      node_type: worker

  # Wazuh dashboard node
  dashboard:
    - name: wazuh.dashboard
      ip: wazuh.dashboard
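This node list is what the certificate creation tool consumes: each entry corresponds to one generated certificate. After editing it, re-running the generator is the same one-liner used in the deployment steps above:

```
docker compose -f generate-indexer-certs.yml run --rm generator
```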
|
@@ -1,46 +0,0 @@
|
||||
user nginx;
worker_processes 1;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;


events {
    worker_connections 1024;
}


http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;

    keepalive_timeout 65;

    server_tokens off;
    gzip on;

}


# load balancer for Wazuh cluster
stream {
    upstream mycluster {
        hash $remote_addr consistent;
        server wazuh.master:1514;
        server wazuh.worker:1514;
    }
    server {
        listen 1514;
        proxy_pass mycluster;
    }
}
|
@@ -1,313 +0,0 @@
|
||||
<ossec_config>
|
||||
<global>
|
||||
<jsonout_output>yes</jsonout_output>
|
||||
<alerts_log>yes</alerts_log>
|
||||
<logall>no</logall>
|
||||
<logall_json>no</logall_json>
|
||||
<email_notification>no</email_notification>
|
||||
<smtp_server>smtp.example.wazuh.com</smtp_server>
|
||||
<email_from>wazuh@example.wazuh.com</email_from>
|
||||
<email_to>recipient@example.wazuh.com</email_to>
|
||||
<email_maxperhour>12</email_maxperhour>
|
||||
<email_log_source>alerts.log</email_log_source>
|
||||
<agents_disconnection_time>10m</agents_disconnection_time>
|
||||
<agents_disconnection_alert_time>0</agents_disconnection_alert_time>
|
||||
</global>
|
||||
|
||||
<alerts>
|
||||
<log_alert_level>3</log_alert_level>
|
||||
<email_alert_level>12</email_alert_level>
|
||||
</alerts>
|
||||
|
||||
<!-- Choose between "plain", "json", or "plain,json" for the format of internal logs -->
|
||||
<logging>
|
||||
<log_format>plain</log_format>
|
||||
</logging>
|
||||
|
||||
<remote>
|
||||
<connection>secure</connection>
|
||||
<port>1514</port>
|
||||
<protocol>tcp</protocol>
|
||||
<queue_size>131072</queue_size>
|
||||
</remote>
|
||||
|
||||
<!-- Policy monitoring -->
|
||||
<rootcheck>
|
||||
<disabled>no</disabled>
|
||||
<check_files>yes</check_files>
|
||||
<check_trojans>yes</check_trojans>
|
||||
<check_dev>yes</check_dev>
|
||||
<check_sys>yes</check_sys>
|
||||
<check_pids>yes</check_pids>
|
||||
<check_ports>yes</check_ports>
|
||||
<check_if>yes</check_if>
|
||||
|
||||
<!-- Frequency that rootcheck is executed - every 12 hours -->
|
||||
<frequency>43200</frequency>
|
||||
|
||||
<rootkit_files>etc/rootcheck/rootkit_files.txt</rootkit_files>
|
||||
<rootkit_trojans>etc/rootcheck/rootkit_trojans.txt</rootkit_trojans>
|
||||
|
||||
<skip_nfs>yes</skip_nfs>
|
||||
</rootcheck>
|
||||
|
||||
<wodle name="cis-cat">
|
||||
<disabled>yes</disabled>
|
||||
<timeout>1800</timeout>
|
||||
<interval>1d</interval>
|
||||
<scan-on-start>yes</scan-on-start>
|
||||
|
||||
<java_path>wodles/java</java_path>
|
||||
<ciscat_path>wodles/ciscat</ciscat_path>
|
||||
</wodle>
|
||||
|
||||
<!-- Osquery integration -->
|
||||
<wodle name="osquery">
|
||||
<disabled>yes</disabled>
|
||||
<run_daemon>yes</run_daemon>
|
||||
<log_path>/var/log/osquery/osqueryd.results.log</log_path>
|
||||
<config_path>/etc/osquery/osquery.conf</config_path>
|
||||
<add_labels>yes</add_labels>
|
||||
</wodle>
|
||||
|
||||
<!-- System inventory -->
|
||||
<wodle name="syscollector">
|
||||
<disabled>no</disabled>
|
||||
<interval>1h</interval>
|
||||
<scan_on_start>yes</scan_on_start>
|
||||
<hardware>yes</hardware>
|
||||
<os>yes</os>
|
||||
<network>yes</network>
|
||||
<packages>yes</packages>
|
||||
<ports all="yes">yes</ports>
|
||||
<processes>yes</processes>
|
||||
|
||||
<!-- Database synchronization settings -->
|
||||
<synchronization>
|
||||
<max_eps>10</max_eps>
|
||||
</synchronization>
|
||||
</wodle>
|
||||
|
||||
<sca>
|
||||
<enabled>yes</enabled>
|
||||
<scan_on_start>yes</scan_on_start>
|
||||
<interval>12h</interval>
|
||||
<skip_nfs>yes</skip_nfs>
|
||||
</sca>
|
||||
|
||||
<vulnerability-detection>
|
||||
<enabled>yes</enabled>
|
||||
<index-status>yes</index-status>
|
||||
<feed-update-interval>60m</feed-update-interval>
|
||||
</vulnerability-detection>
|
||||
|
||||
<indexer>
|
||||
<enabled>yes</enabled>
|
||||
<hosts>
|
||||
<host>https://wazuh1.indexer:9200</host>
|
||||
<host>https://wazuh2.indexer:9200</host>
|
||||
<host>https://wazuh3.indexer:9200</host>
|
||||
</hosts>
|
||||
<ssl>
|
||||
<certificate_authorities>
|
||||
<ca>/etc/ssl/root-ca.pem</ca>
|
||||
</certificate_authorities>
|
||||
<certificate>/etc/ssl/filebeat.pem</certificate>
|
||||
<key>/etc/ssl/filebeat.key</key>
|
||||
</ssl>
|
||||
</indexer>
|
||||
|
||||
<!-- File integrity monitoring -->
|
||||
<syscheck>
|
||||
<disabled>no</disabled>
|
||||
|
||||
<!-- Frequency that syscheck is executed default every 12 hours -->
|
||||
<frequency>43200</frequency>
|
||||
|
||||
<scan_on_start>yes</scan_on_start>
|
||||
|
||||
<!-- Generate alert when new file detected -->
|
||||
<alert_new_files>yes</alert_new_files>
|
||||
|
||||
<!-- Don't ignore files that change more than 'frequency' times -->
|
||||
<auto_ignore frequency="10" timeframe="3600">no</auto_ignore>
|
||||
|
||||
<!-- Directories to check (perform all possible verifications) -->
|
||||
<directories>/etc,/usr/bin,/usr/sbin</directories>
|
||||
<directories>/bin,/sbin,/boot</directories>
|
||||
|
||||
<!-- Files/directories to ignore -->
|
||||
<ignore>/etc/mtab</ignore>
|
||||
<ignore>/etc/hosts.deny</ignore>
|
||||
<ignore>/etc/mail/statistics</ignore>
|
||||
<ignore>/etc/random-seed</ignore>
|
||||
<ignore>/etc/random.seed</ignore>
|
||||
<ignore>/etc/adjtime</ignore>
|
||||
<ignore>/etc/httpd/logs</ignore>
|
||||
<ignore>/etc/utmpx</ignore>
|
||||
<ignore>/etc/wtmpx</ignore>
|
||||
<ignore>/etc/cups/certs</ignore>
|
||||
<ignore>/etc/dumpdates</ignore>
|
||||
<ignore>/etc/svc/volatile</ignore>
|
||||
|
||||
<!-- File types to ignore -->
|
||||
<ignore type="sregex">.log$|.swp$</ignore>
|
||||
|
||||
<!-- Check the file, but never compute the diff -->
|
||||
<nodiff>/etc/ssl/private.key</nodiff>
|
||||
|
||||
<skip_nfs>yes</skip_nfs>
|
||||
<skip_dev>yes</skip_dev>
|
||||
<skip_proc>yes</skip_proc>
|
||||
<skip_sys>yes</skip_sys>
|
||||
|
||||
<!-- Nice value for Syscheck process -->
|
||||
<process_priority>10</process_priority>
|
||||
|
||||
<!-- Maximum output throughput -->
|
||||
<max_eps>100</max_eps>
|
||||
|
||||
<!-- Database synchronization settings -->
|
||||
<synchronization>
|
||||
<enabled>yes</enabled>
|
||||
<interval>5m</interval>
|
||||
<max_interval>1h</max_interval>
|
||||
<max_eps>10</max_eps>
|
||||
</synchronization>
|
||||
</syscheck>
|
||||
|
||||
<!-- Active response -->
|
||||
<global>
|
||||
<white_list>127.0.0.1</white_list>
|
||||
<white_list>^localhost.localdomain$</white_list>
|
||||
</global>
|
||||
|
||||
<command>
|
||||
<name>disable-account</name>
|
||||
<executable>disable-account</executable>
|
||||
<timeout_allowed>yes</timeout_allowed>
|
||||
</command>
|
||||
|
||||
<command>
|
||||
<name>restart-wazuh</name>
|
||||
<executable>restart-wazuh</executable>
|
||||
</command>
|
||||
|
||||
<command>
|
||||
<name>firewall-drop</name>
|
||||
<executable>firewall-drop</executable>
|
||||
<timeout_allowed>yes</timeout_allowed>
|
||||
</command>
|
||||
|
||||
<command>
|
||||
<name>host-deny</name>
|
||||
<executable>host-deny</executable>
|
||||
<timeout_allowed>yes</timeout_allowed>
|
||||
</command>
|
||||
|
||||
<command>
|
||||
<name>route-null</name>
|
||||
<executable>route-null</executable>
|
||||
<timeout_allowed>yes</timeout_allowed>
|
||||
</command>
|
||||
|
||||
<command>
|
||||
<name>win_route-null</name>
|
||||
<executable>route-null.exe</executable>
|
||||
<timeout_allowed>yes</timeout_allowed>
|
||||
</command>
|
||||
|
||||
<command>
|
||||
<name>netsh</name>
|
||||
<executable>netsh.exe</executable>
|
||||
<timeout_allowed>yes</timeout_allowed>
|
||||
</command>
|
||||
|
||||
<!--
|
||||
<active-response>
|
||||
active-response options here
|
||||
</active-response>
|
||||
-->
|
||||
|
||||
<!-- Log analysis -->
|
||||
<localfile>
|
||||
<log_format>command</log_format>
|
||||
<command>df -P</command>
|
||||
<frequency>360</frequency>
|
||||
</localfile>
|
||||
|
||||
<localfile>
|
||||
<log_format>full_command</log_format>
|
||||
<command>netstat -tulpn | sed 's/\([[:alnum:]]\+\)\ \+[[:digit:]]\+\ \+[[:digit:]]\+\ \+\(.*\):\([[:digit:]]*\)\ \+\([0-9\.\:\*]\+\).\+\ \([[:digit:]]*\/[[:alnum:]\-]*\).*/\1 \2 == \3 == \4 \5/' | sort -k 4 -g | sed 's/ == \(.*\) ==/:\1/' | sed 1,2d</command>
|
||||
<alias>netstat listening ports</alias>
|
||||
<frequency>360</frequency>
|
||||
</localfile>
|
||||
|
||||
<localfile>
|
||||
<log_format>full_command</log_format>
|
||||
<command>last -n 20</command>
|
||||
<frequency>360</frequency>
|
||||
</localfile>
|
||||
|
||||
<ruleset>
|
||||
<!-- Default ruleset -->
|
||||
<decoder_dir>ruleset/decoders</decoder_dir>
|
||||
<rule_dir>ruleset/rules</rule_dir>
|
||||
<rule_exclude>0215-policy_rules.xml</rule_exclude>
|
||||
<list>etc/lists/audit-keys</list>
|
||||
<list>etc/lists/amazon/aws-eventnames</list>
|
||||
<list>etc/lists/security-eventchannel</list>
|
||||
<list>etc/lists/malicious-ioc/malicious-ip</list>
|
||||
<list>etc/lists/malicious-ioc/malicious-domains</list>
|
||||
<list>etc/lists/malicious-ioc/malware-hashes</list>
|
||||
|
||||
<!-- User-defined ruleset -->
|
||||
<decoder_dir>etc/decoders</decoder_dir>
|
||||
<rule_dir>etc/rules</rule_dir>
|
||||
</ruleset>
|
||||
|
||||
<rule_test>
|
||||
<enabled>yes</enabled>
|
||||
<threads>1</threads>
|
||||
<max_sessions>64</max_sessions>
|
||||
<session_timeout>15m</session_timeout>
|
||||
</rule_test>
|
||||
|
||||
<!-- Configuration for wazuh-authd -->
|
||||
<auth>
|
||||
<disabled>no</disabled>
|
||||
<port>1515</port>
|
||||
<use_source_ip>no</use_source_ip>
|
||||
<purge>yes</purge>
|
||||
<use_password>no</use_password>
|
||||
<ciphers>HIGH:!ADH:!EXP:!MD5:!RC4:!3DES:!CAMELLIA:@STRENGTH</ciphers>
|
||||
<!-- <ssl_agent_ca></ssl_agent_ca> -->
|
||||
<ssl_verify_host>no</ssl_verify_host>
|
||||
<ssl_manager_cert>etc/sslmanager.cert</ssl_manager_cert>
|
||||
<ssl_manager_key>etc/sslmanager.key</ssl_manager_key>
|
||||
<ssl_auto_negotiate>no</ssl_auto_negotiate>
|
||||
</auth>
|
||||
|
||||
<cluster>
|
||||
<name>wazuh</name>
|
||||
<node_name>manager</node_name>
|
||||
<node_type>master</node_type>
|
||||
<key>c98b6ha9b6169zc5f67rae55ae4z5647</key>
|
||||
<port>1516</port>
|
||||
<bind_addr>0.0.0.0</bind_addr>
|
||||
<nodes>
|
||||
<node>wazuh.master</node>
|
||||
</nodes>
|
||||
<hidden>no</hidden>
|
||||
<disabled>no</disabled>
|
||||
</cluster>
|
||||
|
||||
</ossec_config>
|
||||
|
||||
<ossec_config>
|
||||
<localfile>
|
||||
<log_format>syslog</log_format>
|
||||
<location>/var/ossec/logs/active-responses.log</location>
|
||||
</localfile>
|
||||
|
||||
</ossec_config>
|
@@ -1,313 +0,0 @@
|
||||
<ossec_config>
|
||||
<global>
|
||||
<jsonout_output>yes</jsonout_output>
|
||||
<alerts_log>yes</alerts_log>
|
||||
<logall>no</logall>
|
||||
<logall_json>no</logall_json>
|
||||
<email_notification>no</email_notification>
|
||||
<smtp_server>smtp.example.wazuh.com</smtp_server>
|
||||
<email_from>wazuh@example.wazuh.com</email_from>
|
||||
<email_to>recipient@example.wazuh.com</email_to>
|
||||
<email_maxperhour>12</email_maxperhour>
|
||||
<email_log_source>alerts.log</email_log_source>
|
||||
<agents_disconnection_time>10m</agents_disconnection_time>
|
||||
<agents_disconnection_alert_time>0</agents_disconnection_alert_time>
|
||||
</global>
|
||||
|
||||
<alerts>
|
||||
<log_alert_level>3</log_alert_level>
|
||||
<email_alert_level>12</email_alert_level>
|
||||
</alerts>
|
||||
|
||||
<!-- Choose between "plain", "json", or "plain,json" for the format of internal logs -->
|
||||
<logging>
|
||||
<log_format>plain</log_format>
|
||||
</logging>
|
||||
|
||||
<remote>
|
||||
<connection>secure</connection>
|
||||
<port>1514</port>
|
||||
<protocol>tcp</protocol>
|
||||
<queue_size>131072</queue_size>
|
||||
</remote>
|
||||
|
||||
<!-- Policy monitoring -->
|
||||
<rootcheck>
|
||||
<disabled>no</disabled>
|
||||
<check_files>yes</check_files>
|
||||
<check_trojans>yes</check_trojans>
|
||||
<check_dev>yes</check_dev>
|
||||
<check_sys>yes</check_sys>
|
||||
<check_pids>yes</check_pids>
|
||||
<check_ports>yes</check_ports>
|
||||
<check_if>yes</check_if>
|
||||
|
||||
<!-- Frequency that rootcheck is executed - every 12 hours -->
|
||||
<frequency>43200</frequency>
|
||||
|
||||
<rootkit_files>etc/rootcheck/rootkit_files.txt</rootkit_files>
|
||||
<rootkit_trojans>etc/rootcheck/rootkit_trojans.txt</rootkit_trojans>
|
||||
|
||||
<skip_nfs>yes</skip_nfs>
|
||||
</rootcheck>
|
||||
|
||||
<wodle name="cis-cat">
|
||||
<disabled>yes</disabled>
|
||||
<timeout>1800</timeout>
|
||||
<interval>1d</interval>
|
||||
<scan-on-start>yes</scan-on-start>
|
||||
|
||||
<java_path>wodles/java</java_path>
|
||||
<ciscat_path>wodles/ciscat</ciscat_path>
|
||||
</wodle>
|
||||
|
||||
<!-- Osquery integration -->
|
||||
<wodle name="osquery">
|
||||
<disabled>yes</disabled>
|
||||
<run_daemon>yes</run_daemon>
|
||||
<log_path>/var/log/osquery/osqueryd.results.log</log_path>
|
||||
<config_path>/etc/osquery/osquery.conf</config_path>
|
||||
<add_labels>yes</add_labels>
|
||||
</wodle>
|
||||
|
||||
<!-- System inventory -->
|
||||
<wodle name="syscollector">
|
||||
<disabled>no</disabled>
|
||||
<interval>1h</interval>
|
||||
<scan_on_start>yes</scan_on_start>
|
||||
<hardware>yes</hardware>
|
||||
<os>yes</os>
|
||||
<network>yes</network>
|
||||
<packages>yes</packages>
|
||||
<ports all="yes">yes</ports>
|
||||
<processes>yes</processes>
|
||||
|
||||
<!-- Database synchronization settings -->
|
||||
<synchronization>
|
||||
<max_eps>10</max_eps>
|
||||
</synchronization>
|
||||
</wodle>
|
||||
|
||||
<sca>
|
||||
<enabled>yes</enabled>
|
||||
<scan_on_start>yes</scan_on_start>
|
||||
<interval>12h</interval>
|
||||
<skip_nfs>yes</skip_nfs>
|
||||
</sca>
|
||||
|
||||
<vulnerability-detection>
|
||||
<enabled>yes</enabled>
|
||||
<index-status>yes</index-status>
|
||||
<feed-update-interval>60m</feed-update-interval>
|
||||
</vulnerability-detection>
|
||||
|
||||
<indexer>
|
||||
<enabled>yes</enabled>
|
||||
<hosts>
|
||||
<host>https://wazuh1.indexer:9200</host>
|
||||
<host>https://wazuh2.indexer:9200</host>
|
||||
<host>https://wazuh3.indexer:9200</host>
|
||||
</hosts>
|
||||
<ssl>
|
||||
<certificate_authorities>
|
||||
<ca>/etc/ssl/root-ca.pem</ca>
|
||||
</certificate_authorities>
|
||||
<certificate>/etc/ssl/filebeat.pem</certificate>
|
||||
<key>/etc/ssl/filebeat.key</key>
|
||||
</ssl>
|
||||
</indexer>
|
||||
|
||||
<!-- File integrity monitoring -->
|
||||
<syscheck>
|
||||
<disabled>no</disabled>
|
||||
|
||||
<!-- Frequency that syscheck is executed default every 12 hours -->
|
||||
<frequency>43200</frequency>
|
||||
|
||||
<scan_on_start>yes</scan_on_start>
|
||||
|
||||
<!-- Generate alert when new file detected -->
|
||||
<alert_new_files>yes</alert_new_files>
|
||||
|
||||
<!-- Don't ignore files that change more than 'frequency' times -->
|
||||
<auto_ignore frequency="10" timeframe="3600">no</auto_ignore>
|
||||
|
||||
<!-- Directories to check (perform all possible verifications) -->
|
||||
<directories>/etc,/usr/bin,/usr/sbin</directories>
|
||||
<directories>/bin,/sbin,/boot</directories>
|
||||
|
||||
<!-- Files/directories to ignore -->
|
||||
<ignore>/etc/mtab</ignore>
|
||||
<ignore>/etc/hosts.deny</ignore>
|
||||
<ignore>/etc/mail/statistics</ignore>
|
||||
<ignore>/etc/random-seed</ignore>
|
||||
<ignore>/etc/random.seed</ignore>
|
||||
<ignore>/etc/adjtime</ignore>
|
||||
<ignore>/etc/httpd/logs</ignore>
|
||||
<ignore>/etc/utmpx</ignore>
|
||||
<ignore>/etc/wtmpx</ignore>
|
||||
<ignore>/etc/cups/certs</ignore>
|
||||
<ignore>/etc/dumpdates</ignore>
|
||||
<ignore>/etc/svc/volatile</ignore>
|
||||
|
||||
<!-- File types to ignore -->
|
||||
<ignore type="sregex">.log$|.swp$</ignore>
|
||||
|
||||
<!-- Check the file, but never compute the diff -->
|
||||
<nodiff>/etc/ssl/private.key</nodiff>
|
||||
|
||||
<skip_nfs>yes</skip_nfs>
|
||||
<skip_dev>yes</skip_dev>
|
||||
<skip_proc>yes</skip_proc>
|
||||
<skip_sys>yes</skip_sys>
|
||||
|
||||
<!-- Nice value for Syscheck process -->
|
||||
<process_priority>10</process_priority>
|
||||
|
||||
<!-- Maximum output throughput -->
|
||||
<max_eps>100</max_eps>
|
||||
|
||||
<!-- Database synchronization settings -->
|
||||
<synchronization>
|
||||
<enabled>yes</enabled>
|
||||
<interval>5m</interval>
|
||||
<max_interval>1h</max_interval>
|
||||
<max_eps>10</max_eps>
|
||||
</synchronization>
|
||||
</syscheck>
|
||||
|
||||
<!-- Active response -->
|
||||
<global>
|
||||
<white_list>127.0.0.1</white_list>
|
||||
<white_list>^localhost.localdomain$</white_list>
|
||||
</global>
|
||||
|
||||
<command>
|
||||
<name>disable-account</name>
|
||||
<executable>disable-account</executable>
|
||||
<timeout_allowed>yes</timeout_allowed>
|
||||
</command>
|
||||
|
||||
<command>
|
||||
<name>restart-wazuh</name>
|
||||
<executable>restart-wazuh</executable>
|
||||
</command>
|
||||
|
||||
<command>
|
||||
<name>firewall-drop</name>
|
||||
<executable>firewall-drop</executable>
|
||||
<timeout_allowed>yes</timeout_allowed>
|
||||
</command>
|
||||
|
||||
<command>
|
||||
<name>host-deny</name>
|
||||
<executable>host-deny</executable>
|
||||
<timeout_allowed>yes</timeout_allowed>
|
||||
</command>
|
||||
|
||||
<command>
|
||||
<name>route-null</name>
|
||||
<executable>route-null</executable>
|
||||
<timeout_allowed>yes</timeout_allowed>
|
||||
</command>
|
||||
|
||||
<command>
|
||||
<name>win_route-null</name>
|
||||
<executable>route-null.exe</executable>
|
||||
<timeout_allowed>yes</timeout_allowed>
|
||||
</command>
|
||||
|
||||
<command>
|
||||
<name>netsh</name>
|
||||
<executable>netsh.exe</executable>
|
||||
<timeout_allowed>yes</timeout_allowed>
|
||||
</command>
|
||||
|
||||
<!--
|
||||
<active-response>
|
||||
active-response options here
|
||||
</active-response>
|
||||
-->
|
||||
|
||||
<!-- Log analysis -->
|
||||
<localfile>
|
||||
<log_format>command</log_format>
|
||||
<command>df -P</command>
|
||||
<frequency>360</frequency>
|
||||
</localfile>
|
||||
|
||||
<localfile>
|
||||
<log_format>full_command</log_format>
|
||||
<command>netstat -tulpn | sed 's/\([[:alnum:]]\+\)\ \+[[:digit:]]\+\ \+[[:digit:]]\+\ \+\(.*\):\([[:digit:]]*\)\ \+\([0-9\.\:\*]\+\).\+\ \([[:digit:]]*\/[[:alnum:]\-]*\).*/\1 \2 == \3 == \4 \5/' | sort -k 4 -g | sed 's/ == \(.*\) ==/:\1/' | sed 1,2d</command>
|
||||
<alias>netstat listening ports</alias>
|
||||
<frequency>360</frequency>
|
||||
</localfile>
|
||||
|
||||
<localfile>
|
||||
<log_format>full_command</log_format>
|
||||
<command>last -n 20</command>
|
||||
<frequency>360</frequency>
|
||||
</localfile>
|
||||
|
||||
<ruleset>
|
||||
<!-- Default ruleset -->
|
||||
<decoder_dir>ruleset/decoders</decoder_dir>
|
||||
<rule_dir>ruleset/rules</rule_dir>
|
||||
<rule_exclude>0215-policy_rules.xml</rule_exclude>
|
||||
<list>etc/lists/audit-keys</list>
|
||||
<list>etc/lists/amazon/aws-eventnames</list>
|
||||
<list>etc/lists/security-eventchannel</list>
|
||||
<list>etc/lists/malicious-ioc/malicious-ip</list>
|
||||
<list>etc/lists/malicious-ioc/malicious-domains</list>
|
||||
<list>etc/lists/malicious-ioc/malware-hashes</list>
|
||||
|
||||
<!-- User-defined ruleset -->
|
||||
<decoder_dir>etc/decoders</decoder_dir>
|
||||
<rule_dir>etc/rules</rule_dir>
|
||||
</ruleset>
|
||||
|
||||
<rule_test>
|
||||
<enabled>yes</enabled>
|
||||
<threads>1</threads>
|
||||
<max_sessions>64</max_sessions>
|
||||
<session_timeout>15m</session_timeout>
|
||||
</rule_test>
|
||||
|
||||
<!-- Configuration for wazuh-authd -->
|
||||
<auth>
|
||||
<disabled>no</disabled>
|
||||
<port>1515</port>
|
||||
<use_source_ip>no</use_source_ip>
|
||||
<purge>yes</purge>
|
||||
<use_password>no</use_password>
|
||||
<ciphers>HIGH:!ADH:!EXP:!MD5:!RC4:!3DES:!CAMELLIA:@STRENGTH</ciphers>
|
||||
<!-- <ssl_agent_ca></ssl_agent_ca> -->
|
||||
<ssl_verify_host>no</ssl_verify_host>
|
||||
<ssl_manager_cert>etc/sslmanager.cert</ssl_manager_cert>
|
||||
<ssl_manager_key>etc/sslmanager.key</ssl_manager_key>
|
||||
<ssl_auto_negotiate>no</ssl_auto_negotiate>
|
||||
</auth>
|
||||
|
||||
<cluster>
|
||||
<name>wazuh</name>
|
||||
<node_name>worker01</node_name>
|
||||
<node_type>worker</node_type>
|
||||
<key>c98b6ha9b6169zc5f67rae55ae4z5647</key>
|
||||
<port>1516</port>
|
||||
<bind_addr>0.0.0.0</bind_addr>
|
||||
<nodes>
|
||||
<node>wazuh.master</node>
|
||||
</nodes>
|
||||
<hidden>no</hidden>
|
||||
<disabled>no</disabled>
|
||||
</cluster>
|
||||
|
||||
</ossec_config>
|
||||
|
||||
<ossec_config>
|
||||
<localfile>
|
||||
<log_format>syslog</log_format>
|
||||
<location>/var/ossec/logs/active-responses.log</location>
|
||||
</localfile>
|
||||
|
||||
</ossec_config>
|
Some files were not shown because too many files have changed in this diff.