Compare commits

...

40 Commits

Author SHA1 Message Date
renovate[bot]
d17f115832 Update dependency tailwindcss to v4 2025-11-15 00:10:55 +00:00
9 Technology Group LTD
c770bf1444 API for auto-enrollment
Api
2025-11-15 00:08:37 +00:00
Muhammad Ibrahim
307970ebd4 Fixed formatting 2025-11-15 00:03:02 +00:00
Muhammad Ibrahim
9da341f84c auto-enrolment enhancements 2025-11-14 23:57:43 +00:00
Muhammad Ibrahim
1ca8bf8581 better auto-enrollment system 2025-11-14 22:53:48 +00:00
Muhammad Ibrahim
a4bc9c4aed workspace fix 2025-11-14 21:00:55 +00:00
Muhammad Ibrahim
8f25bc5b8b fixed docker build 2025-11-14 20:54:43 +00:00
Muhammad Ibrahim
a37b479de6 1.3.4 2025-11-14 20:45:40 +00:00
9 Technology Group LTD
3f18074f01 Merge pull request #304 from PatchMon/feature/alpine
new binary for alpine apk support
2025-11-11 12:49:09 +00:00
Muhammad Ibrahim
ab700a3bc8 new binary for alpine apk support 2025-11-11 12:42:00 +00:00
9 Technology Group LTD
9857d7cdfc Merge pull request #302 from PatchMon/feature/api
added the migration file
2025-11-10 22:02:37 +00:00
Muhammad Ibrahim
3f6466c80a added the migration file 2025-11-10 22:00:02 +00:00
9 Technology Group LTD
d7d47089b2 Merge pull request #301 from PatchMon/feature/api
Feature/api
2025-11-10 20:41:36 +00:00
Muhammad Ibrahim
d1069a8bd0 api endpoint and scopes created 2025-11-10 20:34:03 +00:00
Muhammad Ibrahim
bedcd1ac73 added api scope creator 2025-11-10 20:32:40 +00:00
Muhammad Ibrahim
f0b028cb77 alpine support on the agent installation script 2025-11-08 22:00:34 +00:00
9 Technology Group LTD
427743b81e Merge pull request #294 from PatchMon/feature/alpine
alpine support (apk) support agents
2025-11-08 21:26:04 +00:00
Muhammad Ibrahim
8c2d4aa42b alpine support (apk) support agents 2025-11-08 21:15:08 +00:00
9 Technology Group LTD
a4922b4e54 Merge pull request #292 from PatchMon/feature/alpine
arm support
2025-11-08 12:24:42 +00:00
Muhammad Ibrahim
082ceed27c arm support 2025-11-08 12:23:50 +00:00
9 Technology Group LTD
5a3938d7fc Merge pull request #291 from PatchMon/1-3-3
fixed env flag issue when auto-enrolling into proxmox
2025-11-08 09:35:52 +00:00
Muhammad Ibrahim
eb433719dd fixed env flag issue when auto-enrolling into proxmox 2025-11-08 09:34:39 +00:00
9 Technology Group LTD
106ab6f5f8 Merge pull request #290 from PatchMon/1-3-3
new agent files for 1.3.3
2025-11-07 23:07:49 +00:00
Muhammad Ibrahim
148ff2e77f new agent files for 1.3.3 2025-11-07 23:02:53 +00:00
9 Technology Group LTD
a9e4349f5f Merge pull request #289 from PatchMon/1-3-3
1 3 3
2025-11-07 22:33:28 +00:00
Muhammad Ibrahim
a655a24f2f 1.3.3 agents 2025-11-07 22:32:11 +00:00
Muhammad Ibrahim
417f6deccf 1.3.3 version changes 2025-11-07 22:29:13 +00:00
9 Technology Group LTD
3c780d07ff Merge pull request #288 from PatchMon/1-3-3
Bug fixes
2025-11-07 22:14:37 +00:00
Muhammad Ibrahim
55de7b40ed Merge remote 1-3-3 branch with local changes 2025-11-07 22:07:30 +00:00
Muhammad Ibrahim
90e56d62bb Update biome to 2.3.4 to match CI 2025-11-07 22:07:21 +00:00
Muhammad Ibrahim
497aeb8068 fixed biome and added tz 2025-11-07 22:04:27 +00:00
Muhammad Ibrahim
f5b0e930f7 Fixed reporting mechanism 2025-11-07 18:33:17 +00:00
Muhammad Ibrahim
e73ebc383c fixed the issue with graphs on dashboard not showing information correctly, so now statistics are queued into a new table specifically for this and added this in automation queue 2025-11-07 10:00:19 +00:00
9 Technology Group LTD
ef0bcd2240 Merge pull request #285 from FutureCow/patch-1
Add IPv6 support to server configuration
2025-11-07 08:23:46 +00:00
Muhammad Ibrahim
63831caba3 fixed tfa route for handling insertion of tfa number
Better handling of existing systems already enrolled, done via checking if the config.yml file exists and ping through its credentials as opposed to checking for machine_ID
UI justification improvements on repositories pages
2025-11-07 08:20:42 +00:00
Muhammad Ibrahim
8e5eb54e02 fixed code quality 2025-11-06 22:16:35 +00:00
Muhammad Ibrahim
a8eb3ec21c fix docker error handling
fix websocket routes
Add timezone variable in code
changed the env.example to suit
2025-11-06 22:08:00 +00:00
FutureCow
c56debc80e Add IPv6 support to server configuration
Add IPv6 support to server configuration
2025-11-05 20:34:13 +01:00
Muhammad Ibrahim
e57ff7612e removed emoji 2025-11-01 03:25:31 +00:00
Muhammad Ibrahim
7a3d98862f Fix emoji parsing error in print functions
- Changed from echo -e to printf for safer special character handling
- Store emoji characters in variables using bash octal escape sequences
- Prevents 'command not found' error when bash interprets emoji as commands
- Fixes issue where line 41 error occurred during setup.sh --update
2025-11-01 02:58:34 +00:00
46 changed files with 3789 additions and 2151 deletions

agents/direct_host_auto_enroll.sh (new executable file, +270 lines)

@@ -0,0 +1,270 @@
#!/bin/sh
# PatchMon Direct Host Auto-Enrollment Script
# POSIX-compliant shell script (works with dash, ash, bash, etc.)
# Usage: curl -s "https://patchmon.example.com/api/v1/auto-enrollment/script?type=direct-host&token_key=KEY&token_secret=SECRET" | sh
set -e
SCRIPT_VERSION="1.0.0"
# =============================================================================
# PatchMon Direct Host Auto-Enrollment Script
# =============================================================================
# This script automatically enrolls the current host into PatchMon for patch
# management.
#
# Usage:
# curl -s "https://patchmon.example.com/api/v1/auto-enrollment/script?type=direct-host&token_key=KEY&token_secret=SECRET" | sh
#
# With custom friendly name:
# curl -s "https://patchmon.example.com/api/v1/auto-enrollment/script?type=direct-host&token_key=KEY&token_secret=SECRET" | FRIENDLY_NAME="My Server" sh
#
# Requirements:
# - Run as root or with sudo
# - Auto-enrollment token from PatchMon
# - Network access to PatchMon server
# =============================================================================
# ===== CONFIGURATION =====
PATCHMON_URL="${PATCHMON_URL:-https://patchmon.example.com}"
AUTO_ENROLLMENT_KEY="${AUTO_ENROLLMENT_KEY:-}"
AUTO_ENROLLMENT_SECRET="${AUTO_ENROLLMENT_SECRET:-}"
CURL_FLAGS="${CURL_FLAGS:--s}"
FORCE_INSTALL="${FORCE_INSTALL:-false}"
FRIENDLY_NAME="${FRIENDLY_NAME:-}" # Optional: Custom friendly name for the host
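# Illustrative example (not part of the server-rendered configuration above):
# when running a locally saved copy of this script, the same values can be
# supplied via the environment, e.g.
#   PATCHMON_URL="https://patchmon.example.com" \
#   AUTO_ENROLLMENT_KEY="<token key>" AUTO_ENROLLMENT_SECRET="<token secret>" \
#   FRIENDLY_NAME="My Server" sh direct_host_auto_enroll.sh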
# ===== COLOR OUTPUT =====
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# ===== LOGGING FUNCTIONS =====
info() { printf "%b\n" "${GREEN}[INFO]${NC} $1"; }
warn() { printf "%b\n" "${YELLOW}[WARN]${NC} $1"; }
error() { printf "%b\n" "${RED}[ERROR]${NC} $1" >&2; exit 1; }
success() { printf "%b\n" "${GREEN}[SUCCESS]${NC} $1"; }
debug() { [ "${DEBUG:-false}" = "true" ] && printf "%b\n" "${BLUE}[DEBUG]${NC} $1" || true; }
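# Note: debug() only prints when DEBUG=true is set in the environment; the
# other log helpers always print, and error() additionally exits with status 1.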
# ===== BANNER =====
cat << "EOF"
╔═══════════════════════════════════════════════════════════════╗
║                                                               ║
║      ____       _       _     __  __                          ║
║     |  _ \ __ _| |_ ___| |__ |  \/  | ___  _ __               ║
║     | |_) / _` | __/ __| '_ \| |\/| |/ _ \| '_ \              ║
║     |  __/ (_| | || (__| | | | |  | | (_) | | | |             ║
║     |_|   \__,_|\__\___|_| |_|_|  |_|\___/|_| |_|             ║
║                                                               ║
║              Direct Host Auto-Enrollment Script               ║
║                                                               ║
╚═══════════════════════════════════════════════════════════════╝
EOF
echo ""
# ===== VALIDATION =====
info "Validating configuration..."
if [ -z "$AUTO_ENROLLMENT_KEY" ] || [ -z "$AUTO_ENROLLMENT_SECRET" ]; then
error "AUTO_ENROLLMENT_KEY and AUTO_ENROLLMENT_SECRET must be set"
fi
if [ -z "$PATCHMON_URL" ]; then
error "PATCHMON_URL must be set"
fi
# Check if running as root
if [ "$(id -u)" -ne 0 ]; then
error "This script must be run as root (use sudo)"
fi
# Check for required commands
for cmd in curl; do
if ! command -v $cmd >/dev/null 2>&1; then
error "Required command '$cmd' not found. Please install it first."
fi
done
info "Configuration validated successfully"
info "PatchMon Server: $PATCHMON_URL"
echo ""
# ===== GATHER HOST INFORMATION =====
info "Gathering host information..."
# Get hostname
hostname=$(hostname)
# Use FRIENDLY_NAME env var if provided, otherwise use hostname
if [ -n "$FRIENDLY_NAME" ]; then
friendly_name="$FRIENDLY_NAME"
info "Using custom friendly name: $friendly_name"
else
friendly_name="$hostname"
fi
# Try to get machine_id (optional, for tracking)
machine_id=""
if [ -f /etc/machine-id ]; then
machine_id=$(cat /etc/machine-id 2>/dev/null || echo "")
elif [ -f /var/lib/dbus/machine-id ]; then
machine_id=$(cat /var/lib/dbus/machine-id 2>/dev/null || echo "")
fi
# Get OS information
os_info="unknown"
if [ -f /etc/os-release ]; then
os_info=$(grep "^PRETTY_NAME=" /etc/os-release 2>/dev/null | cut -d'"' -f2 || echo "unknown")
fi
# Get IP address (first non-loopback)
ip_address=$(hostname -I 2>/dev/null | awk '{print $1}' || echo "unknown")
# Detect architecture
arch_raw=$(uname -m 2>/dev/null || echo "unknown")
case "$arch_raw" in
"x86_64")
architecture="amd64"
;;
"i386"|"i686")
architecture="386"
;;
"aarch64"|"arm64")
architecture="arm64"
;;
"armv7l"|"armv6l"|"arm")
architecture="arm"
;;
*)
warn " ⚠ Unknown architecture '$arch_raw', defaulting to amd64"
architecture="amd64"
;;
esac
info "Hostname: $hostname"
info "Friendly Name: $friendly_name"
info "IP Address: $ip_address"
info "OS: $os_info"
info "Architecture: $architecture"
if [ -n "$machine_id" ]; then
# POSIX-compliant substring (first 16 chars)
machine_id_short=$(printf "%.16s" "$machine_id")
info "Machine ID: ${machine_id_short}..."
else
info "Machine ID: (not available)"
fi
echo ""
# ===== CHECK IF AGENT ALREADY INSTALLED =====
info "Checking if agent is already configured..."
config_check=$(sh -c "
if [ -f /etc/patchmon/config.yml ] && [ -f /etc/patchmon/credentials.yml ]; then
if [ -f /usr/local/bin/patchmon-agent ]; then
# Try to ping using existing configuration
if /usr/local/bin/patchmon-agent ping >/dev/null 2>&1; then
echo 'ping_success'
else
echo 'ping_failed'
fi
else
echo 'binary_missing'
fi
else
echo 'not_configured'
fi
" 2>/dev/null || echo "error")
if [ "$config_check" = "ping_success" ]; then
success "Host already enrolled and agent ping successful - nothing to do"
exit 0
elif [ "$config_check" = "ping_failed" ]; then
warn "Agent configuration exists but ping failed - will reinstall"
elif [ "$config_check" = "binary_missing" ]; then
warn "Config exists but agent binary missing - will reinstall"
elif [ "$config_check" = "not_configured" ]; then
info "Agent not yet configured - proceeding with enrollment"
else
warn "Could not check agent status - proceeding with enrollment"
fi
echo ""
# ===== ENROLL HOST =====
info "Enrolling $friendly_name in PatchMon..."
# Build JSON payload
json_payload=$(cat <<EOF
{
  "friendly_name": "$friendly_name",
  "metadata": {
    "hostname": "$hostname",
    "ip_address": "$ip_address",
    "os_info": "$os_info",
    "architecture": "$architecture"
  }
}
EOF
)
# Add machine_id if available
if [ -n "$machine_id" ]; then
    json_payload=$(echo "$json_payload" | sed "s/\"friendly_name\"/\"machine_id\": \"$machine_id\",\n \"friendly_name\"/")
fi
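# Caveat: the "\n" in the sed replacement is a GNU sed extension (strictly
# POSIX sed may insert a literal "n"), and none of the interpolated values
# are JSON-escaped, so a friendly name containing a double quote would break
# the payload.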
response=$(curl $CURL_FLAGS -X POST \
    -H "X-Auto-Enrollment-Key: $AUTO_ENROLLMENT_KEY" \
    -H "X-Auto-Enrollment-Secret: $AUTO_ENROLLMENT_SECRET" \
    -H "Content-Type: application/json" \
    -d "$json_payload" \
    "$PATCHMON_URL/api/v1/auto-enrollment/enroll" \
    -w "\n%{http_code}" 2>&1)
http_code=$(echo "$response" | tail -n 1)
body=$(echo "$response" | sed '$d')
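# curl's -w "\n%{http_code}" above appends the HTTP status code as a final
# line, so tail -n 1 extracts the code and sed '$d' leaves just the body.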
if [ "$http_code" = "201" ]; then
# Use grep and cut instead of jq since jq may not be installed
api_id=$(echo "$body" | grep -o '"api_id":"[^"]*' | cut -d'"' -f4 || echo "")
api_key=$(echo "$body" | grep -o '"api_key":"[^"]*' | cut -d'"' -f4 || echo "")
if [ -z "$api_id" ] || [ -z "$api_key" ]; then
error "Failed to parse API credentials from response"
fi
success "Host enrolled successfully: $api_id"
echo ""
# ===== INSTALL AGENT =====
info "Installing PatchMon agent..."
# Build install URL with force flag and architecture
install_url="$PATCHMON_URL/api/v1/hosts/install?arch=$architecture"
if [ "$FORCE_INSTALL" = "true" ]; then
install_url="$install_url&force=true"
info "Using force mode - will bypass broken packages"
fi
info "Using architecture: $architecture"
# Download and execute installation script
install_exit_code=0
install_output=$(curl $CURL_FLAGS \
-H "X-API-ID: $api_id" \
-H "X-API-KEY: $api_key" \
"$install_url" | sh 2>&1) || install_exit_code=$?
# Check both exit code AND success message in output
if [ "$install_exit_code" -eq 0 ] || echo "$install_output" | grep -q "PatchMon Agent installation completed successfully"; then
success "Agent installed successfully"
else
error "Failed to install agent (exit: $install_exit_code)"
fi
else
printf "%b\n" "${RED}[ERROR]${NC} Failed to enroll $friendly_name - HTTP $http_code" >&2
printf "%b\n" "Response: $body" >&2
exit 1
fi
echo ""
success "Auto-enrollment complete!"
exit 0

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Modified: PatchMon agent installation script

@@ -1,7 +1,7 @@
-#!/bin/bash
+#!/bin/sh
 # PatchMon Agent Installation Script
-# Usage: curl -s {PATCHMON_URL}/api/v1/hosts/install -H "X-API-ID: {API_ID}" -H "X-API-KEY: {API_KEY}" | bash
+# POSIX-compliant shell script (works with dash, ash, bash, etc.)
+# Usage: curl -s {PATCHMON_URL}/api/v1/hosts/install -H "X-API-ID: {API_ID}" -H "X-API-KEY: {API_KEY}" | sh
 set -e
@@ -19,65 +19,69 @@ NC='\033[0m' # No Color
 # Functions
 error() {
-    echo -e "${RED}ERROR: $1${NC}" >&2
+    printf "%b\n" "${RED}ERROR: $1${NC}" >&2
     exit 1
 }
 info() {
-    echo -e "${BLUE} $1${NC}"
+    printf "%b\n" "${BLUE}INFO: $1${NC}"
 }
 success() {
-    echo -e "${GREEN} $1${NC}"
+    printf "%b\n" "${GREEN}SUCCESS: $1${NC}"
 }
 warning() {
-    echo -e "${YELLOW}⚠️ $1${NC}"
+    printf "%b\n" "${YELLOW}WARNING: $1${NC}"
 }
 # Check if running as root
-if [[ $EUID -ne 0 ]]; then
+if [ "$(id -u)" -ne 0 ]; then
     error "This script must be run as root (use sudo)"
 fi
 # Verify system datetime and timezone
 verify_datetime() {
-    info "🕐 Verifying system datetime and timezone..."
+    info "Verifying system datetime and timezone..."
     # Get current system time
-    local system_time=$(date)
-    local timezone=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "Unknown")
+    system_time=$(date)
+    timezone=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "Unknown")
    # Display current datetime info
     echo ""
-    echo -e "${BLUE}📅 Current System Date/Time:${NC}"
+    printf "%b\n" "${BLUE}Current System Date/Time:${NC}"
     echo " • Date/Time: $system_time"
     echo " • Timezone: $timezone"
     echo ""
     # Check if we can read from stdin (interactive terminal)
-    if [[ -t 0 ]]; then
+    if [ -t 0 ]; then
         # Interactive terminal - ask user
-        read -p "Does this date/time look correct to you? (y/N): " -r response
-        if [[ "$response" =~ ^[Yy]$ ]]; then
-            success "✅ Date/time verification passed"
-            echo ""
-            return 0
-        else
+        printf "Does this date/time look correct to you? (y/N): "
+        read -r response
+        case "$response" in
+        [Yy]*)
+            success "Date/time verification passed"
+            echo ""
+            return 0
+            ;;
+        *)
             echo ""
-            echo -e "${RED}❌ Date/time verification failed${NC}"
+            printf "%b\n" "${RED}Date/time verification failed${NC}"
             echo ""
-            echo -e "${YELLOW}💡 Please fix the date/time and re-run the installation script:${NC}"
+            printf "%b\n" "${YELLOW}Please fix the date/time and re-run the installation script:${NC}"
             echo " sudo timedatectl set-time 'YYYY-MM-DD HH:MM:SS'"
             echo " sudo timedatectl set-timezone 'America/New_York' # or your timezone"
             echo " sudo timedatectl list-timezones # to see available timezones"
             echo ""
-            echo -e "${BLUE} After fixing the date/time, re-run this installation script.${NC}"
+            printf "%b\n" "${BLUE}After fixing the date/time, re-run this installation script.${NC}"
             error "Installation cancelled - please fix date/time and re-run"
-        fi
+            ;;
+        esac
     else
         # Non-interactive (piped from curl) - show warning and continue
-        echo -e "${YELLOW}⚠️ Non-interactive installation detected${NC}"
+        printf "%b\n" "${YELLOW}Non-interactive installation detected${NC}"
         echo ""
         echo "Please verify the date/time shown above is correct."
         echo "If the date/time is incorrect, it may cause issues with:"
@@ -85,8 +89,8 @@ verify_datetime() {
         echo " • Scheduled updates"
         echo " • Data synchronization"
         echo ""
-        echo -e "${GREEN}Continuing with installation...${NC}"
+        printf "%b\n" "${GREEN}Continuing with installation...${NC}"
         success "Date/time verification completed (assumed correct)"
         echo ""
     fi
 }
@@ -121,9 +125,9 @@ cleanup_old_files
 # Generate or retrieve machine ID
 get_machine_id() {
     # Try multiple sources for machine ID
-    if [[ -f /etc/machine-id ]]; then
+    if [ -f /etc/machine-id ]; then
         cat /etc/machine-id
-    elif [[ -f /var/lib/dbus/machine-id ]]; then
+    elif [ -f /var/lib/dbus/machine-id ]; then
         cat /var/lib/dbus/machine-id
     else
         # Fallback: generate from hardware info (less ideal but works)
@@ -132,45 +136,62 @@ get_machine_id() {
 }
 # Parse arguments from environment (passed via HTTP headers)
-if [[ -z "$PATCHMON_URL" ]] || [[ -z "$API_ID" ]] || [[ -z "$API_KEY" ]]; then
+if [ -z "$PATCHMON_URL" ] || [ -z "$API_ID" ] || [ -z "$API_KEY" ]; then
     error "Missing required parameters. This script should be called via the PatchMon web interface."
 fi
-# Parse architecture parameter (default to amd64)
-ARCHITECTURE="${ARCHITECTURE:-amd64}"
-if [[ "$ARCHITECTURE" != "amd64" && "$ARCHITECTURE" != "386" && "$ARCHITECTURE" != "arm64" ]]; then
-    error "Invalid architecture '$ARCHITECTURE'. Must be one of: amd64, 386, arm64"
+# Auto-detect architecture if not explicitly set
+if [ -z "$ARCHITECTURE" ]; then
+    arch_raw=$(uname -m 2>/dev/null || echo "unknown")
+    # Map architecture to supported values
+    case "$arch_raw" in
+    "x86_64")
+        ARCHITECTURE="amd64"
+        ;;
+    "i386"|"i686")
+        ARCHITECTURE="386"
+        ;;
+    "aarch64"|"arm64")
+        ARCHITECTURE="arm64"
+        ;;
+    "armv7l"|"armv6l"|"arm")
+        ARCHITECTURE="arm"
+        ;;
+    *)
+        warning "Unknown architecture '$arch_raw', defaulting to amd64"
+        ARCHITECTURE="amd64"
+        ;;
+    esac
+fi
+# Validate architecture
+if [ "$ARCHITECTURE" != "amd64" ] && [ "$ARCHITECTURE" != "386" ] && [ "$ARCHITECTURE" != "arm64" ] && [ "$ARCHITECTURE" != "arm" ]; then
+    error "Invalid architecture '$ARCHITECTURE'. Must be one of: amd64, 386, arm64, arm"
 fi
 # Check if --force flag is set (for bypassing broken packages)
 FORCE_INSTALL="${FORCE_INSTALL:-false}"
-if [[ "$*" == *"--force"* ]] || [[ "$FORCE_INSTALL" == "true" ]]; then
+case "$*" in
+*"--force"*) FORCE_INSTALL="true" ;;
+esac
+if [ "$FORCE_INSTALL" = "true" ]; then
     FORCE_INSTALL="true"
-    warning "⚠️ Force mode enabled - will bypass broken packages"
+    warning "Force mode enabled - will bypass broken packages"
 fi
 # Get unique machine ID for this host
 MACHINE_ID=$(get_machine_id)
 export MACHINE_ID
-info "🚀 Starting PatchMon Agent Installation..."
-info "📋 Server: $PATCHMON_URL"
-info "🔑 API ID: ${API_ID:0:16}..."
-info "🆔 Machine ID: ${MACHINE_ID:0:16}..."
-info "🏗️ Architecture: $ARCHITECTURE"
-# Display diagnostic information
-echo ""
-echo -e "${BLUE}🔧 Installation Diagnostics:${NC}"
-echo " • URL: $PATCHMON_URL"
-echo " • CURL FLAGS: $CURL_FLAGS"
-echo " • API ID: ${API_ID:0:16}..."
-echo " • API Key: ${API_KEY:0:16}..."
-echo " • Architecture: $ARCHITECTURE"
-echo ""
+info "Starting PatchMon Agent Installation..."
+info "Server: $PATCHMON_URL"
+info "API ID: $(echo "$API_ID" | cut -c1-16)..."
+info "Machine ID: $(echo "$MACHINE_ID" | cut -c1-16)..."
+info "Architecture: $ARCHITECTURE"
 # Install required dependencies
-info "📦 Installing required dependencies..."
+info "Installing required dependencies..."
 echo ""
 # Function to check if a command exists
@@ -180,52 +201,56 @@ command_exists() {
 # Function to install packages with error handling
 install_apt_packages() {
-    local packages=("$@")
-    local missing_packages=()
+    # Space-separated list of packages
+    _packages="$*"
+    _missing_packages=""
     # Check which packages are missing
-    for pkg in "${packages[@]}"; do
+    for pkg in $_packages; do
         if ! command_exists "$pkg"; then
-            missing_packages+=("$pkg")
+            _missing_packages="$_missing_packages $pkg"
         fi
     done
-    if [ ${#missing_packages[@]} -eq 0 ]; then
+    # Trim leading space
+    _missing_packages=$(echo "$_missing_packages" | sed 's/^ //')
+    if [ -z "$_missing_packages" ]; then
         success "All required packages are already installed"
         return 0
     fi
-    info "Need to install: ${missing_packages[*]}"
+    info "Need to install: $_missing_packages"
     # Build apt-get command based on force mode
-    local apt_cmd="apt-get install ${missing_packages[*]} -y"
-    if [[ "$FORCE_INSTALL" == "true" ]]; then
+    _apt_cmd="apt-get install $_missing_packages -y"
+    if [ "$FORCE_INSTALL" = "true" ]; then
         info "Using force mode - bypassing broken packages..."
-        apt_cmd="$apt_cmd -o APT::Get::Fix-Broken=false -o DPkg::Options::=\"--force-confold\" -o DPkg::Options::=\"--force-confdef\""
+        _apt_cmd="$_apt_cmd -o APT::Get::Fix-Broken=false -o DPkg::Options::=\"--force-confold\" -o DPkg::Options::=\"--force-confdef\""
     fi
     # Try to install packages
-    if eval "$apt_cmd" 2>&1 | tee /tmp/patchmon_apt_install.log; then
+    if eval "$_apt_cmd" 2>&1 | tee /tmp/patchmon_apt_install.log; then
         success "Packages installed successfully"
         return 0
     else
         warning "Package installation encountered issues, checking if required tools are available..."
         # Verify critical dependencies are actually available
-        local all_ok=true
-        for pkg in "${packages[@]}"; do
+        _all_ok=true
+        for pkg in $_packages; do
             if ! command_exists "$pkg"; then
-                if [[ "$FORCE_INSTALL" == "true" ]]; then
+                if [ "$FORCE_INSTALL" = "true" ]; then
                     error "Critical dependency '$pkg' is not available even with --force. Please install manually."
                 else
                     error "Critical dependency '$pkg' is not available. Try again with --force flag or install manually: apt-get install $pkg"
                 fi
-                all_ok=false
+                _all_ok=false
             fi
         done
-        if $all_ok; then
+        if $_all_ok; then
            success "All required tools are available despite installation warnings"
            return 0
        else
@@ -234,7 +259,144 @@ install_apt_packages() {
     fi
 }
-# Detect package manager and install jq and curl
+# Function to check and install packages for yum/dnf
+install_yum_dnf_packages() {
+    _pkg_manager="$1"
+    shift
+    _packages="$*"
+    _missing_packages=""
+    # Check which packages are missing
+    for pkg in $_packages; do
+        if ! command_exists "$pkg"; then
+            _missing_packages="$_missing_packages $pkg"
+        fi
+    done
+    # Trim leading space
+    _missing_packages=$(echo "$_missing_packages" | sed 's/^ //')
+    if [ -z "$_missing_packages" ]; then
+        success "All required packages are already installed"
+        return 0
+    fi
+    info "Need to install: $_missing_packages"
+    if [ "$_pkg_manager" = "yum" ]; then
+        yum install -y $_missing_packages
+    else
+        dnf install -y $_missing_packages
+    fi
+}
+# Function to check and install packages for zypper
+install_zypper_packages() {
+    _packages="$*"
+    _missing_packages=""
+    # Check which packages are missing
+    for pkg in $_packages; do
+        if ! command_exists "$pkg"; then
+            _missing_packages="$_missing_packages $pkg"
+        fi
+    done
+    # Trim leading space
+    _missing_packages=$(echo "$_missing_packages" | sed 's/^ //')
+    if [ -z "$_missing_packages" ]; then
+        success "All required packages are already installed"
+        return 0
+    fi
+    info "Need to install: $_missing_packages"
+    zypper install -y $_missing_packages
+}
+# Function to check and install packages for pacman
+install_pacman_packages() {
+    _packages="$*"
+    _missing_packages=""
+    # Check which packages are missing
+    for pkg in $_packages; do
+        if ! command_exists "$pkg"; then
+            _missing_packages="$_missing_packages $pkg"
+        fi
+    done
+    # Trim leading space
+    _missing_packages=$(echo "$_missing_packages" | sed 's/^ //')
+    if [ -z "$_missing_packages" ]; then
+        success "All required packages are already installed"
+        return 0
+    fi
+    info "Need to install: $_missing_packages"
+    pacman -S --noconfirm $_missing_packages
+}
+# Function to check and install packages for apk
+install_apk_packages() {
+    _packages="$*"
+    _missing_packages=""
+    # Check which packages are missing
+    for pkg in $_packages; do
+        if ! command_exists "$pkg"; then
+            _missing_packages="$_missing_packages $pkg"
+        fi
+    done
+    # Trim leading space
+    _missing_packages=$(echo "$_missing_packages" | sed 's/^ //')
+    if [ -z "$_missing_packages" ]; then
+        success "All required packages are already installed"
+        return 0
+    fi
+    info "Need to install: $_missing_packages"
+    # Update package index before installation
+    info "Updating package index..."
+    apk update -q || true
+    # Build apk command
+    _apk_cmd="apk add --no-cache $_missing_packages"
+    # Try to install packages
+    if eval "$_apk_cmd" 2>&1 | tee /tmp/patchmon_apk_install.log; then
+        success "Packages installed successfully"
+        return 0
+    else
+        warning "Package installation encountered issues, checking if required tools are available..."
+        # Verify critical dependencies are actually available
+        _all_ok=true
+        for pkg in $_packages; do
+            if ! command_exists "$pkg"; then
+                if [ "$FORCE_INSTALL" = "true" ]; then
+                    error "Critical dependency '$pkg' is not available even with --force. Please install manually."
+                else
+                    error "Critical dependency '$pkg' is not available. Try again with --force flag or install manually: apk add $pkg"
+                fi
+                _all_ok=false
+            fi
+        done
+        if $_all_ok; then
+            success "All required tools are available despite installation warnings"
+            return 0
+        else
+            return 1
+        fi
+    fi
+}
+# Detect package manager and install jq, curl, and bc
 if command -v apt-get >/dev/null 2>&1; then
     # Debian/Ubuntu
     info "Detected apt-get (Debian/Ubuntu)"
@@ -242,10 +404,10 @@ if command -v apt-get >/dev/null 2>&1; then
     # Check for broken packages
     if dpkg -l | grep -q "^iH\|^iF" 2>/dev/null; then
-        if [[ "$FORCE_INSTALL" == "true" ]]; then
+        if [ "$FORCE_INSTALL" = "true" ]; then
             warning "Detected broken packages on system - force mode will work around them"
         else
-            warning "⚠️ Broken packages detected on system"
+            warning "Broken packages detected on system"
             warning "If installation fails, retry with: curl -s {URL}/api/v1/hosts/install --force -H ..."
         fi
     fi
@@ -260,31 +422,31 @@ elif command -v yum >/dev/null 2>&1; then
     info "Detected yum (CentOS/RHEL 7)"
     echo ""
     info "Installing jq, curl, and bc..."
-    yum install -y jq curl bc
+    install_yum_dnf_packages yum jq curl bc
 elif command -v dnf >/dev/null 2>&1; then
     # CentOS/RHEL 8+/Fedora
     info "Detected dnf (CentOS/RHEL 8+/Fedora)"
     echo ""
     info "Installing jq, curl, and bc..."
-    dnf install -y jq curl bc
+    install_yum_dnf_packages dnf jq curl bc
 elif command -v zypper >/dev/null 2>&1; then
     # openSUSE
     info "Detected zypper (openSUSE)"
     echo ""
     info "Installing jq, curl, and bc..."
-    zypper install -y jq curl bc
+    install_zypper_packages jq curl bc
 elif command -v pacman >/dev/null 2>&1; then
     # Arch Linux
     info "Detected pacman (Arch Linux)"
     echo ""
     info "Installing jq, curl, and bc..."
-    pacman -S --noconfirm jq curl bc
+    install_pacman_packages jq curl bc
 elif command -v apk >/dev/null 2>&1; then
     # Alpine Linux
     info "Detected apk (Alpine Linux)"
     echo ""
     info "Installing jq, curl, and bc..."
-    apk add --no-cache jq curl bc
+    install_apk_packages jq curl bc
 else
     warning "Could not detect package manager. Please ensure 'jq', 'curl', and 'bc' are installed manually."
 fi
@@ -294,57 +456,88 @@ success "Dependencies installation completed"
 echo ""
 # Step 1: Handle existing configuration directory
-info "📁 Setting up configuration directory..."
+info "Setting up configuration directory..."
 # Check if configuration directory already exists
-if [[ -d "/etc/patchmon" ]]; then
-    warning "⚠️ Configuration directory already exists at /etc/patchmon"
-    warning "⚠️ Preserving existing configuration files"
+if [ -d "/etc/patchmon" ]; then
+    warning "Configuration directory already exists at /etc/patchmon"
+    warning "Preserving existing configuration files"
     # List existing files for user awareness
-    info "📋 Existing files in /etc/patchmon:"
+    info "Existing files in /etc/patchmon:"
     ls -la /etc/patchmon/ 2>/dev/null | grep -v "^total" | while read -r line; do
         echo " $line"
     done
 else
-    info "📁 Creating new configuration directory..."
+    info "Creating new configuration directory..."
     mkdir -p /etc/patchmon
 fi
+# Check if agent is already configured and working (before we overwrite anything)
+info "Checking if agent is already configured..."
+if [ -f /etc/patchmon/config.yml ] && [ -f /etc/patchmon/credentials.yml ]; then
+    if [ -f /usr/local/bin/patchmon-agent ]; then
+        info "Found existing agent configuration"
+        info "Testing existing configuration with ping..."
+        if /usr/local/bin/patchmon-agent ping >/dev/null 2>&1; then
+            success "Agent is already configured and ping successful"
+            info "Existing configuration is working - skipping installation"
+            info ""
+            info "If you want to reinstall, remove the configuration files first:"
+            info " sudo rm -f /etc/patchmon/config.yml /etc/patchmon/credentials.yml"
+            echo ""
+            exit 0
+        else
+            warning "Agent configuration exists but ping failed"
+            warning "Will move existing configuration and reinstall"
+            echo ""
+        fi
+    else
+        warning "Configuration files exist but agent binary is missing"
+        warning "Will move existing configuration and reinstall"
+        echo ""
+    fi
+else
+    success "Agent not yet configured - proceeding with installation"
+    echo ""
+fi
 # Step 2: Create configuration files
-info "🔐 Creating configuration files..."
+info "Creating configuration files..."
 # Check if config file already exists
-if [[ -f "/etc/patchmon/config.yml" ]]; then
-    warning "⚠️ Config file already exists at /etc/patchmon/config.yml"
-    warning "⚠️ Moving existing file out of the way for fresh installation"
+if [ -f "/etc/patchmon/config.yml" ]; then
+    warning "Config file already exists at /etc/patchmon/config.yml"
+    warning "Moving existing file out of the way for fresh installation"
     # Clean up old config backups (keep only last 3)
     ls -t /etc/patchmon/config.yml.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
     # Move existing file out of the way
     mv /etc/patchmon/config.yml /etc/patchmon/config.yml.backup.$(date +%Y%m%d_%H%M%S)
-    info "📋 Moved existing config to: /etc/patchmon/config.yml.backup.$(date +%Y%m%d_%H%M%S)"
+    info "Moved existing config to: /etc/patchmon/config.yml.backup.$(date +%Y%m%d_%H%M%S)"
 fi
 # Check if credentials file already exists
-if [[ -f "/etc/patchmon/credentials.yml" ]]; then
-    warning "⚠️ Credentials file already exists at /etc/patchmon/credentials.yml"
-    warning "⚠️ Moving existing file out of the way for fresh installation"
+if [ -f "/etc/patchmon/credentials.yml" ]; then
+    warning "Credentials file already exists at /etc/patchmon/credentials.yml"
+    warning "Moving existing file out of the way for fresh installation"
     # Clean up old credential backups (keep only last 3)
     ls -t /etc/patchmon/credentials.yml.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
     # Move existing file out of the way
     mv /etc/patchmon/credentials.yml /etc/patchmon/credentials.yml.backup.$(date +%Y%m%d_%H%M%S)
-    info "📋 Moved existing credentials to: /etc/patchmon/credentials.yml.backup.$(date +%Y%m%d_%H%M%S)"
+    info "Moved existing credentials to: /etc/patchmon/credentials.yml.backup.$(date +%Y%m%d_%H%M%S)"
 fi
 # Clean up old credentials file if it exists (from previous installations)
-if [[ -f "/etc/patchmon/credentials" ]]; then
-    warning "⚠️ Found old credentials file, removing it..."
+if [ -f "/etc/patchmon/credentials" ]; then
+    warning "Found old credentials file, removing it..."
     rm -f /etc/patchmon/credentials
-    info "📋 Removed old credentials file"
+    info "Removed old credentials file"
 fi
 # Create main config file
@@ -371,29 +564,29 @@ chmod 600 /etc/patchmon/config.yml
 chmod 600 /etc/patchmon/credentials.yml
 # Step 3: Download the PatchMon agent binary using API credentials
-info "📥 Downloading PatchMon agent binary..."
+info "Downloading PatchMon agent binary..."
 # Determine the binary filename based on architecture
 BINARY_NAME="patchmon-agent-linux-${ARCHITECTURE}"
 # Check if agent binary already exists
-if [[ -f "/usr/local/bin/patchmon-agent" ]]; then
-    warning "⚠️ Agent binary already exists at /usr/local/bin/patchmon-agent"
-    warning "⚠️ Moving existing file out of the way for fresh installation"
+if [ -f "/usr/local/bin/patchmon-agent" ]; then
+    warning "Agent binary already exists at /usr/local/bin/patchmon-agent"
+    warning "Moving existing file out of the way for fresh installation"
     # Clean up old agent backups (keep only last 3)
     ls -t /usr/local/bin/patchmon-agent.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
     # Move existing file out of the way
     mv /usr/local/bin/patchmon-agent /usr/local/bin/patchmon-agent.backup.$(date +%Y%m%d_%H%M%S)
-    info "📋 Moved existing agent to: /usr/local/bin/patchmon-agent.backup.$(date +%Y%m%d_%H%M%S)"
+    info "Moved existing agent to: /usr/local/bin/patchmon-agent.backup.$(date +%Y%m%d_%H%M%S)"
 fi
 # Clean up old shell script if it exists (from previous installations)
-if [[ -f "/usr/local/bin/patchmon-agent.sh" ]]; then
-    warning "⚠️ Found old shell script agent, removing it..."
+if [ -f "/usr/local/bin/patchmon-agent.sh" ]; then
+    warning "Found old shell script agent, removing it..."
     rm -f /usr/local/bin/patchmon-agent.sh
-    info "📋 Removed old shell script agent"
+    info "Removed old shell script agent"
 fi
 # Download the binary
@@ -407,83 +600,52 @@ chmod +x /usr/local/bin/patchmon-agent
 # Get the agent version from the binary
 AGENT_VERSION=$(/usr/local/bin/patchmon-agent version 2>/dev/null || echo "Unknown")
-info "📋 Agent version: $AGENT_VERSION"
+info "Agent version: $AGENT_VERSION"
 # Handle existing log files and create log directory
-info "📁 Setting up log directory..."
+info "Setting up log directory..."
 # Create log directory if it doesn't exist
 mkdir -p /etc/patchmon/logs
 # Handle existing log files
-if [[ -f "/etc/patchmon/logs/patchmon-agent.log" ]]; then
-    warning "⚠️ Existing log file found at /etc/patchmon/logs/patchmon-agent.log"
-    warning "⚠️ Rotating log file for fresh start"
+if [ -f "/etc/patchmon/logs/patchmon-agent.log" ]; then
+    warning "Existing log file found at /etc/patchmon/logs/patchmon-agent.log"
+    warning "Rotating log file for fresh start"
     # Rotate the log file
     mv /etc/patchmon/logs/patchmon-agent.log /etc/patchmon/logs/patchmon-agent.log.old.$(date +%Y%m%d_%H%M%S)
-    info "📋 Log file rotated to: /etc/patchmon/logs/patchmon-agent.log.old.$(date +%Y%m%d_%H%M%S)"
+    info "Log file rotated to: /etc/patchmon/logs/patchmon-agent.log.old.$(date +%Y%m%d_%H%M%S)"
 fi
 # Step 4: Test the configuration
-# Check if this machine is already enrolled
-info "🔍 Checking if machine is already enrolled..."
-existing_check=$(curl $CURL_FLAGS -s -X POST \
-    -H "X-API-ID: $API_ID" \
-    -H "X-API-KEY: $API_KEY" \
-    -H "Content-Type: application/json" \
-    -d "{\"machine_id\": \"$MACHINE_ID\"}" \
-    "$PATCHMON_URL/api/v1/hosts/check-machine-id" \
-    -w "\n%{http_code}" 2>&1)
-http_code=$(echo "$existing_check" | tail -n 1)
-response_body=$(echo "$existing_check" | sed '$d')
-if [[ "$http_code" == "200" ]]; then
-    already_enrolled=$(echo "$response_body" | jq -r '.exists' 2>/dev/null || echo "false")
-    if [[ "$already_enrolled" == "true" ]]; then
-        warning "⚠️ This machine is already enrolled in PatchMon"
-        info "Machine ID: $MACHINE_ID"
-        info "Existing host: $(echo "$response_body" | jq -r '.host.friendly_name' 2>/dev/null)"
-        info ""
-        info "The agent will be reinstalled/updated with existing credentials."
-        echo ""
-    else
-        success "✅ Machine not yet enrolled - proceeding with installation"
-    fi
-fi
-info "🧪 Testing API credentials and connectivity..."
+info "Testing API credentials and connectivity..."
 if /usr/local/bin/patchmon-agent ping; then
     success "TEST: API credentials are valid and server is reachable"
 else
     error "Failed to validate API credentials or reach server"
 fi
-# Step 5: Send initial data and setup systemd service
-info "📊 Sending initial package data to server..."
-if /usr/local/bin/patchmon-agent report; then
-    success "✅ UPDATE: Initial package data sent successfully"
-else
-    warning "⚠️ Failed to send initial data. You can retry later with: /usr/local/bin/patchmon-agent report"
-fi
-# Step 6: Setup systemd service for WebSocket connection
-info "🔧 Setting up systemd service..."
-# Stop and disable existing service if it exists
-if systemctl is-active --quiet patchmon-agent.service 2>/dev/null; then
-    warning "⚠️ Stopping existing PatchMon agent service..."
-    systemctl stop patchmon-agent.service
-fi
-if systemctl is-enabled --quiet patchmon-agent.service 2>/dev/null; then
-    warning "⚠️ Disabling existing PatchMon agent service..."
-    systemctl disable patchmon-agent.service
-fi
-# Create systemd service file
-cat > /etc/systemd/system/patchmon-agent.service << EOF
+# Step 5: Setup service for WebSocket connection
+# Note: The service will automatically send an initial report on startup (see serve.go)
+# Detect init system and create appropriate service
+if command -v systemctl >/dev/null 2>&1; then
+    # Systemd is available
+    info "Setting up systemd service..."
+    # Stop and disable existing service if it exists
+    if systemctl is-active --quiet patchmon-agent.service 2>/dev/null; then
+        warning "Stopping existing PatchMon agent service..."
+        systemctl stop patchmon-agent.service
+    fi
+    if systemctl is-enabled --quiet patchmon-agent.service 2>/dev/null; then
+        warning "Disabling existing PatchMon agent service..."
+        systemctl disable patchmon-agent.service
+    fi
+    # Create systemd service file
+    cat > /etc/systemd/system/patchmon-agent.service << EOF
 [Unit]
 Description=PatchMon Agent Service
 After=network.target
@@ -506,58 +668,153 @@ SyslogIdentifier=patchmon-agent
 WantedBy=multi-user.target
 EOF
 # Clean up old crontab entries if they exist (from previous installations)
 if crontab -l 2>/dev/null | grep -q "patchmon-agent"; then
-    warning "⚠️ Found old crontab entries, removing them..."
+    warning "Found old crontab entries, removing them..."
     crontab -l 2>/dev/null | grep -v "patchmon-agent" | crontab -
-    info "📋 Removed old crontab entries"
+    info "Removed old crontab entries"
 fi
 # Reload systemd and enable/start the service
 systemctl daemon-reload
 systemctl enable patchmon-agent.service
 systemctl start patchmon-agent.service
 # Check if service started successfully
 if systemctl is-active --quiet patchmon-agent.service; then
     success "PatchMon Agent service started successfully"
-    info "🔗 WebSocket connection established"
+    info "WebSocket connection established"
+    else
+        warning "Service may have failed to start. Check status with: systemctl status patchmon-agent"
+    fi
+    SERVICE_TYPE="systemd"
+elif [ -d /etc/init.d ] && command -v rc-service >/dev/null 2>&1; then
+    # OpenRC is available (Alpine Linux)
+    info "Setting up OpenRC service..."
+    # Stop and disable existing service if it exists
+    if rc-service patchmon-agent status >/dev/null 2>&1; then
+        warning "Stopping existing PatchMon agent service..."
+        rc-service patchmon-agent stop
+    fi
+    if rc-update show default 2>/dev/null | grep -q "patchmon-agent"; then
+        warning "Disabling existing PatchMon agent service..."
+        rc-update del patchmon-agent default
+    fi
+    # Create OpenRC service file
+    cat > /etc/init.d/patchmon-agent << 'EOF'
+#!/sbin/openrc-run
+name="patchmon-agent"
+description="PatchMon Agent Service"
+command="/usr/local/bin/patchmon-agent"
+command_args="serve"
+command_user="root"
+pidfile="/var/run/patchmon-agent.pid"
+command_background="yes"
+working_dir="/etc/patchmon"
+depend() {
+    need net
+    after net
+}
+EOF
+    chmod +x /etc/init.d/patchmon-agent
+    # Clean up old crontab entries if they exist (from previous installations)
+    if crontab -l 2>/dev/null | grep -q "patchmon-agent"; then
+        warning "Found old crontab entries, removing them..."
+        crontab -l 2>/dev/null | grep -v "patchmon-agent" | crontab -
+        info "Removed old crontab entries"
+    fi
+    # Enable and start the service
+    rc-update add patchmon-agent default
+    rc-service patchmon-agent start
+    # Check if service started successfully
+    if rc-service patchmon-agent status >/dev/null 2>&1; then
+        success "PatchMon Agent service started successfully"
+        info "WebSocket connection established"
+    else
+        warning "Service may have failed to start. Check status with: rc-service patchmon-agent status"
+    fi
+    SERVICE_TYPE="openrc"
 else
-    warning "⚠️ Service may have failed to start. Check status with: systemctl status patchmon-agent"
+    # No init system detected, use crontab as fallback
+    warning "No init system detected (systemd or OpenRC). Using crontab for service management."
+    # Clean up old crontab entries if they exist
+    if crontab -l 2>/dev/null | grep -q "patchmon-agent"; then
+        warning "Found old crontab entries, removing them..."
+        crontab -l 2>/dev/null | grep -v "patchmon-agent" | crontab -
+        info "Removed old crontab entries"
+    fi
+    # Add crontab entry to run the agent
+    (crontab -l 2>/dev/null; echo "@reboot /usr/local/bin/patchmon-agent serve >/dev/null 2>&1") | crontab -
+    info "Added crontab entry for PatchMon agent"
+    # Start the agent manually
+    /usr/local/bin/patchmon-agent serve >/dev/null 2>&1 &
+    success "PatchMon Agent started in background"
+    info "WebSocket connection established"
+    SERVICE_TYPE="crontab"
 fi
 # Installation complete
-success "🎉 PatchMon Agent installation completed successfully!"
+success "PatchMon Agent installation completed successfully!"
 echo ""
-echo -e "${GREEN}📋 Installation Summary:${NC}"
+printf "%b\n" "${GREEN}Installation Summary:${NC}"
 echo " • Configuration directory: /etc/patchmon"
 echo " • Agent binary installed: /usr/local/bin/patchmon-agent"
 echo " • Architecture: $ARCHITECTURE"
 echo " • Dependencies installed: jq, curl, bc"
-echo " • Systemd service configured and running"
+if [ "$SERVICE_TYPE" = "systemd" ]; then
+    echo " • Systemd service configured and running"
+elif [ "$SERVICE_TYPE" = "openrc" ]; then
+    echo " • OpenRC service configured and running"
+else
+    echo " • Service configured via crontab"
+fi
 echo " • API credentials configured and tested"
 echo " • WebSocket connection established"
 echo " • Logs directory: /etc/patchmon/logs"
 # Check for moved files and show them
 MOVED_FILES=$(ls /etc/patchmon/credentials.yml.backup.* /etc/patchmon/config.yml.backup.* /usr/local/bin/patchmon-agent.backup.* /etc/patchmon/logs/patchmon-agent.log.old.* /usr/local/bin/patchmon-agent.sh.backup.* /etc/patchmon/credentials.backup.* 2>/dev/null || true)
-if [[ -n "$MOVED_FILES" ]]; then
+if [ -n "$MOVED_FILES" ]; then
     echo ""
-    echo -e "${YELLOW}📋 Files Moved for Fresh Installation:${NC}"
+    printf "%b\n" "${YELLOW}Files Moved for Fresh Installation:${NC}"
     echo "$MOVED_FILES" | while read -r moved_file; do
         echo "$moved_file"
     done
     echo ""
-    echo -e "${BLUE}💡 Note: Old files are automatically cleaned up (keeping last 3)${NC}"
+    printf "%b\n" "${BLUE}Note: Old files are automatically cleaned up (keeping last 3)${NC}"
 fi
 echo ""
-echo -e "${BLUE}🔧 Management Commands:${NC}"
+printf "%b\n" "${BLUE}Management Commands:${NC}"
 echo " • Test connection: /usr/local/bin/patchmon-agent ping"
 echo " • Manual report: /usr/local/bin/patchmon-agent report"
 echo " • Check status: /usr/local/bin/patchmon-agent diagnostics"
-echo " • Service status: systemctl status patchmon-agent"
-echo " • Service logs: journalctl -u patchmon-agent -f"
-echo " • Restart service: systemctl restart patchmon-agent"
+if [ "$SERVICE_TYPE" = "systemd" ]; then
+    echo " • Service status: systemctl status patchmon-agent"
+    echo " • Service logs: journalctl -u patchmon-agent -f"
+    echo " • Restart service: systemctl restart patchmon-agent"
+elif [ "$SERVICE_TYPE" = "openrc" ]; then
+    echo " • Service status: rc-service patchmon-agent status"
+    echo " • Service logs: tail -f /etc/patchmon/logs/patchmon-agent.log"
+    echo " • Restart service: rc-service patchmon-agent restart"
+else
+    echo " • Service logs: tail -f /etc/patchmon/logs/patchmon-agent.log"
+    echo " • Restart service: pkill -f 'patchmon-agent serve' && /usr/local/bin/patchmon-agent serve &"
+fi
 echo ""
 success "Your system is now being monitored by PatchMon!"

Modified: PatchMon agent removal script

@@ -1,7 +1,8 @@
-#!/bin/bash
+#!/bin/sh
 # PatchMon Agent Removal Script
-# Usage: curl -s {PATCHMON_URL}/api/v1/hosts/remove | bash
+# POSIX-compliant shell script (works with dash, ash, bash, etc.)
+# Usage: curl -s {PATCHMON_URL}/api/v1/hosts/remove | sh
 # This script completely removes PatchMon from the system
 set -e
@@ -20,24 +21,24 @@ NC='\033[0m' # No Color
 # Functions
 error() {
-    echo -e "${RED}❌ ERROR: $1${NC}" >&2
+    printf "%b\n" "${RED}❌ ERROR: $1${NC}" >&2
     exit 1
 }
 info() {
-    echo -e "${BLUE} $1${NC}"
+    printf "%b\n" "${BLUE} $1${NC}"
 }
 success() {
-    echo -e "${GREEN}$1${NC}"
+    printf "%b\n" "${GREEN}$1${NC}"
 }
 warning() {
-    echo -e "${YELLOW}⚠️ $1${NC}"
+    printf "%b\n" "${YELLOW}⚠️ $1${NC}"
 }
 # Check if running as root
-if [[ $EUID -ne 0 ]]; then
+if [ "$(id -u)" -ne 0 ]; then
     error "This script must be run as root (use sudo)"
 fi
@@ -67,7 +68,7 @@ fi
 # Step 3: Remove agent script
 info "📄 Removing agent script..."
-if [[ -f "/usr/local/bin/patchmon-agent.sh" ]]; then
+if [ -f "/usr/local/bin/patchmon-agent.sh" ]; then
     warning "Removing agent script: /usr/local/bin/patchmon-agent.sh"
     rm -f /usr/local/bin/patchmon-agent.sh
     success "Agent script removed"
@@ -77,7 +78,7 @@ fi
 # Step 4: Remove configuration directory and files
 info "📁 Removing configuration files..."
-if [[ -d "/etc/patchmon" ]]; then
+if [ -d "/etc/patchmon" ]; then
     warning "Removing configuration directory: /etc/patchmon"
     # Show what's being removed
@@ -95,7 +96,7 @@ fi
 # Step 5: Remove log files
 info "📝 Removing log files..."
-if [[ -f "/var/log/patchmon-agent.log" ]]; then
+if [ -f "/var/log/patchmon-agent.log" ]; then
     warning "Removing log file: /var/log/patchmon-agent.log"
     rm -f /var/log/patchmon-agent.log
     success "Log file removed"
@@ -109,29 +110,29 @@ BACKUP_COUNT=0
 # Count credential backups
 CRED_BACKUPS=$(ls /etc/patchmon/credentials.backup.* 2>/dev/null | wc -l || echo "0")
-if [[ $CRED_BACKUPS -gt 0 ]]; then
+if [ "$CRED_BACKUPS" -gt 0 ]; then
     BACKUP_COUNT=$((BACKUP_COUNT + CRED_BACKUPS))
 fi
 # Count agent backups
 AGENT_BACKUPS=$(ls /usr/local/bin/patchmon-agent.sh.backup.* 2>/dev/null | wc -l || echo "0")
-if [[ $AGENT_BACKUPS -gt 0 ]]; then
+if [ "$AGENT_BACKUPS" -gt 0 ]; then
     BACKUP_COUNT=$((BACKUP_COUNT + AGENT_BACKUPS))
 fi
 # Count log backups
 LOG_BACKUPS=$(ls /var/log/patchmon-agent.log.old.* 2>/dev/null | wc -l || echo "0")
-if [[ $LOG_BACKUPS -gt 0 ]]; then
+if [ "$LOG_BACKUPS" -gt 0 ]; then
     BACKUP_COUNT=$((BACKUP_COUNT + LOG_BACKUPS))
 fi
-if [[ $BACKUP_COUNT -gt 0 ]]; then
+if [ "$BACKUP_COUNT" -gt 0 ]; then
     warning "Found $BACKUP_COUNT backup files"
     echo ""
-    echo -e "${YELLOW}📋 Backup files found:${NC}"
+    printf "%b\n" "${YELLOW}📋 Backup files found:${NC}"
     # Show credential backups
-    if [[ $CRED_BACKUPS -gt 0 ]]; then
+    if [ "$CRED_BACKUPS" -gt 0 ]; then
         echo " Credential backups:"
         ls /etc/patchmon/credentials.backup.* 2>/dev/null | while read -r file; do
             echo "$file"
@@ -139,7 +140,7 @@ if [[ $BACKUP_COUNT -gt 0 ]]; then
     fi
     # Show agent backups
-    if [[ $AGENT_BACKUPS -gt 0 ]]; then
+    if [ "$AGENT_BACKUPS" -gt 0 ]; then
         echo " Agent script backups:"
         ls /usr/local/bin/patchmon-agent.sh.backup.* 2>/dev/null | while read -r file; do
             echo "$file"
@@ -147,7 +148,7 @@ if [[ $BACKUP_COUNT -gt 0 ]]; then
     fi
     # Show log backups
-    if [[ $LOG_BACKUPS -gt 0 ]]; then
+    if [ "$LOG_BACKUPS" -gt 0 ]; then
         echo " Log file backups:"
         ls /var/log/patchmon-agent.log.old.* 2>/dev/null | while read -r file; do
             echo "$file"
@@ -155,8 +156,8 @@ if [[ $BACKUP_COUNT -gt 0 ]]; then
     fi
     echo ""
-    echo -e "${BLUE}💡 Note: Backup files are preserved for safety${NC}"
-    echo -e "${BLUE}💡 You can remove them manually if not needed${NC}"
+    printf "%b\n" "${BLUE}💡 Note: Backup files are preserved for safety${NC}"
+    printf "%b\n" "${BLUE}💡 You can remove them manually if not needed${NC}"
 else
     info "No backup files found"
 fi
@@ -165,16 +166,16 @@ fi
 info "📦 Checking for PatchMon-specific dependencies..."
 if command -v jq >/dev/null 2>&1; then
     warning "jq is installed (used by PatchMon)"
-    echo -e "${BLUE}💡 Note: jq may be used by other applications${NC}"
-    echo -e "${BLUE}💡 Consider keeping it unless you're sure it's not needed${NC}"
+    printf "%b\n" "${BLUE}💡 Note: jq may be used by other applications${NC}"
+    printf "%b\n" "${BLUE}💡 Consider keeping it unless you're sure it's not needed${NC}"
 else
     info "jq not found"
 fi
 if command -v curl >/dev/null 2>&1; then
     warning "curl is installed (used by PatchMon)"
-    echo -e "${BLUE}💡 Note: curl is commonly used by many applications${NC}"
-    echo -e "${BLUE}💡 Consider keeping it unless you're sure it's not needed${NC}"
+    printf "%b\n" "${BLUE}💡 Note: curl is commonly used by many applications${NC}"
+    printf "%b\n" "${BLUE}💡 Consider keeping it unless you're sure it's not needed${NC}"
 else
     info "curl not found"
 fi
@@ -183,15 +184,15 @@ fi
 info "🔍 Verifying removal..."
 REMAINING_FILES=0
-if [[ -f "/usr/local/bin/patchmon-agent.sh" ]]; then
+if [ -f "/usr/local/bin/patchmon-agent.sh" ]; then
     REMAINING_FILES=$((REMAINING_FILES + 1))
 fi
-if [[ -d "/etc/patchmon" ]]; then
+if [ -d "/etc/patchmon" ]; then
     REMAINING_FILES=$((REMAINING_FILES + 1))
 fi
-if [[ -f "/var/log/patchmon-agent.log" ]]; then
+if [ -f "/var/log/patchmon-agent.log" ]; then
     REMAINING_FILES=$((REMAINING_FILES + 1))
 fi
@@ -199,15 +200,15 @@ if crontab -l 2>/dev/null | grep -q "patchmon-agent.sh"; then
     REMAINING_FILES=$((REMAINING_FILES + 1))
 fi
-if [[ $REMAINING_FILES -eq 0 ]]; then
+if [ "$REMAINING_FILES" -eq 0 ]; then
     success "✅ PatchMon has been completely removed from the system!"
 else
     warning "⚠️ Some PatchMon files may still remain ($REMAINING_FILES items)"
-    echo -e "${BLUE}💡 You may need to remove them manually${NC}"
+    printf "%b\n" "${BLUE}💡 You may need to remove them manually${NC}"
 fi
 echo ""
-echo -e "${GREEN}📋 Removal Summary:${NC}"
+printf "%b\n" "${GREEN}📋 Removal Summary:${NC}"
 echo " • Agent script: Removed"
 echo " • Configuration files: Removed"
 echo " • Log files: Removed"
@@ -215,7 +216,7 @@ echo " • Crontab entries: Removed"
 echo " • Running processes: Stopped"
 echo " • Backup files: Preserved (if any)"
 echo ""
-echo -e "${BLUE}🔧 Manual cleanup (if needed):${NC}"
+printf "%b\n" "${BLUE}🔧 Manual cleanup (if needed):${NC}"
 echo " • Remove backup files: rm /etc/patchmon/credentials.backup.* /usr/local/bin/patchmon-agent.sh.backup.* /var/log/patchmon-agent.log.old.*"
 echo " • Remove dependencies: apt remove jq curl (if not needed by other apps)"
 echo ""

View File

@@ -230,6 +230,40 @@ while IFS= read -r line; do
info " ✓ Host enrolled successfully: $api_id"
+# Check if agent is already installed and working
+info " Checking if agent is already configured..."
+config_check=$(timeout 10 pct exec "$vmid" -- bash -c "
+if [[ -f /etc/patchmon/config.yml ]] && [[ -f /etc/patchmon/credentials.yml ]]; then
+if [[ -f /usr/local/bin/patchmon-agent ]]; then
+# Try to ping using existing configuration
+if /usr/local/bin/patchmon-agent ping >/dev/null 2>&1; then
+echo 'ping_success'
+else
+echo 'ping_failed'
+fi
+else
+echo 'binary_missing'
+fi
+else
+echo 'not_configured'
+fi
+" 2>/dev/null </dev/null || echo "error")
+if [[ "$config_check" == "ping_success" ]]; then
+info " ✓ Host already enrolled and agent ping successful - skipping"
+((skipped_count++)) || true
+echo ""
+continue
+elif [[ "$config_check" == "ping_failed" ]]; then
+warn " ⚠ Agent configuration exists but ping failed - will reinstall"
+elif [[ "$config_check" == "binary_missing" ]]; then
+warn " ⚠ Config exists but agent binary missing - will reinstall"
+elif [[ "$config_check" == "not_configured" ]]; then
+info " Agent not yet configured - proceeding with installation"
+else
+warn " ⚠ Could not check agent status - proceeding with installation"
+fi
# Ensure curl is installed in the container
info " Checking for curl in container..."
curl_check=$(timeout 10 pct exec "$vmid" -- bash -c "command -v curl >/dev/null 2>&1 && echo 'installed' || echo 'missing'" 2>/dev/null </dev/null || echo "error")
@@ -283,9 +317,11 @@ while IFS= read -r line; do
install_exit_code=0
# Download and execute in separate steps to avoid stdin issues with piping
+# Pass CURL_FLAGS as environment variable to container
install_output=$(timeout 180 pct exec "$vmid" -- bash -c "
+export CURL_FLAGS='$CURL_FLAGS'
cd /tmp
-curl $CURL_FLAGS \
+curl \$CURL_FLAGS \
-H \"X-API-ID: $api_id\" \
-H \"X-API-KEY: $api_key\" \
-o patchmon-install.sh \
@@ -422,9 +458,11 @@ if [[ ${#dpkg_error_containers[@]} -gt 0 ]]; then
info " Retrying agent installation..."
install_exit_code=0
+# Pass CURL_FLAGS as environment variable to container
install_output=$(timeout 180 pct exec "$vmid" -- bash -c "
+export CURL_FLAGS='$CURL_FLAGS'
cd /tmp
-curl $CURL_FLAGS \
+curl \$CURL_FLAGS \
-H \"X-API-ID: $api_id\" \
-H \"X-API-KEY: $api_key\" \
-o patchmon-install.sh \

View File

@@ -54,3 +54,8 @@ ENABLE_LOGGING=true
TFA_REMEMBER_ME_EXPIRES_IN=30d
TFA_MAX_REMEMBER_SESSIONS=5
TFA_SUSPICIOUS_ACTIVITY_THRESHOLD=3
+# Timezone Configuration
+# Set the timezone for timestamps and logs (e.g., 'UTC', 'America/New_York', 'Europe/London')
+# Defaults to UTC if not set. This ensures consistent timezone handling across the application.
+TZ=UTC

View File

@@ -1,6 +1,6 @@
{
"name": "patchmon-backend",
-"version": "1.3.2",
+"version": "1.3.4",
"description": "Backend API for Linux Patch Monitoring System",
"license": "AGPL-3.0",
"main": "src/server.js",

View File

@@ -0,0 +1,16 @@
-- CreateTable
CREATE TABLE "system_statistics" (
"id" TEXT NOT NULL,
"unique_packages_count" INTEGER NOT NULL,
"unique_security_count" INTEGER NOT NULL,
"total_packages" INTEGER NOT NULL,
"total_hosts" INTEGER NOT NULL,
"hosts_needing_updates" INTEGER NOT NULL,
"timestamp" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "system_statistics_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE INDEX "system_statistics_timestamp_idx" ON "system_statistics"("timestamp");

View File

@@ -0,0 +1,3 @@
-- AlterTable
ALTER TABLE "auto_enrollment_tokens" ADD COLUMN "scopes" JSONB;

View File

@@ -0,0 +1,13 @@
-- Remove machine_id unique constraint and make it nullable
-- This allows multiple hosts with the same machine_id
-- Duplicate detection now relies on config.yml/credentials.yml checking instead
-- Drop the unique constraint
ALTER TABLE "hosts" DROP CONSTRAINT IF EXISTS "hosts_machine_id_key";
-- Make machine_id nullable
ALTER TABLE "hosts" ALTER COLUMN "machine_id" DROP NOT NULL;
-- Keep the index for query performance (but not unique)
CREATE INDEX IF NOT EXISTS "hosts_machine_id_idx" ON "hosts"("machine_id");
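Once the unique constraint is gone, Prisma's findUnique can no longer target machine_id — which is exactly why the enrollment routes later in this diff drop that call. Lookups have to go through findFirst or findMany instead (the id value below is a placeholder):

// Before this migration machine_id was @unique, so this worked:
// const host = await prisma.hosts.findUnique({ where: { machine_id } });

// After: machine_id is nullable and non-unique, so use findFirst,
// and be aware several hosts may share the same value.
const host = await prisma.hosts.findFirst({
  where: { machine_id: "example-machine-id" },
});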

View File

@@ -81,7 +81,7 @@ model host_repositories {
model hosts {
id String @id
-machine_id String @unique
+machine_id String?
friendly_name String
ip String?
os_type String
@@ -202,7 +202,7 @@ model update_history {
id String @id
host_id String
packages_count Int
security_count Int
total_packages Int?
payload_size_kb Float?
execution_time Float?
@@ -212,6 +212,18 @@ model update_history {
hosts hosts @relation(fields: [host_id], references: [id], onDelete: Cascade)
}
+model system_statistics {
+id String @id
+unique_packages_count Int
+unique_security_count Int
+total_packages Int
+total_hosts Int
+hosts_needing_updates Int
+timestamp DateTime @default(now())
+@@index([timestamp])
+}
model users {
id String @id
username String @unique
@@ -276,6 +288,7 @@ model auto_enrollment_tokens {
last_used_at DateTime?
expires_at DateTime?
metadata Json?
+scopes Json?
users users? @relation(fields: [created_by_user_id], references: [id], onDelete: SetNull)
host_groups host_groups? @relation(fields: [default_host_group_id], references: [id], onDelete: SetNull)

View File

@@ -0,0 +1,113 @@
const { getPrismaClient } = require("../config/prisma");
const bcrypt = require("bcryptjs");
const prisma = getPrismaClient();
/**
* Middleware factory to authenticate API tokens using Basic Auth
* @param {string} integrationType - The expected integration type (e.g., "api", "gethomepage")
* @returns {Function} Express middleware function
*/
const authenticateApiToken = (integrationType) => {
return async (req, res, next) => {
try {
const authHeader = req.headers.authorization;
if (!authHeader || !authHeader.startsWith("Basic ")) {
return res
.status(401)
.json({ error: "Missing or invalid authorization header" });
}
// Decode base64 credentials
const base64Credentials = authHeader.split(" ")[1];
const credentials = Buffer.from(base64Credentials, "base64").toString(
"ascii",
);
const [apiKey, apiSecret] = credentials.split(":");
if (!apiKey || !apiSecret) {
return res.status(401).json({ error: "Invalid credentials format" });
}
// Find the token in database
const token = await prisma.auto_enrollment_tokens.findUnique({
where: { token_key: apiKey },
include: {
users: {
select: {
id: true,
username: true,
role: true,
},
},
},
});
if (!token) {
console.log(`API key not found: ${apiKey}`);
return res.status(401).json({ error: "Invalid API key" });
}
// Check if token is active
if (!token.is_active) {
return res.status(401).json({ error: "API key is disabled" });
}
// Check if token has expired
if (token.expires_at && new Date(token.expires_at) < new Date()) {
return res.status(401).json({ error: "API key has expired" });
}
// Check if token is for the expected integration type
if (token.metadata?.integration_type !== integrationType) {
return res.status(401).json({ error: "Invalid API key type" });
}
// Verify the secret
const isValidSecret = await bcrypt.compare(apiSecret, token.token_secret);
if (!isValidSecret) {
return res.status(401).json({ error: "Invalid API secret" });
}
// Check IP restrictions if any
if (token.allowed_ip_ranges && token.allowed_ip_ranges.length > 0) {
const clientIp = req.ip || req.connection.remoteAddress;
const forwardedFor = req.headers["x-forwarded-for"];
const realIp = req.headers["x-real-ip"];
// Get the actual client IP (considering proxies)
const actualClientIp = forwardedFor
? forwardedFor.split(",")[0].trim()
: realIp || clientIp;
const isAllowedIp = token.allowed_ip_ranges.some((range) => {
// Simple IP range check (can be enhanced for CIDR support)
return actualClientIp.startsWith(range) || actualClientIp === range;
});
if (!isAllowedIp) {
console.log(
`IP validation failed. Client IP: ${actualClientIp}, Allowed ranges: ${token.allowed_ip_ranges.join(", ")}`,
);
return res.status(403).json({ error: "IP address not allowed" });
}
}
// Update last used timestamp
await prisma.auto_enrollment_tokens.update({
where: { id: token.id },
data: { last_used_at: new Date() },
});
// Attach token info to request
req.apiToken = token;
next();
} catch (error) {
console.error("API key authentication error:", error);
res.status(500).json({ error: "Authentication failed" });
}
};
};
module.exports = { authenticateApiToken };
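Clients authenticate with HTTP Basic auth where the username is the token key and the password is the plaintext secret (shown only once at creation). A hedged client sketch; the host name is illustrative, the /api/v1/api/hosts path comes from the route below:

// Sketch: calling an API-scoped endpoint with Basic auth credentials.
const key = "pm_live_example_key";    // token_key
const secret = "example_secret";      // token_secret
const auth = Buffer.from(`${key}:${secret}`).toString("base64");

const res = await fetch("https://patchmon.example.com/api/v1/api/hosts", {
  headers: { Authorization: `Basic ${auth}` },
});
if (!res.ok) throw new Error(`Request failed: ${res.status}`);
console.log(await res.json());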

View File

@@ -0,0 +1,76 @@
/**
* Middleware factory to validate API token scopes
* Only applies to tokens with metadata.integration_type === "api"
* @param {string} resource - The resource being accessed (e.g., "host")
* @param {string} action - The action being performed (e.g., "get", "put", "patch", "update", "delete")
* @returns {Function} Express middleware function
*/
const requireApiScope = (resource, action) => {
return async (req, res, next) => {
try {
const token = req.apiToken;
// If no token attached, this should have been caught by auth middleware
if (!token) {
return res.status(401).json({ error: "Unauthorized" });
}
// Only validate scopes for API type tokens
if (token.metadata?.integration_type !== "api") {
// For non-API tokens, skip scope validation
return next();
}
// Check if token has scopes field
if (!token.scopes || typeof token.scopes !== "object") {
console.warn(
`API token ${token.token_key} missing scopes field for ${resource}:${action}`,
);
return res.status(403).json({
error: "Access denied",
message: "This API key does not have the required permissions",
});
}
// Check if resource exists in scopes
if (!token.scopes[resource]) {
console.warn(
`API token ${token.token_key} missing resource ${resource} for ${action}`,
);
return res.status(403).json({
error: "Access denied",
message: `This API key does not have access to ${resource}`,
});
}
// Check if action exists in resource scopes
if (!Array.isArray(token.scopes[resource])) {
console.warn(
`API token ${token.token_key} has invalid scopes structure for ${resource}`,
);
return res.status(403).json({
error: "Access denied",
message: "Invalid API key permissions configuration",
});
}
if (!token.scopes[resource].includes(action)) {
console.warn(
`API token ${token.token_key} missing action ${action} for resource ${resource}`,
);
return res.status(403).json({
error: "Access denied",
message: `This API key does not have permission to ${action} ${resource}`,
});
}
// Scope validation passed
next();
} catch (error) {
console.error("Scope validation error:", error);
res.status(500).json({ error: "Scope validation failed" });
}
};
};
module.exports = { requireApiScope };
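Scopes are stored as a JSON object mapping each resource to an array of allowed action strings. A token shaped like the example below passes requireApiScope("host", "get") but would be rejected for any other action:

// Example scopes document in the shape this middleware expects:
const scopes = {
  host: ["get"],
  // host: ["get", "update", "delete"] would also permit those actions
};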

View File

@@ -0,0 +1,143 @@
const express = require("express");
const { getPrismaClient } = require("../config/prisma");
const { authenticateApiToken } = require("../middleware/apiAuth");
const { requireApiScope } = require("../middleware/apiScope");
const router = express.Router();
const prisma = getPrismaClient();
// Helper function to check if a string is a valid UUID
const isUUID = (str) => {
const uuidRegex =
/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
return uuidRegex.test(str);
};
// GET /api/v1/api/hosts - List hosts with IP and groups
router.get(
"/hosts",
authenticateApiToken("api"),
requireApiScope("host", "get"),
async (req, res) => {
try {
const { hostgroup } = req.query;
let whereClause = {};
let filterValues = [];
// Parse hostgroup filter (comma-separated names or UUIDs)
if (hostgroup) {
filterValues = hostgroup.split(",").map((g) => g.trim());
// Separate UUIDs from names
const uuidFilters = [];
const nameFilters = [];
for (const value of filterValues) {
if (isUUID(value)) {
uuidFilters.push(value);
} else {
nameFilters.push(value);
}
}
// Find host group IDs from names
const groupIds = [...uuidFilters];
if (nameFilters.length > 0) {
const groups = await prisma.host_groups.findMany({
where: {
name: {
in: nameFilters,
},
},
select: {
id: true,
name: true,
},
});
// Add found group IDs
groupIds.push(...groups.map((g) => g.id));
// Check if any name filters didn't match
const foundNames = groups.map((g) => g.name);
const notFoundNames = nameFilters.filter(
(name) => !foundNames.includes(name),
);
if (notFoundNames.length > 0) {
console.warn(`Host groups not found: ${notFoundNames.join(", ")}`);
}
}
// Filter hosts by group memberships
if (groupIds.length > 0) {
whereClause = {
host_group_memberships: {
some: {
host_group_id: {
in: groupIds,
},
},
},
};
} else {
// No valid groups found, return empty result
return res.json({
hosts: [],
total: 0,
filtered_by_groups: filterValues,
});
}
}
// Query hosts with groups
const hosts = await prisma.hosts.findMany({
where: whereClause,
select: {
id: true,
friendly_name: true,
hostname: true,
ip: true,
host_group_memberships: {
include: {
host_groups: {
select: {
id: true,
name: true,
},
},
},
},
},
orderBy: {
friendly_name: "asc",
},
});
// Format response
const formattedHosts = hosts.map((host) => ({
id: host.id,
friendly_name: host.friendly_name,
hostname: host.hostname,
ip: host.ip,
host_groups: host.host_group_memberships.map((membership) => ({
id: membership.host_groups.id,
name: membership.host_groups.name,
})),
}));
res.json({
hosts: formattedHosts,
total: formattedHosts.length,
filtered_by_groups: filterValues.length > 0 ? filterValues : undefined,
});
} catch (error) {
console.error("Error fetching hosts:", error);
res.status(500).json({ error: "Failed to fetch hosts" });
}
},
);
module.exports = router;
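The hostgroup filter accepts a comma-separated mix of group names and UUIDs, as the parsing above shows. A hedged usage sketch; base URL and credentials are illustrative:

// Sketch: listing hosts in two groups, mixing a name and a UUID.
const auth = Buffer.from("pm_live_example_key:example_secret").toString("base64");
const url = new URL("https://patchmon.example.com/api/v1/api/hosts");
url.searchParams.set("hostgroup", "production,2f6d8a1e-0b4c-4c1a-9f7e-1d2c3b4a5e6f");

const res = await fetch(url, { headers: { Authorization: `Basic ${auth}` } });
const { hosts, total } = await res.json();
console.log(total, hosts.map((h) => h.friendly_name));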

View File

@@ -125,6 +125,10 @@ router.post(
.optional({ nullable: true, checkFalsy: true })
.isISO8601()
.withMessage("Invalid date format"),
+body("scopes")
+.optional()
+.isObject()
+.withMessage("Scopes must be an object"),
],
async (req, res) => {
try {
@@ -140,6 +144,7 @@ router.post(
default_host_group_id,
expires_at,
metadata = {},
+scopes,
} = req.body;
// Validate host group if provided
@@ -153,6 +158,32 @@ router.post(
}
}
+// Validate scopes for API tokens
+if (metadata.integration_type === "api" && scopes) {
+// Validate scopes structure
+if (typeof scopes !== "object" || scopes === null) {
+return res.status(400).json({ error: "Scopes must be an object" });
+}
+// Validate each resource in scopes
+for (const [resource, actions] of Object.entries(scopes)) {
+if (!Array.isArray(actions)) {
+return res.status(400).json({
+error: `Scopes for resource "${resource}" must be an array of actions`,
+});
+}
+// Validate action names
+for (const action of actions) {
+if (typeof action !== "string") {
+return res.status(400).json({
+error: `All actions in scopes must be strings`,
+});
+}
+}
+}
+}
const { token_key, token_secret } = generate_auto_enrollment_token();
const hashed_secret = await bcrypt.hash(token_secret, 10);
@@ -168,6 +199,7 @@ router.post(
default_host_group_id: default_host_group_id || null,
expires_at: expires_at ? new Date(expires_at) : null,
metadata: { integration_type: "proxmox-lxc", ...metadata },
+scopes: metadata.integration_type === "api" ? scopes || null : null,
updated_at: new Date(),
},
include: {
@@ -201,6 +233,7 @@ router.post(
default_host_group: token.host_groups,
created_by: token.users,
expires_at: token.expires_at,
+scopes: token.scopes,
},
warning: "⚠️ Save the token_secret now - it cannot be retrieved later!",
});
@@ -232,6 +265,7 @@ router.get(
created_at: true,
default_host_group_id: true,
metadata: true,
+scopes: true,
host_groups: {
select: {
id: true,
@@ -314,6 +348,10 @@ router.patch(
body("max_hosts_per_day").optional().isInt({ min: 1, max: 1000 }),
body("allowed_ip_ranges").optional().isArray(),
body("expires_at").optional().isISO8601(),
+body("scopes")
+.optional()
+.isObject()
+.withMessage("Scopes must be an object"),
],
async (req, res) => {
try {
@@ -323,6 +361,16 @@ router.patch(
}
const { tokenId } = req.params;
+// First, get the existing token to check its integration type
+const existing_token = await prisma.auto_enrollment_tokens.findUnique({
+where: { id: tokenId },
+});
+if (!existing_token) {
+return res.status(404).json({ error: "Token not found" });
+}
const update_data = { updated_at: new Date() };
if (req.body.is_active !== undefined)
@@ -334,6 +382,41 @@ router.patch(
if (req.body.expires_at !== undefined)
update_data.expires_at = new Date(req.body.expires_at);
+// Handle scopes updates for API tokens only
+if (req.body.scopes !== undefined) {
+if (existing_token.metadata?.integration_type === "api") {
+// Validate scopes structure
+const scopes = req.body.scopes;
+if (typeof scopes !== "object" || scopes === null) {
+return res.status(400).json({ error: "Scopes must be an object" });
+}
+// Validate each resource in scopes
+for (const [resource, actions] of Object.entries(scopes)) {
+if (!Array.isArray(actions)) {
+return res.status(400).json({
+error: `Scopes for resource "${resource}" must be an array of actions`,
+});
+}
+// Validate action names
+for (const action of actions) {
+if (typeof action !== "string") {
+return res.status(400).json({
+error: `All actions in scopes must be strings`,
+});
+}
+}
+}
+update_data.scopes = scopes;
+} else {
+return res.status(400).json({
+error: "Scopes can only be updated for API integration tokens",
+});
+}
+}
const token = await prisma.auto_enrollment_tokens.update({
where: { id: tokenId },
data: update_data,
@@ -398,19 +481,22 @@ router.delete(
);
// ========== AUTO-ENROLLMENT ENDPOINTS (Used by Scripts) ==========
-// Future integrations can follow this pattern:
-// - /proxmox-lxc - Proxmox LXC containers
-// - /vmware-esxi - VMware ESXi VMs
-// - /docker - Docker containers
-// - /kubernetes - Kubernetes pods
-// - /aws-ec2 - AWS EC2 instances
+// Universal script-serving endpoint with type parameter
+// Supported types:
+// - proxmox-lxc - Proxmox LXC containers
+// - direct-host - Direct host enrollment
+// Future types:
+// - vmware-esxi - VMware ESXi VMs
+// - docker - Docker containers
+// - kubernetes - Kubernetes pods
-// Serve the Proxmox LXC enrollment script with credentials injected
-router.get("/proxmox-lxc", async (req, res) => {
+// Serve auto-enrollment scripts with credentials injected
+router.get("/script", async (req, res) => {
try {
-// Get token from query params
+// Get parameters from query params
const token_key = req.query.token_key;
const token_secret = req.query.token_secret;
+const script_type = req.query.type;
if (!token_key || !token_secret) {
return res
@@ -418,6 +504,25 @@ router.get("/script", async (req, res) => {
.json({ error: "Token key and secret required as query parameters" });
}
+if (!script_type) {
+return res.status(400).json({
+error:
+"Script type required as query parameter (e.g., ?type=proxmox-lxc or ?type=direct-host)",
+});
+}
+// Map script types to script file paths
+const scriptMap = {
+"proxmox-lxc": "proxmox_auto_enroll.sh",
+"direct-host": "direct_host_auto_enroll.sh",
+};
+if (!scriptMap[script_type]) {
+return res.status(400).json({
+error: `Invalid script type: ${script_type}. Supported types: ${Object.keys(scriptMap).join(", ")}`,
+});
+}
// Validate token
const token = await prisma.auto_enrollment_tokens.findUnique({
where: { token_key: token_key },
@@ -443,13 +548,13 @@ router.get("/script", async (req, res) => {
const script_path = path.join(
__dirname,
-"../../../agents/proxmox_auto_enroll.sh",
+`../../../agents/${scriptMap[script_type]}`,
);
if (!fs.existsSync(script_path)) {
-return res
-.status(404)
-.json({ error: "Proxmox enrollment script not found" });
+return res.status(404).json({
+error: `Enrollment script not found: ${scriptMap[script_type]}`,
+});
}
let script = fs.readFileSync(script_path, "utf8");
@@ -484,7 +589,7 @@ router.get("/script", async (req, res) => {
const force_install = req.query.force === "true" || req.query.force === "1";
// Inject the token credentials, server URL, curl flags, and force flag into the script
-const env_vars = `#!/bin/bash
+const env_vars = `#!/bin/sh
# PatchMon Auto-Enrollment Configuration (Auto-generated)
export PATCHMON_URL="${server_url}"
export AUTO_ENROLLMENT_KEY="${token.token_key}"
@@ -508,11 +613,11 @@ export FORCE_INSTALL="${force_install ? "true" : "false"}"
res.setHeader("Content-Type", "text/plain");
res.setHeader(
"Content-Disposition",
-'inline; filename="proxmox_auto_enroll.sh"',
+`inline; filename="${scriptMap[script_type]}"`,
);
res.send(script);
} catch (error) {
-console.error("Proxmox script serve error:", error);
+console.error("Script serve error:", error);
res.status(500).json({ error: "Failed to serve enrollment script" });
}
});
@@ -526,8 +631,11 @@ router.post(
.isLength({ min: 1, max: 255 })
.withMessage("Friendly name is required"),
body("machine_id")
+.optional()
.isLength({ min: 1, max: 255 })
-.withMessage("Machine ID is required"),
+.withMessage(
+"Machine ID must be between 1 and 255 characters if provided",
+),
body("metadata").optional().isObject(),
],
async (req, res) => {
@@ -543,24 +651,7 @@ router.post(
const api_id = `patchmon_${crypto.randomBytes(8).toString("hex")}`;
const api_key = crypto.randomBytes(32).toString("hex");
-// Check if host already exists by machine_id (not hostname)
-const existing_host = await prisma.hosts.findUnique({
-where: { machine_id },
-});
-if (existing_host) {
-return res.status(409).json({
-error: "Host already exists",
-host_id: existing_host.id,
-api_id: existing_host.api_id,
-machine_id: existing_host.machine_id,
-friendly_name: existing_host.friendly_name,
-message:
-"This machine is already enrolled in PatchMon (matched by machine ID)",
-});
-}
-// Create host
+// Create host (no duplicate check - using config.yml checking instead)
const host = await prisma.hosts.create({
data: {
id: uuidv4(),
@@ -677,30 +768,7 @@ router.post(
try {
const { friendly_name, machine_id } = host_data;
-if (!machine_id) {
-results.failed.push({
-friendly_name,
-error: "Machine ID is required",
-});
-continue;
-}
-// Check if host already exists by machine_id
-const existing_host = await prisma.hosts.findUnique({
-where: { machine_id },
-});
-if (existing_host) {
-results.skipped.push({
-friendly_name,
-machine_id,
-reason: "Machine already enrolled",
-api_id: existing_host.api_id,
-});
-continue;
-}
-// Generate credentials
+// Generate credentials (no duplicate check - using config.yml checking instead)
const api_id = `patchmon_${crypto.randomBytes(8).toString("hex")}`;
const api_key = crypto.randomBytes(32).toString("hex");
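With the universal endpoint above, an operator fetches an enrollment script by type instead of hitting a per-integration route. A hedged sketch; the /api/v1/auto-enrollment mount path is an assumption for illustration:

// Sketch: downloading the proxmox-lxc enrollment script by type.
const u = new URL("https://patchmon.example.com/api/v1/auto-enrollment/script");
u.searchParams.set("type", "proxmox-lxc");
u.searchParams.set("token_key", "pm_live_example_key");
u.searchParams.set("token_secret", "example_secret");

const script = await (await fetch(u)).text();
console.log(script.split("\n")[0]); // "#!/bin/sh" after this change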

View File

@@ -242,6 +242,30 @@ router.post(
},
);
+// Trigger manual system statistics collection
+router.post(
+"/trigger/system-statistics",
+authenticateToken,
+async (_req, res) => {
+try {
+const job = await queueManager.triggerSystemStatistics();
+res.json({
+success: true,
+data: {
+jobId: job.id,
+message: "System statistics collection triggered successfully",
+},
+});
+} catch (error) {
+console.error("Error triggering system statistics collection:", error);
+res.status(500).json({
+success: false,
+error: "Failed to trigger system statistics collection",
+});
+}
+},
+);
// Get queue health status
router.get("/health", authenticateToken, async (_req, res) => {
try {
@@ -300,6 +324,7 @@ router.get("/overview", authenticateToken, async (_req, res) => {
queueManager.getRecentJobs(QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP, 1),
queueManager.getRecentJobs(QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP, 1),
queueManager.getRecentJobs(QUEUE_NAMES.AGENT_COMMANDS, 1),
+queueManager.getRecentJobs(QUEUE_NAMES.SYSTEM_STATISTICS, 1),
]);
// Calculate overview metrics
@@ -309,21 +334,24 @@ router.get("/overview", authenticateToken, async (_req, res) => {
stats[QUEUE_NAMES.SESSION_CLEANUP].delayed +
stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].delayed +
stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].delayed +
-stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].delayed,
+stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].delayed +
+stats[QUEUE_NAMES.SYSTEM_STATISTICS].delayed,
runningTasks:
stats[QUEUE_NAMES.GITHUB_UPDATE_CHECK].active +
stats[QUEUE_NAMES.SESSION_CLEANUP].active +
stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].active +
stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].active +
-stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].active,
+stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].active +
+stats[QUEUE_NAMES.SYSTEM_STATISTICS].active,
failedTasks:
stats[QUEUE_NAMES.GITHUB_UPDATE_CHECK].failed +
stats[QUEUE_NAMES.SESSION_CLEANUP].failed +
stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].failed +
stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].failed +
-stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].failed,
+stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].failed +
+stats[QUEUE_NAMES.SYSTEM_STATISTICS].failed,
totalAutomations: Object.values(stats).reduce((sum, queueStats) => {
return (
@@ -435,6 +463,22 @@ router.get("/overview", authenticateToken, async (_req, res) => {
: "Never run",
stats: stats[QUEUE_NAMES.AGENT_COMMANDS],
},
+{
+name: "System Statistics Collection",
+queue: QUEUE_NAMES.SYSTEM_STATISTICS,
+description: "Collects aggregated system-wide package statistics",
+schedule: "Every 30 minutes",
+lastRun: recentJobs[6][0]?.finishedOn
+? new Date(recentJobs[6][0].finishedOn).toLocaleString()
+: "Never",
+lastRunTimestamp: recentJobs[6][0]?.finishedOn || 0,
+status: recentJobs[6][0]?.failedReason
+? "Failed"
+: recentJobs[6][0]
+? "Success"
+: "Never run",
+stats: stats[QUEUE_NAMES.SYSTEM_STATISTICS],
+},
].sort((a, b) => {
// Sort by last run timestamp (most recent first)
// If both have never run (timestamp 0), maintain original order
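The new queue feeds the overview sums above and can also be kicked by hand through the trigger endpoint. A hedged client sketch; the /api/v1/automation mount path and bearer-token auth are assumptions for illustration:

// Sketch: manually triggering the statistics job.
const jwt = process.env.PATCHMON_JWT; // session token from login, illustrative
const res = await fetch(
  "https://patchmon.example.com/api/v1/automation/trigger/system-statistics",
  { method: "POST", headers: { Authorization: `Bearer ${jwt}` } },
);
const { data } = await res.json();
console.log(`queued job ${data.jobId}`);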

View File

@@ -564,174 +564,216 @@ router.get(
const startDate = new Date();
startDate.setDate(endDate.getDate() - daysInt);
-// Build where clause
-const whereClause = {
-timestamp: {
-gte: startDate,
-lte: endDate,
-},
-};
-// Add host filter if specified
-if (hostId && hostId !== "all" && hostId !== "undefined") {
-whereClause.host_id = hostId;
-}
-// Get all update history records in the date range
-const trendsData = await prisma.update_history.findMany({
-where: whereClause,
-select: {
-timestamp: true,
-packages_count: true,
-security_count: true,
-total_packages: true,
-host_id: true,
-status: true,
-},
-orderBy: {
-timestamp: "asc",
-},
-});
-// Enhanced data validation and processing
-const processedData = trendsData
-.filter((record) => {
-// Enhanced validation
-return (
-record.total_packages !== null &&
-record.total_packages >= 0 &&
-record.packages_count >= 0 &&
-record.security_count >= 0 &&
-record.security_count <= record.packages_count && // Security can't exceed outdated
-record.status === "success"
-); // Only include successful reports
-})
-.map((record) => {
-const date = new Date(record.timestamp);
-let timeKey;
-if (daysInt <= 1) {
-// For hourly view, group by hour only (not minutes)
-timeKey = date.toISOString().substring(0, 13); // YYYY-MM-DDTHH
-} else {
-// For daily view, group by day
-timeKey = date.toISOString().split("T")[0]; // YYYY-MM-DD
-}
-return {
-timeKey,
-total_packages: record.total_packages,
-packages_count: record.packages_count || 0,
-security_count: record.security_count || 0,
-host_id: record.host_id,
-timestamp: record.timestamp,
-};
-});
// Determine if we need aggregation based on host filter
const needsAggregation =
!hostId || hostId === "all" || hostId === "undefined";
+let trendsData;
+if (needsAggregation) {
+// For "All Hosts" mode, use system_statistics table
+trendsData = await prisma.system_statistics.findMany({
+where: {
+timestamp: {
+gte: startDate,
+lte: endDate,
+},
+},
+select: {
+timestamp: true,
+unique_packages_count: true,
+unique_security_count: true,
+total_packages: true,
+total_hosts: true,
+hosts_needing_updates: true,
+},
+orderBy: {
+timestamp: "asc",
+},
+});
+} else {
+// For individual host, use update_history table
+trendsData = await prisma.update_history.findMany({
+where: {
+host_id: hostId,
+timestamp: {
+gte: startDate,
+lte: endDate,
+},
+},
+select: {
+timestamp: true,
+packages_count: true,
+security_count: true,
+total_packages: true,
+host_id: true,
+status: true,
+},
+orderBy: {
+timestamp: "asc",
+},
+});
+}
+// Process data based on source
+let processedData;
let aggregatedArray;
if (needsAggregation) {
-// For "All Hosts" mode, we need to calculate the actual total packages differently
-// Instead of aggregating historical data (which is per-host), we'll use the current total
-// and show that as a flat line, since total packages don't change much over time
-// Get the current total packages count (unique packages across all hosts)
-const currentTotalPackages = await prisma.packages.count({
-where: {
-host_packages: {
-some: {}, // At least one host has this package
-},
-},
-});
-// Aggregate data by timeKey when looking at "All Hosts" or no specific host
-const aggregatedData = processedData.reduce((acc, item) => {
-if (!acc[item.timeKey]) {
-acc[item.timeKey] = {
-timeKey: item.timeKey,
-total_packages: currentTotalPackages, // Use current total packages
-packages_count: 0,
-security_count: 0,
-record_count: 0,
-host_ids: new Set(),
-min_timestamp: item.timestamp,
-max_timestamp: item.timestamp,
-};
-}
-// For outdated and security packages: SUM (these represent counts across hosts)
-acc[item.timeKey].packages_count += item.packages_count;
-acc[item.timeKey].security_count += item.security_count;
-acc[item.timeKey].record_count += 1;
-acc[item.timeKey].host_ids.add(item.host_id);
-// Track timestamp range
-if (item.timestamp < acc[item.timeKey].min_timestamp) {
-acc[item.timeKey].min_timestamp = item.timestamp;
-}
-if (item.timestamp > acc[item.timeKey].max_timestamp) {
-acc[item.timeKey].max_timestamp = item.timestamp;
-}
-return acc;
-}, {});
-// Convert to array and add metadata
-aggregatedArray = Object.values(aggregatedData)
-.map((item) => ({
-...item,
-host_count: item.host_ids.size,
-host_ids: Array.from(item.host_ids),
-}))
-.sort((a, b) => a.timeKey.localeCompare(b.timeKey));
+// For "All Hosts" mode, data comes from system_statistics table
+// Already aggregated, just need to format it
+processedData = trendsData
+.filter((record) => {
+// Enhanced validation
+return (
+record.total_packages !== null &&
+record.total_packages >= 0 &&
+record.unique_packages_count >= 0 &&
+record.unique_security_count >= 0 &&
+record.unique_security_count <= record.unique_packages_count
+);
+})
+.map((record) => {
+const date = new Date(record.timestamp);
+let timeKey;
+if (daysInt <= 1) {
+// For "Last 24 hours", use full timestamp for each data point
+// This allows plotting all individual data points
+timeKey = date.toISOString(); // Full ISO timestamp
+} else {
+// For daily view, group by day
+timeKey = date.toISOString().split("T")[0]; // YYYY-MM-DD
+}
+return {
+timeKey,
+total_packages: record.total_packages,
+packages_count: record.unique_packages_count,
+security_count: record.unique_security_count,
+timestamp: record.timestamp,
+};
+});
+if (daysInt <= 1) {
+// For "Last 24 hours", use all individual data points without grouping
+// Sort by timestamp
+aggregatedArray = processedData.sort(
+(a, b) => a.timestamp.getTime() - b.timestamp.getTime(),
+);
+} else {
+// For longer periods, group by timeKey and take the latest value for each period
+const aggregatedData = processedData.reduce((acc, item) => {
+if (
+!acc[item.timeKey] ||
+item.timestamp > acc[item.timeKey].timestamp
+) {
+acc[item.timeKey] = item;
+}
+return acc;
+}, {});
+// Convert to array and sort
+aggregatedArray = Object.values(aggregatedData).sort((a, b) =>
+a.timeKey.localeCompare(b.timeKey),
+);
+}
} else {
-// For specific host, show individual data points without aggregation
-// But still group by timeKey to handle multiple reports from same host in same time period
-const hostAggregatedData = processedData.reduce((acc, item) => {
-if (!acc[item.timeKey]) {
-acc[item.timeKey] = {
-timeKey: item.timeKey,
-total_packages: 0,
-packages_count: 0,
-security_count: 0,
-record_count: 0,
-host_ids: new Set([item.host_id]),
-min_timestamp: item.timestamp,
-max_timestamp: item.timestamp,
-};
-}
-// For same host, take the latest values (not sum)
-// This handles cases where a host reports multiple times in the same time period
-if (item.timestamp > acc[item.timeKey].max_timestamp) {
-acc[item.timeKey].total_packages = item.total_packages;
-acc[item.timeKey].packages_count = item.packages_count;
-acc[item.timeKey].security_count = item.security_count;
-acc[item.timeKey].max_timestamp = item.timestamp;
-}
-acc[item.timeKey].record_count += 1;
-return acc;
-}, {});
-// Convert to array
-aggregatedArray = Object.values(hostAggregatedData)
-.map((item) => ({
-...item,
-host_count: item.host_ids.size,
-host_ids: Array.from(item.host_ids),
-}))
-.sort((a, b) => a.timeKey.localeCompare(b.timeKey));
+// For individual host, data comes from update_history table
+processedData = trendsData
+.filter((record) => {
+// Enhanced validation
+return (
+record.total_packages !== null &&
+record.total_packages >= 0 &&
+record.packages_count >= 0 &&
+record.security_count >= 0 &&
+record.security_count <= record.packages_count &&
+record.status === "success"
+);
+})
+.map((record) => {
+const date = new Date(record.timestamp);
+let timeKey;
+if (daysInt <= 1) {
+// For "Last 24 hours", use full timestamp for each data point
+// This allows plotting all individual data points
+timeKey = date.toISOString(); // Full ISO timestamp
+} else {
+// For daily view, group by day
+timeKey = date.toISOString().split("T")[0]; // YYYY-MM-DD
+}
+return {
+timeKey,
+total_packages: record.total_packages,
+packages_count: record.packages_count || 0,
+security_count: record.security_count || 0,
+host_id: record.host_id,
+timestamp: record.timestamp,
+};
+});
+if (daysInt <= 1) {
+// For "Last 24 hours", use all individual data points without grouping
+// Sort by timestamp
+aggregatedArray = processedData.sort(
+(a, b) => a.timestamp.getTime() - b.timestamp.getTime(),
+);
+} else {
+// For longer periods, group by timeKey to handle multiple reports from same host in same time period
+const hostAggregatedData = processedData.reduce((acc, item) => {
+if (!acc[item.timeKey]) {
+acc[item.timeKey] = {
+timeKey: item.timeKey,
+total_packages: 0,
+packages_count: 0,
+security_count: 0,
+record_count: 0,
+host_ids: new Set([item.host_id]),
+min_timestamp: item.timestamp,
+max_timestamp: item.timestamp,
+};
+}
+// For same host, take the latest values (not sum)
+// This handles cases where a host reports multiple times in the same time period
+if (item.timestamp > acc[item.timeKey].max_timestamp) {
+acc[item.timeKey].total_packages = item.total_packages;
+acc[item.timeKey].packages_count = item.packages_count;
+acc[item.timeKey].security_count = item.security_count;
+acc[item.timeKey].max_timestamp = item.timestamp;
+}
+acc[item.timeKey].record_count += 1;
+return acc;
+}, {});
+// Convert to array
+aggregatedArray = Object.values(hostAggregatedData)
+.map((item) => ({
+...item,
+host_count: item.host_ids.size,
+host_ids: Array.from(item.host_ids),
+}))
+.sort((a, b) => a.timeKey.localeCompare(b.timeKey));
+}
}
// Handle sparse data by filling missing time periods
const fillMissingPeriods = (data, daysInt) => {
+if (data.length === 0) {
+return [];
+}
+// For "Last 24 hours", return data as-is without filling gaps
+// This allows plotting all individual data points
+if (daysInt <= 1) {
+return data;
+}
const filledData = [];
const startDate = new Date();
startDate.setDate(startDate.getDate() - daysInt);
@@ -741,50 +783,58 @@ router.get(
const endDate = new Date();
const currentDate = new Date(startDate);
-// Find the last known values for interpolation
+// Sort data by timeKey to get chronological order
+const sortedData = [...data].sort((a, b) =>
+a.timeKey.localeCompare(b.timeKey),
+);
+// Find the first actual data point (don't fill before this)
+const firstDataPoint = sortedData[0];
+const firstDataTimeKey = firstDataPoint?.timeKey;
+// Track last known values as we iterate forward
let lastKnownValues = null;
-if (data.length > 0) {
-lastKnownValues = {
-total_packages: data[0].total_packages,
-packages_count: data[0].packages_count,
-security_count: data[0].security_count,
-};
-}
+let hasSeenFirstDataPoint = false;
while (currentDate <= endDate) {
let timeKey;
-if (daysInt <= 1) {
-timeKey = currentDate.toISOString().substring(0, 13); // Hourly
-currentDate.setHours(currentDate.getHours() + 1);
-} else {
-timeKey = currentDate.toISOString().split("T")[0]; // Daily
-currentDate.setDate(currentDate.getDate() + 1);
-}
+// For daily view, group by day
+timeKey = currentDate.toISOString().split("T")[0]; // YYYY-MM-DD
+currentDate.setDate(currentDate.getDate() + 1);
+// Skip periods before the first actual data point
+if (firstDataTimeKey && timeKey < firstDataTimeKey) {
+continue;
+}
if (dataMap.has(timeKey)) {
const item = dataMap.get(timeKey);
filledData.push(item);
-// Update last known values
+// Update last known values with actual data
lastKnownValues = {
-total_packages: item.total_packages,
-packages_count: item.packages_count,
-security_count: item.security_count,
+total_packages: item.total_packages || 0,
+packages_count: item.packages_count || 0,
+security_count: item.security_count || 0,
};
+hasSeenFirstDataPoint = true;
} else {
-// For missing periods, use the last known values (interpolation)
-// This creates a continuous line instead of gaps
-filledData.push({
-timeKey,
-total_packages: lastKnownValues?.total_packages || 0,
-packages_count: lastKnownValues?.packages_count || 0,
-security_count: lastKnownValues?.security_count || 0,
-record_count: 0,
-host_count: 0,
-host_ids: [],
-min_timestamp: null,
-max_timestamp: null,
-isInterpolated: true, // Mark as interpolated for debugging
-});
+// For missing periods AFTER the first data point, use forward-fill
+// Only fill if we have a last known value and we've seen the first data point
+if (lastKnownValues !== null && hasSeenFirstDataPoint) {
+filledData.push({
+timeKey,
+total_packages: lastKnownValues.total_packages,
+packages_count: lastKnownValues.packages_count,
+security_count: lastKnownValues.security_count,
+record_count: 0,
+host_count: 0,
+host_ids: [],
+min_timestamp: null,
+max_timestamp: null,
+isInterpolated: true, // Mark as interpolated for debugging
+});
+}
+// If we haven't seen the first data point yet, skip this period
}
}
@@ -810,7 +860,7 @@ router.get(
// Get current package state for offline fallback
let currentPackageState = null;
if (hostId && hostId !== "all" && hostId !== "undefined") {
-// Get current package counts for specific host
+// For individual host, get current package counts from host_packages
const currentState = await prisma.host_packages.aggregate({
where: {
host_id: hostId,
@@ -841,34 +891,64 @@ router.get(
security_count: securityCount,
};
} else {
-// Get current package counts for all hosts
-// Total packages = count of unique packages installed on at least one host
-const totalPackagesCount = await prisma.packages.count({
-where: {
-host_packages: {
-some: {}, // At least one host has this package
-},
-},
-});
-// Get counts for boolean fields separately
-const outdatedCount = await prisma.host_packages.count({
-where: {
-needs_update: true,
-},
-});
-const securityCount = await prisma.host_packages.count({
-where: {
-is_security_update: true,
-},
-});
-currentPackageState = {
-total_packages: totalPackagesCount,
-packages_count: outdatedCount,
-security_count: securityCount,
-};
+// For "All Hosts" mode, use the latest system_statistics record if available
+// Otherwise calculate from database
+const latestStats = await prisma.system_statistics.findFirst({
+orderBy: {
+timestamp: "desc",
+},
+select: {
+total_packages: true,
+unique_packages_count: true,
+unique_security_count: true,
+timestamp: true,
+},
+});
+if (latestStats) {
+// Use latest system statistics (collected by scheduled job)
+currentPackageState = {
+total_packages: latestStats.total_packages,
+packages_count: latestStats.unique_packages_count,
+security_count: latestStats.unique_security_count,
+};
+} else {
+// Fallback: calculate from database if no statistics collected yet
+const totalPackagesCount = await prisma.packages.count({
+where: {
+host_packages: {
+some: {}, // At least one host has this package
+},
+},
+});
+const uniqueOutdatedCount = await prisma.packages.count({
+where: {
+host_packages: {
+some: {
+needs_update: true,
+},
+},
+},
+});
+const uniqueSecurityCount = await prisma.packages.count({
+where: {
+host_packages: {
+some: {
+needs_update: true,
+is_security_update: true,
+},
+},
+},
+});
+currentPackageState = {
+total_packages: totalPackagesCount,
+packages_count: uniqueOutdatedCount,
+security_count: uniqueSecurityCount,
+};
+}
}
// Format data for chart
@@ -923,6 +1003,11 @@ router.get(
chartData.datasets[2].data.push(item.security_count);
});
+// Replace the last label with "Now" to indicate current state
+if (chartData.labels.length > 0) {
+chartData.labels[chartData.labels.length - 1] = "Now";
+}
// Calculate data quality metrics
const dataQuality = {
totalRecords: trendsData.length,
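The forward-fill behavior introduced above is easiest to see in isolation: missing days after the first real data point repeat the last known value, while days before it are skipped entirely. A standalone sketch, not the route's exact code:

// Minimal sketch of the forward-fill idea.
function forwardFill(days, byDay) {
  const filled = [];
  let last = null;
  for (const day of days) {
    if (byDay.has(day)) {
      last = byDay.get(day);
      filled.push({ day, ...last, isInterpolated: false });
    } else if (last !== null) {
      // Gap after the first data point: repeat last known value.
      filled.push({ day, ...last, isInterpolated: true });
    }
    // Gap before the first data point: skip (no leading zeros).
  }
  return filled;
}

const byDay = new Map([
  ["2025-11-10", { packages_count: 5 }],
  ["2025-11-12", { packages_count: 3 }],
]);
console.log(forwardFill(["2025-11-09", "2025-11-10", "2025-11-11", "2025-11-12"], byDay));
// 2025-11-09 is skipped; 2025-11-11 is filled with packages_count 5 (interpolated).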

View File

@@ -2,6 +2,7 @@ const express = require("express");
const { authenticateToken } = require("../middleware/auth");
const { getPrismaClient } = require("../config/prisma");
const { v4: uuidv4 } = require("uuid");
+const { get_current_time, parse_date } = require("../utils/timezone");
const prisma = getPrismaClient();
const router = express.Router();
@@ -537,14 +538,7 @@ router.post("/collect", async (req, res) => {
return res.status(401).json({ error: "Invalid API credentials" });
}
-const now = new Date();
+const now = get_current_time();
-// Helper function to validate and parse dates
-const parseDate = (dateString) => {
-if (!dateString) return now;
-const date = new Date(dateString);
-return Number.isNaN(date.getTime()) ? now : date;
-};
// Process containers
if (containers && Array.isArray(containers)) {
@@ -572,7 +566,7 @@ router.post("/collect", async (req, res) => {
tag: containerData.image_tag,
image_id: containerData.image_id || "unknown",
source: containerData.image_source || "docker-hub",
-created_at: parseDate(containerData.created_at),
+created_at: parse_date(containerData.created_at, now),
last_checked: now,
updated_at: now,
},
@@ -597,7 +591,7 @@ router.post("/collect", async (req, res) => {
state: containerData.state,
ports: containerData.ports || null,
started_at: containerData.started_at
-? parseDate(containerData.started_at)
+? parse_date(containerData.started_at, null)
: null,
updated_at: now,
last_checked: now,
@@ -613,9 +607,9 @@ router.post("/collect", async (req, res) => {
status: containerData.status,
state: containerData.state,
ports: containerData.ports || null,
-created_at: parseDate(containerData.created_at),
+created_at: parse_date(containerData.created_at, now),
started_at: containerData.started_at
-? parseDate(containerData.started_at)
+? parse_date(containerData.started_at, null)
: null,
updated_at: now,
},
@@ -651,7 +645,7 @@ router.post("/collect", async (req, res) => {
? BigInt(imageData.size_bytes)
: null,
source: imageData.source || "docker-hub",
-created_at: parseDate(imageData.created_at),
+created_at: parse_date(imageData.created_at, now),
updated_at: now,
},
});
@@ -780,14 +774,7 @@ router.post("/../integrations/docker", async (req, res) => {
`[Docker Integration] Processing for host: ${host.friendly_name}`,
);
-const now = new Date();
+const now = get_current_time();
-// Helper function to validate and parse dates
-const parseDate = (dateString) => {
-if (!dateString) return now;
-const date = new Date(dateString);
-return Number.isNaN(date.getTime()) ? now : date;
-};
let containersProcessed = 0;
let imagesProcessed = 0;
@@ -822,7 +809,7 @@ router.post("/../integrations/docker", async (req, res) => {
tag: containerData.image_tag,
image_id: containerData.image_id || "unknown",
source: containerData.image_source || "docker-hub",
-created_at: parseDate(containerData.created_at),
+created_at: parse_date(containerData.created_at, now),
last_checked: now,
updated_at: now,
},
@@ -847,7 +834,7 @@ router.post("/../integrations/docker", async (req, res) => {
state: containerData.state || containerData.status,
ports: containerData.ports || null,
started_at: containerData.started_at
-? parseDate(containerData.started_at)
+? parse_date(containerData.started_at, null)
: null,
updated_at: now,
last_checked: now,
@@ -863,9 +850,9 @@ router.post("/../integrations/docker", async (req, res) => {
status: containerData.status,
state: containerData.state || containerData.status,
ports: containerData.ports || null,
-created_at: parseDate(containerData.created_at),
+created_at: parse_date(containerData.created_at, now),
started_at: containerData.started_at
-? parseDate(containerData.started_at)
+? parse_date(containerData.started_at, null)
: null,
updated_at: now,
},
@@ -911,7 +898,7 @@ router.post("/../integrations/docker", async (req, res) => {
? BigInt(imageData.size_bytes)
: null,
source: imageSource,
-created_at: parseDate(imageData.created_at),
+created_at: parse_date(imageData.created_at, now),
last_checked: now,
updated_at: now,
},
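The shared parse_date takes an explicit fallback instead of closing over a local now, which is why call sites can pass either now or null. The actual utils/timezone module is not shown in this diff, so this is only a plausible sketch of its shape, based on the inline helper that was deleted:

// Assumed shape of the shared helper (not the repo's verified implementation).
function parse_date(dateString, fallback) {
  if (!dateString) return fallback;
  const date = new Date(dateString);
  return Number.isNaN(date.getTime()) ? fallback : date;
}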

View File

@@ -1,113 +1,12 @@
const express = require("express"); const express = require("express");
const { getPrismaClient } = require("../config/prisma"); const { getPrismaClient } = require("../config/prisma");
const bcrypt = require("bcryptjs"); const { authenticateApiToken } = require("../middleware/apiAuth");
const router = express.Router(); const router = express.Router();
const prisma = getPrismaClient(); const prisma = getPrismaClient();
-// Middleware to authenticate API key
-const authenticateApiKey = async (req, res, next) => {
-  try {
-    const authHeader = req.headers.authorization;
-    if (!authHeader || !authHeader.startsWith("Basic ")) {
-      return res
-        .status(401)
-        .json({ error: "Missing or invalid authorization header" });
-    }
-    // Decode base64 credentials
-    const base64Credentials = authHeader.split(" ")[1];
-    const credentials = Buffer.from(base64Credentials, "base64").toString(
-      "ascii",
-    );
-    const [apiKey, apiSecret] = credentials.split(":");
-    if (!apiKey || !apiSecret) {
-      return res.status(401).json({ error: "Invalid credentials format" });
-    }
-    // Find the token in database
-    const token = await prisma.auto_enrollment_tokens.findUnique({
-      where: { token_key: apiKey },
-      include: {
-        users: {
-          select: {
-            id: true,
-            username: true,
-            role: true,
-          },
-        },
-      },
-    });
-    if (!token) {
-      console.log(`API key not found: ${apiKey}`);
-      return res.status(401).json({ error: "Invalid API key" });
-    }
-    // Check if token is active
-    if (!token.is_active) {
-      return res.status(401).json({ error: "API key is disabled" });
-    }
-    // Check if token has expired
-    if (token.expires_at && new Date(token.expires_at) < new Date()) {
-      return res.status(401).json({ error: "API key has expired" });
-    }
-    // Check if token is for gethomepage integration
-    if (token.metadata?.integration_type !== "gethomepage") {
-      return res.status(401).json({ error: "Invalid API key type" });
-    }
-    // Verify the secret
-    const isValidSecret = await bcrypt.compare(apiSecret, token.token_secret);
-    if (!isValidSecret) {
-      return res.status(401).json({ error: "Invalid API secret" });
-    }
-    // Check IP restrictions if any
-    if (token.allowed_ip_ranges && token.allowed_ip_ranges.length > 0) {
-      const clientIp = req.ip || req.connection.remoteAddress;
-      const forwardedFor = req.headers["x-forwarded-for"];
-      const realIp = req.headers["x-real-ip"];
-      // Get the actual client IP (considering proxies)
-      const actualClientIp = forwardedFor
-        ? forwardedFor.split(",")[0].trim()
-        : realIp || clientIp;
-      const isAllowedIp = token.allowed_ip_ranges.some((range) => {
-        // Simple IP range check (can be enhanced for CIDR support)
-        return actualClientIp.startsWith(range) || actualClientIp === range;
-      });
-      if (!isAllowedIp) {
-        console.log(
-          `IP validation failed. Client IP: ${actualClientIp}, Allowed ranges: ${token.allowed_ip_ranges.join(", ")}`,
-        );
-        return res.status(403).json({ error: "IP address not allowed" });
-      }
-    }
-    // Update last used timestamp
-    await prisma.auto_enrollment_tokens.update({
-      where: { id: token.id },
-      data: { last_used_at: new Date() },
-    });
-    // Attach token info to request
-    req.apiToken = token;
-    next();
-  } catch (error) {
-    console.error("API key authentication error:", error);
-    res.status(500).json({ error: "Authentication failed" });
-  }
-};
 // Get homepage widget statistics
-router.get("/stats", authenticateApiKey, async (_req, res) => {
+router.get("/stats", authenticateApiToken("gethomepage"), async (_req, res) => {
   try {
     // Get total hosts count
     const totalHosts = await prisma.hosts.count({
@@ -235,7 +134,7 @@ router.get("/stats", authenticateApiKey, async (_req, res) => {
   });
 // Health check endpoint for the API
-router.get("/health", authenticateApiKey, async (req, res) => {
+router.get("/health", authenticateApiToken("gethomepage"), async (req, res) => {
   res.json({
     status: "ok",
     timestamp: new Date().toISOString(),
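Note: the ~85-line inline authenticateApiKey middleware above is replaced by a shared authenticateApiToken(integrationType) factory. That middleware file is not part of this diff, so the following is only a sketch of the shape the call sites imply; lookupAndVerifyToken stands in for the checks the deleted code performed (Basic-auth decode, bcrypt secret compare, expiry, IP allow-list):

    // Hypothetical sketch — the real ../middleware/apiAuth is not shown in this diff.
    async function lookupAndVerifyToken(_req) {
      return null; // stub for illustration only
    }

    function authenticateApiToken(integrationType) {
      return async (req, res, next) => {
        try {
          const token = await lookupAndVerifyToken(req);
          if (!token) {
            return res.status(401).json({ error: "Invalid API key" });
          }
          // Scope the token to a single integration, as the call sites suggest:
          if (token.metadata?.integration_type !== integrationType) {
            return res.status(401).json({ error: "Invalid API key type" });
          }
          req.apiToken = token;
          next();
        } catch (_error) {
          res.status(500).json({ error: "Authentication failed" });
        }
      };
    }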

View File

@@ -11,10 +11,16 @@ const {
   requireManageSettings,
 } = require("../middleware/permissions");
 const { queueManager, QUEUE_NAMES } = require("../services/automation");
+const { pushIntegrationToggle, isConnected } = require("../services/agentWs");
+const agentVersionService = require("../services/agentVersionService");
 const router = express.Router();
 const prisma = getPrismaClient();
+// In-memory cache for integration states (api_id -> { integration_name -> enabled })
+// This stores the last known state from successful toggles
+const integrationStateCache = new Map();
 // Secure endpoint to download the agent script/binary (requires API authentication)
 router.get("/agent/download", async (req, res) => {
   try {
@@ -128,9 +134,6 @@ router.get("/agent/version", async (req, res) => {
   try {
     const fs = require("node:fs");
     const path = require("node:path");
-    const { exec } = require("node:child_process");
-    const { promisify } = require("node:util");
-    const execAsync = promisify(exec);
     // Get architecture parameter (default to amd64 for Go agents)
     const architecture = req.query.arch || "amd64";
@@ -165,53 +168,108 @@ router.get("/agent/version", async (req, res) => {
         minServerVersion: null,
       });
     } else {
-      // Go agent version check (binary)
-      const binaryName = `patchmon-agent-linux-${architecture}`;
-      const binaryPath = path.join(__dirname, "../../../agents", binaryName);
-      if (!fs.existsSync(binaryPath)) {
-        return res.status(404).json({
-          error: `Go agent binary not found for architecture: ${architecture}`,
-        });
-      }
-      // Execute the binary to get its version
-      try {
-        const { stdout } = await execAsync(`${binaryPath} --help`, {
-          timeout: 10000,
-        });
-        // Parse version from help output (e.g., "PatchMon Agent v1.3.1")
-        const versionMatch = stdout.match(
-          /PatchMon Agent v([0-9]+\.[0-9]+\.[0-9]+)/i,
-        );
-        if (!versionMatch) {
-          return res.status(500).json({
-            error: "Could not extract version from agent binary",
-          });
-        }
-        const serverVersion = versionMatch[1];
-        const agentVersion = req.query.currentVersion || serverVersion;
+      // Go agent version check
+      // Detect server architecture and map to Go architecture names
+      const os = require("node:os");
+      const { exec } = require("node:child_process");
+      const { promisify } = require("node:util");
+      const execAsync = promisify(exec);
+      const serverArch = os.arch();
+      // Map Node.js architecture to Go architecture names
+      const archMap = {
+        x64: "amd64",
+        ia32: "386",
+        arm64: "arm64",
+        arm: "arm",
+      };
+      const serverGoArch = archMap[serverArch] || serverArch;
+      // If requested architecture matches server architecture, execute the binary
+      if (architecture === serverGoArch) {
+        const binaryName = `patchmon-agent-linux-${architecture}`;
+        const binaryPath = path.join(__dirname, "../../../agents", binaryName);
+        if (!fs.existsSync(binaryPath)) {
+          // Binary doesn't exist, fall back to GitHub
+          console.log(`Binary ${binaryName} not found, falling back to GitHub`);
+        } else {
+          // Execute the binary to get its version
+          try {
+            const { stdout } = await execAsync(`${binaryPath} --help`, {
+              timeout: 10000,
+            });
+            // Parse version from help output (e.g., "PatchMon Agent v1.3.1")
+            const versionMatch = stdout.match(
+              /PatchMon Agent v([0-9]+\.[0-9]+\.[0-9]+)/i,
+            );
+            if (versionMatch) {
+              const serverVersion = versionMatch[1];
+              const agentVersion = req.query.currentVersion || serverVersion;
+              // Simple version comparison (assuming semantic versioning)
+              const hasUpdate = agentVersion !== serverVersion;
+              return res.json({
+                currentVersion: agentVersion,
+                latestVersion: serverVersion,
+                hasUpdate: hasUpdate,
+                downloadUrl: `/api/v1/hosts/agent/download?arch=${architecture}`,
+                releaseNotes: `PatchMon Agent v${serverVersion}`,
+                minServerVersion: null,
+                architecture: architecture,
+                agentType: "go",
+              });
+            }
+          } catch (execError) {
+            // Execution failed, fall back to GitHub
+            console.log(
+              `Failed to execute binary ${binaryName}: ${execError.message}, falling back to GitHub`,
+            );
+          }
+        }
+      }
+      // Fall back to GitHub if architecture doesn't match or binary execution failed
+      try {
+        const versionInfo = await agentVersionService.getVersionInfo();
+        const latestVersion = versionInfo.latestVersion;
+        const agentVersion =
+          req.query.currentVersion || latestVersion || "unknown";
+        if (!latestVersion) {
+          return res.status(503).json({
+            error: "Unable to determine latest version from GitHub releases",
+            currentVersion: agentVersion,
+            latestVersion: null,
+            hasUpdate: false,
+          });
+        }
         // Simple version comparison (assuming semantic versioning)
-        const hasUpdate = agentVersion !== serverVersion;
+        const hasUpdate =
+          agentVersion !== latestVersion && latestVersion !== null;
         res.json({
           currentVersion: agentVersion,
-          latestVersion: serverVersion,
+          latestVersion: latestVersion,
           hasUpdate: hasUpdate,
           downloadUrl: `/api/v1/hosts/agent/download?arch=${architecture}`,
-          releaseNotes: `PatchMon Agent v${serverVersion}`,
+          releaseNotes: `PatchMon Agent v${latestVersion}`,
           minServerVersion: null,
           architecture: architecture,
           agentType: "go",
         });
-      } catch (execError) {
-        console.error("Failed to execute agent binary:", execError.message);
+      } catch (serviceError) {
+        console.error(
+          "Failed to get version from agentVersionService:",
+          serviceError.message,
+        );
         return res.status(500).json({
-          error: "Failed to get version from agent binary",
+          error: "Failed to get agent version from service",
+          details: serviceError.message,
         });
       }
     }
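Note: hasUpdate is still plain string inequality, so an agent running a build newer than the latest GitHub release is reported as having an update. If that matters, a semver-aware check would look roughly like this (sketch, not part of the diff):

    function isOlder(current, latest) {
      const a = current.split(".").map(Number);
      const b = latest.split(".").map(Number);
      for (let i = 0; i < 3; i++) {
        if (a[i] !== b[i]) return a[i] < b[i];
      }
      return false; // equal versions
    }

    isOlder("1.3.3", "1.3.4"); // true  -> update available
    isOlder("1.3.5", "1.3.4"); // false -> ahead of latest, no update flagged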
@@ -493,8 +551,11 @@ router.post(
       updated_at: new Date(),
     };
-    // Update machine_id if provided and current one is a placeholder
-    if (req.body.machineId && host.machine_id.startsWith("pending-")) {
+    // Update machine_id if provided and current one is a placeholder or null
+    if (
+      req.body.machineId &&
+      (host.machine_id === null || host.machine_id.startsWith("pending-"))
+    ) {
       updateData.machine_id = req.body.machineId;
     }
@@ -1616,19 +1677,22 @@ router.get("/install", async (req, res) => {
   // Check for --force parameter
   const forceInstall = req.query.force === "true" || req.query.force === "1";
-  // Get architecture parameter (default to amd64)
-  const architecture = req.query.arch || "amd64";
+  // Get architecture parameter (only set if explicitly provided, otherwise let script auto-detect)
+  const architecture = req.query.arch;
   // Inject the API credentials, server URL, curl flags, SSL verify flag, force flag, and architecture into the script
-  const envVars = `#!/bin/bash
+  // Only set ARCHITECTURE if explicitly provided, otherwise let the script auto-detect
+  const archExport = architecture
+    ? `export ARCHITECTURE="${architecture}"\n`
+    : "";
+  const envVars = `#!/bin/sh
 export PATCHMON_URL="${serverUrl}"
 export API_ID="${host.api_id}"
 export API_KEY="${host.api_key}"
 export CURL_FLAGS="${curlFlags}"
 export SKIP_SSL_VERIFY="${skipSSLVerify}"
 export FORCE_INSTALL="${forceInstall ? "true" : "false"}"
-export ARCHITECTURE="${architecture}"
+${archExport}
 `;
   // Remove the shebang from the original script and prepend our env vars
@@ -1647,47 +1711,7 @@ export ARCHITECTURE="${architecture}"
   }
 });
-// Check if machine_id already exists (requires auth)
-router.post("/check-machine-id", validateApiCredentials, async (req, res) => {
-  try {
-    const { machine_id } = req.body;
-    if (!machine_id) {
-      return res.status(400).json({
-        error: "machine_id is required",
-      });
-    }
-    // Check if a host with this machine_id exists
-    const existing_host = await prisma.hosts.findUnique({
-      where: { machine_id },
-      select: {
-        id: true,
-        friendly_name: true,
-        machine_id: true,
-        api_id: true,
-        status: true,
-        created_at: true,
-      },
-    });
-    if (existing_host) {
-      return res.status(200).json({
-        exists: true,
-        host: existing_host,
-        message: "This machine is already enrolled",
-      });
-    }
-    return res.status(200).json({
-      exists: false,
-      message: "Machine not yet enrolled",
-    });
-  } catch (error) {
-    console.error("Error checking machine_id:", error);
-    res.status(500).json({ error: "Failed to check machine_id" });
-  }
-});
+// Note: /check-machine-id endpoint removed - using config.yml checking method instead
 // Serve the removal script (public endpoint - no authentication required)
 router.get("/remove", async (_req, res) => {
@@ -1720,7 +1744,7 @@ router.get("/remove", async (_req, res) => {
   } catch (_) {}
   // Prepend environment for CURL_FLAGS so script can use it if needed
-  const envPrefix = `#!/bin/bash\nexport CURL_FLAGS="${curlFlags}"\n\n`;
+  const envPrefix = `#!/bin/sh\nexport CURL_FLAGS="${curlFlags}"\n\n`;
   script = script.replace(/^#!/, "#");
   script = envPrefix + script;
@@ -2103,4 +2127,137 @@ router.patch(
   },
 );
// Get integration status for a host
router.get(
"/:hostId/integrations",
authenticateToken,
requireManageHosts,
async (req, res) => {
try {
const { hostId } = req.params;
// Get host to verify it exists
const host = await prisma.hosts.findUnique({
where: { id: hostId },
select: { id: true, api_id: true, friendly_name: true },
});
if (!host) {
return res.status(404).json({ error: "Host not found" });
}
// Check if agent is connected
const connected = isConnected(host.api_id);
// Get integration states from cache (or defaults if not cached)
// Default: all integrations are disabled
const cachedState = integrationStateCache.get(host.api_id) || {};
const integrations = {
docker: cachedState.docker || false, // Default: disabled
// Future integrations can be added here
};
res.json({
success: true,
data: {
integrations,
connected,
host: {
id: host.id,
friendlyName: host.friendly_name,
apiId: host.api_id,
},
},
});
} catch (error) {
console.error("Get integration status error:", error);
res.status(500).json({ error: "Failed to get integration status" });
}
},
);
// Toggle integration status for a host
router.post(
"/:hostId/integrations/:integrationName/toggle",
authenticateToken,
requireManageHosts,
[body("enabled").isBoolean().withMessage("Enabled status must be a boolean")],
async (req, res) => {
try {
const errors = validationResult(req);
if (!errors.isEmpty()) {
return res.status(400).json({ errors: errors.array() });
}
const { hostId, integrationName } = req.params;
const { enabled } = req.body;
// Validate integration name
const validIntegrations = ["docker"]; // Add more as they're implemented
if (!validIntegrations.includes(integrationName)) {
return res.status(400).json({
error: "Invalid integration name",
validIntegrations,
});
}
// Get host to verify it exists
const host = await prisma.hosts.findUnique({
where: { id: hostId },
select: { id: true, api_id: true, friendly_name: true },
});
if (!host) {
return res.status(404).json({ error: "Host not found" });
}
// Check if agent is connected
if (!isConnected(host.api_id)) {
return res.status(503).json({
error: "Agent is not connected",
message:
"The agent must be connected via WebSocket to toggle integrations",
});
}
// Send WebSocket message to agent
const success = pushIntegrationToggle(
host.api_id,
integrationName,
enabled,
);
if (!success) {
return res.status(503).json({
error: "Failed to send integration toggle",
message: "Agent connection may have been lost",
});
}
// Update cache with new state
if (!integrationStateCache.has(host.api_id)) {
integrationStateCache.set(host.api_id, {});
}
integrationStateCache.get(host.api_id)[integrationName] = enabled;
res.json({
success: true,
message: `Integration ${integrationName} ${enabled ? "enabled" : "disabled"} successfully`,
data: {
integration: integrationName,
enabled,
host: {
id: host.id,
friendlyName: host.friendly_name,
apiId: host.api_id,
},
},
});
} catch (error) {
console.error("Toggle integration error:", error);
res.status(500).json({ error: "Failed to toggle integration" });
}
},
);
 module.exports = router;
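Note: for anyone scripting against the new toggle endpoint, a call looks like the following. The /api/v1/hosts prefix is inferred from the download URLs elsewhere in this file, and the bearer token is a placeholder for whatever authenticateToken expects in your deployment:

    const BASE = "http://localhost:3001/api/v1/hosts"; // prefix inferred, host placeholder

    async function setDockerIntegration(hostId, enabled, jwt) {
      const res = await fetch(`${BASE}/${hostId}/integrations/docker/toggle`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${jwt}`, // assumed auth scheme
        },
        body: JSON.stringify({ enabled }),
      });
      if (!res.ok) throw new Error(`Toggle failed: ${res.status}`);
      return res.json(); // { success, message, data: { integration, enabled, host } }
    }

Remember the route returns 503 when the agent has no live WebSocket connection, so callers should treat that as "retry later" rather than a hard failure.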

View File

@@ -60,9 +60,14 @@ router.post(
   authenticateToken,
   [
     body("token")
+      .notEmpty()
+      .withMessage("Token is required")
+      .isString()
+      .withMessage("Token must be a string")
       .isLength({ min: 6, max: 6 })
-      .withMessage("Token must be 6 digits"),
-    body("token").isNumeric().withMessage("Token must contain only numbers"),
+      .withMessage("Token must be exactly 6 digits")
+      .matches(/^\d{6}$/)
+      .withMessage("Token must contain only numbers"),
   ],
   async (req, res) => {
     try {
@@ -71,7 +76,11 @@ router.post(
       return res.status(400).json({ errors: errors.array() });
     }
-    const { token } = req.body;
+    // Ensure token is a string (convert if needed)
+    let { token } = req.body;
+    if (typeof token !== "string") {
+      token = String(token);
+    }
     const userId = req.user.id;
     // Get user's TFA secret
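Note: the consolidated chain now rejects everything the old two-validator setup let through. What the new .matches(/^\d{6}$/) rule accepts, for reference:

    const rule = /^\d{6}$/;

    rule.test("042137");  // true  — six digits, leading zero intact
    rule.test("42137");   // false — five digits; this is why tokens must stay
                          //         strings: a numeric 042137 loses its zero
    rule.test("12345a");  // false — non-digit character
    rule.test("1234567"); // false — too long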

View File

@@ -71,6 +71,7 @@ const wsRoutes = require("./routes/wsRoutes");
 const agentVersionRoutes = require("./routes/agentVersionRoutes");
 const metricsRoutes = require("./routes/metricsRoutes");
 const userPreferencesRoutes = require("./routes/userPreferencesRoutes");
+const apiHostsRoutes = require("./routes/apiHostsRoutes");
 const { initSettings } = require("./services/settingsService");
 const { queueManager } = require("./services/automation");
 const { authenticateToken, requireAdmin } = require("./middleware/auth");
@@ -480,6 +481,7 @@ app.use(`/api/${apiVersion}/ws`, wsRoutes);
 app.use(`/api/${apiVersion}/agent`, agentVersionRoutes);
 app.use(`/api/${apiVersion}/metrics`, metricsRoutes);
 app.use(`/api/${apiVersion}/user/preferences`, userPreferencesRoutes);
+app.use(`/api/${apiVersion}/api`, authLimiter, apiHostsRoutes);
 // Bull Board - will be populated after queue manager initializes
 let bullBoardRouter = null;

View File

@@ -1,6 +1,7 @@
 const axios = require("axios");
 const fs = require("node:fs").promises;
 const path = require("node:path");
+const os = require("node:os");
 const { exec, spawn } = require("node:child_process");
 const { promisify } = require("node:util");
 const _execAsync = promisify(exec);
@@ -106,10 +107,26 @@ class AgentVersionService {
     try {
       console.log("🔍 Getting current agent version...");
-      // Try to find the agent binary in agents/ folder only (what gets distributed)
+      // Detect server architecture and map to Go architecture names
+      const serverArch = os.arch();
+      // Map Node.js architecture to Go architecture names
+      const archMap = {
+        x64: "amd64",
+        ia32: "386",
+        arm64: "arm64",
+        arm: "arm",
+      };
+      const serverGoArch = archMap[serverArch] || serverArch;
+      console.log(
+        `🔍 Detected server architecture: ${serverArch} -> ${serverGoArch}`,
+      );
+      // Try to find the agent binary in agents/ folder based on server architecture
       const possiblePaths = [
-        path.join(this.agentsDir, "patchmon-agent-linux-amd64"),
-        path.join(this.agentsDir, "patchmon-agent"),
+        path.join(this.agentsDir, `patchmon-agent-linux-${serverGoArch}`),
+        path.join(this.agentsDir, "patchmon-agent-linux-amd64"), // Fallback
+        path.join(this.agentsDir, "patchmon-agent"), // Legacy fallback
       ];
       let agentPath = null;
@@ -126,7 +143,7 @@ class AgentVersionService {
       if (!agentPath) {
         console.log(
-          "⚠️ No agent binary found in agents/ folder, current version will be unknown",
+          `⚠️ No agent binary found in agents/ folder for architecture ${serverGoArch}, current version will be unknown`,
         );
         console.log("💡 Use the Download Updates button to get agent binaries");
         this.currentVersion = null;
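Note: this Node-to-Go architecture map is now duplicated here and in the agent/version route; extracting it into a shared helper would be a natural follow-up. On common platforms it resolves as below:

    const os = require("node:os");
    const archMap = { x64: "amd64", ia32: "386", arm64: "arm64", arm: "arm" };

    // Typical Intel/AMD server:        os.arch() === "x64"   -> "amd64"
    // Raspberry Pi 4 with a 64-bit OS: os.arch() === "arm64" -> "arm64"
    const serverGoArch = archMap[os.arch()] || os.arch();
    console.log(`patchmon-agent-linux-${serverGoArch}`);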

View File

@@ -3,6 +3,7 @@
 const WebSocket = require("ws");
 const url = require("node:url");
+const { get_current_time } = require("../utils/timezone");
 // Connection registry by api_id
 const apiIdToSocket = new Map();
@@ -49,7 +50,29 @@ function init(server, prismaClient) {
     wss.handleUpgrade(request, socket, head, (ws) => {
       ws.on("message", (message) => {
         // Echo back for Bull Board WebSocket
-        ws.send(message);
+        try {
+          ws.send(message);
+        } catch (_err) {
+          // Ignore send errors (connection may be closed)
+        }
+      });
+      ws.on("error", (err) => {
+        // Handle WebSocket errors gracefully for Bull Board
+        if (
+          err.code === "WS_ERR_INVALID_CLOSE_CODE" ||
+          err.code === "ECONNRESET" ||
+          err.code === "EPIPE"
+        ) {
+          // These are expected errors, just log quietly
+          console.log("[bullboard-ws] connection error:", err.code);
+        } else {
+          console.error("[bullboard-ws] error:", err.message || err);
+        }
+      });
+      ws.on("close", () => {
+        // Connection closed, no action needed
       });
     });
     return;
@@ -117,7 +140,58 @@ function init(server, prismaClient) {
       }
     });
+    ws.on("error", (err) => {
+      // Handle WebSocket errors gracefully without crashing
+      // Common errors: invalid close codes (1006), connection resets, etc.
+      if (
+        err.code === "WS_ERR_INVALID_CLOSE_CODE" ||
+        err.message?.includes("invalid status code 1006") ||
+        err.message?.includes("Invalid WebSocket frame")
+      ) {
+        // 1006 is a special close code indicating abnormal closure
+        // It cannot be sent in a close frame, but can occur when connection is lost
+        console.log(
+          `[agent-ws] connection error for ${apiId} (abnormal closure):`,
+          err.message || err.code,
+        );
+      } else if (
+        err.code === "ECONNRESET" ||
+        err.code === "EPIPE" ||
+        err.message?.includes("read ECONNRESET")
+      ) {
+        // Connection reset errors are common and expected
+        console.log(`[agent-ws] connection reset for ${apiId}`);
+      } else {
+        // Log other errors for debugging
+        console.error(
+          `[agent-ws] error for ${apiId}:`,
+          err.message || err.code || err,
+        );
+      }
+      // Clean up connection on error
+      const existing = apiIdToSocket.get(apiId);
+      if (existing === ws) {
+        apiIdToSocket.delete(apiId);
+        connectionMetadata.delete(apiId);
+        // Notify subscribers of disconnection
+        notifyConnectionChange(apiId, false);
+      }
+      // Try to close the connection gracefully if still open
+      if (
+        ws.readyState === WebSocket.OPEN ||
+        ws.readyState === WebSocket.CONNECTING
+      ) {
+        try {
+          ws.close(1000); // Normal closure
+        } catch {
+          // Ignore errors when closing
+        }
+      }
+    });
-    ws.on("close", () => {
+    ws.on("close", (code, reason) => {
       const existing = apiIdToSocket.get(apiId);
       if (existing === ws) {
         apiIdToSocket.delete(apiId);
@@ -126,7 +200,7 @@ function init(server, prismaClient) {
         notifyConnectionChange(apiId, false);
       }
       console.log(
-        `[agent-ws] disconnected api_id=${apiId} total=${apiIdToSocket.size}`,
+        `[agent-ws] disconnected api_id=${apiId} code=${code} reason=${reason || "none"} total=${apiIdToSocket.size}`,
       );
     });
@@ -181,6 +255,29 @@ function pushUpdateAgent(apiId) {
   safeSend(ws, JSON.stringify({ type: "update_agent" }));
 }
+function pushIntegrationToggle(apiId, integrationName, enabled) {
+  const ws = apiIdToSocket.get(apiId);
+  if (ws && ws.readyState === WebSocket.OPEN) {
+    safeSend(
+      ws,
+      JSON.stringify({
+        type: "integration_toggle",
+        integration: integrationName,
+        enabled: enabled,
+      }),
+    );
+    console.log(
+      `📤 Pushed integration toggle to agent ${apiId}: ${integrationName} = ${enabled}`,
+    );
+    return true;
+  } else {
+    console.log(
+      `⚠️ Agent ${apiId} not connected, cannot push integration toggle, please edit config.yml manually`,
+    );
+    return false;
+  }
+}
 function getConnectionByApiId(apiId) {
   return apiIdToSocket.get(apiId);
 }
@@ -314,7 +411,7 @@ async function handleDockerStatusEvent(apiId, message) {
       status: status,
       state: status,
       updated_at: new Date(timestamp || Date.now()),
-      last_checked: new Date(),
+      last_checked: get_current_time(),
     },
   });
@@ -340,6 +437,7 @@ module.exports = {
   pushReportNow,
   pushSettingsUpdate,
   pushUpdateAgent,
+  pushIntegrationToggle,
   pushUpdateNotification,
   pushUpdateNotificationToAll,
   // Expose read-only view of connected agents
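Note: on the wire the toggle is a single JSON frame, { type: "integration_toggle", integration: "docker", enabled: true }. The consuming agent is the Go binary, but a handler mirroring that shape looks roughly like this (sketch; applyIntegrationState is hypothetical):

    function attachIntegrationHandler(ws, applyIntegrationState) {
      ws.on("message", (raw) => {
        let msg;
        try {
          msg = JSON.parse(raw);
        } catch {
          return; // ignore non-JSON frames
        }
        if (msg.type === "integration_toggle") {
          // e.g. persist to config.yml and start/stop the collector
          applyIntegrationState(msg.integration, msg.enabled);
        }
      });
    }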

View File

@@ -139,15 +139,13 @@ class DockerImageUpdateCheck {
     console.log("🐳 Starting Docker image update check...");
     try {
-      // Get all Docker images that have a digest and repository
+      // Get all Docker images that have a digest
+      // Note: repository is required (non-nullable) in schema, so we don't need to check it
       const images = await prisma.docker_images.findMany({
         where: {
           digest: {
             not: null,
           },
-          repository: {
-            not: null,
-          },
         },
         include: {
           docker_image_updates: true,

View File

@@ -3,6 +3,7 @@ const { redis, redisConnection } = require("./shared/redis");
 const { prisma } = require("./shared/prisma");
 const agentWs = require("../agentWs");
 const { v4: uuidv4 } = require("uuid");
+const { get_current_time } = require("../../utils/timezone");
 // Import automation classes
 const GitHubUpdateCheck = require("./githubUpdateCheck");
@@ -12,6 +13,7 @@ const OrphanedPackageCleanup = require("./orphanedPackageCleanup");
 const DockerInventoryCleanup = require("./dockerInventoryCleanup");
 const DockerImageUpdateCheck = require("./dockerImageUpdateCheck");
 const MetricsReporting = require("./metricsReporting");
+const SystemStatistics = require("./systemStatistics");
 // Queue names
 const QUEUE_NAMES = {
@@ -22,6 +24,7 @@ const QUEUE_NAMES = {
   DOCKER_INVENTORY_CLEANUP: "docker-inventory-cleanup",
   DOCKER_IMAGE_UPDATE_CHECK: "docker-image-update-check",
   METRICS_REPORTING: "metrics-reporting",
+  SYSTEM_STATISTICS: "system-statistics",
   AGENT_COMMANDS: "agent-commands",
 };
@@ -105,6 +108,9 @@ class QueueManager {
     this.automations[QUEUE_NAMES.METRICS_REPORTING] = new MetricsReporting(
       this,
     );
+    this.automations[QUEUE_NAMES.SYSTEM_STATISTICS] = new SystemStatistics(
+      this,
+    );
     console.log("✅ All automation classes initialized");
   }
@@ -190,6 +196,15 @@ class QueueManager {
       workerOptions,
     );
+    // System Statistics Worker
+    this.workers[QUEUE_NAMES.SYSTEM_STATISTICS] = new Worker(
+      QUEUE_NAMES.SYSTEM_STATISTICS,
+      this.automations[QUEUE_NAMES.SYSTEM_STATISTICS].process.bind(
+        this.automations[QUEUE_NAMES.SYSTEM_STATISTICS],
+      ),
+      workerOptions,
+    );
     // Agent Commands Worker
     this.workers[QUEUE_NAMES.AGENT_COMMANDS] = new Worker(
       QUEUE_NAMES.AGENT_COMMANDS,
@@ -216,8 +231,8 @@ class QueueManager {
         api_id: api_id,
         status: "active",
         attempt_number: job.attemptsMade + 1,
-        created_at: new Date(),
-        updated_at: new Date(),
+        created_at: get_current_time(),
+        updated_at: get_current_time(),
       },
     });
     console.log(`📝 Logged job to job_history: ${job.id} (${type})`);
@@ -257,8 +272,8 @@ class QueueManager {
       where: { job_id: job.id },
       data: {
         status: "completed",
-        completed_at: new Date(),
-        updated_at: new Date(),
+        completed_at: get_current_time(),
+        updated_at: get_current_time(),
       },
     });
     console.log(`✅ Marked job as completed in job_history: ${job.id}`);
@@ -271,8 +286,8 @@ class QueueManager {
       data: {
         status: "failed",
         error_message: error.message,
-        completed_at: new Date(),
-        updated_at: new Date(),
+        completed_at: get_current_time(),
+        updated_at: get_current_time(),
       },
     });
     console.log(`❌ Marked job as failed in job_history: ${job.id}`);
@@ -322,6 +337,7 @@ class QueueManager {
     await this.automations[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].schedule();
     await this.automations[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK].schedule();
     await this.automations[QUEUE_NAMES.METRICS_REPORTING].schedule();
+    await this.automations[QUEUE_NAMES.SYSTEM_STATISTICS].schedule();
   }
   /**
@@ -357,6 +373,10 @@ class QueueManager {
     ].triggerManual();
   }
+  async triggerSystemStatistics() {
+    return this.automations[QUEUE_NAMES.SYSTEM_STATISTICS].triggerManual();
+  }
   async triggerMetricsReporting() {
     return this.automations[QUEUE_NAMES.METRICS_REPORTING].triggerManual();
   }
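Note: with the worker and schedule registered, an immediate collection (used by the dashboard refresh later in this diff) is just:

    // Assuming an initialized queue manager as exported by this module:
    const { queueManager } = require("./services/automation"); // path assumed

    async function refreshStatsNow() {
      const job = await queueManager.triggerSystemStatistics();
      console.log(`queued system-statistics job ${job.id}`);
    }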

View File

@@ -0,0 +1,140 @@
const { prisma } = require("./shared/prisma");
const { v4: uuidv4 } = require("uuid");
/**
* System Statistics Collection Automation
* Collects aggregated system-wide statistics every 30 minutes
* for use in package trends charts
*/
class SystemStatistics {
constructor(queueManager) {
this.queueManager = queueManager;
this.queueName = "system-statistics";
}
/**
* Process system statistics collection job
*/
async process(_job) {
const startTime = Date.now();
console.log("📊 Starting system statistics collection...");
try {
// Calculate unique package counts across all hosts
const uniquePackagesCount = await prisma.packages.count({
where: {
host_packages: {
some: {
needs_update: true,
},
},
},
});
const uniqueSecurityCount = await prisma.packages.count({
where: {
host_packages: {
some: {
needs_update: true,
is_security_update: true,
},
},
},
});
// Calculate total unique packages installed on at least one host
const totalPackages = await prisma.packages.count({
where: {
host_packages: {
some: {}, // At least one host has this package
},
},
});
// Calculate total hosts
const totalHosts = await prisma.hosts.count({
where: {
status: "active",
},
});
// Calculate hosts needing updates (distinct hosts with packages needing updates)
const hostsNeedingUpdates = await prisma.hosts.count({
where: {
status: "active",
host_packages: {
some: {
needs_update: true,
},
},
},
});
// Store statistics in database
await prisma.system_statistics.create({
data: {
id: uuidv4(),
unique_packages_count: uniquePackagesCount,
unique_security_count: uniqueSecurityCount,
total_packages: totalPackages,
total_hosts: totalHosts,
hosts_needing_updates: hostsNeedingUpdates,
timestamp: new Date(),
},
});
const executionTime = Date.now() - startTime;
console.log(
`✅ System statistics collection completed in ${executionTime}ms - Unique packages: ${uniquePackagesCount}, Security: ${uniqueSecurityCount}, Total hosts: ${totalHosts}`,
);
return {
success: true,
uniquePackagesCount,
uniqueSecurityCount,
totalPackages,
totalHosts,
hostsNeedingUpdates,
executionTime,
};
} catch (error) {
const executionTime = Date.now() - startTime;
console.error(
`❌ System statistics collection failed after ${executionTime}ms:`,
error.message,
);
throw error;
}
}
/**
* Schedule recurring system statistics collection (every 30 minutes)
*/
async schedule() {
const job = await this.queueManager.queues[this.queueName].add(
"system-statistics",
{},
{
repeat: { pattern: "*/30 * * * *" }, // Every 30 minutes
jobId: "system-statistics-recurring",
},
);
console.log("✅ System statistics collection scheduled (every 30 minutes)");
return job;
}
/**
* Trigger manual system statistics collection
*/
async triggerManual() {
const job = await this.queueManager.queues[this.queueName].add(
"system-statistics-manual",
{},
{ priority: 1 },
);
console.log("✅ Manual system statistics collection triggered");
return job;
}
}
module.exports = SystemStatistics;
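Note: because the repeatable job is added with a fixed jobId and repeat pattern, BullMQ treats re-registration on every boot as idempotent rather than stacking a new 30-minute job per restart. A condensed standalone equivalent (connection details are placeholders):

    const { Queue } = require("bullmq");

    async function scheduleSystemStatistics() {
      const queue = new Queue("system-statistics", {
        connection: { host: "127.0.0.1", port: 6379 }, // placeholder Redis
      });
      // Same name + jobId + repeat pattern -> no duplicate schedules:
      await queue.add(
        "system-statistics",
        {},
        { repeat: { pattern: "*/30 * * * *" }, jobId: "system-statistics-recurring" },
      );
    }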

View File

@@ -0,0 +1,107 @@
/**
* Timezone utility functions for consistent timestamp handling
*
* This module provides timezone-aware timestamp functions that use
* the TZ environment variable for consistent timezone handling across
* the application. If TZ is not set, defaults to UTC.
*/
/**
* Get the configured timezone from environment variable
* Defaults to UTC if not set
* @returns {string} Timezone string (e.g., 'UTC', 'America/New_York', 'Europe/London')
*/
function get_timezone() {
return process.env.TZ || process.env.TIMEZONE || "UTC";
}
/**
* Get current date/time in the configured timezone
* Returns a Date object that represents the current time in the configured timezone
* @returns {Date} Current date/time
*/
function get_current_time() {
const tz = get_timezone();
// If UTC, use Date.now() which is always UTC
if (tz === "UTC" || tz === "Etc/UTC") {
return new Date();
}
// For other timezones, we need to create a date string with timezone info
// and parse it. This ensures the date represents the correct time in that timezone.
// For database storage, we always store UTC timestamps
// The timezone is primarily used for display purposes
return new Date();
}
/**
* Get current timestamp in milliseconds (UTC)
* This is always UTC for database storage consistency
* @returns {number} Current timestamp in milliseconds
*/
function get_current_timestamp() {
return Date.now();
}
/**
* Format a date to ISO string in the configured timezone
* @param {Date} date - Date to format (defaults to now)
* @returns {string} ISO formatted date string
*/
function format_date_iso(date = null) {
const d = date || get_current_time();
return d.toISOString();
}
/**
* Parse a date string and return a Date object
* Handles various date formats and timezone conversions
* @param {string} date_string - Date string to parse
* @param {Date} fallback - Fallback date if parsing fails (defaults to now)
* @returns {Date} Parsed date or fallback
*/
function parse_date(date_string, fallback = null) {
if (!date_string) {
return fallback || get_current_time();
}
try {
const date = new Date(date_string);
if (Number.isNaN(date.getTime())) {
return fallback || get_current_time();
}
return date;
} catch (_error) {
return fallback || get_current_time();
}
}
/**
* Convert a date to the configured timezone for display
* @param {Date} date - Date to convert
* @returns {string} Formatted date string in configured timezone
*/
function format_date_for_display(date) {
const tz = get_timezone();
const formatter = new Intl.DateTimeFormat("en-US", {
timeZone: tz,
year: "numeric",
month: "2-digit",
day: "2-digit",
hour: "2-digit",
minute: "2-digit",
second: "2-digit",
hour12: false,
});
return formatter.format(date);
}
module.exports = {
get_timezone,
get_current_time,
get_current_timestamp,
format_date_iso,
parse_date,
format_date_for_display,
};
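Note: one subtlety for callers — every fallback path reads fallback || get_current_time(), so passing null as the fallback (as the Docker route does for started_at) still yields the current time when the input is truthy but unparseable; only the explicit : null branch at the call site produces null. Quick demonstration:

    const { parse_date } = require("./timezone"); // relative to this module

    parse_date("2025-11-14T22:53:48Z", null); // => Date for that instant
    parse_date("", null);                     // => current time (falsy input)
    parse_date("garbage", null);              // => current time, not null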

View File

@@ -1,5 +1,5 @@
 {
-  "$schema": "https://biomejs.dev/schemas/2.3.0/schema.json",
+  "$schema": "https://biomejs.dev/schemas/2.3.4/schema.json",
   "vcs": {
     "enabled": true,
     "clientKind": "git",

View File

@@ -20,9 +20,7 @@ COPY --chown=node:node agents ./agents_backup
 COPY --chown=node:node agents ./agents
 COPY --chmod=755 docker/backend.docker-entrypoint.sh ./entrypoint.sh
-WORKDIR /app/backend
-RUN npm ci --ignore-scripts && npx prisma generate
+RUN npm install --workspace=backend --ignore-scripts && cd backend && npx prisma generate
 EXPOSE 3001
@@ -44,13 +42,11 @@ WORKDIR /app
 COPY --chown=node:node package*.json ./
 COPY --chown=node:node backend/ ./backend/
-WORKDIR /app/backend
 RUN npm cache clean --force &&\
     rm -rf node_modules ~/.npm /root/.npm &&\
-    npm ci --ignore-scripts --legacy-peer-deps --no-audit --prefer-online --fetch-retries=3 --fetch-retry-mintimeout=20000 --fetch-retry-maxtimeout=120000 &&\
-    PRISMA_CLI_BINARY_TYPE=binary npm run db:generate &&\
-    npm prune --omit=dev &&\
+    npm install --workspace=backend --ignore-scripts --legacy-peer-deps --no-audit --prefer-online --fetch-retries=3 --fetch-retry-mintimeout=20000 --fetch-retry-maxtimeout=120000 &&\
+    cd backend && PRISMA_CLI_BINARY_TYPE=binary npm run db:generate &&\
+    cd .. && npm prune --omit=dev --workspace=backend &&\
     npm cache clean --force
 # Production stage

View File

@@ -6,7 +6,7 @@ WORKDIR /app
 COPY package*.json ./
 COPY frontend/ ./frontend/
-RUN npm ci --ignore-scripts
+RUN npm install --workspace=frontend --ignore-scripts
 WORKDIR /app/frontend

View File

@@ -6,5 +6,5 @@ VITE_API_URL=http://localhost:3001/api/v1
 # Application Metadata
 VITE_APP_NAME=PatchMon
-VITE_APP_VERSION=1.3.1
+VITE_APP_VERSION=1.3.4

View File

@@ -1,7 +1,7 @@
 {
   "name": "patchmon-frontend",
   "private": true,
-  "version": "1.3.2",
+  "version": "1.3.4",
   "license": "AGPL-3.0",
   "type": "module",
   "scripts": {
@@ -35,7 +35,7 @@
   "@vitejs/plugin-react": "^4.3.4",
   "autoprefixer": "^10.4.20",
   "postcss": "^8.5.6",
-  "tailwindcss": "^3.4.17",
+  "tailwindcss": "^4.0.0",
   "vite": "^7.1.5"
 },
 "overrides": {

View File

@@ -120,7 +120,6 @@ const Layout = ({ children }) => {
   name: "Automation",
   href: "/automation",
   icon: RefreshCw,
-  new: true,
 });
 if (canViewReports()) {

View File

@@ -196,6 +196,25 @@ const Automation = () => {
       year: "numeric",
     });
   }
+  if (schedule === "Every 30 minutes") {
+    const now = new Date();
+    const nextRun = new Date(now);
+    // Round up to the next 30-minute mark
+    const minutes = now.getMinutes();
+    if (minutes < 30) {
+      nextRun.setMinutes(30, 0, 0);
+    } else {
+      nextRun.setHours(nextRun.getHours() + 1, 0, 0, 0);
+    }
+    return nextRun.toLocaleString([], {
+      hour12: true,
+      hour: "numeric",
+      minute: "2-digit",
+      day: "numeric",
+      month: "numeric",
+      year: "numeric",
+    });
+  }
   return "Unknown";
 };
@@ -236,6 +255,18 @@ const Automation = () => {
     nextHour.setHours(nextHour.getHours() + 1, 0, 0, 0);
     return nextHour.getTime();
   }
+  if (schedule === "Every 30 minutes") {
+    const now = new Date();
+    const nextRun = new Date(now);
+    // Round up to the next 30-minute mark
+    const minutes = now.getMinutes();
+    if (minutes < 30) {
+      nextRun.setMinutes(30, 0, 0);
+    } else {
+      nextRun.setHours(nextRun.getHours() + 1, 0, 0, 0);
+    }
+    return nextRun.getTime();
+  }
   return Number.MAX_SAFE_INTEGER; // Unknown schedules go to bottom
 };
@@ -294,6 +325,8 @@ const Automation = () => {
     endpoint = "/automation/trigger/docker-inventory-cleanup";
   } else if (jobType === "agent-collection") {
     endpoint = "/automation/trigger/agent-collection";
+  } else if (jobType === "system-statistics") {
+    endpoint = "/automation/trigger/system-statistics";
   }
   const _response = await api.post(endpoint, data);
@@ -615,6 +648,10 @@ const Automation = () => {
     automation.queue.includes("agent-commands")
   ) {
     triggerManualJob("agent-collection");
+  } else if (
+    automation.queue.includes("system-statistics")
+  ) {
+    triggerManualJob("system-statistics");
   }
 }}
 className="inline-flex items-center justify-center w-6 h-6 border border-transparent rounded text-white bg-green-600 hover:bg-green-700 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-green-500 transition-colors duration-200"

View File

@@ -55,6 +55,8 @@ const Dashboard = () => {
   const [cardPreferences, setCardPreferences] = useState([]);
   const [packageTrendsPeriod, setPackageTrendsPeriod] = useState("1"); // days
   const [packageTrendsHost, setPackageTrendsHost] = useState("all"); // host filter
+  const [systemStatsJobId, setSystemStatsJobId] = useState(null); // Track job ID for system statistics
+  const [isTriggeringJob, setIsTriggeringJob] = useState(false);
   const navigate = useNavigate();
   const { isDark } = useTheme();
   const { user } = useAuth();
@@ -772,56 +774,108 @@ const Dashboard = () => {
           <h3 className="text-lg font-medium text-secondary-900 dark:text-white">
             Package Trends Over Time
           </h3>
-          <div className="flex items-center gap-3">
-            {/* Refresh Button */}
-            <button
-              type="button"
-              onClick={() => refetchPackageTrends()}
-              disabled={packageTrendsFetching}
-              className="px-3 py-1.5 text-sm border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-secondary-900 dark:text-white hover:bg-secondary-50 dark:hover:bg-secondary-700 focus:ring-2 focus:ring-primary-500 focus:border-primary-500 disabled:opacity-50 disabled:cursor-not-allowed flex items-center gap-2"
-              title="Refresh data"
-            >
-              <RefreshCw
-                className={`h-4 w-4 ${packageTrendsFetching ? "animate-spin" : ""}`}
-              />
-              Refresh
-            </button>
+          <div className="flex flex-col gap-2">
+            <div className="flex items-center gap-3">
+              {/* Refresh Button */}
+              <button
+                type="button"
+                onClick={async () => {
+                  if (packageTrendsHost === "all") {
+                    // For "All Hosts", trigger system statistics collection job
+                    setIsTriggeringJob(true);
+                    try {
+                      const response =
+                        await dashboardAPI.triggerSystemStatistics();
+                      if (response.data?.data?.jobId) {
+                        setSystemStatsJobId(response.data.data.jobId);
+                        // Wait a moment for the job to complete, then refetch
+                        setTimeout(() => {
+                          refetchPackageTrends();
+                        }, 2000);
+                        // Clear the job ID message after 2 seconds
+                        setTimeout(() => {
+                          setSystemStatsJobId(null);
+                        }, 2000);
+                      }
+                    } catch (error) {
+                      console.error(
+                        "Failed to trigger system statistics:",
+                        error,
+                      );
+                      // Still refetch data even if job trigger fails
+                      refetchPackageTrends();
+                    } finally {
+                      setIsTriggeringJob(false);
+                    }
+                  } else {
+                    // For individual host, just refetch the data
+                    refetchPackageTrends();
+                  }
+                }}
+                disabled={packageTrendsFetching || isTriggeringJob}
+                className="px-3 py-1.5 text-sm border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-secondary-900 dark:text-white hover:bg-secondary-50 dark:hover:bg-secondary-700 focus:ring-2 focus:ring-primary-500 focus:border-primary-500 disabled:opacity-50 disabled:cursor-not-allowed flex items-center gap-2"
+                title={
+                  packageTrendsHost === "all"
+                    ? "Trigger system statistics collection"
+                    : "Refresh data"
+                }
+              >
+                <RefreshCw
+                  className={`h-4 w-4 ${
+                    packageTrendsFetching || isTriggeringJob
+                      ? "animate-spin"
+                      : ""
+                  }`}
+                />
+                Refresh
+              </button>
              {/* Period Selector */}
              <select
                value={packageTrendsPeriod}
                onChange={(e) => setPackageTrendsPeriod(e.target.value)}
                className="px-3 py-1.5 text-sm border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-secondary-900 dark:text-white focus:ring-2 focus:ring-primary-500 focus:border-primary-500"
              >
                <option value="1">Last 24 hours</option>
                <option value="7">Last 7 days</option>
                <option value="30">Last 30 days</option>
                <option value="90">Last 90 days</option>
                <option value="180">Last 6 months</option>
                <option value="365">Last year</option>
              </select>
              {/* Host Selector */}
              <select
                value={packageTrendsHost}
-                onChange={(e) => setPackageTrendsHost(e.target.value)}
+                onChange={(e) => {
+                  setPackageTrendsHost(e.target.value);
+                  // Clear job ID message when host selection changes
+                  setSystemStatsJobId(null);
+                }}
                className="px-3 py-1.5 text-sm border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-secondary-900 dark:text-white focus:ring-2 focus:ring-primary-500 focus:border-primary-500"
              >
                <option value="all">All Hosts</option>
                {packageTrendsData?.hosts?.length > 0 ? (
                  packageTrendsData.hosts.map((host) => (
                    <option key={host.id} value={host.id}>
                      {host.friendly_name || host.hostname}
                    </option>
                  ))
                ) : (
                  <option disabled>
                    {packageTrendsLoading
                      ? "Loading hosts..."
                      : "No hosts available"}
                  </option>
                )}
              </select>
+            </div>
+            {/* Job ID Message */}
+            {systemStatsJobId && packageTrendsHost === "all" && (
+              <p className="text-xs text-secondary-600 dark:text-secondary-400 ml-1">
+                Ran collection job #{systemStatsJobId}
+              </p>
+            )}
           </div>
         </div>
@@ -1167,13 +1221,40 @@ const Dashboard = () => {
   title: (context) => {
     const label = context[0].label;
+    // Handle "Now" label
+    if (label === "Now") {
+      return "Now";
+    }
     // Handle empty or invalid labels
     if (!label || typeof label !== "string") {
       return "Unknown Date";
     }
+    // Check if it's a full ISO timestamp (for "Last 24 hours")
+    // Format: "2025-01-15T14:30:00.000Z" or "2025-01-15T14:30:00.000"
+    if (label.includes("T") && label.includes(":")) {
+      try {
+        const date = new Date(label);
+        // Check if date is valid
+        if (Number.isNaN(date.getTime())) {
+          return label; // Return original label if date is invalid
+        }
+        // Format full ISO timestamp with date and time
+        return date.toLocaleDateString("en-US", {
+          month: "short",
+          day: "numeric",
+          hour: "numeric",
+          minute: "2-digit",
+          hour12: true,
+        });
+      } catch (_error) {
+        return label; // Return original label if parsing fails
+      }
+    }
     // Format hourly labels (e.g., "2025-10-07T14" -> "Oct 7, 2:00 PM")
-    if (label.includes("T")) {
+    if (label.includes("T") && !label.includes(":")) {
       try {
         const date = new Date(`${label}:00:00`);
         // Check if date is valid
@@ -1233,13 +1314,41 @@ const Dashboard = () => {
   callback: function (value, _index, _ticks) {
     const label = this.getLabelForValue(value);
+    // Handle "Now" label
+    if (label === "Now") {
+      return "Now";
+    }
     // Handle empty or invalid labels
     if (!label || typeof label !== "string") {
       return "Unknown";
     }
+    // Check if it's a full ISO timestamp (for "Last 24 hours")
+    // Format: "2025-01-15T14:30:00.000Z" or "2025-01-15T14:30:00.000"
+    if (label.includes("T") && label.includes(":")) {
+      try {
+        const date = new Date(label);
+        // Check if date is valid
+        if (Number.isNaN(date.getTime())) {
+          return label; // Return original label if date is invalid
+        }
+        // Extract hour from full ISO timestamp
+        const hourNum = date.getHours();
+        return hourNum === 0
+          ? "12 AM"
+          : hourNum < 12
+            ? `${hourNum} AM`
+            : hourNum === 12
+              ? "12 PM"
+              : `${hourNum - 12} PM`;
+      } catch (_error) {
+        return label; // Return original label if parsing fails
+      }
+    }
     // Format hourly labels (e.g., "2025-10-07T14" -> "2 PM")
-    if (label.includes("T")) {
+    if (label.includes("T") && !label.includes(":")) {
       try {
         const hour = label.split("T")[1];
         const hourNum = parseInt(hour, 10);
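Note: the tooltip and tick formatters now distinguish three label shapes by probing for "T" and ":" — the order of the checks matters, since a full ISO string also contains "T". In summary (the daily shape is inferred from the fall-through):

    function labelKind(label) {
      if (label === "Now") return "now-marker";
      if (label.includes("T") && label.includes(":")) return "full-iso"; // "2025-01-15T14:30:00.000Z"
      if (label.includes("T")) return "hourly";                          // "2025-10-07T14"
      return "daily";                                                    // e.g. "2025-10-07" (assumed)
    }

    labelKind("Now");                      // "now-marker"
    labelKind("2025-01-15T14:30:00.000Z"); // "full-iso"
    labelKind("2025-10-07T14");            // "hourly"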

View File

@@ -281,6 +281,67 @@ const HostDetail = () => {
   },
 });
// Fetch integration status
const {
data: integrationsData,
isLoading: isLoadingIntegrations,
refetch: refetchIntegrations,
} = useQuery({
queryKey: ["host-integrations", hostId],
queryFn: () =>
adminHostsAPI.getIntegrations(hostId).then((res) => res.data),
staleTime: 30 * 1000, // 30 seconds
refetchOnWindowFocus: false,
enabled: !!hostId && activeTab === "integrations",
});
// Refetch integrations when WebSocket status changes (e.g., after agent restart)
useEffect(() => {
if (
wsStatus?.connected &&
activeTab === "integrations" &&
integrationsData?.data?.connected === false
) {
// Agent just reconnected, refetch integrations to get updated connection status
refetchIntegrations();
}
}, [
wsStatus?.connected,
activeTab,
integrationsData?.data?.connected,
refetchIntegrations,
]);
// Toggle integration mutation
const toggleIntegrationMutation = useMutation({
mutationFn: ({ integrationName, enabled }) =>
adminHostsAPI
.toggleIntegration(hostId, integrationName, enabled)
.then((res) => res.data),
onSuccess: (data) => {
// Optimistically update the cache with the new state
queryClient.setQueryData(["host-integrations", hostId], (oldData) => {
if (!oldData) return oldData;
return {
...oldData,
data: {
...oldData.data,
integrations: {
...oldData.data.integrations,
[data.data.integration]: data.data.enabled,
},
},
};
});
// Also invalidate to ensure we get fresh data
queryClient.invalidateQueries(["host-integrations", hostId]);
},
onError: () => {
// On error, refetch to get the actual state
refetchIntegrations();
},
});
 const handleDeleteHost = async () => {
   if (
     window.confirm(
@@ -666,6 +727,17 @@ const HostDetail = () => {
 >
   Notes
 </button>
<button
type="button"
onClick={() => handleTabChange("integrations")}
className={`px-4 py-2 text-sm font-medium ${
activeTab === "integrations"
? "text-primary-600 dark:text-primary-400 border-b-2 border-primary-500"
: "text-secondary-500 dark:text-secondary-400 hover:text-secondary-700 dark:hover:text-secondary-300"
}`}
>
Integrations
</button>
 </div>
 <div className="p-4">
@@ -1446,6 +1518,101 @@ const HostDetail = () => {
 {/* Agent Queue */}
 {activeTab === "queue" && <AgentQueueTab hostId={hostId} />}
{/* Integrations */}
{activeTab === "integrations" && (
<div className="max-w-2xl space-y-4">
{isLoadingIntegrations ? (
<div className="flex items-center justify-center h-32">
<RefreshCw className="h-6 w-6 animate-spin text-primary-600" />
</div>
) : (
<div className="space-y-4">
{/* Docker Integration */}
<div className="bg-secondary-50 dark:bg-secondary-700 rounded-lg p-4 border border-secondary-200 dark:border-secondary-600">
<div className="flex items-start justify-between gap-4">
<div className="flex-1">
<div className="flex items-center gap-3 mb-2">
<Database className="h-5 w-5 text-primary-600 dark:text-primary-400" />
<h4 className="text-sm font-medium text-secondary-900 dark:text-white">
Docker
</h4>
{integrationsData?.data?.integrations?.docker ? (
<span className="inline-flex items-center px-2 py-0.5 rounded text-xs font-semibold bg-green-100 text-green-800 dark:bg-green-900 dark:text-green-200">
Enabled
</span>
) : (
<span className="inline-flex items-center px-2 py-0.5 rounded text-xs font-semibold bg-gray-200 text-gray-600 dark:bg-gray-600 dark:text-gray-400">
Disabled
</span>
)}
</div>
<p className="text-xs text-secondary-600 dark:text-secondary-300">
Monitor Docker containers, images, volumes, and
networks. Collects real-time container status
events.
</p>
</div>
<div className="flex-shrink-0">
<button
type="button"
onClick={() =>
toggleIntegrationMutation.mutate({
integrationName: "docker",
enabled:
!integrationsData?.data?.integrations?.docker,
})
}
disabled={
toggleIntegrationMutation.isPending ||
!wsStatus?.connected
}
title={
!wsStatus?.connected
? "Agent is not connected"
: integrationsData?.data?.integrations?.docker
? "Disable Docker integration"
: "Enable Docker integration"
}
className={`relative inline-flex h-5 w-9 items-center rounded-full transition-colors focus:outline-none focus:ring-2 focus:ring-primary-500 focus:ring-offset-2 ${
integrationsData?.data?.integrations?.docker
? "bg-primary-600 dark:bg-primary-500"
: "bg-secondary-200 dark:bg-secondary-600"
} ${
toggleIntegrationMutation.isPending ||
!integrationsData?.data?.connected
? "opacity-50 cursor-not-allowed"
: ""
}`}
>
<span
className={`inline-block h-3 w-3 transform rounded-full bg-white transition-transform ${
integrationsData?.data?.integrations?.docker
? "translate-x-5"
: "translate-x-1"
}`}
/>
</button>
</div>
</div>
{!wsStatus?.connected && (
<p className="text-xs text-warning-600 dark:text-warning-400 mt-2">
Agent must be connected via WebSocket to toggle
integrations
</p>
)}
{toggleIntegrationMutation.isPending && (
<p className="text-xs text-secondary-600 dark:text-secondary-400 mt-2">
Updating integration...
</p>
)}
</div>
{/* Future integrations can be added here with the same pattern */}
</div>
)}
</div>
)}
 </div>
 </div>
 </div>
@@ -1639,7 +1806,8 @@ const CredentialsModal = ({ host, isOpen, onClose }) => {
 >
   <option value="amd64">AMD64 (x86_64) - Default</option>
   <option value="386">386 (i386) - 32-bit</option>
-  <option value="arm64">ARM64 (aarch64) - ARM</option>
+  <option value="arm64">ARM64 (aarch64) - ARM 64-bit</option>
+  <option value="arm">ARM (armv7l/armv6l) - ARM 32-bit</option>
 </select>
 <p className="text-xs text-primary-600 dark:text-primary-400 mt-1">
   Select the architecture of the target host
@@ -1649,7 +1817,7 @@ const CredentialsModal = ({ host, isOpen, onClose }) => {
<div className="flex items-center gap-2"> <div className="flex items-center gap-2">
<input <input
type="text" type="text"
value={`curl ${getCurlFlags()} ${getInstallUrl()} -H "X-API-ID: ${host.api_id}" -H "X-API-KEY: ${host.api_key}" | bash`} value={`curl ${getCurlFlags()} ${getInstallUrl()} -H "X-API-ID: ${host.api_id}" -H "X-API-KEY: ${host.api_key}" | sh`}
readOnly readOnly
className="flex-1 px-3 py-2 border border-primary-300 dark:border-primary-600 rounded-md bg-white dark:bg-secondary-800 text-sm font-mono text-secondary-900 dark:text-white" className="flex-1 px-3 py-2 border border-primary-300 dark:border-primary-600 rounded-md bg-white dark:bg-secondary-800 text-sm font-mono text-secondary-900 dark:text-white"
/> />
@@ -1657,7 +1825,7 @@ const CredentialsModal = ({ host, isOpen, onClose }) => {
type="button" type="button"
onClick={() => onClick={() =>
copyToClipboard( copyToClipboard(
`curl ${getCurlFlags()} ${getInstallUrl()} -H "X-API-ID: ${host.api_id}" -H "X-API-KEY: ${host.api_key}" | bash`, `curl ${getCurlFlags()} ${getInstallUrl()} -H "X-API-ID: ${host.api_id}" -H "X-API-KEY: ${host.api_key}" | sh`,
) )
} }
className="btn-primary flex items-center gap-1" className="btn-primary flex items-center gap-1"
@@ -1667,270 +1835,6 @@ const CredentialsModal = ({ host, isOpen, onClose }) => {
</button>
</div>
</div>
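
Note the quiet switch from "| bash" to "| sh" in both install strings: Alpine ships busybox ash rather than bash, so a piped install script has to stay POSIX-sh compatible. An illustrative guard in that spirit (the real script's detection logic is not shown in this diff):

#!/bin/sh
# Illustrative only: a POSIX-sh check an installer can use instead of
# relying on bash-only features ([[ ]], arrays, and so on).
if [ -x /sbin/apk ]; then
	pkg_mgr="apk"        # Alpine
elif command -v apt-get >/dev/null 2>&1; then
	pkg_mgr="apt"        # Debian/Ubuntu
else
	pkg_mgr="unknown"
fi
echo "Detected package manager: $pkg_mgr"
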
<div className="bg-secondary-50 dark:bg-secondary-700 rounded-lg p-4">
<h4 className="text-sm font-medium text-secondary-900 dark:text-white mb-2">
Manual Installation
</h4>
<p className="text-sm text-secondary-600 dark:text-secondary-300 mb-3">
If you prefer to install manually, follow these steps:
</p>
<div className="space-y-3">
<div className="bg-white dark:bg-secondary-800 rounded-md p-3 border border-secondary-200 dark:border-secondary-600">
<h5 className="text-sm font-medium text-secondary-900 dark:text-white mb-2">
1. Create Configuration Directory
</h5>
<div className="flex items-center gap-2">
<input
type="text"
value="sudo mkdir -p /etc/patchmon"
readOnly
className="flex-1 px-3 py-2 border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-sm font-mono text-secondary-900 dark:text-white"
/>
<button
type="button"
onClick={() =>
copyToClipboard("sudo mkdir -p /etc/patchmon")
}
className="btn-secondary flex items-center gap-1"
>
<Copy className="h-4 w-4" />
Copy
</button>
</div>
</div>
<div className="bg-white dark:bg-secondary-800 rounded-md p-3 border border-secondary-200 dark:border-secondary-600">
<h5 className="text-sm font-medium text-secondary-900 dark:text-white mb-2">
2. Download and Install Agent Binary
</h5>
<div className="flex items-center gap-2">
<input
type="text"
value={`curl ${getCurlFlags()} -o /usr/local/bin/patchmon-agent ${serverUrl}/api/v1/hosts/agent/download?arch=${architecture} -H "X-API-ID: ${host.api_id}" -H "X-API-KEY: ${host.api_key}" && sudo chmod +x /usr/local/bin/patchmon-agent`}
readOnly
className="flex-1 px-3 py-2 border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-sm font-mono text-secondary-900 dark:text-white"
/>
<button
type="button"
onClick={() =>
copyToClipboard(
`curl ${getCurlFlags()} -o /usr/local/bin/patchmon-agent ${serverUrl}/api/v1/hosts/agent/download?arch=${architecture} -H "X-API-ID: ${host.api_id}" -H "X-API-KEY: ${host.api_key}" && sudo chmod +x /usr/local/bin/patchmon-agent`,
)
}
className="btn-secondary flex items-center gap-1"
>
<Copy className="h-4 w-4" />
Copy
</button>
</div>
</div>
<div className="bg-white dark:bg-secondary-800 rounded-md p-3 border border-secondary-200 dark:border-secondary-600">
<h5 className="text-sm font-medium text-secondary-900 dark:text-white mb-2">
3. Configure Credentials
</h5>
<div className="flex items-center gap-2">
<input
type="text"
value={`sudo /usr/local/bin/patchmon-agent config set-api "${host.api_id}" "${host.api_key}" "${serverUrl}"`}
readOnly
className="flex-1 px-3 py-2 border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-sm font-mono text-secondary-900 dark:text-white"
/>
<button
type="button"
onClick={() =>
copyToClipboard(
`sudo /usr/local/bin/patchmon-agent config set-api "${host.api_id}" "${host.api_key}" "${serverUrl}"`,
)
}
className="btn-secondary flex items-center gap-1"
>
<Copy className="h-4 w-4" />
Copy
</button>
</div>
</div>
<div className="bg-white dark:bg-secondary-800 rounded-md p-3 border border-secondary-200 dark:border-secondary-600">
<h5 className="text-sm font-medium text-secondary-900 dark:text-white mb-2">
4. Test Configuration
</h5>
<div className="flex items-center gap-2">
<input
type="text"
value="sudo /usr/local/bin/patchmon-agent ping"
readOnly
className="flex-1 px-3 py-2 border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-sm font-mono text-secondary-900 dark:text-white"
/>
<button
type="button"
onClick={() =>
copyToClipboard(
"sudo /usr/local/bin/patchmon-agent ping",
)
}
className="btn-secondary flex items-center gap-1"
>
<Copy className="h-4 w-4" />
Copy
</button>
</div>
</div>
<div className="bg-white dark:bg-secondary-800 rounded-md p-3 border border-secondary-200 dark:border-secondary-600">
<h5 className="text-sm font-medium text-secondary-900 dark:text-white mb-2">
5. Send Initial Data
</h5>
<div className="flex items-center gap-2">
<input
type="text"
value="sudo /usr/local/bin/patchmon-agent report"
readOnly
className="flex-1 px-3 py-2 border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-sm font-mono text-secondary-900 dark:text-white"
/>
<button
type="button"
onClick={() =>
copyToClipboard(
"sudo /usr/local/bin/patchmon-agent report",
)
}
className="btn-secondary flex items-center gap-1"
>
<Copy className="h-4 w-4" />
Copy
</button>
</div>
</div>
<div className="bg-white dark:bg-secondary-800 rounded-md p-3 border border-secondary-200 dark:border-secondary-600">
<h5 className="text-sm font-medium text-secondary-900 dark:text-white mb-2">
6. Create Systemd Service File
</h5>
<div className="flex items-center gap-2">
<input
type="text"
value={`sudo tee /etc/systemd/system/patchmon-agent.service > /dev/null << 'EOF'
[Unit]
Description=PatchMon Agent Service
After=network.target
Wants=network.target
[Service]
Type=simple
User=root
ExecStart=/usr/local/bin/patchmon-agent serve
Restart=always
RestartSec=10
WorkingDirectory=/etc/patchmon
# Logging
StandardOutput=journal
StandardError=journal
SyslogIdentifier=patchmon-agent
[Install]
WantedBy=multi-user.target
EOF`}
readOnly
className="flex-1 px-3 py-2 border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-sm font-mono text-secondary-900 dark:text-white"
/>
<button
type="button"
onClick={() =>
copyToClipboard(
`sudo tee /etc/systemd/system/patchmon-agent.service > /dev/null << 'EOF'
[Unit]
Description=PatchMon Agent Service
After=network.target
Wants=network.target
[Service]
Type=simple
User=root
ExecStart=/usr/local/bin/patchmon-agent serve
Restart=always
RestartSec=10
WorkingDirectory=/etc/patchmon
# Logging
StandardOutput=journal
StandardError=journal
SyslogIdentifier=patchmon-agent
[Install]
WantedBy=multi-user.target
EOF`,
)
}
className="btn-secondary flex items-center gap-1"
>
<Copy className="h-4 w-4" />
Copy
</button>
</div>
</div>
<div className="bg-white dark:bg-secondary-800 rounded-md p-3 border border-secondary-200 dark:border-secondary-600">
<h5 className="text-sm font-medium text-secondary-900 dark:text-white mb-2">
7. Enable and Start Service
</h5>
<div className="flex items-center gap-2">
<input
type="text"
value="sudo systemctl daemon-reload && sudo systemctl enable patchmon-agent && sudo systemctl start patchmon-agent"
readOnly
className="flex-1 px-3 py-2 border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-sm font-mono text-secondary-900 dark:text-white"
/>
<button
type="button"
onClick={() =>
copyToClipboard(
"sudo systemctl daemon-reload && sudo systemctl enable patchmon-agent && sudo systemctl start patchmon-agent",
)
}
className="btn-secondary flex items-center gap-1"
>
<Copy className="h-4 w-4" />
Copy
</button>
</div>
<p className="text-xs text-secondary-600 dark:text-secondary-400 mt-2">
This will start the agent service and establish WebSocket
connection for real-time communication
</p>
</div>
<div className="bg-white dark:bg-secondary-800 rounded-md p-3 border border-secondary-200 dark:border-secondary-600">
<h5 className="text-sm font-medium text-secondary-900 dark:text-white mb-2">
8. Verify Service Status
</h5>
<div className="flex items-center gap-2">
<input
type="text"
value="sudo systemctl status patchmon-agent"
readOnly
className="flex-1 px-3 py-2 border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-sm font-mono text-secondary-900 dark:text-white"
/>
<button
type="button"
onClick={() =>
copyToClipboard("sudo systemctl status patchmon-agent")
}
className="btn-secondary flex items-center gap-1"
>
<Copy className="h-4 w-4" />
Copy
</button>
</div>
<p className="text-xs text-secondary-600 dark:text-secondary-400 mt-2">
Check that the service is running and WebSocket connection
is established
</p>
</div>
</div>
</div>
</div>
)}
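
For reference, the manual installation steps removed above condense to the following sequence. This is a sketch: the API credentials, server URL, and curl flags are placeholders for the values the modal substitutes (getCurlFlags(), host.api_id, host.api_key).

#!/bin/sh
# Condensed form of manual steps 1-8 above; replace the placeholder values.
set -e
API_ID="<api-id>"
API_KEY="<api-key>"
SERVER_URL="https://patchmon.example.com"   # assumption: your instance URL
ARCH="amd64"

sudo mkdir -p /etc/patchmon
sudo curl -o /usr/local/bin/patchmon-agent \
	"$SERVER_URL/api/v1/hosts/agent/download?arch=$ARCH" \
	-H "X-API-ID: $API_ID" -H "X-API-KEY: $API_KEY"
sudo chmod +x /usr/local/bin/patchmon-agent
sudo /usr/local/bin/patchmon-agent config set-api "$API_ID" "$API_KEY" "$SERVER_URL"
sudo /usr/local/bin/patchmon-agent ping      # verify credentials
sudo /usr/local/bin/patchmon-agent report    # send initial inventory
sudo tee /etc/systemd/system/patchmon-agent.service > /dev/null << 'EOF'
[Unit]
Description=PatchMon Agent Service
After=network.target
Wants=network.target

[Service]
Type=simple
User=root
ExecStart=/usr/local/bin/patchmon-agent serve
Restart=always
RestartSec=10
WorkingDirectory=/etc/patchmon
StandardOutput=journal
StandardError=journal
SyslogIdentifier=patchmon-agent

[Install]
WantedBy=multi-user.target
EOF
sudo systemctl daemon-reload
sudo systemctl enable patchmon-agent
sudo systemctl start patchmon-agent
sudo systemctl status patchmon-agent
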

View File

@@ -237,8 +237,14 @@ const Repositories = () => {
// Handle special cases
if (sortField === "security") {
-aValue = a.isSecure ? "Secure" : "Insecure";
-bValue = b.isSecure ? "Secure" : "Insecure";
+// Use the same logic as filtering to determine isSecure
+const aIsSecure =
+a.isSecure !== undefined ? a.isSecure : a.url.startsWith("https://");
+const bIsSecure =
+b.isSecure !== undefined ? b.isSecure : b.url.startsWith("https://");
+// Sort by boolean: true (Secure) comes before false (Insecure) when ascending
+aValue = aIsSecure ? 1 : 0;
+bValue = bIsSecure ? 1 : 0;
} else if (sortField === "status") {
aValue = a.is_active ? "Active" : "Inactive";
bValue = b.is_active ? "Active" : "Inactive";
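
The change above fixes two things at once: it falls back to the URL scheme when isSecure is missing (matching the filter logic), and it compares 1/0 instead of the labels, since lexicographic order on "Secure"/"Insecure" would put Insecure first when ascending. A standalone illustration of the same comparator:

// Same fallback as the diff: trust isSecure when present, else the scheme
const secureValue = (repo) =>
	repo.isSecure !== undefined
		? (repo.isSecure ? 1 : 0)
		: (repo.url.startsWith("https://") ? 1 : 0);

const repos = [
	{ url: "http://mirror.example/debian" },
	{ url: "https://deb.example/ubuntu", isSecure: true },
	{ url: "https://pkgs.example/alpine" },
];

// Descending sort puts secure repositories first
repos.sort((a, b) => secureValue(b) - secureValue(a));
console.log(repos.map((r) => r.url));
// ["https://deb.example/ubuntu", "https://pkgs.example/alpine", "http://mirror.example/debian"]
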
@@ -535,12 +541,12 @@ const Repositories = () => {
{visibleColumns.map((column) => (
<th
key={column.id}
-className="px-4 py-2 text-center text-xs font-medium text-secondary-500 dark:text-secondary-300 uppercase tracking-wider"
+className="px-4 py-2 text-left text-xs font-medium text-secondary-500 dark:text-secondary-300 uppercase tracking-wider"
>
<button
type="button"
onClick={() => handleSort(column.id)}
-className="flex items-center gap-1 hover:text-secondary-700 dark:hover:text-secondary-200 transition-colors"
+className="flex items-center justify-start gap-1 hover:text-secondary-700 dark:hover:text-secondary-200 transition-colors"
>
{column.label}
{getSortIcon(column.id)}
@@ -559,7 +565,7 @@ const Repositories = () => {
{visibleColumns.map((column) => (
<td
key={column.id}
-className="px-4 py-2 whitespace-nowrap text-center"
+className="px-4 py-2 whitespace-nowrap text-left"
>
{renderCellContent(column, repo)}
</td>
@@ -622,7 +628,7 @@ const Repositories = () => {
? repo.isSecure
: repo.url.startsWith("https://");
return (
-<div className="flex items-center justify-center">
+<div className="flex items-center justify-start">
{isSecure ? (
<div className="flex items-center gap-1 text-green-600">
<Lock className="h-4 w-4" />
@@ -651,14 +657,14 @@ const Repositories = () => {
);
case "hostCount":
return (
-<div className="flex items-center justify-center gap-1 text-sm text-secondary-900 dark:text-white">
+<div className="flex items-center justify-start gap-1 text-sm text-secondary-900 dark:text-white">
<Server className="h-4 w-4" />
<span>{repo.hostCount}</span>
</div>
);
case "actions":
return (
-<div className="flex items-center justify-center">
+<div className="flex items-center justify-start">
<button
type="button"
onClick={(e) => handleDeleteRepository(repo, e)}

File diff suppressed because it is too large

View File

@@ -99,6 +99,8 @@ export const dashboardAPI = {
},
getRecentUsers: () => api.get("/dashboard/recent-users"),
getRecentCollection: () => api.get("/dashboard/recent-collection"),
+triggerSystemStatistics: () =>
+api.post("/automation/trigger/system-statistics"),
};
// Admin Hosts API (for management interface)
@@ -129,6 +131,11 @@ export const adminHostsAPI = {
api.patch(`/hosts/${hostId}/notes`, {
notes: notes,
}),
+getIntegrations: (hostId) => api.get(`/hosts/${hostId}/integrations`),
+toggleIntegration: (hostId, integrationName, enabled) =>
+api.post(`/hosts/${hostId}/integrations/${integrationName}/toggle`, {
+enabled,
+}),
};
// Host Groups API
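
Illustrative calls against the client methods added above. The import path is an assumption, and the response shape is inferred from the component code earlier in this diff (the api client is axios-style, so responses carry a data property):

import { adminHostsAPI, dashboardAPI } from "./utils/api"; // path is an assumption

async function enableDockerMonitoring(hostId) {
	// Re-run the dashboard statistics job on demand
	await dashboardAPI.triggerSystemStatistics();

	// Read current integration state, then enable Docker if it is off
	const response = await adminHostsAPI.getIntegrations(hostId);
	if (!response.data?.integrations?.docker) {
		await adminHostsAPI.toggleIntegration(hostId, "docker", true);
	}
}
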

package-lock.json (generated): 1104 lines changed

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
{
"name": "patchmon",
-"version": "1.3.2",
+"version": "1.3.4",
"description": "Linux Patch Monitoring System",
"license": "AGPL-3.0",
"private": true,
@@ -25,7 +25,7 @@
"lint:fix": "biome check --write ." "lint:fix": "biome check --write ."
}, },
"devDependencies": { "devDependencies": {
"@biomejs/biome": "^2.3.0", "@biomejs/biome": "^2.3.4",
"concurrently": "^8.2.2", "concurrently": "^8.2.2",
"lefthook": "^1.13.4" "lefthook": "^1.13.4"
}, },

View File

@@ -34,7 +34,7 @@ BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Global variables
-SCRIPT_VERSION="self-hosting-install.sh v1.3.2-selfhost-2025-10-31-1"
+SCRIPT_VERSION="self-hosting-install.sh v1.3.3-selfhost-2025-11-07"
DEFAULT_GITHUB_REPO="https://github.com/PatchMon/PatchMon.git"
FQDN=""
CUSTOM_FQDN=""
@@ -66,27 +66,27 @@ SELECTED_SERVICE_NAME=""
# Functions
print_status() {
-echo -e "${GREEN}$1${NC}"
+printf "${GREEN}%s${NC}\n" "$1"
}
print_info() {
-echo -e "${BLUE} $1${NC}"
+printf "${BLUE}%s${NC}\n" "$1"
}
print_error() {
-echo -e "${RED}$1${NC}"
+printf "${RED}%s${NC}\n" "$1"
}
print_warning() {
-echo -e "${YELLOW}⚠️ $1${NC}"
+printf "${YELLOW}%s${NC}\n" "$1"
}
print_question() {
-echo -e "${BLUE}$1${NC}"
+printf "${BLUE}%s${NC}\n" "$1"
}
print_success() {
-echo -e "${GREEN}🎉 $1${NC}"
+printf "${GREEN}%s${NC}\n" "$1"
}
# Interactive input functions
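
The echo-to-printf rewrite above is a portability fix, not a cosmetic one: POSIX does not define echo -e, so shells like dash print a literal "-e", while printf interprets backslash escapes such as \033 in its format string everywhere. A self-contained illustration:

#!/bin/sh
GREEN='\033[0;32m'
NC='\033[0m'

# Portable: printf expands \033 from the format string in every POSIX shell
printf "${GREEN}%s${NC}\n" "Service started"

# Not portable: dash's echo has no -e and prints it literally
echo -e "${GREEN}Service started${NC}"
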
@@ -443,7 +443,7 @@ generate_redis_password() {
# Find next available Redis database
find_next_redis_db() {
-print_info "Finding next available Redis database..."
+print_info "Finding next available Redis database..." >&2
# Start from database 0 and keep checking until we find an empty one
local db_num=0
@@ -463,11 +463,11 @@ find_next_redis_db() {
# Try to load admin credentials if ACL file exists
if [ -f /etc/redis/users.acl ] && grep -q "^user admin" /etc/redis/users.acl; then
# Redis is configured with ACL - try to extract admin password
-print_info "Redis requires authentication, attempting with admin credentials..."
+print_info "Redis requires authentication, attempting with admin credentials..." >&2
# For multi-instance setups, we can't know the admin password yet
# So we'll just use database 0 as default
-print_info "Using database 0 (Redis ACL already configured)"
+print_info "Using database 0 (Redis ACL already configured)" >&2
echo "0"
return 0
fi
@@ -484,7 +484,7 @@ find_next_redis_db() {
# Check for authentication errors
if echo "$redis_output" | grep -q "NOAUTH\|WRONGPASS"; then
# If we hit auth errors and haven't configured yet, use database 0
-print_info "Redis requires authentication, defaulting to database 0"
+print_info "Redis requires authentication, defaulting to database 0" >&2
echo "0"
return 0
fi
@@ -492,10 +492,10 @@ find_next_redis_db() {
# Check for other errors
if echo "$redis_output" | grep -q "ERR"; then
if echo "$redis_output" | grep -q "invalid DB index"; then
-print_warning "Reached maximum database limit at database $db_num"
+print_warning "Reached maximum database limit at database $db_num" >&2
break
else
-print_error "Error checking database $db_num: $redis_output"
+print_error "Error checking database $db_num: $redis_output" >&2
return 1
fi
fi
@@ -504,17 +504,17 @@ find_next_redis_db() {
# If database is empty, use it
if [ "$key_count" = "0" ] || [ "$key_count" = "(integer) 0" ]; then
-print_status "Found available Redis database: $db_num (empty)"
+print_status "Found available Redis database: $db_num (empty)" >&2
echo "$db_num"
return 0
fi
-print_info "Database $db_num has $key_count keys, checking next..."
+print_info "Database $db_num has $key_count keys, checking next..." >&2
db_num=$((db_num + 1))
done
-print_warning "No available Redis databases found (checked 0-$max_attempts)"
+print_warning "No available Redis databases found (checked 0-$max_attempts)" >&2
-print_info "Using database 0 (may have existing data)"
+print_info "Using database 0 (may have existing data)" >&2
echo "0"
return 0
}
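
The >&2 redirections added throughout find_next_redis_db are load-bearing: the function returns its result by echoing it, and callers capture stdout with command substitution, so any status line left on stdout would be swallowed into the captured value. A minimal illustration of the pattern:

#!/bin/sh
# The function "returns" via stdout; progress output must go to stderr,
# otherwise it corrupts the value captured by the caller.
find_free_slot() {
	printf 'checking slots...\n' >&2   # status message: stderr
	echo 3                             # result: stdout
}

db=$(find_free_slot)
echo "selected db: $db"   # prints: selected db: 3
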
@@ -1470,6 +1470,7 @@ EOF
cat > "$config_file" << EOF cat > "$config_file" << EOF
server { server {
listen 80; listen 80;
listen [::]:80;
server_name $fqdn; server_name $fqdn;
# Security headers # Security headers
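
The added listen [::]:80; line is what actually makes nginx accept IPv6 connections; with only listen 80;, it binds IPv4 addresses alone. A minimal dual-stack server block for comparison (the hostname is a placeholder):

server {
    listen 80;
    listen [::]:80;                   # accept IPv6 as well as IPv4
    server_name patchmon.example.com;
    return 301 https://$host$request_uri;
}
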
@@ -1657,7 +1658,7 @@ start_services() {
local logs=$(journalctl -u "$SERVICE_NAME" -n 50 --no-pager 2>/dev/null || echo "")
if echo "$logs" | grep -q "WRONGPASS\|NOAUTH"; then
print_error "Detected Redis authentication error!"
print_info "The service cannot authenticate with Redis."
echo ""
print_info "Current Redis configuration in .env:"
@@ -1681,18 +1682,18 @@ start_services() {
print_info " cat /etc/redis/users.acl" print_info " cat /etc/redis/users.acl"
echo "" echo ""
elif echo "$logs" | grep -q "ECONNREFUSED.*postgresql\|Connection refused.*5432"; then elif echo "$logs" | grep -q "ECONNREFUSED.*postgresql\|Connection refused.*5432"; then
print_error "Detected PostgreSQL connection error!" print_error "Detected PostgreSQL connection error!"
print_info "Check if PostgreSQL is running:" print_info "Check if PostgreSQL is running:"
print_info " systemctl status postgresql" print_info " systemctl status postgresql"
elif echo "$logs" | grep -q "ECONNREFUSED.*redis\|Connection refused.*6379"; then elif echo "$logs" | grep -q "ECONNREFUSED.*redis\|Connection refused.*6379"; then
print_error "Detected Redis connection error!" print_error "Detected Redis connection error!"
print_info "Check if Redis is running:" print_info "Check if Redis is running:"
print_info " systemctl status redis-server" print_info " systemctl status redis-server"
elif echo "$logs" | grep -q "database.*does not exist"; then elif echo "$logs" | grep -q "database.*does not exist"; then
print_error "Database does not exist!" print_error "Database does not exist!"
print_info "Database: $DB_NAME" print_info "Database: $DB_NAME"
elif echo "$logs" | grep -q "Error:"; then elif echo "$logs" | grep -q "Error:"; then
print_error "Application error detected in logs" print_error "Application error detected in logs"
fi fi
echo "" echo ""
@@ -1741,9 +1742,9 @@ async function updateSettings() {
});
}
console.log('Database settings updated successfully');
} catch (error) {
console.error('Error updating settings:', error.message);
process.exit(1);
} finally {
await prisma.\$disconnect();
@@ -1867,7 +1868,7 @@ EOF
if [ -f "$SUMMARY_FILE" ]; then if [ -f "$SUMMARY_FILE" ]; then
print_status "Deployment summary appended to: $SUMMARY_FILE" print_status "Deployment summary appended to: $SUMMARY_FILE"
else else
print_error "⚠️ Failed to append to deployment-info.txt file" print_error "Failed to append to deployment-info.txt file"
return 1 return 1
fi fi
} }
@@ -1949,7 +1950,7 @@ EOF
print_status "Deployment information saved to: $INFO_FILE" print_status "Deployment information saved to: $INFO_FILE"
print_info "File details: $(ls -lh "$INFO_FILE" | awk '{print $5, $9}')" print_info "File details: $(ls -lh "$INFO_FILE" | awk '{print $5, $9}')"
else else
print_error "⚠️ Failed to create deployment-info.txt file" print_error "Failed to create deployment-info.txt file"
return 1 return 1
fi fi
} }
@@ -2142,7 +2143,7 @@ deploy_instance() {
log_message "Backend port: $BACKEND_PORT" log_message "Backend port: $BACKEND_PORT"
log_message "SSL enabled: $USE_LETSENCRYPT" log_message "SSL enabled: $USE_LETSENCRYPT"
print_status "🎉 PatchMon instance deployed successfully!" print_status "PatchMon instance deployed successfully!"
echo "" echo ""
print_info "Next steps:" print_info "Next steps:"
echo " • Visit your URL: $SERVER_PROTOCOL_SEL://$FQDN (ensure DNS is configured)" echo " • Visit your URL: $SERVER_PROTOCOL_SEL://$FQDN (ensure DNS is configured)"
@@ -3236,7 +3237,7 @@ update_installation() {
sleep 5
if systemctl is-active --quiet "$service_name"; then
print_success "Update completed successfully!"
print_status "Service $service_name is running"
# Get new version
@@ -3264,7 +3265,7 @@ update_installation() {
local logs=$(journalctl -u "$service_name" -n 50 --no-pager 2>/dev/null || echo "")
if echo "$logs" | grep -q "WRONGPASS\|NOAUTH"; then
print_error "Detected Redis authentication error!"
print_info "The service cannot authenticate with Redis."
echo ""
print_info "Current Redis configuration in .env:"
@@ -3281,12 +3282,12 @@ update_installation() {
print_info " redis-cli --user $test_user --pass $test_pass -n ${test_db:-0} ping" print_info " redis-cli --user $test_user --pass $test_pass -n ${test_db:-0} ping"
echo "" echo ""
elif echo "$logs" | grep -q "ECONNREFUSED"; then elif echo "$logs" | grep -q "ECONNREFUSED"; then
print_error "Detected connection refused error!" print_error "Detected connection refused error!"
print_info "Check if required services are running:" print_info "Check if required services are running:"
print_info " systemctl status postgresql" print_info " systemctl status postgresql"
print_info " systemctl status redis-server" print_info " systemctl status redis-server"
elif echo "$logs" | grep -q "Error:"; then elif echo "$logs" | grep -q "Error:"; then
print_error "Application error detected in logs" print_error "Application error detected in logs"
fi fi
echo "" echo ""
@@ -3319,7 +3320,7 @@ main() {
# Handle update mode
if [ "$UPDATE_MODE" = "true" ]; then
print_banner
-print_info "🔄 PatchMon Update Mode"
+print_info "PatchMon Update Mode"
echo ""
# Select installation to update # Select installation to update
@@ -3335,7 +3336,7 @@ main() {
# Check if existing installations are present # Check if existing installations are present
local existing_installs=($(detect_installations)) local existing_installs=($(detect_installations))
if [ ${#existing_installs[@]} -gt 0 ]; then if [ ${#existing_installs[@]} -gt 0 ]; then
print_warning "⚠️ Found ${#existing_installs[@]} existing PatchMon installation(s):" print_warning "Found ${#existing_installs[@]} existing PatchMon installation(s):"
for install in "${existing_installs[@]}"; do for install in "${existing_installs[@]}"; do
print_info " - $install" print_info " - $install"
done done