diff --git a/agents/patchmon-agent-linux-386 b/agents/patchmon-agent-linux-386
index fc7f5a8..cb875b8 100755
Binary files a/agents/patchmon-agent-linux-386 and b/agents/patchmon-agent-linux-386 differ
diff --git a/agents/patchmon-agent-linux-amd64 b/agents/patchmon-agent-linux-amd64
index 3db8d1b..3025391 100755
Binary files a/agents/patchmon-agent-linux-amd64 and b/agents/patchmon-agent-linux-amd64 differ
diff --git a/agents/patchmon-agent-linux-arm b/agents/patchmon-agent-linux-arm
index 6d5f77c..6122e7b 100755
Binary files a/agents/patchmon-agent-linux-arm and b/agents/patchmon-agent-linux-arm differ
diff --git a/agents/patchmon-agent-linux-arm64 b/agents/patchmon-agent-linux-arm64
index d809d4a..523e6c8 100755
Binary files a/agents/patchmon-agent-linux-arm64 and b/agents/patchmon-agent-linux-arm64 differ
diff --git a/agents/patchmon_install.sh b/agents/patchmon_install.sh
index 315f117..1e31a15 100644
--- a/agents/patchmon_install.sh
+++ b/agents/patchmon_install.sh
@@ -136,10 +136,34 @@ if [[ -z "$PATCHMON_URL" ]] || [[ -z "$API_ID" ]] || [[ -z "$API_KEY" ]]; then
     error "Missing required parameters. This script should be called via the PatchMon web interface."
 fi
 
-# Parse architecture parameter (default to amd64)
-ARCHITECTURE="${ARCHITECTURE:-amd64}"
-if [[ "$ARCHITECTURE" != "amd64" && "$ARCHITECTURE" != "386" && "$ARCHITECTURE" != "arm64" ]]; then
-    error "Invalid architecture '$ARCHITECTURE'. Must be one of: amd64, 386, arm64"
+# Auto-detect architecture if not explicitly set
+if [[ -z "$ARCHITECTURE" ]]; then
+    arch_raw=$(uname -m 2>/dev/null || echo "unknown")
+
+    # Map architecture to supported values
+    case "$arch_raw" in
+        "x86_64")
+            ARCHITECTURE="amd64"
+            ;;
+        "i386"|"i686")
+            ARCHITECTURE="386"
+            ;;
+        "aarch64"|"arm64")
+            ARCHITECTURE="arm64"
+            ;;
+        "armv7l"|"armv6l"|"arm")
+            ARCHITECTURE="arm"
+            ;;
+        *)
+            warning "⚠️ Unknown architecture '$arch_raw', defaulting to amd64"
+            ARCHITECTURE="amd64"
+            ;;
+    esac
+fi
+
+# Validate architecture
+if [[ "$ARCHITECTURE" != "amd64" && "$ARCHITECTURE" != "386" && "$ARCHITECTURE" != "arm64" && "$ARCHITECTURE" != "arm" ]]; then
+    error "Invalid architecture '$ARCHITECTURE'. Must be one of: amd64, 386, arm64, arm"
 fi
 
 # Check if --force flag is set (for bypassing broken packages)
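The `uname -m` case statement above is the shell-side twin of the `archMap` the server grows later in this patch (hostRoutes.js). For reference, the same mapping as a standalone Node sketch — the function name is illustrative, not part of the patch:

```js
// Maps `uname -m` output to the Go architecture names used for agent binaries.
const UNAME_TO_GO_ARCH = {
	x86_64: "amd64",
	i386: "386",
	i686: "386",
	aarch64: "arm64",
	arm64: "arm64",
	armv7l: "arm",
	armv6l: "arm",
	arm: "arm",
};

// Falls back to amd64 for unknown values, like the script's `*)` branch.
function toGoArch(unameM) {
	return UNAME_TO_GO_ARCH[unameM] || "amd64";
}

console.log(toGoArch("aarch64")); // "arm64"
```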
@@ -234,7 +258,98 @@ install_apt_packages() {
     fi
 }
 
-# Detect package manager and install jq and curl
+# Function to check and install packages for yum/dnf
+install_yum_dnf_packages() {
+    local pkg_manager="$1"
+    shift
+    local packages=("$@")
+    local missing_packages=()
+
+    # Check which packages are missing
+    for pkg in "${packages[@]}"; do
+        if ! command_exists "$pkg"; then
+            missing_packages+=("$pkg")
+        fi
+    done
+
+    if [ ${#missing_packages[@]} -eq 0 ]; then
+        success "All required packages are already installed"
+        return 0
+    fi
+
+    info "Need to install: ${missing_packages[*]}"
+
+    if [[ "$pkg_manager" == "yum" ]]; then
+        yum install -y "${missing_packages[@]}"
+    else
+        dnf install -y "${missing_packages[@]}"
+    fi
+}
+
+# Function to check and install packages for zypper
+install_zypper_packages() {
+    local packages=("$@")
+    local missing_packages=()
+
+    # Check which packages are missing
+    for pkg in "${packages[@]}"; do
+        if ! command_exists "$pkg"; then
+            missing_packages+=("$pkg")
+        fi
+    done
+
+    if [ ${#missing_packages[@]} -eq 0 ]; then
+        success "All required packages are already installed"
+        return 0
+    fi
+
+    info "Need to install: ${missing_packages[*]}"
+    zypper install -y "${missing_packages[@]}"
+}
+
+# Function to check and install packages for pacman
+install_pacman_packages() {
+    local packages=("$@")
+    local missing_packages=()
+
+    # Check which packages are missing
+    for pkg in "${packages[@]}"; do
+        if ! command_exists "$pkg"; then
+            missing_packages+=("$pkg")
+        fi
+    done
+
+    if [ ${#missing_packages[@]} -eq 0 ]; then
+        success "All required packages are already installed"
+        return 0
+    fi
+
+    info "Need to install: ${missing_packages[*]}"
+    pacman -S --noconfirm "${missing_packages[@]}"
+}
+
+# Function to check and install packages for apk
+install_apk_packages() {
+    local packages=("$@")
+    local missing_packages=()
+
+    # Check which packages are missing
+    for pkg in "${packages[@]}"; do
+        if ! command_exists "$pkg"; then
+            missing_packages+=("$pkg")
+        fi
+    done
+
+    if [ ${#missing_packages[@]} -eq 0 ]; then
+        success "All required packages are already installed"
+        return 0
+    fi
+
+    info "Need to install: ${missing_packages[*]}"
+    apk add --no-cache "${missing_packages[@]}"
+}
+
+# Detect package manager and install jq, curl, and bc
 if command -v apt-get >/dev/null 2>&1; then
     # Debian/Ubuntu
     info "Detected apt-get (Debian/Ubuntu)"
@@ -260,31 +375,31 @@ elif command -v yum >/dev/null 2>&1; then
     info "Detected yum (CentOS/RHEL 7)"
     echo ""
     info "Installing jq, curl, and bc..."
-    yum install -y jq curl bc
+    install_yum_dnf_packages yum jq curl bc
 elif command -v dnf >/dev/null 2>&1; then
     # CentOS/RHEL 8+/Fedora
     info "Detected dnf (CentOS/RHEL 8+/Fedora)"
     echo ""
     info "Installing jq, curl, and bc..."
-    dnf install -y jq curl bc
+    install_yum_dnf_packages dnf jq curl bc
 elif command -v zypper >/dev/null 2>&1; then
     # openSUSE
     info "Detected zypper (openSUSE)"
     echo ""
     info "Installing jq, curl, and bc..."
-    zypper install -y jq curl bc
+    install_zypper_packages jq curl bc
 elif command -v pacman >/dev/null 2>&1; then
     # Arch Linux
     info "Detected pacman (Arch Linux)"
     echo ""
     info "Installing jq, curl, and bc..."
-    pacman -S --noconfirm jq curl bc
+    install_pacman_packages jq curl bc
 elif command -v apk >/dev/null 2>&1; then
     # Alpine Linux
     info "Detected apk (Alpine Linux)"
     echo ""
     info "Installing jq, curl, and bc..."
-    apk add --no-cache jq curl bc
+    install_apk_packages jq curl bc
 else
     warning "Could not detect package manager. Please ensure 'jq', 'curl', and 'bc' are installed manually."
 fi
@@ -311,6 +426,37 @@ else
     mkdir -p /etc/patchmon
 fi
 
+# Check if agent is already configured and working (before we overwrite anything)
+info "🔍 Checking if agent is already configured..."
+
+if [[ -f /etc/patchmon/config.yml ]] && [[ -f /etc/patchmon/credentials.yml ]]; then
+    if [[ -f /usr/local/bin/patchmon-agent ]]; then
+        info "📋 Found existing agent configuration"
+        info "🧪 Testing existing configuration with ping..."
+
+        if /usr/local/bin/patchmon-agent ping >/dev/null 2>&1; then
+            success "✅ Agent is already configured and ping successful"
+            info "📋 Existing configuration is working - skipping installation"
+            info ""
+            info "If you want to reinstall, remove the configuration files first:"
+            info "  sudo rm -f /etc/patchmon/config.yml /etc/patchmon/credentials.yml"
+            echo ""
+            exit 0
+        else
+            warning "⚠️ Agent configuration exists but ping failed"
+            warning "⚠️ Will move existing configuration and reinstall"
+            echo ""
+        fi
+    else
+        warning "⚠️ Configuration files exist but agent binary is missing"
+        warning "⚠️ Will move existing configuration and reinstall"
+        echo ""
+    fi
+else
+    success "✅ Agent not yet configured - proceeding with installation"
+    echo ""
+fi
+
 # Step 2: Create configuration files
 info "🔐 Creating configuration files..."
@@ -426,33 +572,6 @@ if [[ -f "/etc/patchmon/logs/patchmon-agent.log" ]]; then
 fi
 
 # Step 4: Test the configuration
-# Check if this machine is already enrolled
-info "🔍 Checking if machine is already enrolled..."
-existing_check=$(curl $CURL_FLAGS -s -X POST \
-    -H "X-API-ID: $API_ID" \
-    -H "X-API-KEY: $API_KEY" \
-    -H "Content-Type: application/json" \
-    -d "{\"machine_id\": \"$MACHINE_ID\"}" \
-    "$PATCHMON_URL/api/v1/hosts/check-machine-id" \
-    -w "\n%{http_code}" 2>&1)
-
-http_code=$(echo "$existing_check" | tail -n 1)
-response_body=$(echo "$existing_check" | sed '$d')
-
-if [[ "$http_code" == "200" ]]; then
-    already_enrolled=$(echo "$response_body" | jq -r '.exists' 2>/dev/null || echo "false")
-    if [[ "$already_enrolled" == "true" ]]; then
-        warning "⚠️ This machine is already enrolled in PatchMon"
-        info "Machine ID: $MACHINE_ID"
-        info "Existing host: $(echo "$response_body" | jq -r '.host.friendly_name' 2>/dev/null)"
-        info ""
-        info "The agent will be reinstalled/updated with existing credentials."
-        echo ""
-    else
-        success "✅ Machine not yet enrolled - proceeding with installation"
-    fi
-fi
-
 info "🧪 Testing API credentials and connectivity..."
 if /usr/local/bin/patchmon-agent ping; then
     success "✅ TEST: API credentials are valid and server is reachable"
@@ -460,15 +579,8 @@ else
     error "❌ Failed to validate API credentials or reach server"
 fi
 
-# Step 5: Send initial data and setup systemd service
-info "📊 Sending initial package data to server..."
-if /usr/local/bin/patchmon-agent report; then
-    success "✅ UPDATE: Initial package data sent successfully"
-else
-    warning "⚠️ Failed to send initial data. You can retry later with: /usr/local/bin/patchmon-agent report"
-fi
-
-# Step 6: Setup systemd service for WebSocket connection
+# Step 5: Setup systemd service for WebSocket connection
+# Note: The service will automatically send an initial report on startup (see serve.go)
 info "🔧 Setting up systemd service..."
 
 # Stop and disable existing service if it exists
diff --git a/agents/proxmox_auto_enroll.sh b/agents/proxmox_auto_enroll.sh
index 01c4158..04516f4 100755
--- a/agents/proxmox_auto_enroll.sh
+++ b/agents/proxmox_auto_enroll.sh
@@ -230,6 +230,40 @@ while IFS= read -r line; do
 
     info "  ✓ Host enrolled successfully: $api_id"
 
+    # Check if agent is already installed and working
+    info "  Checking if agent is already configured..."
+    config_check=$(timeout 10 pct exec "$vmid" -- bash -c "
+        if [[ -f /etc/patchmon/config.yml ]] && [[ -f /etc/patchmon/credentials.yml ]]; then
+            if [[ -f /usr/local/bin/patchmon-agent ]]; then
+                # Try to ping using existing configuration
+                if /usr/local/bin/patchmon-agent ping >/dev/null 2>&1; then
+                    echo 'ping_success'
+                else
+                    echo 'ping_failed'
+                fi
+            else
+                echo 'binary_missing'
+            fi
+        else
+            echo 'not_configured'
+        fi
+    " 2>/dev/null)
+    binary_check=$(timeout 10 pct exec "$vmid" -- bash -c "command -v /usr/local/bin/patchmon-agent >/dev/null 2>&1 && echo 'installed' || echo 'missing'" 2>/dev/null)
diff --git a/backend/src/routes/automationRoutes.js b/backend/src/routes/automationRoutes.js
--- a/backend/src/routes/automationRoutes.js
+++ b/backend/src/routes/automationRoutes.js
+router.post(
+	"/trigger/system-statistics",
+	authenticateToken,
+	async (_req, res) => {
+		try {
+			const job = await queueManager.triggerSystemStatistics();
+			res.json({
+				success: true,
+				data: {
+					jobId: job.id,
+					message: "System statistics collection triggered successfully",
+				},
+			});
+		} catch (error) {
+			console.error("Error triggering system statistics collection:", error);
+			res.status(500).json({
+				success: false,
+				error: "Failed to trigger system statistics collection",
+			});
+		}
+	},
+);
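With the trigger route in place (path confirmed by the frontend's `/automation/trigger/system-statistics` call), kicking off a run by hand is a one-liner from any authenticated client. A hedged sketch — the host and bearer-token auth are placeholders, not something this diff establishes:

```js
// Manually kick off a system statistics collection run.
const res = await fetch(
	"https://patchmon.example.com/api/v1/automation/trigger/system-statistics",
	{
		method: "POST",
		headers: { Authorization: `Bearer ${process.env.PATCHMON_TOKEN}` },
	},
);
const body = await res.json();
console.log(body.data.jobId); // BullMQ job id returned by queueManager
```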
"Success" + : "Never run", + stats: stats[QUEUE_NAMES.SYSTEM_STATISTICS], + }, ].sort((a, b) => { // Sort by last run timestamp (most recent first) // If both have never run (timestamp 0), maintain original order diff --git a/backend/src/routes/dashboardRoutes.js b/backend/src/routes/dashboardRoutes.js index e995737..bfc03db 100644 --- a/backend/src/routes/dashboardRoutes.js +++ b/backend/src/routes/dashboardRoutes.js @@ -564,174 +564,216 @@ router.get( const startDate = new Date(); startDate.setDate(endDate.getDate() - daysInt); - // Build where clause - const whereClause = { - timestamp: { - gte: startDate, - lte: endDate, - }, - }; - - // Add host filter if specified - if (hostId && hostId !== "all" && hostId !== "undefined") { - whereClause.host_id = hostId; - } - - // Get all update history records in the date range - const trendsData = await prisma.update_history.findMany({ - where: whereClause, - select: { - timestamp: true, - packages_count: true, - security_count: true, - total_packages: true, - host_id: true, - status: true, - }, - orderBy: { - timestamp: "asc", - }, - }); - - // Enhanced data validation and processing - const processedData = trendsData - .filter((record) => { - // Enhanced validation - return ( - record.total_packages !== null && - record.total_packages >= 0 && - record.packages_count >= 0 && - record.security_count >= 0 && - record.security_count <= record.packages_count && // Security can't exceed outdated - record.status === "success" - ); // Only include successful reports - }) - .map((record) => { - const date = new Date(record.timestamp); - let timeKey; - - if (daysInt <= 1) { - // For hourly view, group by hour only (not minutes) - timeKey = date.toISOString().substring(0, 13); // YYYY-MM-DDTHH - } else { - // For daily view, group by day - timeKey = date.toISOString().split("T")[0]; // YYYY-MM-DD - } - - return { - timeKey, - total_packages: record.total_packages, - packages_count: record.packages_count || 0, - security_count: record.security_count || 0, - host_id: record.host_id, - timestamp: record.timestamp, - }; - }); - // Determine if we need aggregation based on host filter const needsAggregation = !hostId || hostId === "all" || hostId === "undefined"; + let trendsData; + + if (needsAggregation) { + // For "All Hosts" mode, use system_statistics table + trendsData = await prisma.system_statistics.findMany({ + where: { + timestamp: { + gte: startDate, + lte: endDate, + }, + }, + select: { + timestamp: true, + unique_packages_count: true, + unique_security_count: true, + total_packages: true, + total_hosts: true, + hosts_needing_updates: true, + }, + orderBy: { + timestamp: "asc", + }, + }); + } else { + // For individual host, use update_history table + trendsData = await prisma.update_history.findMany({ + where: { + host_id: hostId, + timestamp: { + gte: startDate, + lte: endDate, + }, + }, + select: { + timestamp: true, + packages_count: true, + security_count: true, + total_packages: true, + host_id: true, + status: true, + }, + orderBy: { + timestamp: "asc", + }, + }); + } + + // Process data based on source + let processedData; let aggregatedArray; if (needsAggregation) { - // For "All Hosts" mode, we need to calculate the actual total packages differently - // Instead of aggregating historical data (which is per-host), we'll use the current total - // and show that as a flat line, since total packages don't change much over time + // For "All Hosts" mode, data comes from system_statistics table + // Already aggregated, just need to format it + 
+
+		// Process data based on source
+		let processedData;
 		let aggregatedArray;
 
 		if (needsAggregation) {
-			// For "All Hosts" mode, we need to calculate the actual total packages differently
-			// Instead of aggregating historical data (which is per-host), we'll use the current total
-			// and show that as a flat line, since total packages don't change much over time
+			// For "All Hosts" mode, data comes from system_statistics table
+			// Already aggregated, just need to format it
+			processedData = trendsData
+				.filter((record) => {
+					// Enhanced validation
+					return (
+						record.total_packages !== null &&
+						record.total_packages >= 0 &&
+						record.unique_packages_count >= 0 &&
+						record.unique_security_count >= 0 &&
+						record.unique_security_count <= record.unique_packages_count
+					);
+				})
+				.map((record) => {
+					const date = new Date(record.timestamp);
+					let timeKey;
 
-			// Get the current total packages count (unique packages across all hosts)
-			const currentTotalPackages = await prisma.packages.count({
-				where: {
-					host_packages: {
-						some: {}, // At least one host has this package
-					},
-				},
-			});
+					if (daysInt <= 1) {
+						// For "Last 24 hours", use full timestamp for each data point
+						// This allows plotting all individual data points
+						timeKey = date.toISOString(); // Full ISO timestamp
+					} else {
+						// For daily view, group by day
+						timeKey = date.toISOString().split("T")[0]; // YYYY-MM-DD
+					}
 
-			// Aggregate data by timeKey when looking at "All Hosts" or no specific host
-			const aggregatedData = processedData.reduce((acc, item) => {
-				if (!acc[item.timeKey]) {
-					acc[item.timeKey] = {
-						timeKey: item.timeKey,
-						total_packages: currentTotalPackages, // Use current total packages
-						packages_count: 0,
-						security_count: 0,
-						record_count: 0,
-						host_ids: new Set(),
-						min_timestamp: item.timestamp,
-						max_timestamp: item.timestamp,
+					return {
+						timeKey,
+						total_packages: record.total_packages,
+						packages_count: record.unique_packages_count,
+						security_count: record.unique_security_count,
+						timestamp: record.timestamp,
 					};
-				}
+				});
 
-				// For outdated and security packages: SUM (these represent counts across hosts)
-				acc[item.timeKey].packages_count += item.packages_count;
-				acc[item.timeKey].security_count += item.security_count;
+			if (daysInt <= 1) {
+				// For "Last 24 hours", use all individual data points without grouping
+				// Sort by timestamp
+				aggregatedArray = processedData.sort(
+					(a, b) => a.timestamp.getTime() - b.timestamp.getTime(),
+				);
+			} else {
+				// For longer periods, group by timeKey and take the latest value for each period
+				const aggregatedData = processedData.reduce((acc, item) => {
+					if (
+						!acc[item.timeKey] ||
+						item.timestamp > acc[item.timeKey].timestamp
+					) {
+						acc[item.timeKey] = item;
+					}
+					return acc;
+				}, {});
 
-				acc[item.timeKey].record_count += 1;
-				acc[item.timeKey].host_ids.add(item.host_id);
-
-				// Track timestamp range
-				if (item.timestamp < acc[item.timeKey].min_timestamp) {
-					acc[item.timeKey].min_timestamp = item.timestamp;
-				}
-				if (item.timestamp > acc[item.timeKey].max_timestamp) {
-					acc[item.timeKey].max_timestamp = item.timestamp;
-				}
-
-				return acc;
-			}, {});
-
-			// Convert to array and add metadata
-			aggregatedArray = Object.values(aggregatedData)
-				.map((item) => ({
-					...item,
-					host_count: item.host_ids.size,
-					host_ids: Array.from(item.host_ids),
-				}))
-				.sort((a, b) => a.timeKey.localeCompare(b.timeKey));
+				// Convert to array and sort
+				aggregatedArray = Object.values(aggregatedData).sort((a, b) =>
+					a.timeKey.localeCompare(b.timeKey),
+				);
+			}
 		} else {
-			// For specific host, show individual data points without aggregation
-			// But still group by timeKey to handle multiple reports from same host in same time period
-			const hostAggregatedData = processedData.reduce((acc, item) => {
-				if (!acc[item.timeKey]) {
-					acc[item.timeKey] = {
-						timeKey: item.timeKey,
-						total_packages: 0,
-						packages_count: 0,
-						security_count: 0,
-						record_count: 0,
-						host_ids: new Set([item.host_id]),
-						min_timestamp: item.timestamp,
-						max_timestamp: item.timestamp,
-					};
-				}
-
-				// For same host, take the latest values (not sum)
-				// This handles cases where a host reports multiple times in the same time period
-				if (item.timestamp > acc[item.timeKey].max_timestamp) {
-					acc[item.timeKey].total_packages = item.total_packages;
-					acc[item.timeKey].packages_count = item.packages_count;
-					acc[item.timeKey].security_count = item.security_count;
-					acc[item.timeKey].max_timestamp = item.timestamp;
-				}
+			// For individual host, data comes from update_history table
+			processedData = trendsData
+				.filter((record) => {
+					// Enhanced validation
+					return (
+						record.total_packages !== null &&
+						record.total_packages >= 0 &&
+						record.packages_count >= 0 &&
+						record.security_count >= 0 &&
+						record.security_count <= record.packages_count &&
+						record.status === "success"
+					);
+				})
+				.map((record) => {
+					const date = new Date(record.timestamp);
+					let timeKey;
+
+					if (daysInt <= 1) {
+						// For "Last 24 hours", use full timestamp for each data point
+						// This allows plotting all individual data points
+						timeKey = date.toISOString(); // Full ISO timestamp
+					} else {
+						// For daily view, group by day
+						timeKey = date.toISOString().split("T")[0]; // YYYY-MM-DD
+					}
+
+					return {
+						timeKey,
+						total_packages: record.total_packages,
+						packages_count: record.packages_count || 0,
+						security_count: record.security_count || 0,
+						host_id: record.host_id,
+						timestamp: record.timestamp,
+					};
-			}
-
-				acc[item.timeKey].record_count += 1;
+				});
 
-				return acc;
-			}, {});
+			if (daysInt <= 1) {
+				// For "Last 24 hours", use all individual data points without grouping
+				// Sort by timestamp
+				aggregatedArray = processedData.sort(
+					(a, b) => a.timestamp.getTime() - b.timestamp.getTime(),
+				);
+			} else {
+				// For longer periods, group by timeKey to handle multiple reports from same host in same time period
+				const hostAggregatedData = processedData.reduce((acc, item) => {
+					if (!acc[item.timeKey]) {
+						acc[item.timeKey] = {
+							timeKey: item.timeKey,
+							total_packages: 0,
+							packages_count: 0,
+							security_count: 0,
+							record_count: 0,
+							host_ids: new Set([item.host_id]),
+							min_timestamp: item.timestamp,
+							max_timestamp: item.timestamp,
+						};
+					}
 
-			// Convert to array
-			aggregatedArray = Object.values(hostAggregatedData)
-				.map((item) => ({
-					...item,
-					host_count: item.host_ids.size,
-					host_ids: Array.from(item.host_ids),
-				}))
-				.sort((a, b) => a.timeKey.localeCompare(b.timeKey));
+					// For same host, take the latest values (not sum)
+					// This handles cases where a host reports multiple times in the same time period
+					if (item.timestamp > acc[item.timeKey].max_timestamp) {
+						acc[item.timeKey].total_packages = item.total_packages;
+						acc[item.timeKey].packages_count = item.packages_count;
+						acc[item.timeKey].security_count = item.security_count;
+						acc[item.timeKey].max_timestamp = item.timestamp;
+					}
+
+					acc[item.timeKey].record_count += 1;
+
+					return acc;
+				}, {});
+
+				// Convert to array
+				aggregatedArray = Object.values(hostAggregatedData)
+					.map((item) => ({
+						...item,
+						host_count: item.host_ids.size,
+						host_ids: Array.from(item.host_ids),
+					}))
+					.sort((a, b) => a.timeKey.localeCompare(b.timeKey));
+			}
 		}
 
 		// Handle sparse data by filling missing time periods
 		const fillMissingPeriods = (data, daysInt) => {
+			if (data.length === 0) {
+				return [];
+			}
+
+			// For "Last 24 hours", return data as-is without filling gaps
+			// This allows plotting all individual data points
+			if (daysInt <= 1) {
+				return data;
+			}
+
 			const filledData = [];
 			const startDate = new Date();
 			startDate.setDate(startDate.getDate() - daysInt);
@@ -741,50 +783,58 @@
 			const endDate = new Date();
 			const currentDate = new Date(startDate);
 
-			// Find the last known values for interpolation
+			// Sort data by timeKey to get chronological order
+			const sortedData = [...data].sort((a, b) =>
+				a.timeKey.localeCompare(b.timeKey),
+			);
+
+			// Find the first actual data point (don't fill before this)
+			const firstDataPoint = sortedData[0];
+			const firstDataTimeKey = firstDataPoint?.timeKey;
+
+			// Track last known values as we iterate forward
 			let lastKnownValues = null;
-			if (data.length > 0) {
-				lastKnownValues = {
-					total_packages: data[0].total_packages,
-					packages_count: data[0].packages_count,
-					security_count: data[0].security_count,
-				};
-			}
+			let hasSeenFirstDataPoint = false;
 
 			while (currentDate <= endDate) {
 				let timeKey;
-				if (daysInt <= 1) {
-					timeKey = currentDate.toISOString().substring(0, 13); // Hourly
-					currentDate.setHours(currentDate.getHours() + 1);
-				} else {
-					timeKey = currentDate.toISOString().split("T")[0]; // Daily
-					currentDate.setDate(currentDate.getDate() + 1);
+				// For daily view, group by day
+				timeKey = currentDate.toISOString().split("T")[0]; // YYYY-MM-DD
+				currentDate.setDate(currentDate.getDate() + 1);
+
+				// Skip periods before the first actual data point
+				if (firstDataTimeKey && timeKey < firstDataTimeKey) {
+					continue;
 				}
 
 				if (dataMap.has(timeKey)) {
 					const item = dataMap.get(timeKey);
 					filledData.push(item);
-					// Update last known values
+					// Update last known values with actual data
 					lastKnownValues = {
-						total_packages: item.total_packages,
-						packages_count: item.packages_count,
-						security_count: item.security_count,
+						total_packages: item.total_packages || 0,
+						packages_count: item.packages_count || 0,
+						security_count: item.security_count || 0,
 					};
+					hasSeenFirstDataPoint = true;
 				} else {
-					// For missing periods, use the last known values (interpolation)
-					// This creates a continuous line instead of gaps
-					filledData.push({
-						timeKey,
-						total_packages: lastKnownValues?.total_packages || 0,
-						packages_count: lastKnownValues?.packages_count || 0,
-						security_count: lastKnownValues?.security_count || 0,
-						record_count: 0,
-						host_count: 0,
-						host_ids: [],
-						min_timestamp: null,
-						max_timestamp: null,
-						isInterpolated: true, // Mark as interpolated for debugging
-					});
+					// For missing periods AFTER the first data point, use forward-fill
+					// Only fill if we have a last known value and we've seen the first data point
+					if (lastKnownValues !== null && hasSeenFirstDataPoint) {
+						filledData.push({
+							timeKey,
+							total_packages: lastKnownValues.total_packages,
+							packages_count: lastKnownValues.packages_count,
+							security_count: lastKnownValues.security_count,
+							record_count: 0,
+							host_count: 0,
+							host_ids: [],
+							min_timestamp: null,
+							max_timestamp: null,
+							isInterpolated: true, // Mark as interpolated for debugging
+						});
+					}
+					// If we haven't seen the first data point yet, skip this period
 				}
 			}
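Stripped of the chart bookkeeping, the reworked fill logic is a forward-fill that refuses to invent data before the first real report. The same idea as a standalone function (simplified keys, daily buckets only, assumes `points` sorted ascending):

```js
// Forward-fill daily buckets: skip days before the first report,
// then carry the last known values across gaps.
function forwardFill(points, allDays) {
	const byDay = new Map(points.map((p) => [p.timeKey, p]));
	const firstDay = points[0]?.timeKey;
	const filled = [];
	let last = null;
	for (const day of allDays) {
		if (firstDay && day < firstDay) continue; // nothing to carry yet
		if (byDay.has(day)) {
			last = byDay.get(day);
			filled.push(last);
		} else if (last) {
			filled.push({ ...last, timeKey: day, isInterpolated: true });
		}
	}
	return filled;
}
```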
@@ -810,7 +860,7 @@
 		// Get current package state for offline fallback
 		let currentPackageState = null;
 		if (hostId && hostId !== "all" && hostId !== "undefined") {
-			// Get current package counts for specific host
+			// For individual host, get current package counts from host_packages
 			const currentState = await prisma.host_packages.aggregate({
 				where: {
 					host_id: hostId,
@@ -841,34 +891,64 @@
 				security_count: securityCount,
 			};
 		} else {
-			// Get current package counts for all hosts
-			// Total packages = count of unique packages installed on at least one host
-			const totalPackagesCount = await prisma.packages.count({
-				where: {
-					host_packages: {
-						some: {}, // At least one host has this package
+			// For "All Hosts" mode, use the latest system_statistics record if available
+			// Otherwise calculate from database
+			const latestStats = await prisma.system_statistics.findFirst({
+				orderBy: {
+					timestamp: "desc",
+				},
+				select: {
+					total_packages: true,
+					unique_packages_count: true,
+					unique_security_count: true,
+					timestamp: true,
+				},
+			});
+
+			if (latestStats) {
+				// Use latest system statistics (collected by scheduled job)
+				currentPackageState = {
+					total_packages: latestStats.total_packages,
+					packages_count: latestStats.unique_packages_count,
+					security_count: latestStats.unique_security_count,
+				};
+			} else {
+				// Fallback: calculate from database if no statistics collected yet
+				const totalPackagesCount = await prisma.packages.count({
+					where: {
+						host_packages: {
+							some: {}, // At least one host has this package
+						},
 					},
-				},
-			});
+				});
 
-			// Get counts for boolean fields separately
-			const outdatedCount = await prisma.host_packages.count({
-				where: {
-					needs_update: true,
-				},
-			});
+				const uniqueOutdatedCount = await prisma.packages.count({
+					where: {
+						host_packages: {
+							some: {
+								needs_update: true,
+							},
+						},
+					},
+				});
 
-			const securityCount = await prisma.host_packages.count({
-				where: {
-					is_security_update: true,
-				},
-			});
+				const uniqueSecurityCount = await prisma.packages.count({
+					where: {
+						host_packages: {
+							some: {
+								needs_update: true,
+								is_security_update: true,
+							},
+						},
+					},
+				});
 
-			currentPackageState = {
-				total_packages: totalPackagesCount,
-				packages_count: outdatedCount,
-				security_count: securityCount,
-			};
+				currentPackageState = {
+					total_packages: totalPackagesCount,
+					packages_count: uniqueOutdatedCount,
+					security_count: uniqueSecurityCount,
+				};
+			}
 		}
 
 		// Format data for chart
@@ -923,6 +1003,11 @@
 			chartData.datasets[2].data.push(item.security_count);
 		});
 
+		// Replace the last label with "Now" to indicate current state
+		if (chartData.labels.length > 0) {
+			chartData.labels[chartData.labels.length - 1] = "Now";
+		}
+
 		// Calculate data quality metrics
 		const dataQuality = {
 			totalRecords: trendsData.length,
diff --git a/backend/src/routes/dockerRoutes.js b/backend/src/routes/dockerRoutes.js
index 7a90214..b28cef2 100644
--- a/backend/src/routes/dockerRoutes.js
+++ b/backend/src/routes/dockerRoutes.js
@@ -2,6 +2,7 @@ const express = require("express");
 const { authenticateToken } = require("../middleware/auth");
 const { getPrismaClient } = require("../config/prisma");
 const { v4: uuidv4 } = require("uuid");
+const { get_current_time, parse_date } = require("../utils/timezone");
 
 const prisma = getPrismaClient();
 const router = express.Router();
@@ -537,14 +538,7 @@ router.post("/collect", async (req, res) => {
 			return res.status(401).json({ error: "Invalid API credentials" });
 		}
 
-		const now = new Date();
-
-		// Helper function to validate and parse dates
-		const parseDate = (dateString) => {
-			if (!dateString) return now;
-			const date = new Date(dateString);
-			return Number.isNaN(date.getTime()) ? now : date;
-		};
+		const now = get_current_time();
 
 		// Process containers
 		if (containers && Array.isArray(containers)) {
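`get_current_time` and `parse_date` come from `../utils/timezone`, which is not part of this diff. Judging by the call sites — `parse_date(value, fallback)` returning the fallback for missing or invalid input — a compatible sketch would be something like this; the real module may well apply a configured timezone rather than plain `Date`:

```js
// Sketch of the timezone helpers as the call sites use them.
function get_current_time() {
	return new Date();
}

// Returns `fallback` (e.g. `now` or null) when the string is absent or unparseable.
function parse_date(dateString, fallback) {
	if (!dateString) return fallback;
	const date = new Date(dateString);
	return Number.isNaN(date.getTime()) ? fallback : date;
}

module.exports = { get_current_time, parse_date };
```

Passing the fallback explicitly is the point of the change: `parse_date(containerData.started_at, null)` can now say "missing means null" instead of inheriting the old helper's "missing means now".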
@@ -572,7 +566,7 @@ router.post("/collect", async (req, res) => {
 						tag: containerData.image_tag,
 						image_id: containerData.image_id || "unknown",
 						source: containerData.image_source || "docker-hub",
-						created_at: parseDate(containerData.created_at),
+						created_at: parse_date(containerData.created_at, now),
 						last_checked: now,
 						updated_at: now,
 					},
@@ -597,7 +591,7 @@ router.post("/collect", async (req, res) => {
 						state: containerData.state,
 						ports: containerData.ports || null,
 						started_at: containerData.started_at
-							? parseDate(containerData.started_at)
+							? parse_date(containerData.started_at, null)
 							: null,
 						updated_at: now,
 						last_checked: now,
@@ -613,9 +607,9 @@ router.post("/collect", async (req, res) => {
 						status: containerData.status,
 						state: containerData.state,
 						ports: containerData.ports || null,
-						created_at: parseDate(containerData.created_at),
+						created_at: parse_date(containerData.created_at, now),
 						started_at: containerData.started_at
-							? parseDate(containerData.started_at)
+							? parse_date(containerData.started_at, null)
 							: null,
 						updated_at: now,
 					},
@@ -651,7 +645,7 @@ router.post("/collect", async (req, res) => {
 							? BigInt(imageData.size_bytes)
 							: null,
 						source: imageData.source || "docker-hub",
-						created_at: parseDate(imageData.created_at),
+						created_at: parse_date(imageData.created_at, now),
 						updated_at: now,
 					},
 				});
@@ -780,14 +774,7 @@ router.post("/../integrations/docker", async (req, res) => {
 			`[Docker Integration] Processing for host: ${host.friendly_name}`,
 		);
 
-		const now = new Date();
-
-		// Helper function to validate and parse dates
-		const parseDate = (dateString) => {
-			if (!dateString) return now;
-			const date = new Date(dateString);
-			return Number.isNaN(date.getTime()) ? now : date;
-		};
+		const now = get_current_time();
 
 		let containersProcessed = 0;
 		let imagesProcessed = 0;
@@ -822,7 +809,7 @@ router.post("/../integrations/docker", async (req, res) => {
 						tag: containerData.image_tag,
 						image_id: containerData.image_id || "unknown",
 						source: containerData.image_source || "docker-hub",
-						created_at: parseDate(containerData.created_at),
+						created_at: parse_date(containerData.created_at, now),
 						last_checked: now,
 						updated_at: now,
 					},
@@ -847,7 +834,7 @@ router.post("/../integrations/docker", async (req, res) => {
 						state: containerData.state || containerData.status,
 						ports: containerData.ports || null,
 						started_at: containerData.started_at
-							? parseDate(containerData.started_at)
+							? parse_date(containerData.started_at, null)
 							: null,
 						updated_at: now,
 						last_checked: now,
@@ -863,9 +850,9 @@ router.post("/../integrations/docker", async (req, res) => {
 						status: containerData.status,
 						state: containerData.state || containerData.status,
 						ports: containerData.ports || null,
-						created_at: parseDate(containerData.created_at),
+						created_at: parse_date(containerData.created_at, now),
 						started_at: containerData.started_at
-							? parseDate(containerData.started_at)
+							? parse_date(containerData.started_at, null)
 							: null,
 						updated_at: now,
 					},
@@ -911,7 +898,7 @@ router.post("/../integrations/docker", async (req, res) => {
 							? BigInt(imageData.size_bytes)
 							: null,
 						source: imageSource,
-						created_at: parseDate(imageData.created_at),
+						created_at: parse_date(imageData.created_at, now),
 						last_checked: now,
 						updated_at: now,
 					},
diff --git a/backend/src/routes/hostRoutes.js b/backend/src/routes/hostRoutes.js
index 6236197..03f174c 100644
--- a/backend/src/routes/hostRoutes.js
+++ b/backend/src/routes/hostRoutes.js
@@ -11,10 +11,16 @@ const {
 	requireManageSettings,
 } = require("../middleware/permissions");
 const { queueManager, QUEUE_NAMES } = require("../services/automation");
+const { pushIntegrationToggle, isConnected } = require("../services/agentWs");
+const agentVersionService = require("../services/agentVersionService");
 
 const router = express.Router();
 const prisma = getPrismaClient();
 
+// In-memory cache for integration states (api_id -> { integration_name -> enabled })
+// This stores the last known state from successful toggles
+const integrationStateCache = new Map();
+
 // Secure endpoint to download the agent script/binary (requires API authentication)
 router.get("/agent/download", async (req, res) => {
 	try {
@@ -128,9 +134,6 @@ router.get("/agent/version", async (req, res) => {
 	try {
 		const fs = require("node:fs");
 		const path = require("node:path");
-		const { exec } = require("node:child_process");
-		const { promisify } = require("node:util");
-		const execAsync = promisify(exec);
 
 		// Get architecture parameter (default to amd64 for Go agents)
 		const architecture = req.query.arch || "amd64";
@@ -165,53 +168,108 @@ router.get("/agent/version", async (req, res) => {
 				minServerVersion: null,
 			});
 		} else {
-			// Go agent version check (binary)
-			const binaryName = `patchmon-agent-linux-${architecture}`;
-			const binaryPath = path.join(__dirname, "../../../agents", binaryName);
+			// Go agent version check
+			// Detect server architecture and map to Go architecture names
+			const os = require("node:os");
+			const { exec } = require("node:child_process");
+			const { promisify } = require("node:util");
+			const execAsync = promisify(exec);
 
-			if (!fs.existsSync(binaryPath)) {
-				return res.status(404).json({
-					error: `Go agent binary not found for architecture: ${architecture}`,
-				});
+			const serverArch = os.arch();
+			// Map Node.js architecture to Go architecture names
+			const archMap = {
+				x64: "amd64",
+				ia32: "386",
+				arm64: "arm64",
+				arm: "arm",
+			};
+			const serverGoArch = archMap[serverArch] || serverArch;
+
+			// If requested architecture matches server architecture, execute the binary
+			if (architecture === serverGoArch) {
+				const binaryName = `patchmon-agent-linux-${architecture}`;
+				const binaryPath = path.join(__dirname, "../../../agents", binaryName);
+
+				if (!fs.existsSync(binaryPath)) {
+					// Binary doesn't exist, fall back to GitHub
+					console.log(`Binary ${binaryName} not found, falling back to GitHub`);
+				} else {
+					// Execute the binary to get its version
+					try {
+						const { stdout } = await execAsync(`${binaryPath} --help`, {
+							timeout: 10000,
+						});
+
+						// Parse version from help output (e.g., "PatchMon Agent v1.3.1")
+						const versionMatch = stdout.match(
+							/PatchMon Agent v([0-9]+\.[0-9]+\.[0-9]+)/i,
+						);
+
+						if (versionMatch) {
+							const serverVersion = versionMatch[1];
+							const agentVersion = req.query.currentVersion || serverVersion;
+
+							// Simple version comparison (assuming semantic versioning)
+							const hasUpdate = agentVersion !== serverVersion;
+
+							return res.json({
+								currentVersion: agentVersion,
+								latestVersion: serverVersion,
+								hasUpdate: hasUpdate,
+								downloadUrl: `/api/v1/hosts/agent/download?arch=${architecture}`,
+								releaseNotes: `PatchMon Agent v${serverVersion}`,
+								minServerVersion: null,
+								architecture: architecture,
+								agentType: "go",
+							});
+						}
+					} catch (execError) {
+						// Execution failed, fall back to GitHub
+						console.log(
+							`Failed to execute binary ${binaryName}: ${execError.message}, falling back to GitHub`,
+						);
+					}
+				}
 			}
 
-			// Execute the binary to get its version
+			// Fall back to GitHub if architecture doesn't match or binary execution failed
 			try {
-				const { stdout } = await execAsync(`${binaryPath} --help`, {
-					timeout: 10000,
-				});
+				const versionInfo = await agentVersionService.getVersionInfo();
+				const latestVersion = versionInfo.latestVersion;
+				const agentVersion =
+					req.query.currentVersion || latestVersion || "unknown";
 
-				// Parse version from help output (e.g., "PatchMon Agent v1.3.1")
-				const versionMatch = stdout.match(
-					/PatchMon Agent v([0-9]+\.[0-9]+\.[0-9]+)/i,
-				);
-
-				if (!versionMatch) {
-					return res.status(500).json({
-						error: "Could not extract version from agent binary",
+				if (!latestVersion) {
+					return res.status(503).json({
+						error: "Unable to determine latest version from GitHub releases",
+						currentVersion: agentVersion,
+						latestVersion: null,
+						hasUpdate: false,
 					});
 				}
 
-				const serverVersion = versionMatch[1];
-				const agentVersion = req.query.currentVersion || serverVersion;
-
 				// Simple version comparison (assuming semantic versioning)
-				const hasUpdate = agentVersion !== serverVersion;
+				const hasUpdate =
+					agentVersion !== latestVersion && latestVersion !== null;
 
 				res.json({
 					currentVersion: agentVersion,
-					latestVersion: serverVersion,
+					latestVersion: latestVersion,
 					hasUpdate: hasUpdate,
 					downloadUrl: `/api/v1/hosts/agent/download?arch=${architecture}`,
-					releaseNotes: `PatchMon Agent v${serverVersion}`,
+					releaseNotes: `PatchMon Agent v${latestVersion}`,
 					minServerVersion: null,
 					architecture: architecture,
 					agentType: "go",
 				});
-			} catch (execError) {
-				console.error("Failed to execute agent binary:", execError.message);
+			} catch (serviceError) {
+				console.error(
+					"Failed to get version from agentVersionService:",
+					serviceError.message,
+				);
 				return res.status(500).json({
-					error: "Failed to get version from agent binary",
+					error: "Failed to get agent version from service",
+					details: serviceError.message,
 				});
 			}
 		}
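Both version paths keep the `!==` comparison, so a locally built agent that is *newer* than the published release would also report `hasUpdate: true`. If that ever matters, the fix is a strictly-ordered compare over the `x.y.z` triple the regex already extracts; a sketch, not part of the patch:

```js
// Returns true only when `latest` is strictly newer than `current`.
// Assumes plain x.y.z versions, which is what the regex above extracts.
function isNewer(latest, current) {
	const a = latest.split(".").map(Number);
	const b = current.split(".").map(Number);
	for (let i = 0; i < 3; i++) {
		if ((a[i] || 0) > (b[i] || 0)) return true;
		if ((a[i] || 0) < (b[i] || 0)) return false;
	}
	return false;
}

console.log(isNewer("1.3.2", "1.3.1")); // true
console.log(isNewer("1.3.1", "1.4.0")); // false (local build is ahead)
```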
"true" : "false"}" -export ARCHITECTURE="${architecture}" - +${archExport} `; // Remove the shebang from the original script and prepend our env vars @@ -2103,4 +2164,137 @@ router.patch( }, ); +// Get integration status for a host +router.get( + "/:hostId/integrations", + authenticateToken, + requireManageHosts, + async (req, res) => { + try { + const { hostId } = req.params; + + // Get host to verify it exists + const host = await prisma.hosts.findUnique({ + where: { id: hostId }, + select: { id: true, api_id: true, friendly_name: true }, + }); + + if (!host) { + return res.status(404).json({ error: "Host not found" }); + } + + // Check if agent is connected + const connected = isConnected(host.api_id); + + // Get integration states from cache (or defaults if not cached) + // Default: all integrations are disabled + const cachedState = integrationStateCache.get(host.api_id) || {}; + const integrations = { + docker: cachedState.docker || false, // Default: disabled + // Future integrations can be added here + }; + + res.json({ + success: true, + data: { + integrations, + connected, + host: { + id: host.id, + friendlyName: host.friendly_name, + apiId: host.api_id, + }, + }, + }); + } catch (error) { + console.error("Get integration status error:", error); + res.status(500).json({ error: "Failed to get integration status" }); + } + }, +); + +// Toggle integration status for a host +router.post( + "/:hostId/integrations/:integrationName/toggle", + authenticateToken, + requireManageHosts, + [body("enabled").isBoolean().withMessage("Enabled status must be a boolean")], + async (req, res) => { + try { + const errors = validationResult(req); + if (!errors.isEmpty()) { + return res.status(400).json({ errors: errors.array() }); + } + + const { hostId, integrationName } = req.params; + const { enabled } = req.body; + + // Validate integration name + const validIntegrations = ["docker"]; // Add more as they're implemented + if (!validIntegrations.includes(integrationName)) { + return res.status(400).json({ + error: "Invalid integration name", + validIntegrations, + }); + } + + // Get host to verify it exists + const host = await prisma.hosts.findUnique({ + where: { id: hostId }, + select: { id: true, api_id: true, friendly_name: true }, + }); + + if (!host) { + return res.status(404).json({ error: "Host not found" }); + } + + // Check if agent is connected + if (!isConnected(host.api_id)) { + return res.status(503).json({ + error: "Agent is not connected", + message: + "The agent must be connected via WebSocket to toggle integrations", + }); + } + + // Send WebSocket message to agent + const success = pushIntegrationToggle( + host.api_id, + integrationName, + enabled, + ); + + if (!success) { + return res.status(503).json({ + error: "Failed to send integration toggle", + message: "Agent connection may have been lost", + }); + } + + // Update cache with new state + if (!integrationStateCache.has(host.api_id)) { + integrationStateCache.set(host.api_id, {}); + } + integrationStateCache.get(host.api_id)[integrationName] = enabled; + + res.json({ + success: true, + message: `Integration ${integrationName} ${enabled ? 
"enabled" : "disabled"} successfully`, + data: { + integration: integrationName, + enabled, + host: { + id: host.id, + friendlyName: host.friendly_name, + apiId: host.api_id, + }, + }, + }); + } catch (error) { + console.error("Toggle integration error:", error); + res.status(500).json({ error: "Failed to toggle integration" }); + } + }, +); + module.exports = router; diff --git a/backend/src/routes/tfaRoutes.js b/backend/src/routes/tfaRoutes.js index f69206a..a97e6f3 100644 --- a/backend/src/routes/tfaRoutes.js +++ b/backend/src/routes/tfaRoutes.js @@ -60,9 +60,14 @@ router.post( authenticateToken, [ body("token") + .notEmpty() + .withMessage("Token is required") + .isString() + .withMessage("Token must be a string") .isLength({ min: 6, max: 6 }) - .withMessage("Token must be 6 digits"), - body("token").isNumeric().withMessage("Token must contain only numbers"), + .withMessage("Token must be exactly 6 digits") + .matches(/^\d{6}$/) + .withMessage("Token must contain only numbers"), ], async (req, res) => { try { @@ -71,7 +76,11 @@ router.post( return res.status(400).json({ errors: errors.array() }); } - const { token } = req.body; + // Ensure token is a string (convert if needed) + let { token } = req.body; + if (typeof token !== "string") { + token = String(token); + } const userId = req.user.id; // Get user's TFA secret diff --git a/backend/src/services/agentWs.js b/backend/src/services/agentWs.js index b5e3b1f..77fb860 100644 --- a/backend/src/services/agentWs.js +++ b/backend/src/services/agentWs.js @@ -3,6 +3,7 @@ const WebSocket = require("ws"); const url = require("node:url"); +const { get_current_time } = require("../utils/timezone"); // Connection registry by api_id const apiIdToSocket = new Map(); @@ -49,7 +50,29 @@ function init(server, prismaClient) { wss.handleUpgrade(request, socket, head, (ws) => { ws.on("message", (message) => { // Echo back for Bull Board WebSocket - ws.send(message); + try { + ws.send(message); + } catch (_err) { + // Ignore send errors (connection may be closed) + } + }); + + ws.on("error", (err) => { + // Handle WebSocket errors gracefully for Bull Board + if ( + err.code === "WS_ERR_INVALID_CLOSE_CODE" || + err.code === "ECONNRESET" || + err.code === "EPIPE" + ) { + // These are expected errors, just log quietly + console.log("[bullboard-ws] connection error:", err.code); + } else { + console.error("[bullboard-ws] error:", err.message || err); + } + }); + + ws.on("close", () => { + // Connection closed, no action needed }); }); return; @@ -117,7 +140,58 @@ function init(server, prismaClient) { } }); - ws.on("close", () => { + ws.on("error", (err) => { + // Handle WebSocket errors gracefully without crashing + // Common errors: invalid close codes (1006), connection resets, etc. 
+			if (
+				err.code === "WS_ERR_INVALID_CLOSE_CODE" ||
+				err.message?.includes("invalid status code 1006") ||
+				err.message?.includes("Invalid WebSocket frame")
+			) {
+				// 1006 is a special close code indicating abnormal closure
+				// It cannot be sent in a close frame, but can occur when connection is lost
+				console.log(
+					`[agent-ws] connection error for ${apiId} (abnormal closure):`,
+					err.message || err.code,
+				);
+			} else if (
+				err.code === "ECONNRESET" ||
+				err.code === "EPIPE" ||
+				err.message?.includes("read ECONNRESET")
+			) {
+				// Connection reset errors are common and expected
+				console.log(`[agent-ws] connection reset for ${apiId}`);
+			} else {
+				// Log other errors for debugging
+				console.error(
+					`[agent-ws] error for ${apiId}:`,
+					err.message || err.code || err,
+				);
+			}
+
+			// Clean up connection on error
+			const existing = apiIdToSocket.get(apiId);
+			if (existing === ws) {
+				apiIdToSocket.delete(apiId);
+				connectionMetadata.delete(apiId);
+				// Notify subscribers of disconnection
+				notifyConnectionChange(apiId, false);
+			}
+
+			// Try to close the connection gracefully if still open
+			if (
+				ws.readyState === WebSocket.OPEN ||
+				ws.readyState === WebSocket.CONNECTING
+			) {
+				try {
+					ws.close(1000); // Normal closure
+				} catch {
+					// Ignore errors when closing
+				}
+			}
+		});
+
+		ws.on("close", (code, reason) => {
 			const existing = apiIdToSocket.get(apiId);
 			if (existing === ws) {
 				apiIdToSocket.delete(apiId);
@@ -126,7 +200,7 @@ function init(server, prismaClient) {
 				notifyConnectionChange(apiId, false);
 			}
 			console.log(
-				`[agent-ws] disconnected api_id=${apiId} total=${apiIdToSocket.size}`,
+				`[agent-ws] disconnected api_id=${apiId} code=${code} reason=${reason || "none"} total=${apiIdToSocket.size}`,
 			);
 		});
 
@@ -181,6 +255,29 @@ function pushUpdateAgent(apiId) {
 	safeSend(ws, JSON.stringify({ type: "update_agent" }));
 }
 
+function pushIntegrationToggle(apiId, integrationName, enabled) {
+	const ws = apiIdToSocket.get(apiId);
+	if (ws && ws.readyState === WebSocket.OPEN) {
+		safeSend(
+			ws,
+			JSON.stringify({
+				type: "integration_toggle",
+				integration: integrationName,
+				enabled: enabled,
+			}),
+		);
+		console.log(
+			`📤 Pushed integration toggle to agent ${apiId}: ${integrationName} = ${enabled}`,
+		);
+		return true;
+	} else {
+		console.log(
+			`⚠️ Agent ${apiId} not connected, cannot push integration toggle, please edit config.yml manually`,
+		);
+		return false;
+	}
+}
+
 function getConnectionByApiId(apiId) {
 	return apiIdToSocket.get(apiId);
 }
@@ -314,7 +411,7 @@ async function handleDockerStatusEvent(apiId, message) {
 				status: status,
 				state: status,
 				updated_at: new Date(timestamp || Date.now()),
-				last_checked: new Date(),
+				last_checked: get_current_time(),
 			},
 		});
 
@@ -340,6 +437,7 @@ module.exports = {
 	pushReportNow,
 	pushSettingsUpdate,
 	pushUpdateAgent,
+	pushIntegrationToggle,
 	pushUpdateNotification,
 	pushUpdateNotificationToAll,
 	// Expose read-only view of connected agents
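On the wire, the toggle is a single JSON frame, so an agent-side handler only has to match the `type` field. The real agent is Go; purely for illustration, a Node `ws` consumer would look like the sketch below — the endpoint path and auth headers are assumptions, since this diff doesn't show how agents authenticate their WebSocket upgrade:

```js
const WebSocket = require("ws");

const ws = new WebSocket("wss://patchmon.example.com/agent-ws", {
	headers: { "X-API-ID": "...", "X-API-KEY": "..." }, // assumed auth scheme
});

ws.on("message", (raw) => {
	const msg = JSON.parse(raw);
	// Frame sent by pushIntegrationToggle():
	// { type: "integration_toggle", integration: "docker", enabled: true }
	if (msg.type === "integration_toggle") {
		console.log(`set ${msg.integration} -> ${msg.enabled}`);
	}
});
```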
diff --git a/backend/src/services/automation/dockerImageUpdateCheck.js b/backend/src/services/automation/dockerImageUpdateCheck.js
index 2706768..a70db9f 100644
--- a/backend/src/services/automation/dockerImageUpdateCheck.js
+++ b/backend/src/services/automation/dockerImageUpdateCheck.js
@@ -139,15 +139,13 @@ class DockerImageUpdateCheck {
 		console.log("🐳 Starting Docker image update check...");
 
 		try {
-			// Get all Docker images that have a digest and repository
+			// Get all Docker images that have a digest
+			// Note: repository is required (non-nullable) in schema, so we don't need to check it
 			const images = await prisma.docker_images.findMany({
 				where: {
 					digest: {
 						not: null,
 					},
-					repository: {
-						not: null,
-					},
 				},
 				include: {
 					docker_image_updates: true,
diff --git a/backend/src/services/automation/index.js b/backend/src/services/automation/index.js
index 1731e33..cc81099 100644
--- a/backend/src/services/automation/index.js
+++ b/backend/src/services/automation/index.js
@@ -3,6 +3,7 @@ const { redis, redisConnection } = require("./shared/redis");
 const { prisma } = require("./shared/prisma");
 const agentWs = require("../agentWs");
 const { v4: uuidv4 } = require("uuid");
+const { get_current_time } = require("../../utils/timezone");
 
 // Import automation classes
 const GitHubUpdateCheck = require("./githubUpdateCheck");
@@ -12,6 +13,7 @@ const OrphanedPackageCleanup = require("./orphanedPackageCleanup");
 const DockerInventoryCleanup = require("./dockerInventoryCleanup");
 const DockerImageUpdateCheck = require("./dockerImageUpdateCheck");
 const MetricsReporting = require("./metricsReporting");
+const SystemStatistics = require("./systemStatistics");
 
 // Queue names
 const QUEUE_NAMES = {
@@ -22,6 +24,7 @@ const QUEUE_NAMES = {
 	DOCKER_INVENTORY_CLEANUP: "docker-inventory-cleanup",
 	DOCKER_IMAGE_UPDATE_CHECK: "docker-image-update-check",
 	METRICS_REPORTING: "metrics-reporting",
+	SYSTEM_STATISTICS: "system-statistics",
 	AGENT_COMMANDS: "agent-commands",
 };
 
@@ -105,6 +108,9 @@ class QueueManager {
 		this.automations[QUEUE_NAMES.METRICS_REPORTING] = new MetricsReporting(
 			this,
 		);
+		this.automations[QUEUE_NAMES.SYSTEM_STATISTICS] = new SystemStatistics(
+			this,
+		);
 
 		console.log("✅ All automation classes initialized");
 	}
@@ -190,6 +196,15 @@ class QueueManager {
 			workerOptions,
 		);
 
+		// System Statistics Worker
+		this.workers[QUEUE_NAMES.SYSTEM_STATISTICS] = new Worker(
+			QUEUE_NAMES.SYSTEM_STATISTICS,
+			this.automations[QUEUE_NAMES.SYSTEM_STATISTICS].process.bind(
+				this.automations[QUEUE_NAMES.SYSTEM_STATISTICS],
+			),
+			workerOptions,
+		);
+
 		// Agent Commands Worker
 		this.workers[QUEUE_NAMES.AGENT_COMMANDS] = new Worker(
 			QUEUE_NAMES.AGENT_COMMANDS,
@@ -216,8 +231,8 @@ class QueueManager {
 						api_id: api_id,
 						status: "active",
 						attempt_number: job.attemptsMade + 1,
-						created_at: new Date(),
-						updated_at: new Date(),
+						created_at: get_current_time(),
+						updated_at: get_current_time(),
 					},
 				});
 				console.log(`📝 Logged job to job_history: ${job.id} (${type})`);
@@ -257,8 +272,8 @@ class QueueManager {
 						where: { job_id: job.id },
 						data: {
 							status: "completed",
-							completed_at: new Date(),
-							updated_at: new Date(),
+							completed_at: get_current_time(),
+							updated_at: get_current_time(),
 						},
 					});
 					console.log(`✅ Marked job as completed in job_history: ${job.id}`);
@@ -271,8 +286,8 @@ class QueueManager {
 						data: {
 							status: "failed",
 							error_message: error.message,
-							completed_at: new Date(),
-							updated_at: new Date(),
+							completed_at: get_current_time(),
+							updated_at: get_current_time(),
 						},
 					});
 					console.log(`❌ Marked job as failed in job_history: ${job.id}`);
@@ -322,6 +337,7 @@ class QueueManager {
 		await this.automations[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].schedule();
 		await this.automations[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK].schedule();
 		await this.automations[QUEUE_NAMES.METRICS_REPORTING].schedule();
+		await this.automations[QUEUE_NAMES.SYSTEM_STATISTICS].schedule();
 	}
 
 	/**
@@ -357,6 +373,10 @@ class QueueManager {
 		].triggerManual();
 	}
 
+	async triggerSystemStatistics() {
+		return this.automations[QUEUE_NAMES.SYSTEM_STATISTICS].triggerManual();
+	}
+
 	async triggerMetricsReporting() {
 		return this.automations[QUEUE_NAMES.METRICS_REPORTING].triggerManual();
 	}
diff --git a/backend/src/services/automation/systemStatistics.js b/backend/src/services/automation/systemStatistics.js
new file mode 100644
index 0000000..46ae17e
--- /dev/null
+++ b/backend/src/services/automation/systemStatistics.js
@@ -0,0 +1,140 @@
+const { prisma } = require("./shared/prisma");
+const { v4: uuidv4 } = require("uuid");
+
+/**
+ * System Statistics Collection Automation
+ * Collects aggregated system-wide statistics every 30 minutes
+ * for use in package trends charts
+ */
+class SystemStatistics {
+	constructor(queueManager) {
+		this.queueManager = queueManager;
+		this.queueName = "system-statistics";
+	}
+
+	/**
+	 * Process system statistics collection job
+	 */
+	async process(_job) {
+		const startTime = Date.now();
+		console.log("📊 Starting system statistics collection...");
+
+		try {
+			// Calculate unique package counts across all hosts
+			const uniquePackagesCount = await prisma.packages.count({
+				where: {
+					host_packages: {
+						some: {
+							needs_update: true,
+						},
+					},
+				},
+			});
+
+			const uniqueSecurityCount = await prisma.packages.count({
+				where: {
+					host_packages: {
+						some: {
+							needs_update: true,
+							is_security_update: true,
+						},
+					},
+				},
+			});
+
+			// Calculate total unique packages installed on at least one host
+			const totalPackages = await prisma.packages.count({
+				where: {
+					host_packages: {
+						some: {}, // At least one host has this package
+					},
+				},
+			});
+
+			// Calculate total hosts
+			const totalHosts = await prisma.hosts.count({
+				where: {
+					status: "active",
+				},
+			});
+
+			// Calculate hosts needing updates (distinct hosts with packages needing updates)
+			const hostsNeedingUpdates = await prisma.hosts.count({
+				where: {
+					status: "active",
+					host_packages: {
+						some: {
+							needs_update: true,
+						},
+					},
+				},
+			});
+
+			// Store statistics in database
+			await prisma.system_statistics.create({
+				data: {
+					id: uuidv4(),
+					unique_packages_count: uniquePackagesCount,
+					unique_security_count: uniqueSecurityCount,
+					total_packages: totalPackages,
+					total_hosts: totalHosts,
+					hosts_needing_updates: hostsNeedingUpdates,
+					timestamp: new Date(),
+				},
+			});
+
+			const executionTime = Date.now() - startTime;
+			console.log(
+				`✅ System statistics collection completed in ${executionTime}ms - Unique packages: ${uniquePackagesCount}, Security: ${uniqueSecurityCount}, Total hosts: ${totalHosts}`,
+			);
+
+			return {
+				success: true,
+				uniquePackagesCount,
+				uniqueSecurityCount,
+				totalPackages,
+				totalHosts,
+				hostsNeedingUpdates,
+				executionTime,
+			};
+		} catch (error) {
+			const executionTime = Date.now() - startTime;
+			console.error(
+				`❌ System statistics collection failed after ${executionTime}ms:`,
+				error.message,
+			);
+			throw error;
+		}
+	}
+
+	/**
+	 * Schedule recurring system statistics collection (every 30 minutes)
+	 */
+	async schedule() {
+		const job = await this.queueManager.queues[this.queueName].add(
+			"system-statistics",
+			{},
+			{
+				repeat: { pattern: "*/30 * * * *" }, // Every 30 minutes
+				jobId: "system-statistics-recurring",
+			},
+		);
+		console.log("✅ System statistics collection scheduled (every 30 minutes)");
+		return job;
+	}
+
+	/**
+	 * Trigger manual system statistics collection
+	 */
+	async triggerManual() {
+		const job = await this.queueManager.queues[this.queueName].add(
+			"system-statistics-manual",
+			{},
+			{ priority: 1 },
+		);
+		console.log("✅ Manual system statistics collection triggered");
+		return job;
+	}
+}
+
+module.exports = SystemStatistics;
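The `*/30 * * * *` pattern plus the fixed `jobId: "system-statistics-recurring"` gives one repeatable BullMQ job rather than a new schedule per server restart. For an end-to-end check without waiting for the next half-hour boundary, the manual trigger can be paired with a read-back of the newest row; a sketch that assumes `queueManager` and `prisma` are in scope:

```js
// Trigger a run and, once the worker has finished, read back the snapshot.
const job = await queueManager.triggerSystemStatistics();
console.log("queued job", job.id);

// ...after the worker completes:
const latest = await prisma.system_statistics.findFirst({
	orderBy: { timestamp: "desc" },
});
console.log(latest.unique_packages_count, latest.unique_security_count);
```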
diff --git a/frontend/src/components/Layout.jsx b/frontend/src/components/Layout.jsx
index f647487..1d72ca0 100644
--- a/frontend/src/components/Layout.jsx
+++ b/frontend/src/components/Layout.jsx
@@ -120,7 +120,6 @@ const Layout = ({ children }) => {
 				name: "Automation",
 				href: "/automation",
 				icon: RefreshCw,
-				new: true,
 			});
 
 		if (canViewReports()) {
diff --git a/frontend/src/pages/Automation.jsx b/frontend/src/pages/Automation.jsx
index ede20c9..775d7e3 100644
--- a/frontend/src/pages/Automation.jsx
+++ b/frontend/src/pages/Automation.jsx
@@ -196,6 +196,25 @@ const Automation = () => {
 				year: "numeric",
 			});
 		}
+		if (schedule === "Every 30 minutes") {
+			const now = new Date();
+			const nextRun = new Date(now);
+			// Round up to the next 30-minute mark
+			const minutes = now.getMinutes();
+			if (minutes < 30) {
+				nextRun.setMinutes(30, 0, 0);
+			} else {
+				nextRun.setHours(nextRun.getHours() + 1, 0, 0, 0);
+			}
+			return nextRun.toLocaleString([], {
+				hour12: true,
+				hour: "numeric",
+				minute: "2-digit",
+				day: "numeric",
+				month: "numeric",
+				year: "numeric",
+			});
+		}
 		return "Unknown";
 	};
 
@@ -236,6 +255,18 @@ const Automation = () => {
 			nextHour.setHours(nextHour.getHours() + 1, 0, 0, 0);
 			return nextHour.getTime();
 		}
+		if (schedule === "Every 30 minutes") {
+			const now = new Date();
+			const nextRun = new Date(now);
+			// Round up to the next 30-minute mark
+			const minutes = now.getMinutes();
+			if (minutes < 30) {
+				nextRun.setMinutes(30, 0, 0);
+			} else {
+				nextRun.setHours(nextRun.getHours() + 1, 0, 0, 0);
+			}
+			return nextRun.getTime();
+		}
 		return Number.MAX_SAFE_INTEGER; // Unknown schedules go to bottom
 	};
 
@@ -294,6 +325,8 @@ const Automation = () => {
 				endpoint = "/automation/trigger/docker-inventory-cleanup";
 			} else if (jobType === "agent-collection") {
 				endpoint = "/automation/trigger/agent-collection";
+			} else if (jobType === "system-statistics") {
+				endpoint = "/automation/trigger/system-statistics";
 			}
 
 			const _response = await api.post(endpoint, data);
@@ -615,6 +648,10 @@ const Automation = () => {
 											automation.queue.includes("agent-commands")
 										) {
 											triggerManualJob("agent-collection");
+										} else if (
+											automation.queue.includes("system-statistics")
+										) {
+											triggerManualJob("system-statistics");
 										}
 									}}
 									className="inline-flex items-center justify-center w-6 h-6 border border-transparent rounded text-white bg-green-600 hover:bg-green-700 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-green-500 transition-colors duration-200"
diff --git a/frontend/src/pages/Dashboard.jsx b/frontend/src/pages/Dashboard.jsx
index c5bdfae..80aeace 100644
--- a/frontend/src/pages/Dashboard.jsx
+++ b/frontend/src/pages/Dashboard.jsx
@@ -55,6 +55,8 @@ const Dashboard = () => {
 	const [cardPreferences, setCardPreferences] = useState([]);
 	const [packageTrendsPeriod, setPackageTrendsPeriod] = useState("1"); // days
 	const [packageTrendsHost, setPackageTrendsHost] = useState("all"); // host filter
+	const [systemStatsJobId, setSystemStatsJobId] = useState(null); // Track job ID for system statistics
+	const [isTriggeringJob, setIsTriggeringJob] = useState(false);
 	const navigate = useNavigate();
 	const { isDark } = useTheme();
 	const { user } = useAuth();
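The 30-minute rounding now appears twice in Automation.jsx — once for the display string, once for the sort key. If it gains a third caller, the shared piece is just the boundary calculation; a sketch of the extracted helper (not in the patch):

```js
// Next half-hour boundary after `now`: hh:30 if before :30, else (hh+1):00.
function getNext30MinuteMark(now = new Date()) {
	const next = new Date(now);
	if (now.getMinutes() < 30) {
		next.setMinutes(30, 0, 0);
	} else {
		next.setHours(next.getHours() + 1, 0, 0, 0);
	}
	return next;
}
```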
diff --git a/frontend/src/pages/Dashboard.jsx b/frontend/src/pages/Dashboard.jsx
index c5bdfae..80aeace 100644
--- a/frontend/src/pages/Dashboard.jsx
+++ b/frontend/src/pages/Dashboard.jsx
@@ -55,6 +55,8 @@ const Dashboard = () => {
 	const [cardPreferences, setCardPreferences] = useState([]);
 	const [packageTrendsPeriod, setPackageTrendsPeriod] = useState("1"); // days
 	const [packageTrendsHost, setPackageTrendsHost] = useState("all"); // host filter
+	const [systemStatsJobId, setSystemStatsJobId] = useState(null); // Track job ID for system statistics
+	const [isTriggeringJob, setIsTriggeringJob] = useState(false);
 	const navigate = useNavigate();
 	const { isDark } = useTheme();
 	const { user } = useAuth();
@@ -772,56 +774,108 @@ const Dashboard = () => {
 
 						Package Trends Over Time
 
-					{/* Refresh Button */}
-					…
-					{/* Period Selector */}
-					…
-					{/* Host Selector */}
-					<select …>
-						{packageTrendsData?.hosts?.length > 0 ? (
-							packageTrendsData.hosts.map((host) => (
-								<option …>…</option>
-							))
-						) : (
-							<option …>…</option>
-						)}
-					</select>
+					<div …>
+						{/* Refresh Button */}
+						…
+						{/* Period Selector */}
+						…
+						{/* Host Selector */}
+						<select
+							…
+							onChange={(e) => {
+								setPackageTrendsHost(e.target.value);
+								// Clear job ID message when host selection changes
+								setSystemStatsJobId(null);
+							}}
+							className="px-3 py-1.5 text-sm border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-secondary-900 dark:text-white focus:ring-2 focus:ring-primary-500 focus:border-primary-500"
+						>
+							…
+							{packageTrendsData?.hosts?.length > 0 ? (
+								packageTrendsData.hosts.map((host) => (
+									<option …>…</option>
+								))
+							) : (
+								<option …>…</option>
+							)}
+						</select>
+					</div>
+					{/* Job ID Message */}
+					{systemStatsJobId && packageTrendsHost === "all" && (
+						<div …>
+							Ran collection job #{systemStatsJobId}
+						</div>
+					)}
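Note: the hunk above adds systemStatsJobId and isTriggeringJob state, but the handler that sets them falls outside the visible context. A plausible sketch, assuming the trigger endpoint wired up in Automation.jsx and a response carrying the queued job's id (the handler name and response shape are assumptions):

const triggerSystemStats = async () => {
	setIsTriggeringJob(true);
	try {
		const response = await api.post("/automation/trigger/system-statistics");
		// Assumed shape: { data: { jobId: "..." } }
		setSystemStatsJobId(response.data?.jobId ?? null);
	} finally {
		setIsTriggeringJob(false);
	}
};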
@@ -1167,13 +1221,40 @@ const Dashboard = () => {
 							title: (context) => {
 								const label = context[0].label;
 
+								// Handle "Now" label
+								if (label === "Now") {
+									return "Now";
+								}
+
 								// Handle empty or invalid labels
 								if (!label || typeof label !== "string") {
 									return "Unknown Date";
 								}
 
+								// Check if it's a full ISO timestamp (for "Last 24 hours")
+								// Format: "2025-01-15T14:30:00.000Z" or "2025-01-15T14:30:00.000"
+								if (label.includes("T") && label.includes(":")) {
+									try {
+										const date = new Date(label);
+										// Check if date is valid
+										if (Number.isNaN(date.getTime())) {
+											return label; // Return original label if date is invalid
+										}
+										// Format full ISO timestamp with date and time
+										return date.toLocaleDateString("en-US", {
+											month: "short",
+											day: "numeric",
+											hour: "numeric",
+											minute: "2-digit",
+											hour12: true,
+										});
+									} catch (_error) {
+										return label; // Return original label if parsing fails
+									}
+								}
+
 								// Format hourly labels (e.g., "2025-10-07T14" -> "Oct 7, 2:00 PM")
-								if (label.includes("T")) {
+								if (label.includes("T") && !label.includes(":")) {
 									try {
 										const date = new Date(`${label}:00:00`);
 										// Check if date is valid
@@ -1233,13 +1314,41 @@ const Dashboard = () => {
 								callback: function (value, _index, _ticks) {
 									const label = this.getLabelForValue(value);
 
+									// Handle "Now" label
+									if (label === "Now") {
+										return "Now";
+									}
+
 									// Handle empty or invalid labels
 									if (!label || typeof label !== "string") {
 										return "Unknown";
 									}
 
+									// Check if it's a full ISO timestamp (for "Last 24 hours")
+									// Format: "2025-01-15T14:30:00.000Z" or "2025-01-15T14:30:00.000"
+									if (label.includes("T") && label.includes(":")) {
+										try {
+											const date = new Date(label);
+											// Check if date is valid
+											if (Number.isNaN(date.getTime())) {
+												return label; // Return original label if date is invalid
+											}
+											// Extract hour from full ISO timestamp
+											const hourNum = date.getHours();
+											return hourNum === 0
+												? "12 AM"
+												: hourNum < 12
+													? `${hourNum} AM`
+													: hourNum === 12
+														? "12 PM"
+														: `${hourNum - 12} PM`;
+										} catch (_error) {
+											return label; // Return original label if parsing fails
+										}
+									}
+
 									// Format hourly labels (e.g., "2025-10-07T14" -> "2 PM")
-									if (label.includes("T")) {
+									if (label.includes("T") && !label.includes(":")) {
 										try {
 											const hour = label.split("T")[1];
 											const hourNum = parseInt(hour, 10);
diff --git a/frontend/src/pages/HostDetail.jsx b/frontend/src/pages/HostDetail.jsx
index 7b943b2..a625269 100644
--- a/frontend/src/pages/HostDetail.jsx
+++ b/frontend/src/pages/HostDetail.jsx
@@ -281,6 +281,67 @@ const HostDetail = () => {
 		},
 	});
 
+	// Fetch integration status
+	const {
+		data: integrationsData,
+		isLoading: isLoadingIntegrations,
+		refetch: refetchIntegrations,
+	} = useQuery({
+		queryKey: ["host-integrations", hostId],
+		queryFn: () =>
+			adminHostsAPI.getIntegrations(hostId).then((res) => res.data),
+		staleTime: 30 * 1000, // 30 seconds
+		refetchOnWindowFocus: false,
+		enabled: !!hostId && activeTab === "integrations",
+	});
+
+	// Refetch integrations when WebSocket status changes (e.g., after agent restart)
+	useEffect(() => {
+		if (
+			wsStatus?.connected &&
+			activeTab === "integrations" &&
+			integrationsData?.data?.connected === false
+		) {
+			// Agent just reconnected, refetch integrations to get updated connection status
+			refetchIntegrations();
+		}
+	}, [
+		wsStatus?.connected,
+		activeTab,
+		integrationsData?.data?.connected,
+		refetchIntegrations,
+	]);
+
+	// Toggle integration mutation
+	const toggleIntegrationMutation = useMutation({
+		mutationFn: ({ integrationName, enabled }) =>
+			adminHostsAPI
+				.toggleIntegration(hostId, integrationName, enabled)
+				.then((res) => res.data),
+		onSuccess: (data) => {
+			// Optimistically update the cache with the new state
+			queryClient.setQueryData(["host-integrations", hostId], (oldData) => {
+				if (!oldData) return oldData;
+				return {
+					...oldData,
+					data: {
+						...oldData.data,
+						integrations: {
+							...oldData.data.integrations,
+							[data.data.integration]: data.data.enabled,
+						},
+					},
+				};
+			});
+			// Also invalidate to ensure we get fresh data
+			queryClient.invalidateQueries(["host-integrations", hostId]);
+		},
+		onError: () => {
+			// On error, refetch to get the actual state
+			refetchIntegrations();
+		},
+	});
+
 	const handleDeleteHost = async () => {
 		if (
 			window.confirm(
@@ -666,6 +727,17 @@ const HostDetail = () => {
 					>
 						Notes
 					</button>
+					<button
+						…
+						onClick={() => setActiveTab("integrations")}
+					>
+						Integrations
+					</button>
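Note: the two Chart.js callbacks in the Dashboard hunks above (tooltip title and axis tick) apply the same label rules. A condensed, runnable sketch of that dispatch (the function name is illustrative):

// "Now" passes through; full ISO timestamps (containing ":") are formatted
// with date + time; hour-only buckets like "2025-10-07T14" are padded to a
// parseable timestamp first.
function formatTrendLabel(label) {
	if (label === "Now") return "Now";
	if (!label || typeof label !== "string") return "Unknown Date";
	const base =
		label.includes("T") && !label.includes(":") ? `${label}:00:00` : label;
	if (base.includes("T")) {
		const date = new Date(base);
		if (Number.isNaN(date.getTime())) return label;
		return date.toLocaleDateString("en-US", {
			month: "short",
			day: "numeric",
			hour: "numeric",
			minute: "2-digit",
			hour12: true,
		});
	}
	return label;
}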
@@ -1446,6 +1518,101 @@ const HostDetail = () => {
 				{/* Agent Queue */}
 				{activeTab === "queue" && <… />}
+
+				{/* Integrations */}
+				{activeTab === "integrations" && (
+					<div …>
+						{isLoadingIntegrations ? (
+							<div …>
+								…
+							</div>
+						) : (
+							<div …>
+								{/* Docker Integration */}
+								<div …>
+									<div …>
+										<div …>
+											…
+											<span …>Docker</span>
+											{integrationsData?.data?.integrations?.docker ? (
+												<span …>Enabled</span>
+											) : (
+												<span …>Disabled</span>
+											)}
+										</div>
+										<p …>
+											Monitor Docker containers, images, volumes, and
+											networks. Collects real-time container status
+											events.
+										</p>
+									</div>
+									…
+								</div>
+								{!wsStatus?.connected && (
+									<p …>
+										Agent must be connected via WebSocket to toggle
+										integrations
+									</p>
+								)}
+								{toggleIntegrationMutation.isPending && (
+									<p …>
+										Updating integration...
+									</p>
+								)}
+							</div>
+						)}
+
+						{/* Future integrations can be added here with the same pattern */}
+					</div>
+				)}
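Note: the toggle control elided above would drive the mutation defined earlier. A minimal handler sketch using only identifiers visible in this diff (the handler name itself is hypothetical):

const onToggleDocker = () => {
	// Guard: agent must be connected and no toggle already in flight
	if (!wsStatus?.connected || toggleIntegrationMutation.isPending) return;
	const current = integrationsData?.data?.integrations?.docker ?? false;
	toggleIntegrationMutation.mutate({
		integrationName: "docker",
		enabled: !current,
	});
};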
@@ -1639,7 +1806,8 @@ const CredentialsModal = ({ host, isOpen, onClose }) => {
 							>
-								<option …>…</option>
+								<option …>…</option>
+								<option value="arm">…</option>
 							…
 								Select the architecture of the target host
diff --git a/frontend/src/pages/Repositories.jsx b/frontend/src/pages/Repositories.jsx
index 5bcf845..77d0300 100644
--- a/frontend/src/pages/Repositories.jsx
+++ b/frontend/src/pages/Repositories.jsx
@@ -237,8 +237,14 @@ const Repositories = () => {
 		// Handle special cases
 		if (sortField === "security") {
-			aValue = a.isSecure ? "Secure" : "Insecure";
-			bValue = b.isSecure ? "Secure" : "Insecure";
+			// Use the same logic as filtering to determine isSecure
+			const aIsSecure =
+				a.isSecure !== undefined ? a.isSecure : a.url.startsWith("https://");
+			const bIsSecure =
+				b.isSecure !== undefined ? b.isSecure : b.url.startsWith("https://");
+			// Sort by boolean: true (Secure) comes before false (Insecure) when ascending
+			aValue = aIsSecure ? 1 : 0;
+			bValue = bIsSecure ? 1 : 0;
 		} else if (sortField === "status") {
 			aValue = a.is_active ? "Active" : "Inactive";
 			bValue = b.is_active ? "Active" : "Inactive";
@@ -535,12 +541,12 @@ const Repositories = () => {
 					{visibleColumns.map((column) => (
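Note: the security sort above falls back to the URL scheme whenever the server did not send an isSecure flag. The same rule as a reusable comparator, sketched for illustration (names are hypothetical):

const isSecure = (repo) =>
	repo.isSecure !== undefined ? repo.isSecure : repo.url.startsWith("https://");

// Uses the same 0/1 encoding as the diff's aValue/bValue; negate the result
// (or swap the operands) for the opposite sort direction
const bySecurity = (a, b) => (isSecure(a) ? 1 : 0) - (isSecure(b) ? 1 : 0);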