Compare commits

..

14 Commits

Author SHA1 Message Date
renovate[bot]
e1162fce79 Update dependency express-rate-limit to v8 2025-11-07 22:16:18 +00:00
9 Technology Group LTD
3c780d07ff Merge pull request #288 from PatchMon/1-3-3
Bug fixes
2025-11-07 22:14:37 +00:00
Muhammad Ibrahim
55de7b40ed Merge remote 1-3-3 branch with local changes 2025-11-07 22:07:30 +00:00
Muhammad Ibrahim
90e56d62bb Update biome to 2.3.4 to match CI 2025-11-07 22:07:21 +00:00
Muhammad Ibrahim
497aeb8068 Fixed biome issues and added timezone (TZ) support 2025-11-07 22:04:27 +00:00
Muhammad Ibrahim
f5b0e930f7 Fixed reporting mechanism 2025-11-07 18:33:17 +00:00
Muhammad Ibrahim
e73ebc383c Fixed the issue with dashboard graphs not showing information correctly; statistics are now queued into a new dedicated table and collected via the automation queue 2025-11-07 10:00:19 +00:00
9 Technology Group LTD
ef0bcd2240 Merge pull request #285 from FutureCow/patch-1
Add IPv6 support to server configuration
2025-11-07 08:23:46 +00:00
Muhammad Ibrahim
63831caba3 Fixed TFA route handling for insertion of the TFA number
Better handling of systems that are already enrolled: check whether the config.yml file exists and ping with its credentials, instead of checking for machine_id
UI justification improvements on the repositories pages
2025-11-07 08:20:42 +00:00
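The enrollment check this commit describes reduces to the sketch below (paths as used in the install-script diff further down; the full logic, including the fallback when the binary is missing, appears in that diff):

if [[ -f /etc/patchmon/config.yml ]] && [[ -f /etc/patchmon/credentials.yml ]]; then
    # Reuse the existing credentials: if the agent can still ping the server,
    # this host is already enrolled and the installer can exit early.
    if /usr/local/bin/patchmon-agent ping >/dev/null 2>&1; then
        echo "Already enrolled - skipping installation"
        exit 0
    fi
fi
echo "Not enrolled (or ping failed) - proceeding with installation"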
Muhammad Ibrahim
8e5eb54e02 fixed code quality 2025-11-06 22:16:35 +00:00
Muhammad Ibrahim
a8eb3ec21c Fix Docker error handling
Fix WebSocket routes
Add timezone variable in code
Changed env.example to match
2025-11-06 22:08:00 +00:00
FutureCow
c56debc80e Add IPv6 support to server configuration
2025-11-05 20:34:13 +01:00
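The server-side IPv6 change itself is not shown in the diffs below; a quick way to sanity-check it after deploying is a dual-stack curl probe (the port and endpoint path here are assumptions for illustration, not taken from this changeset):

# Probe the API over IPv6 and IPv4 (hypothetical port/path)
curl -6 -s -o /dev/null -w "IPv6: %{http_code}\n" "http://[::1]:3001/api/v1/health"
curl -4 -s -o /dev/null -w "IPv4: %{http_code}\n" "http://127.0.0.1:3001/api/v1/health"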
Muhammad Ibrahim
e57ff7612e removed emoji 2025-11-01 03:25:31 +00:00
Muhammad Ibrahim
7a3d98862f Fix emoji parsing error in print functions
- Changed from echo -e to printf for safer special character handling
- Store emoji characters in variables using bash octal escape sequences
- Prevents 'command not found' error when bash interprets emoji as commands
- Fixes issue where line 41 error occurred during setup.sh --update
2025-11-01 02:58:34 +00:00
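A minimal sketch of the pattern this commit describes (the actual setup.sh change is not part of the diffs below): store the emoji via a bash octal escape and print it with printf rather than echo -e, so bash never tries to interpret the raw character.

# ✅ is U+2705; its UTF-8 bytes expressed as bash octal escapes:
CHECK=$'\342\234\205'
printf '%s %s\n' "$CHECK" "Update complete"
# Old pattern that could trigger 'command not found' during setup.sh --update:
# echo -e "✅ Update complete"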
30 changed files with 1913 additions and 446 deletions

4 binary files changed (contents not shown).

View File

@@ -136,10 +136,34 @@ if [[ -z "$PATCHMON_URL" ]] || [[ -z "$API_ID" ]] || [[ -z "$API_KEY" ]]; then
error "Missing required parameters. This script should be called via the PatchMon web interface."
fi
# Parse architecture parameter (default to amd64)
ARCHITECTURE="${ARCHITECTURE:-amd64}"
if [[ "$ARCHITECTURE" != "amd64" && "$ARCHITECTURE" != "386" && "$ARCHITECTURE" != "arm64" ]]; then
error "Invalid architecture '$ARCHITECTURE'. Must be one of: amd64, 386, arm64"
# Auto-detect architecture if not explicitly set
if [[ -z "$ARCHITECTURE" ]]; then
arch_raw=$(uname -m 2>/dev/null || echo "unknown")
# Map architecture to supported values
case "$arch_raw" in
"x86_64")
ARCHITECTURE="amd64"
;;
"i386"|"i686")
ARCHITECTURE="386"
;;
"aarch64"|"arm64")
ARCHITECTURE="arm64"
;;
"armv7l"|"armv6l"|"arm")
ARCHITECTURE="arm"
;;
*)
warning "⚠️ Unknown architecture '$arch_raw', defaulting to amd64"
ARCHITECTURE="amd64"
;;
esac
fi
# Validate architecture
if [[ "$ARCHITECTURE" != "amd64" && "$ARCHITECTURE" != "386" && "$ARCHITECTURE" != "arm64" && "$ARCHITECTURE" != "arm" ]]; then
error "Invalid architecture '$ARCHITECTURE'. Must be one of: amd64, 386, arm64, arm"
fi
# Check if --force flag is set (for bypassing broken packages)
@@ -234,7 +258,98 @@ install_apt_packages() {
fi
}
# Detect package manager and install jq and curl
# Function to check and install packages for yum/dnf
install_yum_dnf_packages() {
local pkg_manager="$1"
shift
local packages=("$@")
local missing_packages=()
# Check which packages are missing
for pkg in "${packages[@]}"; do
if ! command_exists "$pkg"; then
missing_packages+=("$pkg")
fi
done
if [ ${#missing_packages[@]} -eq 0 ]; then
success "All required packages are already installed"
return 0
fi
info "Need to install: ${missing_packages[*]}"
if [[ "$pkg_manager" == "yum" ]]; then
yum install -y "${missing_packages[@]}"
else
dnf install -y "${missing_packages[@]}"
fi
}
# Function to check and install packages for zypper
install_zypper_packages() {
local packages=("$@")
local missing_packages=()
# Check which packages are missing
for pkg in "${packages[@]}"; do
if ! command_exists "$pkg"; then
missing_packages+=("$pkg")
fi
done
if [ ${#missing_packages[@]} -eq 0 ]; then
success "All required packages are already installed"
return 0
fi
info "Need to install: ${missing_packages[*]}"
zypper install -y "${missing_packages[@]}"
}
# Function to check and install packages for pacman
install_pacman_packages() {
local packages=("$@")
local missing_packages=()
# Check which packages are missing
for pkg in "${packages[@]}"; do
if ! command_exists "$pkg"; then
missing_packages+=("$pkg")
fi
done
if [ ${#missing_packages[@]} -eq 0 ]; then
success "All required packages are already installed"
return 0
fi
info "Need to install: ${missing_packages[*]}"
pacman -S --noconfirm "${missing_packages[@]}"
}
# Function to check and install packages for apk
install_apk_packages() {
local packages=("$@")
local missing_packages=()
# Check which packages are missing
for pkg in "${packages[@]}"; do
if ! command_exists "$pkg"; then
missing_packages+=("$pkg")
fi
done
if [ ${#missing_packages[@]} -eq 0 ]; then
success "All required packages are already installed"
return 0
fi
info "Need to install: ${missing_packages[*]}"
apk add --no-cache "${missing_packages[@]}"
}
# Detect package manager and install jq, curl, and bc
if command -v apt-get >/dev/null 2>&1; then
# Debian/Ubuntu
info "Detected apt-get (Debian/Ubuntu)"
@@ -260,31 +375,31 @@ elif command -v yum >/dev/null 2>&1; then
info "Detected yum (CentOS/RHEL 7)"
echo ""
info "Installing jq, curl, and bc..."
yum install -y jq curl bc
install_yum_dnf_packages yum jq curl bc
elif command -v dnf >/dev/null 2>&1; then
# CentOS/RHEL 8+/Fedora
info "Detected dnf (CentOS/RHEL 8+/Fedora)"
echo ""
info "Installing jq, curl, and bc..."
dnf install -y jq curl bc
install_yum_dnf_packages dnf jq curl bc
elif command -v zypper >/dev/null 2>&1; then
# openSUSE
info "Detected zypper (openSUSE)"
echo ""
info "Installing jq, curl, and bc..."
zypper install -y jq curl bc
install_zypper_packages jq curl bc
elif command -v pacman >/dev/null 2>&1; then
# Arch Linux
info "Detected pacman (Arch Linux)"
echo ""
info "Installing jq, curl, and bc..."
pacman -S --noconfirm jq curl bc
install_pacman_packages jq curl bc
elif command -v apk >/dev/null 2>&1; then
# Alpine Linux
info "Detected apk (Alpine Linux)"
echo ""
info "Installing jq, curl, and bc..."
apk add --no-cache jq curl bc
install_apk_packages jq curl bc
else
warning "Could not detect package manager. Please ensure 'jq', 'curl', and 'bc' are installed manually."
fi
@@ -311,6 +426,37 @@ else
mkdir -p /etc/patchmon
fi
# Check if agent is already configured and working (before we overwrite anything)
info "🔍 Checking if agent is already configured..."
if [[ -f /etc/patchmon/config.yml ]] && [[ -f /etc/patchmon/credentials.yml ]]; then
if [[ -f /usr/local/bin/patchmon-agent ]]; then
info "📋 Found existing agent configuration"
info "🧪 Testing existing configuration with ping..."
if /usr/local/bin/patchmon-agent ping >/dev/null 2>&1; then
success "✅ Agent is already configured and ping successful"
info "📋 Existing configuration is working - skipping installation"
info ""
info "If you want to reinstall, remove the configuration files first:"
info " sudo rm -f /etc/patchmon/config.yml /etc/patchmon/credentials.yml"
echo ""
exit 0
else
warning "⚠️ Agent configuration exists but ping failed"
warning "⚠️ Will move existing configuration and reinstall"
echo ""
fi
else
warning "⚠️ Configuration files exist but agent binary is missing"
warning "⚠️ Will move existing configuration and reinstall"
echo ""
fi
else
success "✅ Agent not yet configured - proceeding with installation"
echo ""
fi
# Step 2: Create configuration files
info "🔐 Creating configuration files..."
@@ -426,33 +572,6 @@ if [[ -f "/etc/patchmon/logs/patchmon-agent.log" ]]; then
fi
# Step 4: Test the configuration
# Check if this machine is already enrolled
info "🔍 Checking if machine is already enrolled..."
existing_check=$(curl $CURL_FLAGS -s -X POST \
-H "X-API-ID: $API_ID" \
-H "X-API-KEY: $API_KEY" \
-H "Content-Type: application/json" \
-d "{\"machine_id\": \"$MACHINE_ID\"}" \
"$PATCHMON_URL/api/v1/hosts/check-machine-id" \
-w "\n%{http_code}" 2>&1)
http_code=$(echo "$existing_check" | tail -n 1)
response_body=$(echo "$existing_check" | sed '$d')
if [[ "$http_code" == "200" ]]; then
already_enrolled=$(echo "$response_body" | jq -r '.exists' 2>/dev/null || echo "false")
if [[ "$already_enrolled" == "true" ]]; then
warning "⚠️ This machine is already enrolled in PatchMon"
info "Machine ID: $MACHINE_ID"
info "Existing host: $(echo "$response_body" | jq -r '.host.friendly_name' 2>/dev/null)"
info ""
info "The agent will be reinstalled/updated with existing credentials."
echo ""
else
success "✅ Machine not yet enrolled - proceeding with installation"
fi
fi
info "🧪 Testing API credentials and connectivity..."
if /usr/local/bin/patchmon-agent ping; then
success "✅ TEST: API credentials are valid and server is reachable"
@@ -460,15 +579,8 @@ else
error "❌ Failed to validate API credentials or reach server"
fi
# Step 5: Send initial data and setup systemd service
info "📊 Sending initial package data to server..."
if /usr/local/bin/patchmon-agent report; then
success "✅ UPDATE: Initial package data sent successfully"
else
warning "⚠️ Failed to send initial data. You can retry later with: /usr/local/bin/patchmon-agent report"
fi
# Step 6: Setup systemd service for WebSocket connection
# Step 5: Setup systemd service for WebSocket connection
# Note: The service will automatically send an initial report on startup (see serve.go)
info "🔧 Setting up systemd service..."
# Stop and disable existing service if it exists

View File

@@ -230,6 +230,40 @@ while IFS= read -r line; do
info " ✓ Host enrolled successfully: $api_id"
# Check if agent is already installed and working
info " Checking if agent is already configured..."
config_check=$(timeout 10 pct exec "$vmid" -- bash -c "
if [[ -f /etc/patchmon/config.yml ]] && [[ -f /etc/patchmon/credentials.yml ]]; then
if [[ -f /usr/local/bin/patchmon-agent ]]; then
# Try to ping using existing configuration
if /usr/local/bin/patchmon-agent ping >/dev/null 2>&1; then
echo 'ping_success'
else
echo 'ping_failed'
fi
else
echo 'binary_missing'
fi
else
echo 'not_configured'
fi
" 2>/dev/null </dev/null || echo "error")
if [[ "$config_check" == "ping_success" ]]; then
info " ✓ Host already enrolled and agent ping successful - skipping"
((skipped_count++)) || true
echo ""
continue
elif [[ "$config_check" == "ping_failed" ]]; then
warn " ⚠ Agent configuration exists but ping failed - will reinstall"
elif [[ "$config_check" == "binary_missing" ]]; then
warn " ⚠ Config exists but agent binary missing - will reinstall"
elif [[ "$config_check" == "not_configured" ]]; then
info " Agent not yet configured - proceeding with installation"
else
warn " ⚠ Could not check agent status - proceeding with installation"
fi
# Ensure curl is installed in the container
info " Checking for curl in container..."
curl_check=$(timeout 10 pct exec "$vmid" -- bash -c "command -v curl >/dev/null 2>&1 && echo 'installed' || echo 'missing'" 2>/dev/null </dev/null || echo "error")
@@ -283,9 +317,10 @@ while IFS= read -r line; do
install_exit_code=0
# Download and execute in separate steps to avoid stdin issues with piping
install_output=$(timeout 180 pct exec "$vmid" -- bash -c "
# Pass CURL_FLAGS as environment variable to container
install_output=$(timeout 180 pct exec "$vmid" --env CURL_FLAGS="$CURL_FLAGS" -- bash -c "
cd /tmp
curl $CURL_FLAGS \
curl \$CURL_FLAGS \
-H \"X-API-ID: $api_id\" \
-H \"X-API-KEY: $api_key\" \
-o patchmon-install.sh \
@@ -422,9 +457,10 @@ if [[ ${#dpkg_error_containers[@]} -gt 0 ]]; then
info " Retrying agent installation..."
install_exit_code=0
install_output=$(timeout 180 pct exec "$vmid" -- bash -c "
# Pass CURL_FLAGS as environment variable to container
install_output=$(timeout 180 pct exec "$vmid" --env CURL_FLAGS="$CURL_FLAGS" -- bash -c "
cd /tmp
curl $CURL_FLAGS \
curl \$CURL_FLAGS \
-H \"X-API-ID: $api_id\" \
-H \"X-API-KEY: $api_key\" \
-o patchmon-install.sh \

View File

@@ -54,3 +54,8 @@ ENABLE_LOGGING=true
TFA_REMEMBER_ME_EXPIRES_IN=30d
TFA_MAX_REMEMBER_SESSIONS=5
TFA_SUSPICIOUS_ACTIVITY_THRESHOLD=3
# Timezone Configuration
# Set the timezone for timestamps and logs (e.g., 'UTC', 'America/New_York', 'Europe/London')
# Defaults to UTC if not set. This ensures consistent timezone handling across the application.
TZ=UTC
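Usage sketch for the new variable: export TZ in the backend's environment (or set it in the .env file as above) before starting the server. The exact start command below is an assumption for illustration.

export TZ="Europe/London"
node server.js   # or however the PatchMon backend service is normally launched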

View File

@@ -24,7 +24,7 @@
"cors": "^2.8.5",
"dotenv": "^16.4.7",
"express": "^4.21.2",
"express-rate-limit": "^7.5.0",
"express-rate-limit": "^8.0.0",
"express-validator": "^7.2.0",
"helmet": "^8.0.0",
"ioredis": "^5.8.1",

View File

@@ -0,0 +1,16 @@
-- CreateTable
CREATE TABLE "system_statistics" (
"id" TEXT NOT NULL,
"unique_packages_count" INTEGER NOT NULL,
"unique_security_count" INTEGER NOT NULL,
"total_packages" INTEGER NOT NULL,
"total_hosts" INTEGER NOT NULL,
"hosts_needing_updates" INTEGER NOT NULL,
"timestamp" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "system_statistics_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE INDEX "system_statistics_timestamp_idx" ON "system_statistics"("timestamp");

View File

@@ -212,6 +212,18 @@ model update_history {
hosts hosts @relation(fields: [host_id], references: [id], onDelete: Cascade)
}
model system_statistics {
id String @id
unique_packages_count Int
unique_security_count Int
total_packages Int
total_hosts Int
hosts_needing_updates Int
timestamp DateTime @default(now())
@@index([timestamp])
}
model users {
id String @id
username String @unique

View File

@@ -242,6 +242,30 @@ router.post(
},
);
// Trigger manual system statistics collection
router.post(
"/trigger/system-statistics",
authenticateToken,
async (_req, res) => {
try {
const job = await queueManager.triggerSystemStatistics();
res.json({
success: true,
data: {
jobId: job.id,
message: "System statistics collection triggered successfully",
},
});
} catch (error) {
console.error("Error triggering system statistics collection:", error);
res.status(500).json({
success: false,
error: "Failed to trigger system statistics collection",
});
}
},
);
// Get queue health status
router.get("/health", authenticateToken, async (_req, res) => {
try {
@@ -300,6 +324,7 @@ router.get("/overview", authenticateToken, async (_req, res) => {
queueManager.getRecentJobs(QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP, 1),
queueManager.getRecentJobs(QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP, 1),
queueManager.getRecentJobs(QUEUE_NAMES.AGENT_COMMANDS, 1),
queueManager.getRecentJobs(QUEUE_NAMES.SYSTEM_STATISTICS, 1),
]);
// Calculate overview metrics
@@ -309,21 +334,24 @@ router.get("/overview", authenticateToken, async (_req, res) => {
stats[QUEUE_NAMES.SESSION_CLEANUP].delayed +
stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].delayed +
stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].delayed +
stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].delayed,
stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].delayed +
stats[QUEUE_NAMES.SYSTEM_STATISTICS].delayed,
runningTasks:
stats[QUEUE_NAMES.GITHUB_UPDATE_CHECK].active +
stats[QUEUE_NAMES.SESSION_CLEANUP].active +
stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].active +
stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].active +
stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].active,
stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].active +
stats[QUEUE_NAMES.SYSTEM_STATISTICS].active,
failedTasks:
stats[QUEUE_NAMES.GITHUB_UPDATE_CHECK].failed +
stats[QUEUE_NAMES.SESSION_CLEANUP].failed +
stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].failed +
stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].failed +
stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].failed,
stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].failed +
stats[QUEUE_NAMES.SYSTEM_STATISTICS].failed,
totalAutomations: Object.values(stats).reduce((sum, queueStats) => {
return (
@@ -435,6 +463,22 @@ router.get("/overview", authenticateToken, async (_req, res) => {
: "Never run",
stats: stats[QUEUE_NAMES.AGENT_COMMANDS],
},
{
name: "System Statistics Collection",
queue: QUEUE_NAMES.SYSTEM_STATISTICS,
description: "Collects aggregated system-wide package statistics",
schedule: "Every 30 minutes",
lastRun: recentJobs[6][0]?.finishedOn
? new Date(recentJobs[6][0].finishedOn).toLocaleString()
: "Never",
lastRunTimestamp: recentJobs[6][0]?.finishedOn || 0,
status: recentJobs[6][0]?.failedReason
? "Failed"
: recentJobs[6][0]
? "Success"
: "Never run",
stats: stats[QUEUE_NAMES.SYSTEM_STATISTICS],
},
].sort((a, b) => {
// Sort by last run timestamp (most recent first)
// If both have never run (timestamp 0), maintain original order

View File

@@ -564,22 +564,43 @@ router.get(
const startDate = new Date();
startDate.setDate(endDate.getDate() - daysInt);
// Build where clause
const whereClause = {
// Determine if we need aggregation based on host filter
const needsAggregation =
!hostId || hostId === "all" || hostId === "undefined";
let trendsData;
if (needsAggregation) {
// For "All Hosts" mode, use system_statistics table
trendsData = await prisma.system_statistics.findMany({
where: {
timestamp: {
gte: startDate,
lte: endDate,
},
};
// Add host filter if specified
if (hostId && hostId !== "all" && hostId !== "undefined") {
whereClause.host_id = hostId;
}
// Get all update history records in the date range
const trendsData = await prisma.update_history.findMany({
where: whereClause,
},
select: {
timestamp: true,
unique_packages_count: true,
unique_security_count: true,
total_packages: true,
total_hosts: true,
hosts_needing_updates: true,
},
orderBy: {
timestamp: "asc",
},
});
} else {
// For individual host, use update_history table
trendsData = await prisma.update_history.findMany({
where: {
host_id: hostId,
timestamp: {
gte: startDate,
lte: endDate,
},
},
select: {
timestamp: true,
packages_count: true,
@@ -592,9 +613,74 @@ router.get(
timestamp: "asc",
},
});
}
// Enhanced data validation and processing
const processedData = trendsData
// Process data based on source
let processedData;
let aggregatedArray;
if (needsAggregation) {
// For "All Hosts" mode, data comes from system_statistics table
// Already aggregated, just need to format it
processedData = trendsData
.filter((record) => {
// Enhanced validation
return (
record.total_packages !== null &&
record.total_packages >= 0 &&
record.unique_packages_count >= 0 &&
record.unique_security_count >= 0 &&
record.unique_security_count <= record.unique_packages_count
);
})
.map((record) => {
const date = new Date(record.timestamp);
let timeKey;
if (daysInt <= 1) {
// For "Last 24 hours", use full timestamp for each data point
// This allows plotting all individual data points
timeKey = date.toISOString(); // Full ISO timestamp
} else {
// For daily view, group by day
timeKey = date.toISOString().split("T")[0]; // YYYY-MM-DD
}
return {
timeKey,
total_packages: record.total_packages,
packages_count: record.unique_packages_count,
security_count: record.unique_security_count,
timestamp: record.timestamp,
};
});
if (daysInt <= 1) {
// For "Last 24 hours", use all individual data points without grouping
// Sort by timestamp
aggregatedArray = processedData.sort(
(a, b) => a.timestamp.getTime() - b.timestamp.getTime(),
);
} else {
// For longer periods, group by timeKey and take the latest value for each period
const aggregatedData = processedData.reduce((acc, item) => {
if (
!acc[item.timeKey] ||
item.timestamp > acc[item.timeKey].timestamp
) {
acc[item.timeKey] = item;
}
return acc;
}, {});
// Convert to array and sort
aggregatedArray = Object.values(aggregatedData).sort((a, b) =>
a.timeKey.localeCompare(b.timeKey),
);
}
} else {
// For individual host, data comes from update_history table
processedData = trendsData
.filter((record) => {
// Enhanced validation
return (
@@ -602,17 +688,18 @@ router.get(
record.total_packages >= 0 &&
record.packages_count >= 0 &&
record.security_count >= 0 &&
record.security_count <= record.packages_count && // Security can't exceed outdated
record.security_count <= record.packages_count &&
record.status === "success"
); // Only include successful reports
);
})
.map((record) => {
const date = new Date(record.timestamp);
let timeKey;
if (daysInt <= 1) {
// For hourly view, group by hour only (not minutes)
timeKey = date.toISOString().substring(0, 13); // YYYY-MM-DDTHH
// For "Last 24 hours", use full timestamp for each data point
// This allows plotting all individual data points
timeKey = date.toISOString(); // Full ISO timestamp
} else {
// For daily view, group by day
timeKey = date.toISOString().split("T")[0]; // YYYY-MM-DD
@@ -628,70 +715,14 @@ router.get(
};
});
// Determine if we need aggregation based on host filter
const needsAggregation =
!hostId || hostId === "all" || hostId === "undefined";
let aggregatedArray;
if (needsAggregation) {
// For "All Hosts" mode, we need to calculate the actual total packages differently
// Instead of aggregating historical data (which is per-host), we'll use the current total
// and show that as a flat line, since total packages don't change much over time
// Get the current total packages count (unique packages across all hosts)
const currentTotalPackages = await prisma.packages.count({
where: {
host_packages: {
some: {}, // At least one host has this package
},
},
});
// Aggregate data by timeKey when looking at "All Hosts" or no specific host
const aggregatedData = processedData.reduce((acc, item) => {
if (!acc[item.timeKey]) {
acc[item.timeKey] = {
timeKey: item.timeKey,
total_packages: currentTotalPackages, // Use current total packages
packages_count: 0,
security_count: 0,
record_count: 0,
host_ids: new Set(),
min_timestamp: item.timestamp,
max_timestamp: item.timestamp,
};
}
// For outdated and security packages: SUM (these represent counts across hosts)
acc[item.timeKey].packages_count += item.packages_count;
acc[item.timeKey].security_count += item.security_count;
acc[item.timeKey].record_count += 1;
acc[item.timeKey].host_ids.add(item.host_id);
// Track timestamp range
if (item.timestamp < acc[item.timeKey].min_timestamp) {
acc[item.timeKey].min_timestamp = item.timestamp;
}
if (item.timestamp > acc[item.timeKey].max_timestamp) {
acc[item.timeKey].max_timestamp = item.timestamp;
}
return acc;
}, {});
// Convert to array and add metadata
aggregatedArray = Object.values(aggregatedData)
.map((item) => ({
...item,
host_count: item.host_ids.size,
host_ids: Array.from(item.host_ids),
}))
.sort((a, b) => a.timeKey.localeCompare(b.timeKey));
if (daysInt <= 1) {
// For "Last 24 hours", use all individual data points without grouping
// Sort by timestamp
aggregatedArray = processedData.sort(
(a, b) => a.timestamp.getTime() - b.timestamp.getTime(),
);
} else {
// For specific host, show individual data points without aggregation
// But still group by timeKey to handle multiple reports from same host in same time period
// For longer periods, group by timeKey to handle multiple reports from same host in same time period
const hostAggregatedData = processedData.reduce((acc, item) => {
if (!acc[item.timeKey]) {
acc[item.timeKey] = {
@@ -729,9 +760,20 @@ router.get(
}))
.sort((a, b) => a.timeKey.localeCompare(b.timeKey));
}
}
// Handle sparse data by filling missing time periods
const fillMissingPeriods = (data, daysInt) => {
if (data.length === 0) {
return [];
}
// For "Last 24 hours", return data as-is without filling gaps
// This allows plotting all individual data points
if (daysInt <= 1) {
return data;
}
const filledData = [];
const startDate = new Date();
startDate.setDate(startDate.getDate() - daysInt);
@@ -741,43 +783,49 @@ router.get(
const endDate = new Date();
const currentDate = new Date(startDate);
// Find the last known values for interpolation
// Sort data by timeKey to get chronological order
const sortedData = [...data].sort((a, b) =>
a.timeKey.localeCompare(b.timeKey),
);
// Find the first actual data point (don't fill before this)
const firstDataPoint = sortedData[0];
const firstDataTimeKey = firstDataPoint?.timeKey;
// Track last known values as we iterate forward
let lastKnownValues = null;
if (data.length > 0) {
lastKnownValues = {
total_packages: data[0].total_packages,
packages_count: data[0].packages_count,
security_count: data[0].security_count,
};
}
let hasSeenFirstDataPoint = false;
while (currentDate <= endDate) {
let timeKey;
if (daysInt <= 1) {
timeKey = currentDate.toISOString().substring(0, 13); // Hourly
currentDate.setHours(currentDate.getHours() + 1);
} else {
timeKey = currentDate.toISOString().split("T")[0]; // Daily
// For daily view, group by day
timeKey = currentDate.toISOString().split("T")[0]; // YYYY-MM-DD
currentDate.setDate(currentDate.getDate() + 1);
// Skip periods before the first actual data point
if (firstDataTimeKey && timeKey < firstDataTimeKey) {
continue;
}
if (dataMap.has(timeKey)) {
const item = dataMap.get(timeKey);
filledData.push(item);
// Update last known values
// Update last known values with actual data
lastKnownValues = {
total_packages: item.total_packages,
packages_count: item.packages_count,
security_count: item.security_count,
total_packages: item.total_packages || 0,
packages_count: item.packages_count || 0,
security_count: item.security_count || 0,
};
hasSeenFirstDataPoint = true;
} else {
// For missing periods, use the last known values (interpolation)
// This creates a continuous line instead of gaps
// For missing periods AFTER the first data point, use forward-fill
// Only fill if we have a last known value and we've seen the first data point
if (lastKnownValues !== null && hasSeenFirstDataPoint) {
filledData.push({
timeKey,
total_packages: lastKnownValues?.total_packages || 0,
packages_count: lastKnownValues?.packages_count || 0,
security_count: lastKnownValues?.security_count || 0,
total_packages: lastKnownValues.total_packages,
packages_count: lastKnownValues.packages_count,
security_count: lastKnownValues.security_count,
record_count: 0,
host_count: 0,
host_ids: [],
@@ -786,6 +834,8 @@ router.get(
isInterpolated: true, // Mark as interpolated for debugging
});
}
// If we haven't seen the first data point yet, skip this period
}
}
return filledData;
@@ -810,7 +860,7 @@ router.get(
// Get current package state for offline fallback
let currentPackageState = null;
if (hostId && hostId !== "all" && hostId !== "undefined") {
// Get current package counts for specific host
// For individual host, get current package counts from host_packages
const currentState = await prisma.host_packages.aggregate({
where: {
host_id: hostId,
@@ -841,8 +891,29 @@ router.get(
security_count: securityCount,
};
} else {
// Get current package counts for all hosts
// Total packages = count of unique packages installed on at least one host
// For "All Hosts" mode, use the latest system_statistics record if available
// Otherwise calculate from database
const latestStats = await prisma.system_statistics.findFirst({
orderBy: {
timestamp: "desc",
},
select: {
total_packages: true,
unique_packages_count: true,
unique_security_count: true,
timestamp: true,
},
});
if (latestStats) {
// Use latest system statistics (collected by scheduled job)
currentPackageState = {
total_packages: latestStats.total_packages,
packages_count: latestStats.unique_packages_count,
security_count: latestStats.unique_security_count,
};
} else {
// Fallback: calculate from database if no statistics collected yet
const totalPackagesCount = await prisma.packages.count({
where: {
host_packages: {
@@ -851,25 +922,34 @@ router.get(
},
});
// Get counts for boolean fields separately
const outdatedCount = await prisma.host_packages.count({
const uniqueOutdatedCount = await prisma.packages.count({
where: {
host_packages: {
some: {
needs_update: true,
},
},
},
});
const securityCount = await prisma.host_packages.count({
const uniqueSecurityCount = await prisma.packages.count({
where: {
host_packages: {
some: {
needs_update: true,
is_security_update: true,
},
},
},
});
currentPackageState = {
total_packages: totalPackagesCount,
packages_count: outdatedCount,
security_count: securityCount,
packages_count: uniqueOutdatedCount,
security_count: uniqueSecurityCount,
};
}
}
// Format data for chart
const chartData = {
@@ -923,6 +1003,11 @@ router.get(
chartData.datasets[2].data.push(item.security_count);
});
// Replace the last label with "Now" to indicate current state
if (chartData.labels.length > 0) {
chartData.labels[chartData.labels.length - 1] = "Now";
}
// Calculate data quality metrics
const dataQuality = {
totalRecords: trendsData.length,

View File

@@ -2,6 +2,7 @@ const express = require("express");
const { authenticateToken } = require("../middleware/auth");
const { getPrismaClient } = require("../config/prisma");
const { v4: uuidv4 } = require("uuid");
const { get_current_time, parse_date } = require("../utils/timezone");
const prisma = getPrismaClient();
const router = express.Router();
@@ -537,14 +538,7 @@ router.post("/collect", async (req, res) => {
return res.status(401).json({ error: "Invalid API credentials" });
}
const now = new Date();
// Helper function to validate and parse dates
const parseDate = (dateString) => {
if (!dateString) return now;
const date = new Date(dateString);
return Number.isNaN(date.getTime()) ? now : date;
};
const now = get_current_time();
// Process containers
if (containers && Array.isArray(containers)) {
@@ -572,7 +566,7 @@ router.post("/collect", async (req, res) => {
tag: containerData.image_tag,
image_id: containerData.image_id || "unknown",
source: containerData.image_source || "docker-hub",
created_at: parseDate(containerData.created_at),
created_at: parse_date(containerData.created_at, now),
last_checked: now,
updated_at: now,
},
@@ -597,7 +591,7 @@ router.post("/collect", async (req, res) => {
state: containerData.state,
ports: containerData.ports || null,
started_at: containerData.started_at
? parseDate(containerData.started_at)
? parse_date(containerData.started_at, null)
: null,
updated_at: now,
last_checked: now,
@@ -613,9 +607,9 @@ router.post("/collect", async (req, res) => {
status: containerData.status,
state: containerData.state,
ports: containerData.ports || null,
created_at: parseDate(containerData.created_at),
created_at: parse_date(containerData.created_at, now),
started_at: containerData.started_at
? parseDate(containerData.started_at)
? parse_date(containerData.started_at, null)
: null,
updated_at: now,
},
@@ -651,7 +645,7 @@ router.post("/collect", async (req, res) => {
? BigInt(imageData.size_bytes)
: null,
source: imageData.source || "docker-hub",
created_at: parseDate(imageData.created_at),
created_at: parse_date(imageData.created_at, now),
updated_at: now,
},
});
@@ -780,14 +774,7 @@ router.post("/../integrations/docker", async (req, res) => {
`[Docker Integration] Processing for host: ${host.friendly_name}`,
);
const now = new Date();
// Helper function to validate and parse dates
const parseDate = (dateString) => {
if (!dateString) return now;
const date = new Date(dateString);
return Number.isNaN(date.getTime()) ? now : date;
};
const now = get_current_time();
let containersProcessed = 0;
let imagesProcessed = 0;
@@ -822,7 +809,7 @@ router.post("/../integrations/docker", async (req, res) => {
tag: containerData.image_tag,
image_id: containerData.image_id || "unknown",
source: containerData.image_source || "docker-hub",
created_at: parseDate(containerData.created_at),
created_at: parse_date(containerData.created_at, now),
last_checked: now,
updated_at: now,
},
@@ -847,7 +834,7 @@ router.post("/../integrations/docker", async (req, res) => {
state: containerData.state || containerData.status,
ports: containerData.ports || null,
started_at: containerData.started_at
? parseDate(containerData.started_at)
? parse_date(containerData.started_at, null)
: null,
updated_at: now,
last_checked: now,
@@ -863,9 +850,9 @@ router.post("/../integrations/docker", async (req, res) => {
status: containerData.status,
state: containerData.state || containerData.status,
ports: containerData.ports || null,
created_at: parseDate(containerData.created_at),
created_at: parse_date(containerData.created_at, now),
started_at: containerData.started_at
? parseDate(containerData.started_at)
? parse_date(containerData.started_at, null)
: null,
updated_at: now,
},
@@ -911,7 +898,7 @@ router.post("/../integrations/docker", async (req, res) => {
? BigInt(imageData.size_bytes)
: null,
source: imageSource,
created_at: parseDate(imageData.created_at),
created_at: parse_date(imageData.created_at, now),
last_checked: now,
updated_at: now,
},

View File

@@ -11,10 +11,16 @@ const {
requireManageSettings,
} = require("../middleware/permissions");
const { queueManager, QUEUE_NAMES } = require("../services/automation");
const { pushIntegrationToggle, isConnected } = require("../services/agentWs");
const agentVersionService = require("../services/agentVersionService");
const router = express.Router();
const prisma = getPrismaClient();
// In-memory cache for integration states (api_id -> { integration_name -> enabled })
// This stores the last known state from successful toggles
const integrationStateCache = new Map();
// Secure endpoint to download the agent script/binary (requires API authentication)
router.get("/agent/download", async (req, res) => {
try {
@@ -128,9 +134,6 @@ router.get("/agent/version", async (req, res) => {
try {
const fs = require("node:fs");
const path = require("node:path");
const { exec } = require("node:child_process");
const { promisify } = require("node:util");
const execAsync = promisify(exec);
// Get architecture parameter (default to amd64 for Go agents)
const architecture = req.query.arch || "amd64";
@@ -165,16 +168,32 @@ router.get("/agent/version", async (req, res) => {
minServerVersion: null,
});
} else {
// Go agent version check (binary)
// Go agent version check
// Detect server architecture and map to Go architecture names
const os = require("node:os");
const { exec } = require("node:child_process");
const { promisify } = require("node:util");
const execAsync = promisify(exec);
const serverArch = os.arch();
// Map Node.js architecture to Go architecture names
const archMap = {
x64: "amd64",
ia32: "386",
arm64: "arm64",
arm: "arm",
};
const serverGoArch = archMap[serverArch] || serverArch;
// If requested architecture matches server architecture, execute the binary
if (architecture === serverGoArch) {
const binaryName = `patchmon-agent-linux-${architecture}`;
const binaryPath = path.join(__dirname, "../../../agents", binaryName);
if (!fs.existsSync(binaryPath)) {
return res.status(404).json({
error: `Go agent binary not found for architecture: ${architecture}`,
});
}
// Binary doesn't exist, fall back to GitHub
console.log(`Binary ${binaryName} not found, falling back to GitHub`);
} else {
// Execute the binary to get its version
try {
const { stdout } = await execAsync(`${binaryPath} --help`, {
@@ -186,19 +205,14 @@ router.get("/agent/version", async (req, res) => {
/PatchMon Agent v([0-9]+\.[0-9]+\.[0-9]+)/i,
);
if (!versionMatch) {
return res.status(500).json({
error: "Could not extract version from agent binary",
});
}
if (versionMatch) {
const serverVersion = versionMatch[1];
const agentVersion = req.query.currentVersion || serverVersion;
// Simple version comparison (assuming semantic versioning)
const hasUpdate = agentVersion !== serverVersion;
res.json({
return res.json({
currentVersion: agentVersion,
latestVersion: serverVersion,
hasUpdate: hasUpdate,
@@ -208,10 +222,54 @@ router.get("/agent/version", async (req, res) => {
architecture: architecture,
agentType: "go",
});
}
} catch (execError) {
console.error("Failed to execute agent binary:", execError.message);
// Execution failed, fall back to GitHub
console.log(
`Failed to execute binary ${binaryName}: ${execError.message}, falling back to GitHub`,
);
}
}
}
// Fall back to GitHub if architecture doesn't match or binary execution failed
try {
const versionInfo = await agentVersionService.getVersionInfo();
const latestVersion = versionInfo.latestVersion;
const agentVersion =
req.query.currentVersion || latestVersion || "unknown";
if (!latestVersion) {
return res.status(503).json({
error: "Unable to determine latest version from GitHub releases",
currentVersion: agentVersion,
latestVersion: null,
hasUpdate: false,
});
}
// Simple version comparison (assuming semantic versioning)
const hasUpdate =
agentVersion !== latestVersion && latestVersion !== null;
res.json({
currentVersion: agentVersion,
latestVersion: latestVersion,
hasUpdate: hasUpdate,
downloadUrl: `/api/v1/hosts/agent/download?arch=${architecture}`,
releaseNotes: `PatchMon Agent v${latestVersion}`,
minServerVersion: null,
architecture: architecture,
agentType: "go",
});
} catch (serviceError) {
console.error(
"Failed to get version from agentVersionService:",
serviceError.message,
);
return res.status(500).json({
error: "Failed to get version from agent binary",
error: "Failed to get agent version from service",
details: serviceError.message,
});
}
}
@@ -1616,10 +1674,14 @@ router.get("/install", async (req, res) => {
// Check for --force parameter
const forceInstall = req.query.force === "true" || req.query.force === "1";
// Get architecture parameter (default to amd64)
const architecture = req.query.arch || "amd64";
// Get architecture parameter (only set if explicitly provided, otherwise let script auto-detect)
const architecture = req.query.arch;
// Inject the API credentials, server URL, curl flags, SSL verify flag, force flag, and architecture into the script
// Only set ARCHITECTURE if explicitly provided, otherwise let the script auto-detect
const archExport = architecture
? `export ARCHITECTURE="${architecture}"\n`
: "";
const envVars = `#!/bin/bash
export PATCHMON_URL="${serverUrl}"
export API_ID="${host.api_id}"
@@ -1627,8 +1689,7 @@ export API_KEY="${host.api_key}"
export CURL_FLAGS="${curlFlags}"
export SKIP_SSL_VERIFY="${skipSSLVerify}"
export FORCE_INSTALL="${forceInstall ? "true" : "false"}"
export ARCHITECTURE="${architecture}"
${archExport}
`;
// Remove the shebang from the original script and prepend our env vars
@@ -2103,4 +2164,137 @@ router.patch(
},
);
// Get integration status for a host
router.get(
"/:hostId/integrations",
authenticateToken,
requireManageHosts,
async (req, res) => {
try {
const { hostId } = req.params;
// Get host to verify it exists
const host = await prisma.hosts.findUnique({
where: { id: hostId },
select: { id: true, api_id: true, friendly_name: true },
});
if (!host) {
return res.status(404).json({ error: "Host not found" });
}
// Check if agent is connected
const connected = isConnected(host.api_id);
// Get integration states from cache (or defaults if not cached)
// Default: all integrations are disabled
const cachedState = integrationStateCache.get(host.api_id) || {};
const integrations = {
docker: cachedState.docker || false, // Default: disabled
// Future integrations can be added here
};
res.json({
success: true,
data: {
integrations,
connected,
host: {
id: host.id,
friendlyName: host.friendly_name,
apiId: host.api_id,
},
},
});
} catch (error) {
console.error("Get integration status error:", error);
res.status(500).json({ error: "Failed to get integration status" });
}
},
);
// Toggle integration status for a host
router.post(
"/:hostId/integrations/:integrationName/toggle",
authenticateToken,
requireManageHosts,
[body("enabled").isBoolean().withMessage("Enabled status must be a boolean")],
async (req, res) => {
try {
const errors = validationResult(req);
if (!errors.isEmpty()) {
return res.status(400).json({ errors: errors.array() });
}
const { hostId, integrationName } = req.params;
const { enabled } = req.body;
// Validate integration name
const validIntegrations = ["docker"]; // Add more as they're implemented
if (!validIntegrations.includes(integrationName)) {
return res.status(400).json({
error: "Invalid integration name",
validIntegrations,
});
}
// Get host to verify it exists
const host = await prisma.hosts.findUnique({
where: { id: hostId },
select: { id: true, api_id: true, friendly_name: true },
});
if (!host) {
return res.status(404).json({ error: "Host not found" });
}
// Check if agent is connected
if (!isConnected(host.api_id)) {
return res.status(503).json({
error: "Agent is not connected",
message:
"The agent must be connected via WebSocket to toggle integrations",
});
}
// Send WebSocket message to agent
const success = pushIntegrationToggle(
host.api_id,
integrationName,
enabled,
);
if (!success) {
return res.status(503).json({
error: "Failed to send integration toggle",
message: "Agent connection may have been lost",
});
}
// Update cache with new state
if (!integrationStateCache.has(host.api_id)) {
integrationStateCache.set(host.api_id, {});
}
integrationStateCache.get(host.api_id)[integrationName] = enabled;
res.json({
success: true,
message: `Integration ${integrationName} ${enabled ? "enabled" : "disabled"} successfully`,
data: {
integration: integrationName,
enabled,
host: {
id: host.id,
friendlyName: host.friendly_name,
apiId: host.api_id,
},
},
});
} catch (error) {
console.error("Toggle integration error:", error);
res.status(500).json({ error: "Failed to toggle integration" });
}
},
);
module.exports = router;

View File

@@ -60,9 +60,14 @@ router.post(
authenticateToken,
[
body("token")
.notEmpty()
.withMessage("Token is required")
.isString()
.withMessage("Token must be a string")
.isLength({ min: 6, max: 6 })
.withMessage("Token must be 6 digits"),
body("token").isNumeric().withMessage("Token must contain only numbers"),
.withMessage("Token must be exactly 6 digits")
.matches(/^\d{6}$/)
.withMessage("Token must contain only numbers"),
],
async (req, res) => {
try {
@@ -71,7 +76,11 @@ router.post(
return res.status(400).json({ errors: errors.array() });
}
const { token } = req.body;
// Ensure token is a string (convert if needed)
let { token } = req.body;
if (typeof token !== "string") {
token = String(token);
}
const userId = req.user.id;
// Get user's TFA secret

View File

@@ -3,6 +3,7 @@
const WebSocket = require("ws");
const url = require("node:url");
const { get_current_time } = require("../utils/timezone");
// Connection registry by api_id
const apiIdToSocket = new Map();
@@ -49,7 +50,29 @@ function init(server, prismaClient) {
wss.handleUpgrade(request, socket, head, (ws) => {
ws.on("message", (message) => {
// Echo back for Bull Board WebSocket
try {
ws.send(message);
} catch (_err) {
// Ignore send errors (connection may be closed)
}
});
ws.on("error", (err) => {
// Handle WebSocket errors gracefully for Bull Board
if (
err.code === "WS_ERR_INVALID_CLOSE_CODE" ||
err.code === "ECONNRESET" ||
err.code === "EPIPE"
) {
// These are expected errors, just log quietly
console.log("[bullboard-ws] connection error:", err.code);
} else {
console.error("[bullboard-ws] error:", err.message || err);
}
});
ws.on("close", () => {
// Connection closed, no action needed
});
});
return;
@@ -117,7 +140,58 @@ function init(server, prismaClient) {
}
});
ws.on("close", () => {
ws.on("error", (err) => {
// Handle WebSocket errors gracefully without crashing
// Common errors: invalid close codes (1006), connection resets, etc.
if (
err.code === "WS_ERR_INVALID_CLOSE_CODE" ||
err.message?.includes("invalid status code 1006") ||
err.message?.includes("Invalid WebSocket frame")
) {
// 1006 is a special close code indicating abnormal closure
// It cannot be sent in a close frame, but can occur when connection is lost
console.log(
`[agent-ws] connection error for ${apiId} (abnormal closure):`,
err.message || err.code,
);
} else if (
err.code === "ECONNRESET" ||
err.code === "EPIPE" ||
err.message?.includes("read ECONNRESET")
) {
// Connection reset errors are common and expected
console.log(`[agent-ws] connection reset for ${apiId}`);
} else {
// Log other errors for debugging
console.error(
`[agent-ws] error for ${apiId}:`,
err.message || err.code || err,
);
}
// Clean up connection on error
const existing = apiIdToSocket.get(apiId);
if (existing === ws) {
apiIdToSocket.delete(apiId);
connectionMetadata.delete(apiId);
// Notify subscribers of disconnection
notifyConnectionChange(apiId, false);
}
// Try to close the connection gracefully if still open
if (
ws.readyState === WebSocket.OPEN ||
ws.readyState === WebSocket.CONNECTING
) {
try {
ws.close(1000); // Normal closure
} catch {
// Ignore errors when closing
}
}
});
ws.on("close", (code, reason) => {
const existing = apiIdToSocket.get(apiId);
if (existing === ws) {
apiIdToSocket.delete(apiId);
@@ -126,7 +200,7 @@ function init(server, prismaClient) {
notifyConnectionChange(apiId, false);
}
console.log(
`[agent-ws] disconnected api_id=${apiId} total=${apiIdToSocket.size}`,
`[agent-ws] disconnected api_id=${apiId} code=${code} reason=${reason || "none"} total=${apiIdToSocket.size}`,
);
});
@@ -181,6 +255,29 @@ function pushUpdateAgent(apiId) {
safeSend(ws, JSON.stringify({ type: "update_agent" }));
}
function pushIntegrationToggle(apiId, integrationName, enabled) {
const ws = apiIdToSocket.get(apiId);
if (ws && ws.readyState === WebSocket.OPEN) {
safeSend(
ws,
JSON.stringify({
type: "integration_toggle",
integration: integrationName,
enabled: enabled,
}),
);
console.log(
`📤 Pushed integration toggle to agent ${apiId}: ${integrationName} = ${enabled}`,
);
return true;
} else {
console.log(
`⚠️ Agent ${apiId} not connected, cannot push integration toggle, please edit config.yml manually`,
);
return false;
}
}
function getConnectionByApiId(apiId) {
return apiIdToSocket.get(apiId);
}
@@ -314,7 +411,7 @@ async function handleDockerStatusEvent(apiId, message) {
status: status,
state: status,
updated_at: new Date(timestamp || Date.now()),
last_checked: new Date(),
last_checked: get_current_time(),
},
});
@@ -340,6 +437,7 @@ module.exports = {
pushReportNow,
pushSettingsUpdate,
pushUpdateAgent,
pushIntegrationToggle,
pushUpdateNotification,
pushUpdateNotificationToAll,
// Expose read-only view of connected agents

View File

@@ -139,15 +139,13 @@ class DockerImageUpdateCheck {
console.log("🐳 Starting Docker image update check...");
try {
// Get all Docker images that have a digest and repository
// Get all Docker images that have a digest
// Note: repository is required (non-nullable) in schema, so we don't need to check it
const images = await prisma.docker_images.findMany({
where: {
digest: {
not: null,
},
repository: {
not: null,
},
},
include: {
docker_image_updates: true,

View File

@@ -3,6 +3,7 @@ const { redis, redisConnection } = require("./shared/redis");
const { prisma } = require("./shared/prisma");
const agentWs = require("../agentWs");
const { v4: uuidv4 } = require("uuid");
const { get_current_time } = require("../../utils/timezone");
// Import automation classes
const GitHubUpdateCheck = require("./githubUpdateCheck");
@@ -12,6 +13,7 @@ const OrphanedPackageCleanup = require("./orphanedPackageCleanup");
const DockerInventoryCleanup = require("./dockerInventoryCleanup");
const DockerImageUpdateCheck = require("./dockerImageUpdateCheck");
const MetricsReporting = require("./metricsReporting");
const SystemStatistics = require("./systemStatistics");
// Queue names
const QUEUE_NAMES = {
@@ -22,6 +24,7 @@ const QUEUE_NAMES = {
DOCKER_INVENTORY_CLEANUP: "docker-inventory-cleanup",
DOCKER_IMAGE_UPDATE_CHECK: "docker-image-update-check",
METRICS_REPORTING: "metrics-reporting",
SYSTEM_STATISTICS: "system-statistics",
AGENT_COMMANDS: "agent-commands",
};
@@ -105,6 +108,9 @@ class QueueManager {
this.automations[QUEUE_NAMES.METRICS_REPORTING] = new MetricsReporting(
this,
);
this.automations[QUEUE_NAMES.SYSTEM_STATISTICS] = new SystemStatistics(
this,
);
console.log("✅ All automation classes initialized");
}
@@ -190,6 +196,15 @@ class QueueManager {
workerOptions,
);
// System Statistics Worker
this.workers[QUEUE_NAMES.SYSTEM_STATISTICS] = new Worker(
QUEUE_NAMES.SYSTEM_STATISTICS,
this.automations[QUEUE_NAMES.SYSTEM_STATISTICS].process.bind(
this.automations[QUEUE_NAMES.SYSTEM_STATISTICS],
),
workerOptions,
);
// Agent Commands Worker
this.workers[QUEUE_NAMES.AGENT_COMMANDS] = new Worker(
QUEUE_NAMES.AGENT_COMMANDS,
@@ -216,8 +231,8 @@ class QueueManager {
api_id: api_id,
status: "active",
attempt_number: job.attemptsMade + 1,
created_at: new Date(),
updated_at: new Date(),
created_at: get_current_time(),
updated_at: get_current_time(),
},
});
console.log(`📝 Logged job to job_history: ${job.id} (${type})`);
@@ -257,8 +272,8 @@ class QueueManager {
where: { job_id: job.id },
data: {
status: "completed",
completed_at: new Date(),
updated_at: new Date(),
completed_at: get_current_time(),
updated_at: get_current_time(),
},
});
console.log(`✅ Marked job as completed in job_history: ${job.id}`);
@@ -271,8 +286,8 @@ class QueueManager {
data: {
status: "failed",
error_message: error.message,
completed_at: new Date(),
updated_at: new Date(),
completed_at: get_current_time(),
updated_at: get_current_time(),
},
});
console.log(`❌ Marked job as failed in job_history: ${job.id}`);
@@ -322,6 +337,7 @@ class QueueManager {
await this.automations[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].schedule();
await this.automations[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK].schedule();
await this.automations[QUEUE_NAMES.METRICS_REPORTING].schedule();
await this.automations[QUEUE_NAMES.SYSTEM_STATISTICS].schedule();
}
/**
@@ -357,6 +373,10 @@ class QueueManager {
].triggerManual();
}
async triggerSystemStatistics() {
return this.automations[QUEUE_NAMES.SYSTEM_STATISTICS].triggerManual();
}
async triggerMetricsReporting() {
return this.automations[QUEUE_NAMES.METRICS_REPORTING].triggerManual();
}

View File

@@ -0,0 +1,140 @@
const { prisma } = require("./shared/prisma");
const { v4: uuidv4 } = require("uuid");
/**
* System Statistics Collection Automation
* Collects aggregated system-wide statistics every 30 minutes
* for use in package trends charts
*/
class SystemStatistics {
constructor(queueManager) {
this.queueManager = queueManager;
this.queueName = "system-statistics";
}
/**
* Process system statistics collection job
*/
async process(_job) {
const startTime = Date.now();
console.log("📊 Starting system statistics collection...");
try {
// Calculate unique package counts across all hosts
const uniquePackagesCount = await prisma.packages.count({
where: {
host_packages: {
some: {
needs_update: true,
},
},
},
});
const uniqueSecurityCount = await prisma.packages.count({
where: {
host_packages: {
some: {
needs_update: true,
is_security_update: true,
},
},
},
});
// Calculate total unique packages installed on at least one host
const totalPackages = await prisma.packages.count({
where: {
host_packages: {
some: {}, // At least one host has this package
},
},
});
// Calculate total hosts
const totalHosts = await prisma.hosts.count({
where: {
status: "active",
},
});
// Calculate hosts needing updates (distinct hosts with packages needing updates)
const hostsNeedingUpdates = await prisma.hosts.count({
where: {
status: "active",
host_packages: {
some: {
needs_update: true,
},
},
},
});
// Store statistics in database
await prisma.system_statistics.create({
data: {
id: uuidv4(),
unique_packages_count: uniquePackagesCount,
unique_security_count: uniqueSecurityCount,
total_packages: totalPackages,
total_hosts: totalHosts,
hosts_needing_updates: hostsNeedingUpdates,
timestamp: new Date(),
},
});
const executionTime = Date.now() - startTime;
console.log(
`✅ System statistics collection completed in ${executionTime}ms - Unique packages: ${uniquePackagesCount}, Security: ${uniqueSecurityCount}, Total hosts: ${totalHosts}`,
);
return {
success: true,
uniquePackagesCount,
uniqueSecurityCount,
totalPackages,
totalHosts,
hostsNeedingUpdates,
executionTime,
};
} catch (error) {
const executionTime = Date.now() - startTime;
console.error(
`❌ System statistics collection failed after ${executionTime}ms:`,
error.message,
);
throw error;
}
}
/**
* Schedule recurring system statistics collection (every 30 minutes)
*/
async schedule() {
const job = await this.queueManager.queues[this.queueName].add(
"system-statistics",
{},
{
repeat: { pattern: "*/30 * * * *" }, // Every 30 minutes
jobId: "system-statistics-recurring",
},
);
console.log("✅ System statistics collection scheduled (every 30 minutes)");
return job;
}
/**
* Trigger manual system statistics collection
*/
async triggerManual() {
const job = await this.queueManager.queues[this.queueName].add(
"system-statistics-manual",
{},
{ priority: 1 },
);
console.log("✅ Manual system statistics collection triggered");
return job;
}
}
module.exports = SystemStatistics;

View File

@@ -0,0 +1,107 @@
/**
* Timezone utility functions for consistent timestamp handling
*
* This module provides timezone-aware timestamp functions that use
* the TZ environment variable for consistent timezone handling across
* the application. If TZ is not set, defaults to UTC.
*/
/**
* Get the configured timezone from environment variable
* Defaults to UTC if not set
* @returns {string} Timezone string (e.g., 'UTC', 'America/New_York', 'Europe/London')
*/
function get_timezone() {
return process.env.TZ || process.env.TIMEZONE || "UTC";
}
/**
* Get current date/time in the configured timezone
* Returns a Date object that represents the current time in the configured timezone
* @returns {Date} Current date/time
*/
function get_current_time() {
const tz = get_timezone();
// If UTC, use Date.now() which is always UTC
if (tz === "UTC" || tz === "Etc/UTC") {
return new Date();
}
// For other timezones, we need to create a date string with timezone info
// and parse it. This ensures the date represents the correct time in that timezone.
// For database storage, we always store UTC timestamps
// The timezone is primarily used for display purposes
return new Date();
}
/**
* Get current timestamp in milliseconds (UTC)
* This is always UTC for database storage consistency
* @returns {number} Current timestamp in milliseconds
*/
function get_current_timestamp() {
return Date.now();
}
/**
* Format a date to ISO string in the configured timezone
* @param {Date} date - Date to format (defaults to now)
* @returns {string} ISO formatted date string
*/
function format_date_iso(date = null) {
const d = date || get_current_time();
return d.toISOString();
}
/**
* Parse a date string and return a Date object
* Handles various date formats and timezone conversions
* @param {string} date_string - Date string to parse
* @param {Date} fallback - Fallback date if parsing fails (defaults to now)
* @returns {Date} Parsed date or fallback
*/
function parse_date(date_string, fallback = null) {
if (!date_string) {
return fallback || get_current_time();
}
try {
const date = new Date(date_string);
if (Number.isNaN(date.getTime())) {
return fallback || get_current_time();
}
return date;
} catch (_error) {
return fallback || get_current_time();
}
}
/**
* Convert a date to the configured timezone for display
* @param {Date} date - Date to convert
* @returns {string} Formatted date string in configured timezone
*/
function format_date_for_display(date) {
const tz = get_timezone();
const formatter = new Intl.DateTimeFormat("en-US", {
timeZone: tz,
year: "numeric",
month: "2-digit",
day: "2-digit",
hour: "2-digit",
minute: "2-digit",
second: "2-digit",
hour12: false,
});
return formatter.format(date);
}
module.exports = {
get_timezone,
get_current_time,
get_current_timestamp,
format_date_iso,
parse_date,
format_date_for_display,
};

View File

@@ -1,5 +1,5 @@
{
"$schema": "https://biomejs.dev/schemas/2.3.0/schema.json",
"$schema": "https://biomejs.dev/schemas/2.3.4/schema.json",
"vcs": {
"enabled": true,
"clientKind": "git",

View File

@@ -120,7 +120,6 @@ const Layout = ({ children }) => {
name: "Automation",
href: "/automation",
icon: RefreshCw,
new: true,
});
if (canViewReports()) {

View File

@@ -196,6 +196,25 @@ const Automation = () => {
year: "numeric",
});
}
if (schedule === "Every 30 minutes") {
const now = new Date();
const nextRun = new Date(now);
// Round up to the next 30-minute mark
const minutes = now.getMinutes();
if (minutes < 30) {
nextRun.setMinutes(30, 0, 0);
} else {
nextRun.setHours(nextRun.getHours() + 1, 0, 0, 0);
}
return nextRun.toLocaleString([], {
hour12: true,
hour: "numeric",
minute: "2-digit",
day: "numeric",
month: "numeric",
year: "numeric",
});
}
return "Unknown";
};
@@ -236,6 +255,18 @@ const Automation = () => {
nextHour.setHours(nextHour.getHours() + 1, 0, 0, 0);
return nextHour.getTime();
}
if (schedule === "Every 30 minutes") {
const now = new Date();
const nextRun = new Date(now);
// Round up to the next 30-minute mark
const minutes = now.getMinutes();
if (minutes < 30) {
nextRun.setMinutes(30, 0, 0);
} else {
nextRun.setHours(nextRun.getHours() + 1, 0, 0, 0);
}
return nextRun.getTime();
}
return Number.MAX_SAFE_INTEGER; // Unknown schedules go to bottom
};
@@ -294,6 +325,8 @@ const Automation = () => {
endpoint = "/automation/trigger/docker-inventory-cleanup";
} else if (jobType === "agent-collection") {
endpoint = "/automation/trigger/agent-collection";
} else if (jobType === "system-statistics") {
endpoint = "/automation/trigger/system-statistics";
}
const _response = await api.post(endpoint, data);
@@ -615,6 +648,10 @@ const Automation = () => {
automation.queue.includes("agent-commands")
) {
triggerManualJob("agent-collection");
} else if (
automation.queue.includes("system-statistics")
) {
triggerManualJob("system-statistics");
}
}}
className="inline-flex items-center justify-center w-6 h-6 border border-transparent rounded text-white bg-green-600 hover:bg-green-700 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-green-500 transition-colors duration-200"

View File

@@ -55,6 +55,8 @@ const Dashboard = () => {
const [cardPreferences, setCardPreferences] = useState([]);
const [packageTrendsPeriod, setPackageTrendsPeriod] = useState("1"); // days
const [packageTrendsHost, setPackageTrendsHost] = useState("all"); // host filter
const [systemStatsJobId, setSystemStatsJobId] = useState(null); // Track job ID for system statistics
const [isTriggeringJob, setIsTriggeringJob] = useState(false);
const navigate = useNavigate();
const { isDark } = useTheme();
const { user } = useAuth();
@@ -772,17 +774,58 @@ const Dashboard = () => {
<h3 className="text-lg font-medium text-secondary-900 dark:text-white">
Package Trends Over Time
</h3>
<div className="flex flex-col gap-2">
<div className="flex items-center gap-3">
{/* Refresh Button */}
<button
type="button"
onClick={() => refetchPackageTrends()}
disabled={packageTrendsFetching}
onClick={async () => {
if (packageTrendsHost === "all") {
// For "All Hosts", trigger system statistics collection job
setIsTriggeringJob(true);
try {
const response =
await dashboardAPI.triggerSystemStatistics();
if (response.data?.data?.jobId) {
setSystemStatsJobId(response.data.data.jobId);
// Wait a moment for the job to complete, then refetch
setTimeout(() => {
refetchPackageTrends();
}, 2000);
// Clear the job ID message after 2 seconds
setTimeout(() => {
setSystemStatsJobId(null);
}, 2000);
}
} catch (error) {
console.error(
"Failed to trigger system statistics:",
error,
);
// Still refetch data even if job trigger fails
refetchPackageTrends();
} finally {
setIsTriggeringJob(false);
}
} else {
// For individual host, just refetch the data
refetchPackageTrends();
}
}}
disabled={packageTrendsFetching || isTriggeringJob}
className="px-3 py-1.5 text-sm border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-secondary-900 dark:text-white hover:bg-secondary-50 dark:hover:bg-secondary-700 focus:ring-2 focus:ring-primary-500 focus:border-primary-500 disabled:opacity-50 disabled:cursor-not-allowed flex items-center gap-2"
title="Refresh data"
title={
packageTrendsHost === "all"
? "Trigger system statistics collection"
: "Refresh data"
}
>
<RefreshCw
className={`h-4 w-4 ${packageTrendsFetching ? "animate-spin" : ""}`}
className={`h-4 w-4 ${
packageTrendsFetching || isTriggeringJob
? "animate-spin"
: ""
}`}
/>
Refresh
</button>
@@ -804,7 +847,11 @@ const Dashboard = () => {
{/* Host Selector */}
<select
value={packageTrendsHost}
onChange={(e) => setPackageTrendsHost(e.target.value)}
onChange={(e) => {
setPackageTrendsHost(e.target.value);
// Clear job ID message when host selection changes
setSystemStatsJobId(null);
}}
className="px-3 py-1.5 text-sm border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-secondary-900 dark:text-white focus:ring-2 focus:ring-primary-500 focus:border-primary-500"
>
<option value="all">All Hosts</option>
@@ -823,6 +870,13 @@ const Dashboard = () => {
)}
</select>
</div>
{/* Job ID Message */}
{systemStatsJobId && packageTrendsHost === "all" && (
<p className="text-xs text-secondary-600 dark:text-secondary-400 ml-1">
Ran collection job #{systemStatsJobId}
</p>
)}
</div>
</div>
<div className="h-64 w-full">
@@ -1167,13 +1221,40 @@ const Dashboard = () => {
title: (context) => {
const label = context[0].label;
// Handle "Now" label
if (label === "Now") {
return "Now";
}
// Handle empty or invalid labels
if (!label || typeof label !== "string") {
return "Unknown Date";
}
// Check if it's a full ISO timestamp (for "Last 24 hours")
// Format: "2025-01-15T14:30:00.000Z" or "2025-01-15T14:30:00.000"
if (label.includes("T") && label.includes(":")) {
try {
const date = new Date(label);
// Check if date is valid
if (Number.isNaN(date.getTime())) {
return label; // Return original label if date is invalid
}
// Format full ISO timestamp with date and time
return date.toLocaleDateString("en-US", {
month: "short",
day: "numeric",
hour: "numeric",
minute: "2-digit",
hour12: true,
});
} catch (_error) {
return label; // Return original label if parsing fails
}
}
// Format hourly labels (e.g., "2025-10-07T14" -> "Oct 7, 2:00 PM")
if (label.includes("T")) {
if (label.includes("T") && !label.includes(":")) {
try {
const date = new Date(`${label}:00:00`);
// Check if date is valid
@@ -1233,13 +1314,41 @@ const Dashboard = () => {
callback: function (value, _index, _ticks) {
const label = this.getLabelForValue(value);
// Handle "Now" label
if (label === "Now") {
return "Now";
}
// Handle empty or invalid labels
if (!label || typeof label !== "string") {
return "Unknown";
}
// Check if it's a full ISO timestamp (for "Last 24 hours")
// Format: "2025-01-15T14:30:00.000Z" or "2025-01-15T14:30:00.000"
if (label.includes("T") && label.includes(":")) {
try {
const date = new Date(label);
// Check if date is valid
if (Number.isNaN(date.getTime())) {
return label; // Return original label if date is invalid
}
// Extract hour from full ISO timestamp
const hourNum = date.getHours();
return hourNum === 0
? "12 AM"
: hourNum < 12
? `${hourNum} AM`
: hourNum === 12
? "12 PM"
: `${hourNum - 12} PM`;
} catch (_error) {
return label; // Return original label if parsing fails
}
}
// Format hourly labels (e.g., "2025-10-07T14" -> "2 PM")
if (label.includes("T")) {
if (label.includes("T") && !label.includes(":")) {
try {
const hour = label.split("T")[1];
const hourNum = parseInt(hour, 10);

View File

@@ -281,6 +281,67 @@ const HostDetail = () => {
},
});
// Fetch integration status
const {
data: integrationsData,
isLoading: isLoadingIntegrations,
refetch: refetchIntegrations,
} = useQuery({
queryKey: ["host-integrations", hostId],
queryFn: () =>
adminHostsAPI.getIntegrations(hostId).then((res) => res.data),
staleTime: 30 * 1000, // 30 seconds
refetchOnWindowFocus: false,
enabled: !!hostId && activeTab === "integrations",
});
// Refetch integrations when WebSocket status changes (e.g., after agent restart)
useEffect(() => {
if (
wsStatus?.connected &&
activeTab === "integrations" &&
integrationsData?.data?.connected === false
) {
// Agent just reconnected, refetch integrations to get updated connection status
refetchIntegrations();
}
}, [
wsStatus?.connected,
activeTab,
integrationsData?.data?.connected,
refetchIntegrations,
]);
// Toggle integration mutation
const toggleIntegrationMutation = useMutation({
mutationFn: ({ integrationName, enabled }) =>
adminHostsAPI
.toggleIntegration(hostId, integrationName, enabled)
.then((res) => res.data),
onSuccess: (data) => {
// Optimistically update the cache with the new state
queryClient.setQueryData(["host-integrations", hostId], (oldData) => {
if (!oldData) return oldData;
return {
...oldData,
data: {
...oldData.data,
integrations: {
...oldData.data.integrations,
[data.data.integration]: data.data.enabled,
},
},
};
});
// Also invalidate to ensure we get fresh data
queryClient.invalidateQueries(["host-integrations", hostId]);
},
onError: () => {
// On error, refetch to get the actual state
refetchIntegrations();
},
});
const handleDeleteHost = async () => {
if (
window.confirm(
@@ -666,6 +727,17 @@ const HostDetail = () => {
>
Notes
</button>
<button
type="button"
onClick={() => handleTabChange("integrations")}
className={`px-4 py-2 text-sm font-medium ${
activeTab === "integrations"
? "text-primary-600 dark:text-primary-400 border-b-2 border-primary-500"
: "text-secondary-500 dark:text-secondary-400 hover:text-secondary-700 dark:hover:text-secondary-300"
}`}
>
Integrations
</button>
</div>
<div className="p-4">
@@ -1446,6 +1518,101 @@ const HostDetail = () => {
{/* Agent Queue */}
{activeTab === "queue" && <AgentQueueTab hostId={hostId} />}
{/* Integrations */}
{activeTab === "integrations" && (
<div className="max-w-2xl space-y-4">
{isLoadingIntegrations ? (
<div className="flex items-center justify-center h-32">
<RefreshCw className="h-6 w-6 animate-spin text-primary-600" />
</div>
) : (
<div className="space-y-4">
{/* Docker Integration */}
<div className="bg-secondary-50 dark:bg-secondary-700 rounded-lg p-4 border border-secondary-200 dark:border-secondary-600">
<div className="flex items-start justify-between gap-4">
<div className="flex-1">
<div className="flex items-center gap-3 mb-2">
<Database className="h-5 w-5 text-primary-600 dark:text-primary-400" />
<h4 className="text-sm font-medium text-secondary-900 dark:text-white">
Docker
</h4>
{integrationsData?.data?.integrations?.docker ? (
<span className="inline-flex items-center px-2 py-0.5 rounded text-xs font-semibold bg-green-100 text-green-800 dark:bg-green-900 dark:text-green-200">
Enabled
</span>
) : (
<span className="inline-flex items-center px-2 py-0.5 rounded text-xs font-semibold bg-gray-200 text-gray-600 dark:bg-gray-600 dark:text-gray-400">
Disabled
</span>
)}
</div>
<p className="text-xs text-secondary-600 dark:text-secondary-300">
Monitor Docker containers, images, volumes, and
networks. Collects real-time container status
events.
</p>
</div>
<div className="flex-shrink-0">
<button
type="button"
onClick={() =>
toggleIntegrationMutation.mutate({
integrationName: "docker",
enabled:
!integrationsData?.data?.integrations?.docker,
})
}
disabled={
toggleIntegrationMutation.isPending ||
!wsStatus?.connected
}
title={
!wsStatus?.connected
? "Agent is not connected"
: integrationsData?.data?.integrations?.docker
? "Disable Docker integration"
: "Enable Docker integration"
}
className={`relative inline-flex h-5 w-9 items-center rounded-full transition-colors focus:outline-none focus:ring-2 focus:ring-primary-500 focus:ring-offset-2 ${
integrationsData?.data?.integrations?.docker
? "bg-primary-600 dark:bg-primary-500"
: "bg-secondary-200 dark:bg-secondary-600"
} ${
toggleIntegrationMutation.isPending ||
!integrationsData?.data?.connected
? "opacity-50 cursor-not-allowed"
: ""
}`}
>
<span
className={`inline-block h-3 w-3 transform rounded-full bg-white transition-transform ${
integrationsData?.data?.integrations?.docker
? "translate-x-5"
: "translate-x-1"
}`}
/>
</button>
</div>
</div>
{!wsStatus?.connected && (
<p className="text-xs text-warning-600 dark:text-warning-400 mt-2">
Agent must be connected via WebSocket to toggle
integrations
</p>
)}
{toggleIntegrationMutation.isPending && (
<p className="text-xs text-secondary-600 dark:text-secondary-400 mt-2">
Updating integration...
</p>
)}
</div>
{/* Future integrations can be added here with the same pattern */}
</div>
)}
</div>
)}
</div>
</div>
</div>
@@ -1639,7 +1806,8 @@ const CredentialsModal = ({ host, isOpen, onClose }) => {
>
<option value="amd64">AMD64 (x86_64) - Default</option>
<option value="386">386 (i386) - 32-bit</option>
<option value="arm64">ARM64 (aarch64) - ARM</option>
<option value="arm64">ARM64 (aarch64) - ARM 64-bit</option>
<option value="arm">ARM (armv7l/armv6l) - ARM 32-bit</option>
</select>
<p className="text-xs text-primary-600 dark:text-primary-400 mt-1">
Select the architecture of the target host

View File

@@ -237,8 +237,14 @@ const Repositories = () => {
// Handle special cases
if (sortField === "security") {
aValue = a.isSecure ? "Secure" : "Insecure";
bValue = b.isSecure ? "Secure" : "Insecure";
// Use the same logic as filtering to determine isSecure
const aIsSecure =
a.isSecure !== undefined ? a.isSecure : a.url.startsWith("https://");
const bIsSecure =
b.isSecure !== undefined ? b.isSecure : b.url.startsWith("https://");
// Sort by boolean: true (Secure) comes before false (Insecure) when ascending
aValue = aIsSecure ? 1 : 0;
bValue = bIsSecure ? 1 : 0;
} else if (sortField === "status") {
aValue = a.is_active ? "Active" : "Inactive";
bValue = b.is_active ? "Active" : "Inactive";
@@ -535,12 +541,12 @@ const Repositories = () => {
{visibleColumns.map((column) => (
<th
key={column.id}
className="px-4 py-2 text-center text-xs font-medium text-secondary-500 dark:text-secondary-300 uppercase tracking-wider"
className="px-4 py-2 text-left text-xs font-medium text-secondary-500 dark:text-secondary-300 uppercase tracking-wider"
>
<button
type="button"
onClick={() => handleSort(column.id)}
className="flex items-center gap-1 hover:text-secondary-700 dark:hover:text-secondary-200 transition-colors"
className="flex items-center justify-start gap-1 hover:text-secondary-700 dark:hover:text-secondary-200 transition-colors"
>
{column.label}
{getSortIcon(column.id)}
@@ -559,7 +565,7 @@ const Repositories = () => {
{visibleColumns.map((column) => (
<td
key={column.id}
className="px-4 py-2 whitespace-nowrap text-center"
className="px-4 py-2 whitespace-nowrap text-left"
>
{renderCellContent(column, repo)}
</td>
@@ -622,7 +628,7 @@ const Repositories = () => {
? repo.isSecure
: repo.url.startsWith("https://");
return (
<div className="flex items-center justify-center">
<div className="flex items-center justify-start">
{isSecure ? (
<div className="flex items-center gap-1 text-green-600">
<Lock className="h-4 w-4" />
@@ -651,14 +657,14 @@ const Repositories = () => {
);
case "hostCount":
return (
<div className="flex items-center justify-center gap-1 text-sm text-secondary-900 dark:text-white">
<div className="flex items-center justify-start gap-1 text-sm text-secondary-900 dark:text-white">
<Server className="h-4 w-4" />
<span>{repo.hostCount}</span>
</div>
);
case "actions":
return (
<div className="flex items-center justify-center">
<div className="flex items-center justify-start">
<button
type="button"
onClick={(e) => handleDeleteRepository(repo, e)}

View File

@@ -99,6 +99,8 @@ export const dashboardAPI = {
},
getRecentUsers: () => api.get("/dashboard/recent-users"),
getRecentCollection: () => api.get("/dashboard/recent-collection"),
triggerSystemStatistics: () =>
api.post("/automation/trigger/system-statistics"),
};
// Admin Hosts API (for management interface)
@@ -129,6 +131,11 @@ export const adminHostsAPI = {
api.patch(`/hosts/${hostId}/notes`, {
notes: notes,
}),
getIntegrations: (hostId) => api.get(`/hosts/${hostId}/integrations`),
toggleIntegration: (hostId, integrationName, enabled) =>
api.post(`/hosts/${hostId}/integrations/${integrationName}/toggle`, {
enabled,
}),
};
// Host Groups API

package-lock.json (generated, 311 lines changed)
View File

@@ -1,19 +1,19 @@
{
"name": "patchmon",
"version": "1.3.1",
"version": "1.3.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "patchmon",
"version": "1.3.1",
"version": "1.3.2",
"license": "AGPL-3.0",
"workspaces": [
"backend",
"frontend"
],
"devDependencies": {
"@biomejs/biome": "^2.3.0",
"@biomejs/biome": "^2.3.4",
"concurrently": "^8.2.2",
"lefthook": "^1.13.4"
},
@@ -23,7 +23,7 @@
},
"backend": {
"name": "patchmon-backend",
"version": "1.3.1",
"version": "1.3.2",
"license": "AGPL-3.0",
"dependencies": {
"@bull-board/api": "^6.13.1",
@@ -36,7 +36,7 @@
"cors": "^2.8.5",
"dotenv": "^16.4.7",
"express": "^4.21.2",
"express-rate-limit": "^7.5.0",
"express-rate-limit": "^8.0.0",
"express-validator": "^7.2.0",
"helmet": "^8.0.0",
"ioredis": "^5.8.1",
@@ -59,7 +59,7 @@
},
"frontend": {
"name": "patchmon-frontend",
"version": "1.3.1",
"version": "1.3.2",
"license": "AGPL-3.0",
"dependencies": {
"@dnd-kit/core": "^6.3.1",
@@ -134,6 +134,7 @@
"version": "7.28.4",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@babel/code-frame": "^7.27.1",
"@babel/generator": "^7.28.3",
@@ -362,7 +363,9 @@
}
},
"node_modules/@biomejs/biome": {
"version": "2.3.0",
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/biome/-/biome-2.3.4.tgz",
"integrity": "sha512-TU08LXjBHdy0mEY9APtEtZdNQQijXUDSXR7IK1i45wgoPD5R0muK7s61QcFir6FpOj/RP1+YkPx5QJlycXUU3w==",
"dev": true,
"license": "MIT OR Apache-2.0",
"bin": {
@@ -376,18 +379,88 @@
"url": "https://opencollective.com/biome"
},
"optionalDependencies": {
"@biomejs/cli-darwin-arm64": "2.3.0",
"@biomejs/cli-darwin-x64": "2.3.0",
"@biomejs/cli-linux-arm64": "2.3.0",
"@biomejs/cli-linux-arm64-musl": "2.3.0",
"@biomejs/cli-linux-x64": "2.3.0",
"@biomejs/cli-linux-x64-musl": "2.3.0",
"@biomejs/cli-win32-arm64": "2.3.0",
"@biomejs/cli-win32-x64": "2.3.0"
"@biomejs/cli-darwin-arm64": "2.3.4",
"@biomejs/cli-darwin-x64": "2.3.4",
"@biomejs/cli-linux-arm64": "2.3.4",
"@biomejs/cli-linux-arm64-musl": "2.3.4",
"@biomejs/cli-linux-x64": "2.3.4",
"@biomejs/cli-linux-x64-musl": "2.3.4",
"@biomejs/cli-win32-arm64": "2.3.4",
"@biomejs/cli-win32-x64": "2.3.4"
}
},
"node_modules/@biomejs/cli-darwin-arm64": {
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/cli-darwin-arm64/-/cli-darwin-arm64-2.3.4.tgz",
"integrity": "sha512-w40GvlNzLaqmuWYiDU6Ys9FNhJiclngKqcGld3iJIiy2bpJ0Q+8n3haiaC81uTPY/NA0d8Q/I3Z9+ajc14102Q==",
"cpu": [
"arm64"
],
"dev": true,
"license": "MIT OR Apache-2.0",
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": ">=14.21.3"
}
},
"node_modules/@biomejs/cli-darwin-x64": {
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/cli-darwin-x64/-/cli-darwin-x64-2.3.4.tgz",
"integrity": "sha512-3s7TLVtjJ7ni1xADXsS7x7GMUrLBZXg8SemXc3T0XLslzvqKj/dq1xGeBQ+pOWQzng9MaozfacIHdK2UlJ3jGA==",
"cpu": [
"x64"
],
"dev": true,
"license": "MIT OR Apache-2.0",
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": ">=14.21.3"
}
},
"node_modules/@biomejs/cli-linux-arm64": {
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/cli-linux-arm64/-/cli-linux-arm64-2.3.4.tgz",
"integrity": "sha512-y7efHyyM2gYmHy/AdWEip+VgTMe9973aP7XYKPzu/j8JxnPHuSUXftzmPhkVw0lfm4ECGbdBdGD6+rLmTgNZaA==",
"cpu": [
"arm64"
],
"dev": true,
"license": "MIT OR Apache-2.0",
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=14.21.3"
}
},
"node_modules/@biomejs/cli-linux-arm64-musl": {
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/cli-linux-arm64-musl/-/cli-linux-arm64-musl-2.3.4.tgz",
"integrity": "sha512-IruVGQRwMURivWazchiq7gKAqZSFs5so6gi0hJyxk7x6HR+iwZbO2IxNOqyLURBvL06qkIHs7Wffl6Bw30vCbQ==",
"cpu": [
"arm64"
],
"dev": true,
"license": "MIT OR Apache-2.0",
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=14.21.3"
}
},
"node_modules/@biomejs/cli-linux-x64": {
"version": "2.3.0",
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/cli-linux-x64/-/cli-linux-x64-2.3.4.tgz",
"integrity": "sha512-gKfjWR/6/dfIxPJCw8REdEowiXCkIpl9jycpNVHux8aX2yhWPLjydOshkDL6Y/82PcQJHn95VCj7J+BRcE5o1Q==",
"cpu": [
"x64"
],
@@ -401,6 +474,57 @@
"node": ">=14.21.3"
}
},
"node_modules/@biomejs/cli-linux-x64-musl": {
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/cli-linux-x64-musl/-/cli-linux-x64-musl-2.3.4.tgz",
"integrity": "sha512-mzKFFv/w66e4/jCobFmD3kymCqG+FuWE7sVa4Yjqd9v7qt2UhXo67MSZKY9Ih18V2IwPzRKQPCw6KwdZs6AXSA==",
"cpu": [
"x64"
],
"dev": true,
"license": "MIT OR Apache-2.0",
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">=14.21.3"
}
},
"node_modules/@biomejs/cli-win32-arm64": {
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/cli-win32-arm64/-/cli-win32-arm64-2.3.4.tgz",
"integrity": "sha512-5TJ6JfVez+yyupJ/iGUici2wzKf0RrSAxJhghQXtAEsc67OIpdwSKAQboemILrwKfHDi5s6mu7mX+VTCTUydkw==",
"cpu": [
"arm64"
],
"dev": true,
"license": "MIT OR Apache-2.0",
"optional": true,
"os": [
"win32"
],
"engines": {
"node": ">=14.21.3"
}
},
"node_modules/@biomejs/cli-win32-x64": {
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/@biomejs/cli-win32-x64/-/cli-win32-x64-2.3.4.tgz",
"integrity": "sha512-FGCijXecmC4IedQ0esdYNlMpx0Jxgf4zceCaMu6fkjWyjgn50ZQtMiqZZQ0Q/77yqPxvtkgZAvt5uGw0gAAjig==",
"cpu": [
"x64"
],
"dev": true,
"license": "MIT OR Apache-2.0",
"optional": true,
"os": [
"win32"
],
"engines": {
"node": ">=14.21.3"
}
},
"node_modules/@bull-board/api": {
"version": "6.13.1",
"license": "MIT",
@@ -424,6 +548,7 @@
"node_modules/@bull-board/ui": {
"version": "6.13.1",
"license": "MIT",
"peer": true,
"dependencies": {
"@bull-board/api": "6.13.1"
}
@@ -457,6 +582,7 @@
"node_modules/@dnd-kit/core": {
"version": "6.3.1",
"license": "MIT",
"peer": true,
"dependencies": {
"@dnd-kit/accessibility": "^3.1.1",
"@dnd-kit/utilities": "^3.2.2",
@@ -897,6 +1023,7 @@
"version": "18.3.24",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@types/prop-types": "*",
"csstype": "^3.0.2"
@@ -1144,6 +1271,7 @@
}
],
"license": "MIT",
"peer": true,
"dependencies": {
"baseline-browser-mapping": "^2.8.3",
"caniuse-lite": "^1.0.30001741",
@@ -1333,6 +1461,7 @@
"node_modules/chart.js": {
"version": "4.5.0",
"license": "MIT",
"peer": true,
"dependencies": {
"@kurkle/color": "^0.3.0"
},
@@ -1907,6 +2036,7 @@
"node_modules/express": {
"version": "4.21.2",
"license": "MIT",
"peer": true,
"dependencies": {
"accepts": "~1.3.8",
"array-flatten": "1.1.1",
@@ -1949,8 +2079,13 @@
}
},
"node_modules/express-rate-limit": {
"version": "7.5.1",
"version": "8.2.1",
"resolved": "https://registry.npmjs.org/express-rate-limit/-/express-rate-limit-8.2.1.tgz",
"integrity": "sha512-PCZEIEIxqwhzw4KF0n7QF4QqruVTcF73O5kFKUnGOyjbCCgizBBiFaYpd/fnBLUMPw/BWw9OsiN7GgrNYr7j6g==",
"license": "MIT",
"dependencies": {
"ip-address": "10.0.1"
},
"engines": {
"node": ">= 16"
},
@@ -2448,6 +2583,15 @@
"url": "https://opencollective.com/ioredis"
}
},
"node_modules/ip-address": {
"version": "10.0.1",
"resolved": "https://registry.npmjs.org/ip-address/-/ip-address-10.0.1.tgz",
"integrity": "sha512-NWv9YLW4PoW2B7xtzaS3NCot75m6nK7Icdv0o3lfMceJVRfSoQwqD4wEH5rLwoKJwUiZ/rfpiVBhnaF0FK4HoA==",
"license": "MIT",
"engines": {
"node": ">= 12"
}
},
"node_modules/ipaddr.js": {
"version": "1.9.1",
"license": "MIT",
@@ -2672,6 +2816,76 @@
"lefthook-windows-x64": "1.13.5"
}
},
"node_modules/lefthook-darwin-arm64": {
"version": "1.13.5",
"resolved": "https://registry.npmjs.org/lefthook-darwin-arm64/-/lefthook-darwin-arm64-1.13.5.tgz",
"integrity": "sha512-BYt5CnAOXasVCS6i+A4ljUo9xru/B5uMFD6EWHhs3R26jGF7mBSDxM3ErzXTUaJRTP0kQI/XBmgqBryBqoqZOQ==",
"cpu": [
"arm64"
],
"dev": true,
"license": "MIT",
"optional": true,
"os": [
"darwin"
]
},
"node_modules/lefthook-darwin-x64": {
"version": "1.13.5",
"resolved": "https://registry.npmjs.org/lefthook-darwin-x64/-/lefthook-darwin-x64-1.13.5.tgz",
"integrity": "sha512-ZDtLBzvI5e26C/RZ4irOHpELTd22x9lDTgF2+eCYcnrBWOkB7800V8tuAvBybsLGvg6JwKjFxn+NTRNZnCC2hw==",
"cpu": [
"x64"
],
"dev": true,
"license": "MIT",
"optional": true,
"os": [
"darwin"
]
},
"node_modules/lefthook-freebsd-arm64": {
"version": "1.13.5",
"resolved": "https://registry.npmjs.org/lefthook-freebsd-arm64/-/lefthook-freebsd-arm64-1.13.5.tgz",
"integrity": "sha512-uQ/kQZSSedw74aGCpsfOPN4yVt3klg8grOP6gHQOCRUMv5oK/Lj3pe1PylpTuuhxWORWRzkauPMot26J0OZZdA==",
"cpu": [
"arm64"
],
"dev": true,
"license": "MIT",
"optional": true,
"os": [
"freebsd"
]
},
"node_modules/lefthook-freebsd-x64": {
"version": "1.13.5",
"resolved": "https://registry.npmjs.org/lefthook-freebsd-x64/-/lefthook-freebsd-x64-1.13.5.tgz",
"integrity": "sha512-6czek8XagVrI7ExURawkfrfX40Qjc/wktc8bLq/iXfRlmdvKDMrx2FrA82mDfEVCAEz+tTvkteK1TfR3icYF3Q==",
"cpu": [
"x64"
],
"dev": true,
"license": "MIT",
"optional": true,
"os": [
"freebsd"
]
},
"node_modules/lefthook-linux-arm64": {
"version": "1.13.5",
"resolved": "https://registry.npmjs.org/lefthook-linux-arm64/-/lefthook-linux-arm64-1.13.5.tgz",
"integrity": "sha512-MjWtiuW1br+rpTtgG1KGV53mSGtL5MWQwgafYzrFleJ89fKb86F4TD/4mVNzk5thmZ+HVPZw9bRZGUHFBnNJWg==",
"cpu": [
"arm64"
],
"dev": true,
"license": "MIT",
"optional": true,
"os": [
"linux"
]
},
"node_modules/lefthook-linux-x64": {
"version": "1.13.5",
"cpu": [
@@ -2684,6 +2898,62 @@
"linux"
]
},
"node_modules/lefthook-openbsd-arm64": {
"version": "1.13.5",
"resolved": "https://registry.npmjs.org/lefthook-openbsd-arm64/-/lefthook-openbsd-arm64-1.13.5.tgz",
"integrity": "sha512-lYXrWf0/hBrwtG8ceaHq886bcqRKh3Lfv+jZJs+ykMLB6L/kaqk8tA4V2NHWydQ5h56o45ugs/580nMz36ZdRg==",
"cpu": [
"arm64"
],
"dev": true,
"license": "MIT",
"optional": true,
"os": [
"openbsd"
]
},
"node_modules/lefthook-openbsd-x64": {
"version": "1.13.5",
"resolved": "https://registry.npmjs.org/lefthook-openbsd-x64/-/lefthook-openbsd-x64-1.13.5.tgz",
"integrity": "sha512-Ba1JrsRbfan4WKd8Q7gUhTxCUuppXzirDObd3JxpLRSLxA47yxhjMv7KByDunRDTvzTgsXoykZI6mPupkc1JiQ==",
"cpu": [
"x64"
],
"dev": true,
"license": "MIT",
"optional": true,
"os": [
"openbsd"
]
},
"node_modules/lefthook-windows-arm64": {
"version": "1.13.5",
"resolved": "https://registry.npmjs.org/lefthook-windows-arm64/-/lefthook-windows-arm64-1.13.5.tgz",
"integrity": "sha512-Y/CpmEIb0hlFe+kTT/efWgX6+/gUTp5NItTF+gmUrY1/G/bTLIxdIRS7WpodVM0MEN24sOrQVTSi9DN9FvGoGg==",
"cpu": [
"arm64"
],
"dev": true,
"license": "MIT",
"optional": true,
"os": [
"win32"
]
},
"node_modules/lefthook-windows-x64": {
"version": "1.13.5",
"resolved": "https://registry.npmjs.org/lefthook-windows-x64/-/lefthook-windows-x64-1.13.5.tgz",
"integrity": "sha512-WJBqGNBlFJnunRwy12QyaDHdGULtostPqpYSZSS4boFJDY0lP5qtz9lAGmJ49aA5GQ19jrnDjGLwVPFiwIqksQ==",
"cpu": [
"x64"
],
"dev": true,
"license": "MIT",
"optional": true,
"os": [
"win32"
]
},
"node_modules/lilconfig": {
"version": "3.1.3",
"dev": true,
@@ -3296,6 +3566,7 @@
}
],
"license": "MIT",
"peer": true,
"dependencies": {
"nanoid": "^3.3.11",
"picocolors": "^1.1.1",
@@ -3425,6 +3696,7 @@
"devOptional": true,
"hasInstallScript": true,
"license": "Apache-2.0",
"peer": true,
"dependencies": {
"@prisma/config": "6.16.2",
"@prisma/engines": "6.16.2"
@@ -3614,6 +3886,7 @@
"node_modules/react": {
"version": "18.3.1",
"license": "MIT",
"peer": true,
"dependencies": {
"loose-envify": "^1.1.0"
},
@@ -3632,6 +3905,7 @@
"node_modules/react-dom": {
"version": "18.3.1",
"license": "MIT",
"peer": true,
"dependencies": {
"loose-envify": "^1.1.0",
"scheduler": "^0.23.2"
@@ -4349,6 +4623,7 @@
"version": "4.0.3",
"dev": true,
"license": "MIT",
"peer": true,
"engines": {
"node": ">=12"
},
@@ -4501,6 +4776,7 @@
"version": "7.1.7",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"esbuild": "^0.25.0",
"fdir": "^6.5.0",
@@ -4590,6 +4866,7 @@
"version": "4.0.3",
"dev": true,
"license": "MIT",
"peer": true,
"engines": {
"node": ">=12"
},

View File

@@ -25,7 +25,7 @@
"lint:fix": "biome check --write ."
},
"devDependencies": {
"@biomejs/biome": "^2.3.0",
"@biomejs/biome": "^2.3.4",
"concurrently": "^8.2.2",
"lefthook": "^1.13.4"
},

View File

@@ -66,27 +66,27 @@ SELECTED_SERVICE_NAME=""
# Functions
print_status() {
echo -e "${GREEN}$1${NC}"
printf "${GREEN}%s${NC}\n" "$1"
}
print_info() {
echo -e "${BLUE} $1${NC}"
printf "${BLUE}%s${NC}\n" "$1"
}
print_error() {
echo -e "${RED}$1${NC}"
printf "${RED}%s${NC}\n" "$1"
}
print_warning() {
echo -e "${YELLOW}⚠️ $1${NC}"
printf "${YELLOW}%s${NC}\n" "$1"
}
print_question() {
echo -e "${BLUE}$1${NC}"
printf "${BLUE}%s${NC}\n" "$1"
}
print_success() {
echo -e "${GREEN}🎉 $1${NC}"
printf "${GREEN}%s${NC}\n" "$1"
}
# Interactive input functions
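
A side note on the echo -e to printf change above: routing the message through a %s format specifier keeps the text from being re-interpreted by the shell's echo. A minimal illustration (standalone bash, not part of setup.sh):

# echo -e re-expands backslash escapes stored in the data itself
msg='path with \t inside'
echo -e "$msg"        # the \t is expanded into a literal tab character
printf '%s\n' "$msg"  # the \t is printed exactly as stored

# In the print_* helpers above, only the colour codes live in the format string,
# while the caller-supplied message is passed as the %s argument.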
@@ -443,7 +443,7 @@ generate_redis_password() {
# Find next available Redis database
find_next_redis_db() {
print_info "Finding next available Redis database..."
print_info "Finding next available Redis database..." >&2
# Start from database 0 and keep checking until we find an empty one
local db_num=0
@@ -463,11 +463,11 @@ find_next_redis_db() {
# Try to load admin credentials if ACL file exists
if [ -f /etc/redis/users.acl ] && grep -q "^user admin" /etc/redis/users.acl; then
# Redis is configured with ACL - try to extract admin password
print_info "Redis requires authentication, attempting with admin credentials..."
print_info "Redis requires authentication, attempting with admin credentials..." >&2
# For multi-instance setups, we can't know the admin password yet
# So we'll just use database 0 as default
print_info "Using database 0 (Redis ACL already configured)"
print_info "Using database 0 (Redis ACL already configured)" >&2
echo "0"
return 0
fi
@@ -484,7 +484,7 @@ find_next_redis_db() {
# Check for authentication errors
if echo "$redis_output" | grep -q "NOAUTH\|WRONGPASS"; then
# If we hit auth errors and haven't configured yet, use database 0
print_info "Redis requires authentication, defaulting to database 0"
print_info "Redis requires authentication, defaulting to database 0" >&2
echo "0"
return 0
fi
@@ -492,10 +492,10 @@ find_next_redis_db() {
# Check for other errors
if echo "$redis_output" | grep -q "ERR"; then
if echo "$redis_output" | grep -q "invalid DB index"; then
print_warning "Reached maximum database limit at database $db_num"
print_warning "Reached maximum database limit at database $db_num" >&2
break
else
print_error "Error checking database $db_num: $redis_output"
print_error "Error checking database $db_num: $redis_output" >&2
return 1
fi
fi
@@ -504,17 +504,17 @@ find_next_redis_db() {
# If database is empty, use it
if [ "$key_count" = "0" ] || [ "$key_count" = "(integer) 0" ]; then
print_status "Found available Redis database: $db_num (empty)"
print_status "Found available Redis database: $db_num (empty)" >&2
echo "$db_num"
return 0
fi
print_info "Database $db_num has $key_count keys, checking next..."
print_info "Database $db_num has $key_count keys, checking next..." >&2
db_num=$((db_num + 1))
done
print_warning "No available Redis databases found (checked 0-$max_attempts)"
print_info "Using database 0 (may have existing data)"
print_warning "No available Redis databases found (checked 0-$max_attempts)" >&2
print_info "Using database 0 (may have existing data)" >&2
echo "0"
return 0
}
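
The >&2 redirections above matter because find_next_redis_db reports its result on stdout and callers capture it with command substitution; any print_* output left on stdout would end up inside the captured value. A hedged sketch of the calling pattern (the REDIS_DB variable name is an assumption; the actual call site is not shown in this diff):

# Progress messages go to the terminal via stderr; only the db number is captured
REDIS_DB=$(find_next_redis_db) || REDIS_DB=0
print_info "Using Redis database: $REDIS_DB"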
@@ -1470,6 +1470,7 @@ EOF
cat > "$config_file" << EOF
server {
listen 80;
listen [::]:80;
server_name $fqdn;
# Security headers
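
The added listen [::]:80; line makes nginx accept IPv6 connections alongside IPv4 for the HTTP block. If the instance is later served over HTTPS, the SSL server block would need the matching IPv6 listener as well; a sketch under that assumption (certificate paths and domain are placeholders, not taken from this script):

server {
    listen 443 ssl;
    listen [::]:443 ssl;
    server_name patchmon.example.com;
    ssl_certificate     /etc/letsencrypt/live/patchmon.example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/patchmon.example.com/privkey.pem;
}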
@@ -1657,7 +1658,7 @@ start_services() {
local logs=$(journalctl -u "$SERVICE_NAME" -n 50 --no-pager 2>/dev/null || echo "")
if echo "$logs" | grep -q "WRONGPASS\|NOAUTH"; then
print_error "Detected Redis authentication error!"
print_error "Detected Redis authentication error!"
print_info "The service cannot authenticate with Redis."
echo ""
print_info "Current Redis configuration in .env:"
@@ -1681,18 +1682,18 @@ start_services() {
print_info " cat /etc/redis/users.acl"
echo ""
elif echo "$logs" | grep -q "ECONNREFUSED.*postgresql\|Connection refused.*5432"; then
print_error "Detected PostgreSQL connection error!"
print_error "Detected PostgreSQL connection error!"
print_info "Check if PostgreSQL is running:"
print_info " systemctl status postgresql"
elif echo "$logs" | grep -q "ECONNREFUSED.*redis\|Connection refused.*6379"; then
print_error "Detected Redis connection error!"
print_error "Detected Redis connection error!"
print_info "Check if Redis is running:"
print_info " systemctl status redis-server"
elif echo "$logs" | grep -q "database.*does not exist"; then
print_error "Database does not exist!"
print_error "Database does not exist!"
print_info "Database: $DB_NAME"
elif echo "$logs" | grep -q "Error:"; then
print_error "Application error detected in logs"
print_error "Application error detected in logs"
fi
echo ""
@@ -1741,9 +1742,9 @@ async function updateSettings() {
});
}
console.log('Database settings updated successfully');
console.log('Database settings updated successfully');
} catch (error) {
console.error('Error updating settings:', error.message);
console.error('Error updating settings:', error.message);
process.exit(1);
} finally {
await prisma.\$disconnect();
@@ -1867,7 +1868,7 @@ EOF
if [ -f "$SUMMARY_FILE" ]; then
print_status "Deployment summary appended to: $SUMMARY_FILE"
else
print_error "⚠️ Failed to append to deployment-info.txt file"
print_error "Failed to append to deployment-info.txt file"
return 1
fi
}
@@ -1949,7 +1950,7 @@ EOF
print_status "Deployment information saved to: $INFO_FILE"
print_info "File details: $(ls -lh "$INFO_FILE" | awk '{print $5, $9}')"
else
print_error "⚠️ Failed to create deployment-info.txt file"
print_error "Failed to create deployment-info.txt file"
return 1
fi
}
@@ -2142,7 +2143,7 @@ deploy_instance() {
log_message "Backend port: $BACKEND_PORT"
log_message "SSL enabled: $USE_LETSENCRYPT"
print_status "🎉 PatchMon instance deployed successfully!"
print_status "PatchMon instance deployed successfully!"
echo ""
print_info "Next steps:"
echo " • Visit your URL: $SERVER_PROTOCOL_SEL://$FQDN (ensure DNS is configured)"
@@ -3236,7 +3237,7 @@ update_installation() {
sleep 5
if systemctl is-active --quiet "$service_name"; then
print_success "Update completed successfully!"
print_success "Update completed successfully!"
print_status "Service $service_name is running"
# Get new version
@@ -3264,7 +3265,7 @@ update_installation() {
local logs=$(journalctl -u "$service_name" -n 50 --no-pager 2>/dev/null || echo "")
if echo "$logs" | grep -q "WRONGPASS\|NOAUTH"; then
print_error "Detected Redis authentication error!"
print_error "Detected Redis authentication error!"
print_info "The service cannot authenticate with Redis."
echo ""
print_info "Current Redis configuration in .env:"
@@ -3281,12 +3282,12 @@ update_installation() {
print_info " redis-cli --user $test_user --pass $test_pass -n ${test_db:-0} ping"
echo ""
elif echo "$logs" | grep -q "ECONNREFUSED"; then
print_error "Detected connection refused error!"
print_error "Detected connection refused error!"
print_info "Check if required services are running:"
print_info " systemctl status postgresql"
print_info " systemctl status redis-server"
elif echo "$logs" | grep -q "Error:"; then
print_error "Application error detected in logs"
print_error "Application error detected in logs"
fi
echo ""
@@ -3319,7 +3320,7 @@ main() {
# Handle update mode
if [ "$UPDATE_MODE" = "true" ]; then
print_banner
print_info "🔄 PatchMon Update Mode"
print_info "PatchMon Update Mode"
echo ""
# Select installation to update
@@ -3335,7 +3336,7 @@ main() {
# Check if existing installations are present
local existing_installs=($(detect_installations))
if [ ${#existing_installs[@]} -gt 0 ]; then
print_warning "⚠️ Found ${#existing_installs[@]} existing PatchMon installation(s):"
print_warning "Found ${#existing_installs[@]} existing PatchMon installation(s):"
for install in "${existing_installs[@]}"; do
print_info " - $install"
done