diff --git a/backend/env.example b/backend/env.example index 5a9a6a7..e5a0eab 100644 --- a/backend/env.example +++ b/backend/env.example @@ -3,6 +3,13 @@ DATABASE_URL="postgresql://patchmon_user:your-password-here@localhost:5432/patch PM_DB_CONN_MAX_ATTEMPTS=30 PM_DB_CONN_WAIT_INTERVAL=2 +# Database Connection Pool Configuration (Prisma) +DB_CONNECTION_LIMIT=30 # Maximum connections per instance (default: 30) +DB_POOL_TIMEOUT=20 # Seconds to wait for available connection (default: 20) +DB_CONNECT_TIMEOUT=10 # Seconds to wait for initial connection (default: 10) +DB_IDLE_TIMEOUT=300 # Seconds before closing idle connections (default: 300) +DB_MAX_LIFETIME=1800 # Maximum lifetime of a connection in seconds (default: 1800) + # JWT Configuration JWT_SECRET=your-secure-random-secret-key-change-this-in-production JWT_EXPIRES_IN=1h diff --git a/backend/src/config/prisma.js b/backend/src/config/prisma.js index 6908c2a..3a71075 100644 --- a/backend/src/config/prisma.js +++ b/backend/src/config/prisma.js @@ -16,12 +16,28 @@ function getOptimizedDatabaseUrl() { // Parse the URL const url = new URL(originalUrl); - // Add connection pooling parameters for multiple instances - url.searchParams.set("connection_limit", "5"); // Reduced from default 10 - url.searchParams.set("pool_timeout", "10"); // 10 seconds - url.searchParams.set("connect_timeout", "10"); // 10 seconds - url.searchParams.set("idle_timeout", "300"); // 5 minutes - url.searchParams.set("max_lifetime", "1800"); // 30 minutes + // Add connection pooling parameters - configurable via environment variables + const connectionLimit = process.env.DB_CONNECTION_LIMIT || "30"; + const poolTimeout = process.env.DB_POOL_TIMEOUT || "20"; + const connectTimeout = process.env.DB_CONNECT_TIMEOUT || "10"; + const idleTimeout = process.env.DB_IDLE_TIMEOUT || "300"; + const maxLifetime = process.env.DB_MAX_LIFETIME || "1800"; + + url.searchParams.set("connection_limit", connectionLimit); + url.searchParams.set("pool_timeout", poolTimeout); + url.searchParams.set("connect_timeout", connectTimeout); + url.searchParams.set("idle_timeout", idleTimeout); + url.searchParams.set("max_lifetime", maxLifetime); + + // Log connection pool settings in development/debug mode + if ( + process.env.ENABLE_LOGGING === "true" || + process.env.LOG_LEVEL === "debug" + ) { + console.log( + `[Database Pool] connection_limit=${connectionLimit}, pool_timeout=${poolTimeout}s, connect_timeout=${connectTimeout}s`, + ); + } return url.toString(); } diff --git a/backend/src/routes/automationRoutes.js b/backend/src/routes/automationRoutes.js index f216aff..9d9dad9 100644 --- a/backend/src/routes/automationRoutes.js +++ b/backend/src/routes/automationRoutes.js @@ -218,6 +218,30 @@ router.post( }, ); +// Trigger manual Docker inventory cleanup +router.post( + "/trigger/docker-inventory-cleanup", + authenticateToken, + async (_req, res) => { + try { + const job = await queueManager.triggerDockerInventoryCleanup(); + res.json({ + success: true, + data: { + jobId: job.id, + message: "Docker inventory cleanup triggered successfully", + }, + }); + } catch (error) { + console.error("Error triggering Docker inventory cleanup:", error); + res.status(500).json({ + success: false, + error: "Failed to trigger Docker inventory cleanup", + }); + } + }, +); + // Get queue health status router.get("/health", authenticateToken, async (_req, res) => { try { @@ -274,6 +298,7 @@ router.get("/overview", authenticateToken, async (_req, res) => { queueManager.getRecentJobs(QUEUE_NAMES.SESSION_CLEANUP, 1), 
queueManager.getRecentJobs(QUEUE_NAMES.ORPHANED_REPO_CLEANUP, 1), queueManager.getRecentJobs(QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP, 1), + queueManager.getRecentJobs(QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP, 1), queueManager.getRecentJobs(QUEUE_NAMES.AGENT_COMMANDS, 1), ]); @@ -283,19 +308,22 @@ router.get("/overview", authenticateToken, async (_req, res) => { stats[QUEUE_NAMES.GITHUB_UPDATE_CHECK].delayed + stats[QUEUE_NAMES.SESSION_CLEANUP].delayed + stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].delayed + - stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].delayed, + stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].delayed + + stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].delayed, runningTasks: stats[QUEUE_NAMES.GITHUB_UPDATE_CHECK].active + stats[QUEUE_NAMES.SESSION_CLEANUP].active + stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].active + - stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].active, + stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].active + + stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].active, failedTasks: stats[QUEUE_NAMES.GITHUB_UPDATE_CHECK].failed + stats[QUEUE_NAMES.SESSION_CLEANUP].failed + stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].failed + - stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].failed, + stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].failed + + stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].failed, totalAutomations: Object.values(stats).reduce((sum, queueStats) => { return ( @@ -375,10 +403,11 @@ router.get("/overview", authenticateToken, async (_req, res) => { stats: stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP], }, { - name: "Collect Host Statistics", - queue: QUEUE_NAMES.AGENT_COMMANDS, - description: "Collects package statistics from connected agents only", - schedule: `Every ${settings.update_interval} minutes (Agent-driven)`, + name: "Docker Inventory Cleanup", + queue: QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP, + description: + "Removes Docker containers and images for non-existent hosts", + schedule: "Daily at 4 AM", lastRun: recentJobs[4][0]?.finishedOn ? new Date(recentJobs[4][0].finishedOn).toLocaleString() : "Never", @@ -388,6 +417,22 @@ router.get("/overview", authenticateToken, async (_req, res) => { : recentJobs[4][0] ? "Success" : "Never run", + stats: stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP], + }, + { + name: "Collect Host Statistics", + queue: QUEUE_NAMES.AGENT_COMMANDS, + description: "Collects package statistics from connected agents only", + schedule: `Every ${settings.update_interval} minutes (Agent-driven)`, + lastRun: recentJobs[5][0]?.finishedOn + ? new Date(recentJobs[5][0].finishedOn).toLocaleString() + : "Never", + lastRunTimestamp: recentJobs[5][0]?.finishedOn || 0, + status: recentJobs[5][0]?.failedReason + ? "Failed" + : recentJobs[5][0] + ? 
"Success" + : "Never run", stats: stats[QUEUE_NAMES.AGENT_COMMANDS], }, ].sort((a, b) => { diff --git a/backend/src/routes/dockerRoutes.js b/backend/src/routes/dockerRoutes.js index 271e33a..5c60bbf 100644 --- a/backend/src/routes/dockerRoutes.js +++ b/backend/src/routes/dockerRoutes.js @@ -522,7 +522,8 @@ router.get("/updates", authenticateToken, async (req, res) => { } }); -// POST /api/v1/docker/collect - Collect Docker data from agent +// POST /api/v1/docker/collect - Collect Docker data from agent (DEPRECATED - kept for backward compatibility) +// New agents should use POST /api/v1/integrations/docker router.post("/collect", async (req, res) => { try { const { apiId, apiKey, containers, images, updates } = req.body; @@ -745,6 +746,322 @@ router.post("/collect", async (req, res) => { } }); +// POST /api/v1/integrations/docker - New integration endpoint for Docker data collection +router.post("/../integrations/docker", async (req, res) => { + try { + const apiId = req.headers["x-api-id"]; + const apiKey = req.headers["x-api-key"]; + const { + containers, + images, + updates, + daemon_info, + hostname, + machine_id, + agent_version, + } = req.body; + + console.log( + `[Docker Integration] Received data from ${hostname || machine_id}`, + ); + + // Validate API credentials + const host = await prisma.hosts.findFirst({ + where: { api_id: apiId, api_key: apiKey }, + }); + + if (!host) { + console.warn("[Docker Integration] Invalid API credentials"); + return res.status(401).json({ error: "Invalid API credentials" }); + } + + console.log( + `[Docker Integration] Processing for host: ${host.friendly_name}`, + ); + + const now = new Date(); + + // Helper function to validate and parse dates + const parseDate = (dateString) => { + if (!dateString) return now; + const date = new Date(dateString); + return Number.isNaN(date.getTime()) ? now : date; + }; + + let containersProcessed = 0; + let imagesProcessed = 0; + let updatesProcessed = 0; + + // Process containers + if (containers && Array.isArray(containers)) { + console.log( + `[Docker Integration] Processing ${containers.length} containers`, + ); + for (const containerData of containers) { + const containerId = uuidv4(); + + // Find or create image + let imageId = null; + if (containerData.image_repository && containerData.image_tag) { + const image = await prisma.docker_images.upsert({ + where: { + repository_tag_image_id: { + repository: containerData.image_repository, + tag: containerData.image_tag, + image_id: containerData.image_id || "unknown", + }, + }, + update: { + last_checked: now, + updated_at: now, + }, + create: { + id: uuidv4(), + repository: containerData.image_repository, + tag: containerData.image_tag, + image_id: containerData.image_id || "unknown", + source: containerData.image_source || "docker-hub", + created_at: parseDate(containerData.created_at), + updated_at: now, + }, + }); + imageId = image.id; + } + + // Upsert container + await prisma.docker_containers.upsert({ + where: { + host_id_container_id: { + host_id: host.id, + container_id: containerData.container_id, + }, + }, + update: { + name: containerData.name, + image_id: imageId, + image_name: containerData.image_name, + image_tag: containerData.image_tag || "latest", + status: containerData.status, + state: containerData.state || containerData.status, + ports: containerData.ports || null, + started_at: containerData.started_at + ? 
parseDate(containerData.started_at) + : null, + updated_at: now, + last_checked: now, + }, + create: { + id: containerId, + host_id: host.id, + container_id: containerData.container_id, + name: containerData.name, + image_id: imageId, + image_name: containerData.image_name, + image_tag: containerData.image_tag || "latest", + status: containerData.status, + state: containerData.state || containerData.status, + ports: containerData.ports || null, + created_at: parseDate(containerData.created_at), + started_at: containerData.started_at + ? parseDate(containerData.started_at) + : null, + updated_at: now, + }, + }); + containersProcessed++; + } + } + + // Process standalone images + if (images && Array.isArray(images)) { + console.log(`[Docker Integration] Processing ${images.length} images`); + for (const imageData of images) { + await prisma.docker_images.upsert({ + where: { + repository_tag_image_id: { + repository: imageData.repository, + tag: imageData.tag, + image_id: imageData.image_id, + }, + }, + update: { + size_bytes: imageData.size_bytes + ? BigInt(imageData.size_bytes) + : null, + digest: imageData.digest || null, + last_checked: now, + updated_at: now, + }, + create: { + id: uuidv4(), + repository: imageData.repository, + tag: imageData.tag, + image_id: imageData.image_id, + digest: imageData.digest, + size_bytes: imageData.size_bytes + ? BigInt(imageData.size_bytes) + : null, + source: imageData.source || "docker-hub", + created_at: parseDate(imageData.created_at), + updated_at: now, + }, + }); + imagesProcessed++; + } + } + + // Process updates + if (updates && Array.isArray(updates)) { + console.log(`[Docker Integration] Processing ${updates.length} updates`); + for (const updateData of updates) { + // Find the image by repository and image_id + const image = await prisma.docker_images.findFirst({ + where: { + repository: updateData.repository, + tag: updateData.current_tag, + image_id: updateData.image_id, + }, + }); + + if (image) { + // Store digest info in changelog_url field as JSON + const digestInfo = JSON.stringify({ + method: "digest_comparison", + current_digest: updateData.current_digest, + available_digest: updateData.available_digest, + }); + + // Upsert the update record + await prisma.docker_image_updates.upsert({ + where: { + image_id_available_tag: { + image_id: image.id, + available_tag: updateData.available_tag, + }, + }, + update: { + updated_at: now, + changelog_url: digestInfo, + severity: "digest_changed", + }, + create: { + id: uuidv4(), + image_id: image.id, + current_tag: updateData.current_tag, + available_tag: updateData.available_tag, + severity: "digest_changed", + changelog_url: digestInfo, + updated_at: now, + }, + }); + updatesProcessed++; + } + } + } + + console.log( + `[Docker Integration] Successfully processed: ${containersProcessed} containers, ${imagesProcessed} images, ${updatesProcessed} updates`, + ); + + res.json({ + message: "Docker data collected successfully", + containers_received: containersProcessed, + images_received: imagesProcessed, + updates_found: updatesProcessed, + }); + } catch (error) { + console.error("[Docker Integration] Error collecting Docker data:", error); + console.error("[Docker Integration] Error stack:", error.stack); + res.status(500).json({ + error: "Failed to collect Docker data", + message: error.message, + details: process.env.NODE_ENV === "development" ? 
error.stack : undefined, + }); + } +}); + +// DELETE /api/v1/docker/containers/:id - Delete a container +router.delete("/containers/:id", authenticateToken, async (req, res) => { + try { + const { id } = req.params; + + // Check if container exists + const container = await prisma.docker_containers.findUnique({ + where: { id }, + }); + + if (!container) { + return res.status(404).json({ error: "Container not found" }); + } + + // Delete the container + await prisma.docker_containers.delete({ + where: { id }, + }); + + console.log(`๐Ÿ—‘๏ธ Deleted container: ${container.name} (${id})`); + + res.json({ + success: true, + message: `Container ${container.name} deleted successfully`, + }); + } catch (error) { + console.error("Error deleting container:", error); + res.status(500).json({ error: "Failed to delete container" }); + } +}); + +// DELETE /api/v1/docker/images/:id - Delete an image +router.delete("/images/:id", authenticateToken, async (req, res) => { + try { + const { id } = req.params; + + // Check if image exists + const image = await prisma.docker_images.findUnique({ + where: { id }, + include: { + _count: { + select: { + docker_containers: true, + }, + }, + }, + }); + + if (!image) { + return res.status(404).json({ error: "Image not found" }); + } + + // Check if image is in use by containers + if (image._count.docker_containers > 0) { + return res.status(400).json({ + error: `Cannot delete image: ${image._count.docker_containers} container(s) are using this image`, + containersCount: image._count.docker_containers, + }); + } + + // Delete image updates first + await prisma.docker_image_updates.deleteMany({ + where: { image_id: id }, + }); + + // Delete the image + await prisma.docker_images.delete({ + where: { id }, + }); + + console.log(`๐Ÿ—‘๏ธ Deleted image: ${image.repository}:${image.tag} (${id})`); + + res.json({ + success: true, + message: `Image ${image.repository}:${image.tag} deleted successfully`, + }); + } catch (error) { + console.error("Error deleting image:", error); + res.status(500).json({ error: "Failed to delete image" }); + } +}); + // GET /api/v1/docker/agent - Serve the Docker agent installation script router.get("/agent", async (_req, res) => { try { diff --git a/backend/src/routes/hostRoutes.js b/backend/src/routes/hostRoutes.js index bfa2905..2c11ac7 100644 --- a/backend/src/routes/hostRoutes.js +++ b/backend/src/routes/hostRoutes.js @@ -356,6 +356,29 @@ router.post( }); } catch (error) { console.error("Host creation error:", error); + + // Check if error is related to connection pool exhaustion + if ( + error.message && + (error.message.includes("connection pool") || + error.message.includes("Timed out fetching") || + error.message.includes("pool timeout")) + ) { + console.error("โš ๏ธ DATABASE CONNECTION POOL EXHAUSTED!"); + console.error( + "โš ๏ธ Current limit: DB_CONNECTION_LIMIT=" + + (process.env.DB_CONNECTION_LIMIT || "30"), + ); + console.error( + "โš ๏ธ Pool timeout: DB_POOL_TIMEOUT=" + + (process.env.DB_POOL_TIMEOUT || "20") + + "s", + ); + console.error( + "โš ๏ธ Suggestion: Increase DB_CONNECTION_LIMIT in your .env file", + ); + } + res.status(500).json({ error: "Failed to create host" }); } }, @@ -786,19 +809,41 @@ router.get("/info", validateApiCredentials, async (req, res) => { // Ping endpoint for health checks (now uses API credentials) router.post("/ping", validateApiCredentials, async (req, res) => { try { - // Update last update timestamp + const now = new Date(); + const lastUpdate = req.hostRecord.last_update; + + // Detect if 
this is an agent startup (first ping or after long absence) + const timeSinceLastUpdate = lastUpdate ? now - lastUpdate : null; + const isStartup = + !timeSinceLastUpdate || timeSinceLastUpdate > 5 * 60 * 1000; // 5 minutes + + // Log agent startup + if (isStartup) { + console.log( + `๐Ÿš€ Agent startup detected: ${req.hostRecord.friendly_name} (${req.hostRecord.hostname || req.hostRecord.api_id})`, + ); + + // Check if status was previously offline + if (req.hostRecord.status === "offline") { + console.log(`โœ… Agent back online: ${req.hostRecord.friendly_name}`); + } + } + + // Update last update timestamp and set status to active await prisma.hosts.update({ where: { id: req.hostRecord.id }, data: { - last_update: new Date(), - updated_at: new Date(), + last_update: now, + updated_at: now, + status: "active", }, }); const response = { message: "Ping successful", - timestamp: new Date().toISOString(), + timestamp: now.toISOString(), friendlyName: req.hostRecord.friendly_name, + agentStartup: isStartup, }; // Check if this is a crontab update trigger diff --git a/backend/src/routes/integrationRoutes.js b/backend/src/routes/integrationRoutes.js new file mode 100644 index 0000000..76fbb6f --- /dev/null +++ b/backend/src/routes/integrationRoutes.js @@ -0,0 +1,242 @@ +const express = require("express"); +const { getPrismaClient } = require("../config/prisma"); +const { v4: uuidv4 } = require("uuid"); + +const prisma = getPrismaClient(); +const router = express.Router(); + +// POST /api/v1/integrations/docker - Docker data collection endpoint +router.post("/docker", async (req, res) => { + try { + const apiId = req.headers["x-api-id"]; + const apiKey = req.headers["x-api-key"]; + const { + containers, + images, + updates, + daemon_info, + hostname, + machine_id, + agent_version, + } = req.body; + + console.log( + `[Docker Integration] Received data from ${hostname || machine_id}`, + ); + + // Validate API credentials + const host = await prisma.hosts.findFirst({ + where: { api_id: apiId, api_key: apiKey }, + }); + + if (!host) { + console.warn("[Docker Integration] Invalid API credentials"); + return res.status(401).json({ error: "Invalid API credentials" }); + } + + console.log( + `[Docker Integration] Processing for host: ${host.friendly_name}`, + ); + + const now = new Date(); + + // Helper function to validate and parse dates + const parseDate = (dateString) => { + if (!dateString) return now; + const date = new Date(dateString); + return Number.isNaN(date.getTime()) ? 
now : date; + }; + + let containersProcessed = 0; + let imagesProcessed = 0; + let updatesProcessed = 0; + + // Process containers + if (containers && Array.isArray(containers)) { + console.log( + `[Docker Integration] Processing ${containers.length} containers`, + ); + for (const containerData of containers) { + const containerId = uuidv4(); + + // Find or create image + let imageId = null; + if (containerData.image_repository && containerData.image_tag) { + const image = await prisma.docker_images.upsert({ + where: { + repository_tag_image_id: { + repository: containerData.image_repository, + tag: containerData.image_tag, + image_id: containerData.image_id || "unknown", + }, + }, + update: { + last_checked: now, + updated_at: now, + }, + create: { + id: uuidv4(), + repository: containerData.image_repository, + tag: containerData.image_tag, + image_id: containerData.image_id || "unknown", + source: containerData.image_source || "docker-hub", + created_at: parseDate(containerData.created_at), + updated_at: now, + }, + }); + imageId = image.id; + } + + // Upsert container + await prisma.docker_containers.upsert({ + where: { + host_id_container_id: { + host_id: host.id, + container_id: containerData.container_id, + }, + }, + update: { + name: containerData.name, + image_id: imageId, + image_name: containerData.image_name, + image_tag: containerData.image_tag || "latest", + status: containerData.status, + state: containerData.state || containerData.status, + ports: containerData.ports || null, + started_at: containerData.started_at + ? parseDate(containerData.started_at) + : null, + updated_at: now, + last_checked: now, + }, + create: { + id: containerId, + host_id: host.id, + container_id: containerData.container_id, + name: containerData.name, + image_id: imageId, + image_name: containerData.image_name, + image_tag: containerData.image_tag || "latest", + status: containerData.status, + state: containerData.state || containerData.status, + ports: containerData.ports || null, + created_at: parseDate(containerData.created_at), + started_at: containerData.started_at + ? parseDate(containerData.started_at) + : null, + updated_at: now, + }, + }); + containersProcessed++; + } + } + + // Process standalone images + if (images && Array.isArray(images)) { + console.log(`[Docker Integration] Processing ${images.length} images`); + for (const imageData of images) { + await prisma.docker_images.upsert({ + where: { + repository_tag_image_id: { + repository: imageData.repository, + tag: imageData.tag, + image_id: imageData.image_id, + }, + }, + update: { + size_bytes: imageData.size_bytes + ? BigInt(imageData.size_bytes) + : null, + digest: imageData.digest || null, + last_checked: now, + updated_at: now, + }, + create: { + id: uuidv4(), + repository: imageData.repository, + tag: imageData.tag, + image_id: imageData.image_id, + digest: imageData.digest, + size_bytes: imageData.size_bytes + ? 
BigInt(imageData.size_bytes) + : null, + source: imageData.source || "docker-hub", + created_at: parseDate(imageData.created_at), + updated_at: now, + }, + }); + imagesProcessed++; + } + } + + // Process updates + if (updates && Array.isArray(updates)) { + console.log(`[Docker Integration] Processing ${updates.length} updates`); + for (const updateData of updates) { + // Find the image by repository and image_id + const image = await prisma.docker_images.findFirst({ + where: { + repository: updateData.repository, + tag: updateData.current_tag, + image_id: updateData.image_id, + }, + }); + + if (image) { + // Store digest info in changelog_url field as JSON + const digestInfo = JSON.stringify({ + method: "digest_comparison", + current_digest: updateData.current_digest, + available_digest: updateData.available_digest, + }); + + // Upsert the update record + await prisma.docker_image_updates.upsert({ + where: { + image_id_available_tag: { + image_id: image.id, + available_tag: updateData.available_tag, + }, + }, + update: { + updated_at: now, + changelog_url: digestInfo, + severity: "digest_changed", + }, + create: { + id: uuidv4(), + image_id: image.id, + current_tag: updateData.current_tag, + available_tag: updateData.available_tag, + severity: "digest_changed", + changelog_url: digestInfo, + updated_at: now, + }, + }); + updatesProcessed++; + } + } + } + + console.log( + `[Docker Integration] Successfully processed: ${containersProcessed} containers, ${imagesProcessed} images, ${updatesProcessed} updates`, + ); + + res.json({ + message: "Docker data collected successfully", + containers_received: containersProcessed, + images_received: imagesProcessed, + updates_found: updatesProcessed, + }); + } catch (error) { + console.error("[Docker Integration] Error collecting Docker data:", error); + console.error("[Docker Integration] Error stack:", error.stack); + res.status(500).json({ + error: "Failed to collect Docker data", + message: error.message, + details: process.env.NODE_ENV === "development" ? 
error.stack : undefined, + }); + } +}); + +module.exports = router; diff --git a/backend/src/routes/versionRoutes.js b/backend/src/routes/versionRoutes.js index 72a39ac..23217a1 100644 --- a/backend/src/routes/versionRoutes.js +++ b/backend/src/routes/versionRoutes.js @@ -14,13 +14,16 @@ const router = express.Router(); function getCurrentVersion() { try { const packageJson = require("../../package.json"); - return packageJson?.version || "1.3.0"; + if (!packageJson?.version) { + throw new Error("Version not found in package.json"); + } + return packageJson.version; } catch (packageError) { - console.warn( - "Could not read version from package.json, using fallback:", + console.error( + "Could not read version from package.json:", packageError.message, ); - return "1.3.0"; + return "unknown"; } } diff --git a/backend/src/server.js b/backend/src/server.js index d13d3de..e46ef65 100644 --- a/backend/src/server.js +++ b/backend/src/server.js @@ -66,6 +66,7 @@ const autoEnrollmentRoutes = require("./routes/autoEnrollmentRoutes"); const gethomepageRoutes = require("./routes/gethomepageRoutes"); const automationRoutes = require("./routes/automationRoutes"); const dockerRoutes = require("./routes/dockerRoutes"); +const integrationRoutes = require("./routes/integrationRoutes"); const wsRoutes = require("./routes/wsRoutes"); const agentVersionRoutes = require("./routes/agentVersionRoutes"); const { initSettings } = require("./services/settingsService"); @@ -471,6 +472,7 @@ app.use( app.use(`/api/${apiVersion}/gethomepage`, gethomepageRoutes); app.use(`/api/${apiVersion}/automation`, automationRoutes); app.use(`/api/${apiVersion}/docker`, dockerRoutes); +app.use(`/api/${apiVersion}/integrations`, integrationRoutes); app.use(`/api/${apiVersion}/ws`, wsRoutes); app.use(`/api/${apiVersion}/agent`, agentVersionRoutes); diff --git a/backend/src/services/agentVersionService.js b/backend/src/services/agentVersionService.js index 8187cb9..bd2a53a 100644 --- a/backend/src/services/agentVersionService.js +++ b/backend/src/services/agentVersionService.js @@ -428,26 +428,29 @@ class AgentVersionService { async getVersionInfo() { let hasUpdate = false; let updateStatus = "unknown"; - let effectiveLatestVersion = this.currentVersion; // Always use local version if available - // If we have a local version, use it as the latest regardless of GitHub - if (this.currentVersion) { - effectiveLatestVersion = this.currentVersion; + // Latest version should ALWAYS come from GitHub, not from local binaries + // currentVersion = what's installed locally + // latestVersion = what's available on GitHub + if (this.latestVersion) { + console.log(`๐Ÿ“ฆ Latest version from GitHub: ${this.latestVersion}`); + } else { console.log( - `๐Ÿ”„ Using local agent version ${this.currentVersion} as latest`, - ); - } else if (this.latestVersion) { - // Fallback to GitHub version only if no local version - effectiveLatestVersion = this.latestVersion; - console.log( - `๐Ÿ”„ No local version found, using GitHub version ${this.latestVersion}`, + `โš ๏ธ No GitHub release version available (API may be unavailable)`, ); } - if (this.currentVersion && effectiveLatestVersion) { + if (this.currentVersion) { + console.log(`๐Ÿ’พ Current local agent version: ${this.currentVersion}`); + } else { + console.log(`โš ๏ธ No local agent binary found`); + } + + // Determine update status by comparing current vs latest (from GitHub) + if (this.currentVersion && this.latestVersion) { const comparison = compareVersions( this.currentVersion, - 
effectiveLatestVersion, + this.latestVersion, ); if (comparison < 0) { hasUpdate = true; @@ -459,25 +462,25 @@ class AgentVersionService { hasUpdate = false; updateStatus = "up-to-date"; } - } else if (effectiveLatestVersion && !this.currentVersion) { + } else if (this.latestVersion && !this.currentVersion) { hasUpdate = true; updateStatus = "no-agent"; - } else if (this.currentVersion && !effectiveLatestVersion) { + } else if (this.currentVersion && !this.latestVersion) { // We have a current version but no latest version (GitHub API unavailable) hasUpdate = false; updateStatus = "github-unavailable"; - } else if (!this.currentVersion && !effectiveLatestVersion) { + } else if (!this.currentVersion && !this.latestVersion) { updateStatus = "no-data"; } return { currentVersion: this.currentVersion, - latestVersion: effectiveLatestVersion, + latestVersion: this.latestVersion, // Always return GitHub version, not local hasUpdate: hasUpdate, updateStatus: updateStatus, lastChecked: this.lastChecked, supportedArchitectures: this.supportedArchitectures, - status: effectiveLatestVersion ? "ready" : "no-releases", + status: this.latestVersion ? "ready" : "no-releases", }; } diff --git a/backend/src/services/agentWs.js b/backend/src/services/agentWs.js index 9b28e81..2aa70fb 100644 --- a/backend/src/services/agentWs.js +++ b/backend/src/services/agentWs.js @@ -99,8 +99,22 @@ function init(server, prismaClient) { // Notify subscribers of connection notifyConnectionChange(apiId, true); - ws.on("message", () => { - // Currently we don't need to handle agent->server messages + ws.on("message", async (data) => { + // Handle incoming messages from agent (e.g., Docker status updates) + try { + const message = JSON.parse(data.toString()); + + if (message.type === "docker_status") { + // Handle Docker container status events + await handleDockerStatusEvent(apiId, message); + } + // Add more message types here as needed + } catch (err) { + console.error( + `[agent-ws] error parsing message from ${apiId}:`, + err, + ); + } }); ws.on("close", () => { @@ -255,6 +269,62 @@ function subscribeToConnectionChanges(apiId, callback) { }; } +// Handle Docker container status events from agent +async function handleDockerStatusEvent(apiId, message) { + try { + const { event, container_id, name, status, timestamp } = message; + + console.log( + `[Docker Event] ${apiId}: Container ${name} (${container_id}) - ${status}`, + ); + + // Find the host + const host = await prisma.hosts.findUnique({ + where: { api_id: apiId }, + }); + + if (!host) { + console.error(`[Docker Event] Host not found for api_id: ${apiId}`); + return; + } + + // Update container status in database + const container = await prisma.docker_containers.findUnique({ + where: { + host_id_container_id: { + host_id: host.id, + container_id: container_id, + }, + }, + }); + + if (container) { + await prisma.docker_containers.update({ + where: { id: container.id }, + data: { + status: status, + state: status, + updated_at: new Date(timestamp || Date.now()), + last_checked: new Date(), + }, + }); + + console.log( + `[Docker Event] Updated container ${name} status to ${status}`, + ); + } else { + console.log( + `[Docker Event] Container ${name} not found in database (may be new)`, + ); + } + + // TODO: Broadcast to connected dashboard clients via SSE or WebSocket + // This would notify the frontend UI in real-time + } catch (error) { + console.error(`[Docker Event] Error handling Docker status event:`, error); + } +} + module.exports = { init, 
broadcastSettingsUpdate, diff --git a/backend/src/services/automation/dockerInventoryCleanup.js b/backend/src/services/automation/dockerInventoryCleanup.js new file mode 100644 index 0000000..943bc9e --- /dev/null +++ b/backend/src/services/automation/dockerInventoryCleanup.js @@ -0,0 +1,164 @@ +const { prisma } = require("./shared/prisma"); + +/** + * Docker Inventory Cleanup Automation + * Removes Docker containers and images for hosts that no longer exist + */ +class DockerInventoryCleanup { + constructor(queueManager) { + this.queueManager = queueManager; + this.queueName = "docker-inventory-cleanup"; + } + + /** + * Process Docker inventory cleanup job + */ + async process(_job) { + const startTime = Date.now(); + console.log("๐Ÿงน Starting Docker inventory cleanup..."); + + try { + // Step 1: Find and delete orphaned containers (containers for non-existent hosts) + const orphanedContainers = await prisma.docker_containers.findMany({ + where: { + host_id: { + // Find containers where the host doesn't exist + notIn: await prisma.hosts + .findMany({ select: { id: true } }) + .then((hosts) => hosts.map((h) => h.id)), + }, + }, + }); + + let deletedContainersCount = 0; + const deletedContainers = []; + + for (const container of orphanedContainers) { + try { + await prisma.docker_containers.delete({ + where: { id: container.id }, + }); + deletedContainersCount++; + deletedContainers.push({ + id: container.id, + container_id: container.container_id, + name: container.name, + image_name: container.image_name, + host_id: container.host_id, + }); + console.log( + `๐Ÿ—‘๏ธ Deleted orphaned container: ${container.name} (host_id: ${container.host_id})`, + ); + } catch (deleteError) { + console.error( + `โŒ Failed to delete container ${container.id}:`, + deleteError.message, + ); + } + } + + // Step 2: Find and delete orphaned images (images with no containers using them) + const orphanedImages = await prisma.docker_images.findMany({ + where: { + docker_containers: { + none: {}, + }, + }, + include: { + _count: { + select: { + docker_containers: true, + docker_image_updates: true, + }, + }, + }, + }); + + let deletedImagesCount = 0; + const deletedImages = []; + + for (const image of orphanedImages) { + try { + // First delete any image updates associated with this image + if (image._count.docker_image_updates > 0) { + await prisma.docker_image_updates.deleteMany({ + where: { image_id: image.id }, + }); + } + + // Then delete the image itself + await prisma.docker_images.delete({ + where: { id: image.id }, + }); + deletedImagesCount++; + deletedImages.push({ + id: image.id, + repository: image.repository, + tag: image.tag, + image_id: image.image_id, + }); + console.log( + `๐Ÿ—‘๏ธ Deleted orphaned image: ${image.repository}:${image.tag}`, + ); + } catch (deleteError) { + console.error( + `โŒ Failed to delete image ${image.id}:`, + deleteError.message, + ); + } + } + + const executionTime = Date.now() - startTime; + console.log( + `โœ… Docker inventory cleanup completed in ${executionTime}ms - Deleted ${deletedContainersCount} containers and ${deletedImagesCount} images`, + ); + + return { + success: true, + deletedContainersCount, + deletedImagesCount, + deletedContainers, + deletedImages, + executionTime, + }; + } catch (error) { + const executionTime = Date.now() - startTime; + console.error( + `โŒ Docker inventory cleanup failed after ${executionTime}ms:`, + error.message, + ); + throw error; + } + } + + /** + * Schedule recurring Docker inventory cleanup (daily at 4 AM) + */ + async 
schedule() { + const job = await this.queueManager.queues[this.queueName].add( + "docker-inventory-cleanup", + {}, + { + repeat: { cron: "0 4 * * *" }, // Daily at 4 AM + jobId: "docker-inventory-cleanup-recurring", + }, + ); + console.log("โœ… Docker inventory cleanup scheduled"); + return job; + } + + /** + * Trigger manual Docker inventory cleanup + */ + async triggerManual() { + const job = await this.queueManager.queues[this.queueName].add( + "docker-inventory-cleanup-manual", + {}, + { priority: 1 }, + ); + console.log("โœ… Manual Docker inventory cleanup triggered"); + return job; + } +} + +module.exports = DockerInventoryCleanup; diff --git a/backend/src/services/automation/githubUpdateCheck.js b/backend/src/services/automation/githubUpdateCheck.js index 8148d0c..2f5a3d4 100644 --- a/backend/src/services/automation/githubUpdateCheck.js +++ b/backend/src/services/automation/githubUpdateCheck.js @@ -52,17 +52,24 @@ class GitHubUpdateCheck { } // Read version from package.json - let currentVersion = "1.3.0"; // fallback + let currentVersion = null; try { const packageJson = require("../../../package.json"); if (packageJson?.version) { currentVersion = packageJson.version; } } catch (packageError) { - console.warn( + console.error( "Could not read version from package.json:", packageError.message, ); + throw new Error( + "Could not determine current version from package.json", + ); + } + + if (!currentVersion) { + throw new Error("Version not found in package.json"); } const isUpdateAvailable = diff --git a/backend/src/services/automation/index.js b/backend/src/services/automation/index.js index b8c670c..00441e6 100644 --- a/backend/src/services/automation/index.js +++ b/backend/src/services/automation/index.js @@ -8,6 +8,7 @@ const GitHubUpdateCheck = require("./githubUpdateCheck"); const SessionCleanup = require("./sessionCleanup"); const OrphanedRepoCleanup = require("./orphanedRepoCleanup"); const OrphanedPackageCleanup = require("./orphanedPackageCleanup"); +const DockerInventoryCleanup = require("./dockerInventoryCleanup"); // Queue names const QUEUE_NAMES = { @@ -15,6 +16,7 @@ const QUEUE_NAMES = { SESSION_CLEANUP: "session-cleanup", ORPHANED_REPO_CLEANUP: "orphaned-repo-cleanup", ORPHANED_PACKAGE_CLEANUP: "orphaned-package-cleanup", + DOCKER_INVENTORY_CLEANUP: "docker-inventory-cleanup", AGENT_COMMANDS: "agent-commands", }; @@ -91,6 +93,8 @@ class QueueManager { new OrphanedRepoCleanup(this); this.automations[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP] = new OrphanedPackageCleanup(this); + this.automations[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP] = + new DockerInventoryCleanup(this); console.log("โœ… All automation classes initialized"); } @@ -149,6 +153,15 @@ class QueueManager { workerOptions, ); + // Docker Inventory Cleanup Worker + this.workers[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP] = new Worker( + QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP, + this.automations[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].process.bind( + this.automations[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP], + ), + workerOptions, + ); + // Agent Commands Worker this.workers[QUEUE_NAMES.AGENT_COMMANDS] = new Worker( QUEUE_NAMES.AGENT_COMMANDS, @@ -205,6 +218,7 @@ class QueueManager { await this.automations[QUEUE_NAMES.SESSION_CLEANUP].schedule(); await this.automations[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].schedule(); await this.automations[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].schedule(); + await this.automations[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].schedule(); } /** @@ -228,6 +242,12 @@ class QueueManager { ].triggerManual(); } + 
async triggerDockerInventoryCleanup() { + return this.automations[ + QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP + ].triggerManual(); + } + /** * Get queue statistics */ diff --git a/backend/src/services/automation/shared/utils.js b/backend/src/services/automation/shared/utils.js index 87a7f16..0b877c0 100644 --- a/backend/src/services/automation/shared/utils.js +++ b/backend/src/services/automation/shared/utils.js @@ -33,7 +33,8 @@ async function checkPublicRepo(owner, repo) { try { const httpsRepoUrl = `https://api.github.com/repos/${owner}/${repo}/releases/latest`; - let currentVersion = "1.3.0"; // fallback + // Get current version for User-Agent (or use generic if unavailable) + let currentVersion = "unknown"; try { const packageJson = require("../../../package.json"); if (packageJson?.version) { @@ -41,7 +42,7 @@ async function checkPublicRepo(owner, repo) { } } catch (packageError) { console.warn( - "Could not read version from package.json for User-Agent, using fallback:", + "Could not read version from package.json for User-Agent:", packageError.message, ); } diff --git a/biome.json b/biome.json index abe98bd..29a6487 100644 --- a/biome.json +++ b/biome.json @@ -1,10 +1,13 @@ { - "$schema": "https://biomejs.dev/schemas/2.2.4/schema.json", + "$schema": "https://biomejs.dev/schemas/2.3.0/schema.json", "vcs": { "enabled": true, "clientKind": "git", "useIgnoreFile": true }, + "files": { + "includes": ["**", "!**/*.css"] + }, "formatter": { "enabled": true }, diff --git a/docker/README.md b/docker/README.md index 952b0a5..32409c4 100644 --- a/docker/README.md +++ b/docker/README.md @@ -136,6 +136,24 @@ When you do this, updating to a new version requires manually updating the image | `PM_DB_CONN_MAX_ATTEMPTS` | Maximum database connection attempts | `30` | | `PM_DB_CONN_WAIT_INTERVAL` | Wait interval between connection attempts in seconds | `2` | +##### Database Connection Pool Configuration (Prisma) + +| Variable | Description | Default | +| --------------------- | ---------------------------------------------------------- | ------- | +| `DB_CONNECTION_LIMIT` | Maximum number of database connections per instance | `30` | +| `DB_POOL_TIMEOUT` | Seconds to wait for an available connection before timeout | `20` | +| `DB_CONNECT_TIMEOUT` | Seconds to wait for initial database connection | `10` | +| `DB_IDLE_TIMEOUT` | Seconds before closing idle connections | `300` | +| `DB_MAX_LIFETIME` | Maximum lifetime of a connection in seconds | `1800` | + +> [!TIP] +> The connection pool limit should be adjusted based on your deployment size: +> - **Small deployment (1-10 hosts)**: `DB_CONNECTION_LIMIT=15` is sufficient +> - **Medium deployment (10-50 hosts)**: `DB_CONNECTION_LIMIT=30` (default) +> - **Large deployment (50+ hosts)**: `DB_CONNECTION_LIMIT=50` or higher +> +> Each connection pool serves one backend instance. If you have concurrent operations (multiple users, background jobs, agent checkins), increase the pool size accordingly. 
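For reference, here is a condensed sketch of how these variables are consumed, mirroring `getOptimizedDatabaseUrl()` in `backend/src/config/prisma.js` from this PR: each value is appended to `DATABASE_URL` as a query parameter that Prisma's connection pool reads, so the defaults produce a URL ending in `?connection_limit=30&pool_timeout=20&connect_timeout=10&idle_timeout=300&max_lifetime=1800`.

```js
// Condensed from backend/src/config/prisma.js: pool settings come from the
// environment (falling back to the documented defaults) and are appended to
// DATABASE_URL as query parameters understood by Prisma's connection pool.
function getOptimizedDatabaseUrl() {
	const url = new URL(process.env.DATABASE_URL);
	url.searchParams.set("connection_limit", process.env.DB_CONNECTION_LIMIT || "30");
	url.searchParams.set("pool_timeout", process.env.DB_POOL_TIMEOUT || "20");
	url.searchParams.set("connect_timeout", process.env.DB_CONNECT_TIMEOUT || "10");
	url.searchParams.set("idle_timeout", process.env.DB_IDLE_TIMEOUT || "300");
	url.searchParams.set("max_lifetime", process.env.DB_MAX_LIFETIME || "1800");
	return url.toString();
}
```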
+ ##### Redis Configuration | Variable | Description | Default | diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml index 1568cc1..908722a 100644 --- a/docker/docker-compose.dev.yml +++ b/docker/docker-compose.dev.yml @@ -50,6 +50,10 @@ services: SERVER_HOST: localhost SERVER_PORT: 3000 CORS_ORIGIN: http://localhost:3000 + # Database Connection Pool Configuration + DB_CONNECTION_LIMIT: 30 + DB_POOL_TIMEOUT: 20 + DB_CONNECT_TIMEOUT: 10 # Rate Limiting (times in milliseconds) RATE_LIMIT_WINDOW_MS: 900000 RATE_LIMIT_MAX: 5000 diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 29f08e9..ea1dc04 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -56,6 +56,10 @@ services: SERVER_HOST: localhost SERVER_PORT: 3000 CORS_ORIGIN: http://localhost:3000 + # Database Connection Pool Configuration + DB_CONNECTION_LIMIT: 30 + DB_POOL_TIMEOUT: 20 + DB_CONNECT_TIMEOUT: 10 # Rate Limiting (times in milliseconds) RATE_LIMIT_WINDOW_MS: 900000 RATE_LIMIT_MAX: 5000 diff --git a/frontend/src/components/settings/UsersTab.jsx b/frontend/src/components/settings/UsersTab.jsx index 1b81314..39f7289 100644 --- a/frontend/src/components/settings/UsersTab.jsx +++ b/frontend/src/components/settings/UsersTab.jsx @@ -54,7 +54,7 @@ const UsersTab = () => { }); // Update user mutation - const _updateUserMutation = useMutation({ + const updateUserMutation = useMutation({ mutationFn: ({ id, data }) => adminUsersAPI.update(id, data), onSuccess: () => { queryClient.invalidateQueries(["users"]); diff --git a/frontend/src/pages/Automation.jsx b/frontend/src/pages/Automation.jsx index 1e593af..ede20c9 100644 --- a/frontend/src/pages/Automation.jsx +++ b/frontend/src/pages/Automation.jsx @@ -169,6 +169,20 @@ const Automation = () => { year: "numeric", }); } + if (schedule === "Daily at 4 AM") { + const now = new Date(); + const tomorrow = new Date(now); + tomorrow.setDate(tomorrow.getDate() + 1); + tomorrow.setHours(4, 0, 0, 0); + return tomorrow.toLocaleString([], { + hour12: true, + hour: "numeric", + minute: "2-digit", + day: "numeric", + month: "numeric", + year: "numeric", + }); + } if (schedule === "Every hour") { const now = new Date(); const nextHour = new Date(now); @@ -209,6 +223,13 @@ const Automation = () => { tomorrow.setHours(3, 0, 0, 0); return tomorrow.getTime(); } + if (schedule === "Daily at 4 AM") { + const now = new Date(); + const tomorrow = new Date(now); + tomorrow.setDate(tomorrow.getDate() + 1); + tomorrow.setHours(4, 0, 0, 0); + return tomorrow.getTime(); + } if (schedule === "Every hour") { const now = new Date(); const nextHour = new Date(now); @@ -269,6 +290,8 @@ const Automation = () => { endpoint = "/automation/trigger/orphaned-repo-cleanup"; } else if (jobType === "orphaned-packages") { endpoint = "/automation/trigger/orphaned-package-cleanup"; + } else if (jobType === "docker-inventory") { + endpoint = "/automation/trigger/docker-inventory-cleanup"; } else if (jobType === "agent-collection") { endpoint = "/automation/trigger/agent-collection"; } @@ -584,6 +607,10 @@ const Automation = () => { automation.queue.includes("orphaned-package") ) { triggerManualJob("orphaned-packages"); + } else if ( + automation.queue.includes("docker-inventory") + ) { + triggerManualJob("docker-inventory"); } else if ( automation.queue.includes("agent-commands") ) { diff --git a/frontend/src/pages/Docker.jsx b/frontend/src/pages/Docker.jsx index b82d0df..8c40915 100644 --- a/frontend/src/pages/Docker.jsx +++ b/frontend/src/pages/Docker.jsx @@ -1,4 +1,4 @@ 
-import { useQuery } from "@tanstack/react-query"; +import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query"; import { AlertTriangle, ArrowDown, @@ -11,6 +11,7 @@ import { Search, Server, Shield, + Trash2, X, } from "lucide-react"; import { useMemo, useState } from "react"; @@ -18,12 +19,15 @@ import { Link } from "react-router-dom"; import api from "../utils/api"; const Docker = () => { + const queryClient = useQueryClient(); const [searchTerm, setSearchTerm] = useState(""); const [activeTab, setActiveTab] = useState("containers"); const [sortField, setSortField] = useState("status"); const [sortDirection, setSortDirection] = useState("asc"); const [statusFilter, setStatusFilter] = useState("all"); const [sourceFilter, setSourceFilter] = useState("all"); + const [deleteContainerModal, setDeleteContainerModal] = useState(null); + const [deleteImageModal, setDeleteImageModal] = useState(null); // Fetch Docker dashboard data const { data: dashboard, isLoading: dashboardLoading } = useQuery({ @@ -36,7 +40,11 @@ const Docker = () => { }); // Fetch containers - const { data: containersData, isLoading: containersLoading } = useQuery({ + const { + data: containersData, + isLoading: containersLoading, + refetch: refetchContainers, + } = useQuery({ queryKey: ["docker", "containers", statusFilter], queryFn: async () => { const params = new URLSearchParams(); @@ -49,7 +57,11 @@ const Docker = () => { }); // Fetch images - const { data: imagesData, isLoading: imagesLoading } = useQuery({ + const { + data: imagesData, + isLoading: imagesLoading, + refetch: refetchImages, + } = useQuery({ queryKey: ["docker", "images", sourceFilter], queryFn: async () => { const params = new URLSearchParams(); @@ -81,6 +93,42 @@ const Docker = () => { enabled: activeTab === "updates", }); + // Delete container mutation + const deleteContainerMutation = useMutation({ + mutationFn: async (containerId) => { + const response = await api.delete(`/docker/containers/${containerId}`); + return response.data; + }, + onSuccess: () => { + queryClient.invalidateQueries(["docker", "containers"]); + queryClient.invalidateQueries(["docker", "dashboard"]); + setDeleteContainerModal(null); + }, + onError: (error) => { + alert( + `Failed to delete container: ${error.response?.data?.error || error.message}`, + ); + }, + }); + + // Delete image mutation + const deleteImageMutation = useMutation({ + mutationFn: async (imageId) => { + const response = await api.delete(`/docker/images/${imageId}`); + return response.data; + }, + onSuccess: () => { + queryClient.invalidateQueries(["docker", "images"]); + queryClient.invalidateQueries(["docker", "dashboard"]); + setDeleteImageModal(null); + }, + onError: (error) => { + alert( + `Failed to delete image: ${error.response?.data?.error || error.message}`, + ); + }, + }); + // Filter and sort containers const filteredContainers = useMemo(() => { if (!containersData?.containers) return []; @@ -288,32 +336,36 @@ const Docker = () => { }; return ( -
      {/* Header */}
      [page wrapper and header JSX garbled in extraction; recoverable content: the header renders the title "Docker Inventory", the subtitle "Monitor containers, images, and updates across your infrastructure", and the header action buttons]
-      {/* Dashboard Cards */}
+      {/* Stats Summary */}
@@ -400,11 +452,11 @@ const Docker = () => {
-      {/* Tabs and Content */}
+      {/* Docker List */}
       {/* Tab Navigation */}
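For reviewers who want to exercise the new integration endpoint by hand, below is a minimal sketch of an agent-style request. The header names and body fields mirror what the route handler destructures (`x-api-id`/`x-api-key` headers; `hostname`; `containers`, `images`, and `updates` arrays); the server URL, credentials, and container details are placeholder values, and the legacy `POST /api/v1/docker/collect` route remains available for older agents.

```js
// Minimal sketch of an agent-style call to POST /api/v1/integrations/docker
// (Node 18+, ESM for top-level await). URL, credentials, and container
// details are placeholders, not real values.
const response = await fetch(
	"https://patchmon.example.com/api/v1/integrations/docker",
	{
		method: "POST",
		headers: {
			"Content-Type": "application/json",
			"X-API-ID": "patchmon_xxxxxxxx", // matched against hosts.api_id
			"X-API-KEY": "your-api-key", // matched against hosts.api_key
		},
		body: JSON.stringify({
			hostname: "web-01",
			containers: [
				{
					container_id: "3f2a9c81d7e4",
					name: "nginx",
					image_name: "nginx:1.27",
					image_repository: "nginx", // used to upsert docker_images
					image_tag: "1.27",
					image_id: "sha256:0f1e3b2c", // placeholder digest
					status: "running",
					state: "running",
				},
			],
			images: [],
			updates: [],
		}),
	},
);
console.log(await response.json());
// => { message: "Docker data collected successfully",
//      containers_received: 1, images_received: 0, updates_found: 0 }
```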