diff --git a/backend/package.json b/backend/package.json index 4387c75..51afcca 100644 --- a/backend/package.json +++ b/backend/package.json @@ -1,6 +1,6 @@ { "name": "patchmon-backend", - "version": "1.3.1", + "version": "1.3.2", "description": "Backend API for Linux Patch Monitoring System", "license": "AGPL-3.0", "main": "src/server.js", diff --git a/backend/prisma/migrations/20251029181253_add_docker_volumes_networks/migration.sql b/backend/prisma/migrations/20251029181253_add_docker_volumes_networks/migration.sql new file mode 100644 index 0000000..4cad27b --- /dev/null +++ b/backend/prisma/migrations/20251029181253_add_docker_volumes_networks/migration.sql @@ -0,0 +1,74 @@ +-- CreateTable +CREATE TABLE "docker_volumes" ( + "id" TEXT NOT NULL, + "host_id" TEXT NOT NULL, + "volume_id" TEXT NOT NULL, + "name" TEXT NOT NULL, + "driver" TEXT NOT NULL, + "mountpoint" TEXT, + "renderer" TEXT, + "scope" TEXT NOT NULL DEFAULT 'local', + "labels" JSONB, + "options" JSONB, + "size_bytes" BIGINT, + "ref_count" INTEGER NOT NULL DEFAULT 0, + "created_at" TIMESTAMP(3) NOT NULL, + "updated_at" TIMESTAMP(3) NOT NULL, + "last_checked" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + + CONSTRAINT "docker_volumes_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "docker_networks" ( + "id" TEXT NOT NULL, + "host_id" TEXT NOT NULL, + "network_id" TEXT NOT NULL, + "name" TEXT NOT NULL, + "driver" TEXT NOT NULL, + "scope" TEXT NOT NULL DEFAULT 'local', + "ipv6_enabled" BOOLEAN NOT NULL DEFAULT false, + "internal" BOOLEAN NOT NULL DEFAULT false, + "attachable" BOOLEAN NOT NULL DEFAULT true, + "ingress" BOOLEAN NOT NULL DEFAULT false, + "config_only" BOOLEAN NOT NULL DEFAULT false, + "labels" JSONB, + "ipam" JSONB, + "container_count" INTEGER NOT NULL DEFAULT 0, + "created_at" TIMESTAMP(3), + "updated_at" TIMESTAMP(3) NOT NULL, + "last_checked" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + + CONSTRAINT "docker_networks_pkey" PRIMARY KEY ("id") +); + +-- CreateIndex 
+CREATE INDEX "docker_volumes_host_id_idx" ON "docker_volumes"("host_id"); + +-- CreateIndex +CREATE INDEX "docker_volumes_name_idx" ON "docker_volumes"("name"); + +-- CreateIndex +CREATE INDEX "docker_volumes_driver_idx" ON "docker_volumes"("driver"); + +-- CreateIndex +CREATE UNIQUE INDEX "docker_volumes_host_id_volume_id_key" ON "docker_volumes"("host_id", "volume_id"); + +-- CreateIndex +CREATE INDEX "docker_networks_host_id_idx" ON "docker_networks"("host_id"); + +-- CreateIndex +CREATE INDEX "docker_networks_name_idx" ON "docker_networks"("name"); + +-- CreateIndex +CREATE INDEX "docker_networks_driver_idx" ON "docker_networks"("driver"); + +-- CreateIndex +CREATE UNIQUE INDEX "docker_networks_host_id_network_id_key" ON "docker_networks"("host_id", "network_id"); + +-- AddForeignKey +ALTER TABLE "docker_volumes" ADD CONSTRAINT "docker_volumes_host_id_fkey" FOREIGN KEY ("host_id") REFERENCES "hosts"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "docker_networks" ADD CONSTRAINT "docker_networks_host_id_fkey" FOREIGN KEY ("host_id") REFERENCES "hosts"("id") ON DELETE CASCADE ON UPDATE CASCADE; + diff --git a/backend/prisma/schema.prisma b/backend/prisma/schema.prisma index cb3825e..f703d58 100644 --- a/backend/prisma/schema.prisma +++ b/backend/prisma/schema.prisma @@ -114,6 +114,8 @@ model hosts { host_group_memberships host_group_memberships[] update_history update_history[] job_history job_history[] + docker_volumes docker_volumes[] + docker_networks docker_networks[] @@index([machine_id]) @@index([friendly_name]) @@ -342,6 +344,56 @@ model docker_image_updates { @@index([is_security_update]) } +model docker_volumes { + id String @id + host_id String + volume_id String + name String + driver String + mountpoint String? + renderer String? + scope String @default("local") + labels Json? + options Json? + size_bytes BigInt? 
+ ref_count Int @default(0) + created_at DateTime + updated_at DateTime + last_checked DateTime @default(now()) + hosts hosts @relation(fields: [host_id], references: [id], onDelete: Cascade) + + @@unique([host_id, volume_id]) + @@index([host_id]) + @@index([name]) + @@index([driver]) +} + +model docker_networks { + id String @id + host_id String + network_id String + name String + driver String + scope String @default("local") + ipv6_enabled Boolean @default(false) + internal Boolean @default(false) + attachable Boolean @default(true) + ingress Boolean @default(false) + config_only Boolean @default(false) + labels Json? + ipam Json? // IPAM configuration (driver, config, options) + container_count Int @default(0) + created_at DateTime? + updated_at DateTime + last_checked DateTime @default(now()) + hosts hosts @relation(fields: [host_id], references: [id], onDelete: Cascade) + + @@unique([host_id, network_id]) + @@index([host_id]) + @@index([name]) + @@index([driver]) +} + model job_history { id String @id job_id String diff --git a/backend/src/routes/authRoutes.js b/backend/src/routes/authRoutes.js index 69930ea..a3fc005 100644 --- a/backend/src/routes/authRoutes.js +++ b/backend/src/routes/authRoutes.js @@ -17,6 +17,7 @@ const { refresh_access_token, revoke_session, revoke_all_user_sessions, + generate_device_fingerprint, } = require("../utils/session_manager"); const router = express.Router(); @@ -788,11 +789,39 @@ router.post( // Check if TFA is enabled if (user.tfa_enabled) { - return res.status(200).json({ - message: "TFA verification required", - requiresTfa: true, - username: user.username, - }); + // Get device fingerprint from X-Device-ID header + const device_fingerprint = generate_device_fingerprint(req); + + // Check if this device has a valid TFA bypass + if (device_fingerprint) { + const remembered_session = await prisma.user_sessions.findFirst({ + where: { + user_id: user.id, + device_fingerprint: device_fingerprint, + tfa_remember_me: true, + 
tfa_bypass_until: { gt: new Date() }, // Bypass still valid + }, + }); + + if (remembered_session) { + // Device is remembered and bypass is still valid - skip TFA + // Continue with login below + } else { + // No valid bypass for this device - require TFA + return res.status(200).json({ + message: "TFA verification required", + requiresTfa: true, + username: user.username, + }); + } + } else { + // No device ID provided - require TFA + return res.status(200).json({ + message: "TFA verification required", + requiresTfa: true, + username: user.username, + }); + } } // Update last login @@ -807,7 +836,13 @@ router.post( // Create session with access and refresh tokens const ip_address = req.ip || req.connection.remoteAddress; const user_agent = req.get("user-agent"); - const session = await create_session(user.id, ip_address, user_agent); + const session = await create_session( + user.id, + ip_address, + user_agent, + false, + req, + ); res.json({ message: "Login successful", @@ -841,8 +876,10 @@ router.post( body("username").notEmpty().withMessage("Username is required"), body("token") .isLength({ min: 6, max: 6 }) - .withMessage("Token must be 6 digits"), - body("token").isNumeric().withMessage("Token must contain only numbers"), + .withMessage("Token must be 6 characters"), + body("token") + .matches(/^[A-Z0-9]{6}$/) + .withMessage("Token must be 6 alphanumeric characters"), body("remember_me") .optional() .isBoolean() diff --git a/backend/src/routes/dockerRoutes.js b/backend/src/routes/dockerRoutes.js index db3a4eb..7a90214 100644 --- a/backend/src/routes/dockerRoutes.js +++ b/backend/src/routes/dockerRoutes.js @@ -573,6 +573,7 @@ router.post("/collect", async (req, res) => { image_id: containerData.image_id || "unknown", source: containerData.image_source || "docker-hub", created_at: parseDate(containerData.created_at), + last_checked: now, updated_at: now, }, }); @@ -822,6 +823,7 @@ router.post("/../integrations/docker", async (req, res) => { image_id: 
containerData.image_id || "unknown", source: containerData.image_source || "docker-hub", created_at: parseDate(containerData.created_at), + last_checked: now, updated_at: now, }, }); @@ -876,6 +878,12 @@ router.post("/../integrations/docker", async (req, res) => { if (images && Array.isArray(images)) { console.log(`[Docker Integration] Processing ${images.length} images`); for (const imageData of images) { + // If image has no digest, it's likely locally built - override source to "local" + const imageSource = + !imageData.digest || imageData.digest.trim() === "" + ? "local" + : imageData.source || "docker-hub"; + await prisma.docker_images.upsert({ where: { repository_tag_image_id: { @@ -889,6 +897,7 @@ router.post("/../integrations/docker", async (req, res) => { ? BigInt(imageData.size_bytes) : null, digest: imageData.digest || null, + source: imageSource, // Update source in case it changed last_checked: now, updated_at: now, }, @@ -901,8 +910,9 @@ router.post("/../integrations/docker", async (req, res) => { size_bytes: imageData.size_bytes ? 
BigInt(imageData.size_bytes) : null, - source: imageData.source || "docker-hub", + source: imageSource, created_at: parseDate(imageData.created_at), + last_checked: now, updated_at: now, }, }); @@ -1062,6 +1072,172 @@ router.delete("/images/:id", authenticateToken, async (req, res) => { } }); +// GET /api/v1/docker/volumes - Get all volumes with filters +router.get("/volumes", authenticateToken, async (req, res) => { + try { + const { driver, search, page = 1, limit = 50 } = req.query; + + const where = {}; + if (driver) where.driver = driver; + if (search) { + where.OR = [{ name: { contains: search, mode: "insensitive" } }]; + } + + const skip = (parseInt(page, 10) - 1) * parseInt(limit, 10); + const take = parseInt(limit, 10); + + const [volumes, total] = await Promise.all([ + prisma.docker_volumes.findMany({ + where, + include: { + hosts: { + select: { + id: true, + friendly_name: true, + hostname: true, + ip: true, + }, + }, + }, + orderBy: { updated_at: "desc" }, + skip, + take, + }), + prisma.docker_volumes.count({ where }), + ]); + + res.json( + convertBigIntToString({ + volumes, + pagination: { + page: parseInt(page, 10), + limit: parseInt(limit, 10), + total, + totalPages: Math.ceil(total / parseInt(limit, 10)), + }, + }), + ); + } catch (error) { + console.error("Error fetching volumes:", error); + res.status(500).json({ error: "Failed to fetch volumes" }); + } +}); + +// GET /api/v1/docker/volumes/:id - Get volume detail +router.get("/volumes/:id", authenticateToken, async (req, res) => { + try { + const { id } = req.params; + + const volume = await prisma.docker_volumes.findUnique({ + where: { id }, + include: { + hosts: { + select: { + id: true, + friendly_name: true, + hostname: true, + ip: true, + os_type: true, + os_version: true, + }, + }, + }, + }); + + if (!volume) { + return res.status(404).json({ error: "Volume not found" }); + } + + res.json(convertBigIntToString({ volume })); + } catch (error) { + console.error("Error fetching volume 
detail:", error); + res.status(500).json({ error: "Failed to fetch volume detail" }); + } +}); + +// GET /api/v1/docker/networks - Get all networks with filters +router.get("/networks", authenticateToken, async (req, res) => { + try { + const { driver, search, page = 1, limit = 50 } = req.query; + + const where = {}; + if (driver) where.driver = driver; + if (search) { + where.OR = [{ name: { contains: search, mode: "insensitive" } }]; + } + + const skip = (parseInt(page, 10) - 1) * parseInt(limit, 10); + const take = parseInt(limit, 10); + + const [networks, total] = await Promise.all([ + prisma.docker_networks.findMany({ + where, + include: { + hosts: { + select: { + id: true, + friendly_name: true, + hostname: true, + ip: true, + }, + }, + }, + orderBy: { updated_at: "desc" }, + skip, + take, + }), + prisma.docker_networks.count({ where }), + ]); + + res.json( + convertBigIntToString({ + networks, + pagination: { + page: parseInt(page, 10), + limit: parseInt(limit, 10), + total, + totalPages: Math.ceil(total / parseInt(limit, 10)), + }, + }), + ); + } catch (error) { + console.error("Error fetching networks:", error); + res.status(500).json({ error: "Failed to fetch networks" }); + } +}); + +// GET /api/v1/docker/networks/:id - Get network detail +router.get("/networks/:id", authenticateToken, async (req, res) => { + try { + const { id } = req.params; + + const network = await prisma.docker_networks.findUnique({ + where: { id }, + include: { + hosts: { + select: { + id: true, + friendly_name: true, + hostname: true, + ip: true, + os_type: true, + os_version: true, + }, + }, + }, + }); + + if (!network) { + return res.status(404).json({ error: "Network not found" }); + } + + res.json(convertBigIntToString({ network })); + } catch (error) { + console.error("Error fetching network detail:", error); + res.status(500).json({ error: "Failed to fetch network detail" }); + } +}); + // GET /api/v1/docker/agent - Serve the Docker agent installation script 
router.get("/agent", async (_req, res) => { try { @@ -1093,4 +1269,66 @@ router.get("/agent", async (_req, res) => { } }); +// DELETE /api/v1/docker/volumes/:id - Delete a volume +router.delete("/volumes/:id", authenticateToken, async (req, res) => { + try { + const { id } = req.params; + + // Check if volume exists + const volume = await prisma.docker_volumes.findUnique({ + where: { id }, + }); + + if (!volume) { + return res.status(404).json({ error: "Volume not found" }); + } + + // Delete the volume + await prisma.docker_volumes.delete({ + where: { id }, + }); + + console.log(`🗑️ Deleted volume: ${volume.name} (${id})`); + + res.json({ + success: true, + message: `Volume ${volume.name} deleted successfully`, + }); + } catch (error) { + console.error("Error deleting volume:", error); + res.status(500).json({ error: "Failed to delete volume" }); + } +}); + +// DELETE /api/v1/docker/networks/:id - Delete a network +router.delete("/networks/:id", authenticateToken, async (req, res) => { + try { + const { id } = req.params; + + // Check if network exists + const network = await prisma.docker_networks.findUnique({ + where: { id }, + }); + + if (!network) { + return res.status(404).json({ error: "Network not found" }); + } + + // Delete the network + await prisma.docker_networks.delete({ + where: { id }, + }); + + console.log(`🗑️ Deleted network: ${network.name} (${id})`); + + res.json({ + success: true, + message: `Network ${network.name} deleted successfully`, + }); + } catch (error) { + console.error("Error deleting network:", error); + res.status(500).json({ error: "Failed to delete network" }); + } +}); + module.exports = router; diff --git a/backend/src/routes/hostGroupRoutes.js b/backend/src/routes/hostGroupRoutes.js index 3c23db9..9102967 100644 --- a/backend/src/routes/hostGroupRoutes.js +++ b/backend/src/routes/hostGroupRoutes.js @@ -24,7 +24,15 @@ router.get("/", authenticateToken, async (_req, res) => { }, }); - res.json(hostGroups); + // Transform the 
count field to match frontend expectations + const transformedGroups = hostGroups.map((group) => ({ + ...group, + _count: { + hosts: group._count.host_group_memberships, + }, + })); + + res.json(transformedGroups); } catch (error) { console.error("Error fetching host groups:", error); res.status(500).json({ error: "Failed to fetch host groups" }); diff --git a/backend/src/routes/hostRoutes.js b/backend/src/routes/hostRoutes.js index 1c539bd..6236197 100644 --- a/backend/src/routes/hostRoutes.js +++ b/backend/src/routes/hostRoutes.js @@ -10,6 +10,7 @@ const { requireManageHosts, requireManageSettings, } = require("../middleware/permissions"); +const { queueManager, QUEUE_NAMES } = require("../services/automation"); const router = express.Router(); const prisma = getPrismaClient(); @@ -1387,6 +1388,66 @@ router.delete( }, ); +// Force immediate report from agent +router.post( + "/:hostId/fetch-report", + authenticateToken, + requireManageHosts, + async (req, res) => { + try { + const { hostId } = req.params; + + // Get host to verify it exists + const host = await prisma.hosts.findUnique({ + where: { id: hostId }, + }); + + if (!host) { + return res.status(404).json({ error: "Host not found" }); + } + + // Get the agent-commands queue + const queue = queueManager.queues[QUEUE_NAMES.AGENT_COMMANDS]; + + if (!queue) { + return res.status(500).json({ + error: "Queue not available", + }); + } + + // Add job to queue + const job = await queue.add( + "report_now", + { + api_id: host.api_id, + type: "report_now", + }, + { + attempts: 3, + backoff: { + type: "exponential", + delay: 2000, + }, + }, + ); + + res.json({ + success: true, + message: "Report fetch queued successfully", + jobId: job.id, + host: { + id: host.id, + friendlyName: host.friendly_name, + apiId: host.api_id, + }, + }); + } catch (error) { + console.error("Force fetch report error:", error); + res.status(500).json({ error: "Failed to fetch report" }); + } + }, +); + // Toggle agent auto-update setting 
router.patch( "/:hostId/auto-update", @@ -1448,21 +1509,17 @@ router.post( return res.status(404).json({ error: "Host not found" }); } - // Get queue manager - const { QUEUE_NAMES } = require("../services/automation"); - const queueManager = req.app.locals.queueManager; - - if (!queueManager) { - return res.status(500).json({ - error: "Queue manager not available", - }); - } - // Get the agent-commands queue const queue = queueManager.queues[QUEUE_NAMES.AGENT_COMMANDS]; + if (!queue) { + return res.status(500).json({ + error: "Queue not available", + }); + } + // Add job to queue - await queue.add( + const job = await queue.add( "update_agent", { api_id: host.api_id, @@ -1480,6 +1537,7 @@ router.post( res.json({ success: true, message: "Agent update queued successfully", + jobId: job.id, host: { id: host.id, friendlyName: host.friendly_name, diff --git a/backend/src/routes/integrationRoutes.js b/backend/src/routes/integrationRoutes.js index fccdf9a..216e0ae 100644 --- a/backend/src/routes/integrationRoutes.js +++ b/backend/src/routes/integrationRoutes.js @@ -13,6 +13,8 @@ router.post("/docker", async (req, res) => { const { containers, images, + volumes, + networks, updates, daemon_info: _daemon_info, hostname, @@ -49,6 +51,8 @@ router.post("/docker", async (req, res) => { let containersProcessed = 0; let imagesProcessed = 0; + let volumesProcessed = 0; + let networksProcessed = 0; let updatesProcessed = 0; // Process containers @@ -169,6 +173,114 @@ router.post("/docker", async (req, res) => { } } + // Process volumes + if (volumes && Array.isArray(volumes)) { + console.log(`[Docker Integration] Processing ${volumes.length} volumes`); + for (const volumeData of volumes) { + await prisma.docker_volumes.upsert({ + where: { + host_id_volume_id: { + host_id: host.id, + volume_id: volumeData.volume_id, + }, + }, + update: { + name: volumeData.name, + driver: volumeData.driver || "local", + mountpoint: volumeData.mountpoint || null, + renderer: volumeData.renderer || 
null, + scope: volumeData.scope || "local", + labels: volumeData.labels || null, + options: volumeData.options || null, + size_bytes: volumeData.size_bytes + ? BigInt(volumeData.size_bytes) + : null, + ref_count: volumeData.ref_count || 0, + updated_at: now, + last_checked: now, + }, + create: { + id: uuidv4(), + host_id: host.id, + volume_id: volumeData.volume_id, + name: volumeData.name, + driver: volumeData.driver || "local", + mountpoint: volumeData.mountpoint || null, + renderer: volumeData.renderer || null, + scope: volumeData.scope || "local", + labels: volumeData.labels || null, + options: volumeData.options || null, + size_bytes: volumeData.size_bytes + ? BigInt(volumeData.size_bytes) + : null, + ref_count: volumeData.ref_count || 0, + created_at: parseDate(volumeData.created_at), + updated_at: now, + }, + }); + volumesProcessed++; + } + } + + // Process networks + if (networks && Array.isArray(networks)) { + console.log( + `[Docker Integration] Processing ${networks.length} networks`, + ); + for (const networkData of networks) { + await prisma.docker_networks.upsert({ + where: { + host_id_network_id: { + host_id: host.id, + network_id: networkData.network_id, + }, + }, + update: { + name: networkData.name, + driver: networkData.driver, + scope: networkData.scope || "local", + ipv6_enabled: networkData.ipv6_enabled || false, + internal: networkData.internal || false, + attachable: + networkData.attachable !== undefined + ? 
networkData.attachable + : true, + ingress: networkData.ingress || false, + config_only: networkData.config_only || false, + labels: networkData.labels || null, + ipam: networkData.ipam || null, + container_count: networkData.container_count || 0, + updated_at: now, + last_checked: now, + }, + create: { + id: uuidv4(), + host_id: host.id, + network_id: networkData.network_id, + name: networkData.name, + driver: networkData.driver, + scope: networkData.scope || "local", + ipv6_enabled: networkData.ipv6_enabled || false, + internal: networkData.internal || false, + attachable: + networkData.attachable !== undefined + ? networkData.attachable + : true, + ingress: networkData.ingress || false, + config_only: networkData.config_only || false, + labels: networkData.labels || null, + ipam: networkData.ipam || null, + container_count: networkData.container_count || 0, + created_at: networkData.created_at + ? parseDate(networkData.created_at) + : null, + updated_at: now, + }, + }); + networksProcessed++; + } + } + // Process updates if (updates && Array.isArray(updates)) { console.log(`[Docker Integration] Processing ${updates.length} updates`); @@ -219,13 +331,15 @@ router.post("/docker", async (req, res) => { } console.log( - `[Docker Integration] Successfully processed: ${containersProcessed} containers, ${imagesProcessed} images, ${updatesProcessed} updates`, + `[Docker Integration] Successfully processed: ${containersProcessed} containers, ${imagesProcessed} images, ${volumesProcessed} volumes, ${networksProcessed} networks, ${updatesProcessed} updates`, ); res.json({ message: "Docker data collected successfully", containers_received: containersProcessed, images_received: imagesProcessed, + volumes_received: volumesProcessed, + networks_received: networksProcessed, updates_found: updatesProcessed, }); } catch (error) { diff --git a/backend/src/routes/tfaRoutes.js b/backend/src/routes/tfaRoutes.js index e27dbd4..f69206a 100644 --- a/backend/src/routes/tfaRoutes.js +++ 
b/backend/src/routes/tfaRoutes.js @@ -261,8 +261,10 @@ router.post( body("username").notEmpty().withMessage("Username is required"), body("token") .isLength({ min: 6, max: 6 }) - .withMessage("Token must be 6 digits"), - body("token").isNumeric().withMessage("Token must contain only numbers"), + .withMessage("Token must be 6 characters"), + body("token") + .matches(/^[A-Z0-9]{6}$/) + .withMessage("Token must be 6 alphanumeric characters"), ], async (req, res) => { try { diff --git a/backend/src/services/automation/dockerImageUpdateCheck.js b/backend/src/services/automation/dockerImageUpdateCheck.js new file mode 100644 index 0000000..2706768 --- /dev/null +++ b/backend/src/services/automation/dockerImageUpdateCheck.js @@ -0,0 +1,343 @@ +const { prisma } = require("./shared/prisma"); +const https = require("node:https"); +const http = require("node:http"); +const { v4: uuidv4 } = require("uuid"); + +/** + * Docker Image Update Check Automation + * Checks for Docker image updates by comparing local digests with remote registry digests + */ +class DockerImageUpdateCheck { + constructor(queueManager) { + this.queueManager = queueManager; + this.queueName = "docker-image-update-check"; + } + + /** + * Get remote digest from Docker registry using HEAD request + * Supports Docker Hub, GHCR, and other OCI-compliant registries + */ + async getRemoteDigest(imageName, tag = "latest") { + return new Promise((resolve, reject) => { + // Parse image name to determine registry + const registryInfo = this.parseImageName(imageName); + + // Construct manifest URL + const manifestPath = `/v2/${registryInfo.repository}/manifests/${tag}`; + const options = { + hostname: registryInfo.registry, + path: manifestPath, + method: "HEAD", + headers: { + Accept: + "application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json", + "User-Agent": "PatchMon/1.0", + 
}, + }; + + // Add authentication token for Docker Hub if needed + if ( + registryInfo.registry === "registry-1.docker.io" && + registryInfo.isPublic + ) { + // For anonymous public images, we may need to get an auth token first + // For now, try without auth (works for public images) + } + + // Choose HTTP or HTTPS + const client = registryInfo.isSecure ? https : http; + + const req = client.request(options, (res) => { + if (res.statusCode === 401 || res.statusCode === 403) { + // Authentication required - skip for now (would need to implement auth) + return reject( + new Error(`Authentication required for ${imageName}:${tag}`), + ); + } + + if (res.statusCode !== 200) { + return reject( + new Error( + `Registry returned status ${res.statusCode} for ${imageName}:${tag}`, + ), + ); + } + + // Get digest from Docker-Content-Digest header + const digest = res.headers["docker-content-digest"]; + if (!digest) { + return reject( + new Error( + `No Docker-Content-Digest header for ${imageName}:${tag}`, + ), + ); + } + + // Clean up digest (remove sha256: prefix if present) + const cleanDigest = digest.startsWith("sha256:") + ? digest.substring(7) + : digest; + resolve(cleanDigest); + }); + + req.on("error", (error) => { + reject(error); + }); + + req.setTimeout(10000, () => { + req.destroy(); + reject(new Error(`Timeout getting digest for ${imageName}:${tag}`)); + }); + + req.end(); + }); + } + + /** + * Parse image name to extract registry, repository, and determine if secure + */ + parseImageName(imageName) { + let registry = "registry-1.docker.io"; + let repository = imageName; + const isSecure = true; + let isPublic = true; + + // Handle explicit registries (ghcr.io, quay.io, etc.) 
+ if (imageName.includes("/")) { + const parts = imageName.split("/"); + const firstPart = parts[0]; + + // Check for known registries + if (firstPart.includes(".") || firstPart === "localhost") { + registry = firstPart; + repository = parts.slice(1).join("/"); + isPublic = false; // Assume private registries need auth for now + } else { + // Docker Hub - registry-1.docker.io + repository = imageName; + } + } + + // Docker Hub official images (no namespace) + if (!repository.includes("/")) { + repository = `library/${repository}`; + } + + return { + registry, + repository, + isSecure, + isPublic, + }; + } + + /** + * Process Docker image update check job + */ + async process(_job) { + const startTime = Date.now(); + console.log("🐳 Starting Docker image update check..."); + + try { + // Get all Docker images that have a digest and repository + const images = await prisma.docker_images.findMany({ + where: { + digest: { + not: null, + }, + repository: { + not: null, + }, + }, + include: { + docker_image_updates: true, + }, + }); + + console.log(`📦 Found ${images.length} images to check for updates`); + + let checkedCount = 0; + let updateCount = 0; + let errorCount = 0; + const errors = []; + + // Process images in batches to avoid overwhelming the API + const batchSize = 10; + for (let i = 0; i < images.length; i += batchSize) { + const batch = images.slice(i, i + batchSize); + + // Process batch concurrently with Promise.allSettled for error tolerance + const _results = await Promise.allSettled( + batch.map(async (image) => { + try { + checkedCount++; + + // Skip local images (no digest means they're local) + if (!image.digest || image.digest.trim() === "") { + return { image, skipped: true, reason: "No digest" }; + } + + // Get clean digest (remove sha256: prefix if present) + const localDigest = image.digest.startsWith("sha256:") + ? 
image.digest.substring(7) + : image.digest; + + // Get remote digest from registry + const remoteDigest = await this.getRemoteDigest( + image.repository, + image.tag || "latest", + ); + + // Compare digests + if (localDigest !== remoteDigest) { + console.log( + `🔄 Update found: ${image.repository}:${image.tag} (local: ${localDigest.substring(0, 12)}..., remote: ${remoteDigest.substring(0, 12)}...)`, + ); + + // Store digest info in changelog_url field as JSON + const digestInfo = JSON.stringify({ + method: "digest_comparison", + current_digest: localDigest, + available_digest: remoteDigest, + checked_at: new Date().toISOString(), + }); + + // Upsert the update record + await prisma.docker_image_updates.upsert({ + where: { + image_id_available_tag: { + image_id: image.id, + available_tag: image.tag || "latest", + }, + }, + update: { + updated_at: new Date(), + changelog_url: digestInfo, + severity: "digest_changed", + }, + create: { + id: uuidv4(), + image_id: image.id, + current_tag: image.tag || "latest", + available_tag: image.tag || "latest", + severity: "digest_changed", + changelog_url: digestInfo, + updated_at: new Date(), + }, + }); + + // Update last_checked timestamp on image + await prisma.docker_images.update({ + where: { id: image.id }, + data: { last_checked: new Date() }, + }); + + updateCount++; + return { image, updated: true }; + } else { + // No update - still update last_checked + await prisma.docker_images.update({ + where: { id: image.id }, + data: { last_checked: new Date() }, + }); + + // Remove existing update record if digest matches now + const existingUpdate = image.docker_image_updates?.find( + (u) => u.available_tag === (image.tag || "latest"), + ); + if (existingUpdate) { + await prisma.docker_image_updates.delete({ + where: { id: existingUpdate.id }, + }); + } + + return { image, updated: false }; + } + } catch (error) { + errorCount++; + const errorMsg = `Error checking ${image.repository}:${image.tag}: ${error.message}`; + 
errors.push(errorMsg); + console.error(`❌ ${errorMsg}`); + + // Still update last_checked even on error + try { + await prisma.docker_images.update({ + where: { id: image.id }, + data: { last_checked: new Date() }, + }); + } catch (_updateError) { + // Ignore update errors + } + + return { image, error: error.message }; + } + }), + ); + + // Log batch progress + if (i + batchSize < images.length) { + console.log( + `⏳ Processed ${Math.min(i + batchSize, images.length)}/${images.length} images...`, + ); + } + + // Small delay between batches to be respectful to registries + if (i + batchSize < images.length) { + await new Promise((resolve) => setTimeout(resolve, 500)); + } + } + + const executionTime = Date.now() - startTime; + console.log( + `✅ Docker image update check completed in ${executionTime}ms - Checked: ${checkedCount}, Updates: ${updateCount}, Errors: ${errorCount}`, + ); + + return { + success: true, + checked: checkedCount, + updates: updateCount, + errors: errorCount, + executionTime, + errorDetails: errors, + }; + } catch (error) { + const executionTime = Date.now() - startTime; + console.error( + `❌ Docker image update check failed after ${executionTime}ms:`, + error.message, + ); + throw error; + } + } + + /** + * Schedule recurring Docker image update check (daily at 2 AM) + */ + async schedule() { + const job = await this.queueManager.queues[this.queueName].add( + "docker-image-update-check", + {}, + { + repeat: { cron: "0 2 * * *" }, // Daily at 2 AM + jobId: "docker-image-update-check-recurring", + }, + ); + console.log("✅ Docker image update check scheduled"); + return job; + } + + /** + * Trigger manual Docker image update check + */ + async triggerManual() { + const job = await this.queueManager.queues[this.queueName].add( + "docker-image-update-check-manual", + {}, + { priority: 1 }, + ); + console.log("✅ Manual Docker image update check triggered"); + return job; + } +} + +module.exports = DockerImageUpdateCheck; diff --git 
a/backend/src/services/automation/index.js b/backend/src/services/automation/index.js index 53eacdc..1731e33 100644 --- a/backend/src/services/automation/index.js +++ b/backend/src/services/automation/index.js @@ -2,6 +2,7 @@ const { Queue, Worker } = require("bullmq"); const { redis, redisConnection } = require("./shared/redis"); const { prisma } = require("./shared/prisma"); const agentWs = require("../agentWs"); +const { v4: uuidv4 } = require("uuid"); // Import automation classes const GitHubUpdateCheck = require("./githubUpdateCheck"); @@ -9,6 +10,7 @@ const SessionCleanup = require("./sessionCleanup"); const OrphanedRepoCleanup = require("./orphanedRepoCleanup"); const OrphanedPackageCleanup = require("./orphanedPackageCleanup"); const DockerInventoryCleanup = require("./dockerInventoryCleanup"); +const DockerImageUpdateCheck = require("./dockerImageUpdateCheck"); const MetricsReporting = require("./metricsReporting"); // Queue names @@ -18,6 +20,7 @@ const QUEUE_NAMES = { ORPHANED_REPO_CLEANUP: "orphaned-repo-cleanup", ORPHANED_PACKAGE_CLEANUP: "orphaned-package-cleanup", DOCKER_INVENTORY_CLEANUP: "docker-inventory-cleanup", + DOCKER_IMAGE_UPDATE_CHECK: "docker-image-update-check", METRICS_REPORTING: "metrics-reporting", AGENT_COMMANDS: "agent-commands", }; @@ -97,6 +100,8 @@ class QueueManager { new OrphanedPackageCleanup(this); this.automations[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP] = new DockerInventoryCleanup(this); + this.automations[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK] = + new DockerImageUpdateCheck(this); this.automations[QUEUE_NAMES.METRICS_REPORTING] = new MetricsReporting( this, ); @@ -167,6 +172,15 @@ class QueueManager { workerOptions, ); + // Docker Image Update Check Worker + this.workers[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK] = new Worker( + QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK, + this.automations[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK].process.bind( + this.automations[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK], + ), + workerOptions, + ); + // 
Metrics Reporting Worker this.workers[QUEUE_NAMES.METRICS_REPORTING] = new Worker( QUEUE_NAMES.METRICS_REPORTING, @@ -183,28 +197,87 @@ class QueueManager { const { api_id, type } = job.data; console.log(`Processing agent command: ${type} for ${api_id}`); - // Send command via WebSocket based on type - if (type === "report_now") { - agentWs.pushReportNow(api_id); - } else if (type === "settings_update") { - // For settings update, we need additional data - const { update_interval } = job.data; - agentWs.pushSettingsUpdate(api_id, update_interval); - } else if (type === "update_agent") { - // Force agent to update by sending WebSocket command - const ws = agentWs.getConnectionByApiId(api_id); - if (ws && ws.readyState === 1) { - // WebSocket.OPEN - agentWs.pushUpdateAgent(api_id); - console.log(`✅ Update command sent to agent ${api_id}`); - } else { - console.error(`❌ Agent ${api_id} is not connected`); - throw new Error( - `Agent ${api_id} is not connected. Cannot send update command.`, - ); + // Log job to job_history + let historyRecord = null; + try { + const host = await prisma.hosts.findUnique({ + where: { api_id }, + select: { id: true }, + }); + + if (host) { + historyRecord = await prisma.job_history.create({ + data: { + id: uuidv4(), + job_id: job.id, + queue_name: QUEUE_NAMES.AGENT_COMMANDS, + job_name: type, + host_id: host.id, + api_id: api_id, + status: "active", + attempt_number: job.attemptsMade + 1, + created_at: new Date(), + updated_at: new Date(), + }, + }); + console.log(`📝 Logged job to job_history: ${job.id} (${type})`); } - } else { - console.error(`Unknown agent command type: ${type}`); + } catch (error) { + console.error("Failed to log job to job_history:", error); + } + + try { + // Send command via WebSocket based on type + if (type === "report_now") { + agentWs.pushReportNow(api_id); + } else if (type === "settings_update") { + // For settings update, we need additional data + const { update_interval } = job.data; + 
agentWs.pushSettingsUpdate(api_id, update_interval); + } else if (type === "update_agent") { + // Force agent to update by sending WebSocket command + const ws = agentWs.getConnectionByApiId(api_id); + if (ws && ws.readyState === 1) { + // WebSocket.OPEN + agentWs.pushUpdateAgent(api_id); + console.log(`✅ Update command sent to agent ${api_id}`); + } else { + console.error(`❌ Agent ${api_id} is not connected`); + throw new Error( + `Agent ${api_id} is not connected. Cannot send update command.`, + ); + } + } else { + console.error(`Unknown agent command type: ${type}`); + } + + // Update job history to completed + if (historyRecord) { + await prisma.job_history.updateMany({ + where: { job_id: job.id }, + data: { + status: "completed", + completed_at: new Date(), + updated_at: new Date(), + }, + }); + console.log(`✅ Marked job as completed in job_history: ${job.id}`); + } + } catch (error) { + // Update job history to failed + if (historyRecord) { + await prisma.job_history.updateMany({ + where: { job_id: job.id }, + data: { + status: "failed", + error_message: error.message, + completed_at: new Date(), + updated_at: new Date(), + }, + }); + console.log(`❌ Marked job as failed in job_history: ${job.id}`); + } + throw error; } }, workerOptions, @@ -234,6 +307,7 @@ class QueueManager { console.log(`✅ Job '${job.id}' in queue '${queueName}' completed.`); }); } + console.log("✅ Queue events initialized"); } @@ -246,6 +320,7 @@ class QueueManager { await this.automations[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].schedule(); await this.automations[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].schedule(); await this.automations[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].schedule(); + await this.automations[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK].schedule(); await this.automations[QUEUE_NAMES.METRICS_REPORTING].schedule(); } @@ -276,6 +351,12 @@ class QueueManager { ].triggerManual(); } + async triggerDockerImageUpdateCheck() { + return this.automations[ + QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK 
+ ].triggerManual(); + } + async triggerMetricsReporting() { return this.automations[QUEUE_NAMES.METRICS_REPORTING].triggerManual(); } diff --git a/backend/src/utils/docker.js b/backend/src/utils/docker.js new file mode 100644 index 0000000..e23bf82 --- /dev/null +++ b/backend/src/utils/docker.js @@ -0,0 +1,179 @@ +/** + * Docker-related utility functions + */ + +/** + * Generate a registry link for a Docker image based on its repository and source + * Inspired by diun's registry link generation + * @param {string} repository - The full repository name (e.g., "ghcr.io/owner/repo") + * @param {string} source - The detected source (github, gitlab, docker-hub, etc.) + * @returns {string|null} - The URL to the registry page, or null if unknown + */ +function generateRegistryLink(repository, source) { + if (!repository) { + return null; + } + + // Parse the domain and path from the repository + const parts = repository.split("/"); + let domain = ""; + let path = ""; + + // Check if repository has a domain (contains a dot) + if (parts[0].includes(".") || parts[0].includes(":")) { + domain = parts[0]; + path = parts.slice(1).join("/"); + } else { + // No domain means Docker Hub + domain = "docker.io"; + path = repository; + } + + switch (source) { + case "docker-hub": + case "docker.io": { + // Docker Hub: https://hub.docker.com/r/{path} or https://hub.docker.com/_/{path} for official images + // Official images are those without a namespace (e.g., "postgres" not "user/postgres") + // or explicitly prefixed with "library/" + if (path.startsWith("library/")) { + const cleanPath = path.replace("library/", ""); + return `https://hub.docker.com/_/${cleanPath}`; + } + // Check if it's an official image (single part, no slash after removing library/) + if (!path.includes("/")) { + return `https://hub.docker.com/_/${path}`; + } + // Regular user/org image + return `https://hub.docker.com/r/${path}`; + } + + case "github": + case "ghcr.io": { + // GitHub Container Registry + // 
Format: ghcr.io/{owner}/{package} or ghcr.io/{owner}/{repo}/{package} + // URL format: https://github.com/{owner}/{repo}/pkgs/container/{package} + if (domain === "ghcr.io" && path) { + const pathParts = path.split("/"); + if (pathParts.length === 2) { + // Simple case: ghcr.io/owner/package -> github.com/owner/package/pkgs/container/package + // OR: ghcr.io/owner/repo -> github.com/owner/repo/pkgs/container/{package} + // For 2 parts it's owner/package; assume the source repo is named after the package + const owner = pathParts[0]; + const packageName = pathParts[1]; + return `https://github.com/${owner}/${packageName}/pkgs/container/${packageName}`; + } else if (pathParts.length >= 3) { + // Extended case: ghcr.io/owner/repo/package -> github.com/owner/repo/pkgs/container/package + const owner = pathParts[0]; + const repo = pathParts[1]; + const packageName = pathParts.slice(2).join("/"); + return `https://github.com/${owner}/${repo}/pkgs/container/${packageName}`; + } + } + // Legacy GitHub Packages + if (domain === "docker.pkg.github.com" && path) { + const pathParts = path.split("/"); + if (pathParts.length >= 1) { + return `https://github.com/${pathParts[0]}/packages`; + } + } + return null; + } + + case "gitlab": + case "registry.gitlab.com": { + // GitLab Container Registry: https://gitlab.com/{path}/container_registry + if (path) { + return `https://gitlab.com/${path}/container_registry`; + } + return null; + } + + case "google": + case "gcr.io": { + // Google Container Registry: https://gcr.io/{path} + if (domain.includes("gcr.io") || domain.includes("pkg.dev")) { + return `https://console.cloud.google.com/gcr/images/${path}`; + } + return null; + } + + case "quay": + case "quay.io": { + // Quay.io: https://quay.io/repository/{path} + if (path) { + return `https://quay.io/repository/${path}`; + } + return null; + } + + case "redhat": + case "registry.access.redhat.com": { + // Red Hat: https://access.redhat.com/containers/#/registry.access.redhat.com/{path} + if 
(path) { + return `https://access.redhat.com/containers/#/registry.access.redhat.com/${path}`; + } + return null; + } + + case "azure": + case "azurecr.io": { + // Azure Container Registry - link to portal + // Format: {registry}.azurecr.io/{repository} + if (domain.includes("azurecr.io")) { + const registryName = domain.split(".")[0]; + return `https://portal.azure.com/#view/Microsoft_Azure_ContainerRegistries/RepositoryBlade/registryName/${registryName}/repositoryName/${path}`; + } + return null; + } + + case "aws": + case "amazonaws.com": { + // AWS ECR - link to console + // Format: {account}.dkr.ecr.{region}.amazonaws.com/{repository} + if (domain.includes("amazonaws.com")) { + const domainParts = domain.split("."); + const region = domainParts[3]; // Extract region + return `https://${region}.console.aws.amazon.com/ecr/repositories/private/${path}`; + } + return null; + } + + case "private": + // For private registries, try to construct a basic URL + if (domain) { + return `https://${domain}`; + } + return null; + + default: + return null; + } +} + +/** + * Get a user-friendly display name for a registry source + * @param {string} source - The source identifier + * @returns {string} - Human-readable source name + */ +function getSourceDisplayName(source) { + const sourceNames = { + "docker-hub": "Docker Hub", + github: "GitHub", + gitlab: "GitLab", + google: "Google", + quay: "Quay.io", + redhat: "Red Hat", + azure: "Azure", + aws: "AWS ECR", + private: "Private Registry", + local: "Local", + unknown: "Unknown", + }; + + return sourceNames[source] || source; +} + +module.exports = { + generateRegistryLink, + getSourceDisplayName, +}; diff --git a/backend/src/utils/session_manager.js b/backend/src/utils/session_manager.js index 3550442..09bf2ac 100644 --- a/backend/src/utils/session_manager.js +++ b/backend/src/utils/session_manager.js @@ -84,21 +84,20 @@ function parse_expiration(expiration_string) { * Generate device fingerprint from request data */ function 
generate_device_fingerprint(req) { - const components = [ - req.get("user-agent") || "", - req.get("accept-language") || "", - req.get("accept-encoding") || "", - req.ip || "", - ]; + // Use the X-Device-ID header from frontend (unique per browser profile/localStorage) + const deviceId = req.get("x-device-id"); - // Create a simple hash of device characteristics - const fingerprint = crypto - .createHash("sha256") - .update(components.join("|")) - .digest("hex") - .substring(0, 32); // Use first 32 chars for storage efficiency + if (deviceId) { + // Hash the device ID for consistent storage format + return crypto + .createHash("sha256") + .update(deviceId) + .digest("hex") + .substring(0, 32); + } - return fingerprint; + // No device ID - return null (user needs to provide device ID for remember-me) + return null; } /** diff --git a/frontend/package.json b/frontend/package.json index 1feada8..5de649f 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -1,7 +1,7 @@ { "name": "patchmon-frontend", "private": true, - "version": "1.3.1", + "version": "1.3.2", "license": "AGPL-3.0", "type": "module", "scripts": { diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index bcc31af..d254f29 100644 --- a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -28,6 +28,8 @@ const DockerContainerDetail = lazy( ); const DockerImageDetail = lazy(() => import("./pages/docker/ImageDetail")); const DockerHostDetail = lazy(() => import("./pages/docker/HostDetail")); +const DockerVolumeDetail = lazy(() => import("./pages/docker/VolumeDetail")); +const DockerNetworkDetail = lazy(() => import("./pages/docker/NetworkDetail")); const AlertChannels = lazy(() => import("./pages/settings/AlertChannels")); const Integrations = lazy(() => import("./pages/settings/Integrations")); const Notifications = lazy(() => import("./pages/settings/Notifications")); @@ -194,6 +196,26 @@ function AppRoutes() { } /> + + + + + + } + /> + + + + + + } + /> { const login = async (username, 
password) => { try { + // Get or generate device ID for TFA remember-me + let deviceId = localStorage.getItem("device_id"); + if (!deviceId) { + if (typeof crypto !== "undefined" && crypto.randomUUID) { + deviceId = crypto.randomUUID(); + } else { + deviceId = "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace( + /[xy]/g, + (c) => { + const r = (Math.random() * 16) | 0; + const v = c === "x" ? r : (r & 0x3) | 0x8; + return v.toString(16); + }, + ); + } + localStorage.setItem("device_id", deviceId); + } + const response = await fetch("/api/v1/auth/login", { method: "POST", headers: { "Content-Type": "application/json", + "X-Device-ID": deviceId, }, body: JSON.stringify({ username, password }), }); diff --git a/frontend/src/pages/Docker.jsx b/frontend/src/pages/Docker.jsx index 8c40915..4f02673 100644 --- a/frontend/src/pages/Docker.jsx +++ b/frontend/src/pages/Docker.jsx @@ -6,17 +6,19 @@ import { ArrowUpDown, Container, ExternalLink, + HardDrive, + Network, Package, RefreshCw, Search, Server, - Shield, Trash2, X, } from "lucide-react"; import { useMemo, useState } from "react"; import { Link } from "react-router-dom"; import api from "../utils/api"; +import { generateRegistryLink, getSourceDisplayName } from "../utils/docker"; const Docker = () => { const queryClient = useQueryClient(); @@ -26,8 +28,12 @@ const Docker = () => { const [sortDirection, setSortDirection] = useState("asc"); const [statusFilter, setStatusFilter] = useState("all"); const [sourceFilter, setSourceFilter] = useState("all"); + const [updatesFilter, setUpdatesFilter] = useState("all"); + const [driverFilter, setDriverFilter] = useState("all"); const [deleteContainerModal, setDeleteContainerModal] = useState(null); const [deleteImageModal, setDeleteImageModal] = useState(null); + const [deleteVolumeModal, setDeleteVolumeModal] = useState(null); + const [deleteNetworkModal, setDeleteNetworkModal] = useState(null); // Fetch Docker dashboard data const { data: dashboard, isLoading: dashboardLoading } 
= useQuery({ @@ -83,14 +89,38 @@ const Docker = () => { enabled: activeTab === "hosts", }); - // Fetch updates - const { data: updatesData, isLoading: updatesLoading } = useQuery({ - queryKey: ["docker", "updates"], + // Fetch volumes + const { + data: volumesData, + isLoading: volumesLoading, + refetch: refetchVolumes, + } = useQuery({ + queryKey: ["docker", "volumes", driverFilter], queryFn: async () => { - const response = await api.get("/docker/updates?limit=1000"); + const params = new URLSearchParams(); + if (driverFilter !== "all") params.set("driver", driverFilter); + params.set("limit", "1000"); + const response = await api.get(`/docker/volumes?${params}`); return response.data; }, - enabled: activeTab === "updates", + enabled: activeTab === "volumes", + }); + + // Fetch networks + const { + data: networksData, + isLoading: networksLoading, + refetch: refetchNetworks, + } = useQuery({ + queryKey: ["docker", "networks", driverFilter], + queryFn: async () => { + const params = new URLSearchParams(); + if (driverFilter !== "all") params.set("driver", driverFilter); + params.set("limit", "1000"); + const response = await api.get(`/docker/networks?${params}`); + return response.data; + }, + enabled: activeTab === "networks", }); // Delete container mutation @@ -129,6 +159,42 @@ const Docker = () => { }, }); + // Delete volume mutation + const deleteVolumeMutation = useMutation({ + mutationFn: async (volumeId) => { + const response = await api.delete(`/docker/volumes/${volumeId}`); + return response.data; + }, + onSuccess: () => { + queryClient.invalidateQueries(["docker", "volumes"]); + queryClient.invalidateQueries(["docker", "dashboard"]); + setDeleteVolumeModal(null); + }, + onError: (error) => { + alert( + `Failed to delete volume: ${error.response?.data?.error || error.message}`, + ); + }, + }); + + // Delete network mutation + const deleteNetworkMutation = useMutation({ + mutationFn: async (networkId) => { + const response = await 
api.delete(`/docker/networks/${networkId}`); + return response.data; + }, + onSuccess: () => { + queryClient.invalidateQueries(["docker", "networks"]); + queryClient.invalidateQueries(["docker", "dashboard"]); + setDeleteNetworkModal(null); + }, + onError: (error) => { + alert( + `Failed to delete network: ${error.response?.data?.error || error.message}`, + ); + }, + }); + // Filter and sort containers const filteredContainers = useMemo(() => { if (!containersData?.containers) return []; @@ -207,6 +273,15 @@ const Docker = () => { ); } + // Filter by updates status + if (updatesFilter !== "all") { + if (updatesFilter === "available") { + filtered = filtered.filter((img) => img.hasUpdates === true); + } else if (updatesFilter === "none") { + filtered = filtered.filter((img) => !img.hasUpdates); + } + } + filtered.sort((a, b) => { let aValue, bValue; if (sortField === "repository") { @@ -226,7 +301,7 @@ const Docker = () => { }); return filtered; - }, [imagesData, searchTerm, sortField, sortDirection]); + }, [imagesData, searchTerm, sortField, sortDirection, updatesFilter]); // Filter and sort hosts const filteredHosts = useMemo(() => { @@ -263,6 +338,103 @@ const Docker = () => { return filtered; }, [hostsData, searchTerm, sortField, sortDirection]); + // Filter and sort volumes + const filteredVolumes = useMemo(() => { + if (!volumesData?.volumes) return []; + let filtered = volumesData.volumes; + + if (searchTerm) { + const term = searchTerm.toLowerCase(); + filtered = filtered.filter( + (v) => + v.name.toLowerCase().includes(term) || + v.hosts?.friendly_name?.toLowerCase().includes(term), + ); + } + + filtered.sort((a, b) => { + let aValue, bValue; + if (sortField === "name") { + aValue = a.name?.toLowerCase() || ""; + bValue = b.name?.toLowerCase() || ""; + } else if (sortField === "driver") { + aValue = a.driver?.toLowerCase() || ""; + bValue = b.driver?.toLowerCase() || ""; + } else if (sortField === "size") { + aValue = a.size_bytes ? 
BigInt(a.size_bytes) : BigInt(0); + bValue = b.size_bytes ? BigInt(b.size_bytes) : BigInt(0); + } else if (sortField === "ref_count") { + aValue = a.ref_count || 0; + bValue = b.ref_count || 0; + } else if (sortField === "host") { + aValue = a.hosts?.friendly_name?.toLowerCase() || ""; + bValue = b.hosts?.friendly_name?.toLowerCase() || ""; + } + + if (sortField === "size") { + // BigInt comparison + if (aValue < bValue) return sortDirection === "asc" ? -1 : 1; + if (aValue > bValue) return sortDirection === "asc" ? 1 : -1; + } else if (sortField === "ref_count") { + // Number comparison + if (aValue < bValue) return sortDirection === "asc" ? -1 : 1; + if (aValue > bValue) return sortDirection === "asc" ? 1 : -1; + } else { + // String comparison + if (aValue < bValue) return sortDirection === "asc" ? -1 : 1; + if (aValue > bValue) return sortDirection === "asc" ? 1 : -1; + } + return 0; + }); + + return filtered; + }, [volumesData, searchTerm, sortField, sortDirection]); + + // Filter and sort networks + const filteredNetworks = useMemo(() => { + if (!networksData?.networks) return []; + let filtered = networksData.networks; + + if (searchTerm) { + const term = searchTerm.toLowerCase(); + filtered = filtered.filter( + (n) => + n.name.toLowerCase().includes(term) || + n.hosts?.friendly_name?.toLowerCase().includes(term), + ); + } + + filtered.sort((a, b) => { + let aValue, bValue; + if (sortField === "name") { + aValue = a.name?.toLowerCase() || ""; + bValue = b.name?.toLowerCase() || ""; + } else if (sortField === "driver") { + aValue = a.driver?.toLowerCase() || ""; + bValue = b.driver?.toLowerCase() || ""; + } else if (sortField === "containers") { + aValue = a.container_count || 0; + bValue = b.container_count || 0; + } else if (sortField === "host") { + aValue = a.hosts?.friendly_name?.toLowerCase() || ""; + bValue = b.hosts?.friendly_name?.toLowerCase() || ""; + } + + if (sortField === "containers") { + // Number comparison + if (aValue < bValue) return 
sortDirection === "asc" ? -1 : 1; + if (aValue > bValue) return sortDirection === "asc" ? 1 : -1; + } else { + // String comparison + if (aValue < bValue) return sortDirection === "asc" ? -1 : 1; + if (aValue > bValue) return sortDirection === "asc" ? 1 : -1; + } + return 0; + }); + + return filtered; + }, [networksData, searchTerm, sortField, sortDirection]); + const handleSort = (field) => { if (sortField === field) { setSortDirection(sortDirection === "asc" ? "desc" : "asc"); @@ -303,35 +475,64 @@ const Docker = () => { ); }; - const getSourceBadge = (source) => { - const badges = { - "docker-hub": ( - - Docker Hub - - ), - github: ( - - GitHub - - ), - gitlab: ( - - GitLab - - ), - private: ( - - Private - - ), + const getSourceBadge = (source, repository) => { + // Generate registry link if possible + const registryLink = repository + ? generateRegistryLink(repository, source) + : null; + + // Get display name + const displayName = getSourceDisplayName(source); + + // Color schemes for different sources + const colorSchemes = { + "docker-hub": + "bg-blue-100 text-blue-800 dark:bg-blue-900 dark:text-blue-200 hover:bg-blue-200 dark:hover:bg-blue-800", + github: + "bg-secondary-100 text-secondary-900 dark:bg-secondary-700 dark:text-white hover:bg-secondary-200 dark:hover:bg-secondary-600", + gitlab: + "bg-orange-100 text-orange-800 dark:bg-orange-900 dark:text-orange-200 hover:bg-orange-200 dark:hover:bg-orange-800", + google: + "bg-red-100 text-red-800 dark:bg-red-900 dark:text-red-200 hover:bg-red-200 dark:hover:bg-red-800", + quay: "bg-teal-100 text-teal-800 dark:bg-teal-900 dark:text-teal-200 hover:bg-teal-200 dark:hover:bg-teal-800", + redhat: + "bg-red-100 text-red-800 dark:bg-red-900 dark:text-red-200 hover:bg-red-200 dark:hover:bg-red-800", + azure: + "bg-blue-100 text-blue-800 dark:bg-blue-900 dark:text-blue-200 hover:bg-blue-200 dark:hover:bg-blue-800", + aws: "bg-yellow-100 text-yellow-800 dark:bg-yellow-900 dark:text-yellow-200 hover:bg-yellow-200 
dark:hover:bg-yellow-800", + private: + "bg-purple-100 text-purple-800 dark:bg-purple-900 dark:text-purple-200 hover:bg-purple-200 dark:hover:bg-purple-800", + local: + "bg-secondary-100 text-secondary-800 dark:bg-secondary-700 dark:text-secondary-200 hover:bg-secondary-200 dark:hover:bg-secondary-600", + unknown: + "bg-secondary-100 text-secondary-800 dark:bg-secondary-700 dark:text-secondary-200 hover:bg-secondary-200 dark:hover:bg-secondary-600", }; + + const colorScheme = colorSchemes[source] || colorSchemes.unknown; + + if (registryLink) { + // Return as clickable link + return ( + + {displayName} + + + ); + } + + // Return as non-clickable badge return ( - badges[source] || ( - - {source} - - ) + + {displayName} + ); }; @@ -354,6 +555,8 @@ const Docker = () => { // Trigger refresh based on active tab if (activeTab === "containers") refetchContainers(); else if (activeTab === "images") refetchImages(); + else if (activeTab === "volumes") refetchVolumes(); + else if (activeTab === "networks") refetchNetworks(); else window.location.reload(); }} className="btn-outline flex items-center justify-center p-2" @@ -431,7 +634,19 @@ const Docker = () => { -
+
+ {/* Docker List */} @@ -460,8 +675,9 @@ const Docker = () => { {[ { id: "containers", label: "Containers", icon: Container }, { id: "images", label: "Images", icon: Package }, + { id: "volumes", label: "Volumes", icon: HardDrive }, + { id: "networks", label: "Networks", icon: Network }, { id: "hosts", label: "Hosts", icon: Server }, - { id: "updates", label: "Updates", icon: AlertTriangle }, ].map((tab) => { const Icon = tab.icon; return ( @@ -471,12 +687,17 @@ const Docker = () => { onClick={() => { setActiveTab(tab.id); setSearchTerm(""); + setUpdatesFilter("all"); // Reset updates filter when switching tabs setSortField( tab.id === "containers" ? "status" : tab.id === "images" ? "repository" - : "name", + : tab.id === "volumes" + ? "name" + : tab.id === "networks" + ? "name" + : "name", ); setSortDirection("asc"); }} @@ -534,16 +755,47 @@ const Docker = () => { )} {activeTab === "images" && ( + <> + + + + )} + {(activeTab === "volumes" || activeTab === "networks") && ( )} @@ -774,7 +1026,7 @@ const Docker = () => { - {getSourceBadge(image.source)} + {getSourceBadge(image.source, image.repository)} {image._count?.docker_containers || 0} @@ -925,24 +1177,26 @@ const Docker = () => { )} - {/* Updates Tab */} - {activeTab === "updates" && ( + {/* Volumes Tab */} + {activeTab === "volumes" && (
- {updatesLoading ? ( + {volumesLoading ? (

- Loading updates... + Loading volumes...

- ) : !updatesData?.updates || updatesData.updates.length === 0 ? ( + ) : filteredVolumes.length === 0 ? (
- +

- All images up to date! + No volumes found

- No updates available for your Docker images + {searchTerm + ? "Try adjusting your search filters" + : "No Docker volumes detected"}

) : ( @@ -950,19 +1204,54 @@ const Docker = () => { - Image - - - Tag - - - Detection Method - - - Status + - Affected + + + + + + + + + + Actions @@ -970,59 +1259,260 @@ const Docker = () => { - {updatesData.updates.map((update) => ( + {filteredVolumes.map((volume) => (
- + - {update.docker_images?.repository} + {volume.name}
+ + + {volume.driver} + + + + {volume.size_bytes + ? `${(Number(volume.size_bytes) / 1024 / 1024 / 1024).toFixed(2)} GB` + : "-"} + + + {volume.ref_count > 0 ? ( + + {volume.ref_count} container + {volume.ref_count !== 1 ? "s" : ""} + + ) : ( + + Unused + + )} + + + + {volume.hosts?.friendly_name || + volume.hosts?.hostname || + "Unknown"} + + + +
+ + + + +
+ + + ))} + + + )} +
+ )} + + {/* Networks Tab */} + {activeTab === "networks" && ( +
+ {networksLoading ? ( +
+ +

+ Loading networks... +

+
+ ) : filteredNetworks.length === 0 ? ( +
+ +

+ No networks found +

+

+ {searchTerm + ? "Try adjusting your search filters" + : "No Docker networks detected"} +

+
+ ) : ( + + + + + + + + + + + + + + {filteredNetworks.map((network) => ( + + + - - - + + ))} @@ -1033,6 +1523,8 @@ const Docker = () => { + {/* Delete Container Modal */} + {/* Delete Container Modal */} {deleteContainerModal && (
@@ -1167,6 +1659,149 @@ const Docker = () => {
)} + + {/* Delete Volume Modal */} + {deleteVolumeModal && ( +
+
+
+
+ +
+
+

+ Delete Volume +

+
+

+ Are you sure you want to delete this volume from the + inventory? +

+
+

+ {deleteVolumeModal.name} +

+

+ Driver: {deleteVolumeModal.driver} +

+

+ Host:{" "} + {deleteVolumeModal.hosts?.friendly_name || + deleteVolumeModal.hosts?.hostname || + "Unknown"} +

+ {deleteVolumeModal.ref_count > 0 && ( +

+ In use by: {deleteVolumeModal.ref_count} container + {deleteVolumeModal.ref_count !== 1 ? "s" : ""} +

+ )} +
+

+ ⚠️ This only removes the volume from PatchMon's inventory. It + does NOT delete the actual Docker volume from the host. +

+
+
+
+
+ + +
+
+
+ )} + + {/* Delete Network Modal */} + {deleteNetworkModal && ( +
+
+
+
+ +
+
+

+ Delete Network +

+
+

+ Are you sure you want to delete this network from the + inventory? +

+
+

+ {deleteNetworkModal.name} +

+

+ Driver: {deleteNetworkModal.driver} +

+

+ Scope: {deleteNetworkModal.scope} +

+

+ Host:{" "} + {deleteNetworkModal.hosts?.friendly_name || + deleteNetworkModal.hosts?.hostname || + "Unknown"} +

+ {deleteNetworkModal.container_count > 0 && ( +

+ Connected containers:{" "} + {deleteNetworkModal.container_count} +

+ )} +
+

+ ⚠️ This only removes the network from PatchMon's inventory. + It does NOT delete the actual Docker network from the host. +

+
+
+
+
+ + +
+
+
+ )} ); }; diff --git a/frontend/src/pages/HostDetail.jsx b/frontend/src/pages/HostDetail.jsx index 750da57..7b943b2 100644 --- a/frontend/src/pages/HostDetail.jsx +++ b/frontend/src/pages/HostDetail.jsx @@ -12,6 +12,7 @@ import { Copy, Cpu, Database, + Download, Eye, EyeOff, HardDrive, @@ -53,6 +54,8 @@ const HostDetail = () => { const [historyLimit] = useState(10); const [notes, setNotes] = useState(""); const [notesMessage, setNotesMessage] = useState({ text: "", type: "" }); + const [updateMessage, setUpdateMessage] = useState({ text: "", jobId: "" }); + const [reportMessage, setReportMessage] = useState({ text: "", jobId: "" }); const { data: host, @@ -191,9 +194,50 @@ const HostDetail = () => { const forceAgentUpdateMutation = useMutation({ mutationFn: () => adminHostsAPI.forceAgentUpdate(hostId).then((res) => res.data), - onSuccess: () => { + onSuccess: (data) => { queryClient.invalidateQueries(["host", hostId]); queryClient.invalidateQueries(["hosts"]); + // Show success message with job ID + if (data?.jobId) { + setUpdateMessage({ + text: "Update queued successfully", + jobId: data.jobId, + }); + // Clear message after 5 seconds + setTimeout(() => setUpdateMessage({ text: "", jobId: "" }), 5000); + } + }, + onError: (error) => { + setUpdateMessage({ + text: error.response?.data?.error || "Failed to queue update", + jobId: "", + }); + setTimeout(() => setUpdateMessage({ text: "", jobId: "" }), 5000); + }, + }); + + // Fetch report mutation + const fetchReportMutation = useMutation({ + mutationFn: () => adminHostsAPI.fetchReport(hostId).then((res) => res.data), + onSuccess: (data) => { + queryClient.invalidateQueries(["host", hostId]); + queryClient.invalidateQueries(["hosts"]); + // Show success message with job ID + if (data?.jobId) { + setReportMessage({ + text: "Report fetch queued successfully", + jobId: data.jobId, + }); + // Clear message after 5 seconds + setTimeout(() => setReportMessage({ text: "", jobId: "" }), 5000); + } + }, + onError: (error) 
=> { + setReportMessage({ + text: error.response?.data?.error || "Failed to fetch report", + jobId: "", + }); + setTimeout(() => setReportMessage({ text: "", jobId: "" }), 5000); }, }); @@ -409,20 +453,53 @@ const HostDetail = () => {
+
+ + {reportMessage.text && ( +

+ {reportMessage.text} + {reportMessage.jobId && ( + + (Job #{reportMessage.jobId}) + + )} +

+ )} +
+ {updateMessage.text && ( +

+ {updateMessage.text} + {updateMessage.jobId && ( + + (Job #{updateMessage.jobId}) + + )} +

+ )}
diff --git a/frontend/src/pages/HostGroups.jsx b/frontend/src/pages/HostGroups.jsx index 6d4577e..b5e0d24 100644 --- a/frontend/src/pages/HostGroups.jsx +++ b/frontend/src/pages/HostGroups.jsx @@ -470,9 +470,18 @@ const EditHostGroupModal = ({ group, onClose, onSubmit, isLoading }) => { // Delete Confirmation Modal const DeleteHostGroupModal = ({ group, onClose, onConfirm, isLoading }) => { + // Fetch hosts for this group + const { data: hostsData } = useQuery({ + queryKey: ["hostGroupHosts", group?.id], + queryFn: () => hostGroupsAPI.getHosts(group.id).then((res) => res.data), + enabled: !!group && group._count?.hosts > 0, + }); + + const hosts = hostsData || []; + return (
-
+
@@ -494,12 +503,30 @@ const DeleteHostGroupModal = ({ group, onClose, onConfirm, isLoading }) => {

{group._count.hosts > 0 && (
-

+

Warning: This group contains{" "} {group._count.hosts} host {group._count.hosts !== 1 ? "s" : ""}. You must move or remove these hosts before deleting the group.

+ {hosts.length > 0 && ( +
+

+ Hosts in this group: +

+
+ {hosts.map((host) => ( +
+ + {host.friendly_name || host.hostname} +
+ ))} +
+
+ )}
)}
diff --git a/frontend/src/pages/Hosts.jsx b/frontend/src/pages/Hosts.jsx index dde4fd8..265a5a4 100644 --- a/frontend/src/pages/Hosts.jsx +++ b/frontend/src/pages/Hosts.jsx @@ -531,12 +531,11 @@ const Hosts = () => { "with new data:", data.host, ); - // Ensure hostGroupId is set correctly + // Host already has host_group_memberships from backend const updatedHost = { ...data.host, - hostGroupId: data.host.host_groups?.id || null, }; - console.log("Updated host with hostGroupId:", updatedHost); + console.log("Updated host in cache:", updatedHost); return updatedHost; } return host; @@ -654,11 +653,15 @@ const Hosts = () => { host.os_type?.toLowerCase().includes(searchTerm.toLowerCase()) || host.notes?.toLowerCase().includes(searchTerm.toLowerCase()); - // Group filter + // Group filter - handle multiple groups per host + const memberships = host.host_group_memberships || []; const matchesGroup = groupFilter === "all" || - (groupFilter === "ungrouped" && !host.host_groups) || - (groupFilter !== "ungrouped" && host.host_groups?.id === groupFilter); + (groupFilter === "ungrouped" && memberships.length === 0) || + (groupFilter !== "ungrouped" && + memberships.some( + (membership) => membership.host_groups?.id === groupFilter, + )); // Status filter const matchesStatus = @@ -711,10 +714,30 @@ const Hosts = () => { aValue = a.ip?.toLowerCase() || "zzz_no_ip"; bValue = b.ip?.toLowerCase() || "zzz_no_ip"; break; - case "group": - aValue = a.host_groups?.name || "zzz_ungrouped"; - bValue = b.host_groups?.name || "zzz_ungrouped"; + case "group": { + // Handle multiple groups per host - use first group alphabetically for sorting + const aGroups = a.host_group_memberships || []; + const bGroups = b.host_group_memberships || []; + if (aGroups.length === 0) { + aValue = "zzz_ungrouped"; + } else { + const aGroupNames = aGroups + .map((m) => m.host_groups?.name || "") + .filter((name) => name) + .sort(); + aValue = aGroupNames[0] || "zzz_ungrouped"; + } + if (bGroups.length === 0) 
{ + bValue = "zzz_ungrouped"; + } else { + const bGroupNames = bGroups + .map((m) => m.host_groups?.name || "") + .filter((name) => name) + .sort(); + bValue = bGroupNames[0] || "zzz_ungrouped"; + } break; + } case "os": aValue = a.os_type?.toLowerCase() || "zzz_unknown"; bValue = b.os_type?.toLowerCase() || "zzz_unknown"; @@ -787,27 +810,46 @@ const Hosts = () => { const groups = {}; filteredAndSortedHosts.forEach((host) => { - let groupKey; - switch (groupBy) { - case "group": - groupKey = host.host_groups?.name || "Ungrouped"; - break; - case "status": - groupKey = - (host.effectiveStatus || host.status).charAt(0).toUpperCase() + - (host.effectiveStatus || host.status).slice(1); - break; - case "os": - groupKey = host.os_type || "Unknown"; - break; - default: - groupKey = "All Hosts"; - } + if (groupBy === "group") { + // Handle multiple groups per host + const memberships = host.host_group_memberships || []; + if (memberships.length === 0) { + // Host has no groups, add to "Ungrouped" + if (!groups.Ungrouped) { + groups.Ungrouped = []; + } + groups.Ungrouped.push(host); + } else { + // Host has one or more groups, add to each group + memberships.forEach((membership) => { + const groupName = membership.host_groups?.name || "Unknown"; + if (!groups[groupName]) { + groups[groupName] = []; + } + groups[groupName].push(host); + }); + } + } else { + // Other grouping types (status, os, etc.) + let groupKey; + switch (groupBy) { + case "status": + groupKey = + (host.effectiveStatus || host.status).charAt(0).toUpperCase() + + (host.effectiveStatus || host.status).slice(1); + break; + case "os": + groupKey = host.os_type || "Unknown"; + break; + default: + groupKey = "All Hosts"; + } - if (!groups[groupKey]) { - groups[groupKey] = []; + if (!groups[groupKey]) { + groups[groupKey] = []; + } + groups[groupKey].push(host); } - groups[groupKey].push(host); }); return groups; @@ -1394,14 +1436,6 @@ const Hosts = () => { Hide Stale -
diff --git a/frontend/src/pages/Login.jsx b/frontend/src/pages/Login.jsx index 4141c48..3938358 100644 --- a/frontend/src/pages/Login.jsx +++ b/frontend/src/pages/Login.jsx @@ -407,7 +407,12 @@ const Login = () => { setTfaData({ ...tfaData, [name]: - type === "checkbox" ? checked : value.replace(/\D/g, "").slice(0, 6), + type === "checkbox" + ? checked + : value + .toUpperCase() + .replace(/[^A-Z0-9]/g, "") + .slice(0, 6), }); // Clear error when user starts typing if (error) { @@ -872,7 +877,8 @@ const Login = () => { Two-Factor Authentication

- Enter the 6-digit code from your authenticator app + Enter the code from your authenticator app, or use a backup + code

@@ -891,11 +897,15 @@ const Login = () => { required value={tfaData.token} onChange={handleTfaInputChange} - className="appearance-none rounded-md relative block w-full px-3 py-2 border border-secondary-300 placeholder-secondary-500 text-secondary-900 focus:outline-none focus:ring-primary-500 focus:border-primary-500 focus:z-10 sm:text-sm text-center text-lg font-mono tracking-widest" - placeholder="000000" + className="appearance-none rounded-md relative block w-full px-3 py-2 border border-secondary-300 placeholder-secondary-500 text-secondary-900 focus:outline-none focus:ring-primary-500 focus:border-primary-500 focus:z-10 sm:text-sm text-center text-lg font-mono tracking-widest uppercase" + placeholder="Enter code" maxLength="6" + pattern="[A-Z0-9]{6}" />
+

+ Enter a 6-digit TOTP code or a 6-character backup code +

@@ -955,12 +965,6 @@ const Login = () => { Back to Login
- -
-

- Don't have access to your authenticator? Use a backup code. -

-
)} diff --git a/frontend/src/pages/Options.jsx b/frontend/src/pages/Options.jsx index e623b61..f1bfa36 100644 --- a/frontend/src/pages/Options.jsx +++ b/frontend/src/pages/Options.jsx @@ -557,9 +557,18 @@ const EditHostGroupModal = ({ group, onClose, onSubmit, isLoading }) => { // Delete Confirmation Modal const DeleteHostGroupModal = ({ group, onClose, onConfirm, isLoading }) => { + // Fetch hosts for this group + const { data: hostsData } = useQuery({ + queryKey: ["hostGroupHosts", group?.id], + queryFn: () => hostGroupsAPI.getHosts(group.id).then((res) => res.data), + enabled: !!group && group._count?.hosts > 0, + }); + + const hosts = hostsData || []; + return (
-
+
@@ -581,12 +590,30 @@ const DeleteHostGroupModal = ({ group, onClose, onConfirm, isLoading }) => {

{group._count.hosts > 0 && (
-

+

Warning: This group contains{" "} {group._count.hosts} host {group._count.hosts !== 1 ? "s" : ""}. You must move or remove these hosts before deleting the group.

+ {hosts.length > 0 && ( +
+

+ Hosts in this group: +

+
+ {hosts.map((host) => ( +
+ + {host.friendly_name || host.hostname} +
+ ))} +
+
+ )}
)}
diff --git a/frontend/src/pages/Packages.jsx b/frontend/src/pages/Packages.jsx index fdd87b4..eb75c6c 100644 --- a/frontend/src/pages/Packages.jsx +++ b/frontend/src/pages/Packages.jsx @@ -539,7 +539,7 @@ const Packages = () => {

- Total Packages + Packages

{totalPackagesCount} @@ -553,7 +553,7 @@ const Packages = () => {

- Total Installations + Installations

{totalInstallationsCount} @@ -562,47 +562,72 @@ const Packages = () => {

-
+
+ -
-
- -
-

- Hosts Pending Updates -

-

- {uniquePackageHostsCount} -

-
-
-
- -
+
+ + +
{/* Packages List */} diff --git a/frontend/src/pages/Profile.jsx b/frontend/src/pages/Profile.jsx index 568f548..2404af4 100644 --- a/frontend/src/pages/Profile.jsx +++ b/frontend/src/pages/Profile.jsx @@ -564,6 +564,7 @@ const Profile = () => { // TFA Tab Component const TfaTab = () => { const verificationTokenId = useId(); + const disablePasswordId = useId(); const [setupStep, setSetupStep] = useState("status"); // 'status', 'setup', 'verify', 'backup-codes' const [verificationToken, setVerificationToken] = useState(""); const [password, setPassword] = useState(""); diff --git a/frontend/src/pages/docker/NetworkDetail.jsx b/frontend/src/pages/docker/NetworkDetail.jsx new file mode 100644 index 0000000..3bfe535 --- /dev/null +++ b/frontend/src/pages/docker/NetworkDetail.jsx @@ -0,0 +1,483 @@ +import { useQuery } from "@tanstack/react-query"; +import { + AlertTriangle, + ArrowLeft, + CheckCircle, + Container, + Globe, + Network, + RefreshCw, + Server, + Tag, + XCircle, +} from "lucide-react"; +import { Link, useParams } from "react-router-dom"; +import api, { formatRelativeTime } from "../../utils/api"; + +const NetworkDetail = () => { + const { id } = useParams(); + + const { data, isLoading, error } = useQuery({ + queryKey: ["docker", "network", id], + queryFn: async () => { + const response = await api.get(`/docker/networks/${id}`); + return response.data; + }, + refetchInterval: 30000, + }); + + const network = data?.network; + const host = data?.host; + + if (isLoading) { + return ( +
+ +
+ ); + } + + if (error || !network) { + return ( +
+
+
+ +
+

+ Network not found +

+

+ The network you're looking for doesn't exist or has been + removed. +

+
+
+
+ + + Back to Docker + +
+ ); + } + + const BooleanBadge = ({ value, trueLabel = "Yes", falseLabel = "No" }) => { + return value ? ( + + + {trueLabel} + + ) : ( + + + {falseLabel} + + ); + }; + + return ( +
+ {/* Header */} +
+ + + Back to Docker + +
+ +
+

+ {network.name} +

+

+ Network ID: {network.network_id.substring(0, 12)} +

+
+
+
+ + {/* Overview Cards */} +
+
+
+
+ +
+
+

+ Driver +

+

+ {network.driver} +

+
+
+
+ +
+
+
+ +
+
+

+ Scope +

+

+ {network.scope} +

+
+
+
+ +
+
+
+ +
+
+

+ Containers +

+

+ {network.container_count || 0} +

+
+
+
+ +
+
+
+ +
+
+

+ Last Checked +

+

+ {formatRelativeTime(network.last_checked)} +

+
+
+
+
+ + {/* Network Information Card */} +
+
+

+ Network Information +

+
+
+
+
+
+ Network ID +
+
+ {network.network_id} +
+
+
+
+ Name +
+
+ {network.name} +
+
+
+
+ Driver +
+
+ + {network.driver} + +
+
+
+
+ Scope +
+
+ + {network.scope} + +
+
+
+
+ Containers Attached +
+
+ {network.container_count || 0} +
+
+
+
+ IPv6 Enabled +
+
+ +
+
+
+
+ Internal +
+
+ +
+
+
+
+ Attachable +
+
+ +
+
+
+
+ Ingress +
+
+ +
+
+
+
+ Config Only +
+
+ +
+
+ {network.created_at && ( +
+
+ Created +
+
+ {formatRelativeTime(network.created_at)} +
+
+ )} +
+
+ Last Checked +
+
+ {formatRelativeTime(network.last_checked)} +
+
+
+
+
+ + {/* IPAM Configuration */} + {network.ipam && ( +
+
+

+ IPAM Configuration +

+

+ IP Address Management settings +

+
+
+ {network.ipam.driver && ( +
+
+ Driver +
+
+ + {network.ipam.driver} + +
+
+ )} + {network.ipam.config && network.ipam.config.length > 0 && ( +
+
+ Subnet Configuration +
+
+ {network.ipam.config.map((config, index) => ( +
+
+ {config.subnet && ( +
+
+ Subnet +
+
+ {config.subnet} +
+
+ )} + {config.gateway && ( +
+
+ Gateway +
+
+ {config.gateway} +
+
+ )} + {config.ip_range && ( +
+
+ IP Range +
+
+ {config.ip_range} +
+
+ )} + {config.aux_addresses && + Object.keys(config.aux_addresses).length > 0 && ( +
+
+ Auxiliary Addresses +
+
+ {Object.entries(config.aux_addresses).map( + ([key, value]) => ( +
+ + {key}: + + + {value} + +
+ ), + )} +
+
+ )} +
+
+ ))} +
+
+ )} + {network.ipam.options && + Object.keys(network.ipam.options).length > 0 && ( +
+
+ IPAM Options +
+
+ {Object.entries(network.ipam.options).map( + ([key, value]) => ( +
+ + {key} + + + {value} + +
+ ), + )} +
+
+ )} +
+
+ )} + + {/* Host Information */} + {host && ( +
+
+

+ + Host Information +

+
+
+
+
+
+ Hostname +
+
+ + {host.hostname} + +
+
+
+
+ Operating System +
+
+ {host.os_name} {host.os_version} +
+
+
+
+
+ )} + + {/* Labels */} + {network.labels && Object.keys(network.labels).length > 0 && ( +
+
+

+ + Labels +

+
+
+
+ {Object.entries(network.labels).map(([key, value]) => ( +
+ + {key} + + + {value} + +
+ ))} +
+
+
+ )} +
+ ); +}; + +export default NetworkDetail; diff --git a/frontend/src/pages/docker/VolumeDetail.jsx b/frontend/src/pages/docker/VolumeDetail.jsx new file mode 100644 index 0000000..7754066 --- /dev/null +++ b/frontend/src/pages/docker/VolumeDetail.jsx @@ -0,0 +1,359 @@ +import { useQuery } from "@tanstack/react-query"; +import { + AlertTriangle, + ArrowLeft, + Database, + HardDrive, + RefreshCw, + Server, + Tag, +} from "lucide-react"; +import { Link, useParams } from "react-router-dom"; +import api, { formatRelativeTime } from "../../utils/api"; + +const VolumeDetail = () => { + const { id } = useParams(); + + const { data, isLoading, error } = useQuery({ + queryKey: ["docker", "volume", id], + queryFn: async () => { + const response = await api.get(`/docker/volumes/${id}`); + return response.data; + }, + refetchInterval: 30000, + }); + + const volume = data?.volume; + const host = data?.host; + + if (isLoading) { + return ( +
+ +
+ ); + } + + if (error || !volume) { + return ( +
+
+
+ +
+

+ Volume not found +

+

+ The volume you're looking for doesn't exist or has been removed. +

+
+
+
+ + + Back to Docker + +
+ ); + } + + const formatBytes = (bytes) => { + if (bytes === null || bytes === undefined) return "N/A"; + const sizes = ["Bytes", "KB", "MB", "GB", "TB"]; + if (bytes === 0) return "0 Bytes"; + const i = Math.floor(Math.log(bytes) / Math.log(1024)); + return `${Math.round((bytes / 1024 ** i) * 100) / 100} ${sizes[i]}`; + }; + + return ( +
+ {/* Header */} +
+ + + Back to Docker + +
+ +
+

+ {volume.name} +

+

+ Volume ID: {volume.volume_id} +

+
+
+
+ + {/* Overview Cards */} +
+
+
+
+ +
+
+

+ Driver +

+

+ {volume.driver} +

+
+
+
+ +
+
+
+ +
+
+

Size

+

+ {formatBytes(volume.size_bytes)} +

+
+
+
+ +
+
+
+ +
+
+

+ Containers +

+

+ {volume.ref_count || 0} +

+
+
+
+ +
+
+
+ +
+
+

+ Last Checked +

+

+ {formatRelativeTime(volume.last_checked)} +

+
+
+
+
+ + {/* Volume Information Card */} +
+
+

+ Volume Information +

+
+
+
+
+
+ Volume ID +
+
+ {volume.volume_id} +
+
+
+
+ Name +
+
+ {volume.name} +
+
+
+
+ Driver +
+
+ + {volume.driver} + +
+
+
+
+ Scope +
+
+ + {volume.scope} + +
+
+
+
+ Size +
+
+ {formatBytes(volume.size_bytes)} +
+
+
+
+ Containers Using +
+
+ {volume.ref_count || 0} +
+
+ {volume.mountpoint && ( +
+
+ Mount Point +
+
+ {volume.mountpoint} +
+
+ )} + {volume.renderer && ( +
+
+ Renderer +
+
+ {volume.renderer} +
+
+ )} +
+
+ Created +
+
+ {formatRelativeTime(volume.created_at)} +
+
+
+
+ Last Checked +
+
+ {formatRelativeTime(volume.last_checked)} +
+
+
+
+
+ + {/* Host Information */} + {host && ( +
+
+

+ + Host Information +

+
+
+
+
+
+ Hostname +
+
+ + {host.hostname} + +
+
+
+
+ Operating System +
+
+ {host.os_name} {host.os_version} +
+
+
+
+
+ )} + + {/* Labels */} + {volume.labels && Object.keys(volume.labels).length > 0 && ( +
+
+

+ + Labels +

+
+
+
+ {Object.entries(volume.labels).map(([key, value]) => ( +
+ + {key} + + + {value} + +
+ ))} +
+
+
+ )} + + {/* Options */} + {volume.options && Object.keys(volume.options).length > 0 && ( +
+
+

+ Volume Options +

+
+
+
+ {Object.entries(volume.options).map(([key, value]) => ( +
+ + {key} + + + {value} + +
+ ))} +
+
+
+ )} +
+ ); +}; + +export default VolumeDetail; diff --git a/frontend/src/pages/settings/Integrations.jsx b/frontend/src/pages/settings/Integrations.jsx index 3f021dc..1d3d493 100644 --- a/frontend/src/pages/settings/Integrations.jsx +++ b/frontend/src/pages/settings/Integrations.jsx @@ -746,239 +746,126 @@ const Integrations = () => {

- Docker Container Monitoring + Docker Inventory Collection

- Monitor Docker containers and images for available updates + Docker monitoring is now built into the PatchMon Go agent

- {/* Installation Instructions */} + {/* Info Message */}
-

- Agent Installation -

-
    +
    + +
    +

    + Automatic Docker Discovery +

    +

    + The PatchMon Go agent automatically discovers Docker + when it's available on your host and collects + comprehensive inventory information: +

    +
      +
    • + Containers - Running and stopped + containers with status, images, ports, and labels +
    • +
    • + Images - All Docker images with + repository, tags, sizes, and sources +
    • +
    • + Volumes - Named and anonymous volumes + with drivers, mountpoints, and usage +
    • +
    • + Networks - Docker networks with + drivers, IPAM configuration, and connected containers +
    • +
    • + Real-time Updates - Container status + changes are pushed instantly via WebSocket +
    • +
    +
    +
    +
+ + {/* How It Works */} +
+

+ How It Works +

+
  1. - Make sure you have the PatchMon credentials file set up on - your host ( - - /etc/patchmon/credentials - - ) + Install the PatchMon Go agent on your host (see the Hosts + page for installation instructions)
  2. - SSH into your Docker host where you want to monitor - containers + The agent automatically detects if Docker is installed and + running on the host
  3. -
  4. Run the installation command below
  5. - The agent will automatically collect Docker container and - image information every 5 minutes + During each collection cycle, the agent gathers Docker + inventory data and sends it to the PatchMon server +
  6. +
  7. + View your complete Docker inventory (containers, images, + volumes, networks) in the{" "} + + Docker page + +
  8. +
  9. + Container status changes are pushed to the server in + real-time via WebSocket connection
  10. -
  11. View your Docker inventory in the Docker page
- {/* Installation Command */} -
-

- Quick Installation (One-Line Command) -

-
-
-
- Download and install the Docker agent: -
-
- - -
-

- 💡 This will download the agent, make it executable, and - set up a cron job to run every 5 minutes -

-
-
-
- - {/* Manual Installation Steps */} -
-

- Manual Installation Steps -

-
-
-

- Step 1: Download the agent -

-
- - -
-
- -
-

- Step 2: Make it executable -

-
- - -
-
- -
-

- Step 3: Test the agent -

-
- - -
-
- -
-

- Step 4: Set up automatic collection - (every 5 minutes) -

-
- - -
-
-
-
- - {/* Prerequisites */} -
+ {/* No Configuration Required */} +
- -
-

Prerequisites:

+ +
+

+ No Additional Configuration Required +

+

+ Once the Go agent is installed and Docker is running on + your host, Docker inventory collection happens + automatically. No separate Docker agent or cron jobs + needed. +

+
+
+
+ + {/* Requirements */} +
+
+ +
+

Requirements:

    +
  • PatchMon Go agent must be installed and running
  • +
  • Docker daemon must be installed and running
  • - Docker must be installed and running on the host -
  • -
  • - PatchMon credentials file must exist at{" "} - - /etc/patchmon/credentials + Agent must have access to the Docker socket ( + + /var/run/docker.sock + )
  • - The host must have network access to your PatchMon - server + Typically requires running the agent as root or with + Docker group permissions
  • -
  • The agent must run as root (or with sudo)
diff --git a/frontend/src/pages/settings/SettingsHostGroups.jsx b/frontend/src/pages/settings/SettingsHostGroups.jsx index e3111ad..7682ffc 100644 --- a/frontend/src/pages/settings/SettingsHostGroups.jsx +++ b/frontend/src/pages/settings/SettingsHostGroups.jsx @@ -215,8 +215,8 @@ const SettingsHostGroups = () => { title={`View hosts in ${group.name}`} > - {group._count.hosts} host - {group._count.hosts !== 1 ? "s" : ""} + {group._count?.hosts || 0} host + {group._count?.hosts !== 1 ? "s" : ""}
+ + + + + Scope + + + + Flags + + + + Actions +
+
+ + + {network.name} + +
+
+ + {network.driver} + + - {update.current_tag} + {network.scope} - - - Digest - - - - - Available - - - {update.affectedContainersCount} container - {update.affectedContainersCount !== 1 ? "s" : ""} - {update.affectedHosts?.length > 0 && ( - - {" "} - on {update.affectedHosts.length} host - {update.affectedHosts.length !== 1 ? "s" : ""} + + {network.container_count > 0 ? ( + + {network.container_count} + + ) : ( + + 0 )} +
+ {network.internal && ( + + I + + )} + {network.ipv6_enabled && ( + + 6 + + )} + {network.ingress && ( + + S + + )} + {!network.internal && + !network.ipv6_enabled && + !network.ingress && ( + + - + + )} +
+
- + {network.hosts?.friendly_name || + network.hosts?.hostname || + "Unknown"} +
+ + + + +
+
@@ -539,9 +539,18 @@ const EditHostGroupModal = ({ group, onClose, onSubmit, isLoading }) => { // Delete Confirmation Modal const DeleteHostGroupModal = ({ group, onClose, onConfirm, isLoading }) => { + // Fetch hosts for this group + const { data: hostsData } = useQuery({ + queryKey: ["hostGroupHosts", group?.id], + queryFn: () => hostGroupsAPI.getHosts(group.id).then((res) => res.data), + enabled: !!group && group._count?.hosts > 0, + }); + + const hosts = hostsData || []; + return (
-
+
@@ -561,14 +570,32 @@ const DeleteHostGroupModal = ({ group, onClose, onConfirm, isLoading }) => { Are you sure you want to delete the host group{" "} "{group.name}"?

- {group._count.hosts > 0 && ( + {group._count?.hosts > 0 && (
-

- Note: This group contains {group._count.hosts}{" "} +

+ Note: This group contains {group._count?.hosts}{" "} host - {group._count.hosts !== 1 ? "s" : ""}. These hosts will be moved - to "No group" after deletion. + {group._count?.hosts !== 1 ? "s" : ""}. These hosts will be + moved to "No group" after deletion.

+ {hosts.length > 0 && ( +
+

+ Hosts in this group: +

+
+ {hosts.map((host) => ( +
+ + {host.friendly_name || host.hostname} +
+ ))} +
+
+ )}
)}
diff --git a/frontend/src/utils/api.js b/frontend/src/utils/api.js index 75983bb..5fe509b 100644 --- a/frontend/src/utils/api.js +++ b/frontend/src/utils/api.js @@ -19,6 +19,30 @@ api.interceptors.request.use( if (token) { config.headers.Authorization = `Bearer ${token}`; } + + // Add device ID for TFA remember-me functionality + // This uniquely identifies the browser profile (normal vs incognito) + let deviceId = localStorage.getItem("device_id"); + if (!deviceId) { + // Generate a unique device ID and store it + // Use crypto.randomUUID() if available, otherwise generate a UUID v4 manually + if (typeof crypto !== "undefined" && crypto.randomUUID) { + deviceId = crypto.randomUUID(); + } else { + // Fallback: Generate UUID v4 manually + deviceId = "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace( + /[xy]/g, + (c) => { + const r = (Math.random() * 16) | 0; + const v = c === "x" ? r : (r & 0x3) | 0x8; + return v.toString(16); + }, + ); + } + localStorage.setItem("device_id", deviceId); + } + config.headers["X-Device-ID"] = deviceId; + return config; }, (error) => { @@ -96,6 +120,7 @@ export const adminHostsAPI = { toggleAutoUpdate: (hostId, autoUpdate) => api.patch(`/hosts/${hostId}/auto-update`, { auto_update: autoUpdate }), forceAgentUpdate: (hostId) => api.post(`/hosts/${hostId}/force-agent-update`), + fetchReport: (hostId) => api.post(`/hosts/${hostId}/fetch-report`), updateFriendlyName: (hostId, friendlyName) => api.patch(`/hosts/${hostId}/friendly-name`, { friendly_name: friendlyName, diff --git a/frontend/src/utils/docker.js b/frontend/src/utils/docker.js new file mode 100644 index 0000000..d72fa51 --- /dev/null +++ b/frontend/src/utils/docker.js @@ -0,0 +1,171 @@ +/** + * Docker-related utility functions for the frontend + */ + +/** + * Generate a registry link for a Docker image based on its repository and source + * @param {string} repository - The full repository name (e.g., "ghcr.io/owner/repo") + * @param {string} source - The detected source (github, 
gitlab, docker-hub, etc.) + * @returns {string|null} - The URL to the registry page, or null if unknown + */ +export function generateRegistryLink(repository, source) { + if (!repository) { + return null; + } + + // Parse the domain and path from the repository + const parts = repository.split("/"); + let domain = ""; + let path = ""; + + // Check if repository has a domain (contains a dot) + if (parts[0].includes(".") || parts[0].includes(":")) { + domain = parts[0]; + path = parts.slice(1).join("/"); + } else { + // No domain means Docker Hub + domain = "docker.io"; + path = repository; + } + + switch (source) { + case "docker-hub": + case "docker.io": { + // Docker Hub: https://hub.docker.com/r/{path} or https://hub.docker.com/_/{path} for official images + // Official images are those without a namespace (e.g., "postgres" not "user/postgres") + // or explicitly prefixed with "library/" + if (path.startsWith("library/")) { + const cleanPath = path.replace("library/", ""); + return `https://hub.docker.com/_/${cleanPath}`; + } + // Check if it's an official image (single part, no slash after removing library/) + if (!path.includes("/")) { + return `https://hub.docker.com/_/${path}`; + } + // Regular user/org image + return `https://hub.docker.com/r/${path}`; + } + + case "github": + case "ghcr.io": { + // GitHub Container Registry + // Format: ghcr.io/{owner}/{package} or ghcr.io/{owner}/{repo}/{package} + // URL format: https://github.com/{owner}/{repo}/pkgs/container/{package} + if (domain === "ghcr.io" && path) { + const pathParts = path.split("/"); + if (pathParts.length === 2) { + // Simple case: ghcr.io/owner/package -> github.com/owner/owner/pkgs/container/package + // OR: ghcr.io/owner/repo -> github.com/owner/repo/pkgs/container/{package} + // Actually, for 2 parts it's owner/package, and repo is same as owner typically + const owner = pathParts[0]; + const packageName = pathParts[1]; + return 
`https://github.com/${owner}/${owner}/pkgs/container/${packageName}`; + } else if (pathParts.length >= 3) { + // Extended case: ghcr.io/owner/repo/package -> github.com/owner/repo/pkgs/container/package + const owner = pathParts[0]; + const repo = pathParts[1]; + const packageName = pathParts.slice(2).join("/"); + return `https://github.com/${owner}/${repo}/pkgs/container/${packageName}`; + } + } + // Legacy GitHub Packages + if (domain === "docker.pkg.github.com" && path) { + const pathParts = path.split("/"); + if (pathParts.length >= 1) { + return `https://github.com/${pathParts[0]}/packages`; + } + } + return null; + } + + case "gitlab": + case "registry.gitlab.com": { + // GitLab Container Registry + if (path) { + return `https://gitlab.com/${path}/container_registry`; + } + return null; + } + + case "google": + case "gcr.io": { + // Google Container Registry + if (domain.includes("gcr.io") || domain.includes("pkg.dev")) { + return `https://console.cloud.google.com/gcr/images/${path}`; + } + return null; + } + + case "quay": + case "quay.io": { + // Quay.io + if (path) { + return `https://quay.io/repository/${path}`; + } + return null; + } + + case "redhat": + case "registry.access.redhat.com": { + // Red Hat + if (path) { + return `https://access.redhat.com/containers/#/registry.access.redhat.com/${path}`; + } + return null; + } + + case "azure": + case "azurecr.io": { + // Azure Container Registry + if (domain.includes("azurecr.io")) { + const registryName = domain.split(".")[0]; + return `https://portal.azure.com/#view/Microsoft_Azure_ContainerRegistries/RepositoryBlade/registryName/${registryName}/repositoryName/${path}`; + } + return null; + } + + case "aws": + case "amazonaws.com": { + // AWS ECR + if (domain.includes("amazonaws.com")) { + const domainParts = domain.split("."); + const region = domainParts[3]; // Extract region + return `https://${region}.console.aws.amazon.com/ecr/repositories/private/${path}`; + } + return null; + } + + case "private": 
+ // For private registries, try to construct a basic URL + if (domain) { + return `https://${domain}`; + } + return null; + + default: + return null; + } +} + +/** + * Get a user-friendly display name for a registry source + * @param {string} source - The source identifier + * @returns {string} - Human-readable source name + */ +export function getSourceDisplayName(source) { + const sourceNames = { + "docker-hub": "Docker Hub", + github: "GitHub", + gitlab: "GitLab", + google: "Google", + quay: "Quay.io", + redhat: "Red Hat", + azure: "Azure", + aws: "AWS ECR", + private: "Private Registry", + local: "Local", + unknown: "Unknown", + }; + + return sourceNames[source] || source; +} diff --git a/package.json b/package.json index 05d7347..7c43d31 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "patchmon", - "version": "1.3.1", + "version": "1.3.2", "description": "Linux Patch Monitoring System", "license": "AGPL-3.0", "private": true, diff --git a/setup.sh b/setup.sh index b278cb3..bb2501e 100755 --- a/setup.sh +++ b/setup.sh @@ -34,7 +34,7 @@ BLUE='\033[0;34m' NC='\033[0m' # No Color # Global variables -SCRIPT_VERSION="self-hosting-install.sh v1.3.0-selfhost-2025-10-19-1" +SCRIPT_VERSION="self-hosting-install.sh v1.3.2-selfhost-2025-10-31-1" DEFAULT_GITHUB_REPO="https://github.com/PatchMon/PatchMon.git" FQDN="" CUSTOM_FQDN="" @@ -2197,34 +2197,66 @@ select_installation_to_update() { version=$(grep '"version"' "/opt/$install/backend/package.json" | head -1 | sed 's/.*"version": "\([^"]*\)".*/\1/') fi - # Get service status - try multiple naming conventions - # Convention 1: Just the install name (e.g., patchmon.internal) - local service_name="$install" - # Convention 2: patchmon. prefix (e.g., patchmon.patchmon.internal) - local alt_service_name1="patchmon.$install" - # Convention 3: patchmon- prefix with underscores (e.g., patchmon-patchmon_internal) - local alt_service_name2="patchmon-$(echo "$install" | tr '.' 
'_')" + # Get service status - search for service files that reference this installation + local service_name="" local status="unknown" - # Try convention 1 first (most common) - if systemctl is-active --quiet "$service_name" 2>/dev/null; then - status="running" - elif systemctl is-enabled --quiet "$service_name" 2>/dev/null; then - status="stopped" - # Try convention 2 - elif systemctl is-active --quiet "$alt_service_name1" 2>/dev/null; then - status="running" - service_name="$alt_service_name1" - elif systemctl is-enabled --quiet "$alt_service_name1" 2>/dev/null; then - status="stopped" - service_name="$alt_service_name1" - # Try convention 3 - elif systemctl is-active --quiet "$alt_service_name2" 2>/dev/null; then - status="running" - service_name="$alt_service_name2" - elif systemctl is-enabled --quiet "$alt_service_name2" 2>/dev/null; then - status="stopped" - service_name="$alt_service_name2" + # Search systemd directory for service files that reference this installation + for service_file in /etc/systemd/system/*.service; do + if [ -f "$service_file" ]; then + # Check if this service file references our installation directory + if grep -q "/opt/$install" "$service_file"; then + service_name=$(basename "$service_file" .service) + + # Check service status + if systemctl is-active --quiet "$service_name" 2>/dev/null; then + status="running" + break + elif systemctl is-enabled --quiet "$service_name" 2>/dev/null; then + status="stopped" + break + fi + fi + fi + done + + # If not found by searching, try common naming conventions + if [ -z "$service_name" ] || [ "$status" == "unknown" ]; then + # Convention 1: Just the install name (e.g., patchmon.internal) + local try_service="$install" + # Convention 2: patchmon. prefix (e.g., patchmon.patchmon.internal) + local alt_service_name1="patchmon.$install" + # Convention 3: patchmon- prefix with underscores (e.g., patchmon-patchmon_internal) + local alt_service_name2="patchmon-$(echo "$install" | tr '.' 
'_')" + + # Try convention 1 first (most common) + if systemctl is-active --quiet "$try_service" 2>/dev/null; then + status="running" + service_name="$try_service" + elif systemctl is-enabled --quiet "$try_service" 2>/dev/null; then + status="stopped" + service_name="$try_service" + # Try convention 2 + elif systemctl is-active --quiet "$alt_service_name1" 2>/dev/null; then + status="running" + service_name="$alt_service_name1" + elif systemctl is-enabled --quiet "$alt_service_name1" 2>/dev/null; then + status="stopped" + service_name="$alt_service_name1" + # Try convention 3 + elif systemctl is-active --quiet "$alt_service_name2" 2>/dev/null; then + status="running" + service_name="$alt_service_name2" + elif systemctl is-enabled --quiet "$alt_service_name2" 2>/dev/null; then + status="stopped" + service_name="$alt_service_name2" + fi + fi + + # Fallback: if still no service found, use default naming convention + if [ -z "$service_name" ]; then + service_name="$install" + status="not_found" fi printf "%2d. %-30s (v%-10s - %s)\n" "$i" "$install" "$version" "$status" @@ -3072,11 +3104,16 @@ update_installation() { # Clean up any untracked files that might conflict with incoming changes print_info "Cleaning up untracked files to prevent merge conflicts..." - git clean -fd + git clean -fd 2>/dev/null || true # Reset any local changes to ensure clean state + # Check if HEAD exists before trying to reset print_info "Resetting local changes to ensure clean state..." - git reset --hard HEAD + if git rev-parse --verify HEAD >/dev/null 2>&1; then + git reset --hard HEAD + else + print_warning "HEAD not found, skipping reset (fresh repository or detached state)" + fi # Fetch latest changes git fetch origin