mirror of https://github.com/9technologygroup/patchmon.net.git
synced 2025-11-12 18:06:39 +00:00

Docker implementation
Profile fixes, Hostgroup fixes, TFA fixes
@@ -17,6 +17,7 @@ const {
	refresh_access_token,
	revoke_session,
	revoke_all_user_sessions,
+	generate_device_fingerprint,
} = require("../utils/session_manager");

const router = express.Router();

@@ -788,11 +789,39 @@ router.post(

		// Check if TFA is enabled
		if (user.tfa_enabled) {
-			return res.status(200).json({
-				message: "TFA verification required",
-				requiresTfa: true,
-				username: user.username,
-			});
+			// Get device fingerprint from X-Device-ID header
+			const device_fingerprint = generate_device_fingerprint(req);
+
+			// Check if this device has a valid TFA bypass
+			if (device_fingerprint) {
+				const remembered_session = await prisma.user_sessions.findFirst({
+					where: {
+						user_id: user.id,
+						device_fingerprint: device_fingerprint,
+						tfa_remember_me: true,
+						tfa_bypass_until: { gt: new Date() }, // Bypass still valid
+					},
+				});
+
+				if (remembered_session) {
+					// Device is remembered and bypass is still valid - skip TFA
+					// Continue with login below
+				} else {
+					// No valid bypass for this device - require TFA
+					return res.status(200).json({
+						message: "TFA verification required",
+						requiresTfa: true,
+						username: user.username,
+					});
+				}
+			} else {
+				// No device ID provided - require TFA
+				return res.status(200).json({
+					message: "TFA verification required",
+					requiresTfa: true,
+					username: user.username,
+				});
+			}
		}

		// Update last login

@@ -807,7 +836,13 @@ router.post(
		// Create session with access and refresh tokens
		const ip_address = req.ip || req.connection.remoteAddress;
		const user_agent = req.get("user-agent");
-		const session = await create_session(user.id, ip_address, user_agent);
+		const session = await create_session(
+			user.id,
+			ip_address,
+			user_agent,
+			false,
+			req,
+		);

		res.json({
			message: "Login successful",

@@ -841,8 +876,10 @@ router.post(
		body("username").notEmpty().withMessage("Username is required"),
		body("token")
			.isLength({ min: 6, max: 6 })
-			.withMessage("Token must be 6 digits"),
-		body("token").isNumeric().withMessage("Token must contain only numbers"),
+			.withMessage("Token must be 6 characters"),
+		body("token")
+			.matches(/^[A-Z0-9]{6}$/)
+			.withMessage("Token must be 6 alphanumeric characters"),
		body("remember_me")
			.optional()
			.isBoolean()
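For context, the X-Device-ID header consumed by generate_device_fingerprint has to be supplied by the frontend. A minimal client-side sketch, assuming a localStorage-backed device ID and a /api/v1/auth/login mount path (neither is shown in this diff):

// Sketch only: persist a per-browser device ID and send it on login.
function getDeviceId() {
	let id = localStorage.getItem("patchmon_device_id"); // storage key assumed
	if (!id) {
		id = crypto.randomUUID();
		localStorage.setItem("patchmon_device_id", id);
	}
	return id;
}

async function login(username, password) {
	const res = await fetch("/api/v1/auth/login", { // mount path assumed
		method: "POST",
		headers: {
			"Content-Type": "application/json",
			"X-Device-ID": getDeviceId(),
		},
		body: JSON.stringify({ username, password }),
	});
	return res.json(); // may be { requiresTfa: true, ... } per the route above
}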
@@ -573,6 +573,7 @@ router.post("/collect", async (req, res) => {
|
||||
image_id: containerData.image_id || "unknown",
|
||||
source: containerData.image_source || "docker-hub",
|
||||
created_at: parseDate(containerData.created_at),
|
||||
last_checked: now,
|
||||
updated_at: now,
|
||||
},
|
||||
});
|
||||
@@ -822,6 +823,7 @@ router.post("/../integrations/docker", async (req, res) => {
|
||||
image_id: containerData.image_id || "unknown",
|
||||
source: containerData.image_source || "docker-hub",
|
||||
created_at: parseDate(containerData.created_at),
|
||||
last_checked: now,
|
||||
updated_at: now,
|
||||
},
|
||||
});
|
||||
@@ -876,6 +878,12 @@ router.post("/../integrations/docker", async (req, res) => {
|
||||
if (images && Array.isArray(images)) {
|
||||
console.log(`[Docker Integration] Processing ${images.length} images`);
|
||||
for (const imageData of images) {
|
||||
// If image has no digest, it's likely locally built - override source to "local"
|
||||
const imageSource =
|
||||
!imageData.digest || imageData.digest.trim() === ""
|
||||
? "local"
|
||||
: imageData.source || "docker-hub";
|
||||
|
||||
await prisma.docker_images.upsert({
|
||||
where: {
|
||||
repository_tag_image_id: {
|
||||
@@ -889,6 +897,7 @@ router.post("/../integrations/docker", async (req, res) => {
|
||||
? BigInt(imageData.size_bytes)
|
||||
: null,
|
||||
digest: imageData.digest || null,
|
||||
source: imageSource, // Update source in case it changed
|
||||
last_checked: now,
|
||||
updated_at: now,
|
||||
},
|
||||
@@ -901,8 +910,9 @@ router.post("/../integrations/docker", async (req, res) => {
|
||||
size_bytes: imageData.size_bytes
|
||||
? BigInt(imageData.size_bytes)
|
||||
: null,
|
||||
source: imageData.source || "docker-hub",
|
||||
source: imageSource,
|
||||
created_at: parseDate(imageData.created_at),
|
||||
last_checked: now,
|
||||
updated_at: now,
|
||||
},
|
||||
});
|
||||
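The imageSource ternary above encodes a single heuristic: an image without a registry digest is assumed to be locally built. Pulled out as a standalone sketch (hypothetical helper name, not part of the diff):

// Hypothetical helper equivalent to the inline ternary above.
function resolveImageSource(imageData) {
	const digest = (imageData.digest || "").trim();
	return digest === "" ? "local" : imageData.source || "docker-hub";
}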
@@ -1062,6 +1072,172 @@ router.delete("/images/:id", authenticateToken, async (req, res) => {
	}
});

// GET /api/v1/docker/volumes - Get all volumes with filters
router.get("/volumes", authenticateToken, async (req, res) => {
	try {
		const { driver, search, page = 1, limit = 50 } = req.query;

		const where = {};
		if (driver) where.driver = driver;
		if (search) {
			where.OR = [{ name: { contains: search, mode: "insensitive" } }];
		}

		const skip = (parseInt(page, 10) - 1) * parseInt(limit, 10);
		const take = parseInt(limit, 10);

		const [volumes, total] = await Promise.all([
			prisma.docker_volumes.findMany({
				where,
				include: {
					hosts: {
						select: {
							id: true,
							friendly_name: true,
							hostname: true,
							ip: true,
						},
					},
				},
				orderBy: { updated_at: "desc" },
				skip,
				take,
			}),
			prisma.docker_volumes.count({ where }),
		]);

		res.json(
			convertBigIntToString({
				volumes,
				pagination: {
					page: parseInt(page, 10),
					limit: parseInt(limit, 10),
					total,
					totalPages: Math.ceil(total / parseInt(limit, 10)),
				},
			}),
		);
	} catch (error) {
		console.error("Error fetching volumes:", error);
		res.status(500).json({ error: "Failed to fetch volumes" });
	}
});

// GET /api/v1/docker/volumes/:id - Get volume detail
router.get("/volumes/:id", authenticateToken, async (req, res) => {
	try {
		const { id } = req.params;

		const volume = await prisma.docker_volumes.findUnique({
			where: { id },
			include: {
				hosts: {
					select: {
						id: true,
						friendly_name: true,
						hostname: true,
						ip: true,
						os_type: true,
						os_version: true,
					},
				},
			},
		});

		if (!volume) {
			return res.status(404).json({ error: "Volume not found" });
		}

		res.json(convertBigIntToString({ volume }));
	} catch (error) {
		console.error("Error fetching volume detail:", error);
		res.status(500).json({ error: "Failed to fetch volume detail" });
	}
});

// GET /api/v1/docker/networks - Get all networks with filters
router.get("/networks", authenticateToken, async (req, res) => {
	try {
		const { driver, search, page = 1, limit = 50 } = req.query;

		const where = {};
		if (driver) where.driver = driver;
		if (search) {
			where.OR = [{ name: { contains: search, mode: "insensitive" } }];
		}

		const skip = (parseInt(page, 10) - 1) * parseInt(limit, 10);
		const take = parseInt(limit, 10);

		const [networks, total] = await Promise.all([
			prisma.docker_networks.findMany({
				where,
				include: {
					hosts: {
						select: {
							id: true,
							friendly_name: true,
							hostname: true,
							ip: true,
						},
					},
				},
				orderBy: { updated_at: "desc" },
				skip,
				take,
			}),
			prisma.docker_networks.count({ where }),
		]);

		res.json(
			convertBigIntToString({
				networks,
				pagination: {
					page: parseInt(page, 10),
					limit: parseInt(limit, 10),
					total,
					totalPages: Math.ceil(total / parseInt(limit, 10)),
				},
			}),
		);
	} catch (error) {
		console.error("Error fetching networks:", error);
		res.status(500).json({ error: "Failed to fetch networks" });
	}
});

// GET /api/v1/docker/networks/:id - Get network detail
router.get("/networks/:id", authenticateToken, async (req, res) => {
	try {
		const { id } = req.params;

		const network = await prisma.docker_networks.findUnique({
			where: { id },
			include: {
				hosts: {
					select: {
						id: true,
						friendly_name: true,
						hostname: true,
						ip: true,
						os_type: true,
						os_version: true,
					},
				},
			},
		});

		if (!network) {
			return res.status(404).json({ error: "Network not found" });
		}

		res.json(convertBigIntToString({ network }));
	} catch (error) {
		console.error("Error fetching network detail:", error);
		res.status(500).json({ error: "Failed to fetch network detail" });
	}
});

// GET /api/v1/docker/agent - Serve the Docker agent installation script
router.get("/agent", async (_req, res) => {
	try {

@@ -1093,4 +1269,66 @@ router.get("/agent", async (_req, res) => {
	}
});

// DELETE /api/v1/docker/volumes/:id - Delete a volume
router.delete("/volumes/:id", authenticateToken, async (req, res) => {
	try {
		const { id } = req.params;

		// Check if volume exists
		const volume = await prisma.docker_volumes.findUnique({
			where: { id },
		});

		if (!volume) {
			return res.status(404).json({ error: "Volume not found" });
		}

		// Delete the volume
		await prisma.docker_volumes.delete({
			where: { id },
		});

		console.log(`🗑️ Deleted volume: ${volume.name} (${id})`);

		res.json({
			success: true,
			message: `Volume ${volume.name} deleted successfully`,
		});
	} catch (error) {
		console.error("Error deleting volume:", error);
		res.status(500).json({ error: "Failed to delete volume" });
	}
});

// DELETE /api/v1/docker/networks/:id - Delete a network
router.delete("/networks/:id", authenticateToken, async (req, res) => {
	try {
		const { id } = req.params;

		// Check if network exists
		const network = await prisma.docker_networks.findUnique({
			where: { id },
		});

		if (!network) {
			return res.status(404).json({ error: "Network not found" });
		}

		// Delete the network
		await prisma.docker_networks.delete({
			where: { id },
		});

		console.log(`🗑️ Deleted network: ${network.name} (${id})`);

		res.json({
			success: true,
			message: `Network ${network.name} deleted successfully`,
		});
	} catch (error) {
		console.error("Error deleting network:", error);
		res.status(500).json({ error: "Failed to delete network" });
	}
});

module.exports = router;
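Assuming the router is mounted at /api/v1/docker (as the route comments indicate) and a valid bearer token, the new endpoints can be exercised like this (TOKEN and id are placeholders):

const headers = { Authorization: `Bearer ${TOKEN}` };

// List bridge networks whose name contains "front", 25 per page:
await fetch("/api/v1/docker/networks?driver=bridge&search=front&limit=25", { headers });

// Fetch one volume, then delete it:
const { volume } = await (await fetch(`/api/v1/docker/volumes/${id}`, { headers })).json();
await fetch(`/api/v1/docker/volumes/${id}`, { method: "DELETE", headers });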
@@ -24,7 +24,15 @@ router.get("/", authenticateToken, async (_req, res) => {
			},
		});

-		res.json(hostGroups);
+		// Transform the count field to match frontend expectations
+		const transformedGroups = hostGroups.map((group) => ({
+			...group,
+			_count: {
+				hosts: group._count.host_group_memberships,
+			},
+		}));
+
+		res.json(transformedGroups);
	} catch (error) {
		console.error("Error fetching host groups:", error);
		res.status(500).json({ error: "Failed to fetch host groups" });
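The transform exists because Prisma keys the _count field by relation name (host_group_memberships), while the frontend expects _count.hosts. A sketch of the shape change (field values invented for illustration):

// What Prisma returns with the _count include (illustrative values):
const fromPrisma = { id: "g1", name: "web", _count: { host_group_memberships: 3 } };
// What the map() above hands to the frontend:
const forFrontend = { id: "g1", name: "web", _count: { hosts: 3 } };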
@@ -10,6 +10,7 @@ const {
	requireManageHosts,
	requireManageSettings,
} = require("../middleware/permissions");
+const { queueManager, QUEUE_NAMES } = require("../services/automation");

const router = express.Router();
const prisma = getPrismaClient();

@@ -1387,6 +1388,66 @@ router.delete(
	},
);

+// Force immediate report from agent
+router.post(
+	"/:hostId/fetch-report",
+	authenticateToken,
+	requireManageHosts,
+	async (req, res) => {
+		try {
+			const { hostId } = req.params;
+
+			// Get host to verify it exists
+			const host = await prisma.hosts.findUnique({
+				where: { id: hostId },
+			});
+
+			if (!host) {
+				return res.status(404).json({ error: "Host not found" });
+			}
+
+			// Get the agent-commands queue
+			const queue = queueManager.queues[QUEUE_NAMES.AGENT_COMMANDS];
+
+			if (!queue) {
+				return res.status(500).json({
+					error: "Queue not available",
+				});
+			}
+
+			// Add job to queue
+			const job = await queue.add(
+				"report_now",
+				{
+					api_id: host.api_id,
+					type: "report_now",
+				},
+				{
+					attempts: 3,
+					backoff: {
+						type: "exponential",
+						delay: 2000,
+					},
+				},
+			);
+
+			res.json({
+				success: true,
+				message: "Report fetch queued successfully",
+				jobId: job.id,
+				host: {
+					id: host.id,
+					friendlyName: host.friendly_name,
+					apiId: host.api_id,
+				},
+			});
+		} catch (error) {
+			console.error("Force fetch report error:", error);
+			res.status(500).json({ error: "Failed to fetch report" });
+		}
+	},
+);
+
// Toggle agent auto-update setting
router.patch(
	"/:hostId/auto-update",

@@ -1448,21 +1509,17 @@ router.post(
			return res.status(404).json({ error: "Host not found" });
		}

-		// Get queue manager
-		const { QUEUE_NAMES } = require("../services/automation");
-		const queueManager = req.app.locals.queueManager;
-
-		if (!queueManager) {
-			return res.status(500).json({
-				error: "Queue manager not available",
-			});
-		}
-
		// Get the agent-commands queue
		const queue = queueManager.queues[QUEUE_NAMES.AGENT_COMMANDS];

		if (!queue) {
			return res.status(500).json({
				error: "Queue not available",
			});
		}

		// Add job to queue
-		await queue.add(
+		const job = await queue.add(
			"update_agent",
			{
				api_id: host.api_id,

@@ -1480,6 +1537,7 @@ router.post(
		res.json({
			success: true,
			message: "Agent update queued successfully",
+			jobId: job.id,
			host: {
				id: host.id,
				friendlyName: host.friendly_name,
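A quick way to exercise the new force-report endpoint (HOST_ID and TOKEN are placeholders; the /api/v1/hosts mount path is assumed from the surrounding routes):

const res = await fetch(`/api/v1/hosts/${HOST_ID}/fetch-report`, {
	method: "POST",
	headers: { Authorization: `Bearer ${TOKEN}` },
});
const body = await res.json(); // { success, message, jobId, host: { ... } } on success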
@@ -13,6 +13,8 @@ router.post("/docker", async (req, res) => {
|
||||
const {
|
||||
containers,
|
||||
images,
|
||||
volumes,
|
||||
networks,
|
||||
updates,
|
||||
daemon_info: _daemon_info,
|
||||
hostname,
|
||||
@@ -49,6 +51,8 @@ router.post("/docker", async (req, res) => {
|
||||
|
||||
let containersProcessed = 0;
|
||||
let imagesProcessed = 0;
|
||||
let volumesProcessed = 0;
|
||||
let networksProcessed = 0;
|
||||
let updatesProcessed = 0;
|
||||
|
||||
// Process containers
|
||||
@@ -169,6 +173,114 @@ router.post("/docker", async (req, res) => {
|
||||
}
|
||||
}
|
||||
|
||||
// Process volumes
|
||||
if (volumes && Array.isArray(volumes)) {
|
||||
console.log(`[Docker Integration] Processing ${volumes.length} volumes`);
|
||||
for (const volumeData of volumes) {
|
||||
await prisma.docker_volumes.upsert({
|
||||
where: {
|
||||
host_id_volume_id: {
|
||||
host_id: host.id,
|
||||
volume_id: volumeData.volume_id,
|
||||
},
|
||||
},
|
||||
update: {
|
||||
name: volumeData.name,
|
||||
driver: volumeData.driver || "local",
|
||||
mountpoint: volumeData.mountpoint || null,
|
||||
renderer: volumeData.renderer || null,
|
||||
scope: volumeData.scope || "local",
|
||||
labels: volumeData.labels || null,
|
||||
options: volumeData.options || null,
|
||||
size_bytes: volumeData.size_bytes
|
||||
? BigInt(volumeData.size_bytes)
|
||||
: null,
|
||||
ref_count: volumeData.ref_count || 0,
|
||||
updated_at: now,
|
||||
last_checked: now,
|
||||
},
|
||||
create: {
|
||||
id: uuidv4(),
|
||||
host_id: host.id,
|
||||
volume_id: volumeData.volume_id,
|
||||
name: volumeData.name,
|
||||
driver: volumeData.driver || "local",
|
||||
mountpoint: volumeData.mountpoint || null,
|
||||
renderer: volumeData.renderer || null,
|
||||
scope: volumeData.scope || "local",
|
||||
labels: volumeData.labels || null,
|
||||
options: volumeData.options || null,
|
||||
size_bytes: volumeData.size_bytes
|
||||
? BigInt(volumeData.size_bytes)
|
||||
: null,
|
||||
ref_count: volumeData.ref_count || 0,
|
||||
created_at: parseDate(volumeData.created_at),
|
||||
updated_at: now,
|
||||
},
|
||||
});
|
||||
volumesProcessed++;
|
||||
}
|
||||
}
|
||||
|
||||
// Process networks
|
||||
if (networks && Array.isArray(networks)) {
|
||||
console.log(
|
||||
`[Docker Integration] Processing ${networks.length} networks`,
|
||||
);
|
||||
for (const networkData of networks) {
|
||||
await prisma.docker_networks.upsert({
|
||||
where: {
|
||||
host_id_network_id: {
|
||||
host_id: host.id,
|
||||
network_id: networkData.network_id,
|
||||
},
|
||||
},
|
||||
update: {
|
||||
name: networkData.name,
|
||||
driver: networkData.driver,
|
||||
scope: networkData.scope || "local",
|
||||
ipv6_enabled: networkData.ipv6_enabled || false,
|
||||
internal: networkData.internal || false,
|
||||
attachable:
|
||||
networkData.attachable !== undefined
|
||||
? networkData.attachable
|
||||
: true,
|
||||
ingress: networkData.ingress || false,
|
||||
config_only: networkData.config_only || false,
|
||||
labels: networkData.labels || null,
|
||||
ipam: networkData.ipam || null,
|
||||
container_count: networkData.container_count || 0,
|
||||
updated_at: now,
|
||||
last_checked: now,
|
||||
},
|
||||
create: {
|
||||
id: uuidv4(),
|
||||
host_id: host.id,
|
||||
network_id: networkData.network_id,
|
||||
name: networkData.name,
|
||||
driver: networkData.driver,
|
||||
scope: networkData.scope || "local",
|
||||
ipv6_enabled: networkData.ipv6_enabled || false,
|
||||
internal: networkData.internal || false,
|
||||
attachable:
|
||||
networkData.attachable !== undefined
|
||||
? networkData.attachable
|
||||
: true,
|
||||
ingress: networkData.ingress || false,
|
||||
config_only: networkData.config_only || false,
|
||||
labels: networkData.labels || null,
|
||||
ipam: networkData.ipam || null,
|
||||
container_count: networkData.container_count || 0,
|
||||
created_at: networkData.created_at
|
||||
? parseDate(networkData.created_at)
|
||||
: null,
|
||||
updated_at: now,
|
||||
},
|
||||
});
|
||||
networksProcessed++;
|
||||
}
|
||||
}
|
||||
|
||||
// Process updates
|
||||
if (updates && Array.isArray(updates)) {
|
||||
console.log(`[Docker Integration] Processing ${updates.length} updates`);
|
||||
@@ -219,13 +331,15 @@ router.post("/docker", async (req, res) => {
|
||||
}
|
||||
|
||||
console.log(
|
||||
`[Docker Integration] Successfully processed: ${containersProcessed} containers, ${imagesProcessed} images, ${updatesProcessed} updates`,
|
||||
`[Docker Integration] Successfully processed: ${containersProcessed} containers, ${imagesProcessed} images, ${volumesProcessed} volumes, ${networksProcessed} networks, ${updatesProcessed} updates`,
|
||||
);
|
||||
|
||||
res.json({
|
||||
message: "Docker data collected successfully",
|
||||
containers_received: containersProcessed,
|
||||
images_received: imagesProcessed,
|
||||
volumes_received: volumesProcessed,
|
||||
networks_received: networksProcessed,
|
||||
updates_found: updatesProcessed,
|
||||
});
|
||||
} catch (error) {
|
||||
|
||||
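For reference, a minimal request body the extended /docker handler above would accept. Field names are taken from the handler; values and the exact host-identification fields the endpoint already required are assumptions:

const payload = {
	hostname: "web-01", // plus whatever host identification the handler already required
	containers: [],
	images: [],
	volumes: [
		{ volume_id: "abc123", name: "pgdata", driver: "local", scope: "local", ref_count: 1 },
	],
	networks: [
		{ network_id: "def456", name: "frontend", driver: "bridge", scope: "local" },
	],
	updates: [],
};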
@@ -261,8 +261,10 @@ router.post(
		body("username").notEmpty().withMessage("Username is required"),
		body("token")
			.isLength({ min: 6, max: 6 })
-			.withMessage("Token must be 6 digits"),
-		body("token").isNumeric().withMessage("Token must contain only numbers"),
+			.withMessage("Token must be 6 characters"),
+		body("token")
+			.matches(/^[A-Z0-9]{6}$/)
+			.withMessage("Token must be 6 alphanumeric characters"),
	],
	async (req, res) => {
		try {
backend/src/services/automation/dockerImageUpdateCheck.js (new file, 343 lines)
@@ -0,0 +1,343 @@
const { prisma } = require("./shared/prisma");
const https = require("node:https");
const http = require("node:http");
const { v4: uuidv4 } = require("uuid");

/**
 * Docker Image Update Check Automation
 * Checks for Docker image updates by comparing local digests with remote registry digests
 */
class DockerImageUpdateCheck {
	constructor(queueManager) {
		this.queueManager = queueManager;
		this.queueName = "docker-image-update-check";
	}

	/**
	 * Get remote digest from Docker registry using HEAD request
	 * Supports Docker Hub, GHCR, and other OCI-compliant registries
	 */
	async getRemoteDigest(imageName, tag = "latest") {
		return new Promise((resolve, reject) => {
			// Parse image name to determine registry
			const registryInfo = this.parseImageName(imageName);

			// Construct manifest URL
			const manifestPath = `/v2/${registryInfo.repository}/manifests/${tag}`;
			const options = {
				hostname: registryInfo.registry,
				path: manifestPath,
				method: "HEAD",
				headers: {
					Accept:
						"application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json",
					"User-Agent": "PatchMon/1.0",
				},
			};

			// Add authentication token for Docker Hub if needed
			if (
				registryInfo.registry === "registry-1.docker.io" &&
				registryInfo.isPublic
			) {
				// For anonymous public images, we may need to get an auth token first
				// For now, try without auth (works for public images)
			}

			// Choose HTTP or HTTPS
			const client = registryInfo.isSecure ? https : http;

			const req = client.request(options, (res) => {
				if (res.statusCode === 401 || res.statusCode === 403) {
					// Authentication required - skip for now (would need to implement auth)
					return reject(
						new Error(`Authentication required for ${imageName}:${tag}`),
					);
				}

				if (res.statusCode !== 200) {
					return reject(
						new Error(
							`Registry returned status ${res.statusCode} for ${imageName}:${tag}`,
						),
					);
				}

				// Get digest from Docker-Content-Digest header
				const digest = res.headers["docker-content-digest"];
				if (!digest) {
					return reject(
						new Error(
							`No Docker-Content-Digest header for ${imageName}:${tag}`,
						),
					);
				}

				// Clean up digest (remove sha256: prefix if present)
				const cleanDigest = digest.startsWith("sha256:")
					? digest.substring(7)
					: digest;
				resolve(cleanDigest);
			});

			req.on("error", (error) => {
				reject(error);
			});

			req.setTimeout(10000, () => {
				req.destroy();
				reject(new Error(`Timeout getting digest for ${imageName}:${tag}`));
			});

			req.end();
		});
	}

	/**
	 * Parse image name to extract registry, repository, and determine if secure
	 */
	parseImageName(imageName) {
		let registry = "registry-1.docker.io";
		let repository = imageName;
		const isSecure = true;
		let isPublic = true;

		// Handle explicit registries (ghcr.io, quay.io, etc.)
		if (imageName.includes("/")) {
			const parts = imageName.split("/");
			const firstPart = parts[0];

			// Check for known registries
			if (firstPart.includes(".") || firstPart === "localhost") {
				registry = firstPart;
				repository = parts.slice(1).join("/");
				isPublic = false; // Assume private registries need auth for now
			} else {
				// Docker Hub - registry-1.docker.io
				repository = imageName;
			}
		}

		// Docker Hub official images (no namespace)
		if (!repository.includes("/")) {
			repository = `library/${repository}`;
		}

		return {
			registry,
			repository,
			isSecure,
			isPublic,
		};
	}
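
	// Illustrative: expected parseImageName results, traced through the branches
	// above (not part of the original file):
	//   parseImageName("nginx")
	//     -> { registry: "registry-1.docker.io", repository: "library/nginx", isSecure: true, isPublic: true }
	//   parseImageName("grafana/grafana")
	//     -> { registry: "registry-1.docker.io", repository: "grafana/grafana", isSecure: true, isPublic: true }
	//   parseImageName("ghcr.io/owner/app")
	//     -> { registry: "ghcr.io", repository: "owner/app", isSecure: true, isPublic: false }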

	/**
	 * Process Docker image update check job
	 */
	async process(_job) {
		const startTime = Date.now();
		console.log("🐳 Starting Docker image update check...");

		try {
			// Get all Docker images that have a digest and repository
			const images = await prisma.docker_images.findMany({
				where: {
					digest: {
						not: null,
					},
					repository: {
						not: null,
					},
				},
				include: {
					docker_image_updates: true,
				},
			});

			console.log(`📦 Found ${images.length} images to check for updates`);

			let checkedCount = 0;
			let updateCount = 0;
			let errorCount = 0;
			const errors = [];

			// Process images in batches to avoid overwhelming the API
			const batchSize = 10;
			for (let i = 0; i < images.length; i += batchSize) {
				const batch = images.slice(i, i + batchSize);

				// Process batch concurrently with Promise.allSettled for error tolerance
				const _results = await Promise.allSettled(
					batch.map(async (image) => {
						try {
							checkedCount++;

							// Skip local images (no digest means they're local)
							if (!image.digest || image.digest.trim() === "") {
								return { image, skipped: true, reason: "No digest" };
							}

							// Get clean digest (remove sha256: prefix if present)
							const localDigest = image.digest.startsWith("sha256:")
								? image.digest.substring(7)
								: image.digest;

							// Get remote digest from registry
							const remoteDigest = await this.getRemoteDigest(
								image.repository,
								image.tag || "latest",
							);

							// Compare digests
							if (localDigest !== remoteDigest) {
								console.log(
									`🔄 Update found: ${image.repository}:${image.tag} (local: ${localDigest.substring(0, 12)}..., remote: ${remoteDigest.substring(0, 12)}...)`,
								);

								// Store digest info in changelog_url field as JSON
								const digestInfo = JSON.stringify({
									method: "digest_comparison",
									current_digest: localDigest,
									available_digest: remoteDigest,
									checked_at: new Date().toISOString(),
								});

								// Upsert the update record
								await prisma.docker_image_updates.upsert({
									where: {
										image_id_available_tag: {
											image_id: image.id,
											available_tag: image.tag || "latest",
										},
									},
									update: {
										updated_at: new Date(),
										changelog_url: digestInfo,
										severity: "digest_changed",
									},
									create: {
										id: uuidv4(),
										image_id: image.id,
										current_tag: image.tag || "latest",
										available_tag: image.tag || "latest",
										severity: "digest_changed",
										changelog_url: digestInfo,
										updated_at: new Date(),
									},
								});

								// Update last_checked timestamp on image
								await prisma.docker_images.update({
									where: { id: image.id },
									data: { last_checked: new Date() },
								});

								updateCount++;
								return { image, updated: true };
							} else {
								// No update - still update last_checked
								await prisma.docker_images.update({
									where: { id: image.id },
									data: { last_checked: new Date() },
								});

								// Remove existing update record if digest matches now
								const existingUpdate = image.docker_image_updates?.find(
									(u) => u.available_tag === (image.tag || "latest"),
								);
								if (existingUpdate) {
									await prisma.docker_image_updates.delete({
										where: { id: existingUpdate.id },
									});
								}

								return { image, updated: false };
							}
						} catch (error) {
							errorCount++;
							const errorMsg = `Error checking ${image.repository}:${image.tag}: ${error.message}`;
							errors.push(errorMsg);
							console.error(`❌ ${errorMsg}`);

							// Still update last_checked even on error
							try {
								await prisma.docker_images.update({
									where: { id: image.id },
									data: { last_checked: new Date() },
								});
							} catch (_updateError) {
								// Ignore update errors
							}

							return { image, error: error.message };
						}
					}),
				);

				// Log batch progress
				if (i + batchSize < images.length) {
					console.log(
						`⏳ Processed ${Math.min(i + batchSize, images.length)}/${images.length} images...`,
					);
				}

				// Small delay between batches to be respectful to registries
				if (i + batchSize < images.length) {
					await new Promise((resolve) => setTimeout(resolve, 500));
				}
			}

			const executionTime = Date.now() - startTime;
			console.log(
				`✅ Docker image update check completed in ${executionTime}ms - Checked: ${checkedCount}, Updates: ${updateCount}, Errors: ${errorCount}`,
			);

			return {
				success: true,
				checked: checkedCount,
				updates: updateCount,
				errors: errorCount,
				executionTime,
				errorDetails: errors,
			};
		} catch (error) {
			const executionTime = Date.now() - startTime;
			console.error(
				`❌ Docker image update check failed after ${executionTime}ms:`,
				error.message,
			);
			throw error;
		}
	}

	/**
	 * Schedule recurring Docker image update check (daily at 2 AM)
	 */
	async schedule() {
		const job = await this.queueManager.queues[this.queueName].add(
			"docker-image-update-check",
			{},
			{
				repeat: { cron: "0 2 * * *" }, // Daily at 2 AM
				jobId: "docker-image-update-check-recurring",
			},
		);
		console.log("✅ Docker image update check scheduled");
		return job;
	}

	/**
	 * Trigger manual Docker image update check
	 */
	async triggerManual() {
		const job = await this.queueManager.queues[this.queueName].add(
			"docker-image-update-check-manual",
			{},
			{ priority: 1 },
		);
		console.log("✅ Manual Docker image update check triggered");
		return job;
	}
}

module.exports = DockerImageUpdateCheck;
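Since process() stores digest details as JSON in the changelog_url column, a consumer can recover them later; a sketch, where updateRecord is a hypothetical docker_image_updates row:

const info = JSON.parse(updateRecord.changelog_url);
// { method: "digest_comparison", current_digest: "...", available_digest: "...", checked_at: "..." }
if (info.method === "digest_comparison") {
	console.log(`update available: ${info.available_digest.substring(0, 12)}...`);
}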
@@ -2,6 +2,7 @@ const { Queue, Worker } = require("bullmq");
const { redis, redisConnection } = require("./shared/redis");
const { prisma } = require("./shared/prisma");
const agentWs = require("../agentWs");
+const { v4: uuidv4 } = require("uuid");

// Import automation classes
const GitHubUpdateCheck = require("./githubUpdateCheck");

@@ -9,6 +10,7 @@ const SessionCleanup = require("./sessionCleanup");
const OrphanedRepoCleanup = require("./orphanedRepoCleanup");
const OrphanedPackageCleanup = require("./orphanedPackageCleanup");
const DockerInventoryCleanup = require("./dockerInventoryCleanup");
+const DockerImageUpdateCheck = require("./dockerImageUpdateCheck");
const MetricsReporting = require("./metricsReporting");

// Queue names

@@ -18,6 +20,7 @@ const QUEUE_NAMES = {
	ORPHANED_REPO_CLEANUP: "orphaned-repo-cleanup",
	ORPHANED_PACKAGE_CLEANUP: "orphaned-package-cleanup",
	DOCKER_INVENTORY_CLEANUP: "docker-inventory-cleanup",
+	DOCKER_IMAGE_UPDATE_CHECK: "docker-image-update-check",
	METRICS_REPORTING: "metrics-reporting",
	AGENT_COMMANDS: "agent-commands",
};

@@ -97,6 +100,8 @@ class QueueManager {
			new OrphanedPackageCleanup(this);
		this.automations[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP] =
			new DockerInventoryCleanup(this);
+		this.automations[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK] =
+			new DockerImageUpdateCheck(this);
		this.automations[QUEUE_NAMES.METRICS_REPORTING] = new MetricsReporting(
			this,
		);

@@ -167,6 +172,15 @@ class QueueManager {
			workerOptions,
		);

+		// Docker Image Update Check Worker
+		this.workers[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK] = new Worker(
+			QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK,
+			this.automations[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK].process.bind(
+				this.automations[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK],
+			),
+			workerOptions,
+		);
+
		// Metrics Reporting Worker
		this.workers[QUEUE_NAMES.METRICS_REPORTING] = new Worker(
			QUEUE_NAMES.METRICS_REPORTING,

@@ -183,28 +197,87 @@ class QueueManager {
				const { api_id, type } = job.data;
				console.log(`Processing agent command: ${type} for ${api_id}`);

-				// Send command via WebSocket based on type
-				if (type === "report_now") {
-					agentWs.pushReportNow(api_id);
-				} else if (type === "settings_update") {
-					// For settings update, we need additional data
-					const { update_interval } = job.data;
-					agentWs.pushSettingsUpdate(api_id, update_interval);
-				} else if (type === "update_agent") {
-					// Force agent to update by sending WebSocket command
-					const ws = agentWs.getConnectionByApiId(api_id);
-					if (ws && ws.readyState === 1) {
-						// WebSocket.OPEN
-						agentWs.pushUpdateAgent(api_id);
-						console.log(`✅ Update command sent to agent ${api_id}`);
-					} else {
-						console.error(`❌ Agent ${api_id} is not connected`);
-						throw new Error(
-							`Agent ${api_id} is not connected. Cannot send update command.`,
-						);
-					}
-				} else {
-					console.error(`Unknown agent command type: ${type}`);
-				}
+				// Log job to job_history
+				let historyRecord = null;
+				try {
+					const host = await prisma.hosts.findUnique({
+						where: { api_id },
+						select: { id: true },
+					});
+
+					if (host) {
+						historyRecord = await prisma.job_history.create({
+							data: {
+								id: uuidv4(),
+								job_id: job.id,
+								queue_name: QUEUE_NAMES.AGENT_COMMANDS,
+								job_name: type,
+								host_id: host.id,
+								api_id: api_id,
+								status: "active",
+								attempt_number: job.attemptsMade + 1,
+								created_at: new Date(),
+								updated_at: new Date(),
+							},
+						});
+						console.log(`📝 Logged job to job_history: ${job.id} (${type})`);
+					}
+				} catch (error) {
+					console.error("Failed to log job to job_history:", error);
+				}
+
+				try {
+					// Send command via WebSocket based on type
+					if (type === "report_now") {
+						agentWs.pushReportNow(api_id);
+					} else if (type === "settings_update") {
+						// For settings update, we need additional data
+						const { update_interval } = job.data;
+						agentWs.pushSettingsUpdate(api_id, update_interval);
+					} else if (type === "update_agent") {
+						// Force agent to update by sending WebSocket command
+						const ws = agentWs.getConnectionByApiId(api_id);
+						if (ws && ws.readyState === 1) {
+							// WebSocket.OPEN
+							agentWs.pushUpdateAgent(api_id);
+							console.log(`✅ Update command sent to agent ${api_id}`);
+						} else {
+							console.error(`❌ Agent ${api_id} is not connected`);
+							throw new Error(
+								`Agent ${api_id} is not connected. Cannot send update command.`,
+							);
+						}
+					} else {
+						console.error(`Unknown agent command type: ${type}`);
+					}
+
+					// Update job history to completed
+					if (historyRecord) {
+						await prisma.job_history.updateMany({
+							where: { job_id: job.id },
+							data: {
+								status: "completed",
+								completed_at: new Date(),
+								updated_at: new Date(),
+							},
+						});
+						console.log(`✅ Marked job as completed in job_history: ${job.id}`);
+					}
+				} catch (error) {
+					// Update job history to failed
+					if (historyRecord) {
+						await prisma.job_history.updateMany({
+							where: { job_id: job.id },
+							data: {
+								status: "failed",
+								error_message: error.message,
+								completed_at: new Date(),
+								updated_at: new Date(),
+							},
+						});
+						console.log(`❌ Marked job as failed in job_history: ${job.id}`);
+					}
+					throw error;
+				}
			},
			workerOptions,

@@ -234,6 +307,7 @@ class QueueManager {
				console.log(`✅ Job '${job.id}' in queue '${queueName}' completed.`);
			});
		}

		console.log("✅ Queue events initialized");
	}

@@ -246,6 +320,7 @@ class QueueManager {
		await this.automations[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].schedule();
		await this.automations[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].schedule();
		await this.automations[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].schedule();
+		await this.automations[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK].schedule();
		await this.automations[QUEUE_NAMES.METRICS_REPORTING].schedule();
	}

@@ -276,6 +351,12 @@ class QueueManager {
		].triggerManual();
	}

+	async triggerDockerImageUpdateCheck() {
+		return this.automations[
+			QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK
+		].triggerManual();
+	}
+
	async triggerMetricsReporting() {
		return this.automations[QUEUE_NAMES.METRICS_REPORTING].triggerManual();
	}
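With the wiring above in place, the new automation can be kicked off manually through the same export the hosts route already consumes (require("../services/automation")); a minimal sketch:

const { queueManager } = require("../services/automation");

// schedule() registers the daily 2 AM cron; this forces an immediate run:
const job = await queueManager.triggerDockerImageUpdateCheck();
console.log(`Docker image update check queued as job ${job.id}`);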
backend/src/utils/docker.js (new file, 179 lines)
@@ -0,0 +1,179 @@
/**
 * Docker-related utility functions
 */

/**
 * Generate a registry link for a Docker image based on its repository and source
 * Inspired by diun's registry link generation
 * @param {string} repository - The full repository name (e.g., "ghcr.io/owner/repo")
 * @param {string} source - The detected source (github, gitlab, docker-hub, etc.)
 * @returns {string|null} - The URL to the registry page, or null if unknown
 */
function generateRegistryLink(repository, source) {
	if (!repository) {
		return null;
	}

	// Parse the domain and path from the repository
	const parts = repository.split("/");
	let domain = "";
	let path = "";

	// Check if repository has a domain (contains a dot)
	if (parts[0].includes(".") || parts[0].includes(":")) {
		domain = parts[0];
		path = parts.slice(1).join("/");
	} else {
		// No domain means Docker Hub
		domain = "docker.io";
		path = repository;
	}

	switch (source) {
		case "docker-hub":
		case "docker.io": {
			// Docker Hub: https://hub.docker.com/r/{path} or https://hub.docker.com/_/{path} for official images
			// Official images are those without a namespace (e.g., "postgres" not "user/postgres")
			// or explicitly prefixed with "library/"
			if (path.startsWith("library/")) {
				const cleanPath = path.replace("library/", "");
				return `https://hub.docker.com/_/${cleanPath}`;
			}
			// Check if it's an official image (single part, no slash after removing library/)
			if (!path.includes("/")) {
				return `https://hub.docker.com/_/${path}`;
			}
			// Regular user/org image
			return `https://hub.docker.com/r/${path}`;
		}

		case "github":
		case "ghcr.io": {
			// GitHub Container Registry
			// Format: ghcr.io/{owner}/{package} or ghcr.io/{owner}/{repo}/{package}
			// URL format: https://github.com/{owner}/{repo}/pkgs/container/{package}
			if (domain === "ghcr.io" && path) {
				const pathParts = path.split("/");
				if (pathParts.length === 2) {
					// Simple case: ghcr.io/owner/package -> github.com/owner/owner/pkgs/container/package
					// (for two-part paths the repo is typically the same as the owner)
					const owner = pathParts[0];
					const packageName = pathParts[1];
					return `https://github.com/${owner}/${owner}/pkgs/container/${packageName}`;
				} else if (pathParts.length >= 3) {
					// Extended case: ghcr.io/owner/repo/package -> github.com/owner/repo/pkgs/container/package
					const owner = pathParts[0];
					const repo = pathParts[1];
					const packageName = pathParts.slice(2).join("/");
					return `https://github.com/${owner}/${repo}/pkgs/container/${packageName}`;
				}
			}
			// Legacy GitHub Packages
			if (domain === "docker.pkg.github.com" && path) {
				const pathParts = path.split("/");
				if (pathParts.length >= 1) {
					return `https://github.com/${pathParts[0]}/packages`;
				}
			}
			return null;
		}

		case "gitlab":
		case "registry.gitlab.com": {
			// GitLab Container Registry: https://gitlab.com/{path}/container_registry
			if (path) {
				return `https://gitlab.com/${path}/container_registry`;
			}
			return null;
		}

		case "google":
		case "gcr.io": {
			// Google Container Registry - link to the Cloud Console image view
			if (domain.includes("gcr.io") || domain.includes("pkg.dev")) {
				return `https://console.cloud.google.com/gcr/images/${path}`;
			}
			return null;
		}

		case "quay":
		case "quay.io": {
			// Quay.io: https://quay.io/repository/{path}
			if (path) {
				return `https://quay.io/repository/${path}`;
			}
			return null;
		}

		case "redhat":
		case "registry.access.redhat.com": {
			// Red Hat: https://access.redhat.com/containers/#/registry.access.redhat.com/{path}
			if (path) {
				return `https://access.redhat.com/containers/#/registry.access.redhat.com/${path}`;
			}
			return null;
		}

		case "azure":
		case "azurecr.io": {
			// Azure Container Registry - link to portal
			// Format: {registry}.azurecr.io/{repository}
			if (domain.includes("azurecr.io")) {
				const registryName = domain.split(".")[0];
				return `https://portal.azure.com/#view/Microsoft_Azure_ContainerRegistries/RepositoryBlade/registryName/${registryName}/repositoryName/${path}`;
			}
			return null;
		}

		case "aws":
		case "amazonaws.com": {
			// AWS ECR - link to console
			// Format: {account}.dkr.ecr.{region}.amazonaws.com/{repository}
			if (domain.includes("amazonaws.com")) {
				const domainParts = domain.split(".");
				const region = domainParts[3]; // Extract region
				return `https://${region}.console.aws.amazon.com/ecr/repositories/private/${path}`;
			}
			return null;
		}

		case "private":
			// For private registries, try to construct a basic URL
			if (domain) {
				return `https://${domain}`;
			}
			return null;

		default:
			return null;
	}
}

/**
 * Get a user-friendly display name for a registry source
 * @param {string} source - The source identifier
 * @returns {string} - Human-readable source name
 */
function getSourceDisplayName(source) {
	const sourceNames = {
		"docker-hub": "Docker Hub",
		github: "GitHub",
		gitlab: "GitLab",
		google: "Google",
		quay: "Quay.io",
		redhat: "Red Hat",
		azure: "Azure",
		aws: "AWS ECR",
		private: "Private Registry",
		local: "Local",
		unknown: "Unknown",
	};

	return sourceNames[source] || source;
}

module.exports = {
	generateRegistryLink,
	getSourceDisplayName,
};
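Sample results of generateRegistryLink, traced through the branches above:

generateRegistryLink("postgres", "docker-hub");
// -> "https://hub.docker.com/_/postgres"
generateRegistryLink("grafana/grafana", "docker-hub");
// -> "https://hub.docker.com/r/grafana/grafana"
generateRegistryLink("ghcr.io/owner/repo/app", "github");
// -> "https://github.com/owner/repo/pkgs/container/app"
generateRegistryLink("quay.io/coreos/etcd", "quay");
// -> "https://quay.io/repository/coreos/etcd"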
@@ -84,21 +84,20 @@ function parse_expiration(expiration_string) {
 * Generate device fingerprint from request data
 */
function generate_device_fingerprint(req) {
-	const components = [
-		req.get("user-agent") || "",
-		req.get("accept-language") || "",
-		req.get("accept-encoding") || "",
-		req.ip || "",
-	];
+	// Use the X-Device-ID header from frontend (unique per browser profile/localStorage)
+	const deviceId = req.get("x-device-id");

-	// Create a simple hash of device characteristics
-	const fingerprint = crypto
-		.createHash("sha256")
-		.update(components.join("|"))
-		.digest("hex")
-		.substring(0, 32); // Use first 32 chars for storage efficiency
+	if (deviceId) {
+		// Hash the device ID for consistent storage format
+		return crypto
+			.createHash("sha256")
+			.update(deviceId)
+			.digest("hex")
+			.substring(0, 32);
+	}

-	return fingerprint;
+	// No device ID - return null (user needs to provide device ID for remember-me)
+	return null;
}

/**