I should really commit more often instead of sending over one massive commit

Blame my ADHD brain
Sorry
- Server automation now works properly, using BullMQ and Redis
- It also exposes an API endpoint that accepts WebSocket connections from agents (WS or WSS)
- Updated the docker-compose.yml and its documentation
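For reference, a minimal sketch of what the first two bullets describe, a BullMQ queue/worker backed by Redis plus a WebSocket upgrade endpoint for agents. The queue name, job names, Redis settings, port, and the agent path here are illustrative placeholders, not the actual values used in this repo:

// Illustrative sketch only: names, paths, and Redis settings are placeholders.
const express = require("express");
const http = require("http");
const { Queue, Worker } = require("bullmq");
const { WebSocketServer } = require("ws");

// BullMQ queue + worker backed by Redis for scheduled automation.
const connection = { host: process.env.REDIS_HOST || "localhost", port: 6379 };
const automationQueue = new Queue("automation", { connection });
automationQueue
  .add("collect-host-data", {}, { repeat: { every: 60000 } })
  .catch(console.error);
new Worker(
  "automation",
  async (job) => {
    // Automated server-side work runs here (e.g. periodic collection jobs).
    console.log(`processing ${job.name}`, job.data);
  },
  { connection },
);

// Express server with a WebSocket upgrade endpoint for agents (WS, or WSS behind TLS).
const app = express();
const server = http.createServer(app);
const wss = new WebSocketServer({ noServer: true });

server.on("upgrade", (req, socket, head) => {
  // Hypothetical agent path; authentication and validation omitted for brevity.
  if (req.url.startsWith("/api/v1/agents/ws")) {
    wss.handleUpgrade(req, socket, head, (ws) => wss.emit("connection", ws, req));
  } else {
    socket.destroy();
  }
});

wss.on("connection", (ws) => {
  ws.on("message", (msg) => console.log("agent message:", msg.toString()));
});

server.listen(3000);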
Muhammad Ibrahim
2025-10-17 22:10:55 +01:00
parent c43afeb127
commit 46eb797ac3
31 changed files with 2564 additions and 457 deletions

@@ -202,11 +202,15 @@ router.get("/hosts", authenticateToken, requireViewHosts, async (_req, res) => {
auto_update: true,
notes: true,
api_id: true,
host_group_memberships: {
include: {
host_groups: {
select: {
id: true,
name: true,
color: true,
},
},
},
},
_count: {
@@ -356,11 +360,15 @@ router.get(
prisma.hosts.findUnique({
where: { id: hostId },
include: {
host_group_memberships: {
include: {
host_groups: {
select: {
id: true,
name: true,
color: true,
},
},
},
},
host_packages: {
@@ -558,22 +566,34 @@ router.get(
packages_count: true,
security_count: true,
total_packages: true,
host_id: true,
status: true,
},
orderBy: {
timestamp: "asc",
},
});
// Process data to show actual values (no averaging)
// Enhanced data validation and processing
const processedData = trendsData
.filter((record) => record.total_packages !== null) // Only include records with valid data
.filter((record) => {
// Enhanced validation
return (
record.total_packages !== null &&
record.total_packages >= 0 &&
record.packages_count >= 0 &&
record.security_count >= 0 &&
record.security_count <= record.packages_count && // Security can't exceed outdated
record.status === "success"
); // Only include successful reports
})
.map((record) => {
const date = new Date(record.timestamp);
let timeKey;
if (daysInt <= 1) {
// For hourly view, group by hour only (not minutes)
timeKey = date.toISOString().substring(0, 13); // YYYY-MM-DDTHH
} else {
// For daily view, group by day
timeKey = date.toISOString().split("T")[0]; // YYYY-MM-DD
@@ -584,64 +604,342 @@ router.get(
total_packages: record.total_packages,
packages_count: record.packages_count || 0,
security_count: record.security_count || 0,
host_id: record.host_id,
timestamp: record.timestamp,
};
});
// Determine if we need aggregation based on host filter
const needsAggregation =
!hostId || hostId === "all" || hostId === "undefined";
let aggregatedArray;
if (needsAggregation) {
// For "All Hosts" mode, we need to calculate the actual total packages differently
// Instead of aggregating historical data (which is per-host), we'll use the current total
// and show that as a flat line, since total packages don't change much over time
// Get the current total packages count (unique packages across all hosts)
const currentTotalPackages = await prisma.packages.count({
where: {
host_packages: {
some: {}, // At least one host has this package
},
},
});
// Aggregate data by timeKey when looking at "All Hosts" or no specific host
const aggregatedData = processedData.reduce((acc, item) => {
if (!acc[item.timeKey]) {
acc[item.timeKey] = {
timeKey: item.timeKey,
total_packages: currentTotalPackages, // Use current total packages
packages_count: 0,
security_count: 0,
record_count: 0,
host_ids: new Set(),
min_timestamp: item.timestamp,
max_timestamp: item.timestamp,
};
}
// For outdated and security packages: SUM (these represent counts across hosts)
acc[item.timeKey].packages_count += item.packages_count;
acc[item.timeKey].security_count += item.security_count;
acc[item.timeKey].record_count += 1;
acc[item.timeKey].host_ids.add(item.host_id);
// Track timestamp range
if (item.timestamp < acc[item.timeKey].min_timestamp) {
acc[item.timeKey].min_timestamp = item.timestamp;
}
if (item.timestamp > acc[item.timeKey].max_timestamp) {
acc[item.timeKey].max_timestamp = item.timestamp;
}
return acc;
}, {});
// Convert to array and add metadata
aggregatedArray = Object.values(aggregatedData)
.map((item) => ({
...item,
host_count: item.host_ids.size,
host_ids: Array.from(item.host_ids),
}))
.sort((a, b) => a.timeKey.localeCompare(b.timeKey));
} else {
// For specific host, show individual data points without aggregation
// But still group by timeKey to handle multiple reports from same host in same time period
const hostAggregatedData = processedData.reduce((acc, item) => {
if (!acc[item.timeKey]) {
acc[item.timeKey] = {
timeKey: item.timeKey,
total_packages: 0,
packages_count: 0,
security_count: 0,
record_count: 0,
host_ids: new Set([item.host_id]),
min_timestamp: item.timestamp,
max_timestamp: item.timestamp,
};
}
// For same host, take the latest values (not sum)
// This handles cases where a host reports multiple times in the same time period
if (item.timestamp > acc[item.timeKey].max_timestamp) {
acc[item.timeKey].total_packages = item.total_packages;
acc[item.timeKey].packages_count = item.packages_count;
acc[item.timeKey].security_count = item.security_count;
acc[item.timeKey].max_timestamp = item.timestamp;
}
acc[item.timeKey].record_count += 1;
return acc;
}, {});
// Convert to array
aggregatedArray = Object.values(hostAggregatedData)
.map((item) => ({
...item,
host_count: item.host_ids.size,
host_ids: Array.from(item.host_ids),
}))
.sort((a, b) => a.timeKey.localeCompare(b.timeKey));
}
// Handle sparse data by filling missing time periods
const fillMissingPeriods = (data, daysInt) => {
const filledData = [];
const startDate = new Date();
startDate.setDate(startDate.getDate() - daysInt);
const dataMap = new Map(data.map((item) => [item.timeKey, item]));
const endDate = new Date();
const currentDate = new Date(startDate);
// Find the last known values for interpolation
let lastKnownValues = null;
if (data.length > 0) {
lastKnownValues = {
total_packages: data[0].total_packages,
packages_count: data[0].packages_count,
security_count: data[0].security_count,
};
}
while (currentDate <= endDate) {
let timeKey;
if (daysInt <= 1) {
timeKey = currentDate.toISOString().substring(0, 13); // Hourly
currentDate.setHours(currentDate.getHours() + 1);
} else {
timeKey = currentDate.toISOString().split("T")[0]; // Daily
currentDate.setDate(currentDate.getDate() + 1);
}
if (dataMap.has(timeKey)) {
const item = dataMap.get(timeKey);
filledData.push(item);
// Update last known values
lastKnownValues = {
total_packages: item.total_packages,
packages_count: item.packages_count,
security_count: item.security_count,
};
} else {
// For missing periods, use the last known values (interpolation)
// This creates a continuous line instead of gaps
filledData.push({
timeKey,
total_packages: lastKnownValues?.total_packages || 0,
packages_count: lastKnownValues?.packages_count || 0,
security_count: lastKnownValues?.security_count || 0,
record_count: 0,
host_count: 0,
host_ids: [],
min_timestamp: null,
max_timestamp: null,
isInterpolated: true, // Mark as interpolated for debugging
});
}
}
return filledData;
};
const finalProcessedData = fillMissingPeriods(aggregatedArray, daysInt);
// Get hosts list for dropdown
const hostsList = await prisma.hosts.findMany({
select: {
id: true,
friendly_name: true,
hostname: true,
last_update: true,
status: true,
},
orderBy: {
friendly_name: "asc",
},
});
// Get current package state for offline fallback
let currentPackageState = null;
if (hostId && hostId !== "all" && hostId !== "undefined") {
// Get current package counts for specific host
const currentState = await prisma.host_packages.aggregate({
where: {
host_id: hostId,
},
_count: {
id: true,
},
});
// Get counts for boolean fields separately
const outdatedCount = await prisma.host_packages.count({
where: {
host_id: hostId,
needs_update: true,
},
});
const securityCount = await prisma.host_packages.count({
where: {
host_id: hostId,
is_security_update: true,
},
});
currentPackageState = {
total_packages: currentState._count.id,
packages_count: outdatedCount,
security_count: securityCount,
};
} else {
// Get current package counts for all hosts
// Total packages = count of unique packages installed on at least one host
const totalPackagesCount = await prisma.packages.count({
where: {
host_packages: {
some: {}, // At least one host has this package
},
},
});
// Get counts for boolean fields separately
const outdatedCount = await prisma.host_packages.count({
where: {
needs_update: true,
},
});
const securityCount = await prisma.host_packages.count({
where: {
is_security_update: true,
},
});
currentPackageState = {
total_packages: totalPackagesCount,
packages_count: outdatedCount,
security_count: securityCount,
};
}
// Format data for chart
const chartData = {
labels: [],
datasets: [
{
label: "Total Packages",
label: needsAggregation
? "Total Packages (All Hosts)"
: "Total Packages",
data: [],
borderColor: "#3B82F6", // Blue
backgroundColor: "rgba(59, 130, 246, 0.1)",
tension: 0.4,
hidden: true, // Hidden by default
spanGaps: true, // Connect lines across missing data
pointRadius: 3,
pointHoverRadius: 5,
},
{
label: "Outdated Packages",
label: needsAggregation
? "Total Outdated Packages"
: "Outdated Packages",
data: [],
borderColor: "#F59E0B", // Orange
backgroundColor: "rgba(245, 158, 11, 0.1)",
tension: 0.4,
spanGaps: true, // Connect lines across missing data
pointRadius: 3,
pointHoverRadius: 5,
},
{
label: "Security Packages",
label: needsAggregation
? "Total Security Packages"
: "Security Packages",
data: [],
borderColor: "#EF4444", // Red
backgroundColor: "rgba(239, 68, 68, 0.1)",
tension: 0.4,
spanGaps: true, // Connect lines across missing data
pointRadius: 3,
pointHoverRadius: 5,
},
],
};
// Process aggregated data
finalProcessedData.forEach((item) => {
chartData.labels.push(item.timeKey);
chartData.datasets[0].data.push(item.total_packages);
chartData.datasets[1].data.push(item.packages_count);
chartData.datasets[2].data.push(item.security_count);
});
// Calculate data quality metrics
const dataQuality = {
totalRecords: trendsData.length,
validRecords: processedData.length,
aggregatedPoints: aggregatedArray.length,
filledPoints: finalProcessedData.length,
recordsWithNullTotal: trendsData.filter(
(r) => r.total_packages === null,
).length,
recordsWithInvalidData: trendsData.length - processedData.length,
successfulReports: trendsData.filter((r) => r.status === "success")
.length,
failedReports: trendsData.filter((r) => r.status === "error").length,
};
res.json({
chartData,
hosts: hostsList,
period: daysInt,
hostId: hostId || "all",
currentPackageState,
dataQuality,
aggregationInfo: {
hasData: aggregatedArray.length > 0,
hasGaps: finalProcessedData.some((item) => item.record_count === 0),
lastDataPoint:
aggregatedArray.length > 0
? aggregatedArray[aggregatedArray.length - 1]
: null,
aggregationMode: needsAggregation
? "sum_across_hosts"
: "individual_host_data",
explanation: needsAggregation
? "Data is summed across all hosts for each time period"
: "Data shows individual host values without cross-host aggregation",
},
});
} catch (error) {
console.error("Error fetching package trends:", error);
@@ -650,4 +948,348 @@ router.get(
},
);
// Diagnostic endpoint to investigate package spikes
router.get(
"/package-spike-analysis",
authenticateToken,
requireViewHosts,
async (req, res) => {
try {
const { date, time, hours = 2 } = req.query;
if (!date || !time) {
return res.status(400).json({
error:
"Date and time parameters are required. Format: date=2025-10-17&time=18:00",
});
}
// Parse the specific date and time
const targetDateTime = new Date(`${date}T${time}:00`);
const startTime = new Date(targetDateTime);
startTime.setHours(startTime.getHours() - parseInt(hours, 10));
const endTime = new Date(targetDateTime);
endTime.setHours(endTime.getHours() + parseInt(hours, 10));
console.log(
`Analyzing package spike around ${targetDateTime.toISOString()}`,
);
console.log(
`Time range: ${startTime.toISOString()} to ${endTime.toISOString()}`,
);
// Get all update history records in the time window
const spikeData = await prisma.update_history.findMany({
where: {
timestamp: {
gte: startTime,
lte: endTime,
},
},
select: {
id: true,
host_id: true,
timestamp: true,
packages_count: true,
security_count: true,
total_packages: true,
status: true,
error_message: true,
execution_time: true,
payload_size_kb: true,
hosts: {
select: {
friendly_name: true,
hostname: true,
os_type: true,
os_version: true,
},
},
},
orderBy: {
timestamp: "asc",
},
});
// Analyze the data
const analysis = {
timeWindow: {
start: startTime.toISOString(),
end: endTime.toISOString(),
target: targetDateTime.toISOString(),
},
totalRecords: spikeData.length,
successfulReports: spikeData.filter((r) => r.status === "success")
.length,
failedReports: spikeData.filter((r) => r.status === "error").length,
uniqueHosts: [...new Set(spikeData.map((r) => r.host_id))].length,
hosts: {},
timeline: [],
summary: {
maxPackagesCount: 0,
maxSecurityCount: 0,
maxTotalPackages: 0,
avgPackagesCount: 0,
avgSecurityCount: 0,
avgTotalPackages: 0,
},
};
// Group by host and analyze each host's behavior
spikeData.forEach((record) => {
const hostId = record.host_id;
if (!analysis.hosts[hostId]) {
analysis.hosts[hostId] = {
hostInfo: record.hosts,
records: [],
summary: {
totalReports: 0,
successfulReports: 0,
failedReports: 0,
maxPackagesCount: 0,
maxSecurityCount: 0,
maxTotalPackages: 0,
avgPackagesCount: 0,
avgSecurityCount: 0,
avgTotalPackages: 0,
},
};
}
analysis.hosts[hostId].records.push({
timestamp: record.timestamp,
packages_count: record.packages_count,
security_count: record.security_count,
total_packages: record.total_packages,
status: record.status,
error_message: record.error_message,
execution_time: record.execution_time,
payload_size_kb: record.payload_size_kb,
});
analysis.hosts[hostId].summary.totalReports++;
if (record.status === "success") {
analysis.hosts[hostId].summary.successfulReports++;
analysis.hosts[hostId].summary.maxPackagesCount = Math.max(
analysis.hosts[hostId].summary.maxPackagesCount,
record.packages_count,
);
analysis.hosts[hostId].summary.maxSecurityCount = Math.max(
analysis.hosts[hostId].summary.maxSecurityCount,
record.security_count,
);
analysis.hosts[hostId].summary.maxTotalPackages = Math.max(
analysis.hosts[hostId].summary.maxTotalPackages,
record.total_packages || 0,
);
} else {
analysis.hosts[hostId].summary.failedReports++;
}
});
// Calculate averages for each host
Object.keys(analysis.hosts).forEach((hostId) => {
const host = analysis.hosts[hostId];
const successfulRecords = host.records.filter(
(r) => r.status === "success",
);
if (successfulRecords.length > 0) {
host.summary.avgPackagesCount = Math.round(
successfulRecords.reduce((sum, r) => sum + r.packages_count, 0) /
successfulRecords.length,
);
host.summary.avgSecurityCount = Math.round(
successfulRecords.reduce((sum, r) => sum + r.security_count, 0) /
successfulRecords.length,
);
host.summary.avgTotalPackages = Math.round(
successfulRecords.reduce(
(sum, r) => sum + (r.total_packages || 0),
0,
) / successfulRecords.length,
);
}
});
// Create timeline with hourly/daily aggregation
const timelineMap = new Map();
spikeData.forEach((record) => {
const timeKey = record.timestamp.toISOString().substring(0, 13); // Hourly
if (!timelineMap.has(timeKey)) {
timelineMap.set(timeKey, {
timestamp: timeKey,
totalReports: 0,
successfulReports: 0,
failedReports: 0,
totalPackagesCount: 0,
totalSecurityCount: 0,
totalTotalPackages: 0,
uniqueHosts: new Set(),
});
}
const timelineEntry = timelineMap.get(timeKey);
timelineEntry.totalReports++;
timelineEntry.uniqueHosts.add(record.host_id);
if (record.status === "success") {
timelineEntry.successfulReports++;
timelineEntry.totalPackagesCount += record.packages_count;
timelineEntry.totalSecurityCount += record.security_count;
timelineEntry.totalTotalPackages += record.total_packages || 0;
} else {
timelineEntry.failedReports++;
}
});
// Convert timeline map to array
analysis.timeline = Array.from(timelineMap.values())
.map((entry) => ({
...entry,
uniqueHosts: entry.uniqueHosts.size,
}))
.sort((a, b) => a.timestamp.localeCompare(b.timestamp));
// Calculate overall summary
const successfulRecords = spikeData.filter((r) => r.status === "success");
if (successfulRecords.length > 0) {
analysis.summary.maxPackagesCount = Math.max(
...successfulRecords.map((r) => r.packages_count),
);
analysis.summary.maxSecurityCount = Math.max(
...successfulRecords.map((r) => r.security_count),
);
analysis.summary.maxTotalPackages = Math.max(
...successfulRecords.map((r) => r.total_packages || 0),
);
analysis.summary.avgPackagesCount = Math.round(
successfulRecords.reduce((sum, r) => sum + r.packages_count, 0) /
successfulRecords.length,
);
analysis.summary.avgSecurityCount = Math.round(
successfulRecords.reduce((sum, r) => sum + r.security_count, 0) /
successfulRecords.length,
);
analysis.summary.avgTotalPackages = Math.round(
successfulRecords.reduce(
(sum, r) => sum + (r.total_packages || 0),
0,
) / successfulRecords.length,
);
}
// Identify potential causes of the spike
const potentialCauses = [];
// Check for hosts with unusually high package counts
Object.keys(analysis.hosts).forEach((hostId) => {
const host = analysis.hosts[hostId];
if (
host.summary.maxPackagesCount >
analysis.summary.avgPackagesCount * 2
) {
potentialCauses.push({
type: "high_package_count",
hostId,
hostName: host.hostInfo.friendly_name || host.hostInfo.hostname,
value: host.summary.maxPackagesCount,
avg: analysis.summary.avgPackagesCount,
});
}
});
// Check for multiple hosts reporting at the same time (this explains the 500 vs 59 discrepancy)
const concurrentReports = analysis.timeline.filter(
(entry) => entry.uniqueHosts > 1,
);
if (concurrentReports.length > 0) {
potentialCauses.push({
type: "concurrent_reports",
description:
"Multiple hosts reported simultaneously - this explains why chart shows higher numbers than individual host reports",
count: concurrentReports.length,
details: concurrentReports.map((entry) => ({
timestamp: entry.timestamp,
totalPackagesCount: entry.totalPackagesCount,
uniqueHosts: entry.uniqueHosts,
avgPerHost: Math.round(
entry.totalPackagesCount / entry.uniqueHosts,
),
})),
explanation:
"The chart sums package counts across all hosts. If multiple hosts report at the same time, the chart shows the total sum, not individual host counts.",
});
}
// Check for failed reports that might indicate system issues
if (analysis.failedReports > 0) {
potentialCauses.push({
type: "failed_reports",
count: analysis.failedReports,
percentage: Math.round(
(analysis.failedReports / analysis.totalRecords) * 100,
),
});
}
// Add aggregation explanation
const aggregationExplanation = {
type: "aggregation_explanation",
description: "Chart Aggregation Logic",
details: {
howItWorks:
"The package trends chart sums package counts across all hosts for each time period",
individualHosts:
"Each host reports its own package count (e.g., 59 packages)",
chartDisplay:
"Chart shows the sum of all hosts' package counts (e.g., 59 + other hosts = 500)",
timeGrouping:
"Multiple hosts reporting in the same hour/day are aggregated together",
},
example: {
host1: "Host A reports 59 outdated packages",
host2: "Host B reports 120 outdated packages",
host3: "Host C reports 321 outdated packages",
chartShows: "Chart displays 500 total packages (59+120+321)",
},
};
potentialCauses.push(aggregationExplanation);
// Add specific host breakdown if a host ID is provided
let specificHostAnalysis = null;
if (req.query.hostId) {
const hostId = req.query.hostId;
const hostData = analysis.hosts[hostId];
if (hostData) {
specificHostAnalysis = {
hostId,
hostInfo: hostData.hostInfo,
summary: hostData.summary,
records: hostData.records,
explanation: `This host reported ${hostData.summary.maxPackagesCount} outdated packages, but the chart shows ${analysis.summary.maxPackagesCount} because it sums across all hosts that reported at the same time.`,
};
}
}
res.json({
analysis,
potentialCauses,
specificHostAnalysis,
recommendations: [
"Check if any hosts had major package updates around this time",
"Verify if any new hosts were added to the system",
"Check for system maintenance or updates that might have triggered package checks",
"Review any automation or scheduled tasks that run around 6pm",
"Check if any repositories were updated or new packages were released",
"Remember: Chart shows SUM of all hosts' package counts, not individual host counts",
],
});
} catch (error) {
console.error("Error analyzing package spike:", error);
res.status(500).json({ error: "Failed to analyze package spike" });
}
},
);
module.exports = router;