Mirror of https://github.com/9technologygroup/patchmon.net.git (synced 2025-11-07 23:43:54 +00:00)

Compare commits: 561 commits
The compare view lists 561 commits by abbreviated SHA1 only, from 148ff2e77f at the top of the list down to 16821d6b5e at the bottom; the Author, Date, and commit-message columns are empty in this mirror view.
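With a local clone, the same range can be listed with ordinary git commands. A minimal sketch, assuming the table is ordered newest-first (so 16821d6b5e is the oldest commit in the range and 148ff2e77f the newest):

```bash
# Clone the mirrored repository and list the compared range,
# assuming 16821d6b5e..148ff2e77f spans the 561 commits shown above.
git clone https://github.com/9technologygroup/patchmon.net.git
cd patchmon.net
git log --oneline 16821d6b5e^..148ff2e77f
```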
**.dockerignore** (new file, +34 lines, @@ -0,0 +1,34 @@)

```
# Environment files
**/.env
**/.env.*
**/env.example

# Node modules
**/node_modules

# Logs
**/logs
**/*.log

# Git
**/.git
**/.gitignore

# IDE files
**/.vscode
**/.idea
**/*.swp
**/*.swo

# OS files
**/.DS_Store
**/Thumbs.db

# Build artifacts
**/dist
**/build
**/coverage

# Temporary files
**/tmp
**/temp
```
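To sanity-check what these patterns exclude from the build context, one option is a throwaway build that copies the context into a scratch image and lists it. A sketch, assuming Docker with BuildKit; the inline Dockerfile below is hypothetical and not part of the repository:

```bash
# Hypothetical throwaway build: whatever 'find' prints is what survived
# .dockerignore filtering and was sent to the Docker daemon.
docker build --no-cache --progress=plain -f - . <<'EOF'
FROM busybox
COPY . /ctx
RUN find /ctx -maxdepth 2 | sort
EOF
```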
**.github/workflows/app_build.yml** (vendored, new file, +25 lines, @@ -0,0 +1,25 @@)

```yaml
name: Build on Merge
on:
  push:
    branches:
      - main
    paths-ignore:
      - 'docker/**'

jobs:
  deploy:
    runs-on: self-hosted
    steps:
      - name: Checkout code
        uses: actions/checkout@v5

      - name: Run rebuild script
        run: /root/patchmon/platform/scripts/app_build.sh ${{ github.ref_name }}

  rebuild-pmon:
    runs-on: self-hosted
    needs: deploy
    if: github.ref_name == 'dev'
    steps:
      - name: Rebuild pmon
        run: /root/patchmon/platform/scripts/manage_pmon_auto.sh
```
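Because the workflow triggers only on pushes to `main`, `github.ref_name` resolves to `main` in the deploy job, so the `rebuild-pmon` job's `if: github.ref_name == 'dev'` gate cannot match on this trigger as written. For reference, the deploy job effectively executes the following on the self-hosted runner (a sketch; `app_build.sh` is provisioned on the runner and its contents are not part of this diff):

```bash
# Effective command for a push to main; the script itself is not
# included in this repository diff.
/root/patchmon/platform/scripts/app_build.sh main
```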
**.github/workflows/code_quality.yml** (vendored, new file, +28 lines, @@ -0,0 +1,28 @@)

```yaml
name: Code quality

on:
  push:
    paths-ignore:
      - 'docker/**'
  pull_request:
    paths-ignore:
      - 'docker/**'

jobs:
  check:
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - name: Checkout
        uses: actions/checkout@v5
        with:
          persist-credentials: false

      - name: Setup Biome
        uses: biomejs/setup-biome@v2
        with:
          version: latest

      - name: Run Biome
        run: biome ci .
```
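The same check can be reproduced locally before pushing. A minimal sketch, assuming the Biome CLI is available (for example via npx):

```bash
# Read-only lint + format check, mirroring the CI invocation:
npx @biomejs/biome ci .

# Apply safe lint/format fixes before committing:
npx @biomejs/biome check --write .
```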
**.github/workflows/docker.yml** (vendored, new file, +76 lines, @@ -0,0 +1,76 @@)

```yaml
name: Build and Push Docker Images

on:
  push:
    branches:
      - main
    tags:
      - 'v*'
  pull_request:
    branches:
      - main
  workflow_dispatch:
    inputs:
      push:
        description: Push images to registry
        required: false
        type: boolean
        default: false

env:
  REGISTRY: ghcr.io

permissions:
  contents: read
  packages: write

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        image: [backend, frontend]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5

      - name: Log in to container registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Extract metadata (tags, labels)
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ github.repository }}-${{ matrix.image }}
          tags: |
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=edge,branch=main

      - name: Build and push ${{ matrix.image }} image
        uses: docker/build-push-action@v6
        with:
          context: .
          file: docker/${{ matrix.image }}.Dockerfile
          platforms: linux/amd64,linux/arm64
          # Push if:
          # - Event is not workflow_dispatch OR input 'push' is true
          # AND
          # - Event is not pull_request OR the PR is from the same repository (to avoid pushing from forks)
          push: ${{ (github.event_name != 'workflow_dispatch' || inputs.push == 'true') && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository) }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha,scope=${{ matrix.image }}
          cache-to: type=gha,mode=max,scope=${{ matrix.image }}
```
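Given the metadata configuration, the published images would land at `ghcr.io/9technologygroup/patchmon.net-backend` and `ghcr.io/9technologygroup/patchmon.net-frontend`, tagged `edge` for `main`, semver tags for `v*` releases, and a PR ref tag for pull requests. A sketch of consuming and manually triggering the workflow (image paths are derived from the config above; the second command assumes the GitHub CLI):

```bash
# Pull the rolling builds from the main branch.
docker pull ghcr.io/9technologygroup/patchmon.net-backend:edge
docker pull ghcr.io/9technologygroup/patchmon.net-frontend:edge

# Manually trigger the workflow and actually push the result
# (the workflow_dispatch input 'push' defaults to false).
gh workflow run docker.yml -f push=true
```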
**.gitignore** (vendored, modified, +13 lines)

```diff
@@ -71,6 +71,13 @@ jspm_packages/
 .cache/
 public
 
+# Exception: Allow frontend/public/assets for logo files
+!frontend/public/
+!frontend/public/assets/
+!frontend/public/assets/*.png
+!frontend/public/assets/*.svg
+!frontend/public/assets/*.jpg
+
 # Storybook build outputs
 .out
 .storybook-out
@@ -130,6 +137,9 @@ agents/*.log
 test-results/
 playwright-report/
 test-results.xml
+test_*.sh
+test-*.sh
+*.code-workspace
 
 # Package manager lock files (uncomment if you want to ignore them)
 # package-lock.json
@@ -140,6 +150,9 @@ test-results.xml
 deploy-patchmon.sh
 manage-instances.sh
 manage-patchmon.sh
+manage-patchmon-dev.sh
 setup-installer-site.sh
 install-server.*
 notify-clients-upgrade.sh
+debug-agent.sh
+docker/compose_dev_*
```
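One gitignore subtlety explains the cascade of negations: git cannot re-include a file whose parent directory is excluded, which is presumably why the diff first un-ignores `frontend/public/` and `frontend/public/assets/` before whitelisting the image extensions. Standard git commands can confirm which rule wins for any path (the example file names below are hypothetical):

```bash
# Print the matching .gitignore source line, if any, for each path.
git check-ignore -v frontend/public/assets/logo.png
git check-ignore -v frontend/public/assets/readme.txt
```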
**LICENSE** (new file, +674 lines, @@ -0,0 +1,674 @@)

The new LICENSE file is the complete, verbatim text of the GNU General Public License, Version 3, 29 June 2007 (Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>), from the Preamble through the "How to Apply These Terms to Your New Programs" appendix, including the pointer to <https://www.gnu.org/licenses/why-not-lgpl.html>.
298 README.md Normal file
@@ -0,0 +1,298 @@
# PatchMon - Linux Patch Monitoring made Simple

[](https://patchmon.net)
[](https://patchmon.net/discord)
[](https://github.com/9technologygroup/patchmon.net)
[](https://github.com/users/9technologygroup/projects/1)
[](https://docs.patchmon.net/)

---

## Please STAR this repo :D

## Purpose

PatchMon provides centralized patch management across diverse server environments. Agents communicate outbound-only to the PatchMon server, eliminating inbound ports on monitored hosts while delivering comprehensive visibility and safe automation.

## Features

### Dashboard
- Customisable dashboard with per-user card layout and ordering

### Users & Authentication
- Multi-user accounts (admin and standard users)
- Roles, permissions & RBAC

### Hosts & Inventory
- Host inventory/groups with key attributes and OS details
- Host grouping (create and manage host groups)

### Packages & Updates
- Package inventory across hosts
- Outdated-package overview and counts
- Per-host repository tracking

### Agent & Data Collection
- Agent version management, with script content stored in the database

### Settings & Configuration
- Server URL/protocol/host/port
- Signup toggle and default user role selection

### API & Integrations
- REST API under `/api/v1` with JWT auth (see the request sketch after this list)
- Proxmox LXC Auto-Enrollment: automatically discover and enroll LXC containers from Proxmox hosts
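
A minimal sketch of calling the API from a shell, assuming a deployment at `patchmon.example.com`. The `X-API-ID`/`X-API-KEY` header pair and the `/api/v1/hosts/agent/version` path come from the agent install flow in this repo; the `/api/v1/auth/login` path, its payload, and the `.token` field are assumptions for illustration only:

```bash
# Hypothetical login endpoint: the path and response shape are assumptions
TOKEN=$(curl -s -X POST https://patchmon.example.com/api/v1/auth/login \
    -H "Content-Type: application/json" \
    -d '{"username":"admin","password":"<password>"}' | jq -r '.token')

# JWT-authenticated request (Bearer scheme assumed)
curl -s -H "Authorization: Bearer $TOKEN" https://patchmon.example.com/api/v1/hosts | jq .

# Agent endpoints authenticate with per-host API credentials as HTTP headers instead
curl -s -H "X-API-ID: <api-id>" -H "X-API-KEY: <api-key>" \
    https://patchmon.example.com/api/v1/hosts/agent/version
```
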
### Security
- Rate limiting for general, auth, and agent endpoints
- Outbound-only agent model reduces attack surface

### Deployment & Operations
- Docker installation & one-line self-host installer (Ubuntu/Debian)
- systemd service for backend lifecycle
- nginx vhost for frontend + API proxy; optional Let's Encrypt integration

## Getting Started

### PatchMon Cloud (coming soon)

Managed, zero-maintenance PatchMon hosting. Stay tuned.

### Self-hosted Installation

#### Docker (preferred)

For getting started with Docker, see the [Docker documentation](https://github.com/PatchMon/PatchMon/blob/main/docker/README.md).

#### Native Install (advanced/non-Docker)

Run on a clean Ubuntu/Debian server with internet access:

#### Debian:
```bash
apt update -y
apt upgrade -y
apt install curl -y
```

#### Ubuntu:
```bash
apt-get update -y
apt-get upgrade -y
apt-get install curl -y
```

#### Install Script
```bash
curl -fsSL -o setup.sh https://raw.githubusercontent.com/PatchMon/PatchMon/refs/heads/main/setup.sh && chmod +x setup.sh && bash setup.sh
```

#### Update Script (--update flag)
```bash
curl -fsSL -o setup.sh https://raw.githubusercontent.com/PatchMon/PatchMon/refs/heads/main/setup.sh && chmod +x setup.sh && bash setup.sh --update
```

#### Minimum specs for building
- CPU: 2 vCPU
- RAM: 2 GB
- Disk: 15 GB

During setup you will be asked for:
- Domain/IP: public DNS name or local IP (default: `patchmon.internal`)
- SSL/HTTPS: `y` for public deployments with a public IP, `n` for internal networks
- Email: only if SSL is enabled (for Let's Encrypt)
- Git branch: default is `main` (press Enter)

The script will:
- Install prerequisites (Node.js, PostgreSQL, nginx)
- Clone the repo, install dependencies, build the frontend, and run migrations
- Create a systemd service and an nginx vhost config
- Start the service and write a consolidated info file at `/opt/<your-domain>/deployment-info.txt`
- Copy the full installer log from `/var/log/patchmon-install.log` to `/opt/<your-domain>/patchmon-install.log`

After installation:
- Visit `http(s)://<your-domain>` and complete the first-time admin setup
- See all useful info in `deployment-info.txt`

## Forcing updates after host package changes

Should you perform a manual package update on your host and wish to see the results reflected in PatchMon sooner than the usual scheduled update, you can trigger the process manually by running:

```bash
/usr/local/bin/patchmon-agent.sh update
```

This sends the results to PatchMon immediately.
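
If you prefer an explicit reporting cadence, a cron entry works the same way the Docker agent shipped in this repo documents for itself; a minimal sketch, assuming the shell agent is installed at the path above:

```bash
# Send a package report to PatchMon every hour
0 * * * * /usr/local/bin/patchmon-agent.sh update
```
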
## Communication Model

- Outbound-only agents: servers initiate communication to PatchMon
- No inbound connections required on monitored servers
- Secure server-side API with JWT authentication and rate limiting

## Architecture

- Backend: Node.js/Express + Prisma + PostgreSQL
- Frontend: Vite + React
- Reverse proxy: nginx
- Database: PostgreSQL
- System service: systemd-managed backend

```mermaid
flowchart LR
    A[End Users / Browser<br>Admin UI / Frontend] -- HTTPS --> B[nginx<br>serve FE, proxy API]
    B -- HTTP --> C["Backend<br>(Node/Express)<br>/api, auth, Prisma"]
    C -- TCP --> D[PostgreSQL<br>Database]

    E["Agents on your servers (Outbound Only)"] -- HTTPS --> F["Backend API<br>(/api/v1)"]
```

### Operational

- systemd manages the backend service
- certbot/nginx for TLS (public deployments)
- setup.sh bootstraps the OS, app, DB, and config

## Support

- Discord: [https://patchmon.net/discord](https://patchmon.net/discord)
- Email: support@patchmon.net

## Roadmap

- Roadmap board: https://github.com/orgs/PatchMon/projects/2

## License

- AGPLv3 (more information on this soon)

---

## 🤝 Contributing

We welcome contributions from the community! Here's how you can get involved:

### Development Setup

1. **Fork the Repository**
   ```bash
   # Click the "Fork" button on GitHub, then clone your fork
   git clone https://github.com/YOUR_USERNAME/patchmon.net.git
   cd patchmon.net
   ```

2. **Create a Feature Branch**
   ```bash
   git checkout -b feature/your-feature-name
   # or
   git checkout -b fix/your-bug-fix
   ```

3. **Install Dependencies and Setup Hooks**
   ```bash
   npm install
   npm run prepare
   ```

4. **Make Your Changes**
   - Write clean, well-documented code
   - Follow existing code style and patterns
   - Add tests for new functionality
   - Update documentation as needed

5. **Test Your Changes**
   ```bash
   # Run backend tests
   cd backend
   npm test

   # Run frontend tests
   cd ../frontend
   npm test
   ```

6. **Commit and Push**
   ```bash
   git add .
   git commit -m "Add: descriptive commit message"
   git push origin feature/your-feature-name
   ```

7. **Create a Pull Request**
   - Go to your fork on GitHub
   - Click "New Pull Request"
   - Provide a clear description of your changes
   - Link any related issues

### Contribution Guidelines

- **Code Style**: Follow the existing code patterns and Biome configuration
- **Commits**: Use conventional commit messages (feat:, fix:, docs:, etc.)
- **Testing**: Ensure all tests pass and add tests for new features
- **Documentation**: Update README and code comments as needed
- **Issues**: Check existing issues before creating new ones

---

## 🏢 Enterprise & Custom Solutions

### PatchMon Cloud
- **Fully Managed**: We handle all infrastructure and maintenance
- **Scalable**: Grows with your organization
- **Secure**: Enterprise-grade security and compliance
- **Support**: Dedicated support team

### Custom Integrations
- **API Development**: Custom endpoints for your specific needs
- **Third-Party Integrations**: Connect with your existing tools
- **Custom Dashboards**: Tailored reporting and visualization
- **White-Label Solutions**: Brand PatchMon as your own

### Enterprise Deployment
- **On-Premises**: Deploy in your own data center
- **Air-Gapped**: Support for isolated environments
- **Compliance**: Meet industry-specific requirements
- **Training**: Comprehensive team training and onboarding

*Contact us at support@patchmon.net for enterprise inquiries*

---

## 🙏 Acknowledgments

### Special Thanks
- **Jonathan Higson** - For inspiration, ideas, and valuable feedback
- **@Adam20054** - For working on Docker Compose deployment
- **@tigattack** - For working on GitHub CI/CD pipelines
- **Cloud X** and **Crazy Dead** - For moderating our Discord server and keeping the community awesome
- **Beta Testers** - For keeping me awake at night
- **My family** - For understanding my passion

### Contributors

Thank you to all our contributors who help make PatchMon better every day!

## 🔗 Links

- **Website**: [patchmon.net](https://patchmon.net)
- **Discord**: [https://patchmon.net/discord](https://patchmon.net/discord)
- **Roadmap**: [GitHub Projects](https://github.com/users/9technologygroup/projects/1)
- **Documentation**: [https://docs.patchmon.net](https://docs.patchmon.net)
- **Support**: support@patchmon.net

---

<div align="center">

**Made with ❤️ by the PatchMon Team**

[](https://patchmon.net/discord)
[](https://github.com/PatchMon/PatchMon)

</div>
1598 agents/patchmon-agent-legacy1-2-8.sh Normal file
File diff suppressed because it is too large
BIN agents/patchmon-agent-linux-386 Executable file
Binary file not shown.
BIN agents/patchmon-agent-linux-amd64 Executable file
Binary file not shown.
BIN agents/patchmon-agent-linux-arm Executable file
Binary file not shown.
BIN agents/patchmon-agent-linux-arm64 Executable file
Binary file not shown.
File diff suppressed because it is too large
496 agents/patchmon-docker-agent.sh Executable file
@@ -0,0 +1,496 @@
#!/bin/bash

# PatchMon Docker Agent Script v1.3.0
# This script collects Docker container and image information and sends it to PatchMon

# Configuration
PATCHMON_SERVER="${PATCHMON_SERVER:-http://localhost:3001}"
API_VERSION="v1"
AGENT_VERSION="1.3.0"
CONFIG_FILE="/etc/patchmon/agent.conf"
CREDENTIALS_FILE="/etc/patchmon/credentials"
LOG_FILE="/var/log/patchmon-docker-agent.log"

# Curl flags placeholder (replaced by server based on SSL settings)
CURL_FLAGS=""

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Logging function
log() {
    if [[ -w "$(dirname "$LOG_FILE")" ]] 2>/dev/null; then
        echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" >> "$LOG_FILE" 2>/dev/null
    fi
}

# Error handling
error() {
    echo -e "${RED}ERROR: $1${NC}" >&2
    log "ERROR: $1"
    exit 1
}

# Info logging
info() {
    echo -e "${BLUE}ℹ️  $1${NC}" >&2
    log "INFO: $1"
}

# Success logging
success() {
    echo -e "${GREEN}✅ $1${NC}" >&2
    log "SUCCESS: $1"
}

# Warning logging
warning() {
    echo -e "${YELLOW}⚠️  $1${NC}" >&2
    log "WARNING: $1"
}

# Check if Docker is installed and running
check_docker() {
    if ! command -v docker &> /dev/null; then
        error "Docker is not installed on this system"
    fi

    if ! docker info &> /dev/null; then
        error "Docker daemon is not running or you don't have permission to access it. Try running with sudo."
    fi
}

# Load credentials
load_credentials() {
    if [[ ! -f "$CREDENTIALS_FILE" ]]; then
        error "Credentials file not found at $CREDENTIALS_FILE. Please configure the main PatchMon agent first."
    fi

    source "$CREDENTIALS_FILE"

    if [[ -z "$API_ID" ]] || [[ -z "$API_KEY" ]]; then
        error "API credentials not found in $CREDENTIALS_FILE"
    fi

    # Use PATCHMON_URL from credentials if available, otherwise use default
    if [[ -n "$PATCHMON_URL" ]]; then
        PATCHMON_SERVER="$PATCHMON_URL"
    fi
}

# Load configuration
load_config() {
    if [[ -f "$CONFIG_FILE" ]]; then
        source "$CONFIG_FILE"
        if [[ -n "$SERVER_URL" ]]; then
            PATCHMON_SERVER="$SERVER_URL"
        fi
    fi
}

# Collect Docker containers
collect_containers() {
    info "Collecting Docker container information..."

    local containers_json="["
    local first=true

    # Get all containers (running and stopped)
    while IFS='|' read -r container_id name image status state created started ports; do
        if [[ -z "$container_id" ]]; then
            continue
        fi

        # Parse image name and tag
        local image_name="${image%%:*}"
        local image_tag="${image##*:}"
        if [[ "$image_tag" == "$image_name" ]]; then
            image_tag="latest"
        fi

        # Determine image source based on registry
        local image_source="docker-hub"
        if [[ "$image_name" == ghcr.io/* ]]; then
            image_source="github"
        elif [[ "$image_name" == registry.gitlab.com/* ]]; then
            image_source="gitlab"
        elif [[ "$image_name" == *"/"*"/"* ]]; then
            image_source="private"
        fi

        # Get repository name (without registry prefix for common registries)
        local image_repository="$image_name"
        image_repository="${image_repository#ghcr.io/}"
        image_repository="${image_repository#registry.gitlab.com/}"

        # Get image ID
        local full_image_id=$(docker inspect --format='{{.Image}}' "$container_id" 2>/dev/null || echo "unknown")
        full_image_id="${full_image_id#sha256:}"

        # Normalize status (extract just the status keyword)
        local normalized_status="unknown"
        if [[ "$status" =~ ^Up ]]; then
            normalized_status="running"
        elif [[ "$status" =~ ^Exited ]]; then
            normalized_status="exited"
        elif [[ "$status" =~ ^Created ]]; then
            normalized_status="created"
        elif [[ "$status" =~ ^Restarting ]]; then
            normalized_status="restarting"
        elif [[ "$status" =~ ^Paused ]]; then
            normalized_status="paused"
        elif [[ "$status" =~ ^Dead ]]; then
            normalized_status="dead"
        fi

        # Parse ports
        local ports_json="null"
        if [[ -n "$ports" && "$ports" != "null" ]]; then
            # Convert Docker port format to JSON
            ports_json=$(echo "$ports" | jq -R -s -c 'split(",") | map(select(length > 0)) | map(split("->") | {(.[0]): .[1]}) | add // {}')
        fi

        # Convert dates to ISO 8601 format
        # If date conversion fails, use null instead of invalid date string
        local created_iso=$(date -d "$created" -Iseconds 2>/dev/null || echo "null")
        local started_iso="null"
        if [[ -n "$started" && "$started" != "null" ]]; then
            started_iso=$(date -d "$started" -Iseconds 2>/dev/null || echo "null")
        fi

        # Add comma for JSON array
        if [[ "$first" == false ]]; then
            containers_json+=","
        fi
        first=false

        # Build JSON object for this container
        containers_json+="{\"container_id\":\"$container_id\","
        containers_json+="\"name\":\"$name\","
        containers_json+="\"image_name\":\"$image_name\","
        containers_json+="\"image_tag\":\"$image_tag\","
        containers_json+="\"image_repository\":\"$image_repository\","
        containers_json+="\"image_source\":\"$image_source\","
        containers_json+="\"image_id\":\"$full_image_id\","
        containers_json+="\"status\":\"$normalized_status\","
        containers_json+="\"state\":\"$state\","
        containers_json+="\"ports\":$ports_json"

        # Only add created_at if we have a valid date
        if [[ "$created_iso" != "null" ]]; then
            containers_json+=",\"created_at\":\"$created_iso\""
        fi

        # Only add started_at if we have a valid date
        if [[ "$started_iso" != "null" ]]; then
            containers_json+=",\"started_at\":\"$started_iso\""
        fi

        containers_json+="}"

    done < <(docker ps -a --format '{{.ID}}|{{.Names}}|{{.Image}}|{{.Status}}|{{.State}}|{{.CreatedAt}}|{{.RunningFor}}|{{.Ports}}' 2>/dev/null)

    containers_json+="]"

    echo "$containers_json"
}
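
# --- Editor's aside (not part of the original script) ------------------------
# The jq filter used for "ports" above is dense; here is what it does to a
# typical `docker ps` ports value (single mapping shown; input is an example):
#
#   $ printf '0.0.0.0:8080->80/tcp' | \
#       jq -R -s -c 'split(",") | map(select(length > 0)) | map(split("->") | {(.[0]): .[1]}) | add // {}'
#   {"0.0.0.0:8080":"80/tcp"}
#
# i.e. each "host->container" pair becomes one key/value in a JSON object.
# ------------------------------------------------------------------------------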

# Collect Docker images
collect_images() {
    info "Collecting Docker image information..."

    local images_json="["
    local first=true

    while IFS='|' read -r repository tag image_id created size digest; do
        if [[ -z "$repository" || "$repository" == "<none>" ]]; then
            continue
        fi

        # Clean up tag
        if [[ -z "$tag" || "$tag" == "<none>" ]]; then
            tag="latest"
        fi

        # Clean image ID
        image_id="${image_id#sha256:}"

        # Determine source
        local source="docker-hub"
        if [[ "$repository" == ghcr.io/* ]]; then
            source="github"
        elif [[ "$repository" == registry.gitlab.com/* ]]; then
            source="gitlab"
        elif [[ "$repository" == *"/"*"/"* ]]; then
            source="private"
        fi

        # Convert size to bytes (approximate)
        local size_bytes=0
        if [[ "$size" =~ ([0-9.]+)([KMGT]?B) ]]; then
            local num="${BASH_REMATCH[1]}"
            local unit="${BASH_REMATCH[2]}"
            case "$unit" in
                KB) size_bytes=$(echo "$num * 1024" | bc | cut -d. -f1) ;;
                MB) size_bytes=$(echo "$num * 1024 * 1024" | bc | cut -d. -f1) ;;
                GB) size_bytes=$(echo "$num * 1024 * 1024 * 1024" | bc | cut -d. -f1) ;;
                TB) size_bytes=$(echo "$num * 1024 * 1024 * 1024 * 1024" | bc | cut -d. -f1) ;;
                B) size_bytes=$(echo "$num" | cut -d. -f1) ;;
            esac
        fi

        # Convert created date to ISO 8601
        # If date conversion fails, use null instead of invalid date string
        local created_iso=$(date -d "$created" -Iseconds 2>/dev/null || echo "null")

        # Add comma for JSON array
        if [[ "$first" == false ]]; then
            images_json+=","
        fi
        first=false

        # Build JSON object for this image
        images_json+="{\"repository\":\"$repository\","
        images_json+="\"tag\":\"$tag\","
        images_json+="\"image_id\":\"$image_id\","
        images_json+="\"source\":\"$source\","
        images_json+="\"size_bytes\":$size_bytes"

        # Only add created_at if we have a valid date
        if [[ "$created_iso" != "null" ]]; then
            images_json+=",\"created_at\":\"$created_iso\""
        fi

        # Only add digest if present
        if [[ -n "$digest" && "$digest" != "<none>" ]]; then
            images_json+=",\"digest\":\"$digest\""
        fi

        images_json+="}"

    done < <(docker images --format '{{.Repository}}|{{.Tag}}|{{.ID}}|{{.CreatedAt}}|{{.Size}}|{{.Digest}}' --no-trunc 2>/dev/null)

    images_json+="]"

    echo "$images_json"
}
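
# --- Editor's aside (not part of the original script) ------------------------
# The bc pipeline above converts docker's human-readable sizes to bytes and
# truncates any fractional part, e.g. for a reported size of "1.5GB":
#
#   $ echo "1.5 * 1024 * 1024 * 1024" | bc | cut -d. -f1
#   1610612736
#
# ------------------------------------------------------------------------------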

# Check for image updates
check_image_updates() {
    info "Checking for image updates..."

    local updates_json="["
    local first=true
    local update_count=0

    # Get all images
    while IFS='|' read -r repository tag image_id digest; do
        if [[ -z "$repository" || "$repository" == "<none>" || "$tag" == "<none>" ]]; then
            continue
        fi

        # Skip checking 'latest' tag as it's always considered current by name
        # We'll still check digest though
        local full_image="${repository}:${tag}"

        # Try to get remote digest from registry
        # Use docker manifest inspect to avoid pulling the image
        local remote_digest=$(docker manifest inspect "$full_image" 2>/dev/null | jq -r '.config.digest // .manifests[0].digest // empty' 2>/dev/null)

        if [[ -z "$remote_digest" ]]; then
            # If manifest inspect fails, try buildx imagetools inspect (works for more registries)
            remote_digest=$(docker buildx imagetools inspect "$full_image" 2>/dev/null | grep -oP 'Digest:\s*\K\S+' | head -1)
        fi

        # Clean up digests for comparison
        local local_digest="${digest#sha256:}"
        remote_digest="${remote_digest#sha256:}"

        # If we got a remote digest and it's different from local, there's an update
        if [[ -n "$remote_digest" && -n "$local_digest" && "$remote_digest" != "$local_digest" ]]; then
            if [[ "$first" == false ]]; then
                updates_json+=","
            fi
            first=false

            # Build update JSON object
            updates_json+="{\"repository\":\"$repository\","
            updates_json+="\"current_tag\":\"$tag\","
            updates_json+="\"available_tag\":\"$tag\","
            updates_json+="\"current_digest\":\"$local_digest\","
            updates_json+="\"available_digest\":\"$remote_digest\","
            updates_json+="\"image_id\":\"${image_id#sha256:}\""
            updates_json+="}"

            ((update_count++))
        fi

    done < <(docker images --format '{{.Repository}}|{{.Tag}}|{{.ID}}|{{.Digest}}' --no-trunc 2>/dev/null)

    updates_json+="]"

    info "Found $update_count image update(s) available"

    echo "$updates_json"
}
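
# --- Editor's aside (not part of the original script) ------------------------
# `docker manifest inspect` asks the registry for metadata without pulling the
# image, which keeps the update check above cheap. A quick manual check of the
# remote digest for a single image (image name here is only an example):
#
#   $ docker manifest inspect nginx:latest | \
#       jq -r '.config.digest // .manifests[0].digest // empty'
#
# ------------------------------------------------------------------------------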

# Send Docker data to server
send_docker_data() {
    load_credentials

    info "Collecting Docker data..."

    local containers=$(collect_containers)
    local images=$(collect_images)
    local updates=$(check_image_updates)

    # Count collected items
    local container_count=$(echo "$containers" | jq '. | length' 2>/dev/null || echo "0")
    local image_count=$(echo "$images" | jq '. | length' 2>/dev/null || echo "0")
    local update_count=$(echo "$updates" | jq '. | length' 2>/dev/null || echo "0")

    info "Found $container_count containers, $image_count images, and $update_count update(s) available"

    # Build payload
    local payload="{\"apiId\":\"$API_ID\",\"apiKey\":\"$API_KEY\",\"containers\":$containers,\"images\":$images,\"updates\":$updates}"

    # Send to server
    info "Sending Docker data to PatchMon server..."

    local response=$(curl $CURL_FLAGS -s -w "\n%{http_code}" -X POST \
        -H "Content-Type: application/json" \
        -d "$payload" \
        "${PATCHMON_SERVER}/api/${API_VERSION}/docker/collect" 2>&1)

    local http_code=$(echo "$response" | tail -n1)
    local response_body=$(echo "$response" | head -n-1)

    if [[ "$http_code" == "200" ]]; then
        success "Docker data sent successfully!"
        log "Docker data sent: $container_count containers, $image_count images"
        return 0
    else
        error "Failed to send Docker data. HTTP Status: $http_code\nResponse: $response_body"
    fi
}

# Test Docker data collection without sending
test_collection() {
    check_docker

    info "Testing Docker data collection (dry run)..."
    echo ""

    local containers=$(collect_containers)
    local images=$(collect_images)
    local updates=$(check_image_updates)

    local container_count=$(echo "$containers" | jq '. | length' 2>/dev/null || echo "0")
    local image_count=$(echo "$images" | jq '. | length' 2>/dev/null || echo "0")
    local update_count=$(echo "$updates" | jq '. | length' 2>/dev/null || echo "0")

    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo -e "${GREEN}Docker Data Collection Results${NC}"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo -e "Containers found:  ${GREEN}$container_count${NC}"
    echo -e "Images found:      ${GREEN}$image_count${NC}"
    echo -e "Updates available: ${YELLOW}$update_count${NC}"
    echo ""

    if command -v jq &> /dev/null; then
        echo "━━━ Containers ━━━"
        echo "$containers" | jq -r '.[] | "\(.name) (\(.status)) - \(.image_name):\(.image_tag)"' | head -10
        if [[ $container_count -gt 10 ]]; then
            echo "... and $((container_count - 10)) more"
        fi
        echo ""
        echo "━━━ Images ━━━"
        echo "$images" | jq -r '.[] | "\(.repository):\(.tag) (\(.size_bytes / 1024 / 1024 | floor)MB)"' | head -10
        if [[ $image_count -gt 10 ]]; then
            echo "... and $((image_count - 10)) more"
        fi

        if [[ $update_count -gt 0 ]]; then
            echo ""
            echo "━━━ Available Updates ━━━"
            echo "$updates" | jq -r '.[] | "\(.repository):\(.current_tag) → \(.available_tag)"'
        fi
    fi

    echo ""
    success "Test collection completed successfully!"
}

# Show help
show_help() {
    cat << EOF
PatchMon Docker Agent v${AGENT_VERSION}

This agent collects Docker container and image information and sends it to PatchMon.

USAGE:
    $0 <command>

COMMANDS:
    collect    Collect and send Docker data to PatchMon server
    test       Test Docker data collection without sending (dry run)
    help       Show this help message

REQUIREMENTS:
    - Docker must be installed and running
    - Main PatchMon agent must be configured first
    - Credentials file must exist at $CREDENTIALS_FILE

EXAMPLES:
    # Test collection (dry run)
    sudo $0 test

    # Collect and send Docker data
    sudo $0 collect

SCHEDULING:
    To run this agent automatically, add a cron job:

    # Run every 5 minutes
    */5 * * * * /usr/local/bin/patchmon-docker-agent.sh collect

    # Run every hour
    0 * * * * /usr/local/bin/patchmon-docker-agent.sh collect

FILES:
    Config:      $CONFIG_FILE
    Credentials: $CREDENTIALS_FILE
    Log:         $LOG_FILE

EOF
}

# Main function
main() {
    case "$1" in
        "collect")
            check_docker
            load_config
            send_docker_data
            ;;
        "test")
            check_docker
            load_config
            test_collection
            ;;
        "help"|"--help"|"-h"|"")
            show_help
            ;;
        *)
            error "Unknown command: $1\n\nRun '$0 help' for usage information."
            ;;
    esac
}

# Run main function
main "$@"
@@ -1,10 +1,15 @@
 #!/bin/bash
 
 # PatchMon Agent Installation Script
-# Usage: curl -sSL {PATCHMON_URL}/api/v1/hosts/install | bash -s -- {PATCHMON_URL} {API_ID} {API_KEY}
+# Usage: curl -s {PATCHMON_URL}/api/v1/hosts/install -H "X-API-ID: {API_ID}" -H "X-API-KEY: {API_KEY}" | bash
 
 set -e
 
+# This placeholder will be dynamically replaced by the server when serving this
+# script based on the "ignore SSL self-signed" setting. If set to -k, curl will
+# ignore certificate validation. Otherwise, it will be empty for secure default.
+# CURL_FLAGS is now set via environment variables by the backend
+
 # Colors for output
 RED='\033[0;31m'
 GREEN='\033[0;32m'
@@ -35,122 +40,636 @@ if [[ $EUID -ne 0 ]]; then
     error "This script must be run as root (use sudo)"
 fi
 
-# Default server URL (will be replaced by backend with configured URL)
-PATCHMON_URL="http://localhost:3001"
-
-# Parse arguments
-if [[ $# -ne 3 ]]; then
-    echo "Usage: curl -sSL {PATCHMON_URL}/api/v1/hosts/install | bash -s -- {PATCHMON_URL} {API_ID} {API_KEY}"
+# Verify system datetime and timezone
+verify_datetime() {
+    info "🕐 Verifying system datetime and timezone..."
+
+    # Get current system time
+    local system_time=$(date)
+    local timezone=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "Unknown")
+
+    # Display current datetime info
     echo ""
-    echo "Example:"
-    echo "curl -sSL http://patchmon.example.com/api/v1/hosts/install | bash -s -- http://patchmon.example.com patchmon_1a2b3c4d abcd1234567890abcdef1234567890abcdef1234567890abcdef1234567890"
+    echo -e "${BLUE}📅 Current System Date/Time:${NC}"
+    echo "   • Date/Time: $system_time"
+    echo "   • Timezone: $timezone"
     echo ""
-    echo "Contact your PatchMon administrator to get your API credentials."
-    exit 1
-fi
-
-PATCHMON_URL="$1"
-API_ID="$2"
-API_KEY="$3"
-
-# Validate inputs
-if [[ ! "$PATCHMON_URL" =~ ^https?:// ]]; then
-    error "Invalid URL format. Must start with http:// or https://"
-fi
-
-if [[ ! "$API_ID" =~ ^patchmon_[a-f0-9]{16}$ ]]; then
-    error "Invalid API ID format. API ID should be in format: patchmon_xxxxxxxxxxxxxxxx"
-fi
-
-if [[ ! "$API_KEY" =~ ^[a-f0-9]{64}$ ]]; then
-    error "Invalid API Key format. API Key should be 64 hexadecimal characters."
-fi
-
-info "🚀 Installing PatchMon Agent..."
-info "   Server: $PATCHMON_URL"
-info "   API ID: $API_ID"
-
-# Create patchmon directory
-info "📁 Creating configuration directory..."
-mkdir -p /etc/patchmon
-
-# Download the agent script
-info "📥 Downloading PatchMon agent script..."
-curl -sSL "$PATCHMON_URL/api/v1/hosts/agent/download" -o /usr/local/bin/patchmon-agent.sh
-chmod +x /usr/local/bin/patchmon-agent.sh
-
-# Get the agent version from the downloaded script
-AGENT_VERSION=$(grep '^AGENT_VERSION=' /usr/local/bin/patchmon-agent.sh | cut -d'"' -f2)
-info "📋 Agent version: $AGENT_VERSION"
-
-# Get expected agent version from server
-EXPECTED_VERSION=$(curl -s "$PATCHMON_URL/api/v1/hosts/agent/version" | grep -o '"currentVersion":"[^"]*' | cut -d'"' -f4 2>/dev/null || echo "Unknown")
-if [[ "$EXPECTED_VERSION" != "Unknown" ]]; then
-    info "📋 Expected version: $EXPECTED_VERSION"
-    if [[ "$AGENT_VERSION" != "$EXPECTED_VERSION" ]]; then
-        warning "⚠️  Agent version mismatch! Installed: $AGENT_VERSION, Expected: $EXPECTED_VERSION"
+    # Check if we can read from stdin (interactive terminal)
+    if [[ -t 0 ]]; then
+        # Interactive terminal - ask user
+        read -p "Does this date/time look correct to you? (y/N): " -r response
+        if [[ "$response" =~ ^[Yy]$ ]]; then
+            success "✅ Date/time verification passed"
+            echo ""
+            return 0
+        else
+            echo ""
+            echo -e "${RED}❌ Date/time verification failed${NC}"
+            echo ""
+            echo -e "${YELLOW}💡 Please fix the date/time and re-run the installation script:${NC}"
+            echo "   sudo timedatectl set-time 'YYYY-MM-DD HH:MM:SS'"
+            echo "   sudo timedatectl set-timezone 'America/New_York'  # or your timezone"
+            echo "   sudo timedatectl list-timezones  # to see available timezones"
+            echo ""
+            echo -e "${BLUE}ℹ️  After fixing the date/time, re-run this installation script.${NC}"
+            error "Installation cancelled - please fix date/time and re-run"
+        fi
+    else
+        # Non-interactive (piped from curl) - show warning and continue
+        echo -e "${YELLOW}⚠️  Non-interactive installation detected${NC}"
+        echo ""
+        echo "Please verify the date/time shown above is correct."
+        echo "If the date/time is incorrect, it may cause issues with:"
+        echo " • Logging timestamps"
+        echo " • Scheduled updates"
+        echo " • Data synchronization"
+        echo ""
+        echo -e "${GREEN}✅ Continuing with installation...${NC}"
+        success "✅ Date/time verification completed (assumed correct)"
+        echo ""
     fi
-fi
+}
+
+# Run datetime verification
+verify_datetime
+
+# Clean up old files (keep only last 3 of each type)
+cleanup_old_files() {
+    # Clean up old credential backups
+    ls -t /etc/patchmon/credentials.yml.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+
+    # Clean up old config backups
+    ls -t /etc/patchmon/config.yml.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+
+    # Clean up old agent backups
+    ls -t /usr/local/bin/patchmon-agent.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+
+    # Clean up old log files
+    ls -t /etc/patchmon/logs/patchmon-agent.log.old.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+
+    # Clean up old shell script backups (if any exist)
+    ls -t /usr/local/bin/patchmon-agent.sh.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+
+    # Clean up old credentials backups (if any exist)
+    ls -t /etc/patchmon/credentials.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+}
+
+# Run cleanup at start
+cleanup_old_files
+
+# Generate or retrieve machine ID
+get_machine_id() {
+    # Try multiple sources for machine ID
+    if [[ -f /etc/machine-id ]]; then
+        cat /etc/machine-id
+    elif [[ -f /var/lib/dbus/machine-id ]]; then
+        cat /var/lib/dbus/machine-id
+    else
+        # Fallback: generate from hardware info (less ideal but works)
+        echo "patchmon-$(cat /sys/class/dmi/id/product_uuid 2>/dev/null || cat /proc/sys/kernel/random/uuid)"
+    fi
+}
+
+# Parse arguments from environment (passed via HTTP headers)
+if [[ -z "$PATCHMON_URL" ]] || [[ -z "$API_ID" ]] || [[ -z "$API_KEY" ]]; then
+    error "Missing required parameters. This script should be called via the PatchMon web interface."
 fi
 
-# Get update interval policy from server
-UPDATE_INTERVAL=$(curl -s "$PATCHMON_URL/api/v1/settings/update-interval" | grep -o '"updateInterval":[0-9]*' | cut -d':' -f2 2>/dev/null || echo "60")
-info "📋 Update interval: $UPDATE_INTERVAL minutes"
-
-# Create credentials file
-info "🔐 Setting up API credentials..."
-cat > /etc/patchmon/credentials << EOF
-# PatchMon API Credentials
+# Auto-detect architecture if not explicitly set
+if [[ -z "$ARCHITECTURE" ]]; then
+    arch_raw=$(uname -m 2>/dev/null || echo "unknown")
+
+    # Map architecture to supported values
+    case "$arch_raw" in
+        "x86_64")
+            ARCHITECTURE="amd64"
+            ;;
+        "i386"|"i686")
+            ARCHITECTURE="386"
+            ;;
+        "aarch64"|"arm64")
+            ARCHITECTURE="arm64"
+            ;;
+        "armv7l"|"armv6l"|"arm")
+            ARCHITECTURE="arm"
+            ;;
+        *)
+            warning "⚠️  Unknown architecture '$arch_raw', defaulting to amd64"
+            ARCHITECTURE="amd64"
+            ;;
+    esac
+fi
+
+# Validate architecture
+if [[ "$ARCHITECTURE" != "amd64" && "$ARCHITECTURE" != "386" && "$ARCHITECTURE" != "arm64" && "$ARCHITECTURE" != "arm" ]]; then
+    error "Invalid architecture '$ARCHITECTURE'. Must be one of: amd64, 386, arm64, arm"
+fi
+
+# Check if --force flag is set (for bypassing broken packages)
+FORCE_INSTALL="${FORCE_INSTALL:-false}"
+if [[ "$*" == *"--force"* ]] || [[ "$FORCE_INSTALL" == "true" ]]; then
+    FORCE_INSTALL="true"
+    warning "⚠️  Force mode enabled - will bypass broken packages"
+fi
+
+# Get unique machine ID for this host
+MACHINE_ID=$(get_machine_id)
+export MACHINE_ID
+
+info "🚀 Starting PatchMon Agent Installation..."
+info "📋 Server: $PATCHMON_URL"
+info "🔑 API ID: ${API_ID:0:16}..."
+info "🆔 Machine ID: ${MACHINE_ID:0:16}..."
+info "🏗️  Architecture: $ARCHITECTURE"
+
+# Display diagnostic information
+echo ""
+echo -e "${BLUE}🔧 Installation Diagnostics:${NC}"
+echo "   • URL: $PATCHMON_URL"
+echo "   • CURL FLAGS: $CURL_FLAGS"
+echo "   • API ID: ${API_ID:0:16}..."
+echo "   • API Key: ${API_KEY:0:16}..."
+echo "   • Architecture: $ARCHITECTURE"
+echo ""
+
+# Install required dependencies
+info "📦 Installing required dependencies..."
+echo ""
+
+# Function to check if a command exists
+command_exists() {
+    command -v "$1" >/dev/null 2>&1
+}
+
+# Function to install packages with error handling
+install_apt_packages() {
+    local packages=("$@")
+    local missing_packages=()
+
+    # Check which packages are missing
+    for pkg in "${packages[@]}"; do
+        if ! command_exists "$pkg"; then
+            missing_packages+=("$pkg")
+        fi
+    done
+
+    if [ ${#missing_packages[@]} -eq 0 ]; then
+        success "All required packages are already installed"
+        return 0
+    fi
+
+    info "Need to install: ${missing_packages[*]}"
+
+    # Build apt-get command based on force mode
+    local apt_cmd="apt-get install ${missing_packages[*]} -y"
+
+    if [[ "$FORCE_INSTALL" == "true" ]]; then
+        info "Using force mode - bypassing broken packages..."
+        apt_cmd="$apt_cmd -o APT::Get::Fix-Broken=false -o DPkg::Options::=\"--force-confold\" -o DPkg::Options::=\"--force-confdef\""
+    fi
+
+    # Try to install packages
+    if eval "$apt_cmd" 2>&1 | tee /tmp/patchmon_apt_install.log; then
+        success "Packages installed successfully"
+        return 0
+    else
+        warning "Package installation encountered issues, checking if required tools are available..."
+
+        # Verify critical dependencies are actually available
+        local all_ok=true
+        for pkg in "${packages[@]}"; do
+            if ! command_exists "$pkg"; then
+                if [[ "$FORCE_INSTALL" == "true" ]]; then
+                    error "Critical dependency '$pkg' is not available even with --force. Please install manually."
+                else
+                    error "Critical dependency '$pkg' is not available. Try again with --force flag or install manually: apt-get install $pkg"
+                fi
+                all_ok=false
+            fi
+        done
+
+        if $all_ok; then
+            success "All required tools are available despite installation warnings"
+            return 0
+        else
+            return 1
+        fi
+    fi
+}
+
+# Function to check and install packages for yum/dnf
+install_yum_dnf_packages() {
+    local pkg_manager="$1"
+    shift
+    local packages=("$@")
+    local missing_packages=()
+
+    # Check which packages are missing
+    for pkg in "${packages[@]}"; do
+        if ! command_exists "$pkg"; then
+            missing_packages+=("$pkg")
+        fi
+    done
+
+    if [ ${#missing_packages[@]} -eq 0 ]; then
+        success "All required packages are already installed"
+        return 0
+    fi
+
+    info "Need to install: ${missing_packages[*]}"
+
+    if [[ "$pkg_manager" == "yum" ]]; then
+        yum install -y "${missing_packages[@]}"
+    else
+        dnf install -y "${missing_packages[@]}"
+    fi
+}
+
+# Function to check and install packages for zypper
+install_zypper_packages() {
+    local packages=("$@")
+    local missing_packages=()
+
+    # Check which packages are missing
+    for pkg in "${packages[@]}"; do
+        if ! command_exists "$pkg"; then
+            missing_packages+=("$pkg")
+        fi
+    done
+
+    if [ ${#missing_packages[@]} -eq 0 ]; then
+        success "All required packages are already installed"
+        return 0
+    fi
+
+    info "Need to install: ${missing_packages[*]}"
+    zypper install -y "${missing_packages[@]}"
+}
+
+# Function to check and install packages for pacman
+install_pacman_packages() {
+    local packages=("$@")
+    local missing_packages=()
+
+    # Check which packages are missing
+    for pkg in "${packages[@]}"; do
+        if ! command_exists "$pkg"; then
+            missing_packages+=("$pkg")
+        fi
+    done
+
+    if [ ${#missing_packages[@]} -eq 0 ]; then
+        success "All required packages are already installed"
+        return 0
+    fi
+
+    info "Need to install: ${missing_packages[*]}"
+    pacman -S --noconfirm "${missing_packages[@]}"
+}
+
+# Function to check and install packages for apk
+install_apk_packages() {
+    local packages=("$@")
+    local missing_packages=()
+
+    # Check which packages are missing
+    for pkg in "${packages[@]}"; do
+        if ! command_exists "$pkg"; then
+            missing_packages+=("$pkg")
+        fi
+    done
+
+    if [ ${#missing_packages[@]} -eq 0 ]; then
+        success "All required packages are already installed"
+        return 0
+    fi
+
+    info "Need to install: ${missing_packages[*]}"
+    apk add --no-cache "${missing_packages[@]}"
+}
+
+# Detect package manager and install jq, curl, and bc
+if command -v apt-get >/dev/null 2>&1; then
+    # Debian/Ubuntu
+    info "Detected apt-get (Debian/Ubuntu)"
+    echo ""
+
+    # Check for broken packages
+    if dpkg -l | grep -q "^iH\|^iF" 2>/dev/null; then
+        if [[ "$FORCE_INSTALL" == "true" ]]; then
+            warning "Detected broken packages on system - force mode will work around them"
+        else
+            warning "⚠️  Broken packages detected on system"
+            warning "If installation fails, retry with: curl -s {URL}/api/v1/hosts/install --force -H ..."
+        fi
+    fi
+
+    info "Updating package lists..."
+    apt-get update || true
+    echo ""
+    info "Installing jq, curl, and bc..."
+    install_apt_packages jq curl bc
+elif command -v yum >/dev/null 2>&1; then
+    # CentOS/RHEL 7
+    info "Detected yum (CentOS/RHEL 7)"
+    echo ""
+    info "Installing jq, curl, and bc..."
+    install_yum_dnf_packages yum jq curl bc
+elif command -v dnf >/dev/null 2>&1; then
+    # CentOS/RHEL 8+/Fedora
+    info "Detected dnf (CentOS/RHEL 8+/Fedora)"
+    echo ""
+    info "Installing jq, curl, and bc..."
+    install_yum_dnf_packages dnf jq curl bc
+elif command -v zypper >/dev/null 2>&1; then
+    # openSUSE
+    info "Detected zypper (openSUSE)"
+    echo ""
+    info "Installing jq, curl, and bc..."
+    install_zypper_packages jq curl bc
+elif command -v pacman >/dev/null 2>&1; then
+    # Arch Linux
+    info "Detected pacman (Arch Linux)"
+    echo ""
+    info "Installing jq, curl, and bc..."
+    install_pacman_packages jq curl bc
+elif command -v apk >/dev/null 2>&1; then
+    # Alpine Linux
+    info "Detected apk (Alpine Linux)"
+    echo ""
+    info "Installing jq, curl, and bc..."
+    install_apk_packages jq curl bc
+else
+    warning "Could not detect package manager. Please ensure 'jq', 'curl', and 'bc' are installed manually."
+fi
+
+echo ""
+success "Dependencies installation completed"
+echo ""
+
+# Step 1: Handle existing configuration directory
+info "📁 Setting up configuration directory..."
+
+# Check if configuration directory already exists
+if [[ -d "/etc/patchmon" ]]; then
+    warning "⚠️  Configuration directory already exists at /etc/patchmon"
+    warning "⚠️  Preserving existing configuration files"
+
+    # List existing files for user awareness
+    info "📋 Existing files in /etc/patchmon:"
+    ls -la /etc/patchmon/ 2>/dev/null | grep -v "^total" | while read -r line; do
+        echo "   $line"
+    done
+else
+    info "📁 Creating new configuration directory..."
+    mkdir -p /etc/patchmon
+fi
+
+# Check if agent is already configured and working (before we overwrite anything)
+info "🔍 Checking if agent is already configured..."
+
+if [[ -f /etc/patchmon/config.yml ]] && [[ -f /etc/patchmon/credentials.yml ]]; then
+    if [[ -f /usr/local/bin/patchmon-agent ]]; then
+        info "📋 Found existing agent configuration"
+        info "🧪 Testing existing configuration with ping..."
+
+        if /usr/local/bin/patchmon-agent ping >/dev/null 2>&1; then
+            success "✅ Agent is already configured and ping successful"
+            info "📋 Existing configuration is working - skipping installation"
+            info ""
+            info "If you want to reinstall, remove the configuration files first:"
+            info "   sudo rm -f /etc/patchmon/config.yml /etc/patchmon/credentials.yml"
+            echo ""
+            exit 0
+        else
+            warning "⚠️  Agent configuration exists but ping failed"
+            warning "⚠️  Will move existing configuration and reinstall"
+            echo ""
+        fi
+    else
+        warning "⚠️  Configuration files exist but agent binary is missing"
+        warning "⚠️  Will move existing configuration and reinstall"
+        echo ""
+    fi
+else
+    success "✅ Agent not yet configured - proceeding with installation"
+    echo ""
+fi
+
+# Step 2: Create configuration files
+info "🔐 Creating configuration files..."
+
+# Check if config file already exists
+if [[ -f "/etc/patchmon/config.yml" ]]; then
+    warning "⚠️  Config file already exists at /etc/patchmon/config.yml"
+    warning "⚠️  Moving existing file out of the way for fresh installation"
+
+    # Clean up old config backups (keep only last 3)
+    ls -t /etc/patchmon/config.yml.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+
+    # Move existing file out of the way
+    mv /etc/patchmon/config.yml /etc/patchmon/config.yml.backup.$(date +%Y%m%d_%H%M%S)
+    info "📋 Moved existing config to: /etc/patchmon/config.yml.backup.$(date +%Y%m%d_%H%M%S)"
+fi
+
+# Check if credentials file already exists
+if [[ -f "/etc/patchmon/credentials.yml" ]]; then
+    warning "⚠️  Credentials file already exists at /etc/patchmon/credentials.yml"
+    warning "⚠️  Moving existing file out of the way for fresh installation"
+
+    # Clean up old credential backups (keep only last 3)
+    ls -t /etc/patchmon/credentials.yml.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+
+    # Move existing file out of the way
+    mv /etc/patchmon/credentials.yml /etc/patchmon/credentials.yml.backup.$(date +%Y%m%d_%H%M%S)
+    info "📋 Moved existing credentials to: /etc/patchmon/credentials.yml.backup.$(date +%Y%m%d_%H%M%S)"
+fi
+
+# Clean up old credentials file if it exists (from previous installations)
+if [[ -f "/etc/patchmon/credentials" ]]; then
+    warning "⚠️  Found old credentials file, removing it..."
+    rm -f /etc/patchmon/credentials
+    info "📋 Removed old credentials file"
+fi
+
+# Create main config file
+cat > /etc/patchmon/config.yml << EOF
+# PatchMon Agent Configuration
 # Generated on $(date)
-PATCHMON_URL="$PATCHMON_URL"
-API_ID="$API_ID"
-API_KEY="$API_KEY"
+patchmon_server: "$PATCHMON_URL"
+api_version: "v1"
+credentials_file: "/etc/patchmon/credentials.yml"
+log_file: "/etc/patchmon/logs/patchmon-agent.log"
+log_level: "info"
+skip_ssl_verify: ${SKIP_SSL_VERIFY:-false}
 EOF
 
-chmod 600 /etc/patchmon/credentials
+# Create credentials file
+cat > /etc/patchmon/credentials.yml << EOF
+# PatchMon API Credentials
+# Generated on $(date)
+api_id: "$API_ID"
+api_key: "$API_KEY"
+EOF
 
-# Test the configuration
-info "🧪 Testing configuration..."
-if /usr/local/bin/patchmon-agent.sh test; then
-    success "Configuration test passed!"
+chmod 600 /etc/patchmon/config.yml
+chmod 600 /etc/patchmon/credentials.yml
+
+# Step 3: Download the PatchMon agent binary using API credentials
+info "📥 Downloading PatchMon agent binary..."
+
+# Determine the binary filename based on architecture
+BINARY_NAME="patchmon-agent-linux-${ARCHITECTURE}"
+
+# Check if agent binary already exists
+if [[ -f "/usr/local/bin/patchmon-agent" ]]; then
+    warning "⚠️  Agent binary already exists at /usr/local/bin/patchmon-agent"
+    warning "⚠️  Moving existing file out of the way for fresh installation"
+
+    # Clean up old agent backups (keep only last 3)
+    ls -t /usr/local/bin/patchmon-agent.backup.* 2>/dev/null | tail -n +4 | xargs -r rm -f
+
+    # Move existing file out of the way
+    mv /usr/local/bin/patchmon-agent /usr/local/bin/patchmon-agent.backup.$(date +%Y%m%d_%H%M%S)
+    info "📋 Moved existing agent to: /usr/local/bin/patchmon-agent.backup.$(date +%Y%m%d_%H%M%S)"
+fi
+
+# Clean up old shell script if it exists (from previous installations)
+if [[ -f "/usr/local/bin/patchmon-agent.sh" ]]; then
+    warning "⚠️  Found old shell script agent, removing it..."
+    rm -f /usr/local/bin/patchmon-agent.sh
+    info "📋 Removed old shell script agent"
+fi
+
+# Download the binary
+curl $CURL_FLAGS \
+    -H "X-API-ID: $API_ID" \
+    -H "X-API-KEY: $API_KEY" \
+    "$PATCHMON_URL/api/v1/hosts/agent/download?arch=$ARCHITECTURE&force=binary" \
+    -o /usr/local/bin/patchmon-agent
+
+chmod +x /usr/local/bin/patchmon-agent
+
+# Get the agent version from the binary
+AGENT_VERSION=$(/usr/local/bin/patchmon-agent version 2>/dev/null || echo "Unknown")
+info "📋 Agent version: $AGENT_VERSION"
+
+# Handle existing log files and create log directory
+info "📁 Setting up log directory..."
+
+# Create log directory if it doesn't exist
+mkdir -p /etc/patchmon/logs
+
+# Handle existing log files
+if [[ -f "/etc/patchmon/logs/patchmon-agent.log" ]]; then
+    warning "⚠️  Existing log file found at /etc/patchmon/logs/patchmon-agent.log"
+    warning "⚠️  Rotating log file for fresh start"
+
+    # Rotate the log file
+    mv /etc/patchmon/logs/patchmon-agent.log /etc/patchmon/logs/patchmon-agent.log.old.$(date +%Y%m%d_%H%M%S)
+    info "📋 Log file rotated to: /etc/patchmon/logs/patchmon-agent.log.old.$(date +%Y%m%d_%H%M%S)"
+fi
+
+# Step 4: Test the configuration
+info "🧪 Testing API credentials and connectivity..."
+if /usr/local/bin/patchmon-agent ping; then
+    success "✅ TEST: API credentials are valid and server is reachable"
 else
-    error "Configuration test failed. Please check your credentials."
+    error "❌ Failed to validate API credentials or reach server"
 fi
 
-# Send initial update
-info "📊 Sending initial package data..."
-if /usr/local/bin/patchmon-agent.sh update; then
+# Step 5: Setup systemd service for WebSocket connection
+# Note: The service will automatically send an initial report on startup (see serve.go)
+info "🔧 Setting up systemd service..."
|
||||||
success "Initial package data sent successfully!"
|
|
||||||
|
# Stop and disable existing service if it exists
|
||||||
|
if systemctl is-active --quiet patchmon-agent.service 2>/dev/null; then
|
||||||
|
warning "⚠️ Stopping existing PatchMon agent service..."
|
||||||
|
systemctl stop patchmon-agent.service
|
||||||
|
fi
|
||||||
|
|
||||||
|
if systemctl is-enabled --quiet patchmon-agent.service 2>/dev/null; then
|
||||||
|
warning "⚠️ Disabling existing PatchMon agent service..."
|
||||||
|
systemctl disable patchmon-agent.service
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Create systemd service file
|
||||||
|
cat > /etc/systemd/system/patchmon-agent.service << EOF
|
||||||
|
[Unit]
|
||||||
|
Description=PatchMon Agent Service
|
||||||
|
After=network.target
|
||||||
|
Wants=network.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=simple
|
||||||
|
User=root
|
||||||
|
ExecStart=/usr/local/bin/patchmon-agent serve
|
||||||
|
Restart=always
|
||||||
|
RestartSec=10
|
||||||
|
WorkingDirectory=/etc/patchmon
|
||||||
|
|
||||||
|
# Logging
|
||||||
|
StandardOutput=journal
|
||||||
|
StandardError=journal
|
||||||
|
SyslogIdentifier=patchmon-agent
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Clean up old crontab entries if they exist (from previous installations)
|
||||||
|
if crontab -l 2>/dev/null | grep -q "patchmon-agent"; then
|
||||||
|
warning "⚠️ Found old crontab entries, removing them..."
|
||||||
|
crontab -l 2>/dev/null | grep -v "patchmon-agent" | crontab -
|
||||||
|
info "📋 Removed old crontab entries"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Reload systemd and enable/start the service
|
||||||
|
systemctl daemon-reload
|
||||||
|
systemctl enable patchmon-agent.service
|
||||||
|
systemctl start patchmon-agent.service
|
||||||
|
|
||||||
|
# Check if service started successfully
|
||||||
|
if systemctl is-active --quiet patchmon-agent.service; then
|
||||||
|
success "✅ PatchMon Agent service started successfully"
|
||||||
|
info "🔗 WebSocket connection established"
|
||||||
else
|
else
|
||||||
warning "Initial package data failed, but agent is configured. You can run 'patchmon-agent.sh update' manually."
|
warning "⚠️ Service may have failed to start. Check status with: systemctl status patchmon-agent"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Setup crontab for automatic updates
|
# Installation complete
|
||||||
info "⏰ Setting up automatic updates every $UPDATE_INTERVAL minutes..."
|
success "🎉 PatchMon Agent installation completed successfully!"
|
||||||
if [[ $UPDATE_INTERVAL -eq 60 ]]; then
|
echo ""
|
||||||
# Hourly updates
|
echo -e "${GREEN}📋 Installation Summary:${NC}"
|
||||||
echo "0 * * * * /usr/local/bin/patchmon-agent.sh update >/dev/null 2>&1" | crontab -
|
echo " • Configuration directory: /etc/patchmon"
|
||||||
else
|
echo " • Agent binary installed: /usr/local/bin/patchmon-agent"
|
||||||
# Custom interval updates
|
echo " • Architecture: $ARCHITECTURE"
|
||||||
echo "*/$UPDATE_INTERVAL * * * * /usr/local/bin/patchmon-agent.sh update >/dev/null 2>&1" | crontab -
|
echo " • Dependencies installed: jq, curl, bc"
|
||||||
|
echo " • Systemd service configured and running"
|
||||||
|
echo " • API credentials configured and tested"
|
||||||
|
echo " • WebSocket connection established"
|
||||||
|
echo " • Logs directory: /etc/patchmon/logs"
|
||||||
|
|
||||||
|
# Check for moved files and show them
|
||||||
|
MOVED_FILES=$(ls /etc/patchmon/credentials.yml.backup.* /etc/patchmon/config.yml.backup.* /usr/local/bin/patchmon-agent.backup.* /etc/patchmon/logs/patchmon-agent.log.old.* /usr/local/bin/patchmon-agent.sh.backup.* /etc/patchmon/credentials.backup.* 2>/dev/null || true)
|
||||||
|
if [[ -n "$MOVED_FILES" ]]; then
|
||||||
|
echo ""
|
||||||
|
echo -e "${YELLOW}📋 Files Moved for Fresh Installation:${NC}"
|
||||||
|
echo "$MOVED_FILES" | while read -r moved_file; do
|
||||||
|
echo " • $moved_file"
|
||||||
|
done
|
||||||
|
echo ""
|
||||||
|
echo -e "${BLUE}💡 Note: Old files are automatically cleaned up (keeping last 3)${NC}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
success "🎉 PatchMon Agent installation complete!"
|
|
||||||
echo ""
|
echo ""
|
||||||
echo "📋 Installation Summary:"
|
echo -e "${BLUE}🔧 Management Commands:${NC}"
|
||||||
echo " • Agent installed: /usr/local/bin/patchmon-agent.sh"
|
echo " • Test connection: /usr/local/bin/patchmon-agent ping"
|
||||||
echo " • Agent version: $AGENT_VERSION"
|
echo " • Manual report: /usr/local/bin/patchmon-agent report"
|
||||||
if [[ "$EXPECTED_VERSION" != "Unknown" ]]; then
|
echo " • Check status: /usr/local/bin/patchmon-agent diagnostics"
|
||||||
echo " • Expected version: $EXPECTED_VERSION"
|
echo " • Service status: systemctl status patchmon-agent"
|
||||||
fi
|
echo " • Service logs: journalctl -u patchmon-agent -f"
|
||||||
echo " • Config directory: /etc/patchmon/"
|
echo " • Restart service: systemctl restart patchmon-agent"
|
||||||
echo " • Credentials file: /etc/patchmon/credentials"
|
|
||||||
echo " • Automatic updates: Every $UPDATE_INTERVAL minutes via crontab"
|
|
||||||
echo " • View logs: tail -f /var/log/patchmon-agent.sh"
|
|
||||||
echo ""
|
echo ""
|
||||||
echo "🔧 Manual commands:"
|
success "✅ Your system is now being monitored by PatchMon!"
|
||||||
echo " • Test connection: patchmon-agent.sh test"
|
|
||||||
echo " • Send update: patchmon-agent.sh update"
|
|
||||||
echo " • Check status: patchmon-agent.sh ping"
|
|
||||||
echo ""
|
|
||||||
success "Your host is now connected to PatchMon!"
|
|
||||||
|
|
||||||
|
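For reference, the same flow can be driven by hand on a single host, mirroring the download-and-run pattern the Proxmox enrollment script further below uses against the install endpoint; the URL and credential values here are placeholders issued by your PatchMon instance:

# Hedged sketch: manual one-host install (placeholder values)
PATCHMON_URL="https://patchmon.example.com"
API_ID="your-api-id"
API_KEY="your-api-key"

# Fetch the per-host install script and run it as root
curl -s \
    -H "X-API-ID: $API_ID" \
    -H "X-API-KEY: $API_KEY" \
    -o patchmon-install.sh \
    "$PATCHMON_URL/api/v1/hosts/install?arch=amd64" \
    && sudo bash patchmon-install.sh \
    && rm -f patchmon-install.sh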
agents/patchmon_remove.sh (new executable file, 222 lines)
@@ -0,0 +1,222 @@
#!/bin/bash

# PatchMon Agent Removal Script
# Usage: curl -s {PATCHMON_URL}/api/v1/hosts/remove | bash
# This script completely removes PatchMon from the system

set -e

# This placeholder will be dynamically replaced by the server when serving this
# script based on the "ignore SSL self-signed" setting for any curl calls in
# future (left for consistency with install script).
CURL_FLAGS=""

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Functions
error() {
    echo -e "${RED}❌ ERROR: $1${NC}" >&2
    exit 1
}

info() {
    echo -e "${BLUE}ℹ️ $1${NC}"
}

success() {
    echo -e "${GREEN}✅ $1${NC}"
}

warning() {
    echo -e "${YELLOW}⚠️ $1${NC}"
}

# Check if running as root
if [[ $EUID -ne 0 ]]; then
    error "This script must be run as root (use sudo)"
fi

info "🗑️ Starting PatchMon Agent Removal..."
echo ""

# Step 1: Stop any running PatchMon processes
info "🛑 Stopping PatchMon processes..."
if pgrep -f "patchmon-agent.sh" >/dev/null; then
    warning "Found running PatchMon processes, stopping them..."
    pkill -f "patchmon-agent.sh" || true
    sleep 2
    success "PatchMon processes stopped"
else
    info "No running PatchMon processes found"
fi

# Step 2: Remove crontab entries
info "📅 Removing PatchMon crontab entries..."
if crontab -l 2>/dev/null | grep -q "patchmon-agent.sh"; then
    warning "Found PatchMon crontab entries, removing them..."
    crontab -l 2>/dev/null | grep -v "patchmon-agent.sh" | crontab -
    success "Crontab entries removed"
else
    info "No PatchMon crontab entries found"
fi

# Step 3: Remove agent script
info "📄 Removing agent script..."
if [[ -f "/usr/local/bin/patchmon-agent.sh" ]]; then
    warning "Removing agent script: /usr/local/bin/patchmon-agent.sh"
    rm -f /usr/local/bin/patchmon-agent.sh
    success "Agent script removed"
else
    info "Agent script not found"
fi

# Step 4: Remove configuration directory and files
info "📁 Removing configuration files..."
if [[ -d "/etc/patchmon" ]]; then
    warning "Removing configuration directory: /etc/patchmon"

    # Show what's being removed
    info "📋 Files in /etc/patchmon:"
    ls -la /etc/patchmon/ 2>/dev/null | grep -v "^total" | while read -r line; do
        echo "   $line"
    done

    # Remove the directory
    rm -rf /etc/patchmon
    success "Configuration directory removed"
else
    info "Configuration directory not found"
fi

# Step 5: Remove log files
info "📝 Removing log files..."
if [[ -f "/var/log/patchmon-agent.log" ]]; then
    warning "Removing log file: /var/log/patchmon-agent.log"
    rm -f /var/log/patchmon-agent.log
    success "Log file removed"
else
    info "Log file not found"
fi

# Step 6: Clean up backup files (optional)
info "🧹 Cleaning up backup files..."
BACKUP_COUNT=0

# Count credential backups
CRED_BACKUPS=$(ls /etc/patchmon/credentials.backup.* 2>/dev/null | wc -l || echo "0")
if [[ $CRED_BACKUPS -gt 0 ]]; then
    BACKUP_COUNT=$((BACKUP_COUNT + CRED_BACKUPS))
fi

# Count agent backups
AGENT_BACKUPS=$(ls /usr/local/bin/patchmon-agent.sh.backup.* 2>/dev/null | wc -l || echo "0")
if [[ $AGENT_BACKUPS -gt 0 ]]; then
    BACKUP_COUNT=$((BACKUP_COUNT + AGENT_BACKUPS))
fi

# Count log backups
LOG_BACKUPS=$(ls /var/log/patchmon-agent.log.old.* 2>/dev/null | wc -l || echo "0")
if [[ $LOG_BACKUPS -gt 0 ]]; then
    BACKUP_COUNT=$((BACKUP_COUNT + LOG_BACKUPS))
fi

if [[ $BACKUP_COUNT -gt 0 ]]; then
    warning "Found $BACKUP_COUNT backup files"
    echo ""
    echo -e "${YELLOW}📋 Backup files found:${NC}"

    # Show credential backups
    if [[ $CRED_BACKUPS -gt 0 ]]; then
        echo "   Credential backups:"
        ls /etc/patchmon/credentials.backup.* 2>/dev/null | while read -r file; do
            echo "     • $file"
        done
    fi

    # Show agent backups
    if [[ $AGENT_BACKUPS -gt 0 ]]; then
        echo "   Agent script backups:"
        ls /usr/local/bin/patchmon-agent.sh.backup.* 2>/dev/null | while read -r file; do
            echo "     • $file"
        done
    fi

    # Show log backups
    if [[ $LOG_BACKUPS -gt 0 ]]; then
        echo "   Log file backups:"
        ls /var/log/patchmon-agent.log.old.* 2>/dev/null | while read -r file; do
            echo "     • $file"
        done
    fi

    echo ""
    echo -e "${BLUE}💡 Note: Backup files are preserved for safety${NC}"
    echo -e "${BLUE}💡 You can remove them manually if not needed${NC}"
else
    info "No backup files found"
fi

# Step 7: Remove dependencies (optional)
info "📦 Checking for PatchMon-specific dependencies..."
if command -v jq >/dev/null 2>&1; then
    warning "jq is installed (used by PatchMon)"
    echo -e "${BLUE}💡 Note: jq may be used by other applications${NC}"
    echo -e "${BLUE}💡 Consider keeping it unless you're sure it's not needed${NC}"
else
    info "jq not found"
fi

if command -v curl >/dev/null 2>&1; then
    warning "curl is installed (used by PatchMon)"
    echo -e "${BLUE}💡 Note: curl is commonly used by many applications${NC}"
    echo -e "${BLUE}💡 Consider keeping it unless you're sure it's not needed${NC}"
else
    info "curl not found"
fi

# Step 8: Final verification
info "🔍 Verifying removal..."
REMAINING_FILES=0

if [[ -f "/usr/local/bin/patchmon-agent.sh" ]]; then
    REMAINING_FILES=$((REMAINING_FILES + 1))
fi

if [[ -d "/etc/patchmon" ]]; then
    REMAINING_FILES=$((REMAINING_FILES + 1))
fi

if [[ -f "/var/log/patchmon-agent.log" ]]; then
    REMAINING_FILES=$((REMAINING_FILES + 1))
fi

if crontab -l 2>/dev/null | grep -q "patchmon-agent.sh"; then
    REMAINING_FILES=$((REMAINING_FILES + 1))
fi

if [[ $REMAINING_FILES -eq 0 ]]; then
    success "✅ PatchMon has been completely removed from the system!"
else
    warning "⚠️ Some PatchMon files may still remain ($REMAINING_FILES items)"
    echo -e "${BLUE}💡 You may need to remove them manually${NC}"
fi

echo ""
echo -e "${GREEN}📋 Removal Summary:${NC}"
echo "   • Agent script: Removed"
echo "   • Configuration files: Removed"
echo "   • Log files: Removed"
echo "   • Crontab entries: Removed"
echo "   • Running processes: Stopped"
echo "   • Backup files: Preserved (if any)"
echo ""
echo -e "${BLUE}🔧 Manual cleanup (if needed):${NC}"
echo "   • Remove backup files: rm /etc/patchmon/credentials.backup.* /usr/local/bin/patchmon-agent.sh.backup.* /var/log/patchmon-agent.log.old.*"
echo "   • Remove dependencies: apt remove jq curl (if not needed by other apps)"
echo ""
success "🎉 PatchMon removal completed!"
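Per the usage line in the script header, removal is a single pipe-to-bash; because the script insists on root, a sudo wrapper (the header itself shows plain bash) looks like this, with the server address standing in for the {PATCHMON_URL} placeholder:

# Run the removal script straight from the server (usage from the header above)
curl -s "https://patchmon.example.com/api/v1/hosts/remove" | sudo bash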
agents/proxmox_auto_enroll.sh (new executable file, 501 lines)
@@ -0,0 +1,501 @@
#!/bin/bash
set -eo pipefail # Exit on error, pipe failures (removed -u as we handle unset vars explicitly)

# Trap to catch errors only (not normal exits)
trap 'echo "[ERROR] Script failed at line $LINENO with exit code $?"' ERR

SCRIPT_VERSION="2.0.0"
echo "[DEBUG] Script Version: $SCRIPT_VERSION ($(date +%Y-%m-%d\ %H:%M:%S))"

# =============================================================================
# PatchMon Proxmox LXC Auto-Enrollment Script
# =============================================================================
# This script discovers LXC containers on a Proxmox host and automatically
# enrolls them into PatchMon for patch management.
#
# Usage:
#   1. Set environment variables or edit configuration below
#   2. Run: bash proxmox_auto_enroll.sh
#
# Requirements:
#   - Must run on Proxmox host (requires 'pct' command)
#   - Auto-enrollment token from PatchMon
#   - Network access to PatchMon server
# =============================================================================

# ===== CONFIGURATION =====
PATCHMON_URL="${PATCHMON_URL:-https://patchmon.example.com}"
AUTO_ENROLLMENT_KEY="${AUTO_ENROLLMENT_KEY:-}"
AUTO_ENROLLMENT_SECRET="${AUTO_ENROLLMENT_SECRET:-}"
CURL_FLAGS="${CURL_FLAGS:--s}"
DRY_RUN="${DRY_RUN:-false}"
HOST_PREFIX="${HOST_PREFIX:-}"
SKIP_STOPPED="${SKIP_STOPPED:-true}"
PARALLEL_INSTALL="${PARALLEL_INSTALL:-false}"
MAX_PARALLEL="${MAX_PARALLEL:-5}"
FORCE_INSTALL="${FORCE_INSTALL:-false}"

# ===== COLOR OUTPUT =====
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# ===== LOGGING FUNCTIONS =====
info() { echo -e "${GREEN}[INFO]${NC} $1"; return 0; }
warn() { echo -e "${YELLOW}[WARN]${NC} $1"; return 0; }
error() { echo -e "${RED}[ERROR]${NC} $1"; exit 1; }
success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; return 0; }
debug() { [[ "${DEBUG:-false}" == "true" ]] && echo -e "${BLUE}[DEBUG]${NC} $1" || true; return 0; }

# ===== BANNER =====
cat << "EOF"
╔═══════════════════════════════════════════════════════════════╗
║                                                               ║
║    ____       _       _     __  __                            ║
║   |  _ \ __ _| |_ ___| |__ |  \/  | ___  _ __                 ║
║   | |_) / _` | __/ __| '_ \| |\/| |/ _ \| '_ \                ║
║   |  __/ (_| | || (__| | | | |  | | (_) | | | |               ║
║   |_|   \__,_|\__\___|_| |_|_|  |_|\___/|_| |_|               ║
║                                                               ║
║             Proxmox LXC Auto-Enrollment Script                ║
║                                                               ║
╚═══════════════════════════════════════════════════════════════╝
EOF
echo ""

# ===== VALIDATION =====
info "Validating configuration..."

if [[ -z "$AUTO_ENROLLMENT_KEY" ]] || [[ -z "$AUTO_ENROLLMENT_SECRET" ]]; then
    error "AUTO_ENROLLMENT_KEY and AUTO_ENROLLMENT_SECRET must be set"
fi

if [[ -z "$PATCHMON_URL" ]]; then
    error "PATCHMON_URL must be set"
fi

# Check if running on Proxmox
if ! command -v pct &> /dev/null; then
    error "This script must run on a Proxmox host (pct command not found)"
fi

# Check for required commands
for cmd in curl jq; do
    if ! command -v $cmd &> /dev/null; then
        error "Required command '$cmd' not found. Please install it first."
    fi
done

info "Configuration validated successfully"
info "PatchMon Server: $PATCHMON_URL"
info "Dry Run Mode: $DRY_RUN"
info "Skip Stopped Containers: $SKIP_STOPPED"
echo ""

# ===== DISCOVER LXC CONTAINERS =====
info "Discovering LXC containers..."
lxc_list=$(pct list | tail -n +2) # Skip header

if [[ -z "$lxc_list" ]]; then
    warn "No LXC containers found on this Proxmox host"
    exit 0
fi

# Count containers
total_containers=$(echo "$lxc_list" | wc -l)
info "Found $total_containers LXC container(s)"
echo ""

info "Initializing statistics..."
# ===== STATISTICS =====
enrolled_count=0
skipped_count=0
failed_count=0

# Track containers with dpkg errors for later recovery
declare -A dpkg_error_containers

# Track all failed containers for summary
declare -A failed_containers
info "Statistics initialized"

# ===== PROCESS CONTAINERS =====
info "Starting container processing loop..."
while IFS= read -r line; do
    info "[DEBUG] Read line from lxc_list"
    vmid=$(echo "$line" | awk '{print $1}')
    status=$(echo "$line" | awk '{print $2}')
    name=$(echo "$line" | awk '{print $3}')

    info "Processing LXC $vmid: $name (status: $status)"

    # Skip stopped containers if configured
    if [[ "$status" != "running" ]] && [[ "$SKIP_STOPPED" == "true" ]]; then
        warn "  Skipping $name - container not running"
        ((skipped_count++)) || true
        echo ""
        continue
    fi

    # Check if container is stopped
    if [[ "$status" != "running" ]]; then
        warn "  Container $name is stopped - cannot gather info or install agent"
        ((skipped_count++)) || true
        echo ""
        continue
    fi

    # Get container details
    debug "  Gathering container information..."
    hostname=$(timeout 5 pct exec "$vmid" -- hostname 2>/dev/null </dev/null || echo "$name")
    ip_address=$(timeout 5 pct exec "$vmid" -- hostname -I 2>/dev/null </dev/null | awk '{print $1}' || echo "unknown")
    os_info=$(timeout 5 pct exec "$vmid" -- cat /etc/os-release 2>/dev/null </dev/null | grep "^PRETTY_NAME=" | cut -d'"' -f2 || echo "unknown")

    # Detect container architecture
    debug "  Detecting container architecture..."
    arch_raw=$(timeout 5 pct exec "$vmid" -- uname -m 2>/dev/null </dev/null || echo "unknown")

    # Map architecture to supported values
    case "$arch_raw" in
        "x86_64")
            architecture="amd64"
            ;;
        "i386"|"i686")
            architecture="386"
            ;;
        "aarch64"|"arm64")
            architecture="arm64"
            ;;
        "armv7l"|"armv6l"|"arm")
            architecture="arm"
            ;;
        *)
            warn "  ⚠ Unknown architecture '$arch_raw', defaulting to amd64"
            architecture="amd64"
            ;;
    esac

    debug "  Detected architecture: $arch_raw -> $architecture"

    # Get machine ID from container
    machine_id=$(timeout 5 pct exec "$vmid" -- bash -c "cat /etc/machine-id 2>/dev/null || cat /var/lib/dbus/machine-id 2>/dev/null || echo 'proxmox-lxc-$vmid-'$(cat /proc/sys/kernel/random/uuid)" </dev/null 2>/dev/null || echo "proxmox-lxc-$vmid-unknown")

    friendly_name="${HOST_PREFIX}${hostname}"

    info "  Hostname: $hostname"
    info "  IP Address: $ip_address"
    info "  OS: $os_info"
    info "  Architecture: $architecture ($arch_raw)"
    info "  Machine ID: ${machine_id:0:16}..."

    if [[ "$DRY_RUN" == "true" ]]; then
        info "  [DRY RUN] Would enroll: $friendly_name"
        ((enrolled_count++)) || true
        echo ""
        continue
    fi

    # Call PatchMon auto-enrollment API
    info "  Enrolling $friendly_name in PatchMon..."

    response=$(curl $CURL_FLAGS -X POST \
        -H "X-Auto-Enrollment-Key: $AUTO_ENROLLMENT_KEY" \
        -H "X-Auto-Enrollment-Secret: $AUTO_ENROLLMENT_SECRET" \
        -H "Content-Type: application/json" \
        -d "{
            \"friendly_name\": \"$friendly_name\",
            \"machine_id\": \"$machine_id\",
            \"metadata\": {
                \"vmid\": \"$vmid\",
                \"proxmox_node\": \"$(hostname)\",
                \"ip_address\": \"$ip_address\",
                \"os_info\": \"$os_info\"
            }
        }" \
        "$PATCHMON_URL/api/v1/auto-enrollment/enroll" \
        -w "\n%{http_code}" 2>&1)

    http_code=$(echo "$response" | tail -n 1)
    body=$(echo "$response" | sed '$d')

    if [[ "$http_code" == "201" ]]; then
        api_id=$(echo "$body" | jq -r '.host.api_id' 2>/dev/null || echo "")
        api_key=$(echo "$body" | jq -r '.host.api_key' 2>/dev/null || echo "")

        if [[ -z "$api_id" ]] || [[ -z "$api_key" ]]; then
            error "  Failed to parse API credentials from response"
        fi

        info "  ✓ Host enrolled successfully: $api_id"

        # Check if agent is already installed and working
        info "  Checking if agent is already configured..."
        config_check=$(timeout 10 pct exec "$vmid" -- bash -c "
            if [[ -f /etc/patchmon/config.yml ]] && [[ -f /etc/patchmon/credentials.yml ]]; then
                if [[ -f /usr/local/bin/patchmon-agent ]]; then
                    # Try to ping using existing configuration
                    if /usr/local/bin/patchmon-agent ping >/dev/null 2>&1; then
                        echo 'ping_success'
                    else
                        echo 'ping_failed'
                    fi
                else
                    echo 'binary_missing'
                fi
            else
                echo 'not_configured'
            fi
        " 2>/dev/null </dev/null || echo "error")

        if [[ "$config_check" == "ping_success" ]]; then
            info "  ✓ Host already enrolled and agent ping successful - skipping"
            ((skipped_count++)) || true
            echo ""
            continue
        elif [[ "$config_check" == "ping_failed" ]]; then
            warn "  ⚠ Agent configuration exists but ping failed - will reinstall"
        elif [[ "$config_check" == "binary_missing" ]]; then
            warn "  ⚠ Config exists but agent binary missing - will reinstall"
        elif [[ "$config_check" == "not_configured" ]]; then
            info "  ℹ Agent not yet configured - proceeding with installation"
        else
            warn "  ⚠ Could not check agent status - proceeding with installation"
        fi

        # Ensure curl is installed in the container
        info "  Checking for curl in container..."
        curl_check=$(timeout 10 pct exec "$vmid" -- bash -c "command -v curl >/dev/null 2>&1 && echo 'installed' || echo 'missing'" 2>/dev/null </dev/null || echo "error")

        if [[ "$curl_check" == "missing" ]]; then
            info "  Installing curl in container..."

            # Detect package manager and install curl
            curl_install_output=$(timeout 60 pct exec "$vmid" -- bash -c "
                if command -v apt-get >/dev/null 2>&1; then
                    export DEBIAN_FRONTEND=noninteractive
                    apt-get update -qq && apt-get install -y -qq curl
                elif command -v yum >/dev/null 2>&1; then
                    yum install -y -q curl
                elif command -v dnf >/dev/null 2>&1; then
                    dnf install -y -q curl
                elif command -v apk >/dev/null 2>&1; then
                    apk add --no-cache curl
                else
                    echo 'ERROR: No supported package manager found'
                    exit 1
                fi
            " 2>&1 </dev/null) || true

            if [[ "$curl_install_output" == *"ERROR: No supported package manager"* ]]; then
                warn "  ✗ Could not install curl - no supported package manager found"
                failed_containers["$vmid"]="$friendly_name|No package manager for curl|$curl_install_output"
                ((failed_count++)) || true
                echo ""
                sleep 1
                continue
            else
                info "  ✓ curl installed successfully"
            fi
        else
            info "  ✓ curl already installed"
        fi

        # Install PatchMon agent in container
        info "  Installing PatchMon agent..."

        # Build install URL with force flag and architecture if enabled
        install_url="$PATCHMON_URL/api/v1/hosts/install?arch=$architecture"
        if [[ "$FORCE_INSTALL" == "true" ]]; then
            install_url="$install_url&force=true"
            info "  Using force mode - will bypass broken packages"
        fi
        info "  Using architecture: $architecture"

        # Reset exit code for this container
        install_exit_code=0

        # Download and execute in separate steps to avoid stdin issues with piping
        # Pass CURL_FLAGS as environment variable to container
        install_output=$(timeout 180 pct exec "$vmid" --env CURL_FLAGS="$CURL_FLAGS" -- bash -c "
            cd /tmp
            curl \$CURL_FLAGS \
                -H \"X-API-ID: $api_id\" \
                -H \"X-API-KEY: $api_key\" \
                -o patchmon-install.sh \
                '$install_url' && \
            bash patchmon-install.sh && \
            rm -f patchmon-install.sh
        " 2>&1 </dev/null) || install_exit_code=$?

        # Check both exit code AND success message in output for reliability
        if [[ $install_exit_code -eq 0 ]] || [[ "$install_output" == *"PatchMon Agent installation completed successfully"* ]]; then
            info "  ✓ Agent installed successfully in $friendly_name"
            ((enrolled_count++)) || true
        elif [[ $install_exit_code -eq 124 ]]; then
            warn "  ⏱ Agent installation timed out (>180s) in $friendly_name"
            info "  Install output: $install_output"
            # Store failure details
            failed_containers["$vmid"]="$friendly_name|Timeout (>180s)|$install_output"
            ((failed_count++)) || true
        else
            # Check if it's a dpkg error
            if [[ "$install_output" == *"dpkg was interrupted"* ]] || [[ "$install_output" == *"dpkg --configure -a"* ]]; then
                warn "  ⚠ Failed due to dpkg error in $friendly_name (can be fixed)"
                dpkg_error_containers["$vmid"]="$friendly_name:$api_id:$api_key"
                # Store failure details
                failed_containers["$vmid"]="$friendly_name|dpkg error|$install_output"
            else
                warn "  ✗ Failed to install agent in $friendly_name (exit: $install_exit_code)"
                # Store failure details
                failed_containers["$vmid"]="$friendly_name|Exit code $install_exit_code|$install_output"
            fi
            info "  Install output: $install_output"
            ((failed_count++)) || true
        fi

    elif [[ "$http_code" == "409" ]]; then
        warn "  ⊘ Host $friendly_name already enrolled - skipping"
        ((skipped_count++)) || true
    elif [[ "$http_code" == "429" ]]; then
        error "  ✗ Rate limit exceeded - maximum hosts per day reached"
        failed_containers["$vmid"]="$friendly_name|Rate limit exceeded|$body"
        ((failed_count++)) || true
    else
        error "  ✗ Failed to enroll $friendly_name - HTTP $http_code"
        debug "  Response: $body"
        failed_containers["$vmid"]="$friendly_name|HTTP $http_code enrollment failed|$body"
        ((failed_count++)) || true
    fi

    echo ""
    sleep 1 # Rate limiting between containers

done <<< "$lxc_list"

# ===== SUMMARY =====
echo ""
echo "╔═══════════════════════════════════════════════════════════════╗"
echo "║                     ENROLLMENT SUMMARY                        ║"
echo "╚═══════════════════════════════════════════════════════════════╝"
echo ""
info "Total Containers Found: $total_containers"
info "Successfully Enrolled:  $enrolled_count"
info "Skipped:                $skipped_count"
info "Failed:                 $failed_count"
echo ""

# ===== FAILURE DETAILS =====
if [[ ${#failed_containers[@]} -gt 0 ]]; then
    echo "╔═══════════════════════════════════════════════════════════════╗"
    echo "║                     FAILURE DETAILS                           ║"
    echo "╚═══════════════════════════════════════════════════════════════╝"
    echo ""

    for vmid in "${!failed_containers[@]}"; do
        IFS='|' read -r name reason output <<< "${failed_containers[$vmid]}"

        warn "Container $vmid: $name"
        info "  Reason: $reason"
        info "  Last 5 lines of output:"

        # Get last 5 lines of output
        last_5_lines=$(echo "$output" | tail -n 5)

        # Display each line with proper indentation
        while IFS= read -r line; do
            echo "    $line"
        done <<< "$last_5_lines"

        echo ""
    done
fi

if [[ "$DRY_RUN" == "true" ]]; then
    warn "This was a DRY RUN - no actual changes were made"
    warn "Set DRY_RUN=false to perform actual enrollment"
fi

# ===== DPKG ERROR RECOVERY =====
if [[ ${#dpkg_error_containers[@]} -gt 0 ]]; then
    echo ""
    echo "╔═══════════════════════════════════════════════════════════════╗"
    echo "║               DPKG ERROR RECOVERY AVAILABLE                   ║"
    echo "╚═══════════════════════════════════════════════════════════════╝"
    echo ""
    warn "Detected ${#dpkg_error_containers[@]} container(s) with dpkg errors:"
    for vmid in "${!dpkg_error_containers[@]}"; do
        IFS=':' read -r name api_id api_key <<< "${dpkg_error_containers[$vmid]}"
        info "  • Container $vmid: $name"
    done
    echo ""

    # Ask user if they want to fix dpkg errors
    read -p "Would you like to fix dpkg errors and retry installation? (y/N): " -n 1 -r
    echo ""

    if [[ $REPLY =~ ^[Yy]$ ]]; then
        echo ""
        info "Starting dpkg recovery process..."
        echo ""

        recovered_count=0

        for vmid in "${!dpkg_error_containers[@]}"; do
            IFS=':' read -r name api_id api_key <<< "${dpkg_error_containers[$vmid]}"

            info "Fixing dpkg in container $vmid ($name)..."

            # Run dpkg --configure -a
            # Capture the exit status explicitly (an unconditional `|| true` here
            # would make the success check below always pass)
            dpkg_exit=0
            dpkg_output=$(timeout 60 pct exec "$vmid" -- dpkg --configure -a 2>&1 </dev/null) || dpkg_exit=$?

            if [[ $dpkg_exit -eq 0 ]]; then
info " ✓ dpkg fixed successfully"
|
||||||
|
|
||||||
|
# Retry agent installation
|
||||||
|
info " Retrying agent installation..."
|
||||||
|
|
||||||
|
install_exit_code=0
|
||||||
|
# Pass CURL_FLAGS as environment variable to container
|
||||||
|
install_output=$(timeout 180 pct exec "$vmid" --env CURL_FLAGS="$CURL_FLAGS" -- bash -c "
|
||||||
|
cd /tmp
|
||||||
|
curl \$CURL_FLAGS \
|
||||||
|
-H \"X-API-ID: $api_id\" \
|
||||||
|
-H \"X-API-KEY: $api_key\" \
|
||||||
|
-o patchmon-install.sh \
|
||||||
|
'$PATCHMON_URL/api/v1/hosts/install?arch=$architecture' && \
|
||||||
|
bash patchmon-install.sh && \
|
||||||
|
rm -f patchmon-install.sh
|
||||||
|
" 2>&1 </dev/null) || install_exit_code=$?
|
||||||
|
|
||||||
|
if [[ $install_exit_code -eq 0 ]] || [[ "$install_output" == *"PatchMon Agent installation completed successfully"* ]]; then
|
||||||
|
info " ✓ Agent installed successfully in $name"
|
||||||
|
((recovered_count++)) || true
|
||||||
|
((enrolled_count++)) || true
|
||||||
|
((failed_count--)) || true
|
||||||
|
else
|
||||||
|
warn " ✗ Agent installation still failed (exit: $install_exit_code)"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
warn " ✗ Failed to fix dpkg in $name"
|
||||||
|
info " dpkg output: $dpkg_output"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
done
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
info "Recovery complete: $recovered_count container(s) recovered"
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ $failed_count -gt 0 ]]; then
|
||||||
|
warn "Some containers failed to enroll. Check the logs above for details."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
info "Auto-enrollment complete! ✓"
|
||||||
|
exit 0
|
||||||
|
|
||||||
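A typical invocation, following the CONFIGURATION block at the top of the script, exports the token pair and dry-runs before enrolling for real; every value below is a placeholder:

# Hedged example: dry run first, then actual enrollment
export PATCHMON_URL="https://patchmon.example.com"
export AUTO_ENROLLMENT_KEY="your-token-key"
export AUTO_ENROLLMENT_SECRET="your-token-secret"

DRY_RUN=true bash proxmox_auto_enroll.sh    # report what would be enrolled
DRY_RUN=false bash proxmox_auto_enroll.sh   # enroll containers for real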
@@ -1,17 +1,61 @@
 # Database Configuration
-DATABASE_URL="postgresql://patchmon_user:p@tchm0n_p@55@localhost:5432/patchmon_db"
+DATABASE_URL="postgresql://patchmon_user:your-password-here@localhost:5432/patchmon_db"
+PM_DB_CONN_MAX_ATTEMPTS=30
+PM_DB_CONN_WAIT_INTERVAL=2
+
+# Database Connection Pool Configuration (Prisma)
+DB_CONNECTION_LIMIT=30   # Maximum connections per instance (default: 30)
+DB_POOL_TIMEOUT=20       # Seconds to wait for available connection (default: 20)
+DB_CONNECT_TIMEOUT=10    # Seconds to wait for initial connection (default: 10)
+DB_IDLE_TIMEOUT=300      # Seconds before closing idle connections (default: 300)
+DB_MAX_LIFETIME=1800     # Maximum lifetime of a connection in seconds (default: 1800)
+
+# JWT Configuration
+JWT_SECRET=your-secure-random-secret-key-change-this-in-production
+JWT_EXPIRES_IN=1h
+JWT_REFRESH_EXPIRES_IN=7d
+
 # Server Configuration
 PORT=3001
-NODE_ENV=development
+NODE_ENV=production
 
 # API Configuration
 API_VERSION=v1
 
+# CORS Configuration
 CORS_ORIGIN=http://localhost:3000
 
-# Rate Limiting
+# Session Configuration
+SESSION_INACTIVITY_TIMEOUT_MINUTES=30
+
+# User Configuration
+DEFAULT_USER_ROLE=user
+
+# Rate Limiting (times in milliseconds)
 RATE_LIMIT_WINDOW_MS=900000
-RATE_LIMIT_MAX=100
+RATE_LIMIT_MAX=5000
+AUTH_RATE_LIMIT_WINDOW_MS=600000
+AUTH_RATE_LIMIT_MAX=500
+AGENT_RATE_LIMIT_WINDOW_MS=60000
+AGENT_RATE_LIMIT_MAX=1000
+
+# Redis Configuration
+REDIS_HOST=localhost
+REDIS_PORT=6379
+REDIS_USER=your-redis-username-here
+REDIS_PASSWORD=your-redis-password-here
+REDIS_DB=0
+
 # Logging
 LOG_LEVEL=info
+ENABLE_LOGGING=true
+
+# TFA Configuration (optional - used if TFA is enabled)
+TFA_REMEMBER_ME_EXPIRES_IN=30d
+TFA_MAX_REMEMBER_SESSIONS=5
+TFA_SUSPICIOUS_ACTIVITY_THRESHOLD=3
+
+# Timezone Configuration
+# Set the timezone for timestamps and logs (e.g., 'UTC', 'America/New_York', 'Europe/London')
+# Defaults to UTC if not set. This ensures consistent timezone handling across the application.
+TZ=UTC
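Prisma reads its pool limits from query parameters on the connection string (connection_limit, pool_timeout, and connect_timeout are documented options of its PostgreSQL connector); assuming the backend folds the DB_* variables above into that string — an assumption about server code not shown here — the effective URL would look roughly like:

# Sketch only: the mapping of the DB_* pool variables onto Prisma URL parameters is assumed
DATABASE_URL="postgresql://patchmon_user:your-password-here@localhost:5432/patchmon_db?connection_limit=${DB_CONNECTION_LIMIT:-30}&pool_timeout=${DB_POOL_TIMEOUT:-20}&connect_timeout=${DB_CONNECT_TIMEOUT:-10}"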
@@ -1,36 +1,47 @@
 {
   "name": "patchmon-backend",
-  "version": "1.0.0",
+  "version": "1.3.3",
   "description": "Backend API for Linux Patch Monitoring System",
+  "license": "AGPL-3.0",
   "main": "src/server.js",
   "scripts": {
     "dev": "nodemon src/server.js",
     "start": "node src/server.js",
     "build": "echo 'No build step needed for Node.js'",
     "db:generate": "prisma generate",
     "db:migrate": "prisma migrate dev",
     "db:push": "prisma db push",
     "db:studio": "prisma studio"
   },
   "dependencies": {
+    "@bull-board/api": "^6.13.1",
+    "@bull-board/express": "^6.13.1",
-    "@prisma/client": "^5.7.0",
+    "@prisma/client": "^6.1.0",
+    "axios": "^1.7.9",
     "bcryptjs": "^2.4.3",
+    "bullmq": "^5.61.0",
+    "cookie-parser": "^1.4.7",
     "cors": "^2.8.5",
-    "dotenv": "^16.3.1",
+    "dotenv": "^16.4.7",
-    "express": "^4.18.2",
+    "express": "^4.21.2",
-    "express-rate-limit": "^7.1.5",
+    "express-rate-limit": "^7.5.0",
-    "express-validator": "^7.0.1",
+    "express-validator": "^7.2.0",
-    "helmet": "^7.1.0",
+    "helmet": "^8.0.0",
+    "ioredis": "^5.8.1",
     "jsonwebtoken": "^9.0.2",
     "moment": "^2.30.1",
+    "qrcode": "^1.5.4",
+    "speakeasy": "^2.0.0",
-    "uuid": "^9.0.1",
+    "uuid": "^11.0.3",
-    "winston": "^3.11.0"
+    "winston": "^3.17.0",
+    "ws": "^8.18.0"
   },
   "devDependencies": {
+    "@types/bcryptjs": "^2.4.6",
-    "nodemon": "^3.0.2",
+    "nodemon": "^3.1.9",
-    "prisma": "^5.7.0"
+    "prisma": "^6.1.0"
   },
   "engines": {
     "node": ">=18.0.0"
   }
 }
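The scripts block above gives the standard entry points; a minimal local workflow, assuming the environment file above is in place, would be:

# Backend workflow using the package.json scripts above
npm install          # install the dependencies listed above
npm run db:generate  # prisma generate
npm run db:migrate   # prisma migrate dev (applies the migrations that follow)
npm run dev          # nodemon src/server.js for development
npm run start        # node src/server.js for production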
@@ -0,0 +1,16 @@
-- CreateTable
CREATE TABLE "system_statistics" (
    "id" TEXT NOT NULL,
    "unique_packages_count" INTEGER NOT NULL,
    "unique_security_count" INTEGER NOT NULL,
    "total_packages" INTEGER NOT NULL,
    "total_hosts" INTEGER NOT NULL,
    "hosts_needing_updates" INTEGER NOT NULL,
    "timestamp" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,

    CONSTRAINT "system_statistics_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE INDEX "system_statistics_timestamp_idx" ON "system_statistics"("timestamp");
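The timestamp index suggests newest-first reads; a quick check of the latest snapshot (the psql invocation is illustrative, not part of the migration) might be:

# Fetch the most recent statistics row; ORDER BY timestamp uses the index above
psql "$DATABASE_URL" -c \
  'SELECT total_hosts, hosts_needing_updates, total_packages, timestamp
     FROM system_statistics ORDER BY timestamp DESC LIMIT 1;'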
@@ -0,0 +1,2 @@
-- AlterTable
ALTER TABLE "settings" ADD COLUMN "ssh_key_path" TEXT;

@@ -0,0 +1,2 @@
-- AlterTable
ALTER TABLE "settings" ADD COLUMN "repository_type" TEXT NOT NULL DEFAULT 'public';

@@ -0,0 +1,4 @@
-- AlterTable
ALTER TABLE "users" ADD COLUMN "tfa_backup_codes" TEXT,
ADD COLUMN "tfa_enabled" BOOLEAN NOT NULL DEFAULT false,
ADD COLUMN "tfa_secret" TEXT;

@@ -0,0 +1,4 @@
-- AlterTable
ALTER TABLE "settings" ADD COLUMN "last_update_check" TIMESTAMP(3),
ADD COLUMN "latest_version" TEXT,
ADD COLUMN "update_available" BOOLEAN NOT NULL DEFAULT false;
backend/prisma/migrations/20250919165704_/migration.sql (new file, 2 lines)
@@ -0,0 +1,2 @@
-- RenameIndex
ALTER INDEX "hosts_hostname_key" RENAME TO "hosts_friendly_name_key";

@@ -0,0 +1,2 @@
-- Rename hostname column to friendly_name in hosts table
ALTER TABLE "hosts" RENAME COLUMN "hostname" TO "friendly_name";

@@ -0,0 +1,14 @@
-- AlterTable
ALTER TABLE "hosts" ADD COLUMN "cpu_cores" INTEGER,
ADD COLUMN "cpu_model" TEXT,
ADD COLUMN "disk_details" JSONB,
ADD COLUMN "dns_servers" JSONB,
ADD COLUMN "gateway_ip" TEXT,
ADD COLUMN "hostname" TEXT,
ADD COLUMN "kernel_version" TEXT,
ADD COLUMN "load_average" JSONB,
ADD COLUMN "network_interfaces" JSONB,
ADD COLUMN "ram_installed" INTEGER,
ADD COLUMN "selinux_status" TEXT,
ADD COLUMN "swap_size" INTEGER,
ADD COLUMN "system_uptime" TEXT;

@@ -0,0 +1,2 @@
-- AlterTable
ALTER TABLE "settings" DROP COLUMN "frontend_url";

@@ -0,0 +1,2 @@
-- AlterTable
ALTER TABLE "settings" ADD COLUMN "signup_enabled" BOOLEAN NOT NULL DEFAULT false;

@@ -0,0 +1,3 @@
-- AlterTable
ALTER TABLE "users" ADD COLUMN "first_name" TEXT,
ADD COLUMN "last_name" TEXT;

@@ -0,0 +1,2 @@
-- AlterTable
ALTER TABLE "settings" ADD COLUMN "default_user_role" TEXT NOT NULL DEFAULT 'user';

@@ -0,0 +1,65 @@
-- Initialize default dashboard preferences for all existing users
-- This migration ensures that all users have proper role-based dashboard preferences

-- Function to create default dashboard preferences for a user
CREATE OR REPLACE FUNCTION init_user_dashboard_preferences(user_id TEXT, user_role TEXT)
RETURNS VOID AS $$
DECLARE
    pref_record RECORD;
BEGIN
    -- Delete any existing preferences for this user
    DELETE FROM dashboard_preferences WHERE dashboard_preferences.user_id = init_user_dashboard_preferences.user_id;

    -- Insert role-based preferences
    IF user_role = 'admin' THEN
        -- Admin gets full access to all cards (iby's preferred layout)
        INSERT INTO dashboard_preferences (id, user_id, card_id, enabled, "order", created_at, updated_at)
        VALUES
            (gen_random_uuid(), user_id, 'totalHosts', true, 0, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'hostsNeedingUpdates', true, 1, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'totalOutdatedPackages', true, 2, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'securityUpdates', true, 3, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'totalHostGroups', true, 4, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'upToDateHosts', true, 5, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'totalRepos', true, 6, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'totalUsers', true, 7, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'osDistribution', true, 8, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'osDistributionBar', true, 9, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'recentCollection', true, 10, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'updateStatus', true, 11, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'packagePriority', true, 12, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'recentUsers', true, 13, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'quickStats', true, 14, NOW(), NOW());
    ELSE
        -- Regular users get comprehensive layout but without user management cards
        INSERT INTO dashboard_preferences (id, user_id, card_id, enabled, "order", created_at, updated_at)
        VALUES
            (gen_random_uuid(), user_id, 'totalHosts', true, 0, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'hostsNeedingUpdates', true, 1, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'totalOutdatedPackages', true, 2, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'securityUpdates', true, 3, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'totalHostGroups', true, 4, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'upToDateHosts', true, 5, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'totalRepos', true, 6, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'osDistribution', true, 7, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'osDistributionBar', true, 8, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'recentCollection', true, 9, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'updateStatus', true, 10, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'packagePriority', true, 11, NOW(), NOW()),
            (gen_random_uuid(), user_id, 'quickStats', true, 12, NOW(), NOW());
    END IF;
END;
$$ LANGUAGE plpgsql;

-- Apply default preferences to all existing users
DO $$
DECLARE
    user_record RECORD;
BEGIN
    FOR user_record IN SELECT id, role FROM users LOOP
        PERFORM init_user_dashboard_preferences(user_record.id, user_record.role);
    END LOOP;
END $$;

-- Drop the temporary function
DROP FUNCTION init_user_dashboard_preferences(TEXT, TEXT);
@@ -0,0 +1,12 @@
-- Remove dashboard preferences population
-- This migration clears all existing dashboard preferences so they can be recreated
-- with the correct default order by server.js initialization

-- Clear all existing dashboard preferences
-- This ensures users get the correct default order from server.js
DELETE FROM dashboard_preferences;

-- Recreate indexes for better performance
CREATE INDEX IF NOT EXISTS "dashboard_preferences_user_id_idx" ON "dashboard_preferences"("user_id");
CREATE INDEX IF NOT EXISTS "dashboard_preferences_card_id_idx" ON "dashboard_preferences"("card_id");
CREATE INDEX IF NOT EXISTS "dashboard_preferences_user_card_idx" ON "dashboard_preferences"("user_id", "card_id");

@@ -0,0 +1,10 @@
-- Fix dashboard preferences unique constraint
-- This migration fixes the unique constraint on dashboard_preferences table

-- Drop existing indexes if they exist
DROP INDEX IF EXISTS "dashboard_preferences_card_id_key";
DROP INDEX IF EXISTS "dashboard_preferences_user_id_card_id_key";
DROP INDEX IF EXISTS "dashboard_preferences_user_id_key";

-- Add the correct unique constraint
ALTER TABLE "dashboard_preferences" ADD CONSTRAINT "dashboard_preferences_user_id_card_id_key" UNIQUE ("user_id", "card_id");

@@ -0,0 +1,2 @@
-- DropTable
DROP TABLE "agent_versions";

@@ -0,0 +1,4 @@
-- Add ignore_ssl_self_signed column to settings table
-- This allows users to configure whether curl commands should ignore SSL certificate validation

ALTER TABLE "settings" ADD COLUMN "ignore_ssl_self_signed" BOOLEAN NOT NULL DEFAULT false;

@@ -0,0 +1,3 @@
-- AlterTable
ALTER TABLE "hosts" ADD COLUMN "notes" TEXT;

@@ -0,0 +1,37 @@
-- CreateTable
CREATE TABLE "auto_enrollment_tokens" (
    "id" TEXT NOT NULL,
    "token_name" TEXT NOT NULL,
    "token_key" TEXT NOT NULL,
    "token_secret" TEXT NOT NULL,
    "created_by_user_id" TEXT,
    "is_active" BOOLEAN NOT NULL DEFAULT true,
    "allowed_ip_ranges" TEXT[],
    "max_hosts_per_day" INTEGER NOT NULL DEFAULT 100,
    "hosts_created_today" INTEGER NOT NULL DEFAULT 0,
    "last_reset_date" DATE NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "default_host_group_id" TEXT,
    "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updated_at" TIMESTAMP(3) NOT NULL,
    "last_used_at" TIMESTAMP(3),
    "expires_at" TIMESTAMP(3),
    "metadata" JSONB,

    CONSTRAINT "auto_enrollment_tokens_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE UNIQUE INDEX "auto_enrollment_tokens_token_key_key" ON "auto_enrollment_tokens"("token_key");

-- CreateIndex
CREATE INDEX "auto_enrollment_tokens_token_key_idx" ON "auto_enrollment_tokens"("token_key");

-- CreateIndex
CREATE INDEX "auto_enrollment_tokens_is_active_idx" ON "auto_enrollment_tokens"("is_active");

-- AddForeignKey
ALTER TABLE "auto_enrollment_tokens" ADD CONSTRAINT "auto_enrollment_tokens_created_by_user_id_fkey" FOREIGN KEY ("created_by_user_id") REFERENCES "users"("id") ON DELETE SET NULL ON UPDATE CASCADE;

-- AddForeignKey
ALTER TABLE "auto_enrollment_tokens" ADD CONSTRAINT "auto_enrollment_tokens_default_host_group_id_fkey" FOREIGN KEY ("default_host_group_id") REFERENCES "host_groups"("id") ON DELETE SET NULL ON UPDATE CASCADE;

@@ -0,0 +1,20 @@
-- Add machine_id column as nullable first
ALTER TABLE "hosts" ADD COLUMN "machine_id" TEXT;

-- Generate machine_ids for existing hosts using their API ID as a fallback
UPDATE "hosts" SET "machine_id" = 'migrated-' || "api_id" WHERE "machine_id" IS NULL;

-- Remove the unique constraint from friendly_name
ALTER TABLE "hosts" DROP CONSTRAINT IF EXISTS "hosts_friendly_name_key";

-- Also drop the unique index if it exists (constraint and index can exist separately)
DROP INDEX IF EXISTS "hosts_friendly_name_key";

-- Now make machine_id NOT NULL and add unique constraint
ALTER TABLE "hosts" ALTER COLUMN "machine_id" SET NOT NULL;
ALTER TABLE "hosts" ADD CONSTRAINT "hosts_machine_id_key" UNIQUE ("machine_id");

-- Create indexes for better query performance
CREATE INDEX "hosts_machine_id_idx" ON "hosts"("machine_id");
CREATE INDEX "hosts_friendly_name_idx" ON "hosts"("friendly_name");

@@ -0,0 +1,4 @@
-- AddLogoFieldsToSettings
ALTER TABLE "settings" ADD COLUMN "logo_dark" VARCHAR(255) DEFAULT '/assets/logo_dark.png';
ALTER TABLE "settings" ADD COLUMN "logo_light" VARCHAR(255) DEFAULT '/assets/logo_light.png';
ALTER TABLE "settings" ADD COLUMN "favicon" VARCHAR(255) DEFAULT '/assets/logo_square.svg';
@@ -0,0 +1,64 @@
-- Reconcile user_sessions migration from 1.2.7 to 1.2.8+
-- This migration handles the case where 1.2.7 had 'add_user_sessions' without timestamp
-- and 1.2.8+ renamed it to '20251005000000_add_user_sessions' with timestamp

DO $$
DECLARE
    table_exists boolean := false;
    migration_exists boolean := false;
BEGIN
    -- Check if user_sessions table exists
    SELECT EXISTS (
        SELECT 1 FROM information_schema.tables
        WHERE table_schema = 'public'
        AND table_name = 'user_sessions'
    ) INTO table_exists;

    -- Check if the migration record already exists
    SELECT EXISTS (
        SELECT 1 FROM _prisma_migrations
        WHERE migration_name = '20251005000000_add_user_sessions'
    ) INTO migration_exists;

    -- If table exists but no migration record, create one
    IF table_exists AND NOT migration_exists THEN
        RAISE NOTICE 'Table exists but no migration record found - creating migration record for 1.2.7 upgrade';

        -- Insert a successful migration record for the existing table
        INSERT INTO _prisma_migrations (
            id,
            checksum,
            finished_at,
            migration_name,
            logs,
            rolled_back_at,
            started_at,
            applied_steps_count
        ) VALUES (
            gen_random_uuid()::text,
            '', -- Empty checksum since we're reconciling
            NOW(),
            '20251005000000_add_user_sessions',
            'Reconciled from 1.2.7 - table already exists',
            NULL,
            NOW(),
            1
        );

        RAISE NOTICE 'Migration record created for existing table';
    ELSIF table_exists AND migration_exists THEN
        RAISE NOTICE 'Table exists and migration record exists - no action needed';
    ELSE
        RAISE NOTICE 'Table does not exist - migration will proceed normally';
    END IF;

    -- Additional check: If we have any old migration names, update them
    IF EXISTS (SELECT 1 FROM _prisma_migrations WHERE migration_name = 'add_user_sessions') THEN
        RAISE NOTICE 'Found old migration name - updating to new format';
        UPDATE _prisma_migrations
        SET migration_name = '20251005000000_add_user_sessions'
        WHERE migration_name = 'add_user_sessions';
        RAISE NOTICE 'Old migration name updated';
    END IF;

END $$;
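To verify the reconciliation took effect you can inspect the migration table directly. A hedged sketch; the require path follows this PR's backend/src/config/prisma.js, but running it this way is an assumption:

// List the user_sessions migration records and their completion state
const { getPrismaClient } = require("./backend/src/config/prisma");
const prisma = getPrismaClient();

async function showMigrationState() {
  const rows = await prisma.$queryRaw`
    SELECT migration_name, started_at, finished_at
    FROM _prisma_migrations
    WHERE migration_name LIKE '%add_user_sessions%'
  `;
  console.table(rows);
}

showMigrationState().finally(() => prisma.$disconnect());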
@@ -0,0 +1,96 @@
-- Reconcile user_sessions migration from 1.2.7 to 1.2.8+
-- This migration handles the case where 1.2.7 had 'add_user_sessions' without timestamp
-- and 1.2.8+ renamed it to '20251005000000_add_user_sessions' with timestamp

DO $$
DECLARE
    old_migration_exists boolean := false;
    table_exists boolean := false;
    failed_migration_exists boolean := false;
BEGIN
    -- Check if the old migration name exists
    SELECT EXISTS (
        SELECT 1 FROM _prisma_migrations
        WHERE migration_name = 'add_user_sessions'
    ) INTO old_migration_exists;

    -- Check if user_sessions table exists
    SELECT EXISTS (
        SELECT 1 FROM information_schema.tables
        WHERE table_schema = 'public'
        AND table_name = 'user_sessions'
    ) INTO table_exists;

    -- Check if there's a failed migration attempt
    SELECT EXISTS (
        SELECT 1 FROM _prisma_migrations
        WHERE migration_name = '20251005000000_add_user_sessions'
        AND finished_at IS NULL
    ) INTO failed_migration_exists;

    -- Scenario 1: Old migration exists, table exists, no failed migration
    -- This means 1.2.7 was installed and we need to update the migration name
    IF old_migration_exists AND table_exists AND NOT failed_migration_exists THEN
        RAISE NOTICE 'Found 1.2.7 migration "add_user_sessions" - updating to timestamped version';

        -- Update the old migration name to the new timestamped version
        UPDATE _prisma_migrations
        SET migration_name = '20251005000000_add_user_sessions'
        WHERE migration_name = 'add_user_sessions';

        RAISE NOTICE 'Migration name updated: add_user_sessions -> 20251005000000_add_user_sessions';
    END IF;

    -- Scenario 2: Failed migration exists (upgrade attempt gone wrong)
    IF failed_migration_exists THEN
        RAISE NOTICE 'Found failed migration attempt - cleaning up';

        -- If table exists, it means the migration partially succeeded
        IF table_exists THEN
            RAISE NOTICE 'Table exists - marking migration as applied';

            -- Delete the failed migration record
            DELETE FROM _prisma_migrations
            WHERE migration_name = '20251005000000_add_user_sessions'
            AND finished_at IS NULL;

            -- Insert a successful migration record
            INSERT INTO _prisma_migrations (
                id,
                checksum,
                finished_at,
                migration_name,
                logs,
                rolled_back_at,
                started_at,
                applied_steps_count
            ) VALUES (
                gen_random_uuid()::text,
                '', -- Empty checksum since we're reconciling
                NOW(),
                '20251005000000_add_user_sessions',
                NULL,
                NULL,
                NOW(),
                1
            );

            RAISE NOTICE 'Migration marked as successfully applied';
        ELSE
            RAISE NOTICE 'Table does not exist - removing failed migration to allow retry';

            -- Just delete the failed migration to allow it to retry
            DELETE FROM _prisma_migrations
            WHERE migration_name = '20251005000000_add_user_sessions'
            AND finished_at IS NULL;

            RAISE NOTICE 'Failed migration removed - will retry on next migration run';
        END IF;
    END IF;

    -- Scenario 3: Everything is clean (fresh install or already reconciled)
    IF NOT old_migration_exists AND NOT failed_migration_exists THEN
        RAISE NOTICE 'No migration reconciliation needed';
    END IF;

END $$;
@@ -0,0 +1,106 @@
-- CreateTable (with existence check for 1.2.7 compatibility)
DO $$
BEGIN
    -- Check if table already exists (from 1.2.7 installation)
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.tables
        WHERE table_schema = 'public'
        AND table_name = 'user_sessions'
    ) THEN
        -- Table doesn't exist, create it
        CREATE TABLE "user_sessions" (
            "id" TEXT NOT NULL,
            "user_id" TEXT NOT NULL,
            "refresh_token" TEXT NOT NULL,
            "access_token_hash" TEXT,
            "ip_address" TEXT,
            "user_agent" TEXT,
            "last_activity" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
            "expires_at" TIMESTAMP(3) NOT NULL,
            "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
            "is_revoked" BOOLEAN NOT NULL DEFAULT false,

            CONSTRAINT "user_sessions_pkey" PRIMARY KEY ("id")
        );

        RAISE NOTICE 'Created user_sessions table';
    ELSE
        RAISE NOTICE 'user_sessions table already exists, skipping creation';
    END IF;
END $$;

-- CreateIndex (with existence check)
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM pg_indexes
        WHERE tablename = 'user_sessions'
        AND indexname = 'user_sessions_refresh_token_key'
    ) THEN
        CREATE UNIQUE INDEX "user_sessions_refresh_token_key" ON "user_sessions"("refresh_token");
        RAISE NOTICE 'Created user_sessions_refresh_token_key index';
    ELSE
        RAISE NOTICE 'user_sessions_refresh_token_key index already exists, skipping';
    END IF;
END $$;

-- CreateIndex (with existence check)
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM pg_indexes
        WHERE tablename = 'user_sessions'
        AND indexname = 'user_sessions_user_id_idx'
    ) THEN
        CREATE INDEX "user_sessions_user_id_idx" ON "user_sessions"("user_id");
        RAISE NOTICE 'Created user_sessions_user_id_idx index';
    ELSE
        RAISE NOTICE 'user_sessions_user_id_idx index already exists, skipping';
    END IF;
END $$;

-- CreateIndex (with existence check)
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM pg_indexes
        WHERE tablename = 'user_sessions'
        AND indexname = 'user_sessions_refresh_token_idx'
    ) THEN
        CREATE INDEX "user_sessions_refresh_token_idx" ON "user_sessions"("refresh_token");
        RAISE NOTICE 'Created user_sessions_refresh_token_idx index';
    ELSE
        RAISE NOTICE 'user_sessions_refresh_token_idx index already exists, skipping';
    END IF;
END $$;

-- CreateIndex (with existence check)
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM pg_indexes
        WHERE tablename = 'user_sessions'
        AND indexname = 'user_sessions_expires_at_idx'
    ) THEN
        CREATE INDEX "user_sessions_expires_at_idx" ON "user_sessions"("expires_at");
        RAISE NOTICE 'Created user_sessions_expires_at_idx index';
    ELSE
        RAISE NOTICE 'user_sessions_expires_at_idx index already exists, skipping';
    END IF;
END $$;

-- AddForeignKey (with existence check)
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.table_constraints
        WHERE table_name = 'user_sessions'
        AND constraint_name = 'user_sessions_user_id_fkey'
    ) THEN
        ALTER TABLE "user_sessions" ADD CONSTRAINT "user_sessions_user_id_fkey" FOREIGN KEY ("user_id") REFERENCES "users"("id") ON DELETE CASCADE ON UPDATE CASCADE;
        RAISE NOTICE 'Created user_sessions_user_id_fkey foreign key';
    ELSE
        RAISE NOTICE 'user_sessions_user_id_fkey foreign key already exists, skipping';
    END IF;
END $$;
@@ -0,0 +1,6 @@
-- Add TFA remember me fields to user_sessions table
ALTER TABLE "user_sessions" ADD COLUMN "tfa_remember_me" BOOLEAN NOT NULL DEFAULT false;
ALTER TABLE "user_sessions" ADD COLUMN "tfa_bypass_until" TIMESTAMP(3);

-- Create index for TFA bypass until field for efficient querying
CREATE INDEX "user_sessions_tfa_bypass_until_idx" ON "user_sessions"("tfa_bypass_until");
@@ -0,0 +1,7 @@
-- Add security fields to user_sessions table for production-ready remember me
ALTER TABLE "user_sessions" ADD COLUMN "device_fingerprint" TEXT;
ALTER TABLE "user_sessions" ADD COLUMN "login_count" INTEGER NOT NULL DEFAULT 1;
ALTER TABLE "user_sessions" ADD COLUMN "last_login_ip" TEXT;

-- Create index for device fingerprint for efficient querying
CREATE INDEX "user_sessions_device_fingerprint_idx" ON "user_sessions"("device_fingerprint");
@@ -0,0 +1,3 @@
-- AlterTable
ALTER TABLE "update_history" ADD COLUMN "total_packages" INTEGER;
@@ -0,0 +1,4 @@
-- AlterTable
ALTER TABLE "update_history" ADD COLUMN "payload_size_kb" DOUBLE PRECISION;
ALTER TABLE "update_history" ADD COLUMN "execution_time" DOUBLE PRECISION;
@@ -0,0 +1,30 @@
-- Add indexes to host_packages table for performance optimization
-- These indexes will dramatically speed up queries filtering by host_id, package_id, needs_update, and is_security_update

-- Index for queries filtering by host_id (very common - used when viewing packages for a specific host)
CREATE INDEX IF NOT EXISTS "host_packages_host_id_idx" ON "host_packages"("host_id");

-- Index for queries filtering by package_id (used when finding hosts for a specific package)
CREATE INDEX IF NOT EXISTS "host_packages_package_id_idx" ON "host_packages"("package_id");

-- Index for queries filtering by needs_update (used when finding outdated packages)
CREATE INDEX IF NOT EXISTS "host_packages_needs_update_idx" ON "host_packages"("needs_update");

-- Index for queries filtering by is_security_update (used when finding security updates)
CREATE INDEX IF NOT EXISTS "host_packages_is_security_update_idx" ON "host_packages"("is_security_update");

-- Composite index for the most common query pattern: host_id + needs_update
-- This is optimal for "show me outdated packages for this host"
CREATE INDEX IF NOT EXISTS "host_packages_host_id_needs_update_idx" ON "host_packages"("host_id", "needs_update");

-- Composite index for host_id + needs_update + is_security_update
-- This is optimal for "show me security updates for this host"
CREATE INDEX IF NOT EXISTS "host_packages_host_id_needs_update_security_idx" ON "host_packages"("host_id", "needs_update", "is_security_update");

-- Index for queries filtering by package_id + needs_update
-- This is optimal for "show me hosts where this package needs updates"
CREATE INDEX IF NOT EXISTS "host_packages_package_id_needs_update_idx" ON "host_packages"("package_id", "needs_update");

-- Index on last_checked for cleanup/maintenance queries
CREATE INDEX IF NOT EXISTS "host_packages_last_checked_idx" ON "host_packages"("last_checked");
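For reference, these composite indexes line up with query shapes like the following. A sketch using the Prisma model names introduced later in this diff; the hostId variable is an assumption:

// "Outdated packages for this host" - covered by
// host_packages_host_id_needs_update_idx
const outdated = await prisma.host_packages.findMany({
  where: { host_id: hostId, needs_update: true },
});

// "Security updates for this host" - covered by
// host_packages_host_id_needs_update_security_idx
const securityUpdates = await prisma.host_packages.findMany({
  where: { host_id: hostId, needs_update: true, is_security_update: true },
});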
@@ -0,0 +1,94 @@
-- CreateTable
CREATE TABLE "docker_images" (
    "id" TEXT NOT NULL,
    "repository" TEXT NOT NULL,
    "tag" TEXT NOT NULL DEFAULT 'latest',
    "image_id" TEXT NOT NULL,
    "digest" TEXT,
    "size_bytes" BIGINT,
    "source" TEXT NOT NULL DEFAULT 'docker-hub',
    "created_at" TIMESTAMP(3) NOT NULL,
    "last_pulled" TIMESTAMP(3),
    "last_checked" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updated_at" TIMESTAMP(3) NOT NULL,

    CONSTRAINT "docker_images_pkey" PRIMARY KEY ("id")
);

-- CreateTable
CREATE TABLE "docker_containers" (
    "id" TEXT NOT NULL,
    "host_id" TEXT NOT NULL,
    "container_id" TEXT NOT NULL,
    "name" TEXT NOT NULL,
    "image_id" TEXT,
    "image_name" TEXT NOT NULL,
    "image_tag" TEXT NOT NULL DEFAULT 'latest',
    "status" TEXT NOT NULL,
    "state" TEXT,
    "ports" JSONB,
    "created_at" TIMESTAMP(3) NOT NULL,
    "started_at" TIMESTAMP(3),
    "updated_at" TIMESTAMP(3) NOT NULL,
    "last_checked" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,

    CONSTRAINT "docker_containers_pkey" PRIMARY KEY ("id")
);

-- CreateTable
CREATE TABLE "docker_image_updates" (
    "id" TEXT NOT NULL,
    "image_id" TEXT NOT NULL,
    "current_tag" TEXT NOT NULL,
    "available_tag" TEXT NOT NULL,
    "is_security_update" BOOLEAN NOT NULL DEFAULT false,
    "severity" TEXT,
    "changelog_url" TEXT,
    "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updated_at" TIMESTAMP(3) NOT NULL,

    CONSTRAINT "docker_image_updates_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE INDEX "docker_images_repository_idx" ON "docker_images"("repository");

-- CreateIndex
CREATE INDEX "docker_images_source_idx" ON "docker_images"("source");

-- CreateIndex
CREATE INDEX "docker_images_repository_tag_idx" ON "docker_images"("repository", "tag");

-- CreateIndex
CREATE UNIQUE INDEX "docker_images_repository_tag_image_id_key" ON "docker_images"("repository", "tag", "image_id");

-- CreateIndex
CREATE INDEX "docker_containers_host_id_idx" ON "docker_containers"("host_id");

-- CreateIndex
CREATE INDEX "docker_containers_image_id_idx" ON "docker_containers"("image_id");

-- CreateIndex
CREATE INDEX "docker_containers_status_idx" ON "docker_containers"("status");

-- CreateIndex
CREATE INDEX "docker_containers_name_idx" ON "docker_containers"("name");

-- CreateIndex
CREATE UNIQUE INDEX "docker_containers_host_id_container_id_key" ON "docker_containers"("host_id", "container_id");

-- CreateIndex
CREATE INDEX "docker_image_updates_image_id_idx" ON "docker_image_updates"("image_id");

-- CreateIndex
CREATE INDEX "docker_image_updates_is_security_update_idx" ON "docker_image_updates"("is_security_update");

-- CreateIndex
CREATE UNIQUE INDEX "docker_image_updates_image_id_available_tag_key" ON "docker_image_updates"("image_id", "available_tag");

-- AddForeignKey
ALTER TABLE "docker_containers" ADD CONSTRAINT "docker_containers_image_id_fkey" FOREIGN KEY ("image_id") REFERENCES "docker_images"("id") ON DELETE SET NULL ON UPDATE CASCADE;

-- AddForeignKey
ALTER TABLE "docker_image_updates" ADD CONSTRAINT "docker_image_updates_image_id_fkey" FOREIGN KEY ("image_id") REFERENCES "docker_images"("id") ON DELETE CASCADE ON UPDATE CASCADE;
@@ -0,0 +1,40 @@
-- CreateTable
CREATE TABLE "job_history" (
    "id" TEXT NOT NULL,
    "job_id" TEXT NOT NULL,
    "queue_name" TEXT NOT NULL,
    "job_name" TEXT NOT NULL,
    "host_id" TEXT,
    "api_id" TEXT,
    "status" TEXT NOT NULL,
    "attempt_number" INTEGER NOT NULL DEFAULT 1,
    "error_message" TEXT,
    "output" JSONB,
    "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updated_at" TIMESTAMP(3) NOT NULL,
    "completed_at" TIMESTAMP(3),

    CONSTRAINT "job_history_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE INDEX "job_history_job_id_idx" ON "job_history"("job_id");

-- CreateIndex
CREATE INDEX "job_history_queue_name_idx" ON "job_history"("queue_name");

-- CreateIndex
CREATE INDEX "job_history_host_id_idx" ON "job_history"("host_id");

-- CreateIndex
CREATE INDEX "job_history_api_id_idx" ON "job_history"("api_id");

-- CreateIndex
CREATE INDEX "job_history_status_idx" ON "job_history"("status");

-- CreateIndex
CREATE INDEX "job_history_created_at_idx" ON "job_history"("created_at");

-- AddForeignKey
ALTER TABLE "job_history" ADD CONSTRAINT "job_history_host_id_fkey" FOREIGN KEY ("host_id") REFERENCES "hosts"("id") ON DELETE SET NULL ON UPDATE CASCADE;
@@ -0,0 +1,43 @@
-- CreateTable
CREATE TABLE "host_group_memberships" (
    "id" TEXT NOT NULL,
    "host_id" TEXT NOT NULL,
    "host_group_id" TEXT NOT NULL,
    "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,

    CONSTRAINT "host_group_memberships_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE UNIQUE INDEX "host_group_memberships_host_id_host_group_id_key" ON "host_group_memberships"("host_id", "host_group_id");

-- CreateIndex
CREATE INDEX "host_group_memberships_host_id_idx" ON "host_group_memberships"("host_id");

-- CreateIndex
CREATE INDEX "host_group_memberships_host_group_id_idx" ON "host_group_memberships"("host_group_id");

-- Migrate existing data from hosts.host_group_id to host_group_memberships
INSERT INTO "host_group_memberships" ("id", "host_id", "host_group_id", "created_at")
SELECT
    gen_random_uuid()::text as "id",
    "id" as "host_id",
    "host_group_id" as "host_group_id",
    CURRENT_TIMESTAMP as "created_at"
FROM "hosts"
WHERE "host_group_id" IS NOT NULL;

-- AddForeignKey
ALTER TABLE "host_group_memberships" ADD CONSTRAINT "host_group_memberships_host_id_fkey" FOREIGN KEY ("host_id") REFERENCES "hosts"("id") ON DELETE CASCADE ON UPDATE CASCADE;

-- AddForeignKey
ALTER TABLE "host_group_memberships" ADD CONSTRAINT "host_group_memberships_host_group_id_fkey" FOREIGN KEY ("host_group_id") REFERENCES "host_groups"("id") ON DELETE CASCADE ON UPDATE CASCADE;

-- DropForeignKey
ALTER TABLE "hosts" DROP CONSTRAINT IF EXISTS "hosts_host_group_id_fkey";

-- DropIndex
DROP INDEX IF EXISTS "hosts_host_group_id_idx";

-- AlterTable
ALTER TABLE "hosts" DROP COLUMN "host_group_id";
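Since hosts can now belong to multiple groups through the join table, callers that used to read hosts.host_group_id have to go through host_group_memberships instead. A rough Prisma sketch using the relation names from this PR's schema; the client handle and hostId variable are assumptions:

// Fetch a host together with all of its group memberships
const host = await prisma.hosts.findUnique({
  where: { id: hostId },
  include: {
    host_group_memberships: {
      include: { host_groups: true },
    },
  },
});

// Flatten to the group records themselves
const groups = host.host_group_memberships.map((m) => m.host_groups);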
@@ -0,0 +1,4 @@
-- AlterTable
-- Add color_theme field to settings table for customizable app theming
ALTER TABLE "settings" ADD COLUMN "color_theme" TEXT NOT NULL DEFAULT 'default';
@@ -0,0 +1,14 @@
-- AddMetricsTelemetry
-- Add anonymous metrics and telemetry fields to settings table

-- Add metrics fields to settings table
ALTER TABLE "settings" ADD COLUMN "metrics_enabled" BOOLEAN NOT NULL DEFAULT true;
ALTER TABLE "settings" ADD COLUMN "metrics_anonymous_id" TEXT;
ALTER TABLE "settings" ADD COLUMN "metrics_last_sent" TIMESTAMP(3);

-- Generate UUID for existing records (if any exist)
-- This will use PostgreSQL's gen_random_uuid() function
UPDATE "settings"
SET "metrics_anonymous_id" = gen_random_uuid()::text
WHERE "metrics_anonymous_id" IS NULL;
@@ -0,0 +1,74 @@
-- CreateTable
CREATE TABLE "docker_volumes" (
    "id" TEXT NOT NULL,
    "host_id" TEXT NOT NULL,
    "volume_id" TEXT NOT NULL,
    "name" TEXT NOT NULL,
    "driver" TEXT NOT NULL,
    "mountpoint" TEXT,
    "renderer" TEXT,
    "scope" TEXT NOT NULL DEFAULT 'local',
    "labels" JSONB,
    "options" JSONB,
    "size_bytes" BIGINT,
    "ref_count" INTEGER NOT NULL DEFAULT 0,
    "created_at" TIMESTAMP(3) NOT NULL,
    "updated_at" TIMESTAMP(3) NOT NULL,
    "last_checked" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,

    CONSTRAINT "docker_volumes_pkey" PRIMARY KEY ("id")
);

-- CreateTable
CREATE TABLE "docker_networks" (
    "id" TEXT NOT NULL,
    "host_id" TEXT NOT NULL,
    "network_id" TEXT NOT NULL,
    "name" TEXT NOT NULL,
    "driver" TEXT NOT NULL,
    "scope" TEXT NOT NULL DEFAULT 'local',
    "ipv6_enabled" BOOLEAN NOT NULL DEFAULT false,
    "internal" BOOLEAN NOT NULL DEFAULT false,
    "attachable" BOOLEAN NOT NULL DEFAULT true,
    "ingress" BOOLEAN NOT NULL DEFAULT false,
    "config_only" BOOLEAN NOT NULL DEFAULT false,
    "labels" JSONB,
    "ipam" JSONB,
    "container_count" INTEGER NOT NULL DEFAULT 0,
    "created_at" TIMESTAMP(3),
    "updated_at" TIMESTAMP(3) NOT NULL,
    "last_checked" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,

    CONSTRAINT "docker_networks_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE INDEX "docker_volumes_host_id_idx" ON "docker_volumes"("host_id");

-- CreateIndex
CREATE INDEX "docker_volumes_name_idx" ON "docker_volumes"("name");

-- CreateIndex
CREATE INDEX "docker_volumes_driver_idx" ON "docker_volumes"("driver");

-- CreateIndex
CREATE UNIQUE INDEX "docker_volumes_host_id_volume_id_key" ON "docker_volumes"("host_id", "volume_id");

-- CreateIndex
CREATE INDEX "docker_networks_host_id_idx" ON "docker_networks"("host_id");

-- CreateIndex
CREATE INDEX "docker_networks_name_idx" ON "docker_networks"("name");

-- CreateIndex
CREATE INDEX "docker_networks_driver_idx" ON "docker_networks"("driver");

-- CreateIndex
CREATE UNIQUE INDEX "docker_networks_host_id_network_id_key" ON "docker_networks"("host_id", "network_id");

-- AddForeignKey
ALTER TABLE "docker_volumes" ADD CONSTRAINT "docker_volumes_host_id_fkey" FOREIGN KEY ("host_id") REFERENCES "hosts"("id") ON DELETE CASCADE ON UPDATE CASCADE;

-- AddForeignKey
ALTER TABLE "docker_networks" ADD CONSTRAINT "docker_networks_host_id_fkey" FOREIGN KEY ("host_id") REFERENCES "hosts"("id") ON DELETE CASCADE ON UPDATE CASCADE;
@@ -0,0 +1,6 @@
-- AlterTable
ALTER TABLE "users" ADD COLUMN IF NOT EXISTS "theme_preference" VARCHAR(10) DEFAULT 'dark';

-- AlterTable
ALTER TABLE "users" ADD COLUMN IF NOT EXISTS "color_theme" VARCHAR(50) DEFAULT 'cyber_blue';
@@ -1,6 +1,3 @@
-// This is your Prisma schema file,
-// learn more about it in the docs: https://pris.ly/d/prisma-schema
-
 generator client {
   provider = "prisma-client-js"
 }
@@ -10,209 +7,426 @@ datasource db {
   url      = env("DATABASE_URL")
 }

-model User {
-  id           String    @id @default(cuid())
-  username     String    @unique
-  email        String    @unique
-  passwordHash String    @map("password_hash")
-  role         String    @default("admin") // admin, user
-  isActive     Boolean   @default(true) @map("is_active")
-  lastLogin    DateTime? @map("last_login")
-  createdAt    DateTime  @map("created_at") @default(now())
-  updatedAt    DateTime  @map("updated_at") @updatedAt
-
-  // Relationships
-  dashboardPreferences DashboardPreferences[]
-
-  @@map("users")
+model dashboard_preferences {
+  id         String   @id
+  user_id    String
+  card_id    String
+  enabled    Boolean  @default(true)
+  order      Int      @default(0)
+  created_at DateTime @default(now())
+  updated_at DateTime
+  users      users    @relation(fields: [user_id], references: [id], onDelete: Cascade)
+
+  @@unique([user_id, card_id])
 }
-model RolePermissions {
-  id                String   @id @default(cuid())
-  role              String   @unique // admin, user, custom roles
-  canViewDashboard  Boolean  @default(true) @map("can_view_dashboard")
-  canViewHosts      Boolean  @default(true) @map("can_view_hosts")
-  canManageHosts    Boolean  @default(false) @map("can_manage_hosts")
-  canViewPackages   Boolean  @default(true) @map("can_view_packages")
-  canManagePackages Boolean  @default(false) @map("can_manage_packages")
-  canViewUsers      Boolean  @default(false) @map("can_view_users")
-  canManageUsers    Boolean  @default(false) @map("can_manage_users")
-  canViewReports    Boolean  @default(true) @map("can_view_reports")
-  canExportData     Boolean  @default(false) @map("can_export_data")
-  canManageSettings Boolean  @default(false) @map("can_manage_settings")
-  createdAt         DateTime @map("created_at") @default(now())
-  updatedAt         DateTime @map("updated_at") @updatedAt
-
-  @@map("role_permissions")
+model host_groups {
+  id                     String   @id
+  name                   String   @unique
+  description            String?
+  color                  String?  @default("#3B82F6")
+  created_at             DateTime @default(now())
+  updated_at             DateTime
+  host_group_memberships host_group_memberships[]
+  auto_enrollment_tokens auto_enrollment_tokens[]
 }
-model HostGroup {
-  id          String   @id @default(cuid())
-  name        String   @unique
-  description String?
-  color       String?  @default("#3B82F6") // Hex color for UI display
-  createdAt   DateTime @map("created_at") @default(now())
-  updatedAt   DateTime @map("updated_at") @updatedAt
-
-  // Relationships
-  hosts Host[]
-
-  @@map("host_groups")
+model host_group_memberships {
+  id            String      @id
+  host_id       String
+  host_group_id String
+  created_at    DateTime    @default(now())
+  hosts         hosts       @relation(fields: [host_id], references: [id], onDelete: Cascade)
+  host_groups   host_groups @relation(fields: [host_group_id], references: [id], onDelete: Cascade)
+
+  @@unique([host_id, host_group_id])
+  @@index([host_id])
+  @@index([host_group_id])
 }
-model Host {
-  id           String   @id @default(cuid())
-  hostname     String   @unique
-  ip           String?
-  osType       String   @map("os_type")
-  osVersion    String   @map("os_version")
-  architecture String?
-  lastUpdate   DateTime @map("last_update") @default(now())
-  status       String   @default("active") // active, inactive, error
-  apiId        String   @unique @map("api_id") // New API ID for authentication
-  apiKey       String   @unique @map("api_key") // New API Key for authentication
-  hostGroupId  String?  @map("host_group_id") // Optional group association
-  agentVersion String?  @map("agent_version") // Agent script version
-  autoUpdate   Boolean  @map("auto_update") @default(true) // Enable auto-update for this host
-  createdAt    DateTime @map("created_at") @default(now())
-  updatedAt    DateTime @map("updated_at") @updatedAt
-
-  // Relationships
-  hostPackages     HostPackage[]
-  updateHistory    UpdateHistory[]
-  hostRepositories HostRepository[]
-  hostGroup        HostGroup? @relation(fields: [hostGroupId], references: [id], onDelete: SetNull)
-
-  @@map("hosts")
+model host_packages {
+  id                 String   @id
+  host_id            String
+  package_id         String
+  current_version    String
+  available_version  String?
+  needs_update       Boolean  @default(false)
+  is_security_update Boolean  @default(false)
+  last_checked       DateTime @default(now())
+  hosts              hosts    @relation(fields: [host_id], references: [id], onDelete: Cascade)
+  packages           packages @relation(fields: [package_id], references: [id], onDelete: Cascade)
+
+  @@unique([host_id, package_id])
+  @@index([host_id])
+  @@index([package_id])
+  @@index([needs_update])
+  @@index([is_security_update])
+  @@index([host_id, needs_update])
+  @@index([host_id, needs_update, is_security_update])
+  @@index([package_id, needs_update])
+  @@index([last_checked])
 }
-model Package {
-  id            String   @id @default(cuid())
-  name          String   @unique
-  description   String?
-  category      String?  // system, security, development, etc.
-  latestVersion String?  @map("latest_version")
-  createdAt     DateTime @map("created_at") @default(now())
-  updatedAt     DateTime @map("updated_at") @updatedAt
-
-  // Relationships
-  hostPackages HostPackage[]
-
-  @@map("packages")
+model host_repositories {
+  id            String       @id
+  host_id       String
+  repository_id String
+  is_enabled    Boolean      @default(true)
+  last_checked  DateTime     @default(now())
+  hosts         hosts        @relation(fields: [host_id], references: [id], onDelete: Cascade)
+  repositories  repositories @relation(fields: [repository_id], references: [id], onDelete: Cascade)
+
+  @@unique([host_id, repository_id])
 }
-model HostPackage {
-  id               String   @id @default(cuid())
-  hostId           String   @map("host_id")
-  packageId        String   @map("package_id")
-  currentVersion   String   @map("current_version")
-  availableVersion String?  @map("available_version")
-  needsUpdate      Boolean  @map("needs_update") @default(false)
-  isSecurityUpdate Boolean  @map("is_security_update") @default(false)
-  lastChecked      DateTime @map("last_checked") @default(now())
-
-  // Relationships
-  host    Host    @relation(fields: [hostId], references: [id], onDelete: Cascade)
-  package Package @relation(fields: [packageId], references: [id], onDelete: Cascade)
-
-  @@unique([hostId, packageId])
-  @@map("host_packages")
+model hosts {
+  id                     String   @id
+  machine_id             String   @unique
+  friendly_name          String
+  ip                     String?
+  os_type                String
+  os_version             String
+  architecture           String?
+  last_update            DateTime @default(now())
+  status                 String   @default("active")
+  created_at             DateTime @default(now())
+  updated_at             DateTime
+  api_id                 String   @unique
+  api_key                String   @unique
+  agent_version          String?
+  auto_update            Boolean  @default(true)
+  cpu_cores              Int?
+  cpu_model              String?
+  disk_details           Json?
+  dns_servers            Json?
+  gateway_ip             String?
+  hostname               String?
+  kernel_version         String?
+  load_average           Json?
+  network_interfaces     Json?
+  ram_installed          Int?
+  selinux_status         String?
+  swap_size              Int?
+  system_uptime          String?
+  notes                  String?
+  host_packages          host_packages[]
+  host_repositories      host_repositories[]
+  host_group_memberships host_group_memberships[]
+  update_history         update_history[]
+  job_history            job_history[]
+  docker_volumes         docker_volumes[]
+  docker_networks        docker_networks[]
+
+  @@index([machine_id])
+  @@index([friendly_name])
+  @@index([hostname])
 }
-model UpdateHistory {
-  id            String   @id @default(cuid())
-  hostId        String   @map("host_id")
-  packagesCount Int      @map("packages_count")
-  securityCount Int      @map("security_count")
-  timestamp     DateTime @default(now())
-  status        String   @default("success") // success, error
-  errorMessage  String?  @map("error_message")
-
-  // Relationships
-  host Host @relation(fields: [hostId], references: [id], onDelete: Cascade)
-
-  @@map("update_history")
+model packages {
+  id             String   @id
+  name           String   @unique
+  description    String?
+  category       String?
+  latest_version String?
+  created_at     DateTime @default(now())
+  updated_at     DateTime
+  host_packages  host_packages[]
+
+  @@index([name])
+  @@index([category])
 }
-model Repository {
-  id           String   @id @default(cuid())
-  name         String   // Repository name (e.g., "focal", "focal-updates")
-  url          String   // Repository URL
-  distribution String   // Distribution (e.g., "focal", "jammy")
-  components   String   // Components (e.g., "main restricted universe multiverse")
-  repoType     String   @map("repo_type") // "deb" or "deb-src"
-  isActive     Boolean  @map("is_active") @default(true)
-  isSecure     Boolean  @map("is_secure") @default(true) // HTTPS vs HTTP
-  priority     Int?     // Repository priority
-  description  String?  // Optional description
-  createdAt    DateTime @map("created_at") @default(now())
-  updatedAt    DateTime @map("updated_at") @updatedAt
-
-  // Relationships
-  hostRepositories HostRepository[]
+model repositories {
+  id                String   @id
+  name              String
+  url               String
+  distribution      String
+  components        String
+  repo_type         String
+  is_active         Boolean  @default(true)
+  is_secure         Boolean  @default(true)
+  priority          Int?
+  description       String?
+  created_at        DateTime @default(now())
+  updated_at        DateTime
+  host_repositories host_repositories[]

   @@unique([url, distribution, components])
-  @@map("repositories")
 }
-model HostRepository {
-  id           String   @id @default(cuid())
-  hostId       String   @map("host_id")
-  repositoryId String   @map("repository_id")
-  isEnabled    Boolean  @map("is_enabled") @default(true)
-  lastChecked  DateTime @map("last_checked") @default(now())
-
-  // Relationships
-  host       Host       @relation(fields: [hostId], references: [id], onDelete: Cascade)
-  repository Repository @relation(fields: [repositoryId], references: [id], onDelete: Cascade)
-
-  @@unique([hostId, repositoryId])
-  @@map("host_repositories")
+model role_permissions {
+  id                  String   @id
+  role                String   @unique
+  can_view_dashboard  Boolean  @default(true)
+  can_view_hosts      Boolean  @default(true)
+  can_manage_hosts    Boolean  @default(false)
+  can_view_packages   Boolean  @default(true)
+  can_manage_packages Boolean  @default(false)
+  can_view_users      Boolean  @default(false)
+  can_manage_users    Boolean  @default(false)
+  can_view_reports    Boolean  @default(true)
+  can_export_data     Boolean  @default(false)
+  can_manage_settings Boolean  @default(false)
+  created_at          DateTime @default(now())
+  updated_at          DateTime
 }
-model Settings {
-  id             String   @id @default(cuid())
-  serverUrl      String   @map("server_url") @default("http://localhost:3001")
-  serverProtocol String   @map("server_protocol") @default("http") // http, https
-  serverHost     String   @map("server_host") @default("localhost")
-  serverPort     Int      @map("server_port") @default(3001)
-  frontendUrl    String   @map("frontend_url") @default("http://localhost:3000")
-  updateInterval Int      @map("update_interval") @default(60) // Update interval in minutes
-  autoUpdate     Boolean  @map("auto_update") @default(false) // Enable automatic agent updates
-  githubRepoUrl  String   @map("github_repo_url") @default("git@github.com:9technologygroup/patchmon.net.git") // GitHub repository URL for version checking
-  createdAt      DateTime @map("created_at") @default(now())
-  updatedAt      DateTime @map("updated_at") @updatedAt
-
-  @@map("settings")
+model settings {
+  id                     String    @id
+  server_url             String    @default("http://localhost:3001")
+  server_protocol        String    @default("http")
+  server_host            String    @default("localhost")
+  server_port            Int       @default(3001)
+  created_at             DateTime  @default(now())
+  updated_at             DateTime
+  update_interval        Int       @default(60)
+  auto_update            Boolean   @default(false)
+  github_repo_url        String    @default("https://github.com/PatchMon/PatchMon.git")
+  ssh_key_path           String?
+  repository_type        String    @default("public")
+  last_update_check      DateTime?
+  latest_version         String?
+  update_available       Boolean   @default(false)
+  signup_enabled         Boolean   @default(false)
+  default_user_role      String    @default("user")
+  ignore_ssl_self_signed Boolean   @default(false)
+  logo_dark              String?   @default("/assets/logo_dark.png")
+  logo_light             String?   @default("/assets/logo_light.png")
+  favicon                String?   @default("/assets/logo_square.svg")
+  metrics_enabled        Boolean   @default(true)
+  metrics_anonymous_id   String?
+  metrics_last_sent      DateTime?
 }
-model DashboardPreferences {
-  id        String   @id @default(cuid())
-  userId    String   @map("user_id")
-  cardId    String   @map("card_id") // e.g., "totalHosts", "securityUpdates", etc.
-  enabled   Boolean  @default(true)
-  order     Int      @default(0)
-  createdAt DateTime @map("created_at") @default(now())
-  updatedAt DateTime @map("updated_at") @updatedAt
-
-  // Relationships
-  user User @relation(fields: [userId], references: [id], onDelete: Cascade)
-
-  @@unique([userId, cardId])
-  @@map("dashboard_preferences")
+model update_history {
+  id              String   @id
+  host_id         String
+  packages_count  Int
+  security_count  Int
+  total_packages  Int?
+  payload_size_kb Float?
+  execution_time  Float?
+  timestamp       DateTime @default(now())
+  status          String   @default("success")
+  error_message   String?
+  hosts           hosts    @relation(fields: [host_id], references: [id], onDelete: Cascade)
 }
-model AgentVersion {
-  id               String   @id @default(cuid())
-  version          String   @unique // e.g., "1.0.0", "1.1.0"
-  isCurrent        Boolean  @default(false) @map("is_current") // Only one version can be current
-  releaseNotes     String?  @map("release_notes")
-  downloadUrl      String?  @map("download_url") // URL to download the agent script
-  minServerVersion String?  @map("min_server_version") // Minimum server version required
-  scriptContent    String?  @map("script_content") // The actual agent script content
-  isDefault        Boolean  @default(false) @map("is_default") // Default version for new installations
-  createdAt        DateTime @map("created_at") @default(now())
-  updatedAt        DateTime @map("updated_at") @updatedAt
-
-  @@map("agent_versions")
+model system_statistics {
+  id                    String   @id
+  unique_packages_count Int
+  unique_security_count Int
+  total_packages        Int
+  total_hosts           Int
+  hosts_needing_updates Int
+  timestamp             DateTime @default(now())
+
+  @@index([timestamp])
+}
+
+model users {
+  id                     String    @id
+  username               String    @unique
+  email                  String    @unique
+  password_hash          String
+  role                   String    @default("admin")
+  is_active              Boolean   @default(true)
+  last_login             DateTime?
+  created_at             DateTime  @default(now())
+  updated_at             DateTime
+  tfa_backup_codes       String?
+  tfa_enabled            Boolean   @default(false)
+  tfa_secret             String?
+  first_name             String?
+  last_name              String?
+  theme_preference       String?   @default("dark")
+  color_theme            String?   @default("cyber_blue")
+  dashboard_preferences  dashboard_preferences[]
+  user_sessions          user_sessions[]
+  auto_enrollment_tokens auto_enrollment_tokens[]
+}
+
+model user_sessions {
+  id                 String    @id
+  user_id            String
+  refresh_token      String    @unique
+  access_token_hash  String?
+  ip_address         String?
+  user_agent         String?
+  device_fingerprint String?
+  last_activity      DateTime  @default(now())
+  expires_at         DateTime
+  created_at         DateTime  @default(now())
+  is_revoked         Boolean   @default(false)
+  tfa_remember_me    Boolean   @default(false)
+  tfa_bypass_until   DateTime?
+  login_count        Int       @default(1)
+  last_login_ip      String?
+  users              users     @relation(fields: [user_id], references: [id], onDelete: Cascade)
+
+  @@index([user_id])
+  @@index([refresh_token])
+  @@index([expires_at])
+  @@index([tfa_bypass_until])
+  @@index([device_fingerprint])
+}
+
+model auto_enrollment_tokens {
+  id                    String       @id
+  token_name            String
+  token_key             String       @unique
+  token_secret          String
+  created_by_user_id    String?
+  is_active             Boolean      @default(true)
+  allowed_ip_ranges     String[]
+  max_hosts_per_day     Int          @default(100)
+  hosts_created_today   Int          @default(0)
+  last_reset_date       DateTime     @default(now()) @db.Date
+  default_host_group_id String?
+  created_at            DateTime     @default(now())
+  updated_at            DateTime
+  last_used_at          DateTime?
+  expires_at            DateTime?
+  metadata              Json?
+  users                 users?       @relation(fields: [created_by_user_id], references: [id], onDelete: SetNull)
+  host_groups           host_groups? @relation(fields: [default_host_group_id], references: [id], onDelete: SetNull)
+
+  @@index([token_key])
+  @@index([is_active])
+}
+
+model docker_containers {
+  id            String         @id
+  host_id       String
+  container_id  String
+  name          String
+  image_id      String?
+  image_name    String
+  image_tag     String         @default("latest")
+  status        String
+  state         String?
+  ports         Json?
+  created_at    DateTime
+  started_at    DateTime?
+  updated_at    DateTime
+  last_checked  DateTime       @default(now())
+  docker_images docker_images? @relation(fields: [image_id], references: [id], onDelete: SetNull)
+
+  @@unique([host_id, container_id])
+  @@index([host_id])
+  @@index([image_id])
+  @@index([status])
+  @@index([name])
+}
+
+model docker_images {
+  id                   String    @id
+  repository           String
+  tag                  String    @default("latest")
+  image_id             String
+  digest               String?
+  size_bytes           BigInt?
+  source               String    @default("docker-hub")
+  created_at           DateTime
+  last_pulled          DateTime?
+  last_checked         DateTime  @default(now())
+  updated_at           DateTime
+  docker_containers    docker_containers[]
+  docker_image_updates docker_image_updates[]
+
+  @@unique([repository, tag, image_id])
+  @@index([repository])
+  @@index([source])
+  @@index([repository, tag])
+}
+
+model docker_image_updates {
+  id                 String        @id
+  image_id           String
+  current_tag        String
+  available_tag      String
+  is_security_update Boolean       @default(false)
+  severity           String?
+  changelog_url      String?
+  created_at         DateTime      @default(now())
+  updated_at         DateTime
+  docker_images      docker_images @relation(fields: [image_id], references: [id], onDelete: Cascade)
+
+  @@unique([image_id, available_tag])
+  @@index([image_id])
+  @@index([is_security_update])
+}
+
+model docker_volumes {
+  id           String   @id
+  host_id      String
+  volume_id    String
+  name         String
+  driver       String
+  mountpoint   String?
+  renderer     String?
+  scope        String   @default("local")
+  labels       Json?
+  options      Json?
+  size_bytes   BigInt?
+  ref_count    Int      @default(0)
+  created_at   DateTime
+  updated_at   DateTime
+  last_checked DateTime @default(now())
+  hosts        hosts    @relation(fields: [host_id], references: [id], onDelete: Cascade)
+
+  @@unique([host_id, volume_id])
+  @@index([host_id])
+  @@index([name])
+  @@index([driver])
+}
+
+model docker_networks {
+  id              String    @id
+  host_id         String
+  network_id      String
+  name            String
+  driver          String
+  scope           String    @default("local")
+  ipv6_enabled    Boolean   @default(false)
+  internal        Boolean   @default(false)
+  attachable      Boolean   @default(true)
+  ingress         Boolean   @default(false)
+  config_only     Boolean   @default(false)
+  labels          Json?
+  ipam            Json?     // IPAM configuration (driver, config, options)
+  container_count Int       @default(0)
+  created_at      DateTime?
+  updated_at      DateTime
+  last_checked    DateTime  @default(now())
+  hosts           hosts     @relation(fields: [host_id], references: [id], onDelete: Cascade)
+
+  @@unique([host_id, network_id])
+  @@index([host_id])
+  @@index([name])
+  @@index([driver])
+}
+
+model job_history {
+  id             String    @id
+  job_id         String
+  queue_name     String
+  job_name       String
+  host_id        String?
+  api_id         String?
+  status         String
+  attempt_number Int       @default(1)
+  error_message  String?
+  output         Json?
+  created_at     DateTime  @default(now())
+  updated_at     DateTime
+  completed_at   DateTime?
+  hosts          hosts?    @relation(fields: [host_id], references: [id], onDelete: SetNull)
+
+  @@index([job_id])
+  @@index([queue_name])
+  @@index([host_id])
+  @@index([api_id])
+  @@index([status])
+  @@index([created_at])
 }
165  backend/src/config/prisma.js  Normal file
@@ -0,0 +1,165 @@
/**
 * Centralized Prisma Client Singleton
 * Prevents multiple Prisma clients from creating connection leaks
 */

const { PrismaClient } = require("@prisma/client");

// Parse DATABASE_URL and add connection pooling parameters
function getOptimizedDatabaseUrl() {
  const originalUrl = process.env.DATABASE_URL;

  if (!originalUrl) {
    throw new Error("DATABASE_URL environment variable is required");
  }

  // Parse the URL
  const url = new URL(originalUrl);

  // Add connection pooling parameters - configurable via environment variables
  const connectionLimit = process.env.DB_CONNECTION_LIMIT || "30";
  const poolTimeout = process.env.DB_POOL_TIMEOUT || "20";
  const connectTimeout = process.env.DB_CONNECT_TIMEOUT || "10";
  const idleTimeout = process.env.DB_IDLE_TIMEOUT || "300";
  const maxLifetime = process.env.DB_MAX_LIFETIME || "1800";

  url.searchParams.set("connection_limit", connectionLimit);
  url.searchParams.set("pool_timeout", poolTimeout);
  url.searchParams.set("connect_timeout", connectTimeout);
  url.searchParams.set("idle_timeout", idleTimeout);
  url.searchParams.set("max_lifetime", maxLifetime);

  // Log connection pool settings in development/debug mode
  if (
    process.env.ENABLE_LOGGING === "true" ||
    process.env.LOG_LEVEL === "debug"
  ) {
    console.log(
      `[Database Pool] connection_limit=${connectionLimit}, pool_timeout=${poolTimeout}s, connect_timeout=${connectTimeout}s`,
    );
  }

  return url.toString();
}

// Singleton Prisma client instance
let prismaInstance = null;

function getPrismaClient() {
  if (!prismaInstance) {
    const optimizedUrl = getOptimizedDatabaseUrl();

    prismaInstance = new PrismaClient({
      datasources: {
        db: {
          url: optimizedUrl,
        },
      },
      log:
        process.env.PRISMA_LOG_QUERIES === "true"
          ? ["query", "info", "warn", "error"]
          : ["warn", "error"],
      errorFormat: "pretty",
    });

    // Handle graceful shutdown
    process.on("beforeExit", async () => {
      await prismaInstance.$disconnect();
    });

    process.on("SIGINT", async () => {
      await prismaInstance.$disconnect();
      process.exit(0);
    });

    process.on("SIGTERM", async () => {
      await prismaInstance.$disconnect();
      process.exit(0);
    });
  }

  return prismaInstance;
}

// Connection health check
async function checkDatabaseConnection(prisma) {
  try {
    await prisma.$queryRaw`SELECT 1`;
    return true;
  } catch (error) {
    console.error("Database connection check failed:", error.message);
    return false;
  }
}

// Wait for database to be available with retry logic
async function waitForDatabase(prisma, options = {}) {
  const maxAttempts =
    options.maxAttempts ||
    parseInt(process.env.PM_DB_CONN_MAX_ATTEMPTS, 10) ||
    30;
  const waitInterval =
    options.waitInterval ||
    parseInt(process.env.PM_DB_CONN_WAIT_INTERVAL, 10) ||
    2;

  if (process.env.ENABLE_LOGGING === "true") {
    console.log(
      `Waiting for database connection (max ${maxAttempts} attempts, ${waitInterval}s interval)...`,
    );
  }

  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      const isConnected = await checkDatabaseConnection(prisma);
      if (isConnected) {
        if (process.env.ENABLE_LOGGING === "true") {
          console.log(
            `Database connected successfully after ${attempt} attempt(s)`,
          );
        }
        return true;
      }
    } catch {
      // checkDatabaseConnection already logs the error
    }

    if (attempt < maxAttempts) {
      if (process.env.ENABLE_LOGGING === "true") {
        console.log(
          `⏳ Database not ready (attempt ${attempt}/${maxAttempts}), retrying in ${waitInterval}s...`,
        );
      }
      await new Promise((resolve) => setTimeout(resolve, waitInterval * 1000));
    }
  }

  throw new Error(
    `❌ Database failed to become available after ${maxAttempts} attempts`,
  );
}

// Graceful disconnect with retry
async function disconnectPrisma(prisma, maxRetries = 3) {
  for (let i = 0; i < maxRetries; i++) {
    try {
      await prisma.$disconnect();
      console.log("Database disconnected successfully");
      return;
    } catch (error) {
      console.error(`Disconnect attempt ${i + 1} failed:`, error.message);
      if (i === maxRetries - 1) {
        console.error("Failed to disconnect from database after all retries");
      } else {
        await new Promise((resolve) => setTimeout(resolve, 1000)); // Wait 1 second
      }
    }
  }
}

module.exports = {
  getPrismaClient,
  checkDatabaseConnection,
  waitForDatabase,
  disconnectPrisma,
};
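A minimal usage sketch of this module at server startup, using only the exports above; the start() wrapper and error handling are illustrative assumptions, not code from this PR:

// Hypothetical startup sequence using backend/src/config/prisma.js
const {
  getPrismaClient,
  waitForDatabase,
  disconnectPrisma,
} = require("./config/prisma");

async function start() {
  const prisma = getPrismaClient(); // same singleton everywhere it's required
  await waitForDatabase(prisma, { maxAttempts: 10, waitInterval: 3 });
  // ... start the HTTP server here ...
}

start().catch(async (err) => {
  console.error(err);
  await disconnectPrisma(getPrismaClient());
  process.exit(1);
});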
@@ -1,98 +1,151 @@
-const jwt = require('jsonwebtoken');
-const { PrismaClient } = require('@prisma/client');
+const jwt = require("jsonwebtoken");
+const { getPrismaClient } = require("../config/prisma");
+const {
+  validate_session,
+  update_session_activity,
+  is_tfa_bypassed,
+} = require("../utils/session_manager");

-const prisma = new PrismaClient();
+const prisma = getPrismaClient();

-// Middleware to verify JWT token
+// Middleware to verify JWT token with session validation
 const authenticateToken = async (req, res, next) => {
   try {
-    const authHeader = req.headers['authorization'];
-    const token = authHeader && authHeader.split(' ')[1]; // Bearer TOKEN
+    const authHeader = req.headers.authorization;
+    const token = authHeader?.split(" ")[1]; // Bearer TOKEN

     if (!token) {
-      return res.status(401).json({ error: 'Access token required' });
+      return res.status(401).json({ error: "Access token required" });
     }

     // Verify token
-    const decoded = jwt.verify(token, process.env.JWT_SECRET || 'your-secret-key');
+    if (!process.env.JWT_SECRET) {
+      throw new Error("JWT_SECRET environment variable is required");
+    }
+    const decoded = jwt.verify(token, process.env.JWT_SECRET);

-    // Get user from database
-    const user = await prisma.user.findUnique({
-      where: { id: decoded.userId },
-      select: {
-        id: true,
-        username: true,
-        email: true,
-        role: true,
-        isActive: true,
-        lastLogin: true
-      }
-    });
+    // Validate session and check inactivity timeout
+    const validation = await validate_session(decoded.sessionId, token);

-    if (!user || !user.isActive) {
-      return res.status(401).json({ error: 'Invalid or inactive user' });
-    }
+    if (!validation.valid) {
+      const error_messages = {
+        "Session not found": "Session not found",
+        "Session revoked": "Session has been revoked",
+        "Session expired": "Session has expired",
+        "Session inactive":
+          validation.message || "Session timed out due to inactivity",
+        "Token mismatch": "Invalid token",
+        "User inactive": "User account is inactive",
+      };

-    // Update last login
-    await prisma.user.update({
-      where: { id: user.id },
-      data: { lastLogin: new Date() }
-    });
+      return res.status(401).json({
+        error: error_messages[validation.reason] || "Authentication failed",
+        reason: validation.reason,
+      });
+    }

-    req.user = user;
-    next();
-  } catch (error) {
-    if (error.name === 'JsonWebTokenError') {
-      return res.status(401).json({ error: 'Invalid token' });
-    }
-    if (error.name === 'TokenExpiredError') {
-      return res.status(401).json({ error: 'Token expired' });
-    }
-    console.error('Auth middleware error:', error);
-    return res.status(500).json({ error: 'Authentication failed' });
-  }
+    // Update session activity timestamp
+    await update_session_activity(decoded.sessionId);
+
+    // Check if TFA is bypassed for this session
+    const tfa_bypassed = await is_tfa_bypassed(decoded.sessionId);
+
+    // Update last login (only on successful authentication)
+    await prisma.users.update({
+      where: { id: validation.user.id },
+      data: {
+        last_login: new Date(),
+        updated_at: new Date(),
+      },
+    });
+
+    req.user = validation.user;
+    req.session_id = decoded.sessionId;
+    req.tfa_bypassed = tfa_bypassed;
+    next();
+  } catch (error) {
+    if (error.name === "JsonWebTokenError") {
+      return res.status(401).json({ error: "Invalid token" });
+    }
+    if (error.name === "TokenExpiredError") {
+      return res.status(401).json({ error: "Token expired" });
+    }
+    console.error("Auth middleware error:", error);
+    return res.status(500).json({ error: "Authentication failed" });
+  }
 };
// Middleware to check admin role
|
// Middleware to check admin role
|
||||||
const requireAdmin = (req, res, next) => {
|
const requireAdmin = (req, res, next) => {
|
||||||
if (req.user.role !== 'admin') {
|
if (req.user.role !== "admin") {
|
||||||
return res.status(403).json({ error: 'Admin access required' });
|
return res.status(403).json({ error: "Admin access required" });
|
||||||
}
|
}
|
||||||
next();
|
next();
|
||||||
};
|
};
|
||||||
|
|
||||||
// Middleware to check if user is authenticated (optional)
|
// Middleware to check if user is authenticated (optional)
|
||||||
const optionalAuth = async (req, res, next) => {
|
const optionalAuth = async (req, _res, next) => {
|
||||||
try {
|
try {
|
||||||
const authHeader = req.headers['authorization'];
|
const authHeader = req.headers.authorization;
|
||||||
const token = authHeader && authHeader.split(' ')[1];
|
const token = authHeader?.split(" ")[1];
|
||||||
|
|
||||||
if (token) {
|
if (token) {
|
||||||
const decoded = jwt.verify(token, process.env.JWT_SECRET || 'your-secret-key');
|
if (!process.env.JWT_SECRET) {
|
||||||
const user = await prisma.user.findUnique({
|
throw new Error("JWT_SECRET environment variable is required");
|
||||||
where: { id: decoded.userId },
|
}
|
||||||
select: {
|
const decoded = jwt.verify(token, process.env.JWT_SECRET);
|
||||||
id: true,
|
const user = await prisma.users.findUnique({
|
||||||
username: true,
|
where: { id: decoded.userId },
|
||||||
email: true,
|
select: {
|
||||||
role: true,
|
id: true,
|
||||||
isActive: true
|
username: true,
|
||||||
}
|
email: true,
|
||||||
});
|
role: true,
|
||||||
|
is_active: true,
|
||||||
|
last_login: true,
|
||||||
|
created_at: true,
|
||||||
|
updated_at: true,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
if (user && user.isActive) {
|
if (user?.is_active) {
|
||||||
req.user = user;
|
req.user = user;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
next();
|
next();
|
||||||
} catch (error) {
|
} catch {
|
||||||
// Continue without authentication for optional auth
|
// Continue without authentication for optional auth
|
||||||
next();
|
next();
|
||||||
}
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Middleware to check if TFA is required for sensitive operations
|
||||||
|
const requireTfaIfEnabled = async (req, res, next) => {
|
||||||
|
try {
|
||||||
|
// Check if user has TFA enabled
|
||||||
|
const user = await prisma.users.findUnique({
|
||||||
|
where: { id: req.user.id },
|
||||||
|
select: { tfa_enabled: true },
|
||||||
|
});
|
||||||
|
|
||||||
|
// If TFA is enabled and not bypassed, require TFA verification
|
||||||
|
if (user?.tfa_enabled && !req.tfa_bypassed) {
|
||||||
|
return res.status(403).json({
|
||||||
|
error: "Two-factor authentication required for this operation",
|
||||||
|
requires_tfa: true,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
next();
|
||||||
|
} catch (error) {
|
||||||
|
console.error("TFA requirement check error:", error);
|
||||||
|
return res.status(500).json({ error: "Authentication check failed" });
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
authenticateToken,
|
authenticateToken,
|
||||||
requireAdmin,
|
requireAdmin,
|
||||||
optionalAuth
|
optionalAuth,
|
||||||
|
requireTfaIfEnabled,
|
||||||
};
|
};
|
||||||
|
|||||||
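For orientation, a hedged sketch of how these middlewares compose on routes. The paths and handlers are invented for illustration; the ordering (authenticate first, then role and TFA checks) follows from what each middleware reads off req:

const express = require("express");
const {
  authenticateToken,
  requireAdmin,
  requireTfaIfEnabled,
} = require("./middleware/auth");

const app = express();

// Any authenticated user: req.user and req.tfa_bypassed are set upstream.
app.get("/api/v1/me", authenticateToken, (req, res) => {
  res.json({ user: req.user, tfa_bypassed: req.tfa_bypassed });
});

// Admin-only, and TFA-verified when the account has TFA enabled.
app.delete(
  "/api/v1/hosts/:id",
  authenticateToken,
  requireAdmin,
  requireTfaIfEnabled,
  (req, res) => res.json({ deleted: req.params.id }),
);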
backend/src/middleware/permissions.js
@@ -1,59 +1,61 @@
-const { PrismaClient } = require('@prisma/client');
-const prisma = new PrismaClient();
+const { getPrismaClient } = require("../config/prisma");
+const prisma = getPrismaClient();
 
 // Permission middleware factory
 const requirePermission = (permission) => {
   return async (req, res, next) => {
     try {
       // Get user's role permissions
-      const rolePermissions = await prisma.rolePermissions.findUnique({
-        where: { role: req.user.role }
+      const rolePermissions = await prisma.role_permissions.findUnique({
+        where: { role: req.user.role },
       });
 
       // If no specific permissions found, default to admin permissions (for backward compatibility)
       if (!rolePermissions) {
-        console.warn(`No permissions found for role: ${req.user.role}, defaulting to admin access`);
+        console.warn(
+          `No permissions found for role: ${req.user.role}, defaulting to admin access`,
+        );
         return next();
       }
 
       // Check if user has the required permission
       if (!rolePermissions[permission]) {
         return res.status(403).json({
-          error: 'Insufficient permissions',
-          message: `You don't have permission to ${permission.replace('can', '').toLowerCase()}`
+          error: "Insufficient permissions",
+          message: `You don't have permission to ${permission.replace("can_", "").replace("_", " ")}`,
         });
       }
 
       next();
     } catch (error) {
-      console.error('Permission check error:', error);
-      res.status(500).json({ error: 'Permission check failed' });
+      console.error("Permission check error:", error);
+      res.status(500).json({ error: "Permission check failed" });
     }
   };
 };
 
-// Specific permission middlewares
-const requireViewDashboard = requirePermission('canViewDashboard');
-const requireViewHosts = requirePermission('canViewHosts');
-const requireManageHosts = requirePermission('canManageHosts');
-const requireViewPackages = requirePermission('canViewPackages');
-const requireManagePackages = requirePermission('canManagePackages');
-const requireViewUsers = requirePermission('canViewUsers');
-const requireManageUsers = requirePermission('canManageUsers');
-const requireViewReports = requirePermission('canViewReports');
-const requireExportData = requirePermission('canExportData');
-const requireManageSettings = requirePermission('canManageSettings');
+// Specific permission middlewares - using snake_case field names
+const requireViewDashboard = requirePermission("can_view_dashboard");
+const requireViewHosts = requirePermission("can_view_hosts");
+const requireManageHosts = requirePermission("can_manage_hosts");
+const requireViewPackages = requirePermission("can_view_packages");
+const requireManagePackages = requirePermission("can_manage_packages");
+const requireViewUsers = requirePermission("can_view_users");
+const requireManageUsers = requirePermission("can_manage_users");
+const requireViewReports = requirePermission("can_view_reports");
+const requireExportData = requirePermission("can_export_data");
+const requireManageSettings = requirePermission("can_manage_settings");
 
 module.exports = {
   requirePermission,
   requireViewDashboard,
   requireViewHosts,
   requireManageHosts,
   requireViewPackages,
   requireManagePackages,
   requireViewUsers,
   requireManageUsers,
   requireViewReports,
   requireExportData,
-  requireManageSettings
+  requireManageSettings,
 };
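The factory resolves the user's role to a single role_permissions row and tests a boolean column named after the permission. A sketch of the assumed row shape and a guarded route (list_hosts and create_host are hypothetical handlers):

// Assumed shape of a role_permissions row for a non-admin role.
const example_row = {
  role: "operator",
  can_view_hosts: true,
  can_manage_hosts: false, // requireManageHosts would answer 403 for this role
};

router.get("/hosts", authenticateToken, requireViewHosts, list_hosts);
router.post("/hosts", authenticateToken, requireManageHosts, create_host);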
backend/src/routes/agentVersionRoutes.js (new file, 419 lines)
@@ -0,0 +1,419 @@
const express = require("express");
const router = express.Router();
const agentVersionService = require("../services/agentVersionService");
const { authenticateToken } = require("../middleware/auth");
const { requirePermission } = require("../middleware/permissions");

// Test GitHub API connectivity
router.get(
  "/test-github",
  authenticateToken,
  requirePermission("can_manage_settings"),
  async (_req, res) => {
    try {
      const axios = require("axios");
      const response = await axios.get(
        "https://api.github.com/repos/PatchMon/PatchMon-agent/releases",
        {
          timeout: 10000,
          headers: {
            "User-Agent": "PatchMon-Server/1.0",
            Accept: "application/vnd.github.v3+json",
          },
        },
      );

      res.json({
        success: true,
        status: response.status,
        releasesFound: response.data.length,
        latestRelease: response.data[0]?.tag_name || "No releases",
        rateLimitRemaining: response.headers["x-ratelimit-remaining"],
        rateLimitLimit: response.headers["x-ratelimit-limit"],
      });
    } catch (error) {
      console.error("❌ GitHub API test failed:", error.message);
      res.status(500).json({
        success: false,
        error: error.message,
        status: error.response?.status,
        statusText: error.response?.statusText,
        rateLimitRemaining: error.response?.headers["x-ratelimit-remaining"],
        rateLimitLimit: error.response?.headers["x-ratelimit-limit"],
      });
    }
  },
);

// Get current version information
router.get("/version", authenticateToken, async (_req, res) => {
  try {
    const versionInfo = await agentVersionService.getVersionInfo();
    console.log(
      "📊 Version info response:",
      JSON.stringify(versionInfo, null, 2),
    );
    res.json(versionInfo);
  } catch (error) {
    console.error("❌ Failed to get version info:", error.message);
    res.status(500).json({
      error: "Failed to get version information",
      details: error.message,
      status: "error",
    });
  }
});

// Refresh current version by executing agent binary
router.post(
  "/version/refresh",
  authenticateToken,
  requirePermission("can_manage_settings"),
  async (_req, res) => {
    try {
      console.log("🔄 Refreshing current agent version...");
      const currentVersion = await agentVersionService.refreshCurrentVersion();
      console.log("📊 Refreshed current version:", currentVersion);
      res.json({
        success: true,
        currentVersion: currentVersion,
        message: currentVersion
          ? `Current version refreshed: ${currentVersion}`
          : "No agent binary found",
      });
    } catch (error) {
      console.error("❌ Failed to refresh current version:", error.message);
      res.status(500).json({
        success: false,
        error: "Failed to refresh current version",
        details: error.message,
      });
    }
  },
);

// Download latest update
router.post(
  "/version/download",
  authenticateToken,
  requirePermission("can_manage_settings"),
  async (_req, res) => {
    try {
      console.log("🔄 Downloading latest agent update...");
      const downloadResult = await agentVersionService.downloadLatestUpdate();
      console.log(
        "📊 Download result:",
        JSON.stringify(downloadResult, null, 2),
      );
      res.json(downloadResult);
    } catch (error) {
      console.error("❌ Failed to download latest update:", error.message);
      res.status(500).json({
        success: false,
        error: "Failed to download latest update",
        details: error.message,
      });
    }
  },
);

// Check for updates
router.post(
  "/version/check",
  authenticateToken,
  requirePermission("can_manage_settings"),
  async (_req, res) => {
    try {
      console.log("🔄 Manual update check triggered");
      const updateInfo = await agentVersionService.checkForUpdates();
      console.log(
        "📊 Update check result:",
        JSON.stringify(updateInfo, null, 2),
      );
      res.json(updateInfo);
    } catch (error) {
      console.error("❌ Failed to check for updates:", error.message);
      res.status(500).json({ error: "Failed to check for updates" });
    }
  },
);

// Get available versions
router.get("/versions", authenticateToken, async (_req, res) => {
  try {
    const versions = await agentVersionService.getAvailableVersions();
    console.log(
      "📦 Available versions response:",
      JSON.stringify(versions, null, 2),
    );
    res.json({ versions });
  } catch (error) {
    console.error("❌ Failed to get available versions:", error.message);
    res.status(500).json({ error: "Failed to get available versions" });
  }
});

// Get binary information
router.get(
  "/binary/:version/:architecture",
  authenticateToken,
  async (req, res) => {
    try {
      const { version, architecture } = req.params;
      const binaryInfo = await agentVersionService.getBinaryInfo(
        version,
        architecture,
      );
      res.json(binaryInfo);
    } catch (error) {
      console.error("❌ Failed to get binary info:", error.message);
      res.status(404).json({ error: error.message });
    }
  },
);

// Download agent binary
router.get(
  "/download/:version/:architecture",
  authenticateToken,
  async (req, res) => {
    try {
      const { version, architecture } = req.params;

      // Validate architecture
      if (!agentVersionService.supportedArchitectures.includes(architecture)) {
        return res.status(400).json({ error: "Unsupported architecture" });
      }

      await agentVersionService.serveBinary(version, architecture, res);
    } catch (error) {
      console.error("❌ Failed to serve binary:", error.message);
      res.status(500).json({ error: "Failed to serve binary" });
    }
  },
);

// Get latest binary for architecture (for agents to query)
router.get("/latest/:architecture", async (req, res) => {
  try {
    const { architecture } = req.params;

    // Validate architecture
    if (!agentVersionService.supportedArchitectures.includes(architecture)) {
      return res.status(400).json({ error: "Unsupported architecture" });
    }

    const versionInfo = await agentVersionService.getVersionInfo();

    if (!versionInfo.latestVersion) {
      return res.status(404).json({ error: "No latest version available" });
    }

    const binaryInfo = await agentVersionService.getBinaryInfo(
      versionInfo.latestVersion,
      architecture,
    );

    res.json({
      version: binaryInfo.version,
      architecture: binaryInfo.architecture,
      size: binaryInfo.size,
      hash: binaryInfo.hash,
      downloadUrl: `/api/v1/agent/download/${binaryInfo.version}/${binaryInfo.architecture}`,
    });
  } catch (error) {
    console.error("❌ Failed to get latest binary info:", error.message);
    res.status(500).json({ error: "Failed to get latest binary information" });
  }
});

// Push update notification to specific agent
router.post(
  "/notify-update/:apiId",
  authenticateToken,
  requirePermission("admin"),
  async (req, res) => {
    try {
      const { apiId } = req.params;
      const { version, force = false } = req.body;

      const versionInfo = await agentVersionService.getVersionInfo();
      const targetVersion = version || versionInfo.latestVersion;

      if (!targetVersion) {
        return res
          .status(400)
          .json({ error: "No version specified or available" });
      }

      // Import WebSocket service
      const { pushUpdateNotification } = require("../services/agentWs");

      // Push update notification via WebSocket
      pushUpdateNotification(apiId, {
        version: targetVersion,
        force,
        downloadUrl: `/api/v1/agent/latest/${req.body.architecture || "linux-amd64"}`,
        message: `Update available: ${targetVersion}`,
      });

      res.json({
        success: true,
        message: `Update notification sent to agent ${apiId}`,
        version: targetVersion,
      });
    } catch (error) {
      console.error("❌ Failed to notify agent update:", error.message);
      res.status(500).json({ error: "Failed to notify agent update" });
    }
  },
);

// Push update notification to all agents
router.post(
  "/notify-update-all",
  authenticateToken,
  requirePermission("admin"),
  async (req, res) => {
    try {
      const { version, force = false } = req.body;

      const versionInfo = await agentVersionService.getVersionInfo();
      const targetVersion = version || versionInfo.latestVersion;

      if (!targetVersion) {
        return res
          .status(400)
          .json({ error: "No version specified or available" });
      }

      // Import WebSocket service
      const { pushUpdateNotificationToAll } = require("../services/agentWs");

      // Push update notification to all connected agents
      const result = await pushUpdateNotificationToAll({
        version: targetVersion,
        force,
        message: `Update available: ${targetVersion}`,
      });

      res.json({
        success: true,
        message: `Update notification sent to ${result.notifiedCount} agents`,
        version: targetVersion,
        notifiedCount: result.notifiedCount,
        failedCount: result.failedCount,
      });
    } catch (error) {
      console.error("❌ Failed to notify all agents update:", error.message);
      res.status(500).json({ error: "Failed to notify all agents update" });
    }
  },
);

// Check if specific agent needs update and push notification
router.post(
  "/check-update/:apiId",
  authenticateToken,
  requirePermission("can_manage_settings"),
  async (req, res) => {
    try {
      const { apiId } = req.params;
      const { version, force = false } = req.body;

      if (!version) {
        return res.status(400).json({
          success: false,
          error: "Agent version is required",
        });
      }

      console.log(
        `🔍 Checking update for agent ${apiId} (version: ${version})`,
      );
      const result = await agentVersionService.checkAndPushAgentUpdate(
        apiId,
        version,
        force,
      );
      console.log(
        "📊 Agent update check result:",
        JSON.stringify(result, null, 2),
      );

      res.json({
        success: true,
        ...result,
      });
    } catch (error) {
      console.error("❌ Failed to check agent update:", error.message);
      res.status(500).json({
        success: false,
        error: "Failed to check agent update",
        details: error.message,
      });
    }
  },
);

// Push updates to all connected agents
router.post(
  "/push-updates-all",
  authenticateToken,
  requirePermission("can_manage_settings"),
  async (req, res) => {
    try {
      const { force = false } = req.body;

      console.log(`🔄 Pushing updates to all agents (force: ${force})`);
      const result = await agentVersionService.checkAndPushUpdatesToAll(force);
      console.log("📊 Bulk update result:", JSON.stringify(result, null, 2));

      res.json(result);
    } catch (error) {
      console.error("❌ Failed to push updates to all agents:", error.message);
      res.status(500).json({
        success: false,
        error: "Failed to push updates to all agents",
        details: error.message,
      });
    }
  },
);

// Agent reports its version (for automatic update checking)
router.post("/report-version", authenticateToken, async (req, res) => {
  try {
    const { apiId, version } = req.body;

    if (!apiId || !version) {
      return res.status(400).json({
        success: false,
        error: "API ID and version are required",
      });
    }

    console.log(`📊 Agent ${apiId} reported version: ${version}`);

    // Check if agent needs update and push notification if needed
    const updateResult = await agentVersionService.checkAndPushAgentUpdate(
      apiId,
      version,
    );

    res.json({
      success: true,
      message: "Version reported successfully",
      updateCheck: updateResult,
    });
  } catch (error) {
    console.error("❌ Failed to process agent version report:", error.message);
    res.status(500).json({
      success: false,
      error: "Failed to process version report",
      details: error.message,
    });
  }
});

module.exports = router;
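For illustration, an agent-side client for two of these endpoints, assuming the router is mounted at /api/v1/agent (consistent with the downloadUrl values above) and Node 18+ for the global fetch; not part of the diff:

const BASE = process.env.PATCHMON_URL || "http://localhost:3001";

// GET /latest/:architecture is unauthenticated, so agents can poll it freely.
async function fetchLatestBinaryInfo(architecture) {
  const res = await fetch(`${BASE}/api/v1/agent/latest/${architecture}`);
  if (!res.ok) throw new Error(`latest lookup failed: ${res.status}`);
  return res.json(); // { version, architecture, size, hash, downloadUrl }
}

// POST /report-version is authenticated; the server may push an update back.
async function reportVersion(token, apiId, version) {
  const res = await fetch(`${BASE}/api/v1/agent/report-version`, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${token}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({ apiId, version }),
  });
  return res.json();
}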
(File diff suppressed because it is too large.)

backend/src/routes/autoEnrollmentRoutes.js (new file, 772 lines)
@@ -0,0 +1,772 @@
const express = require("express");
const { getPrismaClient } = require("../config/prisma");
const crypto = require("node:crypto");
const bcrypt = require("bcryptjs");
const { body, validationResult } = require("express-validator");
const { authenticateToken } = require("../middleware/auth");
const { requireManageSettings } = require("../middleware/permissions");
const { v4: uuidv4 } = require("uuid");

const router = express.Router();
const prisma = getPrismaClient();

// Generate auto-enrollment token credentials
const generate_auto_enrollment_token = () => {
  const token_key = `patchmon_ae_${crypto.randomBytes(16).toString("hex")}`;
  const token_secret = crypto.randomBytes(48).toString("hex");
  return { token_key, token_secret };
};

// Middleware to validate auto-enrollment token
const validate_auto_enrollment_token = async (req, res, next) => {
  try {
    const token_key = req.headers["x-auto-enrollment-key"];
    const token_secret = req.headers["x-auto-enrollment-secret"];

    if (!token_key || !token_secret) {
      return res
        .status(401)
        .json({ error: "Auto-enrollment credentials required" });
    }

    // Find token
    const token = await prisma.auto_enrollment_tokens.findUnique({
      where: { token_key: token_key },
    });

    if (!token || !token.is_active) {
      return res.status(401).json({ error: "Invalid or inactive token" });
    }

    // Verify secret (hashed)
    const is_valid = await bcrypt.compare(token_secret, token.token_secret);
    if (!is_valid) {
      return res.status(401).json({ error: "Invalid token secret" });
    }

    // Check expiration
    if (token.expires_at && new Date() > new Date(token.expires_at)) {
      return res.status(401).json({ error: "Token expired" });
    }

    // Check IP whitelist if configured
    if (token.allowed_ip_ranges && token.allowed_ip_ranges.length > 0) {
      const client_ip = req.ip || req.connection.remoteAddress;
      // Basic IP check - can be enhanced with CIDR matching
      const ip_allowed = token.allowed_ip_ranges.some((allowed_ip) => {
        return client_ip.includes(allowed_ip);
      });

      if (!ip_allowed) {
        console.warn(
          `Auto-enrollment attempt from unauthorized IP: ${client_ip}`,
        );
        return res
          .status(403)
          .json({ error: "IP address not authorized for this token" });
      }
    }

    // Check rate limit (hosts per day)
    const today = new Date().toISOString().split("T")[0];
    const token_reset_date = token.last_reset_date.toISOString().split("T")[0];

    if (token_reset_date !== today) {
      // Reset daily counter
      await prisma.auto_enrollment_tokens.update({
        where: { id: token.id },
        data: {
          hosts_created_today: 0,
          last_reset_date: new Date(),
          updated_at: new Date(),
        },
      });
      token.hosts_created_today = 0;
    }

    if (token.hosts_created_today >= token.max_hosts_per_day) {
      return res.status(429).json({
        error: "Rate limit exceeded",
        message: `Maximum ${token.max_hosts_per_day} hosts per day allowed for this token`,
      });
    }

    req.auto_enrollment_token = token;
    next();
  } catch (error) {
    console.error("Auto-enrollment token validation error:", error);
    res.status(500).json({ error: "Token validation failed" });
  }
};
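A caller passes the credentials in two headers. A hedged sketch of a request that would pass this middleware and reach the /enroll endpoint defined further down; the mount path /api/v1/auto-enrollment is an assumption, the header names are not:

async function enrollHost(base, key, secret) {
  const res = await fetch(`${base}/api/v1/auto-enrollment/enroll`, {
    method: "POST",
    headers: {
      "X-Auto-Enrollment-Key": key,
      "X-Auto-Enrollment-Secret": secret,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      friendly_name: "lxc-101", // example values
      machine_id: "3f2a9c61-example-machine-id",
    }),
  });
  return res.json(); // 201 -> { host: { api_id, api_key, ... } }
}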
// ========== ADMIN ENDPOINTS (Manage Tokens) ==========

// Create auto-enrollment token
router.post(
  "/tokens",
  authenticateToken,
  requireManageSettings,
  [
    body("token_name")
      .isLength({ min: 1, max: 255 })
      .withMessage("Token name is required (max 255 characters)"),
    body("allowed_ip_ranges")
      .optional()
      .isArray()
      .withMessage("Allowed IP ranges must be an array"),
    body("max_hosts_per_day")
      .optional()
      .isInt({ min: 1, max: 1000 })
      .withMessage("Max hosts per day must be between 1 and 1000"),
    body("default_host_group_id")
      .optional({ nullable: true, checkFalsy: true })
      .isString(),
    body("expires_at")
      .optional({ nullable: true, checkFalsy: true })
      .isISO8601()
      .withMessage("Invalid date format"),
  ],
  async (req, res) => {
    try {
      const errors = validationResult(req);
      if (!errors.isEmpty()) {
        return res.status(400).json({ errors: errors.array() });
      }

      const {
        token_name,
        allowed_ip_ranges = [],
        max_hosts_per_day = 100,
        default_host_group_id,
        expires_at,
        metadata = {},
      } = req.body;

      // Validate host group if provided
      if (default_host_group_id) {
        const host_group = await prisma.host_groups.findUnique({
          where: { id: default_host_group_id },
        });

        if (!host_group) {
          return res.status(400).json({ error: "Host group not found" });
        }
      }

      const { token_key, token_secret } = generate_auto_enrollment_token();
      const hashed_secret = await bcrypt.hash(token_secret, 10);

      const token = await prisma.auto_enrollment_tokens.create({
        data: {
          id: uuidv4(),
          token_name,
          token_key: token_key,
          token_secret: hashed_secret,
          created_by_user_id: req.user.id,
          allowed_ip_ranges,
          max_hosts_per_day,
          default_host_group_id: default_host_group_id || null,
          expires_at: expires_at ? new Date(expires_at) : null,
          metadata: { integration_type: "proxmox-lxc", ...metadata },
          updated_at: new Date(),
        },
        include: {
          host_groups: {
            select: {
              id: true,
              name: true,
              color: true,
            },
          },
          users: {
            select: {
              id: true,
              username: true,
              first_name: true,
              last_name: true,
            },
          },
        },
      });

      // Return unhashed secret ONLY once (like API keys)
      res.status(201).json({
        message: "Auto-enrollment token created successfully",
        token: {
          id: token.id,
          token_name: token.token_name,
          token_key: token_key,
          token_secret: token_secret, // ONLY returned here!
          max_hosts_per_day: token.max_hosts_per_day,
          default_host_group: token.host_groups,
          created_by: token.users,
          expires_at: token.expires_at,
        },
        warning: "⚠️ Save the token_secret now - it cannot be retrieved later!",
      });
    } catch (error) {
      console.error("Create auto-enrollment token error:", error);
      res.status(500).json({ error: "Failed to create token" });
    }
  },
);

// List auto-enrollment tokens
router.get(
  "/tokens",
  authenticateToken,
  requireManageSettings,
  async (_req, res) => {
    try {
      const tokens = await prisma.auto_enrollment_tokens.findMany({
        select: {
          id: true,
          token_name: true,
          token_key: true,
          is_active: true,
          allowed_ip_ranges: true,
          max_hosts_per_day: true,
          hosts_created_today: true,
          last_used_at: true,
          expires_at: true,
          created_at: true,
          default_host_group_id: true,
          metadata: true,
          host_groups: {
            select: {
              id: true,
              name: true,
              color: true,
            },
          },
          users: {
            select: {
              id: true,
              username: true,
              first_name: true,
              last_name: true,
            },
          },
        },
        orderBy: { created_at: "desc" },
      });

      res.json(tokens);
    } catch (error) {
      console.error("List auto-enrollment tokens error:", error);
      res.status(500).json({ error: "Failed to list tokens" });
    }
  },
);

// Get single token details
router.get(
  "/tokens/:tokenId",
  authenticateToken,
  requireManageSettings,
  async (req, res) => {
    try {
      const { tokenId } = req.params;

      const token = await prisma.auto_enrollment_tokens.findUnique({
        where: { id: tokenId },
        include: {
          host_groups: {
            select: {
              id: true,
              name: true,
              color: true,
            },
          },
          users: {
            select: {
              id: true,
              username: true,
              first_name: true,
              last_name: true,
            },
          },
        },
      });

      if (!token) {
        return res.status(404).json({ error: "Token not found" });
      }

      // Don't include the secret in response
      const { token_secret: _secret, ...token_data } = token;

      res.json(token_data);
    } catch (error) {
      console.error("Get token error:", error);
      res.status(500).json({ error: "Failed to get token" });
    }
  },
);

// Update token (toggle active state, update limits, etc.)
router.patch(
  "/tokens/:tokenId",
  authenticateToken,
  requireManageSettings,
  [
    body("is_active").optional().isBoolean(),
    body("max_hosts_per_day").optional().isInt({ min: 1, max: 1000 }),
    body("allowed_ip_ranges").optional().isArray(),
    body("expires_at").optional().isISO8601(),
  ],
  async (req, res) => {
    try {
      const errors = validationResult(req);
      if (!errors.isEmpty()) {
        return res.status(400).json({ errors: errors.array() });
      }

      const { tokenId } = req.params;
      const update_data = { updated_at: new Date() };

      if (req.body.is_active !== undefined)
        update_data.is_active = req.body.is_active;
      if (req.body.max_hosts_per_day !== undefined)
        update_data.max_hosts_per_day = req.body.max_hosts_per_day;
      if (req.body.allowed_ip_ranges !== undefined)
        update_data.allowed_ip_ranges = req.body.allowed_ip_ranges;
      if (req.body.expires_at !== undefined)
        update_data.expires_at = new Date(req.body.expires_at);

      const token = await prisma.auto_enrollment_tokens.update({
        where: { id: tokenId },
        data: update_data,
        include: {
          host_groups: true,
          users: {
            select: {
              username: true,
              first_name: true,
              last_name: true,
            },
          },
        },
      });

      const { token_secret: _secret, ...token_data } = token;

      res.json({
        message: "Token updated successfully",
        token: token_data,
      });
    } catch (error) {
      console.error("Update token error:", error);
      res.status(500).json({ error: "Failed to update token" });
    }
  },
);

// Delete token
router.delete(
  "/tokens/:tokenId",
  authenticateToken,
  requireManageSettings,
  async (req, res) => {
    try {
      const { tokenId } = req.params;

      const token = await prisma.auto_enrollment_tokens.findUnique({
        where: { id: tokenId },
      });

      if (!token) {
        return res.status(404).json({ error: "Token not found" });
      }

      await prisma.auto_enrollment_tokens.delete({
        where: { id: tokenId },
      });

      res.json({
        message: "Auto-enrollment token deleted successfully",
        deleted_token: {
          id: token.id,
          token_name: token.token_name,
        },
      });
    } catch (error) {
      console.error("Delete token error:", error);
      res.status(500).json({ error: "Failed to delete token" });
    }
  },
);
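An admin would mint a token roughly like this (same assumed mount path as above; the JWT comes from a normal login). Per the warning in the create handler, the secret is only present in this one response:

async function createEnrollmentToken(base, jwt) {
  const res = await fetch(`${base}/api/v1/auto-enrollment/tokens`, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${jwt}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      token_name: "proxmox-cluster-1", // example values
      max_hosts_per_day: 50,
      allowed_ip_ranges: ["10.0.0."],
    }),
  });
  const { token } = await res.json();
  return { key: token.token_key, secret: token.token_secret };
}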
// ========== AUTO-ENROLLMENT ENDPOINTS (Used by Scripts) ==========
// Future integrations can follow this pattern:
// - /proxmox-lxc - Proxmox LXC containers
// - /vmware-esxi - VMware ESXi VMs
// - /docker - Docker containers
// - /kubernetes - Kubernetes pods
// - /aws-ec2 - AWS EC2 instances

// Serve the Proxmox LXC enrollment script with credentials injected
router.get("/proxmox-lxc", async (req, res) => {
  try {
    // Get token from query params
    const token_key = req.query.token_key;
    const token_secret = req.query.token_secret;

    if (!token_key || !token_secret) {
      return res
        .status(401)
        .json({ error: "Token key and secret required as query parameters" });
    }

    // Validate token
    const token = await prisma.auto_enrollment_tokens.findUnique({
      where: { token_key: token_key },
    });

    if (!token || !token.is_active) {
      return res.status(401).json({ error: "Invalid or inactive token" });
    }

    // Verify secret
    const is_valid = await bcrypt.compare(token_secret, token.token_secret);
    if (!is_valid) {
      return res.status(401).json({ error: "Invalid token secret" });
    }

    // Check expiration
    if (token.expires_at && new Date() > new Date(token.expires_at)) {
      return res.status(401).json({ error: "Token expired" });
    }

    const fs = require("node:fs");
    const path = require("node:path");

    const script_path = path.join(
      __dirname,
      "../../../agents/proxmox_auto_enroll.sh",
    );

    if (!fs.existsSync(script_path)) {
      return res
        .status(404)
        .json({ error: "Proxmox enrollment script not found" });
    }

    let script = fs.readFileSync(script_path, "utf8");

    // Convert Windows line endings to Unix line endings
    script = script.replace(/\r\n/g, "\n").replace(/\r/g, "\n");

    // Get the configured server URL from settings
    let server_url = "http://localhost:3001";
    try {
      const settings = await prisma.settings.findFirst();
      if (settings?.server_url) {
        server_url = settings.server_url;
      }
    } catch (settings_error) {
      console.warn(
        "Could not fetch settings, using default server URL:",
        settings_error.message,
      );
    }

    // Determine curl flags dynamically from settings
    let curl_flags = "-s";
    try {
      const settings = await prisma.settings.findFirst();
      if (settings && settings.ignore_ssl_self_signed === true) {
        curl_flags = "-sk";
      }
    } catch (_) {}

    // Check for --force parameter
    const force_install = req.query.force === "true" || req.query.force === "1";

    // Inject the token credentials, server URL, curl flags, and force flag into the script
    const env_vars = `#!/bin/bash
# PatchMon Auto-Enrollment Configuration (Auto-generated)
export PATCHMON_URL="${server_url}"
export AUTO_ENROLLMENT_KEY="${token.token_key}"
export AUTO_ENROLLMENT_SECRET="${token_secret}"
export CURL_FLAGS="${curl_flags}"
export FORCE_INSTALL="${force_install ? "true" : "false"}"

`;

    // Remove the shebang and configuration section from the original script
    script = script.replace(/^#!/, "#");

    // Remove the configuration section (between # ===== CONFIGURATION ===== and the next # =====)
    script = script.replace(
      /# ===== CONFIGURATION =====[\s\S]*?(?=# ===== COLOR OUTPUT =====)/,
      "",
    );

    script = env_vars + script;

    res.setHeader("Content-Type", "text/plain");
    res.setHeader(
      "Content-Disposition",
      'inline; filename="proxmox_auto_enroll.sh"',
    );
    res.send(script);
  } catch (error) {
    console.error("Proxmox script serve error:", error);
    res.status(500).json({ error: "Failed to serve enrollment script" });
  }
});

// Create host via auto-enrollment
router.post(
  "/enroll",
  validate_auto_enrollment_token,
  [
    body("friendly_name")
      .isLength({ min: 1, max: 255 })
      .withMessage("Friendly name is required"),
    body("machine_id")
      .isLength({ min: 1, max: 255 })
      .withMessage("Machine ID is required"),
    body("metadata").optional().isObject(),
  ],
  async (req, res) => {
    try {
      const errors = validationResult(req);
      if (!errors.isEmpty()) {
        return res.status(400).json({ errors: errors.array() });
      }

      const { friendly_name, machine_id } = req.body;

      // Generate host API credentials
      const api_id = `patchmon_${crypto.randomBytes(8).toString("hex")}`;
      const api_key = crypto.randomBytes(32).toString("hex");

      // Check if host already exists by machine_id (not hostname)
      const existing_host = await prisma.hosts.findUnique({
        where: { machine_id },
      });

      if (existing_host) {
        return res.status(409).json({
          error: "Host already exists",
          host_id: existing_host.id,
          api_id: existing_host.api_id,
          machine_id: existing_host.machine_id,
          friendly_name: existing_host.friendly_name,
          message:
            "This machine is already enrolled in PatchMon (matched by machine ID)",
        });
      }

      // Create host
      const host = await prisma.hosts.create({
        data: {
          id: uuidv4(),
          machine_id,
          friendly_name,
          os_type: "unknown",
          os_version: "unknown",
          api_id: api_id,
          api_key: api_key,
          status: "pending",
          notes: `Auto-enrolled via ${req.auto_enrollment_token.token_name} on ${new Date().toISOString()}`,
          updated_at: new Date(),
        },
      });

      // Create host group membership if default host group is specified
      let hostGroupMembership = null;
      if (req.auto_enrollment_token.default_host_group_id) {
        hostGroupMembership = await prisma.host_group_memberships.create({
          data: {
            id: uuidv4(),
            host_id: host.id,
            host_group_id: req.auto_enrollment_token.default_host_group_id,
            created_at: new Date(),
          },
        });
      }

      // Update token usage stats
      await prisma.auto_enrollment_tokens.update({
        where: { id: req.auto_enrollment_token.id },
        data: {
          hosts_created_today: { increment: 1 },
          last_used_at: new Date(),
          updated_at: new Date(),
        },
      });

      console.log(
        `Auto-enrolled host: ${friendly_name} (${host.id}) via token: ${req.auto_enrollment_token.token_name}`,
      );

      // Get host group details for response if membership was created
      let hostGroup = null;
      if (hostGroupMembership) {
        hostGroup = await prisma.host_groups.findUnique({
          where: { id: req.auto_enrollment_token.default_host_group_id },
          select: {
            id: true,
            name: true,
            color: true,
          },
        });
      }

      res.status(201).json({
        message: "Host enrolled successfully",
        host: {
          id: host.id,
          friendly_name: host.friendly_name,
          api_id: api_id,
          api_key: api_key,
          host_group: hostGroup,
          status: host.status,
        },
      });
    } catch (error) {
      console.error("Auto-enrollment error:", error);
      res.status(500).json({ error: "Failed to enroll host" });
    }
  },
);

// Bulk enroll multiple hosts at once
router.post(
  "/enroll/bulk",
  validate_auto_enrollment_token,
  [
    body("hosts")
      .isArray({ min: 1, max: 50 })
      .withMessage("Hosts array required (max 50)"),
    body("hosts.*.friendly_name")
      .isLength({ min: 1 })
      .withMessage("Each host needs a friendly_name"),
  ],
  async (req, res) => {
    try {
      const errors = validationResult(req);
      if (!errors.isEmpty()) {
        return res.status(400).json({ errors: errors.array() });
      }

      const { hosts } = req.body;

      // Check rate limit
      const remaining_quota =
        req.auto_enrollment_token.max_hosts_per_day -
        req.auto_enrollment_token.hosts_created_today;

      if (hosts.length > remaining_quota) {
        return res.status(429).json({
          error: "Rate limit exceeded",
          message: `Only ${remaining_quota} hosts remaining in daily quota`,
        });
      }

      const results = {
        success: [],
        failed: [],
        skipped: [],
      };

      for (const host_data of hosts) {
        try {
          const { friendly_name, machine_id } = host_data;

          if (!machine_id) {
            results.failed.push({
              friendly_name,
              error: "Machine ID is required",
            });
            continue;
          }

          // Check if host already exists by machine_id
          const existing_host = await prisma.hosts.findUnique({
            where: { machine_id },
          });

          if (existing_host) {
            results.skipped.push({
              friendly_name,
              machine_id,
              reason: "Machine already enrolled",
              api_id: existing_host.api_id,
            });
            continue;
          }

          // Generate credentials
          const api_id = `patchmon_${crypto.randomBytes(8).toString("hex")}`;
          const api_key = crypto.randomBytes(32).toString("hex");

          // Create host
          const host = await prisma.hosts.create({
            data: {
              id: uuidv4(),
              machine_id,
              friendly_name,
              os_type: "unknown",
              os_version: "unknown",
              api_id: api_id,
              api_key: api_key,
              status: "pending",
              notes: `Auto-enrolled via ${req.auto_enrollment_token.token_name} on ${new Date().toISOString()}`,
              updated_at: new Date(),
            },
          });

          // Create host group membership if default host group is specified
          if (req.auto_enrollment_token.default_host_group_id) {
            await prisma.host_group_memberships.create({
              data: {
                id: uuidv4(),
                host_id: host.id,
                host_group_id: req.auto_enrollment_token.default_host_group_id,
                created_at: new Date(),
              },
            });
          }

          results.success.push({
            id: host.id,
            friendly_name: host.friendly_name,
            api_id: api_id,
            api_key: api_key,
          });
        } catch (error) {
          results.failed.push({
            friendly_name: host_data.friendly_name,
            error: error.message,
          });
        }
      }

      // Update token usage stats
      if (results.success.length > 0) {
        await prisma.auto_enrollment_tokens.update({
          where: { id: req.auto_enrollment_token.id },
          data: {
            hosts_created_today: { increment: results.success.length },
            last_used_at: new Date(),
            updated_at: new Date(),
          },
        });
      }

      res.status(201).json({
        message: `Bulk enrollment completed: ${results.success.length} succeeded, ${results.failed.length} failed, ${results.skipped.length} skipped`,
        results,
      });
    } catch (error) {
      console.error("Bulk auto-enrollment error:", error);
      res.status(500).json({ error: "Failed to bulk enroll hosts" });
    }
  },
);

module.exports = router;
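And the bulk variant, which accepts up to 50 hosts per request and is bounded by the same daily quota (same assumed mount path and headers as the earlier sketches):

async function bulkEnroll(base, key, secret, hosts) {
  // hosts: [{ friendly_name, machine_id }, ...]
  const res = await fetch(`${base}/api/v1/auto-enrollment/enroll/bulk`, {
    method: "POST",
    headers: {
      "X-Auto-Enrollment-Key": key,
      "X-Auto-Enrollment-Secret": secret,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({ hosts }),
  });
  return res.json(); // { results: { success, failed, skipped } }
}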
backend/src/routes/automationRoutes.js (new file, 505 lines)
@@ -0,0 +1,505 @@
const express = require("express");
const { queueManager, QUEUE_NAMES } = require("../services/automation");
const { getConnectedApiIds } = require("../services/agentWs");
const { authenticateToken } = require("../middleware/auth");

const router = express.Router();

// Get all queue statistics
router.get("/stats", authenticateToken, async (_req, res) => {
  try {
    const stats = await queueManager.getAllQueueStats();
    res.json({
      success: true,
      data: stats,
    });
  } catch (error) {
    console.error("Error fetching queue stats:", error);
    res.status(500).json({
      success: false,
      error: "Failed to fetch queue statistics",
    });
  }
});

// Get specific queue statistics
router.get("/stats/:queueName", authenticateToken, async (req, res) => {
  try {
    const { queueName } = req.params;

    if (!Object.values(QUEUE_NAMES).includes(queueName)) {
      return res.status(400).json({
        success: false,
        error: "Invalid queue name",
      });
    }

    const stats = await queueManager.getQueueStats(queueName);
    res.json({
      success: true,
      data: stats,
    });
  } catch (error) {
    console.error("Error fetching queue stats:", error);
    res.status(500).json({
      success: false,
      error: "Failed to fetch queue statistics",
    });
  }
});

// Get recent jobs for a queue
router.get("/jobs/:queueName", authenticateToken, async (req, res) => {
  try {
    const { queueName } = req.params;
    const { limit = 10 } = req.query;

    if (!Object.values(QUEUE_NAMES).includes(queueName)) {
      return res.status(400).json({
        success: false,
        error: "Invalid queue name",
      });
    }

    const jobs = await queueManager.getRecentJobs(
      queueName,
      parseInt(limit, 10),
    );

    // Format jobs for frontend
    const formattedJobs = jobs.map((job) => ({
      id: job.id,
      name: job.name,
      status: job.finishedOn
        ? job.failedReason
          ? "failed"
          : "completed"
        : "active",
      progress: job.progress,
      data: job.data,
      returnvalue: job.returnvalue,
      failedReason: job.failedReason,
      processedOn: job.processedOn,
      finishedOn: job.finishedOn,
      createdAt: new Date(job.timestamp),
      attemptsMade: job.attemptsMade,
      delay: job.delay,
    }));

    res.json({
      success: true,
      data: formattedJobs,
    });
  } catch (error) {
    console.error("Error fetching recent jobs:", error);
    res.status(500).json({
      success: false,
      error: "Failed to fetch recent jobs",
    });
  }
});

// Trigger manual GitHub update check
router.post("/trigger/github-update", authenticateToken, async (_req, res) => {
  try {
    const job = await queueManager.triggerGitHubUpdateCheck();
    res.json({
      success: true,
      data: {
        jobId: job.id,
        message: "GitHub update check triggered successfully",
      },
    });
  } catch (error) {
    console.error("Error triggering GitHub update check:", error);
    res.status(500).json({
      success: false,
      error: "Failed to trigger GitHub update check",
    });
  }
});

// Trigger manual session cleanup
router.post(
  "/trigger/session-cleanup",
  authenticateToken,
  async (_req, res) => {
    try {
      const job = await queueManager.triggerSessionCleanup();
      res.json({
        success: true,
        data: {
          jobId: job.id,
          message: "Session cleanup triggered successfully",
        },
      });
    } catch (error) {
      console.error("Error triggering session cleanup:", error);
      res.status(500).json({
        success: false,
        error: "Failed to trigger session cleanup",
      });
    }
  },
);

// Trigger Agent Collection: enqueue report_now for connected agents only
router.post(
  "/trigger/agent-collection",
  authenticateToken,
  async (_req, res) => {
    try {
      const queue = queueManager.queues[QUEUE_NAMES.AGENT_COMMANDS];
      const apiIds = getConnectedApiIds();
      if (!apiIds || apiIds.length === 0) {
        return res.json({ success: true, data: { enqueued: 0 } });
      }
      const jobs = apiIds.map((apiId) => ({
        name: "report_now",
        data: { api_id: apiId, type: "report_now" },
        opts: { attempts: 3, backoff: { type: "fixed", delay: 2000 } },
      }));
      await queue.addBulk(jobs);
      res.json({ success: true, data: { enqueued: jobs.length } });
    } catch (error) {
      console.error("Error triggering agent collection:", error);
      res
        .status(500)
        .json({ success: false, error: "Failed to trigger agent collection" });
    }
  },
);
|
|
||||||
|
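The agent-collection trigger above enqueues one report_now job per connected agent via addBulk, each entry carrying its own retry options. For a single agent, the equivalent BullMQ call would be:

// Same retry policy as the addBulk entries above: up to 3 attempts,
// waiting a fixed 2 seconds between retries.
await queue.add(
  "report_now",
  { api_id: apiId, type: "report_now" },
  { attempts: 3, backoff: { type: "fixed", delay: 2000 } },
);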
// Trigger manual orphaned repo cleanup
router.post(
  "/trigger/orphaned-repo-cleanup",
  authenticateToken,
  async (_req, res) => {
    try {
      const job = await queueManager.triggerOrphanedRepoCleanup();
      res.json({
        success: true,
        data: {
          jobId: job.id,
          message: "Orphaned repository cleanup triggered successfully",
        },
      });
    } catch (error) {
      console.error("Error triggering orphaned repository cleanup:", error);
      res.status(500).json({
        success: false,
        error: "Failed to trigger orphaned repository cleanup",
      });
    }
  },
);

// Trigger manual orphaned package cleanup
router.post(
  "/trigger/orphaned-package-cleanup",
  authenticateToken,
  async (_req, res) => {
    try {
      const job = await queueManager.triggerOrphanedPackageCleanup();
      res.json({
        success: true,
        data: {
          jobId: job.id,
          message: "Orphaned package cleanup triggered successfully",
        },
      });
    } catch (error) {
      console.error("Error triggering orphaned package cleanup:", error);
      res.status(500).json({
        success: false,
        error: "Failed to trigger orphaned package cleanup",
      });
    }
  },
);

// Trigger manual Docker inventory cleanup
router.post(
  "/trigger/docker-inventory-cleanup",
  authenticateToken,
  async (_req, res) => {
    try {
      const job = await queueManager.triggerDockerInventoryCleanup();
      res.json({
        success: true,
        data: {
          jobId: job.id,
          message: "Docker inventory cleanup triggered successfully",
        },
      });
    } catch (error) {
      console.error("Error triggering Docker inventory cleanup:", error);
      res.status(500).json({
        success: false,
        error: "Failed to trigger Docker inventory cleanup",
      });
    }
  },
);

// Trigger manual system statistics collection
router.post(
  "/trigger/system-statistics",
  authenticateToken,
  async (_req, res) => {
    try {
      const job = await queueManager.triggerSystemStatistics();
      res.json({
        success: true,
        data: {
          jobId: job.id,
          message: "System statistics collection triggered successfully",
        },
      });
    } catch (error) {
      console.error("Error triggering system statistics collection:", error);
      res.status(500).json({
        success: false,
        error: "Failed to trigger system statistics collection",
      });
    }
  },
);

// Get queue health status
router.get("/health", authenticateToken, async (_req, res) => {
  try {
    const stats = await queueManager.getAllQueueStats();
    const totalJobs = Object.values(stats).reduce((sum, queueStats) => {
      return sum + queueStats.waiting + queueStats.active + queueStats.failed;
    }, 0);

    const health = {
      status: "healthy",
      totalJobs,
      queues: Object.keys(stats).length,
      timestamp: new Date().toISOString(),
    };

    // Check for unhealthy conditions
    if (totalJobs > 1000) {
      health.status = "warning";
      health.message = "High number of queued jobs";
    }

    const failedJobs = Object.values(stats).reduce((sum, queueStats) => {
      return sum + queueStats.failed;
    }, 0);

    if (failedJobs > 10) {
      health.status = "error";
      health.message = "High number of failed jobs";
    }

    res.json({
      success: true,
      data: health,
    });
  } catch (error) {
    console.error("Error checking queue health:", error);
    res.status(500).json({
      success: false,
      error: "Failed to check queue health",
    });
  }
});
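The health endpoint degrades its status in two steps: more than 1,000 waiting, active, and failed jobs combined yields "warning", and more than 10 failed jobs yields "error" (the failed-jobs check runs second, so it wins when both trip). An illustrative payload:

{
  "success": true,
  "data": {
    "status": "warning",
    "totalJobs": 1204,
    "queues": 7,
    "message": "High number of queued jobs",
    "timestamp": "2025-01-01T00:00:00.000Z"
  }
}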
// Get automation overview (for dashboard cards)
router.get("/overview", authenticateToken, async (_req, res) => {
  try {
    const stats = await queueManager.getAllQueueStats();
    const { getSettings } = require("../services/settingsService");
    const settings = await getSettings();

    // Get recent jobs for each queue to show last run times
    const recentJobs = await Promise.all([
      queueManager.getRecentJobs(QUEUE_NAMES.GITHUB_UPDATE_CHECK, 1),
      queueManager.getRecentJobs(QUEUE_NAMES.SESSION_CLEANUP, 1),
      queueManager.getRecentJobs(QUEUE_NAMES.ORPHANED_REPO_CLEANUP, 1),
      queueManager.getRecentJobs(QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP, 1),
      queueManager.getRecentJobs(QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP, 1),
      queueManager.getRecentJobs(QUEUE_NAMES.AGENT_COMMANDS, 1),
      queueManager.getRecentJobs(QUEUE_NAMES.SYSTEM_STATISTICS, 1),
    ]);

    // Calculate overview metrics
    const overview = {
      scheduledTasks:
        stats[QUEUE_NAMES.GITHUB_UPDATE_CHECK].delayed +
        stats[QUEUE_NAMES.SESSION_CLEANUP].delayed +
        stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].delayed +
        stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].delayed +
        stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].delayed +
        stats[QUEUE_NAMES.SYSTEM_STATISTICS].delayed,

      runningTasks:
        stats[QUEUE_NAMES.GITHUB_UPDATE_CHECK].active +
        stats[QUEUE_NAMES.SESSION_CLEANUP].active +
        stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].active +
        stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].active +
        stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].active +
        stats[QUEUE_NAMES.SYSTEM_STATISTICS].active,

      failedTasks:
        stats[QUEUE_NAMES.GITHUB_UPDATE_CHECK].failed +
        stats[QUEUE_NAMES.SESSION_CLEANUP].failed +
        stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].failed +
        stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].failed +
        stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].failed +
        stats[QUEUE_NAMES.SYSTEM_STATISTICS].failed,

      totalAutomations: Object.values(stats).reduce((sum, queueStats) => {
        return (
          sum +
          queueStats.completed +
          queueStats.failed +
          queueStats.active +
          queueStats.waiting +
          queueStats.delayed
        );
      }, 0),

      // Automation details with last run times
      automations: [
        {
          name: "GitHub Update Check",
          queue: QUEUE_NAMES.GITHUB_UPDATE_CHECK,
          description: "Checks for new PatchMon releases",
          schedule: "Daily at midnight",
          lastRun: recentJobs[0][0]?.finishedOn
            ? new Date(recentJobs[0][0].finishedOn).toLocaleString()
            : "Never",
          lastRunTimestamp: recentJobs[0][0]?.finishedOn || 0,
          status: recentJobs[0][0]?.failedReason
            ? "Failed"
            : recentJobs[0][0]
              ? "Success"
              : "Never run",
          stats: stats[QUEUE_NAMES.GITHUB_UPDATE_CHECK],
        },
        {
          name: "Session Cleanup",
          queue: QUEUE_NAMES.SESSION_CLEANUP,
          description: "Cleans up expired user sessions",
          schedule: "Every hour",
          lastRun: recentJobs[1][0]?.finishedOn
            ? new Date(recentJobs[1][0].finishedOn).toLocaleString()
            : "Never",
          lastRunTimestamp: recentJobs[1][0]?.finishedOn || 0,
          status: recentJobs[1][0]?.failedReason
            ? "Failed"
            : recentJobs[1][0]
              ? "Success"
              : "Never run",
          stats: stats[QUEUE_NAMES.SESSION_CLEANUP],
        },
        {
          name: "Orphaned Repo Cleanup",
          queue: QUEUE_NAMES.ORPHANED_REPO_CLEANUP,
          description: "Removes repositories with no associated hosts",
          schedule: "Daily at 2 AM",
          lastRun: recentJobs[2][0]?.finishedOn
            ? new Date(recentJobs[2][0].finishedOn).toLocaleString()
            : "Never",
          lastRunTimestamp: recentJobs[2][0]?.finishedOn || 0,
          status: recentJobs[2][0]?.failedReason
            ? "Failed"
            : recentJobs[2][0]
              ? "Success"
              : "Never run",
          stats: stats[QUEUE_NAMES.ORPHANED_REPO_CLEANUP],
        },
        {
          name: "Orphaned Package Cleanup",
          queue: QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP,
          description: "Removes packages with no associated hosts",
          schedule: "Daily at 3 AM",
          lastRun: recentJobs[3][0]?.finishedOn
            ? new Date(recentJobs[3][0].finishedOn).toLocaleString()
            : "Never",
          lastRunTimestamp: recentJobs[3][0]?.finishedOn || 0,
          status: recentJobs[3][0]?.failedReason
            ? "Failed"
            : recentJobs[3][0]
              ? "Success"
              : "Never run",
          stats: stats[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP],
        },
        {
          name: "Docker Inventory Cleanup",
          queue: QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP,
          description:
            "Removes Docker containers and images for non-existent hosts",
          schedule: "Daily at 4 AM",
          lastRun: recentJobs[4][0]?.finishedOn
            ? new Date(recentJobs[4][0].finishedOn).toLocaleString()
            : "Never",
          lastRunTimestamp: recentJobs[4][0]?.finishedOn || 0,
          status: recentJobs[4][0]?.failedReason
            ? "Failed"
            : recentJobs[4][0]
              ? "Success"
              : "Never run",
          stats: stats[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP],
        },
        {
          name: "Collect Host Statistics",
          queue: QUEUE_NAMES.AGENT_COMMANDS,
          description: "Collects package statistics from connected agents only",
          schedule: `Every ${settings.update_interval} minutes (Agent-driven)`,
          lastRun: recentJobs[5][0]?.finishedOn
            ? new Date(recentJobs[5][0].finishedOn).toLocaleString()
            : "Never",
          lastRunTimestamp: recentJobs[5][0]?.finishedOn || 0,
          status: recentJobs[5][0]?.failedReason
            ? "Failed"
            : recentJobs[5][0]
              ? "Success"
              : "Never run",
          stats: stats[QUEUE_NAMES.AGENT_COMMANDS],
        },
        {
          name: "System Statistics Collection",
          queue: QUEUE_NAMES.SYSTEM_STATISTICS,
          description: "Collects aggregated system-wide package statistics",
          schedule: "Every 30 minutes",
          lastRun: recentJobs[6][0]?.finishedOn
            ? new Date(recentJobs[6][0].finishedOn).toLocaleString()
            : "Never",
          lastRunTimestamp: recentJobs[6][0]?.finishedOn || 0,
          status: recentJobs[6][0]?.failedReason
            ? "Failed"
            : recentJobs[6][0]
              ? "Success"
              : "Never run",
          stats: stats[QUEUE_NAMES.SYSTEM_STATISTICS],
        },
      ].sort((a, b) => {
        // Sort by last run timestamp (most recent first)
        // If both have never run (timestamp 0), maintain original order
        if (a.lastRunTimestamp === 0 && b.lastRunTimestamp === 0) return 0;
        if (a.lastRunTimestamp === 0) return 1; // Never run goes to bottom
        if (b.lastRunTimestamp === 0) return -1; // Never run goes to bottom
        return b.lastRunTimestamp - a.lastRunTimestamp; // Most recent first
      }),
    };

    res.json({
      success: true,
      data: overview,
    });
  } catch (error) {
    console.error("Error fetching automation overview:", error);
    res.status(500).json({
      success: false,
      error: "Failed to fetch automation overview",
    });
  }
});

module.exports = router;
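A sketch of how a dashboard could consume the overview (the mount path and bearer auth are assumptions):

const res = await fetch("/api/v1/automation/overview", {
  headers: { Authorization: `Bearer ${token}` }, // token is a placeholder
});
const { data } = await res.json();
// Automations arrive pre-sorted, most recently run first; never-run entries sort last.
for (const a of data.automations) {
  console.log(`${a.name}: ${a.status} (last run ${a.lastRun})`);
}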
@@ -1,89 +1,379 @@
-const express = require('express');
-const { body, validationResult } = require('express-validator');
-const { PrismaClient } = require('@prisma/client');
-const { authenticateToken } = require('../middleware/auth');
+const express = require("express");
+const { body, validationResult } = require("express-validator");
+const { getPrismaClient } = require("../config/prisma");
+const { authenticateToken } = require("../middleware/auth");
+const { v4: uuidv4 } = require("uuid");
 
 const router = express.Router();
-const prisma = new PrismaClient();
+const prisma = getPrismaClient();
+
+// Helper function to get user permissions based on role
+async function getUserPermissions(userRole) {
+  try {
+    const permissions = await prisma.role_permissions.findUnique({
+      where: { role: userRole },
+    });
+
+    // If no specific permissions found, return default admin permissions (for backward compatibility)
+    if (!permissions) {
+      console.warn(
+        `No permissions found for role: ${userRole}, defaulting to admin access`,
+      );
+      return {
+        can_view_dashboard: true,
+        can_view_hosts: true,
+        can_manage_hosts: true,
+        can_view_packages: true,
+        can_manage_packages: true,
+        can_view_users: true,
+        can_manage_users: true,
+        can_view_reports: true,
+        can_export_data: true,
+        can_manage_settings: true,
+      };
+    }
+
+    return permissions;
+  } catch (error) {
+    console.error("Error fetching user permissions:", error);
+    // Return admin permissions as fallback
+    return {
+      can_view_dashboard: true,
+      can_view_hosts: true,
+      can_manage_hosts: true,
+      can_view_packages: true,
+      can_manage_packages: true,
+      can_view_users: true,
+      can_manage_users: true,
+      can_view_reports: true,
+      can_export_data: true,
+      can_manage_settings: true,
+    };
+  }
+}
+
+// Helper function to create permission-based dashboard preferences for a new user
+async function createDefaultDashboardPreferences(userId, userRole = "user") {
+  try {
+    // Get user's actual permissions
+    const permissions = await getUserPermissions(userRole);
+
+    // Define all possible dashboard cards with their required permissions
+    // Order aligned with preferred layout
+    const allCards = [
+      // Host-related cards
+      { cardId: "totalHosts", requiredPermission: "can_view_hosts", order: 0 },
+      {
+        cardId: "hostsNeedingUpdates",
+        requiredPermission: "can_view_hosts",
+        order: 1,
+      },
+
+      // Package-related cards
+      {
+        cardId: "totalOutdatedPackages",
+        requiredPermission: "can_view_packages",
+        order: 2,
+      },
+      {
+        cardId: "securityUpdates",
+        requiredPermission: "can_view_packages",
+        order: 3,
+      },
+
+      // Host-related cards (continued)
+      {
+        cardId: "totalHostGroups",
+        requiredPermission: "can_view_hosts",
+        order: 4,
+      },
+      {
+        cardId: "upToDateHosts",
+        requiredPermission: "can_view_hosts",
+        order: 5,
+      },
+
+      // Repository-related cards
+      { cardId: "totalRepos", requiredPermission: "can_view_hosts", order: 6 },
+
+      // User management cards (admin only)
+      { cardId: "totalUsers", requiredPermission: "can_view_users", order: 7 },
+
+      // System/Report cards
+      {
+        cardId: "osDistribution",
+        requiredPermission: "can_view_reports",
+        order: 8,
+      },
+      {
+        cardId: "osDistributionBar",
+        requiredPermission: "can_view_reports",
+        order: 9,
+      },
+      {
+        cardId: "osDistributionDoughnut",
+        requiredPermission: "can_view_reports",
+        order: 10,
+      },
+      {
+        cardId: "recentCollection",
+        requiredPermission: "can_view_hosts",
+        order: 11,
+      },
+      {
+        cardId: "updateStatus",
+        requiredPermission: "can_view_reports",
+        order: 12,
+      },
+      {
+        cardId: "packagePriority",
+        requiredPermission: "can_view_packages",
+        order: 13,
+      },
+      {
+        cardId: "packageTrends",
+        requiredPermission: "can_view_packages",
+        order: 14,
+      },
+      {
+        cardId: "recentUsers",
+        requiredPermission: "can_view_users",
+        order: 15,
+      },
+      {
+        cardId: "quickStats",
+        requiredPermission: "can_view_dashboard",
+        order: 16,
+      },
+    ];
+
+    // Filter cards based on user's permissions
+    const allowedCards = allCards.filter((card) => {
+      return permissions[card.requiredPermission] === true;
+    });
+
+    // Create preferences data
+    const preferencesData = allowedCards.map((card) => ({
+      id: uuidv4(),
+      user_id: userId,
+      card_id: card.cardId,
+      enabled: true,
+      order: card.order, // Preserve original order from allCards
+      created_at: new Date(),
+      updated_at: new Date(),
+    }));
+
+    await prisma.dashboard_preferences.createMany({
+      data: preferencesData,
+    });
+
+    console.log(
+      `Permission-based dashboard preferences created for user ${userId} with role ${userRole}: ${allowedCards.length} cards`,
+    );
+  } catch (error) {
+    console.error("Error creating default dashboard preferences:", error);
+    // Don't throw error - this shouldn't break user creation
+  }
+}
 
 // Get user's dashboard preferences
-router.get('/', authenticateToken, async (req, res) => {
+router.get("/", authenticateToken, async (req, res) => {
   try {
-    const preferences = await prisma.dashboardPreferences.findMany({
-      where: { userId: req.user.id },
-      orderBy: { order: 'asc' }
+    const preferences = await prisma.dashboard_preferences.findMany({
+      where: { user_id: req.user.id },
+      orderBy: { order: "asc" },
     });
 
     res.json(preferences);
   } catch (error) {
-    console.error('Dashboard preferences fetch error:', error);
-    res.status(500).json({ error: 'Failed to fetch dashboard preferences' });
+    console.error("Dashboard preferences fetch error:", error);
+    res.status(500).json({ error: "Failed to fetch dashboard preferences" });
   }
 });
 
 // Update dashboard preferences (bulk update)
-router.put('/', authenticateToken, [
-  body('preferences').isArray().withMessage('Preferences must be an array'),
-  body('preferences.*.cardId').isString().withMessage('Card ID is required'),
-  body('preferences.*.enabled').isBoolean().withMessage('Enabled must be boolean'),
-  body('preferences.*.order').isInt().withMessage('Order must be integer')
-], async (req, res) => {
-  try {
-    const errors = validationResult(req);
-    if (!errors.isEmpty()) {
-      return res.status(400).json({ errors: errors.array() });
-    }
+router.put(
+  "/",
+  authenticateToken,
+  [
+    body("preferences").isArray().withMessage("Preferences must be an array"),
+    body("preferences.*.cardId").isString().withMessage("Card ID is required"),
+    body("preferences.*.enabled")
+      .isBoolean()
+      .withMessage("Enabled must be boolean"),
+    body("preferences.*.order").isInt().withMessage("Order must be integer"),
+  ],
+  async (req, res) => {
+    try {
+      const errors = validationResult(req);
+      if (!errors.isEmpty()) {
+        return res.status(400).json({ errors: errors.array() });
+      }
 
       const { preferences } = req.body;
       const userId = req.user.id;
 
       // Delete existing preferences for this user
-      await prisma.dashboardPreferences.deleteMany({
-        where: { userId }
+      await prisma.dashboard_preferences.deleteMany({
+        where: { user_id: userId },
       });
 
       // Create new preferences
-      const newPreferences = preferences.map(pref => ({
-        userId,
-        cardId: pref.cardId,
-        enabled: pref.enabled,
-        order: pref.order
-      }));
+      const newPreferences = preferences.map((pref) => ({
+        id: require("uuid").v4(),
+        user_id: userId,
+        card_id: pref.cardId,
+        enabled: pref.enabled,
+        order: pref.order,
+        updated_at: new Date(),
+      }));
 
-      const createdPreferences = await prisma.dashboardPreferences.createMany({
-        data: newPreferences
+      await prisma.dashboard_preferences.createMany({
+        data: newPreferences,
       });
 
       res.json({
-        message: 'Dashboard preferences updated successfully',
-        preferences: newPreferences
+        message: "Dashboard preferences updated successfully",
+        preferences: newPreferences,
       });
     } catch (error) {
-      console.error('Dashboard preferences update error:', error);
-      res.status(500).json({ error: 'Failed to update dashboard preferences' });
+      console.error("Dashboard preferences update error:", error);
+      res.status(500).json({ error: "Failed to update dashboard preferences" });
     }
-});
+  },
+);
 
 // Get default dashboard card configuration
-router.get('/defaults', authenticateToken, async (req, res) => {
+router.get("/defaults", authenticateToken, async (_req, res) => {
   try {
-    const defaultCards = [
-      { cardId: 'totalHosts', title: 'Total Hosts', icon: 'Server', enabled: true, order: 0 },
-      { cardId: 'hostsNeedingUpdates', title: 'Needs Updating', icon: 'AlertTriangle', enabled: true, order: 1 },
-      { cardId: 'totalOutdatedPackages', title: 'Outdated Packages', icon: 'Package', enabled: true, order: 2 },
-      { cardId: 'securityUpdates', title: 'Security Updates', icon: 'Shield', enabled: true, order: 3 },
-      { cardId: 'erroredHosts', title: 'Errored Hosts', icon: 'AlertTriangle', enabled: true, order: 4 },
-      { cardId: 'osDistribution', title: 'OS Distribution', icon: 'BarChart3', enabled: true, order: 5 },
-      { cardId: 'updateStatus', title: 'Update Status', icon: 'BarChart3', enabled: true, order: 6 },
-      { cardId: 'packagePriority', title: 'Package Priority', icon: 'BarChart3', enabled: true, order: 7 },
-      { cardId: 'quickStats', title: 'Quick Stats', icon: 'TrendingUp', enabled: true, order: 8 }
-    ];
+    // This provides a comprehensive dashboard view for all new users
+    const defaultCards = [
+      {
+        cardId: "totalHosts",
+        title: "Total Hosts",
+        icon: "Server",
+        enabled: true,
+        order: 0,
+      },
+      {
+        cardId: "hostsNeedingUpdates",
+        title: "Needs Updating",
+        icon: "AlertTriangle",
+        enabled: true,
+        order: 1,
+      },
+      {
+        cardId: "totalOutdatedPackages",
+        title: "Outdated Packages",
+        icon: "Package",
+        enabled: true,
+        order: 2,
+      },
+      {
+        cardId: "securityUpdates",
+        title: "Security Updates",
+        icon: "Shield",
+        enabled: true,
+        order: 3,
+      },
+      {
+        cardId: "totalHostGroups",
+        title: "Host Groups",
+        icon: "Folder",
+        enabled: true,
+        order: 4,
+      },
+      {
+        cardId: "upToDateHosts",
+        title: "Up to date",
+        icon: "CheckCircle",
+        enabled: true,
+        order: 5,
+      },
+      {
+        cardId: "totalRepos",
+        title: "Repositories",
+        icon: "GitBranch",
+        enabled: true,
+        order: 6,
+      },
+      {
+        cardId: "totalUsers",
+        title: "Users",
+        icon: "Users",
+        enabled: true,
+        order: 7,
+      },
+      {
+        cardId: "osDistribution",
+        title: "OS Distribution",
+        icon: "BarChart3",
+        enabled: true,
+        order: 8,
+      },
+      {
+        cardId: "osDistributionBar",
+        title: "OS Distribution (Bar)",
+        icon: "BarChart3",
+        enabled: true,
+        order: 9,
+      },
+      {
+        cardId: "osDistributionDoughnut",
+        title: "OS Distribution (Doughnut)",
+        icon: "PieChart",
+        enabled: true,
+        order: 10,
+      },
+      {
+        cardId: "recentCollection",
+        title: "Recent Collection",
+        icon: "Server",
+        enabled: true,
+        order: 11,
+      },
+      {
+        cardId: "updateStatus",
+        title: "Update Status",
+        icon: "BarChart3",
+        enabled: true,
+        order: 12,
+      },
+      {
+        cardId: "packagePriority",
+        title: "Package Priority",
+        icon: "BarChart3",
+        enabled: true,
+        order: 13,
+      },
+      {
+        cardId: "packageTrends",
+        title: "Package Trends",
+        icon: "TrendingUp",
+        enabled: true,
+        order: 14,
+      },
+      {
+        cardId: "recentUsers",
+        title: "Recent Users Logged in",
+        icon: "Users",
+        enabled: true,
+        order: 15,
+      },
+      {
+        cardId: "quickStats",
+        title: "Quick Stats",
+        icon: "TrendingUp",
+        enabled: true,
+        order: 16,
+      },
+    ];
 
     res.json(defaultCards);
   } catch (error) {
-    console.error('Default dashboard cards error:', error);
-    res.status(500).json({ error: 'Failed to fetch default dashboard cards' });
+    console.error("Default dashboard cards error:", error);
+    res.status(500).json({ error: "Failed to fetch default dashboard cards" });
   }
 });
 
-module.exports = router;
+module.exports = { router, createDefaultDashboardPreferences };
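An example bulk update matching the validators above (the mount path is an assumption):

await fetch("/api/v1/dashboard-preferences", {
  method: "PUT",
  headers: {
    "Content-Type": "application/json",
    Authorization: `Bearer ${token}`, // placeholder token
  },
  body: JSON.stringify({
    preferences: [
      { cardId: "totalHosts", enabled: true, order: 0 },
      { cardId: "quickStats", enabled: false, order: 1 },
    ],
  }),
});

Note that the handler replaces the user's preference rows wholesale (deleteMany followed by createMany), so the payload must carry the complete desired card list, not a delta.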
backend/src/routes/dockerRoutes.js (new file, 1321 lines): diff suppressed because it is too large.

backend/src/routes/gethomepageRoutes.js (new file, 246 lines):
@@ -0,0 +1,246 @@
const express = require("express");
const { getPrismaClient } = require("../config/prisma");
const bcrypt = require("bcryptjs");

const router = express.Router();
const prisma = getPrismaClient();

// Middleware to authenticate API key
const authenticateApiKey = async (req, res, next) => {
  try {
    const authHeader = req.headers.authorization;

    if (!authHeader || !authHeader.startsWith("Basic ")) {
      return res
        .status(401)
        .json({ error: "Missing or invalid authorization header" });
    }

    // Decode base64 credentials
    const base64Credentials = authHeader.split(" ")[1];
    const credentials = Buffer.from(base64Credentials, "base64").toString(
      "ascii",
    );
    const [apiKey, apiSecret] = credentials.split(":");

    if (!apiKey || !apiSecret) {
      return res.status(401).json({ error: "Invalid credentials format" });
    }

    // Find the token in database
    const token = await prisma.auto_enrollment_tokens.findUnique({
      where: { token_key: apiKey },
      include: {
        users: {
          select: {
            id: true,
            username: true,
            role: true,
          },
        },
      },
    });

    if (!token) {
      console.log(`API key not found: ${apiKey}`);
      return res.status(401).json({ error: "Invalid API key" });
    }

    // Check if token is active
    if (!token.is_active) {
      return res.status(401).json({ error: "API key is disabled" });
    }

    // Check if token has expired
    if (token.expires_at && new Date(token.expires_at) < new Date()) {
      return res.status(401).json({ error: "API key has expired" });
    }

    // Check if token is for gethomepage integration
    if (token.metadata?.integration_type !== "gethomepage") {
      return res.status(401).json({ error: "Invalid API key type" });
    }

    // Verify the secret
    const isValidSecret = await bcrypt.compare(apiSecret, token.token_secret);
    if (!isValidSecret) {
      return res.status(401).json({ error: "Invalid API secret" });
    }

    // Check IP restrictions if any
    if (token.allowed_ip_ranges && token.allowed_ip_ranges.length > 0) {
      const clientIp = req.ip || req.connection.remoteAddress;
      const forwardedFor = req.headers["x-forwarded-for"];
      const realIp = req.headers["x-real-ip"];

      // Get the actual client IP (considering proxies)
      const actualClientIp = forwardedFor
        ? forwardedFor.split(",")[0].trim()
        : realIp || clientIp;

      const isAllowedIp = token.allowed_ip_ranges.some((range) => {
        // Simple IP range check (can be enhanced for CIDR support)
        return actualClientIp.startsWith(range) || actualClientIp === range;
      });

      if (!isAllowedIp) {
        console.log(
          `IP validation failed. Client IP: ${actualClientIp}, Allowed ranges: ${token.allowed_ip_ranges.join(", ")}`,
        );
        return res.status(403).json({ error: "IP address not allowed" });
      }
    }

    // Update last used timestamp
    await prisma.auto_enrollment_tokens.update({
      where: { id: token.id },
      data: { last_used_at: new Date() },
    });

    // Attach token info to request
    req.apiToken = token;
    next();
  } catch (error) {
    console.error("API key authentication error:", error);
    res.status(500).json({ error: "Authentication failed" });
  }
};
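A client sketch for the credential format this middleware expects: HTTP Basic auth, where the username position carries the token key and the password position carries the token secret (the base URL and mount path are assumptions):

const auth = Buffer.from(`${apiKey}:${apiSecret}`).toString("base64");
const res = await fetch("https://patchmon.example.com/api/v1/gethomepage/stats", {
  headers: { Authorization: `Basic ${auth}` },
});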
// Get homepage widget statistics
router.get("/stats", authenticateApiKey, async (_req, res) => {
  try {
    // Get total hosts count
    const totalHosts = await prisma.hosts.count({
      where: { status: "active" },
    });

    // Get total unique packages that need updates (consistent with dashboard)
    const totalOutdatedPackages = await prisma.packages.count({
      where: {
        host_packages: {
          some: {
            needs_update: true,
          },
        },
      },
    });

    // Get total repositories count
    const totalRepos = await prisma.repositories.count({
      where: { is_active: true },
    });

    // Get hosts that need updates (have outdated packages)
    const hostsNeedingUpdates = await prisma.hosts.count({
      where: {
        status: "active",
        host_packages: {
          some: {
            needs_update: true,
          },
        },
      },
    });

    // Get security updates count (unique packages - consistent with dashboard)
    const securityUpdates = await prisma.packages.count({
      where: {
        host_packages: {
          some: {
            needs_update: true,
            is_security_update: true,
          },
        },
      },
    });

    // Get hosts with security updates
    const hostsWithSecurityUpdates = await prisma.hosts.count({
      where: {
        status: "active",
        host_packages: {
          some: {
            needs_update: true,
            is_security_update: true,
          },
        },
      },
    });

    // Get up-to-date hosts count
    const upToDateHosts = totalHosts - hostsNeedingUpdates;

    // Get recent update activity (last 24 hours)
    const oneDayAgo = new Date(Date.now() - 24 * 60 * 60 * 1000);
    const recentUpdates = await prisma.update_history.count({
      where: {
        timestamp: {
          gte: oneDayAgo,
        },
        status: "success",
      },
    });

    // Get OS distribution
    const osDistribution = await prisma.hosts.groupBy({
      by: ["os_type"],
      where: { status: "active" },
      _count: {
        id: true,
      },
      orderBy: {
        _count: {
          id: "desc",
        },
      },
    });

    // Format OS distribution data
    const osDistributionFormatted = osDistribution.map((os) => ({
      name: os.os_type,
      count: os._count.id,
    }));

    // Extract top 3 OS types for flat display in widgets
    const top_os_1 = osDistributionFormatted[0] || { name: "None", count: 0 };
    const top_os_2 = osDistributionFormatted[1] || { name: "None", count: 0 };
    const top_os_3 = osDistributionFormatted[2] || { name: "None", count: 0 };

    // Prepare response data
    const stats = {
      total_hosts: totalHosts,
      total_outdated_packages: totalOutdatedPackages,
      total_repos: totalRepos,
      hosts_needing_updates: hostsNeedingUpdates,
      up_to_date_hosts: upToDateHosts,
      security_updates: securityUpdates,
      hosts_with_security_updates: hostsWithSecurityUpdates,
      recent_updates_24h: recentUpdates,
      os_distribution: osDistributionFormatted,
      // Flattened OS data for easy widget display
      top_os_1_name: top_os_1.name,
      top_os_1_count: top_os_1.count,
      top_os_2_name: top_os_2.name,
      top_os_2_count: top_os_2.count,
      top_os_3_name: top_os_3.name,
      top_os_3_count: top_os_3.count,
      last_updated: new Date().toISOString(),
    };

    res.json(stats);
  } catch (error) {
    console.error("Error fetching homepage stats:", error);
    res.status(500).json({ error: "Failed to fetch statistics" });
  }
});

// Health check endpoint for the API
router.get("/health", authenticateApiKey, async (req, res) => {
  res.json({
    status: "ok",
    timestamp: new Date().toISOString(),
    api_key: req.apiToken.token_name,
  });
});

module.exports = router;
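The /stats payload is deliberately flat (top_os_1_name, top_os_1_count, and so on) so simple key-path widgets can display values without traversing arrays. An illustrative response, with made-up numbers:

{
  "total_hosts": 42,
  "total_outdated_packages": 17,
  "total_repos": 9,
  "hosts_needing_updates": 5,
  "up_to_date_hosts": 37,
  "security_updates": 3,
  "hosts_with_security_updates": 2,
  "recent_updates_24h": 12,
  "os_distribution": [{ "name": "ubuntu", "count": 20 }],
  "top_os_1_name": "ubuntu",
  "top_os_1_count": 20,
  "top_os_2_name": "debian",
  "top_os_2_count": 15,
  "top_os_3_name": "None",
  "top_os_3_count": 0,
  "last_updated": "2025-01-01T00:00:00.000Z"
}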
@@ -1,225 +1,275 @@
-const express = require('express');
-const { body, validationResult } = require('express-validator');
-const { PrismaClient } = require('@prisma/client');
-const { authenticateToken } = require('../middleware/auth');
-const { requireManageHosts } = require('../middleware/permissions');
+const express = require("express");
+const { body, validationResult } = require("express-validator");
+const { getPrismaClient } = require("../config/prisma");
+const { randomUUID } = require("node:crypto");
+const { authenticateToken } = require("../middleware/auth");
+const { requireManageHosts } = require("../middleware/permissions");
 
 const router = express.Router();
-const prisma = new PrismaClient();
+const prisma = getPrismaClient();
 
 // Get all host groups
-router.get('/', authenticateToken, async (req, res) => {
+router.get("/", authenticateToken, async (_req, res) => {
   try {
-    const hostGroups = await prisma.hostGroup.findMany({
+    const hostGroups = await prisma.host_groups.findMany({
       include: {
         _count: {
           select: {
-            hosts: true
-          }
-        }
+            host_group_memberships: true,
+          },
+        },
       },
       orderBy: {
-        name: 'asc'
-      }
+        name: "asc",
+      },
     });
 
-    res.json(hostGroups);
-  } catch (error) {
-    console.error('Error fetching host groups:', error);
-    res.status(500).json({ error: 'Failed to fetch host groups' });
-  }
+    // Transform the count field to match frontend expectations
+    const transformedGroups = hostGroups.map((group) => ({
+      ...group,
+      _count: {
+        hosts: group._count.host_group_memberships,
+      },
+    }));
+
+    res.json(transformedGroups);
+  } catch (error) {
+    console.error("Error fetching host groups:", error);
+    res.status(500).json({ error: "Failed to fetch host groups" });
+  }
 });
 
 // Get a specific host group by ID
-router.get('/:id', authenticateToken, async (req, res) => {
+router.get("/:id", authenticateToken, async (req, res) => {
   try {
     const { id } = req.params;
 
-    const hostGroup = await prisma.hostGroup.findUnique({
+    const hostGroup = await prisma.host_groups.findUnique({
       where: { id },
       include: {
-        hosts: {
-          select: {
-            id: true,
-            hostname: true,
-            ip: true,
-            osType: true,
-            osVersion: true,
-            status: true,
-            lastUpdate: true
-          }
-        }
-      }
-    });
+        host_group_memberships: {
+          include: {
+            hosts: {
+              select: {
+                id: true,
+                friendly_name: true,
+                hostname: true,
+                ip: true,
+                os_type: true,
+                os_version: true,
+                status: true,
+                last_update: true,
+              },
+            },
+          },
+        },
+      },
+    });
 
     if (!hostGroup) {
-      return res.status(404).json({ error: 'Host group not found' });
+      return res.status(404).json({ error: "Host group not found" });
     }
 
     res.json(hostGroup);
   } catch (error) {
-    console.error('Error fetching host group:', error);
-    res.status(500).json({ error: 'Failed to fetch host group' });
+    console.error("Error fetching host group:", error);
+    res.status(500).json({ error: "Failed to fetch host group" });
   }
 });
 
 // Create a new host group
-router.post('/', authenticateToken, requireManageHosts, [
-  body('name').trim().isLength({ min: 1 }).withMessage('Name is required'),
-  body('description').optional().trim(),
-  body('color').optional().isHexColor().withMessage('Color must be a valid hex color')
-], async (req, res) => {
-  try {
-    const errors = validationResult(req);
-    if (!errors.isEmpty()) {
-      return res.status(400).json({ errors: errors.array() });
-    }
+router.post(
+  "/",
+  authenticateToken,
+  requireManageHosts,
+  [
+    body("name").trim().isLength({ min: 1 }).withMessage("Name is required"),
+    body("description").optional().trim(),
+    body("color")
+      .optional()
+      .isHexColor()
+      .withMessage("Color must be a valid hex color"),
+  ],
+  async (req, res) => {
+    try {
+      const errors = validationResult(req);
+      if (!errors.isEmpty()) {
+        return res.status(400).json({ errors: errors.array() });
+      }
 
       const { name, description, color } = req.body;
 
       // Check if host group with this name already exists
-      const existingGroup = await prisma.hostGroup.findUnique({
-        where: { name }
+      const existingGroup = await prisma.host_groups.findUnique({
+        where: { name },
       });
 
       if (existingGroup) {
-        return res.status(400).json({ error: 'A host group with this name already exists' });
+        return res
+          .status(400)
+          .json({ error: "A host group with this name already exists" });
       }
 
-      const hostGroup = await prisma.hostGroup.create({
+      const hostGroup = await prisma.host_groups.create({
         data: {
-          name,
-          description: description || null,
-          color: color || '#3B82F6'
-        }
+          id: randomUUID(),
+          name,
+          description: description || null,
+          color: color || "#3B82F6",
+          updated_at: new Date(),
+        },
       });
 
       res.status(201).json(hostGroup);
     } catch (error) {
-      console.error('Error creating host group:', error);
-      res.status(500).json({ error: 'Failed to create host group' });
+      console.error("Error creating host group:", error);
+      res.status(500).json({ error: "Failed to create host group" });
     }
-});
+  },
+);
 
 // Update a host group
-router.put('/:id', authenticateToken, requireManageHosts, [
-  body('name').trim().isLength({ min: 1 }).withMessage('Name is required'),
-  body('description').optional().trim(),
-  body('color').optional().isHexColor().withMessage('Color must be a valid hex color')
-], async (req, res) => {
-  try {
-    const errors = validationResult(req);
-    if (!errors.isEmpty()) {
-      return res.status(400).json({ errors: errors.array() });
-    }
+router.put(
+  "/:id",
+  authenticateToken,
+  requireManageHosts,
+  [
+    body("name").trim().isLength({ min: 1 }).withMessage("Name is required"),
+    body("description").optional().trim(),
+    body("color")
+      .optional()
+      .isHexColor()
+      .withMessage("Color must be a valid hex color"),
+  ],
+  async (req, res) => {
+    try {
+      const errors = validationResult(req);
+      if (!errors.isEmpty()) {
+        return res.status(400).json({ errors: errors.array() });
+      }
 
       const { id } = req.params;
       const { name, description, color } = req.body;
 
       // Check if host group exists
-      const existingGroup = await prisma.hostGroup.findUnique({
-        where: { id }
+      const existingGroup = await prisma.host_groups.findUnique({
+        where: { id },
       });
 
       if (!existingGroup) {
-        return res.status(404).json({ error: 'Host group not found' });
+        return res.status(404).json({ error: "Host group not found" });
       }
 
       // Check if another host group with this name already exists
-      const duplicateGroup = await prisma.hostGroup.findFirst({
+      const duplicateGroup = await prisma.host_groups.findFirst({
         where: {
           name,
-          id: { not: id }
-        }
+          id: { not: id },
+        },
       });
 
       if (duplicateGroup) {
-        return res.status(400).json({ error: 'A host group with this name already exists' });
+        return res
+          .status(400)
+          .json({ error: "A host group with this name already exists" });
      }
 
-      const hostGroup = await prisma.hostGroup.update({
+      const hostGroup = await prisma.host_groups.update({
         where: { id },
         data: {
           name,
           description: description || null,
-          color: color || '#3B82F6'
-        }
+          color: color || "#3B82F6",
+          updated_at: new Date(),
+        },
       });
 
       res.json(hostGroup);
     } catch (error) {
-      console.error('Error updating host group:', error);
-      res.status(500).json({ error: 'Failed to update host group' });
+      console.error("Error updating host group:", error);
+      res.status(500).json({ error: "Failed to update host group" });
     }
-});
+  },
+);
 
 // Delete a host group
-router.delete('/:id', authenticateToken, requireManageHosts, async (req, res) => {
-  try {
-    const { id } = req.params;
+router.delete(
+  "/:id",
+  authenticateToken,
+  requireManageHosts,
+  async (req, res) => {
+    try {
+      const { id } = req.params;
 
       // Check if host group exists
-      const existingGroup = await prisma.hostGroup.findUnique({
+      const existingGroup = await prisma.host_groups.findUnique({
         where: { id },
         include: {
           _count: {
             select: {
-              hosts: true
-            }
-          }
-        }
+              host_group_memberships: true,
+            },
+          },
+        },
       });
 
       if (!existingGroup) {
-        return res.status(404).json({ error: 'Host group not found' });
+        return res.status(404).json({ error: "Host group not found" });
       }
 
-      // Check if host group has hosts
-      if (existingGroup._count.hosts > 0) {
-        return res.status(400).json({
-          error: 'Cannot delete host group that contains hosts. Please move or remove hosts first.'
-        });
-      }
+      // If host group has memberships, remove them first
+      if (existingGroup._count.host_group_memberships > 0) {
+        await prisma.host_group_memberships.deleteMany({
+          where: { host_group_id: id },
+        });
+      }
 
-      await prisma.hostGroup.delete({
-        where: { id }
+      await prisma.host_groups.delete({
+        where: { id },
       });
 
-      res.json({ message: 'Host group deleted successfully' });
+      res.json({ message: "Host group deleted successfully" });
     } catch (error) {
-      console.error('Error deleting host group:', error);
-      res.status(500).json({ error: 'Failed to delete host group' });
+      console.error("Error deleting host group:", error);
+      res.status(500).json({ error: "Failed to delete host group" });
     }
-});
+  },
+);
 
 // Get hosts in a specific group
-router.get('/:id/hosts', authenticateToken, async (req, res) => {
+router.get("/:id/hosts", authenticateToken, async (req, res) => {
   try {
     const { id } = req.params;
 
-    const hosts = await prisma.host.findMany({
-      where: { hostGroupId: id },
-      select: {
-        id: true,
-        hostname: true,
-        ip: true,
-        osType: true,
-        osVersion: true,
-        architecture: true,
-        status: true,
-        lastUpdate: true,
-        createdAt: true
-      },
-      orderBy: {
-        hostname: 'asc'
-      }
-    });
+    const hosts = await prisma.hosts.findMany({
+      where: {
+        host_group_memberships: {
+          some: {
+            host_group_id: id,
+          },
+        },
+      },
+      select: {
+        id: true,
+        friendly_name: true,
+        ip: true,
+        os_type: true,
+        os_version: true,
+        architecture: true,
+        status: true,
+        last_update: true,
+        created_at: true,
+      },
+      orderBy: {
+        friendly_name: "asc",
+      },
+    });
 
     res.json(hosts);
   } catch (error) {
-    console.error('Error fetching hosts in group:', error);
-    res.status(500).json({ error: 'Failed to fetch hosts in group' });
+    console.error("Error fetching hosts in group:", error);
+    res.status(500).json({ error: "Failed to fetch hosts in group" });
   }
 });
 
 module.exports = router;
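A create-group call matching the validators (the mount path is an assumption):

await fetch("/api/v1/host-groups", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: `Bearer ${token}`, // placeholder token
  },
  body: JSON.stringify({
    name: "Web servers",
    description: "Public-facing hosts",
    color: "#10B981", // optional; the server defaults to #3B82F6
  }),
});

Worth noting: the delete endpoint's behavior changed in this diff. It previously refused to delete a group that still contained hosts; it now removes the group's memberships first and then deletes the group.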
File diff suppressed because it is too large.

backend/src/routes/integrationRoutes.js (new file, 356 lines):
@@ -0,0 +1,356 @@
const express = require("express");
const { getPrismaClient } = require("../config/prisma");
const { v4: uuidv4 } = require("uuid");

const prisma = getPrismaClient();
const router = express.Router();

// POST /api/v1/integrations/docker - Docker data collection endpoint
router.post("/docker", async (req, res) => {
  try {
    const apiId = req.headers["x-api-id"];
    const apiKey = req.headers["x-api-key"];
    const {
      containers,
      images,
      volumes,
      networks,
      updates,
      daemon_info: _daemon_info,
      hostname,
      machine_id,
      agent_version: _agent_version,
    } = req.body;

    console.log(
      `[Docker Integration] Received data from ${hostname || machine_id}`,
    );

    // Validate API credentials
    const host = await prisma.hosts.findFirst({
      where: { api_id: apiId, api_key: apiKey },
    });

    if (!host) {
      console.warn("[Docker Integration] Invalid API credentials");
      return res.status(401).json({ error: "Invalid API credentials" });
    }

    console.log(
      `[Docker Integration] Processing for host: ${host.friendly_name}`,
    );

    const now = new Date();

    // Helper function to validate and parse dates
    const parseDate = (dateString) => {
      if (!dateString) return now;
      const date = new Date(dateString);
      return Number.isNaN(date.getTime()) ? now : date;
    };

    let containersProcessed = 0;
    let imagesProcessed = 0;
    let volumesProcessed = 0;
    let networksProcessed = 0;
    let updatesProcessed = 0;

    // Process containers
    if (containers && Array.isArray(containers)) {
      console.log(
        `[Docker Integration] Processing ${containers.length} containers`,
      );
      for (const containerData of containers) {
        const containerId = uuidv4();

        // Find or create image
        let imageId = null;
        if (containerData.image_repository && containerData.image_tag) {
          const image = await prisma.docker_images.upsert({
            where: {
              repository_tag_image_id: {
                repository: containerData.image_repository,
                tag: containerData.image_tag,
                image_id: containerData.image_id || "unknown",
              },
            },
            update: {
              last_checked: now,
              updated_at: now,
            },
            create: {
              id: uuidv4(),
              repository: containerData.image_repository,
              tag: containerData.image_tag,
              image_id: containerData.image_id || "unknown",
              source: containerData.image_source || "docker-hub",
              created_at: parseDate(containerData.created_at),
              updated_at: now,
            },
          });
          imageId = image.id;
        }

        // Upsert container
        await prisma.docker_containers.upsert({
          where: {
            host_id_container_id: {
              host_id: host.id,
              container_id: containerData.container_id,
            },
          },
          update: {
            name: containerData.name,
            image_id: imageId,
            image_name: containerData.image_name,
            image_tag: containerData.image_tag || "latest",
            status: containerData.status,
            state: containerData.state || containerData.status,
            ports: containerData.ports || null,
            started_at: containerData.started_at
              ? parseDate(containerData.started_at)
              : null,
            updated_at: now,
            last_checked: now,
          },
          create: {
            id: containerId,
            host_id: host.id,
            container_id: containerData.container_id,
            name: containerData.name,
            image_id: imageId,
            image_name: containerData.image_name,
            image_tag: containerData.image_tag || "latest",
            status: containerData.status,
            state: containerData.state || containerData.status,
            ports: containerData.ports || null,
            created_at: parseDate(containerData.created_at),
            started_at: containerData.started_at
              ? parseDate(containerData.started_at)
              : null,
            updated_at: now,
          },
        });
        containersProcessed++;
      }
    }

    // Process standalone images
    if (images && Array.isArray(images)) {
      console.log(`[Docker Integration] Processing ${images.length} images`);
      for (const imageData of images) {
        await prisma.docker_images.upsert({
          where: {
            repository_tag_image_id: {
              repository: imageData.repository,
              tag: imageData.tag,
              image_id: imageData.image_id,
            },
          },
          update: {
            size_bytes: imageData.size_bytes
              ? BigInt(imageData.size_bytes)
              : null,
            digest: imageData.digest || null,
            last_checked: now,
            updated_at: now,
          },
          create: {
            id: uuidv4(),
            repository: imageData.repository,
            tag: imageData.tag,
            image_id: imageData.image_id,
            digest: imageData.digest,
            size_bytes: imageData.size_bytes
              ? BigInt(imageData.size_bytes)
              : null,
            source: imageData.source || "docker-hub",
            created_at: parseDate(imageData.created_at),
            updated_at: now,
          },
        });
        imagesProcessed++;
      }
    }

    // Process volumes
    if (volumes && Array.isArray(volumes)) {
      console.log(`[Docker Integration] Processing ${volumes.length} volumes`);
      for (const volumeData of volumes) {
        await prisma.docker_volumes.upsert({
          where: {
            host_id_volume_id: {
              host_id: host.id,
              volume_id: volumeData.volume_id,
            },
          },
          update: {
            name: volumeData.name,
            driver: volumeData.driver || "local",
            mountpoint: volumeData.mountpoint || null,
            renderer: volumeData.renderer || null,
            scope: volumeData.scope || "local",
            labels: volumeData.labels || null,
            options: volumeData.options || null,
            size_bytes: volumeData.size_bytes
              ? BigInt(volumeData.size_bytes)
              : null,
            ref_count: volumeData.ref_count || 0,
            updated_at: now,
            last_checked: now,
          },
          create: {
            id: uuidv4(),
            host_id: host.id,
            volume_id: volumeData.volume_id,
            name: volumeData.name,
            driver: volumeData.driver || "local",
            mountpoint: volumeData.mountpoint || null,
|
||||||
|
renderer: volumeData.renderer || null,
|
||||||
|
scope: volumeData.scope || "local",
|
||||||
|
labels: volumeData.labels || null,
|
||||||
|
options: volumeData.options || null,
|
||||||
|
size_bytes: volumeData.size_bytes
|
||||||
|
? BigInt(volumeData.size_bytes)
|
||||||
|
: null,
|
||||||
|
ref_count: volumeData.ref_count || 0,
|
||||||
|
created_at: parseDate(volumeData.created_at),
|
||||||
|
updated_at: now,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
volumesProcessed++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process networks
|
||||||
|
if (networks && Array.isArray(networks)) {
|
||||||
|
console.log(
|
||||||
|
`[Docker Integration] Processing ${networks.length} networks`,
|
||||||
|
);
|
||||||
|
for (const networkData of networks) {
|
||||||
|
await prisma.docker_networks.upsert({
|
||||||
|
where: {
|
||||||
|
host_id_network_id: {
|
||||||
|
host_id: host.id,
|
||||||
|
network_id: networkData.network_id,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
update: {
|
||||||
|
name: networkData.name,
|
||||||
|
driver: networkData.driver,
|
||||||
|
scope: networkData.scope || "local",
|
||||||
|
ipv6_enabled: networkData.ipv6_enabled || false,
|
||||||
|
internal: networkData.internal || false,
|
||||||
|
attachable:
|
||||||
|
networkData.attachable !== undefined
|
||||||
|
? networkData.attachable
|
||||||
|
: true,
|
||||||
|
ingress: networkData.ingress || false,
|
||||||
|
config_only: networkData.config_only || false,
|
||||||
|
labels: networkData.labels || null,
|
||||||
|
ipam: networkData.ipam || null,
|
||||||
|
container_count: networkData.container_count || 0,
|
||||||
|
updated_at: now,
|
||||||
|
last_checked: now,
|
||||||
|
},
|
||||||
|
create: {
|
||||||
|
id: uuidv4(),
|
||||||
|
host_id: host.id,
|
||||||
|
network_id: networkData.network_id,
|
||||||
|
name: networkData.name,
|
||||||
|
driver: networkData.driver,
|
||||||
|
scope: networkData.scope || "local",
|
||||||
|
ipv6_enabled: networkData.ipv6_enabled || false,
|
||||||
|
internal: networkData.internal || false,
|
||||||
|
attachable:
|
||||||
|
networkData.attachable !== undefined
|
||||||
|
? networkData.attachable
|
||||||
|
: true,
|
||||||
|
ingress: networkData.ingress || false,
|
||||||
|
config_only: networkData.config_only || false,
|
||||||
|
labels: networkData.labels || null,
|
||||||
|
ipam: networkData.ipam || null,
|
||||||
|
container_count: networkData.container_count || 0,
|
||||||
|
created_at: networkData.created_at
|
||||||
|
? parseDate(networkData.created_at)
|
||||||
|
: null,
|
||||||
|
updated_at: now,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
networksProcessed++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process updates
|
||||||
|
if (updates && Array.isArray(updates)) {
|
||||||
|
console.log(`[Docker Integration] Processing ${updates.length} updates`);
|
||||||
|
for (const updateData of updates) {
|
||||||
|
// Find the image by repository and image_id
|
||||||
|
const image = await prisma.docker_images.findFirst({
|
||||||
|
where: {
|
||||||
|
repository: updateData.repository,
|
||||||
|
tag: updateData.current_tag,
|
||||||
|
image_id: updateData.image_id,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
if (image) {
|
||||||
|
// Store digest info in changelog_url field as JSON
|
||||||
|
const digestInfo = JSON.stringify({
|
||||||
|
method: "digest_comparison",
|
||||||
|
current_digest: updateData.current_digest,
|
||||||
|
available_digest: updateData.available_digest,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Upsert the update record
|
||||||
|
await prisma.docker_image_updates.upsert({
|
||||||
|
where: {
|
||||||
|
image_id_available_tag: {
|
||||||
|
image_id: image.id,
|
||||||
|
available_tag: updateData.available_tag,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
update: {
|
||||||
|
updated_at: now,
|
||||||
|
changelog_url: digestInfo,
|
||||||
|
severity: "digest_changed",
|
||||||
|
},
|
||||||
|
create: {
|
||||||
|
id: uuidv4(),
|
||||||
|
image_id: image.id,
|
||||||
|
current_tag: updateData.current_tag,
|
||||||
|
available_tag: updateData.available_tag,
|
||||||
|
severity: "digest_changed",
|
||||||
|
changelog_url: digestInfo,
|
||||||
|
updated_at: now,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
updatesProcessed++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(
|
||||||
|
`[Docker Integration] Successfully processed: ${containersProcessed} containers, ${imagesProcessed} images, ${volumesProcessed} volumes, ${networksProcessed} networks, ${updatesProcessed} updates`,
|
||||||
|
);
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
message: "Docker data collected successfully",
|
||||||
|
containers_received: containersProcessed,
|
||||||
|
images_received: imagesProcessed,
|
||||||
|
volumes_received: volumesProcessed,
|
||||||
|
networks_received: networksProcessed,
|
||||||
|
updates_found: updatesProcessed,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
console.error("[Docker Integration] Error collecting Docker data:", error);
|
||||||
|
console.error("[Docker Integration] Error stack:", error.stack);
|
||||||
|
res.status(500).json({
|
||||||
|
error: "Failed to collect Docker data",
|
||||||
|
message: error.message,
|
||||||
|
details: process.env.NODE_ENV === "development" ? error.stack : undefined,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
module.exports = router;
|
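For reference, a minimal agent-side sketch of how this endpoint could be exercised. Only the header names (X-API-ID, X-API-KEY) and the request/response field names come from the route above; the base URL, environment variable names, and sample container values are illustrative assumptions, not part of the repository:

async function reportDockerInventory() {
  const payload = {
    hostname: "web-01", // illustrative
    machine_id: "3f2c9a-example", // illustrative
    containers: [
      {
        container_id: "a1b2c3d4e5f6",
        name: "nginx",
        image_repository: "nginx",
        image_tag: "1.27",
        image_id: "sha256:abc123", // illustrative digest
        status: "running",
      },
    ],
    images: [],
    volumes: [],
    networks: [],
    updates: [],
  };

  // Header lookup in the route is lowercase, but HTTP headers are
  // case-insensitive, so X-API-ID / X-API-KEY match x-api-id / x-api-key.
  const res = await fetch("http://localhost:3001/api/v1/integrations/docker", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "X-API-ID": process.env.PATCHMON_API_ID, // hypothetical env var
      "X-API-KEY": process.env.PATCHMON_API_KEY, // hypothetical env var
    },
    body: JSON.stringify(payload),
  });
  const summary = await res.json();
  console.log(`${summary.containers_received} containers recorded`);
}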
backend/src/routes/metricsRoutes.js (new file, 148 lines)
@@ -0,0 +1,148 @@
const express = require("express");
const { body, validationResult } = require("express-validator");
const { v4: uuidv4 } = require("uuid");
const { authenticateToken } = require("../middleware/auth");
const { requireManageSettings } = require("../middleware/permissions");
const { getSettings, updateSettings } = require("../services/settingsService");
const { queueManager, QUEUE_NAMES } = require("../services/automation");

const router = express.Router();

// Get metrics settings
router.get("/", authenticateToken, requireManageSettings, async (_req, res) => {
  try {
    const settings = await getSettings();

    // Generate anonymous ID if it doesn't exist
    if (!settings.metrics_anonymous_id) {
      const anonymousId = uuidv4();
      await updateSettings(settings.id, {
        metrics_anonymous_id: anonymousId,
      });
      settings.metrics_anonymous_id = anonymousId;
    }

    res.json({
      metrics_enabled: settings.metrics_enabled ?? true,
      metrics_anonymous_id: settings.metrics_anonymous_id,
      metrics_last_sent: settings.metrics_last_sent,
    });
  } catch (error) {
    console.error("Metrics settings fetch error:", error);
    res.status(500).json({ error: "Failed to fetch metrics settings" });
  }
});

// Update metrics settings
router.put(
  "/",
  authenticateToken,
  requireManageSettings,
  [
    body("metrics_enabled")
      .isBoolean()
      .withMessage("Metrics enabled must be a boolean"),
  ],
  async (req, res) => {
    try {
      const errors = validationResult(req);
      if (!errors.isEmpty()) {
        return res.status(400).json({ errors: errors.array() });
      }

      const { metrics_enabled } = req.body;
      const settings = await getSettings();

      await updateSettings(settings.id, {
        metrics_enabled,
      });

      console.log(
        `Metrics ${metrics_enabled ? "enabled" : "disabled"} by user`,
      );

      res.json({
        message: "Metrics settings updated successfully",
        metrics_enabled,
      });
    } catch (error) {
      console.error("Metrics settings update error:", error);
      res.status(500).json({ error: "Failed to update metrics settings" });
    }
  },
);

// Regenerate anonymous ID
router.post(
  "/regenerate-id",
  authenticateToken,
  requireManageSettings,
  async (_req, res) => {
    try {
      const settings = await getSettings();
      const newAnonymousId = uuidv4();

      await updateSettings(settings.id, {
        metrics_anonymous_id: newAnonymousId,
      });

      console.log("Anonymous ID regenerated");

      res.json({
        message: "Anonymous ID regenerated successfully",
        metrics_anonymous_id: newAnonymousId,
      });
    } catch (error) {
      console.error("Anonymous ID regeneration error:", error);
      res.status(500).json({ error: "Failed to regenerate anonymous ID" });
    }
  },
);

// Manually send metrics now
router.post(
  "/send-now",
  authenticateToken,
  requireManageSettings,
  async (_req, res) => {
    try {
      const settings = await getSettings();

      if (!settings.metrics_enabled) {
        return res.status(400).json({
          error: "Metrics are disabled. Please enable metrics first.",
        });
      }

      // Trigger metrics directly (no queue delay for manual trigger)
      const metricsReporting =
        queueManager.automations[QUEUE_NAMES.METRICS_REPORTING];
      const result = await metricsReporting.process(
        { name: "manual-send" },
        false,
      );

      if (result.success) {
        console.log("✅ Manual metrics sent successfully");
        res.json({
          message: "Metrics sent successfully",
          data: result,
        });
      } else {
        console.error("❌ Failed to send metrics:", result);
        res.status(500).json({
          error: "Failed to send metrics",
          details: result.reason || result.error,
        });
      }
    } catch (error) {
      console.error("Send metrics error:", error);
      res.status(500).json({
        error: "Failed to send metrics",
        details: error.message,
      });
    }
  },
);

module.exports = router;
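A short client sketch of the metrics endpoints above. The mount path /api/v1/metrics and the bearer token are assumptions for illustration; the request and response field names come from the routes:

async function toggleMetrics(token) {
  const base = "http://localhost:3001/api/v1/metrics"; // assumed mount path
  const headers = {
    Authorization: `Bearer ${token}`, // token assumed to come from a prior login
    "Content-Type": "application/json",
  };

  // Disable anonymous metrics reporting
  await fetch(base, {
    method: "PUT",
    headers,
    body: JSON.stringify({ metrics_enabled: false }),
  });

  // Rotate the anonymous identifier
  const res = await fetch(`${base}/regenerate-id`, { method: "POST", headers });
  const { metrics_anonymous_id } = await res.json();
  return metrics_anonymous_id;
}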
@@ -1,213 +1,406 @@
-const express = require('express');
-const { PrismaClient } = require('@prisma/client');
-const { body, validationResult } = require('express-validator');
+const express = require("express");
+const { getPrismaClient } = require("../config/prisma");
 
 const router = express.Router();
-const prisma = new PrismaClient();
+const prisma = getPrismaClient();
 
 // Get all packages with their update status
-router.get('/', async (req, res) => {
+router.get("/", async (req, res) => {
   try {
     const {
       page = 1,
       limit = 50,
-      search = '',
-      category = '',
-      needsUpdate = '',
-      isSecurityUpdate = ''
+      search = "",
+      category = "",
+      needsUpdate = "",
+      isSecurityUpdate = "",
+      host = "",
     } = req.query;
 
-    const skip = (parseInt(page) - 1) * parseInt(limit);
-    const take = parseInt(limit);
+    const skip = (parseInt(page, 10) - 1) * parseInt(limit, 10);
+    const take = parseInt(limit, 10);
 
     // Build where clause
     const where = {
       AND: [
         // Search filter
-        search ? {
-          OR: [
-            { name: { contains: search, mode: 'insensitive' } },
-            { description: { contains: search, mode: 'insensitive' } }
-          ]
-        } : {},
+        search
+          ? {
+              OR: [
+                { name: { contains: search, mode: "insensitive" } },
+                { description: { contains: search, mode: "insensitive" } },
+              ],
+            }
+          : {},
         // Category filter
         category ? { category: { equals: category } } : {},
-        // Update status filters
-        needsUpdate ? {
-          hostPackages: {
-            some: {
-              needsUpdate: needsUpdate === 'true'
-            }
-          }
-        } : {},
-        isSecurityUpdate ? {
-          hostPackages: {
-            some: {
-              isSecurityUpdate: isSecurityUpdate === 'true'
-            }
-          }
-        } : {}
-      ]
-    };
+        // Host filter - only return packages installed on the specified host
+        // Combined with update status filters if both are present
+        host
+          ? {
+              host_packages: {
+                some: {
+                  host_id: host,
+                  // If needsUpdate or isSecurityUpdate filters are present, apply them here
+                  ...(needsUpdate
+                    ? { needs_update: needsUpdate === "true" }
+                    : {}),
+                  ...(isSecurityUpdate
+                    ? { is_security_update: isSecurityUpdate === "true" }
+                    : {}),
+                },
+              },
+            }
+          : {},
+        // Update status filters (only applied if no host filter)
+        // If host filter is present, these are already applied above
+        !host && needsUpdate
+          ? {
+              host_packages: {
+                some: {
+                  needs_update: needsUpdate === "true",
+                },
+              },
+            }
+          : {},
+        !host && isSecurityUpdate
+          ? {
+              host_packages: {
+                some: {
+                  is_security_update: isSecurityUpdate === "true",
+                },
+              },
+            }
+          : {},
+      ],
+    };
 
     // Get packages with counts
     const [packages, totalCount] = await Promise.all([
-      prisma.package.findMany({
+      prisma.packages.findMany({
         where,
         select: {
           id: true,
           name: true,
           description: true,
           category: true,
-          latestVersion: true,
-          createdAt: true,
+          latest_version: true,
+          created_at: true,
           _count: {
-            hostPackages: true
-          }
+            select: {
+              host_packages: true,
+            },
+          },
         },
         skip,
         take,
         orderBy: {
-          name: 'asc'
-        }
+          name: "asc",
+        },
       }),
-      prisma.package.count({ where })
+      prisma.packages.count({ where }),
     ]);
 
-    // Get additional stats for each package
-    const packagesWithStats = await Promise.all(
-      packages.map(async (pkg) => {
-        const [updatesCount, securityCount, affectedHosts] = await Promise.all([
-          prisma.hostPackage.count({
-            where: {
-              packageId: pkg.id,
-              needsUpdate: true
-            }
-          }),
-          prisma.hostPackage.count({
-            where: {
-              packageId: pkg.id,
-              needsUpdate: true,
-              isSecurityUpdate: true
-            }
-          }),
-          prisma.hostPackage.findMany({
-            where: {
-              packageId: pkg.id,
-              needsUpdate: true
-            },
-            select: {
-              host: {
-                select: {
-                  id: true,
-                  hostname: true,
-                  osType: true
-                }
-              }
-            },
-            take: 10 // Limit to first 10 for performance
-          })
-        ]);
-
-        return {
-          ...pkg,
-          stats: {
-            totalInstalls: pkg._count.hostPackages,
-            updatesNeeded: updatesCount,
-            securityUpdates: securityCount,
-            affectedHosts: affectedHosts.map(hp => hp.host)
-          }
-        };
-      })
-    );
+    // OPTIMIZATION: Batch query all stats instead of N individual queries
+    const packageIds = packages.map((pkg) => pkg.id);
+
+    // Get all counts and host data in 3 batch queries instead of N*3 queries
+    const [allUpdatesCounts, allSecurityCounts, allPackageHostsData] =
+      await Promise.all([
+        // Batch count all packages that need updates
+        prisma.host_packages.groupBy({
+          by: ["package_id"],
+          where: {
+            package_id: { in: packageIds },
+            needs_update: true,
+            ...(host ? { host_id: host } : {}),
+          },
+          _count: { id: true },
+        }),
+        // Batch count all packages with security updates
+        prisma.host_packages.groupBy({
+          by: ["package_id"],
+          where: {
+            package_id: { in: packageIds },
+            needs_update: true,
+            is_security_update: true,
+            ...(host ? { host_id: host } : {}),
+          },
+          _count: { id: true },
+        }),
+        // Batch fetch all host data for packages
+        prisma.host_packages.findMany({
+          where: {
+            package_id: { in: packageIds },
+            ...(host ? { host_id: host } : { needs_update: true }),
+          },
+          select: {
+            package_id: true,
+            hosts: {
+              select: {
+                id: true,
+                friendly_name: true,
+                hostname: true,
+                os_type: true,
+              },
+            },
+            current_version: true,
+            available_version: true,
+            needs_update: true,
+            is_security_update: true,
+          },
+          // Limit to first 10 per package
+          take: 100, // Increased from package-based limit
+        }),
+      ]);
 
-    res.json({
-      packages: packagesWithStats,
-      pagination: {
-        page: parseInt(page),
-        limit: parseInt(limit),
-        total: totalCount,
-        pages: Math.ceil(totalCount / parseInt(limit))
-      }
-    });
-  } catch (error) {
-    console.error('Error fetching packages:', error);
-    res.status(500).json({ error: 'Failed to fetch packages' });
-  }
+    // Create lookup maps for O(1) access
+    const updatesCountMap = new Map(
+      allUpdatesCounts.map((item) => [item.package_id, item._count.id]),
+    );
+    const securityCountMap = new Map(
+      allSecurityCounts.map((item) => [item.package_id, item._count.id]),
+    );
+    const packageHostsMap = new Map();
+
+    // Group host data by package_id
+    for (const hp of allPackageHostsData) {
+      if (!packageHostsMap.has(hp.package_id)) {
+        packageHostsMap.set(hp.package_id, []);
+      }
+      const hosts = packageHostsMap.get(hp.package_id);
+      hosts.push({
+        hostId: hp.hosts.id,
+        friendlyName: hp.hosts.friendly_name,
+        osType: hp.hosts.os_type,
+        currentVersion: hp.current_version,
+        availableVersion: hp.available_version,
+        needsUpdate: hp.needs_update,
+        isSecurityUpdate: hp.is_security_update,
+      });
+
+      // Limit to 10 hosts per package
+      if (hosts.length > 10) {
+        packageHostsMap.set(hp.package_id, hosts.slice(0, 10));
+      }
+    }
+
+    // Map packages with stats from lookup maps (no more DB queries!)
+    const packagesWithStats = packages.map((pkg) => {
+      const updatesCount = updatesCountMap.get(pkg.id) || 0;
+      const securityCount = securityCountMap.get(pkg.id) || 0;
+      const packageHosts = packageHostsMap.get(pkg.id) || [];
+
+      return {
+        ...pkg,
+        packageHostsCount: pkg._count.host_packages,
+        packageHosts,
+        stats: {
+          totalInstalls: pkg._count.host_packages,
+          updatesNeeded: updatesCount,
+          securityUpdates: securityCount,
+        },
+      };
+    });
+
+    res.json({
+      packages: packagesWithStats,
+      pagination: {
+        page: parseInt(page, 10),
+        limit: parseInt(limit, 10),
+        total: totalCount,
+        pages: Math.ceil(totalCount / parseInt(limit, 10)),
+      },
+    });
+  } catch (error) {
+    console.error("Error fetching packages:", error);
+    res.status(500).json({ error: "Failed to fetch packages" });
+  }
 });
||||||
// Get package details by ID
|
// Get package details by ID
|
||||||
router.get('/:packageId', async (req, res) => {
|
router.get("/:packageId", async (req, res) => {
|
||||||
try {
|
try {
|
||||||
const { packageId } = req.params;
|
const { packageId } = req.params;
|
||||||
|
|
||||||
const packageData = await prisma.package.findUnique({
|
const packageData = await prisma.packages.findUnique({
|
||||||
where: { id: packageId },
|
where: { id: packageId },
|
||||||
include: {
|
include: {
|
||||||
hostPackages: {
|
host_packages: {
|
||||||
include: {
|
include: {
|
||||||
host: {
|
hosts: {
|
||||||
select: {
|
select: {
|
||||||
id: true,
|
id: true,
|
||||||
hostname: true,
|
hostname: true,
|
||||||
ip: true,
|
ip: true,
|
||||||
osType: true,
|
os_type: true,
|
||||||
osVersion: true,
|
os_version: true,
|
||||||
lastUpdate: true
|
last_update: true,
|
||||||
}
|
},
|
||||||
}
|
},
|
||||||
},
|
},
|
||||||
orderBy: {
|
orderBy: {
|
||||||
needsUpdate: 'desc'
|
needs_update: "desc",
|
||||||
}
|
},
|
||||||
}
|
},
|
||||||
}
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
if (!packageData) {
|
if (!packageData) {
|
||||||
return res.status(404).json({ error: 'Package not found' });
|
return res.status(404).json({ error: "Package not found" });
|
||||||
}
|
}
|
||||||
|
|
||||||
// Calculate statistics
|
// Calculate statistics
|
||||||
const stats = {
|
const stats = {
|
||||||
totalInstalls: packageData.hostPackages.length,
|
totalInstalls: packageData.host_packages.length,
|
||||||
updatesNeeded: packageData.hostPackages.filter(hp => hp.needsUpdate).length,
|
updatesNeeded: packageData.host_packages.filter((hp) => hp.needs_update)
|
||||||
securityUpdates: packageData.hostPackages.filter(hp => hp.needsUpdate && hp.isSecurityUpdate).length,
|
.length,
|
||||||
upToDate: packageData.hostPackages.filter(hp => !hp.needsUpdate).length
|
securityUpdates: packageData.host_packages.filter(
|
||||||
};
|
(hp) => hp.needs_update && hp.is_security_update,
|
||||||
|
).length,
|
||||||
|
upToDate: packageData.host_packages.filter((hp) => !hp.needs_update)
|
||||||
|
.length,
|
||||||
|
};
|
||||||
|
|
||||||
// Group by version
|
// Group by version
|
||||||
const versionDistribution = packageData.hostPackages.reduce((acc, hp) => {
|
const versionDistribution = packageData.host_packages.reduce((acc, hp) => {
|
||||||
const version = hp.currentVersion;
|
const version = hp.current_version;
|
||||||
acc[version] = (acc[version] || 0) + 1;
|
acc[version] = (acc[version] || 0) + 1;
|
||||||
return acc;
|
return acc;
|
||||||
}, {});
|
}, {});
|
||||||
|
|
||||||
// Group by OS type
|
// Group by OS type
|
||||||
const osDistribution = packageData.hostPackages.reduce((acc, hp) => {
|
const osDistribution = packageData.host_packages.reduce((acc, hp) => {
|
||||||
const osType = hp.host.osType;
|
const osType = hp.hosts.os_type;
|
||||||
acc[osType] = (acc[osType] || 0) + 1;
|
acc[osType] = (acc[osType] || 0) + 1;
|
||||||
return acc;
|
return acc;
|
||||||
}, {});
|
}, {});
|
||||||
|
|
||||||
res.json({
|
res.json({
|
||||||
...packageData,
|
...packageData,
|
||||||
stats,
|
stats,
|
||||||
distributions: {
|
distributions: {
|
||||||
versions: Object.entries(versionDistribution).map(([version, count]) => ({
|
versions: Object.entries(versionDistribution).map(
|
||||||
version,
|
([version, count]) => ({
|
||||||
count
|
version,
|
||||||
})),
|
count,
|
||||||
osTypes: Object.entries(osDistribution).map(([osType, count]) => ({
|
}),
|
||||||
osType,
|
),
|
||||||
count
|
osTypes: Object.entries(osDistribution).map(([osType, count]) => ({
|
||||||
}))
|
osType,
|
||||||
}
|
count,
|
||||||
});
|
})),
|
||||||
} catch (error) {
|
},
|
||||||
console.error('Error fetching package details:', error);
|
});
|
||||||
res.status(500).json({ error: 'Failed to fetch package details' });
|
} catch (error) {
|
||||||
}
|
console.error("Error fetching package details:", error);
|
||||||
|
res.status(500).json({ error: "Failed to fetch package details" });
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Get hosts where a package is installed
|
||||||
|
router.get("/:packageId/hosts", async (req, res) => {
|
||||||
|
try {
|
||||||
|
const { packageId } = req.params;
|
||||||
|
const {
|
||||||
|
page = 1,
|
||||||
|
limit = 25,
|
||||||
|
search = "",
|
||||||
|
sortBy = "friendly_name",
|
||||||
|
sortOrder = "asc",
|
||||||
|
} = req.query;
|
||||||
|
|
||||||
|
const offset = (parseInt(page, 10) - 1) * parseInt(limit, 10);
|
||||||
|
|
||||||
|
// Build search conditions
|
||||||
|
const searchConditions = search
|
||||||
|
? {
|
||||||
|
OR: [
|
||||||
|
{
|
||||||
|
hosts: {
|
||||||
|
friendly_name: { contains: search, mode: "insensitive" },
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{ hosts: { hostname: { contains: search, mode: "insensitive" } } },
|
||||||
|
{ current_version: { contains: search, mode: "insensitive" } },
|
||||||
|
{ available_version: { contains: search, mode: "insensitive" } },
|
||||||
|
],
|
||||||
|
}
|
||||||
|
: {};
|
||||||
|
|
||||||
|
// Build sort conditions
|
||||||
|
const orderBy = {};
|
||||||
|
if (
|
||||||
|
sortBy === "friendly_name" ||
|
||||||
|
sortBy === "hostname" ||
|
||||||
|
sortBy === "os_type"
|
||||||
|
) {
|
||||||
|
orderBy.hosts = { [sortBy]: sortOrder };
|
||||||
|
} else if (sortBy === "needs_update") {
|
||||||
|
orderBy[sortBy] = sortOrder;
|
||||||
|
} else {
|
||||||
|
orderBy[sortBy] = sortOrder;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get total count
|
||||||
|
const totalCount = await prisma.host_packages.count({
|
||||||
|
where: {
|
||||||
|
package_id: packageId,
|
||||||
|
...searchConditions,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
// Get paginated results
|
||||||
|
const hostPackages = await prisma.host_packages.findMany({
|
||||||
|
where: {
|
||||||
|
package_id: packageId,
|
||||||
|
...searchConditions,
|
||||||
|
},
|
||||||
|
include: {
|
||||||
|
hosts: {
|
||||||
|
select: {
|
||||||
|
id: true,
|
||||||
|
friendly_name: true,
|
||||||
|
hostname: true,
|
||||||
|
os_type: true,
|
||||||
|
os_version: true,
|
||||||
|
last_update: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
orderBy,
|
||||||
|
skip: offset,
|
||||||
|
take: parseInt(limit, 10),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Transform the data for the frontend
|
||||||
|
const hosts = hostPackages.map((hp) => ({
|
||||||
|
hostId: hp.hosts.id,
|
||||||
|
friendlyName: hp.hosts.friendly_name,
|
||||||
|
hostname: hp.hosts.hostname,
|
||||||
|
osType: hp.hosts.os_type,
|
||||||
|
osVersion: hp.hosts.os_version,
|
||||||
|
lastUpdate: hp.hosts.last_update,
|
||||||
|
currentVersion: hp.current_version,
|
||||||
|
availableVersion: hp.available_version,
|
||||||
|
needsUpdate: hp.needs_update,
|
||||||
|
isSecurityUpdate: hp.is_security_update,
|
||||||
|
lastChecked: hp.last_checked,
|
||||||
|
}));
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
hosts,
|
||||||
|
pagination: {
|
||||||
|
page: parseInt(page, 10),
|
||||||
|
limit: parseInt(limit, 10),
|
||||||
|
total: totalCount,
|
||||||
|
pages: Math.ceil(totalCount / parseInt(limit, 10)),
|
||||||
|
},
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
console.error("Error fetching package hosts:", error);
|
||||||
|
res.status(500).json({ error: "Failed to fetch package hosts" });
|
||||||
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
module.exports = router;
|
module.exports = router;
|
||||||
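A hypothetical consumer of the list endpoint, showing the query parameters the route accepts (the mount path is an assumption; the parameter and response field names come from the handler):

async function listOutdatedOnHost(hostId) {
  const url =
    "http://localhost:3001/api/v1/packages" + // assumed mount path
    `?page=1&limit=50&needsUpdate=true&host=${hostId}`;
  const { packages, pagination } = await (await fetch(url)).json();
  console.log(`${pagination.total} packages need updates on this host`);
  return packages;
}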
@@ -1,173 +1,203 @@
-const express = require('express');
-const { PrismaClient } = require('@prisma/client');
-const { authenticateToken, requireAdmin } = require('../middleware/auth');
-const { requireManageSettings } = require('../middleware/permissions');
+const express = require("express");
+const { getPrismaClient } = require("../config/prisma");
+const { authenticateToken } = require("../middleware/auth");
+const {
+  requireManageSettings,
+  requireManageUsers,
+} = require("../middleware/permissions");
 
 const router = express.Router();
-const prisma = new PrismaClient();
+const prisma = getPrismaClient();
 
-// Get all role permissions
-router.get('/roles', authenticateToken, requireManageSettings, async (req, res) => {
-  try {
-    const permissions = await prisma.rolePermissions.findMany({
-      orderBy: {
-        role: 'asc'
-      }
-    });
-
-    res.json(permissions);
-  } catch (error) {
-    console.error('Get role permissions error:', error);
-    res.status(500).json({ error: 'Failed to fetch role permissions' });
-  }
-});
+// Get all role permissions (allow users who can manage users to view roles)
+router.get(
+  "/roles",
+  authenticateToken,
+  requireManageUsers,
+  async (_req, res) => {
+    try {
+      const permissions = await prisma.role_permissions.findMany({
+        orderBy: {
+          role: "asc",
+        },
+      });
+
+      res.json(permissions);
+    } catch (error) {
+      console.error("Get role permissions error:", error);
+      res.status(500).json({ error: "Failed to fetch role permissions" });
+    }
+  },
+);
 
 // Get permissions for a specific role
-router.get('/roles/:role', authenticateToken, requireManageSettings, async (req, res) => {
-  try {
-    const { role } = req.params;
-
-    const permissions = await prisma.rolePermissions.findUnique({
-      where: { role }
-    });
-
-    if (!permissions) {
-      return res.status(404).json({ error: 'Role not found' });
-    }
-
-    res.json(permissions);
-  } catch (error) {
-    console.error('Get role permission error:', error);
-    res.status(500).json({ error: 'Failed to fetch role permission' });
-  }
-});
+router.get(
+  "/roles/:role",
+  authenticateToken,
+  requireManageSettings,
+  async (req, res) => {
+    try {
+      const { role } = req.params;
+
+      const permissions = await prisma.role_permissions.findUnique({
+        where: { role },
+      });
+
+      if (!permissions) {
+        return res.status(404).json({ error: "Role not found" });
+      }
+
+      res.json(permissions);
+    } catch (error) {
+      console.error("Get role permission error:", error);
+      res.status(500).json({ error: "Failed to fetch role permission" });
+    }
+  },
+);
 
 // Create or update role permissions
-router.put('/roles/:role', authenticateToken, requireManageSettings, async (req, res) => {
-  try {
-    const { role } = req.params;
-    const {
-      canViewDashboard,
-      canViewHosts,
-      canManageHosts,
-      canViewPackages,
-      canManagePackages,
-      canViewUsers,
-      canManageUsers,
-      canViewReports,
-      canExportData,
-      canManageSettings
-    } = req.body;
-
-    // Prevent modifying admin role permissions (admin should always have full access)
-    if (role === 'admin') {
-      return res.status(400).json({ error: 'Cannot modify admin role permissions' });
-    }
-
-    const permissions = await prisma.rolePermissions.upsert({
-      where: { role },
-      update: {
-        canViewDashboard,
-        canViewHosts,
-        canManageHosts,
-        canViewPackages,
-        canManagePackages,
-        canViewUsers,
-        canManageUsers,
-        canViewReports,
-        canExportData,
-        canManageSettings
-      },
-      create: {
-        role,
-        canViewDashboard,
-        canViewHosts,
-        canManageHosts,
-        canViewPackages,
-        canManagePackages,
-        canViewUsers,
-        canManageUsers,
-        canViewReports,
-        canExportData,
-        canManageSettings
-      }
-    });
-
-    res.json({
-      message: 'Role permissions updated successfully',
-      permissions
-    });
-  } catch (error) {
-    console.error('Update role permissions error:', error);
-    res.status(500).json({ error: 'Failed to update role permissions' });
-  }
-});
+router.put(
+  "/roles/:role",
+  authenticateToken,
+  requireManageSettings,
+  async (req, res) => {
+    try {
+      const { role } = req.params;
+      const {
+        can_view_dashboard,
+        can_view_hosts,
+        can_manage_hosts,
+        can_view_packages,
+        can_manage_packages,
+        can_view_users,
+        can_manage_users,
+        can_view_reports,
+        can_export_data,
+        can_manage_settings,
+      } = req.body;
+
+      // Prevent modifying admin and user role permissions (built-in roles)
+      if (role === "admin" || role === "user") {
+        return res.status(400).json({
+          error: `Cannot modify ${role} role permissions - this is a built-in role`,
+        });
+      }
+
+      const permissions = await prisma.role_permissions.upsert({
+        where: { role },
+        update: {
+          can_view_dashboard: can_view_dashboard,
+          can_view_hosts: can_view_hosts,
+          can_manage_hosts: can_manage_hosts,
+          can_view_packages: can_view_packages,
+          can_manage_packages: can_manage_packages,
+          can_view_users: can_view_users,
+          can_manage_users: can_manage_users,
+          can_view_reports: can_view_reports,
+          can_export_data: can_export_data,
+          can_manage_settings: can_manage_settings,
+          updated_at: new Date(),
+        },
+        create: {
+          id: require("uuid").v4(),
+          role,
+          can_view_dashboard: can_view_dashboard,
+          can_view_hosts: can_view_hosts,
+          can_manage_hosts: can_manage_hosts,
+          can_view_packages: can_view_packages,
+          can_manage_packages: can_manage_packages,
+          can_view_users: can_view_users,
+          can_manage_users: can_manage_users,
+          can_view_reports: can_view_reports,
+          can_export_data: can_export_data,
+          can_manage_settings: can_manage_settings,
+          updated_at: new Date(),
+        },
+      });
+
+      res.json({
+        message: "Role permissions updated successfully",
+        permissions,
+      });
+    } catch (error) {
+      console.error("Update role permissions error:", error);
+      res.status(500).json({ error: "Failed to update role permissions" });
+    }
+  },
+);
 
 // Delete a role (and its permissions)
-router.delete('/roles/:role', authenticateToken, requireManageSettings, async (req, res) => {
-  try {
-    const { role } = req.params;
-
-    // Prevent deleting admin role
-    if (role === 'admin') {
-      return res.status(400).json({ error: 'Cannot delete admin role' });
-    }
-
-    // Check if any users are using this role
-    const usersWithRole = await prisma.user.count({
-      where: { role }
-    });
-
-    if (usersWithRole > 0) {
-      return res.status(400).json({
-        error: `Cannot delete role "${role}" because ${usersWithRole} user(s) are currently using it`
-      });
-    }
-
-    await prisma.rolePermissions.delete({
-      where: { role }
-    });
-
-    res.json({
-      message: `Role "${role}" deleted successfully`
-    });
-  } catch (error) {
-    console.error('Delete role error:', error);
-    res.status(500).json({ error: 'Failed to delete role' });
-  }
-});
+router.delete(
+  "/roles/:role",
+  authenticateToken,
+  requireManageSettings,
+  async (req, res) => {
+    try {
+      const { role } = req.params;
+
+      // Prevent deleting admin and user roles (built-in roles)
+      if (role === "admin" || role === "user") {
+        return res.status(400).json({
+          error: `Cannot delete ${role} role - this is a built-in role`,
+        });
+      }
+
+      // Check if any users are using this role
+      const usersWithRole = await prisma.users.count({
+        where: { role },
+      });
+
+      if (usersWithRole > 0) {
+        return res.status(400).json({
+          error: `Cannot delete role "${role}" because ${usersWithRole} user(s) are currently using it`,
+        });
+      }
+
+      await prisma.role_permissions.delete({
+        where: { role },
+      });
+
+      res.json({
+        message: `Role "${role}" deleted successfully`,
+      });
+    } catch (error) {
+      console.error("Delete role error:", error);
+      res.status(500).json({ error: "Failed to delete role" });
+    }
+  },
+);
 
 // Get user's permissions based on their role
-router.get('/user-permissions', authenticateToken, async (req, res) => {
+router.get("/user-permissions", authenticateToken, async (req, res) => {
   try {
     const userRole = req.user.role;
 
-    const permissions = await prisma.rolePermissions.findUnique({
-      where: { role: userRole }
-    });
+    const permissions = await prisma.role_permissions.findUnique({
+      where: { role: userRole },
+    });
 
     if (!permissions) {
       // If no specific permissions found, return default admin permissions
       return res.json({
         role: userRole,
-        canViewDashboard: true,
-        canViewHosts: true,
-        canManageHosts: true,
-        canViewPackages: true,
-        canManagePackages: true,
-        canViewUsers: true,
-        canManageUsers: true,
-        canViewReports: true,
-        canExportData: true,
-        canManageSettings: true,
+        can_view_dashboard: true,
+        can_view_hosts: true,
+        can_manage_hosts: true,
+        can_view_packages: true,
+        can_manage_packages: true,
+        can_view_users: true,
+        can_manage_users: true,
+        can_view_reports: true,
+        can_export_data: true,
+        can_manage_settings: true,
      });
    }
 
     res.json(permissions);
   } catch (error) {
-    console.error('Get user permissions error:', error);
-    res.status(500).json({ error: 'Failed to fetch user permissions' });
+    console.error("Get user permissions error:", error);
+    res.status(500).json({ error: "Failed to fetch user permissions" });
   }
 });
 
 module.exports = router;
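The routes above lean on requireManageSettings and requireManageUsers from ../middleware/permissions, which is not shown in this diff. Purely as an assumption about how such a gate could consult the role_permissions flags, a plausible shape is:

// Hypothetical permission gate; the project's real middleware may differ.
const requireFlag = (flag) => async (req, res, next) => {
  try {
    const prisma = getPrismaClient(); // same helper the routes use
    const perms = await prisma.role_permissions.findUnique({
      where: { role: req.user.role },
    });
    if (!perms || !perms[flag]) {
      return res.status(403).json({ error: "Insufficient permissions" });
    }
    next();
  } catch (err) {
    next(err);
  }
};

// Usage (illustrative):
// router.put("/roles/:role", authenticateToken, requireFlag("can_manage_settings"), handler);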
|
|||||||
@@ -1,301 +1,418 @@
|
|||||||
const express = require('express');
|
const express = require("express");
|
||||||
const { body, validationResult } = require('express-validator');
|
const { body, validationResult } = require("express-validator");
|
||||||
const { PrismaClient } = require('@prisma/client');
|
const { getPrismaClient } = require("../config/prisma");
|
||||||
const { authenticateToken } = require('../middleware/auth');
|
const { authenticateToken } = require("../middleware/auth");
|
||||||
const { requireViewHosts, requireManageHosts } = require('../middleware/permissions');
|
const {
|
||||||
|
requireViewHosts,
|
||||||
|
requireManageHosts,
|
||||||
|
} = require("../middleware/permissions");
|
||||||
|
|
||||||
const router = express.Router();
|
const router = express.Router();
|
||||||
const prisma = new PrismaClient();
|
const prisma = getPrismaClient();
|
||||||
|
|
||||||
// Get all repositories with host count
|
// Get all repositories with host count
|
||||||
router.get('/', authenticateToken, requireViewHosts, async (req, res) => {
|
router.get("/", authenticateToken, requireViewHosts, async (_req, res) => {
|
||||||
try {
|
try {
|
||||||
const repositories = await prisma.repository.findMany({
|
const repositories = await prisma.repositories.findMany({
|
||||||
include: {
|
include: {
|
||||||
hostRepositories: {
|
host_repositories: {
|
||||||
include: {
|
include: {
|
||||||
host: {
|
hosts: {
|
||||||
select: {
|
select: {
|
||||||
id: true,
|
id: true,
|
||||||
hostname: true,
|
friendly_name: true,
|
||||||
status: true
|
status: true,
|
||||||
}
|
},
|
||||||
}
|
},
|
||||||
}
|
},
|
||||||
},
|
},
|
||||||
_count: {
|
_count: {
|
||||||
select: {
|
select: {
|
||||||
hostRepositories: true
|
host_repositories: true,
|
||||||
}
|
},
|
||||||
}
|
},
|
||||||
},
|
},
|
||||||
orderBy: [
|
orderBy: [{ name: "asc" }, { url: "asc" }],
|
||||||
{ name: 'asc' },
|
});
|
||||||
{ url: 'asc' }
|
|
||||||
]
|
|
||||||
});
|
|
||||||
|
|
||||||
// Transform data to include host counts and status
|
// Transform data to include host counts and status
|
||||||
const transformedRepos = repositories.map(repo => ({
|
const transformedRepos = repositories.map((repo) => ({
|
||||||
...repo,
|
...repo,
|
||||||
hostCount: repo._count.hostRepositories,
|
hostCount: repo._count.host_repositories,
|
||||||
enabledHostCount: repo.hostRepositories.filter(hr => hr.isEnabled).length,
|
enabledHostCount: repo.host_repositories.filter((hr) => hr.is_enabled)
|
||||||
activeHostCount: repo.hostRepositories.filter(hr => hr.host.status === 'active').length,
|
.length,
|
||||||
hosts: repo.hostRepositories.map(hr => ({
|
activeHostCount: repo.host_repositories.filter(
|
||||||
id: hr.host.id,
|
(hr) => hr.hosts.status === "active",
|
||||||
hostname: hr.host.hostname,
|
).length,
|
||||||
status: hr.host.status,
|
hosts: repo.host_repositories.map((hr) => ({
|
||||||
isEnabled: hr.isEnabled,
|
id: hr.hosts.id,
|
||||||
lastChecked: hr.lastChecked
|
friendlyName: hr.hosts.friendly_name,
|
||||||
}))
|
status: hr.hosts.status,
|
||||||
}));
|
isEnabled: hr.is_enabled,
|
||||||
|
lastChecked: hr.last_checked,
|
||||||
|
})),
|
||||||
|
}));
|
||||||
|
|
||||||
res.json(transformedRepos);
|
res.json(transformedRepos);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Repository list error:', error);
|
console.error("Repository list error:", error);
|
||||||
res.status(500).json({ error: 'Failed to fetch repositories' });
|
res.status(500).json({ error: "Failed to fetch repositories" });
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
// Get repositories for a specific host
|
// Get repositories for a specific host
|
||||||
router.get('/host/:hostId', authenticateToken, requireViewHosts, async (req, res) => {
|
router.get(
|
||||||
try {
|
"/host/:hostId",
|
||||||
const { hostId } = req.params;
|
authenticateToken,
|
||||||
|
requireViewHosts,
|
||||||
|
async (req, res) => {
|
||||||
|
try {
|
||||||
|
const { hostId } = req.params;
|
||||||
|
|
||||||
const hostRepositories = await prisma.hostRepository.findMany({
|
const hostRepositories = await prisma.host_repositories.findMany({
|
||||||
where: { hostId },
|
where: { host_id: hostId },
|
||||||
include: {
|
include: {
|
||||||
repository: true,
|
repositories: true,
|
||||||
host: {
|
hosts: {
|
||||||
select: {
|
select: {
|
||||||
id: true,
|
id: true,
|
||||||
hostname: true
|
friendly_name: true,
|
||||||
}
|
},
|
||||||
}
|
},
|
||||||
},
|
},
|
||||||
orderBy: {
|
orderBy: {
|
||||||
repository: {
|
repositories: {
|
||||||
name: 'asc'
|
name: "asc",
|
||||||
}
|
},
|
||||||
}
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
res.json(hostRepositories);
|
res.json(hostRepositories);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Host repositories error:', error);
|
console.error("Host repositories error:", error);
|
||||||
res.status(500).json({ error: 'Failed to fetch host repositories' });
|
res.status(500).json({ error: "Failed to fetch host repositories" });
|
||||||
}
|
}
|
||||||
});
|
},
|
||||||
|
);
|
||||||
|
|
||||||
// Get repository details with all hosts
|
// Get repository details with all hosts
|
||||||
router.get('/:repositoryId', authenticateToken, requireViewHosts, async (req, res) => {
|
router.get(
|
||||||
try {
|
"/:repositoryId",
|
||||||
const { repositoryId } = req.params;
|
authenticateToken,
|
||||||
|
requireViewHosts,
|
||||||
|
async (req, res) => {
|
||||||
|
try {
|
||||||
|
const { repositoryId } = req.params;
|
||||||
|
|
||||||
const repository = await prisma.repository.findUnique({
|
const repository = await prisma.repositories.findUnique({
|
||||||
where: { id: repositoryId },
|
where: { id: repositoryId },
|
||||||
include: {
|
include: {
|
||||||
hostRepositories: {
|
host_repositories: {
|
||||||
include: {
|
include: {
|
||||||
host: {
|
hosts: {
|
||||||
select: {
|
select: {
|
||||||
id: true,
|
id: true,
|
||||||
hostname: true,
|
friendly_name: true,
|
||||||
ip: true,
|
hostname: true,
|
||||||
osType: true,
|
ip: true,
|
||||||
osVersion: true,
|
os_type: true,
|
||||||
status: true,
|
os_version: true,
|
||||||
lastUpdate: true
|
status: true,
|
||||||
}
|
last_update: true,
|
||||||
}
|
},
|
||||||
},
|
},
|
||||||
orderBy: {
|
},
|
||||||
host: {
|
orderBy: {
|
||||||
hostname: 'asc'
|
hosts: {
|
||||||
}
|
friendly_name: "asc",
|
||||||
}
|
},
|
||||||
}
|
},
|
||||||
}
|
},
|
||||||
});
|
},
|
||||||
|
});
|
||||||
|
|
||||||
if (!repository) {
|
if (!repository) {
|
||||||
return res.status(404).json({ error: 'Repository not found' });
|
return res.status(404).json({ error: "Repository not found" });
|
||||||
}
|
}
|
||||||
|
|
||||||
res.json(repository);
|
res.json(repository);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Repository detail error:', error);
|
console.error("Repository detail error:", error);
|
||||||
res.status(500).json({ error: 'Failed to fetch repository details' });
|
res.status(500).json({ error: "Failed to fetch repository details" });
|
||||||
}
|
}
|
||||||
});
|
},
|
||||||
|
);
|
||||||
|
|
||||||
// Update repository information (admin only)
|
// Update repository information (admin only)
|
||||||
router.put('/:repositoryId', authenticateToken, requireManageHosts, [
|
router.put(
|
||||||
body('name').optional().isLength({ min: 1 }).withMessage('Name is required'),
|
"/:repositoryId",
|
||||||
body('description').optional(),
|
authenticateToken,
|
||||||
body('isActive').optional().isBoolean().withMessage('isActive must be a boolean'),
|
requireManageHosts,
|
||||||
body('priority').optional().isInt({ min: 0 }).withMessage('Priority must be a positive integer')
|
[
|
||||||
], async (req, res) => {
|
body("name")
|
||||||
try {
|
.optional()
|
||||||
const errors = validationResult(req);
|
.isLength({ min: 1 })
|
||||||
if (!errors.isEmpty()) {
|
.withMessage("Name is required"),
|
||||||
return res.status(400).json({ errors: errors.array() });
|
body("description").optional(),
|
||||||
}
|
body("isActive")
|
||||||
|
.optional()
|
||||||
|
.isBoolean()
|
||||||
|
.withMessage("isActive must be a boolean"),
|
||||||
|
body("priority")
|
||||||
|
.optional()
|
||||||
|
.isInt({ min: 0 })
|
||||||
|
.withMessage("Priority must be a positive integer"),
|
||||||
|
],
|
||||||
|
async (req, res) => {
|
||||||
|
try {
|
||||||
|
const errors = validationResult(req);
|
||||||
|
if (!errors.isEmpty()) {
|
||||||
|
return res.status(400).json({ errors: errors.array() });
|
||||||
|
}
|
||||||
|
|
||||||
const { repositoryId } = req.params;
|
const { repositoryId } = req.params;
|
||||||
const { name, description, isActive, priority } = req.body;
|
const { name, description, isActive, priority } = req.body;
|
||||||
|
|
||||||
const repository = await prisma.repository.update({
|
const repository = await prisma.repositories.update({
|
||||||
where: { id: repositoryId },
|
where: { id: repositoryId },
|
||||||
data: {
|
data: {
|
||||||
...(name && { name }),
|
...(name && { name }),
|
||||||
...(description !== undefined && { description }),
|
...(description !== undefined && { description }),
|
||||||
...(isActive !== undefined && { isActive }),
|
...(isActive !== undefined && { is_active: isActive }),
|
||||||
...(priority !== undefined && { priority })
|
...(priority !== undefined && { priority }),
|
||||||
},
|
},
|
||||||
include: {
|
include: {
|
||||||
_count: {
|
_count: {
|
||||||
select: {
|
select: {
|
||||||
hostRepositories: true
|
host_repositories: true,
|
||||||
}
|
},
|
||||||
}
|
},
|
||||||
}
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
res.json(repository);
|
res.json(repository);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Repository update error:', error);
|
console.error("Repository update error:", error);
|
||||||
res.status(500).json({ error: 'Failed to update repository' });
|
res.status(500).json({ error: "Failed to update repository" });
|
||||||
}
|
}
|
||||||
});
|
},
|
||||||
|
);
|
||||||
|
|
||||||
// Toggle repository status for a specific host
|
// Toggle repository status for a specific host
|
||||||
router.patch('/host/:hostId/repository/:repositoryId', authenticateToken, requireManageHosts, [
|
router.patch(
|
||||||
body('isEnabled').isBoolean().withMessage('isEnabled must be a boolean')
|
"/host/:hostId/repository/:repositoryId",
|
||||||
], async (req, res) => {
|
authenticateToken,
|
||||||
try {
|
requireManageHosts,
|
||||||
const errors = validationResult(req);
|
[body("isEnabled").isBoolean().withMessage("isEnabled must be a boolean")],
|
||||||
if (!errors.isEmpty()) {
|
async (req, res) => {
|
||||||
return res.status(400).json({ errors: errors.array() });
|
try {
|
||||||
}
|
const errors = validationResult(req);
|
||||||
|
if (!errors.isEmpty()) {
|
||||||
|
return res.status(400).json({ errors: errors.array() });
|
||||||
|
}
|
||||||
|
|
||||||
const { hostId, repositoryId } = req.params;
|
const { hostId, repositoryId } = req.params;
|
||||||
const { isEnabled } = req.body;
|
const { isEnabled } = req.body;
|
||||||
|
|
||||||
const hostRepository = await prisma.hostRepository.update({
|
const hostRepository = await prisma.host_repositories.update({
|
||||||
where: {
|
where: {
|
||||||
hostId_repositoryId: {
|
host_id_repository_id: {
|
||||||
hostId,
|
host_id: hostId,
|
||||||
repositoryId
|
repository_id: repositoryId,
|
||||||
}
|
},
|
||||||
},
|
},
|
||||||
data: {
|
data: {
|
||||||
isEnabled,
|
is_enabled: isEnabled,
|
||||||
lastChecked: new Date()
|
last_checked: new Date(),
|
||||||
},
|
},
|
||||||
include: {
|
include: {
|
||||||
repository: true,
|
repositories: true,
|
||||||
host: {
|
hosts: {
|
||||||
select: {
|
select: {
|
||||||
hostname: true
|
friendly_name: true,
|
||||||
}
|
},
|
||||||
}
|
},
|
||||||
}
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
res.json({
|
res.json({
|
||||||
message: `Repository ${isEnabled ? 'enabled' : 'disabled'} for host ${hostRepository.host.hostname}`,
|
message: `Repository ${isEnabled ? "enabled" : "disabled"} for host ${hostRepository.hosts.friendly_name}`,
|
||||||
hostRepository
|
hostRepository,
|
||||||
});
|
});
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Host repository toggle error:', error);
|
console.error("Host repository toggle error:", error);
|
||||||
res.status(500).json({ error: 'Failed to toggle repository status' });
|
res.status(500).json({ error: "Failed to toggle repository status" });
|
||||||
}
|
}
|
||||||
});
|
},
|
||||||
|
);
|
||||||
|
|
||||||
// Get repository statistics
router.get(
  "/stats/summary",
  authenticateToken,
  requireViewHosts,
  async (_req, res) => {
    try {
      const stats = await prisma.repositories.aggregate({
        _count: true,
      });

      const hostRepoStats = await prisma.host_repositories.aggregate({
        _count: {
          is_enabled: true,
        },
        where: {
          is_enabled: true,
        },
      });

      const secureRepos = await prisma.repositories.count({
        where: { is_secure: true },
      });

      const activeRepos = await prisma.repositories.count({
        where: { is_active: true },
      });

      res.json({
        totalRepositories: stats._count,
        activeRepositories: activeRepos,
        secureRepositories: secureRepos,
        // _count keys mirror the snake_case field counted above
        enabledHostRepositories: hostRepoStats._count.is_enabled,
        securityPercentage:
          stats._count > 0 ? Math.round((secureRepos / stats._count) * 100) : 0,
      });
    } catch (error) {
      console.error("Repository stats error:", error);
      res.status(500).json({ error: "Failed to fetch repository statistics" });
    }
  },
);
// Delete a specific repository (admin only)
router.delete(
  "/:repositoryId",
  authenticateToken,
  requireManageHosts,
  async (req, res) => {
    try {
      const { repositoryId } = req.params;

      // Check if repository exists first
      const existingRepository = await prisma.repositories.findUnique({
        where: { id: repositoryId },
        select: {
          id: true,
          name: true,
          url: true,
          _count: {
            select: {
              host_repositories: true,
            },
          },
        },
      });

      if (!existingRepository) {
        return res.status(404).json({
          error: "Repository not found",
          details: "The repository may have been deleted or does not exist",
        });
      }

      // Delete repository and all related data (cascade will handle host_repositories)
      await prisma.repositories.delete({
        where: { id: repositoryId },
      });

      res.json({
        message: "Repository deleted successfully",
        deletedRepository: {
          id: existingRepository.id,
          name: existingRepository.name,
          url: existingRepository.url,
          hostCount: existingRepository._count.host_repositories,
        },
      });
    } catch (error) {
      console.error("Repository deletion error:", error);

      // Handle specific Prisma errors
      if (error.code === "P2025") {
        return res.status(404).json({
          error: "Repository not found",
          details: "The repository may have been deleted or does not exist",
        });
      }

      if (error.code === "P2003") {
        return res.status(400).json({
          error: "Cannot delete repository due to foreign key constraints",
          details: "The repository has related data that prevents deletion",
        });
      }

      res.status(500).json({
        error: "Failed to delete repository",
        details: error.message || "An unexpected error occurred",
      });
    }
  },
);

// Cleanup orphaned repositories (admin only)
router.delete(
  "/cleanup/orphaned",
  authenticateToken,
  requireManageHosts,
  async (_req, res) => {
    try {
      console.log("Cleaning up orphaned repositories...");

      // Find repositories with no host relationships
      const orphanedRepos = await prisma.repositories.findMany({
        where: {
          host_repositories: {
            none: {},
          },
        },
      });

      if (orphanedRepos.length === 0) {
        return res.json({
          message: "No orphaned repositories found",
          deletedCount: 0,
          deletedRepositories: [],
        });
      }

      // Delete orphaned repositories
      // Relation name must match the snake_case relation used in the
      // findMany above (host_repositories)
      const deleteResult = await prisma.repositories.deleteMany({
        where: {
          host_repositories: {
            none: {},
          },
        },
      });
      console.log(`Deleted ${deleteResult.count} orphaned repositories`);

      res.json({
        message: `Successfully deleted ${deleteResult.count} orphaned repositories`,
        deletedCount: deleteResult.count,
        deletedRepositories: orphanedRepos.map((repo) => ({
          id: repo.id,
          name: repo.name,
          url: repo.url,
        })),
      });
    } catch (error) {
      console.error("Repository cleanup error:", error);
      res
        .status(500)
        .json({ error: "Failed to cleanup orphaned repositories" });
    }
  },
);

module.exports = router;
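For orientation, a minimal client sketch of consuming the stats endpoint above; the base URL, token, and the /api/v1/repositories mount prefix are assumptions, not taken from this diff.

// Minimal sketch: fetch repository statistics (Node 18+, global fetch).
// BASE_URL, TOKEN, and the "/api/v1/repositories" prefix are hypothetical.
const BASE_URL = "http://localhost:3001";
const TOKEN = "<jwt-token>";

async function fetchRepoStats() {
  const res = await fetch(`${BASE_URL}/api/v1/repositories/stats/summary`, {
    headers: { Authorization: `Bearer ${TOKEN}` },
  });
  if (!res.ok) throw new Error(`HTTP ${res.status}`);
  // Expected shape: { totalRepositories, activeRepositories,
  //   secureRepositories, enabledHostRepositories, securityPercentage }
  return res.json();
}

fetchRepoStats().then(console.log).catch(console.error);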
backend/src/routes/searchRoutes.js (new file, 249 lines)
@@ -0,0 +1,249 @@
const express = require("express");
const router = express.Router();
const { getPrismaClient } = require("../config/prisma");
const { authenticateToken } = require("../middleware/auth");

const prisma = getPrismaClient();

/**
 * Global search endpoint
 * Searches across hosts, packages, repositories, and users
 * Returns categorized results
 */
router.get("/", authenticateToken, async (req, res) => {
  try {
    const { q } = req.query;

    if (!q || q.trim().length === 0) {
      return res.json({
        hosts: [],
        packages: [],
        repositories: [],
        users: [],
      });
    }

    const searchTerm = q.trim();

    // Prepare results object
    const results = {
      hosts: [],
      packages: [],
      repositories: [],
      users: [],
    };

    // Get user permissions from database
    let userPermissions = null;
    try {
      userPermissions = await prisma.role_permissions.findUnique({
        where: { role: req.user.role },
      });

      // If no specific permissions found, default to admin permissions
      if (!userPermissions) {
        console.warn(
          `No permissions found for role: ${req.user.role}, defaulting to admin access`,
        );
        userPermissions = {
          can_view_hosts: true,
          can_view_packages: true,
          can_view_users: true,
        };
      }
    } catch (permError) {
      console.error("Error fetching permissions:", permError);
      // Default to restrictive permissions on error
      userPermissions = {
        can_view_hosts: false,
        can_view_packages: false,
        can_view_users: false,
      };
    }

    // Search hosts if user has permission
    if (userPermissions.can_view_hosts) {
      try {
        const hosts = await prisma.hosts.findMany({
          where: {
            OR: [
              { hostname: { contains: searchTerm, mode: "insensitive" } },
              { friendly_name: { contains: searchTerm, mode: "insensitive" } },
              { ip: { contains: searchTerm, mode: "insensitive" } },
              { machine_id: { contains: searchTerm, mode: "insensitive" } },
            ],
          },
          select: {
            id: true,
            machine_id: true,
            hostname: true,
            friendly_name: true,
            ip: true,
            os_type: true,
            os_version: true,
            status: true,
            last_update: true,
          },
          take: 10, // Limit results
          orderBy: {
            last_update: "desc",
          },
        });

        results.hosts = hosts.map((host) => ({
          id: host.id,
          hostname: host.hostname,
          friendly_name: host.friendly_name,
          ip: host.ip,
          os_type: host.os_type,
          os_version: host.os_version,
          status: host.status,
          last_update: host.last_update,
          type: "host",
        }));
      } catch (error) {
        console.error("Error searching hosts:", error);
      }
    }

    // Search packages if user has permission
    if (userPermissions.can_view_packages) {
      try {
        const packages = await prisma.packages.findMany({
          where: {
            name: { contains: searchTerm, mode: "insensitive" },
          },
          select: {
            id: true,
            name: true,
            description: true,
            category: true,
            latest_version: true,
            _count: {
              select: {
                host_packages: true,
              },
            },
          },
          take: 10,
          orderBy: {
            name: "asc",
          },
        });

        results.packages = packages.map((pkg) => ({
          id: pkg.id,
          name: pkg.name,
          description: pkg.description,
          category: pkg.category,
          latest_version: pkg.latest_version,
          host_count: pkg._count.host_packages,
          type: "package",
        }));
      } catch (error) {
        console.error("Error searching packages:", error);
      }
    }

    // Search repositories if user has permission (usually same as hosts)
    if (userPermissions.can_view_hosts) {
      try {
        const repositories = await prisma.repositories.findMany({
          where: {
            OR: [
              { name: { contains: searchTerm, mode: "insensitive" } },
              { url: { contains: searchTerm, mode: "insensitive" } },
              { description: { contains: searchTerm, mode: "insensitive" } },
            ],
          },
          select: {
            id: true,
            name: true,
            url: true,
            distribution: true,
            repo_type: true,
            is_active: true,
            description: true,
            _count: {
              select: {
                host_repositories: true,
              },
            },
          },
          take: 10,
          orderBy: {
            name: "asc",
          },
        });

        results.repositories = repositories.map((repo) => ({
          id: repo.id,
          name: repo.name,
          url: repo.url,
          distribution: repo.distribution,
          repo_type: repo.repo_type,
          is_active: repo.is_active,
          description: repo.description,
          host_count: repo._count.host_repositories,
          type: "repository",
        }));
      } catch (error) {
        console.error("Error searching repositories:", error);
      }
    }

    // Search users if user has permission
    if (userPermissions.can_view_users) {
      try {
        const users = await prisma.users.findMany({
          where: {
            OR: [
              { username: { contains: searchTerm, mode: "insensitive" } },
              { email: { contains: searchTerm, mode: "insensitive" } },
              { first_name: { contains: searchTerm, mode: "insensitive" } },
              { last_name: { contains: searchTerm, mode: "insensitive" } },
            ],
          },
          select: {
            id: true,
            username: true,
            email: true,
            first_name: true,
            last_name: true,
            role: true,
            is_active: true,
            last_login: true,
          },
          take: 10,
          orderBy: {
            username: "asc",
          },
        });

        results.users = users.map((user) => ({
          id: user.id,
          username: user.username,
          email: user.email,
          first_name: user.first_name,
          last_name: user.last_name,
          role: user.role,
          is_active: user.is_active,
          last_login: user.last_login,
          type: "user",
        }));
      } catch (error) {
        console.error("Error searching users:", error);
      }
    }

    res.json(results);
  } catch (error) {
    console.error("Global search error:", error);
    res.status(500).json({
      error: "Failed to perform search",
      message: error.message,
    });
  }
});

module.exports = router;
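A minimal sketch of calling the global search endpoint from Node 18+ (global fetch); the /api/v1/search mount path and the bearer token are assumptions.

// Minimal sketch: query the global search endpoint.
// BASE_URL, TOKEN, and the "/api/v1/search" mount path are hypothetical.
const BASE_URL = "http://localhost:3001";
const TOKEN = "<jwt-token>";

async function search(q) {
  const url = `${BASE_URL}/api/v1/search?q=${encodeURIComponent(q)}`;
  const res = await fetch(url, {
    headers: { Authorization: `Bearer ${TOKEN}` },
  });
  if (!res.ok) throw new Error(`HTTP ${res.status}`);
  // Returns { hosts: [], packages: [], repositories: [], users: [] },
  // each list capped at 10 results and tagged with a `type` field.
  return res.json();
}

search("nginx").then((r) => console.log(r.packages)).catch(console.error);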
@@ -1,257 +1,473 @@
const express = require("express");
const { body, validationResult } = require("express-validator");
const { getPrismaClient } = require("../config/prisma");
const { authenticateToken } = require("../middleware/auth");
const { requireManageSettings } = require("../middleware/permissions");
const { getSettings, updateSettings } = require("../services/settingsService");

const router = express.Router();
const prisma = getPrismaClient();

// WebSocket broadcaster for agent policy updates (no longer used - queue-based delivery preferred)
// const { broadcastSettingsUpdate } = require("../services/agentWs");
const { queueManager, QUEUE_NAMES } = require("../services/automation");

// Helpers
function normalizeUpdateInterval(minutes) {
  let m = parseInt(minutes, 10);
  if (Number.isNaN(m)) return 60;
  if (m < 5) m = 5;
  if (m > 1440) m = 1440;
  if (m < 60) {
    // Clamp to 5-59, step 5
    const snapped = Math.round(m / 5) * 5;
    return Math.min(59, Math.max(5, snapped));
  }
  // Allowed hour-based presets
  const allowed = [60, 120, 180, 360, 720, 1440];
  let nearest = allowed[0];
  let bestDiff = Math.abs(m - nearest);
  for (const a of allowed) {
    const d = Math.abs(m - a);
    if (d < bestDiff) {
      bestDiff = d;
      nearest = a;
    }
  }
  return nearest;
}

function buildCronExpression(minutes) {
  const m = normalizeUpdateInterval(minutes);
  if (m < 60) {
    return `*/${m} * * * *`;
  }
  if (m === 60) {
    // Hourly at current minute is chosen by agent; default 0 here
    return `0 * * * *`;
  }
  const hours = Math.floor(m / 60);
  // Every N hours at minute 0
  return `0 */${hours} * * *`;
}
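To make the clamping and snapping behaviour concrete, a few worked inputs and the values the helpers above return:

// Worked examples for the helpers above (values follow directly from the code):
normalizeUpdateInterval(3);    // -> 5    (floor of the 5-minute range)
normalizeUpdateInterval(22);   // -> 20   (snapped to the nearest 5-minute step)
normalizeUpdateInterval(90);   // -> 60   (nearest allowed hour preset)
normalizeUpdateInterval(2000); // -> 1440 (capped at one day)

buildCronExpression(45);  // -> "*/45 * * * *"  (sub-hourly)
buildCronExpression(60);  // -> "0 * * * *"     (hourly)
buildCronExpression(120); // -> "0 */2 * * *"   (every 2 hours)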
// Get current settings
router.get("/", authenticateToken, requireManageSettings, async (_req, res) => {
  try {
    const settings = await getSettings();
    if (process.env.ENABLE_LOGGING === "true") {
      console.log("Returning settings:", settings);
    }
    res.json(settings);
  } catch (error) {
    console.error("Settings fetch error:", error);
    res.status(500).json({ error: "Failed to fetch settings" });
  }
});

// Update settings
router.put(
  "/",
  authenticateToken,
  requireManageSettings,
  [
    body("serverProtocol")
      .optional()
      .isIn(["http", "https"])
      .withMessage("Protocol must be http or https"),
    body("serverHost")
      .optional()
      .isLength({ min: 1 })
      .withMessage("Server host is required"),
    body("serverPort")
      .optional()
      .isInt({ min: 1, max: 65535 })
      .withMessage("Port must be between 1 and 65535"),
    body("updateInterval")
      .optional()
      .isInt({ min: 5, max: 1440 })
      .withMessage("Update interval must be between 5 and 1440 minutes"),
    body("autoUpdate")
      .optional()
      .isBoolean()
      .withMessage("Auto update must be a boolean"),
    body("ignoreSslSelfSigned")
      .optional()
      .isBoolean()
      .withMessage("Ignore SSL self-signed must be a boolean"),
    body("signupEnabled")
      .optional()
      .isBoolean()
      .withMessage("Signup enabled must be a boolean"),
    body("defaultUserRole")
      .optional()
      .isLength({ min: 1 })
      .withMessage("Default user role must be a non-empty string"),
    body("githubRepoUrl")
      .optional()
      .isLength({ min: 1 })
      .withMessage("GitHub repo URL must be a non-empty string"),
    body("repositoryType")
      .optional()
      .isIn(["public", "private"])
      .withMessage("Repository type must be public or private"),
    body("sshKeyPath")
      .optional()
      .custom((value) => {
        if (value && value.trim().length === 0) {
          return true; // Allow empty string
        }
        if (value && value.trim().length < 1) {
          throw new Error("SSH key path must be a non-empty string");
        }
        return true;
      }),
    body("logoDark")
      .optional()
      .isLength({ min: 1 })
      .withMessage("Logo dark path must be a non-empty string"),
    body("logoLight")
      .optional()
      .isLength({ min: 1 })
      .withMessage("Logo light path must be a non-empty string"),
    body("favicon")
      .optional()
      .isLength({ min: 1 })
      .withMessage("Favicon path must be a non-empty string"),
  ],
  async (req, res) => {
    try {
      const errors = validationResult(req);
      if (!errors.isEmpty()) {
        console.log("Validation errors:", errors.array());
        return res.status(400).json({ errors: errors.array() });
      }

      const {
        serverProtocol,
        serverHost,
        serverPort,
        updateInterval,
        autoUpdate,
        ignoreSslSelfSigned,
        signupEnabled,
        defaultUserRole,
        githubRepoUrl,
        repositoryType,
        sshKeyPath,
        logoDark,
        logoLight,
        favicon,
        colorTheme,
      } = req.body;

      // Get current settings to check for update interval changes
      const currentSettings = await getSettings();
      const oldUpdateInterval = currentSettings.update_interval;

      // Build update object with only provided fields
      const updateData = {};

      if (serverProtocol !== undefined)
        updateData.server_protocol = serverProtocol;
      if (serverHost !== undefined) updateData.server_host = serverHost;
      if (serverPort !== undefined) updateData.server_port = serverPort;
      if (updateInterval !== undefined) {
        updateData.update_interval = normalizeUpdateInterval(updateInterval);
      }
      if (autoUpdate !== undefined) updateData.auto_update = autoUpdate;
      if (ignoreSslSelfSigned !== undefined)
        updateData.ignore_ssl_self_signed = ignoreSslSelfSigned;
      if (signupEnabled !== undefined)
        updateData.signup_enabled = signupEnabled;
      if (defaultUserRole !== undefined)
        updateData.default_user_role = defaultUserRole;
      if (githubRepoUrl !== undefined)
        updateData.github_repo_url = githubRepoUrl;
      if (repositoryType !== undefined)
        updateData.repository_type = repositoryType;
      if (sshKeyPath !== undefined) updateData.ssh_key_path = sshKeyPath;
      if (logoDark !== undefined) updateData.logo_dark = logoDark;
      if (logoLight !== undefined) updateData.logo_light = logoLight;
      if (favicon !== undefined) updateData.favicon = favicon;
      if (colorTheme !== undefined) updateData.color_theme = colorTheme;

      const updatedSettings = await updateSettings(
        currentSettings.id,
        updateData,
      );

      console.log("Settings updated successfully:", updatedSettings);

      // If update interval changed, enqueue persistent jobs for agents
      if (
        updateInterval !== undefined &&
        oldUpdateInterval !== updateData.update_interval
      ) {
        console.log(
          `Update interval changed from ${oldUpdateInterval} to ${updateData.update_interval} minutes. Enqueueing agent settings updates...`,
        );

        const hosts = await prisma.hosts.findMany({
          where: { status: "active" },
          select: { api_id: true },
        });

        const queue = queueManager.queues[QUEUE_NAMES.AGENT_COMMANDS];
        const jobs = hosts.map((h) => ({
          name: "settings_update",
          data: {
            api_id: h.api_id,
            type: "settings_update",
            update_interval: updateData.update_interval,
          },
          opts: { attempts: 10, backoff: { type: "exponential", delay: 5000 } },
        }));

        // Bulk add jobs
        await queue.addBulk(jobs);

        // Note: Queue-based delivery handles retries and ensures reliable delivery
        // No need for immediate broadcast as it would cause duplicate messages
      }

      res.json({
        message: "Settings updated successfully",
        settings: updatedSettings,
      });
    } catch (error) {
      console.error("Settings update error:", error);
      res.status(500).json({ error: "Failed to update settings" });
    }
  },
);
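The enqueue step above fans out one settings_update job per active host; a sketch of two resulting job objects, with invented api_id values and an assumed new interval of 120 minutes:

// Sketch of the job objects handed to queue.addBulk() for two
// hypothetical hosts (api_id values are invented for illustration):
const exampleJobs = [
  {
    name: "settings_update",
    data: { api_id: "host-a", type: "settings_update", update_interval: 120 },
    opts: { attempts: 10, backoff: { type: "exponential", delay: 5000 } },
  },
  {
    name: "settings_update",
    data: { api_id: "host-b", type: "settings_update", update_interval: 120 },
    opts: { attempts: 10, backoff: { type: "exponential", delay: 5000 } },
  },
];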
// Get server URL for public use (used by installation scripts)
router.get("/server-url", async (_req, res) => {
  try {
    const settings = await getSettings();
    const serverUrl = settings.server_url;
    res.json({ server_url: serverUrl });
  } catch (error) {
    console.error("Server URL fetch error:", error);
    res.status(500).json({ error: "Failed to fetch server URL" });
  }
});

// Get update interval policy for agents (requires API authentication)
router.get("/update-interval", async (req, res) => {
  try {
    // Verify API credentials
    const apiId = req.headers["x-api-id"];
    const apiKey = req.headers["x-api-key"];

    if (!apiId || !apiKey) {
      return res.status(401).json({ error: "API credentials required" });
    }

    // Validate API credentials
    const host = await prisma.hosts.findUnique({
      where: { api_id: apiId },
    });

    if (!host || host.api_key !== apiKey) {
      return res.status(401).json({ error: "Invalid API credentials" });
    }

    const settings = await getSettings();
    const interval = normalizeUpdateInterval(settings.update_interval || 60);
    res.json({
      updateInterval: interval,
      cronExpression: buildCronExpression(interval),
    });
  } catch (error) {
    console.error("Update interval fetch error:", error);
    res.json({ updateInterval: 60, cronExpression: "0 * * * *" });
  }
});
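A minimal sketch of how an agent might poll this policy endpoint; the base URL, credentials, and /api/v1/settings mount prefix are assumptions, while the X-API-ID / X-API-KEY headers match the handler above.

// Minimal sketch: poll the update-interval policy (Node 18+).
// The base URL, mount prefix, and credential values are hypothetical.
async function fetchUpdatePolicy() {
  const res = await fetch(
    "http://localhost:3001/api/v1/settings/update-interval",
    {
      headers: {
        "X-API-ID": "<api-id>",
        "X-API-KEY": "<api-key>",
      },
    },
  );
  if (!res.ok) throw new Error(`HTTP ${res.status}`);
  return res.json(); // { updateInterval, cronExpression }
}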
// Get auto-update policy for agents (public endpoint)
router.get("/auto-update", async (_req, res) => {
  try {
    const settings = await getSettings();
    res.json({
      autoUpdate: settings.auto_update || false,
    });
  } catch (error) {
    console.error("Auto-update fetch error:", error);
    res.json({ autoUpdate: false });
  }
});

// Upload logo files
router.post(
  "/logos/upload",
  authenticateToken,
  requireManageSettings,
  async (req, res) => {
    try {
      const { logoType, fileContent, fileName } = req.body;

      if (!logoType || !fileContent) {
        return res.status(400).json({
          error: "Logo type and file content are required",
        });
      }

      if (!["dark", "light", "favicon"].includes(logoType)) {
        return res.status(400).json({
          error: "Logo type must be 'dark', 'light', or 'favicon'",
        });
      }

      // Validate file content (basic checks)
      if (typeof fileContent !== "string") {
        return res.status(400).json({
          error: "File content must be a base64 string",
        });
      }

      const fs = require("node:fs").promises;
      const path = require("node:path");
      const _crypto = require("node:crypto");

      // Create assets directory if it doesn't exist
      // In development: save to public/assets (served by Vite)
      // In production: save to dist/assets (served by built app)
      const isDevelopment = process.env.NODE_ENV !== "production";
      const assetsDir = isDevelopment
        ? path.join(__dirname, "../../../frontend/public/assets")
        : path.join(__dirname, "../../../frontend/dist/assets");
      await fs.mkdir(assetsDir, { recursive: true });

      // Determine file extension and path
      let fileExtension;
      let fileName_final;

      if (logoType === "favicon") {
        fileExtension = ".svg";
        fileName_final = fileName || "logo_square.svg";
      } else {
        // Determine extension from file content or use default
        if (fileContent.startsWith("data:image/png")) {
          fileExtension = ".png";
        } else if (fileContent.startsWith("data:image/svg")) {
          fileExtension = ".svg";
        } else if (
          fileContent.startsWith("data:image/jpeg") ||
          fileContent.startsWith("data:image/jpg")
        ) {
          fileExtension = ".jpg";
        } else {
          fileExtension = ".png"; // Default to PNG
        }
        fileName_final = fileName || `logo_${logoType}${fileExtension}`;
      }

      const filePath = path.join(assetsDir, fileName_final);

      // Handle base64 data URLs
      let fileBuffer;
      if (fileContent.startsWith("data:")) {
        const base64Data = fileContent.split(",")[1];
        fileBuffer = Buffer.from(base64Data, "base64");
      } else {
        // Assume it's already base64
        fileBuffer = Buffer.from(fileContent, "base64");
      }

      // Create backup of existing file
      try {
        const backupPath = `${filePath}.backup.${Date.now()}`;
        await fs.copyFile(filePath, backupPath);
        console.log(`Created backup: ${backupPath}`);
      } catch (error) {
        // Ignore if original doesn't exist
        if (error.code !== "ENOENT") {
          console.warn("Failed to create backup:", error.message);
        }
      }

      // Write new logo file
      await fs.writeFile(filePath, fileBuffer);

      // Update settings with new logo path
      const settings = await getSettings();
      const logoPath = `/assets/${fileName_final}`;

      const updateData = {};
      if (logoType === "dark") {
        updateData.logo_dark = logoPath;
      } else if (logoType === "light") {
        updateData.logo_light = logoPath;
      } else if (logoType === "favicon") {
        updateData.favicon = logoPath;
      }

      await updateSettings(settings.id, updateData);

      // Get file stats
      const stats = await fs.stat(filePath);

      res.json({
        message: `${logoType} logo uploaded successfully`,
        fileName: fileName_final,
        path: logoPath,
        size: stats.size,
        sizeFormatted: `${(stats.size / 1024).toFixed(1)} KB`,
      });
    } catch (error) {
      console.error("Upload logo error:", error);
      res.status(500).json({ error: "Failed to upload logo" });
    }
  },
);
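A minimal sketch of building the JSON body this upload handler expects from a local PNG; the file path, base URL, mount prefix, and token are hypothetical.

// Minimal sketch: upload a PNG logo as a base64 data URL (Node 18+).
// File path, URL, mount prefix, and token are hypothetical placeholders.
const fsp = require("node:fs").promises;

async function uploadLogo(filePath, logoType, token) {
  const buf = await fsp.readFile(filePath);
  const fileContent = `data:image/png;base64,${buf.toString("base64")}`;
  const res = await fetch(
    "http://localhost:3001/api/v1/settings/logos/upload",
    {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${token}`,
      },
      body: JSON.stringify({ logoType, fileContent, fileName: "logo_dark.png" }),
    },
  );
  return res.json(); // { message, fileName, path, size, sizeFormatted }
}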
// Reset logo to default
router.post(
  "/logos/reset",
  authenticateToken,
  requireManageSettings,
  async (req, res) => {
    try {
      const { logoType } = req.body;

      if (!logoType) {
        return res.status(400).json({
          error: "Logo type is required",
        });
      }

      if (!["dark", "light", "favicon"].includes(logoType)) {
        return res.status(400).json({
          error: "Logo type must be 'dark', 'light', or 'favicon'",
        });
      }

      // Get current settings
      const settings = await getSettings();

      // Clear the custom logo path to revert to default
      const updateData = {};
      if (logoType === "dark") {
        updateData.logo_dark = null;
      } else if (logoType === "light") {
        updateData.logo_light = null;
      } else if (logoType === "favicon") {
        updateData.favicon = null;
      }

      await updateSettings(settings.id, updateData);

      res.json({
        message: `${logoType} logo reset to default successfully`,
        logoType,
      });
    } catch (error) {
      console.error("Reset logo error:", error);
      res.status(500).json({ error: "Failed to reset logo" });
    }
  },
);

module.exports = router;
backend/src/routes/tfaRoutes.js (new file, 351 lines)
@@ -0,0 +1,351 @@
const express = require("express");
const { getPrismaClient } = require("../config/prisma");
const speakeasy = require("speakeasy");
const QRCode = require("qrcode");
const { authenticateToken } = require("../middleware/auth");
const { body, validationResult } = require("express-validator");

const router = express.Router();
const prisma = getPrismaClient();

// Generate TFA secret and QR code
router.get("/setup", authenticateToken, async (req, res) => {
  try {
    const userId = req.user.id;

    // Check if user already has TFA enabled
    const user = await prisma.users.findUnique({
      where: { id: userId },
      select: { tfa_enabled: true, tfa_secret: true },
    });

    if (user.tfa_enabled) {
      return res.status(400).json({
        error: "Two-factor authentication is already enabled for this account",
      });
    }

    // Generate a new secret
    const secret = speakeasy.generateSecret({
      name: `PatchMon (${req.user.username})`,
      issuer: "PatchMon",
      length: 32,
    });

    // Generate QR code
    const qrCodeUrl = await QRCode.toDataURL(secret.otpauth_url);

    // Store the secret temporarily (not enabled yet)
    await prisma.users.update({
      where: { id: userId },
      data: { tfa_secret: secret.base32 },
    });

    res.json({
      secret: secret.base32,
      qrCode: qrCodeUrl,
      manualEntryKey: secret.base32,
    });
  } catch (error) {
    console.error("TFA setup error:", error);
    res
      .status(500)
      .json({ error: "Failed to setup two-factor authentication" });
  }
});

// Verify TFA setup
router.post(
  "/verify-setup",
  authenticateToken,
  [
    body("token")
      .notEmpty()
      .withMessage("Token is required")
      .isString()
      .withMessage("Token must be a string")
      .isLength({ min: 6, max: 6 })
      .withMessage("Token must be exactly 6 digits")
      .matches(/^\d{6}$/)
      .withMessage("Token must contain only numbers"),
  ],
  async (req, res) => {
    try {
      const errors = validationResult(req);
      if (!errors.isEmpty()) {
        return res.status(400).json({ errors: errors.array() });
      }

      // Ensure token is a string (convert if needed)
      let { token } = req.body;
      if (typeof token !== "string") {
        token = String(token);
      }
      const userId = req.user.id;

      // Get user's TFA secret
      const user = await prisma.users.findUnique({
        where: { id: userId },
        select: { tfa_secret: true, tfa_enabled: true },
      });

      if (!user.tfa_secret) {
        return res.status(400).json({
          error: "No TFA secret found. Please start the setup process first.",
        });
      }

      if (user.tfa_enabled) {
        return res.status(400).json({
          error:
            "Two-factor authentication is already enabled for this account",
        });
      }

      // Verify the token
      const verified = speakeasy.totp.verify({
        secret: user.tfa_secret,
        encoding: "base32",
        token: token,
        window: 2, // Allow 2 time windows (60 seconds) for clock drift
      });

      if (!verified) {
        return res.status(400).json({
          error: "Invalid verification code. Please try again.",
        });
      }

      // Generate backup codes
      const backupCodes = Array.from({ length: 10 }, () =>
        Math.random().toString(36).substring(2, 8).toUpperCase(),
      );

      // Enable TFA and store backup codes
      await prisma.users.update({
        where: { id: userId },
        data: {
          tfa_enabled: true,
          tfa_backup_codes: JSON.stringify(backupCodes),
        },
      });

      res.json({
        message: "Two-factor authentication has been enabled successfully",
        backupCodes: backupCodes,
      });
    } catch (error) {
      console.error("TFA verification error:", error);
      res
        .status(500)
        .json({ error: "Failed to verify two-factor authentication setup" });
    }
  },
);

// Disable TFA
router.post(
  "/disable",
  authenticateToken,
  [
    body("password")
      .notEmpty()
      .withMessage("Password is required to disable TFA"),
  ],
  async (req, res) => {
    try {
      const errors = validationResult(req);
      if (!errors.isEmpty()) {
        return res.status(400).json({ errors: errors.array() });
      }

      const { password: _password } = req.body;
      const userId = req.user.id;

      // Verify password
      const user = await prisma.users.findUnique({
        where: { id: userId },
        select: { password_hash: true, tfa_enabled: true },
      });

      if (!user.tfa_enabled) {
        return res.status(400).json({
          error: "Two-factor authentication is not enabled for this account",
        });
      }

      // FIXME: In a real implementation, you would verify the password hash here
      // For now, we'll skip password verification for simplicity

      // Disable TFA
      await prisma.users.update({
        where: { id: userId },
        data: {
          tfa_enabled: false,
          tfa_secret: null,
          tfa_backup_codes: null,
        },
      });

      res.json({
        message: "Two-factor authentication has been disabled successfully",
      });
    } catch (error) {
      console.error("TFA disable error:", error);
      res
        .status(500)
        .json({ error: "Failed to disable two-factor authentication" });
    }
  },
);

// Get TFA status
router.get("/status", authenticateToken, async (req, res) => {
  try {
    const userId = req.user.id;

    const user = await prisma.users.findUnique({
      where: { id: userId },
      select: {
        tfa_enabled: true,
        tfa_secret: true,
        tfa_backup_codes: true,
      },
    });

    res.json({
      enabled: user.tfa_enabled,
      hasBackupCodes: !!user.tfa_backup_codes,
    });
  } catch (error) {
    console.error("TFA status error:", error);
    res.status(500).json({ error: "Failed to get TFA status" });
  }
});

// Regenerate backup codes
router.post("/regenerate-backup-codes", authenticateToken, async (req, res) => {
  try {
    const userId = req.user.id;

    // Check if TFA is enabled
    const user = await prisma.users.findUnique({
      where: { id: userId },
      select: { tfa_enabled: true },
    });

    if (!user.tfa_enabled) {
      return res.status(400).json({
        error: "Two-factor authentication is not enabled for this account",
      });
    }

    // Generate new backup codes
    const backupCodes = Array.from({ length: 10 }, () =>
      Math.random().toString(36).substring(2, 8).toUpperCase(),
    );

    // Update backup codes
    await prisma.users.update({
      where: { id: userId },
      data: {
        tfa_backup_codes: JSON.stringify(backupCodes),
      },
    });

    res.json({
      message: "Backup codes have been regenerated successfully",
      backupCodes: backupCodes,
    });
  } catch (error) {
    console.error("TFA backup codes regeneration error:", error);
    res.status(500).json({ error: "Failed to regenerate backup codes" });
  }
});

// Verify TFA token (for login)
router.post(
  "/verify",
  [
    body("username").notEmpty().withMessage("Username is required"),
    body("token")
      .isLength({ min: 6, max: 6 })
      .withMessage("Token must be 6 characters"),
    body("token")
      .matches(/^[A-Z0-9]{6}$/)
      .withMessage("Token must be 6 alphanumeric characters"),
  ],
  async (req, res) => {
    try {
      const errors = validationResult(req);
      if (!errors.isEmpty()) {
        return res.status(400).json({ errors: errors.array() });
      }

      const { username, token } = req.body;

      // Get user's TFA secret
      const user = await prisma.users.findUnique({
        where: { username },
        select: {
          id: true,
          tfa_enabled: true,
          tfa_secret: true,
          tfa_backup_codes: true,
        },
      });

      if (!user || !user.tfa_enabled || !user.tfa_secret) {
        return res.status(400).json({
          error: "Two-factor authentication is not enabled for this account",
        });
      }

      // Check if it's a backup code
      const backupCodes = user.tfa_backup_codes
        ? JSON.parse(user.tfa_backup_codes)
        : [];
      const isBackupCode = backupCodes.includes(token);

      let verified = false;

      if (isBackupCode) {
        // Remove the used backup code
        const updatedBackupCodes = backupCodes.filter((code) => code !== token);
        await prisma.users.update({
          where: { id: user.id },
          data: {
            tfa_backup_codes: JSON.stringify(updatedBackupCodes),
          },
        });
        verified = true;
      } else {
        // Verify TOTP token
        verified = speakeasy.totp.verify({
          secret: user.tfa_secret,
          encoding: "base32",
          token: token,
          window: 2,
        });
      }

      if (!verified) {
        return res.status(400).json({
          error: "Invalid verification code",
        });
      }

      res.json({
        message: "Two-factor authentication verified successfully",
        userId: user.id,
      });
    } catch (error) {
      console.error("TFA verification error:", error);
      res
        .status(500)
        .json({ error: "Failed to verify two-factor authentication" });
    }
  },
);

module.exports = router;
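A minimal sketch of producing a token the /verify-setup handler would accept, for example in an integration test; the secret placeholder stands in for the base32 value returned by GET /setup.

// Minimal sketch: generate a TOTP token for testing the TFA routes.
// The secret value is a hypothetical placeholder from GET /setup.
const speakeasyClient = require("speakeasy");

const base32Secret = "<base32-secret-from-setup>";
const totpToken = speakeasyClient.totp({
  secret: base32Secret,
  encoding: "base32",
});
console.log(totpToken); // six digits, valid within the handler's 2-step window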
backend/src/routes/userPreferencesRoutes.js (new file, 105 lines)
@@ -0,0 +1,105 @@
const express = require("express");
const { getPrismaClient } = require("../config/prisma");
const { authenticateToken } = require("../middleware/auth");

const router = express.Router();
const prisma = getPrismaClient();

/**
 * GET /api/v1/user/preferences
 * Get current user's preferences (theme and color theme)
 */
router.get("/", authenticateToken, async (req, res) => {
  try {
    const userId = req.user.id;

    const user = await prisma.users.findUnique({
      where: { id: userId },
      select: {
        theme_preference: true,
        color_theme: true,
      },
    });

    if (!user) {
      return res.status(404).json({ error: "User not found" });
    }

    res.json({
      theme_preference: user.theme_preference || "dark",
      color_theme: user.color_theme || "cyber_blue",
    });
  } catch (error) {
    console.error("Error fetching user preferences:", error);
    res.status(500).json({ error: "Failed to fetch user preferences" });
  }
});

/**
 * PATCH /api/v1/user/preferences
 * Update current user's preferences
 */
router.patch("/", authenticateToken, async (req, res) => {
  try {
    const userId = req.user.id;
    const { theme_preference, color_theme } = req.body;

    // Validate inputs
    const updateData = {};
    if (theme_preference !== undefined) {
      if (!["light", "dark"].includes(theme_preference)) {
        return res.status(400).json({
          error: "Invalid theme preference. Must be 'light' or 'dark'",
        });
      }
      updateData.theme_preference = theme_preference;
    }

    if (color_theme !== undefined) {
      const validColorThemes = [
        "default",
        "cyber_blue",
        "neon_purple",
        "matrix_green",
        "ocean_blue",
        "sunset_gradient",
      ];
      if (!validColorThemes.includes(color_theme)) {
        return res.status(400).json({
          error: `Invalid color theme. Must be one of: ${validColorThemes.join(", ")}`,
        });
      }
      updateData.color_theme = color_theme;
    }

    if (Object.keys(updateData).length === 0) {
      return res
        .status(400)
        .json({ error: "No preferences provided to update" });
    }

    updateData.updated_at = new Date();

    const updatedUser = await prisma.users.update({
      where: { id: userId },
      data: updateData,
      select: {
        theme_preference: true,
        color_theme: true,
      },
    });

    res.json({
      message: "Preferences updated successfully",
      preferences: {
        theme_preference: updatedUser.theme_preference,
        color_theme: updatedUser.color_theme,
      },
    });
  } catch (error) {
    console.error("Error updating user preferences:", error);
    res.status(500).json({ error: "Failed to update user preferences" });
  }
});

module.exports = router;
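A minimal sketch of updating preferences through the PATCH route above; the token and base URL are hypothetical, while the path comes from the route's JSDoc.

// Minimal sketch: set theme preferences (Node 18+, global fetch).
// Base URL and token are hypothetical placeholders.
async function setTheme(token) {
  const res = await fetch("http://localhost:3001/api/v1/user/preferences", {
    method: "PATCH",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${token}`,
    },
    body: JSON.stringify({
      theme_preference: "light",
      color_theme: "matrix_green",
    }),
  });
  return res.json(); // { message, preferences: { theme_preference, color_theme } }
}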
backend/src/routes/versionRoutes.js (new file, 361 lines)
@@ -0,0 +1,361 @@
const express = require("express");
const { authenticateToken } = require("../middleware/auth");
const { requireManageSettings } = require("../middleware/permissions");
const { getPrismaClient } = require("../config/prisma");

const prisma = getPrismaClient();

// Default GitHub repository URL
const DEFAULT_GITHUB_REPO = "https://github.com/PatchMon/PatchMon.git";

const router = express.Router();

// Helper function to get current version from package.json
function getCurrentVersion() {
  try {
    const packageJson = require("../../package.json");
    if (!packageJson?.version) {
      throw new Error("Version not found in package.json");
    }
    return packageJson.version;
  } catch (packageError) {
    console.error(
      "Could not read version from package.json:",
      packageError.message,
    );
    return "unknown";
  }
}

// Helper function to parse GitHub repository URL
function parseGitHubRepo(repoUrl) {
  let owner, repo;

  if (repoUrl.includes("git@github.com:")) {
    const match = repoUrl.match(/git@github\.com:([^/]+)\/([^/]+)\.git/);
    if (match) {
      [, owner, repo] = match;
    }
  } else if (repoUrl.includes("github.com/")) {
    const match = repoUrl.match(/github\.com\/([^/]+)\/([^/]+?)(?:\.git)?$/);
    if (match) {
      [, owner, repo] = match;
    }
  }

  return { owner, repo };
}
|
|
||||||
|
// Helper function to get latest release from GitHub API
|
||||||
|
async function getLatestRelease(owner, repo) {
|
||||||
|
try {
|
||||||
|
const currentVersion = getCurrentVersion();
|
||||||
|
const apiUrl = `https://api.github.com/repos/${owner}/${repo}/releases/latest`;
|
||||||
|
|
||||||
|
const response = await fetch(apiUrl, {
|
||||||
|
method: "GET",
|
||||||
|
headers: {
|
||||||
|
Accept: "application/vnd.github.v3+json",
|
||||||
|
"User-Agent": `PatchMon-Server/${currentVersion}`,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!response.ok) {
|
||||||
|
const errorText = await response.text();
|
||||||
|
if (
|
||||||
|
errorText.includes("rate limit") ||
|
||||||
|
errorText.includes("API rate limit")
|
||||||
|
) {
|
||||||
|
throw new Error("GitHub API rate limit exceeded");
|
||||||
|
}
|
||||||
|
throw new Error(
|
||||||
|
`GitHub API error: ${response.status} ${response.statusText}`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
const releaseData = await response.json();
|
||||||
|
return {
|
||||||
|
tagName: releaseData.tag_name,
|
||||||
|
version: releaseData.tag_name.replace("v", ""),
|
||||||
|
publishedAt: releaseData.published_at,
|
||||||
|
htmlUrl: releaseData.html_url,
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
console.error("Error fetching latest release:", error.message);
|
||||||
|
throw error; // Re-throw to be caught by the calling function
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Helper function to get latest commit from main branch
|
||||||
|
async function getLatestCommit(owner, repo) {
|
||||||
|
try {
|
||||||
|
const currentVersion = getCurrentVersion();
|
||||||
|
const apiUrl = `https://api.github.com/repos/${owner}/${repo}/commits/main`;
|
||||||
|
|
||||||
|
const response = await fetch(apiUrl, {
|
||||||
|
method: "GET",
|
||||||
|
headers: {
|
||||||
|
Accept: "application/vnd.github.v3+json",
|
||||||
|
"User-Agent": `PatchMon-Server/${currentVersion}`,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!response.ok) {
|
||||||
|
const errorText = await response.text();
|
||||||
|
if (
|
||||||
|
errorText.includes("rate limit") ||
|
||||||
|
errorText.includes("API rate limit")
|
||||||
|
) {
|
||||||
|
throw new Error("GitHub API rate limit exceeded");
|
||||||
|
}
|
||||||
|
throw new Error(
|
||||||
|
`GitHub API error: ${response.status} ${response.statusText}`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
const commitData = await response.json();
|
||||||
|
return {
|
||||||
|
sha: commitData.sha,
|
||||||
|
message: commitData.commit.message,
|
||||||
|
author: commitData.commit.author.name,
|
||||||
|
date: commitData.commit.author.date,
|
||||||
|
htmlUrl: commitData.html_url,
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
console.error("Error fetching latest commit:", error.message);
|
||||||
|
throw error; // Re-throw to be caught by the calling function
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Helper function to get commit count difference
|
||||||
|
async function getCommitDifference(owner, repo, currentVersion) {
|
||||||
|
// Try both with and without 'v' prefix for compatibility
|
||||||
|
const versionTags = [
|
||||||
|
currentVersion, // Try without 'v' first (new format)
|
||||||
|
`v${currentVersion}`, // Try with 'v' prefix (old format)
|
||||||
|
];
|
||||||
|
|
||||||
|
for (const versionTag of versionTags) {
|
||||||
|
try {
|
||||||
|
// Compare main branch with the released version tag
|
||||||
|
const apiUrl = `https://api.github.com/repos/${owner}/${repo}/compare/${versionTag}...main`;
|
||||||
|
|
||||||
|
const response = await fetch(apiUrl, {
|
||||||
|
method: "GET",
|
||||||
|
headers: {
|
||||||
|
Accept: "application/vnd.github.v3+json",
|
||||||
|
"User-Agent": `PatchMon-Server/${getCurrentVersion()}`,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!response.ok) {
|
||||||
|
const errorText = await response.text();
|
||||||
|
if (
|
||||||
|
errorText.includes("rate limit") ||
|
||||||
|
errorText.includes("API rate limit")
|
||||||
|
) {
|
||||||
|
throw new Error("GitHub API rate limit exceeded");
|
||||||
|
}
|
||||||
|
// If 404, try next tag format
|
||||||
|
if (response.status === 404) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
throw new Error(
|
||||||
|
`GitHub API error: ${response.status} ${response.statusText}`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
const compareData = await response.json();
|
||||||
|
return {
|
||||||
|
commitsBehind: compareData.behind_by || 0, // How many commits main is behind release
|
||||||
|
commitsAhead: compareData.ahead_by || 0, // How many commits main is ahead of release
|
||||||
|
totalCommits: compareData.total_commits || 0,
|
||||||
|
branchInfo: "main branch vs release",
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
// If rate limit, throw immediately
|
||||||
|
if (error.message.includes("rate limit")) {
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If all attempts failed, throw error
|
||||||
|
throw new Error(
|
||||||
|
`Could not find tag '${currentVersion}' or 'v${currentVersion}' in repository`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Helper function to compare version strings (semantic versioning)
|
||||||
|
function compareVersions(version1, version2) {
|
||||||
|
const v1parts = version1.split(".").map(Number);
|
||||||
|
const v2parts = version2.split(".").map(Number);
|
||||||
|
|
||||||
|
const maxLength = Math.max(v1parts.length, v2parts.length);
|
||||||
|
|
||||||
|
for (let i = 0; i < maxLength; i++) {
|
||||||
|
const v1part = v1parts[i] || 0;
|
||||||
|
const v2part = v2parts[i] || 0;
|
||||||
|
|
||||||
|
if (v1part > v2part) return 1;
|
||||||
|
if (v1part < v2part) return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get current version info
|
||||||
|
router.get("/current", authenticateToken, async (_req, res) => {
|
||||||
|
try {
|
||||||
|
const currentVersion = getCurrentVersion();
|
||||||
|
|
||||||
|
// Get settings with cached update info (no GitHub API calls)
|
||||||
|
const settings = await prisma.settings.findFirst();
|
||||||
|
const githubRepoUrl = settings?.githubRepoUrl || DEFAULT_GITHUB_REPO;
|
||||||
|
const { owner, repo } = parseGitHubRepo(githubRepoUrl);
|
||||||
|
|
||||||
|
// Return current version and cached update information
|
||||||
|
// The backend scheduler updates this data periodically
|
||||||
|
res.json({
|
||||||
|
version: currentVersion,
|
||||||
|
latest_version: settings?.latest_version || null,
|
||||||
|
is_update_available: settings?.is_update_available || false,
|
||||||
|
last_update_check: settings?.last_update_check || null,
|
||||||
|
buildDate: new Date().toISOString(),
|
||||||
|
environment: process.env.NODE_ENV || "development",
|
||||||
|
github: {
|
||||||
|
repository: githubRepoUrl,
|
||||||
|
owner: owner,
|
||||||
|
repo: repo,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
console.error("Error getting current version:", error);
|
||||||
|
res.status(500).json({ error: "Failed to get current version" });
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Test SSH key permissions and GitHub access
|
||||||
|
router.post(
|
||||||
|
"/test-ssh-key",
|
||||||
|
authenticateToken,
|
||||||
|
requireManageSettings,
|
||||||
|
async (_req, res) => {
|
||||||
|
res.status(410).json({
|
||||||
|
error:
|
||||||
|
"SSH key testing has been removed. Using default public repository.",
|
||||||
|
});
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
// Check for updates from GitHub
|
||||||
|
router.get(
|
||||||
|
"/check-updates",
|
||||||
|
authenticateToken,
|
||||||
|
requireManageSettings,
|
||||||
|
async (_req, res) => {
|
||||||
|
try {
|
||||||
|
// Get cached update information from settings
|
||||||
|
const settings = await prisma.settings.findFirst();
|
||||||
|
|
||||||
|
if (!settings) {
|
||||||
|
return res.status(400).json({ error: "Settings not found" });
|
||||||
|
}
|
||||||
|
|
||||||
|
const currentVersion = getCurrentVersion();
|
||||||
|
const githubRepoUrl = settings.githubRepoUrl || DEFAULT_GITHUB_REPO;
|
||||||
|
const { owner, repo } = parseGitHubRepo(githubRepoUrl);
|
||||||
|
|
||||||
|
let latestRelease = null;
|
||||||
|
let latestCommit = null;
|
||||||
|
let commitDifference = null;
|
||||||
|
|
||||||
|
// Fetch fresh GitHub data if we have valid owner/repo
|
||||||
|
if (owner && repo) {
|
||||||
|
try {
|
||||||
|
const [releaseData, commitData, differenceData] = await Promise.all([
|
||||||
|
getLatestRelease(owner, repo),
|
||||||
|
getLatestCommit(owner, repo),
|
||||||
|
getCommitDifference(owner, repo, currentVersion),
|
||||||
|
]);
|
||||||
|
|
||||||
|
latestRelease = releaseData;
|
||||||
|
latestCommit = commitData;
|
||||||
|
commitDifference = differenceData;
|
||||||
|
} catch (githubError) {
|
||||||
|
console.warn(
|
||||||
|
"Failed to fetch fresh GitHub data:",
|
||||||
|
githubError.message,
|
||||||
|
);
|
||||||
|
|
||||||
|
// Provide fallback data when GitHub API is rate-limited
|
||||||
|
if (
|
||||||
|
githubError.message.includes("rate limit") ||
|
||||||
|
githubError.message.includes("API rate limit")
|
||||||
|
) {
|
||||||
|
console.log("GitHub API rate limited, providing fallback data");
|
||||||
|
latestRelease = {
|
||||||
|
tagName: "v1.2.8",
|
||||||
|
version: "1.2.8",
|
||||||
|
publishedAt: "2025-10-02T17:12:53Z",
|
||||||
|
htmlUrl:
|
||||||
|
"https://github.com/PatchMon/PatchMon/releases/tag/v1.2.8",
|
||||||
|
};
|
||||||
|
latestCommit = {
|
||||||
|
sha: "cc89df161b8ea5d48ff95b0eb405fe69042052cd",
|
||||||
|
message: "Update README.md\n\nAdded Documentation Links",
|
||||||
|
author: "9 Technology Group LTD",
|
||||||
|
date: "2025-10-04T18:38:09Z",
|
||||||
|
htmlUrl:
|
||||||
|
"https://github.com/PatchMon/PatchMon/commit/cc89df161b8ea5d48ff95b0eb405fe69042052cd",
|
||||||
|
};
|
||||||
|
commitDifference = {
|
||||||
|
commitsBehind: 0,
|
||||||
|
commitsAhead: 3, // Main branch is ahead of release
|
||||||
|
totalCommits: 3,
|
||||||
|
branchInfo: "main branch vs release",
|
||||||
|
};
|
||||||
|
} else {
|
||||||
|
// Fall back to cached data for other errors
|
||||||
|
const githubRepoUrl = settings.githubRepoUrl || DEFAULT_GITHUB_REPO;
|
||||||
|
latestRelease = settings.latest_version
|
||||||
|
? {
|
||||||
|
version: settings.latest_version,
|
||||||
|
tagName: `v${settings.latest_version}`,
|
||||||
|
publishedAt: null, // Only use date from GitHub API, not cached data
|
||||||
|
htmlUrl: `${githubRepoUrl.replace(/\.git$/, "")}/releases/tag/v${settings.latest_version}`,
|
||||||
|
}
|
||||||
|
: null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const latestVersion =
|
||||||
|
latestRelease?.version || settings.latest_version || currentVersion;
|
||||||
|
      const isUpdateAvailable = latestRelease
        ? compareVersions(latestVersion, currentVersion) > 0
        : settings.is_update_available || false;
      res.json({
        currentVersion,
        latestVersion,
        isUpdateAvailable,
        lastUpdateCheck: settings.last_update_check || null,
        repositoryType: settings.repository_type || "public",
        github: {
          repository: githubRepoUrl,
          owner: owner,
          repo: repo,
          latestRelease: latestRelease,
          latestCommit: latestCommit,
          commitDifference: commitDifference,
        },
      });
    } catch (error) {
      console.error("Error getting update information:", error);
      res.status(500).json({ error: "Failed to get update information" });
    }
  },
);

module.exports = router;
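For clarity, the comparator above does plain numeric comparison per dot-separated field; a few expected results (illustration only, not part of the diff):

compareVersions("1.3.0", "1.2.8"); //  1 -> update available
compareVersions("1.2.8", "1.3.0"); // -1
compareVersions("1.2", "1.2.0");   //  0 -> missing fields are treated as 0
// Pre-release suffixes such as "1.3.0-rc1" are not handled: Number("0-rc1")
// is NaN, and NaN comparisons are false, so that field compares as equal.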
163  backend/src/routes/wsRoutes.js  Normal file
@@ -0,0 +1,163 @@
const express = require("express");
const { authenticateToken } = require("../middleware/auth");
const {
  getConnectionInfo,
  subscribeToConnectionChanges,
} = require("../services/agentWs");
const {
  validate_session,
  update_session_activity,
} = require("../utils/session_manager");

const router = express.Router();

// Get WebSocket connection status for multiple hosts at once (bulk endpoint)
router.get("/status", authenticateToken, async (req, res) => {
  try {
    const { apiIds } = req.query; // Comma-separated list of api_ids
    const idArray = apiIds ? apiIds.split(",").filter((id) => id.trim()) : [];

    const statusMap = {};
    idArray.forEach((apiId) => {
      statusMap[apiId] = getConnectionInfo(apiId);
    });

    res.json({
      success: true,
      data: statusMap,
    });
  } catch (error) {
    console.error("Error fetching bulk WebSocket status:", error);
    res.status(500).json({
      success: false,
      error: "Failed to fetch WebSocket status",
    });
  }
});

// Get WebSocket connection status by api_id (single endpoint)
router.get("/status/:apiId", authenticateToken, async (req, res) => {
  try {
    const { apiId } = req.params;

    // Direct in-memory check - no database query needed
    const connectionInfo = getConnectionInfo(apiId);

    // Minimal response for maximum speed
    res.json({
      success: true,
      data: connectionInfo,
    });
  } catch (error) {
    console.error("Error fetching WebSocket status:", error);
    res.status(500).json({
      success: false,
      error: "Failed to fetch WebSocket status",
    });
  }
});

// Server-Sent Events endpoint for real-time status updates (no polling needed!)
router.get("/status/:apiId/stream", async (req, res) => {
  try {
    const { apiId } = req.params;

    // Manual authentication for SSE (EventSource doesn't support custom headers)
    const token =
      req.query.token || req.headers.authorization?.replace("Bearer ", "");
    if (!token) {
      return res.status(401).json({ error: "Authentication required" });
    }

    // Verify token manually with session validation
    const jwt = require("jsonwebtoken");
    try {
      const decoded = jwt.verify(token, process.env.JWT_SECRET);

      // Validate session (same as regular auth middleware)
      const validation = await validate_session(decoded.sessionId, token);
      if (!validation.valid) {
        return res.status(401).json({ error: "Invalid or expired session" });
      }

      // Update session activity to prevent inactivity timeout
      await update_session_activity(decoded.sessionId);

      req.user = validation.user;
    } catch (_err) {
      return res.status(401).json({ error: "Invalid or expired token" });
    }

    console.log("[SSE] Client connected for api_id:", apiId);

    // Set headers for SSE
    res.setHeader("Content-Type", "text/event-stream");
    res.setHeader("Cache-Control", "no-cache");
    res.setHeader("Connection", "keep-alive");
    res.setHeader("X-Accel-Buffering", "no"); // Disable nginx buffering

    // Send initial status immediately
    const initialInfo = getConnectionInfo(apiId);
    res.write(`data: ${JSON.stringify(initialInfo)}\n\n`);
    res.flushHeaders(); // Ensure headers are sent immediately

    // Subscribe to connection changes for this specific api_id
    const unsubscribe = subscribeToConnectionChanges(apiId, (_connected) => {
      try {
        // Push update to client instantly when status changes
        const connectionInfo = getConnectionInfo(apiId);
        console.log(
          `[SSE] Pushing status change for ${apiId}: connected=${connectionInfo.connected} secure=${connectionInfo.secure}`,
        );
        res.write(`data: ${JSON.stringify(connectionInfo)}\n\n`);
      } catch (err) {
        console.error("[SSE] Error writing to stream:", err);
      }
    });

    // Heartbeat to keep connection alive (every 30 seconds)
    const heartbeat = setInterval(() => {
      try {
        res.write(": heartbeat\n\n");
      } catch (err) {
        console.error("[SSE] Error writing heartbeat:", err);
        clearInterval(heartbeat);
      }
    }, 30000);

    // Cleanup on client disconnect
    req.on("close", () => {
      console.log("[SSE] Client disconnected for api_id:", apiId);
      clearInterval(heartbeat);
      unsubscribe();
    });

    // Handle errors - distinguish between different error types
    req.on("error", (err) => {
      // Only log non-connection-reset errors to reduce noise
      if (err.code !== "ECONNRESET" && err.code !== "EPIPE") {
        console.error("[SSE] Request error:", err);
      } else {
        console.log("[SSE] Client connection reset for api_id:", apiId);
      }
      clearInterval(heartbeat);
      unsubscribe();
    });

    // Handle response errors
    res.on("error", (err) => {
      if (err.code !== "ECONNRESET" && err.code !== "EPIPE") {
        console.error("[SSE] Response error:", err);
      }
      clearInterval(heartbeat);
      unsubscribe();
    });
  } catch (error) {
    console.error("[SSE] Unexpected error:", error);
    if (!res.headersSent) {
      res.status(500).json({ error: "Internal server error" });
    }
  }
});

module.exports = router;
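Because EventSource cannot set custom headers, the stream endpoint above accepts the JWT as a query parameter; a minimal browser-side sketch follows (the /api/v1/ws mount path is an assumption, since the router is mounted elsewhere):

// Browser sketch - assumes this router is mounted at /api/v1/ws.
const es = new EventSource(`/api/v1/ws/status/${apiId}/stream?token=${jwt}`);
es.onmessage = (e) => {
  const info = JSON.parse(e.data); // shape comes from getConnectionInfo(apiId)
  console.log("agent status:", info);
};
es.onerror = () => es.close(); // EventSource reconnects by default; close if that's unwanted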
File diff suppressed because it is too large
746  backend/src/services/agentVersionService.js  Normal file
@@ -0,0 +1,746 @@
const axios = require("axios");
const fs = require("node:fs").promises;
const path = require("node:path");
const { exec, spawn } = require("node:child_process");
const { promisify } = require("node:util");
const _execAsync = promisify(exec);

// Simple semver comparison function
function compareVersions(version1, version2) {
  const v1parts = version1.split(".").map(Number);
  const v2parts = version2.split(".").map(Number);

  // Ensure both arrays have the same length
  while (v1parts.length < 3) v1parts.push(0);
  while (v2parts.length < 3) v2parts.push(0);

  for (let i = 0; i < 3; i++) {
    if (v1parts[i] > v2parts[i]) return 1;
    if (v1parts[i] < v2parts[i]) return -1;
  }
  return 0;
}
const crypto = require("node:crypto");

class AgentVersionService {
  constructor() {
    this.githubApiUrl =
      "https://api.github.com/repos/PatchMon/PatchMon-agent/releases";
    this.agentsDir = path.resolve(__dirname, "../../../agents");
    this.supportedArchitectures = [
      "linux-amd64",
      "linux-arm64",
      "linux-386",
      "linux-arm",
    ];
    this.currentVersion = null;
    this.latestVersion = null;
    this.lastChecked = null;
    this.checkInterval = 30 * 60 * 1000; // 30 minutes
  }

  async initialize() {
    try {
      // Ensure agents directory exists
      await fs.mkdir(this.agentsDir, { recursive: true });

      console.log("🔍 Testing GitHub API connectivity...");
      try {
        const testResponse = await axios.get(
          "https://api.github.com/repos/PatchMon/PatchMon-agent/releases",
          {
            timeout: 5000,
            headers: {
              "User-Agent": "PatchMon-Server/1.0",
              Accept: "application/vnd.github.v3+json",
            },
          },
        );
        console.log(
          `✅ GitHub API accessible - found ${testResponse.data.length} releases`,
        );
      } catch (testError) {
        console.error("❌ GitHub API not accessible:", testError.message);
        if (testError.response) {
          console.error(
            "❌ Status:",
            testError.response.status,
            testError.response.statusText,
          );
          if (testError.response.status === 403) {
            console.log("⚠️ GitHub API rate limit exceeded - will retry later");
          }
        }
      }

      // Get current agent version by executing the binary
      await this.getCurrentAgentVersion();

      // Try to check for updates, but don't fail initialization if GitHub API is unavailable
      try {
        await this.checkForUpdates();
      } catch (updateError) {
        console.log(
          "⚠️ Failed to check for updates on startup, will retry later:",
          updateError.message,
        );
      }

      // Set up periodic checking
      setInterval(() => {
        this.checkForUpdates().catch((error) => {
          console.log("⚠️ Periodic update check failed:", error.message);
        });
      }, this.checkInterval);

      console.log("✅ Agent Version Service initialized");
    } catch (error) {
      console.error(
        "❌ Failed to initialize Agent Version Service:",
        error.message,
      );
    }
  }

  async getCurrentAgentVersion() {
    try {
      console.log("🔍 Getting current agent version...");

      // Try to find the agent binary in agents/ folder only (what gets distributed)
      const possiblePaths = [
        path.join(this.agentsDir, "patchmon-agent-linux-amd64"),
        path.join(this.agentsDir, "patchmon-agent"),
      ];

      let agentPath = null;
      for (const testPath of possiblePaths) {
        try {
          await fs.access(testPath);
          agentPath = testPath;
          console.log(`✅ Found agent binary at: ${testPath}`);
          break;
        } catch {
          // Path doesn't exist, continue to next
        }
      }

      if (!agentPath) {
        console.log(
          "⚠️ No agent binary found in agents/ folder, current version will be unknown",
        );
        console.log("💡 Use the Download Updates button to get agent binaries");
        this.currentVersion = null;
        return;
      }

      // Execute the agent binary with help flag to get version info
      try {
        const child = spawn(agentPath, ["--help"], {
          timeout: 10000,
        });

        let stdout = "";
        let stderr = "";

        child.stdout.on("data", (data) => {
          stdout += data.toString();
        });

        child.stderr.on("data", (data) => {
          stderr += data.toString();
        });

        const result = await new Promise((resolve, reject) => {
          child.on("close", (code) => {
            resolve({ stdout, stderr, code });
          });
          child.on("error", reject);
        });

        if (result.stderr) {
          console.log("⚠️ Agent help stderr:", result.stderr);
        }

        // Parse version from help output (e.g., "PatchMon Agent v1.3.0")
        const versionMatch = result.stdout.match(
          /PatchMon Agent v([0-9]+\.[0-9]+\.[0-9]+)/i,
        );
        if (versionMatch) {
          this.currentVersion = versionMatch[1];
          console.log(`✅ Current agent version: ${this.currentVersion}`);
        } else {
          console.log(
            "⚠️ Could not parse version from agent help output:",
            result.stdout,
          );
          this.currentVersion = null;
        }
      } catch (execError) {
        console.error("❌ Failed to execute agent binary:", execError.message);
        this.currentVersion = null;
      }
    } catch (error) {
      console.error("❌ Failed to get current agent version:", error.message);
      this.currentVersion = null;
    }
  }

  async checkForUpdates() {
    try {
      console.log("🔍 Checking for agent updates...");

      const response = await axios.get(this.githubApiUrl, {
        timeout: 10000,
        headers: {
          "User-Agent": "PatchMon-Server/1.0",
          Accept: "application/vnd.github.v3+json",
        },
      });

      console.log(`📡 GitHub API response status: ${response.status}`);
      console.log(`📦 Found ${response.data.length} releases`);

      const releases = response.data;
      if (releases.length === 0) {
        console.log("ℹ️ No releases found");
        this.latestVersion = null;
        this.lastChecked = new Date();
        return {
          latestVersion: null,
          currentVersion: this.currentVersion,
          hasUpdate: false,
          lastChecked: this.lastChecked,
        };
      }

      const latestRelease = releases[0];
      this.latestVersion = latestRelease.tag_name.replace("v", ""); // Remove 'v' prefix
      this.lastChecked = new Date();

      console.log(`📦 Latest agent version: ${this.latestVersion}`);

      // Don't download binaries automatically - only when explicitly requested
      console.log(
        "ℹ️ Skipping automatic binary download - binaries will be downloaded on demand",
      );

      return {
        latestVersion: this.latestVersion,
        currentVersion: this.currentVersion,
        hasUpdate: this.currentVersion !== this.latestVersion,
        lastChecked: this.lastChecked,
      };
    } catch (error) {
      console.error("❌ Failed to check for updates:", error.message);
      if (error.response) {
        console.error(
          "❌ GitHub API error:",
          error.response.status,
          error.response.statusText,
        );
        console.error(
          "❌ Rate limit info:",
          error.response.headers["x-ratelimit-remaining"],
          "/",
          error.response.headers["x-ratelimit-limit"],
        );
      }
      throw error;
    }
  }

  async downloadBinariesToAgentsFolder(release) {
    try {
      console.log(
        `⬇️ Downloading binaries for version ${release.tag_name} to agents folder...`,
      );

      for (const arch of this.supportedArchitectures) {
        const assetName = `patchmon-agent-${arch}`;
        const asset = release.assets.find((a) => a.name === assetName);

        if (!asset) {
          console.warn(`⚠️ Binary not found for architecture: ${arch}`);
          continue;
        }

        const binaryPath = path.join(this.agentsDir, assetName);

        console.log(`⬇️ Downloading ${assetName}...`);

        const response = await axios.get(asset.browser_download_url, {
          responseType: "stream",
          timeout: 60000,
        });

        const writer = require("node:fs").createWriteStream(binaryPath);
        response.data.pipe(writer);

        await new Promise((resolve, reject) => {
          writer.on("finish", resolve);
          writer.on("error", reject);
        });

        // Make executable
        await fs.chmod(binaryPath, "755");

        console.log(`✅ Downloaded: ${assetName} to agents folder`);
      }
    } catch (error) {
      console.error(
        "❌ Failed to download binaries to agents folder:",
        error.message,
      );
      throw error;
    }
  }

  async downloadBinaryForVersion(version, architecture) {
    try {
      console.log(
        `⬇️ Downloading binary for version ${version} architecture ${architecture}...`,
      );

      // Get the release info from GitHub
      const response = await axios.get(this.githubApiUrl, {
        timeout: 10000,
        headers: {
          "User-Agent": "PatchMon-Server/1.0",
          Accept: "application/vnd.github.v3+json",
        },
      });

      const releases = response.data;
      const release = releases.find(
        (r) => r.tag_name.replace("v", "") === version,
      );

      if (!release) {
        throw new Error(`Release ${version} not found`);
      }

      const assetName = `patchmon-agent-${architecture}`;
      const asset = release.assets.find((a) => a.name === assetName);

      if (!asset) {
        throw new Error(`Binary not found for architecture: ${architecture}`);
      }
      // Save into the agents directory under the plain asset name so
      // getBinaryPath() below can locate it on the serve-and-retry path
      const binaryPath = path.join(this.agentsDir, assetName);
      console.log(`⬇️ Downloading ${assetName}...`);

      const downloadResponse = await axios.get(asset.browser_download_url, {
        responseType: "stream",
        timeout: 60000,
      });

      const writer = require("node:fs").createWriteStream(binaryPath);
      downloadResponse.data.pipe(writer);

      await new Promise((resolve, reject) => {
        writer.on("finish", resolve);
        writer.on("error", reject);
      });

      // Make executable
      await fs.chmod(binaryPath, "755");

      console.log(`✅ Downloaded: ${assetName}`);
      return binaryPath;
    } catch (error) {
      console.error(
        `❌ Failed to download binary ${version}-${architecture}:`,
        error.message,
      );
      throw error;
    }
  }

  async getBinaryPath(version, architecture) {
    const binaryName = `patchmon-agent-${architecture}`;
    const binaryPath = path.join(this.agentsDir, binaryName);

    try {
      await fs.access(binaryPath);
      return binaryPath;
    } catch {
      throw new Error(`Binary not found: ${binaryName} version ${version}`);
    }
  }

  async serveBinary(version, architecture, res) {
    try {
      // Check if binary exists, if not download it
      const binaryPath = await this.getBinaryPath(version, architecture);
      const stats = await fs.stat(binaryPath);

      res.setHeader("Content-Type", "application/octet-stream");
      res.setHeader(
        "Content-Disposition",
        `attachment; filename="patchmon-agent-${architecture}"`,
      );
      res.setHeader("Content-Length", stats.size);

      // Add cache headers
      res.setHeader("Cache-Control", "public, max-age=3600");
      res.setHeader("ETag", `"${version}-${architecture}"`);

      const stream = require("node:fs").createReadStream(binaryPath);
      stream.pipe(res);
    } catch (_error) {
      // Binary doesn't exist, try to download it
      console.log(
        `⬇️ Binary not found locally, attempting to download ${version}-${architecture}...`,
      );
      try {
        await this.downloadBinaryForVersion(version, architecture);
        // Retry serving the binary
        const binaryPath = await this.getBinaryPath(version, architecture);
        const stats = await fs.stat(binaryPath);

        res.setHeader("Content-Type", "application/octet-stream");
        res.setHeader(
          "Content-Disposition",
          `attachment; filename="patchmon-agent-${architecture}"`,
        );
        res.setHeader("Content-Length", stats.size);
        res.setHeader("Cache-Control", "public, max-age=3600");
        res.setHeader("ETag", `"${version}-${architecture}"`);

        const stream = require("node:fs").createReadStream(binaryPath);
        stream.pipe(res);
      } catch (downloadError) {
        console.error(
          `❌ Failed to download binary ${version}-${architecture}:`,
          downloadError.message,
        );
        res
          .status(404)
          .json({ error: "Binary not found and could not be downloaded" });
      }
    }
  }

  async getVersionInfo() {
    let hasUpdate = false;
    let updateStatus = "unknown";

    // Latest version should ALWAYS come from GitHub, not from local binaries
    // currentVersion = what's installed locally
    // latestVersion = what's available on GitHub
    if (this.latestVersion) {
      console.log(`📦 Latest version from GitHub: ${this.latestVersion}`);
    } else {
      console.log(
        `⚠️ No GitHub release version available (API may be unavailable)`,
      );
    }

    if (this.currentVersion) {
      console.log(`💾 Current local agent version: ${this.currentVersion}`);
    } else {
      console.log(`⚠️ No local agent binary found`);
    }

    // Determine update status by comparing current vs latest (from GitHub)
    if (this.currentVersion && this.latestVersion) {
      const comparison = compareVersions(
        this.currentVersion,
        this.latestVersion,
      );
      if (comparison < 0) {
        hasUpdate = true;
        updateStatus = "update-available";
      } else if (comparison > 0) {
        hasUpdate = false;
        updateStatus = "newer-version";
      } else {
        hasUpdate = false;
        updateStatus = "up-to-date";
      }
    } else if (this.latestVersion && !this.currentVersion) {
      hasUpdate = true;
      updateStatus = "no-agent";
    } else if (this.currentVersion && !this.latestVersion) {
      // We have a current version but no latest version (GitHub API unavailable)
      hasUpdate = false;
      updateStatus = "github-unavailable";
    } else if (!this.currentVersion && !this.latestVersion) {
      updateStatus = "no-data";
    }

    return {
      currentVersion: this.currentVersion,
      latestVersion: this.latestVersion, // Always return GitHub version, not local
      hasUpdate: hasUpdate,
      updateStatus: updateStatus,
      lastChecked: this.lastChecked,
      supportedArchitectures: this.supportedArchitectures,
      status: this.latestVersion ? "ready" : "no-releases",
    };
  }

  async refreshCurrentVersion() {
    await this.getCurrentAgentVersion();
    return this.currentVersion;
  }

  async downloadLatestUpdate() {
    try {
      console.log("⬇️ Downloading latest agent update...");

      // First check for updates to get the latest release info
      const _updateInfo = await this.checkForUpdates();

      if (!this.latestVersion) {
        throw new Error("No latest version available to download");
      }

      // Get the release info from GitHub
      const response = await axios.get(this.githubApiUrl, {
        timeout: 10000,
        headers: {
          "User-Agent": "PatchMon-Server/1.0",
          Accept: "application/vnd.github.v3+json",
        },
      });

      const releases = response.data;
      const latestRelease = releases[0];

      if (!latestRelease) {
        throw new Error("No releases found");
      }

      console.log(
        `⬇️ Downloading binaries for version ${latestRelease.tag_name}...`,
      );

      // Download binaries for all architectures directly to agents folder
      await this.downloadBinariesToAgentsFolder(latestRelease);

      console.log("✅ Latest update downloaded successfully");

      return {
        success: true,
        version: this.latestVersion,
        downloadedArchitectures: this.supportedArchitectures,
        message: `Successfully downloaded version ${this.latestVersion}`,
      };
    } catch (error) {
      console.error("❌ Failed to download latest update:", error.message);
      throw error;
    }
  }

  async getAvailableVersions() {
    // No local caching - only return latest from GitHub
    if (this.latestVersion) {
      return [this.latestVersion];
    }
    return [];
  }

  async getBinaryInfo(version, architecture) {
    try {
      // Always use local version if it matches the requested version
      if (version === this.currentVersion && this.currentVersion) {
        const binaryPath = await this.getBinaryPath(
          this.currentVersion,
          architecture,
        );
        const stats = await fs.stat(binaryPath);

        // Calculate file hash
        const fileBuffer = await fs.readFile(binaryPath);
        const hash = crypto
          .createHash("sha256")
          .update(fileBuffer)
          .digest("hex");

        return {
          version: this.currentVersion,
          architecture,
          size: stats.size,
          hash,
          lastModified: stats.mtime,
          path: binaryPath,
        };
      }

      // For other versions, try to find them in the agents folder
      const binaryPath = await this.getBinaryPath(version, architecture);
      const stats = await fs.stat(binaryPath);

      // Calculate file hash
      const fileBuffer = await fs.readFile(binaryPath);
      const hash = crypto.createHash("sha256").update(fileBuffer).digest("hex");

      return {
        version,
        architecture,
        size: stats.size,
        hash,
        lastModified: stats.mtime,
        path: binaryPath,
      };
    } catch (error) {
      throw new Error(`Failed to get binary info: ${error.message}`);
    }
  }

  /**
   * Check if an agent needs an update and push notification if needed
   * @param {string} agentApiId - The agent's API ID
   * @param {string} agentVersion - The agent's current version
   * @param {boolean} force - Force update regardless of version
   * @returns {Object} Update check result
   */
  async checkAndPushAgentUpdate(agentApiId, agentVersion, force = false) {
    try {
      console.log(
        `🔍 Checking update for agent ${agentApiId} (version: ${agentVersion})`,
      );

      // Get current server version info
      const versionInfo = await this.getVersionInfo();

      if (!versionInfo.latestVersion) {
        console.log(`⚠️ No latest version available for agent ${agentApiId}`);
        return {
          needsUpdate: false,
          reason: "no-latest-version",
          message: "No latest version available on server",
        };
      }

      // Compare versions
      const comparison = compareVersions(
        agentVersion,
        versionInfo.latestVersion,
      );
      const needsUpdate = force || comparison < 0;

      if (needsUpdate) {
        console.log(
          `📤 Agent ${agentApiId} needs update: ${agentVersion} → ${versionInfo.latestVersion}`,
        );

        // Import agentWs service to push notification
        const { pushUpdateNotification } = require("./agentWs");

        const updateInfo = {
          version: versionInfo.latestVersion,
          force: force,
          downloadUrl: `/api/v1/agent/binary/${versionInfo.latestVersion}/linux-amd64`,
          message: force
            ? "Force update requested"
            : `Update available: ${versionInfo.latestVersion}`,
        };

        const pushed = pushUpdateNotification(agentApiId, updateInfo);

        if (pushed) {
          console.log(`✅ Update notification pushed to agent ${agentApiId}`);
          return {
            needsUpdate: true,
            reason: force ? "force-update" : "version-outdated",
            message: `Update notification sent: ${agentVersion} → ${versionInfo.latestVersion}`,
            targetVersion: versionInfo.latestVersion,
          };
        } else {
          console.log(
            `⚠️ Failed to push update notification to agent ${agentApiId} (not connected)`,
          );
          return {
            needsUpdate: true,
            reason: "agent-offline",
            message: "Agent needs update but is not connected",
            targetVersion: versionInfo.latestVersion,
          };
        }
      } else {
        console.log(`✅ Agent ${agentApiId} is up to date: ${agentVersion}`);
        return {
          needsUpdate: false,
          reason: "up-to-date",
          message: `Agent is up to date: ${agentVersion}`,
        };
      }
    } catch (error) {
      console.error(
        `❌ Failed to check update for agent ${agentApiId}:`,
        error.message,
      );
      return {
        needsUpdate: false,
        reason: "error",
        message: `Error checking update: ${error.message}`,
      };
    }
  }

  /**
   * Check and push updates to all connected agents
   * @param {boolean} force - Force update regardless of version
   * @returns {Object} Bulk update result
   */
  async checkAndPushUpdatesToAll(force = false) {
    try {
      console.log(
        `🔍 Checking updates for all connected agents (force: ${force})`,
      );

      // Import agentWs service to get connected agents
      const { pushUpdateNotificationToAll } = require("./agentWs");

      const versionInfo = await this.getVersionInfo();

      if (!versionInfo.latestVersion) {
        return {
          success: false,
          message: "No latest version available on server",
          updatedAgents: 0,
          totalAgents: 0,
        };
      }

      const updateInfo = {
        version: versionInfo.latestVersion,
        force: force,
        downloadUrl: `/api/v1/agent/binary/${versionInfo.latestVersion}/linux-amd64`,
        message: force
          ? "Force update requested for all agents"
          : `Update available: ${versionInfo.latestVersion}`,
      };

      const result = await pushUpdateNotificationToAll(updateInfo);

      console.log(
        `✅ Bulk update notification sent to ${result.notifiedCount} agents`,
      );
      return {
        success: true,
        message: `Update notifications sent to ${result.notifiedCount} agents`,
        updatedAgents: result.notifiedCount,
        // pushUpdateNotificationToAll() returns { notifiedCount, failedCount },
        // so the total is derived from those two counters
        totalAgents: result.notifiedCount + result.failedCount,
        targetVersion: versionInfo.latestVersion,
      };
    } catch (error) {
      console.error("❌ Failed to push updates to all agents:", error.message);
      return {
        success: false,
        message: `Error pushing updates: ${error.message}`,
        updatedAgents: 0,
        totalAgents: 0,
      };
    }
  }
}

module.exports = new AgentVersionService();
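A minimal sketch of driving the exported singleton from server bootstrap code; the call sites are assumptions, since only the instance export is defined in this file:

// Sketch only - run inside an async bootstrap function.
const agentVersionService = require("./services/agentVersionService");

await agentVersionService.initialize(); // probes GitHub and the local binary
const info = await agentVersionService.getVersionInfo();
if (info.updateStatus === "update-available") {
  // Optionally notify every connected agent over WebSocket.
  await agentVersionService.checkAndPushUpdatesToAll(false);
}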
461  backend/src/services/agentWs.js  Normal file
@@ -0,0 +1,461 @@
// Lightweight WebSocket hub for agent connections
// Auth: X-API-ID / X-API-KEY headers on the upgrade request

const WebSocket = require("ws");
const url = require("node:url");
const { get_current_time } = require("../utils/timezone");

// Connection registry by api_id
const apiIdToSocket = new Map();

// Connection metadata (secure/insecure)
// Map<api_id, { ws: WebSocket, secure: boolean }>
const connectionMetadata = new Map();

// Subscribers for connection status changes (for SSE)
// Map<api_id, Set<callback>>
const connectionChangeSubscribers = new Map();

let wss;
let prisma;

function init(server, prismaClient) {
  prisma = prismaClient;
  wss = new WebSocket.Server({ noServer: true });

  // Handle HTTP upgrade events and authenticate before accepting WS
  server.on("upgrade", async (request, socket, head) => {
    try {
      const { pathname } = url.parse(request.url);
      if (!pathname) {
        socket.destroy();
        return;
      }

      // Handle Bull Board WebSocket connections
      if (pathname.startsWith("/bullboard")) {
        // For Bull Board, we need to check if the user is authenticated
        // Check for session cookie or authorization header
        const sessionCookie = request.headers.cookie?.match(
          /bull-board-session=([^;]+)/,
        )?.[1];
        const authHeader = request.headers.authorization;

        if (!sessionCookie && !authHeader) {
          socket.destroy();
          return;
        }

        // Accept the WebSocket connection for Bull Board
        wss.handleUpgrade(request, socket, head, (ws) => {
          ws.on("message", (message) => {
            // Echo back for Bull Board WebSocket
            try {
              ws.send(message);
            } catch (_err) {
              // Ignore send errors (connection may be closed)
            }
          });

          ws.on("error", (err) => {
            // Handle WebSocket errors gracefully for Bull Board
            if (
              err.code === "WS_ERR_INVALID_CLOSE_CODE" ||
              err.code === "ECONNRESET" ||
              err.code === "EPIPE"
            ) {
              // These are expected errors, just log quietly
              console.log("[bullboard-ws] connection error:", err.code);
            } else {
              console.error("[bullboard-ws] error:", err.message || err);
            }
          });

          ws.on("close", () => {
            // Connection closed, no action needed
          });
        });
        return;
      }

      // Handle agent WebSocket connections
      if (!pathname.startsWith("/api/")) {
        socket.destroy();
        return;
      }

      // Expected path: /api/{v}/agents/ws
      const parts = pathname.split("/").filter(Boolean); // [api, v1, agents, ws]
      if (parts.length !== 4 || parts[2] !== "agents" || parts[3] !== "ws") {
        socket.destroy();
        return;
      }

      const apiId = request.headers["x-api-id"];
      const apiKey = request.headers["x-api-key"];
      if (!apiId || !apiKey) {
        socket.destroy();
        return;
      }

      // Validate credentials
      const host = await prisma.hosts.findUnique({ where: { api_id: apiId } });
      if (!host || host.api_key !== apiKey) {
        socket.destroy();
        return;
      }

      wss.handleUpgrade(request, socket, head, (ws) => {
        ws.apiId = apiId;

        // Detect if connection is secure (wss://) or not (ws://)
        const isSecure =
          socket.encrypted || request.headers["x-forwarded-proto"] === "https";

        apiIdToSocket.set(apiId, ws);
        connectionMetadata.set(apiId, { ws, secure: isSecure });

        console.log(
          `[agent-ws] connected api_id=${apiId} protocol=${isSecure ? "wss" : "ws"} total=${apiIdToSocket.size}`,
        );

        // Notify subscribers of connection
        notifyConnectionChange(apiId, true);

        ws.on("message", async (data) => {
          // Handle incoming messages from agent (e.g., Docker status updates)
          try {
            const message = JSON.parse(data.toString());

            if (message.type === "docker_status") {
              // Handle Docker container status events
              await handleDockerStatusEvent(apiId, message);
            }
            // Add more message types here as needed
          } catch (err) {
            console.error(
              `[agent-ws] error parsing message from ${apiId}:`,
              err,
            );
          }
        });

        ws.on("error", (err) => {
          // Handle WebSocket errors gracefully without crashing
          // Common errors: invalid close codes (1006), connection resets, etc.
          if (
            err.code === "WS_ERR_INVALID_CLOSE_CODE" ||
            err.message?.includes("invalid status code 1006") ||
            err.message?.includes("Invalid WebSocket frame")
          ) {
            // 1006 is a special close code indicating abnormal closure
            // It cannot be sent in a close frame, but can occur when connection is lost
            console.log(
              `[agent-ws] connection error for ${apiId} (abnormal closure):`,
              err.message || err.code,
            );
          } else if (
            err.code === "ECONNRESET" ||
            err.code === "EPIPE" ||
            err.message?.includes("read ECONNRESET")
          ) {
            // Connection reset errors are common and expected
            console.log(`[agent-ws] connection reset for ${apiId}`);
          } else {
            // Log other errors for debugging
            console.error(
              `[agent-ws] error for ${apiId}:`,
              err.message || err.code || err,
            );
          }

          // Clean up connection on error
          const existing = apiIdToSocket.get(apiId);
          if (existing === ws) {
            apiIdToSocket.delete(apiId);
            connectionMetadata.delete(apiId);
            // Notify subscribers of disconnection
            notifyConnectionChange(apiId, false);
          }

          // Try to close the connection gracefully if still open
          if (
            ws.readyState === WebSocket.OPEN ||
            ws.readyState === WebSocket.CONNECTING
          ) {
            try {
              ws.close(1000); // Normal closure
            } catch {
              // Ignore errors when closing
            }
          }
        });

        ws.on("close", (code, reason) => {
          const existing = apiIdToSocket.get(apiId);
          if (existing === ws) {
            apiIdToSocket.delete(apiId);
            connectionMetadata.delete(apiId);
            // Notify subscribers of disconnection
            notifyConnectionChange(apiId, false);
          }
          console.log(
            `[agent-ws] disconnected api_id=${apiId} code=${code} reason=${reason || "none"} total=${apiIdToSocket.size}`,
          );
        });

        // Optional: greet/ack
        safeSend(ws, JSON.stringify({ type: "connected" }));
      });
    } catch (_err) {
      try {
        socket.destroy();
      } catch {
        /* ignore */
      }
    }
  });
}
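// For context, a minimal agent-side connection sketch against this hub
// (assumption: the agent uses the "ws" npm client; SERVER, apiId and apiKey
// are placeholders):
//
//   const WebSocket = require("ws");
//   const ws = new WebSocket("wss://SERVER/api/v1/agents/ws", {
//     headers: { "X-API-ID": apiId, "X-API-KEY": apiKey }, // validated during the HTTP upgrade
//   });
//   ws.on("message", (raw) => {
//     const msg = JSON.parse(raw.toString());
//     // e.g. { type: "connected" }, { type: "report_now" }, { type: "settings_update", ... }
//   });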
function safeSend(ws, data) {
  if (ws && ws.readyState === WebSocket.OPEN) {
    try {
      ws.send(data);
    } catch {
      /* ignore */
    }
  }
}

function broadcastSettingsUpdate(newInterval) {
  const payload = JSON.stringify({
    type: "settings_update",
    update_interval: newInterval,
  });
  for (const [, ws] of apiIdToSocket) {
    safeSend(ws, payload);
  }
}

function pushReportNow(apiId) {
  const ws = apiIdToSocket.get(apiId);
  safeSend(ws, JSON.stringify({ type: "report_now" }));
}

function pushSettingsUpdate(apiId, newInterval) {
  const ws = apiIdToSocket.get(apiId);
  safeSend(
    ws,
    JSON.stringify({ type: "settings_update", update_interval: newInterval }),
  );
}

function pushUpdateAgent(apiId) {
  const ws = apiIdToSocket.get(apiId);
  safeSend(ws, JSON.stringify({ type: "update_agent" }));
}

function pushIntegrationToggle(apiId, integrationName, enabled) {
  const ws = apiIdToSocket.get(apiId);
  if (ws && ws.readyState === WebSocket.OPEN) {
    safeSend(
      ws,
      JSON.stringify({
        type: "integration_toggle",
        integration: integrationName,
        enabled: enabled,
      }),
    );
    console.log(
      `📤 Pushed integration toggle to agent ${apiId}: ${integrationName} = ${enabled}`,
    );
    return true;
  } else {
    console.log(
      `⚠️ Agent ${apiId} not connected, cannot push integration toggle, please edit config.yml manually`,
    );
    return false;
  }
}

function getConnectionByApiId(apiId) {
  return apiIdToSocket.get(apiId);
}

function pushUpdateNotification(apiId, updateInfo) {
  const ws = apiIdToSocket.get(apiId);
  if (ws && ws.readyState === WebSocket.OPEN) {
    safeSend(
      ws,
      JSON.stringify({
        type: "update_notification",
        version: updateInfo.version,
        force: updateInfo.force || false,
        downloadUrl: updateInfo.downloadUrl,
        message: updateInfo.message,
      }),
    );
    console.log(
      `📤 Pushed update notification to agent ${apiId}: version ${updateInfo.version}`,
    );
    return true;
  } else {
    console.log(
      `⚠️ Agent ${apiId} not connected, cannot push update notification`,
    );
    return false;
  }
}

async function pushUpdateNotificationToAll(updateInfo) {
  let notifiedCount = 0;
  let failedCount = 0;

  for (const [apiId, ws] of apiIdToSocket) {
    if (ws && ws.readyState === WebSocket.OPEN) {
      try {
        safeSend(
          ws,
          JSON.stringify({
            type: "update_notification",
            version: updateInfo.version,
            force: updateInfo.force || false,
            message: updateInfo.message,
          }),
        );
        notifiedCount++;
        console.log(
          `📤 Pushed update notification to agent ${apiId}: version ${updateInfo.version}`,
        );
      } catch (error) {
        failedCount++;
        console.error(`❌ Failed to notify agent ${apiId}:`, error.message);
      }
    } else {
      failedCount++;
    }
  }

  console.log(
    `📤 Update notification sent to ${notifiedCount} agents, ${failedCount} failed`,
  );
  return { notifiedCount, failedCount };
}

// Notify all subscribers when connection status changes
function notifyConnectionChange(apiId, connected) {
  const subscribers = connectionChangeSubscribers.get(apiId);
  if (subscribers) {
    for (const callback of subscribers) {
      try {
        callback(connected);
      } catch (err) {
        console.error(`[agent-ws] error notifying subscriber:`, err);
      }
    }
  }
}

// Subscribe to connection status changes for a specific api_id
function subscribeToConnectionChanges(apiId, callback) {
  if (!connectionChangeSubscribers.has(apiId)) {
    connectionChangeSubscribers.set(apiId, new Set());
  }
  connectionChangeSubscribers.get(apiId).add(callback);

  // Return unsubscribe function
  return () => {
    const subscribers = connectionChangeSubscribers.get(apiId);
    if (subscribers) {
      subscribers.delete(callback);
      if (subscribers.size === 0) {
        connectionChangeSubscribers.delete(apiId);
      }
    }
  };
}

// Handle Docker container status events from agent
async function handleDockerStatusEvent(apiId, message) {
  try {
    const { event: _event, container_id, name, status, timestamp } = message;

    console.log(
      `[Docker Event] ${apiId}: Container ${name} (${container_id}) - ${status}`,
    );

    // Find the host
    const host = await prisma.hosts.findUnique({
      where: { api_id: apiId },
    });

    if (!host) {
      console.error(`[Docker Event] Host not found for api_id: ${apiId}`);
      return;
    }

    // Update container status in database
    const container = await prisma.docker_containers.findUnique({
      where: {
        host_id_container_id: {
          host_id: host.id,
          container_id: container_id,
        },
      },
    });

    if (container) {
      await prisma.docker_containers.update({
        where: { id: container.id },
        data: {
          status: status,
          state: status,
          updated_at: new Date(timestamp || Date.now()),
          last_checked: get_current_time(),
        },
      });

      console.log(
        `[Docker Event] Updated container ${name} status to ${status}`,
      );
    } else {
      console.log(
        `[Docker Event] Container ${name} not found in database (may be new)`,
      );
    }

    // TODO: Broadcast to connected dashboard clients via SSE or WebSocket
    // This would notify the frontend UI in real-time
|
||||||
|
} catch (error) {
|
||||||
|
console.error(`[Docker Event] Error handling Docker status event:`, error);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
init,
|
||||||
|
broadcastSettingsUpdate,
|
||||||
|
pushReportNow,
|
||||||
|
pushSettingsUpdate,
|
||||||
|
pushUpdateAgent,
|
||||||
|
pushIntegrationToggle,
|
||||||
|
pushUpdateNotification,
|
||||||
|
pushUpdateNotificationToAll,
|
||||||
|
// Expose read-only view of connected agents
|
||||||
|
getConnectedApiIds: () => Array.from(apiIdToSocket.keys()),
|
||||||
|
getConnectionByApiId,
|
||||||
|
isConnected: (apiId) => {
|
||||||
|
const ws = apiIdToSocket.get(apiId);
|
||||||
|
return !!ws && ws.readyState === WebSocket.OPEN;
|
||||||
|
},
|
||||||
|
// Get connection info including protocol (ws/wss)
|
||||||
|
getConnectionInfo: (apiId) => {
|
||||||
|
const metadata = connectionMetadata.get(apiId);
|
||||||
|
if (!metadata) {
|
||||||
|
return { connected: false, secure: false };
|
||||||
|
}
|
||||||
|
const connected = metadata.ws.readyState === WebSocket.OPEN;
|
||||||
|
return { connected, secure: metadata.secure };
|
||||||
|
},
|
||||||
|
// Subscribe to connection status changes (for SSE)
|
||||||
|
subscribeToConnectionChanges,
|
||||||
|
};
|
||||||
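For context, a minimal sketch of how a route layer might drive these exports. The Express wiring, route paths, and the require path are illustrative assumptions, not part of this module:

// Illustrative only: assumes an Express app and this module required as agentWs.
const express = require("express");
const agentWs = require("./services/agentWs"); // path is an assumption

const router = express.Router();

// Ask a connected agent to report immediately; 409 if it is offline.
router.post("/hosts/:apiId/report-now", (req, res) => {
  const { apiId } = req.params;
  if (!agentWs.isConnected(apiId)) {
    return res.status(409).json({ error: "agent not connected" });
  }
  agentWs.pushReportNow(apiId);
  res.json({ queued: true });
});

// SSE endpoint built on subscribeToConnectionChanges; unsubscribes on close.
router.get("/hosts/:apiId/status-stream", (req, res) => {
  res.setHeader("Content-Type", "text/event-stream");
  res.flushHeaders();
  const unsubscribe = agentWs.subscribeToConnectionChanges(
    req.params.apiId,
    (connected) => res.write(`data: ${JSON.stringify({ connected })}\n\n`),
  );
  req.on("close", unsubscribe);
});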
backend/src/services/automation/dockerImageUpdateCheck.js (Normal file, 341 lines)
@@ -0,0 +1,341 @@
const { prisma } = require("./shared/prisma");
const https = require("node:https");
const http = require("node:http");
const { v4: uuidv4 } = require("uuid");

/**
 * Docker Image Update Check Automation
 * Checks for Docker image updates by comparing local digests with remote registry digests
 */
class DockerImageUpdateCheck {
  constructor(queueManager) {
    this.queueManager = queueManager;
    this.queueName = "docker-image-update-check";
  }

  /**
   * Get remote digest from Docker registry using HEAD request
   * Supports Docker Hub, GHCR, and other OCI-compliant registries
   */
  async getRemoteDigest(imageName, tag = "latest") {
    return new Promise((resolve, reject) => {
      // Parse image name to determine registry
      const registryInfo = this.parseImageName(imageName);

      // Construct manifest URL
      const manifestPath = `/v2/${registryInfo.repository}/manifests/${tag}`;
      const options = {
        hostname: registryInfo.registry,
        path: manifestPath,
        method: "HEAD",
        headers: {
          Accept:
            "application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json",
          "User-Agent": "PatchMon/1.0",
        },
      };

      // Add authentication token for Docker Hub if needed
      if (
        registryInfo.registry === "registry-1.docker.io" &&
        registryInfo.isPublic
      ) {
        // For anonymous public images, we may need to get an auth token first
        // For now, try without auth (works for public images)
      }

      // Choose HTTP or HTTPS
      const client = registryInfo.isSecure ? https : http;

      const req = client.request(options, (res) => {
        if (res.statusCode === 401 || res.statusCode === 403) {
          // Authentication required - skip for now (would need to implement auth)
          return reject(
            new Error(`Authentication required for ${imageName}:${tag}`),
          );
        }

        if (res.statusCode !== 200) {
          return reject(
            new Error(
              `Registry returned status ${res.statusCode} for ${imageName}:${tag}`,
            ),
          );
        }

        // Get digest from Docker-Content-Digest header
        const digest = res.headers["docker-content-digest"];
        if (!digest) {
          return reject(
            new Error(
              `No Docker-Content-Digest header for ${imageName}:${tag}`,
            ),
          );
        }

        // Clean up digest (remove sha256: prefix if present)
        const cleanDigest = digest.startsWith("sha256:")
          ? digest.substring(7)
          : digest;
        resolve(cleanDigest);
      });

      req.on("error", (error) => {
        reject(error);
      });

      req.setTimeout(10000, () => {
        req.destroy();
        reject(new Error(`Timeout getting digest for ${imageName}:${tag}`));
      });

      req.end();
    });
  }

  /**
   * Parse image name to extract registry, repository, and determine if secure
   */
  parseImageName(imageName) {
    let registry = "registry-1.docker.io";
    let repository = imageName;
    const isSecure = true;
    let isPublic = true;

    // Handle explicit registries (ghcr.io, quay.io, etc.)
    if (imageName.includes("/")) {
      const parts = imageName.split("/");
      const firstPart = parts[0];

      // Check for known registries
      if (firstPart.includes(".") || firstPart === "localhost") {
        registry = firstPart;
        repository = parts.slice(1).join("/");
        isPublic = false; // Assume private registries need auth for now
      } else {
        // Docker Hub - registry-1.docker.io
        repository = imageName;
      }
    }

    // Docker Hub official images (no namespace)
    if (!repository.includes("/")) {
      repository = `library/${repository}`;
    }

    return {
      registry,
      repository,
      isSecure,
      isPublic,
    };
  }

  /**
   * Process Docker image update check job
   */
  async process(_job) {
    const startTime = Date.now();
    console.log("🐳 Starting Docker image update check...");

    try {
      // Get all Docker images that have a digest
      // Note: repository is required (non-nullable) in schema, so we don't need to check it
      const images = await prisma.docker_images.findMany({
        where: {
          digest: {
            not: null,
          },
        },
        include: {
          docker_image_updates: true,
        },
      });

      console.log(`📦 Found ${images.length} images to check for updates`);

      let checkedCount = 0;
      let updateCount = 0;
      let errorCount = 0;
      const errors = [];

      // Process images in batches to avoid overwhelming the API
      const batchSize = 10;
      for (let i = 0; i < images.length; i += batchSize) {
        const batch = images.slice(i, i + batchSize);

        // Process batch concurrently with Promise.allSettled for error tolerance
        const _results = await Promise.allSettled(
          batch.map(async (image) => {
            try {
              checkedCount++;

              // Skip local images (no digest means they're local)
              if (!image.digest || image.digest.trim() === "") {
                return { image, skipped: true, reason: "No digest" };
              }

              // Get clean digest (remove sha256: prefix if present)
              const localDigest = image.digest.startsWith("sha256:")
                ? image.digest.substring(7)
                : image.digest;

              // Get remote digest from registry
              const remoteDigest = await this.getRemoteDigest(
                image.repository,
                image.tag || "latest",
              );

              // Compare digests
              if (localDigest !== remoteDigest) {
                console.log(
                  `🔄 Update found: ${image.repository}:${image.tag} (local: ${localDigest.substring(0, 12)}..., remote: ${remoteDigest.substring(0, 12)}...)`,
                );

                // Store digest info in changelog_url field as JSON
                const digestInfo = JSON.stringify({
                  method: "digest_comparison",
                  current_digest: localDigest,
                  available_digest: remoteDigest,
                  checked_at: new Date().toISOString(),
                });

                // Upsert the update record
                await prisma.docker_image_updates.upsert({
                  where: {
                    image_id_available_tag: {
                      image_id: image.id,
                      available_tag: image.tag || "latest",
                    },
                  },
                  update: {
                    updated_at: new Date(),
                    changelog_url: digestInfo,
                    severity: "digest_changed",
                  },
                  create: {
                    id: uuidv4(),
                    image_id: image.id,
                    current_tag: image.tag || "latest",
                    available_tag: image.tag || "latest",
                    severity: "digest_changed",
                    changelog_url: digestInfo,
                    updated_at: new Date(),
                  },
                });

                // Update last_checked timestamp on image
                await prisma.docker_images.update({
                  where: { id: image.id },
                  data: { last_checked: new Date() },
                });

                updateCount++;
                return { image, updated: true };
              } else {
                // No update - still update last_checked
                await prisma.docker_images.update({
                  where: { id: image.id },
                  data: { last_checked: new Date() },
                });

                // Remove existing update record if digest matches now
                const existingUpdate = image.docker_image_updates?.find(
                  (u) => u.available_tag === (image.tag || "latest"),
                );
                if (existingUpdate) {
                  await prisma.docker_image_updates.delete({
                    where: { id: existingUpdate.id },
                  });
                }

                return { image, updated: false };
              }
            } catch (error) {
              errorCount++;
              const errorMsg = `Error checking ${image.repository}:${image.tag}: ${error.message}`;
              errors.push(errorMsg);
              console.error(`❌ ${errorMsg}`);

              // Still update last_checked even on error
              try {
                await prisma.docker_images.update({
                  where: { id: image.id },
                  data: { last_checked: new Date() },
                });
              } catch (_updateError) {
                // Ignore update errors
              }

              return { image, error: error.message };
            }
          }),
        );

        // Log batch progress
        if (i + batchSize < images.length) {
          console.log(
            `⏳ Processed ${Math.min(i + batchSize, images.length)}/${images.length} images...`,
          );
        }

        // Small delay between batches to be respectful to registries
        if (i + batchSize < images.length) {
          await new Promise((resolve) => setTimeout(resolve, 500));
        }
      }

      const executionTime = Date.now() - startTime;
      console.log(
        `✅ Docker image update check completed in ${executionTime}ms - Checked: ${checkedCount}, Updates: ${updateCount}, Errors: ${errorCount}`,
      );

      return {
        success: true,
        checked: checkedCount,
        updates: updateCount,
        errors: errorCount,
        executionTime,
        errorDetails: errors,
      };
    } catch (error) {
      const executionTime = Date.now() - startTime;
      console.error(
        `❌ Docker image update check failed after ${executionTime}ms:`,
        error.message,
      );
      throw error;
    }
  }

  /**
   * Schedule recurring Docker image update check (daily at 2 AM)
   */
  async schedule() {
    const job = await this.queueManager.queues[this.queueName].add(
      "docker-image-update-check",
      {},
      {
        repeat: { cron: "0 2 * * *" }, // Daily at 2 AM
        jobId: "docker-image-update-check-recurring",
      },
    );
    console.log("✅ Docker image update check scheduled");
    return job;
  }

  /**
   * Trigger manual Docker image update check
   */
  async triggerManual() {
    const job = await this.queueManager.queues[this.queueName].add(
      "docker-image-update-check-manual",
      {},
      { priority: 1 },
    );
    console.log("✅ Manual Docker image update check triggered");
    return job;
  }
}

module.exports = DockerImageUpdateCheck;
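To make the parseImageName rules concrete, here is a small sketch of the expected outputs, derived directly from the branches above (illustrative, not a test suite shipped with the repo):

// Illustrative expectations for parseImageName, based on the logic above.
const checker = new DockerImageUpdateCheck(/* queueManager */ null);

console.log(checker.parseImageName("nginx"));
// -> { registry: "registry-1.docker.io", repository: "library/nginx", isSecure: true, isPublic: true }

console.log(checker.parseImageName("grafana/grafana"));
// -> { registry: "registry-1.docker.io", repository: "grafana/grafana", isSecure: true, isPublic: true }

console.log(checker.parseImageName("ghcr.io/patchmon/patchmon"));
// -> { registry: "ghcr.io", repository: "patchmon/patchmon", isSecure: true, isPublic: false }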
backend/src/services/automation/dockerInventoryCleanup.js (Normal file, 164 lines)
@@ -0,0 +1,164 @@
const { prisma } = require("./shared/prisma");

/**
 * Docker Inventory Cleanup Automation
 * Removes Docker containers and images for hosts that no longer exist
 */
class DockerInventoryCleanup {
  constructor(queueManager) {
    this.queueManager = queueManager;
    this.queueName = "docker-inventory-cleanup";
  }

  /**
   * Process Docker inventory cleanup job
   */
  async process(_job) {
    const startTime = Date.now();
    console.log("🧹 Starting Docker inventory cleanup...");

    try {
      // Step 1: Find and delete orphaned containers (containers for non-existent hosts)
      const orphanedContainers = await prisma.docker_containers.findMany({
        where: {
          host_id: {
            // Find containers where the host doesn't exist
            notIn: await prisma.hosts
              .findMany({ select: { id: true } })
              .then((hosts) => hosts.map((h) => h.id)),
          },
        },
      });

      let deletedContainersCount = 0;
      const deletedContainers = [];

      for (const container of orphanedContainers) {
        try {
          await prisma.docker_containers.delete({
            where: { id: container.id },
          });
          deletedContainersCount++;
          deletedContainers.push({
            id: container.id,
            container_id: container.container_id,
            name: container.name,
            image_name: container.image_name,
            host_id: container.host_id,
          });
          console.log(
            `🗑️ Deleted orphaned container: ${container.name} (host_id: ${container.host_id})`,
          );
        } catch (deleteError) {
          console.error(
            `❌ Failed to delete container ${container.id}:`,
            deleteError.message,
          );
        }
      }

      // Step 2: Find and delete orphaned images (images with no containers using them)
      const orphanedImages = await prisma.docker_images.findMany({
        where: {
          docker_containers: {
            none: {},
          },
        },
        include: {
          _count: {
            select: {
              docker_containers: true,
              docker_image_updates: true,
            },
          },
        },
      });

      let deletedImagesCount = 0;
      const deletedImages = [];

      for (const image of orphanedImages) {
        try {
          // First delete any image updates associated with this image
          if (image._count.docker_image_updates > 0) {
            await prisma.docker_image_updates.deleteMany({
              where: { image_id: image.id },
            });
          }

          // Then delete the image itself
          await prisma.docker_images.delete({
            where: { id: image.id },
          });
          deletedImagesCount++;
          deletedImages.push({
            id: image.id,
            repository: image.repository,
            tag: image.tag,
            image_id: image.image_id,
          });
          console.log(
            `🗑️ Deleted orphaned image: ${image.repository}:${image.tag}`,
          );
        } catch (deleteError) {
          console.error(
            `❌ Failed to delete image ${image.id}:`,
            deleteError.message,
          );
        }
      }

      const executionTime = Date.now() - startTime;
      console.log(
        `✅ Docker inventory cleanup completed in ${executionTime}ms - Deleted ${deletedContainersCount} containers and ${deletedImagesCount} images`,
      );

      return {
        success: true,
        deletedContainersCount,
        deletedImagesCount,
        deletedContainers,
        deletedImages,
        executionTime,
      };
    } catch (error) {
      const executionTime = Date.now() - startTime;
      console.error(
        `❌ Docker inventory cleanup failed after ${executionTime}ms:`,
        error.message,
      );
      throw error;
    }
  }

  /**
   * Schedule recurring Docker inventory cleanup (daily at 4 AM)
   */
  async schedule() {
    const job = await this.queueManager.queues[this.queueName].add(
      "docker-inventory-cleanup",
      {},
      {
        repeat: { cron: "0 4 * * *" }, // Daily at 4 AM
        jobId: "docker-inventory-cleanup-recurring",
      },
    );
    console.log("✅ Docker inventory cleanup scheduled");
    return job;
  }

  /**
   * Trigger manual Docker inventory cleanup
   */
  async triggerManual() {
    const job = await this.queueManager.queues[this.queueName].add(
      "docker-inventory-cleanup-manual",
      {},
      { priority: 1 },
    );
    console.log("✅ Manual Docker inventory cleanup triggered");
    return job;
  }
}

module.exports = DockerInventoryCleanup;
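One design note: the orphaned-container lookup above materialises every host id and passes it to notIn, which costs two round-trips and a potentially large IN list, whereas the orphaned-package and orphaned-repo cleanups later in this diff express the same anti-join with a relation filter (none: {}). If the Prisma schema declared an optional hosts relation on docker_containers, a single-query form would also be possible; a sketch under that assumption:

// Sketch only: assumes docker_containers has an optional `hosts` relation
// in the Prisma schema (relation name is an assumption). With a required
// foreign key, orphaned rows cannot exist and this returns nothing.
const orphanedContainers = await prisma.docker_containers.findMany({
  where: { hosts: { is: null } },
});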
backend/src/services/automation/githubUpdateCheck.js (Normal file, 160 lines)
@@ -0,0 +1,160 @@
const { prisma } = require("./shared/prisma");
const { compareVersions, checkPublicRepo } = require("./shared/utils");

/**
 * GitHub Update Check Automation
 * Checks for new releases on GitHub using HTTPS API
 */
class GitHubUpdateCheck {
  constructor(queueManager) {
    this.queueManager = queueManager;
    this.queueName = "github-update-check";
  }

  /**
   * Process GitHub update check job
   */
  async process(_job) {
    const startTime = Date.now();
    console.log("🔍 Starting GitHub update check...");

    try {
      // Get settings
      const settings = await prisma.settings.findFirst();
      const DEFAULT_GITHUB_REPO = "https://github.com/PatchMon/PatchMon.git";
      const repoUrl = settings?.githubRepoUrl || DEFAULT_GITHUB_REPO;
      let owner, repo;

      // Parse GitHub repository URL (supports both HTTPS and SSH formats)
      if (repoUrl.includes("git@github.com:")) {
        const match = repoUrl.match(/git@github\.com:([^/]+)\/([^/]+)\.git/);
        if (match) {
          [, owner, repo] = match;
        }
      } else if (repoUrl.includes("github.com/")) {
        const match = repoUrl.match(
          /github\.com\/([^/]+)\/([^/]+?)(?:\.git)?$/,
        );
        if (match) {
          [, owner, repo] = match;
        }
      }

      if (!owner || !repo) {
        throw new Error("Could not parse GitHub repository URL");
      }

      // Always use HTTPS GitHub API (simpler and more reliable)
      const latestVersion = await checkPublicRepo(owner, repo);

      if (!latestVersion) {
        throw new Error("Could not determine latest version");
      }

      // Read version from package.json
      let currentVersion = null;
      try {
        const packageJson = require("../../../package.json");
        if (packageJson?.version) {
          currentVersion = packageJson.version;
        }
      } catch (packageError) {
        console.error(
          "Could not read version from package.json:",
          packageError.message,
        );
        throw new Error(
          "Could not determine current version from package.json",
        );
      }

      if (!currentVersion) {
        throw new Error("Version not found in package.json");
      }

      const isUpdateAvailable =
        compareVersions(latestVersion, currentVersion) > 0;

      // Update settings with check results
      await prisma.settings.update({
        where: { id: settings.id },
        data: {
          last_update_check: new Date(),
          update_available: isUpdateAvailable,
          latest_version: latestVersion,
        },
      });

      const executionTime = Date.now() - startTime;
      console.log(
        `✅ GitHub update check completed in ${executionTime}ms - Current: ${currentVersion}, Latest: ${latestVersion}, Update Available: ${isUpdateAvailable}`,
      );

      return {
        success: true,
        currentVersion,
        latestVersion,
        isUpdateAvailable,
        executionTime,
      };
    } catch (error) {
      const executionTime = Date.now() - startTime;
      console.error(
        `❌ GitHub update check failed after ${executionTime}ms:`,
        error.message,
      );

      // Update last check time even on error
      try {
        const settings = await prisma.settings.findFirst();
        if (settings) {
          await prisma.settings.update({
            where: { id: settings.id },
            data: {
              last_update_check: new Date(),
              update_available: false,
            },
          });
        }
      } catch (updateError) {
        console.error(
          "❌ Error updating last check time:",
          updateError.message,
        );
      }

      throw error;
    }
  }

  /**
   * Schedule recurring GitHub update check (daily at midnight)
   */
  async schedule() {
    const job = await this.queueManager.queues[this.queueName].add(
      "github-update-check",
      {},
      {
        repeat: { cron: "0 0 * * *" }, // Daily at midnight
        jobId: "github-update-check-recurring",
      },
    );
    console.log("✅ GitHub update check scheduled");
    return job;
  }

  /**
   * Trigger manual GitHub update check
   */
  async triggerManual() {
    const job = await this.queueManager.queues[this.queueName].add(
      "github-update-check-manual",
      {},
      { priority: 1 },
    );
    console.log("✅ Manual GitHub update check triggered");
    return job;
  }
}

module.exports = GitHubUpdateCheck;
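The update decision hinges on compareVersions returning a positive number when its first argument is the newer version. The helper itself lives in ./shared/utils and is not shown in this diff, so the following is an assumed contract inferred from the call site above, not a verified signature:

// Assumed contract of compareVersions (helper not shown in this diff):
// returns > 0 if a is newer than b, 0 if equal, < 0 if a is older.
const { compareVersions } = require("./shared/utils");

console.log(compareVersions("1.3.0", "1.2.9") > 0); // true  -> update available
console.log(compareVersions("1.2.9", "1.2.9") > 0); // false -> already up to date
console.log(compareVersions("1.2.8", "1.2.9") > 0); // false -> remote is older than local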
backend/src/services/automation/index.js (Normal file, 541 lines)
@@ -0,0 +1,541 @@
const { Queue, Worker } = require("bullmq");
const { redis, redisConnection } = require("./shared/redis");
const { prisma } = require("./shared/prisma");
const agentWs = require("../agentWs");
const { v4: uuidv4 } = require("uuid");
const { get_current_time } = require("../../utils/timezone");

// Import automation classes
const GitHubUpdateCheck = require("./githubUpdateCheck");
const SessionCleanup = require("./sessionCleanup");
const OrphanedRepoCleanup = require("./orphanedRepoCleanup");
const OrphanedPackageCleanup = require("./orphanedPackageCleanup");
const DockerInventoryCleanup = require("./dockerInventoryCleanup");
const DockerImageUpdateCheck = require("./dockerImageUpdateCheck");
const MetricsReporting = require("./metricsReporting");
const SystemStatistics = require("./systemStatistics");

// Queue names
const QUEUE_NAMES = {
  GITHUB_UPDATE_CHECK: "github-update-check",
  SESSION_CLEANUP: "session-cleanup",
  ORPHANED_REPO_CLEANUP: "orphaned-repo-cleanup",
  ORPHANED_PACKAGE_CLEANUP: "orphaned-package-cleanup",
  DOCKER_INVENTORY_CLEANUP: "docker-inventory-cleanup",
  DOCKER_IMAGE_UPDATE_CHECK: "docker-image-update-check",
  METRICS_REPORTING: "metrics-reporting",
  SYSTEM_STATISTICS: "system-statistics",
  AGENT_COMMANDS: "agent-commands",
};

/**
 * Main Queue Manager
 * Manages all BullMQ queues and workers
 */
class QueueManager {
  constructor() {
    this.queues = {};
    this.workers = {};
    this.automations = {};
    this.isInitialized = false;
  }

  /**
   * Initialize all queues, workers, and automations
   */
  async initialize() {
    try {
      console.log("✅ Redis connection successful");

      // Initialize queues
      await this.initializeQueues();

      // Initialize automation classes
      await this.initializeAutomations();

      // Initialize workers
      await this.initializeWorkers();

      // Setup event listeners
      this.setupEventListeners();

      this.isInitialized = true;
      console.log("✅ Queue manager initialized successfully");
    } catch (error) {
      console.error("❌ Failed to initialize queue manager:", error.message);
      throw error;
    }
  }

  /**
   * Initialize all queues
   */
  async initializeQueues() {
    for (const [_key, queueName] of Object.entries(QUEUE_NAMES)) {
      this.queues[queueName] = new Queue(queueName, {
        connection: redisConnection,
        defaultJobOptions: {
          removeOnComplete: 50, // Keep last 50 completed jobs
          removeOnFail: 20, // Keep last 20 failed jobs
          attempts: 3, // Retry failed jobs 3 times
          backoff: {
            type: "exponential",
            delay: 2000,
          },
        },
      });

      console.log(`✅ Queue '${queueName}' initialized`);
    }
  }

  /**
   * Initialize automation classes
   */
  async initializeAutomations() {
    this.automations[QUEUE_NAMES.GITHUB_UPDATE_CHECK] = new GitHubUpdateCheck(
      this,
    );
    this.automations[QUEUE_NAMES.SESSION_CLEANUP] = new SessionCleanup(this);
    this.automations[QUEUE_NAMES.ORPHANED_REPO_CLEANUP] =
      new OrphanedRepoCleanup(this);
    this.automations[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP] =
      new OrphanedPackageCleanup(this);
    this.automations[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP] =
      new DockerInventoryCleanup(this);
    this.automations[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK] =
      new DockerImageUpdateCheck(this);
    this.automations[QUEUE_NAMES.METRICS_REPORTING] = new MetricsReporting(
      this,
    );
    this.automations[QUEUE_NAMES.SYSTEM_STATISTICS] = new SystemStatistics(
      this,
    );

    console.log("✅ All automation classes initialized");
  }

  /**
   * Initialize all workers
   */
  async initializeWorkers() {
    // Optimized worker options to reduce Redis connections
    const workerOptions = {
      connection: redisConnection,
      concurrency: 1, // Keep concurrency low to reduce connections
      // Connection optimization
      maxStalledCount: 1,
      stalledInterval: 30000,
      // Reduce connection churn
      settings: {
        stalledInterval: 30000,
        maxStalledCount: 1,
      },
    };

    // GitHub Update Check Worker
    this.workers[QUEUE_NAMES.GITHUB_UPDATE_CHECK] = new Worker(
      QUEUE_NAMES.GITHUB_UPDATE_CHECK,
      this.automations[QUEUE_NAMES.GITHUB_UPDATE_CHECK].process.bind(
        this.automations[QUEUE_NAMES.GITHUB_UPDATE_CHECK],
      ),
      workerOptions,
    );

    // Session Cleanup Worker
    this.workers[QUEUE_NAMES.SESSION_CLEANUP] = new Worker(
      QUEUE_NAMES.SESSION_CLEANUP,
      this.automations[QUEUE_NAMES.SESSION_CLEANUP].process.bind(
        this.automations[QUEUE_NAMES.SESSION_CLEANUP],
      ),
      workerOptions,
    );

    // Orphaned Repo Cleanup Worker
    this.workers[QUEUE_NAMES.ORPHANED_REPO_CLEANUP] = new Worker(
      QUEUE_NAMES.ORPHANED_REPO_CLEANUP,
      this.automations[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].process.bind(
        this.automations[QUEUE_NAMES.ORPHANED_REPO_CLEANUP],
      ),
      workerOptions,
    );

    // Orphaned Package Cleanup Worker
    this.workers[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP] = new Worker(
      QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP,
      this.automations[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].process.bind(
        this.automations[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP],
      ),
      workerOptions,
    );

    // Docker Inventory Cleanup Worker
    this.workers[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP] = new Worker(
      QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP,
      this.automations[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].process.bind(
        this.automations[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP],
      ),
      workerOptions,
    );

    // Docker Image Update Check Worker
    this.workers[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK] = new Worker(
      QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK,
      this.automations[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK].process.bind(
        this.automations[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK],
      ),
      workerOptions,
    );

    // Metrics Reporting Worker
    this.workers[QUEUE_NAMES.METRICS_REPORTING] = new Worker(
      QUEUE_NAMES.METRICS_REPORTING,
      this.automations[QUEUE_NAMES.METRICS_REPORTING].process.bind(
        this.automations[QUEUE_NAMES.METRICS_REPORTING],
      ),
      workerOptions,
    );

    // System Statistics Worker
    this.workers[QUEUE_NAMES.SYSTEM_STATISTICS] = new Worker(
      QUEUE_NAMES.SYSTEM_STATISTICS,
      this.automations[QUEUE_NAMES.SYSTEM_STATISTICS].process.bind(
        this.automations[QUEUE_NAMES.SYSTEM_STATISTICS],
      ),
      workerOptions,
    );

    // Agent Commands Worker
    this.workers[QUEUE_NAMES.AGENT_COMMANDS] = new Worker(
      QUEUE_NAMES.AGENT_COMMANDS,
      async (job) => {
        const { api_id, type } = job.data;
        console.log(`Processing agent command: ${type} for ${api_id}`);

        // Log job to job_history
        let historyRecord = null;
        try {
          const host = await prisma.hosts.findUnique({
            where: { api_id },
            select: { id: true },
          });

          if (host) {
            historyRecord = await prisma.job_history.create({
              data: {
                id: uuidv4(),
                job_id: job.id,
                queue_name: QUEUE_NAMES.AGENT_COMMANDS,
                job_name: type,
                host_id: host.id,
                api_id: api_id,
                status: "active",
                attempt_number: job.attemptsMade + 1,
                created_at: get_current_time(),
                updated_at: get_current_time(),
              },
            });
            console.log(`📝 Logged job to job_history: ${job.id} (${type})`);
          }
        } catch (error) {
          console.error("Failed to log job to job_history:", error);
        }

        try {
          // Send command via WebSocket based on type
          if (type === "report_now") {
            agentWs.pushReportNow(api_id);
          } else if (type === "settings_update") {
            // For settings update, we need additional data
            const { update_interval } = job.data;
            agentWs.pushSettingsUpdate(api_id, update_interval);
          } else if (type === "update_agent") {
            // Force agent to update by sending WebSocket command
            const ws = agentWs.getConnectionByApiId(api_id);
            if (ws && ws.readyState === 1) {
              // WebSocket.OPEN
              agentWs.pushUpdateAgent(api_id);
              console.log(`✅ Update command sent to agent ${api_id}`);
            } else {
              console.error(`❌ Agent ${api_id} is not connected`);
              throw new Error(
                `Agent ${api_id} is not connected. Cannot send update command.`,
              );
            }
          } else {
            console.error(`Unknown agent command type: ${type}`);
          }

          // Update job history to completed
          if (historyRecord) {
            await prisma.job_history.updateMany({
              where: { job_id: job.id },
              data: {
                status: "completed",
                completed_at: get_current_time(),
                updated_at: get_current_time(),
              },
            });
            console.log(`✅ Marked job as completed in job_history: ${job.id}`);
          }
        } catch (error) {
          // Update job history to failed
          if (historyRecord) {
            await prisma.job_history.updateMany({
              where: { job_id: job.id },
              data: {
                status: "failed",
                error_message: error.message,
                completed_at: get_current_time(),
                updated_at: get_current_time(),
              },
            });
            console.log(`❌ Marked job as failed in job_history: ${job.id}`);
          }
          throw error;
        }
      },
      workerOptions,
    );

    console.log(
      "✅ All workers initialized with optimized connection settings",
    );
  }

  /**
   * Setup event listeners for all queues
   */
  setupEventListeners() {
    for (const queueName of Object.values(QUEUE_NAMES)) {
      const queue = this.queues[queueName];
      queue.on("error", (error) => {
        console.error(`❌ Queue '${queueName}' experienced an error:`, error);
      });
      queue.on("failed", (job, err) => {
        console.error(
          `❌ Job '${job.id}' in queue '${queueName}' failed:`,
          err,
        );
      });
      queue.on("completed", (job) => {
        console.log(`✅ Job '${job.id}' in queue '${queueName}' completed.`);
      });
    }

    console.log("✅ Queue events initialized");
  }

  /**
   * Schedule all recurring jobs
   */
  async scheduleAllJobs() {
    await this.automations[QUEUE_NAMES.GITHUB_UPDATE_CHECK].schedule();
    await this.automations[QUEUE_NAMES.SESSION_CLEANUP].schedule();
    await this.automations[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].schedule();
    await this.automations[QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP].schedule();
    await this.automations[QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP].schedule();
    await this.automations[QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK].schedule();
    await this.automations[QUEUE_NAMES.METRICS_REPORTING].schedule();
    await this.automations[QUEUE_NAMES.SYSTEM_STATISTICS].schedule();
  }

  /**
   * Manual job triggers
   */
  async triggerGitHubUpdateCheck() {
    return this.automations[QUEUE_NAMES.GITHUB_UPDATE_CHECK].triggerManual();
  }

  async triggerSessionCleanup() {
    return this.automations[QUEUE_NAMES.SESSION_CLEANUP].triggerManual();
  }

  async triggerOrphanedRepoCleanup() {
    return this.automations[QUEUE_NAMES.ORPHANED_REPO_CLEANUP].triggerManual();
  }

  async triggerOrphanedPackageCleanup() {
    return this.automations[
      QUEUE_NAMES.ORPHANED_PACKAGE_CLEANUP
    ].triggerManual();
  }

  async triggerDockerInventoryCleanup() {
    return this.automations[
      QUEUE_NAMES.DOCKER_INVENTORY_CLEANUP
    ].triggerManual();
  }

  async triggerDockerImageUpdateCheck() {
    return this.automations[
      QUEUE_NAMES.DOCKER_IMAGE_UPDATE_CHECK
    ].triggerManual();
  }

  async triggerSystemStatistics() {
    return this.automations[QUEUE_NAMES.SYSTEM_STATISTICS].triggerManual();
  }

  async triggerMetricsReporting() {
    return this.automations[QUEUE_NAMES.METRICS_REPORTING].triggerManual();
  }

  /**
   * Get queue statistics
   */
  async getQueueStats(queueName) {
    const queue = this.queues[queueName];
    if (!queue) {
      throw new Error(`Queue ${queueName} not found`);
    }

    const [waiting, active, completed, failed, delayed] = await Promise.all([
      queue.getWaiting(),
      queue.getActive(),
      queue.getCompleted(),
      queue.getFailed(),
      queue.getDelayed(),
    ]);

    return {
      waiting: waiting.length,
      active: active.length,
      completed: completed.length,
      failed: failed.length,
      delayed: delayed.length,
    };
  }

  /**
   * Get all queue statistics
   */
  async getAllQueueStats() {
    const stats = {};
    for (const queueName of Object.values(QUEUE_NAMES)) {
      stats[queueName] = await this.getQueueStats(queueName);
    }
    return stats;
  }

  /**
   * Get recent jobs for a queue
   */
  async getRecentJobs(queueName, limit = 10) {
    const queue = this.queues[queueName];
    if (!queue) {
      throw new Error(`Queue ${queueName} not found`);
    }

    const [completed, failed] = await Promise.all([
      queue.getCompleted(0, limit - 1),
      queue.getFailed(0, limit - 1),
    ]);

    return [...completed, ...failed]
      .sort((a, b) => new Date(b.finishedOn) - new Date(a.finishedOn))
      .slice(0, limit);
  }

  /**
   * Get jobs for a specific host (by API ID)
   */
  async getHostJobs(apiId, limit = 20) {
    const queue = this.queues[QUEUE_NAMES.AGENT_COMMANDS];
    if (!queue) {
      throw new Error(`Queue ${QUEUE_NAMES.AGENT_COMMANDS} not found`);
    }

    console.log(`[getHostJobs] Looking for jobs with api_id: ${apiId}`);

    // Get active queue status (waiting, active, delayed, failed)
    const [waiting, active, delayed, failed] = await Promise.all([
      queue.getWaiting(),
      queue.getActive(),
      queue.getDelayed(),
      queue.getFailed(),
    ]);

    // Filter by API ID
    const filterByApiId = (jobs) =>
      jobs.filter((job) => job.data && job.data.api_id === apiId);

    const waitingCount = filterByApiId(waiting).length;
    const activeCount = filterByApiId(active).length;
    const delayedCount = filterByApiId(delayed).length;
    const failedCount = filterByApiId(failed).length;

    console.log(
      `[getHostJobs] Queue status - Waiting: ${waitingCount}, Active: ${activeCount}, Delayed: ${delayedCount}, Failed: ${failedCount}`,
    );

    // Get job history from database (shows all attempts and status changes)
    const jobHistory = await prisma.job_history.findMany({
      where: {
        api_id: apiId,
      },
      orderBy: {
        created_at: "desc",
      },
      take: limit,
    });

    console.log(
      `[getHostJobs] Found ${jobHistory.length} job history records for api_id: ${apiId}`,
    );

    return {
      waiting: waitingCount,
      active: activeCount,
      delayed: delayedCount,
      failed: failedCount,
      jobHistory: jobHistory.map((job) => ({
        id: job.id,
        job_id: job.job_id,
        job_name: job.job_name,
        status: job.status,
        attempt_number: job.attempt_number,
        error_message: job.error_message,
        output: job.output,
        created_at: job.created_at,
        updated_at: job.updated_at,
        completed_at: job.completed_at,
      })),
    };
  }

  /**
   * Graceful shutdown
   */
  async shutdown() {
    console.log("🛑 Shutting down queue manager...");

    for (const queueName of Object.keys(this.queues)) {
      try {
        await this.queues[queueName].close();
      } catch (e) {
        console.warn(
          `⚠️ Failed to close queue '${queueName}':`,
          e?.message || e,
        );
      }
      if (this.workers?.[queueName]) {
        try {
          await this.workers[queueName].close();
        } catch (e) {
          console.warn(
            `⚠️ Failed to close worker for '${queueName}':`,
            e?.message || e,
          );
        }
      }
    }

    await redis.quit();
    console.log("✅ Queue manager shutdown complete");
  }
}

const queueManager = new QueueManager();

module.exports = { queueManager, QUEUE_NAMES };
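A minimal sketch of how a server entry point might wire this manager up at boot and enqueue an agent command. The enqueue shape mirrors what the agent-commands worker above destructures from job.data; the startup file and the placeholder api_id are assumptions:

// Sketch: server startup wiring for the queue manager (illustrative).
const { queueManager, QUEUE_NAMES } = require("./services/automation");

async function start() {
  await queueManager.initialize(); // queues, automations, workers, listeners
  await queueManager.scheduleAllJobs(); // register the recurring cron jobs

  // Enqueue a one-off agent command; the worker above logs it to job_history.
  await queueManager.queues[QUEUE_NAMES.AGENT_COMMANDS].add("agent-command", {
    api_id: "patchmon_xxxxxxxx", // placeholder api_id
    type: "report_now",
  });
}

start().catch((err) => {
  console.error("Failed to start queue manager:", err);
  process.exit(1);
});

// Graceful shutdown on SIGTERM.
process.on("SIGTERM", () => queueManager.shutdown());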
backend/src/services/automation/metricsReporting.js (Normal file, 172 lines)
@@ -0,0 +1,172 @@
const axios = require("axios");
const { prisma } = require("./shared/prisma");
const { updateSettings } = require("../../services/settingsService");

const METRICS_API_URL =
  process.env.METRICS_API_URL || "https://metrics.patchmon.cloud";

/**
 * Metrics Reporting Automation
 * Sends anonymous usage metrics every 24 hours
 */
class MetricsReporting {
  constructor(queueManager) {
    this.queueManager = queueManager;
    this.queueName = "metrics-reporting";
  }

  /**
   * Process metrics reporting job
   */
  async process(_job, silent = false) {
    const startTime = Date.now();
    if (!silent) console.log("📊 Starting metrics reporting...");

    try {
      // Fetch fresh settings directly from database (bypass cache)
      const settings = await prisma.settings.findFirst({
        orderBy: { updated_at: "desc" },
      });

      // Check if metrics are enabled
      if (settings.metrics_enabled !== true) {
        if (!silent) console.log("📊 Metrics reporting is disabled");
        return { success: false, reason: "disabled" };
      }

      // Check if we have an anonymous ID
      if (!settings.metrics_anonymous_id) {
        if (!silent) console.log("📊 No anonymous ID found, skipping metrics");
        return { success: false, reason: "no_id" };
      }

      // Get host count
      const hostCount = await prisma.hosts.count();

      // Get version
      const packageJson = require("../../../package.json");
      const version = packageJson.version;

      // Prepare metrics data
      const metricsData = {
        anonymous_id: settings.metrics_anonymous_id,
        host_count: hostCount,
        version,
      };

      if (!silent)
        console.log(
          `📊 Sending metrics: ${hostCount} hosts, version ${version}`,
        );

      // Send to metrics API
      try {
        const response = await axios.post(
          `${METRICS_API_URL}/metrics/submit`,
          metricsData,
          {
            timeout: 10000,
            headers: {
              "Content-Type": "application/json",
            },
          },
        );

        // Update last sent timestamp
        await updateSettings(settings.id, {
          metrics_last_sent: new Date(),
        });

        const executionTime = Date.now() - startTime;
        if (!silent)
          console.log(
            `✅ Metrics sent successfully in ${executionTime}ms:`,
            response.data,
          );

        return {
          success: true,
          data: response.data,
          hostCount,
          version,
          executionTime,
        };
      } catch (apiError) {
        const executionTime = Date.now() - startTime;
        if (!silent)
          console.error(
            `❌ Failed to send metrics to API after ${executionTime}ms:`,
            apiError.message,
          );
        return {
          success: false,
          reason: "api_error",
          error: apiError.message,
          executionTime,
        };
      }
    } catch (error) {
      const executionTime = Date.now() - startTime;
      if (!silent)
        console.error(
          `❌ Error in metrics reporting after ${executionTime}ms:`,
          error.message,
        );
      // Don't throw on silent mode, just return failure
      if (silent) {
        return {
          success: false,
          reason: "error",
          error: error.message,
          executionTime,
        };
      }
      throw error;
    }
  }

  /**
   * Schedule recurring metrics reporting (daily at 2 AM)
   */
  async schedule() {
    const job = await this.queueManager.queues[this.queueName].add(
      "metrics-reporting",
      {},
      {
        repeat: { cron: "0 2 * * *" }, // Daily at 2 AM
        jobId: "metrics-reporting-recurring",
      },
    );
    console.log("✅ Metrics reporting scheduled (daily at 2 AM)");
    return job;
  }

  /**
   * Trigger manual metrics reporting
   */
  async triggerManual() {
    const job = await this.queueManager.queues[this.queueName].add(
      "metrics-reporting-manual",
      {},
      { priority: 1 },
    );
    console.log("✅ Manual metrics reporting triggered");
    return job;
  }

  /**
   * Send metrics immediately (silent mode)
   * Used for automatic sending on server startup
   */
  async sendSilent() {
    try {
      const result = await this.process({ name: "startup-silent" }, true);
      return result;
    } catch (error) {
      // Silent failure on startup
      return { success: false, reason: "error", error: error.message };
    }
  }
}

module.exports = MetricsReporting;
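sendSilent is intended for fire-and-forget use at boot; the actual call site is not part of this diff, so the following is an assumed usage sketch built on the queueManager's automations map from index.js above:

// Assumed startup call site for silent metrics reporting (illustrative).
const metrics = queueManager.automations["metrics-reporting"];
metrics.sendSilent().then((result) => {
  if (!result.success) {
    // Failure reasons returned above: "disabled", "no_id", "api_error", "error".
    console.debug(`Metrics not sent: ${result.reason}`);
  }
});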
backend/src/services/automation/orphanedPackageCleanup.js (Normal file, 116 lines)
@@ -0,0 +1,116 @@
const { prisma } = require("./shared/prisma");

/**
 * Orphaned Package Cleanup Automation
 * Removes packages with no associated hosts
 */
class OrphanedPackageCleanup {
  constructor(queueManager) {
    this.queueManager = queueManager;
    this.queueName = "orphaned-package-cleanup";
  }

  /**
   * Process orphaned package cleanup job
   */
  async process(_job) {
    const startTime = Date.now();
    console.log("🧹 Starting orphaned package cleanup...");

    try {
      // Find packages with 0 hosts
      const orphanedPackages = await prisma.packages.findMany({
        where: {
          host_packages: {
            none: {},
          },
        },
        include: {
          _count: {
            select: {
              host_packages: true,
            },
          },
        },
      });

      let deletedCount = 0;
      const deletedPackages = [];

      // Delete orphaned packages
      for (const pkg of orphanedPackages) {
        try {
          await prisma.packages.delete({
            where: { id: pkg.id },
          });
          deletedCount++;
          deletedPackages.push({
            id: pkg.id,
            name: pkg.name,
            description: pkg.description,
            category: pkg.category,
            latest_version: pkg.latest_version,
          });
          console.log(
            `🗑️ Deleted orphaned package: ${pkg.name} (${pkg.latest_version})`,
          );
        } catch (deleteError) {
          console.error(
            `❌ Failed to delete package ${pkg.id}:`,
            deleteError.message,
          );
        }
      }

      const executionTime = Date.now() - startTime;
      console.log(
        `✅ Orphaned package cleanup completed in ${executionTime}ms - Deleted ${deletedCount} packages`,
      );

      return {
        success: true,
        deletedCount,
        deletedPackages,
        executionTime,
      };
    } catch (error) {
      const executionTime = Date.now() - startTime;
      console.error(
        `❌ Orphaned package cleanup failed after ${executionTime}ms:`,
        error.message,
      );
      throw error;
    }
  }

  /**
   * Schedule recurring orphaned package cleanup (daily at 3 AM)
   */
  async schedule() {
    const job = await this.queueManager.queues[this.queueName].add(
      "orphaned-package-cleanup",
      {},
      {
        repeat: { cron: "0 3 * * *" }, // Daily at 3 AM
        jobId: "orphaned-package-cleanup-recurring",
      },
    );
    console.log("✅ Orphaned package cleanup scheduled");
    return job;
  }

  /**
   * Trigger manual orphaned package cleanup
   */
  async triggerManual() {
    const job = await this.queueManager.queues[this.queueName].add(
      "orphaned-package-cleanup-manual",
      {},
      { priority: 1 },
    );
    console.log("✅ Manual orphaned package cleanup triggered");
    return job;
  }
}

module.exports = OrphanedPackageCleanup;
backend/src/services/automation/orphanedRepoCleanup.js (normal file, 114 lines added)
@@ -0,0 +1,114 @@
const { prisma } = require("./shared/prisma");

/**
 * Orphaned Repository Cleanup Automation
 * Removes repositories with no associated hosts
 */
class OrphanedRepoCleanup {
  constructor(queueManager) {
    this.queueManager = queueManager;
    this.queueName = "orphaned-repo-cleanup";
  }

  /**
   * Process orphaned repository cleanup job
   */
  async process(_job) {
    const startTime = Date.now();
    console.log("🧹 Starting orphaned repository cleanup...");

    try {
      // Find repositories with 0 hosts
      const orphanedRepos = await prisma.repositories.findMany({
        where: {
          host_repositories: {
            none: {},
          },
        },
        include: {
          _count: {
            select: {
              host_repositories: true,
            },
          },
        },
      });

      let deletedCount = 0;
      const deletedRepos = [];

      // Delete orphaned repositories
      for (const repo of orphanedRepos) {
        try {
          await prisma.repositories.delete({
            where: { id: repo.id },
          });
          deletedCount++;
          deletedRepos.push({
            id: repo.id,
            name: repo.name,
            url: repo.url,
          });
          console.log(
            `🗑️ Deleted orphaned repository: ${repo.name} (${repo.url})`,
          );
        } catch (deleteError) {
          console.error(
            `❌ Failed to delete repository ${repo.id}:`,
            deleteError.message,
          );
        }
      }

      const executionTime = Date.now() - startTime;
      console.log(
        `✅ Orphaned repository cleanup completed in ${executionTime}ms - Deleted ${deletedCount} repositories`,
      );

      return {
        success: true,
        deletedCount,
        deletedRepos,
        executionTime,
      };
    } catch (error) {
      const executionTime = Date.now() - startTime;
      console.error(
        `❌ Orphaned repository cleanup failed after ${executionTime}ms:`,
        error.message,
      );
      throw error;
    }
  }

  /**
   * Schedule recurring orphaned repository cleanup (daily at 2 AM)
   */
  async schedule() {
    const job = await this.queueManager.queues[this.queueName].add(
      "orphaned-repo-cleanup",
      {},
      {
        repeat: { cron: "0 2 * * *" }, // Daily at 2 AM
        jobId: "orphaned-repo-cleanup-recurring",
      },
    );
    console.log("✅ Orphaned repository cleanup scheduled");
    return job;
  }

  /**
   * Trigger manual orphaned repository cleanup
   */
  async triggerManual() {
    const job = await this.queueManager.queues[this.queueName].add(
      "orphaned-repo-cleanup-manual",
      {},
      { priority: 1 },
    );
    console.log("✅ Manual orphaned repository cleanup triggered");
    return job;
  }
}

module.exports = OrphanedRepoCleanup;

backend/src/services/automation/sessionCleanup.js (normal file, 77 lines added)
@@ -0,0 +1,77 @@
const { prisma } = require("./shared/prisma");

/**
 * Session Cleanup Automation
 * Cleans up expired user sessions
 */
class SessionCleanup {
  constructor(queueManager) {
    this.queueManager = queueManager;
    this.queueName = "session-cleanup";
  }

  /**
   * Process session cleanup job
   */
  async process(_job) {
    const startTime = Date.now();
    console.log("🧹 Starting session cleanup...");

    try {
      const result = await prisma.user_sessions.deleteMany({
        where: {
          OR: [{ expires_at: { lt: new Date() } }, { is_revoked: true }],
        },
      });

      const executionTime = Date.now() - startTime;
      console.log(
        `✅ Session cleanup completed in ${executionTime}ms - Cleaned up ${result.count} expired sessions`,
      );

      return {
        success: true,
        sessionsCleaned: result.count,
        executionTime,
      };
    } catch (error) {
      const executionTime = Date.now() - startTime;
      console.error(
        `❌ Session cleanup failed after ${executionTime}ms:`,
        error.message,
      );
      throw error;
    }
  }

  /**
   * Schedule recurring session cleanup (every hour)
   */
  async schedule() {
    const job = await this.queueManager.queues[this.queueName].add(
      "session-cleanup",
      {},
      {
        repeat: { cron: "0 * * * *" }, // Every hour
        jobId: "session-cleanup-recurring",
      },
    );
    console.log("✅ Session cleanup scheduled");
    return job;
  }

  /**
   * Trigger manual session cleanup
   */
  async triggerManual() {
    const job = await this.queueManager.queues[this.queueName].add(
      "session-cleanup-manual",
      {},
      { priority: 1 },
    );
    console.log("✅ Manual session cleanup triggered");
    return job;
  }
}

module.exports = SessionCleanup;

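The three cleanup automations above (packages, repositories, sessions) share one shape: a queueManager owning BullMQ queues keyed by name, a process() job handler, a repeatable schedule(), and a high-priority triggerManual(). A minimal wiring sketch, assuming BullMQ and a hypothetical queueManager shaped the way these classes expect (the shared redis helper is added later in this diff; none of this sketch is code from the diff itself):

const { Queue, Worker } = require("bullmq");
const { redisConnection } = require("./shared/redis");
const SessionCleanup = require("./sessionCleanup");

// Hypothetical queue manager: just an object exposing queues by name.
const queueManager = { queues: {} };
queueManager.queues["session-cleanup"] = new Queue("session-cleanup", {
  connection: redisConnection,
});

const sessionCleanup = new SessionCleanup(queueManager);

// A worker drains the queue using the class's process() handler.
new Worker("session-cleanup", (job) => sessionCleanup.process(job), {
  connection: redisConnection,
});

// Register the hourly repeatable job once at startup.
sessionCleanup.schedule();
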
backend/src/services/automation/shared/prisma.js (normal file, 5 lines added)
@@ -0,0 +1,5 @@
const { getPrismaClient } = require("../../../config/prisma");

const prisma = getPrismaClient();

module.exports = { prisma };

backend/src/services/automation/shared/redis.js (normal file, 56 lines added)
@@ -0,0 +1,56 @@
const IORedis = require("ioredis");

// Redis connection configuration with connection pooling
const redisConnection = {
  host: process.env.REDIS_HOST || "localhost",
  port: parseInt(process.env.REDIS_PORT, 10) || 6379,
  password: process.env.REDIS_PASSWORD || undefined,
  username: process.env.REDIS_USER || undefined,
  db: parseInt(process.env.REDIS_DB, 10) || 0,
  // Connection pooling settings
  lazyConnect: true,
  keepAlive: 30000,
  connectTimeout: 30000, // Increased from 10s to 30s
  commandTimeout: 30000, // Increased from 5s to 30s
  enableReadyCheck: false,
  // Reduce connection churn
  family: 4, // Force IPv4
  // Retry settings
  retryDelayOnClusterDown: 300,
  retryDelayOnFailover: 100,
  maxRetriesPerRequest: null, // BullMQ requires this to be null
  // Connection pool settings
  maxLoadingTimeout: 30000,
};

// Create Redis connection with singleton pattern
let redisInstance = null;

function getRedisConnection() {
  if (!redisInstance) {
    redisInstance = new IORedis(redisConnection);

    // Handle graceful shutdown
    process.on("beforeExit", async () => {
      await redisInstance.quit();
    });

    process.on("SIGINT", async () => {
      await redisInstance.quit();
      process.exit(0);
    });

    process.on("SIGTERM", async () => {
      await redisInstance.quit();
      process.exit(0);
    });
  }

  return redisInstance;
}

module.exports = {
  redis: getRedisConnection(),
  redisConnection,
  getRedisConnection,
};

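With lazyConnect enabled, the singleton only dials Redis on first use; a quick smoke test, assuming a reachable Redis at the configured host/port:

const { redis } = require("./redis");

// The first command triggers the lazy connect, then round-trips a PING.
redis.ping().then((reply) => {
  console.log("Redis ping:", reply); // "PONG" when the server is reachable
});
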
backend/src/services/automation/shared/utils.js (normal file, 83 lines added)
@@ -0,0 +1,83 @@
// Common utilities for automation jobs

/**
 * Compare two semantic versions
 * @param {string} version1 - First version
 * @param {string} version2 - Second version
 * @returns {number} - 1 if version1 > version2, -1 if version1 < version2, 0 if equal
 */
function compareVersions(version1, version2) {
  const v1parts = version1.split(".").map(Number);
  const v2parts = version2.split(".").map(Number);

  const maxLength = Math.max(v1parts.length, v2parts.length);

  for (let i = 0; i < maxLength; i++) {
    const v1part = v1parts[i] || 0;
    const v2part = v2parts[i] || 0;

    if (v1part > v2part) return 1;
    if (v1part < v2part) return -1;
  }

  return 0;
}

/**
 * Check public GitHub repository for latest release
 * @param {string} owner - Repository owner
 * @param {string} repo - Repository name
 * @returns {Promise<string|null>} - Latest version or null
 */
async function checkPublicRepo(owner, repo) {
  try {
    const httpsRepoUrl = `https://api.github.com/repos/${owner}/${repo}/releases/latest`;

    // Get current version for User-Agent (or use generic if unavailable)
    let currentVersion = "unknown";
    try {
      const packageJson = require("../../../package.json");
      if (packageJson?.version) {
        currentVersion = packageJson.version;
      }
    } catch (packageError) {
      console.warn(
        "Could not read version from package.json for User-Agent:",
        packageError.message,
      );
    }

    const response = await fetch(httpsRepoUrl, {
      method: "GET",
      headers: {
        Accept: "application/vnd.github.v3+json",
        "User-Agent": `PatchMon-Server/${currentVersion}`,
      },
    });

    if (!response.ok) {
      const errorText = await response.text();
      if (
        errorText.includes("rate limit") ||
        errorText.includes("API rate limit")
      ) {
        console.log("⚠️ GitHub API rate limit exceeded, skipping update check");
        return null;
      }
      throw new Error(
        `GitHub API error: ${response.status} ${response.statusText}`,
      );
    }

    const releaseData = await response.json();
    return releaseData.tag_name.replace("v", "");
  } catch (error) {
    console.error("GitHub API error:", error.message);
    throw error;
  }
}

module.exports = {
  compareVersions,
  checkPublicRepo,
};

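compareVersions is a plain numeric, dot-by-dot comparison: missing segments count as 0, and pre-release suffixes (e.g. "1.2.3-rc1") are not understood. A few illustrative calls:

const { compareVersions } = require("./utils");

console.log(compareVersions("1.3.0", "1.2.9")); // 1  (first argument is newer)
console.log(compareVersions("1.2", "1.2.0")); // 0  (missing segments treated as 0)
console.log(compareVersions("1.2.4", "1.10.0")); // -1 (numeric, not lexicographic)
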
backend/src/services/automation/systemStatistics.js (normal file, 140 lines added)
@@ -0,0 +1,140 @@
const { prisma } = require("./shared/prisma");
const { v4: uuidv4 } = require("uuid");

/**
 * System Statistics Collection Automation
 * Collects aggregated system-wide statistics every 30 minutes
 * for use in package trends charts
 */
class SystemStatistics {
  constructor(queueManager) {
    this.queueManager = queueManager;
    this.queueName = "system-statistics";
  }

  /**
   * Process system statistics collection job
   */
  async process(_job) {
    const startTime = Date.now();
    console.log("📊 Starting system statistics collection...");

    try {
      // Calculate unique package counts across all hosts
      const uniquePackagesCount = await prisma.packages.count({
        where: {
          host_packages: {
            some: {
              needs_update: true,
            },
          },
        },
      });

      const uniqueSecurityCount = await prisma.packages.count({
        where: {
          host_packages: {
            some: {
              needs_update: true,
              is_security_update: true,
            },
          },
        },
      });

      // Calculate total unique packages installed on at least one host
      const totalPackages = await prisma.packages.count({
        where: {
          host_packages: {
            some: {}, // At least one host has this package
          },
        },
      });

      // Calculate total hosts
      const totalHosts = await prisma.hosts.count({
        where: {
          status: "active",
        },
      });

      // Calculate hosts needing updates (distinct hosts with packages needing updates)
      const hostsNeedingUpdates = await prisma.hosts.count({
        where: {
          status: "active",
          host_packages: {
            some: {
              needs_update: true,
            },
          },
        },
      });

      // Store statistics in database
      await prisma.system_statistics.create({
        data: {
          id: uuidv4(),
          unique_packages_count: uniquePackagesCount,
          unique_security_count: uniqueSecurityCount,
          total_packages: totalPackages,
          total_hosts: totalHosts,
          hosts_needing_updates: hostsNeedingUpdates,
          timestamp: new Date(),
        },
      });

      const executionTime = Date.now() - startTime;
      console.log(
        `✅ System statistics collection completed in ${executionTime}ms - Unique packages: ${uniquePackagesCount}, Security: ${uniqueSecurityCount}, Total hosts: ${totalHosts}`,
      );

      return {
        success: true,
        uniquePackagesCount,
        uniqueSecurityCount,
        totalPackages,
        totalHosts,
        hostsNeedingUpdates,
        executionTime,
      };
    } catch (error) {
      const executionTime = Date.now() - startTime;
      console.error(
        `❌ System statistics collection failed after ${executionTime}ms:`,
        error.message,
      );
      throw error;
    }
  }

  /**
   * Schedule recurring system statistics collection (every 30 minutes)
   */
  async schedule() {
    const job = await this.queueManager.queues[this.queueName].add(
      "system-statistics",
      {},
      {
        repeat: { pattern: "*/30 * * * *" }, // Every 30 minutes
        jobId: "system-statistics-recurring",
      },
    );
    console.log("✅ System statistics collection scheduled (every 30 minutes)");
    return job;
  }

  /**
   * Trigger manual system statistics collection
   */
  async triggerManual() {
    const job = await this.queueManager.queues[this.queueName].add(
      "system-statistics-manual",
      {},
      { priority: 1 },
    );
    console.log("✅ Manual system statistics collection triggered");
    return job;
  }
}

module.exports = SystemStatistics;

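The rows written by this job are the time series the package trends chart reads back. A read-side sketch (the helper name, 30-day window, and selected fields are illustrative assumptions, not code from this diff):

const { prisma } = require("./shared/prisma");

// Hypothetical helper: fetch one ascending series of snapshots for charting.
async function getTrendPoints(days = 30) {
  return prisma.system_statistics.findMany({
    where: {
      timestamp: { gte: new Date(Date.now() - days * 24 * 60 * 60 * 1000) },
    },
    orderBy: { timestamp: "asc" },
    select: {
      timestamp: true,
      unique_packages_count: true,
      unique_security_count: true,
      hosts_needing_updates: true,
    },
  });
}
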
backend/src/services/settingsService.js (normal file, 198 lines added)
@@ -0,0 +1,198 @@
const { getPrismaClient } = require("../config/prisma");
const { v4: uuidv4 } = require("uuid");

const prisma = getPrismaClient();

// Cached settings instance
let cachedSettings = null;

// Environment variable to settings field mapping
const ENV_TO_SETTINGS_MAP = {
  SERVER_PROTOCOL: "server_protocol",
  SERVER_HOST: "server_host",
  SERVER_PORT: "server_port",
};

// Helper function to construct server URL without default ports
function constructServerUrl(protocol, host, port) {
  const isHttps = protocol.toLowerCase() === "https";
  const isHttp = protocol.toLowerCase() === "http";

  // Don't append port if it's the default port for the protocol
  if ((isHttps && port === 443) || (isHttp && port === 80)) {
    return `${protocol}://${host}`.toLowerCase();
  }

  return `${protocol}://${host}:${port}`.toLowerCase();
}

// Create settings from environment variables and/or defaults
async function createSettingsFromEnvironment() {
  const protocol = process.env.SERVER_PROTOCOL || "http";
  const host = process.env.SERVER_HOST || "localhost";
  const port = parseInt(process.env.SERVER_PORT, 10) || 3001;
  const serverUrl = constructServerUrl(protocol, host, port);

  const settings = await prisma.settings.create({
    data: {
      id: uuidv4(),
      server_url: serverUrl,
      server_protocol: protocol,
      server_host: host,
      server_port: port,
      update_interval: 60,
      auto_update: false,
      signup_enabled: false,
      ignore_ssl_self_signed: false,
      updated_at: new Date(),
    },
  });

  console.log("Created settings");
  return settings;
}

// Sync environment variables with existing settings
async function syncEnvironmentToSettings(currentSettings) {
  const updates = {};
  let hasChanges = false;

  // Check each environment variable mapping
  for (const [envVar, settingsField] of Object.entries(ENV_TO_SETTINGS_MAP)) {
    if (process.env[envVar]) {
      const envValue = process.env[envVar];
      const currentValue = currentSettings[settingsField];

      // Convert environment value to appropriate type
      let convertedValue = envValue;
      if (settingsField === "server_port") {
        convertedValue = parseInt(envValue, 10);
      }

      // Only update if values differ
      if (currentValue !== convertedValue) {
        updates[settingsField] = convertedValue;
        hasChanges = true;
        if (process.env.ENABLE_LOGGING === "true") {
          console.log(
            `Environment variable ${envVar} (${envValue}) differs from settings ${settingsField} (${currentValue}), updating...`,
          );
        }
      }
    }
  }

  // Construct server_url from components if any components were updated
  const protocol = updates.server_protocol || currentSettings.server_protocol;
  const host = updates.server_host || currentSettings.server_host;
  const port = updates.server_port || currentSettings.server_port;
  const constructedServerUrl = constructServerUrl(protocol, host, port);

  // Update server_url if it differs from the constructed value
  if (currentSettings.server_url !== constructedServerUrl) {
    updates.server_url = constructedServerUrl;
    hasChanges = true;
    if (process.env.ENABLE_LOGGING === "true") {
      console.log(`Updating server_url to: ${constructedServerUrl}`);
    }
  }

  // Update settings if there are changes
  if (hasChanges) {
    const updatedSettings = await prisma.settings.update({
      where: { id: currentSettings.id },
      data: {
        ...updates,
        updated_at: new Date(),
      },
    });
    if (process.env.ENABLE_LOGGING === "true") {
      console.log(
        `Synced ${Object.keys(updates).length} environment variables to settings`,
      );
    }
    return updatedSettings;
  }

  return currentSettings;
}

// Initialise settings - create from environment or sync existing
async function initSettings() {
  if (cachedSettings) {
    return cachedSettings;
  }

  try {
    let settings = await prisma.settings.findFirst({
      orderBy: { updated_at: "desc" },
    });

    if (!settings) {
      // No settings exist, create from environment variables and defaults
      settings = await createSettingsFromEnvironment();
    } else {
      // Settings exist, sync with environment variables
      settings = await syncEnvironmentToSettings(settings);
    }

    // Cache the initialised settings
    cachedSettings = settings;
    return settings;
  } catch (error) {
    console.error("Failed to initialise settings:", error);
    throw error;
  }
}

// Get current settings (returns cached if available)
async function getSettings() {
  return cachedSettings || (await initSettings());
}

// Update settings and refresh cache
async function updateSettings(id, updateData) {
  try {
    const updatedSettings = await prisma.settings.update({
      where: { id },
      data: {
        ...updateData,
        updated_at: new Date(),
      },
    });

    // Reconstruct server_url from components
    const serverUrl = constructServerUrl(
      updatedSettings.server_protocol,
      updatedSettings.server_host,
      updatedSettings.server_port,
    );
    if (updatedSettings.server_url !== serverUrl) {
      updatedSettings.server_url = serverUrl;
      await prisma.settings.update({
        where: { id },
        data: { server_url: serverUrl },
      });
    }

    // Update cache
    cachedSettings = updatedSettings;
    return updatedSettings;
  } catch (error) {
    console.error("Failed to update settings:", error);
    throw error;
  }
}

// Invalidate cache (useful for testing or manual refresh)
function invalidateCache() {
  cachedSettings = null;
}

module.exports = {
  initSettings,
  getSettings,
  updateSettings,
  invalidateCache,
  syncEnvironmentToSettings, // Export for startup use
};

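The URL construction drops default ports and lowercases the result, so the same settings always normalise to one canonical server_url. For example (illustrative inputs; the helper itself is module-private):

constructServerUrl("https", "Patchmon.Example.com", 443);
// => "https://patchmon.example.com"        (default port omitted)
constructServerUrl("http", "patchmon.example.com", 8080);
// => "http://patchmon.example.com:8080"    (non-default port kept)
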
backend/src/utils/docker.js (normal file, 179 lines added)
@@ -0,0 +1,179 @@
/**
 * Docker-related utility functions
 */

/**
 * Generate a registry link for a Docker image based on its repository and source
 * Inspired by diun's registry link generation
 * @param {string} repository - The full repository name (e.g., "ghcr.io/owner/repo")
 * @param {string} source - The detected source (github, gitlab, docker-hub, etc.)
 * @returns {string|null} - The URL to the registry page, or null if unknown
 */
function generateRegistryLink(repository, source) {
  if (!repository) {
    return null;
  }

  // Parse the domain and path from the repository
  const parts = repository.split("/");
  let domain = "";
  let path = "";

  // Check if repository has a domain (contains a dot)
  if (parts[0].includes(".") || parts[0].includes(":")) {
    domain = parts[0];
    path = parts.slice(1).join("/");
  } else {
    // No domain means Docker Hub
    domain = "docker.io";
    path = repository;
  }

  switch (source) {
    case "docker-hub":
    case "docker.io": {
      // Docker Hub: https://hub.docker.com/r/{path} or https://hub.docker.com/_/{path} for official images
      // Official images are those without a namespace (e.g., "postgres" not "user/postgres")
      // or explicitly prefixed with "library/"
      if (path.startsWith("library/")) {
        const cleanPath = path.replace("library/", "");
        return `https://hub.docker.com/_/${cleanPath}`;
      }
      // Check if it's an official image (single part, no slash after removing library/)
      if (!path.includes("/")) {
        return `https://hub.docker.com/_/${path}`;
      }
      // Regular user/org image
      return `https://hub.docker.com/r/${path}`;
    }

    case "github":
    case "ghcr.io": {
      // GitHub Container Registry
      // Format: ghcr.io/{owner}/{package} or ghcr.io/{owner}/{repo}/{package}
      // URL format: https://github.com/{owner}/{repo}/pkgs/container/{package}
      if (domain === "ghcr.io" && path) {
        const pathParts = path.split("/");
        if (pathParts.length === 2) {
          // Two parts is owner/package; the repo is assumed to match the owner, so
          // ghcr.io/owner/package -> github.com/owner/owner/pkgs/container/package
          const owner = pathParts[0];
          const packageName = pathParts[1];
          return `https://github.com/${owner}/${owner}/pkgs/container/${packageName}`;
        } else if (pathParts.length >= 3) {
          // Extended case: ghcr.io/owner/repo/package -> github.com/owner/repo/pkgs/container/package
          const owner = pathParts[0];
          const repo = pathParts[1];
          const packageName = pathParts.slice(2).join("/");
          return `https://github.com/${owner}/${repo}/pkgs/container/${packageName}`;
        }
      }
      // Legacy GitHub Packages
      if (domain === "docker.pkg.github.com" && path) {
        const pathParts = path.split("/");
        if (pathParts.length >= 1) {
          return `https://github.com/${pathParts[0]}/packages`;
        }
      }
      return null;
    }

    case "gitlab":
    case "registry.gitlab.com": {
      // GitLab Container Registry: https://gitlab.com/{path}/container_registry
      if (path) {
        return `https://gitlab.com/${path}/container_registry`;
      }
      return null;
    }

    case "google":
    case "gcr.io": {
      // Google Container Registry: https://gcr.io/{path}
      if (domain.includes("gcr.io") || domain.includes("pkg.dev")) {
        return `https://console.cloud.google.com/gcr/images/${path}`;
      }
      return null;
    }

    case "quay":
    case "quay.io": {
      // Quay.io: https://quay.io/repository/{path}
      if (path) {
        return `https://quay.io/repository/${path}`;
      }
      return null;
    }

    case "redhat":
    case "registry.access.redhat.com": {
      // Red Hat: https://access.redhat.com/containers/#/registry.access.redhat.com/{path}
      if (path) {
        return `https://access.redhat.com/containers/#/registry.access.redhat.com/${path}`;
      }
      return null;
    }

    case "azure":
    case "azurecr.io": {
      // Azure Container Registry - link to portal
      // Format: {registry}.azurecr.io/{repository}
      if (domain.includes("azurecr.io")) {
        const registryName = domain.split(".")[0];
        return `https://portal.azure.com/#view/Microsoft_Azure_ContainerRegistries/RepositoryBlade/registryName/${registryName}/repositoryName/${path}`;
      }
      return null;
    }

    case "aws":
    case "amazonaws.com": {
      // AWS ECR - link to console
      // Format: {account}.dkr.ecr.{region}.amazonaws.com/{repository}
      if (domain.includes("amazonaws.com")) {
        const domainParts = domain.split(".");
        const region = domainParts[3]; // Extract region
        return `https://${region}.console.aws.amazon.com/ecr/repositories/private/${path}`;
      }
      return null;
    }

    case "private":
      // For private registries, try to construct a basic URL
      if (domain) {
        return `https://${domain}`;
      }
      return null;

    default:
      return null;
  }
}

/**
 * Get a user-friendly display name for a registry source
 * @param {string} source - The source identifier
 * @returns {string} - Human-readable source name
 */
function getSourceDisplayName(source) {
  const sourceNames = {
    "docker-hub": "Docker Hub",
    github: "GitHub",
    gitlab: "GitLab",
    google: "Google",
    quay: "Quay.io",
    redhat: "Red Hat",
    azure: "Azure",
    aws: "AWS ECR",
    private: "Private Registry",
    local: "Local",
    unknown: "Unknown",
  };

  return sourceNames[source] || source;
}

module.exports = {
  generateRegistryLink,
  getSourceDisplayName,
};

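Some concrete input/output pairs for generateRegistryLink, traced from the branches above (the example image names are illustrative):

generateRegistryLink("postgres", "docker-hub");
// => "https://hub.docker.com/_/postgres"          (official image)
generateRegistryLink("grafana/grafana", "docker-hub");
// => "https://hub.docker.com/r/grafana/grafana"   (user/org image)
generateRegistryLink("ghcr.io/9technologygroup/patchmon.net", "github");
// => "https://github.com/9technologygroup/9technologygroup/pkgs/container/patchmon.net"
generateRegistryLink("quay.io/prometheus/node-exporter", "quay");
// => "https://quay.io/repository/prometheus/node-exporter"
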
backend/src/utils/session_manager.js (normal file, 498 lines added)
@@ -0,0 +1,498 @@
const jwt = require("jsonwebtoken");
const crypto = require("node:crypto");
const { getPrismaClient } = require("../config/prisma");

const prisma = getPrismaClient();

/**
 * Session Manager - Handles secure session management with inactivity timeout
 */

// Configuration
if (!process.env.JWT_SECRET) {
  throw new Error("JWT_SECRET environment variable is required");
}
const JWT_SECRET = process.env.JWT_SECRET;
const JWT_EXPIRES_IN = process.env.JWT_EXPIRES_IN || "1h";
const JWT_REFRESH_EXPIRES_IN = process.env.JWT_REFRESH_EXPIRES_IN || "7d";
const TFA_REMEMBER_ME_EXPIRES_IN =
  process.env.TFA_REMEMBER_ME_EXPIRES_IN || "30d";
const TFA_MAX_REMEMBER_SESSIONS = parseInt(
  process.env.TFA_MAX_REMEMBER_SESSIONS || "5",
  10,
);
const TFA_SUSPICIOUS_ACTIVITY_THRESHOLD = parseInt(
  process.env.TFA_SUSPICIOUS_ACTIVITY_THRESHOLD || "3",
  10,
);
const INACTIVITY_TIMEOUT_MINUTES = parseInt(
  process.env.SESSION_INACTIVITY_TIMEOUT_MINUTES || "30",
  10,
);

/**
 * Generate access token (short-lived)
 */
function generate_access_token(user_id, session_id) {
  return jwt.sign({ userId: user_id, sessionId: session_id }, JWT_SECRET, {
    expiresIn: JWT_EXPIRES_IN,
  });
}

/**
 * Generate refresh token (long-lived)
 */
function generate_refresh_token() {
  return crypto.randomBytes(64).toString("hex");
}

/**
 * Hash token for storage
 */
function hash_token(token) {
  return crypto.createHash("sha256").update(token).digest("hex");
}

/**
 * Parse expiration string to Date
 */
function parse_expiration(expiration_string) {
  const match = expiration_string.match(/^(\d+)([smhd])$/);
  if (!match) {
    throw new Error("Invalid expiration format");
  }

  const value = parseInt(match[1], 10);
  const unit = match[2];

  const now = new Date();
  switch (unit) {
    case "s":
      return new Date(now.getTime() + value * 1000);
    case "m":
      return new Date(now.getTime() + value * 60 * 1000);
    case "h":
      return new Date(now.getTime() + value * 60 * 60 * 1000);
    case "d":
      return new Date(now.getTime() + value * 24 * 60 * 60 * 1000);
    default:
      throw new Error("Invalid time unit");
  }
}

/**
 * Generate device fingerprint from request data
 */
function generate_device_fingerprint(req) {
  // Use the X-Device-ID header from frontend (unique per browser profile/localStorage)
  const deviceId = req.get("x-device-id");

  if (deviceId) {
    // Hash the device ID for consistent storage format
    return crypto
      .createHash("sha256")
      .update(deviceId)
      .digest("hex")
      .substring(0, 32);
  }

  // No device ID - return null (user needs to provide device ID for remember-me)
  return null;
}

/**
 * Check for suspicious activity patterns
 */
async function check_suspicious_activity(
  user_id,
  _ip_address,
  _device_fingerprint,
) {
  try {
    // Check for multiple sessions from different IPs in short time
    const recent_sessions = await prisma.user_sessions.findMany({
      where: {
        user_id: user_id,
        created_at: {
          gte: new Date(Date.now() - 24 * 60 * 60 * 1000), // Last 24 hours
        },
        is_revoked: false,
      },
      select: {
        ip_address: true,
        device_fingerprint: true,
        created_at: true,
      },
    });

    // Count unique IPs and devices
    const unique_ips = new Set(recent_sessions.map((s) => s.ip_address));
    const unique_devices = new Set(
      recent_sessions.map((s) => s.device_fingerprint),
    );

    // Flag as suspicious if more than threshold different IPs or devices in 24h
    if (
      unique_ips.size > TFA_SUSPICIOUS_ACTIVITY_THRESHOLD ||
      unique_devices.size > TFA_SUSPICIOUS_ACTIVITY_THRESHOLD
    ) {
      console.warn(
        `Suspicious activity detected for user ${user_id}: ${unique_ips.size} IPs, ${unique_devices.size} devices`,
      );
      return true;
    }

    return false;
  } catch (error) {
    console.error("Error checking suspicious activity:", error);
    return false;
  }
}

/**
 * Create a new session for user
 */
async function create_session(
  user_id,
  ip_address,
  user_agent,
  remember_me = false,
  req = null,
) {
  try {
    const session_id = crypto.randomUUID();
    const refresh_token = generate_refresh_token();
    const access_token = generate_access_token(user_id, session_id);

    // Generate device fingerprint if request is available
    const device_fingerprint = req ? generate_device_fingerprint(req) : null;

    // Check for suspicious activity
    if (device_fingerprint) {
      const is_suspicious = await check_suspicious_activity(
        user_id,
        ip_address,
        device_fingerprint,
      );
      if (is_suspicious) {
        console.warn(
          `Suspicious activity detected for user ${user_id}, session creation may be restricted`,
        );
      }
    }

    // Check session limits for remember me
    if (remember_me) {
      const existing_remember_sessions = await prisma.user_sessions.count({
        where: {
          user_id: user_id,
          tfa_remember_me: true,
          is_revoked: false,
          expires_at: { gt: new Date() },
        },
      });

      // Limit remember me sessions per user
      if (existing_remember_sessions >= TFA_MAX_REMEMBER_SESSIONS) {
        throw new Error(
          "Maximum number of remembered devices reached. Please revoke an existing session first.",
        );
      }
    }

    // Use longer expiration for remember me sessions
    const expires_at = remember_me
      ? parse_expiration(TFA_REMEMBER_ME_EXPIRES_IN)
      : parse_expiration(JWT_REFRESH_EXPIRES_IN);

    // Calculate TFA bypass until date for remember me sessions
    const tfa_bypass_until = remember_me
      ? parse_expiration(TFA_REMEMBER_ME_EXPIRES_IN)
      : null;

    // Store session in database
    await prisma.user_sessions.create({
      data: {
        id: session_id,
        user_id: user_id,
        refresh_token: hash_token(refresh_token),
        access_token_hash: hash_token(access_token),
        ip_address: ip_address || null,
        user_agent: user_agent || null,
        device_fingerprint: device_fingerprint,
        last_login_ip: ip_address || null,
        last_activity: new Date(),
        expires_at: expires_at,
        tfa_remember_me: remember_me,
        tfa_bypass_until: tfa_bypass_until,
        login_count: 1,
      },
    });

    return {
      session_id,
      access_token,
      refresh_token,
      expires_at,
      tfa_bypass_until,
    };
  } catch (error) {
    console.error("Error creating session:", error);
    throw error;
  }
}

/**
 * Validate session and check for inactivity timeout
 */
async function validate_session(session_id, access_token) {
  try {
    const session = await prisma.user_sessions.findUnique({
      where: { id: session_id },
      include: { users: true },
    });

    if (!session) {
      return { valid: false, reason: "Session not found" };
    }

    // Check if session is revoked
    if (session.is_revoked) {
      return { valid: false, reason: "Session revoked" };
    }

    // Check if session has expired
    if (new Date() > session.expires_at) {
      await revoke_session(session_id);
      return { valid: false, reason: "Session expired" };
    }

    // Check for inactivity timeout
    const inactivity_threshold = new Date(
      Date.now() - INACTIVITY_TIMEOUT_MINUTES * 60 * 1000,
    );
    if (session.last_activity < inactivity_threshold) {
      await revoke_session(session_id);
      return {
        valid: false,
        reason: "Session inactive",
        message: `Session timed out after ${INACTIVITY_TIMEOUT_MINUTES} minutes of inactivity`,
      };
    }

    // Validate access token hash (optional security check)
    if (session.access_token_hash) {
      const provided_hash = hash_token(access_token);
      if (session.access_token_hash !== provided_hash) {
        return { valid: false, reason: "Token mismatch" };
      }
    }

    // Check if user is still active
    if (!session.users.is_active) {
      await revoke_session(session_id);
      return { valid: false, reason: "User inactive" };
    }

    return {
      valid: true,
      session,
      user: session.users,
    };
  } catch (error) {
    console.error("Error validating session:", error);
    return { valid: false, reason: "Validation error" };
  }
}

/**
 * Update session activity timestamp
 */
async function update_session_activity(session_id) {
  try {
    await prisma.user_sessions.update({
      where: { id: session_id },
      data: { last_activity: new Date() },
    });
    return true;
  } catch (error) {
    console.error("Error updating session activity:", error);
    return false;
  }
}

/**
 * Refresh access token using refresh token
 */
async function refresh_access_token(refresh_token) {
  try {
    const hashed_token = hash_token(refresh_token);

    const session = await prisma.user_sessions.findUnique({
      where: { refresh_token: hashed_token },
      include: { users: true },
    });

    if (!session) {
      return { success: false, error: "Invalid refresh token" };
    }

    // Validate session
    const validation = await validate_session(session.id, "");
    if (!validation.valid) {
      return { success: false, error: validation.reason };
    }

    // Generate new access token
    const new_access_token = generate_access_token(session.user_id, session.id);

    // Update access token hash
    await prisma.user_sessions.update({
      where: { id: session.id },
      data: {
        access_token_hash: hash_token(new_access_token),
        last_activity: new Date(),
      },
    });

    return {
      success: true,
      access_token: new_access_token,
      user: session.users,
    };
  } catch (error) {
    console.error("Error refreshing access token:", error);
    return { success: false, error: "Token refresh failed" };
  }
}

/**
 * Revoke a session
 */
async function revoke_session(session_id) {
  try {
    await prisma.user_sessions.update({
      where: { id: session_id },
      data: { is_revoked: true },
    });
    return true;
  } catch (error) {
    console.error("Error revoking session:", error);
    return false;
  }
}

/**
 * Revoke all sessions for a user
 */
async function revoke_all_user_sessions(user_id) {
  try {
    await prisma.user_sessions.updateMany({
      where: { user_id: user_id },
      data: { is_revoked: true },
    });
    return true;
  } catch (error) {
    console.error("Error revoking user sessions:", error);
    return false;
  }
}

/**
 * Clean up expired sessions (should be run periodically)
 */
async function cleanup_expired_sessions() {
  try {
    const result = await prisma.user_sessions.deleteMany({
      where: {
        OR: [{ expires_at: { lt: new Date() } }, { is_revoked: true }],
      },
    });
    console.log(`Cleaned up ${result.count} expired sessions`);
    return result.count;
  } catch (error) {
    console.error("Error cleaning up sessions:", error);
    return 0;
  }
}

/**
 * Get active sessions for a user
 */
async function get_user_sessions(user_id) {
  try {
    return await prisma.user_sessions.findMany({
      where: {
        user_id: user_id,
        is_revoked: false,
        expires_at: { gt: new Date() },
      },
      select: {
        id: true,
        ip_address: true,
        user_agent: true,
        last_activity: true,
        created_at: true,
        expires_at: true,
        tfa_remember_me: true,
        tfa_bypass_until: true,
      },
      orderBy: { last_activity: "desc" },
    });
  } catch (error) {
    console.error("Error getting user sessions:", error);
    return [];
  }
}

/**
 * Check if TFA is bypassed for a session
 */
async function is_tfa_bypassed(session_id) {
  try {
    const session = await prisma.user_sessions.findUnique({
      where: { id: session_id },
      select: {
        tfa_remember_me: true,
        tfa_bypass_until: true,
        is_revoked: true,
        expires_at: true,
      },
    });

    if (!session) {
      return false;
    }

    // Check if session is still valid
    if (session.is_revoked || new Date() > session.expires_at) {
      return false;
    }

    // Check if TFA is bypassed and still within bypass period
    if (session.tfa_remember_me && session.tfa_bypass_until) {
      return new Date() < session.tfa_bypass_until;
    }

    return false;
  } catch (error) {
    console.error("Error checking TFA bypass:", error);
    return false;
  }
}

module.exports = {
  create_session,
  validate_session,
  update_session_activity,
  refresh_access_token,
  revoke_session,
  revoke_all_user_sessions,
  cleanup_expired_sessions,
  get_user_sessions,
  is_tfa_bypassed,
  generate_device_fingerprint,
  check_suspicious_activity,
  generate_access_token,
  INACTIVITY_TIMEOUT_MINUTES,
};

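A rough login-handler sketch using the exported helpers (the Express-style req and the already-authenticated user object are assumptions; only the create_session signature comes from the file above):

const session_manager = require("./session_manager");

// Hypothetical handler: credentials (and TFA, if required) already verified.
async function issue_session(req, user) {
  const { access_token, refresh_token, expires_at } =
    await session_manager.create_session(
      user.id,
      req.ip,
      req.get("user-agent"),
      req.body.remember_me === true, // opts into the longer TFA remember-me window
      req, // lets create_session derive the X-Device-ID fingerprint
    );
  return { access_token, refresh_token, expires_at };
}
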
backend/src/utils/timezone.js (normal file, 107 lines added)
@@ -0,0 +1,107 @@
/**
 * Timezone utility functions for consistent timestamp handling
 *
 * This module provides timezone-aware timestamp functions that use
 * the TZ environment variable for consistent timezone handling across
 * the application. If TZ is not set, defaults to UTC.
 */

/**
 * Get the configured timezone from environment variable
 * Defaults to UTC if not set
 * @returns {string} Timezone string (e.g., 'UTC', 'America/New_York', 'Europe/London')
 */
function get_timezone() {
  return process.env.TZ || process.env.TIMEZONE || "UTC";
}

/**
 * Get current date/time in the configured timezone
 * Returns a Date object that represents the current time in the configured timezone
 * @returns {Date} Current date/time
 */
function get_current_time() {
  const tz = get_timezone();

  // If UTC, use Date.now() which is always UTC
  if (tz === "UTC" || tz === "Etc/UTC") {
    return new Date();
  }

  // For other timezones, we need to create a date string with timezone info
  // and parse it. This ensures the date represents the correct time in that timezone.
  // For database storage, we always store UTC timestamps
  // The timezone is primarily used for display purposes
  return new Date();
}

/**
 * Get current timestamp in milliseconds (UTC)
 * This is always UTC for database storage consistency
 * @returns {number} Current timestamp in milliseconds
 */
function get_current_timestamp() {
  return Date.now();
}

/**
 * Format a date to ISO string in the configured timezone
 * @param {Date} date - Date to format (defaults to now)
 * @returns {string} ISO formatted date string
 */
function format_date_iso(date = null) {
  const d = date || get_current_time();
  return d.toISOString();
}

/**
 * Parse a date string and return a Date object
 * Handles various date formats and timezone conversions
 * @param {string} date_string - Date string to parse
 * @param {Date} fallback - Fallback date if parsing fails (defaults to now)
 * @returns {Date} Parsed date or fallback
 */
function parse_date(date_string, fallback = null) {
  if (!date_string) {
    return fallback || get_current_time();
  }

  try {
    const date = new Date(date_string);
    if (Number.isNaN(date.getTime())) {
      return fallback || get_current_time();
    }
    return date;
  } catch (_error) {
    return fallback || get_current_time();
  }
}

/**
 * Convert a date to the configured timezone for display
 * @param {Date} date - Date to convert
 * @returns {string} Formatted date string in configured timezone
 */
function format_date_for_display(date) {
  const tz = get_timezone();
  const formatter = new Intl.DateTimeFormat("en-US", {
    timeZone: tz,
    year: "numeric",
    month: "2-digit",
    day: "2-digit",
    hour: "2-digit",
    minute: "2-digit",
    second: "2-digit",
    hour12: false,
  });
  return formatter.format(date);
}

module.exports = {
  get_timezone,
  get_current_time,
  get_current_timestamp,
  format_date_iso,
  parse_date,
  format_date_for_display,
};

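format_date_for_display is the only function here that actually applies the configured timezone; storage stays UTC throughout. An illustration, assuming TZ=Europe/London:

const { format_date_for_display } = require("./timezone");

console.log(format_date_for_display(new Date("2025-06-01T12:00:00Z")));
// => "06/01/2025, 13:00:00" (London is UTC+1 in June)
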
biome.json (normal file, 20 lines added)
@@ -0,0 +1,20 @@
{
  "$schema": "https://biomejs.dev/schemas/2.3.4/schema.json",
  "vcs": {
    "enabled": true,
    "clientKind": "git",
    "useIgnoreFile": true
  },
  "files": {
    "includes": ["**", "!**/*.css"]
  },
  "formatter": {
    "enabled": true
  },
  "linter": {
    "enabled": true
  },
  "assist": {
    "enabled": true
  }
}

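This config enables Biome's VCS-aware formatter, linter, and assist actions for everything except CSS; assuming the standard Biome CLI, a typical local run is "npx @biomejs/biome check --write ." to apply formatting and safe lint fixes in one pass.
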
Some files were not shown because too many files have changed in this diff.