Compare commits

...

958 Commits

Author SHA1 Message Date
wh1te909
834e602686 Release 0.14.8 2022-09-12 06:57:39 +00:00
wh1te909
1f693ca4f6 bump versions 2022-09-12 06:50:55 +00:00
wh1te909
97a0bc6045 optimize query to use less ram 2022-09-12 05:33:07 +00:00
wh1te909
8b75cdfefd update bin 2022-09-12 01:44:21 +00:00
wh1te909
917aecf1ff update deps and go 1.19 2022-09-12 01:42:24 +00:00
wh1te909
663dcd0396 fix tests 2022-09-11 20:51:57 +00:00
wh1te909
8f2dffb1ad update reqs 2022-09-11 20:29:45 +00:00
wh1te909
20228e3d19 add postgres ready check 2022-09-11 20:23:18 +00:00
wh1te909
81c6cc11b3 update supported version 2022-09-11 01:37:45 +00:00
wh1te909
2ccacbe5f3 fix for newer mesh 2022-09-11 01:33:00 +00:00
Dan
a5345e8468 Merge pull request #1277 from af7567/winupdatesfix
When checking if patches are to be installed, don't exit the function after finding a patched machine.
2022-09-06 12:02:26 -07:00
Dan
8f5d62bb81 Merge pull request #1273 from silversword411/develop
Adding client and site to mgmt command find_software
2022-09-06 11:59:07 -07:00
Adam
28f6838560 continue to next agent rather than return from function, otherwise other agents won't receive updates
Signed-off-by: Adam <adam@csparker.co.uk>
2022-09-04 18:09:58 +01:00
silversword411
c86aacb31c format tweak 2022-09-02 17:24:33 -04:00
silversword411
f62f5192d6 Adding client and site to mgmt command find_software 2022-09-02 14:28:45 -04:00
wh1te909
b14ea1fe3e fix maintenance mode and assigning policy via agent context menu not saving 2022-08-24 08:29:13 +00:00
wh1te909
552633a00b fix configs 2022-08-24 08:27:50 +00:00
wh1te909
7faba2a690 add migrations for updated pytz 2022-08-24 07:33:55 +00:00
wh1te909
db910aff06 back to dev 2022-08-23 06:02:48 +00:00
wh1te909
72126052ad Release 0.14.7 2022-08-23 05:59:29 +00:00
wh1te909
75d9f6a7e7 bump versions 2022-08-23 05:09:55 +00:00
wh1te909
de677294c6 refactor 2022-08-23 00:54:07 +00:00
wh1te909
da1e6b8259 update reqs 2022-08-22 06:47:52 +00:00
wh1te909
a9633b3990 update reqs 2022-08-20 20:09:53 +00:00
sadnub
7ac9af1cc1 fix sed command to do an inplace update of nginx.conf 2022-08-17 11:52:54 -04:00
Dan
00d8b8cd61 Merge pull request #1244 from dinger1986/develop
Update troubleshoot_server.sh
2022-08-17 08:43:13 -07:00
dinger1986
af7ff7f5cf Update troubleshoot_server.sh
Change SSL check as it wasn't working and add output
2022-08-17 11:42:05 +01:00
wh1te909
2a20719130 fix demo script check history graph 2022-08-17 06:57:05 +00:00
sadnub
f481940180 fix tests 2022-08-14 21:56:09 -04:00
sadnub
ca8824d1e3 fix clients not filtering by role in policy overview 2022-08-14 21:49:38 -04:00
sadnub
f4be199b77 fix checks/tasks return cache values for other plats 2022-08-14 19:45:11 -04:00
sadnub
6bcef8334e fix nginx entrypoint 2022-08-14 10:26:55 -04:00
wh1te909
3955eff683 more dev setup 2022-08-14 08:17:08 +00:00
wh1te909
aa0f6ecd75 python 3.10.6 2022-08-14 08:14:59 +00:00
wh1te909
ef4a94ed78 more ansible 2022-08-12 17:21:25 +00:00
wh1te909
b5c803ce65 update reqs 2022-08-12 17:19:04 +00:00
wh1te909
d4325ed82e more ansible dev 2022-08-12 06:15:21 +00:00
wh1te909
3805fb8f26 expose more fields in search amidaware/tacticalrmm-web@93dbc74e33 closes #652 2022-08-12 01:23:33 +00:00
wh1te909
d4d938c655 update paths 2022-08-12 01:06:56 +00:00
wh1te909
1c6911e361 start moving to nested serializers 2022-08-10 07:15:23 +00:00
wh1te909
a1b364f337 drop support for agent < 2.0.0 2022-08-10 07:12:51 +00:00
wh1te909
ece5c3da86 update reqs 2022-08-10 00:49:20 +00:00
wh1te909
5d1ae6047b back to dev 2022-08-10 00:38:13 +00:00
wh1te909
5605c72253 Release 0.14.6 2022-08-09 21:47:20 +00:00
wh1te909
66bbcf0733 fix tests 2022-08-09 21:35:23 +00:00
wh1te909
acc23ea7bb bump versions 2022-08-09 21:18:41 +00:00
wh1te909
663bd0c9f0 remove dead code 2022-08-09 21:16:18 +00:00
wh1te909
39b1025dfa fix tests 2022-08-05 17:35:40 +00:00
sadnub
d2875e90b2 fix docker dev 2022-08-05 12:10:52 -04:00
wh1te909
ff461d1d02 fixes #1174 amidaware/tacticalrmm-web@76f330fb9c 2022-08-05 07:22:46 +00:00
wh1te909
58164ea2d3 dev 2022-08-05 05:57:38 +00:00
wh1te909
1bf4834004 Django 4.1 2022-08-04 23:43:57 +00:00
wh1te909
bf58d78281 fix return tuple formatting 2022-08-04 23:40:35 +00:00
wh1te909
0dc749bb3d Release 0.14.5 2022-08-01 22:57:01 +00:00
wh1te909
a8aedfde55 bump version 2022-08-01 22:56:21 +00:00
wh1te909
b174a89032 Release 0.14.4 2022-08-01 18:09:18 +00:00
wh1te909
9b92d1b673 bump version 2022-08-01 17:50:33 +00:00
wh1te909
febc9aed11 feat: run as user amidaware/tacticalrmm-web@137a5648ce amidaware/rmmagent@50cebb950d 2022-07-31 22:23:19 +00:00
wh1te909
de2462677e fix working dir 2022-07-31 21:31:58 +00:00
wh1te909
8bd94d46eb fix empty statement 2022-07-28 17:28:49 +00:00
wh1te909
d43cefe28f add file associations for yaml [skip ci] 2022-07-27 07:31:54 +00:00
wh1te909
b82874e261 back to develop 2022-07-27 07:30:53 +00:00
wh1te909
8554cb5d6c Release 0.14.3 2022-07-27 07:18:55 +00:00
wh1te909
f901614056 bump version 2022-07-27 06:11:41 +00:00
wh1te909
b555d217ab remove check 2022-07-27 06:10:33 +00:00
wh1te909
775c600234 docker nginx changes 2022-07-27 04:19:17 +00:00
wh1te909
128f2570b8 catch exception if mesh is down 2022-07-27 02:01:29 +00:00
wh1te909
3cd53e79b4 add signing key 2022-07-27 01:59:55 +00:00
wh1te909
ebba84ffda switch to official nginx repo to get latest version 2022-07-26 08:09:49 +00:00
wh1te909
1e1a42fe98 update web ver 2022-07-26 08:08:43 +00:00
wh1te909
8a744a440d update reqs 2022-07-26 07:47:21 +00:00
wh1te909
f4fc3c7d55 unused var 2022-07-26 04:39:11 +00:00
wh1te909
0594d121de add agent ver to status closes #1224 2022-07-24 01:18:54 +00:00
wh1te909
12c85d6234 start ansible role to deploy dev environment 2022-07-20 07:16:47 +00:00
wh1te909
5e37728f66 remove devskum 2022-07-18 17:22:24 +00:00
wh1te909
e8e19fede7 don't allow dates in past #1174 2022-07-18 08:04:15 +00:00
wh1te909
e565dbfa66 invalidate cache on policy script change 2022-07-12 20:16:07 +00:00
wh1te909
d180d6820c back to develop 2022-07-10 03:35:15 +00:00
wh1te909
7f252e9b7c Release 0.14.2 2022-07-10 03:34:02 +00:00
wh1te909
41db8681f8 no sudo 2022-07-10 00:38:09 +00:00
wh1te909
26cd58fd6d bump version 2022-07-10 00:16:32 +00:00
wh1te909
63c7e1aa9d update reqs 2022-07-10 00:16:13 +00:00
wh1te909
d5a6063e5e remove extra space 2022-07-09 09:31:05 +00:00
wh1te909
00affdbdec update supported version 2022-07-09 09:30:58 +00:00
wh1te909
db3f0bbd4f increase nginx open file limit 2022-07-09 08:09:24 +00:00
wh1te909
020a59cb97 remove un-needed expose 2022-07-09 08:08:09 +00:00
wh1te909
ff4fa6402d back to dev 2022-07-08 06:40:55 +00:00
wh1te909
80f7555499 Release 0.14.1 2022-07-08 06:40:15 +00:00
wh1te909
10cc187c5d bump versions 2022-07-08 06:38:20 +00:00
wh1te909
def4a8a67e Release 0.14.0 2022-07-07 21:37:42 +00:00
wh1te909
25843edb48 bump script ver 2022-07-07 20:34:47 +00:00
wh1te909
54294141b0 bump versions 2022-07-07 19:44:11 +00:00
wh1te909
f3a8886b50 remove check for 4222 2022-07-07 16:31:01 +00:00
wh1te909
268cfaf234 bump web ver 2022-07-07 03:06:52 +00:00
wh1te909
651ae20304 forgot decorator 2022-07-06 23:37:52 +00:00
wh1te909
e22f69a5dc fix variable 2022-07-06 01:15:55 +00:00
wh1te909
a39808f44c update reqs 2022-07-05 18:07:20 +00:00
wh1te909
fcb541f734 update nginx conf for nats websocket 2022-07-03 22:44:59 +00:00
wh1te909
79ca0f1684 bump web ver 2022-07-03 01:48:39 +00:00
wh1te909
f2ebc38044 don't load swagger app unless enabled 2022-07-03 01:36:26 +00:00
wh1te909
d4335675f1 update nats-api and add debug info 2022-06-30 06:37:58 +00:00
wh1te909
be4b05423e update reqs 2022-06-30 06:27:45 +00:00
wh1te909
d9fe8db2a7 bump mesh 2022-06-30 06:27:28 +00:00
wh1te909
f92e780765 add delete endpoint 2022-06-30 06:21:13 +00:00
wh1te909
7aebdb7c78 add back middleware for django admin 2022-06-30 06:20:38 +00:00
wh1te909
abb2dd842b bigint for retcode 2022-06-29 07:57:58 +00:00
wh1te909
75713c8015 bump mesh and web 2022-06-27 08:09:55 +00:00
wh1te909
42e1717455 add nats websocket support 2022-06-26 22:16:28 +00:00
wh1te909
bfb19a9eb7 bump docker nats 2022-06-26 21:14:27 +00:00
wh1te909
3e08585114 add endpoint for trmm server monitoring and stats 2022-06-26 07:35:11 +00:00
wh1te909
12e82c7a8d update channels 2022-06-26 07:33:23 +00:00
wh1te909
0fcc683903 remove debug print 2022-06-26 07:02:15 +00:00
wh1te909
ed7a8dc0f5 add websocket endpoint for sendcmd 2022-06-25 09:05:26 +00:00
wh1te909
0a9d29c98d add dev check to version 2022-06-25 09:05:26 +00:00
wh1te909
f63e801608 auto scale uwsgi workers based on load 2022-06-25 09:05:26 +00:00
Dan
77f04e1a32 Merge pull request #1186 from silversword411/develop
Syncing readme with docs
2022-06-23 11:04:09 -07:00
silversword411
362819ce16 Syncing readme with docs 2022-06-23 12:14:38 -04:00
wh1te909
1d9165a627 more tests 2022-06-23 05:25:18 +00:00
wh1te909
7ee8aaa027 update reqs 2022-06-23 05:16:34 +00:00
wh1te909
516e279fc3 add migration 2022-06-23 05:15:08 +00:00
wh1te909
880611eddb forgot to call 2022-06-23 01:21:33 +00:00
wh1te909
c4bf776069 fix coverage 2022-06-23 00:33:15 +00:00
wh1te909
097d6464c0 update coverage 2022-06-23 00:23:53 +00:00
wh1te909
b86e4e017f add back coverage 2022-06-22 22:41:47 +00:00
wh1te909
bbec17d498 isort 2022-06-22 22:18:04 +00:00
wh1te909
3b7b5f4ec3 more tests 2022-06-22 22:10:03 +00:00
wh1te909
0986efef29 tests 2022-06-22 06:51:25 +00:00
wh1te909
06091cbf1c fix undefined var 2022-06-22 03:07:41 +00:00
wh1te909
b588bab268 start refactor for mac agent and break all the tests 2022-06-20 19:35:51 +00:00
wh1te909
0736cfe959 update reqs 2022-06-17 06:04:52 +00:00
Dan
400352254a Merge pull request #1173 from sarog/develop
Generify shell script interpreters
2022-06-15 13:39:56 -07:00
Saro G
259c3dc781 Generify shell script interpreters. 2022-06-15 16:04:58 -04:00
wh1te909
506055a815 update reqs 2022-06-07 16:53:51 +00:00
wh1te909
3edf6c57ba add goarch constant 2022-06-06 04:05:19 +00:00
wh1te909
c404ae7ac8 add bulk recovery endpoint, tests 2022-06-03 00:36:19 +00:00
wh1te909
312774e472 add tests 2022-06-01 19:14:30 +00:00
wh1te909
c540f802b0 add mgmt command to help with restores 2022-05-30 07:33:55 +00:00
wh1te909
6a2a2761e1 update docker for new web build 2022-05-29 07:40:49 +00:00
wh1te909
2508458c80 update reqs 2022-05-29 07:40:15 +00:00
wh1te909
025d9e0141 add find service 2022-05-26 18:54:02 +00:00
wh1te909
734b3b07ab update scripts for frontend repo move 2022-05-24 22:03:24 +00:00
wh1te909
e4250a857a add web version 2022-05-24 21:02:19 +00:00
wh1te909
56d1b2716c add mgmt command for scripts 2022-05-24 19:17:48 +00:00
wh1te909
c5d7e61e6c optimize query 2022-05-24 19:16:28 +00:00
wh1te909
6222a127bd update reqs 2022-05-24 19:15:33 +00:00
wh1te909
f0b7e515b6 fix workflows 2022-05-19 04:00:37 +00:00
wh1te909
98d8c23868 web now in separate repo 2022-05-19 03:45:31 +00:00
wh1te909
978bb9afd0 update certifi 2022-05-18 19:46:13 +00:00
wh1te909
058598b5f3 custom field enums 2022-05-18 19:20:28 +00:00
wh1te909
5b7ab3a10f more task enums 2022-05-18 18:37:35 +00:00
wh1te909
e42243c78b task type enum 2022-05-18 07:18:53 +00:00
wh1te909
c650ee8498 alert enums 2022-05-18 06:49:29 +00:00
wh1te909
50f8968901 agent history enum, remove unused model field 2022-05-18 06:11:40 +00:00
wh1te909
b0fa2e6d80 mon type enum 2022-05-18 05:48:02 +00:00
wh1te909
d59589425e status const 2022-05-18 05:18:06 +00:00
wh1te909
6c810e514b fix tests 2022-05-18 04:14:20 +00:00
wh1te909
efa41dbd22 add plat enum 2022-05-18 03:52:51 +00:00
sadnub
f34bcfd56d move to quasar vite plugin 2022-05-16 22:42:01 -04:00
sadnub
8ff2e3fb29 Fix removal of orphaned win tasks not completing if having issues communicating with an agent 2022-05-15 14:38:28 -04:00
wh1te909
033c04a0f2 back to develop [skip-ci] 2022-05-15 08:21:41 +00:00
wh1te909
6ae2da22c1 Release 0.13.4 2022-05-15 07:16:14 +00:00
wh1te909
cef1ab9512 bump versions 2022-05-15 07:01:47 +00:00
wh1te909
94f02bfca3 only if exists 2022-05-15 06:59:00 +00:00
sadnub
a941bb1744 disable auto quoting when using variable substitution on batch scripts. Fixes #1020 2022-05-14 22:45:25 -04:00
sadnub
6ff591427a Add watcher in agent view for route changes and set new active agent. Fixes #1110 2022-05-14 22:23:03 -04:00
sadnub
809e172280 Fixes check policy copy issue where copied checks are all of type diskspace 2022-05-14 22:01:29 -04:00
wh1te909
17aedae0a9 recreate env 2022-05-14 23:59:32 +00:00
wh1te909
ef817ccb3a isort 2022-05-14 23:30:01 +00:00
wh1te909
0fb55b0bee remove middleware 2022-05-14 23:23:24 +00:00
wh1te909
a1a6eddc31 update reqs 2022-05-14 23:00:31 +00:00
wh1te909
ff3d0b6b57 handle empty payload, add monitoring endpoint 2022-05-13 00:50:53 +00:00
wh1te909
dd64cef4c4 optimize endpoints, remove deprecated endpoint, start reworking tests 2022-05-11 22:38:03 +00:00
Dan
9796848079 Merge pull request #1127 from silversword411/develop
typo fix
2022-05-11 14:54:05 -07:00
silversword411
fea7eb4312 typo fix 2022-05-11 17:10:06 -04:00
wh1te909
c12cd0e755 fix task run doesn't show name in history tab fixes #1097 2022-05-10 19:21:49 +00:00
wh1te909
d86a72f858 remove py 3.10.2 2022-05-10 18:52:47 +00:00
wh1te909
50cd7f219a update reqs 2022-05-10 18:45:33 +00:00
wh1te909
8252b3eccc more enum 2022-05-10 17:09:06 +00:00
wh1te909
d0c6e3a158 move tests 2022-05-10 16:29:09 +00:00
wh1te909
1505fa547e update nats reqs and change mod name 2022-05-10 16:27:51 +00:00
wh1te909
9017bad884 fix test 2022-05-09 17:35:07 +00:00
wh1te909
2ac5e316a5 refactor recovery func 2022-05-09 06:43:04 +00:00
wh1te909
29f9113062 update coverage 2022-05-09 06:40:33 +00:00
wh1te909
46349672d8 optimize checkrunner result endpoint 2022-05-09 06:23:59 +00:00
wh1te909
4787be2db0 fix tabs in popout view #1110 2022-05-04 22:17:45 +00:00
wh1te909
f0a8c5d732 fix site sorting fixes #1118 2022-05-04 15:46:05 +00:00
wh1te909
9ad520bf7c remove unnecessary middleware 2022-05-02 00:57:40 +00:00
wh1te909
bd0cc51554 update asgi 2022-05-02 00:57:01 +00:00
wh1te909
12f599f974 add linux mint 2022-05-01 20:34:31 +00:00
wh1te909
0118d5fb40 log enums 2022-04-30 02:01:20 +00:00
wh1te909
65cadb311a more enum 2022-04-29 19:09:17 +00:00
wh1te909
dd75bd197d add back dummy cache 2022-04-29 17:22:10 +00:00
wh1te909
7e155bdb43 typing 2022-04-29 06:45:20 +00:00
wh1te909
993b6fddf4 redundant 2022-04-29 06:41:40 +00:00
wh1te909
6ba51df6a7 add sh to download 2022-04-29 06:29:43 +00:00
wh1te909
1185ac58e1 more enum 2022-04-29 06:21:48 +00:00
wh1te909
f835997f49 switch runners and use redis cache during testing 2022-04-29 05:23:50 +00:00
wh1te909
a597dba775 fix role cache 2022-04-29 05:21:51 +00:00
wh1te909
3194e83a66 update reqs 2022-04-29 00:27:01 +00:00
wh1te909
096c3cdd34 more enum 2022-04-28 18:01:11 +00:00
wh1te909
3a1ea42333 fix settings 2022-04-28 17:33:51 +00:00
wh1te909
64877d4299 fix black not auto formatting 2022-04-28 17:25:21 +00:00
wh1te909
e957dc5e2c black 2022-04-28 17:24:45 +00:00
wh1te909
578d5c5830 more enum 2022-04-28 17:07:58 +00:00
sadnub
96284f9508 make eslint error on warnings 2022-04-28 09:50:41 -04:00
wh1te909
698b38dcba fix warning 2022-04-28 07:07:33 +00:00
wh1te909
6db826befe fix dir again 2022-04-28 07:02:38 +00:00
wh1te909
1a3d412d73 pwd 2022-04-28 07:01:44 +00:00
wh1te909
b8461c9dd8 fix dir 2022-04-28 07:00:09 +00:00
wh1te909
699bd9de10 fix workflow 2022-04-28 06:56:03 +00:00
Dan
54b6866e21 Merge pull request #1102 from sadnub/typescript
Typescript prep and add some linting
2022-04-27 23:45:33 -07:00
sadnub
afd155e9c1 fix actions 2022-04-27 22:48:29 -04:00
sadnub
910a717230 add name to gh action 2022-04-27 22:34:52 -04:00
sadnub
70fbd33d61 add lint and formatting gh action 2022-04-27 21:18:52 -04:00
sadnub
2da0d5ee21 add typescript support and stricter formatting/linting 2022-04-27 20:47:32 -04:00
wh1te909
98f64e057a update reqs 2022-04-27 05:07:22 +00:00
wh1te909
3d9d936c56 update url 2022-04-27 05:06:21 +00:00
wh1te909
2b4cb59df8 add more typing to dev reqs 2022-04-26 23:34:10 +00:00
wh1te909
9d80da52e3 more demo stuff 2022-04-26 23:33:45 +00:00
wh1te909
fd176d2c64 start moving to enums for choicefields 2022-04-26 01:13:18 +00:00
sadnub
538b6de36b set default alert severities for checks and tasks so that blank alert templates being applied to agent don't stop all alerts 2022-04-25 14:38:34 -04:00
sadnub
f7eca8aee0 Ignore timezone when editing automated tasks 2022-04-25 14:17:57 -04:00
wh1te909
a754d94c2c remove test multiprocessing 2022-04-25 08:15:06 +00:00
wh1te909
5e3493e6a9 fix tests 2022-04-25 07:54:31 +00:00
wh1te909
619a14c26b refactor action_type 2022-04-25 07:47:58 +00:00
wh1te909
7d9a8decf0 start choice field refactor, remove deprecated model fields 2022-04-25 07:10:33 +00:00
wh1te909
d11e14ad89 add migration 2022-04-25 06:51:42 +00:00
wh1te909
69189cf2af isort 2022-04-25 06:50:48 +00:00
wh1te909
6e7d2f19d2 update middleware for django 4, refactor a func to fix circular import, start fixing fake_agents script 2022-04-25 06:48:14 +00:00
wh1te909
d99ebf5d6a remove deprecated model field 2022-04-25 06:43:58 +00:00
wh1te909
ef2d19e95b back to develop 2022-04-25 01:36:12 +00:00
wh1te909
e3a66f017e Release 0.13.3 2022-04-25 01:32:11 +00:00
wh1te909
9e544ad471 bump version 2022-04-25 01:31:09 +00:00
sadnub
5f19aa527a fix running policy script checks 2022-04-24 21:02:28 -04:00
sadnub
bfd5bc5c26 remove port changes for persistent mesh configurations 2022-04-24 20:51:18 -04:00
wh1te909
2d0ec3accd back to dev 2022-04-25 00:38:47 +00:00
wh1te909
0999d98225 Release 0.13.2 2022-04-25 00:33:40 +00:00
wh1te909
d8dd3e133f bump version 2022-04-25 00:31:14 +00:00
wh1te909
528470c37f fix slow query 2022-04-25 00:17:34 +00:00
wh1te909
c03cd53853 fix deprecated function 2022-04-24 23:20:39 +00:00
wh1te909
b57fc8a29c testing num queries 2022-04-24 23:14:37 +00:00
wh1te909
a04ed5c3ca remove duplicate settings 2022-04-24 23:09:44 +00:00
Dan
3ad1df14f6 Merge pull request #1085 from dinger1986/patch-1
Update troubleshoot_server.sh
2022-04-24 15:26:47 -07:00
wh1te909
d8caf12fdc optimize policy query 2022-04-24 22:07:24 +00:00
wh1te909
5ca9d30d5f add a test and optimize some queries 2022-04-24 21:23:33 +00:00
sadnub
a7a71b4a46 fix default tab not working if 'servers' is selected 2022-04-24 16:35:42 -04:00
sadnub
638603ac6b fix variable substitution when running policy tasks 2022-04-24 16:35:18 -04:00
wh1te909
1d70c15027 wrong related 2022-04-24 01:45:03 +00:00
wh1te909
7a5f03d672 fix slow query 2022-04-23 23:24:36 +00:00
wh1te909
39e97c5589 Release 0.13.1 2022-04-23 22:59:35 +00:00
wh1te909
1943d8367e bump version 2022-04-23 22:57:09 +00:00
wh1te909
f91c5af9a1 optimize query 2022-04-23 22:51:45 +00:00
wh1te909
2be71fc877 increase chunk size 2022-04-23 22:50:06 +00:00
wh1te909
f5f5b4a8db fixed 'Save and test email' always returning success even if it failed 2022-04-23 18:50:23 +00:00
sadnub
ac9cfd09ea fix init container not having access to the redis service 2022-04-23 12:28:13 -04:00
sadnub
4cfc85dbfd fix agent sorting by last response 2022-04-23 12:21:18 -04:00
sadnub
1f3d2f47b1 increase max_length on customfield name field to 100 2022-04-23 10:26:34 -04:00
dinger1986
653c482ff7 Update troubleshoot_server.sh
add in output of a few log files
2022-04-23 15:03:58 +01:00
wh1te909
4b069cc2b0 fix pending actions showing agent pending update even though it was already updated 2022-04-23 07:43:30 +00:00
wh1te909
c89349a43a update badge 2022-04-23 03:49:37 +00:00
wh1te909
6e92d6c62c testing codecov 2022-04-23 03:44:45 +00:00
wh1te909
5d3d3e9076 fix tests? 2022-04-23 02:07:00 +00:00
wh1te909
b440c772d6 forgot pytest.ini 2022-04-23 02:01:34 +00:00
wh1te909
2895560b30 testing pytest/codecov 2022-04-23 02:00:12 +00:00
wh1te909
bedcecb2e1 fix deprecations 2022-04-23 00:19:00 +00:00
wh1te909
656ac829a4 black 2022-04-22 23:22:41 +00:00
wh1te909
4d83debc0e also sort in db 2022-04-22 23:19:42 +00:00
wh1te909
4ff5d19979 fix sorting of clients tree 2022-04-22 22:48:54 +00:00
wh1te909
2216ee422e Release 0.13.0 2022-04-22 20:19:43 +00:00
wh1te909
9acda5696e bump versions 2022-04-22 19:00:14 +00:00
wh1te909
dc6255048a fix wrong token variable being passed to func 2022-04-22 18:19:26 +00:00
wh1te909
2acde429d7 lower intervals 2022-04-22 16:35:51 +00:00
wh1te909
efcac1adac fix runaway memory during migration 2022-04-22 08:43:06 +00:00
wh1te909
81d5ecd758 add a run checks button to checks tab 2022-04-22 08:09:21 +00:00
wh1te909
d9ff004454 add trailing slash 2022-04-22 08:08:59 +00:00
wh1te909
d57135d793 lower intervals 2022-04-22 08:08:49 +00:00
wh1te909
bb5a0023af update nats-api 2022-04-21 21:26:22 +00:00
wh1te909
e3c25a167e nats 2.8.1 2022-04-21 21:16:31 +00:00
wh1te909
5be93ae17d update nats-server 2022-04-21 21:01:59 +00:00
wh1te909
3a2511d4a1 remove deprecated certbot flag 2022-04-21 20:45:47 +00:00
wh1te909
8ec7d98eef remove old docker instructions 2022-04-21 20:45:06 +00:00
wh1te909
9421ae25f7 update scripts and reqs 2022-04-21 20:32:41 +00:00
wh1te909
5b288b6fa1 prevent dialog error when script hasn't run yet 2022-04-21 19:17:57 +00:00
wh1te909
d35ed2980b add helper to run all celery tasks 2022-04-21 19:16:45 +00:00
wh1te909
6d8df6d2b9 lower intervals 2022-04-21 18:18:21 +00:00
wh1te909
a839513f7f more optimization 2022-04-21 17:29:38 +00:00
wh1te909
97b37b4742 make policy tabs match agent tabs UI 2022-04-21 17:14:07 +00:00
wh1te909
4894031219 fix for policy tasks not deleting on agents 2022-04-21 07:33:58 +00:00
wh1te909
8985b5511c remove import 2022-04-21 07:25:13 +00:00
wh1te909
b3c2a6a0cc fix typing 2022-04-21 07:08:33 +00:00
wh1te909
7291b440bb fix sync status 2022-04-21 06:34:06 +00:00
wh1te909
d75f134677 fix comment 2022-04-21 06:32:31 +00:00
wh1te909
e60069ec1d fix assigned task not running when the check it was assigned to was a policy check 2022-04-21 06:28:22 +00:00
wh1te909
034f49573d fix slow query 2022-04-21 01:08:02 +00:00
wh1te909
973d37a237 improve wording and add tooltip 2022-04-20 22:11:47 +00:00
wh1te909
d2ec609e68 add some default services 2022-04-20 21:30:10 +00:00
wh1te909
6b410399cd add function to clear cache 2022-04-20 19:47:49 +00:00
wh1te909
0c010570b9 make agent overdue alert button text more clear 2022-04-20 19:09:40 +00:00
wh1te909
78fc7faa13 fix SynchronousOnlyOperation when calling task from celery, due to wrong query 2022-04-20 18:43:04 +00:00
wh1te909
7671cce263 refactor and add tests for constants 2022-04-20 17:21:26 +00:00
wh1te909
a43a66a2d3 add daphne lock to gitignore 2022-04-20 17:20:11 +00:00
wh1te909
2190a2ed25 use isinstance 2022-04-20 17:03:01 +00:00
wh1te909
227636b705 always send recovery alert at agent level 2022-04-20 16:52:31 +00:00
wh1te909
5032170362 fix cache when deleting global policy 2022-04-20 16:38:43 +00:00
sadnub
b94c3961eb fix availability alert filter to get overdue alerts working 2022-04-20 08:51:17 -04:00
sadnub
46c7e89a94 fix displaying correct alert severity 2022-04-19 22:37:56 -04:00
sadnub
80861fd620 revert back to psycopg2 package 2022-04-19 22:37:56 -04:00
wh1te909
44f9390790 fix task call 2022-04-20 00:35:02 +00:00
wh1te909
8eca6c409a update reqs 2022-04-19 23:12:12 +00:00
wh1te909
4907c01191 refactor stuff 2022-04-19 22:33:51 +00:00
wh1te909
04bf314c61 fix task name 2022-04-19 22:26:27 +00:00
sadnub
57d92b276b some more django orm magic 2022-04-18 13:04:19 -04:00
sadnub
6a8efddab5 add category label to filtered custom field results 2022-04-18 12:36:54 -04:00
sadnub
fd908494ae make supported dropdowns filterable 2022-04-18 12:33:33 -04:00
sadnub
d617b23c2f add monitoring_type /agents filter and add index for monitoring_type field 2022-04-18 12:33:11 -04:00
sadnub
27874728bc return scripts from api sorted by category. #1066 2022-04-16 22:30:50 -04:00
wh1te909
56a0345260 fix arg 2022-04-16 23:18:39 +00:00
wh1te909
c412839165 ctx is no longer needed 2022-04-16 23:02:41 +00:00
wh1te909
b77f927ad5 remove old field 2022-04-16 23:00:57 +00:00
sadnub
8edd7f6a56 show the task name in agent history output. #1000 2022-04-16 17:31:29 -04:00
sadnub
c6915d0291 make agent count take permissions into account. #1028 2022-04-16 17:27:20 -04:00
sadnub
388eb94014 make site dropdown filterable #1068 2022-04-16 17:06:46 -04:00
sadnub
9ab80553e1 fix ui bug where policy checks weren't displaying if they had associated tasks 2022-04-16 16:47:52 -04:00
sadnub
86d639ee6a fix migration 2022-04-16 16:31:35 -04:00
sadnub
979fd8a249 fix migrations 2022-04-16 16:22:41 -04:00
sadnub
e65ab58f84 fix patches pending field on agent table 2022-04-16 16:22:26 -04:00
sadnub
8414bdbab1 prune orphaned tasks on agents daily 2022-04-16 15:24:07 -04:00
sadnub
d037b09128 fix Alerts model not existing during initial migration run 2022-04-16 15:23:51 -04:00
sadnub
9a687fec9b add meshcentral port alias to fix chat 2022-04-16 14:37:07 -04:00
sadnub
e9d71f169c optimize the cache_db_values task 2022-04-16 14:36:31 -04:00
sadnub
e09c307d58 fix tests 2022-04-15 17:42:45 -04:00
sadnub
d23d641b1b fix alert_severity on check_result defaulting to warning 2022-04-15 17:29:31 -04:00
sadnub
b1301091f9 add migration to remove duplicate win_task_names and make it unique 2022-04-15 17:06:33 -04:00
sadnub
2458eb3960 create migration that will fix duplicate win_task_names and make win_task_name unique 2022-04-15 14:52:37 -04:00
sadnub
fa836d88c7 fix migrating task/check results 2022-04-15 14:17:57 -04:00
sadnub
e26349f2fc more improvements loading the clients tree 2022-04-14 22:41:10 -04:00
sadnub
daa4e4d566 move maintenance mode and agent count checks to annotate to load client tree faster 2022-04-14 22:26:15 -04:00
sadnub
8e75df686d fix check/task results not being added to the model 2022-04-14 19:08:14 -04:00
sadnub
53537e7b3a make gh use the correct cache 2022-04-14 18:20:52 -04:00
sadnub
4beddc2271 fix tests? 2022-04-14 18:12:09 -04:00
sadnub
a6e4a774e0 fix tests 2022-04-14 18:03:39 -04:00
sadnub
dacc1c5770 fix cache not hitting if it returns an empty list 2022-04-14 17:58:51 -04:00
sadnub
25e922bc4c reduce queries on agent table load and bust cache on policy changes 2022-04-14 17:17:40 -04:00
wh1te909
c877c9b0fb back to latest 2022-04-13 16:15:02 +00:00
wh1te909
56bb206f25 back to latest 2022-04-13 16:14:40 +00:00
wh1te909
740a9ceaa7 add dev version 2022-04-13 07:43:36 +00:00
wh1te909
64e936127a revert docker version #1062 2022-04-13 05:38:10 +00:00
wh1te909
bd4549f389 revert docker version 2022-04-13 05:35:42 +00:00
sadnub
b1f7bd3ead fix tests 2022-04-12 16:38:57 -04:00
sadnub
b5e3b16e3a add option to disable mesh autologin 2022-04-12 16:18:31 -04:00
wh1te909
96a72a2cd7 0.12.4 fix login token 2022-04-12 19:09:04 +00:00
sadnub
c155da858e fix submitting saving date times and convert to native date input versus quasar 2022-04-12 13:38:53 -04:00
wh1te909
5e20a5cd71 0.12.4 fix login token 2022-04-12 16:49:00 +00:00
wh1te909
c1b2bbd152 update reqs 2022-04-12 06:14:46 +00:00
Dan
e3b5f418d6 Merge pull request #1008 from sadnub/develop
Policy rework, Global datetime format and other GH issue fixes
2022-04-11 16:21:05 -07:00
wh1te909
f82b589d03 Release 0.12.3 2022-04-11 23:16:28 +00:00
wh1te909
cddac4d0fb bump version 2022-04-11 22:21:57 +00:00
sadnub
dd6f92e54d suppress the redis key length warning 2022-04-10 23:23:04 -04:00
sadnub
5d4558bddf fix caching tasks 2022-04-10 23:18:47 -04:00
sadnub
5aa7b5a337 add custom cache backend for deleting many keys with a pattern 2022-04-10 22:42:10 -04:00
wh1te909
2fe0b5b90d testing coverage excludes 2022-04-10 20:26:00 +00:00
wh1te909
aa6997990c fix import 2022-04-10 19:21:03 +00:00
sadnub
c02ab50a0a clear role cache on save 2022-04-10 10:46:47 -04:00
sadnub
7cb16b2259 add caching for coresettings and roles 2022-04-10 10:44:51 -04:00
wh1te909
3173dc83a5 Merge branch 'develop' of https://github.com/sadnub/tacticalrmm into sadnub-develop 2022-04-10 05:09:03 +00:00
wh1te909
baddc29bb8 fix tests 2022-04-10 05:08:20 +00:00
Dan
612cbe6be4 Merge branch 'develop' into develop 2022-04-09 21:46:43 -07:00
sadnub
4c1d2ab1bb fix agent not getting alert template when policies change 2022-04-09 23:56:04 -04:00
sadnub
6b4704b2e2 fix caching tasks 2022-04-09 22:01:07 -04:00
wh1te909
c2286cde01 fix reqs 2022-04-09 23:09:42 +00:00
wh1te909
24a17712e7 skip on empty dict 2022-04-09 23:09:21 +00:00
wh1te909
27d537e7bb do not silently continue on exception 2022-04-09 23:08:34 +00:00
wh1te909
dbd89c72a3 django 4 2022-04-09 17:18:35 +00:00
wh1te909
ff41bbd0e5 adjust celery config 2022-04-09 17:09:54 +00:00
wh1te909
4bdb6ae84e fix graphics 2022-04-09 17:09:09 +00:00
sadnub
cece7b79ad remove silk profile config 2022-04-09 12:22:22 -04:00
sadnub
8d09d95fc3 fix ci attempt 2 2022-04-09 12:19:38 -04:00
sadnub
752542a1d1 fix ci and fix caching 2022-04-09 09:04:56 -04:00
sadnub
dd077383f7 fix docker dev settings 2022-04-08 23:39:18 -04:00
sadnub
6e808dbb0f add meshctrl to dev dependencies 2022-04-08 23:27:32 -04:00
sadnub
4ef3441f70 typo 2022-04-08 23:25:24 -04:00
sadnub
82624d6657 fix tests and more typing 2022-04-08 23:23:10 -04:00
sadnub
62e2b5230c configure mypy vscode extension and start fixing some types 2022-04-08 23:23:10 -04:00
sadnub
3325c30f29 Django 4 upgrade and add REDIS as cache backend 2022-04-08 23:22:33 -04:00
sadnub
18a06168f1 wip improving query times 2022-04-08 23:20:51 -04:00
sadnub
27e93e499f add django-silk to the docker dev setup 2022-04-08 23:20:51 -04:00
sadnub
90644a21a3 add date formatting helper icon that opens to quasar's site 2022-04-08 23:19:35 -04:00
sadnub
7e31f43ef1 allow date_format customization on the user level 2022-04-08 23:19:35 -04:00
sadnub
b13fc1fba4 fix websockets issues 2022-04-08 23:19:35 -04:00
sadnub
5d9109e526 add tests for handling multiple alert objects returned 2022-04-08 23:19:35 -04:00
sadnub
78dfa36b2a fix negative index issue 2022-04-08 23:19:35 -04:00
sadnub
dc05d87b44 add redis volume back 2022-04-08 23:19:35 -04:00
wh1te909
2c323a13c1 typo 2022-04-08 23:19:35 -04:00
sadnub
d4c5e38857 fix redis background save issue 2022-04-08 23:19:35 -04:00
sadnub
fb80e5c367 remove aof from redis 2022-04-08 23:19:35 -04:00
sadnub
beb08a3afb add script action migrate and remove deprecated fields 2022-04-08 23:19:35 -04:00
sadnub
7b2de8cbbd return 400 error if agent_id is missing from check results 2022-04-08 23:19:35 -04:00
sadnub
83e63bc87c migrate check/task result data to new table and create post update tasks to remove checks/tasks managed by policy 2022-04-08 23:19:35 -04:00
sadnub
4f5da33fd6 add some more typing info 2022-04-08 23:19:35 -04:00
sadnub
d00d003a67 fix typo 2022-04-08 23:18:02 -04:00
sadnub
002f24be10 fix fake agents script 2022-04-08 23:18:02 -04:00
sadnub
04992a1d95 add a helper to get global settings and work to remove all of the 'ignore: type' 2022-04-08 23:18:02 -04:00
sadnub
3c7cf2446e fix auto resolve alerts task to get policy checks as well 2022-04-08 23:14:05 -04:00
sadnub
29774ac014 fixed the rest of the tests and more bug fixes 2022-04-08 23:14:05 -04:00
sadnub
562d580987 fixed/implemented more tests and more bug fixes 2022-04-08 23:14:05 -04:00
sadnub
d8ad6c0cb0 code formatting 2022-04-08 23:14:05 -04:00
sadnub
7897b0ebe9 fix some tests and fixed tons of bugs 2022-04-08 23:14:04 -04:00
sadnub
e38af9fd16 rework task create/modify/delete/running and fix checks tests 2022-04-08 23:14:04 -04:00
sadnub
6ffdf5c251 Fixed Check History graph and reworked task sync_status 2022-04-08 23:14:04 -04:00
sadnub
69ef7676af finalize the schema and fix ui for checks 2022-04-08 23:14:04 -04:00
sadnub
b0ac57040c cleanup migrations and rework all checks/tasks to use the task/results table. fix alerts 2022-04-08 23:14:04 -04:00
sadnub
826ac7f185 returned tasks/checks in agent runner serializer and saving results 2022-04-08 23:14:04 -04:00
sadnub
0623f53f5d fix date format string empty 2022-04-08 23:14:04 -04:00
sadnub
b5ae875589 fix issue with multiple alert objects being returned 2022-04-08 23:14:04 -04:00
sadnub
c152e18e1a policy rework init 2022-04-08 23:14:04 -04:00
sadnub
903f0e5e19 implement global datetime format. #1007 2022-04-08 23:14:04 -04:00
sadnub
6fefd5589c Allow canceling other pending actions. Fixes #958 2022-04-08 23:14:04 -04:00
wh1te909
58fe14bd31 add coverage badge 2022-04-09 02:10:51 +00:00
wh1te909
97f362ed1e fix for multiprocessing 2022-04-09 01:26:04 +00:00
wh1te909
b63e87ecb6 add parallel 2022-04-09 01:01:32 +00:00
wh1te909
ac3550dfd7 add lcov 2022-04-09 00:48:00 +00:00
wh1te909
8278a4cfd9 remove run 2022-04-09 00:45:02 +00:00
wh1te909
f161a2bbc8 more coveralls 2022-04-09 00:43:47 +00:00
wh1te909
6a94489df0 testing coveralls 2022-04-09 00:26:22 +00:00
wh1te909
c3a0b9192f update reqs 2022-04-08 19:34:38 +00:00
wh1te909
69ff70a9ce typo [skip ci] 2022-04-08 18:49:15 +00:00
wh1te909
5284eb0af8 validate mesh username 2022-04-08 18:47:57 +00:00
wh1te909
58384ae136 update supported version 2022-04-08 18:45:53 +00:00
wh1te909
054cc78e65 add meshctrl 2022-04-08 18:30:17 +00:00
wh1te909
8c283281d6 remove lower() from mesh username 2022-04-08 16:06:44 +00:00
wh1te909
241fe41756 fix env 2022-04-05 22:44:41 +00:00
wh1te909
e50e0626fa also check env 2022-04-05 21:31:13 +00:00
wh1te909
c9135f1573 add option to specify sslmode for nats-api pg connection closes #1049 2022-04-05 21:14:22 +00:00
wh1te909
ec2663a152 Release 0.12.2 2022-04-05 03:44:17 +00:00
wh1te909
7567042c8a bump versions 2022-04-05 03:41:18 +00:00
wh1te909
c99ceb155f update CI badge and supported agent versions 2022-04-04 22:25:13 +00:00
wh1te909
f44c92f0d3 switch to gh actions for tests 2022-04-04 21:54:46 +00:00
wh1te909
492701ec62 change dir 2022-04-04 07:08:57 +00:00
wh1te909
a6d0acaa4d black 2022-04-03 23:14:44 +00:00
wh1te909
f84b4e7274 remove dev deps from pipelines 2022-04-03 23:09:08 +00:00
wh1te909
b7ef5b82d8 add silk to dev 2022-04-03 22:48:27 +00:00
wh1te909
a854d2c38c add authorization to NATS 2022-04-03 22:47:43 +00:00
wh1te909
5140499bbd update uwsgi conf 2022-04-01 06:12:19 +00:00
wh1te909
7183e9ee85 attempt 2 2022-03-31 06:07:23 +00:00
wh1te909
11885e0aca attempt 1 to fix pipelines 2022-03-31 05:54:30 +00:00
wh1te909
2bda4e822c move pipelines 2022-03-31 05:34:50 +00:00
wh1te909
8867d12ec7 Release 0.12.1 2022-03-25 01:42:49 +00:00
wh1te909
154149a068 bump versions 2022-03-25 00:59:28 +00:00
wh1te909
c96985af03 add mesh troubleshooting script 2022-03-25 00:33:39 +00:00
wh1te909
e282420a6a handle locale, add --debug for linux install script 2022-03-24 23:53:58 +00:00
wh1te909
b9a207ea71 update reqs 2022-03-24 06:44:16 +00:00
wh1te909
28d52b5e7a tmpdir #1017 2022-03-24 02:10:21 +00:00
wh1te909
9761f1ae29 fix for older nix machines without updated certs 2022-03-24 00:48:03 +00:00
wh1te909
e62c8cc2e2 set platform on double click fixes #1013 2022-03-21 21:08:42 +00:00
wh1te909
b5aea92791 purge celery during update 2022-03-20 09:00:33 +00:00
wh1te909
2d7724383f Release 0.12.0 2022-03-19 20:08:16 +00:00
wh1te909
03f35c1975 update readme 2022-03-19 20:04:17 +00:00
wh1te909
bc7dad77f4 update reqs 2022-03-19 01:31:21 +00:00
wh1te909
aaa2540114 update backup/restore 2022-03-19 01:24:17 +00:00
wh1te909
f46787839a remove extra print statement 2022-03-19 00:03:03 +00:00
wh1te909
228be95af1 bump banner 2022-03-18 22:47:37 +00:00
wh1te909
a22d7e40e5 no longer need to backup mesh exes 2022-03-18 19:40:14 +00:00
wh1te909
d0f87c0980 remove docs workflow 2022-03-18 18:13:51 +00:00
wh1te909
5142783db9 update for new repo 2022-03-18 18:09:58 +00:00
wh1te909
4aea16ca8c remove check 2022-03-17 19:55:31 +00:00
wh1te909
d91d372fc5 Merge branch 'develop' of https://github.com/wh1te909/tacticalrmm into develop 2022-03-17 19:54:58 +00:00
sadnub
7405d884de black and fix settings.py 2022-03-17 15:47:28 -04:00
wh1te909
a9ae63043e uncomment 2022-03-17 19:46:52 +00:00
sadnub
6b943866ef add migration 2022-03-17 15:03:43 -04:00
sadnub
c7bb94d82a add api key auth to swagger 2022-03-17 11:30:45 -04:00
sadnub
30fb855200 black 2022-03-17 00:11:15 -04:00
sadnub
80f9e56e3f Fixes #859 2022-03-16 23:58:57 -04:00
sadnub
d301d967c7 Fixes #912 2022-03-16 23:44:50 -04:00
sadnub
7b7bdc4e9c implements #827 2022-03-16 21:55:12 -04:00
wh1te909
796ebca74c fix undefined 2022-03-17 01:19:13 +00:00
sadnub
3150bc316a change to inline if/else 2022-03-16 20:20:58 -04:00
sadnub
0a91b12e6e fix alert template name rendering and circular import issue 2022-03-16 20:17:51 -04:00
sadnub
918e2cc1a9 implement #819 and cleanup my garbage code XD 2022-03-16 19:56:49 -04:00
sadnub
fb71f83d6d Addresses #957. Will check if the script within a task action doesn't exist and will remove it when running the task on an agent 2022-03-16 14:21:36 -04:00
Dan
82470bf04f Merge pull request #1004 from silversword411/develop
Commenting scripts
2022-03-15 20:39:47 -07:00
silversword411
0ac75092e6 fix print to echo 2022-03-15 00:20:34 -04:00
silversword411
e898163aff Commenting scripts 2022-03-15 00:02:57 -04:00
wh1te909
418c7e1d9e fix notes 2022-03-15 01:09:09 +00:00
wh1te909
24cbabeaf0 fix type 2022-03-14 15:53:02 +00:00
wh1te909
91069b989d set uwsgi procs/threads dynamically in docker 2022-03-14 07:27:44 +00:00
wh1te909
1b7902894a remove un-used column 2022-03-14 06:49:49 +00:00
wh1te909
47e022897e typo 2022-03-14 06:41:04 +00:00
wh1te909
9aada993b1 nginx/celery changes and bump docker deps 2022-03-14 04:20:41 +00:00
sadnub
cf837b6d05 black 2022-03-13 16:18:52 -04:00
sadnub
09192da4fc fix downloading mesh agent on docker 2022-03-13 14:29:39 -04:00
wh1te909
3a792765cd bump 2022-03-13 04:56:23 +00:00
sadnub
a8f1b1c8bc update mesh agent port docker 2022-03-12 16:34:04 -05:00
wh1te909
8ffdc6bbf8 isort 2022-03-12 08:06:39 +00:00
wh1te909
945370bc25 black 2022-03-12 08:04:26 +00:00
wh1te909
ed4b3b0b9c fix tests 2022-03-12 07:52:00 +00:00
wh1te909
83a4268441 fix more tests 2022-03-12 03:13:56 +00:00
wh1te909
2938be7a70 more recovery rework 2022-03-12 02:30:32 +00:00
wh1te909
e3b2ee44ca add recovery for linux agent 2022-03-12 01:54:28 +00:00
wh1te909
f0c4658c9f update deps 2022-03-12 01:52:52 +00:00
wh1te909
0a4b236293 linux install changes [skip ci] 2022-03-11 22:34:56 +00:00
wh1te909
bc7b53c3d4 add supported os to bulk actions 2022-03-11 21:33:43 +00:00
wh1te909
5535e26eec require shebang for linux/mac scripts, refactor middleware/posix 2022-03-11 20:57:04 +00:00
sadnub
c84c3d58db fix category removal function and fix vuex store with agent action menu 2022-03-11 15:40:00 -05:00
wh1te909
d6caac51dd change paths for linux agent [skip ci] 2022-03-11 18:49:20 +00:00
sadnub
979e7a5e08 fix script lists and filtering for platform 2022-03-10 22:55:44 -05:00
sadnub
40f16eb984 add supported_platforms and hidden field to scripts and filter in script dialogs and allow editing in script forms 2022-03-10 20:01:24 -05:00
wh1te909
c17ad1b989 start fixing tests 2022-03-10 07:24:42 +00:00
wh1te909
24bfa062da update python and node 2022-03-10 03:07:37 +00:00
wh1te909
765f675da9 remove meshagent from db during agent uninstall closes #147 2022-03-10 02:24:34 +00:00
wh1te909
c0650d2ef0 update reqs 2022-03-10 01:52:51 +00:00
wh1te909
168434739f remove badge 2022-03-10 01:52:43 +00:00
wh1te909
337eaa46e3 switch pipelines to 3.10 2022-03-10 01:50:37 +00:00
wh1te909
94d42503b7 update reqs 2022-03-10 01:50:18 +00:00
wh1te909
202edc0588 v0.12.0 2022-03-10 00:57:55 +00:00
wh1te909
c95d11da47 testing new coverage 2022-03-04 07:17:56 +00:00
wh1te909
4f8615398c update reqs 2022-02-14 07:25:23 +00:00
wh1te909
f3b5f0128f update reqs 2022-02-13 00:30:35 +00:00
wh1te909
ab5e50c29c fix vscode deprecations / remove mypy 2022-02-11 19:57:49 +00:00
wh1te909
f9236bf92f don't show version banner if error 2022-02-08 22:17:08 +00:00
wh1te909
2522968b04 update bin 2022-02-08 21:00:09 +00:00
wh1te909
9c1900963d update reqs 2022-02-08 17:27:16 +00:00
wh1te909
82ff41e0bb always retry websocket reconnect even on close code 1000 because daphne sucks 2022-02-03 06:35:06 +00:00
wh1te909
fb86c14d77 update reqs 2022-02-03 03:16:17 +00:00
sadnub
c6c0159ee4 remove mkdocs container from docker dev 2022-02-01 22:20:11 -05:00
sadnub
fe5bba18a2 fix dev containers for non-root containers 2022-02-01 22:15:38 -05:00
sadnub
f61329b5de update mesh ports for persistent mesh configuration 2022-02-01 22:15:08 -05:00
sadnub
fbc04afa5b fix typo 2022-02-01 22:14:34 -05:00
sadnub
2f5bcf2263 change community script link to new repo 2022-02-01 22:14:19 -05:00
sadnub
92882c337c Merge pull request #939 from lcsnetworks/non_root_containers
Non root containers
2022-02-01 22:11:02 -05:00
wh1te909
bd41f69a1c fix async call and update nats-py 2022-01-30 22:14:47 +00:00
wh1te909
f801709587 update reqs 2022-01-28 07:30:17 +00:00
wh1te909
1cb37d29df change scripts dir 2022-01-24 05:07:08 +00:00
wh1te909
2d7db408fd update reqs 2022-01-24 04:51:44 +00:00
sadnub
ef1afc99c6 remove community script tests 2022-01-22 00:12:10 -05:00
sadnub
5682c9a5b2 fix install.sh 2022-01-22 00:07:15 -05:00
sadnub
c525b18a02 remove unused import 2022-01-22 00:00:19 -05:00
sadnub
72159cb94d fix docker entrypoint when copying community scripts 2022-01-22 00:00:19 -05:00
sadnub
39e31a1039 change git urls back 2022-01-22 00:00:19 -05:00
sadnub
734177fecc delete community scripts from repo 2022-01-22 00:00:19 -05:00
sadnub
39311099df community-script rework 2022-01-22 00:00:18 -05:00
Dan
b8653e6601 Merge pull request #951 from silversword411/develop
Adding all services to troubleshooting_server.sh and docs additions
2022-01-21 15:53:40 -08:00
sadnub
cb4b1971e6 add print output for django commands 2022-01-21 13:23:00 -05:00
silversword411
63c60ba716 docs - Adding troubleshooting notes around 2022-01-21 11:52:25 -05:00
silversword411
50435425e5 adding all services to troubleshooting script 2022-01-21 11:51:54 -05:00
Joel DeTeves
ff192f102d Ensure external Mesh link defaults to port 443 2022-01-20 14:35:59 -08:00
Joel DeTeves
99cdaa1305 Forgot to update the container ports in the docker-compose file 2022-01-20 13:54:15 -08:00
Dan
7fc897dba9 Merge pull request #948 from silversword411/develop
docs - api example fix thx bc24fl
2022-01-19 22:54:00 -08:00
silversword411
3bedd65ad8 docs - how it works agent debug 2022-01-19 22:44:25 -05:00
silversword411
a46175ce53 docs - api example fix thx bc24fl 2022-01-19 21:55:02 -05:00
Joel DeTeves
dba3bf8ce9 Clean up volume inits, fix missing init for certs volume 2022-01-18 15:10:46 -08:00
Dan
3f32234c93 Merge pull request #945 from silversword411/develop
docs - enable keys and FAQ tweaks
2022-01-17 22:23:18 -08:00
silversword411
2863e64e3b docs - faq 2022-01-18 01:09:46 -05:00
silversword411
68ec78e01c docs - FAQ tweaks 2022-01-18 00:58:54 -05:00
silversword411
3a7c506a8f docs - enabling keys 2022-01-18 00:04:20 -05:00
Dan
1ca63ed2d2 Merge pull request #944 from silversword411/develop
docs - api examples and more
2022-01-17 20:28:32 -08:00
silversword411
e9e98ebcfc docs - api examples and more 2022-01-17 23:23:42 -05:00
Dan
04de7998af Merge pull request #941 from silversword411/develop
docs - docker backup options
2022-01-17 19:57:54 -08:00
Dan
a5d02dc34a Merge pull request #940 from Yamacore/develop
fixed tooltip error
2022-01-17 19:57:32 -08:00
Dan
6181b0466e Merge pull request #938 from iamkhris/develop
Add files via upload
2022-01-17 19:56:57 -08:00
Joel DeTeves
810d8f637d Set redis container to run as non-root 2022-01-17 15:18:21 -08:00
Joel DeTeves
223b3e81d5 Make NGINX_HOST_PORT configurable for K8s load balancer compatibility 2022-01-17 12:28:33 -08:00
silversword411
3a8b5bbd3f docs - docker backup options 2022-01-17 12:53:19 -05:00
Yamacore
ecf3b33ca7 fixed tooltip error
instead of "Continue if task if an action fails"
changed to "Continue task if an action fails"
2022-01-17 16:12:37 +01:00
Joel DeTeves
006b20351e Use uniform UID (1000) + fix permission for tactical-frontend container 2022-01-17 01:12:18 -08:00
Joel DeTeves
4b577c9541 Set docker-compose to run as non-root on all applicable containers 2022-01-17 01:03:21 -08:00
Joel DeTeves
8db59458a8 Make init container volume mount paths more accurate to avoid potential conflicts 2022-01-17 00:56:52 -08:00
Joel DeTeves
7eed5f09aa Fix permissions for mongodb container 2022-01-17 00:30:39 -08:00
Joel DeTeves
a1bb265222 Make NATS & NGINX container run as same UID (1000), fix NATS supervisord permission 2022-01-17 00:08:31 -08:00
Joel DeTeves
0235f33f8b Fix incorrect ports for nginx & mesh inits 2022-01-16 23:34:54 -08:00
Joel DeTeves
3d6fca85db Fix permissions for NGINX container, remove duplicate initialization for TACTICAL_DIR 2022-01-16 23:00:52 -08:00
Joel DeTeves
4c06da0646 Fix permissions for meshcentral container 2022-01-16 22:46:49 -08:00
Christopher Phillips
f63603eb84 Add files via upload
Sends Windows 10 Toast alert when password expiration reaches 7, 3, 2, and 1 days.  Works with both local and domain accounts.  Best to set up as a scheduled task, but can also be run manually.  On 1 day alert, an "Urgent" BurntToastLogo is downloaded and used instead of the regular logo to indicate importance.  These files are hosted on a site you have access to.
2022-01-16 14:54:59 -07:00
Joel DeTeves
44418ef295 Switch tactical-meshcentral to run as non-root 2022-01-16 11:13:27 -08:00
wh1te909
2a67218a34 fix lockfile version 2022-01-16 08:02:58 +00:00
wh1te909
911586ed0b update reqs 2022-01-16 07:47:59 +00:00
Dan
9d6a6620e3 Merge pull request #935 from silversword411/develop
docs - adding to how it all works
2022-01-15 23:44:32 -08:00
Joel DeTeves
598d0acd8e Fix incorrect ports on tactical-nginx container 2022-01-15 21:25:33 -08:00
Joel DeTeves
f16ece6207 Switch tactical-nats to run as non-root 2022-01-15 21:21:58 -08:00
Joel DeTeves
9b55bc9892 Switch tactical-nginx to nginx-unprivileged container 2022-01-15 20:20:53 -08:00
Joel DeTeves
707e67918b Switch tactical-frontend to nginx-unprivileged container 2022-01-15 19:12:04 -08:00
Joel DeTeves
faac572c30 Change tactical container uwsgi ports 2022-01-15 17:38:55 -08:00
silversword411
571b37695b docs - adding to how it all works 2022-01-15 13:17:16 -05:00
wh1te909
227adc459f update demo 2022-01-15 02:39:30 +00:00
wh1te909
2ee36f1a9c fix old version refresh needed banner not displaying 2022-01-14 07:51:09 +00:00
wh1te909
31830dc67d Release 0.11.3 2022-01-14 05:35:42 +00:00
wh1te909
d0ce2a46ac bump version 2022-01-14 05:33:07 +00:00
wh1te909
7e5bc4e1ce add back debug tab 2022-01-14 05:29:24 +00:00
wh1te909
d2b6d0a0ff make field required 2022-01-14 05:29:01 +00:00
wh1te909
542b0658b8 fix reboot now/later fixes #933 2022-01-13 23:07:25 +00:00
Dan
e73c7e19b5 Merge pull request #934 from iamkhris/develop
Add files via upload
2022-01-13 14:43:57 -08:00
Dan
6a32ed7d7b Merge pull request #932 from bbrendon/patch-3
probably a copy paste error
2022-01-13 14:43:04 -08:00
Christopher Phillips
a63001f17c Add files via upload
Sends Windows 10 Toast alert when password expiration reaches 7, 3, 2, and 1 days.  Works with both local and domain accounts.  Best to set up as a scheduled task, but can also be run manually.  On 1 day alert, an "Urgent" BurntToastLogo is downloaded and used instead of the regular logo to indicate importance.  These files are hosted on a site you have access to.
2022-01-13 11:42:26 -07:00
bbrendon
4d1ad9c832 probably a copy paste error 2022-01-13 08:06:59 -08:00
wh1te909
455bf53ba6 Release 0.11.2 2022-01-13 02:48:32 +00:00
wh1te909
454aa6ccda bump version 2022-01-13 02:48:22 +00:00
wh1te909
85ffebb3fa fix post update tasks for policy tasks 2022-01-13 02:47:34 +00:00
wh1te909
bc99434574 Release 0.11.1 2022-01-13 02:03:22 +00:00
wh1te909
9e86020ef7 bump version 2022-01-13 02:03:13 +00:00
wh1te909
6e9bb0c4f4 Release 0.11.0 2022-01-13 01:47:27 +00:00
wh1te909
d66a41a8a3 bump versions 2022-01-12 23:24:52 +00:00
wh1te909
90914bff14 bump mesh 2022-01-12 21:42:40 +00:00
Dan
62414848f4 Merge pull request #930 from silversword411/develop
docs - av eset
2022-01-12 13:11:25 -08:00
Dan
d4ece6ecd7 Merge pull request #929 from iamkhris/develop
Add files via upload
2022-01-12 13:11:11 -08:00
silversword411
d1ec60bb63 docs - av eset 2022-01-12 15:31:34 -05:00
Christopher Phillips
4f672c736b Add files via upload
Sends Windows 10 Toast alert when password expiration reaches 7, 3, 2, and 1 days.  Works with both local and domain accounts.  Best to set up as a scheduled task, but can also be run manually.  On 1 day alert, an "Urgent" BurntToastLogo is downloaded and used instead of the regular logo to indicate importance.  These files are hosted on a site you have access to.
2022-01-12 09:51:06 -07:00
Dan
2e5c351d8b Merge pull request #927 from silversword411/develop
docs - av and faq additions
2022-01-11 14:14:55 -08:00
silversword411
3562553346 Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2022-01-11 16:58:35 -05:00
silversword411
4750b292a5 docs - av, faq additions 2022-01-11 16:58:33 -05:00
Dan
3eb0561e90 Merge pull request #925 from yaroz/patch-1
Get Anydesk ID for licensed or unlicensed installs.
2022-01-11 09:39:59 -08:00
Dan
abb118c8ca Merge pull request #924 from lcsnetworks/mesh_smtp_settings
Make Mesh SMTP settings configurable
2022-01-11 09:38:53 -08:00
wh1te909
2818a229b6 fix bit to string 2022-01-11 03:16:11 +00:00
sadnub
a9b8af3677 fix last day of month 2022-01-10 20:28:26 -05:00
yaroz
0354da00da Update Win_AnyDesk_Get_Anynet_ID.ps1
Licensed AnyDesk installs do not save the system.conf file to the $GoodPath\AnyDesk directory.  The script searches subdirectories of $GoodPath for system.conf and uses the result to get the ID.
2022-01-10 16:07:35 -05:00
Joel DeTeves
b179587475 Make Mesh SMTP settings configurable 2022-01-10 12:53:33 -08:00
wh1te909
3021f90bc5 agent ver check 2022-01-10 20:00:11 +00:00
wh1te909
a14b0278c8 remove dead code for unsupported agents 2022-01-10 19:32:48 +00:00
wh1te909
80070b333e fix exception in automation svc manual mode 2022-01-10 18:08:35 +00:00
Dan
3aa8dcac11 Merge pull request #923 from silversword411/develop
docs - docker 2fa reset
2022-01-10 09:34:51 -08:00
silversword411
e920f05611 docs - docker 2fa reset 2022-01-10 09:24:15 -05:00
wh1te909
3594afd3aa update reqs 2022-01-10 08:49:22 +00:00
wh1te909
9daaee8212 demo stuff 2022-01-10 08:37:30 +00:00
wh1te909
d022707349 remove unused imports 2022-01-10 08:09:36 +00:00
wh1te909
3948605ae6 remove deprecated endpoint 2022-01-10 08:00:20 +00:00
wh1te909
f2ded5fdd6 fix tz aware 2022-01-10 07:26:27 +00:00
wh1te909
00b47be181 add loading 2022-01-10 06:07:56 +00:00
sadnub
a2fac5d946 changes nats file watcher to use the NATS_CONFIG variable 2022-01-09 21:36:21 -05:00
sadnub
a00b5bb36b convert old scheduled type tasks to daily 2022-01-09 21:25:26 -05:00
wh1te909
d4fbc34085 fix collector task button when editing task 2022-01-10 01:49:59 +00:00
wh1te909
e9e3031992 add migration 2022-01-10 01:13:41 +00:00
sadnub
c2c7553f56 convert task to new format 2022-01-09 16:04:00 -05:00
sadnub
4e60cb89c9 fix remove if not scheduled 2022-01-09 12:17:07 -05:00
sadnub
ec4523240f remove inotify installation from nats docker image 2022-01-09 11:49:07 -05:00
sadnub
1655ddbcaa fix/add tests and add additional serverside validation for tasks 2022-01-09 11:41:52 -05:00
sadnub
997c677f30 wip 2022-01-08 21:46:32 -05:00
wh1te909
d5fc8a2d7e reduce the watcher interval to 1 second to prevent nats auth errors on first check-in 2022-01-09 01:11:57 +00:00
wh1te909
3bcd0302a8 fix django admin error 2022-01-09 01:02:31 +00:00
wh1te909
de91b7e8af update reqs 2022-01-08 10:34:02 +00:00
Dan
7efd1d7c9e Merge pull request #921 from silversword411/develop
docs - sophos and index update
2022-01-08 02:02:10 -08:00
Dan
b5151a2178 Merge pull request #920 from lcsnetworks/nats_config_watcher
Replace inotify with custom config watcher script
2022-01-08 02:01:21 -08:00
silversword411
c8432020c6 docs - Add Script Variables to Index 2022-01-07 12:35:05 -05:00
wh1te909
2c9d413a1a update nats docker 2022-01-07 08:04:37 +00:00
Joel DeTeves
cdf842e7ad Make NATS config watcher interval adjustable 2022-01-06 23:15:11 -08:00
Joel DeTeves
c917007949 Replace inotify with custom config watcher script 2022-01-06 22:49:01 -08:00
wh1te909
64278c6b3c update reqs 2022-01-07 06:46:55 +00:00
wh1te909
10a01ed14a add migration 2022-01-07 06:44:24 +00:00
sadnub
ba3bd1407b fix cert path in dev and prod 2022-01-06 09:56:33 -05:00
sadnub
73666c9a04 remove check for cert path exists for docker 2022-01-06 09:35:54 -05:00
sadnub
eae24083c9 fix docker dev with latest prod docker image options 2022-01-05 21:17:59 -05:00
silversword411
a644510c27 docs - Sophos rules 2022-01-05 11:12:14 -05:00
Dan
57859d0da2 Merge pull request #910 from silversword411/develop
docs - script temp, support, updating script run info, mesh agent blank data
2022-01-03 13:28:31 -08:00
silversword411
057f0ff648 docs - support template typo 2022-01-03 12:00:10 -05:00
sadnub
05d1c867f2 Merge pull request #911 from lcsnetworks/mesh-ws-mask-override
Add option to enable webSocketMaskOverride in MeshCentral config
2022-01-01 11:49:31 -05:00
Joel DeTeves
a2238fa435 Add option to enable webSocketMaskOverride in MeshCentral config 2021-12-31 19:31:18 -08:00
silversword411
12b7426a7c docs - mesh agent blank data 2021-12-31 17:38:47 -05:00
silversword411
5148d613a7 docs - updating script run info 2021-12-31 17:23:09 -05:00
sadnub
f455c15882 fix task edits not involving task_type 2021-12-31 15:11:33 -05:00
silversword411
618fdabd0e docs - support template update 2021-12-31 14:15:24 -05:00
silversword411
3b69e2896c docs - script temp location 2021-12-31 13:38:13 -05:00
sadnub
7306b63ab1 set some default values for task settings 2021-12-31 13:33:59 -05:00
sadnub
7e3133caa2 automated task rework 2021-12-30 23:56:37 -05:00
sadnub
560901d714 Merge pull request #909 from lcsnetworks/fix_get_mesh_exe_url
Update get_mesh_exe_url command to use MESH_WS_URL for docker
2021-12-30 20:19:20 -05:00
Joel DeTeves
166ce9ae78 Update get_mesh_exe_url command to use MESH_WS_URL for docker 2021-12-30 15:53:31 -08:00
Dan
d3395a685e Merge pull request #908 from silversword411/develop
Docs - update faq and mesh agent recovery
2021-12-30 13:55:31 -08:00
silversword411
6d5e9a8566 docs - agent repair 2021-12-30 16:43:05 -05:00
silversword411
69ec03feb4 docs - faq updates 2021-12-30 16:42:50 -05:00
sadnub
f92982cd5a fix quotes around cert path in tactical docker image 2021-12-30 08:50:10 -05:00
sadnub
5570f2b464 Merge pull request #907 from lcsnetworks/set_ssl_path
Make cert pub & private paths configurable
2021-12-29 21:56:25 -05:00
Joel DeTeves
ad19dc0240 Make cert pub & private paths configurable 2021-12-29 18:01:37 -08:00
sadnub
9b1d4faff8 fix typo in nginx config 2021-12-29 09:26:15 -05:00
Dan
76756d20e9 Merge pull request #904 from silversword411/develop
docs - nats-api update
2021-12-28 15:50:30 -08:00
sadnub
e564500480 update mesh initial setup command to use MESH_WS_URL for docker 2021-12-28 00:35:02 -05:00
sadnub
19c15ce58d Add configurable mesh websocket url 2021-12-28 00:31:35 -05:00
sadnub
a027785098 Merge pull request #905 from lcsnetworks/nginx_config_vars
Replace hardcoded services & DNS resolvers in NGINX config with variables
2021-12-28 00:27:02 -05:00
Joel DeTeves
36a9f10aae Replace hardcoded services in NGINX config with variables 2021-12-27 15:46:11 -08:00
silversword411
99a11a4b53 docs - nats-api update 2021-12-27 17:01:05 -05:00
wh1te909
55cac4465c speed up client/site tree loading and add popout button to summary tab 2021-12-26 09:36:56 +00:00
wh1te909
ff395fd074 update api docs 2021-12-24 21:15:43 +00:00
wh1te909
972b6e09c7 update docs fixes #896 2021-12-24 21:00:58 +00:00
Dan
e793a33b15 Merge pull request #901 from silversword411/develop
docs - enable swagger
2021-12-24 12:04:04 -08:00
silversword411
e70d4ff3f3 docs - api swagger tweaks 2021-12-24 14:50:02 -05:00
silversword411
cd0635d3a0 docs - enable swagger 2021-12-24 11:37:36 -05:00
wh1te909
81702d8595 update middleware method 2021-12-23 23:07:19 +00:00
sadnub
aaa4a65b04 fix tests 2021-12-23 14:47:44 -05:00
sadnub
430797e626 add take control button to summary 2021-12-23 14:44:04 -05:00
sadnub
d454001f49 fixed #874 2021-12-23 14:00:05 -05:00
sadnub
bd90ee1f58 added agent status page and added #332 2021-12-23 13:57:37 -05:00
wh1te909
196aaa5427 more demo stuff 2021-12-23 07:15:34 +00:00
wh1te909
6e42233b33 fix export button not working fixes #895 2021-12-22 22:10:52 +00:00
Dan
8e44df8525 Merge pull request #894 from MalteKiefer/feature/script-update-lenovo-drivers
added script for lenovo driver updates
2021-12-22 12:03:00 -08:00
wh1te909
a8a1536941 black 2021-12-22 19:58:09 +00:00
silversword411
99d1728c70 scripts_wip - working on newer version w/features 2021-12-22 12:57:55 -05:00
Malte Kiefer
6bbb92cdb9 move script to correct location 2021-12-22 16:30:35 +01:00
Malte Kiefer
b80e7c06bf added script for lenovo driver updates 2021-12-22 15:47:52 +01:00
wh1te909
bf467b874c make site dropdown required 2021-12-22 06:19:28 +00:00
wh1te909
43c9f6be56 add script to generate fake agents for demo 2021-12-22 06:15:40 +00:00
Dan
6811a4f4ae Merge pull request #890 from silversword411/develop
docs - link add
2021-12-21 22:13:18 -08:00
silversword411
1f16dd9c43 docs - av 2021-12-22 00:27:29 -05:00
silversword411
63a43ce104 docs - link add 2021-12-21 22:23:26 -05:00
wh1te909
bd7ce5417e add demo middleware 2021-12-22 02:34:36 +00:00
Dan
941ee54a97 Merge pull request #889 from silversword411/develop
docs - av exclusions
2021-12-21 11:04:42 -08:00
wh1te909
a5d4a64f47 nginx conf updates for older installs fixes #888 2021-12-21 19:04:00 +00:00
silversword411
d96fcd4a98 docs - av exclusions 2021-12-21 12:26:24 -05:00
Dan
de42e2f747 Merge pull request #887 from silversword411/develop
docs - sc and install server, and script_wip
2021-12-20 16:59:59 -08:00
silversword411
822a93aeb6 docs - sc and install server, and script_wip 2021-12-20 16:06:54 -05:00
Dan
c31b4aaeff Merge pull request #886 from silversword411/develop
script_wip - admin LAPS script
2021-12-20 10:51:01 -08:00
Dan
8c9a386054 Merge pull request #884 from MalteKiefer/feature/script-get-securepoint-deviceid
added script to get securepoint deviceid
2021-12-20 10:50:46 -08:00
silversword411
8c90933615 docs - fix TOC and typo 2021-12-20 12:35:43 -05:00
silversword411
6f8c242333 script_wip - admin LAPS script 2021-12-20 12:20:43 -05:00
Malte Kiefer
fe8b66873a added script to get securepoint deviceid 2021-12-20 10:32:08 +01:00
wh1te909
00c5f1365a bump versions 2021-12-20 06:48:02 +00:00
wh1te909
f7d317328a update reqs 2021-12-20 06:48:02 +00:00
wh1te909
3ccd705225 bump backup script version 2021-12-20 06:48:02 +00:00
diskraider
9e439fffaa Change timeout method
The current timeout command results in the error "ERROR: Input redirection is not supported, exiting the process immediately."

Reusing the ping tool as a delay resolves this error: the batch script no longer relies on a user-interruptible timeout, but it still pauses for roughly 4-5 seconds.
2021-12-20 06:48:02 +00:00
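A minimal sketch of the ping-as-delay idiom this commit describes (a hypothetical standalone example, not the repository's actual script):

    @echo off
    :: Assumed example: "timeout /t 5" fails with "Input redirection is not supported"
    :: when the script runs without an interactive console, so ping is used as a delay.
    echo Waiting roughly 4 seconds before continuing...
    REM 5 pings to 127.0.0.1, about 1 second apart, give an approximately 4-second pause
    ping 127.0.0.1 -n 5 > nul
    echo Done waiting.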
wh1te909
859dc170e7 update uninstall params 2021-12-20 06:48:02 +00:00
silversword411
1932d8fad9 docs - backup and silent uninstall tweaks 2021-12-20 06:48:02 +00:00
wh1te909
0c814ae436 reduce ram reqs 2021-12-20 06:48:02 +00:00
sadnub
89313d8a37 make post_update_tasks run on init container start 2021-12-20 06:48:02 +00:00
silversword411
2b85722222 docs - mesh download multiple 2021-12-20 06:48:02 +00:00
David Randall
57e5b0188c Fixes #872: backup.sh does not have EOL
Add EOL to backup.sh so CRON doesn't fail.
2021-12-20 06:48:02 +00:00
silversword411
2d7c830e70 docs code signing emphasis 2021-12-20 06:48:02 +00:00
silversword411
ccaa1790a9 docs - sys req info 2021-12-20 06:48:02 +00:00
silversword411
f6531d905e docs cron backups 2021-12-20 06:48:02 +00:00
silversword411
64a31879d3 docs - video of updating server 2021-12-20 06:48:02 +00:00
silversword411
0c6a4b1ed2 script - tweak AUOptions revert 2021-12-20 06:48:02 +00:00
silversword411
67801f39fe docs - 3rd party Screenconnect AIO 2021-12-20 06:48:02 +00:00
silversword411
892a0d67bf docs updating install agent script 2021-12-20 06:48:02 +00:00
silversword411
9fc0b7d5cc script_wip 2021-12-20 06:48:02 +00:00
bc24fl
22a614ef54 Added Printer Restart Jobs Community Script 2021-12-20 06:48:02 +00:00
silversword411
cd257b8e4d docs faq log4j 2021-12-20 06:48:02 +00:00
silversword
fa1ee2ca14 docs - updating index 2021-12-20 06:48:02 +00:00
wh1te909
34ea1adde6 sorting fixes #857 2021-12-20 06:48:02 +00:00
wh1te909
41cf8abb1f update reqs 2021-12-20 06:48:02 +00:00
silversword411
c0ffec1a4c docs - howitallworks nats server service 2021-12-20 06:48:02 +00:00
bc24fl
65779b8eaf Added Sophos Endpoint Install Community Script 2021-12-20 06:48:02 +00:00
bc24fl
c47bdb2d56 Added Sophos Endpoint Install Community Script 2021-12-20 06:48:02 +00:00
Michael Maertzdorf
d47ae642e7 Create SECURITY.md 2021-12-20 06:48:02 +00:00
Michael Maertzdorf
39c4609cc6 Create devskim-analysis.yml 2021-12-20 06:48:02 +00:00
dependabot[bot]
3ebba02a10 Bump django from 3.2.9 to 3.2.10 in /api/tacticalrmm
Bumps [django](https://github.com/django/django) from 3.2.9 to 3.2.10.
- [Release notes](https://github.com/django/django/releases)
- [Commits](https://github.com/django/django/compare/3.2.9...3.2.10)

---
updated-dependencies:
- dependency-name: django
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2021-12-20 06:48:02 +00:00
Michael Maertzdorf
4dc7a96e79 Create codeql-analysis.yml 2021-12-20 06:48:02 +00:00
silversword411
5a49a29110 docs - nginx proxy info 2021-12-20 06:48:02 +00:00
wh1te909
983a5c2034 bump versions 2021-12-20 05:47:00 +00:00
wh1te909
15829f04a3 update reqs 2021-12-20 05:46:12 +00:00
wh1te909
934618bc1c bump backup script version 2021-12-20 04:27:12 +00:00
Dan
2c5ec75b88 Merge pull request #881 from diskraider/patch-1
Change timeout method
2021-12-19 19:56:03 -08:00
diskraider
df11fd744f Change timeout method
The current timeout command results in the error "ERROR: Input redirection is not supported, exiting the process immediately."

Reusing the ping tool as a delay resolves this error: the batch script no longer relies on a user-interruptible timeout, but it still pauses for roughly 4-5 seconds.
2021-12-19 22:10:14 -05:00
wh1te909
4dba0fb43d update uninstall params 2021-12-20 00:48:55 +00:00
Dan
7a0d86b8dd Merge pull request #878 from silversword411/develop
docs - backup and silent uninstall tweaks
2021-12-19 16:42:53 -08:00
silversword411
a94cd98e0f docs - backup and silent uninstall tweaks 2021-12-19 13:48:25 -05:00
wh1te909
8e95e51edc reduce ram reqs 2021-12-19 00:36:32 +00:00
sadnub
6f1b00284a make post_update_tasks run on init container start 2021-12-17 18:25:53 -05:00
Dan
58549a6cac Merge pull request #875 from silversword411/develop
docs - mesh download multiple
2021-12-17 14:15:41 -08:00
silversword411
acc9a6118f docs - mesh download multiple 2021-12-17 17:04:50 -05:00
Dan
c7811e861c Merge pull request #873 from NiceGuyIT/pr-872
Fixes #872: backup.sh does not have EOL
2021-12-16 15:48:28 -08:00
Dan
55cf766ff0 Merge pull request #871 from silversword411/develop
docs - 3rd party Screenconnect AIO, update video, cron backups, sys reqs. Script update windows update settings removal
2021-12-16 15:48:13 -08:00
silversword411
a1eaf38324 docs code signing emphasis 2021-12-16 18:10:01 -05:00
silversword411
c6788092d3 docs - sys req info 2021-12-16 14:58:15 -05:00
David Randall
f89f74ef3f Fixes #872: backup.sh does not have EOL
Add EOL to backup.sh so CRON doesn't fail.
2021-12-16 14:22:43 -05:00
silversword411
3e40f02001 docs cron backups 2021-12-16 14:19:23 -05:00
silversword411
c169967c1b docs - video of updating server 2021-12-16 13:17:16 -05:00
silversword411
2830e7c569 script - tweak AUOptions revert 2021-12-16 12:57:09 -05:00
silversword411
415f08ba3a docs - 3rd party Screenconnect AIO 2021-12-16 12:27:24 -05:00
Dan
d726bcdc19 Merge pull request #866 from silversword411/develop
docs: gpo script update and wip script for api example
2021-12-14 12:12:41 -08:00
silversword411
f259c25a70 docs updating install agent script 2021-12-14 13:20:36 -05:00
silversword411
4db937cf1f Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2021-12-14 10:42:22 -05:00
silversword411
dad9d0660c script_wip 2021-12-14 10:42:15 -05:00
Dan
0c450a5bb2 Merge pull request #863 from silversword411/develop
docs faq log4j
2021-12-13 23:25:23 -08:00
silversword411
ef59819c01 Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2021-12-13 22:07:02 -05:00
silversword411
c651e7c84b docs faq log4j 2021-12-13 22:07:00 -05:00
Dan
20b8debb1c Merge pull request #862 from silversword411/develop
docs - updating index
2021-12-13 17:52:16 -08:00
Dan
dd5743f0a1 Merge pull request #861 from bc24fl/develop
Added Printer Restart Jobs Community Script
2021-12-13 17:51:57 -08:00
silversword
7da2b51fae docs - updating index 2021-12-13 20:12:09 -05:00
bc24fl
0236800392 Added Printer Restart Jobs Community Script 2021-12-13 19:52:10 -05:00
wh1te909
4f822878f7 sorting fixes #857 2021-12-13 22:50:16 +00:00
wh1te909
c2810e5fe5 update reqs 2021-12-13 22:49:26 +00:00
Dan
b89ba4b801 Merge pull request #860 from silversword411/develop
docs - howitallworks nats server service
2021-12-13 00:09:48 -08:00
silversword411
07c680b839 docs - howitallworks nats server service 2021-12-13 03:02:14 -05:00
Dan
fd50db4eab Merge pull request #856 from bc24fl/develop
Added Sophos Endpoint Install Community Script
2021-12-12 23:08:50 -08:00
Dan
0ee95b36a6 Merge pull request #853 from Data4ITBV/develop
Vulnerability fix
2021-12-12 23:08:25 -08:00
Dan
b8cf07149e Merge pull request #852 from silversword411/develop
docs - nginx proxy info
2021-12-12 23:01:05 -08:00
silversword411
1b699f1a87 Merge branch 'wh1te909:develop' into develop 2021-12-12 13:46:10 -05:00
Michael Maertzdorf
d3bfd238d3 Create SECURITY.md 2021-12-12 01:54:18 +01:00
Michael Maertzdorf
1f43abb3c8 Create devskim-analysis.yml 2021-12-12 01:44:04 +01:00
bc24fl
287c753e4a Added Sophos Endpoint Install Community Script 2021-12-11 13:34:53 -05:00
bc24fl
8a5374d31a Added Sophos Endpoint Install Community Script 2021-12-11 13:31:39 -05:00
Michael Maertzdorf
e219eaa934 Merge pull request #1 from Data4ITBV/dependabot/pip/api/tacticalrmm/django-3.2.10
Bump django from 3.2.9 to 3.2.10 in /api/tacticalrmm
2021-12-11 11:49:27 +01:00
Michael Maertzdorf
fd314480ca Create codeql-analysis.yml 2021-12-11 11:48:49 +01:00
dependabot[bot]
dd45396cf3 Bump django from 3.2.9 to 3.2.10 in /api/tacticalrmm
Bumps [django](https://github.com/django/django) from 3.2.9 to 3.2.10.
- [Release notes](https://github.com/django/django/releases)
- [Commits](https://github.com/django/django/compare/3.2.9...3.2.10)

---
updated-dependencies:
- dependency-name: django
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2021-12-11 10:48:42 +00:00
sadnub
1e2a56c5e9 Release 0.10.4 2021-12-10 21:59:35 -05:00
sadnub
8011773af4 bump versions 2021-12-10 19:12:45 -05:00
sadnub
ddc69c692e formatting 2021-12-10 19:09:52 -05:00
sadnub
df925c9744 fix script tests 2021-12-10 19:04:20 -05:00
sadnub
1726341aad remove script hashing since it was erroring out on some characters 2021-12-10 18:51:32 -05:00
sadnub
63b1ccc7a7 fix deleted community scripts not being removed from database 2021-12-10 18:50:51 -05:00
silversword411
ee5db31518 docs - nginx proxy info 2021-12-10 16:21:53 -05:00
Dan
e80397c857 Merge pull request #847 from silversword411/develop
Community scripts - adding software install report, parameters to task scheduler, bluescreen report
2021-12-09 09:25:03 -08:00
silversword411
81aa7ca1a4 community scripts - user enable/disable 2021-12-09 01:44:40 -05:00
silversword411
f0f7695890 community script - Windows Update revert to MS Auto managed 2021-12-09 01:16:49 -05:00
silversword411
e7e8ce2f7a community script - adding chocolatey list installed 2021-12-09 01:09:47 -05:00
silversword411
ba37a3f18d script library - task scheduler adding parameters 2021-12-09 01:00:41 -05:00
silversword411
60b11a7a5d community scripts - new user monitor add parameters 2021-12-09 00:56:13 -05:00
silversword411
29461c20a7 script library - Bluescreen Report 2021-12-09 00:50:43 -05:00
silversword411
2ff1f34543 Community scripts - adding software install report 2021-12-09 00:40:37 -05:00
wh1te909
b75d7f970f use getattr with a default for optional settings 2021-12-09 00:08:49 +00:00
wh1te909
204681f097 fix openfile limit with 1k+ agents 2021-12-09 00:06:33 +00:00
wh1te909
e239fe95a4 remove old checks from update script 2021-12-08 18:37:44 +00:00
Dan
0a101f061a Merge pull request #844 from silversword411/develop
adding docs tips n tricks and fixing PR #833
2021-12-07 20:03:07 -08:00
silversword411
f112a17afa Fixing community scripts and docs from PR #833 2021-12-07 22:29:37 -05:00
silversword411
54658a66d2 docs - Adding tips n tricks 2021-12-07 22:12:44 -05:00
sadnub
6b8f5a76e4 Merge pull request #833 from r3die/develop
Splashtop 3rd party integration docs and script
2021-12-07 19:47:52 -05:00
Dan
623a5d338d Merge pull request #842 from silversword411/develop
Adding Repo to Help menu
2021-12-06 20:30:38 -08:00
silversword411
9c5565cfd5 Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2021-12-06 22:50:12 -05:00
silversword411
722f2efaee Adding Github repo to Help menu 2021-12-06 22:49:45 -05:00
Dan
4928264204 Merge pull request #841 from silversword411/develop
docs update: mgmt commands
2021-12-04 01:14:20 -08:00
silversword411
12d62ddc2a docs - adding mgmt commands for docker 2021-12-03 11:13:44 -05:00
wh1te909
da54e97217 Release 0.10.3 2021-12-02 08:19:38 +00:00
wh1te909
9c0993dac8 bump version 2021-12-02 07:50:52 +00:00
wh1te909
175486b7c4 fix bug where reboot_required field was not being updated when agent didn't have a patch policy and setting was set to 'inherit' 2021-12-02 01:39:41 +00:00
wh1te909
4760a287f6 update docs 2021-12-01 19:53:37 +00:00
wh1te909
0237b48c87 update reqs 2021-12-01 19:37:53 +00:00
Dan
95c9f22e6c Merge pull request #837 from silversword411/develop
docs tweak
2021-12-01 09:31:22 -08:00
silversword411
9b001219d5 docs tweak 2021-12-01 12:27:52 -05:00
Dan
6ff15efc7b Merge pull request #835 from silversword411/develop
docs outbound firewall rules
2021-11-30 16:07:56 -08:00
silversword411
6fe1dccc7e docs outbound firewall rules 2021-11-30 18:15:49 -05:00
sadnub
1c80f6f3fa Don't allow script arg variable assignment to callable attributes. Fixes #726 2021-11-29 22:18:05 -05:00
sadnub
54d3177fdd also don't include callable attributes with variable substitutions on alert scripts 2021-11-29 22:15:07 -05:00
r3die
a24ad245d2 splashtop 3rd party integration docs and script
splashtop 3rd party integration docs and script
2021-11-29 15:20:29 -08:00
wh1te909
f38cfdcadf fix test script 2021-11-29 18:51:18 +00:00
Dan
92e4ad8ccd Merge pull request #830 from silversword411/develop
docs updates
2021-11-29 09:20:14 -08:00
silversword411
3f3ab088d2 docs - adding bulk delete 2021-11-29 09:47:38 -05:00
sadnub
2c2cbaa175 formatting 2021-11-28 21:08:41 -05:00
sadnub
911b6bf863 fix sorting process cpu percentage. Fixes #831 2021-11-28 21:07:09 -05:00
sadnub
31462cab64 fix tests and also check for the correct script hash 2021-11-28 20:59:21 -05:00
silversword411
1ee35da62d docs updates 2021-11-28 15:31:37 -05:00
sadnub
edf4815595 make script file encoding consistent. utf-8 2021-11-28 13:23:47 -05:00
sadnub
06ccee5d18 add script hash field and calculate hash on script changes. Also removed storing scripts in DB as base64 strings. Should fix #634 2021-11-28 13:23:10 -05:00
sadnub
d5ad85725f fix duplicate package in dev requirements 2021-11-27 23:26:44 -05:00
sadnub
4d5bddb413 rework script form and add syntax field 2021-11-27 22:59:18 -05:00
Dan
2f4da7c381 Merge pull request #829 from ssteeltm/develop
Update unsupported_scripts.md
2021-11-26 16:11:50 -08:00
Dan
8b845fce03 Merge pull request #826 from NiceGuyIT/docs-howitallworks-services
Document server services and configuration
2021-11-26 16:11:24 -08:00
Dan
9fd15c38a9 Merge pull request #825 from silversword411/develop
scripts and docs
2021-11-26 16:11:00 -08:00
silversword411
ec1573d01f Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2021-11-26 18:05:02 -05:00
silversword411
92ec1cc9e7 docs - add howitallworks to index 2021-11-26 18:05:00 -05:00
Hugo Sampaio
8b2f9665ce Update unsupported_scripts.md
Added info about how I run rmm behind Apache Proxy
(discord Hugo)
2021-11-26 17:25:42 -03:00
silversword411
cb388a5a78 scripts - adding demo server scripts 2021-11-26 14:35:33 -05:00
David Randall
7f4389ae08 Docs: Server services
Document the server services and configuration.
2021-11-25 16:59:19 -05:00
silversword411
76d71beaa2 script_wip addition 2021-11-25 15:06:15 -05:00
silversword411
31bb9c2197 docs - tips and tricks add mesh connection logs 2021-11-25 08:59:21 -05:00
wh1te909
6a2cd5c45a reduce celery memory usage and optimize a query 2021-11-25 06:20:06 +00:00
Dan
520632514b Merge pull request #823 from silversword411/develop
docs - video embed #1 and getting started started
2021-11-24 15:25:46 -08:00
silversword411
f998b28d0b docs - numbering fix 2021-11-24 17:34:43 -05:00
silversword411
1a6587e9e6 Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2021-11-24 17:13:09 -05:00
silversword411
9b4b729d19 undo vscode spellcheck 2021-11-24 17:12:58 -05:00
silversword411
e80345295e script library - adding security audit 2021-11-24 14:06:44 -05:00
silversword411
026c259a2e added vscode spellcheck, shouldn't go public 2021-11-24 11:32:04 -05:00
silversword411
63474c2269 community scripts - adding syntax to defender enable 2021-11-24 11:30:03 -05:00
silversword411
faa1a9312f scripts - adding parameter check 2021-11-24 11:25:15 -05:00
silversword411
23fa0726d5 docs - v1 of getting started guide 2021-11-24 10:10:46 -05:00
silversword411
22210eaf7d Merge branch 'wh1te909:develop' into develop 2021-11-23 23:40:47 -05:00
silversword411
dcd8bee676 docs - video embed 2021-11-23 23:40:29 -05:00
silversword411
06f0fa8f0e Revert "docs - video embed testing"
This reverts commit 6d0f9e2cd5.
2021-11-23 23:37:54 -05:00
silversword411
6d0f9e2cd5 docs - video embed testing 2021-11-23 23:33:25 -05:00
sadnub
732afdb65d move custom fields to tab in edit agent modal 2021-11-23 21:35:11 -05:00
sadnub
1a9e8742f7 remove the need to type agent name to delete agents in dashboard 2021-11-23 21:35:11 -05:00
sadnub
b8eda37339 Fix setting alert template when policy assignment changes 2021-11-23 21:35:11 -05:00
sadnub
5107db6169 add drf_spectacular to dev requirements 2021-11-23 21:35:11 -05:00
wh1te909
2c8f207454 add mgmt command to bulk delete agents 2021-11-22 20:26:10 +00:00
wh1te909
489bc9c3b3 optimize some queries 2021-11-22 17:24:48 +00:00
wh1te909
514713e883 don't log swagger 2021-11-22 17:23:38 +00:00
wh1te909
17cc0cd09c forgot to check core settings fixes #816 2021-11-22 17:18:58 +00:00
Dan
4475df1295 Merge pull request #815 from tremor021/develop
Update Defender script
2021-11-22 09:06:04 -08:00
Dan
fdad267cfd Merge pull request #814 from silversword411/develop
docs updates
2021-11-22 09:05:19 -08:00
silversword411
3684fc80f0 docs - spellchecking 2021-11-22 08:07:49 -05:00
silversword411
e97a5fef94 script library - adding syntax to tooltip helper 2021-11-22 00:14:19 -05:00
silversword411
de2972631f docs - tips about running scripts syntax 2021-11-21 23:02:47 -05:00
tremor021
e5b8fd67c8 Update Defender script 2021-11-22 02:14:11 +01:00
silversword411
5fade89e2d docs - fixing install and restore docs to eliminate confusion 2021-11-21 15:18:18 -05:00
wh1te909
2eefedadb3 Release 0.10.2 2021-11-21 02:24:29 +00:00
wh1te909
e63d7a0b8a bump version 2021-11-21 02:24:07 +00:00
wh1te909
2a1b1849fa fix nats-api not working in docker 2021-11-21 02:02:29 +00:00
wh1te909
0461cb7f19 update docs 2021-11-20 22:21:02 +00:00
Dan
0932e0be03 Merge pull request #811 from silversword411/develop
docs updates
2021-11-20 14:17:11 -08:00
silversword411
4638ac9474 docs - reiterating no root and backup 2021-11-20 12:59:42 -05:00
silversword411
d8d7255029 docs - filter tips 2021-11-20 12:50:10 -05:00
wh1te909
fa05276c3f black 2021-11-19 20:00:22 +00:00
silversword411
e50a5d51d8 docs - troubleshooting enhancements 2021-11-19 14:14:12 -05:00
sadnub
c03ba78587 make swagger views optional 2021-11-19 13:58:38 -05:00
wh1te909
ff07c69e7d Release 0.10.1 2021-11-19 17:41:12 +00:00
wh1te909
735b84b26d bump version 2021-11-19 17:39:14 +00:00
sadnub
8dd069ad67 push models.py file update for scripts 2021-11-19 12:13:20 -05:00
sadnub
1857e68003 change filename db field to not be required 2021-11-19 10:46:40 -05:00
wh1te909
ff2508382a Release 0.10.0 2021-11-19 08:37:39 +00:00
wh1te909
9cb952b116 bump version 2021-11-19 08:04:25 +00:00
wh1te909
105e8089bb trigger an agent update task after rmm update 2021-11-19 07:25:32 +00:00
wh1te909
730f37f247 add debian 11 support and update reqs 2021-11-19 06:58:18 +00:00
wh1te909
284716751f update docs for new service 2021-11-19 06:32:15 +00:00
sadnub
8d0db699bf remove dynamic agent options function 2021-11-18 21:11:53 -05:00
Dan
53cf1cae58 Merge pull request #807 from silversword411/develop
docs and script adds
2021-11-18 12:32:22 -08:00
silversword411
307e4719e0 wip script - user enable/disabling 2021-11-18 12:23:52 -05:00
silversword411
5effae787a Community scripts - Fixing Drive Volume check 2021-11-18 10:45:25 -05:00
silversword411
6532be0b52 docs - reverting content tabs 2021-11-18 10:05:01 -05:00
silversword411
fb225a5347 community scripts add - Win11 check 2021-11-18 05:24:22 -05:00
silversword411
b83830a45e docs moving position 2021-11-18 05:20:12 -05:00
wh1te909
ca28288c33 add missing onMounted 2021-11-18 08:42:26 +00:00
wh1te909
b6f8d9cb25 change drive color based on percent closes #802 2021-11-18 07:46:17 +00:00
Dan
9cad0f11e5 Merge pull request #803 from silversword411/develop
Scripts and docs
2021-11-17 11:24:29 -08:00
silversword411
807be08566 docs - adding how to invalidate all auth tokens 2021-11-17 10:43:52 -05:00
sadnub
67f6a985f8 increase font size on script editors and fix import error 2021-11-16 21:16:03 -05:00
sadnub
f87d54ae8d move imports for styles and select light or dark theme for editor depending on if dark mode is enabled 2021-11-16 20:45:42 -05:00
sadnub
d894bf7271 move to ace text editor. Fixes script line wrap issue and more features. Fixes #712 2021-11-16 20:19:46 -05:00
sadnub
56e0e5cace formatting 2021-11-15 21:17:28 -05:00
sadnub
685084e784 add agent counts to client/site tooltip. Closes #426 2021-11-15 21:16:18 -05:00
sadnub
cbeec5a973 swagger api documentation start 2021-11-15 17:50:59 -05:00
sadnub
3fff56bcd7 cleanup script manager and snippet modals and move agent select dropdown for test script to script form 2021-11-15 17:50:26 -05:00
silversword411
c504c23eec docs add mesh token recovery 2021-11-15 16:47:18 -05:00
silversword411
16dae5a655 docs Updating index and adding permissions and considerations for choosing install type 2021-11-15 15:42:02 -05:00
silversword411
e512c5ae7d Merge branch 'wh1te909:develop' into develop 2021-11-15 15:39:56 -05:00
silversword411
094078b928 scripts wip adding disk status 2021-11-15 15:26:07 -05:00
wh1te909
34fc3ff919 fix issue where emails/sms were not being sent if recipients in global settings were empty, even if they were present in an alert template recipients 2021-11-15 00:05:42 +00:00
wh1te909
4391f48e78 add some tests 2021-11-14 19:52:21 +00:00
wh1te909
775608a3c0 update reqs 2021-11-14 19:51:28 +00:00
Dan
b326228901 Merge pull request #800 from silversword411/develop
script library - fixing choco
2021-11-14 11:27:40 -08:00
silversword411
b2e98173a8 script library - fixing choco 2021-11-14 13:04:37 -05:00
wh1te909
65c9b7952c have task runs appear in history tab closes #716 2021-11-14 09:18:32 +00:00
wh1te909
b9dc9e7d62 speed up some views 2021-11-14 09:15:43 +00:00
Dan
ce178d0354 Merge pull request #799 from silversword411/develop
Community scripts: Adding syntax for tooltip
2021-11-14 00:54:15 -08:00
sadnub
a3ff6efebc remove nats-api from api dev image 2021-11-13 16:56:50 -05:00
wh1te909
6a9bc56723 update for new service 2021-11-13 21:30:01 +00:00
wh1te909
c9ac158d25 Merge branch 'develop' of https://github.com/wh1te909/tacticalrmm into develop 2021-11-13 20:18:18 +00:00
silversword411
4b937a0fe8 Community scripts: Adding syntax for tooltip 2021-11-13 14:10:05 -05:00
sadnub
405bf26ac5 formatting 2021-11-13 13:40:26 -05:00
sadnub
5dcda0e0a0 allow q-select slots in tactical-dropdown. Fix info icon on run script dialog 2021-11-13 13:39:38 -05:00
sadnub
83e9b60308 when filtering agents add category to the side of options 2021-11-13 12:55:09 -05:00
sadnub
10b40b4730 script syntax highlighting. Resolves #702 2021-11-13 12:55:09 -05:00
wh1te909
79d6d804ef stringify errors before saving to db 2021-11-13 08:31:45 +00:00
wh1te909
e9c7b6d8f8 fix tests 2021-11-13 01:25:25 +00:00
wh1te909
4fcfbfb3f4 more go rework 2021-11-13 00:45:28 +00:00
wh1te909
30cde14ed3 update go mod 2021-11-13 00:36:57 +00:00
wh1te909
cf76e6f538 remove deprecated endpoint, add another deprecation 2021-11-13 00:33:52 +00:00
wh1te909
d0f600ec8d filter_software now handled by agent 2021-11-13 00:32:44 +00:00
wh1te909
675f9e956f remove some celery tasks now handled by agent/go 2021-11-13 00:32:03 +00:00
wh1te909
381605a6bb remove tests 2021-11-13 00:31:06 +00:00
wh1te909
0fce66062b remove some utils now handled by agent 2021-11-13 00:30:45 +00:00
wh1te909
747cc9e5da remove tasks 2021-11-13 00:27:34 +00:00
sadnub
25a1b464da Fix block inheritance on client/site 2021-11-10 22:45:25 -05:00
Dan
3b6738b547 Merge pull request #798 from silversword411/develop
Wip script additions
2021-11-10 11:12:28 -08:00
silversword411
fc93e3e97f Merge branch 'wh1te909:develop' into develop 2021-11-10 11:01:34 -05:00
silversword411
0edbb13d48 scripts wip revert windows update to default settings 2021-11-10 11:00:44 -05:00
silversword411
673687341c scripts wip adding 2021-11-10 09:03:17 -05:00
wh1te909
3969208942 Release 0.9.2 2021-11-09 06:11:51 +00:00
wh1te909
3fa89b58df bump version 2021-11-09 06:11:33 +00:00
wh1te909
a43a9c8543 remove old wording 2021-11-09 05:54:17 +00:00
sadnub
45deda4dea fix saving agents 2021-11-08 22:25:07 -05:00
sadnub
6ec46f02a9 fix deleting automation task #791 2021-11-08 21:17:41 -05:00
sadnub
d643c17ff1 fix remote background height and adjust other items to fit to screen 2021-11-08 21:13:01 -05:00
wh1te909
e5de89c6b4 more loading fixes 2021-11-09 01:23:01 +00:00
wh1te909
c21e7c632d fix debug log loading 2021-11-09 01:06:11 +00:00
wh1te909
6ae771682a fix task loading fixes #790 2021-11-08 22:00:40 +00:00
wh1te909
bf2075b902 pending actions loading fixes #789 2021-11-08 21:56:57 +00:00
765 changed files with 28982 additions and 73673 deletions

View File

@@ -23,7 +23,7 @@ POSTGRES_USER=postgres
POSTGRES_PASS=postgrespass
# DEV SETTINGS
APP_PORT=80
APP_PORT=443
API_PORT=80
HTTP_PROTOCOL=https
DOCKER_NETWORK=172.21.0.0/24

View File

@@ -1,4 +1,11 @@
FROM python:3.9.6-slim
# pulls community scripts from git repo
FROM python:3.10.6-slim AS GET_SCRIPTS_STAGE
RUN apt-get update && \
apt-get install -y --no-install-recommends git && \
git clone https://github.com/amidaware/community-scripts.git /community-scripts
FROM python:3.10.6-slim
ENV TACTICAL_DIR /opt/tactical
ENV TACTICAL_READY_FILE ${TACTICAL_DIR}/tmp/tactical.ready
@@ -10,12 +17,14 @@ ENV PYTHONUNBUFFERED=1
EXPOSE 8000 8383 8005
RUN apt-get update && \
apt-get install -y build-essential
RUN groupadd -g 1000 tactical && \
useradd -u 1000 -g 1000 tactical
# Copy nats-api file
COPY natsapi/bin/nats-api /usr/local/bin/
RUN chmod +x /usr/local/bin/nats-api
# copy community scripts
COPY --from=GET_SCRIPTS_STAGE /community-scripts /community-scripts
# Copy dev python reqs
COPY .devcontainer/requirements.txt /

View File

@@ -1,19 +0,0 @@
version: '3.4'
services:
api-dev:
image: api-dev
build:
context: .
dockerfile: ./api.dockerfile
command: ["sh", "-c", "pip install debugpy -t /tmp && python /tmp/debugpy --wait-for-client --listen 0.0.0.0:5678 manage.py runserver 0.0.0.0:8000 --nothreading --noreload"]
ports:
- 8000:8000
- 5678:5678
volumes:
- tactical-data-dev:/opt/tactical
- ..:/workspace:cached
networks:
dev:
aliases:
- tactical-backend

View File

@@ -5,10 +5,11 @@ services:
container_name: trmm-api-dev
image: api-dev
restart: always
user: 1000:1000
build:
context: ..
dockerfile: .devcontainer/api.dockerfile
command: ["tactical-api"]
command: [ "tactical-api" ]
environment:
API_PORT: ${API_PORT}
ports:
@@ -18,29 +19,15 @@ services:
- ..:/workspace:cached
networks:
dev:
aliases:
aliases:
- tactical-backend
app-dev:
container_name: trmm-app-dev
image: node:14-alpine
restart: always
command: /bin/sh -c "npm install npm@latest -g && npm install && npm run serve -- --host 0.0.0.0 --port ${APP_PORT}"
working_dir: /workspace/web
volumes:
- ..:/workspace:cached
ports:
- "8080:${APP_PORT}"
networks:
dev:
aliases:
- tactical-frontend
# nats
nats-dev:
container_name: trmm-nats-dev
image: ${IMAGE_REPO}tactical-nats:${VERSION}
restart: always
user: 1000:1000
environment:
API_HOST: ${API_HOST}
API_PORT: ${API_PORT}
@@ -61,7 +48,8 @@ services:
container_name: trmm-meshcentral-dev
image: ${IMAGE_REPO}tactical-meshcentral:${VERSION}
restart: always
environment:
user: 1000:1000
environment:
MESH_HOST: ${MESH_HOST}
MESH_USER: ${MESH_USER}
MESH_PASS: ${MESH_PASS}
@@ -84,6 +72,7 @@ services:
container_name: trmm-mongodb-dev
image: mongo:4.4
restart: always
user: 1000:1000
environment:
MONGO_INITDB_ROOT_USERNAME: ${MONGODB_USER}
MONGO_INITDB_ROOT_PASSWORD: ${MONGODB_PASSWORD}
@@ -101,7 +90,7 @@ services:
image: postgres:13-alpine
restart: always
environment:
POSTGRES_DB: tacticalrmm
POSTGRES_DB: ${POSTGRES_DB}
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASS}
volumes:
@@ -115,9 +104,10 @@ services:
redis-dev:
container_name: trmm-redis-dev
restart: always
command: redis-server --appendonly yes
user: 1000:1000
command: redis-server
image: redis:6.0-alpine
volumes:
volumes:
- redis-data-dev:/data
networks:
dev:
@@ -128,7 +118,7 @@ services:
container_name: trmm-init-dev
image: api-dev
restart: on-failure
command: ["tactical-init-dev"]
command: [ "tactical-init-dev" ]
environment:
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASS: ${POSTGRES_PASS}
@@ -140,6 +130,7 @@ services:
TRMM_PASS: ${TRMM_PASS}
HTTP_PROTOCOL: ${HTTP_PROTOCOL}
APP_PORT: ${APP_PORT}
POSTGRES_DB: ${POSTGRES_DB}
depends_on:
- postgres-dev
- meshcentral-dev
@@ -147,14 +138,18 @@ services:
- dev
volumes:
- tactical-data-dev:/opt/tactical
- mesh-data-dev:/meshcentral-data
- redis-data-dev:/redis/data
- mongo-dev-data:/mongo/data/db
- ..:/workspace:cached
# container for celery worker service
celery-dev:
container_name: trmm-celery-dev
image: api-dev
command: ["tactical-celery-dev"]
command: [ "tactical-celery-dev" ]
restart: always
user: 1000:1000
networks:
- dev
volumes:
@@ -168,8 +163,9 @@ services:
celerybeat-dev:
container_name: trmm-celerybeat-dev
image: api-dev
command: ["tactical-celerybeat-dev"]
command: [ "tactical-celerybeat-dev" ]
restart: always
user: 1000:1000
networks:
- dev
volumes:
@@ -183,8 +179,9 @@ services:
websockets-dev:
container_name: trmm-websockets-dev
image: api-dev
command: ["tactical-websockets-dev"]
command: [ "tactical-websockets-dev" ]
restart: always
user: 1000:1000
networks:
dev:
aliases:
@@ -201,6 +198,7 @@ services:
container_name: trmm-nginx-dev
image: ${IMAGE_REPO}tactical-nginx:${VERSION}
restart: always
user: 1000:1000
environment:
APP_HOST: ${APP_HOST}
API_HOST: ${API_HOST}
@@ -214,29 +212,17 @@ services:
dev:
ipv4_address: ${DOCKER_NGINX_IP}
ports:
- "80:80"
- "443:443"
- "80:8080"
- "443:4443"
volumes:
- tactical-data-dev:/opt/tactical
mkdocs-dev:
container_name: trmm-mkdocs-dev
image: api-dev
restart: always
command: ["tactical-mkdocs-dev"]
ports:
- "8005:8005"
volumes:
- ..:/workspace:cached
networks:
- dev
volumes:
tactical-data-dev:
postgres-data-dev:
mongo-dev-data:
mesh-data-dev:
redis-data-dev:
tactical-data-dev: null
postgres-data-dev: null
mongo-dev-data: null
mesh-data-dev: null
redis-data-dev: null
networks:
dev:

View File

@@ -9,17 +9,18 @@ set -e
: "${POSTGRES_USER:=tactical}"
: "${POSTGRES_PASS:=tactical}"
: "${POSTGRES_DB:=tacticalrmm}"
: "${MESH_CONTAINER:=tactical-meshcentral}"
: "${MESH_SERVICE:=tactical-meshcentral}"
: "${MESH_WS_URL:=ws://${MESH_SERVICE}:4443}"
: "${MESH_USER:=meshcentral}"
: "${MESH_PASS:=meshcentralpass}"
: "${MESH_HOST:=tactical-meshcentral}"
: "${API_HOST:=tactical-backend}"
: "${APP_HOST:=tactical-frontend}"
: "${REDIS_HOST:=tactical-redis}"
: "${HTTP_PROTOCOL:=http}"
: "${APP_PORT:=8080}"
: "${API_PORT:=8000}"
: "${CERT_PRIV_PATH:=${TACTICAL_DIR}/certs/privkey.pem}"
: "${CERT_PUB_PATH:=${TACTICAL_DIR}/certs/fullchain.pem}"
# Add python venv to path
export PATH="${VIRTUAL_ENV}/bin:$PATH"
@@ -37,7 +38,7 @@ function django_setup {
sleep 5
done
until (echo > /dev/tcp/"${MESH_CONTAINER}"/443) &> /dev/null; do
until (echo > /dev/tcp/"${MESH_SERVICE}"/4443) &> /dev/null; do
echo "waiting for meshcentral container to be ready..."
sleep 5
done
@@ -56,10 +57,12 @@ DEBUG = True
DOCKER_BUILD = True
CERT_FILE = '/opt/tactical/certs/fullchain.pem'
KEY_FILE = '/opt/tactical/certs/privkey.pem'
SWAGGER_ENABLED = True
SCRIPTS_DIR = '${WORKSPACE_DIR}/scripts'
CERT_FILE = '${CERT_PUB_PATH}'
KEY_FILE = '${CERT_PRIV_PATH}'
SCRIPTS_DIR = '/community-scripts'
ALLOWED_HOSTS = ['${API_HOST}', '*']
@@ -82,6 +85,7 @@ MESH_USERNAME = '${MESH_USER}'
MESH_SITE = 'https://${MESH_HOST}'
MESH_TOKEN_KEY = '${MESH_TOKEN}'
REDIS_HOST = '${REDIS_HOST}'
MESH_WS_URL = '${MESH_WS_URL}'
ADMIN_ENABLED = True
EOF
)"
@@ -89,6 +93,7 @@ EOF
echo "${localvars}" > ${WORKSPACE_DIR}/api/tacticalrmm/tacticalrmm/local_settings.py
# run migrations and init scripts
"${VIRTUAL_ENV}"/bin/python manage.py pre_update_tasks
"${VIRTUAL_ENV}"/bin/python manage.py migrate --no-input
"${VIRTUAL_ENV}"/bin/python manage.py collectstatic --no-input
"${VIRTUAL_ENV}"/bin/python manage.py initial_db_setup
@@ -96,7 +101,10 @@ EOF
"${VIRTUAL_ENV}"/bin/python manage.py load_chocos
"${VIRTUAL_ENV}"/bin/python manage.py load_community_scripts
"${VIRTUAL_ENV}"/bin/python manage.py reload_nats
"${VIRTUAL_ENV}"/bin/python manage.py create_natsapi_conf
"${VIRTUAL_ENV}"/bin/python manage.py create_installer_user
"${VIRTUAL_ENV}"/bin/python manage.py post_update_tasks
# create super user
echo "from accounts.models import User; User.objects.create_superuser('${TRMM_USER}', 'admin@example.com', '${TRMM_PASS}') if not User.objects.filter(username='${TRMM_USER}').exists() else 0;" | python manage.py shell
@@ -109,22 +117,28 @@ if [ "$1" = 'tactical-init-dev' ]; then
test -f "${TACTICAL_READY_FILE}" && rm "${TACTICAL_READY_FILE}"
mkdir -p /meshcentral-data
mkdir -p ${TACTICAL_DIR}/tmp
mkdir -p ${TACTICAL_DIR}/certs
mkdir -p /mongo/data/db
mkdir -p /redis/data
touch /meshcentral-data/.initialized && chown -R 1000:1000 /meshcentral-data
touch ${TACTICAL_DIR}/tmp/.initialized && chown -R 1000:1000 ${TACTICAL_DIR}
touch ${TACTICAL_DIR}/certs/.initialized && chown -R 1000:1000 ${TACTICAL_DIR}/certs
touch /mongo/data/db/.initialized && chown -R 1000:1000 /mongo/data/db
touch /redis/data/.initialized && chown -R 1000:1000 /redis/data
mkdir -p ${TACTICAL_DIR}/api/tacticalrmm/private/exe
mkdir -p ${TACTICAL_DIR}/api/tacticalrmm/private/log
touch ${TACTICAL_DIR}/api/tacticalrmm/private/log/django_debug.log
# setup Python virtual env and install dependencies
! test -e "${VIRTUAL_ENV}" && python -m venv ${VIRTUAL_ENV}
"${VIRTUAL_ENV}"/bin/python -m pip install --upgrade pip
"${VIRTUAL_ENV}"/bin/pip install --no-cache-dir setuptools wheel
"${VIRTUAL_ENV}"/bin/pip install --no-cache-dir -r /requirements.txt
django_setup
# create .env file for frontend
webenv="$(cat << EOF
PROD_URL = "${HTTP_PROTOCOL}://${API_HOST}"
DEV_URL = "${HTTP_PROTOCOL}://${API_HOST}"
APP_URL = "https://${APP_HOST}"
DOCKER_BUILD = 1
EOF
)"
echo "${webenv}" | tee "${WORKSPACE_DIR}"/web/.env > /dev/null
# chown everything to tactical user
chown -R "${TACTICAL_USER}":"${TACTICAL_USER}" "${WORKSPACE_DIR}"
chown -R "${TACTICAL_USER}":"${TACTICAL_USER}" "${TACTICAL_DIR}"
@@ -153,8 +167,3 @@ if [ "$1" = 'tactical-websockets-dev' ]; then
check_tactical_ready
"${VIRTUAL_ENV}"/bin/daphne tacticalrmm.asgi:application --port 8383 -b 0.0.0.0
fi
if [ "$1" = 'tactical-mkdocs-dev' ]; then
cd "${WORKSPACE_DIR}/docs"
"${VIRTUAL_ENV}"/bin/mkdocs serve
fi

View File

@@ -1,37 +1,3 @@
# To ensure app dependencies are ported from your virtual environment/host machine into your container, run 'pip freeze > requirements.txt' in the terminal to overwrite this file
asyncio-nats-client
celery
channels
channels_redis
django-ipware
Django
django-cors-headers
django-rest-knox
djangorestframework
loguru
msgpack
psycopg2-binary
pycparser
pycryptodome
pyotp
pyparsing
pytz
qrcode
redis
twilio
packaging
validators
websockets
black
Werkzeug
django-extensions
coverage
coveralls
model_bakery
mkdocs
mkdocs-material
pymdown-extensions
Pygments
mypy
pysnooper
isort
-r /workspace/api/tacticalrmm/requirements.txt
-r /workspace/api/tacticalrmm/requirements-dev.txt
-r /workspace/api/tacticalrmm/requirements-test.txt

73 .github/workflows/ci-tests.yml vendored Normal file
View File

@@ -0,0 +1,73 @@
name: Tests CI
on:
push:
branches:
- "*"
pull_request:
branches:
- "*"
jobs:
test:
runs-on: ubuntu-latest
name: Tests
strategy:
matrix:
python-version: ["3.10.6"]
steps:
- uses: actions/checkout@v3
- uses: harmon758/postgresql-action@v1
with:
postgresql version: "14"
postgresql db: "pipeline"
postgresql user: "pipeline"
postgresql password: "pipeline123456"
- name: Setup Python ${{ matrix.python-version }}
uses: actions/setup-python@v3
with:
python-version: ${{ matrix.python-version }}
- name: Install redis
run: |
sudo apt update
sudo apt install -y redis
redis-server --version
- name: Install requirements
working-directory: api/tacticalrmm
run: |
python --version
SETTINGS_FILE="tacticalrmm/settings.py"
SETUPTOOLS_VER=$(grep "^SETUPTOOLS_VER" "$SETTINGS_FILE" | awk -F'[= "]' '{print $5}')
WHEEL_VER=$(grep "^WHEEL_VER" "$SETTINGS_FILE" | awk -F'[= "]' '{print $5}')
pip install --upgrade pip
pip install setuptools==${SETUPTOOLS_VER} wheel==${WHEEL_VER}
pip install -r requirements.txt -r requirements-test.txt
- name: Codestyle black
working-directory: api
run: |
black --exclude migrations/ --check tacticalrmm
if [ $? -ne 0 ]; then
exit 1
fi
- name: Run django tests
env:
GHACTIONS: "yes"
working-directory: api/tacticalrmm
run: |
pytest
if [ $? -ne 0 ]; then
exit 1
fi
- uses: codecov/codecov-action@v3
with:
directory: ./api/tacticalrmm
files: ./api/tacticalrmm/coverage.xml
verbose: true

70 .github/workflows/codeql-analysis.yml vendored Normal file
View File

@@ -0,0 +1,70 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ develop ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ develop ]
schedule:
- cron: '19 14 * * 6'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go', 'python' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://git.io/codeql-language-support
steps:
- name: Checkout repository
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1

View File

@@ -1,22 +0,0 @@
name: Deploy Docs
on:
push:
branches:
- master
defaults:
run:
working-directory: docs
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.x
- run: pip install --upgrade pip
- run: pip install --upgrade setuptools wheel
- run: pip install mkdocs mkdocs-material pymdown-extensions
- run: mkdocs gh-deploy --force

7 .gitignore vendored
View File

@@ -49,3 +49,10 @@ nats-rmm.conf
docs/site/
reset_db.sh
run_go_cmd.py
nats-api.conf
ignore/
coverage.lcov
daphne.sock.lock
.pytest_cache
coverage.xml
setup_dev.yml

23 .vscode/extensions.json vendored Normal file
View File

@@ -0,0 +1,23 @@
{
"recommendations": [
// frontend
"dbaeumer.vscode-eslint",
"esbenp.prettier-vscode",
"editorconfig.editorconfig",
"vue.volar",
"wayou.vscode-todo-highlight",
// python
"matangover.mypy",
"ms-python.python",
// golang
"golang.go"
],
"unwantedRecommendations": [
"octref.vetur",
"hookyqr.beautify",
"dbaeumer.jshint",
"ms-vscode.vscode-typescript-tslint-plugin"
]
}

137 .vscode/settings.json vendored
View File

@@ -1,70 +1,73 @@
{
"python.pythonPath": "api/tacticalrmm/env/bin/python",
"python.languageServer": "Pylance",
"python.analysis.extraPaths": [
"api/tacticalrmm",
"api/env",
],
"python.analysis.diagnosticSeverityOverrides": {
"reportUnusedImport": "error",
"reportDuplicateImport": "error",
},
"python.analysis.memory.keepLibraryAst": true,
"python.linting.mypyEnabled": true,
"python.analysis.typeCheckingMode": "basic",
"python.formatting.provider": "black",
"editor.formatOnSave": true,
"vetur.format.defaultFormatter.js": "prettier",
"vetur.format.defaultFormatterOptions": {
"prettier": {
"semi": true,
"printWidth": 120,
"tabWidth": 2,
"useTabs": false,
"arrowParens": "avoid",
}
},
"vetur.format.options.tabSize": 2,
"vetur.format.options.useTabs": false,
"python.defaultInterpreterPath": "api/tacticalrmm/env/bin/python",
"python.languageServer": "Pylance",
"python.analysis.extraPaths": ["api/tacticalrmm", "api/env"],
"python.analysis.diagnosticSeverityOverrides": {
"reportUnusedImport": "error",
"reportDuplicateImport": "error",
"reportGeneralTypeIssues": "none"
},
"python.analysis.typeCheckingMode": "basic",
"python.linting.enabled": true,
"python.linting.mypyEnabled": true,
"python.linting.mypyArgs": [
"--ignore-missing-imports",
"--follow-imports=silent",
"--show-column-numbers",
"--strict"
],
"python.linting.ignorePatterns": [
"**/site-packages/**/*.py",
".vscode/*.py",
"**env/**"
],
"python.formatting.provider": "black",
"mypy.targets": ["api/tacticalrmm"],
"mypy.runUsingActiveInterpreter": true,
"editor.bracketPairColorization.enabled": true,
"editor.guides.bracketPairs": true,
"editor.formatOnSave": true,
"files.associations": {
"**/ansible/**/*.yml": "ansible",
"**/docker/**/docker-compose*.yml": "dockercompose"
},
"files.watcherExclude": {
"files.watcherExclude": {
"files.watcherExclude": {
"**/.git/objects/**": true,
"**/.git/subtree-cache/**": true,
"**/node_modules/": true,
"/node_modules/**": true,
"**/env/": true,
"/env/**": true,
"**/__pycache__": true,
"/__pycache__/**": true,
"**/.cache": true,
"**/.eggs": true,
"**/.ipynb_checkpoints": true,
"**/.mypy_cache": true,
"**/.pytest_cache": true,
"**/*.egg-info": true,
"**/*.feather": true,
"**/*.parquet*": true,
"**/*.pyc": true,
"**/*.zip": true
},
},
"go.useLanguageServer": true,
"[go]": {
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.organizeImports": false,
},
"editor.snippetSuggestions": "none",
},
"[go.mod]": {
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.organizeImports": true,
},
},
"gopls": {
"usePlaceholders": true,
"completeUnimported": true,
"staticcheck": true,
"**/.git/objects/**": true,
"**/.git/subtree-cache/**": true,
"**/node_modules/": true,
"/node_modules/**": true,
"**/env/": true,
"/env/**": true,
"**/__pycache__": true,
"/__pycache__/**": true,
"**/.cache": true,
"**/.eggs": true,
"**/.ipynb_checkpoints": true,
"**/.mypy_cache": true,
"**/.pytest_cache": true,
"**/*.egg-info": true,
"**/*.feather": true,
"**/*.parquet*": true,
"**/*.pyc": true,
"**/*.zip": true
}
}
},
"go.useLanguageServer": true,
"[go]": {
"editor.codeActionsOnSave": {
"source.organizeImports": false
},
"editor.snippetSuggestions": "none"
},
"[go.mod]": {
"editor.codeActionsOnSave": {
"source.organizeImports": true
}
},
"gopls": {
"usePlaceholders": true,
"completeUnimported": true,
"staticcheck": true
}
}

23 .vscode/tasks.json vendored
View File

@@ -1,23 +0,0 @@
{
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
"version": "2.0.0",
"tasks": [
{
"label": "docker debug",
"type": "shell",
"command": "docker-compose",
"args": [
"-p",
"trmm",
"-f",
".devcontainer/docker-compose.yml",
"-f",
".devcontainer/docker-compose.debug.yml",
"up",
"-d",
"--build"
]
}
]
}

21 LICENSE
View File

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2019-present wh1te909
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

74 LICENSE.md Normal file
View File

@@ -0,0 +1,74 @@
### Tactical RMM License Version 1.0
Text of license:&emsp;&emsp;&emsp;Copyright © 2022 AmidaWare LLC. All rights reserved.<br>
&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&nbsp;Amending the text of this license is not permitted.
Trade Mark:&emsp;&emsp;&emsp;&emsp;"Tactical RMM" is a trade mark of AmidaWare LLC.
Licensor:&emsp;&emsp;&emsp;&emsp;&emsp;&nbsp;&nbsp;AmidaWare LLC of 1968 S Coast Hwy PMB 3847 Laguna Beach, CA, USA.
Licensed Software:&emsp;&nbsp;The software known as Tactical RMM Version v0.12.0 (and all subsequent releases and versions) and the Tactical RMM Agent v2.0.0 (and all subsequent releases and versions).
### 1. Preamble
The Licensed Software is designed to facilitate the remote monitoring and management (RMM) of networks, systems, servers, computers and other devices. The Licensed Software is made available primarily for use by organisations and managed service providers for monitoring and management purposes.
The Tactical RMM License is not an open-source software license. This license contains certain restrictions on the use of the Licensed Software. For example the functionality of the Licensed Software may not be made available as part of a SaaS (Software-as-a-Service) service or product to provide a commercial or for-profit service without the express prior permission of the Licensor.
### 2. License Grant
Permission is hereby granted, free of charge, on a non-exclusive basis, to copy, modify, create derivative works and use the Licensed Software in source and binary forms subject to the following terms and conditions. No additional rights will be implied under this license.
* The hosting and use of the Licensed Software to monitor and manage in-house networks/systems and/or customer networks/systems is permitted.
This license does not allow the functionality of the Licensed Software (whether in whole or in part) or a modified version of the Licensed Software or a derivative work to be used or otherwise made available as part of any other commercial or for-profit service, including, without limitation, any of the following:
* a service allowing third parties to interact remotely through a computer network;
* as part of a SaaS service or product;
* as part of the provision of a managed hosting service or product;
* the offering of installation and/or configuration services;
* the offer for sale, distribution or sale of any service or product (whether or not branded as Tactical RMM).
The prior written approval of AmidaWare LLC must be obtained for all commercial use and/or for-profit service use of the (i) Licensed Software (whether in whole or in part), (ii) a modified version of the Licensed Software and/or (iii) a derivative work.
The terms of this license apply to all copies of the Licensed Software (including modified versions) and derivative works.
All use of the Licensed Software must immediately cease if use breaches the terms of this license.
### 3. Derivative Works
If a derivative work is created which is based on or otherwise incorporates all or any part of the Licensed Software, and the derivative work is made available to any other person, the complete corresponding machine readable source code (including all changes made to the Licensed Software) must accompany the derivative work and be made publicly available online.
### 4. Copyright Notice
The following copyright notice shall be included in all copies of the Licensed Software:
&emsp;&emsp;&emsp;Copyright © 2022 AmidaWare LLC.
&emsp;&emsp;&emsp;Licensed under the Tactical RMM License Version 1.0 (the “License”).<br>
&emsp;&emsp;&emsp;You may only use the Licensed Software in accordance with the License.<br>
&emsp;&emsp;&emsp;A copy of the License is available at: https://license.tacticalrmm.com
### 5. Disclaimer of Warranty
THE LICENSED SOFTWARE IS PROVIDED "AS IS". TO THE FULLEST EXTENT PERMISSIBLE AT LAW ALL CONDITIONS, WARRANTIES OR OTHER TERMS OF ANY KIND WHICH MIGHT HAVE EFFECT OR BE IMPLIED OR INCORPORATED, WHETHER BY STATUTE, COMMON LAW OR OTHERWISE ARE HEREBY EXCLUDED, INCLUDING THE CONDITIONS, WARRANTIES OR OTHER TERMS AS TO SATISFACTORY QUALITY AND/OR MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, THE USE OF REASONABLE SKILL AND CARE AND NON-INFRINGEMENT.
### 6. Limits of Liability
THE FOLLOWING EXCLUSIONS SHALL APPLY TO THE FULLEST EXTENT PERMISSIBLE AT LAW. NEITHER THE AUTHORS NOR THE COPYRIGHT HOLDERS SHALL IN ANY CIRCUMSTANCES HAVE ANY LIABILITY FOR ANY CLAIM, LOSSES, DAMAGES OR OTHER LIABILITY, WHETHER THE SAME ARE SUFFERED DIRECTLY OR INDIRECTLY OR ARE IMMEDIATE OR CONSEQUENTIAL, AND WHETHER THE SAME ARISE IN CONTRACT, TORT OR DELICT (INCLUDING NEGLIGENCE) OR OTHERWISE HOWSOEVER ARISING FROM, OUT OF OR IN CONNECTION WITH THE LICENSED SOFTWARE OR THE USE OR INABILITY TO USE THE LICENSED SOFTWARE OR OTHER DEALINGS IN THE LICENSED SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH LOSS OR DAMAGE. THE FOREGOING EXCLUSIONS SHALL INCLUDE, WITHOUT LIMITATION, LIABILITY FOR ANY LOSSES OR DAMAGES WHICH FALL WITHIN ANY OF THE FOLLOWING CATEGORIES: SPECIAL, EXEMPLARY, OR INCIDENTAL LOSS OR DAMAGE, LOSS OF PROFITS, LOSS OF ANTICIPATED SAVINGS, LOSS OF BUSINESS OPPORTUNITY, LOSS OF GOODWILL, AND LOSS OR CORRUPTION OF DATA.
### 7. Termination
This license shall terminate with immediate effect if there is a material breach of any of its terms.
### 8. No partnership, agency or joint venture
Nothing in this license agreement is intended to, or shall be deemed to, establish any partnership or joint venture or any relationship of agency between AmidaWare LLC and any other person.
### 9. No endorsement
The names of the authors and/or the copyright holders must not be used to promote or endorse any products or services which are in any way derived from the Licensed Software without prior written consent.
### 10. Trademarks
No permission is granted to use the trademark “Tactical RMM” or any other trade name, trademark, service mark or product name of AmidaWare LLC except to the extent necessary to comply with the notice requirements in Section 4 (Copyright Notice).
### 11. Entire agreement
This license contains the whole agreement relating to its subject matter.
### 12. Severance
If any provision or part-provision of this license is or becomes invalid, illegal or unenforceable, it shall be deemed deleted, but that shall not affect the validity and enforceability of the rest of this license.
### 13. Acceptance of these terms
The terms and conditions of this license are accepted by copying, downloading, installing, redistributing, or otherwise using the Licensed Software.

View File

@@ -1,19 +1,18 @@
# Tactical RMM
[![Build Status](https://dev.azure.com/dcparsi/Tactical%20RMM/_apis/build/status/wh1te909.tacticalrmm?branchName=develop)](https://dev.azure.com/dcparsi/Tactical%20RMM/_build/latest?definitionId=4&branchName=develop)
[![Coverage Status](https://coveralls.io/repos/github/wh1te909/tacticalrmm/badge.png?branch=develop&kill_cache=1)](https://coveralls.io/github/wh1te909/tacticalrmm?branch=develop)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
![CI Tests](https://github.com/amidaware/tacticalrmm/actions/workflows/ci-tests.yml/badge.svg?branch=develop)
[![codecov](https://codecov.io/gh/amidaware/tacticalrmm/branch/develop/graph/badge.svg?token=8ACUPVPTH6)](https://codecov.io/gh/amidaware/tacticalrmm)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/python/black)
Tactical RMM is a remote monitoring & management tool for Windows computers, built with Django and Vue.\
It uses an [agent](https://github.com/wh1te909/rmmagent) written in golang and integrates with [MeshCentral](https://github.com/Ylianst/MeshCentral)
Tactical RMM is a remote monitoring & management tool, built with Django and Vue.\
It uses an [agent](https://github.com/amidaware/rmmagent) written in golang and integrates with [MeshCentral](https://github.com/Ylianst/MeshCentral)
# [LIVE DEMO](https://rmm.tacticalrmm.io/)
# [LIVE DEMO](https://demo.tacticalrmm.com/)
Demo database resets every hour. A lot of features are disabled for obvious reasons due to the nature of this app.
### [Discord Chat](https://discord.gg/upGTkWp)
### [Documentation](https://wh1te909.github.io/tacticalrmm/)
### [Documentation](https://docs.tacticalrmm.com)
## Features
@@ -29,10 +28,13 @@ Demo database resets every hour. A lot of features are disabled for obvious reas
- Remote software installation via chocolatey
- Software and hardware inventory
## Windows versions supported
## Windows agent versions supported
- Windows 7, 8.1, 10, Server 2008R2, 2012R2, 2016, 2019
- Windows 7, 8.1, 10, 11, Server 2008R2, 2012R2, 2016, 2019, 2022
## Linux agent versions supported
- Any distro with systemd which includes but is not limited to: Debian (10, 11), Ubuntu x86_64 (18.04, 20.04, 22.04), Synology 7, centos, freepbx and more!
## Installation / Backup / Restore / Usage
### Refer to the [documentation](https://wh1te909.github.io/tacticalrmm/)
### Refer to the [documentation](https://docs.tacticalrmm.com)

9 SECURITY.md Normal file
View File

@@ -0,0 +1,9 @@
# Security Policy
## Supported Versions
[Latest](https://github.com/amidaware/tacticalrmm/releases/latest) release
## Reporting a Vulnerability
https://docs.tacticalrmm.com/security

3 ansible/README.md Normal file
View File

@@ -0,0 +1,3 @@
### tacticalrmm ansible WIP
ansible role to setup a Debian 11 VM for tacticalrmm local development

View File

@@ -0,0 +1,40 @@
---
user: "tactical"
python_ver: "3.10.6"
go_ver: "1.18.5"
backend_repo: "https://github.com/amidaware/tacticalrmm.git"
frontend_repo: "https://github.com/amidaware/tacticalrmm-web.git"
scripts_repo: "https://github.com/amidaware/community-scripts.git"
backend_dir: "/opt/trmm"
frontend_dir: "/opt/trmm-web"
scripts_dir: "/opt/trmm-community-scripts"
trmm_dir: "{{ backend_dir }}/api/tacticalrmm/tacticalrmm"
mesh_dir: "/opt/meshcentral"
settings_file: "{{ trmm_dir }}/settings.py"
local_settings_file: "{{ trmm_dir }}/local_settings.py"
fullchain_dest: /etc/ssl/certs/fullchain.pem
privkey_dest: /etc/ssl/certs/privkey.pem
base_pkgs:
- build-essential
- curl
- wget
- dirmngr
- gnupg
- openssl
- gcc
- g++
- make
- ca-certificates
- git
python_pkgs:
- zlib1g-dev
- libncurses5-dev
- libgdbm-dev
- libnss3-dev
- libssl-dev
- libreadline-dev
- libffi-dev
- libsqlite3-dev
- libbz2-dev

View File

@@ -0,0 +1,31 @@
worker_rlimit_nofile 1000000;
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 4096;
}
http {
sendfile on;
server_tokens off;
tcp_nopush on;
types_hash_max_size 2048;
server_names_hash_bucket_size 64;
include /etc/nginx/mime.types;
default_type application/octet-stream;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
ssl_ciphers EECDH+AESGCM:EDH+AESGCM;
ssl_ecdh_curve secp384r1;
ssl_stapling on;
ssl_stapling_verify on;
add_header X-Content-Type-Options nosniff;
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
gzip on;
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}

View File

@@ -0,0 +1,2 @@
deb https://nginx.org/packages/debian/ bullseye nginx
deb-src https://nginx.org/packages/debian/ bullseye nginx

View File

@@ -0,0 +1,20 @@
" This file loads the default vim options at the beginning and prevents
" that they are being loaded again later. All other options that will be set,
" are added, or overwrite the default settings. Add as many options as you
" whish at the end of this file.
" Load the defaults
source $VIMRUNTIME/defaults.vim
" Prevent the defaults from being loaded again later, if the user doesn't
" have a local vimrc (~/.vimrc)
let skip_defaults_vim = 1
" Set more options (overwrites settings from /usr/share/vim/vim80/defaults.vim)
" Add as many options as you whish
" Set the mouse mode to 'r'
if has('mouse')
set mouse=r
endif

View File

@@ -0,0 +1,633 @@
---
- name: set mouse mode for vim
tags: vim
become: yes
ansible.builtin.copy:
src: vimrc.local
dest: /etc/vim/vimrc.local
owner: "root"
group: "root"
mode: "0644"
- name: set max_user_watches
tags: sysctl
become: yes
ansible.builtin.lineinfile:
path: /etc/sysctl.conf
line: fs.inotify.max_user_watches=524288
- name: reload sysctl
tags: sysctl
become: yes
ansible.builtin.command:
cmd: sysctl -p
- name: install base packages
tags: base
become: yes
ansible.builtin.apt:
pkg: "{{ item }}"
state: present
update_cache: yes
with_items:
- "{{ base_pkgs }}"
- name: download and install golang
tags: golang
become: yes
ansible.builtin.unarchive:
src: "https://go.dev/dl/go{{ go_ver }}.linux-amd64.tar.gz"
dest: /usr/local
remote_src: yes
- name: add golang to path
become: yes
tags: golang
ansible.builtin.copy:
dest: /etc/profile.d/golang.sh
content: "PATH=$PATH:/usr/local/go/bin"
- name: install python prereqs
tags: python
become: yes
ansible.builtin.apt:
pkg: "{{ item }}"
state: present
with_items:
- "{{ python_pkgs }}"
- name: get cpu core count
tags: python
ansible.builtin.command: nproc
register: numprocs
- name: Create python tmpdir
tags: python
ansible.builtin.tempfile:
state: directory
suffix: python
register: python_tmp
- name: download and extract python
tags: python
ansible.builtin.unarchive:
src: "https://www.python.org/ftp/python/{{ python_ver }}/Python-{{ python_ver }}.tgz"
dest: "{{ python_tmp.path }}"
remote_src: yes
- name: compile python
tags: python
ansible.builtin.shell:
chdir: "{{ python_tmp.path }}/Python-{{ python_ver }}"
cmd: |
./configure --enable-optimizations
make -j {{ numprocs.stdout }}
- name: alt install python
tags: python
become: yes
ansible.builtin.shell:
chdir: "{{ python_tmp.path }}/Python-{{ python_ver }}"
cmd: |
make altinstall
- name: install redis
tags: redis
become: yes
ansible.builtin.apt:
pkg: redis
state: present
- name: create postgres repo
tags: postgres
become: yes
ansible.builtin.copy:
content: "deb http://apt.postgresql.org/pub/repos/apt bullseye-pgdg main"
dest: /etc/apt/sources.list.d/pgdg.list
owner: root
group: root
mode: "0644"
- name: import postgres repo signing key
tags: postgres
become: yes
ansible.builtin.apt_key:
url: https://www.postgresql.org/media/keys/ACCC4CF8.asc
state: present
- name: install postgresql
tags: postgres
become: yes
ansible.builtin.apt:
pkg: postgresql-14
state: present
update_cache: yes
- name: ensure postgres enabled and started
tags: postgres
become: yes
ansible.builtin.service:
name: postgresql
enabled: yes
state: started
- name: setup database
tags: postgres
become: yes
become_user: postgres
ansible.builtin.shell:
cmd: |
psql -c "CREATE DATABASE tacticalrmm"
psql -c "CREATE USER {{ db_user }} WITH PASSWORD '{{ db_passwd }}'"
psql -c "ALTER ROLE {{ db_user }} SET client_encoding TO 'utf8'"
psql -c "ALTER ROLE {{ db_user }} SET default_transaction_isolation TO 'read committed'"
psql -c "ALTER ROLE {{ db_user }} SET timezone TO 'UTC'"
psql -c "ALTER ROLE {{ db_user }} CREATEDB"
psql -c "GRANT ALL PRIVILEGES ON DATABASE tacticalrmm TO {{ db_user }}"
- name: create repo dirs
become: yes
tags: git
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ user }}"
group: "{{ user }}"
mode: "0755"
with_items:
- "{{ backend_dir }}"
- "{{ frontend_dir }}"
- "{{ scripts_dir }}"
- name: git clone repos
tags: git
ansible.builtin.git:
repo: "{{ item.repo }}"
dest: "{{ item.dest }}"
version: "{{ item.version }}"
with_items:
- {
repo: "{{ backend_repo }}",
dest: "{{ backend_dir }}",
version: develop,
}
- {
repo: "{{ frontend_repo }}",
dest: "{{ frontend_dir }}",
version: develop,
}
- { repo: "{{ scripts_repo }}", dest: "{{ scripts_dir }}", version: main }
- name: get nats_server_ver
tags: nats
ansible.builtin.shell: grep "^NATS_SERVER_VER" {{ settings_file }} | awk -F'[= "]' '{print $5}'
register: nats_server_ver
- name: Create nats tmpdir
tags: nats
ansible.builtin.tempfile:
state: directory
suffix: nats
register: nats_tmp
- name: download and extract nats
tags: nats
ansible.builtin.unarchive:
src: "https://github.com/nats-io/nats-server/releases/download/v{{ nats_server_ver.stdout }}/nats-server-v{{ nats_server_ver.stdout }}-linux-amd64.tar.gz"
dest: "{{ nats_tmp.path }}"
remote_src: yes
- name: install nats
tags: nats
become: yes
ansible.builtin.copy:
remote_src: yes
src: "{{ nats_tmp.path }}/nats-server-v{{ nats_server_ver.stdout }}-linux-amd64/nats-server"
dest: /usr/local/bin/nats-server
owner: "{{ user }}"
group: "{{ user }}"
mode: "0755"
- name: Create nodejs tmpdir
tags: nodejs
ansible.builtin.tempfile:
state: directory
suffix: nodejs
register: nodejs_tmp
- name: download nodejs setup
tags: nodejs
ansible.builtin.get_url:
url: https://deb.nodesource.com/setup_16.x
dest: "{{ nodejs_tmp.path }}/setup_node.sh"
mode: "0755"
- name: run node setup script
tags: nodejs
become: yes
ansible.builtin.command:
cmd: "{{ nodejs_tmp.path }}/setup_node.sh"
- name: install nodejs
tags: nodejs
become: yes
ansible.builtin.apt:
pkg: nodejs
state: present
update_cache: yes
- name: update npm
tags: nodejs
become: yes
ansible.builtin.shell:
cmd: npm install -g npm
- name: install quasar cli
tags: quasar
become: yes
ansible.builtin.shell:
cmd: npm install -g @quasar/cli
- name: install frontend
tags: quasar
ansible.builtin.shell:
chdir: "{{ frontend_dir }}"
cmd: npm install
- name: add quasar env
tags: quasar
ansible.builtin.template:
src: quasar.env.j2
dest: "{{ frontend_dir }}/.env"
owner: "{{ user }}"
group: "{{ user }}"
mode: "0644"
- name: remove tempdirs
tags: cleanup
become: yes
ignore_errors: yes
ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
- "{{ nats_tmp.path }}"
- "{{ python_tmp.path }}"
- "{{ nodejs_tmp.path }}"
- name: deploy fullchain
tags: certs
become: yes
ansible.builtin.copy:
src: "{{ fullchain_src }}"
dest: "{{ fullchain_dest }}"
owner: "{{ user }}"
group: "{{ user }}"
mode: "0440"
- name: deploy privkey
tags: certs
become: yes
ansible.builtin.copy:
src: "{{ privkey_src }}"
dest: "{{ privkey_dest }}"
owner: "{{ user }}"
group: "{{ user }}"
mode: "0440"
- name: import nginx signing key
tags: nginx
become: yes
ansible.builtin.apt_key:
url: https://nginx.org/packages/keys/nginx_signing.key
state: present
- name: add nginx repo
tags: nginx
become: yes
ansible.builtin.copy:
src: nginx.repo
dest: /etc/apt/sources.list.d/nginx.list
owner: "root"
group: "root"
mode: "0644"
- name: install nginx
tags: nginx
become: yes
ansible.builtin.apt:
pkg: nginx
state: present
update_cache: yes
- name: set nginx default conf
tags: nginx
become: yes
ansible.builtin.copy:
src: nginx-default.conf
dest: /etc/nginx/nginx.conf
owner: "root"
group: "root"
mode: "0644"
- name: create nginx dirs
become: yes
tags: nginx
ansible.builtin.file:
state: directory
path: "{{ item }}"
mode: "0755"
with_items:
- /etc/nginx/sites-available
- /etc/nginx/sites-enabled
- name: deploy nginx sites
become: yes
tags: nginx
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: "0644"
owner: root
group: root
with_items:
- { src: backend.nginx.j2, dest: /etc/nginx/sites-available/backend.conf }
- { src: mesh.nginx.j2, dest: /etc/nginx/sites-available/mesh.conf }
- name: enable nginx sites
become: yes
tags: nginx
ansible.builtin.file:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: "0644"
owner: root
group: root
state: link
with_items:
- {
src: /etc/nginx/sites-available/backend.conf,
dest: /etc/nginx/sites-enabled/backend.conf,
}
- {
src: /etc/nginx/sites-available/mesh.conf,
dest: /etc/nginx/sites-enabled/mesh.conf,
}
- name: ensure nginx enabled and restarted
tags: nginx
become: yes
ansible.builtin.service:
name: nginx
enabled: yes
state: restarted
- name: copy nats-api bin
tags: nats-api
become: yes
ansible.builtin.copy:
remote_src: yes
src: "{{ backend_dir }}/natsapi/bin/nats-api"
dest: /usr/local/bin/nats-api
owner: "{{ user }}"
group: "{{ user }}"
mode: "0755"
- name: get setuptools_ver
tags: pip
ansible.builtin.shell: grep "^SETUPTOOLS_VER" {{ settings_file }} | awk -F'[= "]' '{print $5}'
register: setuptools_ver
- name: get wheel_ver
tags: pip
ansible.builtin.shell: grep "^WHEEL_VER" {{ settings_file }} | awk -F'[= "]' '{print $5}'
register: wheel_ver
- name: setup virtual env
tags: pip
ansible.builtin.shell:
chdir: "{{ backend_dir }}/api"
cmd: python3.10 -m venv env
- name: update pip to latest
tags: pip
ansible.builtin.pip:
virtualenv: "{{ backend_dir }}/api/env"
name: pip
state: latest
- name: install setuptools and wheel
tags: pip
ansible.builtin.pip:
virtualenv: "{{ backend_dir }}/api/env"
name: "{{ item }}"
with_items:
- "setuptools=={{ setuptools_ver.stdout }}"
- "wheel=={{ wheel_ver.stdout }}"
- name: install python packages
tags: pip
ansible.builtin.pip:
virtualenv: "{{ backend_dir }}/api/env"
chdir: "{{ backend_dir }}/api/tacticalrmm"
requirements: "{{ item }}"
with_items:
- requirements.txt
- requirements-dev.txt
- requirements-test.txt
- name: deploy django local settings
tags: django
ansible.builtin.template:
src: local_settings.j2
dest: "{{ local_settings_file }}"
mode: "0644"
owner: "{{ user }}"
group: "{{ user }}"
- name: setup django
tags: django
ansible.builtin.shell:
chdir: "{{ backend_dir }}/api/tacticalrmm"
cmd: |
. ../env/bin/activate
python manage.py migrate --no-input
python manage.py collectstatic --no-input
python manage.py create_natsapi_conf
python manage.py load_chocos
python manage.py load_community_scripts
echo "from accounts.models import User; User.objects.create_superuser('{{ django_user }}', '{{ github_email }}', '{{ django_password }}') if not User.objects.filter(username='{{ django_user }}').exists() else 0;" | python manage.py shell
python manage.py create_installer_user
- name: deploy services
tags: services
become: yes
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: "0644"
owner: "root"
group: "root"
with_items:
- { src: nats-api.systemd.j2, dest: /etc/systemd/system/nats-api.service }
- { src: nats-server.systemd.j2, dest: /etc/systemd/system/nats.service }
- { src: mesh.systemd.j2, dest: /etc/systemd/system/meshcentral.service }
- name: import mongodb repo signing key
tags: mongo
become: yes
ansible.builtin.apt_key:
url: https://www.mongodb.org/static/pgp/server-4.4.asc
state: present
- name: setup mongodb repo
tags: mongo
become: yes
ansible.builtin.copy:
content: "deb https://repo.mongodb.org/apt/debian buster/mongodb-org/4.4 main"
dest: /etc/apt/sources.list.d/mongodb-org-4.4.list
owner: root
group: root
mode: "0644"
- name: install mongodb
tags: mongo
become: yes
ansible.builtin.apt:
pkg: mongodb-org
state: present
update_cache: yes
- name: ensure mongodb enabled and started
tags: mongo
become: yes
ansible.builtin.service:
name: mongod
enabled: yes
state: started
- name: get mesh_ver
tags: mesh
ansible.builtin.shell: grep "^MESH_VER" {{ settings_file }} | awk -F'[= "]' '{print $5}'
register: mesh_ver
- name: create meshcentral data directory
tags: mesh
become: yes
ansible.builtin.file:
path: "{{ mesh_dir }}/meshcentral-data"
state: directory
owner: "{{ user }}"
group: "{{ user }}"
mode: "0755"
- name: install meshcentral
tags: mesh
ansible.builtin.command:
chdir: "{{ mesh_dir }}"
cmd: "npm install meshcentral@{{ mesh_ver.stdout }}"
- name: deploy mesh config
tags: mesh
ansible.builtin.template:
src: mesh.cfg.j2
dest: "{{ mesh_dir }}/meshcentral-data/config.json"
mode: "0644"
owner: "{{ user }}"
group: "{{ user }}"
- name: start meshcentral
tags: mesh
become: yes
ansible.builtin.systemd:
name: meshcentral.service
state: started
enabled: yes
daemon_reload: yes
- name: wait for meshcentral to be ready
tags: mesh
uri:
url: "https://{{ mesh }}"
return_content: yes
validate_certs: yes
status_code: 200
register: mesh_status
until: mesh_status.status == 200
retries: 20
delay: 3
- name: get meshcentral login token key
tags: mesh_key
ansible.builtin.command:
chdir: "{{ mesh_dir }}"
cmd: node node_modules/meshcentral --logintokenkey
register: mesh_token_key
- name: add mesh key to django settings file
tags: mesh_key
ansible.builtin.lineinfile:
path: "{{ local_settings_file }}"
line: 'MESH_TOKEN_KEY = "{{ mesh_token_key.stdout }}"'
- name: stop meshcentral service
tags: mesh_user
become: yes
ansible.builtin.service:
name: meshcentral.service
state: stopped
- name: create mesh user
tags: mesh_user
ansible.builtin.shell:
chdir: "{{ mesh_dir }}"
cmd: |
node node_modules/meshcentral --createaccount {{ mesh_user }} --pass {{ mesh_password }} --email {{ github_email }}
node node_modules/meshcentral --adminaccount {{ mesh_user }}
- name: start meshcentral service
tags: mesh_user
become: yes
ansible.builtin.service:
name: meshcentral.service
state: started
- name: wait for meshcentral to be ready
tags: mesh_user
uri:
url: "https://{{ mesh }}"
return_content: yes
validate_certs: yes
status_code: 200
register: mesh_status
until: mesh_status.status == 200
retries: 20
delay: 3
- name: create mesh device group
tags: mesh_user
ansible.builtin.shell:
chdir: "{{ mesh_dir }}"
cmd: |
node node_modules/meshcentral/meshctrl.js --url wss://{{ mesh }}:443 --loginuser {{ mesh_user }} --loginpass {{ mesh_password }} AddDeviceGroup --name TacticalRMM
- name: finish up django
tags: mesh_user
ansible.builtin.shell:
chdir: "{{ backend_dir }}/api/tacticalrmm"
cmd: |
. ../env/bin/activate
python manage.py initial_db_setup
python manage.py reload_nats
- name: restart services
tags: services
become: yes
ansible.builtin.systemd:
daemon_reload: yes
enabled: yes
state: restarted
name: "{{ item }}.service"
with_items:
- nats
- nats-api
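A side note on the version-extraction tasks above (nats_server_ver, setuptools_ver, wheel_ver, mesh_ver): the awk field separator `[= "]` splits on `=`, space and double quote, so for a settings line written in the usual `NAME = "x.y.z"` form the bare version lands in field 5. A minimal sketch (the version value below is an assumed example, not taken from settings.py):

```bash
# Demonstrates why '{print $5}' yields the version string.
echo 'NATS_SERVER_VER = "2.8.4"' | awk -F'[= "]' '{print $5}'
# prints: 2.8.4
```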

View File

@@ -0,0 +1,20 @@
server {
listen 443 ssl reuseport;
listen [::]:443 ssl;
server_name {{ api }};
client_max_body_size 300M;
ssl_certificate {{ fullchain_dest }};
ssl_certificate_key {{ privkey_dest }};
location ~ ^/natsws {
proxy_pass http://127.0.0.1:9235;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header X-Forwarded-Host $host:$server_port;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}

View File

@@ -0,0 +1,21 @@
SECRET_KEY = "{{ django_secret }}"
DEBUG = True
ALLOWED_HOSTS = ['{{ api }}']
ADMIN_URL = "admin/"
CORS_ORIGIN_ALLOW_ALL = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'tacticalrmm',
'USER': '{{ db_user }}',
'PASSWORD': '{{ db_passwd }}',
'HOST': 'localhost',
'PORT': '5432',
}
}
REDIS_HOST = "localhost"
ADMIN_ENABLED = True
CERT_FILE = "{{ fullchain_src }}"
KEY_FILE = "{{ privkey_src }}"
MESH_USERNAME = "{{ mesh_user }}"
MESH_SITE = "https://{{ mesh }}"

View File

@@ -0,0 +1,33 @@
{
"settings": {
"Cert": "{{ mesh }}",
"MongoDb": "mongodb://127.0.0.1:27017",
"MongoDbName": "meshcentral",
"WANonly": true,
"Minify": 1,
"Port": 4430,
"AliasPort": 443,
"RedirPort": 800,
"AllowLoginToken": true,
"AllowFraming": true,
"AgentPong": 300,
"AllowHighQualityDesktop": true,
"TlsOffload": "127.0.0.1",
"agentCoreDump": false,
"Compression": true,
"WsCompression": true,
"AgentWsCompression": true,
"MaxInvalidLogin": { "time": 5, "count": 5, "coolofftime": 30 }
},
"domains": {
"": {
"Title": "Tactical RMM",
"Title2": "Tactical RMM",
"NewAccounts": false,
"CertUrl": "https://{{ mesh }}:443/",
"GeoLocation": true,
"CookieIpCheck": false,
"mstsc": true
}
}
}

View File

@@ -0,0 +1,22 @@
server {
listen 443 ssl;
listen [::]:443 ssl;
proxy_send_timeout 330s;
proxy_read_timeout 330s;
server_name {{ mesh }};
ssl_certificate {{ fullchain_dest }};
ssl_certificate_key {{ privkey_dest }};
ssl_session_cache shared:WEBSSL:10m;
location / {
proxy_pass http://127.0.0.1:4430/;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header X-Forwarded-Host $host:$server_port;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}

View File

@@ -0,0 +1,17 @@
[Unit]
Description=MeshCentral Server
After=network.target mongod.service nginx.service
[Service]
Type=simple
LimitNOFILE=1000000
ExecStart=/usr/bin/node node_modules/meshcentral
Environment=NODE_ENV=production
WorkingDirectory={{ mesh_dir }}
User={{ user }}
Group={{ user }}
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,14 @@
[Unit]
Description=TacticalRMM Nats Api
After=nats.service
[Service]
Type=simple
ExecStart=/usr/local/bin/nats-api -config {{ backend_dir }}/api/tacticalrmm/nats-api.conf
User={{ user }}
Group={{ user }}
Restart=always
RestartSec=5s
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,18 @@
[Unit]
Description=NATS Server
After=network.target
[Service]
PrivateTmp=true
Type=simple
ExecStart=/usr/local/bin/nats-server -c {{ backend_dir }}/api/tacticalrmm/nats-rmm.conf
ExecReload=/usr/bin/kill -s HUP $MAINPID
ExecStop=/usr/bin/kill -s SIGINT $MAINPID
User={{ user }}
Group={{ user }}
Restart=always
RestartSec=5s
LimitNOFILE=1000000
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,4 @@
DEV_URL = "http://{{ api }}:8000"
DEV_HOST = "{{ rmm }}"
DEV_PORT = "8080"
USE_HTTPS = false

View File

@@ -0,0 +1,20 @@
---
- hosts: "{{ target }}"
vars:
ansible_user: tactical
fullchain_src: /path/to/fullchain.pem
privkey_src: /path/to/privkey.pem
api: "api.example.com"
rmm: "rmm.example.com"
mesh: "mesh.example.com"
github_username: "changeme"
github_email: "changeme@example.com"
mesh_user: "changeme"
mesh_password: "changeme"
db_user: "changeme"
db_passwd: "changeme"
django_secret: "changeme"
django_user: "changeme"
django_password: "changeme"
roles:
- trmm_dev

View File

@@ -1,24 +1,15 @@
[run]
source = .
[report]
show_missing = True
include = *.py
omit =
tacticalrmm/asgi.py
tacticalrmm/wsgi.py
manage.py
*/__pycache__/*
*/env/*
*/management/*
*/migrations/*
*/static/*
manage.py
*/local_settings.py
*/apps.py
*/admin.py
*/celery.py
*/wsgi.py
*/settings.py
*/baker_recipes.py
*/urls.py
*/tests.py
*/test.py
checks/utils.py
/usr/local/lib/*
**/migrations/*
**/test*.py
[report]
show_missing = True

View File

@@ -1,7 +1,7 @@
from django.contrib import admin
from rest_framework.authtoken.admin import TokenAdmin
from .models import User, Role
from .models import Role, User
admin.site.register(User)
TokenAdmin.raw_id_fields = ("user",)

View File

@@ -1,19 +1,23 @@
import uuid
from django.core.management.base import BaseCommand
from accounts.models import User
class Command(BaseCommand):
help = "Creates the installer user"
def handle(self, *args, **kwargs):
def handle(self, *args, **kwargs): # type: ignore
self.stdout.write("Checking if installer user has been created...")
if User.objects.filter(is_installer_user=True).exists():
self.stdout.write("Installer user already exists")
return
User.objects.create_user( # type: ignore
User.objects.create_user(
username=uuid.uuid4().hex,
is_installer_user=True,
password=User.objects.make_random_password(60), # type: ignore
password=User.objects.make_random_password(60),
block_dashboard_login=True,
)
self.stdout.write("Installer user has been created")

View File

@@ -6,7 +6,7 @@ from knox.models import AuthToken
class Command(BaseCommand):
help = "Deletes all knox web tokens"
def handle(self, *args, **kwargs):
def handle(self, *args, **kwargs): # type: ignore
# only delete web tokens, not any generated by the installer or deployments
dont_delete = djangotime.now() + djangotime.timedelta(hours=23)
tokens = AuthToken.objects.exclude(deploytokens__isnull=False).filter(

View File

@@ -1,4 +1,5 @@
from django.core.management.base import BaseCommand
from accounts.models import User

View File

@@ -1,7 +1,7 @@
# Generated by Django 3.2.1 on 2021-05-11 02:33
from django.db import migrations, models
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):

View File

@@ -1,7 +1,7 @@
# Generated by Django 3.2.6 on 2021-09-03 00:54
from django.db import migrations, models
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):

View File

@@ -1,7 +1,7 @@
# Generated by Django 3.2.6 on 2021-10-10 02:49
from django.db import migrations, models
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):

View File

@@ -0,0 +1,18 @@
# Generated by Django 3.2.12 on 2022-04-02 15:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0030_auto_20211104_0221'),
]
operations = [
migrations.AddField(
model_name='user',
name='date_format',
field=models.CharField(blank=True, max_length=30, null=True),
),
]

View File

@@ -1,26 +1,17 @@
from typing import Optional
from django.contrib.auth.models import AbstractUser
from django.core.cache import cache
from django.db import models
from django.db.models.fields import CharField, DateTimeField
from logs.models import BaseAuditModel
AGENT_DBLCLICK_CHOICES = [
("editagent", "Edit Agent"),
("takecontrol", "Take Control"),
("remotebg", "Remote Background"),
("urlaction", "URL Action"),
]
AGENT_TBL_TAB_CHOICES = [
("server", "Servers"),
("workstation", "Workstations"),
("mixed", "Mixed"),
]
CLIENT_TREE_SORT_CHOICES = [
("alphafail", "Move failing clients to the top"),
("alpha", "Sort alphabetically"),
]
from tacticalrmm.constants import (
ROLE_CACHE_PREFIX,
AgentDblClick,
AgentTableTabs,
ClientTreeSort,
)
class User(AbstractUser, BaseAuditModel):
@@ -29,8 +20,8 @@ class User(AbstractUser, BaseAuditModel):
totp_key = models.CharField(max_length=50, null=True, blank=True)
dark_mode = models.BooleanField(default=True)
show_community_scripts = models.BooleanField(default=True)
agent_dblclick_action = models.CharField(
max_length=50, choices=AGENT_DBLCLICK_CHOICES, default="editagent"
agent_dblclick_action: "AgentDblClick" = models.CharField(
max_length=50, choices=AgentDblClick.choices, default=AgentDblClick.EDIT_AGENT
)
url_action = models.ForeignKey(
"core.URLAction",
@@ -40,15 +31,16 @@ class User(AbstractUser, BaseAuditModel):
on_delete=models.SET_NULL,
)
default_agent_tbl_tab = models.CharField(
max_length=50, choices=AGENT_TBL_TAB_CHOICES, default="server"
max_length=50, choices=AgentTableTabs.choices, default=AgentTableTabs.SERVER
)
agents_per_page = models.PositiveIntegerField(default=50) # not currently used
client_tree_sort = models.CharField(
max_length=50, choices=CLIENT_TREE_SORT_CHOICES, default="alphafail"
max_length=50, choices=ClientTreeSort.choices, default=ClientTreeSort.ALPHA_FAIL
)
client_tree_splitter = models.PositiveIntegerField(default=11)
loading_bar_color = models.CharField(max_length=255, default="red")
clear_search_when_switching = models.BooleanField(default=True)
date_format = models.CharField(max_length=30, blank=True, null=True)
is_installer_user = models.BooleanField(default=False)
last_login_ip = models.GenericIPAddressField(default=None, blank=True, null=True)
@@ -75,6 +67,23 @@ class User(AbstractUser, BaseAuditModel):
return UserSerializer(user).data
def get_and_set_role_cache(self) -> "Optional[Role]":
role = cache.get(f"{ROLE_CACHE_PREFIX}{self.role}")
if role and isinstance(role, Role):
return role
elif not role and not self.role:
return None
else:
models.prefetch_related_objects(
[self.role],
"can_view_clients",
"can_view_sites",
)
cache.set(f"{ROLE_CACHE_PREFIX}{self.role}", self.role, 600)
return self.role
class Role(BaseAuditModel):
name = models.CharField(max_length=255, unique=True)
@@ -175,6 +184,12 @@ class Role(BaseAuditModel):
def __str__(self):
return self.name
def save(self, *args, **kwargs) -> None:
# delete cache on save
cache.delete(f"{ROLE_CACHE_PREFIX}{self.name}")
super(BaseAuditModel, self).save(*args, **kwargs)
@staticmethod
def serialize(role):
# serializes the agent and returns json

View File

@@ -4,7 +4,7 @@ from tacticalrmm.permissions import _has_perm
class AccountsPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
if r.method == "GET":
return _has_perm(r, "can_list_accounts")
else:
@@ -28,7 +28,7 @@ class AccountsPerms(permissions.BasePermission):
class RolesPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
if r.method == "GET":
return _has_perm(r, "can_list_roles")
else:
@@ -36,7 +36,7 @@ class RolesPerms(permissions.BasePermission):
class APIKeyPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
if r.method == "GET":
return _has_perm(r, "can_list_api_keys")

View File

@@ -1,11 +1,11 @@
import pyotp
from rest_framework.serializers import (
ModelSerializer,
SerializerMethodField,
ReadOnlyField,
SerializerMethodField,
)
from .models import APIKey, User, Role
from .models import APIKey, Role, User
class UserUISerializer(ModelSerializer):
@@ -22,6 +22,7 @@ class UserUISerializer(ModelSerializer):
"loading_bar_color",
"clear_search_when_switching",
"block_dashboard_login",
"date_format",
]
@@ -39,6 +40,7 @@ class UserSerializer(ModelSerializer):
"last_login_ip",
"role",
"block_dashboard_login",
"date_format",
]

View File

@@ -2,15 +2,16 @@ from unittest.mock import patch
from django.test import override_settings
from model_bakery import baker, seq
from accounts.models import User, APIKey
from tacticalrmm.test import TacticalTestCase
from accounts.models import APIKey, User
from accounts.serializers import APIKeySerializer
from tacticalrmm.constants import AgentDblClick, AgentTableTabs, ClientTreeSort
from tacticalrmm.test import TacticalTestCase
class TestAccounts(TacticalTestCase):
def setUp(self):
self.client_setup()
self.setup_client()
self.bob = User(username="bob")
self.bob.set_password("hunter2")
self.bob.save()
@@ -69,17 +70,17 @@ class TestAccounts(TacticalTestCase):
self.assertEqual(r.status_code, 400)
self.assertIn("non_field_errors", r.data.keys())
@override_settings(DEBUG=True)
@patch("pyotp.TOTP.verify")
def test_debug_login_view(self, mock_verify):
url = "/login/"
mock_verify.return_value = True
# @override_settings(DEBUG=True)
# @patch("pyotp.TOTP.verify")
# def test_debug_login_view(self, mock_verify):
# url = "/login/"
# mock_verify.return_value = True
data = {"username": "bob", "password": "hunter2", "twofactor": "sekret"}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertIn("expiry", r.data.keys())
self.assertIn("token", r.data.keys())
# data = {"username": "bob", "password": "hunter2", "twofactor": "sekret"}
# r = self.client.post(url, data, format="json")
# self.assertEqual(r.status_code, 200)
# self.assertIn("expiry", r.data.keys())
# self.assertIn("token", r.data.keys())
class TestGetAddUsers(TacticalTestCase):
@@ -283,9 +284,9 @@ class TestUserAction(TacticalTestCase):
data = {
"dark_mode": True,
"show_community_scripts": True,
"agent_dblclick_action": "editagent",
"default_agent_tbl_tab": "mixed",
"client_tree_sort": "alpha",
"agent_dblclick_action": AgentDblClick.EDIT_AGENT,
"default_agent_tbl_tab": AgentTableTabs.MIXED,
"client_tree_sort": ClientTreeSort.ALPHA,
"client_tree_splitter": 14,
"loading_bar_color": "green",
"clear_search_when_switching": False,
@@ -308,7 +309,7 @@ class TestAPIKeyViews(TacticalTestCase):
serializer = APIKeySerializer(apikeys, many=True)
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(serializer.data, resp.data) # type: ignore
self.assertEqual(serializer.data, resp.data)
self.check_not_authenticated("get", url)
@@ -331,14 +332,14 @@ class TestAPIKeyViews(TacticalTestCase):
self.assertEqual(resp.status_code, 404)
apikey = baker.make("accounts.APIKey", name="Test")
url = f"/accounts/apikeys/{apikey.pk}/" # type: ignore
url = f"/accounts/apikeys/{apikey.pk}/"
data = {"name": "New Name"} # type: ignore
data = {"name": "New Name"}
resp = self.client.put(url, data, format="json")
self.assertEqual(resp.status_code, 200)
apikey = APIKey.objects.get(pk=apikey.pk) # type: ignore
self.assertEquals(apikey.name, "New Name")
apikey = APIKey.objects.get(pk=apikey.pk)
self.assertEqual(apikey.name, "New Name")
self.check_not_authenticated("put", url)
@@ -349,11 +350,11 @@ class TestAPIKeyViews(TacticalTestCase):
# test delete api key
apikey = baker.make("accounts.APIKey")
url = f"/accounts/apikeys/{apikey.pk}/" # type: ignore
url = f"/accounts/apikeys/{apikey.pk}/"
resp = self.client.delete(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertFalse(APIKey.objects.filter(pk=apikey.pk).exists()) # type: ignore
self.assertFalse(APIKey.objects.filter(pk=apikey.pk).exists())
self.check_not_authenticated("delete", url)
@@ -393,7 +394,7 @@ class TestAPIAuthentication(TacticalTestCase):
name="Test Token", key="123456", user=self.user
)
self.client_setup()
self.setup_client()
def test_api_auth(self):
url = "/clients/"

View File

@@ -0,0 +1,18 @@
from typing import TYPE_CHECKING
from django.conf import settings
if TYPE_CHECKING:
from django.http import HttpRequest
from accounts.models import User
def is_root_user(*, request: "HttpRequest", user: "User") -> bool:
root = (
hasattr(settings, "ROOT_USER")
and request.user != user
and user.username == settings.ROOT_USER
)
demo = (
getattr(settings, "DEMO", False) and request.user.username == settings.ROOT_USER
)
return root or demo

View File

@@ -5,16 +5,16 @@ from django.db import IntegrityError
from django.shortcuts import get_object_or_404
from ipware import get_client_ip
from knox.views import LoginView as KnoxLoginView
from logs.models import AuditLog
from rest_framework import status
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from tacticalrmm.utils import notify_error
from logs.models import AuditLog
from tacticalrmm.helpers import notify_error
from .models import APIKey, Role, User
from .permissions import APIKeyPerms, AccountsPerms, RolesPerms
from .permissions import AccountsPerms, APIKeyPerms, RolesPerms
from .serializers import (
APIKeySerializer,
RoleSerializer,
@@ -22,14 +22,7 @@ from .serializers import (
UserSerializer,
UserUISerializer,
)
def _is_root_user(request, user) -> bool:
return (
hasattr(settings, "ROOT_USER")
and request.user != user
and user.username == settings.ROOT_USER
)
from accounts.utils import is_root_user
class CheckCreds(KnoxLoginView):
@@ -80,6 +73,8 @@ class LoginView(KnoxLoginView):
if settings.DEBUG and token == "sekret":
valid = True
elif getattr(settings, "DEMO", False):
valid = True
elif totp.verify(token, valid_window=10):
valid = True
@@ -87,7 +82,7 @@ class LoginView(KnoxLoginView):
login(request, user)
# save ip information
client_ip, is_routable = get_client_ip(request)
client_ip, _ = get_client_ip(request)
user.last_login_ip = client_ip
user.save()
@@ -153,7 +148,7 @@ class GetUpdateDeleteUser(APIView):
def put(self, request, pk):
user = get_object_or_404(User, pk=pk)
if _is_root_user(request, user):
if is_root_user(request=request, user=user):
return notify_error("The root user cannot be modified from the UI")
serializer = UserSerializer(instance=user, data=request.data, partial=True)
@@ -164,7 +159,7 @@ class GetUpdateDeleteUser(APIView):
def delete(self, request, pk):
user = get_object_or_404(User, pk=pk)
if _is_root_user(request, user):
if is_root_user(request=request, user=user):
return notify_error("The root user cannot be deleted from the UI")
user.delete()
@@ -177,7 +172,7 @@ class UserActions(APIView):
# reset password
def post(self, request):
user = get_object_or_404(User, pk=request.data["id"])
if _is_root_user(request, user):
if is_root_user(request=request, user=user):
return notify_error("The root user cannot be modified from the UI")
user.set_password(request.data["password"])
@@ -188,7 +183,7 @@ class UserActions(APIView):
# reset two factor token
def put(self, request):
user = get_object_or_404(User, pk=request.data["id"])
if _is_root_user(request, user):
if is_root_user(request=request, user=user):
return notify_error("The root user cannot be modified from the UI")
user.totp_key = ""

View File

@@ -1,9 +1,8 @@
from django.contrib import admin
from .models import Agent, AgentCustomField, Note, RecoveryAction, AgentHistory
from .models import Agent, AgentCustomField, AgentHistory, Note
admin.site.register(Agent)
admin.site.register(RecoveryAction)
admin.site.register(Note)
admin.site.register(AgentCustomField)
admin.site.register(AgentHistory)

View File

@@ -1,6 +1,6 @@
import json
import os
import random
import secrets
import string
from itertools import cycle
@@ -8,10 +8,11 @@ from django.conf import settings
from django.utils import timezone as djangotime
from model_bakery.recipe import Recipe, foreign_key, seq
from tacticalrmm.constants import AgentMonType, AgentPlat
def generate_agent_id(hostname):
rand = "".join(random.choice(string.ascii_letters) for _ in range(35))
return f"{rand}-{hostname}"
def generate_agent_id() -> str:
return "".join(secrets.choice(string.ascii_letters) for i in range(39))
site = Recipe("clients.Site")
@@ -24,25 +25,34 @@ def get_wmi_data():
return json.load(f)
def get_win_svcs():
svcs = settings.BASE_DIR.joinpath("tacticalrmm/test_data/winsvcs.json")
with open(svcs) as f:
return json.load(f)
agent = Recipe(
"agents.Agent",
site=foreign_key(site),
hostname="DESKTOP-TEST123",
version="1.3.0",
monitoring_type=cycle(["workstation", "server"]),
agent_id=seq(generate_agent_id("DESKTOP-TEST123")),
monitoring_type=cycle(AgentMonType.values),
agent_id=seq(generate_agent_id()),
last_seen=djangotime.now() - djangotime.timedelta(days=5),
plat=AgentPlat.WINDOWS,
)
server_agent = agent.extend(
monitoring_type="server",
monitoring_type=AgentMonType.SERVER,
)
workstation_agent = agent.extend(
monitoring_type="workstation",
monitoring_type=AgentMonType.WORKSTATION,
)
online_agent = agent.extend(last_seen=djangotime.now())
online_agent = agent.extend(
last_seen=djangotime.now(), services=get_win_svcs(), wmi_detail=get_wmi_data()
)
offline_agent = agent.extend(
last_seen=djangotime.now() - djangotime.timedelta(minutes=7)
@@ -77,4 +87,4 @@ agent_with_services = agent.extend(
],
)
agent_with_wmi = agent.extend(wmi=get_wmi_data())
agent_with_wmi = agent.extend(wmi_detail=get_wmi_data())

View File

@@ -0,0 +1,83 @@
from agents.models import Agent, AgentHistory
from channels.db import database_sync_to_async
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from django.contrib.auth.models import AnonymousUser
from django.shortcuts import get_object_or_404
from tacticalrmm.constants import AGENT_DEFER, AgentHistoryType
from tacticalrmm.permissions import _has_perm_on_agent
class SendCMD(AsyncJsonWebsocketConsumer):
async def connect(self):
self.user = self.scope["user"]
if isinstance(self.user, AnonymousUser):
await self.close()
await self.accept()
async def receive_json(self, payload, **kwargs):
auth = await self.has_perm(payload["agent_id"])
if not auth:
await self.send_json(
{"ret": "You do not have permission to perform this action."}
)
return
agent = await self.get_agent(payload["agent_id"])
timeout = int(payload["timeout"])
if payload["shell"] == "custom" and payload["custom_shell"]:
shell = payload["custom_shell"]
else:
shell = payload["shell"]
hist_pk = await self.get_history_id(agent, payload["cmd"])
data = {
"func": "rawcmd",
"timeout": timeout,
"payload": {
"command": payload["cmd"],
"shell": shell,
},
"id": hist_pk,
}
ret = await agent.nats_cmd(data, timeout=timeout + 2)
await self.send_json({"ret": ret})
async def disconnect(self, _):
await self.close()
def _has_perm(self, perm: str) -> bool:
if self.user.is_superuser or (
self.user.role and getattr(self.user.role, "is_superuser")
):
return True
# make sure non-superusers with empty roles aren't permitted
elif not self.user.role:
return False
return self.user.role and getattr(self.user.role, perm)
@database_sync_to_async # type: ignore
def get_agent(self, agent_id: str) -> "Agent":
return get_object_or_404(Agent.objects.defer(*AGENT_DEFER), agent_id=agent_id)
@database_sync_to_async # type: ignore
def get_history_id(self, agent: "Agent", cmd: str) -> int:
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.CMD_RUN,
command=cmd,
username=self.user.username[:50],
)
return hist.pk
@database_sync_to_async # type: ignore
def has_perm(self, agent_id: str) -> bool:
return self._has_perm("can_send_cmd") and _has_perm_on_agent(
self.user, agent_id
)

View File

@@ -0,0 +1,82 @@
import asyncio
from django.core.management.base import BaseCommand
from django.utils import timezone as djangotime
from packaging import version as pyver
from agents.models import Agent
from tacticalrmm.constants import AGENT_DEFER
from tacticalrmm.utils import reload_nats
class Command(BaseCommand):
help = "Delete old agents"
def add_arguments(self, parser):
parser.add_argument(
"--days",
type=int,
help="Delete agents that have not checked in for this many days",
)
parser.add_argument(
"--agentver",
type=str,
help="Delete agents whose version is equal to or less than this version",
)
parser.add_argument(
"--delete",
action="store_true",
help="This will delete agents",
)
def handle(self, *args, **kwargs):
days = kwargs["days"]
agentver = kwargs["agentver"]
delete = kwargs["delete"]
if not days and not agentver:
self.stdout.write(
self.style.ERROR("Must have at least one parameter: days or agentver")
)
return
q = Agent.objects.defer(*AGENT_DEFER)
agents = []
if days:
overdue = djangotime.now() - djangotime.timedelta(days=days)
agents = [i for i in q if i.last_seen < overdue]
if agentver:
agents = [i for i in q if pyver.parse(i.version) <= pyver.parse(agentver)]
if not agents:
self.stdout.write(self.style.ERROR("No agents matched"))
return
deleted_count = 0
for agent in agents:
s = f"{agent.hostname} | Version {agent.version} | Last Seen {agent.last_seen} | {agent.client} > {agent.site}"
if delete:
s = "Deleting " + s
self.stdout.write(self.style.SUCCESS(s))
asyncio.run(agent.nats_cmd({"func": "uninstall"}, wait=False))
try:
agent.delete()
except Exception as e:
err = f"Failed to delete agent {agent.hostname}: {str(e)}"
self.stdout.write(self.style.ERROR(err))
else:
deleted_count += 1
else:
self.stdout.write(self.style.WARNING(s))
if delete:
reload_nats()
self.stdout.write(self.style.SUCCESS(f"Deleted {deleted_count} agents"))
else:
self.stdout.write(
self.style.SUCCESS(
"The above agents would be deleted. Run again with --delete to actually delete them."
)
)
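A hedged usage sketch for this management command (its file name, and therefore the command name, is not shown in this diff, so `delete_old_agents` below is an assumption):

```bash
# Hypothetical invocation from the api/tacticalrmm directory of the backend repo.
# First do a dry run, then re-run with --delete to actually remove the agents.
python manage.py delete_old_agents --days 60
python manage.py delete_old_agents --days 60 --delete
```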

View File

@@ -0,0 +1,33 @@
# import datetime as dt
import random
from django.core.management.base import BaseCommand
from django.utils import timezone as djangotime
from agents.models import Agent
from core.tasks import cache_db_fields_task, handle_resolved_stuff
class Command(BaseCommand):
help = "stuff for demo site in cron"
def handle(self, *args, **kwargs):
random_dates = []
now = djangotime.now()
for _ in range(20):
rand = now - djangotime.timedelta(minutes=random.randint(1, 2))
random_dates.append(rand)
for _ in range(5):
rand = now - djangotime.timedelta(minutes=random.randint(10, 20))
random_dates.append(rand)
agents = Agent.objects.only("last_seen")
for agent in agents:
agent.last_seen = random.choice(random_dates)
agent.save(update_fields=["last_seen"])
cache_db_fields_task()
handle_resolved_stuff()

View File

@@ -0,0 +1,836 @@
import datetime as dt
import json
import random
import string
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.utils import timezone as djangotime
from accounts.models import User
from agents.models import Agent, AgentHistory
from automation.models import Policy
from autotasks.models import AutomatedTask, TaskResult
from checks.models import Check, CheckHistory, CheckResult
from clients.models import Client, Site
from logs.models import AuditLog, PendingAction
from scripts.models import Script
from software.models import InstalledSoftware
from tacticalrmm.constants import (
AgentHistoryType,
AgentMonType,
AgentPlat,
AlertSeverity,
CheckStatus,
CheckType,
EvtLogFailWhen,
EvtLogNames,
EvtLogTypes,
PAAction,
ScriptShell,
TaskSyncStatus,
TaskType,
)
from tacticalrmm.demo_data import (
check_network_loc_aware_ps1,
check_storage_pool_health_ps1,
clear_print_spool_bat,
disks,
disks_linux_deb,
disks_linux_pi,
ping_fail_output,
ping_success_output,
restart_nla_ps1,
show_temp_dir_py,
spooler_stdout,
temp_dir_stdout,
wmi_deb,
wmi_pi,
)
from winupdate.models import WinUpdate, WinUpdatePolicy
AGENTS_TO_GENERATE = 20
SVCS = settings.BASE_DIR.joinpath("tacticalrmm/test_data/winsvcs.json")
WMI_1 = settings.BASE_DIR.joinpath("tacticalrmm/test_data/wmi1.json")
WMI_2 = settings.BASE_DIR.joinpath("tacticalrmm/test_data/wmi2.json")
WMI_3 = settings.BASE_DIR.joinpath("tacticalrmm/test_data/wmi3.json")
SW_1 = settings.BASE_DIR.joinpath("tacticalrmm/test_data/software1.json")
SW_2 = settings.BASE_DIR.joinpath("tacticalrmm/test_data/software2.json")
WIN_UPDATES = settings.BASE_DIR.joinpath("tacticalrmm/test_data/winupdates.json")
EVT_LOG_FAIL = settings.BASE_DIR.joinpath(
"tacticalrmm/test_data/eventlog_check_fail.json"
)
class Command(BaseCommand):
help = "populate database with fake agents"
def rand_string(self, length: int) -> str:
chars = string.ascii_letters
return "".join(random.choice(chars) for _ in range(length))
def handle(self, *args, **kwargs) -> None:
user = User.objects.first()
if user:
user.totp_key = "ABSA234234"
user.save(update_fields=["totp_key"])
Agent.objects.all().delete()
Client.objects.all().delete()
Check.objects.all().delete()
Script.objects.all().delete()
AutomatedTask.objects.all().delete()
CheckHistory.objects.all().delete()
Policy.objects.all().delete()
AuditLog.objects.all().delete()
PendingAction.objects.all().delete()
call_command("load_community_scripts")
call_command("initial_db_setup")
call_command("load_chocos")
call_command("create_installer_user")
# policies
check_policy = Policy()
check_policy.name = "Demo Checks Policy"
check_policy.desc = "Demo Checks Policy"
check_policy.active = True
check_policy.enforced = True
check_policy.save()
patch_policy = Policy()
patch_policy.name = "Demo Patch Policy"
patch_policy.desc = "Demo Patch Policy"
patch_policy.active = True
patch_policy.enforced = True
patch_policy.save()
update_policy = WinUpdatePolicy()
update_policy.policy = patch_policy
update_policy.critical = "approve"
update_policy.important = "approve"
update_policy.moderate = "approve"
update_policy.low = "ignore"
update_policy.other = "ignore"
update_policy.run_time_days = [6, 0, 2]
update_policy.run_time_day = 1
update_policy.reboot_after_install = "required"
update_policy.reprocess_failed = True
update_policy.email_if_fail = True
update_policy.save()
clients = (
"Company 1",
"Company 2",
"Company 3",
"Company 4",
"Company 5",
"Company 6",
)
sites1 = ("HQ1", "LA Office 1", "NY Office 1")
sites2 = ("HQ2", "LA Office 2", "NY Office 2")
sites3 = ("HQ3", "LA Office 3", "NY Office 3")
sites4 = ("HQ4", "LA Office 4", "NY Office 4")
sites5 = ("HQ5", "LA Office 5", "NY Office 5")
sites6 = ("HQ6", "LA Office 6", "NY Office 6")
client1 = Client(name=clients[0])
client2 = Client(name=clients[1])
client3 = Client(name=clients[2])
client4 = Client(name=clients[3])
client5 = Client(name=clients[4])
client6 = Client(name=clients[5])
client1.save()
client2.save()
client3.save()
client4.save()
client5.save()
client6.save()
for site in sites1:
Site(client=client1, name=site).save()
for site in sites2:
Site(client=client2, name=site).save()
for site in sites3:
Site(client=client3, name=site).save()
for site in sites4:
Site(client=client4, name=site).save()
for site in sites5:
Site(client=client5, name=site).save()
for site in sites6:
Site(client=client6, name=site).save()
hostnames = (
"DC-1",
"DC-2",
"FSV-1",
"FSV-2",
"WSUS",
"DESKTOP-12345",
"LAPTOP-55443",
)
descriptions = ("Bob's computer", "Primary DC", "File Server", "Karen's Laptop")
modes = AgentMonType.values
op_systems_servers = (
"Microsoft Windows Server 2016 Standard, 64bit (build 14393)",
"Microsoft Windows Server 2012 R2 Standard, 64bit (build 9600)",
"Microsoft Windows Server 2019 Standard, 64bit (build 17763)",
)
op_systems_workstations = (
"Microsoft Windows 8.1 Pro, 64bit (build 9600)",
"Microsoft Windows 10 Pro for Workstations, 64bit (build 18363)",
"Microsoft Windows 10 Pro, 64bit (build 18363)",
)
linux_deb_os = "Debian 11.2 x86_64 5.10.0-11-amd64"
linux_pi_os = "Raspbian 11.2 armv7l 5.10.92-v7+"
public_ips = ("65.234.22.4", "74.123.43.5", "44.21.134.45")
total_rams = (4, 8, 16, 32, 64, 128)
now = dt.datetime.now()
django_now = djangotime.now()
boot_times = []
for _ in range(15):
rand_hour = now - dt.timedelta(hours=random.randint(1, 22))
boot_times.append(str(rand_hour.timestamp()))
for _ in range(5):
rand_days = now - dt.timedelta(days=random.randint(2, 50))
boot_times.append(str(rand_days.timestamp()))
user_names = ("None", "Karen", "Steve", "jsmith", "jdoe")
with open(SVCS) as f:
services = json.load(f)
# WMI
with open(WMI_1) as f:
wmi1 = json.load(f)
with open(WMI_2) as f:
wmi2 = json.load(f)
with open(WMI_3) as f:
wmi3 = json.load(f)
wmi_details = [i for i in (wmi1, wmi2, wmi3)]
# software
with open(SW_1) as f:
software1 = json.load(f)
with open(SW_2) as f:
software2 = json.load(f)
softwares = [i for i in (software1, software2)]
# windows updates
with open(WIN_UPDATES) as f:
windows_updates = json.load(f)["samplecomputer"]
# event log check fail data
with open(EVT_LOG_FAIL) as f:
eventlog_check_fail_data = json.load(f)
# create scripts
clear_spool = Script()
clear_spool.name = "Clear Print Spooler"
clear_spool.description = "clears the print spooler. Fuck printers"
clear_spool.filename = "clear_print_spool.bat"
clear_spool.shell = ScriptShell.CMD
clear_spool.script_body = clear_print_spool_bat
clear_spool.save()
check_net_aware = Script()
check_net_aware.name = "Check Network Location Awareness"
check_net_aware.description = "Checks network location awareness on domain computers; it should always be the domain profile and not public or private. Sometimes happens when a computer restarts before the domain is available. This script will return 0 if the check passes or 1 if it fails."
check_net_aware.filename = "check_network_loc_aware.ps1"
check_net_aware.shell = ScriptShell.POWERSHELL
check_net_aware.script_body = check_network_loc_aware_ps1
check_net_aware.save()
check_pool_health = Script()
check_pool_health.name = "Check storage pool health"
check_pool_health.description = "loops through all storage pools and will fail if any of them are not healthy"
check_pool_health.filename = "check_storage_pool_health.ps1"
check_pool_health.shell = ScriptShell.POWERSHELL
check_pool_health.script_body = check_storage_pool_health_ps1
check_pool_health.save()
restart_nla = Script()
restart_nla.name = "Restart NLA Service"
restart_nla.description = "restarts the Network Location Awareness Windows service to fix the NIC profile. Run this after the Check Network Location Awareness check fails"
restart_nla.filename = "restart_nla.ps1"
restart_nla.shell = ScriptShell.POWERSHELL
restart_nla.script_body = restart_nla_ps1
restart_nla.save()
show_tmp_dir_script = Script()
show_tmp_dir_script.name = "Check temp dir"
show_tmp_dir_script.description = "shows files in temp dir using python"
show_tmp_dir_script.filename = "show_temp_dir.py"
show_tmp_dir_script.shell = ScriptShell.PYTHON
show_tmp_dir_script.script_body = show_temp_dir_py
show_tmp_dir_script.save()
for count_agents in range(AGENTS_TO_GENERATE):
client = random.choice(clients)
if client == clients[0]:
site = random.choice(sites1)
elif client == clients[1]:
site = random.choice(sites2)
elif client == clients[2]:
site = random.choice(sites3)
elif client == clients[3]:
site = random.choice(sites4)
elif client == clients[4]:
site = random.choice(sites5)
elif client == clients[5]:
site = random.choice(sites6)
agent = Agent()
plat_pick = random.randint(1, 15)
if plat_pick in (7, 11):
agent.plat = AgentPlat.LINUX
mode = AgentMonType.SERVER
# pi arm
if plat_pick == 7:
agent.goarch = "arm"
agent.wmi_detail = wmi_pi
agent.disks = disks_linux_pi
agent.operating_system = linux_pi_os
else:
agent.goarch = "amd64"
agent.wmi_detail = wmi_deb
agent.disks = disks_linux_deb
agent.operating_system = linux_deb_os
else:
agent.plat = AgentPlat.WINDOWS
agent.goarch = "amd64"
mode = random.choice(modes)
agent.wmi_detail = random.choice(wmi_details)
agent.services = services
agent.disks = random.choice(disks)
if mode == AgentMonType.SERVER:
agent.operating_system = random.choice(op_systems_servers)
else:
agent.operating_system = random.choice(op_systems_workstations)
agent.hostname = random.choice(hostnames)
agent.version = settings.LATEST_AGENT_VER
agent.site = Site.objects.get(name=site)
agent.agent_id = self.rand_string(40)
agent.description = random.choice(descriptions)
agent.monitoring_type = mode
agent.public_ip = random.choice(public_ips)
agent.last_seen = django_now
agent.total_ram = random.choice(total_rams)
agent.boot_time = random.choice(boot_times)
agent.logged_in_username = random.choice(user_names)
agent.mesh_node_id = (
"3UiLhe420@kaVQ0rswzBeonW$WY0xrFFUDBQlcYdXoriLXzvPmBpMrV99vRHXFlb"
)
agent.overdue_email_alert = random.choice([True, False])
agent.overdue_text_alert = random.choice([True, False])
agent.needs_reboot = random.choice([True, False])
agent.save()
if agent.plat == AgentPlat.WINDOWS:
InstalledSoftware(agent=agent, software=random.choice(softwares)).save()
if mode == AgentMonType.WORKSTATION:
WinUpdatePolicy(agent=agent, run_time_days=[5, 6]).save()
else:
WinUpdatePolicy(agent=agent).save()
if agent.plat == AgentPlat.WINDOWS:
# windows updates load
guids = [i for i in windows_updates.keys()]
for i in guids:
WinUpdate(
agent=agent,
guid=i,
kb=windows_updates[i]["KBs"][0],
title=windows_updates[i]["Title"],
installed=windows_updates[i]["Installed"],
downloaded=windows_updates[i]["Downloaded"],
description=windows_updates[i]["Description"],
severity=windows_updates[i]["Severity"],
).save()
# agent histories
hist = AgentHistory()
hist.agent = agent
hist.type = AgentHistoryType.CMD_RUN
hist.command = "ping google.com"
hist.username = "demo"
hist.results = ping_success_output
hist.save()
hist1 = AgentHistory()
hist1.agent = agent
hist1.type = AgentHistoryType.SCRIPT_RUN
hist1.script = clear_spool
hist1.script_results = {
"id": 1,
"stderr": "",
"stdout": spooler_stdout,
"execution_time": 3.5554593,
"retcode": 0,
}
hist1.save()
if agent.plat == AgentPlat.WINDOWS:
# disk space check
check1 = Check()
check1.agent = agent
check1.check_type = CheckType.DISK_SPACE
check1.warning_threshold = 25
check1.error_threshold = 10
check1.disk = "C:"
check1.email_alert = random.choice([True, False])
check1.text_alert = random.choice([True, False])
check1.save()
check_result1 = CheckResult()
check_result1.agent = agent
check_result1.assigned_check = check1
check_result1.status = CheckStatus.PASSING
check_result1.last_run = django_now
check_result1.more_info = "Total: 498.7GB, Free: 287.4GB"
check_result1.save()
for i in range(30):
check1_history = CheckHistory()
check1_history.check_id = check1.pk
check1_history.agent_id = agent.agent_id
check1_history.x = django_now - djangotime.timedelta(minutes=i * 2)
check1_history.y = random.randint(13, 40)
check1_history.save()
# ping check
check2 = Check()
check_result2 = CheckResult()
check2.agent = agent
check2.check_type = CheckType.PING
check2.email_alert = random.choice([True, False])
check2.text_alert = random.choice([True, False])
check_result2.agent = agent
check_result2.assigned_check = check2
check_result2.last_run = django_now
if site in sites5:
check2.name = "Synology NAS"
check2.alert_severity = AlertSeverity.ERROR
check_result2.status = CheckStatus.FAILING
check2.ip = "172.17.14.26"
check_result2.more_info = ping_fail_output
else:
check2.name = "Google"
check_result2.status = CheckStatus.PASSING
check2.ip = "8.8.8.8"
check_result2.more_info = ping_success_output
check2.save()
check_result2.save()
for i in range(30):
check2_history = CheckHistory()
check2_history.check_id = check2.pk
check2_history.agent_id = agent.agent_id
check2_history.x = django_now - djangotime.timedelta(minutes=i * 2)
if site in sites5:
check2_history.y = 1
check2_history.results = ping_fail_output
else:
check2_history.y = 0
check2_history.results = ping_success_output
check2_history.save()
# cpu load check
check3 = Check()
check3.agent = agent
check3.check_type = CheckType.CPU_LOAD
check3.warning_threshold = 70
check3.error_threshold = 90
check3.email_alert = random.choice([True, False])
check3.text_alert = random.choice([True, False])
check3.save()
check_result3 = CheckResult()
check_result3.agent = agent
check_result3.assigned_check = check3
check_result3.status = CheckStatus.PASSING
check_result3.last_run = django_now
check_result3.history = [
15,
23,
16,
22,
22,
27,
15,
23,
23,
20,
10,
10,
13,
34,
]
check_result3.save()
for i in range(30):
check3_history = CheckHistory()
check3_history.check_id = check3.pk
check3_history.agent_id = agent.agent_id
check3_history.x = django_now - djangotime.timedelta(minutes=i * 2)
check3_history.y = random.randint(2, 79)
check3_history.save()
# memory check
check4 = Check()
check4.agent = agent
check4.check_type = CheckType.MEMORY
check4.warning_threshold = 70
check4.error_threshold = 85
check4.email_alert = random.choice([True, False])
check4.text_alert = random.choice([True, False])
check4.save()
check_result4 = CheckResult()
check_result4.agent = agent
check_result4.assigned_check = check4
check_result4.status = CheckStatus.PASSING
check_result4.last_run = django_now
check_result4.history = [34, 34, 35, 36, 34, 34, 34, 34, 34, 34]
check_result4.save()
for i in range(30):
check4_history = CheckHistory()
check4_history.check_id = check4.pk
check4_history.agent_id = agent.agent_id
check4_history.x = django_now - djangotime.timedelta(minutes=i * 2)
check4_history.y = random.randint(2, 79)
check4_history.save()
# script check storage pool
check5 = Check()
check5.agent = agent
check5.check_type = CheckType.SCRIPT
check5.email_alert = random.choice([True, False])
check5.text_alert = random.choice([True, False])
check5.timeout = 120
check5.script = check_pool_health
check5.save()
check_result5 = CheckResult()
check_result5.agent = agent
check_result5.assigned_check = check5
check_result5.status = CheckStatus.PASSING
check_result5.last_run = django_now
check_result5.retcode = 0
check_result5.execution_time = "4.0000"
check_result5.save()
for i in range(30):
check5_history = CheckHistory()
check5_history.check_id = check5.pk
check5_history.agent_id = agent.agent_id
check5_history.x = django_now - djangotime.timedelta(minutes=i * 2)
if i == 10 or i == 18:
check5_history.y = 1
else:
check5_history.y = 0
check5_history.results = {
"retcode": 0,
"stdout": None,
"stderr": None,
"execution_time": "4.0000",
}
check5_history.save()
check6 = Check()
check6.agent = agent
check6.check_type = CheckType.SCRIPT
check6.email_alert = random.choice([True, False])
check6.text_alert = random.choice([True, False])
check6.timeout = 120
check6.script = check_net_aware
check6.save()
check_result6 = CheckResult()
check_result6.agent = agent
check_result6.assigned_check = check6
check_result6.status = CheckStatus.PASSING
check_result6.last_run = django_now
check_result6.retcode = 0
check_result6.execution_time = "4.0000"
check_result6.save()
for i in range(30):
check6_history = CheckHistory()
check6_history.check_id = check6.pk
check6_history.agent_id = agent.agent_id
check6_history.x = django_now - djangotime.timedelta(minutes=i * 2)
check6_history.y = 0
check6_history.results = {
"retcode": 0,
"stdout": None,
"stderr": None,
"execution_time": "4.0000",
}
check6_history.save()
nla_task = AutomatedTask()
nla_task.agent = agent
actions = [
{
"name": restart_nla.name,
"type": "script",
"script": restart_nla.pk,
"timeout": 90,
"script_args": [],
}
]
nla_task.actions = actions
nla_task.assigned_check = check6
nla_task.name = "Restart NLA"
nla_task.task_type = TaskType.CHECK_FAILURE
nla_task.save()
nla_task_result = TaskResult()
nla_task_result.task = nla_task
nla_task_result.agent = agent
nla_task_result.execution_time = "1.8443"
nla_task_result.last_run = django_now
nla_task_result.stdout = "no stdout"
nla_task_result.retcode = 0
nla_task_result.sync_status = TaskSyncStatus.SYNCED
nla_task_result.save()
spool_task = AutomatedTask()
spool_task.agent = agent
actions = [
{
"name": clear_spool.name,
"type": "script",
"script": clear_spool.pk,
"timeout": 90,
"script_args": [],
}
]
spool_task.actions = actions
spool_task.name = "Clear the print spooler"
spool_task.task_type = TaskType.DAILY
spool_task.run_time_date = django_now + djangotime.timedelta(minutes=10)
spool_task.expire_date = django_now + djangotime.timedelta(days=753)
spool_task.daily_interval = 1
spool_task.weekly_interval = 1
spool_task.task_repetition_duration = "2h"
spool_task.task_repetition_interval = "25m"
spool_task.random_task_delay = "3m"
spool_task.save()
spool_task_result = TaskResult()
spool_task_result.task = spool_task
spool_task_result.agent = agent
spool_task_result.last_run = django_now
spool_task_result.retcode = 0
spool_task_result.stdout = spooler_stdout
spool_task_result.sync_status = TaskSyncStatus.SYNCED
spool_task_result.save()
tmp_dir_task = AutomatedTask()
tmp_dir_task.agent = agent
tmp_dir_task.name = "show temp dir files"
actions = [
{
"name": show_tmp_dir_script.name,
"type": "script",
"script": show_tmp_dir_script.pk,
"timeout": 90,
"script_args": [],
}
]
tmp_dir_task.actions = actions
tmp_dir_task.task_type = TaskType.MANUAL
tmp_dir_task.save()
tmp_dir_task_result = TaskResult()
tmp_dir_task_result.task = tmp_dir_task
tmp_dir_task_result.agent = agent
tmp_dir_task_result.last_run = django_now
tmp_dir_task_result.stdout = temp_dir_stdout
tmp_dir_task_result.retcode = 0
tmp_dir_task_result.sync_status = TaskSyncStatus.SYNCED
tmp_dir_task_result.save()
check7 = Check()
check7.agent = agent
check7.check_type = CheckType.SCRIPT
check7.email_alert = random.choice([True, False])
check7.text_alert = random.choice([True, False])
check7.timeout = 120
check7.script = clear_spool
check7.save()
check_result7 = CheckResult()
check_result7.assigned_check = check7
check_result7.agent = agent
check_result7.status = CheckStatus.PASSING
check_result7.last_run = django_now
check_result7.retcode = 0
check_result7.execution_time = "3.1337"
check_result7.stdout = spooler_stdout
check_result7.save()
for i in range(30):
check7_history = CheckHistory()
check7_history.check_id = check7.pk
check7_history.agent_id = agent.agent_id
check7_history.x = django_now - djangotime.timedelta(minutes=i * 2)
check7_history.y = 0
check7_history.results = {
"retcode": 0,
"stdout": spooler_stdout,
"stderr": None,
"execution_time": "3.1337",
}
check7_history.save()
if agent.plat == AgentPlat.WINDOWS:
check8 = Check()
check8.agent = agent
check8.check_type = CheckType.WINSVC
check8.email_alert = random.choice([True, False])
check8.text_alert = random.choice([True, False])
check8.fails_b4_alert = 4
check8.svc_name = "Spooler"
check8.svc_display_name = "Print Spooler"
check8.pass_if_start_pending = False
check8.restart_if_stopped = True
check8.save()
check_result8 = CheckResult()
check_result8.assigned_check = check8
check_result8.agent = agent
check_result8.status = CheckStatus.PASSING
check_result8.last_run = django_now
check_result8.more_info = "Status RUNNING"
check_result8.save()
for i in range(30):
check8_history = CheckHistory()
check8_history.check_id = check8.pk
check8_history.agent_id = agent.agent_id
check8_history.x = django_now - djangotime.timedelta(minutes=i * 2)
if i == 10 or i == 18:
check8_history.y = 1
check8_history.results = "Status STOPPED"
else:
check8_history.y = 0
check8_history.results = "Status RUNNING"
check8_history.save()
check9 = Check()
check9.agent = agent
check9.check_type = CheckType.EVENT_LOG
check9.name = "unexpected shutdown"
check9.email_alert = random.choice([True, False])
check9.text_alert = random.choice([True, False])
check9.fails_b4_alert = 2
check9.log_name = EvtLogNames.APPLICATION
check9.event_id = 1001
check9.event_type = EvtLogTypes.INFO
check9.fail_when = EvtLogFailWhen.CONTAINS
check9.search_last_days = 30
check_result9 = CheckResult()
check_result9.agent = agent
check_result9.assigned_check = check9
check_result9.last_run = django_now
if site in sites5:
check_result9.extra_details = eventlog_check_fail_data
check_result9.status = CheckStatus.FAILING
else:
check_result9.extra_details = {"log": []}
check_result9.status = CheckStatus.PASSING
check9.save()
check_result9.save()
for i in range(30):
check9_history = CheckHistory()
check9_history.check_id = check9.pk
check9_history.agent_id = agent.agent_id
check9_history.x = django_now - djangotime.timedelta(minutes=i * 2)
if i == 10 or i == 18:
check9_history.y = 1
check9_history.results = "Events Found: 16"
else:
check9_history.y = 0
check9_history.results = "Events Found: 0"
check9_history.save()
pick = random.randint(1, 10)
if pick == 5 or pick == 3:
reboot_time = django_now + djangotime.timedelta(
minutes=random.randint(1000, 500000)
)
date_obj = dt.datetime.strftime(reboot_time, "%Y-%m-%d %H:%M")
obj = dt.datetime.strptime(date_obj, "%Y-%m-%d %H:%M")
task_name = "TacticalRMM_SchedReboot_" + "".join(
random.choice(string.ascii_letters) for _ in range(10)
)
sched_reboot = PendingAction()
sched_reboot.agent = agent
sched_reboot.action_type = PAAction.SCHED_REBOOT
sched_reboot.details = {
"time": str(obj),
"taskname": task_name,
}
sched_reboot.save()
self.stdout.write(self.style.SUCCESS(f"Added agent # {count_agents + 1}"))
self.stdout.write("done")

View File

@@ -0,0 +1,30 @@
from django.core.management.base import BaseCommand
from agents.models import Agent
from tacticalrmm.constants import AGENT_DEFER
class Command(BaseCommand):
help = "Find all agents that have a certain service installed"
def add_arguments(self, parser):
parser.add_argument("name", type=str)
def handle(self, *args, **kwargs):
search = kwargs["name"].lower()
agents = Agent.objects.defer(*AGENT_DEFER)
for agent in agents:
try:
for svc in agent.services:
if (
search in svc["name"].lower()
or search in svc["display_name"].lower()
):
self.stdout.write(
self.style.SUCCESS(
f"{agent.hostname} - {svc['name']} ({svc['display_name']}) - {svc['status']}"
)
)
except (TypeError, KeyError):  # skip agents that haven't reported services yet
continue
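The command can be exercised without an HTTP round trip through Django's call_command. The command name equals the module filename under agents/management/commands/, which this diff does not show, so "find_services" below is an assumption; any match prints as hostname - service name (display name) - status, per the SUCCESS line above.

from django.core.management import call_command

# "find_services" is a guess at the filename; substitute the actual module name
call_command("find_services", "sql")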

View File

@@ -1,16 +0,0 @@
from django.core.management.base import BaseCommand
from agents.models import Agent
class Command(BaseCommand):
help = "Changes existing agents salt_id from a property to a model field"
def handle(self, *args, **kwargs):
agents = Agent.objects.filter(salt_id=None)
for agent in agents:
self.stdout.write(
self.style.SUCCESS(f"Setting salt_id on {agent.hostname}")
)
agent.salt_id = f"{agent.hostname}-{agent.pk}"
agent.save(update_fields=["salt_id"])

View File

@@ -2,16 +2,16 @@ from django.conf import settings
from django.core.management.base import BaseCommand
from agents.models import Agent
from tacticalrmm.constants import AGENT_STATUS_ONLINE, ONLINE_AGENTS
class Command(BaseCommand):
help = "Shows online agents that are not on the latest version"
def handle(self, *args, **kwargs):
q = Agent.objects.exclude(version=settings.LATEST_AGENT_VER).only(
"pk", "version", "last_seen", "overdue_time", "offline_time"
)
agents = [i for i in q if i.status == "online"]
only = ONLINE_AGENTS + ("hostname",)
q = Agent.objects.exclude(version=settings.LATEST_AGENT_VER).only(*only)
agents = [i for i in q if i.status == AGENT_STATUS_ONLINE]
for agent in agents:
self.stdout.write(
self.style.SUCCESS(f"{agent.hostname} - v{agent.version}")

View File

@@ -0,0 +1,26 @@
from django.conf import settings
from django.core.management.base import BaseCommand
from packaging import version as pyver
from agents.models import Agent
from agents.tasks import send_agent_update_task
from core.utils import get_core_settings, token_is_valid
from tacticalrmm.constants import AGENT_DEFER
class Command(BaseCommand):
help = "Triggers an agent update task to run"
def handle(self, *args, **kwargs):
core = get_core_settings()
if not core.agent_auto_update:
return
q = Agent.objects.defer(*AGENT_DEFER).exclude(version=settings.LATEST_AGENT_VER)
agent_ids: list[str] = [
i.agent_id
for i in q
if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER)
]
token, _ = token_is_valid()
send_agent_update_task.delay(agent_ids=agent_ids, token=token, force=False)
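The outdated-agent filter above relies on packaging.version rather than plain string comparison, which matters once a version component reaches two digits. A small sketch of the same comparison with made-up version numbers:

from packaging import version as pyver

installed = ["2.0.3", "2.3.0", "2.10.1"]  # illustrative values
latest = "2.10.1"                         # stands in for settings.LATEST_AGENT_VER

outdated = [v for v in installed if pyver.parse(v) < pyver.parse(latest)]
# -> ["2.0.3", "2.3.0"]; plain string comparison would wrongly rank "2.10.1" below "2.3.0"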

View File

@@ -1,7 +1,7 @@
# Generated by Django 3.2.1 on 2021-07-06 02:01
from django.db import migrations, models
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):

View File

@@ -1,7 +1,7 @@
# Generated by Django 3.2.5 on 2021-07-14 07:38
from django.db import migrations, models
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):

View File

@@ -0,0 +1,25 @@
# Generated by Django 3.2.12 on 2022-02-27 05:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agents', '0042_alter_agent_time_zone'),
]
operations = [
migrations.RemoveField(
model_name='agent',
name='antivirus',
),
migrations.RemoveField(
model_name='agent',
name='local_ip',
),
migrations.RemoveField(
model_name='agent',
name='used_ram',
),
]

View File

@@ -0,0 +1,22 @@
# Generated by Django 3.2.12 on 2022-02-27 07:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agents', '0043_auto_20220227_0554'),
]
operations = [
migrations.RenameField(
model_name='agent',
old_name='salt_id',
new_name='goarch',
),
migrations.RemoveField(
model_name='agent',
name='salt_ver',
),
]

View File

@@ -0,0 +1,16 @@
# Generated by Django 3.2.12 on 2022-03-12 02:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agents', '0044_auto_20220227_0717'),
]
operations = [
migrations.DeleteModel(
name='RecoveryAction',
),
]

View File

@@ -0,0 +1,18 @@
# Generated by Django 3.2.12 on 2022-03-17 17:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('agents', '0045_delete_recoveryaction'),
]
operations = [
migrations.AlterField(
model_name='agenthistory',
name='command',
field=models.TextField(blank=True, default='', null=True),
),
]

View File

@@ -0,0 +1,26 @@
# Generated by Django 4.0.3 on 2022-04-07 17:28
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clients', '0020_auto_20211226_0547'),
('agents', '0046_alter_agenthistory_command'),
]
operations = [
migrations.AlterField(
model_name='agent',
name='plat',
field=models.CharField(default='windows', max_length=255),
),
migrations.AlterField(
model_name='agent',
name='site',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.RESTRICT, related_name='agents', to='clients.site'),
preserve_default=False,
),
]

View File

@@ -0,0 +1,21 @@
# Generated by Django 4.0.3 on 2022-04-16 17:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agents', '0047_alter_agent_plat_alter_agent_site'),
]
operations = [
migrations.RemoveField(
model_name='agent',
name='has_patches_pending',
),
migrations.RemoveField(
model_name='agent',
name='pending_actions_count',
),
]

View File

@@ -0,0 +1,17 @@
# Generated by Django 4.0.3 on 2022-04-18 14:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('agents', '0048_remove_agent_has_patches_pending_and_more'),
]
operations = [
migrations.AddIndex(
model_name='agent',
index=models.Index(fields=['monitoring_type'], name='agents_agen_monitor_df8816_idx'),
),
]

View File

@@ -0,0 +1,17 @@
# Generated by Django 4.0.4 on 2022-04-25 06:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agents', '0049_agent_agents_agen_monitor_df8816_idx'),
]
operations = [
migrations.RemoveField(
model_name='agent',
name='plat_release',
),
]

View File

@@ -0,0 +1,18 @@
# Generated by Django 4.0.4 on 2022-05-18 03:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('agents', '0050_remove_agent_plat_release'),
]
operations = [
migrations.AlterField(
model_name='agent',
name='plat',
field=models.CharField(choices=[('windows', 'Windows'), ('linux', 'Linux'), ('darwin', 'macOS')], default='windows', max_length=255),
),
]

View File

@@ -0,0 +1,18 @@
# Generated by Django 4.0.4 on 2022-05-18 05:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('agents', '0051_alter_agent_plat'),
]
operations = [
migrations.AlterField(
model_name='agent',
name='monitoring_type',
field=models.CharField(choices=[('server', 'Server'), ('workstation', 'Workstation')], default='server', max_length=30),
),
]

View File

@@ -0,0 +1,17 @@
# Generated by Django 4.0.4 on 2022-05-18 06:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agents', '0052_alter_agent_monitoring_type'),
]
operations = [
migrations.RemoveField(
model_name='agenthistory',
name='status',
),
]

View File

@@ -0,0 +1,18 @@
# Generated by Django 4.0.4 on 2022-06-06 04:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('agents', '0053_remove_agenthistory_status'),
]
operations = [
migrations.AlterField(
model_name='agent',
name='goarch',
field=models.CharField(blank=True, choices=[('amd64', 'amd64'), ('386', '386'), ('arm64', 'arm64'), ('arm', 'arm')], max_length=255, null=True),
),
]

View File

@@ -0,0 +1,631 @@
# Generated by Django 4.1 on 2022-08-24 07:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("agents", "0054_alter_agent_goarch"),
]
operations = [
migrations.AlterField(
model_name="agent",
name="time_zone",
field=models.CharField(
blank=True,
choices=[
("Africa/Abidjan", "Africa/Abidjan"),
("Africa/Accra", "Africa/Accra"),
("Africa/Addis_Ababa", "Africa/Addis_Ababa"),
("Africa/Algiers", "Africa/Algiers"),
("Africa/Asmara", "Africa/Asmara"),
("Africa/Asmera", "Africa/Asmera"),
("Africa/Bamako", "Africa/Bamako"),
("Africa/Bangui", "Africa/Bangui"),
("Africa/Banjul", "Africa/Banjul"),
("Africa/Bissau", "Africa/Bissau"),
("Africa/Blantyre", "Africa/Blantyre"),
("Africa/Brazzaville", "Africa/Brazzaville"),
("Africa/Bujumbura", "Africa/Bujumbura"),
("Africa/Cairo", "Africa/Cairo"),
("Africa/Casablanca", "Africa/Casablanca"),
("Africa/Ceuta", "Africa/Ceuta"),
("Africa/Conakry", "Africa/Conakry"),
("Africa/Dakar", "Africa/Dakar"),
("Africa/Dar_es_Salaam", "Africa/Dar_es_Salaam"),
("Africa/Djibouti", "Africa/Djibouti"),
("Africa/Douala", "Africa/Douala"),
("Africa/El_Aaiun", "Africa/El_Aaiun"),
("Africa/Freetown", "Africa/Freetown"),
("Africa/Gaborone", "Africa/Gaborone"),
("Africa/Harare", "Africa/Harare"),
("Africa/Johannesburg", "Africa/Johannesburg"),
("Africa/Juba", "Africa/Juba"),
("Africa/Kampala", "Africa/Kampala"),
("Africa/Khartoum", "Africa/Khartoum"),
("Africa/Kigali", "Africa/Kigali"),
("Africa/Kinshasa", "Africa/Kinshasa"),
("Africa/Lagos", "Africa/Lagos"),
("Africa/Libreville", "Africa/Libreville"),
("Africa/Lome", "Africa/Lome"),
("Africa/Luanda", "Africa/Luanda"),
("Africa/Lubumbashi", "Africa/Lubumbashi"),
("Africa/Lusaka", "Africa/Lusaka"),
("Africa/Malabo", "Africa/Malabo"),
("Africa/Maputo", "Africa/Maputo"),
("Africa/Maseru", "Africa/Maseru"),
("Africa/Mbabane", "Africa/Mbabane"),
("Africa/Mogadishu", "Africa/Mogadishu"),
("Africa/Monrovia", "Africa/Monrovia"),
("Africa/Nairobi", "Africa/Nairobi"),
("Africa/Ndjamena", "Africa/Ndjamena"),
("Africa/Niamey", "Africa/Niamey"),
("Africa/Nouakchott", "Africa/Nouakchott"),
("Africa/Ouagadougou", "Africa/Ouagadougou"),
("Africa/Porto-Novo", "Africa/Porto-Novo"),
("Africa/Sao_Tome", "Africa/Sao_Tome"),
("Africa/Timbuktu", "Africa/Timbuktu"),
("Africa/Tripoli", "Africa/Tripoli"),
("Africa/Tunis", "Africa/Tunis"),
("Africa/Windhoek", "Africa/Windhoek"),
("America/Adak", "America/Adak"),
("America/Anchorage", "America/Anchorage"),
("America/Anguilla", "America/Anguilla"),
("America/Antigua", "America/Antigua"),
("America/Araguaina", "America/Araguaina"),
(
"America/Argentina/Buenos_Aires",
"America/Argentina/Buenos_Aires",
),
("America/Argentina/Catamarca", "America/Argentina/Catamarca"),
(
"America/Argentina/ComodRivadavia",
"America/Argentina/ComodRivadavia",
),
("America/Argentina/Cordoba", "America/Argentina/Cordoba"),
("America/Argentina/Jujuy", "America/Argentina/Jujuy"),
("America/Argentina/La_Rioja", "America/Argentina/La_Rioja"),
("America/Argentina/Mendoza", "America/Argentina/Mendoza"),
(
"America/Argentina/Rio_Gallegos",
"America/Argentina/Rio_Gallegos",
),
("America/Argentina/Salta", "America/Argentina/Salta"),
("America/Argentina/San_Juan", "America/Argentina/San_Juan"),
("America/Argentina/San_Luis", "America/Argentina/San_Luis"),
("America/Argentina/Tucuman", "America/Argentina/Tucuman"),
("America/Argentina/Ushuaia", "America/Argentina/Ushuaia"),
("America/Aruba", "America/Aruba"),
("America/Asuncion", "America/Asuncion"),
("America/Atikokan", "America/Atikokan"),
("America/Atka", "America/Atka"),
("America/Bahia", "America/Bahia"),
("America/Bahia_Banderas", "America/Bahia_Banderas"),
("America/Barbados", "America/Barbados"),
("America/Belem", "America/Belem"),
("America/Belize", "America/Belize"),
("America/Blanc-Sablon", "America/Blanc-Sablon"),
("America/Boa_Vista", "America/Boa_Vista"),
("America/Bogota", "America/Bogota"),
("America/Boise", "America/Boise"),
("America/Buenos_Aires", "America/Buenos_Aires"),
("America/Cambridge_Bay", "America/Cambridge_Bay"),
("America/Campo_Grande", "America/Campo_Grande"),
("America/Cancun", "America/Cancun"),
("America/Caracas", "America/Caracas"),
("America/Catamarca", "America/Catamarca"),
("America/Cayenne", "America/Cayenne"),
("America/Cayman", "America/Cayman"),
("America/Chicago", "America/Chicago"),
("America/Chihuahua", "America/Chihuahua"),
("America/Coral_Harbour", "America/Coral_Harbour"),
("America/Cordoba", "America/Cordoba"),
("America/Costa_Rica", "America/Costa_Rica"),
("America/Creston", "America/Creston"),
("America/Cuiaba", "America/Cuiaba"),
("America/Curacao", "America/Curacao"),
("America/Danmarkshavn", "America/Danmarkshavn"),
("America/Dawson", "America/Dawson"),
("America/Dawson_Creek", "America/Dawson_Creek"),
("America/Denver", "America/Denver"),
("America/Detroit", "America/Detroit"),
("America/Dominica", "America/Dominica"),
("America/Edmonton", "America/Edmonton"),
("America/Eirunepe", "America/Eirunepe"),
("America/El_Salvador", "America/El_Salvador"),
("America/Ensenada", "America/Ensenada"),
("America/Fort_Nelson", "America/Fort_Nelson"),
("America/Fort_Wayne", "America/Fort_Wayne"),
("America/Fortaleza", "America/Fortaleza"),
("America/Glace_Bay", "America/Glace_Bay"),
("America/Godthab", "America/Godthab"),
("America/Goose_Bay", "America/Goose_Bay"),
("America/Grand_Turk", "America/Grand_Turk"),
("America/Grenada", "America/Grenada"),
("America/Guadeloupe", "America/Guadeloupe"),
("America/Guatemala", "America/Guatemala"),
("America/Guayaquil", "America/Guayaquil"),
("America/Guyana", "America/Guyana"),
("America/Halifax", "America/Halifax"),
("America/Havana", "America/Havana"),
("America/Hermosillo", "America/Hermosillo"),
("America/Indiana/Indianapolis", "America/Indiana/Indianapolis"),
("America/Indiana/Knox", "America/Indiana/Knox"),
("America/Indiana/Marengo", "America/Indiana/Marengo"),
("America/Indiana/Petersburg", "America/Indiana/Petersburg"),
("America/Indiana/Tell_City", "America/Indiana/Tell_City"),
("America/Indiana/Vevay", "America/Indiana/Vevay"),
("America/Indiana/Vincennes", "America/Indiana/Vincennes"),
("America/Indiana/Winamac", "America/Indiana/Winamac"),
("America/Indianapolis", "America/Indianapolis"),
("America/Inuvik", "America/Inuvik"),
("America/Iqaluit", "America/Iqaluit"),
("America/Jamaica", "America/Jamaica"),
("America/Jujuy", "America/Jujuy"),
("America/Juneau", "America/Juneau"),
("America/Kentucky/Louisville", "America/Kentucky/Louisville"),
("America/Kentucky/Monticello", "America/Kentucky/Monticello"),
("America/Knox_IN", "America/Knox_IN"),
("America/Kralendijk", "America/Kralendijk"),
("America/La_Paz", "America/La_Paz"),
("America/Lima", "America/Lima"),
("America/Los_Angeles", "America/Los_Angeles"),
("America/Louisville", "America/Louisville"),
("America/Lower_Princes", "America/Lower_Princes"),
("America/Maceio", "America/Maceio"),
("America/Managua", "America/Managua"),
("America/Manaus", "America/Manaus"),
("America/Marigot", "America/Marigot"),
("America/Martinique", "America/Martinique"),
("America/Matamoros", "America/Matamoros"),
("America/Mazatlan", "America/Mazatlan"),
("America/Mendoza", "America/Mendoza"),
("America/Menominee", "America/Menominee"),
("America/Merida", "America/Merida"),
("America/Metlakatla", "America/Metlakatla"),
("America/Mexico_City", "America/Mexico_City"),
("America/Miquelon", "America/Miquelon"),
("America/Moncton", "America/Moncton"),
("America/Monterrey", "America/Monterrey"),
("America/Montevideo", "America/Montevideo"),
("America/Montreal", "America/Montreal"),
("America/Montserrat", "America/Montserrat"),
("America/Nassau", "America/Nassau"),
("America/New_York", "America/New_York"),
("America/Nipigon", "America/Nipigon"),
("America/Nome", "America/Nome"),
("America/Noronha", "America/Noronha"),
("America/North_Dakota/Beulah", "America/North_Dakota/Beulah"),
("America/North_Dakota/Center", "America/North_Dakota/Center"),
(
"America/North_Dakota/New_Salem",
"America/North_Dakota/New_Salem",
),
("America/Nuuk", "America/Nuuk"),
("America/Ojinaga", "America/Ojinaga"),
("America/Panama", "America/Panama"),
("America/Pangnirtung", "America/Pangnirtung"),
("America/Paramaribo", "America/Paramaribo"),
("America/Phoenix", "America/Phoenix"),
("America/Port-au-Prince", "America/Port-au-Prince"),
("America/Port_of_Spain", "America/Port_of_Spain"),
("America/Porto_Acre", "America/Porto_Acre"),
("America/Porto_Velho", "America/Porto_Velho"),
("America/Puerto_Rico", "America/Puerto_Rico"),
("America/Punta_Arenas", "America/Punta_Arenas"),
("America/Rainy_River", "America/Rainy_River"),
("America/Rankin_Inlet", "America/Rankin_Inlet"),
("America/Recife", "America/Recife"),
("America/Regina", "America/Regina"),
("America/Resolute", "America/Resolute"),
("America/Rio_Branco", "America/Rio_Branco"),
("America/Rosario", "America/Rosario"),
("America/Santa_Isabel", "America/Santa_Isabel"),
("America/Santarem", "America/Santarem"),
("America/Santiago", "America/Santiago"),
("America/Santo_Domingo", "America/Santo_Domingo"),
("America/Sao_Paulo", "America/Sao_Paulo"),
("America/Scoresbysund", "America/Scoresbysund"),
("America/Shiprock", "America/Shiprock"),
("America/Sitka", "America/Sitka"),
("America/St_Barthelemy", "America/St_Barthelemy"),
("America/St_Johns", "America/St_Johns"),
("America/St_Kitts", "America/St_Kitts"),
("America/St_Lucia", "America/St_Lucia"),
("America/St_Thomas", "America/St_Thomas"),
("America/St_Vincent", "America/St_Vincent"),
("America/Swift_Current", "America/Swift_Current"),
("America/Tegucigalpa", "America/Tegucigalpa"),
("America/Thule", "America/Thule"),
("America/Thunder_Bay", "America/Thunder_Bay"),
("America/Tijuana", "America/Tijuana"),
("America/Toronto", "America/Toronto"),
("America/Tortola", "America/Tortola"),
("America/Vancouver", "America/Vancouver"),
("America/Virgin", "America/Virgin"),
("America/Whitehorse", "America/Whitehorse"),
("America/Winnipeg", "America/Winnipeg"),
("America/Yakutat", "America/Yakutat"),
("America/Yellowknife", "America/Yellowknife"),
("Antarctica/Casey", "Antarctica/Casey"),
("Antarctica/Davis", "Antarctica/Davis"),
("Antarctica/DumontDUrville", "Antarctica/DumontDUrville"),
("Antarctica/Macquarie", "Antarctica/Macquarie"),
("Antarctica/Mawson", "Antarctica/Mawson"),
("Antarctica/McMurdo", "Antarctica/McMurdo"),
("Antarctica/Palmer", "Antarctica/Palmer"),
("Antarctica/Rothera", "Antarctica/Rothera"),
("Antarctica/South_Pole", "Antarctica/South_Pole"),
("Antarctica/Syowa", "Antarctica/Syowa"),
("Antarctica/Troll", "Antarctica/Troll"),
("Antarctica/Vostok", "Antarctica/Vostok"),
("Arctic/Longyearbyen", "Arctic/Longyearbyen"),
("Asia/Aden", "Asia/Aden"),
("Asia/Almaty", "Asia/Almaty"),
("Asia/Amman", "Asia/Amman"),
("Asia/Anadyr", "Asia/Anadyr"),
("Asia/Aqtau", "Asia/Aqtau"),
("Asia/Aqtobe", "Asia/Aqtobe"),
("Asia/Ashgabat", "Asia/Ashgabat"),
("Asia/Ashkhabad", "Asia/Ashkhabad"),
("Asia/Atyrau", "Asia/Atyrau"),
("Asia/Baghdad", "Asia/Baghdad"),
("Asia/Bahrain", "Asia/Bahrain"),
("Asia/Baku", "Asia/Baku"),
("Asia/Bangkok", "Asia/Bangkok"),
("Asia/Barnaul", "Asia/Barnaul"),
("Asia/Beirut", "Asia/Beirut"),
("Asia/Bishkek", "Asia/Bishkek"),
("Asia/Brunei", "Asia/Brunei"),
("Asia/Calcutta", "Asia/Calcutta"),
("Asia/Chita", "Asia/Chita"),
("Asia/Choibalsan", "Asia/Choibalsan"),
("Asia/Chongqing", "Asia/Chongqing"),
("Asia/Chungking", "Asia/Chungking"),
("Asia/Colombo", "Asia/Colombo"),
("Asia/Dacca", "Asia/Dacca"),
("Asia/Damascus", "Asia/Damascus"),
("Asia/Dhaka", "Asia/Dhaka"),
("Asia/Dili", "Asia/Dili"),
("Asia/Dubai", "Asia/Dubai"),
("Asia/Dushanbe", "Asia/Dushanbe"),
("Asia/Famagusta", "Asia/Famagusta"),
("Asia/Gaza", "Asia/Gaza"),
("Asia/Harbin", "Asia/Harbin"),
("Asia/Hebron", "Asia/Hebron"),
("Asia/Ho_Chi_Minh", "Asia/Ho_Chi_Minh"),
("Asia/Hong_Kong", "Asia/Hong_Kong"),
("Asia/Hovd", "Asia/Hovd"),
("Asia/Irkutsk", "Asia/Irkutsk"),
("Asia/Istanbul", "Asia/Istanbul"),
("Asia/Jakarta", "Asia/Jakarta"),
("Asia/Jayapura", "Asia/Jayapura"),
("Asia/Jerusalem", "Asia/Jerusalem"),
("Asia/Kabul", "Asia/Kabul"),
("Asia/Kamchatka", "Asia/Kamchatka"),
("Asia/Karachi", "Asia/Karachi"),
("Asia/Kashgar", "Asia/Kashgar"),
("Asia/Kathmandu", "Asia/Kathmandu"),
("Asia/Katmandu", "Asia/Katmandu"),
("Asia/Khandyga", "Asia/Khandyga"),
("Asia/Kolkata", "Asia/Kolkata"),
("Asia/Krasnoyarsk", "Asia/Krasnoyarsk"),
("Asia/Kuala_Lumpur", "Asia/Kuala_Lumpur"),
("Asia/Kuching", "Asia/Kuching"),
("Asia/Kuwait", "Asia/Kuwait"),
("Asia/Macao", "Asia/Macao"),
("Asia/Macau", "Asia/Macau"),
("Asia/Magadan", "Asia/Magadan"),
("Asia/Makassar", "Asia/Makassar"),
("Asia/Manila", "Asia/Manila"),
("Asia/Muscat", "Asia/Muscat"),
("Asia/Nicosia", "Asia/Nicosia"),
("Asia/Novokuznetsk", "Asia/Novokuznetsk"),
("Asia/Novosibirsk", "Asia/Novosibirsk"),
("Asia/Omsk", "Asia/Omsk"),
("Asia/Oral", "Asia/Oral"),
("Asia/Phnom_Penh", "Asia/Phnom_Penh"),
("Asia/Pontianak", "Asia/Pontianak"),
("Asia/Pyongyang", "Asia/Pyongyang"),
("Asia/Qatar", "Asia/Qatar"),
("Asia/Qostanay", "Asia/Qostanay"),
("Asia/Qyzylorda", "Asia/Qyzylorda"),
("Asia/Rangoon", "Asia/Rangoon"),
("Asia/Riyadh", "Asia/Riyadh"),
("Asia/Saigon", "Asia/Saigon"),
("Asia/Sakhalin", "Asia/Sakhalin"),
("Asia/Samarkand", "Asia/Samarkand"),
("Asia/Seoul", "Asia/Seoul"),
("Asia/Shanghai", "Asia/Shanghai"),
("Asia/Singapore", "Asia/Singapore"),
("Asia/Srednekolymsk", "Asia/Srednekolymsk"),
("Asia/Taipei", "Asia/Taipei"),
("Asia/Tashkent", "Asia/Tashkent"),
("Asia/Tbilisi", "Asia/Tbilisi"),
("Asia/Tehran", "Asia/Tehran"),
("Asia/Tel_Aviv", "Asia/Tel_Aviv"),
("Asia/Thimbu", "Asia/Thimbu"),
("Asia/Thimphu", "Asia/Thimphu"),
("Asia/Tokyo", "Asia/Tokyo"),
("Asia/Tomsk", "Asia/Tomsk"),
("Asia/Ujung_Pandang", "Asia/Ujung_Pandang"),
("Asia/Ulaanbaatar", "Asia/Ulaanbaatar"),
("Asia/Ulan_Bator", "Asia/Ulan_Bator"),
("Asia/Urumqi", "Asia/Urumqi"),
("Asia/Ust-Nera", "Asia/Ust-Nera"),
("Asia/Vientiane", "Asia/Vientiane"),
("Asia/Vladivostok", "Asia/Vladivostok"),
("Asia/Yakutsk", "Asia/Yakutsk"),
("Asia/Yangon", "Asia/Yangon"),
("Asia/Yekaterinburg", "Asia/Yekaterinburg"),
("Asia/Yerevan", "Asia/Yerevan"),
("Atlantic/Azores", "Atlantic/Azores"),
("Atlantic/Bermuda", "Atlantic/Bermuda"),
("Atlantic/Canary", "Atlantic/Canary"),
("Atlantic/Cape_Verde", "Atlantic/Cape_Verde"),
("Atlantic/Faeroe", "Atlantic/Faeroe"),
("Atlantic/Faroe", "Atlantic/Faroe"),
("Atlantic/Jan_Mayen", "Atlantic/Jan_Mayen"),
("Atlantic/Madeira", "Atlantic/Madeira"),
("Atlantic/Reykjavik", "Atlantic/Reykjavik"),
("Atlantic/South_Georgia", "Atlantic/South_Georgia"),
("Atlantic/St_Helena", "Atlantic/St_Helena"),
("Atlantic/Stanley", "Atlantic/Stanley"),
("Australia/ACT", "Australia/ACT"),
("Australia/Adelaide", "Australia/Adelaide"),
("Australia/Brisbane", "Australia/Brisbane"),
("Australia/Broken_Hill", "Australia/Broken_Hill"),
("Australia/Canberra", "Australia/Canberra"),
("Australia/Currie", "Australia/Currie"),
("Australia/Darwin", "Australia/Darwin"),
("Australia/Eucla", "Australia/Eucla"),
("Australia/Hobart", "Australia/Hobart"),
("Australia/LHI", "Australia/LHI"),
("Australia/Lindeman", "Australia/Lindeman"),
("Australia/Lord_Howe", "Australia/Lord_Howe"),
("Australia/Melbourne", "Australia/Melbourne"),
("Australia/NSW", "Australia/NSW"),
("Australia/North", "Australia/North"),
("Australia/Perth", "Australia/Perth"),
("Australia/Queensland", "Australia/Queensland"),
("Australia/South", "Australia/South"),
("Australia/Sydney", "Australia/Sydney"),
("Australia/Tasmania", "Australia/Tasmania"),
("Australia/Victoria", "Australia/Victoria"),
("Australia/West", "Australia/West"),
("Australia/Yancowinna", "Australia/Yancowinna"),
("Brazil/Acre", "Brazil/Acre"),
("Brazil/DeNoronha", "Brazil/DeNoronha"),
("Brazil/East", "Brazil/East"),
("Brazil/West", "Brazil/West"),
("CET", "CET"),
("CST6CDT", "CST6CDT"),
("Canada/Atlantic", "Canada/Atlantic"),
("Canada/Central", "Canada/Central"),
("Canada/Eastern", "Canada/Eastern"),
("Canada/Mountain", "Canada/Mountain"),
("Canada/Newfoundland", "Canada/Newfoundland"),
("Canada/Pacific", "Canada/Pacific"),
("Canada/Saskatchewan", "Canada/Saskatchewan"),
("Canada/Yukon", "Canada/Yukon"),
("Chile/Continental", "Chile/Continental"),
("Chile/EasterIsland", "Chile/EasterIsland"),
("Cuba", "Cuba"),
("EET", "EET"),
("EST", "EST"),
("EST5EDT", "EST5EDT"),
("Egypt", "Egypt"),
("Eire", "Eire"),
("Etc/GMT", "Etc/GMT"),
("Etc/GMT+0", "Etc/GMT+0"),
("Etc/GMT+1", "Etc/GMT+1"),
("Etc/GMT+10", "Etc/GMT+10"),
("Etc/GMT+11", "Etc/GMT+11"),
("Etc/GMT+12", "Etc/GMT+12"),
("Etc/GMT+2", "Etc/GMT+2"),
("Etc/GMT+3", "Etc/GMT+3"),
("Etc/GMT+4", "Etc/GMT+4"),
("Etc/GMT+5", "Etc/GMT+5"),
("Etc/GMT+6", "Etc/GMT+6"),
("Etc/GMT+7", "Etc/GMT+7"),
("Etc/GMT+8", "Etc/GMT+8"),
("Etc/GMT+9", "Etc/GMT+9"),
("Etc/GMT-0", "Etc/GMT-0"),
("Etc/GMT-1", "Etc/GMT-1"),
("Etc/GMT-10", "Etc/GMT-10"),
("Etc/GMT-11", "Etc/GMT-11"),
("Etc/GMT-12", "Etc/GMT-12"),
("Etc/GMT-13", "Etc/GMT-13"),
("Etc/GMT-14", "Etc/GMT-14"),
("Etc/GMT-2", "Etc/GMT-2"),
("Etc/GMT-3", "Etc/GMT-3"),
("Etc/GMT-4", "Etc/GMT-4"),
("Etc/GMT-5", "Etc/GMT-5"),
("Etc/GMT-6", "Etc/GMT-6"),
("Etc/GMT-7", "Etc/GMT-7"),
("Etc/GMT-8", "Etc/GMT-8"),
("Etc/GMT-9", "Etc/GMT-9"),
("Etc/GMT0", "Etc/GMT0"),
("Etc/Greenwich", "Etc/Greenwich"),
("Etc/UCT", "Etc/UCT"),
("Etc/UTC", "Etc/UTC"),
("Etc/Universal", "Etc/Universal"),
("Etc/Zulu", "Etc/Zulu"),
("Europe/Amsterdam", "Europe/Amsterdam"),
("Europe/Andorra", "Europe/Andorra"),
("Europe/Astrakhan", "Europe/Astrakhan"),
("Europe/Athens", "Europe/Athens"),
("Europe/Belfast", "Europe/Belfast"),
("Europe/Belgrade", "Europe/Belgrade"),
("Europe/Berlin", "Europe/Berlin"),
("Europe/Bratislava", "Europe/Bratislava"),
("Europe/Brussels", "Europe/Brussels"),
("Europe/Bucharest", "Europe/Bucharest"),
("Europe/Budapest", "Europe/Budapest"),
("Europe/Busingen", "Europe/Busingen"),
("Europe/Chisinau", "Europe/Chisinau"),
("Europe/Copenhagen", "Europe/Copenhagen"),
("Europe/Dublin", "Europe/Dublin"),
("Europe/Gibraltar", "Europe/Gibraltar"),
("Europe/Guernsey", "Europe/Guernsey"),
("Europe/Helsinki", "Europe/Helsinki"),
("Europe/Isle_of_Man", "Europe/Isle_of_Man"),
("Europe/Istanbul", "Europe/Istanbul"),
("Europe/Jersey", "Europe/Jersey"),
("Europe/Kaliningrad", "Europe/Kaliningrad"),
("Europe/Kiev", "Europe/Kiev"),
("Europe/Kirov", "Europe/Kirov"),
("Europe/Kyiv", "Europe/Kyiv"),
("Europe/Lisbon", "Europe/Lisbon"),
("Europe/Ljubljana", "Europe/Ljubljana"),
("Europe/London", "Europe/London"),
("Europe/Luxembourg", "Europe/Luxembourg"),
("Europe/Madrid", "Europe/Madrid"),
("Europe/Malta", "Europe/Malta"),
("Europe/Mariehamn", "Europe/Mariehamn"),
("Europe/Minsk", "Europe/Minsk"),
("Europe/Monaco", "Europe/Monaco"),
("Europe/Moscow", "Europe/Moscow"),
("Europe/Nicosia", "Europe/Nicosia"),
("Europe/Oslo", "Europe/Oslo"),
("Europe/Paris", "Europe/Paris"),
("Europe/Podgorica", "Europe/Podgorica"),
("Europe/Prague", "Europe/Prague"),
("Europe/Riga", "Europe/Riga"),
("Europe/Rome", "Europe/Rome"),
("Europe/Samara", "Europe/Samara"),
("Europe/San_Marino", "Europe/San_Marino"),
("Europe/Sarajevo", "Europe/Sarajevo"),
("Europe/Saratov", "Europe/Saratov"),
("Europe/Simferopol", "Europe/Simferopol"),
("Europe/Skopje", "Europe/Skopje"),
("Europe/Sofia", "Europe/Sofia"),
("Europe/Stockholm", "Europe/Stockholm"),
("Europe/Tallinn", "Europe/Tallinn"),
("Europe/Tirane", "Europe/Tirane"),
("Europe/Tiraspol", "Europe/Tiraspol"),
("Europe/Ulyanovsk", "Europe/Ulyanovsk"),
("Europe/Uzhgorod", "Europe/Uzhgorod"),
("Europe/Vaduz", "Europe/Vaduz"),
("Europe/Vatican", "Europe/Vatican"),
("Europe/Vienna", "Europe/Vienna"),
("Europe/Vilnius", "Europe/Vilnius"),
("Europe/Volgograd", "Europe/Volgograd"),
("Europe/Warsaw", "Europe/Warsaw"),
("Europe/Zagreb", "Europe/Zagreb"),
("Europe/Zaporozhye", "Europe/Zaporozhye"),
("Europe/Zurich", "Europe/Zurich"),
("GB", "GB"),
("GB-Eire", "GB-Eire"),
("GMT", "GMT"),
("GMT+0", "GMT+0"),
("GMT-0", "GMT-0"),
("GMT0", "GMT0"),
("Greenwich", "Greenwich"),
("HST", "HST"),
("Hongkong", "Hongkong"),
("Iceland", "Iceland"),
("Indian/Antananarivo", "Indian/Antananarivo"),
("Indian/Chagos", "Indian/Chagos"),
("Indian/Christmas", "Indian/Christmas"),
("Indian/Cocos", "Indian/Cocos"),
("Indian/Comoro", "Indian/Comoro"),
("Indian/Kerguelen", "Indian/Kerguelen"),
("Indian/Mahe", "Indian/Mahe"),
("Indian/Maldives", "Indian/Maldives"),
("Indian/Mauritius", "Indian/Mauritius"),
("Indian/Mayotte", "Indian/Mayotte"),
("Indian/Reunion", "Indian/Reunion"),
("Iran", "Iran"),
("Israel", "Israel"),
("Jamaica", "Jamaica"),
("Japan", "Japan"),
("Kwajalein", "Kwajalein"),
("Libya", "Libya"),
("MET", "MET"),
("MST", "MST"),
("MST7MDT", "MST7MDT"),
("Mexico/BajaNorte", "Mexico/BajaNorte"),
("Mexico/BajaSur", "Mexico/BajaSur"),
("Mexico/General", "Mexico/General"),
("NZ", "NZ"),
("NZ-CHAT", "NZ-CHAT"),
("Navajo", "Navajo"),
("PRC", "PRC"),
("PST8PDT", "PST8PDT"),
("Pacific/Apia", "Pacific/Apia"),
("Pacific/Auckland", "Pacific/Auckland"),
("Pacific/Bougainville", "Pacific/Bougainville"),
("Pacific/Chatham", "Pacific/Chatham"),
("Pacific/Chuuk", "Pacific/Chuuk"),
("Pacific/Easter", "Pacific/Easter"),
("Pacific/Efate", "Pacific/Efate"),
("Pacific/Enderbury", "Pacific/Enderbury"),
("Pacific/Fakaofo", "Pacific/Fakaofo"),
("Pacific/Fiji", "Pacific/Fiji"),
("Pacific/Funafuti", "Pacific/Funafuti"),
("Pacific/Galapagos", "Pacific/Galapagos"),
("Pacific/Gambier", "Pacific/Gambier"),
("Pacific/Guadalcanal", "Pacific/Guadalcanal"),
("Pacific/Guam", "Pacific/Guam"),
("Pacific/Honolulu", "Pacific/Honolulu"),
("Pacific/Johnston", "Pacific/Johnston"),
("Pacific/Kanton", "Pacific/Kanton"),
("Pacific/Kiritimati", "Pacific/Kiritimati"),
("Pacific/Kosrae", "Pacific/Kosrae"),
("Pacific/Kwajalein", "Pacific/Kwajalein"),
("Pacific/Majuro", "Pacific/Majuro"),
("Pacific/Marquesas", "Pacific/Marquesas"),
("Pacific/Midway", "Pacific/Midway"),
("Pacific/Nauru", "Pacific/Nauru"),
("Pacific/Niue", "Pacific/Niue"),
("Pacific/Norfolk", "Pacific/Norfolk"),
("Pacific/Noumea", "Pacific/Noumea"),
("Pacific/Pago_Pago", "Pacific/Pago_Pago"),
("Pacific/Palau", "Pacific/Palau"),
("Pacific/Pitcairn", "Pacific/Pitcairn"),
("Pacific/Pohnpei", "Pacific/Pohnpei"),
("Pacific/Ponape", "Pacific/Ponape"),
("Pacific/Port_Moresby", "Pacific/Port_Moresby"),
("Pacific/Rarotonga", "Pacific/Rarotonga"),
("Pacific/Saipan", "Pacific/Saipan"),
("Pacific/Samoa", "Pacific/Samoa"),
("Pacific/Tahiti", "Pacific/Tahiti"),
("Pacific/Tarawa", "Pacific/Tarawa"),
("Pacific/Tongatapu", "Pacific/Tongatapu"),
("Pacific/Truk", "Pacific/Truk"),
("Pacific/Wake", "Pacific/Wake"),
("Pacific/Wallis", "Pacific/Wallis"),
("Pacific/Yap", "Pacific/Yap"),
("Poland", "Poland"),
("Portugal", "Portugal"),
("ROC", "ROC"),
("ROK", "ROK"),
("Singapore", "Singapore"),
("Turkey", "Turkey"),
("UCT", "UCT"),
("US/Alaska", "US/Alaska"),
("US/Aleutian", "US/Aleutian"),
("US/Arizona", "US/Arizona"),
("US/Central", "US/Central"),
("US/East-Indiana", "US/East-Indiana"),
("US/Eastern", "US/Eastern"),
("US/Hawaii", "US/Hawaii"),
("US/Indiana-Starke", "US/Indiana-Starke"),
("US/Michigan", "US/Michigan"),
("US/Mountain", "US/Mountain"),
("US/Pacific", "US/Pacific"),
("US/Samoa", "US/Samoa"),
("UTC", "UTC"),
("Universal", "Universal"),
("W-SU", "W-SU"),
("WET", "WET"),
("Zulu", "Zulu"),
],
max_length=255,
null=True,
),
),
]

File diff suppressed because it is too large

View File

@@ -4,7 +4,7 @@ from tacticalrmm.permissions import _has_perm, _has_perm_on_agent
class AgentPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
if r.method == "GET":
if "agent_id" in view.kwargs.keys():
return _has_perm(r, "can_list_agents") and _has_perm_on_agent(
@@ -26,73 +26,76 @@ class AgentPerms(permissions.BasePermission):
class RecoverAgentPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
if "agent_id" not in view.kwargs.keys():
return _has_perm(r, "can_recover_agents")
return _has_perm(r, "can_recover_agents") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class MeshPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_use_mesh") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class UpdateAgentPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_update_agents")
class PingAgentPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_ping_agents") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class ManageProcPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_manage_procs") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class EvtLogPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_view_eventlogs") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class SendCMDPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_send_cmd") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class RebootAgentPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_reboot_agents") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class InstallAgentPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_install_agents")
class RunScriptPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_run_scripts") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class AgentNotesPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
# permissions for GET /agents/notes/ endpoint
if r.method == "GET":
@@ -109,12 +112,12 @@ class AgentNotesPerms(permissions.BasePermission):
class RunBulkPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_run_bulk")
class AgentHistoryPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
if "agent_id" in view.kwargs.keys():
return _has_perm(r, "can_list_agent_history") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
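These classes plug into DRF views via permission_classes; has_permission runs before the handler and sees the URL kwargs on view.kwargs. A minimal sketch of wiring one in (the view name and import path are assumptions, not part of this diff):

from rest_framework.response import Response
from rest_framework.views import APIView

from agents.permissions import MeshPerms  # import path assumed


class MeshConnectView(APIView):  # hypothetical view, for illustration only
    permission_classes = [MeshPerms]

    def get(self, request, agent_id):
        # reached only if can_use_mesh is granted and the user has access to this agent
        return Response({"agent_id": agent_id})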

View File

@@ -1,8 +1,10 @@
import pytz
from rest_framework import serializers
from tacticalrmm.constants import AGENT_STATUS_ONLINE
from winupdate.serializers import WinUpdatePolicySerializer
from .models import Agent, AgentCustomField, Note, AgentHistory
from .models import Agent, AgentCustomField, AgentHistory, Note
class AgentCustomFieldSerializer(serializers.ModelSerializer):
@@ -38,25 +40,61 @@ class AgentSerializer(serializers.ModelSerializer):
client = serializers.ReadOnlyField(source="client.name")
site_name = serializers.ReadOnlyField(source="site.name")
custom_fields = AgentCustomFieldSerializer(many=True, read_only=True)
patches_last_installed = serializers.ReadOnlyField()
last_seen = serializers.ReadOnlyField()
applied_policies = serializers.SerializerMethodField()
effective_patch_policy = serializers.SerializerMethodField()
alert_template = serializers.SerializerMethodField()
def get_alert_template(self, obj):
from alerts.serializers import AlertTemplateSerializer
return (
AlertTemplateSerializer(obj.alert_template).data
if obj.alert_template
else None
)
def get_effective_patch_policy(self, obj):
return WinUpdatePolicySerializer(obj.get_patch_policy()).data
def get_applied_policies(self, obj):
from automation.serializers import PolicySerializer
policies = obj.get_agent_policies()
# need to serialize model objects manually
for key, policy in policies.items():
if policy:
policies[key] = PolicySerializer(policy).data
return policies
def get_all_timezones(self, obj):
return pytz.all_timezones
class Meta:
model = Agent
exclude = ["last_seen", "id"]
exclude = ["id"]
class AgentTableSerializer(serializers.ModelSerializer):
status = serializers.ReadOnlyField()
checks = serializers.ReadOnlyField()
last_seen = serializers.SerializerMethodField()
client_name = serializers.ReadOnlyField(source="client.name")
site_name = serializers.ReadOnlyField(source="site.name")
logged_username = serializers.SerializerMethodField()
italic = serializers.SerializerMethodField()
policy = serializers.ReadOnlyField(source="policy.id")
alert_template = serializers.SerializerMethodField()
last_seen = serializers.ReadOnlyField()
pending_actions_count = serializers.ReadOnlyField()
has_patches_pending = serializers.ReadOnlyField()
cpu_model = serializers.ReadOnlyField()
graphics = serializers.ReadOnlyField()
local_ips = serializers.ReadOnlyField()
make_model = serializers.ReadOnlyField()
physical_disks = serializers.ReadOnlyField()
def get_alert_template(self, obj):
@@ -70,16 +108,8 @@ class AgentTableSerializer(serializers.ModelSerializer):
"always_alert": obj.alert_template.agent_always_alert,
}
def get_last_seen(self, obj) -> str:
if obj.time_zone is not None:
agent_tz = pytz.timezone(obj.time_zone)
else:
agent_tz = self.context["default_tz"]
return obj.last_seen.astimezone(agent_tz).strftime("%m %d %Y %H:%M")
def get_logged_username(self, obj) -> str:
if obj.logged_in_username == "None" and obj.status == "online":
if obj.logged_in_username == "None" and obj.status == AGENT_STATUS_ONLINE:
return obj.last_logged_in_user
elif obj.logged_in_username != "None":
return obj.logged_in_username
@@ -87,7 +117,7 @@ class AgentTableSerializer(serializers.ModelSerializer):
return "-"
def get_italic(self, obj) -> bool:
return obj.logged_in_username == "None" and obj.status == "online"
return obj.logged_in_username == "None" and obj.status == AGENT_STATUS_ONLINE
class Meta:
model = Agent
@@ -100,7 +130,6 @@ class AgentTableSerializer(serializers.ModelSerializer):
"monitoring_type",
"description",
"needs_reboot",
"has_patches_pending",
"pending_actions_count",
"status",
"overdue_text_alert",
@@ -114,16 +143,21 @@ class AgentTableSerializer(serializers.ModelSerializer):
"italic",
"policy",
"block_policy_inheritance",
"plat",
"goarch",
"has_patches_pending",
"version",
"operating_system",
"public_ip",
"cpu_model",
"graphics",
"local_ips",
"make_model",
"physical_disks",
]
depth = 2
class WinAgentSerializer(serializers.ModelSerializer):
class Meta:
model = Agent
fields = "__all__"
class AgentHostnameSerializer(serializers.ModelSerializer):
client = serializers.ReadOnlyField(source="client.name")
site = serializers.ReadOnlyField(source="site.name")
@@ -150,17 +184,12 @@ class AgentNoteSerializer(serializers.ModelSerializer):
class AgentHistorySerializer(serializers.ModelSerializer):
time = serializers.SerializerMethodField(read_only=True)
script_name = serializers.ReadOnlyField(source="script.name")
class Meta:
model = AgentHistory
fields = "__all__"
def get_time(self, history):
tz = self.context["default_tz"]
return history.time.astimezone(tz).strftime("%m %d %Y %H:%M:%S")
class AgentAuditSerializer(serializers.ModelSerializer):
class Meta:

View File

@@ -1,122 +1,52 @@
import asyncio
import datetime as dt
import random
from time import sleep
from typing import Union
from typing import TYPE_CHECKING, Optional
from alerts.models import Alert
from core.models import CoreSettings
from django.conf import settings
from django.core.management import call_command
from django.utils import timezone as djangotime
from logs.models import DebugLog, PendingAction
from packaging import version as pyver
from scripts.models import Script
from tacticalrmm.celery import app
from tacticalrmm.utils import run_nats_api_cmd
from agents.models import Agent
from agents.utils import get_winagent_url
from core.utils import get_core_settings
from logs.models import DebugLog
from scripts.models import Script
from tacticalrmm.celery import app
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_OVERDUE,
CheckStatus,
DebugLogType,
)
def agent_update(agent_id: str, force: bool = False) -> str:
agent = Agent.objects.get(agent_id=agent_id)
if pyver.parse(agent.version) <= pyver.parse("1.3.0"):
return "not supported"
# skip if we can't determine the arch
if agent.arch is None:
DebugLog.warning(
agent=agent,
log_type="agent_issues",
message=f"Unable to determine arch on {agent.hostname}({agent.agent_id}). Skipping agent update.",
)
return "noarch"
version = settings.LATEST_AGENT_VER
inno = agent.win_inno_exe
url = get_winagent_url(agent.arch)
if not force:
if agent.pendingactions.filter(
action_type="agentupdate", status="pending"
).exists():
agent.pendingactions.filter(
action_type="agentupdate", status="pending"
).delete()
PendingAction.objects.create(
agent=agent,
action_type="agentupdate",
details={
"url": url,
"version": version,
"inno": inno,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": version,
"inno": inno,
},
}
asyncio.run(agent.nats_cmd(nats_data, wait=False))
return "created"
if TYPE_CHECKING:
from django.db.models.query import QuerySet
@app.task
def force_code_sign(agent_ids: list[str]) -> None:
chunks = (agent_ids[i : i + 50] for i in range(0, len(agent_ids), 50))
for chunk in chunks:
for agent_id in chunk:
agent_update(agent_id=agent_id, force=True)
sleep(0.05)
sleep(4)
@app.task
def send_agent_update_task(agent_ids: list[str]) -> None:
chunks = (agent_ids[i : i + 30] for i in range(0, len(agent_ids), 30))
for chunk in chunks:
for agent_id in chunk:
agent_update(agent_id)
sleep(0.05)
sleep(4)
def send_agent_update_task(*, agent_ids: list[str], token: str, force: bool) -> None:
agents: "QuerySet[Agent]" = Agent.objects.defer(*AGENT_DEFER).filter(
agent_id__in=agent_ids
)
for agent in agents:
agent.do_update(token=token, force=force)
@app.task
def auto_self_agent_update_task() -> None:
core = CoreSettings.objects.first()
if not core.agent_auto_update: # type:ignore
return
q = Agent.objects.only("agent_id", "version")
agent_ids: list[str] = [
i.agent_id
for i in q
if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER)
]
chunks = (agent_ids[i : i + 30] for i in range(0, len(agent_ids), 30))
for chunk in chunks:
for agent_id in chunk:
agent_update(agent_id)
sleep(0.05)
sleep(4)
call_command("update_agents")
@app.task
def agent_outage_email_task(pk: int, alert_interval: Union[float, None] = None) -> str:
def agent_outage_email_task(pk: int, alert_interval: Optional[float] = None) -> str:
from alerts.models import Alert
alert = Alert.objects.get(pk=pk)
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
if not alert.email_sent:
sleep(random.randint(1, 15))
sleep(random.randint(1, 5))
alert.agent.send_outage_email()
alert.email_sent = djangotime.now()
alert.save(update_fields=["email_sent"])
@@ -125,7 +55,7 @@ def agent_outage_email_task(pk: int, alert_interval: Union[float, None] = None)
# send an email only if the last email sent is older than alert interval
delta = djangotime.now() - dt.timedelta(days=alert_interval)
if alert.email_sent < delta:
sleep(random.randint(1, 10))
sleep(random.randint(1, 5))
alert.agent.send_outage_email()
alert.email_sent = djangotime.now()
alert.save(update_fields=["email_sent"])
@@ -137,8 +67,13 @@ def agent_outage_email_task(pk: int, alert_interval: Union[float, None] = None)
def agent_recovery_email_task(pk: int) -> str:
from alerts.models import Alert
sleep(random.randint(1, 15))
alert = Alert.objects.get(pk=pk)
sleep(random.randint(1, 5))
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
alert.agent.send_recovery_email()
alert.resolved_email_sent = djangotime.now()
alert.save(update_fields=["resolved_email_sent"])
@@ -147,13 +82,16 @@ def agent_recovery_email_task(pk: int) -> str:
@app.task
def agent_outage_sms_task(pk: int, alert_interval: Union[float, None] = None) -> str:
def agent_outage_sms_task(pk: int, alert_interval: Optional[float] = None) -> str:
from alerts.models import Alert
alert = Alert.objects.get(pk=pk)
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
if not alert.sms_sent:
sleep(random.randint(1, 15))
sleep(random.randint(1, 3))
alert.agent.send_outage_sms()
alert.sms_sent = djangotime.now()
alert.save(update_fields=["sms_sent"])
@@ -162,7 +100,7 @@ def agent_outage_sms_task(pk: int, alert_interval: Union[float, None] = None) ->
# send an sms only if the last sms sent is older than alert interval
delta = djangotime.now() - dt.timedelta(days=alert_interval)
if alert.sms_sent < delta:
sleep(random.randint(1, 10))
sleep(random.randint(1, 3))
alert.agent.send_outage_sms()
alert.sms_sent = djangotime.now()
alert.save(update_fields=["sms_sent"])
@@ -175,7 +113,11 @@ def agent_recovery_sms_task(pk: int) -> str:
from alerts.models import Alert
sleep(random.randint(1, 3))
alert = Alert.objects.get(pk=pk)
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
alert.agent.send_recovery_sms()
alert.resolved_sms_sent = djangotime.now()
alert.save(update_fields=["resolved_sms_sent"])
@@ -199,7 +141,7 @@ def agent_outages_task() -> None:
)
for agent in agents:
if agent.status == "overdue":
if agent.status == AGENT_STATUS_OVERDUE:
Alert.handle_alert_failure(agent)
@@ -211,6 +153,7 @@ def run_script_email_results_task(
emails: list[str],
args: list[str] = [],
history_pk: int = 0,
run_as_user: bool = False,
):
agent = Agent.objects.get(pk=agentpk)
script = Script.objects.get(pk=scriptpk)
@@ -221,16 +164,17 @@ def run_script_email_results_task(
timeout=nats_timeout,
wait=True,
history_pk=history_pk,
run_as_user=run_as_user,
)
if r == "timeout":
DebugLog.error(
agent=agent,
log_type="scripting",
log_type=DebugLogType.SCRIPTING,
message=f"{agent.hostname}({agent.pk}) timed out running script.",
)
return
CORE = CoreSettings.objects.first()
CORE = get_core_settings()
subject = f"{agent.hostname} {script.name} Results"
exec_time = "{:.4f}".format(r["execution_time"])
body = (
@@ -243,48 +187,48 @@ def run_script_email_results_task(
msg = EmailMessage()
msg["Subject"] = subject
msg["From"] = CORE.smtp_from_email # type:ignore
msg["From"] = CORE.smtp_from_email
if emails:
msg["To"] = ", ".join(emails)
else:
msg["To"] = ", ".join(CORE.email_alert_recipients) # type:ignore
msg["To"] = ", ".join(CORE.email_alert_recipients)
msg.set_content(body)
try:
with smtplib.SMTP(
CORE.smtp_host, CORE.smtp_port, timeout=20 # type:ignore
) as server: # type:ignore
if CORE.smtp_requires_auth: # type:ignore
with smtplib.SMTP(CORE.smtp_host, CORE.smtp_port, timeout=20) as server:
if CORE.smtp_requires_auth:
server.ehlo()
server.starttls()
server.login(
CORE.smtp_host_user, CORE.smtp_host_password # type:ignore
) # type:ignore
server.login(CORE.smtp_host_user, CORE.smtp_host_password)
server.send_message(msg)
server.quit()
else:
server.send_message(msg)
server.quit()
except Exception as e:
DebugLog.error(message=e)
DebugLog.error(message=str(e))
@app.task
def clear_faults_task(older_than_days: int) -> None:
# https://github.com/wh1te909/tacticalrmm/issues/484
from alerts.models import Alert
# https://github.com/amidaware/tacticalrmm/issues/484
agents = Agent.objects.exclude(last_seen__isnull=True).filter(
last_seen__lt=djangotime.now() - djangotime.timedelta(days=older_than_days)
)
for agent in agents:
if agent.agentchecks.exists():
for check in agent.agentchecks.all():
# reset check status
check.status = "passing"
check.save(update_fields=["status"])
if check.alert.filter(resolved=False).exists():
check.alert.get(resolved=False).resolve()
for check in agent.get_checks_with_policies():
# reset check status
if check.check_result:
check.check_result.status = CheckStatus.PASSING
check.check_result.save(update_fields=["status"])
if check.alert.filter(agent=agent, resolved=False).exists():
alert = Alert.create_or_return_check_alert(check, agent=agent)
if alert:
alert.resolve()
# reset overdue alerts
agent.overdue_email_alert = False
@@ -299,25 +243,6 @@ def clear_faults_task(older_than_days: int) -> None:
)
@app.task
def get_wmi_task() -> None:
agents = Agent.objects.only(
"pk", "agent_id", "last_seen", "overdue_time", "offline_time"
)
ids = [i.agent_id for i in agents if i.status == "online"]
run_nats_api_cmd("wmi", ids, timeout=45)
@app.task
def agent_checkin_task() -> None:
run_nats_api_cmd("checkin", timeout=30)
@app.task
def agent_getinfo_task() -> None:
run_nats_api_cmd("agentinfo", timeout=30)
@app.task
def prune_agent_history(older_than_days: int) -> str:
from .models import AgentHistory
@@ -330,42 +255,5 @@ def prune_agent_history(older_than_days: int) -> str:
@app.task
def handle_agents_task() -> None:
q = Agent.objects.prefetch_related("pendingactions", "autotasks").only(
"pk", "agent_id", "version", "last_seen", "overdue_time", "offline_time"
)
agents = [
i
for i in q
if pyver.parse(i.version) >= pyver.parse("1.6.0") and i.status == "online"
]
for agent in agents:
# change agent update pending status to completed if agent has just updated
if (
pyver.parse(agent.version) == pyver.parse(settings.LATEST_AGENT_VER)
and agent.pendingactions.filter(
action_type="agentupdate", status="pending"
).exists()
):
agent.pendingactions.filter(
action_type="agentupdate", status="pending"
).update(status="completed")
# sync scheduled tasks
if agent.autotasks.exclude(sync_status="synced").exists(): # type: ignore
tasks = agent.autotasks.exclude(sync_status="synced") # type: ignore
for task in tasks:
if task.sync_status == "pendingdeletion":
task.delete_task_on_agent()
elif task.sync_status == "initial":
task.modify_task_on_agent()
elif task.sync_status == "notsynced":
task.create_task_on_agent()
# handles any alerting actions
if Alert.objects.filter(agent=agent, resolved=False).exists():
try:
Alert.handle_alert_resolve(agent)
except:
continue
def bulk_recover_agents_task() -> None:
call_command("bulk_restart_agents")

View File

@@ -0,0 +1,106 @@
from unittest.mock import patch
from rest_framework.response import Response
from tacticalrmm.test import TacticalTestCase
class TestAgentInstalls(TacticalTestCase):
def setUp(self) -> None:
self.authenticate()
self.setup_coresettings()
self.setup_base_instance()
@patch("agents.utils.generate_linux_install")
@patch("knox.models.AuthToken.objects.create")
@patch("tacticalrmm.utils.generate_winagent_exe")
@patch("core.utils.token_is_valid")
@patch("agents.utils.get_agent_url")
def test_install_agent(
self,
mock_agent_url,
mock_token_valid,
mock_gen_win_exe,
mock_auth,
mock_linux_install,
):
mock_agent_url.return_value = "https://example.com"
mock_token_valid.return_value = "", False
mock_gen_win_exe.return_value = Response("ok")
mock_auth.return_value = "", "token"
mock_linux_install.return_value = Response("ok")
url = "/agents/installer/"
# test windows dynamic exe
data = {
"installMethod": "exe",
"client": self.site2.client.pk,
"site": self.site2.pk,
"expires": 24,
"agenttype": "server",
"power": 0,
"rdp": 1,
"ping": 0,
"goarch": "amd64",
"api": "https://api.example.com",
"fileName": "rmm-client-site-server.exe",
"plat": "windows",
}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
mock_gen_win_exe.assert_called_with(
client=self.site2.client.pk,
site=self.site2.pk,
agent_type="server",
rdp=1,
ping=0,
power=0,
goarch="amd64",
token="token",
api="https://api.example.com",
file_name="rmm-client-site-server.exe",
)
# test linux no code sign
data["plat"] = "linux"
data["installMethod"] = "bash"
data["rdp"] = 0
data["agenttype"] = "workstation"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 400)
# test linux
mock_token_valid.return_value = "token123", True
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
mock_linux_install.assert_called_with(
client=str(self.site2.client.pk),
site=str(self.site2.pk),
agent_type="workstation",
arch="amd64",
token="token",
api="https://api.example.com",
download_url="https://example.com",
)
# test manual
data["rdp"] = 1
data["installMethod"] = "manual"
r = self.client.post(url, data, format="json")
self.assertIn("rdp", r.json()["cmd"])
self.assertNotIn("power", r.json()["cmd"])
data.update({"ping": 1, "power": 1})
r = self.client.post(url, data, format="json")
self.assertIn("power", r.json()["cmd"])
self.assertIn("ping", r.json()["cmd"])
# test powershell
data["installMethod"] = "powershell"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.check_not_authenticated("post", url)
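Outside the Django test client, the same endpoint can be exercised over plain HTTP. A rough sketch using the requests library; the host, auth header, and payload values are placeholders that mirror the fields exercised in the test above:
import requests
payload = {
    "installMethod": "manual",
    "client": 1,
    "site": 1,
    "expires": 24,
    "agenttype": "workstation",
    "power": 0,
    "rdp": 1,
    "ping": 0,
    "goarch": "amd64",
    "api": "https://api.example.com",
    "fileName": "rmm-client-site-workstation.exe",
    "plat": "windows",
}
r = requests.post(
    "https://api.example.com/agents/installer/",
    json=payload,
    headers={"Authorization": "Token <api-key>"},  # assumed knox token auth scheme
    timeout=30,
)
print(r.json()["cmd"])  # the manual install command returned by the view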

View File

@@ -0,0 +1,313 @@
from unittest.mock import patch
from django.conf import settings
from django.core.management import call_command
from model_bakery import baker
from packaging import version as pyver
from agents.models import Agent
from agents.tasks import auto_self_agent_update_task, send_agent_update_task
from logs.models import PendingAction
from tacticalrmm.constants import (
AGENT_DEFER,
AgentMonType,
AgentPlat,
GoArch,
PAAction,
PAStatus,
)
from tacticalrmm.test import TacticalTestCase
class TestAgentUpdate(TacticalTestCase):
def setUp(self) -> None:
self.authenticate()
self.setup_coresettings()
self.setup_base_instance()
@patch("agents.management.commands.update_agents.send_agent_update_task.delay")
@patch("agents.management.commands.update_agents.token_is_valid")
@patch("agents.management.commands.update_agents.get_core_settings")
def test_update_agents_mgmt_command(self, mock_core, mock_token, mock_update):
mock_token.return_value = ("token123", True)
baker.make_recipe(
"agents.online_agent",
site=self.site1,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
version="2.0.3",
_quantity=6,
)
baker.make_recipe(
"agents.online_agent",
site=self.site3,
monitoring_type=AgentMonType.WORKSTATION,
plat=AgentPlat.LINUX,
version="2.0.3",
_quantity=5,
)
baker.make_recipe(
"agents.online_agent",
site=self.site2,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
version=settings.LATEST_AGENT_VER,
_quantity=8,
)
mock_core.return_value.agent_auto_update = False
call_command("update_agents")
mock_update.assert_not_called()
mock_core.return_value.agent_auto_update = True
call_command("update_agents")
ids = list(
Agent.objects.defer(*AGENT_DEFER)
.exclude(version=settings.LATEST_AGENT_VER)
.values_list("agent_id", flat=True)
)
mock_update.assert_called_with(agent_ids=ids, token="token123", force=False)
@patch("agents.models.Agent.nats_cmd")
@patch("agents.models.get_agent_url")
def test_do_update(self, mock_agent_url, mock_nats_cmd):
mock_agent_url.return_value = "https://example.com/123"
# test noarch
agent_noarch = baker.make_recipe(
"agents.online_agent",
site=self.site1,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
version="2.1.1",
)
r = agent_noarch.do_update(token="", force=True)
self.assertEqual(r, "noarch")
# test too old
agent_old = baker.make_recipe(
"agents.online_agent",
site=self.site2,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
version="1.3.0",
goarch=GoArch.AMD64,
)
r = agent_old.do_update(token="", force=True)
self.assertEqual(r, "not supported")
win = baker.make_recipe(
"agents.online_agent",
site=self.site1,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
version="2.1.1",
goarch=GoArch.AMD64,
)
lin = baker.make_recipe(
"agents.online_agent",
site=self.site3,
monitoring_type=AgentMonType.WORKSTATION,
plat=AgentPlat.LINUX,
version="2.1.1",
goarch=GoArch.ARM32,
)
# test windows agent update
r = win.do_update(token="", force=False)
self.assertEqual(r, "created")
mock_nats_cmd.assert_called_with(
{
"func": "agentupdate",
"payload": {
"url": "https://example.com/123",
"version": settings.LATEST_AGENT_VER,
"inno": f"tacticalagent-v{settings.LATEST_AGENT_VER}-windows-amd64.exe",
},
},
wait=False,
)
action1 = PendingAction.objects.get(agent__agent_id=win.agent_id)
self.assertEqual(action1.action_type, PAAction.AGENT_UPDATE)
self.assertEqual(action1.status, PAStatus.PENDING)
self.assertEqual(action1.details["url"], "https://example.com/123")
self.assertEqual(
action1.details["inno"],
f"tacticalagent-v{settings.LATEST_AGENT_VER}-windows-amd64.exe",
)
self.assertEqual(action1.details["version"], settings.LATEST_AGENT_VER)
mock_nats_cmd.reset_mock()
# test linux agent update
r = lin.do_update(token="", force=False)
mock_nats_cmd.assert_called_with(
{
"func": "agentupdate",
"payload": {
"url": "https://example.com/123",
"version": settings.LATEST_AGENT_VER,
"inno": f"tacticalagent-v{settings.LATEST_AGENT_VER}-linux-arm.exe",
},
},
wait=False,
)
action2 = PendingAction.objects.get(agent__agent_id=lin.agent_id)
self.assertEqual(action2.action_type, PAAction.AGENT_UPDATE)
self.assertEqual(action2.status, PAStatus.PENDING)
self.assertEqual(action2.details["url"], "https://example.com/123")
self.assertEqual(
action2.details["inno"],
f"tacticalagent-v{settings.LATEST_AGENT_VER}-linux-arm.exe",
)
self.assertEqual(action2.details["version"], settings.LATEST_AGENT_VER)
# check if old agent update pending actions are being deleted
# should only be 1 pending action at all times
pa_count = win.pendingactions.filter(
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).count()
self.assertEqual(pa_count, 1)
for _ in range(4):
win.do_update(token="", force=False)
pa_count = win.pendingactions.filter(
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).count()
self.assertEqual(pa_count, 1)
def test_auto_self_agent_update_task(self):
auto_self_agent_update_task()
@patch("agents.models.Agent.do_update")
def test_send_agent_update_task(self, mock_update):
baker.make_recipe(
"agents.online_agent",
site=self.site2,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
version="2.1.1",
goarch=GoArch.AMD64,
_quantity=6,
)
ids = list(
Agent.objects.defer(*AGENT_DEFER)
.exclude(version=settings.LATEST_AGENT_VER)
.values_list("agent_id", flat=True)
)
send_agent_update_task(agent_ids=ids, token="", force=False)
self.assertEqual(mock_update.call_count, 6)
@patch("agents.views.token_is_valid")
@patch("agents.tasks.send_agent_update_task.delay")
def test_update_agents(self, mock_update, mock_token):
mock_token.return_value = ("", False)
url = "/agents/update/"
baker.make_recipe(
"agents.online_agent",
site=self.site2,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
version="2.1.1",
goarch=GoArch.AMD64,
_quantity=7,
)
baker.make_recipe(
"agents.online_agent",
site=self.site2,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
version=settings.LATEST_AGENT_VER,
goarch=GoArch.AMD64,
_quantity=3,
)
baker.make_recipe(
"agents.online_agent",
site=self.site2,
monitoring_type=AgentMonType.WORKSTATION,
plat=AgentPlat.LINUX,
version="2.0.1",
goarch=GoArch.ARM32,
_quantity=9,
)
agent_ids: list[str] = list(
Agent.objects.only("agent_id").values_list("agent_id", flat=True)
)
data = {"agent_ids": agent_ids}
expected: list[str] = [
i.agent_id
for i in Agent.objects.only("agent_id", "version")
if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER)
]
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
mock_update.assert_called_with(agent_ids=expected, token="", force=False)
self.check_not_authenticated("post", url)
@patch("agents.views.token_is_valid")
@patch("agents.tasks.send_agent_update_task.delay")
def test_agent_update_permissions(self, update_task, mock_token):
mock_token.return_value = ("", False)
agents = baker.make_recipe("agents.agent", _quantity=5)
other_agents = baker.make_recipe("agents.agent", _quantity=7)
url = f"/agents/update/"
data = {
"agent_ids": [agent.agent_id for agent in agents]
+ [agent.agent_id for agent in other_agents]
}
# test superuser access
self.check_authorized_superuser("post", url, data)
update_task.assert_called_with(
agent_ids=data["agent_ids"], token="", force=False
)
update_task.reset_mock()
user = self.create_user_with_roles([])
self.client.force_authenticate(user=user)
self.check_not_authorized("post", url, data)
update_task.assert_not_called()
user.role.can_update_agents = True
user.role.save()
self.check_authorized("post", url, data)
update_task.assert_called_with(
agent_ids=data["agent_ids"], token="", force=False
)
update_task.reset_mock()
# limit to client
# user.role.can_view_clients.set([agents[0].client])
# self.check_authorized("post", url, data)
# update_task.assert_called_with(agent_ids=[agent.agent_id for agent in agents])
# update_task.reset_mock()
# add site
# user.role.can_view_sites.set([other_agents[0].site])
# self.check_authorized("post", url, data)
# update_task.assert_called_with(agent_ids=data["agent_ids"])
# update_task.reset_mock()
# remove client permissions
# user.role.can_view_clients.clear()
# self.check_authorized("post", url, data)
# update_task.assert_called_with(
# agent_ids=[agent.agent_id for agent in other_agents]
# )

View File

@@ -0,0 +1,60 @@
from unittest.mock import patch, AsyncMock
from django.conf import settings
from rest_framework.response import Response
from agents.utils import generate_linux_install, get_agent_url
from tacticalrmm.test import TacticalTestCase
class TestAgentUtils(TacticalTestCase):
def setUp(self) -> None:
self.authenticate()
self.setup_coresettings()
self.setup_base_instance()
def test_get_agent_url(self):
ver = settings.LATEST_AGENT_VER
# test without token
r = get_agent_url(goarch="amd64", plat="windows", token="")
expected = f"https://github.com/amidaware/rmmagent/releases/download/v{ver}/tacticalagent-v{ver}-windows-amd64.exe"
self.assertEqual(r, expected)
# test with token
r = get_agent_url(goarch="386", plat="linux", token="token123")
expected = f"https://{settings.AGENTS_URL}version={ver}&arch=386&token=token123&plat=linux&api=api.example.com"
@patch("agents.utils.get_mesh_device_id")
@patch("agents.utils.asyncio.run")
@patch("agents.utils.get_mesh_ws_url")
@patch("agents.utils.get_core_settings")
def test_generate_linux_install(
self, mock_core, mock_mesh, mock_async_run, mock_mesh_device_id
):
mock_mesh_device_id.return_value = "meshdeviceid"
mock_core.return_value.mesh_site = "meshsite"
mock_async_run.return_value = "meshid"
mock_mesh.return_value = "meshws"
r = generate_linux_install(
client="1",
site="1",
agent_type="server",
arch="amd64",
token="token123",
api="api.example.com",
download_url="asdasd3423",
)
ret = r.getvalue().decode("utf-8")
self.assertIn(r"agentDL='asdasd3423'", ret)
self.assertIn(
r"meshDL='meshsite/meshagents?id=meshid&installflags=2&meshinstall=6'", ret
)
self.assertIn(r"apiURL='api.example.com'", ret)
self.assertIn(r"agentDL='asdasd3423'", ret)
self.assertIn(r"token='token123'", ret)
self.assertIn(r"clientID='1'", ret)
self.assertIn(r"siteID='1'", ret)
self.assertIn(r"agentType='server'", ret)

View File

@@ -0,0 +1,46 @@
from unittest.mock import call, patch
from django.core.management import call_command
from model_bakery import baker
from tacticalrmm.constants import AgentMonType, AgentPlat
from tacticalrmm.test import TacticalTestCase
class TestBulkRestartAgents(TacticalTestCase):
def setUp(self) -> None:
self.authenticate()
self.setup_coresettings()
self.setup_base_instance()
@patch("core.management.commands.bulk_restart_agents.sleep")
@patch("agents.models.Agent.recover")
@patch("core.management.commands.bulk_restart_agents.get_mesh_ws_url")
def test_bulk_restart_agents_mgmt_cmd(
self, get_mesh_ws_url, recover, mock_sleep
) -> None:
get_mesh_ws_url.return_value = "https://mesh.example.com/test"
baker.make_recipe(
"agents.online_agent",
site=self.site1,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
)
baker.make_recipe(
"agents.online_agent",
site=self.site3,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.LINUX,
)
calls = [
call("tacagent", "https://mesh.example.com/test", wait=False),
call("mesh", "", wait=False),
]
call_command("bulk_restart_agents")
recover.assert_has_calls(calls)
mock_sleep.assert_called_with(10)

View File

@@ -0,0 +1,63 @@
from typing import TYPE_CHECKING
from unittest.mock import patch
from model_bakery import baker
from tacticalrmm.constants import AgentMonType, AgentPlat
from tacticalrmm.test import TacticalTestCase
if TYPE_CHECKING:
from clients.models import Client, Site
class TestRecovery(TacticalTestCase):
def setUp(self) -> None:
self.authenticate()
self.setup_coresettings()
self.client1: "Client" = baker.make("clients.Client")
self.site1: "Site" = baker.make("clients.Site", client=self.client1)
@patch("agents.models.Agent.recover")
@patch("agents.views.get_mesh_ws_url")
def test_recover(self, get_mesh_ws_url, recover) -> None:
get_mesh_ws_url.return_value = "https://mesh.example.com"
agent = baker.make_recipe(
"agents.online_agent",
site=self.site1,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
)
url = f"/agents/{agent.agent_id}/recover/"
# test successful tacticalagent recovery
data = {"mode": "tacagent"}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
recover.assert_called_with("tacagent", "https://mesh.example.com", wait=False)
get_mesh_ws_url.assert_called_once()
# reset mocks
recover.reset_mock()
get_mesh_ws_url.reset_mock()
# test successful mesh agent recovery
data = {"mode": "mesh"}
recover.return_value = ("ok", False)
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
get_mesh_ws_url.assert_not_called()
recover.assert_called_with("mesh", "")
# reset mocks
recover.reset_mock()
get_mesh_ws_url.reset_mock()
# test failed mesh agent recovery
data = {"mode": "mesh"}
recover.return_value = ("Unable to contact the agent", True)
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 400)
self.check_not_authenticated("post", url)

View File

@@ -1,10 +1,11 @@
from django.urls import path
from . import views
from checks.views import GetAddChecks
from autotasks.views import GetAddAutoTasks
from checks.views import GetAddChecks
from logs.views import PendingActions
from . import views
urlpatterns = [
# agent views
path("", views.GetAgents.as_view()),
@@ -40,5 +41,5 @@ urlpatterns = [
path("versions/", views.get_agent_versions),
path("update/", views.update_agents),
path("installer/", views.install_agent),
path("<str:arch>/getmeshexe/", views.get_mesh_exe),
path("bulkrecovery/", views.bulk_agent_recovery),
]

View File

@@ -1,40 +1,81 @@
import random
import asyncio
import tempfile
import urllib.parse
import requests
from django.conf import settings
from core.models import CodeSignToken
from django.http import FileResponse
from core.utils import get_core_settings, get_mesh_device_id, get_mesh_ws_url
from tacticalrmm.constants import MeshAgentIdent
def get_exegen_url() -> str:
    urls: list[str] = settings.EXE_GEN_URLS
    for url in urls:
        try:
            r = requests.get(url, timeout=10)
        except:
            continue
        if r.status_code == 200:
            return url
    return random.choice(urls)
def get_agent_url(*, goarch: str, plat: str, token: str = "") -> str:
    ver = settings.LATEST_AGENT_VER
    if token:
        params = {
            "version": ver,
            "arch": goarch,
            "token": token,
            "plat": plat,
            "api": settings.ALLOWED_HOSTS[0],
        }
        return settings.AGENTS_URL + urllib.parse.urlencode(params)
    return f"https://github.com/amidaware/rmmagent/releases/download/v{ver}/tacticalagent-v{ver}-{plat}-{goarch}.exe"
def get_winagent_url(arch: str) -> str:
    dl_url = settings.DL_32 if arch == "32" else settings.DL_64
    try:
        t: CodeSignToken = CodeSignToken.objects.first()  # type: ignore
        if t.is_valid:
            base_url = get_exegen_url() + "/api/v1/winagents/?"
            params = {
                "version": settings.LATEST_AGENT_VER,
                "arch": arch,
                "token": t.token,
            }
            dl_url = base_url + urllib.parse.urlencode(params)
    except:
        pass
    return dl_url
def generate_linux_install(
    client: str,
    site: str,
    agent_type: str,
    arch: str,
    token: str,
    api: str,
    download_url: str,
) -> FileResponse:
    match arch:
        case "amd64":
            arch_id = MeshAgentIdent.LINUX64
        case "386":
            arch_id = MeshAgentIdent.LINUX32
        case "arm64":
            arch_id = MeshAgentIdent.LINUX_ARM_64
        case "arm":
            arch_id = MeshAgentIdent.LINUX_ARM_HF
        case _:
            arch_id = "not_found"
    core = get_core_settings()
    uri = get_mesh_ws_url()
    mesh_id = asyncio.run(get_mesh_device_id(uri, core.mesh_device_group))
    mesh_dl = (
        f"{core.mesh_site}/meshagents?id={mesh_id}&installflags=2&meshinstall={arch_id}"
    )
    sh = settings.LINUX_AGENT_SCRIPT
    with open(sh, "r") as f:
        text = f.read()
    replace = {
        "agentDLChange": download_url,
        "meshDLChange": mesh_dl,
        "clientIDChange": client,
        "siteIDChange": site,
        "agentTypeChange": agent_type,
        "tokenChange": token,
        "apiURLChange": api,
    }
    for i, j in replace.items():
        text = text.replace(i, j)
    with tempfile.NamedTemporaryFile() as fp:
        with open(fp.name, "w") as f:
            f.write(text)
            f.write("\n")
        return FileResponse(
            open(fp.name, "rb"), as_attachment=True, filename="linux_agent_install.sh"
        )
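generate_linux_install builds the installer by plain placeholder substitution on the bundled shell template. A tiny self-contained illustration of the same technique; the template string and values here are made up:
template = "apiURL='apiURLChange'\ntoken='tokenChange'\n"
replace = {"apiURLChange": "https://api.example.com", "tokenChange": "abc123"}
for placeholder, value in replace.items():
    template = template.replace(placeholder, value)
print(template)  # apiURL='https://api.example.com' / token='abc123'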

View File

@@ -6,67 +6,105 @@ import string
import time
from django.conf import settings
from django.db.models import Count, Exists, OuterRef, Prefetch, Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.db.models import Q
from django.utils import timezone as djangotime
from meshctrl.utils import get_login_token
from packaging import version as pyver
from rest_framework import serializers
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.exceptions import PermissionDenied
from core.models import CoreSettings
from core.utils import (
get_core_settings,
get_mesh_ws_url,
remove_mesh_agent,
token_is_valid,
)
from logs.models import AuditLog, DebugLog, PendingAction
from scripts.models import Script
from scripts.tasks import handle_bulk_command_task, handle_bulk_script_task
from tacticalrmm.utils import get_default_timezone, notify_error, reload_nats
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_TABLE_DEFER,
AGENT_STATUS_OFFLINE,
AGENT_STATUS_ONLINE,
AgentHistoryType,
AgentMonType,
AgentPlat,
CustomFieldModel,
DebugLogType,
EvtLogNames,
PAAction,
PAStatus,
)
from tacticalrmm.helpers import date_is_in_past, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from tacticalrmm.utils import get_default_timezone, reload_nats
from winupdate.models import WinUpdate
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from .models import Agent, AgentCustomField, Note, RecoveryAction, AgentHistory
from .models import Agent, AgentCustomField, AgentHistory, Note
from .permissions import (
AgentHistoryPerms,
AgentNotesPerms,
AgentPerms,
EvtLogPerms,
InstallAgentPerms,
RecoverAgentPerms,
AgentNotesPerms,
ManageProcPerms,
MeshPerms,
PingAgentPerms,
RebootAgentPerms,
RecoverAgentPerms,
RunBulkPerms,
RunScriptPerms,
SendCMDPerms,
PingAgentPerms,
UpdateAgentPerms,
)
from .serializers import (
AgentCustomFieldSerializer,
AgentHistorySerializer,
AgentHostnameSerializer,
AgentNoteSerializer,
AgentSerializer,
AgentTableSerializer,
AgentNoteSerializer,
)
from .tasks import run_script_email_results_task, send_agent_update_task
from .tasks import (
bulk_recover_agents_task,
run_script_email_results_task,
send_agent_update_task,
)
class GetAgents(APIView):
permission_classes = [IsAuthenticated, AgentPerms]
def get(self, request):
from checks.models import Check, CheckResult
monitoring_type_filter = Q()
client_site_filter = Q()
monitoring_type = request.query_params.get("monitoring_type", None)
if monitoring_type:
if monitoring_type in AgentMonType.values:
monitoring_type_filter = Q(monitoring_type=monitoring_type)
else:
return notify_error("monitoring type does not exist")
if "site" in request.query_params.keys():
filter = Q(site_id=request.query_params["site"])
client_site_filter = Q(site_id=request.query_params["site"])
elif "client" in request.query_params.keys():
filter = Q(site__client_id=request.query_params["client"])
else:
filter = Q()
client_site_filter = Q(site__client_id=request.query_params["client"])
# by default detail=true
if (
@@ -75,44 +113,52 @@ class GetAgents(APIView):
and request.query_params["detail"] == "true"
):
agents = (
    Agent.objects.filter_by_role(request.user)
    .select_related("site", "policy", "alert_template")
    .prefetch_related("agentchecks")
    .filter(filter)
    .only(
        "pk",
        "hostname",
        "agent_id",
        "site",
        "monitoring_type",
        "description",
        "needs_reboot",
        "overdue_text_alert",
        "overdue_email_alert",
        "overdue_time",
        "offline_time",
        "last_seen",
        "boot_time",
        "logged_in_username",
        "last_logged_in_user",
        "time_zone",
        "maintenance_mode",
        "pending_actions_count",
        "has_patches_pending",
    )
    Agent.objects.filter_by_role(request.user)  # type: ignore
    .filter(monitoring_type_filter)
    .filter(client_site_filter)
    .defer(*AGENT_TABLE_DEFER)
    .select_related(
        "site__server_policy",
        "site__workstation_policy",
        "site__client__server_policy",
        "site__client__workstation_policy",
        "policy",
        "alert_template",
    )
    .prefetch_related(
        Prefetch(
            "agentchecks",
            queryset=Check.objects.select_related("script"),
        ),
        Prefetch(
            "checkresults",
            queryset=CheckResult.objects.select_related("assigned_check"),
        ),
    )
    .annotate(
        pending_actions_count=Count(
            "pendingactions",
            filter=Q(pendingactions__status=PAStatus.PENDING),
        )
    )
    .annotate(
        has_patches_pending=Exists(
            WinUpdate.objects.filter(
                agent_id=OuterRef("pk"), action="approve", installed=False
            )
        )
    )
)
ctx = {"default_tz": get_default_timezone()}
serializer = AgentTableSerializer(agents, many=True, context=ctx)
serializer = AgentTableSerializer(agents, many=True)
# if detail=false
else:
agents = (
Agent.objects.filter_by_role(request.user)
.select_related("site")
.filter(filter)
.only("agent_id", "hostname", "site")
Agent.objects.filter_by_role(request.user) # type: ignore
.defer(*AGENT_DEFER)
.select_related("site__client")
.filter(monitoring_type_filter)
.filter(client_site_filter)
)
serializer = AgentHostnameSerializer(agents, many=True)
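Both query-param filters are plain Q objects, so an unset parameter stays an empty Q() and filtering with it is a no-op. A quick standalone illustration (field values are arbitrary):
from django.db.models import Q
monitoring_type_filter = Q()               # param not supplied -> no-op
client_site_filter = Q(site__client_id=3)  # ?client=3
# chaining .filter(Q()) leaves a queryset unchanged, so both can always be applied
combined = monitoring_type_filter & client_site_filter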
@@ -122,20 +168,36 @@ class GetAgents(APIView):
class GetUpdateDeleteAgent(APIView):
permission_classes = [IsAuthenticated, AgentPerms]
class InputSerializer(serializers.ModelSerializer):
class Meta:
model = Agent
fields = [
"maintenance_mode", # TODO separate this
"policy", # TODO separate this
"monitoring_type",
"description",
"overdue_email_alert",
"overdue_text_alert",
"overdue_dashboard_alert",
"offline_time",
"overdue_time",
"check_interval",
"time_zone",
"site",
]
# get agent details
def get(self, request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
return Response(
AgentSerializer(agent, context={"default_tz": get_default_timezone()}).data
)
return Response(AgentSerializer(agent).data)
# edit agent
def put(self, request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
a_serializer = AgentSerializer(instance=agent, data=request.data, partial=True)
a_serializer.is_valid(raise_exception=True)
a_serializer.save()
s = self.InputSerializer(instance=agent, data=request.data, partial=True)
s.is_valid(raise_exception=True)
s.save()
if "winupdatepolicy" in request.data.keys():
policy = agent.winupdatepolicy.get() # type: ignore
@@ -150,13 +212,13 @@ class GetUpdateDeleteAgent(APIView):
for field in request.data["custom_fields"]:
custom_field = field
custom_field["agent"] = agent.id # type: ignore
custom_field["agent"] = agent.pk
if AgentCustomField.objects.filter(
field=field["field"], agent=agent.id # type: ignore
field=field["field"], agent=agent.pk
):
value = AgentCustomField.objects.get(
field=field["field"], agent=agent.id # type: ignore
field=field["field"], agent=agent.pk
)
serializer = AgentCustomFieldSerializer(
instance=value, data=custom_field
@@ -173,10 +235,25 @@ class GetUpdateDeleteAgent(APIView):
# uninstall agent
def delete(self, request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
asyncio.run(agent.nats_cmd({"func": "uninstall"}, wait=False))
code = "foo"
if agent.plat == AgentPlat.LINUX:
with open(settings.LINUX_AGENT_SCRIPT, "r") as f:
code = f.read()
asyncio.run(agent.nats_cmd({"func": "uninstall", "code": code}, wait=False))
name = agent.hostname
mesh_id = agent.mesh_node_id
agent.delete()
reload_nats()
try:
uri = get_mesh_ws_url()
asyncio.run(remove_mesh_agent(uri, mesh_id))
except Exception as e:
DebugLog.error(
message=f"Unable to remove agent {name} from meshcentral database: {str(e)}",
log_type=DebugLogType.AGENT_ISSUES,
)
return Response(f"{name} will now be uninstalled.")
@@ -185,6 +262,11 @@ class AgentProcesses(APIView):
# list agent processes
def get(self, request, agent_id):
if getattr(settings, "DEMO", False):
from tacticalrmm.demo_views import demo_get_procs
return demo_get_procs()
agent = get_object_or_404(Agent, agent_id=agent_id)
r = asyncio.run(agent.nats_cmd(data={"func": "procs"}, timeout=5))
if r == "timeout" or r == "natsdown":
@@ -212,19 +294,19 @@ class AgentMeshCentral(APIView):
# get mesh urls
def get(self, request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
core = CoreSettings.objects.first()
core = get_core_settings()
token = agent.get_login_token(
key=core.mesh_token,
user=f"user//{core.mesh_username.lower()}", # type:ignore
)
if not core.mesh_disable_auto_login:
token = get_login_token(
key=core.mesh_token, user=f"user//{core.mesh_username}"
)
token_param = f"login={token}&"
else:
token_param = ""
if token == "err":
return notify_error("Invalid mesh token")
control = f"{core.mesh_site}/?login={token}&gotonode={agent.mesh_node_id}&viewmode=11&hide=31" # type:ignore
terminal = f"{core.mesh_site}/?login={token}&gotonode={agent.mesh_node_id}&viewmode=12&hide=31" # type:ignore
file = f"{core.mesh_site}/?login={token}&gotonode={agent.mesh_node_id}&viewmode=13&hide=31" # type:ignore
control = f"{core.mesh_site}/?{token_param}gotonode={agent.mesh_node_id}&viewmode=11&hide=31"
terminal = f"{core.mesh_site}/?{token_param}gotonode={agent.mesh_node_id}&viewmode=12&hide=31"
file = f"{core.mesh_site}/?{token_param}gotonode={agent.mesh_node_id}&viewmode=13&hide=31"
AuditLog.audit_mesh_session(
username=request.user.username,
@@ -258,9 +340,9 @@ class AgentMeshCentral(APIView):
@permission_classes([IsAuthenticated, AgentPerms])
def get_agent_versions(request):
agents = (
Agent.objects.filter_by_role(request.user)
.prefetch_related("site")
.only("pk", "hostname")
Agent.objects.defer(*AGENT_DEFER)
.filter_by_role(request.user) # type: ignore
.select_related("site__client")
)
return Response(
{
@@ -274,7 +356,7 @@ def get_agent_versions(request):
@permission_classes([IsAuthenticated, UpdateAgentPerms])
def update_agents(request):
q = (
Agent.objects.filter_by_role(request.user)
Agent.objects.filter_by_role(request.user) # type: ignore
.filter(agent_id__in=request.data["agent_ids"])
.only("agent_id", "version")
)
@@ -283,7 +365,9 @@ def update_agents(request):
for i in q
if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER)
]
send_agent_update_task.delay(agent_ids=agent_ids)
token, _ = token_is_valid()
send_agent_update_task.delay(agent_ids=agent_ids, token=token, force=False)
return Response("ok")
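The update gate relies on packaging.version ordering rather than string comparison; a quick check of the comparison used here:
from packaging import version as pyver
pyver.parse("2.9.0") < pyver.parse("2.10.0")  # True, even though "2.9.0" > "2.10.0" as strings
pyver.parse("2.1.1") < pyver.parse("2.1.1")   # False -> already current, skipped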
@@ -291,18 +375,18 @@ def update_agents(request):
@permission_classes([IsAuthenticated, PingAgentPerms])
def ping(request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
status = "offline"
status = AGENT_STATUS_OFFLINE
attempts = 0
while 1:
r = asyncio.run(agent.nats_cmd({"func": "ping"}, timeout=2))
if r == "pong":
status = "online"
status = AGENT_STATUS_ONLINE
break
else:
attempts += 1
time.sleep(1)
time.sleep(0.5)
if attempts >= 5:
if attempts >= 3:
break
return Response({"name": agent.hostname, "status": status})
@@ -311,8 +395,13 @@ def ping(request, agent_id):
@api_view(["GET"])
@permission_classes([IsAuthenticated, EvtLogPerms])
def get_event_log(request, agent_id, logtype, days):
if getattr(settings, "DEMO", False):
from tacticalrmm.demo_views import demo_get_eventlog
return demo_get_eventlog()
agent = get_object_or_404(Agent, agent_id=agent_id)
timeout = 180 if logtype == "Security" else 30
timeout = 180 if logtype == EvtLogNames.SECURITY else 30
data = {
"func": "eventlog",
@@ -334,23 +423,28 @@ def get_event_log(request, agent_id, logtype, days):
def send_raw_cmd(request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
timeout = int(request.data["timeout"])
if request.data["shell"] == "custom" and request.data["custom_shell"]:
shell = request.data["custom_shell"]
else:
shell = request.data["shell"]
data = {
"func": "rawcmd",
"timeout": timeout,
"payload": {
"command": request.data["cmd"],
"shell": request.data["shell"],
"shell": shell,
},
"run_as_user": request.data["run_as_user"],
}
if pyver.parse(agent.version) >= pyver.parse("1.6.0"):
hist = AgentHistory.objects.create(
agent=agent,
type="cmd_run",
command=request.data["cmd"],
username=request.user.username[:50],
)
data["id"] = hist.pk
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.CMD_RUN,
command=request.data["cmd"],
username=request.user.username[:50],
)
data["id"] = hist.pk
r = asyncio.run(agent.nats_cmd(data, timeout=timeout + 2))
@@ -361,7 +455,7 @@ def send_raw_cmd(request, agent_id):
username=request.user.username,
agent=agent,
cmd=request.data["cmd"],
shell=request.data["shell"],
shell=shell,
debug_info={"ip": request._client_ip},
)
@@ -382,28 +476,43 @@ class Reboot(APIView):
# reboot later
def patch(self, request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
if agent.is_posix:
return notify_error(f"Not currently implemented for {agent.plat}")
try:
obj = dt.datetime.strptime(request.data["datetime"], "%Y-%m-%d %H:%M")
obj = dt.datetime.strptime(request.data["datetime"], "%Y-%m-%dT%H:%M")
except Exception:
return notify_error("Invalid date")
if date_is_in_past(datetime_obj=obj, agent_tz=agent.timezone):
return notify_error("Date cannot be set in the past")
task_name = "TacticalRMM_SchedReboot_" + "".join(
random.choice(string.ascii_letters) for _ in range(10)
)
expire_date = obj + djangotime.timedelta(minutes=5)
nats_data = {
"func": "schedtask",
"schedtaskpayload": {
"type": "schedreboot",
"deleteafter": True,
"trigger": "once",
"enabled": True,
"delete_expired_task_after": True,
"start_when_available": False,
"multiple_instances": 2,
"trigger": "runonce",
"name": task_name,
"year": int(dt.datetime.strftime(obj, "%Y")),
"month": dt.datetime.strftime(obj, "%B"),
"day": int(dt.datetime.strftime(obj, "%d")),
"hour": int(dt.datetime.strftime(obj, "%H")),
"min": int(dt.datetime.strftime(obj, "%M")),
"start_year": int(dt.datetime.strftime(obj, "%Y")),
"start_month": int(dt.datetime.strftime(obj, "%-m")),
"start_day": int(dt.datetime.strftime(obj, "%-d")),
"start_hour": int(dt.datetime.strftime(obj, "%-H")),
"start_min": int(dt.datetime.strftime(obj, "%-M")),
"expire_year": int(expire_date.strftime("%Y")),
"expire_month": int(expire_date.strftime("%-m")),
"expire_day": int(expire_date.strftime("%-d")),
"expire_hour": int(expire_date.strftime("%-H")),
"expire_min": int(expire_date.strftime("%-M")),
},
}
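The start_*/expire_* values are plain strftime conversions; the unpadded "%-m"-style directives are a glibc extension, which is fine on the Linux server. A small worked example with illustrative values:
import datetime as dt
obj = dt.datetime(2022, 9, 5, 8, 7)          # example reboot time
expire_date = obj + dt.timedelta(minutes=5)
int(obj.strftime("%-m"))           # 9  -> start_month
int(obj.strftime("%-H"))           # 8  -> start_hour
int(expire_date.strftime("%-M"))   # 12 -> expire_min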
@@ -413,7 +522,7 @@ class Reboot(APIView):
details = {"taskname": task_name, "time": str(obj)}
PendingAction.objects.create(
agent=agent, action_type="schedreboot", details=details
agent=agent, action_type=PAAction.SCHED_REBOOT, details=details
)
nice_time = dt.datetime.strftime(obj, "%B %d, %Y at %I:%M %p")
return Response(
@@ -425,38 +534,24 @@ class Reboot(APIView):
@permission_classes([IsAuthenticated, InstallAgentPerms])
def install_agent(request):
from knox.models import AuthToken
from accounts.models import User
from agents.utils import get_winagent_url
from accounts.models import User
from agents.utils import get_agent_url
from core.utils import token_is_valid
client_id = request.data["client"]
site_id = request.data["site"]
version = settings.LATEST_AGENT_VER
arch = request.data["arch"]
goarch = request.data["goarch"]
plat = request.data["plat"]
if not _has_perm_on_site(request.user, site_id):
raise PermissionDenied()
# response type is blob so we have to use
# status codes and render error message on the frontend
if arch == "64" and not os.path.exists(
os.path.join(settings.EXE_DIR, "meshagent.exe")
):
return notify_error(
"Missing 64 bit meshagent.exe. Upload it from Settings > Global Settings > MeshCentral"
)
codesign_token, is_valid = token_is_valid()
if arch == "32" and not os.path.exists(
os.path.join(settings.EXE_DIR, "meshagent-x86.exe")
):
return notify_error(
"Missing 32 bit meshagent.exe. Upload it from Settings > Global Settings > MeshCentral"
)
inno = (
f"winagent-v{version}.exe" if arch == "64" else f"winagent-v{version}-x86.exe"
)
download_url = get_winagent_url(arch)
inno = f"tacticalagent-v{version}-{plat}-{goarch}.exe"
download_url = get_agent_url(goarch=goarch, plat=plat, token=codesign_token)
installer_user = User.objects.filter(is_installer_user=True).first()
@@ -474,12 +569,34 @@ def install_agent(request):
rdp=request.data["rdp"],
ping=request.data["ping"],
power=request.data["power"],
arch=arch,
goarch=goarch,
token=token,
api=request.data["api"],
file_name=request.data["fileName"],
)
elif request.data["installMethod"] == "bash":
# TODO
# linux agents are in beta for now, only available for sponsors for testing
# remove this after it's out of beta
if not is_valid:
return notify_error(
"Missing code signing token, or token is no longer valid. Please read the docs for more info."
)
from agents.utils import generate_linux_install
return generate_linux_install(
client=str(client_id),
site=str(site_id),
agent_type=request.data["agenttype"],
arch=goarch,
token=token,
api=request.data["api"],
download_url=download_url,
)
elif request.data["installMethod"] == "manual":
cmd = [
inno,
@@ -569,40 +686,23 @@ def install_agent(request):
@api_view(["POST"])
@permission_classes([IsAuthenticated, RecoverAgentPerms])
def recover(request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
def recover(request, agent_id: str) -> Response:
agent: Agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=agent_id
)
mode = request.data["mode"]
# attempt a realtime recovery, otherwise fall back to old recovery method
if mode == "tacagent" or mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
r = asyncio.run(agent.nats_cmd(data, timeout=10))
if r == "ok":
return Response("Successfully completed recovery")
if agent.recoveryactions.filter(last_run=None).exists(): # type: ignore
return notify_error(
"A recovery action is currently pending. Please wait for the next agent check-in."
)
if mode == "command" and not request.data["cmd"]:
return notify_error("Command is required")
# if we've made it this far and realtime recovery didn't work,
# tacagent service is the fallback recovery, so it obviously can't be used to recover itself if it's down
if mode == "tacagent":
return notify_error(
"Requires RPC service to be functional. Please recover that first"
)
uri = get_mesh_ws_url()
agent.recover(mode, uri, wait=False)
return Response("Recovery will be attempted shortly")
# we should only get here if all other methods fail
RecoveryAction(
agent=agent,
mode=mode,
command=request.data["cmd"] if mode == "command" else None,
).save()
elif mode == "mesh":
r, err = agent.recover(mode, "")
if err:
return notify_error(f"Unable to complete recovery: {r}")
return Response("Recovery will be attempted on the agent's next check-in")
return Response("Successfully completed recovery")
@api_view(["POST"])
@@ -612,6 +712,7 @@ def run_script(request, agent_id):
script = get_object_or_404(Script, pk=request.data["script"])
output = request.data["output"]
args = request.data["args"]
run_as_user: bool = request.data["run_as_user"]
req_timeout = int(request.data["timeout"]) + 3
AuditLog.audit_script_run(
@@ -621,15 +722,13 @@ def run_script(request, agent_id):
debug_info={"ip": request._client_ip},
)
history_pk = 0
if pyver.parse(agent.version) >= pyver.parse("1.6.0"):
hist = AgentHistory.objects.create(
agent=agent,
type="script_run",
script=script,
username=request.user.username[:50],
)
history_pk = hist.pk
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=script,
username=request.user.username[:50],
)
history_pk = hist.pk
if output == "wait":
r = agent.run_script(
@@ -638,6 +737,7 @@ def run_script(request, agent_id):
timeout=req_timeout,
wait=True,
history_pk=history_pk,
run_as_user=run_as_user,
)
return Response(r)
@@ -651,6 +751,7 @@ def run_script(request, agent_id):
nats_timeout=req_timeout,
emails=emails,
args=args,
run_as_user=run_as_user,
)
elif output == "collector":
from core.models import CustomField
@@ -661,15 +762,16 @@ def run_script(request, agent_id):
timeout=req_timeout,
wait=True,
history_pk=history_pk,
run_as_user=run_as_user,
)
custom_field = CustomField.objects.get(pk=request.data["custom_field"])
if custom_field.model == "agent":
if custom_field.model == CustomFieldModel.AGENT:
field = custom_field.get_or_create_field_value(agent)
elif custom_field.model == "client":
elif custom_field.model == CustomFieldModel.CLIENT:
field = custom_field.get_or_create_field_value(agent.client)
elif custom_field.model == "site":
elif custom_field.model == CustomFieldModel.SITE:
field = custom_field.get_or_create_field_value(agent.site)
else:
return notify_error("Custom Field was invalid")
@@ -689,39 +791,23 @@ def run_script(request, agent_id):
timeout=req_timeout,
wait=True,
history_pk=history_pk,
run_as_user=run_as_user,
)
Note.objects.create(agent=agent, user=request.user, note=r)
return Response(r)
else:
agent.run_script(
scriptpk=script.pk, args=args, timeout=req_timeout, history_pk=history_pk
scriptpk=script.pk,
args=args,
timeout=req_timeout,
history_pk=history_pk,
run_as_user=run_as_user,
)
return Response(f"{script.name} will now be run on {agent.hostname}")
@api_view(["POST"])
def get_mesh_exe(request, arch):
filename = "meshagent.exe" if arch == "64" else "meshagent-x86.exe"
mesh_exe = os.path.join(settings.EXE_DIR, filename)
if not os.path.exists(mesh_exe):
return notify_error(f"File {filename} has not been uploaded.")
if settings.DEBUG:
with open(mesh_exe, "rb") as f:
response = HttpResponse(
f.read(), content_type="application/vnd.microsoft.portable-executable"
)
response["Content-Disposition"] = f"inline; filename={filename}"
return response
else:
response = HttpResponse()
response["Content-Disposition"] = f"attachment; filename={filename}"
response["X-Accel-Redirect"] = f"/private/exe/{filename}"
return response
class GetAddNotes(APIView):
permission_classes = [IsAuthenticated, AgentNotesPerms]
@@ -730,7 +816,7 @@ class GetAddNotes(APIView):
agent = get_object_or_404(Agent, agent_id=agent_id)
notes = Note.objects.filter(agent=agent)
else:
notes = Note.objects.filter_by_role(request.user)
notes = Note.objects.filter_by_role(request.user) # type: ignore
return Response(AgentNoteSerializer(notes, many=True).data)
@@ -739,6 +825,9 @@ class GetAddNotes(APIView):
if not _has_perm_on_agent(request.user, agent.agent_id):
raise PermissionDenied()
if "note" not in request.data.keys():
return notify_error("Cannot add an empty note")
data = {
"note": request.data["note"],
"agent": agent.pk,
@@ -792,32 +881,37 @@ def bulk(request):
if request.data["target"] == "client":
if not _has_perm_on_client(request.user, request.data["client"]):
raise PermissionDenied()
q = Agent.objects.filter_by_role(request.user).filter(
q = Agent.objects.filter_by_role(request.user).filter( # type: ignore
site__client_id=request.data["client"]
)
elif request.data["target"] == "site":
if not _has_perm_on_site(request.user, request.data["site"]):
raise PermissionDenied()
q = Agent.objects.filter_by_role(request.user).filter(
q = Agent.objects.filter_by_role(request.user).filter( # type: ignore
site_id=request.data["site"]
)
elif request.data["target"] == "agents":
q = Agent.objects.filter_by_role(request.user).filter(
q = Agent.objects.filter_by_role(request.user).filter( # type: ignore
agent_id__in=request.data["agents"]
)
elif request.data["target"] == "all":
q = Agent.objects.filter_by_role(request.user).only("pk", "monitoring_type")
q = Agent.objects.filter_by_role(request.user).only("pk", "monitoring_type") # type: ignore
else:
return notify_error("Something went wrong")
if request.data["monType"] == "servers":
q = q.filter(monitoring_type="server")
q = q.filter(monitoring_type=AgentMonType.SERVER)
elif request.data["monType"] == "workstations":
q = q.filter(monitoring_type="workstation")
q = q.filter(monitoring_type=AgentMonType.WORKSTATION)
if request.data["osType"] == AgentPlat.WINDOWS:
q = q.filter(plat=AgentPlat.WINDOWS)
elif request.data["osType"] == AgentPlat.LINUX:
q = q.filter(plat=AgentPlat.LINUX)
agents: list[int] = [agent.pk for agent in q]
@@ -832,13 +926,18 @@ def bulk(request):
)
if request.data["mode"] == "command":
if request.data["shell"] == "custom" and request.data["custom_shell"]:
shell = request.data["custom_shell"]
else:
shell = request.data["shell"]
handle_bulk_command_task.delay(
agents,
request.data["cmd"],
request.data["shell"],
shell,
request.data["timeout"],
request.user.username[:50],
run_on_offline=request.data["offlineAgents"],
request.data["run_as_user"],
)
return Response(f"Command will now be run on {len(agents)} agents")
@@ -850,6 +949,7 @@ def bulk(request):
request.data["args"],
request.data["timeout"],
request.user.username[:50],
request.data["run_as_user"],
)
return Response(f"{script.name} will now be run on {len(agents)} agents")
@@ -876,7 +976,7 @@ def agent_maintenance(request):
raise PermissionDenied()
count = (
Agent.objects.filter_by_role(request.user)
Agent.objects.filter_by_role(request.user) # type: ignore
.filter(site__client_id=request.data["id"])
.update(maintenance_mode=request.data["action"])
)
@@ -886,7 +986,7 @@ def agent_maintenance(request):
raise PermissionDenied()
count = (
Agent.objects.filter_by_role(request.user)
Agent.objects.filter_by_role(request.user) # type: ignore
.filter(site_id=request.data["id"])
.update(maintenance_mode=request.data["action"])
)
@@ -903,6 +1003,13 @@ def agent_maintenance(request):
)
@api_view(["GET"])
@permission_classes([IsAuthenticated, RecoverAgentPerms])
def bulk_agent_recovery(request):
bulk_recover_agents_task.delay()
return Response("Agents will now be recovered")
class WMI(APIView):
permission_classes = [IsAuthenticated, AgentPerms]
@@ -922,6 +1029,6 @@ class AgentHistoryView(APIView):
agent = get_object_or_404(Agent, agent_id=agent_id)
history = AgentHistory.objects.filter(agent=agent)
else:
history = AgentHistory.objects.filter_by_role(request.user)
history = AgentHistory.objects.filter_by_role(request.user) # type: ignore
ctx = {"default_tz": get_default_timezone()}
return Response(AgentHistorySerializer(history, many=True, context=ctx).data)

View File

@@ -0,0 +1,24 @@
# Generated by Django 4.0.3 on 2022-04-07 17:28
import django.db.models.deletion
from django.db import migrations, models
def delete_alerts_without_agent(apps, schema):
Alert = apps.get_model("alerts", "Alert")
Alert.objects.filter(agent=None).delete()
class Migration(migrations.Migration):
dependencies = [
("agents", "0047_alter_agent_plat_alter_agent_site"),
("alerts", "0010_auto_20210917_1954"),
]
operations = [
migrations.RunPython(
delete_alerts_without_agent, reverse_code=migrations.RunPython.noop
),
]

View File

@@ -0,0 +1,23 @@
# Generated by Django 4.0.5 on 2022-06-29 07:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('alerts', '0011_alter_alert_agent'),
]
operations = [
migrations.AlterField(
model_name='alert',
name='action_retcode',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='alert',
name='resolved_action_retcode',
field=models.BigIntegerField(blank=True, null=True),
),
]

View File

@@ -1,7 +1,7 @@
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Union
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, cast
from django.contrib.postgres.fields import ArrayField
from django.db import models
@@ -9,26 +9,20 @@ from django.db.models.fields import BooleanField, PositiveIntegerField
from django.utils import timezone as djangotime
from logs.models import BaseAuditModel, DebugLog
from tacticalrmm.constants import (
AgentMonType,
AlertSeverity,
AlertType,
CheckType,
DebugLogType,
)
from tacticalrmm.models import PermissionQuerySet
if TYPE_CHECKING:
from agents.models import Agent
from autotasks.models import AutomatedTask
from checks.models import Check
SEVERITY_CHOICES = [
("info", "Informational"),
("warning", "Warning"),
("error", "Error"),
]
ALERT_TYPE_CHOICES = [
("availability", "Availability"),
("check", "Check"),
("task", "Task"),
("custom", "Custom"),
]
from autotasks.models import AutomatedTask, TaskResult
from checks.models import Check, CheckResult
from clients.models import Client, Site
class Alert(models.Model):
@@ -56,7 +50,7 @@ class Alert(models.Model):
blank=True,
)
alert_type = models.CharField(
max_length=20, choices=ALERT_TYPE_CHOICES, default="availability"
max_length=20, choices=AlertType.choices, default=AlertType.AVAILABILITY
)
message = models.TextField(null=True, blank=True)
alert_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
@@ -64,7 +58,9 @@ class Alert(models.Model):
snooze_until = models.DateTimeField(null=True, blank=True)
resolved = models.BooleanField(default=False)
resolved_on = models.DateTimeField(null=True, blank=True)
severity = models.CharField(max_length=30, choices=SEVERITY_CHOICES, default="info")
severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_sent = models.DateTimeField(null=True, blank=True)
resolved_email_sent = models.DateTimeField(null=True, blank=True)
sms_sent = models.DateTimeField(null=True, blank=True)
@@ -73,72 +69,208 @@ class Alert(models.Model):
action_run = models.DateTimeField(null=True, blank=True)
action_stdout = models.TextField(null=True, blank=True)
action_stderr = models.TextField(null=True, blank=True)
action_retcode = models.IntegerField(null=True, blank=True)
action_retcode = models.BigIntegerField(null=True, blank=True)
action_execution_time = models.CharField(max_length=100, null=True, blank=True)
resolved_action_run = models.DateTimeField(null=True, blank=True)
resolved_action_stdout = models.TextField(null=True, blank=True)
resolved_action_stderr = models.TextField(null=True, blank=True)
resolved_action_retcode = models.IntegerField(null=True, blank=True)
resolved_action_retcode = models.BigIntegerField(null=True, blank=True)
resolved_action_execution_time = models.CharField(
max_length=100, null=True, blank=True
)
def __str__(self):
return self.message
def __str__(self) -> str:
return f"{self.alert_type} - {self.message}"
def resolve(self):
@property
def assigned_agent(self) -> "Optional[Agent]":
return self.agent
@property
def site(self) -> "Site":
return self.agent.site
@property
def client(self) -> "Client":
return self.agent.client
def resolve(self) -> None:
self.resolved = True
self.resolved_on = djangotime.now()
self.snoozed = False
self.snooze_until = None
self.save()
self.save(update_fields=["resolved", "resolved_on", "snoozed", "snooze_until"])
@classmethod
def create_or_return_availability_alert(cls, agent):
if not cls.objects.filter(agent=agent, resolved=False).exists():
return cls.objects.create(
agent=agent,
alert_type="availability",
severity="error",
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
if not cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
agent=agent,
alert_type=AlertType.AVAILABILITY,
severity=AlertSeverity.ERROR,
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
),
)
else:
return cls.objects.get(agent=agent, resolved=False)
try:
return cast(
Alert,
cls.objects.get(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
@classmethod
def create_or_return_check_alert(cls, check):
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
if not cls.objects.filter(assigned_check=check, resolved=False).exists():
return cls.objects.create(
assigned_check=check,
alert_type="check",
severity=check.alert_severity,
message=f"{check.agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
# need to pass agent if the check is a policy
if not cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_check=check,
agent=agent,
alert_type=AlertType.CHECK,
severity=check.alert_severity
if check.check_type
not in [
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
]
else alert_severity,
message=f"{agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
),
)
else:
return cls.objects.get(assigned_check=check, resolved=False)
try:
return cast(
Alert,
cls.objects.get(
assigned_check=check,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
@classmethod
def create_or_return_task_alert(cls, task):
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
if not cls.objects.filter(assigned_task=task, resolved=False).exists():
return cls.objects.create(
assigned_task=task,
alert_type="task",
severity=task.alert_severity,
message=f"{task.agent.hostname} has task: {task.name} that failed.",
hidden=True,
if not cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_task=task,
agent=agent,
alert_type=AlertType.TASK,
severity=task.alert_severity,
message=f"{agent.hostname} has task: {task.name} that failed.",
hidden=True,
),
)
else:
return cls.objects.get(assigned_task=task, resolved=False)
try:
return cast(
Alert,
cls.objects.get(
assigned_task=task,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
@classmethod
def handle_alert_failure(cls, instance: Union[Agent, AutomatedTask, Check]) -> None:
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent
from autotasks.models import AutomatedTask
from checks.models import Check
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
dashboard_severities = None
@@ -150,6 +282,7 @@ class Alert(models.Model):
alert_interval = None
email_task = None
text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
@@ -163,30 +296,21 @@ class Alert(models.Model):
dashboard_alert = instance.overdue_dashboard_alert
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
alert_severity = "error"
alert_severity = AlertSeverity.ERROR
agent = instance
dashboard_severities = [AlertSeverity.ERROR]
email_severities = [AlertSeverity.ERROR]
text_severities = [AlertSeverity.ERROR]
# set alert_template settings
if alert_template:
dashboard_severities = ["error"]
email_severities = ["error"]
text_severities = ["error"]
always_dashboard = alert_template.agent_always_alert
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
if instance.should_create_alert(alert_template):
alert = cls.create_or_return_availability_alert(instance)
else:
# check if there is an alert that exists
if cls.objects.filter(agent=instance, resolved=False).exists():
alert = cls.objects.get(agent=instance, resolved=False)
else:
alert = None
elif isinstance(instance, Check):
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_check_email_alert_task,
handle_check_sms_alert_task,
@@ -195,75 +319,98 @@ class Alert(models.Model):
email_task = handle_check_email_alert_task
text_task = handle_check_sms_alert_task
email_alert = instance.email_alert
text_alert = instance.text_alert
dashboard_alert = instance.dashboard_alert
email_alert = instance.assigned_check.email_alert
text_alert = instance.assigned_check.text_alert
dashboard_alert = instance.assigned_check.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.alert_severity
alert_severity = (
instance.assigned_check.alert_severity
if instance.assigned_check.check_type
not in [
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
]
else instance.alert_severity
)
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = alert_template.check_dashboard_alert_severity
email_severities = alert_template.check_email_alert_severity
text_severities = alert_template.check_text_alert_severity
dashboard_severities = (
alert_template.check_dashboard_alert_severity
if alert_template.check_dashboard_alert_severity
else [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
AlertSeverity.INFO,
]
)
email_severities = (
alert_template.check_email_alert_severity
if alert_template.check_email_alert_severity
else [AlertSeverity.ERROR, AlertSeverity.WARNING]
)
text_severities = (
alert_template.check_text_alert_severity
if alert_template.check_text_alert_severity
else [AlertSeverity.ERROR, AlertSeverity.WARNING]
)
always_dashboard = alert_template.check_always_alert
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
if instance.should_create_alert(alert_template):
alert = cls.create_or_return_check_alert(instance)
else:
# check if there is an alert that exists
if cls.objects.filter(assigned_check=instance, resolved=False).exists():
alert = cls.objects.get(assigned_check=instance, resolved=False)
else:
alert = None
elif isinstance(instance, AutomatedTask):
elif isinstance(instance, TaskResult):
from autotasks.tasks import handle_task_email_alert, handle_task_sms_alert
email_task = handle_task_email_alert
text_task = handle_task_sms_alert
email_alert = instance.email_alert
text_alert = instance.text_alert
dashboard_alert = instance.dashboard_alert
email_alert = instance.task.email_alert
text_alert = instance.task.text_alert
dashboard_alert = instance.task.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.alert_severity
alert_severity = instance.task.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = alert_template.task_dashboard_alert_severity
email_severities = alert_template.task_email_alert_severity
text_severities = alert_template.task_text_alert_severity
dashboard_severities = (
alert_template.task_dashboard_alert_severity
if alert_template.task_dashboard_alert_severity
else [AlertSeverity.ERROR, AlertSeverity.WARNING]
)
email_severities = (
alert_template.task_email_alert_severity
if alert_template.task_email_alert_severity
else [AlertSeverity.ERROR, AlertSeverity.WARNING]
)
text_severities = (
alert_template.task_text_alert_severity
if alert_template.task_text_alert_severity
else [AlertSeverity.ERROR, AlertSeverity.WARNING]
)
always_dashboard = alert_template.task_always_alert
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
if instance.should_create_alert(alert_template):
alert = cls.create_or_return_task_alert(instance)
else:
# check if there is an alert that exists
if cls.objects.filter(assigned_task=instance, resolved=False).exists():
alert = cls.objects.get(assigned_task=instance, resolved=False)
else:
alert = None
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if maintenance_mode or not alert:
if not alert or maintenance_mode:
return
# check if alert severity changed on check and update the alert
# check if alert severity changed and update the alert
if alert_severity != alert.severity:
alert.severity = alert_severity
alert.save(update_fields=["severity"])
@@ -272,19 +419,25 @@ class Alert(models.Model):
if dashboard_alert or always_dashboard:
# check if alert template is set and specific severities are configured
if alert_template and alert.severity not in dashboard_severities: # type: ignore
pass
else:
if (
not alert_template
or alert_template
and dashboard_severities
and alert.severity in dashboard_severities
):
alert.hidden = False
alert.save()
alert.save(update_fields=["hidden"])
# send email if enabled
if email_alert or always_email:
# check if alert template is set and specific severities are configured
if alert_template and alert.severity not in email_severities: # type: ignore
pass
else:
if (
not alert_template
or alert_template
and email_severities
and alert.severity in email_severities
):
email_task.delay(
pk=alert.pk,
alert_interval=alert_interval,
@@ -294,13 +447,21 @@ class Alert(models.Model):
if text_alert or always_text:
# check if alert template is set and specific severities are configured
if alert_template and alert.severity not in text_severities: # type: ignore
pass
else:
if (
not alert_template
or alert_template
and text_severities
and alert.severity in text_severities
):
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if alert_template and alert_template.action and run_script_action and not alert.action_run: # type: ignore
if (
alert_template
and alert_template.action
and run_script_action
and not alert.action_run
):
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
@@ -308,10 +469,11 @@ class Alert(models.Model):
wait=True,
full=True,
run_on_any=True,
run_as_user=False,
)
# command was successful
if type(r) == dict:
if isinstance(r, dict):
alert.action_retcode = r["retcode"]
alert.action_stdout = r["stdout"]
alert.action_stderr = r["stderr"]
@@ -321,21 +483,24 @@ class Alert(models.Model):
else:
DebugLog.error(
agent=agent,
log_type="scripting",
log_type=DebugLogType.SCRIPTING,
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
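The blocks above invert the old "check severities and pass, else notify" shape into a single positive condition. A minimal standalone sketch of the equivalent logic, with illustrative names rather than the project's models:

def should_notify(alert_template, severity, configured_severities):
    # old shape:
    #   if alert_template and severity not in configured_severities:
    #       pass
    #   else:
    #       notify()
    # new shape: notify when no template is set, or when the template lists
    # this severity; `and` binds tighter than `or`, so this groups as
    # (not alert_template) or (configured_severities and severity in configured_severities)
    return bool(
        not alert_template
        or configured_severities
        and severity in configured_severities
    )

# hypothetical values, just to exercise both branches
assert should_notify(None, "error", []) is True
assert should_notify("some template", "info", ["error", "warning"]) is False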
@classmethod
def handle_alert_resolve(cls, instance: Union[Agent, AutomatedTask, Check]) -> None:
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent
from autotasks.models import AutomatedTask
from checks.models import Check
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
email_on_resolved = False
text_on_resolved = False
resolved_email_task = None
resolved_text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
@@ -345,7 +510,6 @@ class Alert(models.Model):
resolved_text_task = agent_recovery_sms_task
alert_template = instance.alert_template
alert = cls.objects.get(agent=instance, resolved=False)
maintenance_mode = instance.maintenance_mode
agent = instance
@@ -354,7 +518,12 @@ class Alert(models.Model):
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, Check):
if agent.overdue_email_alert:
email_on_resolved = True
if agent.overdue_text_alert:
text_on_resolved = True
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_resolved_check_email_alert_task,
handle_resolved_check_sms_alert_task,
@@ -364,7 +533,6 @@ class Alert(models.Model):
resolved_text_task = handle_resolved_check_sms_alert_task
alert_template = instance.agent.alert_template
alert = cls.objects.get(assigned_check=instance, resolved=False)
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
@@ -373,7 +541,7 @@ class Alert(models.Model):
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, AutomatedTask):
elif isinstance(instance, TaskResult):
from autotasks.tasks import (
handle_resolved_task_email_alert,
handle_resolved_task_sms_alert,
@@ -383,7 +551,6 @@ class Alert(models.Model):
resolved_text_task = handle_resolved_task_sms_alert
alert_template = instance.agent.alert_template
alert = cls.objects.get(assigned_task=instance, resolved=False)
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
@@ -395,8 +562,10 @@ class Alert(models.Model):
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if maintenance_mode:
if not alert or maintenance_mode:
return
alert.resolve()
@@ -413,7 +582,7 @@ class Alert(models.Model):
if (
alert_template
and alert_template.resolved_action
and run_script_action # type: ignore
and run_script_action
and not alert.resolved_action_run
):
r = agent.run_script(
@@ -423,10 +592,11 @@ class Alert(models.Model):
wait=True,
full=True,
run_on_any=True,
run_as_user=False,
)
# command was successful
if type(r) == dict:
if isinstance(r, dict):
alert.resolved_action_retcode = r["retcode"]
alert.resolved_action_stdout = r["stdout"]
alert.resolved_action_stderr = r["stderr"]
@@ -438,11 +608,11 @@ class Alert(models.Model):
else:
DebugLog.error(
agent=agent,
log_type="scripting",
log_type=DebugLogType.SCRIPTING,
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: list[str]):
def parse_script_args(self, args: List[str]) -> List[str]:
if not args:
return []
@@ -456,15 +626,16 @@ class Alert(models.Model):
if match:
name = match.group(1)
if hasattr(self, name):
# check if attr exists and isn't a function
if hasattr(self, name) and not callable(getattr(self, name)):
value = f"'{getattr(self, name)}'"
else:
continue
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg)) # type: ignore
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except Exception as e:
DebugLog.error(log_type="scripting", message=e)
DebugLog.error(log_type=DebugLogType.SCRIPTING, message=str(e))
continue
else:
@@ -534,17 +705,17 @@ class AlertTemplate(BaseAuditModel):
# check alert settings
check_email_alert_severity = ArrayField(
models.CharField(max_length=25, blank=True, choices=SEVERITY_CHOICES),
models.CharField(max_length=25, blank=True, choices=AlertSeverity.choices),
blank=True,
default=list,
)
check_text_alert_severity = ArrayField(
models.CharField(max_length=25, blank=True, choices=SEVERITY_CHOICES),
models.CharField(max_length=25, blank=True, choices=AlertSeverity.choices),
blank=True,
default=list,
)
check_dashboard_alert_severity = ArrayField(
models.CharField(max_length=25, blank=True, choices=SEVERITY_CHOICES),
models.CharField(max_length=25, blank=True, choices=AlertSeverity.choices),
blank=True,
default=list,
)
@@ -558,17 +729,17 @@ class AlertTemplate(BaseAuditModel):
# task alert settings
task_email_alert_severity = ArrayField(
models.CharField(max_length=25, blank=True, choices=SEVERITY_CHOICES),
models.CharField(max_length=25, blank=True, choices=AlertSeverity.choices),
blank=True,
default=list,
)
task_text_alert_severity = ArrayField(
models.CharField(max_length=25, blank=True, choices=SEVERITY_CHOICES),
models.CharField(max_length=25, blank=True, choices=AlertSeverity.choices),
blank=True,
default=list,
)
task_dashboard_alert_severity = ArrayField(
models.CharField(max_length=25, blank=True, choices=SEVERITY_CHOICES),
models.CharField(max_length=25, blank=True, choices=AlertSeverity.choices),
blank=True,
default=list,
)
@@ -594,11 +765,22 @@ class AlertTemplate(BaseAuditModel):
"agents.Agent", related_name="alert_exclusions", blank=True
)
def __str__(self):
def __str__(self) -> str:
return self.name
def is_agent_excluded(self, agent: "Agent") -> bool:
return (
agent in self.excluded_agents.all()
or agent.site in self.excluded_sites.all()
or agent.client in self.excluded_clients.all()
or agent.monitoring_type == AgentMonType.WORKSTATION
and self.exclude_workstations
or agent.monitoring_type == AgentMonType.SERVER
and self.exclude_servers
)
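is_agent_excluded leans on `and` binding tighter than `or`. A quick standalone check with plain booleans (not the real model fields) showing that the implicit and explicit groupings agree:

excluded_directly = False
site_excluded = False
client_excluded = False
is_workstation = True
exclude_workstations = True
is_server = False
exclude_servers = False

implicit = (
    excluded_directly
    or site_excluded
    or client_excluded
    or is_workstation and exclude_workstations
    or is_server and exclude_servers
)
explicit = (
    excluded_directly
    or site_excluded
    or client_excluded
    or (is_workstation and exclude_workstations)
    or (is_server and exclude_servers)
)
assert implicit is explicit is True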
@staticmethod
def serialize(alert_template):
def serialize(alert_template: AlertTemplate) -> Dict[str, Any]:
# serializes the alert template and returns json
from .serializers import AlertTemplateAuditSerializer


@@ -1,10 +1,15 @@
from typing import TYPE_CHECKING
from django.shortcuts import get_object_or_404
from rest_framework import permissions
from tacticalrmm.permissions import _has_perm, _has_perm_on_agent
if TYPE_CHECKING:
from accounts.models import User
def _has_perm_on_alert(user, id: int):
def _has_perm_on_alert(user: "User", id: int) -> bool:
from alerts.models import Alert
role = user.role
@@ -19,10 +24,6 @@ def _has_perm_on_alert(user, id: int):
if alert.agent:
agent_id = alert.agent.agent_id
elif alert.assigned_check:
agent_id = alert.assigned_check.agent.agent_id
elif alert.assigned_task:
agent_id = alert.assigned_task.agent.agent_id
else:
return True
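The TYPE_CHECKING guard above pulls in the User model for annotations only, which is the usual way to add type hints without creating a runtime circular import. A tiny standalone sketch of the pattern (Decimal stands in for any heavy or circular import):

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # evaluated only by type checkers, never at runtime
    from decimal import Decimal

def as_float(value: Decimal) -> float:
    # with `from __future__ import annotations` the hint stays a string at runtime
    return float(value)

print(as_float.__annotations__)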
@@ -30,7 +31,7 @@ def _has_perm_on_alert(user, id: int):
class AlertPerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
if r.method == "GET" or r.method == "PATCH":
if "pk" in view.kwargs.keys():
return _has_perm(r, "can_list_alerts") and _has_perm_on_alert(
@@ -48,7 +49,7 @@ class AlertPerms(permissions.BasePermission):
class AlertTemplatePerms(permissions.BasePermission):
def has_permission(self, r, view):
def has_permission(self, r, view) -> bool:
if r.method == "GET":
return _has_perm(r, "can_list_alerttemplates")
else:


@@ -3,86 +3,17 @@ from rest_framework.serializers import ModelSerializer, ReadOnlyField
from automation.serializers import PolicySerializer
from clients.serializers import ClientMinimumSerializer, SiteMinimumSerializer
from tacticalrmm.utils import get_default_timezone
from .models import Alert, AlertTemplate
class AlertSerializer(ModelSerializer):
hostname = SerializerMethodField(read_only=True)
client = SerializerMethodField(read_only=True)
site = SerializerMethodField(read_only=True)
alert_time = SerializerMethodField(read_only=True)
resolve_on = SerializerMethodField(read_only=True)
snoozed_until = SerializerMethodField(read_only=True)
def get_hostname(self, instance):
if instance.alert_type == "availability":
return instance.agent.hostname if instance.agent else ""
elif instance.alert_type == "check":
return (
instance.assigned_check.agent.hostname
if instance.assigned_check
else ""
)
elif instance.alert_type == "task":
return (
instance.assigned_task.agent.hostname if instance.assigned_task else ""
)
else:
return ""
def get_client(self, instance):
if instance.alert_type == "availability":
return instance.agent.client.name if instance.agent else ""
elif instance.alert_type == "check":
return (
instance.assigned_check.agent.client.name
if instance.assigned_check
else ""
)
elif instance.alert_type == "task":
return (
instance.assigned_task.agent.client.name
if instance.assigned_task
else ""
)
else:
return ""
def get_site(self, instance):
if instance.alert_type == "availability":
return instance.agent.site.name if instance.agent else ""
elif instance.alert_type == "check":
return (
instance.assigned_check.agent.site.name
if instance.assigned_check
else ""
)
elif instance.alert_type == "task":
return (
instance.assigned_task.agent.site.name if instance.assigned_task else ""
)
else:
return ""
def get_alert_time(self, instance):
if instance.alert_time:
return instance.alert_time.astimezone(get_default_timezone()).timestamp()
else:
return None
def get_resolve_on(self, instance):
if instance.resolved_on:
return instance.resolved_on.astimezone(get_default_timezone()).timestamp()
else:
return None
def get_snoozed_until(self, instance):
if instance.snooze_until:
return instance.snooze_until.astimezone(get_default_timezone()).timestamp()
return None
hostname = ReadOnlyField(source="assigned_agent.hostname")
agent_id = ReadOnlyField(source="assigned_agent.agent_id")
client = ReadOnlyField(source="client.name")
site = ReadOnlyField(source="site.name")
alert_time = ReadOnlyField()
class Meta:
model = Alert
@@ -104,11 +35,11 @@ class AlertTemplateSerializer(ModelSerializer):
fields = "__all__"
def get_applied_count(self, instance):
count = 0
count += instance.policies.count()
count += instance.clients.count()
count += instance.sites.count()
return count
return (
instance.policies.count()
+ instance.clients.count()
+ instance.sites.count()
)
class AlertTemplateRelationSerializer(ModelSerializer):


@@ -1,11 +1,13 @@
from django.utils import timezone as djangotime
from agents.models import Agent
from tacticalrmm.celery import app
from .models import Alert
@app.task
def unsnooze_alerts() -> str:
from .models import Alert
Alert.objects.filter(snoozed=True, snooze_until__lte=djangotime.now()).update(
snoozed=False, snooze_until=None
)
@@ -14,10 +16,10 @@ def unsnooze_alerts() -> str:
@app.task
def cache_agents_alert_template():
from agents.models import Agent
for agent in Agent.objects.only("pk"):
def cache_agents_alert_template() -> str:
for agent in Agent.objects.only(
"pk", "site", "policy", "alert_template"
).select_related("site", "policy", "alert_template"):
agent.set_alert_template()
return "ok"
@@ -25,8 +27,6 @@ def cache_agents_alert_template():
@app.task
def prune_resolved_alerts(older_than_days: int) -> str:
from .models import Alert
Alert.objects.filter(resolved=True).filter(
alert_time__lt=djangotime.now() - djangotime.timedelta(days=older_than_days)
).delete()

File diff suppressed because it is too large


@@ -7,7 +7,7 @@ from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from tacticalrmm.utils import notify_error
from tacticalrmm.helpers import notify_error
from .models import Alert, AlertTemplate
from .permissions import AlertPerms, AlertTemplatePerms
@@ -92,7 +92,7 @@ class GetAddAlerts(APIView):
)
alerts = (
Alert.objects.filter_by_role(request.user)
Alert.objects.filter_by_role(request.user) # type: ignore
.filter(clientFilter)
.filter(severityFilter)
.filter(resolvedFilter)
@@ -102,7 +102,7 @@ class GetAddAlerts(APIView):
return Response(AlertSerializer(alerts, many=True).data)
else:
alerts = Alert.objects.filter_by_role(request.user)
alerts = Alert.objects.filter_by_role(request.user) # type: ignore
return Response(AlertSerializer(alerts, many=True).data)
def post(self, request):


@@ -1,342 +0,0 @@
import json
import os
from unittest.mock import patch
from django.conf import settings
from django.utils import timezone as djangotime
from model_bakery import baker
from autotasks.models import AutomatedTask
from tacticalrmm.test import TacticalTestCase
class TestAPIv3(TacticalTestCase):
def setUp(self):
self.authenticate()
self.setup_coresettings()
self.agent = baker.make_recipe("agents.agent")
def test_get_checks(self):
url = f"/api/v3/{self.agent.agent_id}/checkrunner/"
# add a check
check1 = baker.make_recipe("checks.ping_check", agent=self.agent)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data["check_interval"], self.agent.check_interval) # type: ignore
self.assertEqual(len(r.data["checks"]), 1) # type: ignore
# override check run interval
check2 = baker.make_recipe(
"checks.ping_check", agent=self.agent, run_interval=20
)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data["check_interval"], 20) # type: ignore
self.assertEqual(len(r.data["checks"]), 2) # type: ignore
# Set last_run on both checks and should return an empty list
check1.last_run = djangotime.now()
check1.save()
check2.last_run = djangotime.now()
check2.save()
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data["check_interval"], 20) # type: ignore
self.assertFalse(r.data["checks"]) # type: ignore
# set last_run greater than interval
check1.last_run = djangotime.now() - djangotime.timedelta(seconds=200)
check1.save()
check2.last_run = djangotime.now() - djangotime.timedelta(seconds=200)
check2.save()
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data["check_interval"], 20) # type: ignore
self.assertEquals(len(r.data["checks"]), 2) # type: ignore
url = "/api/v3/Maj34ACb324j234asdj2n34kASDjh34-DESKTOPTEST123/checkrunner/"
r = self.client.get(url)
self.assertEqual(r.status_code, 404)
self.check_not_authenticated("get", url)
def test_sysinfo(self):
# TODO replace this with golang wmi sample data
url = "/api/v3/sysinfo/"
with open(
os.path.join(
settings.BASE_DIR, "tacticalrmm/test_data/wmi_python_agent.json"
)
) as f:
wmi_py = json.load(f)
payload = {"agent_id": self.agent.agent_id, "sysinfo": wmi_py}
r = self.client.patch(url, payload, format="json")
self.assertEqual(r.status_code, 200)
self.check_not_authenticated("patch", url)
def test_checkrunner_interval(self):
url = f"/api/v3/{self.agent.agent_id}/checkinterval/"
r = self.client.get(url, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(
r.json(),
{"agent": self.agent.pk, "check_interval": self.agent.check_interval},
)
# add check to agent with check interval set
check = baker.make_recipe(
"checks.ping_check", agent=self.agent, run_interval=30
)
r = self.client.get(url, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(
r.json(),
{"agent": self.agent.pk, "check_interval": 30},
)
# minimum check run interval is 15 seconds
check = baker.make_recipe("checks.ping_check", agent=self.agent, run_interval=5)
r = self.client.get(url, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(
r.json(),
{"agent": self.agent.pk, "check_interval": 15},
)
def test_run_checks(self):
# force run all checks regardless of interval
agent = baker.make_recipe("agents.online_agent")
baker.make_recipe("checks.ping_check", agent=agent)
baker.make_recipe("checks.diskspace_check", agent=agent)
baker.make_recipe("checks.cpuload_check", agent=agent)
baker.make_recipe("checks.memory_check", agent=agent)
baker.make_recipe("checks.eventlog_check", agent=agent)
for _ in range(10):
baker.make_recipe("checks.script_check", agent=agent)
url = f"/api/v3/{agent.agent_id}/runchecks/"
r = self.client.get(url)
self.assertEqual(r.json()["agent"], agent.pk)
self.assertIsInstance(r.json()["check_interval"], int)
self.assertEqual(len(r.json()["checks"]), 15)
def test_checkin_patch(self):
from logs.models import PendingAction
url = "/api/v3/checkin/"
agent_updated = baker.make_recipe("agents.agent", version="1.3.0")
PendingAction.objects.create(
agent=agent_updated,
action_type="agentupdate",
details={
"url": agent_updated.winagent_dl,
"version": agent_updated.version,
"inno": agent_updated.win_inno_exe,
},
)
action = agent_updated.pendingactions.filter(action_type="agentupdate").first()
self.assertEqual(action.status, "pending")
# test agent failed to update and still on same version
payload = {
"func": "hello",
"agent_id": agent_updated.agent_id,
"version": "1.3.0",
}
r = self.client.patch(url, payload, format="json")
self.assertEqual(r.status_code, 200)
action = agent_updated.pendingactions.filter(action_type="agentupdate").first()
self.assertEqual(action.status, "pending")
# test agent successful update
payload["version"] = settings.LATEST_AGENT_VER
r = self.client.patch(url, payload, format="json")
self.assertEqual(r.status_code, 200)
action = agent_updated.pendingactions.filter(action_type="agentupdate").first()
self.assertEqual(action.status, "completed")
action.delete()
@patch("apiv3.views.reload_nats")
def test_agent_recovery(self, reload_nats):
reload_nats.return_value = "ok"
r = self.client.get("/api/v3/34jahsdkjasncASDjhg2b3j4r/recover/")
self.assertEqual(r.status_code, 404)
agent = baker.make_recipe("agents.online_agent")
url = f"/api/v3/{agent.agent_id}/recovery/"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json(), {"mode": "pass", "shellcmd": ""})
reload_nats.assert_not_called()
baker.make("agents.RecoveryAction", agent=agent, mode="mesh")
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json(), {"mode": "mesh", "shellcmd": ""})
reload_nats.assert_not_called()
baker.make(
"agents.RecoveryAction",
agent=agent,
mode="command",
command="shutdown /r /t 5 /f",
)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(
r.json(), {"mode": "command", "shellcmd": "shutdown /r /t 5 /f"}
)
reload_nats.assert_not_called()
baker.make("agents.RecoveryAction", agent=agent, mode="rpc")
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json(), {"mode": "rpc", "shellcmd": ""})
reload_nats.assert_called_once()
def test_task_runner_get(self):
from autotasks.serializers import TaskGOGetSerializer
r = self.client.get("/api/v3/500/asdf9df9dfdf/taskrunner/")
self.assertEqual(r.status_code, 404)
# setup data
agent = baker.make_recipe("agents.agent")
script = baker.make_recipe("scripts.script")
task = baker.make("autotasks.AutomatedTask", agent=agent, script=script)
url = f"/api/v3/{task.pk}/{agent.agent_id}/taskrunner/" # type: ignore
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(TaskGOGetSerializer(task).data, r.data) # type: ignore
def test_task_runner_results(self):
from agents.models import AgentCustomField
r = self.client.patch("/api/v3/500/asdf9df9dfdf/taskrunner/")
self.assertEqual(r.status_code, 404)
# setup data
agent = baker.make_recipe("agents.agent")
task = baker.make("autotasks.AutomatedTask", agent=agent)
url = f"/api/v3/{task.pk}/{agent.agent_id}/taskrunner/" # type: ignore
# test passing task
data = {
"stdout": "test test \ntestest stdgsd\n",
"stderr": "",
"retcode": 0,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertTrue(AutomatedTask.objects.get(pk=task.pk).status == "passing") # type: ignore
# test failing task
data = {
"stdout": "test test \ntestest stdgsd\n",
"stderr": "",
"retcode": 1,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertTrue(AutomatedTask.objects.get(pk=task.pk).status == "failing") # type: ignore
# test collector task
text = baker.make("core.CustomField", model="agent", type="text", name="Test")
boolean = baker.make(
"core.CustomField", model="agent", type="checkbox", name="Test1"
)
multiple = baker.make(
"core.CustomField", model="agent", type="multiple", name="Test2"
)
# test text fields
task.custom_field = text # type: ignore
task.save() # type: ignore
# test failing with stderr
data = {
"stdout": "test test \nthe last line",
"stderr": "This is an error",
"retcode": 1,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertTrue(AutomatedTask.objects.get(pk=task.pk).status == "failing") # type: ignore
# test saving to text field
data = {
"stdout": "test test \nthe last line",
"stderr": "",
"retcode": 0,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(AutomatedTask.objects.get(pk=task.pk).status, "passing") # type: ignore
self.assertEqual(AgentCustomField.objects.get(field=text, agent=task.agent).value, "the last line") # type: ignore
# test saving to checkbox field
task.custom_field = boolean # type: ignore
task.save() # type: ignore
data = {
"stdout": "1",
"stderr": "",
"retcode": 0,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(AutomatedTask.objects.get(pk=task.pk).status, "passing") # type: ignore
self.assertTrue(AgentCustomField.objects.get(field=boolean, agent=task.agent).value) # type: ignore
# test saving to multiple field with commas
task.custom_field = multiple # type: ignore
task.save() # type: ignore
data = {
"stdout": "this,is,an,array",
"stderr": "",
"retcode": 0,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(AutomatedTask.objects.get(pk=task.pk).status, "passing") # type: ignore
self.assertEqual(AgentCustomField.objects.get(field=multiple, agent=task.agent).value, ["this", "is", "an", "array"]) # type: ignore
# test multiple with a single value
data = {
"stdout": "this",
"stderr": "",
"retcode": 0,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(AutomatedTask.objects.get(pk=task.pk).status, "passing") # type: ignore
self.assertEqual(AgentCustomField.objects.get(field=multiple, agent=task.agent).value, ["this"]) # type: ignore



@@ -0,0 +1,298 @@
from django.utils import timezone as djangotime
from model_bakery import baker
from autotasks.models import TaskResult
from tacticalrmm.constants import CustomFieldModel, CustomFieldType, TaskStatus
from tacticalrmm.test import TacticalTestCase
class TestAPIv3(TacticalTestCase):
def setUp(self):
self.authenticate()
self.setup_coresettings()
self.agent = baker.make_recipe("agents.agent")
def test_get_checks(self):
agent = baker.make_recipe("agents.agent")
url = f"/api/v3/{agent.agent_id}/checkrunner/"
# add a check
check1 = baker.make_recipe("checks.ping_check", agent=agent)
check_result1 = baker.make(
"checks.CheckResult", agent=agent, assigned_check=check1
)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data["check_interval"], self.agent.check_interval)
self.assertEqual(len(r.data["checks"]), 1)
# override check run interval
check2 = baker.make_recipe(
"checks.diskspace_check", agent=agent, run_interval=20
)
check_result2 = baker.make(
"checks.CheckResult", agent=agent, assigned_check=check2
)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(len(r.data["checks"]), 2)
self.assertEqual(r.data["check_interval"], 20)
# Set last_run on both checks and should return an empty list
check_result1.last_run = djangotime.now()
check_result1.save()
check_result2.last_run = djangotime.now()
check_result2.save()
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data["check_interval"], 20)
self.assertFalse(r.data["checks"])
# set last_run greater than interval
check_result1.last_run = djangotime.now() - djangotime.timedelta(seconds=200)
check_result1.save()
check_result2.last_run = djangotime.now() - djangotime.timedelta(seconds=200)
check_result2.save()
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data["check_interval"], 20)
self.assertEqual(len(r.data["checks"]), 2)
url = "/api/v3/Maj34ACb324j234asdj2n34kASDjh34-DESKTOPTEST123/checkrunner/"
r = self.client.get(url)
self.assertEqual(r.status_code, 404)
self.check_not_authenticated("get", url)
def test_checkrunner_interval(self):
url = f"/api/v3/{self.agent.agent_id}/checkinterval/"
r = self.client.get(url, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(
r.json(),
{"agent": self.agent.pk, "check_interval": self.agent.check_interval},
)
# add check to agent with check interval set
check = baker.make_recipe(
"checks.ping_check", agent=self.agent, run_interval=30
)
r = self.client.get(url, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(
r.json(),
{"agent": self.agent.pk, "check_interval": 30},
)
# minimum check run interval is 15 seconds
check = baker.make_recipe("checks.ping_check", agent=self.agent, run_interval=5)
r = self.client.get(url, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(
r.json(),
{"agent": self.agent.pk, "check_interval": 15},
)
def test_run_checks(self):
# force run all checks regardless of interval
agent = baker.make_recipe("agents.online_agent")
baker.make_recipe("checks.ping_check", agent=agent)
baker.make_recipe("checks.diskspace_check", agent=agent)
baker.make_recipe("checks.cpuload_check", agent=agent)
baker.make_recipe("checks.memory_check", agent=agent)
baker.make_recipe("checks.eventlog_check", agent=agent)
for _ in range(10):
baker.make_recipe("checks.script_check", agent=agent)
url = f"/api/v3/{agent.agent_id}/runchecks/"
r = self.client.get(url)
self.assertEqual(r.json()["agent"], agent.pk)
self.assertIsInstance(r.json()["check_interval"], int)
self.assertEqual(len(r.json()["checks"]), 15)
def test_task_runner_get(self):
r = self.client.get("/api/v3/500/asdf9df9dfdf/taskrunner/")
self.assertEqual(r.status_code, 404)
script = baker.make("scripts.script")
# setup data
task_actions = [
{"type": "cmd", "command": "whoami", "timeout": 10, "shell": "cmd"},
{
"type": "script",
"script": script.id,
"script_args": ["test"],
"timeout": 30,
},
{"type": "script", "script": 3, "script_args": [], "timeout": 30},
]
agent = baker.make_recipe("agents.agent")
task = baker.make("autotasks.AutomatedTask", agent=agent, actions=task_actions)
url = f"/api/v3/{task.pk}/{agent.agent_id}/taskrunner/"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
def test_task_runner_results(self):
from agents.models import AgentCustomField
r = self.client.patch("/api/v3/500/asdf9df9dfdf/taskrunner/")
self.assertEqual(r.status_code, 404)
# setup data
agent = baker.make_recipe("agents.agent")
task = baker.make("autotasks.AutomatedTask", agent=agent)
task_result = baker.make("autotasks.TaskResult", agent=agent, task=task)
url = f"/api/v3/{task.pk}/{agent.agent_id}/taskrunner/"
# test passing task
data = {
"stdout": "test test \ntestest stdgsd\n",
"stderr": "",
"retcode": 0,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertTrue(
TaskResult.objects.get(pk=task_result.pk).status == TaskStatus.PASSING
)
# test failing task
data = {
"stdout": "test test \ntestest stdgsd\n",
"stderr": "",
"retcode": 1,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertTrue(
TaskResult.objects.get(pk=task_result.pk).status == TaskStatus.FAILING
)
# test collector task
text = baker.make(
"core.CustomField",
model=CustomFieldModel.AGENT,
type=CustomFieldType.TEXT,
name="Test",
)
boolean = baker.make(
"core.CustomField",
model=CustomFieldModel.AGENT,
type=CustomFieldType.CHECKBOX,
name="Test1",
)
multiple = baker.make(
"core.CustomField",
model=CustomFieldModel.AGENT,
type=CustomFieldType.MULTIPLE,
name="Test2",
)
# test text fields
task.custom_field = text
task.save()
# test failing with stderr
data = {
"stdout": "test test \nthe last line",
"stderr": "This is an error",
"retcode": 1,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertTrue(
TaskResult.objects.get(pk=task_result.pk).status == TaskStatus.FAILING
)
# test saving to text field
data = {
"stdout": "test test \nthe last line",
"stderr": "",
"retcode": 0,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(
TaskResult.objects.get(pk=task_result.pk).status, TaskStatus.PASSING
)
self.assertEqual(
AgentCustomField.objects.get(field=text, agent=task.agent).value,
"the last line",
)
# test saving to checkbox field
task.custom_field = boolean
task.save()
data = {
"stdout": "1",
"stderr": "",
"retcode": 0,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(
TaskResult.objects.get(pk=task_result.pk).status, TaskStatus.PASSING
)
self.assertTrue(
AgentCustomField.objects.get(field=boolean, agent=task.agent).value
)
# test saving to multiple field with commas
task.custom_field = multiple
task.save()
data = {
"stdout": "this,is,an,array",
"stderr": "",
"retcode": 0,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(
TaskResult.objects.get(pk=task_result.pk).status, TaskStatus.PASSING
)
self.assertEqual(
AgentCustomField.objects.get(field=multiple, agent=task.agent).value,
["this", "is", "an", "array"],
)
# test multiple with a single value
data = {
"stdout": "this",
"stderr": "",
"retcode": 0,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(
TaskResult.objects.get(pk=task_result.pk).status, TaskStatus.PASSING
)
self.assertEqual(
AgentCustomField.objects.get(field=multiple, agent=task.agent).value,
["this"],
)


@@ -9,7 +9,6 @@ urlpatterns = [
path("<str:agentid>/checkinterval/", views.CheckRunnerInterval.as_view()),
path("<int:pk>/<str:agentid>/taskrunner/", views.TaskRunner.as_view()),
path("meshexe/", views.MeshExe.as_view()),
path("sysinfo/", views.SysInfo.as_view()),
path("newagent/", views.NewAgent.as_view()),
path("software/", views.Software.as_view()),
path("installer/", views.Installer.as_view()),
@@ -19,6 +18,5 @@ urlpatterns = [
path("winupdates/", views.WinUpdates.as_view()),
path("superseded/", views.SupersededWinUpdate.as_view()),
path("<int:pk>/chocoresult/", views.ChocoResult.as_view()),
path("<str:agentid>/recovery/", views.AgentRecovery.as_view()),
path("<int:pk>/<str:agentid>/histresult/", views.AgentHistoryResult.as_view()),
]


@@ -1,9 +1,7 @@
import asyncio
import os
import time
from django.conf import settings
from django.http import HttpResponse
from django.db.models import Prefetch
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from packaging import version as pyver
@@ -15,15 +13,34 @@ from rest_framework.views import APIView
from accounts.models import User
from agents.models import Agent, AgentHistory
from agents.serializers import WinAgentSerializer, AgentHistorySerializer
from autotasks.models import AutomatedTask
from autotasks.serializers import TaskGOGetSerializer, TaskRunnerPatchSerializer
from checks.models import Check
from agents.serializers import AgentHistorySerializer
from autotasks.models import AutomatedTask, TaskResult
from autotasks.serializers import TaskGOGetSerializer, TaskResultSerializer
from checks.constants import CHECK_DEFER, CHECK_RESULT_DEFER
from checks.models import Check, CheckResult
from checks.serializers import CheckRunnerGetSerializer
from checks.utils import bytes2human
from logs.models import PendingAction, DebugLog
from core.utils import (
download_mesh_agent,
get_core_settings,
get_mesh_device_id,
get_mesh_ws_url,
)
from logs.models import DebugLog, PendingAction
from software.models import InstalledSoftware
from tacticalrmm.utils import SoftwareList, filter_software, notify_error, reload_nats
from tacticalrmm.constants import (
AGENT_DEFER,
AgentMonType,
AgentPlat,
AuditActionType,
AuditObjType,
CheckStatus,
DebugLogType,
GoArch,
MeshAgentIdent,
PAStatus,
)
from tacticalrmm.helpers import notify_error
from tacticalrmm.utils import reload_nats
from winupdate.models import WinUpdate, WinUpdatePolicy
@@ -32,108 +49,14 @@ class CheckIn(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
def patch(self, request):
"""
!!! DEPRECATED AS OF AGENT 1.6.0 !!!
Endpoint will be removed in a future release
"""
from alerts.models import Alert
updated = False
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
if pyver.parse(request.data["version"]) > pyver.parse(
agent.version
) or pyver.parse(request.data["version"]) == pyver.parse(
settings.LATEST_AGENT_VER
):
updated = True
agent.version = request.data["version"]
agent.last_seen = djangotime.now()
agent.save(update_fields=["version", "last_seen"])
# change agent update pending status to completed if agent has just updated
if (
updated
and agent.pendingactions.filter( # type: ignore
action_type="agentupdate", status="pending"
).exists()
):
agent.pendingactions.filter( # type: ignore
action_type="agentupdate", status="pending"
).update(status="completed")
# handles any alerting actions
if Alert.objects.filter(agent=agent, resolved=False).exists():
Alert.handle_alert_resolve(agent)
# sync scheduled tasks
if agent.autotasks.exclude(sync_status="synced").exists(): # type: ignore
tasks = agent.autotasks.exclude(sync_status="synced") # type: ignore
for task in tasks:
if task.sync_status == "pendingdeletion":
task.delete_task_on_agent()
elif task.sync_status == "initial":
task.modify_task_on_agent()
elif task.sync_status == "notsynced":
task.create_task_on_agent()
return Response("ok")
def put(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
serializer = WinAgentSerializer(instance=agent, data=request.data, partial=True)
if request.data["func"] == "disks":
disks = request.data["disks"]
new = []
for disk in disks:
tmp = {}
for _, _ in disk.items():
tmp["device"] = disk["device"]
tmp["fstype"] = disk["fstype"]
tmp["total"] = bytes2human(disk["total"])
tmp["used"] = bytes2human(disk["used"])
tmp["free"] = bytes2human(disk["free"])
tmp["percent"] = int(disk["percent"])
new.append(tmp)
serializer.is_valid(raise_exception=True)
serializer.save(disks=new)
return Response("ok")
if request.data["func"] == "loggedonuser":
if request.data["logged_in_username"] != "None":
serializer.is_valid(raise_exception=True)
serializer.save(last_logged_in_user=request.data["logged_in_username"])
return Response("ok")
if request.data["func"] == "software":
raw: SoftwareList = request.data["software"]
if not isinstance(raw, list):
return notify_error("err")
sw = filter_software(raw)
if not InstalledSoftware.objects.filter(agent=agent).exists():
InstalledSoftware(agent=agent, software=sw).save()
else:
s = agent.installedsoftware_set.first() # type: ignore
s.software = sw
s.save(update_fields=["software"])
return Response("ok")
serializer.is_valid(raise_exception=True)
serializer.save()
return Response("ok")
# called once during tacticalagent windows service startup
def post(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=request.data["agent_id"]
)
if not agent.choco_installed:
asyncio.run(agent.nats_cmd({"func": "installchoco"}, wait=False))
time.sleep(0.5)
asyncio.run(agent.nats_cmd({"func": "getwinupdates"}, wait=False))
return Response("ok")
@@ -143,7 +66,9 @@ class SyncMeshNodeID(APIView):
permission_classes = [IsAuthenticated]
def post(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=request.data["agent_id"]
)
if agent.mesh_node_id != request.data["nodeid"]:
agent.mesh_node_id = request.data["nodeid"]
agent.save(update_fields=["mesh_node_id"])
@@ -156,7 +81,9 @@ class Choco(APIView):
permission_classes = [IsAuthenticated]
def post(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=request.data["agent_id"]
)
agent.choco_installed = request.data["installed"]
agent.save(update_fields=["choco_installed"])
return Response("ok")
@@ -167,25 +94,27 @@ class WinUpdates(APIView):
permission_classes = [IsAuthenticated]
def put(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=request.data["agent_id"]
)
needs_reboot: bool = request.data["needs_reboot"]
agent.needs_reboot = needs_reboot
agent.save(update_fields=["needs_reboot"])
reboot_policy: str = agent.get_patch_policy().reboot_after_install
reboot = False
if reboot_policy == "always":
reboot = True
if request.data["needs_reboot"]:
if reboot_policy == "required":
reboot = True
elif reboot_policy == "never":
agent.needs_reboot = True
agent.save(update_fields=["needs_reboot"])
elif needs_reboot and reboot_policy == "required":
reboot = True
if reboot:
asyncio.run(agent.nats_cmd({"func": "rebootnow"}, wait=False))
DebugLog.info(
agent=agent,
log_type="windows_updates",
log_type=DebugLogType.WIN_UPDATES,
message=f"{agent.hostname} is rebooting after updates were installed.",
)
@@ -193,8 +122,13 @@ class WinUpdates(APIView):
return Response("ok")
def patch(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=request.data["agent_id"]
)
u = agent.winupdates.filter(guid=request.data["guid"]).last() # type: ignore
if not u:
raise WinUpdate.DoesNotExist
success: bool = request.data["success"]
if success:
u.result = "success"
@@ -217,8 +151,14 @@ class WinUpdates(APIView):
return Response("ok")
def post(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
updates = request.data["wua_updates"]
if not updates:
return notify_error("Empty payload")
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=request.data["agent_id"]
)
for update in updates:
if agent.winupdates.filter(guid=update["guid"]).exists(): # type: ignore
u = agent.winupdates.filter(guid=update["guid"]).last() # type: ignore
@@ -249,14 +189,6 @@ class WinUpdates(APIView):
).save()
agent.delete_superseded_updates()
# more superseded updates cleanup
if pyver.parse(agent.version) <= pyver.parse("1.4.2"):
for u in agent.winupdates.filter( # type: ignore
date_installed__isnull=True, result="failed"
).exclude(installed=True):
u.delete()
return Response("ok")
@@ -265,7 +197,9 @@ class SupersededWinUpdate(APIView):
permission_classes = [IsAuthenticated]
def post(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=request.data["agent_id"]
)
updates = agent.winupdates.filter(guid=request.data["guid"]) # type: ignore
for u in updates:
u.delete()
@@ -278,12 +212,19 @@ class RunChecks(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, agentid):
agent = get_object_or_404(Agent, agent_id=agentid)
checks = Check.objects.filter(agent__pk=agent.pk, overriden_by_policy=False)
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER).prefetch_related(
Prefetch("agentchecks", queryset=Check.objects.select_related("script"))
),
agent_id=agentid,
)
checks = agent.get_checks_with_policies(exclude_overridden=True)
ret = {
"agent": agent.pk,
"check_interval": agent.check_interval,
"checks": CheckRunnerGetSerializer(checks, many=True).data,
"checks": CheckRunnerGetSerializer(
checks, context={"agent": agent}, many=True
).data,
}
return Response(ret)
@@ -293,47 +234,72 @@ class CheckRunner(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, agentid):
agent = get_object_or_404(Agent, agent_id=agentid)
checks = agent.agentchecks.filter(overriden_by_policy=False) # type: ignore
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER).prefetch_related(
Prefetch("agentchecks", queryset=Check.objects.select_related("script"))
),
agent_id=agentid,
)
checks = agent.get_checks_with_policies(exclude_overridden=True)
run_list = [
check
for check in checks
# always run if check hasn't run yet
if not check.last_run
# if a check interval is set, see if the correct amount of seconds have passed
if not isinstance(check.check_result, CheckResult)
or not check.check_result.last_run
# see if the correct amount of seconds have passed
or (
check.run_interval
and (
check.last_run
< djangotime.now()
- djangotime.timedelta(seconds=check.run_interval)
check.check_result.last_run
< djangotime.now()
- djangotime.timedelta(
seconds=check.run_interval
if check.run_interval
else agent.check_interval
)
)
# if check interval isn't set, make sure the agent's check interval has passed before running
or (
not check.run_interval
and check.last_run
< djangotime.now() - djangotime.timedelta(seconds=agent.check_interval)
)
]
ret = {
"agent": agent.pk,
"check_interval": agent.check_run_interval(),
"checks": CheckRunnerGetSerializer(run_list, many=True).data,
"checks": CheckRunnerGetSerializer(
run_list, context={"agent": agent}, many=True
).data,
}
return Response(ret)
def patch(self, request):
check = get_object_or_404(Check, pk=request.data["id"])
if pyver.parse(check.agent.version) < pyver.parse("1.5.7"):
return notify_error("unsupported")
if "agent_id" not in request.data.keys():
return notify_error("Agent upgrade required")
check.last_run = djangotime.now()
check.save(update_fields=["last_run"])
status = check.handle_check(request.data)
if status == "failing" and check.assignedtask.exists(): # type: ignore
check.handle_assigned_task()
check = get_object_or_404(
Check.objects.defer(*CHECK_DEFER),
pk=request.data["id"],
)
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=request.data["agent_id"]
)
# get check result or create if doesn't exist
check_result, created = CheckResult.objects.defer(
*CHECK_RESULT_DEFER
).get_or_create(
assigned_check=check,
agent=agent,
)
if created:
check_result.save()
status = check_result.handle_check(request.data, check, agent)
if status == CheckStatus.FAILING and check.assignedtasks.exists():
for task in check.assignedtasks.all():
if task.enabled:
if task.policy:
task.run_win_task(agent)
else:
task.run_win_task()
return Response("ok")
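The new patch handler creates the CheckResult row lazily with get_or_create the first time an agent reports for a given check. A small sketch of that idiom with a hypothetical model (again assuming a configured Django project):

from django.db import models

class PageHit(models.Model):
    path = models.CharField(max_length=200, unique=True)
    hits = models.PositiveIntegerField(default=0)

def record_hit(path: str) -> int:
    # get_or_create returns (object, created); the row is inserted only if missing
    hit, created = PageHit.objects.get_or_create(path=path)
    hit.hits += 1
    hit.save(update_fields=["hits"])
    return hit.hits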
@@ -343,7 +309,10 @@ class CheckRunnerInterval(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, agentid):
agent = get_object_or_404(Agent, agent_id=agentid)
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER).prefetch_related("agentchecks"),
agent_id=agentid,
)
return Response(
{"agent": agent.pk, "check_interval": agent.check_run_interval()}
@@ -355,58 +324,71 @@ class TaskRunner(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, pk, agentid):
_ = get_object_or_404(Agent, agent_id=agentid)
agent = get_object_or_404(Agent.objects.defer(*AGENT_DEFER), agent_id=agentid)
task = get_object_or_404(AutomatedTask, pk=pk)
return Response(TaskGOGetSerializer(task).data)
return Response(TaskGOGetSerializer(task, context={"agent": agent}).data)
def patch(self, request, pk, agentid):
from alerts.models import Alert
agent = get_object_or_404(Agent, agent_id=agentid)
task = get_object_or_404(AutomatedTask, pk=pk)
serializer = TaskRunnerPatchSerializer(
instance=task, data=request.data, partial=True
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER),
agent_id=agentid,
)
task = get_object_or_404(
AutomatedTask.objects.select_related("custom_field"), pk=pk
)
# get task result or create if doesn't exist
try:
task_result = (
TaskResult.objects.select_related("agent")
.defer("agent__services", "agent__wmi_detail")
.get(task=task, agent=agent)
)
serializer = TaskResultSerializer(
data=request.data, instance=task_result, partial=True
)
except TaskResult.DoesNotExist:
serializer = TaskResultSerializer(data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
new_task = serializer.save(last_run=djangotime.now())
task_result = serializer.save(last_run=djangotime.now())
AgentHistory.objects.create(
agent=agent,
type=AuditActionType.TASK_RUN,
command=task.name,
script_results=request.data,
)
# check if task is a collector and update the custom field
if task.custom_field:
if not task.stderr:
if not task_result.stderr:
task.save_collector_results()
task_result.save_collector_results()
status = "passing"
status = CheckStatus.PASSING
else:
status = "failing"
status = CheckStatus.FAILING
else:
status = "failing" if task.retcode != 0 else "passing"
status = (
CheckStatus.FAILING if task_result.retcode != 0 else CheckStatus.PASSING
)
new_task.status = status
new_task.save()
if status == "passing":
if Alert.objects.filter(assigned_task=new_task, resolved=False).exists():
Alert.handle_alert_resolve(new_task)
if task_result:
task_result.status = status
task_result.save(update_fields=["status"])
else:
Alert.handle_alert_failure(new_task)
task_result.status = status
task.save(update_fields=["status"])
return Response("ok")
if status == CheckStatus.PASSING:
if Alert.create_or_return_task_alert(task, agent=agent, skip_create=True):
Alert.handle_alert_resolve(task_result)
else:
Alert.handle_alert_failure(task_result)
class SysInfo(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
def patch(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
if not isinstance(request.data["sysinfo"], dict):
return notify_error("err")
agent.wmi_detail = request.data["sysinfo"]
agent.save(update_fields=["wmi_detail"])
return Response("ok")
@@ -414,25 +396,33 @@ class MeshExe(APIView):
"""Sends the mesh exe to the installer"""
def post(self, request):
exe = "meshagent.exe" if request.data["arch"] == "64" else "meshagent-x86.exe"
mesh_exe = os.path.join(settings.EXE_DIR, exe)
match request.data:
case {"goarch": GoArch.AMD64, "plat": AgentPlat.WINDOWS}:
arch = MeshAgentIdent.WIN64
case {"goarch": GoArch.i386, "plat": AgentPlat.WINDOWS}:
arch = MeshAgentIdent.WIN32
case _:
return notify_error("Arch not specified")
if not os.path.exists(mesh_exe):
return notify_error("Mesh Agent executable not found")
core = get_core_settings()
if settings.DEBUG:
with open(mesh_exe, "rb") as f:
response = HttpResponse(
f.read(),
content_type="application/vnd.microsoft.portable-executable",
)
response["Content-Disposition"] = f"inline; filename={exe}"
return response
try:
uri = get_mesh_ws_url()
mesh_id = asyncio.run(get_mesh_device_id(uri, core.mesh_device_group))
except:
return notify_error("Unable to connect to mesh to get group id information")
if settings.DOCKER_BUILD:
dl_url = f"{settings.MESH_WS_URL.replace('ws://', 'http://')}/meshagents?id={arch}&meshid={mesh_id}&installflags=0"
else:
response = HttpResponse()
response["Content-Disposition"] = f"attachment; filename={exe}"
response["X-Accel-Redirect"] = f"/private/exe/{exe}"
return response
dl_url = (
f"{core.mesh_site}/meshagents?id={arch}&meshid={mesh_id}&installflags=0"
)
try:
return download_mesh_agent(dl_url)
except:
return notify_error("Unable to download mesh agent exe")
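MeshExe now dispatches on the request body with Python 3.10 structural pattern matching: a mapping pattern matches when the listed key/value pairs are present and simply ignores extra keys. A standalone sketch with illustrative values:

def pick_mesh_ident(data: dict) -> str:
    match data:
        case {"goarch": "amd64", "plat": "windows"}:
            return "win64"
        case {"goarch": "386", "plat": "windows"}:
            return "win32"
        case _:
            return "unsupported"

assert pick_mesh_ident({"goarch": "amd64", "plat": "windows", "extra": 1}) == "win64"
assert pick_mesh_ident({"goarch": "arm64", "plat": "linux"}) == "unsupported"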
class NewAgent(APIView):
@@ -453,11 +443,11 @@ class NewAgent(APIView):
monitoring_type=request.data["monitoring_type"],
description=request.data["description"],
mesh_node_id=request.data["mesh_node_id"],
goarch=request.data["goarch"],
plat=request.data["plat"],
last_seen=djangotime.now(),
)
agent.save()
agent.salt_id = f"{agent.hostname}-{agent.pk}"
agent.save(update_fields=["salt_id"])
user = User.objects.create_user( # type: ignore
username=request.data["agent_id"],
@@ -467,7 +457,7 @@ class NewAgent(APIView):
token = Token.objects.create(user=user)
if agent.monitoring_type == "workstation":
if agent.monitoring_type == AgentMonType.WORKSTATION:
WinUpdatePolicy(agent=agent, run_time_days=[5, 6]).save()
else:
WinUpdatePolicy(agent=agent).save()
@@ -478,20 +468,15 @@ class NewAgent(APIView):
AuditLog.objects.create(
username=request.user,
agent=agent.hostname,
object_type="agent",
action="agent_install",
object_type=AuditObjType.AGENT,
action=AuditActionType.AGENT_INSTALL,
message=f"{request.user} installed new agent {agent.hostname}",
after_value=Agent.serialize(agent),
debug_info={"ip": request._client_ip},
)
return Response(
{
"pk": agent.pk,
"saltid": f"{agent.hostname}-{agent.pk}",
"token": token.key,
}
)
ret = {"pk": agent.pk, "token": token.key}
return Response(ret)
class Software(APIView):
@@ -500,11 +485,7 @@ class Software(APIView):
def post(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
raw: SoftwareList = request.data["software"]
if not isinstance(raw, list):
return notify_error("err")
sw = filter_software(raw)
sw = request.data["software"]
if not InstalledSoftware.objects.filter(agent=agent).exists():
InstalledSoftware(agent=agent, software=sw).save()
else:
@@ -525,7 +506,10 @@ class Installer(APIView):
return notify_error("Invalid data")
ver = request.data["version"]
if pyver.parse(ver) < pyver.parse(settings.LATEST_AGENT_VER):
if (
pyver.parse(ver) < pyver.parse(settings.LATEST_AGENT_VER)
and not "-dev" in settings.LATEST_AGENT_VER
):
return notify_error(
f"Old installer detected (version {ver} ). Latest version is {settings.LATEST_AGENT_VER} Please generate a new installer from the RMM"
)
@@ -560,42 +544,19 @@ class ChocoResult(APIView):
action.details["output"] = results
action.details["installed"] = installed
action.status = "completed"
action.status = PAStatus.COMPLETED
action.save(update_fields=["details", "status"])
return Response("ok")
class AgentRecovery(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
def get(self, request, agentid):
agent = get_object_or_404(Agent, agent_id=agentid)
recovery = agent.recoveryactions.filter(last_run=None).last() # type: ignore
ret = {"mode": "pass", "shellcmd": ""}
if recovery is None:
return Response(ret)
recovery.last_run = djangotime.now()
recovery.save(update_fields=["last_run"])
ret["mode"] = recovery.mode
if recovery.mode == "command":
ret["shellcmd"] = recovery.command
elif recovery.mode == "rpc":
reload_nats()
return Response(ret)
class AgentHistoryResult(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
def patch(self, request, agentid, pk):
_ = get_object_or_404(Agent, agent_id=agentid)
hist = get_object_or_404(AgentHistory, pk=pk)
hist = get_object_or_404(
AgentHistory.objects.filter(agent__agent_id=agentid), pk=pk
)
s = AgentHistorySerializer(instance=hist, data=request.data, partial=True)
s.is_valid(raise_exception=True)
s.save()


@@ -1,8 +1,21 @@
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from django.core.cache import cache
from django.db import models
from agents.models import Agent
from core.models import CoreSettings
from clients.models import Client, Site
from logs.models import BaseAuditModel
from tacticalrmm.constants import (
CORESETTINGS_CACHE_KEY,
AgentMonType,
AgentPlat,
CheckType,
)
if TYPE_CHECKING:
from autotasks.models import AutomatedTask
from checks.models import Check
class Policy(BaseAuditModel):
@@ -27,366 +40,301 @@ class Policy(BaseAuditModel):
"agents.Agent", related_name="policy_exclusions", blank=True
)
def save(self, *args, **kwargs):
def save(self, *args: Any, **kwargs: Any) -> None:
from alerts.tasks import cache_agents_alert_template
from automation.tasks import generate_agent_checks_task
# get old policy if exists
old_policy = type(self).objects.get(pk=self.pk) if self.pk else None
old_policy: Optional[Policy] = (
type(self).objects.get(pk=self.pk) if self.pk else None
)
super(Policy, self).save(old_model=old_policy, *args, **kwargs)
# generate agent checks only if active and enforced were changed
# check if alert template was changed and cache on agents
if old_policy:
if old_policy.active != self.active or old_policy.enforced != self.enforced:
generate_agent_checks_task.delay(
policy=self.pk,
create_tasks=True,
)
if old_policy.alert_template != self.alert_template:
cache_agents_alert_template.delay()
elif self.alert_template and old_policy.active != self.active:
cache_agents_alert_template.delay()
if old_policy.active != self.active or old_policy.enforced != self.enforced:
cache.delete(CORESETTINGS_CACHE_KEY)
cache.delete_many_pattern("site_workstation_*")
cache.delete_many_pattern("site_server_*")
cache.delete_many_pattern("agent_*")
def delete(self, *args, **kwargs):
from automation.tasks import generate_agent_checks_task
cache.delete(CORESETTINGS_CACHE_KEY)
cache.delete_many_pattern("site_workstation_*")
cache.delete_many_pattern("site_server_*")
cache.delete_many_pattern("agent_*")
agents = list(self.related_agents().only("pk").values_list("pk", flat=True))
super(Policy, self).delete(*args, **kwargs)
super(Policy, self).delete(
*args,
**kwargs,
)
generate_agent_checks_task.delay(agents=agents, create_tasks=True)
def __str__(self):
def __str__(self) -> str:
return self.name
@property
def is_default_server_policy(self):
return self.default_server_policy.exists() # type: ignore
def is_default_server_policy(self) -> bool:
return self.default_server_policy.exists()
@property
def is_default_workstation_policy(self):
return self.default_workstation_policy.exists() # type: ignore
def is_default_workstation_policy(self) -> bool:
return self.default_workstation_policy.exists()
def is_agent_excluded(self, agent):
def is_agent_excluded(self, agent: "Agent") -> bool:
return (
agent in self.excluded_agents.all()
or agent.site in self.excluded_sites.all()
or agent.client in self.excluded_clients.all()
)
def related_agents(self):
return self.get_related("server") | self.get_related("workstation")
def related_agents(
self, mon_type: Optional[str] = None
) -> "models.QuerySet[Agent]":
models.prefetch_related_objects(
[self],
"excluded_agents",
"excluded_sites",
"excluded_clients",
"workstation_clients",
"server_clients",
"workstation_sites",
"server_sites",
"agents",
)
def get_related(self, mon_type):
explicit_agents = (
self.agents.filter(monitoring_type=mon_type) # type: ignore
.exclude(
pk__in=self.excluded_agents.only("pk").values_list("pk", flat=True)
agent_filter = {}
filtered_agents_ids = Agent.objects.none()
if mon_type:
agent_filter["monitoring_type"] = mon_type
excluded_clients_ids = self.excluded_clients.only("pk").values_list(
"id", flat=True
)
excluded_sites_ids = self.excluded_sites.only("pk").values_list("id", flat=True)
excluded_agents_ids = self.excluded_agents.only("pk").values_list(
"id", flat=True
)
if self.is_default_server_policy:
filtered_agents_ids |= (
Agent.objects.exclude(block_policy_inheritance=True)
.exclude(site__block_policy_inheritance=True)
.exclude(site__client__block_policy_inheritance=True)
.exclude(id__in=excluded_agents_ids)
.exclude(site_id__in=excluded_sites_ids)
.exclude(site__client_id__in=excluded_clients_ids)
.filter(monitoring_type=AgentMonType.SERVER)
.only("id")
.values_list("id", flat=True)
)
.exclude(site__in=self.excluded_sites.all())
.exclude(site__client__in=self.excluded_clients.all())
if self.is_default_workstation_policy:
filtered_agents_ids |= (
Agent.objects.exclude(block_policy_inheritance=True)
.exclude(site__block_policy_inheritance=True)
.exclude(site__client__block_policy_inheritance=True)
.exclude(id__in=excluded_agents_ids)
.exclude(site_id__in=excluded_sites_ids)
.exclude(site__client_id__in=excluded_clients_ids)
.filter(monitoring_type=AgentMonType.WORKSTATION)
.only("id")
.values_list("id", flat=True)
)
# if this is the default policy for both servers and workstations, skip the other calculations
if self.is_default_server_policy and self.is_default_workstation_policy:
return Agent.objects.filter(models.Q(id__in=filtered_agents_ids))
explicit_agents = (
self.agents.filter(**agent_filter) # type: ignore
.exclude(id__in=excluded_agents_ids)
.exclude(site_id__in=excluded_sites_ids)
.exclude(site__client_id__in=excluded_clients_ids)
)
explicit_clients = getattr(self, f"{mon_type}_clients").exclude(
pk__in=self.excluded_clients.all()
)
explicit_sites = getattr(self, f"{mon_type}_sites").exclude(
pk__in=self.excluded_sites.all()
)
explicit_clients_qs = Client.objects.none()
explicit_sites_qs = Site.objects.none()
filtered_agents_pks = Policy.objects.none()
if not mon_type or mon_type == AgentMonType.WORKSTATION:
explicit_clients_qs |= self.workstation_clients.exclude( # type: ignore
id__in=excluded_clients_ids
)
explicit_sites_qs |= self.workstation_sites.exclude( # type: ignore
id__in=excluded_sites_ids
)
filtered_agents_pks |= (
if not mon_type or mon_type == AgentMonType.SERVER:
explicit_clients_qs |= self.server_clients.exclude( # type: ignore
id__in=excluded_clients_ids
)
explicit_sites_qs |= self.server_sites.exclude( # type: ignore
id__in=excluded_sites_ids
)
filtered_agents_ids |= (
Agent.objects.exclude(block_policy_inheritance=True)
.filter(
site__in=[
site
for site in explicit_sites
if site.client not in explicit_clients
and site.client not in self.excluded_clients.all()
site_id__in=[
site.id
for site in explicit_sites_qs
if site.client not in explicit_clients_qs
and site.client.id not in excluded_clients_ids
],
monitoring_type=mon_type,
**agent_filter,
)
.values_list("pk", flat=True)
.only("id")
.values_list("id", flat=True)
)
filtered_agents_pks |= (
filtered_agents_ids |= (
Agent.objects.exclude(block_policy_inheritance=True)
.exclude(site__block_policy_inheritance=True)
.filter(
site__client__in=[client for client in explicit_clients],
monitoring_type=mon_type,
site__client__in=explicit_clients_qs,
**agent_filter,
)
.values_list("pk", flat=True)
.only("id")
.values_list("id", flat=True)
)
return Agent.objects.filter(
models.Q(pk__in=filtered_agents_pks)
| models.Q(pk__in=explicit_agents.only("pk"))
models.Q(id__in=filtered_agents_ids)
| models.Q(id__in=explicit_agents.only("id"))
)
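The rewritten related_agents builds filtered_agents_ids up as flat id lists per bucket (default-policy agents, explicit sites, explicit clients), applies the exclusion ids once per bucket, and only materialises Agent rows in the final filter. Reduced to its skeleton (qs_a and qs_b stand in for any two of those buckets):

# Skeleton of the id-union approach used above; qs_a/qs_b are placeholder Agent querysets.
from django.db import models
from agents.models import Agent  # same import as at the top of this module

def union_by_id(qs_a, qs_b):
    ids_a = qs_a.only("id").values_list("id", flat=True)
    ids_b = qs_b.only("id").values_list("id", flat=True)
    # One query at the end; the id lists are evaluated as subqueries/IN clauses.
    return Agent.objects.filter(models.Q(id__in=ids_a) | models.Q(id__in=ids_b))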
@staticmethod
def serialize(policy):
def serialize(policy: "Policy") -> Dict[str, Any]:
# serializes the policy and returns json
from .serializers import PolicyAuditSerializer
return PolicyAuditSerializer(policy).data
@staticmethod
def cascade_policy_tasks(agent):
def get_policy_tasks(agent: "Agent") -> "List[AutomatedTask]":
# List of all tasks to be applied
tasks = list()
added_task_pks = list()
agent_tasks_parent_pks = [
task.parent_task for task in agent.autotasks.filter(managed_by_policy=True)
]
# Get policies applied to agent and agent site and client
client = agent.client
site = agent.site
policies = agent.get_agent_policies()
default_policy = None
client_policy = None
site_policy = None
agent_policy = agent.policy
processed_policies = list()
# Get the Client/Site policy based on if the agent is server or workstation
if agent.monitoring_type == "server":
default_policy = CoreSettings.objects.first().server_policy
client_policy = client.server_policy
site_policy = site.server_policy
elif agent.monitoring_type == "workstation":
default_policy = CoreSettings.objects.first().workstation_policy
client_policy = client.workstation_policy
site_policy = site.workstation_policy
# check if client/site/agent is blocking inheritance and blank out policies
if agent.block_policy_inheritance:
site_policy = None
client_policy = None
default_policy = None
elif site.block_policy_inheritance:
client_policy = None
default_policy = None
elif client.block_policy_inheritance:
default_policy = None
if (
agent_policy
and agent_policy.active
and not agent_policy.is_agent_excluded(agent)
):
for task in agent_policy.autotasks.all():
if task.pk not in added_task_pks:
for _, policy in policies.items():
if policy and policy.active and policy.pk not in processed_policies:
processed_policies.append(policy.pk)
for task in policy.autotasks.all():
tasks.append(task)
added_task_pks.append(task.pk)
if (
site_policy
and site_policy.active
and not site_policy.is_agent_excluded(agent)
):
for task in site_policy.autotasks.all():
if task.pk not in added_task_pks:
tasks.append(task)
added_task_pks.append(task.pk)
if (
client_policy
and client_policy.active
and not client_policy.is_agent_excluded(agent)
):
for task in client_policy.autotasks.all():
if task.pk not in added_task_pks:
tasks.append(task)
added_task_pks.append(task.pk)
if (
default_policy
and default_policy.active
and not default_policy.is_agent_excluded(agent)
):
for task in default_policy.autotasks.all():
if task.pk not in added_task_pks:
tasks.append(task)
added_task_pks.append(task.pk)
# remove policy tasks from agent not included in policy
for task in agent.autotasks.filter(
parent_task__in=[
taskpk
for taskpk in agent_tasks_parent_pks
if taskpk not in added_task_pks
]
):
if task.sync_status == "initial":
task.delete()
else:
task.sync_status = "pendingdeletion"
task.save()
# change tasks from pendingdeletion to notsynced if policy was added or changed
agent.autotasks.filter(sync_status="pendingdeletion").filter(
parent_task__in=[taskpk for taskpk in added_task_pks]
).update(sync_status="notsynced")
return [task for task in tasks if task.pk not in agent_tasks_parent_pks]
return tasks
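get_policy_tasks leans on agent.get_agent_policies(), which appears to yield the applicable policies already in precedence order (agent, then site, client, default), so the per-level if-blocks collapse into a single loop that skips inactive and already-seen policies. That loop in isolation:

# The precedence/dedup loop reduced to its core; `policies` is an ordered mapping
# such as the one returned by agent.get_agent_policies().
def collect_policy_tasks(policies):
    tasks, seen = [], set()
    for policy in policies.values():
        if policy and policy.active and policy.pk not in seen:
            seen.add(policy.pk)
            tasks.extend(policy.autotasks.all())
    return tasks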
@staticmethod
def cascade_policy_checks(agent):
# Get checks added to agent directly
agent_checks = list(agent.agentchecks.filter(managed_by_policy=False))
def get_policy_checks(agent: "Agent") -> "List[Check]":
agent_checks_parent_pks = [
check.parent_check
for check in agent.agentchecks.filter(managed_by_policy=True)
]
# Get checks added to agent directly
agent_checks = list(agent.agentchecks.all())
# Get policies applied to agent and agent site and client
client = agent.client
site = agent.site
default_policy = None
client_policy = None
site_policy = None
agent_policy = agent.policy
if agent.monitoring_type == "server":
default_policy = CoreSettings.objects.first().server_policy
client_policy = client.server_policy
site_policy = site.server_policy
elif agent.monitoring_type == "workstation":
default_policy = CoreSettings.objects.first().workstation_policy
client_policy = client.workstation_policy
site_policy = site.workstation_policy
# check if client/site/agent is blocking inheritance and blank out policies
if agent.block_policy_inheritance:
site_policy = None
client_policy = None
default_policy = None
elif site.block_policy_inheritance:
client_policy = None
default_policy = None
elif client.block_policy_inheritance:
default_policy = None
policies = agent.get_agent_policies()
# Used to hold the policies that will be applied and the order in which they are applied
# Enforced policies are applied first
enforced_checks = list()
policy_checks = list()
if (
agent_policy
and agent_policy.active
and not agent_policy.is_agent_excluded(agent)
):
if agent_policy.enforced:
for check in agent_policy.policychecks.all():
enforced_checks.append(check)
else:
for check in agent_policy.policychecks.all():
policy_checks.append(check)
processed_policies = list()
if (
site_policy
and site_policy.active
and not site_policy.is_agent_excluded(agent)
):
if site_policy.enforced:
for check in site_policy.policychecks.all():
enforced_checks.append(check)
else:
for check in site_policy.policychecks.all():
policy_checks.append(check)
for _, policy in policies.items():
if policy and policy.active and policy.pk not in processed_policies:
processed_policies.append(policy.pk)
if policy.enforced:
for check in policy.policychecks.all():
enforced_checks.append(check)
else:
for check in policy.policychecks.all():
policy_checks.append(check)
if (
client_policy
and client_policy.active
and not client_policy.is_agent_excluded(agent)
):
if client_policy.enforced:
for check in client_policy.policychecks.all():
enforced_checks.append(check)
else:
for check in client_policy.policychecks.all():
policy_checks.append(check)
if (
default_policy
and default_policy.active
and not default_policy.is_agent_excluded(agent)
):
if default_policy.enforced:
for check in default_policy.policychecks.all():
enforced_checks.append(check)
else:
for check in default_policy.policychecks.all():
policy_checks.append(check)
if not enforced_checks and not policy_checks:
return []
# Track which checks have already been added, per check type
added_diskspace_checks = list()
added_ping_checks = list()
added_winsvc_checks = list()
added_script_checks = list()
added_eventlog_checks = list()
added_cpuload_checks = list()
added_memory_checks = list()
added_diskspace_checks: List[str] = list()
added_ping_checks: List[str] = list()
added_winsvc_checks: List[str] = list()
added_script_checks: List[int] = list()
added_eventlog_checks: List[List[str]] = list()
added_cpuload_checks: List[int] = list()
added_memory_checks: List[int] = list()
# Lists all agent and policy checks that will be created
diskspace_checks = list()
ping_checks = list()
winsvc_checks = list()
script_checks = list()
eventlog_checks = list()
cpuload_checks = list()
memory_checks = list()
# Lists all agent and policy checks that will be returned
diskspace_checks: "List[Check]" = list()
ping_checks: "List[Check]" = list()
winsvc_checks: "List[Check]" = list()
script_checks: "List[Check]" = list()
eventlog_checks: "List[Check]" = list()
cpuload_checks: "List[Check]" = list()
memory_checks: "List[Check]" = list()
overridden_checks: List[int] = list()
# Loop over checks, with enforced policies first, then non-enforced policies
for check in enforced_checks + agent_checks + policy_checks:
if check.check_type == "diskspace":
if (
check.check_type == CheckType.DISK_SPACE
and agent.plat == AgentPlat.WINDOWS
):
# Check if drive letter was already added
if check.disk not in added_diskspace_checks:
added_diskspace_checks.append(check.disk)
# Dont create the check if it is an agent check
# Don't add the check if it is an agent check
if not check.agent:
diskspace_checks.append(check)
elif check.agent:
check.overriden_by_policy = True
check.save()
overridden_checks.append(check.pk)
if check.check_type == "ping":
elif check.check_type == CheckType.PING:
# Check if IP/host was already added
if check.ip not in added_ping_checks:
added_ping_checks.append(check.ip)
# Dont create the check if it is an agent check
# Don't add the check if it is an agent check
if not check.agent:
ping_checks.append(check)
elif check.agent:
check.overriden_by_policy = True
check.save()
overridden_checks.append(check.pk)
if check.check_type == "cpuload":
elif (
check.check_type == CheckType.CPU_LOAD
and agent.plat == AgentPlat.WINDOWS
):
# Check if cpuload list is empty
if not added_cpuload_checks:
added_cpuload_checks.append(check)
added_cpuload_checks.append(check.pk)
# Dont create the check if it is an agent check
if not check.agent:
cpuload_checks.append(check)
elif check.agent:
check.overriden_by_policy = True
check.save()
overridden_checks.append(check.pk)
if check.check_type == "memory":
elif (
check.check_type == CheckType.MEMORY and agent.plat == AgentPlat.WINDOWS
):
# Check if memory check list is empty
if not added_memory_checks:
added_memory_checks.append(check)
added_memory_checks.append(check.pk)
# Dont create the check if it is an agent check
if not check.agent:
memory_checks.append(check)
elif check.agent:
check.overriden_by_policy = True
check.save()
overridden_checks.append(check.pk)
if check.check_type == "winsvc":
elif (
check.check_type == CheckType.WINSVC and agent.plat == AgentPlat.WINDOWS
):
# Check if service name was already added
if check.svc_name not in added_winsvc_checks:
added_winsvc_checks.append(check.svc_name)
@@ -394,10 +342,11 @@ class Policy(BaseAuditModel):
if not check.agent:
winsvc_checks.append(check)
elif check.agent:
check.overriden_by_policy = True
check.save()
overridden_checks.append(check.pk)
if check.check_type == "script":
elif check.check_type == CheckType.SCRIPT and agent.is_supported_script(
check.script.supported_platforms
):
# Check if script id was already added
if check.script.id not in added_script_checks:
added_script_checks.append(check.script.id)
@@ -405,20 +354,28 @@ class Policy(BaseAuditModel):
if not check.agent:
script_checks.append(check)
elif check.agent:
check.overriden_by_policy = True
check.save()
overridden_checks.append(check.pk)
if check.check_type == "eventlog":
elif (
check.check_type == CheckType.EVENT_LOG
and agent.plat == AgentPlat.WINDOWS
):
# Check if events were already added
if [check.log_name, check.event_id] not in added_eventlog_checks:
added_eventlog_checks.append([check.log_name, check.event_id])
if not check.agent:
eventlog_checks.append(check)
elif check.agent:
check.overriden_by_policy = True
check.save()
overridden_checks.append(check.pk)
final_list = (
if overridden_checks:
from checks.models import Check
Check.objects.filter(pk__in=overridden_checks).update(
overridden_by_policy=True
)
return (
diskspace_checks
+ ping_checks
+ cpuload_checks
@@ -427,33 +384,3 @@ class Policy(BaseAuditModel):
+ script_checks
+ eventlog_checks
)
# remove policy checks from agent that fell out of policy scope
agent.agentchecks.filter(
managed_by_policy=True,
parent_check__in=[
checkpk
for checkpk in agent_checks_parent_pks
if checkpk not in [check.pk for check in final_list]
],
).delete()
return [
check for check in final_list if check.pk not in agent_checks_parent_pks
]
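Check layering keeps the same rule as before: enforced policy checks first, then the agent's own checks, then ordinary policy checks, with the first occurrence of a given identity (drive letter, ping target, service name, script id, log/event pair) winning and any later agent check recorded as overridden. A stripped-down version of that first-wins walk (key() stands in for the per-type identity):

# First-wins layering across enforced -> agent -> policy checks; key() is a stand-in
# for the per-type identity (disk letter, IP, service name, script id, ...).
def layer_checks(enforced, agent_checks, policy_checks, key):
    seen, to_apply, overridden = set(), [], []
    for check in [*enforced, *agent_checks, *policy_checks]:
        k = key(check)
        if k not in seen:
            seen.add(k)
            if not check.agent:      # policy check that will be applied to the agent
                to_apply.append(check)
        elif check.agent:            # the agent's own check lost to an enforced one
            overridden.append(check.pk)
    return to_apply, overridden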
@staticmethod
def generate_policy_checks(agent):
checks = Policy.cascade_policy_checks(agent)
if checks:
for check in checks:
check.create_policy_check(agent)
@staticmethod
def generate_policy_tasks(agent):
tasks = Policy.cascade_policy_tasks(agent)
if tasks:
for task in tasks:
task.create_policy_task(agent)

Some files were not shown because too many files have changed in this diff.