Compare commits

...

356 Commits

Author SHA1 Message Date
wh1te909
4942f262f1 Release 0.8.2 2021-09-01 07:18:21 +00:00
wh1te909
a20b1a973e bump version 2021-09-01 07:18:09 +00:00
wh1te909
eae5e00706 allow filtering by overdue #674 2021-09-01 06:26:55 +00:00
sadnub
5c92d4b454 fix bug where script args weren't being substituted when testing scripts 2021-08-31 20:33:36 -04:00
wh1te909
38179b9d38 Release 0.8.1 2021-08-31 06:51:20 +00:00
wh1te909
8f510dde5a bump versions 2021-08-31 06:35:29 +00:00
wh1te909
be42d56e37 fix 500 error when trying to test newly added script 2021-08-31 06:16:40 +00:00
sadnub
c5c8f5fab1 formatting 2021-08-30 22:32:16 -04:00
sadnub
3d41d79078 change directory for nats configuration file for DOCKER. Fix nats-api commands in dev containers 2021-08-30 22:17:21 -04:00
sadnub
3005061a11 formatting 2021-08-30 08:06:15 -04:00
sadnub
65ea46f457 strip whitespace before processing collector output 2021-08-30 07:42:54 -04:00
wh1te909
eca8f32570 Release 0.8.0 2021-08-30 06:32:39 +00:00
wh1te909
8d1ef19c61 bump version 2021-08-30 06:28:40 +00:00
wh1te909
71d87d866b change schedule 2021-08-30 05:49:09 +00:00
wh1te909
c4f88bdce7 update for new debug log 2021-08-30 03:45:35 +00:00
sadnub
f722a115b1 update alerting docs and add database maintenance page 2021-08-29 16:54:05 -04:00
sadnub
1583beea7b update script docs 2021-08-29 16:25:33 -04:00
wh1te909
5b388c587b update python 2021-08-29 08:19:35 +00:00
wh1te909
e254923167 update mesh/nats 2021-08-29 08:13:04 +00:00
wh1te909
b0dbdd7803 fix field 2021-08-29 07:16:09 +00:00
wh1te909
aa6ebe0122 fix pagination 2021-08-29 03:40:14 +00:00
wh1te909
c5f179bab8 update nats-api 2021-08-29 03:39:58 +00:00
sadnub
e65cb86638 rework script testing a bit. Fix mismatch object properties and props 2021-08-28 10:33:18 -04:00
wh1te909
a349998640 add watcher 2021-08-28 06:48:00 +00:00
wh1te909
43f60610b8 fix props 2021-08-28 06:36:03 +00:00
wh1te909
46d042087a fix row name 2021-08-28 06:32:50 +00:00
sadnub
ee214727f6 format agent history table 2021-08-28 01:00:17 -04:00
sadnub
b4c1ec55ec fix env.example 2021-08-27 22:12:25 -04:00
Dan
0fdd54f710 Merge pull request #664 from bc24fl/change-community-script-add-domain-rename-capability-and-refactor
Change Win_Rename_Computer.ps1 community script to add domain joined …
2021-08-25 15:21:55 -07:00
wh1te909
4f0cdeaec0 reduce nats max payload as it will be enforced in future nats update 2021-08-25 21:14:35 +00:00
wh1te909
e5cc38857c update quasar conf to support quasar app 3.1.0 (webpack-dev-server 4.0.0) 2021-08-25 21:13:05 +00:00
wh1te909
fe4b9d71c0 update reqs 2021-08-25 21:11:39 +00:00
wh1te909
5c1181e40e Merge branch 'develop' of https://github.com/wh1te909/tacticalrmm into develop 2021-08-23 04:25:14 +00:00
wh1te909
8b71832bc2 update reqs 2021-08-23 04:19:21 +00:00
Irving
8412ed6065 Change Win_Rename_Computer.ps1 community script to add domain joined computer rename functionality and refactor per standards. 2021-08-22 16:36:19 -04:00
Dan
207f6cdc7c Merge pull request #661 from bc24fl/fix-doc-typo-in-alert-page
Fixed typo in documentation alert page
2021-08-21 23:44:09 -07:00
Dan
b0b51f5730 Merge pull request #660 from silversword411/develop
Script library - uninstall software
2021-08-21 23:38:46 -07:00
wh1te909
def6833ef0 new pipeline agent 2021-08-21 15:26:25 +00:00
wh1te909
c528dd3de1 attempt to fix pipelines 2021-08-20 08:23:16 +00:00
wh1te909
544270e35d new pipeline agent 2021-08-20 07:35:02 +00:00
bc24fl
657e029fee Fixed typo in documentation alert page 2021-08-19 15:48:46 -04:00
silversword411
49469d7689 docs update - adding to docker instructions 2021-08-18 22:59:28 -04:00
silversword411
4f0dd452c8 docs troubleshooting tweaks 2021-08-18 22:39:19 -04:00
silversword411
3f741eab11 Script library - uninstall software 2021-08-18 12:01:41 -04:00
Dan
190368788f Merge pull request #654 from NiceGuyIT/develop
Bitdefender install script:  Improve error detection and logging
2021-08-11 23:54:14 -07:00
Dan
8306a3f566 Merge pull request #649 from silversword411/develop
Docs and scripts updates
2021-08-11 23:53:24 -07:00
silversword411
988c134c09 choco typo fixes 2021-08-03 00:24:14 -04:00
silversword411
af0a4d578b Community Script Replacing Choco upgrade script 2021-08-03 00:06:38 -04:00
sadnub
9bc0abc831 fix favorited community scripts showing up if community scripts are hidden. Fix delete script in Script Manager 2021-08-02 17:48:13 -04:00
David Randall
41410e99e7 Improve error detection and logging 2021-08-02 12:43:39 -04:00
David Randall
deae04d5ff Merge branch 'wh1te909:develop' into develop 2021-08-02 12:37:49 -04:00
David Randall
7d6eeffd66 Improve error detection and logging 2021-08-02 12:33:32 -04:00
sadnub
629858e095 log django 500 errors (for easier debugging) to new log file 2021-08-02 09:35:41 -04:00
sadnub
dfdb628347 change favorited script run on agent to open the Run Script modal with the script and defaults populated 2021-08-02 09:34:17 -04:00
sadnub
6e48b28fc9 fix filterable dropdown and prepopulating select value 2021-08-02 09:33:24 -04:00
sadnub
3ba450e837 fix replace values function 2021-08-02 09:21:07 -04:00
sadnub
688ed93500 allow url actions to be run against clients and sites 2021-08-01 00:17:48 -04:00
sadnub
7268ba20a2 Finished script snippet feature 2021-07-31 15:22:31 -04:00
sadnub
63d9e73098 fix tests 2021-07-31 13:29:51 -04:00
sadnub
564c048f90 add missing migration 2021-07-31 13:07:48 -04:00
sadnub
5f801c74d5 allowed dismissing persistent modals on Esc press. allow filtering on certain scripts and agent dropdowns. moved other dropdowns to tactical dropdown. Fixes with bulk actions 2021-07-31 11:56:47 -04:00
sadnub
b405fbc09a handle a few more errors when auth token is expired 2021-07-31 11:54:28 -04:00
sadnub
7a64c2eb49 update quasar 2021-07-31 11:54:00 -04:00
sadnub
c93cbac3b1 rework bulk action modal. start running bulk actions on next agent checkin 2021-07-30 12:48:47 -04:00
sadnub
8b0f67b8a6 actually stop the unauthorized console errors with websocket connection 2021-07-30 12:46:15 -04:00
sadnub
0d96129f2d get dropdown filtering working on custom tactical dropdown component 2021-07-30 12:45:26 -04:00
sadnub
54ee12d2b3 rework script manager and modals to composition api. Start on script snippets 2021-07-29 19:41:32 -04:00
silversword411
92fc042103 Win 10 upgrade script removing license check 2021-07-29 00:50:17 -04:00
silversword411
9bb7016fa7 Win 10 upgrade script commenting 2021-07-28 16:45:12 -04:00
silversword411
3ad56feafb Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2021-07-27 19:04:58 -04:00
silversword411
14d59c3dec Sorting alphabetical and fixing pic 2021-07-27 19:04:40 -04:00
silversword411
443f419770 wip script add 2021-07-27 19:04:40 -04:00
silversword411
ddbb58755e Docs updates 2021-07-27 19:04:39 -04:00
silversword411
524283b9ff adding db maintenance to docs 2021-07-27 19:04:39 -04:00
silversword411
fb178d2944 add wip script 2021-07-27 19:04:39 -04:00
silversword411
52f4ad9403 add library wifi password retrieval script 2021-07-27 19:04:38 -04:00
silversword411
ba0c08ef1f Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2021-07-27 19:03:36 -04:00
silversword411
9e19b1e04c wip script add 2021-07-27 19:02:22 -04:00
silversword411
b2118201b1 Sorting alphabetical and fixing pic 2021-07-25 15:27:05 -04:00
sadnub
b4346aa056 formatting 2021-07-21 20:41:11 -04:00
sadnub
b599f05aab fix version 2021-07-21 20:35:57 -04:00
sadnub
93d78a0200 add ipware req 2021-07-21 20:33:42 -04:00
silversword411
449957b2eb Docs updates 2021-07-21 15:02:56 -04:00
sadnub
0a6d44bad3 Fixes #561 2021-07-21 14:48:59 -04:00
sadnub
17ceaaa503 allow skipping alert resolved/failure actions on types of alerts 2021-07-21 14:30:25 -04:00
sadnub
d70803b416 add audit log retention 2021-07-21 13:49:34 -04:00
sadnub
aa414d4702 fix auditing on models that override the save method. Added Alert Template and Role to auditable models 2021-07-21 13:33:15 -04:00
sadnub
f24e1b91ea stop ws from reconnecting on unauthorized error 2021-07-21 10:53:55 -04:00
sadnub
1df8163090 add role and alert template to audit logging 2021-07-21 00:28:51 -04:00
sadnub
659ddf6a45 fix docker build script 2021-07-20 23:11:15 -04:00
sadnub
e110068da4 add public IP logging to audit log and agent login tables 2021-07-20 23:10:51 -04:00
sadnub
c943f6f936 stop the ws connection from retrying when logging out or session is expired 2021-07-20 16:46:16 -04:00
silversword411
cb1fe7fe54 adding db maintenance to docs 2021-07-19 10:44:38 -04:00
silversword411
593f1f63cc add wip script 2021-07-19 10:35:54 -04:00
silversword411
66aa70cf75 Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2021-07-18 21:08:18 -04:00
silversword411
304be99067 add library wifi password retrieval script 2021-07-18 21:08:04 -04:00
silversword411
9a01ec35f4 add library wifi password retrieval script 2021-07-18 21:04:16 -04:00
sadnub
bfa5b4fba5 allow persistent mesh config and fix mongodb password uri issue 2021-07-17 15:57:35 -04:00
Dan
d2f63ef353 Merge pull request #641 from silversword411/develop
Docs and scripts additions
2021-07-17 10:57:07 -07:00
Dan
50f334425e Merge pull request #640 from bern-spl/patch-1
Update README.md
2021-07-17 10:56:33 -07:00
silversword411
f78212073c fix json 2021-07-17 11:21:39 -04:00
silversword411
5c655f5a82 Adding grafana to docs 2021-07-17 11:06:02 -04:00
silversword411
6a6446bfcb Adding configuring email to docs 2021-07-17 10:48:22 -04:00
silversword411
b60a3a5e50 Adding scripts 2021-07-17 10:33:31 -04:00
Bernard Blundell
02ccbab8e5 Update README.md 2021-07-17 14:51:09 +01:00
wh1te909
023ff3f964 update bin [skip ci] 2021-07-17 07:16:38 +00:00
wh1te909
7c5e8df3b8 fix tests 2021-07-17 07:11:29 +00:00
wh1te909
56fdab260b add/refactor task 2021-07-17 06:59:21 +00:00
wh1te909
7cce49dc1a deprecate an endpoint 2021-07-17 06:40:45 +00:00
wh1te909
2dfaafb20b fix bug where sms attempting to be sent when not configured 2021-07-17 06:35:31 +00:00
wh1te909
6138a5bf54 move some funcs to go 2021-07-17 05:13:40 +00:00
wh1te909
828c67cc00 fix tests 2021-07-17 00:33:21 +00:00
wh1te909
e70cd44e18 add history to send command 2021-07-16 21:45:16 +00:00
wh1te909
efa5ac5edd more run script rework 2021-07-16 06:11:40 +00:00
wh1te909
788b11e759 add fields to agent history 2021-07-14 07:38:31 +00:00
wh1te909
d049d7a61f update reqs 2021-07-14 07:36:55 +00:00
Dan
075c833b58 Merge pull request #626 from sadnub/runscript-rework
Agent Tabs/Run Script WIP
2021-07-13 11:43:38 -07:00
Dan
e9309c2a96 Merge pull request #638 from silversword411/develop
Docs and scripts update
2021-07-12 22:24:12 -07:00
silversword411
a592d2b397 Adding scripts to library and WIP 2021-07-13 00:21:43 -04:00
silversword411
3ad1805ac0 tweak faq 2021-07-12 23:51:16 -04:00
Dan
dbc2bab698 Merge pull request #632 from silversword411/develop
script library and docs updates
2021-07-12 08:51:13 -07:00
silversword411
79eec5c299 Bitdefender GravityZone Docs 2021-07-11 14:10:10 -04:00
silversword411
7754b0c575 howitallworks tweaks 2021-07-11 13:55:37 -04:00
silversword411
be4289ce76 Docs update 2021-07-11 13:26:15 -04:00
silversword411
67f5226270 add BitDefender Gravity Zone Install script 2021-07-10 12:42:27 -04:00
sadnub
b6d77c581b fix styling 2021-07-09 21:13:35 -04:00
sadnub
d84bf47d04 added script cloning functionality 2021-07-09 18:47:28 -04:00
sadnub
aba3a7bb9e fix and add tests 2021-07-09 18:00:28 -04:00
sadnub
6281736d89 implement test script in script edit 2021-07-09 08:03:53 -04:00
sadnub
94d96f89d3 implement run script save to custom field and agent notes 2021-07-09 00:16:15 -04:00
sadnub
4b55f9dead add tests and minor fixes 2021-07-08 22:02:02 -04:00
sadnub
5c6dce94df fix broken tests 2021-07-08 13:02:50 -04:00
wh1te909
f7d8f9c7f5 fix mkdocs warning 2021-07-08 06:39:32 +00:00
Dan
053df24f9c Merge pull request #627 from silversword411/develop
docs update and script tweak
2021-07-07 23:33:00 -07:00
silversword411
1dc470e434 powershell upgrade 2021-07-07 22:17:11 -04:00
silversword411
cfd8773267 wip script add 2021-07-07 22:13:17 -04:00
silversword411
67045cf6c1 docs tweaks 2021-07-07 22:00:52 -04:00
sadnub
ddfb9e7239 run script rework start 2021-07-07 19:28:52 -04:00
sadnub
9f6eed5472 setup pruning tasks 2021-07-07 19:28:52 -04:00
sadnub
15a1e2ebcb add agent history 2021-07-07 19:28:52 -04:00
sadnub
fcfe450b07 finish debug and audit rework 2021-07-07 19:28:52 -04:00
sadnub
a69bbb3bc9 audit manager rework wip 2021-07-07 19:28:52 -04:00
sadnub
6d2559cfc1 debug log rework 2021-07-07 19:28:52 -04:00
sadnub
b3a62615f3 moved debug log to database. modified frontend to composition api. moved a few mixins. 2021-07-07 19:28:52 -04:00
sadnub
57f5cca1cb debug modal rework into comp api 2021-07-07 19:28:52 -04:00
sadnub
6b9851f540 new agent tabs wip 2021-07-07 19:28:52 -04:00
silversword411
36fd203a88 Updating which registry tree to query 2021-07-07 16:00:48 -04:00
Dan
3f5cb5d61c Merge pull request #623 from meuchels/develop
Fix SC collector script to work with windows 7
2021-07-07 00:43:56 -07:00
Samuel Meuchel
862fc6a946 add newline to end 2021-07-06 19:45:03 -05:00
Samuel Meuchel
92c386ac0e Fixed ScreenConnect Collector script for ps 2.0 2021-07-06 19:41:16 -05:00
Samuel Meuchel
98a11a3645 add this exclusion for your ScreenConnect Deployment script to work. 2021-07-06 11:25:17 -05:00
Dan
62be0ed936 Merge pull request #610 from meuchels/develop
Add TeamViewer Script and Integration Docs
2021-07-01 14:45:45 -07:00
Samuel Meuchel
b7de73fd8a removed args from script json. 2021-07-01 08:19:12 -05:00
Samuel Meuchel
e2413f1af2 Add AnyDesk script collector and Integration Docs. 2021-06-30 17:33:48 -05:00
Samuel Meuchel
0e77d575c4 Add TeamViewer Script and Integration Docs 2021-06-30 15:09:04 -05:00
wh1te909
ba42c5e367 Release 0.7.2 2021-06-30 06:53:33 +00:00
wh1te909
6a06734192 bump version 2021-06-30 06:53:16 +00:00
Dan
5e26a406b7 Merge pull request #606 from silversword411/develop
docs update
2021-06-29 23:49:14 -07:00
wh1te909
b6dd03138d rework agent installation auth token to have minimal perms 2021-06-30 06:46:07 +00:00
wh1te909
cf03ee03ee update quasar 2021-06-30 06:46:07 +00:00
silversword411
0e665b6bf0 doc remove incomplete 2021-06-29 23:42:02 -04:00
silversword411
e3d0de7313 consolidated into 3rdparty_screenconnect.md 2021-06-29 23:40:26 -04:00
silversword411
bcf3a543a1 merged into 3rdparty_screenconnect.md 2021-06-29 23:33:00 -04:00
silversword411
b27f17c74a fix case 2021-06-29 11:28:00 -04:00
silversword411
75d864771e Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2021-06-29 11:24:34 -04:00
silversword411
6420060f2a docs index update 2021-06-29 11:22:52 -04:00
Dan
c149ae71b9 Merge pull request #602 from meuchels/develop
added a Connectwise Control Integration document.
2021-06-28 23:54:37 -07:00
Dan
3a49dd034c Merge pull request #600 from sdm216/develop
Update Win_Firewall_Check_Status.ps1
2021-06-28 23:53:47 -07:00
Dan
b26d7e82e3 Merge pull request #599 from silversword411/develop
docs tweaks
2021-06-28 23:53:02 -07:00
silversword411
415abdf0ce adding windows update info 2021-06-29 01:19:05 -04:00
silversword411
f7f6f6ecb2 Separating out screenconnect docs 2021-06-29 00:39:11 -04:00
meuchels
43d54f134a added a Connectwise Control Integration document. 2021-06-28 17:50:24 -05:00
silversword411
0d2606a13b Revert "code commenting"
This reverts commit ecccf39455.
2021-06-28 15:59:50 -04:00
silversword411
1deb10dc88 community script typo 2021-06-28 15:49:25 -04:00
sdm216
1236d55544 Update Win_Firewall_Check_Status.ps1 2021-06-28 14:48:22 -04:00
silversword411
ecccf39455 code commenting 2021-06-28 10:17:14 -04:00
silversword411
8e0316825a Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2021-06-28 10:05:22 -04:00
silversword411
aa45fa87af Noting case sensitive for all {{}} references 2021-06-28 10:03:32 -04:00
wh1te909
71e78bd0c5 Release 0.7.1 2021-06-28 07:13:33 +00:00
wh1te909
4766477c58 bump version 2021-06-28 07:13:19 +00:00
wh1te909
d97e49ff2b add button to test SMS closes #590 2021-06-28 07:05:57 +00:00
wh1te909
6b9d775cb9 add hostname to email subject/body fixes #589 2021-06-28 06:07:17 +00:00
wh1te909
e521f580d7 make clearing search field when switching client/site optional closes #597 2021-06-28 05:21:07 +00:00
silversword411
25e7cf7db0 Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2021-06-27 17:16:21 -04:00
silversword411
0cab33787d Noting case sensitive for all {{}} references 2021-06-27 17:15:25 -04:00
wh1te909
bc6faf817f Release 0.7.0 2021-06-27 06:58:48 +00:00
wh1te909
d46ae55863 bump versions 2021-06-27 06:58:06 +00:00
wh1te909
bbd900ab25 move checkin to go 2021-06-27 06:23:37 +00:00
Dan
129ae93e2b Merge pull request #596 from rfost52/develop
Submitting System Report Generator to Community Scripts
2021-06-26 21:58:23 -07:00
rfost52
44dd59fa3f Merge branch 'develop' of https://github.com/rfost52/tacticalrmm into develop 2021-06-26 22:31:00 -04:00
rfost52
ec4e7559b0 updated script header 2021-06-26 22:30:52 -04:00
rfost52
dce40611cf Merge branch 'wh1te909:develop' into develop 2021-06-26 22:17:31 -04:00
rfost52
e71b8546f9 Submitting System Report Generator to Community Scripts 2021-06-26 22:09:56 -04:00
wh1te909
f827348467 style changes 2021-06-27 01:15:47 +00:00
wh1te909
f3978343db cache some values to speed up agent table loading 2021-06-27 00:51:34 +00:00
wh1te909
2654a7ea70 remove extra param 2021-06-27 00:05:00 +00:00
wh1te909
1068bf4ef7 fix row highlight 2021-06-26 17:53:06 +00:00
Dan
e7fccc97cc Merge pull request #595 from rfost52/develop
Initial Parameterization of System Report WIP Script
2021-06-25 23:57:11 -07:00
Dan
733e289852 Merge pull request #592 from silversword411/develop
Docs tweaks
2021-06-25 23:56:44 -07:00
rfost52
29d71a104c include check for C:\Temp folder 2021-06-25 00:36:16 -04:00
rfost52
05200420ad Merge branch 'develop' of https://github.com/rfost52/tacticalrmm into develop 2021-06-24 23:53:26 -04:00
rfost52
eb762d4bfd Initial Parameterization of variables 2021-06-24 23:53:06 -04:00
silversword411
58ace9eda1 Adding wip scripts 2021-06-24 17:20:49 -04:00
sadnub
eeb2623be0 Merge pull request #516 from sadnub/quasar-update
Quasar update to v2
2021-06-24 13:48:47 -04:00
sadnub
cfa242c2fe update loading bar delay 2021-06-24 13:41:34 -04:00
sadnub
ec0441ccc2 fix collector dropdown in policy task edit 2021-06-24 13:41:34 -04:00
sadnub
ae2782a8fe update quasar to v2 release 2021-06-24 13:41:34 -04:00
sadnub
58ff570251 fix assets tab 2021-06-24 13:41:34 -04:00
sadnub
7b554b12c7 update packages 2021-06-24 13:41:34 -04:00
sadnub
58f7603d4f fix agent dropdown in audit manager 2021-06-24 13:41:34 -04:00
sadnub
8895994c54 update packages 2021-06-24 13:41:34 -04:00
sadnub
de8f7e36d5 fix q-checkboxes that need to trigger actions and replace @input with @update:model-value 2021-06-24 13:41:34 -04:00
sadnub
88d7a50265 refactor user administration without vuex 2021-06-24 13:41:34 -04:00
sadnub
21e19fc7e5 add keys back to v-fors 2021-06-24 13:41:34 -04:00
sadnub
faf4935a69 fix saving custom field values and change sites dropdown in edit agent modal 2021-06-24 13:41:34 -04:00
sadnub
71a1f9d74a update reqs and fix custom field values 2021-06-24 13:41:34 -04:00
sadnub
bd8d523e10 stop blinking when loading 2021-06-24 13:41:34 -04:00
sadnub
60cae0e3ac remove 'created' hooks from components and fix agent and script option dropdowns 2021-06-24 13:41:34 -04:00
sadnub
5a342ac012 removed key from v-for. Fixed custom dropdowns. other fixes 2021-06-24 13:41:34 -04:00
sadnub
bb8767dfc3 fix darkmode and policy check and task tables 2021-06-24 13:41:34 -04:00
sadnub
fcb2779c15 update quasar 2021-06-24 13:41:34 -04:00
sadnub
77dd6c1f61 more fixes 2021-06-24 13:41:34 -04:00
sadnub
8118eef300 upgrade to quasar v2 and vue3 initial 2021-06-24 13:41:34 -04:00
silversword411
802d1489fe adding to howitallworks 2021-06-24 02:42:41 -04:00
silversword411
443a029185 Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2021-06-24 02:00:51 -04:00
silversword411
4ee508fdd0 Docs tweaks 2021-06-24 01:55:50 -04:00
wh1te909
aa5608f7e8 fix custom field args in bulk script fixes #591 2021-06-24 01:34:14 +00:00
wh1te909
cc472b4613 update celery 2021-06-24 01:32:07 +00:00
wh1te909
764b945ddc fix pipelines 2 2021-06-22 06:51:44 +00:00
wh1te909
fd2206ce4c fix pipelines 2021-06-22 06:47:17 +00:00
Dan
48c0ac9f00 Merge pull request #588 from rfost52/develop
Moving Win_AD_Join_Computer.ps1 from WIP scripts to Community Scripts
2021-06-21 23:38:18 -07:00
silversword411
84eb4fe9ed Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2021-06-21 11:35:04 -04:00
silversword411
4a5428812c Docs tweaks 2021-06-21 11:34:10 -04:00
silversword411
023f98a89d Docs tweaks 2021-06-21 11:32:56 -04:00
rfost52
66893dd0c1 Update Win_AD_Join_Computer.ps1 2021-06-19 20:50:56 -04:00
rfost52
25a6666e35 Adding AD PC Join to Listings 2021-06-19 20:47:11 -04:00
rfost52
19d75309b5 Merge branch 'develop' of https://github.com/rfost52/tacticalrmm into develop 2021-06-19 20:21:21 -04:00
rfost52
11110d65c1 Adding to Community Scripts
Moving from WIP Scripts to Community Scripts after successful testing.
2021-06-19 20:21:11 -04:00
Dan
a348f58fe2 Merge pull request #585 from rfost52/develop
First rework of Join to AD PowerShell WIP Script
2021-06-19 11:41:52 -07:00
rfost52
13851dd976 Added new line at end of code 2021-06-18 23:25:15 -04:00
rfost52
2ec37c5da9 1st Code rework with parameterization 2021-06-18 22:57:23 -04:00
rfost52
8c127160de Updated synopsis and description 2021-06-18 22:51:21 -04:00
rfost52
2af820de9a Update Win_AD_Join_Computer.ps1
Parameters, error checking with exit codes
2021-06-18 22:43:26 -04:00
Dan
55fb0bb3a0 Merge pull request #584 from silversword411/develop
community script updates
2021-06-18 10:58:00 -07:00
silversword411
9f9ecc521f community script updates 2021-06-17 15:27:40 -04:00
Dan
dfd01df5ba Merge pull request #581 from silversword411/develop
Adding docs
2021-06-16 22:55:18 -07:00
silversword411
474090698c Merge branch 'wh1te909:develop' into develop 2021-06-17 01:00:40 -04:00
silversword411
6b71cdeea4 Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2021-06-17 00:53:58 -04:00
wh1te909
581e974236 add view setting perms closes #569 2021-06-17 04:36:34 +00:00
wh1te909
ba3c3a42ce add missing mypy types 2021-06-17 04:35:51 +00:00
silversword411
c8bc5671c5 adding all possible script variables to docs 2021-06-17 00:34:11 -04:00
wh1te909
ff9401a040 make failing tasks fail client tree closes #571 2021-06-17 03:51:20 +00:00
wh1te909
5e1bc1989f update reqs 2021-06-17 03:50:00 +00:00
wh1te909
a1dc91cd7d fix typo in docs #580 2021-06-16 16:46:24 +00:00
sadnub
99f2772bb3 Fixes #577 2021-06-14 20:27:41 -04:00
sadnub
e5d0e42655 fix agent policies not updating when monitoring mode is changed 2021-06-14 20:18:56 -04:00
Dan
2c914cc374 Merge pull request #576 from bradhawkins85/patch-19
Update installer.ps1
2021-06-14 09:45:13 -07:00
Dan
9bceb62381 Merge pull request #575 from nextgi/zak-develop
Updates to Devcontainer and Added #467
2021-06-14 09:44:58 -07:00
Zak
de7518a800 Added new community script
New script for auto documenting ADDS.
2021-06-13 17:56:44 -07:00
bradhawkins85
304fb63453 Update installer.ps1
Fix spelling errors
2021-06-13 17:22:13 +10:00
Zak
0f7ef60ca0 Added #467
Added QTooltip to the label of the QItem in the QTree.
2021-06-12 20:50:59 -07:00
Zak
07c74e4641 Updated devcontainer
Prior it was statically set to use a specific range of IPs. I changed this so it could be set via environment variables. Also, NATS port 4222 is a reserved port for Hyper-V. I updated this so it could be set in env variables as well.
2021-06-12 20:49:10 -07:00
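(For reference, the environment variables this change introduces are visible in the env.example diff further down this page; a minimal .env built from those defaults would look like:
DOCKER_NETWORK=172.21.0.0/24
DOCKER_NGINX_IP=172.21.0.20
NATS_PORTS=4222:4222)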
wh1te909
de7f325cfb fix redis appendonly backup/restore 2021-06-13 00:10:58 +00:00
wh1te909
42cdf70cb4 Release 0.6.15 2021-06-12 20:41:19 +00:00
wh1te909
6beb6be131 bump version 2021-06-12 20:40:54 +00:00
wh1te909
fa4fc2a708 only parse script args for script checks 2021-06-12 20:24:51 +00:00
wh1te909
2db9758260 fix custom fields in script checks #568 2021-06-12 19:41:49 +00:00
wh1te909
715982e40a Release 0.6.14 2021-06-11 04:41:48 +00:00
wh1te909
d00cd4453a bump versions 2021-06-11 04:40:57 +00:00
wh1te909
429c08c24a fix width on q-file caused by recent quasar update 2021-06-11 03:58:57 +00:00
wh1te909
6a71490e20 update reqs 2021-06-11 02:40:22 +00:00
Dan
9bceda0646 Merge pull request #562 from diekinderwelt/nginx_enable_ipv6
enable ipv6 in nginx config
2021-06-10 18:59:34 -07:00
Dan
a1027a6773 Merge pull request #565 from silversword411/develop
Docs Update - adding design and tipsntricks
2021-06-10 18:59:12 -07:00
silversword411
302d4b75f9 formatting fix 2021-06-08 15:39:43 -04:00
silversword411
5f6ee0e883 Docs Update - adding design and tipsntricks 2021-06-08 14:45:02 -04:00
Silvio
27f9720de1 enable ipv6 in nginx config
Signed-off-by: Silvio <silvio.zimmer@die-kinderwelt.com>
2021-06-08 11:43:55 +02:00
sadnub
22aa3fdbbc fix bug with policy copy and task that triggers on check failure. Fix check history tests 2021-06-06 23:19:07 -04:00
sadnub
069ecdd33f apply redis configuration after restore 2021-06-06 22:58:32 -04:00
sadnub
dd545ae933 catch an exception that a celery task could potentially throw and configure automation task retries 2021-06-06 22:55:47 -04:00
sadnub
6650b705c4 configure redis to use an appendonly file for celery task reliability 2021-06-06 22:54:52 -04:00
sadnub
59b0350289 fix duplicate tasks when there is an assigned check 2021-06-06 22:54:06 -04:00
sadnub
1ad159f820 remove foreign key from checkhistory to make mass check deletes reliable. (This will not migrate check history data) 2021-06-06 22:53:11 -04:00
Dan
0bf42190e9 Merge pull request #544 from bbrendon/patch-1
check for proper OS support
2021-05-30 23:10:21 -07:00
bbrendon
d2fa836232 check for proper OS support 2021-05-30 10:39:08 -07:00
Dan
c387774093 Merge pull request #543 from bbrendon/develop
fixed an edge case and warning notes
2021-05-29 22:39:52 -07:00
bbrendon
e99736ba3c fixed an edge case and warning notes 2021-05-29 19:25:53 -07:00
wh1te909
16cb54fcc9 fix multiline output not working for automation task 2021-05-29 18:47:09 +00:00
wh1te909
5aa15c51ec Release 0.6.13 2021-05-29 07:35:29 +00:00
wh1te909
a8aedd9cf3 bump version 2021-05-29 07:35:10 +00:00
wh1te909
b851b632bc fix agent_outages_task async error 2021-05-29 07:26:10 +00:00
wh1te909
541e07fb65 Release 0.6.12 2021-05-29 05:16:37 +00:00
wh1te909
6ad16a897d bump versions 2021-05-29 05:15:26 +00:00
wh1te909
72f1053a93 change interval 2021-05-29 04:49:17 +00:00
sadnub
fb15a2762c allow saving multiple script output in custom fields #533 2021-05-28 23:52:23 -04:00
wh1te909
9165248b91 update go/codec 2021-05-29 03:20:12 +00:00
sadnub
add18b29db fix agent dropdown 2021-05-28 22:59:44 -04:00
wh1te909
1971653548 bump nats/mesh 2021-05-29 02:53:16 +00:00
wh1te909
392cd64d7b hide settings in hosted 2021-05-29 02:20:07 +00:00
wh1te909
b5affbb7c8 change function name 2021-05-29 02:18:57 +00:00
wh1te909
71d1206277 more checks rework 2021-05-29 01:37:20 +00:00
wh1te909
26e6a8c409 update reqs 2021-05-28 18:12:32 +00:00
wh1te909
eb54fae11a more checks rework 2021-05-28 17:54:57 +00:00
wh1te909
ee773e5966 remove deprecated func 2021-05-28 17:54:14 +00:00
wh1te909
7218ccdba8 start checks rework 2021-05-27 07:16:06 +00:00
wh1te909
332400e48a autogrow text field fixes #533 2021-05-27 07:09:40 +00:00
Dan
ad1a5d3702 Merge pull request #534 from silversword411/develop
Script library and docs tweaks
2021-05-26 23:59:08 -07:00
silversword411
3006b4184d Docs update on regular patching 2021-05-26 21:36:28 -04:00
silversword411
84eb84a080 Script library adding comments 2021-05-26 10:19:30 -04:00
sadnub
60beea548b Allow clearing resolved/failure actions in alert template 2021-05-24 22:18:12 -04:00
Dan
5f9c149e59 Merge pull request #528 from bbrendon/develop
updated timeouts and fixed one script
2021-05-21 18:36:07 -07:00
bbrendon
53367c6f04 update timeouts on some scripts 2021-05-21 18:01:16 -07:00
bbrendon
d7f817ee44 syntax error fix. 2021-05-21 17:56:53 -07:00
Dan
d33a87da54 Merge pull request #526 from silversword411/develop
script library - Screenconnect collector
2021-05-20 20:13:51 -07:00
silversword411
3aebfb12b7 Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2021-05-20 21:50:10 -04:00
silversword411
1d6c55ffa6 Script library - screenconnect collector 2021-05-20 21:49:01 -04:00
Dan
5e7080aac3 Merge pull request #522 from silversword411/develop
Docs Example and wip tweaks
2021-05-20 18:37:33 -07:00
silversword411
fad739bc01 Updating script delegated folders 2021-05-20 10:10:59 -04:00
silversword411
c6b7f23884 Adding URL Action Example to docs 2021-05-19 02:46:51 -04:00
silversword411
a6f7e446de tweaking wip scripts 2021-05-18 23:22:45 -04:00
wh1te909
89d95d3ae1 Release 0.6.11 2021-05-19 03:08:29 +00:00
wh1te909
764208698f bump version 2021-05-19 03:04:06 +00:00
Dan
57129cf934 Merge pull request #521 from agit8or/develop
Create Win_Shortcut_Creator.ps1
2021-05-18 18:10:33 -07:00
Dan
aae1a842d5 Merge pull request #519 from silversword411/develop
add script to wip
2021-05-18 18:10:03 -07:00
agit8or
623f35aec7 Create Win_Shortcut_Creator2.ps1 2021-05-18 13:05:46 -04:00
agit8or
870bf842cf Create Win_Shortcut_Creator.ps1 2021-05-18 13:00:26 -04:00
silversword411
07f2d7dd5c wip additions for printers 2021-05-18 02:00:55 -04:00
silversword411
f223f2edc5 Merge branch 'wh1te909:develop' into develop 2021-05-17 22:47:22 -04:00
wh1te909
e848a9a577 fix tests 2021-05-17 06:45:43 +00:00
wh1te909
7569d98e07 fix task args fixes #514 2021-05-17 06:01:28 +00:00
wh1te909
596dee2f24 update docs 2021-05-15 08:07:30 +00:00
wh1te909
9970403964 Release 0.6.10 2021-05-15 07:52:35 +00:00
wh1te909
07a88ae00d bump versions 2021-05-15 07:51:44 +00:00
wh1te909
5475b4d287 typo 2021-05-15 02:20:33 +00:00
sadnub
6631dcfd3e Fix custom check run interval. Fixes #473 2021-05-14 21:37:49 -04:00
sadnub
0dd3f337f3 Add Client and Site categories for agent select options. Fixes #499 2021-05-14 20:27:32 -04:00
silversword411
8eb27b5875 Merge branch 'wh1te909:develop' into develop 2021-05-14 19:03:42 -04:00
sadnub
2d1863031c fix default custom field value not being used if blank value is present on model. Fixes #501 2021-05-14 18:48:49 -04:00
sadnub
9feb76ca81 fix tests 2021-05-14 18:19:57 -04:00
sadnub
993e8f4ab3 sort script categories prior to formatting script options #506 2021-05-14 18:08:51 -04:00
sadnub
e08ae95d4f Fix alignment issue #512 2021-05-14 18:08:51 -04:00
sadnub
15359e8846 ws wip 2021-05-14 18:08:51 -04:00
silversword411
d1457b312b wip addition create shortcut to URL 2021-05-14 17:50:50 -04:00
silversword411
c9dd2af196 Merge branch 'wh1te909:develop' into develop 2021-05-14 14:41:12 -04:00
wh1te909
564ef4e688 feat: add clear faults #484 2021-05-14 04:54:59 +00:00
wh1te909
a33e6e8bb5 move token refresh before local settings import to allow overriding #503 2021-05-14 01:47:25 +00:00
Dan
cf34f33f04 Merge pull request #507 from silversword411/develop
Script library and docs updates
2021-05-13 12:50:21 -07:00
silversword411
827cfe4e8f Merge branch 'wh1te909:develop' into develop 2021-05-13 13:44:45 -04:00
silversword411
2ce1c2383c Merge branch 'develop' of https://github.com/silversword411/tacticalrmm into develop 2021-05-13 13:38:51 -04:00
silversword411
6fc0a665ae script library docs - volunteers needed 2021-05-13 13:36:33 -04:00
silversword411
4f16d01263 script library - sn collector 2021-05-13 12:37:10 -04:00
sadnub
67cc37354a Evaluate policies on exclusion changes. Fixes #500 2021-05-12 18:17:03 -04:00
silversword411
e388243ef4 renaming wips 2021-05-12 11:32:16 -04:00
silversword411
3dc92763c7 Script library add 2021-05-12 11:25:22 -04:00
Dan
dfe97dd466 Merge pull request #493 from silversword411/develop
Adding comment headers to wip1
2021-05-12 00:36:57 -07:00
silversword411
23b6284b51 Adding comment headers to wip2 2021-05-11 22:55:01 -04:00
silversword411
33dfbcbe32 Adding comment headers to wip1 2021-05-11 22:53:37 -04:00
356 changed files with 16918 additions and 21760 deletions

View File

@@ -26,3 +26,6 @@ POSTGRES_PASS=postgrespass
APP_PORT=80
API_PORT=80
HTTP_PROTOCOL=https
DOCKER_NETWORK=172.21.0.0/24
DOCKER_NGINX_IP=172.21.0.20
NATS_PORTS=4222:4222

View File

@@ -1,4 +1,4 @@
FROM python:3.9.2-slim
FROM python:3.9.6-slim
ENV TACTICAL_DIR /opt/tactical
ENV TACTICAL_READY_FILE ${TACTICAL_DIR}/tmp/tactical.ready
@@ -13,12 +13,17 @@ EXPOSE 8000 8383 8005
RUN groupadd -g 1000 tactical && \
useradd -u 1000 -g 1000 tactical
# Copy Dev python reqs
COPY ./requirements.txt /
# Copy nats-api file
COPY natsapi/bin/nats-api /usr/local/bin/
RUN chmod +x /usr/local/bin/nats-api
# Copy Docker Entrypoint
COPY ./entrypoint.sh /
# Copy dev python reqs
COPY .devcontainer/requirements.txt /
# Copy docker entrypoint.sh
COPY .devcontainer/entrypoint.sh /
RUN chmod +x /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]
WORKDIR ${WORKSPACE_DIR}/api/tacticalrmm

View File

@@ -6,8 +6,8 @@ services:
image: api-dev
restart: always
build:
context: .
dockerfile: ./api.dockerfile
context: ..
dockerfile: .devcontainer/api.dockerfile
command: ["tactical-api"]
environment:
API_PORT: ${API_PORT}
@@ -46,7 +46,7 @@ services:
API_PORT: ${API_PORT}
DEV: 1
ports:
- "4222:4222"
- "${NATS_PORTS}"
volumes:
- tactical-data-dev:/opt/tactical
- ..:/workspace:cached
@@ -67,7 +67,7 @@ services:
MESH_PASS: ${MESH_PASS}
MONGODB_USER: ${MONGODB_USER}
MONGODB_PASSWORD: ${MONGODB_PASSWORD}
NGINX_HOST_IP: 172.21.0.20
NGINX_HOST_IP: ${DOCKER_NGINX_IP}
networks:
dev:
aliases:
@@ -115,7 +115,10 @@ services:
redis-dev:
container_name: trmm-redis-dev
restart: always
command: redis-server --appendonly yes
image: redis:6.0-alpine
volumes:
- redis-data-dev:/data
networks:
dev:
aliases:
@@ -124,9 +127,6 @@ services:
init-dev:
container_name: trmm-init-dev
image: api-dev
build:
context: .
dockerfile: ./api.dockerfile
restart: on-failure
command: ["tactical-init-dev"]
environment:
@@ -153,9 +153,6 @@ services:
celery-dev:
container_name: trmm-celery-dev
image: api-dev
build:
context: .
dockerfile: ./api.dockerfile
command: ["tactical-celery-dev"]
restart: always
networks:
@@ -171,9 +168,6 @@ services:
celerybeat-dev:
container_name: trmm-celerybeat-dev
image: api-dev
build:
context: .
dockerfile: ./api.dockerfile
command: ["tactical-celerybeat-dev"]
restart: always
networks:
@@ -189,9 +183,6 @@ services:
websockets-dev:
container_name: trmm-websockets-dev
image: api-dev
build:
context: .
dockerfile: ./api.dockerfile
command: ["tactical-websockets-dev"]
restart: always
networks:
@@ -220,7 +211,7 @@ services:
API_PORT: ${API_PORT}
networks:
dev:
ipv4_address: 172.21.0.20
ipv4_address: ${DOCKER_NGINX_IP}
ports:
- "80:80"
- "443:443"
@@ -231,9 +222,6 @@ services:
container_name: trmm-mkdocs-dev
image: api-dev
restart: always
build:
context: .
dockerfile: ./api.dockerfile
command: ["tactical-mkdocs-dev"]
ports:
- "8005:8005"
@@ -247,6 +235,7 @@ volumes:
postgres-data-dev:
mongo-dev-data:
mesh-data-dev:
redis-data-dev:
networks:
dev:
@@ -254,4 +243,4 @@ networks:
ipam:
driver: default
config:
- subnet: 172.21.0.0/24
- subnet: ${DOCKER_NETWORK}

View File

@@ -114,6 +114,7 @@ EOF
"${VIRTUAL_ENV}"/bin/python manage.py load_chocos
"${VIRTUAL_ENV}"/bin/python manage.py load_community_scripts
"${VIRTUAL_ENV}"/bin/python manage.py reload_nats
"${VIRTUAL_ENV}"/bin/python manage.py create_installer_user
# create super user
echo "from accounts.models import User; User.objects.create_superuser('${TRMM_USER}', 'admin@example.com', '${TRMM_PASS}') if not User.objects.filter(username='${TRMM_USER}').exists() else 0;" | python manage.py shell

View File

@@ -2,6 +2,8 @@
asyncio-nats-client
celery
channels
channels_redis
django-ipware
Django
django-cors-headers
django-rest-knox

View File

@@ -9,7 +9,7 @@ Tactical RMM is a remote monitoring & management tool for Windows computers, bui
It uses an [agent](https://github.com/wh1te909/rmmagent) written in golang and integrates with [MeshCentral](https://github.com/Ylianst/MeshCentral)
# [LIVE DEMO](https://rmm.tacticalrmm.io/)
Demo database resets every hour. Alot of features are disabled for obvious reasons due to the nature of this app.
Demo database resets every hour. A lot of features are disabled for obvious reasons due to the nature of this app.
### [Discord Chat](https://discord.gg/upGTkWp)
@@ -35,4 +35,4 @@ Demo database resets every hour. Alot of features are disabled for obvious reaso
## Installation / Backup / Restore / Usage
### Refer to the [documentation](https://wh1te909.github.io/tacticalrmm/)
### Refer to the [documentation](https://wh1te909.github.io/tacticalrmm/)

View File

@@ -0,0 +1,18 @@
import uuid
from django.core.management.base import BaseCommand
from accounts.models import User
class Command(BaseCommand):
help = "Creates the installer user"
def handle(self, *args, **kwargs):
if User.objects.filter(is_installer_user=True).exists():
return
User.objects.create_user( # type: ignore
username=uuid.uuid4().hex,
is_installer_user=True,
password=User.objects.make_random_password(60), # type: ignore
)

View File

@@ -0,0 +1,18 @@
# Generated by Django 3.2.4 on 2021-06-17 04:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0020_role_can_manage_roles'),
]
operations = [
migrations.AddField(
model_name='role',
name='can_view_core_settings',
field=models.BooleanField(default=False),
),
]

View File

@@ -0,0 +1,18 @@
# Generated by Django 3.2.4 on 2021-06-28 05:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0021_role_can_view_core_settings'),
]
operations = [
migrations.AddField(
model_name='user',
name='clear_search_when_switching',
field=models.BooleanField(default=True),
),
]

View File

@@ -0,0 +1,18 @@
# Generated by Django 3.2.4 on 2021-06-30 03:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0022_user_clear_search_when_switching'),
]
operations = [
migrations.AddField(
model_name='user',
name='is_installer_user',
field=models.BooleanField(default=False),
),
]

View File

@@ -0,0 +1,18 @@
# Generated by Django 3.2.1 on 2021-07-20 20:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0023_user_is_installer_user'),
]
operations = [
migrations.AddField(
model_name='user',
name='last_login_ip',
field=models.GenericIPAddressField(blank=True, default=None, null=True),
),
]

View File

@@ -0,0 +1,33 @@
# Generated by Django 3.2.1 on 2021-07-21 04:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0024_user_last_login_ip'),
]
operations = [
migrations.AddField(
model_name='role',
name='created_by',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='role',
name='created_time',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='role',
name='modified_by',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='role',
name='modified_time',
field=models.DateTimeField(auto_now=True, null=True),
),
]

View File

@@ -46,6 +46,9 @@ class User(AbstractUser, BaseAuditModel):
)
client_tree_splitter = models.PositiveIntegerField(default=11)
loading_bar_color = models.CharField(max_length=255, default="red")
clear_search_when_switching = models.BooleanField(default=True)
is_installer_user = models.BooleanField(default=False)
last_login_ip = models.GenericIPAddressField(default=None, blank=True, null=True)
agent = models.OneToOneField(
"agents.Agent",
@@ -71,7 +74,7 @@ class User(AbstractUser, BaseAuditModel):
return UserSerializer(user).data
class Role(models.Model):
class Role(BaseAuditModel):
name = models.CharField(max_length=255, unique=True)
is_superuser = models.BooleanField(default=False)
@@ -90,6 +93,7 @@ class Role(models.Model):
# core
can_manage_notes = models.BooleanField(default=False)
can_view_core_settings = models.BooleanField(default=False)
can_edit_core_settings = models.BooleanField(default=False)
can_do_server_maint = models.BooleanField(default=False)
can_code_sign = models.BooleanField(default=False)
@@ -137,6 +141,13 @@ class Role(models.Model):
def __str__(self):
return self.name
@staticmethod
def serialize(role):
# serializes the agent and returns json
from .serializers import RoleAuditSerializer
return RoleAuditSerializer(role).data
@staticmethod
def perms():
return [
@@ -153,6 +164,7 @@ class Role(models.Model):
"can_run_scripts",
"can_run_bulk",
"can_manage_notes",
"can_view_core_settings",
"can_edit_core_settings",
"can_do_server_maint",
"can_code_sign",

View File

@@ -16,6 +16,7 @@ class UserUISerializer(ModelSerializer):
"client_tree_sort",
"client_tree_splitter",
"loading_bar_color",
"clear_search_when_switching",
]
@@ -30,6 +31,7 @@ class UserSerializer(ModelSerializer):
"email",
"is_active",
"last_login",
"last_login_ip",
"role",
]
@@ -56,3 +58,9 @@ class RoleSerializer(ModelSerializer):
class Meta:
model = Role
fields = "__all__"
class RoleAuditSerializer(ModelSerializer):
class Meta:
model = Role
fields = "__all__"

View File

@@ -280,6 +280,7 @@ class TestUserAction(TacticalTestCase):
"client_tree_sort": "alpha",
"client_tree_splitter": 14,
"loading_bar_color": "green",
"clear_search_when_switching": False,
}
r = self.client.patch(url, data, format="json")
self.assertEqual(r.status_code, 200)

View File

@@ -3,23 +3,23 @@ from django.conf import settings
from django.contrib.auth import login
from django.db import IntegrityError
from django.shortcuts import get_object_or_404
from ipware import get_client_ip
from knox.views import LoginView as KnoxLoginView
from logs.models import AuditLog
from rest_framework import status
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from logs.models import AuditLog
from tacticalrmm.utils import notify_error
from .models import User, Role
from .models import Role, User
from .permissions import AccountsPerms, RolesPerms
from .serializers import (
RoleSerializer,
TOTPSetupSerializer,
UserSerializer,
UserUISerializer,
RoleSerializer,
)
@@ -40,7 +40,9 @@ class CheckCreds(KnoxLoginView):
# check credentials
serializer = AuthTokenSerializer(data=request.data)
if not serializer.is_valid():
AuditLog.audit_user_failed_login(request.data["username"])
AuditLog.audit_user_failed_login(
request.data["username"], debug_info={"ip": request._client_ip}
)
return Response("bad credentials", status=status.HTTP_400_BAD_REQUEST)
user = serializer.validated_data["user"]
@@ -76,10 +78,20 @@ class LoginView(KnoxLoginView):
if valid:
login(request, user)
AuditLog.audit_user_login_successful(request.data["username"])
# save ip information
client_ip, is_routable = get_client_ip(request)
user.last_login_ip = client_ip
user.save()
AuditLog.audit_user_login_successful(
request.data["username"], debug_info={"ip": request._client_ip}
)
return super(LoginView, self).post(request, format=None)
else:
AuditLog.audit_user_failed_twofactor(request.data["username"])
AuditLog.audit_user_failed_twofactor(
request.data["username"], debug_info={"ip": request._client_ip}
)
return Response("bad credentials", status=status.HTTP_400_BAD_REQUEST)
@@ -87,7 +99,14 @@ class GetAddUsers(APIView):
permission_classes = [IsAuthenticated, AccountsPerms]
def get(self, request):
users = User.objects.filter(agent=None)
search = request.GET.get("search", None)
if search:
users = User.objects.filter(agent=None, is_installer_user=False).filter(
username__icontains=search
)
else:
users = User.objects.filter(agent=None, is_installer_user=False)
return Response(UserSerializer(users, many=True).data)

View File

@@ -1,8 +1,9 @@
from django.contrib import admin
from .models import Agent, AgentCustomField, Note, RecoveryAction
from .models import Agent, AgentCustomField, Note, RecoveryAction, AgentHistory
admin.site.register(Agent)
admin.site.register(RecoveryAction)
admin.site.register(Note)
admin.site.register(AgentCustomField)
admin.site.register(AgentHistory)

View File

@@ -0,0 +1,23 @@
# Generated by Django 3.2.4 on 2021-06-27 00:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('agents', '0036_agent_block_policy_inheritance'),
]
operations = [
migrations.AddField(
model_name='agent',
name='has_patches_pending',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='agent',
name='pending_actions_count',
field=models.PositiveIntegerField(default=0),
),
]

View File

@@ -0,0 +1,27 @@
# Generated by Django 3.2.1 on 2021-07-06 02:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('agents', '0037_auto_20210627_0014'),
]
operations = [
migrations.CreateModel(
name='AgentHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(auto_now_add=True)),
('type', models.CharField(choices=[('task_run', 'Task Run'), ('script_run', 'Script Run'), ('cmd_run', 'CMD Run')], default='cmd_run', max_length=50)),
('command', models.TextField(blank=True, null=True)),
('status', models.CharField(choices=[('success', 'Success'), ('failure', 'Failure')], default='success', max_length=50)),
('username', models.CharField(default='system', max_length=50)),
('results', models.TextField(blank=True, null=True)),
('agent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='history', to='agents.agent')),
],
),
]

View File

@@ -0,0 +1,25 @@
# Generated by Django 3.2.5 on 2021-07-14 07:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('scripts', '0008_script_guid'),
('agents', '0038_agenthistory'),
]
operations = [
migrations.AddField(
model_name='agenthistory',
name='script',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='history', to='scripts.script'),
),
migrations.AddField(
model_name='agenthistory',
name='script_results',
field=models.JSONField(blank=True, null=True),
),
]

View File

@@ -16,14 +16,12 @@ from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.utils import timezone as djangotime
from loguru import logger
from nats.aio.client import Client as NATS
from nats.aio.errors import ErrTimeout
from packaging import version as pyver
from core.models import TZ_CHOICES, CoreSettings
from logs.models import BaseAuditModel
logger.configure(**settings.LOG_CONFIG)
from logs.models import BaseAuditModel, DebugLog
class Agent(BaseAuditModel):
@@ -64,6 +62,8 @@ class Agent(BaseAuditModel):
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
pending_actions_count = models.PositiveIntegerField(default=0)
has_patches_pending = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
@@ -89,16 +89,18 @@ class Agent(BaseAuditModel):
def save(self, *args, **kwargs):
# get old agent if exists
old_agent = type(self).objects.get(pk=self.pk) if self.pk else None
super(BaseAuditModel, self).save(*args, **kwargs)
old_agent = Agent.objects.get(pk=self.pk) if self.pk else None
super(Agent, self).save(old_model=old_agent, *args, **kwargs)
# check if new agent has been created
# or check if policy have changed on agent
# or if site has changed on agent and if so generate-policies
# or if agent was changed from server or workstation
if (
not old_agent
or (old_agent and old_agent.policy != self.policy)
or (old_agent.site != self.site)
or (old_agent.monitoring_type != self.monitoring_type)
or (old_agent.block_policy_inheritance != self.block_policy_inheritance)
):
self.generate_checks_from_policies()
@@ -119,7 +121,7 @@ class Agent(BaseAuditModel):
else:
from core.models import CoreSettings
return CoreSettings.objects.first().default_time_zone
return CoreSettings.objects.first().default_time_zone # type: ignore
@property
def arch(self):
@@ -161,10 +163,6 @@ class Agent(BaseAuditModel):
else:
return "offline"
@property
def has_patches_pending(self):
return self.winupdates.filter(action="approve").filter(installed=False).exists() # type: ignore
@property
def checks(self):
total, passing, failing, warning, info = 0, 0, 0, 0, 0
@@ -325,6 +323,7 @@ class Agent(BaseAuditModel):
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
) -> Any:
from scripts.models import Script
@@ -343,6 +342,9 @@ class Agent(BaseAuditModel):
},
}
if history_pk != 0 and pyver.parse(self.version) >= pyver.parse("1.6.0"):
data["id"] = history_pk
running_agent = self
if run_on_any:
nats_ping = {"func": "ping"}
@@ -411,6 +413,12 @@ class Agent(BaseAuditModel):
update.action = "approve"
update.save(update_fields=["action"])
DebugLog.info(
agent=self,
log_type="windows_updates",
message=f"Approving windows updates on {self.hostname}",
)
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self):
@@ -445,8 +453,8 @@ class Agent(BaseAuditModel):
# if patch policy still doesn't exist check default policy
elif (
core_settings.server_policy
and core_settings.server_policy.winupdatepolicy.exists()
core_settings.server_policy # type: ignore
and core_settings.server_policy.winupdatepolicy.exists() # type: ignore
):
# make sure agent site and client are not blocking inheritance
if (
@@ -454,7 +462,7 @@ class Agent(BaseAuditModel):
and not site.block_policy_inheritance
and not site.client.block_policy_inheritance
):
patch_policy = core_settings.server_policy.winupdatepolicy.get()
patch_policy = core_settings.server_policy.winupdatepolicy.get() # type: ignore
elif self.monitoring_type == "workstation":
# check agent policy first which should override client or site policy
@@ -483,8 +491,8 @@ class Agent(BaseAuditModel):
# if patch policy still doesn't exist check default policy
elif (
core_settings.workstation_policy
and core_settings.workstation_policy.winupdatepolicy.exists()
core_settings.workstation_policy # type: ignore
and core_settings.workstation_policy.winupdatepolicy.exists() # type: ignore
):
# make sure agent site and client are not blocking inheritance
if (
@@ -493,7 +501,7 @@ class Agent(BaseAuditModel):
and not site.client.block_policy_inheritance
):
patch_policy = (
core_settings.workstation_policy.winupdatepolicy.get()
core_settings.workstation_policy.winupdatepolicy.get() # type: ignore
)
# if policy still doesn't exist return the agent patch policy
@@ -608,35 +616,35 @@ class Agent(BaseAuditModel):
# check if alert template is applied globally and return
if (
core.alert_template
and core.alert_template.is_active
core.alert_template # type: ignore
and core.alert_template.is_active # type: ignore
and not self.block_policy_inheritance
and not site.block_policy_inheritance
and not client.block_policy_inheritance
):
templates.append(core.alert_template)
templates.append(core.alert_template) # type: ignore
# if agent is a workstation, check if policy with alert template is assigned to the site, client, or core
if (
self.monitoring_type == "server"
and core.server_policy
and core.server_policy.alert_template
and core.server_policy.alert_template.is_active
and core.server_policy # type: ignore
and core.server_policy.alert_template # type: ignore
and core.server_policy.alert_template.is_active # type: ignore
and not self.block_policy_inheritance
and not site.block_policy_inheritance
and not client.block_policy_inheritance
):
templates.append(core.server_policy.alert_template)
templates.append(core.server_policy.alert_template) # type: ignore
if (
self.monitoring_type == "workstation"
and core.workstation_policy
and core.workstation_policy.alert_template
and core.workstation_policy.alert_template.is_active
and core.workstation_policy # type: ignore
and core.workstation_policy.alert_template # type: ignore
and core.workstation_policy.alert_template.is_active # type: ignore
and not self.block_policy_inheritance
and not site.block_policy_inheritance
and not client.block_policy_inheritance
):
templates.append(core.workstation_policy.alert_template)
templates.append(core.workstation_policy.alert_template) # type: ignore
# go through the templates and return the first one that isn't excluded
for template in templates:
@@ -739,7 +747,7 @@ class Agent(BaseAuditModel):
try:
ret = msgpack.loads(msg.data) # type: ignore
except Exception as e:
logger.error(e)
DebugLog.error(agent=self, log_type="agent_issues", message=e)
ret = str(e)
await nc.close()
@@ -752,12 +760,9 @@ class Agent(BaseAuditModel):
@staticmethod
def serialize(agent):
# serializes the agent and returns json
from .serializers import AgentEditSerializer
from .serializers import AgentAuditSerializer
ret = AgentEditSerializer(agent).data
del ret["all_timezones"]
del ret["client"]
return ret
return AgentAuditSerializer(agent).data
def delete_superseded_updates(self):
try:
@@ -772,7 +777,7 @@ class Agent(BaseAuditModel):
# skip if no version info is available therefore nothing to parse
try:
vers = [
re.search(r"\(Version(.*?)\)", i).group(1).strip()
re.search(r"\(Version(.*?)\)", i).group(1).strip() # type: ignore
for i in titles
]
sorted_vers = sorted(vers, key=LooseVersion)
@@ -807,7 +812,7 @@ class Agent(BaseAuditModel):
from core.models import CoreSettings
CORE = CoreSettings.objects.first()
CORE.send_mail(
CORE.send_mail( # type: ignore
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
(
f"Data has not been received from client {self.client.name}, "
@@ -822,7 +827,7 @@ class Agent(BaseAuditModel):
from core.models import CoreSettings
CORE = CoreSettings.objects.first()
CORE.send_mail(
CORE.send_mail( # type: ignore
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
(
f"Data has been received from client {self.client.name}, "
@@ -837,7 +842,7 @@ class Agent(BaseAuditModel):
from core.models import CoreSettings
CORE = CoreSettings.objects.first()
CORE.send_sms(
CORE.send_sms( # type: ignore
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
alert_template=self.alert_template,
)
@@ -846,7 +851,7 @@ class Agent(BaseAuditModel):
from core.models import CoreSettings
CORE = CoreSettings.objects.first()
CORE.send_sms(
CORE.send_sms( # type: ignore
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
alert_template=self.alert_template,
)
@@ -928,3 +933,57 @@ class AgentCustomField(models.Model):
return self.bool_value
else:
return self.string_value
def save_to_field(self, value):
if self.field.type in [
"text",
"number",
"single",
"datetime",
]:
self.string_value = value
self.save()
elif self.field.type == "multiple":
self.multiple_value = value.split(",")
self.save()
elif self.field.type == "checkbox":
self.bool_value = bool(value)
self.save()
AGENT_HISTORY_TYPES = (
("task_run", "Task Run"),
("script_run", "Script Run"),
("cmd_run", "CMD Run"),
)
AGENT_HISTORY_STATUS = (("success", "Success"), ("failure", "Failure"))
class AgentHistory(models.Model):
agent = models.ForeignKey(
Agent,
related_name="history",
on_delete=models.CASCADE,
)
time = models.DateTimeField(auto_now_add=True)
type = models.CharField(
max_length=50, choices=AGENT_HISTORY_TYPES, default="cmd_run"
)
command = models.TextField(null=True, blank=True)
status = models.CharField(
max_length=50, choices=AGENT_HISTORY_STATUS, default="success"
)
username = models.CharField(max_length=50, default="system")
results = models.TextField(null=True, blank=True)
script = models.ForeignKey(
"scripts.Script",
null=True,
blank=True,
related_name="history",
on_delete=models.SET_NULL,
)
script_results = models.JSONField(null=True, blank=True)
def __str__(self):
return f"{self.agent.hostname} - {self.type}"

View File

@@ -1,15 +1,14 @@
import pytz
from rest_framework import serializers
from clients.serializers import ClientSerializer
from rest_framework import serializers
from tacticalrmm.utils import get_default_timezone
from winupdate.serializers import WinUpdatePolicySerializer
from .models import Agent, AgentCustomField, Note
from .models import Agent, AgentCustomField, Note, AgentHistory
class AgentSerializer(serializers.ModelSerializer):
# for vue
patches_pending = serializers.ReadOnlyField(source="has_patches_pending")
winupdatepolicy = WinUpdatePolicySerializer(many=True, read_only=True)
status = serializers.ReadOnlyField()
cpu_model = serializers.ReadOnlyField()
@@ -45,8 +44,6 @@ class AgentOverdueActionSerializer(serializers.ModelSerializer):
class AgentTableSerializer(serializers.ModelSerializer):
patches_pending = serializers.ReadOnlyField(source="has_patches_pending")
pending_actions = serializers.SerializerMethodField()
status = serializers.ReadOnlyField()
checks = serializers.ReadOnlyField()
last_seen = serializers.SerializerMethodField()
@@ -69,9 +66,6 @@ class AgentTableSerializer(serializers.ModelSerializer):
"always_alert": obj.alert_template.agent_always_alert,
}
def get_pending_actions(self, obj):
return obj.pendingactions.filter(status="pending").count()
def get_last_seen(self, obj) -> str:
if obj.time_zone is not None:
agent_tz = pytz.timezone(obj.time_zone)
@@ -103,8 +97,8 @@ class AgentTableSerializer(serializers.ModelSerializer):
"monitoring_type",
"description",
"needs_reboot",
"patches_pending",
"pending_actions",
"has_patches_pending",
"pending_actions_count",
"status",
"overdue_text_alert",
"overdue_email_alert",
@@ -165,6 +159,7 @@ class AgentEditSerializer(serializers.ModelSerializer):
"offline_time",
"overdue_text_alert",
"overdue_email_alert",
"overdue_dashboard_alert",
"all_timezones",
"winupdatepolicy",
"policy",
@@ -173,11 +168,6 @@ class AgentEditSerializer(serializers.ModelSerializer):
class WinAgentSerializer(serializers.ModelSerializer):
# for the windows agent
patches_pending = serializers.ReadOnlyField(source="has_patches_pending")
winupdatepolicy = WinUpdatePolicySerializer(many=True, read_only=True)
status = serializers.ReadOnlyField()
class Meta:
model = Agent
fields = "__all__"
@@ -211,3 +201,22 @@ class NotesSerializer(serializers.ModelSerializer):
class Meta:
model = Agent
fields = ["hostname", "pk", "notes"]
class AgentHistorySerializer(serializers.ModelSerializer):
time = serializers.SerializerMethodField(read_only=True)
script_name = serializers.ReadOnlyField(source="script.name")
class Meta:
model = AgentHistory
fields = "__all__"
def get_time(self, history):
timezone = get_default_timezone()
return history.time.astimezone(timezone).strftime("%m %d %Y %H:%M:%S")
class AgentAuditSerializer(serializers.ModelSerializer):
class Meta:
model = Agent
exclude = ["disks", "services", "wmi_detail"]

View File

@@ -5,19 +5,17 @@ import urllib.parse
from time import sleep
from typing import Union
from alerts.models import Alert
from core.models import CodeSignToken, CoreSettings
from django.conf import settings
from django.utils import timezone as djangotime
from loguru import logger
from logs.models import DebugLog, PendingAction
from packaging import version as pyver
from agents.models import Agent
from core.models import CodeSignToken, CoreSettings
from logs.models import PendingAction
from scripts.models import Script
from tacticalrmm.celery import app
from tacticalrmm.utils import run_nats_api_cmd
logger.configure(**settings.LOG_CONFIG)
from agents.models import Agent
def agent_update(pk: int, codesigntoken: str = None, force: bool = False) -> str:
@@ -30,8 +28,10 @@ def agent_update(pk: int, codesigntoken: str = None, force: bool = False) -> str
# skip if we can't determine the arch
if agent.arch is None:
logger.warning(
f"Unable to determine arch on {agent.hostname}. Skipping agent update."
DebugLog.warning(
agent=agent,
log_type="agent_issues",
message=f"Unable to determine arch on {agent.hostname}({agent.pk}). Skipping agent update.",
)
return "noarch"
@@ -78,7 +78,7 @@ def agent_update(pk: int, codesigntoken: str = None, force: bool = False) -> str
@app.task
def force_code_sign(pks: list[int]) -> None:
try:
token = CodeSignToken.objects.first().token
token = CodeSignToken.objects.first().token  # type:ignore
except:
return
@@ -93,7 +93,7 @@ def force_code_sign(pks: list[int]) -> None:
@app.task
def send_agent_update_task(pks: list[int]) -> None:
try:
codesigntoken = CodeSignToken.objects.first().token
codesigntoken = CodeSignToken.objects.first().token # type:ignore
except:
codesigntoken = None
@@ -108,11 +108,11 @@ def send_agent_update_task(pks: list[int]) -> None:
@app.task
def auto_self_agent_update_task() -> None:
core = CoreSettings.objects.first()
if not core.agent_auto_update:
if not core.agent_auto_update: # type:ignore
return
try:
codesigntoken = CodeSignToken.objects.first().token
codesigntoken = CodeSignToken.objects.first().token # type:ignore
except:
codesigntoken = None
@@ -211,6 +211,7 @@ def agent_outages_task() -> None:
agents = Agent.objects.only(
"pk",
"agent_id",
"last_seen",
"offline_time",
"overdue_time",
@@ -231,14 +232,24 @@ def run_script_email_results_task(
nats_timeout: int,
emails: list[str],
args: list[str] = [],
history_pk: int = 0,
):
agent = Agent.objects.get(pk=agentpk)
script = Script.objects.get(pk=scriptpk)
r = agent.run_script(
scriptpk=script.pk, args=args, full=True, timeout=nats_timeout, wait=True
scriptpk=script.pk,
args=args,
full=True,
timeout=nats_timeout,
wait=True,
history_pk=history_pk,
)
if r == "timeout":
logger.error(f"{agent.hostname} timed out running script.")
DebugLog.error(
agent=agent,
log_type="scripting",
message=f"{agent.hostname}({agent.pk}) timed out running script.",
)
return
CORE = CoreSettings.objects.first()
@@ -254,37 +265,60 @@ def run_script_email_results_task(
msg = EmailMessage()
msg["Subject"] = subject
msg["From"] = CORE.smtp_from_email
msg["From"] = CORE.smtp_from_email # type:ignore
if emails:
msg["To"] = ", ".join(emails)
else:
msg["To"] = ", ".join(CORE.email_alert_recipients)
msg["To"] = ", ".join(CORE.email_alert_recipients) # type:ignore
msg.set_content(body)
try:
with smtplib.SMTP(CORE.smtp_host, CORE.smtp_port, timeout=20) as server:
if CORE.smtp_requires_auth:
with smtplib.SMTP(
CORE.smtp_host, CORE.smtp_port, timeout=20 # type:ignore
) as server: # type:ignore
if CORE.smtp_requires_auth: # type:ignore
server.ehlo()
server.starttls()
server.login(CORE.smtp_host_user, CORE.smtp_host_password)
server.login(
CORE.smtp_host_user, CORE.smtp_host_password # type:ignore
) # type:ignore
server.send_message(msg)
server.quit()
else:
server.send_message(msg)
server.quit()
except Exception as e:
logger.error(e)
DebugLog.error(message=e)
@app.task
def monitor_agents_task() -> None:
agents = Agent.objects.only(
"pk", "agent_id", "last_seen", "overdue_time", "offline_time"
def clear_faults_task(older_than_days: int) -> None:
# https://github.com/wh1te909/tacticalrmm/issues/484
agents = Agent.objects.exclude(last_seen__isnull=True).filter(
last_seen__lt=djangotime.now() - djangotime.timedelta(days=older_than_days)
)
ids = [i.agent_id for i in agents if i.status != "online"]
run_nats_api_cmd("monitor", ids)
for agent in agents:
if agent.agentchecks.exists():
for check in agent.agentchecks.all():
# reset check status
check.status = "passing"
check.save(update_fields=["status"])
if check.alert.filter(resolved=False).exists():
check.alert.get(resolved=False).resolve()
# reset overdue alerts
agent.overdue_email_alert = False
agent.overdue_text_alert = False
agent.overdue_dashboard_alert = False
agent.save(
update_fields=[
"overdue_email_alert",
"overdue_text_alert",
"overdue_dashboard_alert",
]
)
@app.task
@@ -293,4 +327,67 @@ def get_wmi_task() -> None:
"pk", "agent_id", "last_seen", "overdue_time", "offline_time"
)
ids = [i.agent_id for i in agents if i.status == "online"]
run_nats_api_cmd("wmi", ids)
run_nats_api_cmd("wmi", ids, timeout=45)
@app.task
def agent_checkin_task() -> None:
run_nats_api_cmd("checkin", timeout=30)
@app.task
def agent_getinfo_task() -> None:
run_nats_api_cmd("agentinfo", timeout=30)
@app.task
def prune_agent_history(older_than_days: int) -> str:
from .models import AgentHistory
AgentHistory.objects.filter(
time__lt=djangotime.now() - djangotime.timedelta(days=older_than_days)
).delete()
return "ok"
@app.task
def handle_agents_task() -> None:
q = Agent.objects.prefetch_related("pendingactions", "autotasks").only(
"pk", "agent_id", "version", "last_seen", "overdue_time", "offline_time"
)
agents = [
i
for i in q
if pyver.parse(i.version) >= pyver.parse("1.6.0") and i.status == "online"
]
for agent in agents:
# change agent update pending status to completed if agent has just updated
if (
pyver.parse(agent.version) == pyver.parse(settings.LATEST_AGENT_VER)
and agent.pendingactions.filter(
action_type="agentupdate", status="pending"
).exists()
):
agent.pendingactions.filter(
action_type="agentupdate", status="pending"
).update(status="completed")
# sync scheduled tasks
if agent.autotasks.exclude(sync_status="synced").exists(): # type: ignore
tasks = agent.autotasks.exclude(sync_status="synced") # type: ignore
for task in tasks:
if task.sync_status == "pendingdeletion":
task.delete_task_on_agent()
elif task.sync_status == "initial":
task.modify_task_on_agent()
elif task.sync_status == "notsynced":
task.create_task_on_agent()
# handles any alerting actions
if Alert.objects.filter(agent=agent, resolved=False).exists():
try:
Alert.handle_alert_resolve(agent)
except:
continue
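The new checkin/getinfo and prune tasks are meant to be driven by the scheduler. A sketch of what a Celery beat entry could look like; the project wires this up in its own Celery configuration, so the keys and intervals here are illustrative only:

# illustrative beat schedule, not the project's actual configuration
from celery.schedules import crontab

beat_schedule = {
    "agent-checkin": {
        "task": "agents.tasks.agent_checkin_task",
        "schedule": 30.0,                        # seconds
    },
    "prune-agent-history": {
        "task": "agents.tasks.prune_agent_history",
        "schedule": crontab(hour=4, minute=30),  # daily
        "args": (30,),                           # keep 30 days of history
    },
}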

View File

@@ -1,19 +1,18 @@
import json
import os
from itertools import cycle
from django.utils import timezone as djangotime
from unittest.mock import patch
from django.conf import settings
from logs.models import PendingAction
from model_bakery import baker
from packaging import version as pyver
from logs.models import PendingAction
from tacticalrmm.test import TacticalTestCase
from winupdate.models import WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
from .models import Agent, AgentCustomField
from .serializers import AgentSerializer
from .models import Agent, AgentCustomField, AgentHistory
from .serializers import AgentHistorySerializer, AgentSerializer
from .tasks import auto_self_agent_update_task
@@ -306,7 +305,7 @@ class TestAgentViews(TacticalTestCase):
"shell": "cmd",
"timeout": 30,
}
mock_ret.return_value = "nt authority\system"
mock_ret.return_value = "nt authority\\system"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertIsInstance(r.data, str) # type: ignore
@@ -437,7 +436,7 @@ class TestAgentViews(TacticalTestCase):
self.assertEqual(r.status_code, 200)
self.assertEqual(RecoveryAction.objects.count(), 1)
mesh_recovery = RecoveryAction.objects.first()
self.assertEqual(mesh_recovery.mode, "mesh")
self.assertEqual(mesh_recovery.mode, "mesh") # type: ignore
nats_cmd.reset_mock()
RecoveryAction.objects.all().delete()
@@ -472,8 +471,8 @@ class TestAgentViews(TacticalTestCase):
self.assertEqual(r.status_code, 200)
self.assertEqual(RecoveryAction.objects.count(), 1)
cmd_recovery = RecoveryAction.objects.first()
self.assertEqual(cmd_recovery.mode, "command")
self.assertEqual(cmd_recovery.command, "shutdown /r /t 10 /f")
self.assertEqual(cmd_recovery.mode, "command") # type: ignore
self.assertEqual(cmd_recovery.command, "shutdown /r /t 10 /f") # type: ignore
def test_agents_agent_detail(self):
url = f"/agents/{self.agent.pk}/agentdetail/"
@@ -770,6 +769,9 @@ class TestAgentViews(TacticalTestCase):
@patch("agents.tasks.run_script_email_results_task.delay")
@patch("agents.models.Agent.run_script")
def test_run_script(self, run_script, email_task):
from .models import AgentCustomField, Note
from clients.models import ClientCustomField, SiteCustomField
run_script.return_value = "ok"
url = "/agents/runscript/"
script = baker.make_recipe("scripts.script")
@@ -777,7 +779,7 @@ class TestAgentViews(TacticalTestCase):
# test wait
data = {
"pk": self.agent.pk,
"scriptPK": script.pk,
"script": script.pk,
"output": "wait",
"args": [],
"timeout": 15,
@@ -786,18 +788,18 @@ class TestAgentViews(TacticalTestCase):
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
run_script.assert_called_with(
scriptpk=script.pk, args=[], timeout=18, wait=True
scriptpk=script.pk, args=[], timeout=18, wait=True, history_pk=0
)
run_script.reset_mock()
# test email default
data = {
"pk": self.agent.pk,
"scriptPK": script.pk,
"script": script.pk,
"output": "email",
"args": ["abc", "123"],
"timeout": 15,
"emailmode": "default",
"emailMode": "default",
"emails": ["admin@example.com", "bob@example.com"],
}
r = self.client.post(url, data, format="json")
@@ -812,7 +814,7 @@ class TestAgentViews(TacticalTestCase):
email_task.reset_mock()
# test email overrides
data["emailmode"] = "custom"
data["emailMode"] = "custom"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
email_task.assert_called_with(
@@ -826,7 +828,7 @@ class TestAgentViews(TacticalTestCase):
# test fire and forget
data = {
"pk": self.agent.pk,
"scriptPK": script.pk,
"script": script.pk,
"output": "forget",
"args": ["hello", "world"],
"timeout": 22,
@@ -835,8 +837,138 @@ class TestAgentViews(TacticalTestCase):
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
run_script.assert_called_with(
scriptpk=script.pk, args=["hello", "world"], timeout=25
scriptpk=script.pk, args=["hello", "world"], timeout=25, history_pk=0
)
run_script.reset_mock()
# test collector
# save to agent custom field
custom_field = baker.make("core.CustomField", model="agent")
data = {
"pk": self.agent.pk,
"script": script.pk,
"output": "collector",
"args": ["hello", "world"],
"timeout": 22,
"custom_field": custom_field.id, # type: ignore
"save_all_output": True,
}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
run_script.assert_called_with(
scriptpk=script.pk,
args=["hello", "world"],
timeout=25,
wait=True,
history_pk=0,
)
run_script.reset_mock()
self.assertEqual(
AgentCustomField.objects.get(agent=self.agent.pk, field=custom_field).value,
"ok",
)
# save to site custom field
custom_field = baker.make("core.CustomField", model="site")
data = {
"pk": self.agent.pk,
"script": script.pk,
"output": "collector",
"args": ["hello", "world"],
"timeout": 22,
"custom_field": custom_field.id, # type: ignore
"save_all_output": False,
}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
run_script.assert_called_with(
scriptpk=script.pk,
args=["hello", "world"],
timeout=25,
wait=True,
history_pk=0,
)
run_script.reset_mock()
self.assertEqual(
SiteCustomField.objects.get(
site=self.agent.site.pk, field=custom_field
).value,
"ok",
)
# save to client custom field
custom_field = baker.make("core.CustomField", model="client")
data = {
"pk": self.agent.pk,
"script": script.pk,
"output": "collector",
"args": ["hello", "world"],
"timeout": 22,
"custom_field": custom_field.id, # type: ignore
"save_all_output": False,
}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
run_script.assert_called_with(
scriptpk=script.pk,
args=["hello", "world"],
timeout=25,
wait=True,
history_pk=0,
)
run_script.reset_mock()
self.assertEqual(
ClientCustomField.objects.get(
client=self.agent.client.pk, field=custom_field
).value,
"ok",
)
# test save to note
data = {
"pk": self.agent.pk,
"script": script.pk,
"output": "note",
"args": ["hello", "world"],
"timeout": 22,
}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
run_script.assert_called_with(
scriptpk=script.pk,
args=["hello", "world"],
timeout=25,
wait=True,
history_pk=0,
)
run_script.reset_mock()
self.assertEqual(Note.objects.get(agent=self.agent).note, "ok")
def test_get_agent_history(self):
# setup data
agent = baker.make_recipe("agents.agent")
history = baker.make("agents.AgentHistory", agent=agent, _quantity=30)
url = f"/agents/history/{agent.id}/"
# test agent not found
r = self.client.get("/agents/history/500/", format="json")
self.assertEqual(r.status_code, 404)
# test pulling data
r = self.client.get(url, format="json")
data = AgentHistorySerializer(history, many=True).data
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data, data) # type:ignore
class TestAgentViewsNew(TacticalTestCase):
@@ -1048,3 +1180,25 @@ class TestAgentTasks(TacticalTestCase):
r = auto_self_agent_update_task.s().apply()
self.assertEqual(agent_update.call_count, 33)
def test_agent_history_prune_task(self):
from .tasks import prune_agent_history
# setup data
agent = baker.make_recipe("agents.agent")
history = baker.make(
"agents.AgentHistory",
agent=agent,
_quantity=50,
)
days = 0
for item in history: # type: ignore
item.time = djangotime.now() - djangotime.timedelta(days=days)
item.save()
days = days + 5
# delete AgentHistory older than 30 days
prune_agent_history(30)
self.assertEqual(AgentHistory.objects.filter(agent=agent).count(), 6)

View File

@@ -29,4 +29,5 @@ urlpatterns = [
path("bulk/", views.bulk),
path("maintenance/", views.agent_maintenance),
path("<int:pk>/wmi/", views.WMI.as_view()),
path("history/<int:pk>/", views.AgentHistoryView.as_view()),
]

View File

@@ -8,7 +8,6 @@ import time
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from loguru import logger
from packaging import version as pyver
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
@@ -17,14 +16,14 @@ from rest_framework.response import Response
from rest_framework.views import APIView
from core.models import CoreSettings
from logs.models import AuditLog, PendingAction
from logs.models import AuditLog, DebugLog, PendingAction
from scripts.models import Script
from scripts.tasks import handle_bulk_command_task, handle_bulk_script_task
from tacticalrmm.utils import get_default_timezone, notify_error, reload_nats
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from .models import Agent, AgentCustomField, Note, RecoveryAction
from .models import Agent, AgentCustomField, Note, RecoveryAction, AgentHistory
from .permissions import (
EditAgentPerms,
EvtLogPerms,
@@ -42,6 +41,7 @@ from .permissions import (
from .serializers import (
AgentCustomFieldSerializer,
AgentEditSerializer,
AgentHistorySerializer,
AgentHostnameSerializer,
AgentOverdueActionSerializer,
AgentSerializer,
@@ -51,8 +51,6 @@ from .serializers import (
)
from .tasks import run_script_email_results_task, send_agent_update_task
logger.configure(**settings.LOG_CONFIG)
@api_view()
def get_agent_versions(request):
@@ -115,7 +113,7 @@ def uninstall(request):
def edit_agent(request):
agent = get_object_or_404(Agent, pk=request.data["id"])
a_serializer = AgentSerializer(instance=agent, data=request.data, partial=True)
a_serializer = AgentEditSerializer(instance=agent, data=request.data, partial=True)
a_serializer.is_valid(raise_exception=True)
a_serializer.save()
@@ -160,17 +158,21 @@ def meshcentral(request, pk):
core = CoreSettings.objects.first()
token = agent.get_login_token(
key=core.mesh_token, user=f"user//{core.mesh_username}"
key=core.mesh_token, user=f"user//{core.mesh_username}" # type:ignore
)
if token == "err":
return notify_error("Invalid mesh token")
control = f"{core.mesh_site}/?login={token}&gotonode={agent.mesh_node_id}&viewmode=11&hide=31"
terminal = f"{core.mesh_site}/?login={token}&gotonode={agent.mesh_node_id}&viewmode=12&hide=31"
file = f"{core.mesh_site}/?login={token}&gotonode={agent.mesh_node_id}&viewmode=13&hide=31"
control = f"{core.mesh_site}/?login={token}&gotonode={agent.mesh_node_id}&viewmode=11&hide=31" # type:ignore
terminal = f"{core.mesh_site}/?login={token}&gotonode={agent.mesh_node_id}&viewmode=12&hide=31" # type:ignore
file = f"{core.mesh_site}/?login={token}&gotonode={agent.mesh_node_id}&viewmode=13&hide=31" # type:ignore
AuditLog.audit_mesh_session(username=request.user.username, hostname=agent.hostname)
AuditLog.audit_mesh_session(
username=request.user.username,
agent=agent,
debug_info={"ip": request._client_ip},
)
ret = {
"hostname": agent.hostname,
@@ -248,6 +250,16 @@ def send_raw_cmd(request):
"shell": request.data["shell"],
},
}
if pyver.parse(agent.version) >= pyver.parse("1.6.0"):
hist = AgentHistory.objects.create(
agent=agent,
type="cmd_run",
command=request.data["cmd"],
username=request.user.username[:50],
)
data["id"] = hist.pk
r = asyncio.run(agent.nats_cmd(data, timeout=timeout + 2))
if r == "timeout":
@@ -255,9 +267,10 @@ def send_raw_cmd(request):
AuditLog.audit_raw_command(
username=request.user.username,
hostname=agent.hostname,
agent=agent,
cmd=request.data["cmd"],
shell=request.data["shell"],
debug_info={"ip": request._client_ip},
)
return Response(r)
@@ -302,6 +315,8 @@ class AgentsTableList(APIView):
"last_logged_in_user",
"time_zone",
"maintenance_mode",
"pending_actions_count",
"has_patches_pending",
)
ctx = {"default_tz": get_default_timezone()}
serializer = AgentTableSerializer(queryset, many=True, context=ctx)
@@ -388,6 +403,7 @@ class Reboot(APIView):
@permission_classes([IsAuthenticated, InstallAgentPerms])
def install_agent(request):
from knox.models import AuthToken
from accounts.models import User
from agents.utils import get_winagent_url
@@ -413,8 +429,10 @@ def install_agent(request):
)
download_url = get_winagent_url(arch)
installer_user = User.objects.filter(is_installer_user=True).first()
_, token = AuthToken.objects.create(
user=request.user, expiry=dt.timedelta(hours=request.data["expires"])
user=installer_user, expiry=dt.timedelta(hours=request.data["expires"])
)
if request.data["installMethod"] == "exe":
@@ -503,7 +521,7 @@ def install_agent(request):
try:
os.remove(ps1)
except Exception as e:
logger.error(str(e))
DebugLog.error(message=str(e))
with open(ps1, "w") as f:
f.write(text)
@@ -561,26 +579,41 @@ def recover(request):
@permission_classes([IsAuthenticated, RunScriptPerms])
def run_script(request):
agent = get_object_or_404(Agent, pk=request.data["pk"])
script = get_object_or_404(Script, pk=request.data["scriptPK"])
script = get_object_or_404(Script, pk=request.data["script"])
output = request.data["output"]
args = request.data["args"]
req_timeout = int(request.data["timeout"]) + 3
AuditLog.audit_script_run(
username=request.user.username,
hostname=agent.hostname,
agent=agent,
script=script.name,
debug_info={"ip": request._client_ip},
)
history_pk = 0
if pyver.parse(agent.version) >= pyver.parse("1.6.0"):
hist = AgentHistory.objects.create(
agent=agent,
type="script_run",
script=script,
username=request.user.username[:50],
)
history_pk = hist.pk
if output == "wait":
r = agent.run_script(
scriptpk=script.pk, args=args, timeout=req_timeout, wait=True
scriptpk=script.pk,
args=args,
timeout=req_timeout,
wait=True,
history_pk=history_pk,
)
return Response(r)
elif output == "email":
emails = (
[] if request.data["emailmode"] == "default" else request.data["emails"]
[] if request.data["emailMode"] == "default" else request.data["emails"]
)
run_script_email_results_task.delay(
agentpk=agent.pk,
@@ -589,8 +622,51 @@ def run_script(request):
emails=emails,
args=args,
)
elif output == "collector":
from core.models import CustomField
r = agent.run_script(
scriptpk=script.pk,
args=args,
timeout=req_timeout,
wait=True,
history_pk=history_pk,
)
custom_field = CustomField.objects.get(pk=request.data["custom_field"])
if custom_field.model == "agent":
field = custom_field.get_or_create_field_value(agent)
elif custom_field.model == "client":
field = custom_field.get_or_create_field_value(agent.client)
elif custom_field.model == "site":
field = custom_field.get_or_create_field_value(agent.site)
else:
return notify_error("Custom Field was invalid")
value = (
r.strip()
if request.data["save_all_output"]
else r.strip().split("\n")[-1].strip()
)
field.save_to_field(value)
return Response(r)
elif output == "note":
r = agent.run_script(
scriptpk=script.pk,
args=args,
timeout=req_timeout,
wait=True,
history_pk=history_pk,
)
Note.objects.create(agent=agent, user=request.user, note=r)
return Response(r)
else:
agent.run_script(scriptpk=script.pk, args=args, timeout=req_timeout)
agent.run_script(
scriptpk=script.pk, args=args, timeout=req_timeout, history_pk=history_pk
)
return Response(f"{script.name} will now be run on {agent.hostname}")
@@ -663,7 +739,7 @@ class GetEditDeleteNote(APIView):
@api_view(["POST"])
@permission_classes([IsAuthenticated, RunBulkPerms])
def bulk(request):
if request.data["target"] == "agents" and not request.data["agentPKs"]:
if request.data["target"] == "agents" and not request.data["agents"]:
return notify_error("Must select at least 1 agent")
if request.data["target"] == "client":
@@ -671,7 +747,7 @@ def bulk(request):
elif request.data["target"] == "site":
q = Agent.objects.filter(site_id=request.data["site"])
elif request.data["target"] == "agents":
q = Agent.objects.filter(pk__in=request.data["agentPKs"])
q = Agent.objects.filter(pk__in=request.data["agents"])
elif request.data["target"] == "all":
q = Agent.objects.only("pk", "monitoring_type")
else:
@@ -684,29 +760,48 @@ def bulk(request):
agents: list[int] = [agent.pk for agent in q]
AuditLog.audit_bulk_action(request.user, request.data["mode"], request.data)
if not agents:
return notify_error("No agents where found meeting the selected criteria")
AuditLog.audit_bulk_action(
request.user,
request.data["mode"],
request.data,
debug_info={"ip": request._client_ip},
)
if request.data["mode"] == "command":
handle_bulk_command_task.delay(
agents, request.data["cmd"], request.data["shell"], request.data["timeout"]
agents,
request.data["cmd"],
request.data["shell"],
request.data["timeout"],
request.user.username[:50],
run_on_offline=request.data["offlineAgents"],
)
return Response(f"Command will now be run on {len(agents)} agents")
elif request.data["mode"] == "script":
script = get_object_or_404(Script, pk=request.data["scriptPK"])
script = get_object_or_404(Script, pk=request.data["script"])
handle_bulk_script_task.delay(
script.pk, agents, request.data["args"], request.data["timeout"]
script.pk,
agents,
request.data["args"],
request.data["timeout"],
request.user.username[:50],
)
return Response(f"{script.name} will now be run on {len(agents)} agents")
elif request.data["mode"] == "install":
bulk_install_updates_task.delay(agents)
return Response(
f"Pending updates will now be installed on {len(agents)} agents"
)
elif request.data["mode"] == "scan":
bulk_check_for_updates_task.delay(agents)
return Response(f"Patch status scan will now run on {len(agents)} agents")
elif request.data["mode"] == "patch":
if request.data["patchMode"] == "install":
bulk_install_updates_task.delay(agents)
return Response(
f"Pending updates will now be installed on {len(agents)} agents"
)
elif request.data["patchMode"] == "scan":
bulk_check_for_updates_task.delay(agents)
return Response(f"Patch status scan will now run on {len(agents)} agents")
return notify_error("Something went wrong")
@@ -741,3 +836,11 @@ class WMI(APIView):
if r != "ok":
return notify_error("Unable to contact the agent")
return Response("ok")
class AgentHistoryView(APIView):
def get(self, request, pk):
agent = get_object_or_404(Agent, pk=pk)
history = AgentHistory.objects.filter(agent=agent)
return Response(AgentHistorySerializer(history, many=True).data)
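AgentHistoryView simply returns every history row for one agent, serialized with AgentHistorySerializer. A minimal sketch of calling it with DRF's test client, using the same authentication pattern the tests above rely on (the agent pk is hypothetical):

# illustrative only; assumes an existing user and agent
from rest_framework.test import APIClient
from accounts.models import User

client = APIClient()
client.force_authenticate(user=User.objects.first())

resp = client.get("/agents/history/1/", format="json")   # agent pk 1 is a placeholder
if resp.status_code == 200:
    for entry in resp.data:
        print(entry["type"], entry["username"], entry["time"])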

View File

@@ -0,0 +1,33 @@
# Generated by Django 3.2.1 on 2021-07-21 04:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('alerts', '0006_auto_20210217_1736'),
]
operations = [
migrations.AddField(
model_name='alerttemplate',
name='created_by',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='alerttemplate',
name='created_time',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='alerttemplate',
name='modified_by',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='alerttemplate',
name='modified_time',
field=models.DateTimeField(auto_now=True, null=True),
),
]

View File

@@ -0,0 +1,28 @@
# Generated by Django 3.2.1 on 2021-07-21 17:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('alerts', '0007_auto_20210721_0423'),
]
operations = [
migrations.AddField(
model_name='alerttemplate',
name='agent_script_actions',
field=models.BooleanField(blank=True, default=None, null=True),
),
migrations.AddField(
model_name='alerttemplate',
name='check_script_actions',
field=models.BooleanField(blank=True, default=None, null=True),
),
migrations.AddField(
model_name='alerttemplate',
name='task_script_actions',
field=models.BooleanField(blank=True, default=None, null=True),
),
]

View File

@@ -0,0 +1,28 @@
# Generated by Django 3.2.1 on 2021-07-21 18:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('alerts', '0008_auto_20210721_1757'),
]
operations = [
migrations.AlterField(
model_name='alerttemplate',
name='agent_script_actions',
field=models.BooleanField(blank=True, default=True, null=True),
),
migrations.AlterField(
model_name='alerttemplate',
name='check_script_actions',
field=models.BooleanField(blank=True, default=True, null=True),
),
migrations.AlterField(
model_name='alerttemplate',
name='task_script_actions',
field=models.BooleanField(blank=True, default=True, null=True),
),
]

View File

@@ -3,19 +3,18 @@ from __future__ import annotations
import re
from typing import TYPE_CHECKING, Union
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.db.models.fields import BooleanField, PositiveIntegerField
from django.utils import timezone as djangotime
from loguru import logger
from logs.models import BaseAuditModel, DebugLog
if TYPE_CHECKING:
from agents.models import Agent
from autotasks.models import AutomatedTask
from checks.models import Check
logger.configure(**settings.LOG_CONFIG)
SEVERITY_CHOICES = [
("info", "Informational"),
@@ -173,6 +172,7 @@ class Alert(models.Model):
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
if instance.should_create_alert(alert_template):
alert = cls.create_or_return_availability_alert(instance)
@@ -209,6 +209,7 @@ class Alert(models.Model):
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
if instance.should_create_alert(alert_template):
alert = cls.create_or_return_check_alert(instance)
@@ -242,6 +243,7 @@ class Alert(models.Model):
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
if instance.should_create_alert(alert_template):
alert = cls.create_or_return_task_alert(instance)
@@ -295,7 +297,7 @@ class Alert(models.Model):
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if alert_template and alert_template.action and not alert.action_run:
if alert_template and alert_template.action and run_script_action and not alert.action_run: # type: ignore
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
@@ -314,8 +316,10 @@ class Alert(models.Model):
alert.action_run = djangotime.now()
alert.save()
else:
logger.error(
f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname} failure alert"
DebugLog.error(
agent=agent,
log_type="scripting",
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
@classmethod
@@ -345,6 +349,7 @@ class Alert(models.Model):
if alert_template:
email_on_resolved = alert_template.agent_email_on_resolved
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, Check):
from checks.tasks import (
@@ -363,6 +368,7 @@ class Alert(models.Model):
if alert_template:
email_on_resolved = alert_template.check_email_on_resolved
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, AutomatedTask):
from autotasks.tasks import (
@@ -381,6 +387,7 @@ class Alert(models.Model):
if alert_template:
email_on_resolved = alert_template.task_email_on_resolved
text_on_resolved = alert_template.task_text_on_resolved
run_script_action = alert_template.task_script_actions
else:
return
@@ -403,6 +410,7 @@ class Alert(models.Model):
if (
alert_template
and alert_template.resolved_action
and run_script_action # type: ignore
and not alert.resolved_action_run
):
r = agent.run_script(
@@ -425,8 +433,10 @@ class Alert(models.Model):
alert.resolved_action_run = djangotime.now()
alert.save()
else:
logger.error(
f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname} resolved alert"
DebugLog.error(
agent=agent,
log_type="scripting",
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: list[str]):
@@ -451,7 +461,7 @@ class Alert(models.Model):
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg)) # type: ignore
except Exception as e:
logger.error(e)
DebugLog.error(log_type="scripting", message=e)
continue
else:
@@ -460,7 +470,7 @@ class Alert(models.Model):
return temp_args
class AlertTemplate(models.Model):
class AlertTemplate(BaseAuditModel):
name = models.CharField(max_length=100)
is_active = models.BooleanField(default=True)
@@ -517,6 +527,7 @@ class AlertTemplate(models.Model):
agent_always_text = BooleanField(null=True, blank=True, default=None)
agent_always_alert = BooleanField(null=True, blank=True, default=None)
agent_periodic_alert_days = PositiveIntegerField(blank=True, null=True, default=0)
agent_script_actions = BooleanField(null=True, blank=True, default=True)
# check alert settings
check_email_alert_severity = ArrayField(
@@ -540,6 +551,7 @@ class AlertTemplate(models.Model):
check_always_text = BooleanField(null=True, blank=True, default=None)
check_always_alert = BooleanField(null=True, blank=True, default=None)
check_periodic_alert_days = PositiveIntegerField(blank=True, null=True, default=0)
check_script_actions = BooleanField(null=True, blank=True, default=True)
# task alert settings
task_email_alert_severity = ArrayField(
@@ -563,6 +575,7 @@ class AlertTemplate(models.Model):
task_always_text = BooleanField(null=True, blank=True, default=None)
task_always_alert = BooleanField(null=True, blank=True, default=None)
task_periodic_alert_days = PositiveIntegerField(blank=True, null=True, default=0)
task_script_actions = BooleanField(null=True, blank=True, default=True)
# exclusion settings
exclude_workstations = BooleanField(null=True, blank=True, default=False)
@@ -581,6 +594,13 @@ class AlertTemplate(models.Model):
def __str__(self):
return self.name
@staticmethod
def serialize(alert_template):
# serializes the alert template and returns json
from .serializers import AlertTemplateAuditSerializer
return AlertTemplateAuditSerializer(alert_template).data
@property
def has_agent_settings(self) -> bool:
return (

View File

@@ -119,3 +119,9 @@ class AlertTemplateRelationSerializer(ModelSerializer):
class Meta:
model = AlertTemplate
fields = "__all__"
class AlertTemplateAuditSerializer(ModelSerializer):
class Meta:
model = AlertTemplate
fields = "__all__"

View File

@@ -1,11 +1,10 @@
from django.utils import timezone as djangotime
from alerts.models import Alert
from tacticalrmm.celery import app
@app.task
def unsnooze_alerts() -> str:
from .models import Alert
Alert.objects.filter(snoozed=True, snooze_until__lte=djangotime.now()).update(
snoozed=False, snooze_until=None
@@ -22,3 +21,14 @@ def cache_agents_alert_template():
agent.set_alert_template()
return "ok"
@app.task
def prune_resolved_alerts(older_than_days: int) -> str:
from .models import Alert
Alert.objects.filter(resolved=True).filter(
alert_time__lt=djangotime.now() - djangotime.timedelta(days=older_than_days)
).delete()
return "ok"

View File

@@ -1,14 +1,13 @@
from datetime import datetime, timedelta
from unittest.mock import patch
from core.models import CoreSettings
from django.conf import settings
from django.utils import timezone as djangotime
from model_bakery import baker, seq
from tacticalrmm.test import TacticalTestCase
from alerts.tasks import cache_agents_alert_template
from autotasks.models import AutomatedTask
from core.models import CoreSettings
from tacticalrmm.test import TacticalTestCase
from .models import Alert, AlertTemplate
from .serializers import (
@@ -330,8 +329,8 @@ class TestAlertsViews(TacticalTestCase):
baker.make("clients.Site", alert_template=alert_template, _quantity=3)
baker.make("automation.Policy", alert_template=alert_template)
core = CoreSettings.objects.first()
core.alert_template = alert_template
core.save()
core.alert_template = alert_template # type: ignore
core.save() # type: ignore
url = f"/alerts/alerttemplates/{alert_template.pk}/related/" # type: ignore
@@ -403,16 +402,16 @@ class TestAlertTasks(TacticalTestCase):
# assign first Alert Template to a policy and apply it as default
policy.alert_template = alert_templates[0] # type: ignore
policy.save() # type: ignore
core.workstation_policy = policy
core.server_policy = policy
core.save()
core.workstation_policy = policy # type: ignore
core.server_policy = policy # type: ignore
core.save() # type: ignore
self.assertEquals(server.set_alert_template().pk, alert_templates[0].pk) # type: ignore
self.assertEquals(workstation.set_alert_template().pk, alert_templates[0].pk) # type: ignore
# assign second Alert Template as the default alert template
core.alert_template = alert_templates[1] # type: ignore
core.save()
core.save() # type: ignore
self.assertEquals(workstation.set_alert_template().pk, alert_templates[1].pk) # type: ignore
self.assertEquals(server.set_alert_template().pk, alert_templates[1].pk) # type: ignore
@@ -514,6 +513,7 @@ class TestAlertTasks(TacticalTestCase):
agent_recovery_email_task,
agent_recovery_sms_task,
)
from alerts.models import Alert
agent_dashboard_alert = baker.make_recipe("agents.overdue_agent")
@@ -727,7 +727,6 @@ class TestAlertTasks(TacticalTestCase):
send_email,
sleep,
):
from alerts.tasks import cache_agents_alert_template
from checks.models import Check
from checks.tasks import (
handle_check_email_alert_task,
@@ -736,6 +735,8 @@ class TestAlertTasks(TacticalTestCase):
handle_resolved_check_sms_alert_task,
)
from alerts.tasks import cache_agents_alert_template
# create test data
agent = baker.make_recipe("agents.agent")
agent_no_settings = baker.make_recipe("agents.agent")
@@ -1011,7 +1012,6 @@ class TestAlertTasks(TacticalTestCase):
send_email,
sleep,
):
from alerts.tasks import cache_agents_alert_template
from autotasks.models import AutomatedTask
from autotasks.tasks import (
handle_resolved_task_email_alert,
@@ -1020,6 +1020,8 @@ class TestAlertTasks(TacticalTestCase):
handle_task_sms_alert,
)
from alerts.tasks import cache_agents_alert_template
# create test data
agent = baker.make_recipe("agents.agent")
agent_no_settings = baker.make_recipe("agents.agent")
@@ -1272,17 +1274,17 @@ class TestAlertTasks(TacticalTestCase):
)
core = CoreSettings.objects.first()
core.smtp_host = "test.test.com"
core.smtp_port = 587
core.smtp_recipients = ["recipient@test.com"]
core.twilio_account_sid = "test"
core.twilio_auth_token = "1234123412341234"
core.sms_alert_recipients = ["+1234567890"]
core.smtp_host = "test.test.com" # type: ignore
core.smtp_port = 587 # type: ignore
core.smtp_recipients = ["recipient@test.com"] # type: ignore
core.twilio_account_sid = "test" # type: ignore
core.twilio_auth_token = "1234123412341234" # type: ignore
core.sms_alert_recipients = ["+1234567890"] # type: ignore
# test sending email with alert template settings
core.send_mail("Test", "Test", alert_template=alert_template)
core.send_mail("Test", "Test", alert_template=alert_template) # type: ignore
core.send_sms("Test", alert_template=alert_template)
core.send_sms("Test", alert_template=alert_template) # type: ignore
@patch("agents.models.Agent.nats_cmd")
@patch("agents.tasks.agent_outage_sms_task.delay")
@@ -1315,6 +1317,7 @@ class TestAlertTasks(TacticalTestCase):
"alerts.AlertTemplate",
is_active=True,
agent_always_alert=True,
agent_script_actions=False,
action=failure_action,
action_timeout=30,
resolved_action=resolved_action,
@@ -1328,6 +1331,14 @@ class TestAlertTasks(TacticalTestCase):
agent_outages_task()
# should not have been called since agent_script_actions is set to False
nats_cmd.assert_not_called()
alert_template.agent_script_actions = True # type: ignore
alert_template.save() # type: ignore
agent_outages_task()
# this is what data should be
data = {
"func": "runscriptfull",
@@ -1340,14 +1351,6 @@ class TestAlertTasks(TacticalTestCase):
nats_cmd.reset_mock()
# Setup cmd mock
success = {
"retcode": 0,
"stdout": "success!",
"stderr": "",
"execution_time": 5.0000,
}
nats_cmd.side_effect = ["pong", success]
# make sure script run results were stored
@@ -1398,3 +1401,36 @@ class TestAlertTasks(TacticalTestCase):
["-Parameter", f"-Another '{alert.id}'"], # type: ignore
alert.parse_script_args(args=args), # type: ignore
)
def test_prune_resolved_alerts(self):
from .tasks import prune_resolved_alerts
# setup data
resolved_alerts = baker.make(
"alerts.Alert",
resolved=True,
_quantity=25,
)
alerts = baker.make(
"alerts.Alert",
resolved=False,
_quantity=25,
)
days = 0
for alert in resolved_alerts: # type: ignore
alert.alert_time = djangotime.now() - djangotime.timedelta(days=days)
alert.save()
days = days + 5
days = 0
for alert in alerts: # type: ignore
alert.alert_time = djangotime.now() - djangotime.timedelta(days=days)
alert.save()
days = days + 5
# delete resolved Alerts older than 30 days
prune_resolved_alerts(30)
self.assertEqual(Alert.objects.count(), 31)
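The outage test above relies on the new agent_script_actions flag: with it disabled, the failure action script is never dispatched even though the template defines one. A condensed sketch of the gate as written in Alert.handle_alert_failure in this changeset (not the full method):

# condensed from the handle_alert_failure logic shown earlier
def should_run_failure_action(alert_template, alert, run_script_action: bool) -> bool:
    return bool(
        alert_template
        and alert_template.action
        and run_script_action      # new per-type *_script_actions flag
        and not alert.action_run   # only run once per alert
    )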

View File

@@ -213,7 +213,8 @@ class TestAPIv3(TacticalTestCase):
# setup data
agent = baker.make_recipe("agents.agent")
task = baker.make("autotasks.AutomatedTask", agent=agent)
script = baker.make_recipe("scripts.script")
task = baker.make("autotasks.AutomatedTask", agent=agent, script=script)
url = f"/api/v3/{task.pk}/{agent.agent_id}/taskrunner/" # type: ignore

View File

@@ -20,4 +20,5 @@ urlpatterns = [
path("superseded/", views.SupersededWinUpdate.as_view()),
path("<int:pk>/chocoresult/", views.ChocoResult.as_view()),
path("<str:agentid>/recovery/", views.AgentRecovery.as_view()),
path("<int:pk>/<str:agentid>/histresult/", views.AgentHistoryResult.as_view()),
]

View File

@@ -6,7 +6,6 @@ from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from loguru import logger
from packaging import version as pyver
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.models import Token
@@ -15,20 +14,18 @@ from rest_framework.response import Response
from rest_framework.views import APIView
from accounts.models import User
from agents.models import Agent, AgentCustomField
from agents.serializers import WinAgentSerializer
from agents.models import Agent, AgentHistory
from agents.serializers import WinAgentSerializer, AgentHistorySerializer
from autotasks.models import AutomatedTask
from autotasks.serializers import TaskGOGetSerializer, TaskRunnerPatchSerializer
from checks.models import Check
from checks.serializers import CheckRunnerGetSerializer
from checks.utils import bytes2human
from logs.models import PendingAction
from logs.models import PendingAction, DebugLog
from software.models import InstalledSoftware
from tacticalrmm.utils import SoftwareList, filter_software, notify_error, reload_nats
from winupdate.models import WinUpdate, WinUpdatePolicy
logger.configure(**settings.LOG_CONFIG)
class CheckIn(APIView):
@@ -36,6 +33,10 @@ class CheckIn(APIView):
permission_classes = [IsAuthenticated]
def patch(self, request):
"""
!!! DEPRECATED AS OF AGENT 1.6.0 !!!
Endpoint will be removed in a future release
"""
from alerts.models import Alert
updated = False
@@ -182,7 +183,11 @@ class WinUpdates(APIView):
if reboot:
asyncio.run(agent.nats_cmd({"func": "rebootnow"}, wait=False))
logger.info(f"{agent.hostname} is rebooting after updates were installed.")
DebugLog.info(
agent=agent,
log_type="windows_updates",
message=f"{agent.hostname} is rebooting after updates were installed.",
)
agent.delete_superseded_updates()
return Response("ok")
@@ -304,10 +309,11 @@ class CheckRunner(APIView):
< djangotime.now()
- djangotime.timedelta(seconds=check.run_interval)
)
# if check interval isn't set, make sure the agent's check interval has passed before running
)
# if check interval isn't set, make sure the agent's check interval has passed before running
or (
check.last_run
not check.run_interval
and check.last_run
< djangotime.now() - djangotime.timedelta(seconds=agent.check_interval)
)
]
@@ -320,11 +326,16 @@ class CheckRunner(APIView):
def patch(self, request):
check = get_object_or_404(Check, pk=request.data["id"])
if pyver.parse(check.agent.version) < pyver.parse("1.5.7"):
return notify_error("unsupported")
check.last_run = djangotime.now()
check.save(update_fields=["last_run"])
status = check.handle_checkv2(request.data)
status = check.handle_check(request.data)
if status == "failing" and check.assignedtask.exists(): # type: ignore
check.handle_assigned_task()
return Response(status)
return Response("ok")
class CheckRunnerInterval(APIView):
@@ -344,13 +355,12 @@ class TaskRunner(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, pk, agentid):
agent = get_object_or_404(Agent, agent_id=agentid)
_ = get_object_or_404(Agent, agent_id=agentid)
task = get_object_or_404(AutomatedTask, pk=pk)
return Response(TaskGOGetSerializer(task).data)
def patch(self, request, pk, agentid):
from alerts.models import Alert
from logs.models import AuditLog
agent = get_object_or_404(Agent, agent_id=agentid)
task = get_object_or_404(AutomatedTask, pk=pk)
@@ -365,29 +375,7 @@ class TaskRunner(APIView):
if task.custom_field:
if not task.stderr:
if AgentCustomField.objects.filter(
field=task.custom_field, agent=task.agent
).exists():
agent_field = AgentCustomField.objects.get(
field=task.custom_field, agent=task.agent
)
else:
agent_field = AgentCustomField.objects.create(
field=task.custom_field, agent=task.agent
)
# get last line of stdout
value = new_task.stdout.split("\n")[-1].strip()
if task.custom_field.type in ["text", "number", "single", "datetime"]:
agent_field.string_value = value
agent_field.save()
elif task.custom_field.type == "multiple":
agent_field.multiple_value = value.split(",")
agent_field.save()
elif task.custom_field.type == "checkbox":
agent_field.bool_value = bool(value)
agent_field.save()
task.save_collector_results()
status = "passing"
else:
@@ -404,15 +392,6 @@ class TaskRunner(APIView):
else:
Alert.handle_alert_failure(new_task)
AuditLog.objects.create(
username=agent.hostname,
agent=agent.hostname,
object_type="agent",
action="task_run",
message=f"Scheduled Task {task.name} was run on {agent.hostname}",
after_value=AutomatedTask.serialize(new_task),
)
return Response("ok")
@@ -503,6 +482,7 @@ class NewAgent(APIView):
action="agent_install",
message=f"{request.user} installed new agent {agent.hostname}",
after_value=Agent.serialize(agent),
debug_info={"ip": request._client_ip},
)
return Response(
@@ -607,3 +587,16 @@ class AgentRecovery(APIView):
reload_nats()
return Response(ret)
class AgentHistoryResult(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
def patch(self, request, agentid, pk):
_ = get_object_or_404(Agent, agent_id=agentid)
hist = get_object_or_404(AgentHistory, pk=pk)
s = AgentHistorySerializer(instance=hist, data=request.data, partial=True)
s.is_valid(raise_exception=True)
s.save()
return Response("ok")

View File

@@ -33,7 +33,7 @@ class Policy(BaseAuditModel):
# get old policy if exists
old_policy = type(self).objects.get(pk=self.pk) if self.pk else None
super(BaseAuditModel, self).save(*args, **kwargs)
super(Policy, self).save(old_model=old_policy, *args, **kwargs)
# generate agent checks only if active and enforced were changed
if old_policy:
@@ -50,7 +50,7 @@ class Policy(BaseAuditModel):
from automation.tasks import generate_agent_checks_task
agents = list(self.related_agents().only("pk").values_list("pk", flat=True))
super(BaseAuditModel, self).delete(*args, **kwargs)
super(Policy, self).delete(*args, **kwargs)
generate_agent_checks_task.delay(agents=agents, create_tasks=True)
@@ -126,9 +126,9 @@ class Policy(BaseAuditModel):
@staticmethod
def serialize(policy):
# serializes the policy and returns json
from .serializers import PolicySerializer
from .serializers import PolicyAuditSerializer
return PolicySerializer(policy).data
return PolicyAuditSerializer(policy).data
@staticmethod
def cascade_policy_tasks(agent):

View File

@@ -83,8 +83,15 @@ class PolicyCheckSerializer(ModelSerializer):
class AutoTasksFieldSerializer(ModelSerializer):
assigned_check = PolicyCheckSerializer(read_only=True)
script = ReadOnlyField(source="script.id")
custom_field = ReadOnlyField(source="custom_field.id")
class Meta:
model = AutomatedTask
fields = "__all__"
depth = 1
class PolicyAuditSerializer(ModelSerializer):
class Meta:
model = Policy
fields = "__all__"

View File

@@ -3,7 +3,7 @@ from typing import Any, Dict, List, Union
from tacticalrmm.celery import app
@app.task
@app.task(retry_backoff=5, retry_jitter=True, retry_kwargs={"max_retries": 5})
def generate_agent_checks_task(
policy: int = None,
site: int = None,
@@ -57,7 +57,9 @@ def generate_agent_checks_task(
return "ok"
@app.task
@app.task(
acks_late=True, retry_backoff=5, retry_jitter=True, retry_kwargs={"max_retries": 5}
)
# updates policy managed check fields on agents
def update_policy_check_fields_task(check: int) -> str:
from checks.models import Check
@@ -73,7 +75,7 @@ def update_policy_check_fields_task(check: int) -> str:
return "ok"
@app.task
@app.task(retry_backoff=5, retry_jitter=True, retry_kwargs={"max_retries": 5})
# generates policy tasks on agents affected by a policy
def generate_agent_autotasks_task(policy: int = None) -> str:
from agents.models import Agent
@@ -100,7 +102,12 @@ def generate_agent_autotasks_task(policy: int = None) -> str:
return "ok"
@app.task
@app.task(
acks_late=True,
retry_backoff=5,
retry_jitter=True,
retry_kwargs={"max_retries": 5},
)
def delete_policy_autotasks_task(task: int) -> str:
from autotasks.models import AutomatedTask
@@ -120,7 +127,12 @@ def run_win_policy_autotasks_task(task: int) -> str:
return "ok"
@app.task
@app.task(
acks_late=True,
retry_backoff=5,
retry_jitter=True,
retry_kwargs={"max_retries": 5},
)
def update_policy_autotasks_fields_task(task: int, update_agent: bool = False) -> str:
from autotasks.models import AutomatedTask

View File

@@ -1,10 +1,9 @@
from itertools import cycle
from unittest.mock import patch
from model_bakery import baker, seq
from agents.models import Agent
from core.models import CoreSettings
from model_bakery import baker, seq
from tacticalrmm.test import TacticalTestCase
from winupdate.models import WinUpdatePolicy
@@ -124,7 +123,7 @@ class TestPolicyViews(TacticalTestCase):
resp = self.client.put(url, data, format="json")
self.assertEqual(resp.status_code, 200)
# only called if active or enforced are updated
# only called if active, enforced, or excluded objects are updated
generate_agent_checks_task.assert_not_called()
data = {
@@ -134,6 +133,23 @@ class TestPolicyViews(TacticalTestCase):
"enforced": False,
}
resp = self.client.put(url, data, format="json")
self.assertEqual(resp.status_code, 200)
generate_agent_checks_task.assert_called_with(
policy=policy.pk, create_tasks=True # type: ignore
)
generate_agent_checks_task.reset_mock()
# make sure policies are re-evaluated when excluded changes
agents = baker.make_recipe("agents.agent", _quantity=2)
clients = baker.make("clients.Client", _quantity=2)
sites = baker.make("clients.Site", _quantity=2)
data = {
"excluded_agents": [agent.pk for agent in agents], # type: ignore
"excluded_sites": [site.pk for site in sites], # type: ignore
"excluded_clients": [client.pk for client in clients], # type: ignore
}
resp = self.client.put(url, data, format="json")
self.assertEqual(resp.status_code, 200)
generate_agent_checks_task.assert_called_with(

View File

@@ -1,14 +1,13 @@
from django.shortcuts import get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from agents.models import Agent
from agents.serializers import AgentHostnameSerializer
from autotasks.models import AutomatedTask
from checks.models import Check
from clients.models import Client
from clients.serializers import ClientSerializer, SiteSerializer
from django.shortcuts import get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from tacticalrmm.utils import notify_error
from winupdate.models import WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
@@ -64,12 +63,22 @@ class GetUpdateDeletePolicy(APIView):
return Response(PolicySerializer(policy).data)
def put(self, request, pk):
from .tasks import generate_agent_checks_task
policy = get_object_or_404(Policy, pk=pk)
serializer = PolicySerializer(instance=policy, data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
# check for excluded objects in the request and, if present, regenerate policies
if (
"excluded_sites" in request.data.keys()
or "excluded_clients" in request.data.keys()
or "excluded_agents" in request.data.keys()
):
generate_agent_checks_task.delay(policy=pk, create_tasks=True)
return Response("ok")
def delete(self, request, pk):

View File

@@ -0,0 +1,18 @@
# Generated by Django 3.2.1 on 2021-05-29 03:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('autotasks', '0021_alter_automatedtask_custom_field'),
]
operations = [
migrations.AddField(
model_name='automatedtask',
name='collector_all_output',
field=models.BooleanField(default=False),
),
]

View File

@@ -6,18 +6,15 @@ from typing import List
import pytz
from alerts.models import SEVERITY_CHOICES
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.db.models.fields import DateTimeField
from django.db.utils import DatabaseError
from django.utils import timezone as djangotime
from logs.models import BaseAuditModel
from loguru import logger
from logs.models import BaseAuditModel, DebugLog
from packaging import version as pyver
from tacticalrmm.utils import bitdays_to_string
logger.configure(**settings.LOG_CONFIG)
RUN_TIME_DAY_CHOICES = [
(0, "Monday"),
(1, "Tuesday"),
@@ -104,6 +101,7 @@ class AutomatedTask(BaseAuditModel):
task_type = models.CharField(
max_length=100, choices=TASK_TYPE_CHOICES, default="manual"
)
collector_all_output = models.BooleanField(default=False)
run_time_date = DateTimeField(null=True, blank=True)
remove_if_not_scheduled = models.BooleanField(default=False)
run_asap_after_missed = models.BooleanField(default=False) # added in agent v1.4.7
@@ -182,6 +180,7 @@ class AutomatedTask(BaseAuditModel):
"remove_if_not_scheduled",
"run_asap_after_missed",
"custom_field",
"collector_all_output",
]
@staticmethod
@@ -192,9 +191,9 @@ class AutomatedTask(BaseAuditModel):
@staticmethod
def serialize(task):
# serializes the task and returns json
from .serializers import TaskSerializer
from .serializers import TaskAuditSerializer
return TaskSerializer(task).data
return TaskAuditSerializer(task).data
def create_policy_task(self, agent=None, policy=None, assigned_check=None):
@@ -251,7 +250,7 @@ class AutomatedTask(BaseAuditModel):
elif self.task_type == "runonce":
# check if scheduled time is in the past
agent_tz = pytz.timezone(agent.timezone)
agent_tz = pytz.timezone(agent.timezone) # type: ignore
task_time_utc = self.run_time_date.replace(tzinfo=agent_tz).astimezone(
pytz.utc
)
@@ -277,7 +276,7 @@ class AutomatedTask(BaseAuditModel):
},
}
if self.run_asap_after_missed and pyver.parse(agent.version) >= pyver.parse(
if self.run_asap_after_missed and pyver.parse(agent.version) >= pyver.parse( # type: ignore
"1.4.7"
):
nats_data["schedtaskpayload"]["run_asap_after_missed"] = True
@@ -298,19 +297,25 @@ class AutomatedTask(BaseAuditModel):
else:
return "error"
r = asyncio.run(agent.nats_cmd(nats_data, timeout=5))
r = asyncio.run(agent.nats_cmd(nats_data, timeout=5)) # type: ignore
if r != "ok":
self.sync_status = "initial"
self.save(update_fields=["sync_status"])
logger.warning(
f"Unable to create scheduled task {self.name} on {agent.hostname}. It will be created when the agent checks in."
DebugLog.warning(
agent=agent,
log_type="agent_issues",
message=f"Unable to create scheduled task {self.name} on {agent.hostname}. It will be created when the agent checks in.", # type: ignore
)
return "timeout"
else:
self.sync_status = "synced"
self.save(update_fields=["sync_status"])
logger.info(f"{agent.hostname} task {self.name} was successfully created")
DebugLog.info(
agent=agent,
log_type="agent_issues",
message=f"{agent.hostname} task {self.name} was successfully created", # type: ignore
)
return "ok"
@@ -330,19 +335,25 @@ class AutomatedTask(BaseAuditModel):
"enabled": self.enabled,
},
}
r = asyncio.run(agent.nats_cmd(nats_data, timeout=5))
r = asyncio.run(agent.nats_cmd(nats_data, timeout=5)) # type: ignore
if r != "ok":
self.sync_status = "notsynced"
self.save(update_fields=["sync_status"])
logger.warning(
f"Unable to modify scheduled task {self.name} on {agent.hostname}. It will try again on next agent checkin"
DebugLog.warning(
agent=agent,
log_type="agent_issues",
message=f"Unable to modify scheduled task {self.name} on {agent.hostname}({agent.pk}). It will try again on next agent checkin", # type: ignore
)
return "timeout"
else:
self.sync_status = "synced"
self.save(update_fields=["sync_status"])
logger.info(f"{agent.hostname} task {self.name} was successfully modified")
DebugLog.info(
agent=agent,
log_type="agent_issues",
message=f"{agent.hostname} task {self.name} was successfully modified", # type: ignore
)
return "ok"
@@ -359,18 +370,29 @@ class AutomatedTask(BaseAuditModel):
"func": "delschedtask",
"schedtaskpayload": {"name": self.win_task_name},
}
r = asyncio.run(agent.nats_cmd(nats_data, timeout=10))
r = asyncio.run(agent.nats_cmd(nats_data, timeout=10)) # type: ignore
if r != "ok" and "The system cannot find the file specified" not in r:
self.sync_status = "pendingdeletion"
self.save(update_fields=["sync_status"])
logger.warning(
f"{agent.hostname} task {self.name} was successfully modified"
try:
self.save(update_fields=["sync_status"])
except DatabaseError:
pass
DebugLog.warning(
agent=agent,
log_type="agent_issues",
message=f"{agent.hostname} task {self.name} will be deleted on next checkin", # type: ignore
)
return "timeout"
else:
self.delete()
logger.info(f"{agent.hostname} task {self.name} was deleted")
DebugLog.info(
agent=agent,
log_type="agent_issues",
message=f"{agent.hostname}({agent.pk}) task {self.name} was deleted", # type: ignore
)
return "ok"
@@ -383,9 +405,20 @@ class AutomatedTask(BaseAuditModel):
.first()
)
asyncio.run(agent.nats_cmd({"func": "runtask", "taskpk": self.pk}, wait=False))
asyncio.run(agent.nats_cmd({"func": "runtask", "taskpk": self.pk}, wait=False)) # type: ignore
return "ok"
def save_collector_results(self):
agent_field = self.custom_field.get_or_create_field_value(self.agent)
value = (
self.stdout.strip()
if self.collector_all_output
else self.stdout.strip().split("\n")[-1].strip()
)
agent_field.save_to_field(value)
def should_create_alert(self, alert_template=None):
return (
self.dashboard_alert
@@ -405,9 +438,9 @@ class AutomatedTask(BaseAuditModel):
from core.models import CoreSettings
CORE = CoreSettings.objects.first()
# Format of Email sent when Task has email alert
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Failed"
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
else:
subject = f"{self} Failed"
@@ -416,16 +449,15 @@ class AutomatedTask(BaseAuditModel):
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_mail(subject, body, self.agent.alert_template)
CORE.send_mail(subject, body, self.agent.alert_template) # type: ignore
def send_sms(self):
from core.models import CoreSettings
CORE = CoreSettings.objects.first()
# Format of SMS sent when Task has SMS alert
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Failed"
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
else:
subject = f"{self} Failed"
@@ -434,7 +466,7 @@ class AutomatedTask(BaseAuditModel):
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_sms(body, alert_template=self.agent.alert_template)
CORE.send_sms(body, alert_template=self.agent.alert_template) # type: ignore
def send_resolved_email(self):
from core.models import CoreSettings
@@ -446,7 +478,7 @@ class AutomatedTask(BaseAuditModel):
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_mail(subject, body, alert_template=self.agent.alert_template)
CORE.send_mail(subject, body, alert_template=self.agent.alert_template) # type: ignore
def send_resolved_sms(self):
from core.models import CoreSettings
@@ -457,4 +489,4 @@ class AutomatedTask(BaseAuditModel):
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_sms(body, alert_template=self.agent.alert_template)
CORE.send_sms(body, alert_template=self.agent.alert_template) # type: ignore
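The new save_collector_results method stores either the full trimmed stdout or only its trimmed last line, depending on collector_all_output. A minimal standalone sketch of that selection, assuming stdout is a plain string (the helper name below is illustrative, not part of the codebase):

def select_collector_value(stdout: str, collector_all_output: bool) -> str:
    # Keep everything when collector_all_output is set, otherwise write only
    # the trimmed last line to the custom field.
    stripped = stdout.strip()
    return stripped if collector_all_output else stripped.split("\n")[-1].strip()

assert select_collector_value("line one\nline two \n", False) == "line two"
assert select_collector_value("line one\nline two\n", True) == "line one\nline two"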

View File

@@ -68,6 +68,12 @@ class TaskRunnerGetSerializer(serializers.ModelSerializer):
class TaskGOGetSerializer(serializers.ModelSerializer):
script = ScriptCheckSerializer(read_only=True)
script_args = serializers.SerializerMethodField()
def get_script_args(self, obj):
return Script.parse_script_args(
agent=obj.agent, shell=obj.script.shell, args=obj.script_args
)
class Meta:
model = AutomatedTask
@@ -78,3 +84,9 @@ class TaskRunnerPatchSerializer(serializers.ModelSerializer):
class Meta:
model = AutomatedTask
fields = "__all__"
class TaskAuditSerializer(serializers.ModelSerializer):
class Meta:
model = AutomatedTask
fields = "__all__"
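TaskGOGetSerializer resolves script_args through a SerializerMethodField, so the value is computed by get_script_args(self, obj) at serialization time rather than read from the model. A small sketch of that DRF pattern, assuming a configured Django/DRF project (the serializer and field names below are illustrative):

from rest_framework import serializers

class ExampleSerializer(serializers.Serializer):
    # A SerializerMethodField is read-only and is populated by calling
    # get_<field_name>(self, obj) when the instance is serialized; this is
    # how script_args above ends up delegating to Script.parse_script_args.
    doubled = serializers.SerializerMethodField()

    def get_doubled(self, obj):
        return obj["value"] * 2

ExampleSerializer({"value": 21}).data  # {'doubled': 42}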

View File

@@ -1,18 +1,16 @@
import asyncio
import datetime as dt
from logging import log
import random
from time import sleep
from typing import Union
from django.conf import settings
from django.utils import timezone as djangotime
from loguru import logger
from autotasks.models import AutomatedTask
from logs.models import DebugLog
from tacticalrmm.celery import app
logger.configure(**settings.LOG_CONFIG)
@app.task
def create_win_task_schedule(pk):
@@ -53,12 +51,20 @@ def remove_orphaned_win_tasks(agentpk):
agent = Agent.objects.get(pk=agentpk)
logger.info(f"Orphaned task cleanup initiated on {agent.hostname}.")
DebugLog.info(
agent=agent,
log_type="agent_issues",
message=f"Orphaned task cleanup initiated on {agent.hostname}.",
)
r = asyncio.run(agent.nats_cmd({"func": "listschedtasks"}, timeout=10))
if not isinstance(r, list) and not r: # empty list
logger.error(f"Unable to clean up scheduled tasks on {agent.hostname}: {r}")
DebugLog.error(
agent=agent,
log_type="agent_issues",
message=f"Unable to clean up scheduled tasks on {agent.hostname}: {r}",
)
return "notlist"
agent_task_names = list(agent.autotasks.values_list("win_task_name", flat=True))
@@ -83,13 +89,23 @@ def remove_orphaned_win_tasks(agentpk):
}
ret = asyncio.run(agent.nats_cmd(nats_data, timeout=10))
if ret != "ok":
logger.error(
f"Unable to clean up orphaned task {task} on {agent.hostname}: {ret}"
DebugLog.error(
agent=agent,
log_type="agent_issues",
message=f"Unable to clean up orphaned task {task} on {agent.hostname}: {ret}",
)
else:
logger.info(f"Removed orphaned task {task} from {agent.hostname}")
DebugLog.info(
agent=agent,
log_type="agent_issues",
message=f"Removed orphaned task {task} from {agent.hostname}",
)
logger.info(f"Orphaned task cleanup finished on {agent.hostname}")
DebugLog.info(
agent=agent,
log_type="agent_issues",
message=f"Orphaned task cleanup finished on {agent.hostname}",
)
@app.task

View File

@@ -0,0 +1,22 @@
# Generated by Django 3.2.1 on 2021-06-06 16:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('checks', '0023_check_run_interval'),
]
operations = [
migrations.RemoveField(
model_name='checkhistory',
name='check_history',
),
migrations.AddField(
model_name='checkhistory',
name='check_id',
field=models.PositiveIntegerField(default=0),
),
]

View File

@@ -1,4 +1,3 @@
import asyncio
import json
import os
import string
@@ -13,12 +12,6 @@ from django.contrib.postgres.fields import ArrayField
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from logs.models import BaseAuditModel
from loguru import logger
from packaging import version as pyver
from .utils import bytes2human
logger.configure(**settings.LOG_CONFIG)
CHECK_TYPE_CHOICES = [
("diskspace", "Disk Space Check"),
@@ -316,9 +309,9 @@ class Check(BaseAuditModel):
)
def add_check_history(self, value: int, more_info: Any = None) -> None:
CheckHistory.objects.create(check_history=self, y=value, results=more_info)
CheckHistory.objects.create(check_id=self.pk, y=value, results=more_info)
def handle_checkv2(self, data):
def handle_check(self, data):
from alerts.models import Alert
# cpuload or mem checks
@@ -349,9 +342,6 @@ class Check(BaseAuditModel):
elif self.check_type == "diskspace":
if data["exists"]:
percent_used = round(data["percent_used"])
total = bytes2human(data["total"])
free = bytes2human(data["free"])
if self.error_threshold and (100 - percent_used) < self.error_threshold:
self.status = "failing"
self.alert_severity = "error"
@@ -365,7 +355,7 @@ class Check(BaseAuditModel):
else:
self.status = "passing"
self.more_info = f"Total: {total}B, Free: {free}B"
self.more_info = data["more_info"]
# add check history
self.add_check_history(100 - percent_used)
@@ -381,12 +371,7 @@ class Check(BaseAuditModel):
self.stdout = data["stdout"]
self.stderr = data["stderr"]
self.retcode = data["retcode"]
try:
# python agent
self.execution_time = "{:.4f}".format(data["stop"] - data["start"])
except:
# golang agent
self.execution_time = "{:.4f}".format(data["runtime"])
self.execution_time = "{:.4f}".format(data["runtime"])
if data["retcode"] in self.info_return_codes:
self.alert_severity = "info"
@@ -422,22 +407,8 @@ class Check(BaseAuditModel):
# ping checks
elif self.check_type == "ping":
output = data["output"]
if pyver.parse(self.agent.version) <= pyver.parse("1.5.2"):
# DEPRECATED
success = ["Reply", "bytes", "time", "TTL"]
if data["has_stdout"]:
if all(x in output for x in success):
self.status = "passing"
else:
self.status = "failing"
elif data["has_stderr"]:
self.status = "failing"
else:
self.status = data["status"]
self.more_info = output
self.status = data["status"]
self.more_info = data["output"]
self.save(update_fields=["more_info"])
self.add_check_history(
@@ -446,41 +417,8 @@ class Check(BaseAuditModel):
# windows service checks
elif self.check_type == "winsvc":
svc_stat = data["status"]
self.more_info = f"Status {svc_stat.upper()}"
if data["exists"]:
if svc_stat == "running":
self.status = "passing"
elif svc_stat == "start_pending" and self.pass_if_start_pending:
self.status = "passing"
else:
if self.agent and self.restart_if_stopped:
nats_data = {
"func": "winsvcaction",
"payload": {"name": self.svc_name, "action": "start"},
}
r = asyncio.run(self.agent.nats_cmd(nats_data, timeout=32))
if r == "timeout" or r == "natsdown":
self.status = "failing"
elif not r["success"] and r["errormsg"]:
self.status = "failing"
elif r["success"]:
self.status = "passing"
self.more_info = f"Status RUNNING"
else:
self.status = "failing"
else:
self.status = "failing"
else:
if self.pass_if_svc_not_exist:
self.status = "passing"
else:
self.status = "failing"
self.more_info = f"Service {self.svc_name} does not exist"
self.status = data["status"]
self.more_info = data["more_info"]
self.save(update_fields=["more_info"])
self.add_check_history(
@@ -488,49 +426,7 @@ class Check(BaseAuditModel):
)
elif self.check_type == "eventlog":
log = []
is_wildcard = self.event_id_is_wildcard
eventType = self.event_type
eventID = self.event_id
source = self.event_source
message = self.event_message
r = data["log"]
for i in r:
if i["eventType"] == eventType:
if not is_wildcard and not int(i["eventID"]) == eventID:
continue
if not source and not message:
if is_wildcard:
log.append(i)
elif int(i["eventID"]) == eventID:
log.append(i)
continue
if source and message:
if is_wildcard:
if source in i["source"] and message in i["message"]:
log.append(i)
elif int(i["eventID"]) == eventID:
if source in i["source"] and message in i["message"]:
log.append(i)
continue
if source and source in i["source"]:
if is_wildcard:
log.append(i)
elif int(i["eventID"]) == eventID:
log.append(i)
if message and message in i["message"]:
if is_wildcard:
log.append(i)
elif int(i["eventID"]) == eventID:
log.append(i)
log = data["log"]
if self.fail_when == "contains":
if log and len(log) >= self.number_of_events_b4_alert:
self.status = "failing"
@@ -567,12 +463,17 @@ class Check(BaseAuditModel):
return self.status
def handle_assigned_task(self) -> None:
for task in self.assignedtask.all(): # type: ignore
if task.enabled:
task.run_win_task()
@staticmethod
def serialize(check):
# serializes the check and returns json
from .serializers import CheckSerializer
from .serializers import CheckAuditSerializer
return CheckSerializer(check).data
return CheckAuditSerializer(check).data
# for policy diskchecks
@staticmethod
@@ -604,7 +505,12 @@ class Check(BaseAuditModel):
)
for task in self.assignedtask.all(): # type: ignore
task.create_policy_task(agent=agent, policy=policy, assigned_check=check)
if policy or (
agent and not agent.autotasks.filter(parent_task=task.pk).exists()
):
task.create_policy_task(
agent=agent, policy=policy, assigned_check=check
)
for field in self.policy_fields_to_copy:
setattr(check, field, getattr(self, field))
@@ -778,14 +684,10 @@ class Check(BaseAuditModel):
class CheckHistory(models.Model):
check_history = models.ForeignKey(
Check,
related_name="check_history",
on_delete=models.CASCADE,
)
check_id = models.PositiveIntegerField(default=0)
x = models.DateTimeField(auto_now_add=True)
y = models.PositiveIntegerField(null=True, blank=True, default=None)
results = models.JSONField(null=True, blank=True)
def __str__(self):
return self.check_history.readable_desc
return self.x

View File

@@ -6,6 +6,7 @@ from autotasks.models import AutomatedTask
from scripts.serializers import ScriptCheckSerializer, ScriptSerializer
from .models import Check, CheckHistory
from scripts.models import Script
class AssignedTaskField(serializers.ModelSerializer):
@@ -158,13 +159,16 @@ class AssignedTaskCheckRunnerField(serializers.ModelSerializer):
class CheckRunnerGetSerializer(serializers.ModelSerializer):
# only send data needed for agent to run a check
assigned_tasks = serializers.SerializerMethodField()
script = ScriptCheckSerializer(read_only=True)
script_args = serializers.SerializerMethodField()
def get_assigned_tasks(self, obj):
if obj.assignedtask.exists():
tasks = obj.assignedtask.all()
return AssignedTaskCheckRunnerField(tasks, many=True).data
def get_script_args(self, obj):
if obj.check_type != "script":
return []
return Script.parse_script_args(
agent=obj.agent, shell=obj.script.shell, args=obj.script_args
)
class Meta:
model = Check
@@ -193,6 +197,7 @@ class CheckRunnerGetSerializer(serializers.ModelSerializer):
"modified_by",
"modified_time",
"history",
"dashboard_alert",
]
@@ -215,3 +220,9 @@ class CheckHistorySerializer(serializers.ModelSerializer):
class Meta:
model = CheckHistory
fields = ("x", "y", "results")
class CheckAuditSerializer(serializers.ModelSerializer):
class Meta:
model = Check
fields = "__all__"

View File

@@ -363,10 +363,10 @@ class TestCheckViews(TacticalTestCase):
# setup data
agent = baker.make_recipe("agents.agent")
check = baker.make_recipe("checks.diskspace_check", agent=agent)
baker.make("checks.CheckHistory", check_history=check, _quantity=30)
baker.make("checks.CheckHistory", check_id=check.id, _quantity=30)
check_history_data = baker.make(
"checks.CheckHistory",
check_history=check,
check_id=check.id,
_quantity=30,
)
@@ -400,17 +400,17 @@ class TestCheckTasks(TacticalTestCase):
def setUp(self):
self.authenticate()
self.setup_coresettings()
self.agent = baker.make_recipe("agents.agent")
self.agent = baker.make_recipe("agents.agent", version="1.5.7")
def test_prune_check_history(self):
from .tasks import prune_check_history
# setup data
check = baker.make_recipe("checks.diskspace_check")
baker.make("checks.CheckHistory", check_history=check, _quantity=30)
baker.make("checks.CheckHistory", check_id=check.id, _quantity=30)
check_history_data = baker.make(
"checks.CheckHistory",
check_history=check,
check_id=check.id,
_quantity=30,
)
@@ -526,6 +526,7 @@ class TestCheckTasks(TacticalTestCase):
"percent_used": 85,
"total": 500,
"free": 400,
"more_info": "More info",
}
resp = self.client.patch(url, data, format="json")
@@ -543,6 +544,7 @@ class TestCheckTasks(TacticalTestCase):
"percent_used": 95,
"total": 500,
"free": 400,
"more_info": "More info",
}
resp = self.client.patch(url, data, format="json")
@@ -573,6 +575,7 @@ class TestCheckTasks(TacticalTestCase):
"percent_used": 95,
"total": 500,
"free": 400,
"more_info": "More info",
}
resp = self.client.patch(url, data, format="json")
@@ -592,6 +595,7 @@ class TestCheckTasks(TacticalTestCase):
"percent_used": 95,
"total": 500,
"free": 400,
"more_info": "More info",
}
resp = self.client.patch(url, data, format="json")
@@ -608,6 +612,7 @@ class TestCheckTasks(TacticalTestCase):
"percent_used": 50,
"total": 500,
"free": 400,
"more_info": "More info",
}
resp = self.client.patch(url, data, format="json")
@@ -791,12 +796,7 @@ class TestCheckTasks(TacticalTestCase):
)
# test failing info
data = {
"id": ping.id,
"output": "Reply from 192.168.1.27: Destination host unreachable",
"has_stdout": True,
"has_stderr": False,
}
data = {"id": ping.id, "status": "failing", "output": "reply from a.com"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
@@ -806,13 +806,6 @@ class TestCheckTasks(TacticalTestCase):
self.assertEqual(new_check.alert_severity, "info")
# test failing warning
data = {
"id": ping.id,
"output": "Reply from 192.168.1.27: Destination host unreachable",
"has_stdout": True,
"has_stderr": False,
}
ping.alert_severity = "warning"
ping.save()
@@ -824,13 +817,6 @@ class TestCheckTasks(TacticalTestCase):
self.assertEqual(new_check.alert_severity, "warning")
# test failing error
data = {
"id": ping.id,
"output": "Reply from 192.168.1.27: Destination host unreachable",
"has_stdout": True,
"has_stderr": False,
}
ping.alert_severity = "error"
ping.save()
@@ -842,13 +828,6 @@ class TestCheckTasks(TacticalTestCase):
self.assertEqual(new_check.alert_severity, "error")
# test failing error
data = {
"id": ping.id,
"output": "some output",
"has_stdout": False,
"has_stderr": True,
}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
@@ -857,12 +836,7 @@ class TestCheckTasks(TacticalTestCase):
self.assertEqual(new_check.alert_severity, "error")
# test passing
data = {
"id": ping.id,
"output": "Reply from 192.168.1.1: bytes=32 time<1ms TTL=64",
"has_stdout": True,
"has_stderr": False,
}
data = {"id": ping.id, "status": "passing", "output": "reply from a.com"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
@@ -881,7 +855,7 @@ class TestCheckTasks(TacticalTestCase):
)
# test passing running
data = {"id": winsvc.id, "exists": True, "status": "running"}
data = {"id": winsvc.id, "status": "passing", "more_info": "ok"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
@@ -889,20 +863,8 @@ class TestCheckTasks(TacticalTestCase):
new_check = Check.objects.get(pk=winsvc.id)
self.assertEqual(new_check.status, "passing")
# test passing start pending
winsvc.pass_if_start_pending = True
winsvc.save()
data = {"id": winsvc.id, "exists": True, "status": "start_pending"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=winsvc.id)
self.assertEqual(new_check.status, "passing")
# test failing no start
data = {"id": winsvc.id, "exists": True, "status": "not running"}
# test failing
data = {"id": winsvc.id, "status": "failing", "more_info": "ok"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
@@ -911,7 +873,7 @@ class TestCheckTasks(TacticalTestCase):
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "info")
# test failing and attempt start
""" # test failing and attempt start
winsvc.restart_if_stopped = True
winsvc.alert_severity = "warning"
winsvc.save()
@@ -976,9 +938,9 @@ class TestCheckTasks(TacticalTestCase):
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=winsvc.id)
self.assertEqual(new_check.status, "passing")
self.assertEqual(new_check.status, "passing") """
def test_handle_eventlog_check(self):
""" def test_handle_eventlog_check(self):
from checks.models import Check
url = "/api/v3/checkrunner/"
@@ -1180,4 +1142,4 @@ class TestCheckTasks(TacticalTestCase):
new_check = Check.objects.get(pk=eventlog.id)
self.assertEquals(new_check.status, "passing")
self.assertEquals(new_check.status, "passing") """

View File

@@ -8,5 +8,5 @@ urlpatterns = [
path("<pk>/loadchecks/", views.load_checks),
path("getalldisks/", views.get_disks_for_policies),
path("runchecks/<pk>/", views.run_checks),
path("history/<int:checkpk>/", views.CheckHistory.as_view()),
path("history/<int:checkpk>/", views.GetCheckHistory.as_view()),
]

View File

@@ -15,7 +15,7 @@ from automation.models import Policy
from scripts.models import Script
from tacticalrmm.utils import notify_error
from .models import Check
from .models import Check, CheckHistory
from .permissions import ManageChecksPerms, RunChecksPerms
from .serializers import CheckHistorySerializer, CheckSerializer
@@ -146,7 +146,7 @@ class GetUpdateDeleteCheck(APIView):
return Response(f"{check.readable_desc} was deleted!")
class CheckHistory(APIView):
class GetCheckHistory(APIView):
def patch(self, request, checkpk):
check = get_object_or_404(Check, pk=checkpk)
@@ -160,7 +160,7 @@ class CheckHistory(APIView):
- djangotime.timedelta(days=request.data["timeFilter"]),
)
check_history = check.check_history.filter(timeFilter).order_by("-x") # type: ignore
check_history = CheckHistory.objects.filter(check_id=checkpk).filter(timeFilter).order_by("-x") # type: ignore
return Response(
CheckHistorySerializer(

View File

@@ -33,13 +33,17 @@ class Client(BaseAuditModel):
blank=True,
)
def save(self, *args, **kw):
def save(self, *args, **kwargs):
from alerts.tasks import cache_agents_alert_template
from automation.tasks import generate_agent_checks_task
# get old client if exists
old_client = type(self).objects.get(pk=self.pk) if self.pk else None
super(BaseAuditModel, self).save(*args, **kw)
old_client = Client.objects.get(pk=self.pk) if self.pk else None
super(Client, self).save(
old_model=old_client,
*args,
**kwargs,
)
# check if policies have changed and initiate task to reapply policies if so
if old_client:
@@ -50,7 +54,6 @@ class Client(BaseAuditModel):
old_client.block_policy_inheritance != self.block_policy_inheritance
)
):
generate_agent_checks_task.delay(
client=self.pk,
create_tasks=True,
@@ -87,12 +90,20 @@ class Client(BaseAuditModel):
"offline_time",
)
.filter(site__client=self)
.prefetch_related("agentchecks")
.prefetch_related("agentchecks", "autotasks")
)
data = {"error": False, "warning": False}
for agent in agents:
if agent.maintenance_mode:
break
if agent.overdue_email_alert or agent.overdue_text_alert:
if agent.status == "overdue":
data["error"] = True
break
if agent.checks["has_failing_checks"]:
if agent.checks["warning"]:
@@ -102,19 +113,20 @@ class Client(BaseAuditModel):
data["error"] = True
break
if agent.overdue_email_alert or agent.overdue_text_alert:
if agent.status == "overdue":
data["error"] = True
break
if agent.autotasks.exists(): # type: ignore
for i in agent.autotasks.all(): # type: ignore
if i.status == "failing" and i.alert_severity == "error":
data["error"] = True
break
return data
@staticmethod
def serialize(client):
# serializes the client and returns json
from .serializers import ClientSerializer
from .serializers import ClientAuditSerializer
return ClientSerializer(client).data
# serializes the client and returns json
return ClientAuditSerializer(client).data
class Site(BaseAuditModel):
@@ -144,13 +156,17 @@ class Site(BaseAuditModel):
blank=True,
)
def save(self, *args, **kw):
def save(self, *args, **kwargs):
from alerts.tasks import cache_agents_alert_template
from automation.tasks import generate_agent_checks_task
# get old site if exists
old_site = type(self).objects.get(pk=self.pk) if self.pk else None
super(Site, self).save(*args, **kw)
old_site = Site.objects.get(pk=self.pk) if self.pk else None
super(Site, self).save(
old_model=old_site,
*args,
**kwargs,
)
# check if policies have changed and initiate task to reapply policies if so
if old_site:
@@ -159,11 +175,10 @@ class Site(BaseAuditModel):
or (old_site.workstation_policy != self.workstation_policy)
or (old_site.block_policy_inheritance != self.block_policy_inheritance)
):
generate_agent_checks_task.delay(site=self.pk, create_tasks=True)
if old_site.alert_template != self.alert_template:
cache_agents_alert_template.delay()
if old_site.alert_template != self.alert_template:
cache_agents_alert_template.delay()
class Meta:
ordering = ("name",)
@@ -192,12 +207,19 @@ class Site(BaseAuditModel):
"offline_time",
)
.filter(site=self)
.prefetch_related("agentchecks")
.prefetch_related("agentchecks", "autotasks")
)
data = {"error": False, "warning": False}
for agent in agents:
if agent.maintenance_mode:
break
if agent.overdue_email_alert or agent.overdue_text_alert:
if agent.status == "overdue":
data["error"] = True
break
if agent.checks["has_failing_checks"]:
if agent.checks["warning"]:
@@ -207,19 +229,20 @@ class Site(BaseAuditModel):
data["error"] = True
break
if agent.overdue_email_alert or agent.overdue_text_alert:
if agent.status == "overdue":
data["error"] = True
break
if agent.autotasks.exists(): # type: ignore
for i in agent.autotasks.all(): # type: ignore
if i.status == "failing" and i.alert_severity == "error":
data["error"] = True
break
return data
@staticmethod
def serialize(site):
# serializes the site and returns json
from .serializers import SiteSerializer
from .serializers import SiteAuditSerializer
return SiteSerializer(site).data
# serializes the site and returns json
return SiteAuditSerializer(site).data
MON_TYPE_CHOICES = [
@@ -291,6 +314,22 @@ class ClientCustomField(models.Model):
else:
return self.string_value
def save_to_field(self, value):
if self.field.type in [
"text",
"number",
"single",
"datetime",
]:
self.string_value = value
self.save()
elif self.field.type == "multiple":
self.multiple_value = value.split(",")
self.save()
elif self.field.type == "checkbox":
self.bool_value = bool(value)
self.save()
class SiteCustomField(models.Model):
site = models.ForeignKey(
@@ -325,3 +364,19 @@ class SiteCustomField(models.Model):
return self.bool_value
else:
return self.string_value
def save_to_field(self, value):
if self.field.type in [
"text",
"number",
"single",
"datetime",
]:
self.string_value = value
self.save()
elif self.field.type == "multiple":
self.multiple_value = value.split(",")
self.save()
elif self.field.type == "checkbox":
self.bool_value = bool(value)
self.save()
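Both save_to_field implementations above dispatch on the custom field's type: text-like types store the raw string, "multiple" splits on commas, and "checkbox" coerces to a boolean. A minimal sketch of that mapping outside the ORM (the helper name is illustrative):

def coerce_custom_field(field_type: str, value: str):
    # Mirrors the save_to_field dispatch: text-like values pass through,
    # "multiple" becomes a list, "checkbox" becomes a bool.
    if field_type in ("text", "number", "single", "datetime"):
        return value
    if field_type == "multiple":
        return value.split(",")
    if field_type == "checkbox":
        return bool(value)
    raise ValueError(f"unsupported field type: {field_type}")

assert coerce_custom_field("multiple", "a,b,c") == ["a", "b", "c"]
assert coerce_custom_field("checkbox", "1") is True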

View File

@@ -1,4 +1,10 @@
from rest_framework.serializers import ModelSerializer, ReadOnlyField, ValidationError
from django.db.models.base import Model
from rest_framework.serializers import (
ModelSerializer,
ReadOnlyField,
Serializer,
ValidationError,
)
from .models import Client, ClientCustomField, Deployment, Site, SiteCustomField
@@ -134,3 +140,15 @@ class DeploymentSerializer(ModelSerializer):
"install_flags",
"created",
]
class SiteAuditSerializer(ModelSerializer):
class Meta:
model = Site
fields = "__all__"
class ClientAuditSerializer(ModelSerializer):
class Meta:
model = Client
fields = "__all__"

View File

@@ -3,10 +3,8 @@ import re
import uuid
import pytz
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from loguru import logger
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
@@ -26,8 +24,6 @@ from .serializers import (
SiteSerializer,
)
logger.configure(**settings.LOG_CONFIG)
class GetAddClients(APIView):
permission_classes = [IsAuthenticated, ManageClientsPerms]
@@ -251,16 +247,19 @@ class AgentDeployment(APIView):
def post(self, request):
from knox.models import AuthToken
from accounts.models import User
client = get_object_or_404(Client, pk=request.data["client"])
site = get_object_or_404(Site, pk=request.data["site"])
installer_user = User.objects.filter(is_installer_user=True).first()
expires = dt.datetime.strptime(
request.data["expires"], "%Y-%m-%d %H:%M"
).astimezone(pytz.timezone("UTC"))
now = djangotime.now()
delta = expires - now
obj, token = AuthToken.objects.create(user=request.user, expiry=delta)
obj, token = AuthToken.objects.create(user=installer_user, expiry=delta)
flags = {
"power": request.data["power"],

View File

@@ -53,9 +53,9 @@ If (Get-Service $serviceName -ErrorAction SilentlyContinue) {
Write-Output "Waiting for network"
Start-Sleep -s 5
$X += 1
} until(($connectreult = Test-NetConnection $apilink[2] -Port 443 | ? { $_.TcpTestSucceeded }) -or $X -eq 3)
} until(($connectresult = Test-NetConnection $apilink[2] -Port 443 | ? { $_.TcpTestSucceeded }) -or $X -eq 3)
if ($connectreult.TcpTestSucceeded -eq $true){
if ($connectresult.TcpTestSucceeded -eq $true){
Try
{
Invoke-WebRequest -Uri $downloadlink -OutFile $OutPath\$output

View File

@@ -1,6 +1,5 @@
from django.core.management.base import BaseCommand
from agents.models import Agent
from logs.models import PendingAction
from scripts.models import Script
@@ -9,22 +8,6 @@ class Command(BaseCommand):
help = "Collection of tasks to run after updating the rmm, after migrations"
def handle(self, *args, **kwargs):
# 10-16-2020 changed the type of the agent's 'disks' model field
# from a dict of dicts, to a list of disks in the golang agent
# the following will convert dicts to lists for agent's still on the python agent
agents = Agent.objects.only("pk", "disks")
for agent in agents:
if agent.disks is not None and isinstance(agent.disks, dict):
new = []
for k, v in agent.disks.items():
new.append(v)
agent.disks = new
agent.save(update_fields=["disks"])
self.stdout.write(
self.style.SUCCESS(f"Migrated disks on {agent.hostname}")
)
# remove task pending actions. deprecated 4/20/2021
PendingAction.objects.filter(action_type="taskaction").delete()

View File

@@ -0,0 +1,18 @@
# Generated by Django 3.2.2 on 2021-05-14 04:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0022_urlaction'),
]
operations = [
migrations.AddField(
model_name='coresettings',
name='clear_faults_days',
field=models.IntegerField(default=0),
),
]

View File

@@ -0,0 +1,23 @@
# Generated by Django 3.2.1 on 2021-07-07 18:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0023_coresettings_clear_faults_days'),
]
operations = [
migrations.AddField(
model_name='coresettings',
name='agent_history_prune_days',
field=models.PositiveIntegerField(default=30),
),
migrations.AddField(
model_name='coresettings',
name='resolved_alerts_prune_days',
field=models.PositiveIntegerField(default=0),
),
]

View File

@@ -0,0 +1,28 @@
# Generated by Django 3.2.1 on 2021-07-07 18:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0024_auto_20210707_1828'),
]
operations = [
migrations.AddField(
model_name='coresettings',
name='agent_debug_level',
field=models.CharField(choices=[('info', 'Info'), ('warning', 'Warning'), ('error', 'Error'), ('critical', 'Critical')], default='info', max_length=20),
),
migrations.AddField(
model_name='coresettings',
name='debug_log_prune_days',
field=models.PositiveIntegerField(default=30),
),
migrations.AlterField(
model_name='coresettings',
name='agent_history_prune_days',
field=models.PositiveIntegerField(default=60),
),
]

View File

@@ -0,0 +1,18 @@
# Generated by Django 3.2.1 on 2021-07-21 17:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0025_auto_20210707_1835'),
]
operations = [
migrations.AddField(
model_name='coresettings',
name='audit_log_prune_days',
field=models.PositiveIntegerField(default=0),
),
]

View File

@@ -1,17 +1,15 @@
import smtplib
from email.message import EmailMessage
from django.db.models.enums import Choices
import pytz
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import ValidationError
from django.db import models
from loguru import logger
from twilio.rest import Client as TwClient
from logs.models import BaseAuditModel
logger.configure(**settings.LOG_CONFIG)
from logs.models import BaseAuditModel, DebugLog, LOG_LEVEL_CHOICES
TZ_CHOICES = [(_, _) for _ in pytz.all_timezones]
@@ -51,6 +49,14 @@ class CoreSettings(BaseAuditModel):
)
# removes check history older than days
check_history_prune_days = models.PositiveIntegerField(default=30)
resolved_alerts_prune_days = models.PositiveIntegerField(default=0)
agent_history_prune_days = models.PositiveIntegerField(default=60)
debug_log_prune_days = models.PositiveIntegerField(default=30)
audit_log_prune_days = models.PositiveIntegerField(default=0)
agent_debug_level = models.CharField(
max_length=20, choices=LOG_LEVEL_CHOICES, default="info"
)
clear_faults_days = models.IntegerField(default=0)
mesh_token = models.CharField(max_length=255, null=True, blank=True, default="")
mesh_username = models.CharField(max_length=255, null=True, blank=True, default="")
mesh_site = models.CharField(max_length=255, null=True, blank=True, default="")
@@ -183,14 +189,14 @@ class CoreSettings(BaseAuditModel):
server.quit()
except Exception as e:
logger.error(f"Sending email failed with error: {e}")
DebugLog.error(message=f"Sending email failed with error: {e}")
if test:
return str(e)
else:
return True
def send_sms(self, body, alert_template=None):
if not alert_template and not self.sms_is_configured:
if not alert_template or not self.sms_is_configured:
return
# override email recipients if alert_template is passed and is set
@@ -204,7 +210,7 @@ class CoreSettings(BaseAuditModel):
try:
tw_client.messages.create(body=body, to=num, from_=self.twilio_number)
except Exception as e:
logger.error(f"SMS failed to send: {e}")
DebugLog.error(message=f"SMS failed to send: {e}")
@staticmethod
def serialize(core):
@@ -264,6 +270,26 @@ class CustomField(models.Model):
else:
return self.default_value_string
def get_or_create_field_value(self, instance):
from agents.models import Agent, AgentCustomField
from clients.models import Client, ClientCustomField, Site, SiteCustomField
if isinstance(instance, Agent):
if AgentCustomField.objects.filter(field=self, agent=instance).exists():
return AgentCustomField.objects.get(field=self, agent=instance)
else:
return AgentCustomField.objects.create(field=self, agent=instance)
elif isinstance(instance, Client):
if ClientCustomField.objects.filter(field=self, client=instance).exists():
return ClientCustomField.objects.get(field=self, client=instance)
else:
return ClientCustomField.objects.create(field=self, client=instance)
elif isinstance(instance, Site):
if SiteCustomField.objects.filter(field=self, site=instance).exists():
return SiteCustomField.objects.get(field=self, site=instance)
else:
return SiteCustomField.objects.create(field=self, site=instance)
class CodeSignToken(models.Model):
token = models.CharField(max_length=255, null=True, blank=True)
@@ -286,6 +312,9 @@ class GlobalKVStore(models.Model):
return self.name
OPEN_ACTIONS = (("window", "New Window"), ("tab", "New Tab"))
class URLAction(models.Model):
name = models.CharField(max_length=25)
desc = models.CharField(max_length=100, null=True, blank=True)
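get_or_create_field_value above checks for an existing AgentCustomField, ClientCustomField, or SiteCustomField row and creates one if it is missing. A sketch of the agent branch using Django's built-in get_or_create, which performs the same lookup-or-insert in one call (the standalone helper name is an assumption, not repo code):

def get_or_create_agent_field_value(field, agent):
    from agents.models import AgentCustomField

    # get_or_create returns (instance, created); only the instance is needed here.
    value, _created = AgentCustomField.objects.get_or_create(field=field, agent=agent)
    return value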

View File

@@ -3,6 +3,11 @@ from rest_framework import permissions
from tacticalrmm.permissions import _has_perm
class ViewCoreSettingsPerms(permissions.BasePermission):
def has_permission(self, r, view):
return _has_perm(r, "can_view_core_settings")
class EditCoreSettingsPerms(permissions.BasePermission):
def has_permission(self, r, view):
return _has_perm(r, "can_edit_core_settings")

View File

@@ -1,16 +1,15 @@
import pytz
from django.conf import settings
from django.utils import timezone as djangotime
from loguru import logger
from autotasks.models import AutomatedTask
from autotasks.tasks import delete_win_task_schedule
from checks.tasks import prune_check_history
from agents.tasks import clear_faults_task, prune_agent_history
from alerts.tasks import prune_resolved_alerts
from core.models import CoreSettings
from logs.tasks import prune_debug_log, prune_audit_log
from tacticalrmm.celery import app
logger.configure(**settings.LOG_CONFIG)
@app.task
def core_maintenance_tasks():
@@ -28,6 +27,42 @@ def core_maintenance_tasks():
if now > task_time_utc:
delete_win_task_schedule.delay(task.pk)
core = CoreSettings.objects.first()
# remove old CheckHistory data
older_than = CoreSettings.objects.first().check_history_prune_days
prune_check_history.delay(older_than)
if core.check_history_prune_days > 0: # type: ignore
prune_check_history.delay(core.check_history_prune_days) # type: ignore
# remove old resolved alerts
if core.resolved_alerts_prune_days > 0: # type: ignore
prune_resolved_alerts.delay(core.resolved_alerts_prune_days) # type: ignore
# remove old agent history
if core.agent_history_prune_days > 0: # type: ignore
prune_agent_history.delay(core.agent_history_prune_days) # type: ignore
# remove old debug logs
if core.debug_log_prune_days > 0: # type: ignore
prune_debug_log.delay(core.debug_log_prune_days) # type: ignore
# remove old audit logs
if core.audit_log_prune_days > 0: # type: ignore
prune_audit_log.delay(core.audit_log_prune_days) # type: ignore
# clear faults
if core.clear_faults_days > 0: # type: ignore
clear_faults_task.delay(core.clear_faults_days) # type: ignore
@app.task
def cache_db_fields_task():
from agents.models import Agent
for agent in Agent.objects.all():
agent.pending_actions_count = agent.pendingactions.filter(
status="pending"
).count()
agent.has_patches_pending = (
agent.winupdates.filter(action="approve").filter(installed=False).exists()
)
agent.save(update_fields=["pending_actions_count", "has_patches_pending"])

View File

@@ -18,4 +18,5 @@ urlpatterns = [
path("urlaction/", views.GetAddURLAction.as_view()),
path("urlaction/<int:pk>/", views.UpdateDeleteURLAction.as_view()),
path("urlaction/run/", views.RunURLAction.as_view()),
path("smstest/", views.TwilioSMSTest.as_view()),
]

View File

@@ -1,4 +1,5 @@
import os
import pprint
import re
from django.conf import settings
@@ -15,7 +16,12 @@ from agents.permissions import MeshPerms
from tacticalrmm.utils import notify_error
from .models import CodeSignToken, CoreSettings, CustomField, GlobalKVStore, URLAction
from .permissions import CodeSignPerms, EditCoreSettingsPerms, ServerMaintPerms
from .permissions import (
CodeSignPerms,
ViewCoreSettingsPerms,
EditCoreSettingsPerms,
ServerMaintPerms,
)
from .serializers import (
CodeSignTokenSerializer,
CoreSettingsSerializer,
@@ -46,6 +52,7 @@ class UploadMeshAgent(APIView):
@api_view()
@permission_classes([IsAuthenticated, ViewCoreSettingsPerms])
def get_core_settings(request):
settings = CoreSettings.objects.first()
return Response(CoreSettingsSerializer(settings).data)
@@ -85,7 +92,8 @@ def dashboard_info(request):
"client_tree_sort": request.user.client_tree_sort,
"client_tree_splitter": request.user.client_tree_splitter,
"loading_bar_color": request.user.loading_bar_color,
"no_code_sign": hasattr(settings, "NOCODESIGN") and settings.NOCODESIGN,
"clear_search_when_switching": request.user.clear_search_when_switching,
"hosted": hasattr(settings, "HOSTED") and settings.HOSTED,
}
)
@@ -338,9 +346,18 @@ class RunURLAction(APIView):
from requests.utils import requote_uri
from agents.models import Agent
from clients.models import Client, Site
from tacticalrmm.utils import replace_db_values
agent = get_object_or_404(Agent, pk=request.data["agent"])
if "agent" in request.data.keys():
instance = get_object_or_404(Agent, pk=request.data["agent"])
elif "site" in request.data.keys():
instance = get_object_or_404(Site, pk=request.data["site"])
elif "client" in request.data.keys():
instance = get_object_or_404(Client, pk=request.data["client"])
else:
return notify_error("received an incorrect request")
action = get_object_or_404(URLAction, pk=request.data["action"])
pattern = re.compile("\\{\\{([\\w\\s]+\\.[\\w\\s]+)\\}\\}")
@@ -348,8 +365,31 @@ class RunURLAction(APIView):
url_pattern = action.pattern
for string in re.findall(pattern, action.pattern):
value = replace_db_values(string=string, agent=agent, quotes=False)
value = replace_db_values(string=string, instance=instance, quotes=False)
url_pattern = re.sub("\\{\\{" + string + "\\}\\}", str(value), url_pattern)
return Response(requote_uri(url_pattern))
class TwilioSMSTest(APIView):
def get(self, request):
from twilio.rest import Client as TwClient
core = CoreSettings.objects.first()
if not core.sms_is_configured:
return notify_error(
"All fields are required, including at least 1 recipient"
)
try:
tw_client = TwClient(core.twilio_account_sid, core.twilio_auth_token)
tw_client.messages.create(
body="TacticalRMM Test SMS",
to=core.sms_alert_recipients[0],
from_=core.twilio_number,
)
except Exception as e:
return notify_error(pprint.pformat(e))
return Response("SMS Test OK!")

View File

@@ -1,6 +1,7 @@
from django.contrib import admin
from .models import AuditLog, PendingAction
from .models import AuditLog, PendingAction, DebugLog
admin.site.register(PendingAction)
admin.site.register(AuditLog)
admin.site.register(DebugLog)

View File

@@ -0,0 +1,68 @@
# Generated by Django 3.2.1 on 2021-06-14 18:35
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
("logs", "0012_auto_20210228_0943"),
]
operations = [
migrations.AddField(
model_name="debuglog",
name="agent",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="debuglogs",
to="agents.agent",
),
),
migrations.AddField(
model_name="debuglog",
name="entry_time",
field=models.DateTimeField(
auto_now_add=True, default=django.utils.timezone.now
),
preserve_default=False,
),
migrations.AddField(
model_name="debuglog",
name="log_level",
field=models.CharField(
choices=[
("info", "Info"),
("warning", "Warning"),
("error", "Error"),
("critical", "Critical"),
],
default="info",
max_length=50,
),
),
migrations.AddField(
model_name="debuglog",
name="log_type",
field=models.CharField(
choices=[
("agent_update", "Agent Update"),
("agent_issues", "Agent Issues"),
("win_updates", "Windows Updates"),
("system_issues", "System Issues"),
("scripting", "Scripting"),
],
default="system_issues",
max_length=50,
),
),
migrations.AddField(
model_name="debuglog",
name="message",
field=models.TextField(blank=True, null=True),
),
]

View File

@@ -0,0 +1,18 @@
# Generated by Django 3.2.1 on 2021-06-28 02:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('logs', '0013_auto_20210614_1835'),
]
operations = [
migrations.AddField(
model_name='auditlog',
name='agent_id',
field=models.PositiveIntegerField(blank=True, null=True),
),
]

View File

@@ -0,0 +1,18 @@
# Generated by Django 3.2.1 on 2021-07-21 04:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('logs', '0014_auditlog_agent_id'),
]
operations = [
migrations.AlterField(
model_name='auditlog',
name='object_type',
field=models.CharField(choices=[('user', 'User'), ('script', 'Script'), ('agent', 'Agent'), ('policy', 'Policy'), ('winupdatepolicy', 'Patch Policy'), ('client', 'Client'), ('site', 'Site'), ('check', 'Check'), ('automatedtask', 'Automated Task'), ('coresettings', 'Core Settings'), ('bulk', 'Bulk'), ('alert_template', 'Alert Template'), ('role', 'Role')], max_length=100),
),
]

View File

@@ -0,0 +1,18 @@
# Generated by Django 3.2.1 on 2021-07-21 17:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('logs', '0015_alter_auditlog_object_type'),
]
operations = [
migrations.AlterField(
model_name='auditlog',
name='object_type',
field=models.CharField(choices=[('user', 'User'), ('script', 'Script'), ('agent', 'Agent'), ('policy', 'Policy'), ('winupdatepolicy', 'Patch Policy'), ('client', 'Client'), ('site', 'Site'), ('check', 'Check'), ('automatedtask', 'Automated Task'), ('coresettings', 'Core Settings'), ('bulk', 'Bulk'), ('alerttemplate', 'Alert Template'), ('role', 'Role')], max_length=100),
),
]

View File

@@ -0,0 +1,23 @@
# Generated by Django 3.2.1 on 2021-07-31 17:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('logs', '0016_alter_auditlog_object_type'),
]
operations = [
migrations.AddField(
model_name='pendingaction',
name='cancelable',
field=models.BooleanField(blank=True, default=False),
),
migrations.AlterField(
model_name='pendingaction',
name='action_type',
field=models.CharField(blank=True, choices=[('schedreboot', 'Scheduled Reboot'), ('taskaction', 'Scheduled Task Action'), ('agentupdate', 'Agent Update'), ('chocoinstall', 'Chocolatey Software Install'), ('runcmd', 'Run Command'), ('runscript', 'Run Script'), ('runpatchscan', 'Run Patch Scan'), ('runpatchinstall', 'Run Patch Install')], max_length=255, null=True),
),
]

View File

@@ -2,14 +2,24 @@ import datetime as dt
from abc import abstractmethod
from django.db import models
from tacticalrmm.middleware import get_debug_info, get_username
def get_debug_level():
from core.models import CoreSettings
return CoreSettings.objects.first().agent_debug_level # type: ignore
ACTION_TYPE_CHOICES = [
("schedreboot", "Scheduled Reboot"),
("taskaction", "Scheduled Task Action"), # deprecated
("agentupdate", "Agent Update"),
("chocoinstall", "Chocolatey Software Install"),
("runcmd", "Run Command"),
("runscript", "Run Script"),
("runpatchscan", "Run Patch Scan"),
("runpatchinstall", "Run Patch Install"),
]
AUDIT_ACTION_TYPE_CHOICES = [
@@ -40,6 +50,8 @@ AUDIT_OBJECT_TYPE_CHOICES = [
("automatedtask", "Automated Task"),
("coresettings", "Core Settings"),
("bulk", "Bulk"),
("alerttemplate", "Alert Template"),
("role", "Role"),
]
STATUS_CHOICES = [
@@ -51,6 +63,7 @@ STATUS_CHOICES = [
class AuditLog(models.Model):
username = models.CharField(max_length=100)
agent = models.CharField(max_length=255, null=True, blank=True)
agent_id = models.PositiveIntegerField(blank=True, null=True)
entry_time = models.DateTimeField(auto_now_add=True)
action = models.CharField(max_length=100, choices=AUDIT_ACTION_TYPE_CHOICES)
object_type = models.CharField(max_length=100, choices=AUDIT_OBJECT_TYPE_CHOICES)
@@ -73,24 +86,25 @@ class AuditLog(models.Model):
return super(AuditLog, self).save(*args, **kwargs)
@staticmethod
def audit_mesh_session(username, hostname, debug_info={}):
def audit_mesh_session(username, agent, debug_info={}):
AuditLog.objects.create(
username=username,
agent=hostname,
agent=agent.hostname,
agent_id=agent.id,
object_type="agent",
action="remote_session",
message=f"{username} used Mesh Central to initiate a remote session to {hostname}.",
message=f"{username} used Mesh Central to initiate a remote session to {agent.hostname}.",
debug_info=debug_info,
)
@staticmethod
def audit_raw_command(username, hostname, cmd, shell, debug_info={}):
def audit_raw_command(username, agent, cmd, shell, debug_info={}):
AuditLog.objects.create(
username=username,
agent=hostname,
agent=agent.hostname,
object_type="agent",
action="execute_command",
message=f"{username} issued {shell} command on {hostname}.",
message=f"{username} issued {shell} command on {agent.hostname}.",
after_value=cmd,
debug_info=debug_info,
)
@@ -102,6 +116,7 @@ class AuditLog(models.Model):
AuditLog.objects.create(
username=username,
object_type=object_type,
agent_id=before["id"] if object_type == "agent" else None,
action="modify",
message=f"{username} modified {object_type} {name}",
before_value=before,
@@ -114,6 +129,7 @@ class AuditLog(models.Model):
AuditLog.objects.create(
username=username,
object_type=object_type,
agent=after["id"] if object_type == "agent" else None,
action="add",
message=f"{username} added {object_type} {name}",
after_value=after,
@@ -125,6 +141,7 @@ class AuditLog(models.Model):
AuditLog.objects.create(
username=username,
object_type=object_type,
agent=before["id"] if object_type == "agent" else None,
action="delete",
message=f"{username} deleted {object_type} {name}",
before_value=before,
@@ -132,13 +149,14 @@ class AuditLog(models.Model):
)
@staticmethod
def audit_script_run(username, hostname, script, debug_info={}):
def audit_script_run(username, agent, script, debug_info={}):
AuditLog.objects.create(
agent=hostname,
agent=agent.hostname,
agent_id=agent.id,
username=username,
object_type="agent",
action="execute_script",
message=f'{username} ran script: "{script}" on {hostname}',
message=f'{username} ran script: "{script}" on {agent.hostname}',
debug_info=debug_info,
)
@@ -190,13 +208,13 @@ class AuditLog(models.Model):
site = Site.objects.get(pk=affected["site"])
target = f"on all agents within site: {site.client.name}\\{site.name}"
elif affected["target"] == "agents":
agents = Agent.objects.filter(pk__in=affected["agentPKs"]).values_list(
agents = Agent.objects.filter(pk__in=affected["agents"]).values_list(
"hostname", flat=True
)
target = "on multiple agents"
if action == "script":
script = Script.objects.get(pk=affected["scriptPK"])
script = Script.objects.get(pk=affected["script"])
action = f"script: {script.name}"
if agents:
@@ -212,8 +230,63 @@ class AuditLog(models.Model):
)
LOG_LEVEL_CHOICES = [
("info", "Info"),
("warning", "Warning"),
("error", "Error"),
("critical", "Critical"),
]
LOG_TYPE_CHOICES = [
("agent_update", "Agent Update"),
("agent_issues", "Agent Issues"),
("win_updates", "Windows Updates"),
("system_issues", "System Issues"),
("scripting", "Scripting"),
]
class DebugLog(models.Model):
pass
entry_time = models.DateTimeField(auto_now_add=True)
agent = models.ForeignKey(
"agents.Agent",
related_name="debuglogs",
on_delete=models.CASCADE,
null=True,
blank=True,
)
log_level = models.CharField(
max_length=50, choices=LOG_LEVEL_CHOICES, default="info"
)
log_type = models.CharField(
max_length=50, choices=LOG_TYPE_CHOICES, default="system_issues"
)
message = models.TextField(null=True, blank=True)
@classmethod
def info(
cls,
message,
agent=None,
log_type="system_issues",
):
if get_debug_level() in ["info"]:
cls(log_level="info", agent=agent, log_type=log_type, message=message)
@classmethod
def warning(cls, message, agent=None, log_type="system_issues"):
if get_debug_level() in ["info", "warning"]:
cls(log_level="warning", agent=agent, log_type=log_type, message=message)
@classmethod
def error(cls, message, agent=None, log_type="system_issues"):
if get_debug_level() in ["info", "warning", "error"]:
cls(log_level="error", agent=agent, log_type=log_type, message=message)
@classmethod
def critical(cls, message, agent=None, log_type="system_issues"):
if get_debug_level() in ["info", "warning", "error", "critical"]:
cls(log_level="critical", agent=agent, log_type=log_type, message=message)
class PendingAction(models.Model):
@@ -232,6 +305,7 @@ class PendingAction(models.Model):
choices=STATUS_CHOICES,
default="pending",
)
cancelable = models.BooleanField(blank=True, default=False)
celery_id = models.CharField(null=True, blank=True, max_length=255)
details = models.JSONField(null=True, blank=True)
@@ -247,6 +321,8 @@ class PendingAction(models.Model):
return "Next update cycle"
elif self.action_type == "chocoinstall":
return "ASAP"
else:
return "On next checkin"
@property
def description(self):
@@ -259,6 +335,14 @@ class PendingAction(models.Model):
elif self.action_type == "chocoinstall":
return f"{self.details['name']} software install"
elif self.action_type in [
"runcmd",
"runscript",
"runpatchscan",
"runpatchinstall",
]:
return f"{self.action_type}"
class BaseAuditModel(models.Model):
# abstract base class for auditing models
@@ -275,13 +359,14 @@ class BaseAuditModel(models.Model):
def serialize():
pass
def save(self, *args, **kwargs):
def save(self, old_model=None, *args, **kwargs):
if get_username():
before_value = {}
object_class = type(self)
object_name = object_class.__name__.lower()
username = get_username()
after_value = object_class.serialize(self) # type: ignore
# populate created_by and modified_by fields on instance
if not getattr(self, "created_by", None):
@@ -289,32 +374,37 @@ class BaseAuditModel(models.Model):
if hasattr(self, "modified_by"):
self.modified_by = username
# capture object properties before edit
if self.pk:
before_value = object_class.objects.get(pk=self.id)
# don't create entry for agent add since that is done in the view
if not self.pk:
AuditLog.audit_object_add(
username,
object_name,
object_class.serialize(self),
after_value, # type: ignore
self.__str__(),
debug_info=get_debug_info(),
)
else:
AuditLog.audit_object_changed(
username,
object_class.__name__.lower(),
object_class.serialize(before_value),
object_class.serialize(self),
self.__str__(),
debug_info=get_debug_info(),
)
return super(BaseAuditModel, self).save(*args, **kwargs)
if old_model:
before_value = object_class.serialize(old_model) # type: ignore
else:
before_value = object_class.serialize(object_class.objects.get(pk=self.pk)) # type: ignore
# only create an audit entry if the values have changed
if before_value != after_value: # type: ignore
AuditLog.audit_object_changed(
username,
object_class.__name__.lower(),
before_value,
after_value, # type: ignore
self.__str__(),
debug_info=get_debug_info(),
)
super(BaseAuditModel, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
super(BaseAuditModel, self).delete(*args, **kwargs)
if get_username():
@@ -322,9 +412,7 @@ class BaseAuditModel(models.Model):
AuditLog.audit_object_delete(
get_username(),
object_class.__name__.lower(),
object_class.serialize(self),
object_class.serialize(self), # type: ignore
self.__str__(),
debug_info=get_debug_info(),
)
return super(BaseAuditModel, self).delete(*args, **kwargs)
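The DebugLog classmethods above are what the rest of this diff calls in place of loguru; whether an entry is kept depends on the agent_debug_level stored in CoreSettings (an "error" level drops info and warning messages). A sketch of a typical call site, matching the signature used throughout the diff (the wrapper function itself is illustrative):

from logs.models import DebugLog

def report_task_failure(agent, task_name: str) -> None:
    # Recorded only when the configured agent_debug_level is "info" or "warning".
    DebugLog.warning(
        agent=agent,
        log_type="agent_issues",
        message=f"Unable to create scheduled task {task_name} on {agent.hostname}.",
    )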

View File

@@ -2,12 +2,12 @@ from rest_framework import serializers
from tacticalrmm.utils import get_default_timezone
from .models import AuditLog, PendingAction
from .models import AuditLog, DebugLog, PendingAction
class AuditLogSerializer(serializers.ModelSerializer):
entry_time = serializers.SerializerMethodField(read_only=True)
ip_address = serializers.ReadOnlyField(source="debug_info.ip")
class Meta:
model = AuditLog
@@ -19,7 +19,6 @@ class AuditLogSerializer(serializers.ModelSerializer):
class PendingActionSerializer(serializers.ModelSerializer):
hostname = serializers.ReadOnlyField(source="agent.hostname")
salt_id = serializers.ReadOnlyField(source="agent.salt_id")
client = serializers.ReadOnlyField(source="agent.client.name")
@@ -30,3 +29,16 @@ class PendingActionSerializer(serializers.ModelSerializer):
class Meta:
model = PendingAction
fields = "__all__"
class DebugLogSerializer(serializers.ModelSerializer):
agent = serializers.ReadOnlyField(source="agent.hostname")
entry_time = serializers.SerializerMethodField(read_only=True)
class Meta:
model = DebugLog
fields = "__all__"
def get_entry_time(self, log):
timezone = get_default_timezone()
return log.entry_time.astimezone(timezone).strftime("%m %d %Y %H:%M:%S")

View File

@@ -0,0 +1,25 @@
from django.utils import timezone as djangotime
from tacticalrmm.celery import app
@app.task
def prune_debug_log(older_than_days: int) -> str:
from .models import DebugLog
DebugLog.objects.filter(
entry_time__lt=djangotime.now() - djangotime.timedelta(days=older_than_days)
).delete()
return "ok"
@app.task
def prune_audit_log(older_than_days: int) -> str:
from .models import AuditLog
AuditLog.objects.filter(
entry_time__lt=djangotime.now() - djangotime.timedelta(days=older_than_days)
).delete()
return "ok"

View File

@@ -1,10 +1,11 @@
from datetime import datetime, timedelta
from itertools import cycle
from unittest.mock import patch
from django.utils import timezone as djangotime
from model_bakery import baker, seq
from tacticalrmm.test import TacticalTestCase
from logs.models import PendingAction
from tacticalrmm.test import TacticalTestCase
class TestAuditViews(TacticalTestCase):
@@ -16,20 +17,23 @@ class TestAuditViews(TacticalTestCase):
# create clients for client filter
site = baker.make("clients.Site")
baker.make_recipe("agents.agent", site=site, hostname="AgentHostname1")
agent1 = baker.make_recipe("agents.agent", site=site, hostname="AgentHostname1")
agent2 = baker.make_recipe("agents.agent", hostname="AgentHostname2")
agent0 = baker.make_recipe("agents.agent", hostname="AgentHostname")
# user jim agent logs
baker.make_recipe(
"logs.agent_logs",
username="jim",
agent="AgentHostname1",
entry_time=seq(datetime.now(), timedelta(days=3)),
agent_id=agent1.id,
_quantity=15,
)
baker.make_recipe(
"logs.agent_logs",
username="jim",
agent="AgentHostname2",
entry_time=seq(datetime.now(), timedelta(days=100)),
agent_id=agent2.id,
_quantity=8,
)
@@ -38,14 +42,14 @@ class TestAuditViews(TacticalTestCase):
"logs.agent_logs",
username="james",
agent="AgentHostname1",
entry_time=seq(datetime.now(), timedelta(days=55)),
agent_id=agent1.id,
_quantity=7,
)
baker.make_recipe(
"logs.agent_logs",
username="james",
agent="AgentHostname2",
entry_time=seq(datetime.now(), timedelta(days=20)),
agent_id=agent2.id,
_quantity=10,
)
@@ -53,7 +57,7 @@ class TestAuditViews(TacticalTestCase):
baker.make_recipe(
"logs.agent_logs",
agent=seq("AgentHostname"),
entry_time=seq(datetime.now(), timedelta(days=29)),
agent_id=seq(agent1.id),
_quantity=5,
)
@@ -61,7 +65,6 @@ class TestAuditViews(TacticalTestCase):
baker.make_recipe(
"logs.object_logs",
username="james",
entry_time=seq(datetime.now(), timedelta(days=5)),
_quantity=17,
)
@@ -69,7 +72,6 @@ class TestAuditViews(TacticalTestCase):
baker.make_recipe(
"logs.login_logs",
username="james",
entry_time=seq(datetime.now(), timedelta(days=7)),
_quantity=11,
)
@@ -77,51 +79,62 @@ class TestAuditViews(TacticalTestCase):
baker.make_recipe(
"logs.login_logs",
username="jim",
entry_time=seq(datetime.now(), timedelta(days=11)),
_quantity=13,
)
return site
return {"site": site, "agents": [agent0, agent1, agent2]}
def test_get_audit_logs(self):
url = "/logs/auditlogs/"
# create data
site = self.create_audit_records()
data = self.create_audit_records()
# test data and result counts
data = [
{"filter": {"timeFilter": 30}, "count": 86},
{
"filter": {"timeFilter": 45, "agentFilter": ["AgentHostname2"]},
"filter": {
"timeFilter": 45,
"agentFilter": [data["agents"][2].id],
},
"count": 19,
},
{
"filter": {"userFilter": ["jim"], "agentFilter": ["AgentHostname1"]},
"filter": {
"userFilter": ["jim"],
"agentFilter": [data["agents"][1].id],
},
"count": 15,
},
{
"filter": {
"timeFilter": 180,
"userFilter": ["james"],
"agentFilter": ["AgentHostname1"],
"agentFilter": [data["agents"][1].id],
},
"count": 7,
},
{"filter": {}, "count": 86},
{"filter": {"agentFilter": ["DoesntExist"]}, "count": 0},
{"filter": {"agentFilter": [500]}, "count": 0},
{
"filter": {
"timeFilter": 35,
"userFilter": ["james", "jim"],
"agentFilter": ["AgentHostname1", "AgentHostname2"],
"agentFilter": [
data["agents"][1].id,
data["agents"][2].id,
],
},
"count": 40,
},
{"filter": {"timeFilter": 35, "userFilter": ["james", "jim"]}, "count": 81},
{"filter": {"objectFilter": ["user"]}, "count": 26},
{"filter": {"actionFilter": ["login"]}, "count": 12},
{"filter": {"clientFilter": [site.client.id]}, "count": 23},
{
"filter": {"clientFilter": [data["site"].client.id]},
"count": 23,
},
]
pagination = {
@@ -137,45 +150,15 @@ class TestAuditViews(TacticalTestCase):
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
len(resp.data["audit_logs"]),
len(resp.data["audit_logs"]), # type:ignore
pagination["rowsPerPage"]
if req["count"] > pagination["rowsPerPage"]
else req["count"],
)
self.assertEqual(resp.data["total"], req["count"])
self.assertEqual(resp.data["total"], req["count"]) # type:ignore
self.check_not_authenticated("patch", url)
def test_options_filter(self):
url = "/logs/auditlogs/optionsfilter/"
baker.make_recipe("agents.agent", hostname=seq("AgentHostname"), _quantity=5)
baker.make_recipe("agents.agent", hostname=seq("Server"), _quantity=3)
baker.make("accounts.User", username=seq("Username"), _quantity=7)
baker.make("accounts.User", username=seq("soemthing"), _quantity=3)
data = [
{"req": {"type": "agent", "pattern": "AgeNt"}, "count": 5},
{"req": {"type": "agent", "pattern": "AgentHostname1"}, "count": 1},
{"req": {"type": "agent", "pattern": "hasjhd"}, "count": 0},
{"req": {"type": "user", "pattern": "UsEr"}, "count": 7},
{"req": {"type": "user", "pattern": "UserName1"}, "count": 1},
{"req": {"type": "user", "pattern": "dfdsadf"}, "count": 0},
]
for req in data:
resp = self.client.post(url, req["req"], format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data), req["count"])
# test for invalid payload. needs to have either type: user or agent
invalid_data = {"type": "object", "pattern": "SomeString"}
resp = self.client.post(url, invalid_data, format="json")
self.assertEqual(resp.status_code, 400)
self.check_not_authenticated("post", url)
def test_get_pending_actions(self):
url = "/logs/pendingactions/"
agent1 = baker.make_recipe("agents.online_agent")
@@ -270,3 +253,87 @@ class TestAuditViews(TacticalTestCase):
self.assertEqual(r.data, "error deleting sched task") # type: ignore
self.check_not_authenticated("delete", url)
def test_get_debug_log(self):
url = "/logs/debuglog/"
# create data
agent = baker.make_recipe("agents.agent")
baker.make(
"logs.DebugLog",
log_level=cycle(["error", "info", "warning", "critical"]),
log_type="agent_issues",
agent=agent,
_quantity=4,
)
logs = baker.make(
"logs.DebugLog",
log_type="system_issues",
log_level=cycle(["error", "info", "warning", "critical"]),
_quantity=15,
)
# test agent filter
data = {"agentFilter": agent.id}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data), 4) # type: ignore
# test log type filter and agent
data = {"agentFilter": agent.id, "logLevelFilter": "warning"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data), 1) # type: ignore
# test time filter with other
data = {"logTypeFilter": "system_issues", "logLevelFilter": "error"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data), 4) # type: ignore
self.check_not_authenticated("patch", url)
class TestLogTasks(TacticalTestCase):
def test_prune_debug_log(self):
from .models import DebugLog
from .tasks import prune_debug_log
# setup data
debug_log = baker.make(
"logs.DebugLog",
_quantity=50,
)
days = 0
for item in debug_log: # type:ignore
item.entry_time = djangotime.now() - djangotime.timedelta(days=days)
item.save()
days = days + 5
# delete DebugLog entries older than 30 days
prune_debug_log(30)
self.assertEqual(DebugLog.objects.count(), 6)
def test_prune_audit_log(self):
from .models import AuditLog
from .tasks import prune_audit_log
# setup data
audit_log = baker.make(
"logs.AuditLog",
_quantity=50,
)
days = 0
for item in audit_log: # type:ignore
item.entry_time = djangotime.now() - djangotime.timedelta(days=days)
item.save()
days = days + 5
# delete AuditLog entries older than 30 days
prune_audit_log(30)
self.assertEqual(AuditLog.objects.count(), 6)

View File

@@ -5,7 +5,5 @@ from . import views
urlpatterns = [
path("pendingactions/", views.PendingActions.as_view()),
path("auditlogs/", views.GetAuditLogs.as_view()),
path("auditlogs/optionsfilter/", views.FilterOptionsAuditLog.as_view()),
path("debuglog/<mode>/<hostname>/<order>/", views.debug_log),
path("downloadlog/", views.download_log),
path("debuglog/", views.GetDebugLog.as_view()),
]

View File

@@ -1,28 +1,23 @@
import asyncio
import subprocess
from datetime import datetime as dt
from django.conf import settings
from django.core.paginator import Paginator
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from accounts.models import User
from accounts.serializers import UserSerializer
from agents.models import Agent
from agents.serializers import AgentHostnameSerializer
from django.core.paginator import Paginator
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from tacticalrmm.utils import notify_error
from .models import AuditLog, PendingAction
from .models import AuditLog, PendingAction, DebugLog
from .permissions import AuditLogPerms, DebugLogPerms, ManagePendingActionPerms
from .serializers import AuditLogSerializer, PendingActionSerializer
from .serializers import AuditLogSerializer, DebugLogSerializer, PendingActionSerializer
class GetAuditLogs(APIView):
@@ -48,7 +43,7 @@ class GetAuditLogs(APIView):
timeFilter = Q()
if "agentFilter" in request.data:
agentFilter = Q(agent__in=request.data["agentFilter"])
agentFilter = Q(agent_id__in=request.data["agentFilter"])
elif "clientFilter" in request.data:
clients = Client.objects.filter(
@@ -95,23 +90,6 @@ class GetAuditLogs(APIView):
)
class FilterOptionsAuditLog(APIView):
permission_classes = [IsAuthenticated, AuditLogPerms]
def post(self, request):
if request.data["type"] == "agent":
agents = Agent.objects.filter(hostname__icontains=request.data["pattern"])
return Response(AgentHostnameSerializer(agents, many=True).data)
if request.data["type"] == "user":
users = User.objects.filter(
username__icontains=request.data["pattern"], agent=None
)
return Response(UserSerializer(users, many=True).data)
return Response("error", status=status.HTTP_400_BAD_REQUEST)
class PendingActions(APIView):
permission_classes = [IsAuthenticated, ManagePendingActionPerms]
@@ -156,60 +134,28 @@ class PendingActions(APIView):
return Response(f"{action.agent.hostname}: {action.description} was cancelled")
@api_view()
@permission_classes([IsAuthenticated, DebugLogPerms])
def debug_log(request, mode, hostname, order):
log_file = settings.LOG_CONFIG["handlers"][0]["sink"]
class GetDebugLog(APIView):
permission_classes = [IsAuthenticated, DebugLogPerms]
agents = Agent.objects.prefetch_related("site").only("pk", "hostname")
agent_hostnames = AgentHostnameSerializer(agents, many=True)
def patch(self, request):
switch_mode = {
"info": "INFO",
"critical": "CRITICAL",
"error": "ERROR",
"warning": "WARNING",
}
level = switch_mode.get(mode, "INFO")
agentFilter = Q()
logTypeFilter = Q()
logLevelFilter = Q()
if hostname == "all" and order == "latest":
cmd = f"grep -h {level} {log_file} | tac"
elif hostname == "all" and order == "oldest":
cmd = f"grep -h {level} {log_file}"
elif hostname != "all" and order == "latest":
cmd = f"grep {hostname} {log_file} | grep -h {level} | tac"
elif hostname != "all" and order == "oldest":
cmd = f"grep {hostname} {log_file} | grep -h {level}"
else:
return Response("error", status=status.HTTP_400_BAD_REQUEST)
if "logTypeFilter" in request.data:
logTypeFilter = Q(log_type=request.data["logTypeFilter"])
contents = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=True,
)
if "logLevelFilter" in request.data:
logLevelFilter = Q(log_level=request.data["logLevelFilter"])
if not contents.stdout:
resp = f"No {mode} logs"
else:
resp = contents.stdout
if "agentFilter" in request.data:
agentFilter = Q(agent=request.data["agentFilter"])
return Response({"log": resp, "agents": agent_hostnames.data})
debug_logs = (
DebugLog.objects.filter(logLevelFilter)
.filter(agentFilter)
.filter(logTypeFilter)
)
@api_view()
@permission_classes([IsAuthenticated, DebugLogPerms])
def download_log(request):
log_file = settings.LOG_CONFIG["handlers"][0]["sink"]
if settings.DEBUG:
with open(log_file, "rb") as f:
response = HttpResponse(f.read(), content_type="text/plain")
response["Content-Disposition"] = "attachment; filename=debug.log"
return response
else:
response = HttpResponse()
response["Content-Disposition"] = "attachment; filename=debug.log"
response["X-Accel-Redirect"] = "/private/log/debug.log"
return response
return Response(DebugLogSerializer(debug_logs, many=True).data)
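GetDebugLog replaces the old grep-over-logfile endpoint: a PATCH builds three optional Q filters (agent, log type, log level) and returns the matching DebugLog rows. A hedged example request using DRF's test client; the filter values are made up:

from rest_framework.test import APIClient

client = APIClient()
# client.force_authenticate(user=some_user)  # DebugLogPerms / auth assumed

resp = client.patch(
    "/logs/debuglog/",
    {"agentFilter": 1, "logTypeFilter": "agent_issues", "logLevelFilter": "error"},
    format="json",
)
print(resp.status_code, len(resp.data))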

View File

@@ -6,4 +6,6 @@ mkdocs-material
pymdown-extensions
Pygments
isort
mypy
mypy
types-pytz
types-pytz

View File

@@ -1,22 +1,23 @@
asgiref==3.3.4
asgiref==3.4.1
asyncio-nats-client==0.11.4
celery==5.0.5
certifi==2020.12.5
cffi==1.14.5
channels==3.0.3
celery==5.1.2
certifi==2021.5.30
cffi==1.14.6
channels==3.0.4
channels_redis==3.3.0
chardet==4.0.0
cryptography==3.4.7
cryptography==3.4.8
daphne==3.0.2
Django==3.2.2
django-cors-headers==3.7.0
Django==3.2.6
django-cors-headers==3.8.0
django-ipware==3.0.2
django-rest-knox==4.1.0
djangorestframework==3.12.4
future==0.18.2
kombu==5.0.2
loguru==0.5.3
msgpack==1.0.2
packaging==20.9
psycopg2-binary==2.8.6
packaging==21.0
psycopg2-binary==2.9.1
pycparser==2.20
pycryptodome==3.10.1
pyotp==2.6.0
@@ -24,13 +25,13 @@ pyparsing==2.4.7
pytz==2021.1
qrcode==6.1
redis==3.5.3
requests==2.25.1
six==1.15.0
requests==2.26.0
six==1.16.0
sqlparse==0.4.1
twilio==6.57.0
urllib3==1.26.4
twilio==6.63.1
urllib3==1.26.6
uWSGI==2.0.19.1
validators==0.18.2
vine==5.0.0
websockets==8.1
zipp==3.4.1
websockets==9.1
zipp==3.5.0

View File

@@ -6,7 +6,8 @@
"name": "Firefox - Clean Cache",
"description": "This script will clean up Mozilla Firefox for all users.",
"shell": "powershell",
"category": "TRMM (Win):Browsers"
"category": "TRMM (Win):Browsers",
"default_timeout": "300"
},
{
"guid": "3ff6a386-11d1-4f9d-8cca-1b0563bb6443",
@@ -15,7 +16,8 @@
"name": "Chrome - Clear Cache for All Users",
"description": "This script will clean up Google Chrome for all users.",
"shell": "powershell",
"category": "TRMM (Win):Browsers"
"category": "TRMM (Win):Browsers",
"default_timeout": "300"
},
{
"guid": "be1de837-f677-4ac5-aa0c-37a0fc9991fc",
@@ -24,7 +26,8 @@
"name": "Adobe Reader DC - Install",
"description": "Installs Adobe Reader DC.",
"shell": "powershell",
"category": "TRMM (Win):3rd Party Software>Chocolatey"
"category": "TRMM (Win):3rd Party Software>Chocolatey",
"default_timeout": "300"
},
{
"guid": "2ee134d5-76aa-4160-b334-a1efbc62079f",
@@ -33,7 +36,8 @@
"name": "Duplicati - Install",
"description": "This script installs Duplicati 2.0.5.1 as a service.",
"shell": "powershell",
"category": "TRMM (Win):3rd Party Software"
"category": "TRMM (Win):3rd Party Software",
"default_timeout": "300"
},
{
"guid": "81cc5bcb-01bf-4b0c-89b9-0ac0f3fe0c04",
@@ -42,7 +46,8 @@
"name": "Windows Update - Reset",
"description": "This script will reset all of the Windows Updates components to DEFAULT SETTINGS.",
"shell": "powershell",
"category": "TRMM (Win):Updates"
"category": "TRMM (Win):Updates",
"default_timeout": "300"
},
{
"guid": "8db87ff0-a9b4-4d9d-bc55-377bbcb85b6d",
@@ -51,7 +56,8 @@
"name": "Disk - Cleanup C: drive",
"description": "Cleans the C: drive's Window Temperary files, Windows SoftwareDistribution folder, the local users Temperary folder, IIS logs (if applicable) and empties the recycling bin. All deleted files will go into a log transcript in $env:TEMP. By default this script leaves files that are newer than 7 days old however this variable can be edited.",
"shell": "powershell",
"category": "TRMM (Win):Maintenance"
"category": "TRMM (Win):Maintenance",
"default_timeout": "25000"
},
{
"guid": "2f28e8c1-ae0f-4b46-a826-f513974526a3",
@@ -78,7 +84,8 @@
"name": "Speed Test - Python",
"description": "Runs a Speed Test using Python",
"shell": "python",
"category": "TRMM (Win):Network"
"category": "TRMM (Win):Network",
"default_timeout": "120"
},
{
"guid": "9d34f482-1f0c-4b2f-b65f-a9cf3c13ef5f",
@@ -152,6 +159,45 @@
"shell": "powershell",
"category": "TRMM (Win):Hardware"
},
{
"guid": "72c56717-28ed-4cc6-b30f-b362d30fb4b6",
"filename": "Win_Hardware_SN.ps1",
"submittedBy": "https://github.com/subzdev",
"name": "Hardware - Get Serial Number",
"description": "Returns BIOS Serial Number - Use with Custom Fields for later use",
"shell": "powershell",
"category": "TRMM (Win):Collectors"
},
{
"guid": "973c34d7-cab0-4fda-999c-b4933655f946",
"filename": "Win_Screenconnect_GetGUID.ps1",
"submittedBy": "https://github.com/silversword411",
"name": "Screenconnect - Get GUID for client",
"description": "Returns Screenconnect GUID for client - Use with Custom Fields for later use. ",
"args": [
"{{client.ScreenConnectService}}"
],
"shell": "powershell",
"category": "TRMM (Win):Collectors"
},
{
"guid": "9cfdfe8f-82bf-4081-a59f-576d694f4649",
"filename": "Win_Teamviewer_Get_ID.ps1",
"submittedBy": "https://github.com/silversword411",
"name": "TeamViewer - Get ClientID for client",
"description": "Returns Teamviwer ClientID for client - Use with Custom Fields for later use. ",
"shell": "powershell",
"category": "TRMM (Win):Collectors"
},
{
"guid": "e43081d4-6f71-4ce3-881a-22da749f7a57",
"filename": "Win_AnyDesk_Get_Anynet_ID.ps1",
"submittedBy": "https://github.com/meuchels",
"name": "AnyDesk - Get AnyNetID for client",
"description": "Returns AnyNetID for client - Use with Custom Fields for later use. ",
"shell": "powershell",
"category": "TRMM (Win):Collectors"
},
{
"guid": "95a2ee6f-b89b-4551-856e-3081b041caa7",
"filename": "Win_Power_Profile_Reset_High_Performance_to_Defaults.ps1",
@@ -177,7 +223,8 @@
"name": "Windows 10 Upgrade",
"description": "Forces an upgrade to the latest release of Windows 10.",
"shell": "powershell",
"category": "TRMM (Win):Updates"
"category": "TRMM (Win):Updates",
"default_timeout": "25000"
},
{
"guid": "375323e5-cac6-4f35-a304-bb7cef35902d",
@@ -197,6 +244,30 @@
"shell": "powershell",
"category": "TRMM (Win):3rd Party Software"
},
{
"guid": "907652a5-9ec1-4759-9871-a7743f805ff2",
"filename": "Win_Software_Uninstall.ps1",
"submittedBy": "https://github.com/subzdev",
"name": "Software Uninstaller - list, find, and uninstall most software",
"description": "Allows listing, finding and uninstalling most software on Windows. There will be a best effort to uninstall silently if the silent uninstall string is not provided.",
"shell": "powershell",
"category": "TRMM (Win):3rd Party Software",
"default_timeout": "600"
},
{
"guid": "64c3b1a8-c85f-4800-85a3-485f78a2d9ad",
"filename": "Win_Bitdefender_GravityZone_Install.ps1",
"submittedBy": "https://github.com/jhtechIL/",
"name": "BitDefender Gravity Zone Install",
"description": "Installs BitDefender Gravity Zone, requires client custom field setup. See script comments for details",
"args": [
"-url {{client.bdurl}}",
"-exe {{client.bdexe}}"
],
"default_timeout": "2500",
"shell": "powershell",
"category": "TRMM (Win):3rd Party Software"
},
{
"guid": "da51111c-aff6-4d87-9d76-0608e1f67fe5",
"filename": "Win_Defender_Enable.ps1",
@@ -213,7 +284,8 @@
"name": "SSH - Install Feature and Enable",
"description": "Installs and enabled OpenSSH Server Feature in Win10",
"shell": "powershell",
"category": "TRMM (Win):Windows Features"
"category": "TRMM (Win):Windows Features",
"default_timeout": "300"
},
{
"guid": "2435297a-6263-4e90-8688-1847400d0e22",
@@ -224,6 +296,16 @@
"shell": "cmd",
"category": "TRMM (Win):Windows Features"
},
{
"guid": "0afd8d00-b95b-4318-8d07-0b9bc4424287",
"filename": "Win_Feature_NET35_Enable.ps1",
"submittedBy": "https://github.com/silversword411",
"name": "Windows Feature - Enable .NET 3.5",
"description": "Enables the Windows .NET 3.5 Framework in Turn Features on and off",
"shell": "powershell",
"default_timeout": "300",
"category": "TRMM (Win):Windows Features"
},
{
"guid": "24f19ead-fdfe-46b4-9dcb-4cd0e12a3940",
"filename": "Win_Speedtest.ps1",
@@ -242,6 +324,20 @@
"shell": "cmd",
"category": "TRMM (Win):Active Directory"
},
{
"guid": "5320dfc8-022a-41e7-9e39-11c493545ec9",
"filename": "Win_AD_Hudu_ADDS_Documentation.ps1",
"submittedBy": "https://github.com/unplugged216",
"name": "ADDS - Directory documentation in Hudu",
"description": "Auto generates ADDS documentation and submits it to your Hudu instance.",
"args": [
"-ClientName {{client.name}}",
"-HuduBaseDomain {{global.HuduBaseDomain}}",
"-HuduApiKey {{global.HuduApiKey}}"
],
"shell": "powershell",
"category": "TRMM (Win):Active Directory"
},
{
"guid": "b6b9912f-4274-4162-99cc-9fd47fbcb292",
"filename": "Win_ADDC_Sync_Start.bat",
@@ -324,13 +420,14 @@
"category": "TRMM (Win):Other"
},
{
"guid": "5615aa90-0272-427b-8acf-0ca019612501",
"filename": "Win_Chocolatey_Update_Installed.bat",
"guid": "6c78eb04-57ae-43b0-98ed-cbd3ef9e2f80",
"filename": "Win_Chocolatey_Manage_Apps_Bulk.ps1",
"submittedBy": "https://github.com/silversword411",
"name": "Update Installed Apps",
"description": "Update all apps that were installed using Chocolatey.",
"shell": "cmd",
"category": "TRMM (Win):3rd Party Software>Chocolatey"
"name": "Chocolatey - Install, Uninstall and Upgrade Software",
"description": "This script installs, uninstalls and updates software using Chocolatey with logic to slow tasks to minimize hitting community limits. Mode install/uninstall/upgrade Hosts x",
"shell": "powershell",
"category": "TRMM (Win):3rd Party Software>Chocolatey",
"default_timeout": "600"
},
{
"guid": "fff8024d-d72e-4457-84fa-6c780f69a16f",
@@ -341,6 +438,15 @@
"shell": "powershell",
"category": "TRMM (Win):Active Directory"
},
{
"guid": "3afd07c0-04fd-4b23-b5f2-88205c0744d4",
"filename": "Win_User_Admins_Local_Disable.ps1",
"submittedBy": "https://github.com/dinger1986",
"name": "Local Administrators - Disables all local admins if joined to domain or AzureAD",
"description": "Checks to see if computer is either joined to a AD domain or Azure AD. If it is, it disables all local admin accounts. If not joined to domain/AzureAD, leaves admin accounts in place",
"shell": "powershell",
"category": "TRMM (Win):User Management"
},
{
"guid": "71090fc4-faa6-460b-adb0-95d7863544e1",
"filename": "Win_Check_Events_for_Bluescreens.ps1",
@@ -396,6 +502,16 @@
"shell": "powershell",
"category": "TRMM (Win):Updates"
},
{
"guid": "93038ae0-58ce-433e-a3b9-bc99ad1ea79a",
"filename": "Win_Services_AutomaticStartup_Running.ps1",
"submittedBy": "https://github.com/silversword411",
"name": "Ensure all services with startup type Automatic are running",
"description": "Gets a list of all service with startup type of Automatic but aren't running and tries to start them",
"shell": "powershell",
"default_timeout": "300",
"category": "TRMM (Win):Updates"
},
{
"guid": "e09895d5-ca13-44a2-a38c-6e77c740f0e8",
"filename": "Win_ScreenConnectAIO.ps1",
@@ -453,6 +569,16 @@
"category": "TRMM (Win):Network",
"default_timeout": "90"
},
{
"guid": "7c0c7e37-60ff-462f-9c34-b5cd4c4796a7",
"filename": "Win_Wifi_SSID_and_Password_Retrieval.ps1",
"submittedBy": "https://github.com/silversword411",
"name": "Network Wireless - Retrieve Saved passwords",
"description": "Returns all saved wifi passwords stored on the computer",
"shell": "powershell",
"category": "TRMM (Win):Network",
"default_timeout": "90"
},
{
"guid": "abe78170-7cf9-435b-9666-c5ef6c11a106",
"filename": "Win_Network_IPv6_Disable.ps1",
@@ -473,6 +599,16 @@
"category": "TRMM (Win):Network",
"default_timeout": "90"
},
{
"guid": "5676acca-44e5-46c8-af61-ae795ecb3ef1",
"filename": "Win_Network_IP_DHCP_Renew.bat",
"submittedBy": "https://github.com/silversword411",
"name": "Network - Release and Renew IP",
"description": "Trigger and release and renew of IP address on all network adapters",
"shell": "cmd",
"category": "TRMM (Win):Network",
"default_timeout": "90"
},
{
"guid": "83aa4d51-63ce-41e7-829f-3c16e6115bbf",
"filename": "Win_Network_DNS_Set_to_1.1.1.2.ps1",
@@ -503,6 +639,16 @@
"category": "TRMM (Win):Other",
"default_timeout": "90"
},
{
"guid": "43e65e5f-717a-4b6d-a724-1a86229fcd42",
"filename": "Win_Activation_Check.ps1",
"submittedBy": "https://github.com/dinger1986",
"name": "Windows Activation check",
"description": "Checks to see if windows is activated and returns status",
"shell": "powershell",
"category": "TRMM (Win):Other",
"default_timeout": "120"
},
{
"guid": "83f6c6ea-6120-4fd3-bec8-d3abc505dcdf",
"filename": "Win_TRMM_Start_Menu_Delete_Shortcut.ps1",
@@ -585,9 +731,18 @@
"shell": "powershell",
"category": "TRMM (Win):Storage"
},
{
"guid": "6a52f495-d43e-40f4-91a9-bbe4f578e6d1",
"filename": "Win_User_Create.ps1",
"submittedBy": "https://github.com/brodur",
"name": "Create Local User",
"description": "Create a local user. Parameters are: username, password and optional: description, fullname, group (adds to Users if not specified)",
"shell": "powershell",
"category": "TRMM (Win):Other"
},
{
"guid": "57997ec7-b293-4fd5-9f90-a25426d0eb90",
"filename": "Win_Get_Computer_Users.ps1",
"filename": "Win_Users_List.ps1",
"submittedBy": "https://github.com/tremor021",
"name": "Get Computer Users",
"description": "Get list of computer users and show which one is enabled",
@@ -641,5 +796,25 @@
"shell": "powershell",
"category": "TRMM (Win):Misc>Reference",
"default_timeout": "1"
},
{
"guid": "453c6d22-84b7-4767-8b5f-b825f233cf55",
"filename": "Win_AD_Join_Computer.ps1",
"submittedBy": "https://github.com/rfost52",
"name": "AD - Join Computer to Domain",
"description": "Join computer to a domain in Active Directory",
"shell": "powershell",
"category": "TRMM (Win):Active Directory",
"default_timeout": "300"
},
{
"guid": "962d3cce-49a2-4f3e-a790-36f62a6799a0",
"filename": "Win_Collect_System_Report_And_Email.ps1",
"submittedBy": "https://github.com/rfost52",
"name": "Collect System Report and Email",
"description": "Generates a system report in HTML format, then emails it",
"shell": "powershell",
"category": "TRMM (Win):Other",
"default_timeout": "300"
}
]

View File

@@ -0,0 +1,22 @@
# Generated by Django 3.2.1 on 2021-07-21 19:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scripts', '0008_script_guid'),
]
operations = [
migrations.CreateModel(
name='ScriptSnippet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
('code', models.TextField()),
('shell', models.CharField(choices=[('powershell', 'Powershell'), ('cmd', 'Batch (CMD)'), ('python', 'Python')], max_length=15)),
],
),
]

View File

@@ -0,0 +1,33 @@
# Generated by Django 3.2.1 on 2021-07-26 16:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scripts', '0009_scriptsnippet'),
]
operations = [
migrations.AddField(
model_name='scriptsnippet',
name='desc',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='script',
name='code_base64',
field=models.TextField(blank=True, default='', null=True),
),
migrations.AlterField(
model_name='script',
name='description',
field=models.TextField(blank=True, default='', null=True),
),
migrations.AlterField(
model_name='scriptsnippet',
name='name',
field=models.CharField(max_length=40, unique=True),
),
]

View File

@@ -0,0 +1,28 @@
# Generated by Django 3.2.1 on 2021-07-31 17:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scripts', '0010_auto_20210726_1634'),
]
operations = [
migrations.AlterField(
model_name='scriptsnippet',
name='code',
field=models.TextField(default=''),
),
migrations.AlterField(
model_name='scriptsnippet',
name='desc',
field=models.CharField(blank=True, default='', max_length=50),
),
migrations.AlterField(
model_name='scriptsnippet',
name='shell',
field=models.CharField(choices=[('powershell', 'Powershell'), ('cmd', 'Batch (CMD)'), ('python', 'Python')], default='powershell', max_length=15),
),
]

View File

@@ -1,12 +1,10 @@
import base64
import re
from typing import List, Optional
from typing import List
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.db import models
from loguru import logger
from django.db.models.fields import CharField, TextField
from logs.models import BaseAuditModel
from tacticalrmm.utils import replace_db_values
@@ -21,13 +19,11 @@ SCRIPT_TYPES = [
("builtin", "Built In"),
]
logger.configure(**settings.LOG_CONFIG)
class Script(BaseAuditModel):
guid = name = models.CharField(max_length=64, null=True, blank=True)
guid = models.CharField(max_length=64, null=True, blank=True)
name = models.CharField(max_length=255)
description = models.TextField(null=True, blank=True)
description = models.TextField(null=True, blank=True, default="")
filename = models.CharField(max_length=255) # deprecated
shell = models.CharField(
max_length=100, choices=SCRIPT_SHELLS, default="powershell"
@@ -43,20 +39,44 @@ class Script(BaseAuditModel):
)
favorite = models.BooleanField(default=False)
category = models.CharField(max_length=100, null=True, blank=True)
code_base64 = models.TextField(null=True, blank=True)
code_base64 = models.TextField(null=True, blank=True, default="")
default_timeout = models.PositiveIntegerField(default=90)
def __str__(self):
return self.name
@property
def code(self):
def code_no_snippets(self):
if self.code_base64:
base64_bytes = self.code_base64.encode("ascii", "ignore")
return base64.b64decode(base64_bytes).decode("ascii", "ignore")
return base64.b64decode(self.code_base64.encode("ascii", "ignore")).decode(
"ascii", "ignore"
)
else:
return ""
@property
def code(self):
return self.replace_with_snippets(self.code_no_snippets)
@classmethod
def replace_with_snippets(cls, code):
# check if snippet has been added to script body
matches = re.finditer(r"{{(.*)}}", code)
if matches:
replaced_code = code
for snippet in matches:
snippet_name = snippet.group(1).strip()
if ScriptSnippet.objects.filter(name=snippet_name).exists():
value = ScriptSnippet.objects.get(name=snippet_name).code
else:
value = ""
replaced_code = re.sub(snippet.group(), value, replaced_code)
return replaced_code
else:
return code
@classmethod
def load_community_scripts(cls):
import json
@@ -97,20 +117,20 @@ class Script(BaseAuditModel):
if s.exists():
i = s.first()
i.name = script["name"]
i.description = script["description"]
i.category = category
i.shell = script["shell"]
i.default_timeout = default_timeout
i.args = args
i.name = script["name"] # type: ignore
i.description = script["description"] # type: ignore
i.category = category # type: ignore
i.shell = script["shell"] # type: ignore
i.default_timeout = default_timeout # type: ignore
i.args = args # type: ignore
with open(os.path.join(scripts_dir, script["filename"]), "rb") as f:
script_bytes = (
f.read().decode("utf-8").encode("ascii", "ignore")
)
i.code_base64 = base64.b64encode(script_bytes).decode("ascii")
i.code_base64 = base64.b64encode(script_bytes).decode("ascii") # type: ignore
i.save(
i.save( # type: ignore
update_fields=[
"name",
"description",
@@ -175,7 +195,6 @@ class Script(BaseAuditModel):
guid=script["guid"],
name=script["name"],
description=script["description"],
filename=script["filename"],
shell=script["shell"],
script_type="builtin",
category=category,
@@ -209,7 +228,7 @@ class Script(BaseAuditModel):
if match:
# only get the match between the () in regex
string = match.group(1)
value = replace_db_values(string=string, agent=agent, shell=shell)
value = replace_db_values(string=string, instance=agent, shell=shell)
if value:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
@@ -221,3 +240,13 @@ class Script(BaseAuditModel):
temp_args.append(arg)
return temp_args
class ScriptSnippet(models.Model):
name = CharField(max_length=40, unique=True)
desc = CharField(max_length=50, blank=True, default="")
code = TextField(default="")
shell = CharField(max_length=15, choices=SCRIPT_SHELLS, default="powershell")
def __str__(self):
return self.name
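replace_with_snippets() scans the decoded script body for {{name}} tokens and substitutes the matching ScriptSnippet's code, falling back to an empty string when no snippet by that name exists. A short sketch, assuming a snippet called "header" has been created; the values are illustrative:

from scripts.models import Script, ScriptSnippet

ScriptSnippet.objects.create(
    name="header", shell="powershell", code="$ErrorActionPreference = 'Stop'"
)
body = "{{header}}\nWrite-Output 'hello'"
print(Script.replace_with_snippets(body))
# $ErrorActionPreference = 'Stop'
# Write-Output 'hello'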

View File

@@ -1,6 +1,6 @@
from rest_framework.serializers import ModelSerializer, ReadOnlyField
from .models import Script
from .models import Script, ScriptSnippet
class ScriptTableSerializer(ModelSerializer):
@@ -41,3 +41,9 @@ class ScriptCheckSerializer(ModelSerializer):
class Meta:
model = Script
fields = ["code", "shell"]
class ScriptSnippetSerializer(ModelSerializer):
class Meta:
model = ScriptSnippet
fields = "__all__"

View File

@@ -1,12 +1,16 @@
import asyncio
from agents.models import Agent
from packaging import version as pyver
from agents.models import Agent, AgentHistory
from scripts.models import Script
from tacticalrmm.celery import app
@app.task
def handle_bulk_command_task(agentpks, cmd, shell, timeout) -> None:
def handle_bulk_command_task(
agentpks, cmd, shell, timeout, username, run_on_offline=False
) -> None:
nats_data = {
"func": "rawcmd",
"timeout": timeout,
@@ -16,20 +20,31 @@ def handle_bulk_command_task(agentpks, cmd, shell, timeout) -> None:
},
}
for agent in Agent.objects.filter(pk__in=agentpks):
if pyver.parse(agent.version) >= pyver.parse("1.6.0"):
hist = AgentHistory.objects.create(
agent=agent,
type="cmd_run",
command=cmd,
username=username,
)
nats_data["id"] = hist.pk
asyncio.run(agent.nats_cmd(nats_data, wait=False))
@app.task
def handle_bulk_script_task(scriptpk, agentpks, args, timeout) -> None:
def handle_bulk_script_task(scriptpk, agentpks, args, timeout, username) -> None:
script = Script.objects.get(pk=scriptpk)
nats_data = {
"func": "runscript",
"timeout": timeout,
"script_args": args,
"payload": {
"code": script.code,
"shell": script.shell,
},
}
for agent in Agent.objects.filter(pk__in=agentpks):
asyncio.run(agent.nats_cmd(nats_data, wait=False))
history_pk = 0
if pyver.parse(agent.version) >= pyver.parse("1.6.0"):
hist = AgentHistory.objects.create(
agent=agent,
type="script_run",
script=script,
username=username,
)
history_pk = hist.pk
agent.run_script(
scriptpk=script.pk, args=args, timeout=timeout, history_pk=history_pk
)
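Both bulk tasks now thread the requesting username through and, for agents on 1.6.0 or newer, record an AgentHistory row before dispatching. A hedged example of queueing them, assuming they live in scripts.tasks; the primary keys and username are placeholders:

from scripts.tasks import handle_bulk_command_task, handle_bulk_script_task

handle_bulk_command_task.delay(
    [1, 2, 3], "ipconfig /flushdns", "cmd", 30, "admin@example.com"
)
handle_bulk_script_task.delay(5, [1, 2, 3], ["-Verbose"], 300, "admin@example.com")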

View File

@@ -1,15 +1,18 @@
import json
import os
from pathlib import Path
from unittest.mock import patch
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from model_bakery import baker
from tacticalrmm.test import TacticalTestCase
from .models import Script
from .serializers import ScriptSerializer, ScriptTableSerializer
from .models import Script, ScriptSnippet
from .serializers import (
ScriptSerializer,
ScriptTableSerializer,
ScriptSnippetSerializer,
)
class TestScriptViews(TacticalTestCase):
@@ -18,7 +21,7 @@ class TestScriptViews(TacticalTestCase):
self.authenticate()
def test_get_scripts(self):
url = "/scripts/scripts/"
url = "/scripts/"
scripts = baker.make("scripts.Script", _quantity=3)
serializer = ScriptTableSerializer(scripts, many=True)
@@ -29,14 +32,14 @@ class TestScriptViews(TacticalTestCase):
self.check_not_authenticated("get", url)
def test_add_script(self):
url = f"/scripts/scripts/"
url = f"/scripts/"
data = {
"name": "Name",
"description": "Description",
"shell": "powershell",
"category": "New",
"code": "Some Test Code\nnew Line",
"code_base64": "VGVzdA==", # Test
"default_timeout": 99,
"args": ["hello", "world", r"{{agent.public_ip}}"],
"favorite": False,
@@ -46,47 +49,24 @@ class TestScriptViews(TacticalTestCase):
resp = self.client.post(url, data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertTrue(Script.objects.filter(name="Name").exists())
self.assertEqual(Script.objects.get(name="Name").code, data["code"])
# test with file upload
# file with 'Test' as content
file = SimpleUploadedFile(
"test_script.bat", b"\x54\x65\x73\x74", content_type="text/plain"
)
data = {
"name": "New Name",
"description": "Description",
"shell": "cmd",
"category": "New",
"filename": file,
"default_timeout": 4455,
"args": json.dumps(
["hello", "world", r"{{agent.public_ip}}"]
), # simulate javascript's JSON.stringify() for formData
}
# test with file upload
resp = self.client.post(url, data, format="multipart")
self.assertEqual(resp.status_code, 200)
script = Script.objects.filter(name="New Name").first()
self.assertEquals(script.code, "Test")
self.assertEqual(Script.objects.get(name="Name").code, "Test")
self.check_not_authenticated("post", url)
def test_modify_script(self):
# test a call where script doesn't exist
resp = self.client.put("/scripts/500/script/", format="json")
resp = self.client.put("/scripts/500/", format="json")
self.assertEqual(resp.status_code, 404)
# make a userdefined script
script = baker.make_recipe("scripts.script")
url = f"/scripts/{script.pk}/script/"
url = f"/scripts/{script.pk}/"
data = {
"name": script.name,
"description": "Description Change",
"shell": script.shell,
"code": "Test Code\nAnother Line",
"code_base64": "VGVzdA==", # Test
"default_timeout": 13344556,
}
@@ -95,16 +75,18 @@ class TestScriptViews(TacticalTestCase):
self.assertEqual(resp.status_code, 200)
script = Script.objects.get(pk=script.pk)
self.assertEquals(script.description, "Description Change")
self.assertEquals(script.code, "Test Code\nAnother Line")
self.assertEquals(script.code, "Test")
# test edit a builtin script
data = {"name": "New Name", "description": "New Desc", "code": "Some New Code"}
data = {
"name": "New Name",
"description": "New Desc",
"code_base64": "VGVzdA==",
} # Test
builtin_script = baker.make_recipe("scripts.script", script_type="builtin")
resp = self.client.put(
f"/scripts/{builtin_script.pk}/script/", data, format="json"
)
resp = self.client.put(f"/scripts/{builtin_script.pk}/", data, format="json")
self.assertEqual(resp.status_code, 400)
data = {
@@ -112,13 +94,11 @@ class TestScriptViews(TacticalTestCase):
"description": "Description Change",
"shell": script.shell,
"favorite": True,
"code": "Test Code\nAnother Line",
"code_base64": "VGVzdA==", # Test
"default_timeout": 54345,
}
# test marking a builtin script as favorite
resp = self.client.put(
f"/scripts/{builtin_script.pk}/script/", data, format="json"
)
resp = self.client.put(f"/scripts/{builtin_script.pk}/", data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertTrue(Script.objects.get(pk=builtin_script.pk).favorite)
@@ -126,11 +106,11 @@ class TestScriptViews(TacticalTestCase):
def test_get_script(self):
# test a call where script doesn't exist
resp = self.client.get("/scripts/500/script/", format="json")
resp = self.client.get("/scripts/500/", format="json")
self.assertEqual(resp.status_code, 404)
script = baker.make("scripts.Script")
url = f"/scripts/{script.pk}/script/" # type: ignore
url = f"/scripts/{script.pk}/" # type: ignore
serializer = ScriptSerializer(script)
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
@@ -138,14 +118,34 @@ class TestScriptViews(TacticalTestCase):
self.check_not_authenticated("get", url)
@patch("agents.models.Agent.nats_cmd")
def test_test_script(self, run_script):
url = "/scripts/testscript/"
run_script.return_value = "return value"
agent = baker.make_recipe("agents.agent")
data = {
"agent": agent.pk,
"code": "some_code",
"timeout": 90,
"args": [],
"shell": "powershell",
}
resp = self.client.post(url, data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, "return value") # type: ignore
self.check_not_authenticated("post", url)
def test_delete_script(self):
# test a call where script doesn't exist
resp = self.client.delete("/scripts/500/script/", format="json")
resp = self.client.delete("/scripts/500/", format="json")
self.assertEqual(resp.status_code, 404)
# test delete script
script = baker.make_recipe("scripts.script")
url = f"/scripts/{script.pk}/script/"
url = f"/scripts/{script.pk}/"
resp = self.client.delete(url, format="json")
self.assertEqual(resp.status_code, 200)
@@ -153,7 +153,7 @@ class TestScriptViews(TacticalTestCase):
# test delete community script
script = baker.make_recipe("scripts.script", script_type="builtin")
url = f"/scripts/{script.pk}/script/"
url = f"/scripts/{script.pk}/"
resp = self.client.delete(url, format="json")
self.assertEqual(resp.status_code, 400)
@@ -161,7 +161,7 @@ class TestScriptViews(TacticalTestCase):
def test_download_script(self):
# test a call where script doesn't exist
resp = self.client.get("/scripts/500/download/", format="json")
resp = self.client.get("/scripts/download/500/", format="json")
self.assertEqual(resp.status_code, 404)
# return script code property should be "Test"
@@ -170,7 +170,7 @@ class TestScriptViews(TacticalTestCase):
script = baker.make(
"scripts.Script", code_base64="VGVzdA==", shell="powershell"
)
url = f"/scripts/{script.pk}/download/" # type: ignore
url = f"/scripts/download/{script.pk}/" # type: ignore
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
@@ -178,7 +178,7 @@ class TestScriptViews(TacticalTestCase):
# test batch file
script = baker.make("scripts.Script", code_base64="VGVzdA==", shell="cmd")
url = f"/scripts/{script.pk}/download/" # type: ignore
url = f"/scripts/download/{script.pk}/" # type: ignore
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
@@ -186,7 +186,7 @@ class TestScriptViews(TacticalTestCase):
# test python file
script = baker.make("scripts.Script", code_base64="VGVzdA==", shell="python")
url = f"/scripts/{script.pk}/download/" # type: ignore
url = f"/scripts/download/{script.pk}/" # type: ignore
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
@@ -388,7 +388,7 @@ class TestScriptViews(TacticalTestCase):
)
# test with set value
baker.make(
value = baker.make(
"clients.SiteCustomField",
field=field,
site=agent.site,
@@ -399,6 +399,24 @@ class TestScriptViews(TacticalTestCase):
Script.parse_script_args(agent=agent, shell="python", args=args),
)
# test with set but empty field value
value.string_value = "" # type: ignore
value.save() # type: ignore
self.assertEqual(
["-Parameter", "-Another 'DEFAULT'"],
Script.parse_script_args(agent=agent, shell="python", args=args),
)
# test blank default and value
field.default_value_string = "" # type: ignore
field.save() # type: ignore
self.assertEqual(
["-Parameter", "-Another ''"],
Script.parse_script_args(agent=agent, shell="python", args=args),
)
def test_script_arg_replacement_array_fields(self):
agent = baker.make_recipe("agents.agent")
field = baker.make(
@@ -479,3 +497,106 @@ class TestScriptViews(TacticalTestCase):
["-Parameter", "-Another $True"],
Script.parse_script_args(agent=agent, shell="powershell", args=args),
)
class TestScriptSnippetViews(TacticalTestCase):
def setUp(self):
self.setup_coresettings()
self.authenticate()
def test_get_script_snippets(self):
url = "/scripts/snippets/"
snippets = baker.make("scripts.ScriptSnippet", _quantity=3)
serializer = ScriptSnippetSerializer(snippets, many=True)
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(serializer.data, resp.data) # type: ignore
self.check_not_authenticated("get", url)
def test_add_script_snippet(self):
url = f"/scripts/snippets/"
data = {
"name": "Name",
"description": "Description",
"shell": "powershell",
"code": "Test",
}
resp = self.client.post(url, data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertTrue(ScriptSnippet.objects.filter(name="Name").exists())
self.check_not_authenticated("post", url)
def test_modify_script_snippet(self):
# test a call where script doesn't exist
resp = self.client.put("/scripts/snippets/500/", format="json")
self.assertEqual(resp.status_code, 404)
# make a userdefined script
snippet = baker.make("scripts.ScriptSnippet", name="Test")
url = f"/scripts/snippets/{snippet.pk}/" # type: ignore
data = {"name": "New Name"} # type: ignore
resp = self.client.put(url, data, format="json")
self.assertEqual(resp.status_code, 200)
snippet = ScriptSnippet.objects.get(pk=snippet.pk) # type: ignore
self.assertEquals(snippet.name, "New Name")
self.check_not_authenticated("put", url)
def test_get_script_snippet(self):
# test a call where script doesn't exist
resp = self.client.get("/scripts/snippets/500/", format="json")
self.assertEqual(resp.status_code, 404)
snippet = baker.make("scripts.ScriptSnippet")
url = f"/scripts/snippets/{snippet.pk}/" # type: ignore
serializer = ScriptSnippetSerializer(snippet)
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(serializer.data, resp.data) # type: ignore
self.check_not_authenticated("get", url)
def test_delete_script_snippet(self):
# test a call where script doesn't exist
resp = self.client.delete("/scripts/snippets/500/", format="json")
self.assertEqual(resp.status_code, 404)
# test delete script snippet
snippet = baker.make("scripts.ScriptSnippet")
url = f"/scripts/snippets/{snippet.pk}/" # type: ignore
resp = self.client.delete(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertFalse(ScriptSnippet.objects.filter(pk=snippet.pk).exists()) # type: ignore
self.check_not_authenticated("delete", url)
def test_snippet_replacement(self):
snippet1 = baker.make(
"scripts.ScriptSnippet", name="snippet1", code="Snippet 1 Code"
)
snippet2 = baker.make(
"scripts.ScriptSnippet", name="snippet2", code="Snippet 2 Code"
)
test_no_snippet = "No Snippets Here"
test_with_snippet = "Snippet 1: {{snippet1}}\nSnippet 2: {{snippet2}}"
# test putting snippet in text
result = Script.replace_with_snippets(test_with_snippet)
self.assertEqual(
result,
f"Snippet 1: {snippet1.code}\nSnippet 2: {snippet2.code}", # type:ignore
)
# test text with no snippets
result = Script.replace_with_snippets(test_no_snippet)
self.assertEqual(result, test_no_snippet)

View File

@@ -3,7 +3,10 @@ from django.urls import path
from . import views
urlpatterns = [
path("scripts/", views.GetAddScripts.as_view()),
path("<int:pk>/script/", views.GetUpdateDeleteScript.as_view()),
path("<int:pk>/download/", views.download),
path("", views.GetAddScripts.as_view()),
path("<int:pk>/", views.GetUpdateDeleteScript.as_view()),
path("snippets/", views.GetAddScriptSnippets.as_view()),
path("snippets/<int:pk>/", views.GetUpdateDeleteScriptSnippet.as_view()),
path("testscript/", views.TestScript.as_view()),
path("download/<int:pk>/", views.download),
]

View File

@@ -1,64 +1,39 @@
import base64
import json
import asyncio
from django.conf import settings
from django.shortcuts import get_object_or_404
from loguru import logger
from rest_framework.decorators import api_view, permission_classes
from rest_framework.parsers import FileUploadParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from tacticalrmm.utils import notify_error
from .models import Script
from .models import Script, ScriptSnippet
from .permissions import ManageScriptsPerms
from .serializers import ScriptSerializer, ScriptTableSerializer
logger.configure(**settings.LOG_CONFIG)
from agents.permissions import RunScriptPerms
from .serializers import (
ScriptSerializer,
ScriptTableSerializer,
ScriptSnippetSerializer,
)
class GetAddScripts(APIView):
permission_classes = [IsAuthenticated, ManageScriptsPerms]
parser_class = (FileUploadParser,)
def get(self, request):
scripts = Script.objects.all()
showCommunityScripts = request.GET.get("showCommunityScripts", True)
if not showCommunityScripts or showCommunityScripts == "false":
scripts = Script.objects.filter(script_type="userdefined")
else:
scripts = Script.objects.all()
return Response(ScriptTableSerializer(scripts, many=True).data)
def post(self, request, format=None):
data = {
"name": request.data["name"],
"category": request.data["category"],
"description": request.data["description"],
"shell": request.data["shell"],
"default_timeout": request.data["default_timeout"],
"script_type": "userdefined", # force all uploads to be userdefined. built in scripts cannot be edited by user
}
def post(self, request):
# code editor upload
if "args" in request.data.keys() and isinstance(request.data["args"], list):
data["args"] = request.data["args"]
# file upload, have to json load it cuz it's formData
if "args" in request.data.keys() and "file_upload" in request.data.keys():
data["args"] = json.loads(request.data["args"])
if "favorite" in request.data.keys():
data["favorite"] = request.data["favorite"]
if "filename" in request.data.keys():
message_bytes = request.data["filename"].read()
data["code_base64"] = base64.b64encode(message_bytes).decode(
"ascii", "ignore"
)
elif "code" in request.data.keys():
message_bytes = request.data["code"].encode("ascii", "ignore")
data["code_base64"] = base64.b64encode(message_bytes).decode("ascii")
serializer = ScriptSerializer(data=data, partial=True)
serializer = ScriptSerializer(data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
obj = serializer.save()
@@ -85,11 +60,6 @@ class GetUpdateDeleteScript(APIView):
else:
return notify_error("Community scripts cannot be edited.")
elif "code" in data:
message_bytes = data["code"].encode("ascii")
data["code_base64"] = base64.b64encode(message_bytes).decode("ascii")
data.pop("code")
serializer = ScriptSerializer(data=data, instance=script, partial=True)
serializer.is_valid(raise_exception=True)
obj = serializer.save()
@@ -107,11 +77,87 @@ class GetUpdateDeleteScript(APIView):
return Response(f"{script.name} was deleted!")
class GetAddScriptSnippets(APIView):
permission_classes = [IsAuthenticated, ManageScriptsPerms]
def get(self, request):
snippets = ScriptSnippet.objects.all()
return Response(ScriptSnippetSerializer(snippets, many=True).data)
def post(self, request):
serializer = ScriptSnippetSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response("Script snippet was saved successfully")
class GetUpdateDeleteScriptSnippet(APIView):
permission_classes = [IsAuthenticated, ManageScriptsPerms]
def get(self, request, pk):
snippet = get_object_or_404(ScriptSnippet, pk=pk)
return Response(ScriptSnippetSerializer(snippet).data)
def put(self, request, pk):
snippet = get_object_or_404(ScriptSnippet, pk=pk)
serializer = ScriptSnippetSerializer(
instance=snippet, data=request.data, partial=True
)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response("Script snippet was saved successfully")
def delete(self, request, pk):
snippet = get_object_or_404(ScriptSnippet, pk=pk)
snippet.delete()
return Response("Script snippet was deleted successfully")
class TestScript(APIView):
permission_classes = [IsAuthenticated, RunScriptPerms]
def post(self, request):
from .models import Script
from agents.models import Agent
agent = get_object_or_404(Agent, pk=request.data["agent"])
parsed_args = Script.parse_script_args(
agent, request.data["shell"], request.data["args"]
)
data = {
"func": "runscript",
"timeout": request.data["timeout"],
"script_args": parsed_args,
"payload": {
"code": Script.replace_with_snippets(request.data["code"]),
"shell": request.data["shell"],
},
}
r = asyncio.run(
agent.nats_cmd(data, timeout=request.data["timeout"], wait=True)
)
return Response(r)
@api_view()
@permission_classes([IsAuthenticated, ManageScriptsPerms])
def download(request, pk):
script = get_object_or_404(Script, pk=pk)
with_snippets = request.GET.get("with_snippets", True)
if with_snippets == "false":
with_snippets = False
if script.shell == "powershell":
filename = f"{script.name}.ps1"
elif script.shell == "cmd":
@@ -119,4 +165,9 @@ def download(request, pk):
else:
filename = f"{script.name}.py"
return Response({"filename": filename, "code": script.code})
return Response(
{
"filename": filename,
"code": script.code if with_snippets else script.code_no_snippets,
}
)
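The reworked views move script bodies to code_base64, add snippet CRUD, a test-script endpoint, and a download route that can return the code with or without snippets expanded. A hedged walk-through using DRF's test client; every value below is a placeholder:

from rest_framework.test import APIClient

client = APIClient()
# client.force_authenticate(user=some_user)  # ManageScriptsPerms / RunScriptPerms assumed

# create a snippet
client.post(
    "/scripts/snippets/",
    {"name": "header", "shell": "powershell", "code": "$ErrorActionPreference = 'Stop'"},
    format="json",
)

# run ad-hoc code on an agent (agent pk 1 is a placeholder)
client.post(
    "/scripts/testscript/",
    {"agent": 1, "code": "{{header}}\nWrite-Output 'hi'", "shell": "powershell",
     "timeout": 30, "args": []},
    format="json",
)

# download script 5 without expanding snippets
client.get("/scripts/download/5/?with_snippets=false")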

View File

@@ -1,21 +1,16 @@
import asyncio
from django.conf import settings
from django.shortcuts import get_object_or_404
from loguru import logger
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from agents.models import Agent
from checks.models import Check
from django.shortcuts import get_object_or_404
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from tacticalrmm.utils import notify_error
from .permissions import ManageWinSvcsPerms
from .serializers import ServicesSerializer
logger.configure(**settings.LOG_CONFIG)
@api_view()
def get_services(request, pk):

View File

@@ -35,13 +35,17 @@ app.conf.beat_schedule = {
"task": "agents.tasks.auto_self_agent_update_task",
"schedule": crontab(minute=35, hour="*"),
},
"monitor-agents": {
"task": "agents.tasks.monitor_agents_task",
"schedule": crontab(minute="*/7"),
"handle-agents": {
"task": "agents.tasks.handle_agents_task",
"schedule": crontab(minute="*"),
},
"get-agentinfo": {
"task": "agents.tasks.agent_getinfo_task",
"schedule": crontab(minute="*"),
},
"get-wmi": {
"task": "agents.tasks.get_wmi_task",
"schedule": crontab(minute="*/18"),
"schedule": crontab(minute=18, hour="*/5"),
},
}
@@ -54,10 +58,12 @@ def debug_task(self):
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
from agents.tasks import agent_outages_task
from agents.tasks import agent_outages_task, agent_checkin_task
from alerts.tasks import unsnooze_alerts
from core.tasks import core_maintenance_tasks
from core.tasks import core_maintenance_tasks, cache_db_fields_task
sender.add_periodic_task(45.0, agent_checkin_task.s())
sender.add_periodic_task(60.0, agent_outages_task.s())
sender.add_periodic_task(60.0 * 30, core_maintenance_tasks.s())
sender.add_periodic_task(60.0 * 60, unsnooze_alerts.s())
sender.add_periodic_task(90.0, cache_db_fields_task.s())
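The schedule above mixes crontab entries in beat_schedule with interval tasks registered in setup_periodic_tasks. A small sketch of what those crontab expressions mean; the variable names are only for illustration:

from celery.schedules import crontab

# "get-wmi": minute 18 of every 5th hour (00:18, 05:18, 10:18, ...)
wmi_schedule = crontab(minute=18, hour="*/5")
# "handle-agents" / "get-agentinfo": every minute
every_minute = crontab(minute="*")
# interval style used in setup_periodic_tasks: plain seconds, e.g.
# sender.add_periodic_task(90.0, cache_db_fields_task.s())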

View File

@@ -2,6 +2,7 @@ import threading
from django.conf import settings
from rest_framework.exceptions import AuthenticationFailed
from ipware import get_client_ip
request_local = threading.local()
@@ -67,6 +68,7 @@ class AuditMiddleware:
debug_info["view_func"] = view_func.__name__
debug_info["view_args"] = view_args
debug_info["view_kwargs"] = view_kwargs
debug_info["ip"] = request._client_ip
request_local.debug_info = debug_info
@@ -83,3 +85,15 @@ class AuditMiddleware:
request_local.debug_info = None
request_local.username = None
return response
class LogIPMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
client_ip, is_routable = get_client_ip(request)
request._client_ip = client_ip
response = self.get_response(request)
return response
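LogIPMiddleware resolves the client IP with django-ipware and stashes it on the request, so AuditMiddleware can copy it into debug_info["ip"] later. A hedged sketch of reading the attribute downstream; the view itself is hypothetical:

from rest_framework.response import Response
from rest_framework.views import APIView

class WhoAmI(APIView):
    def get(self, request):
        # set by tacticalrmm.middleware.LogIPMiddleware
        return Response({"ip": getattr(request, "_client_ip", None)})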

View File

@@ -15,23 +15,25 @@ EXE_DIR = os.path.join(BASE_DIR, "tacticalrmm/private/exe")
AUTH_USER_MODEL = "accounts.User"
# latest release
TRMM_VERSION = "0.6.9"
TRMM_VERSION = "0.8.2"
# bump this version everytime vue code is changed
# to alert user they need to manually refresh their browser
APP_VER = "0.0.135"
APP_VER = "0.0.144"
# https://github.com/wh1te909/rmmagent
LATEST_AGENT_VER = "1.5.6"
LATEST_AGENT_VER = "1.6.1"
MESH_VER = "0.8.35"
MESH_VER = "0.9.16"
NATS_SERVER_VER = "2.3.3"
# for the update script, bump when need to recreate venv or npm install
PIP_VER = "16"
NPM_VER = "15"
PIP_VER = "21"
NPM_VER = "21"
SETUPTOOLS_VER = "56.1.0"
WHEEL_VER = "0.36.2"
SETUPTOOLS_VER = "57.4.0"
WHEEL_VER = "0.37.0"
DL_64 = f"https://github.com/wh1te909/rmmagent/releases/download/v{LATEST_AGENT_VER}/winagent-v{LATEST_AGENT_VER}.exe"
DL_32 = f"https://github.com/wh1te909/rmmagent/releases/download/v{LATEST_AGENT_VER}/winagent-v{LATEST_AGENT_VER}-x86.exe"
@@ -45,6 +47,12 @@ DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
ASGI_APPLICATION = "tacticalrmm.asgi.application"
REST_KNOX = {
"TOKEN_TTL": timedelta(hours=5),
"AUTO_REFRESH": True,
"MIN_REFRESH_INTERVAL": 600,
}
try:
from .local_settings import *
except ImportError:
@@ -80,6 +88,15 @@ if not "AZPIPELINE" in os.environ:
if DEBUG: # type: ignore
INSTALLED_APPS += ("django_extensions",)
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels_redis.core.RedisChannelLayer",
"CONFIG": {
"hosts": [(REDIS_HOST, 6379)], # type: ignore
},
},
}
if "AZPIPELINE" in os.environ:
ADMIN_ENABLED = False
@@ -94,6 +111,7 @@ MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"corsheaders.middleware.CorsMiddleware", ##
"tacticalrmm.middleware.LogIPMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
@@ -104,11 +122,6 @@ MIDDLEWARE = [
if ADMIN_ENABLED: # type: ignore
MIDDLEWARE += ("django.contrib.messages.middleware.MessageMiddleware",)
REST_KNOX = {
"TOKEN_TTL": timedelta(hours=5),
"AUTO_REFRESH": True,
"MIN_REFRESH_INTERVAL": 600,
}
ROOT_URLCONF = "tacticalrmm.urls"
@@ -163,12 +176,23 @@ STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "tacticalrmm/static/")]
LOG_CONFIG = {
"handlers": [{"sink": os.path.join(LOG_DIR, "debug.log"), "serialize": False}]
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"handlers": {
"file": {
"level": "ERROR",
"class": "logging.FileHandler",
"filename": os.path.join(LOG_DIR, "django_debug.log"),
}
},
"loggers": {
"django.request": {"handlers": ["file"], "level": "ERROR", "propagate": True}
},
}
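The loguru LOG_CONFIG is replaced with Django's standard LOGGING dict: unhandled request errors propagate to the django.request logger and land in django_debug.log at ERROR level. A minimal sketch of writing to that same handler from application code; the message is an example:

import logging

logger = logging.getLogger("django.request")
logger.error("example error that would be written to django_debug.log")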
if "AZPIPELINE" in os.environ:
print("PIPELINE")
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",

View File

@@ -1,3 +1,4 @@
import uuid
from django.test import TestCase, override_settings
from model_bakery import baker
from rest_framework.authtoken.models import Token
@@ -20,6 +21,12 @@ class TacticalTestCase(TestCase):
self.client_setup()
self.client.force_authenticate(user=self.john)
User.objects.create_user( # type: ignore
username=uuid.uuid4().hex,
is_installer_user=True,
password=User.objects.make_random_password(60), # type: ignore
)
def setup_agent_auth(self, agent):
agent_user = User.objects.create_user(
username=agent.agent_id,

View File

@@ -4,7 +4,8 @@ from unittest.mock import mock_open, patch
import requests
from django.conf import settings
from django.test import TestCase, override_settings
from django.test import override_settings
from tacticalrmm.test import TacticalTestCase
from .utils import (
bitdays_to_string,
@@ -16,7 +17,10 @@ from .utils import (
)
class TestUtils(TestCase):
class TestUtils(TacticalTestCase):
def setUp(self):
self.setup_coresettings()
@patch("requests.post")
@patch("__main__.__builtins__.open", new_callable=mock_open)
def test_generate_winagent_exe_success(self, m_open, mock_post):
@@ -77,7 +81,7 @@ class TestUtils(TestCase):
@patch("subprocess.run")
def test_run_nats_api_cmd(self, mock_subprocess):
ids = ["a", "b", "c"]
_ = run_nats_api_cmd("monitor", ids)
_ = run_nats_api_cmd("wmi", ids)
mock_subprocess.assert_called_once()
def test_bitdays_to_string(self):

View File

@@ -3,7 +3,7 @@ from django.urls import include, path
from knox import views as knox_views
from accounts.views import CheckCreds, LoginView
from core import consumers
from core.consumers import DashInfo
urlpatterns = [
path("checkcreds/", CheckCreds.as_view()),
@@ -32,5 +32,5 @@ if hasattr(settings, "ADMIN_ENABLED") and settings.ADMIN_ENABLED:
urlpatterns += (path(settings.ADMIN_URL, admin.site.urls),)
ws_urlpatterns = [
path("ws/dashinfo/", consumers.DashInfo.as_asgi()), # type: ignore
path("ws/dashinfo/", DashInfo.as_asgi()), # type: ignore
]

View File

@@ -15,14 +15,12 @@ from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.http import FileResponse
from knox.auth import TokenAuthentication
from loguru import logger
from rest_framework import status
from rest_framework.response import Response
from agents.models import Agent
from core.models import CodeSignToken
logger.configure(**settings.LOG_CONFIG)
from logs.models import DebugLog
from agents.models import Agent
notify_error = lambda msg: Response(msg, status=status.HTTP_400_BAD_REQUEST)
@@ -61,7 +59,7 @@ def generate_winagent_exe(
)
try:
codetoken = CodeSignToken.objects.first().token
codetoken = CodeSignToken.objects.first().token # type:ignore
base_url = get_exegen_url() + "/api/v1/winagents/?"
params = {
"version": settings.LATEST_AGENT_VER,
@@ -107,7 +105,7 @@ def generate_winagent_exe(
break
if errors:
logger.error(errors)
DebugLog.error(message=errors)
return notify_error(
"Something went wrong. Check debug error log for exact error message"
)
@@ -123,7 +121,7 @@ def generate_winagent_exe(
def get_default_timezone():
from core.models import CoreSettings
return pytz.timezone(CoreSettings.objects.first().default_time_zone)
return pytz.timezone(CoreSettings.objects.first().default_time_zone) # type:ignore
def get_bit_days(days: list[str]) -> int:
@@ -178,28 +176,28 @@ def filter_software(sw: SoftwareList) -> SoftwareList:
def reload_nats():
users = [{"user": "tacticalrmm", "password": settings.SECRET_KEY}]
agents = Agent.objects.prefetch_related("user").only("pk", "agent_id")
agents = Agent.objects.prefetch_related("user").only(
"pk", "agent_id"
) # type:ignore
for agent in agents:
try:
users.append(
{"user": agent.agent_id, "password": agent.user.auth_token.key}
)
except:
logger.critical(
f"{agent.hostname} does not have a user account, NATS will not work"
DebugLog.critical(
agent=agent,
log_type="agent_issues",
message=f"{agent.hostname} does not have a user account, NATS will not work",
)
domain = settings.ALLOWED_HOSTS[0].split(".", 1)[1]
cert_file = f"/etc/letsencrypt/live/{domain}/fullchain.pem"
key_file = f"/etc/letsencrypt/live/{domain}/privkey.pem"
if hasattr(settings, "CERT_FILE") and hasattr(settings, "KEY_FILE"):
if os.path.exists(settings.CERT_FILE) and os.path.exists(settings.KEY_FILE):
cert_file = settings.CERT_FILE
key_file = settings.KEY_FILE
else:
cert_file = f"/etc/letsencrypt/live/{domain}/fullchain.pem"
key_file = f"/etc/letsencrypt/live/{domain}/privkey.pem"
else:
cert_file = f"/etc/letsencrypt/live/{domain}/fullchain.pem"
key_file = f"/etc/letsencrypt/live/{domain}/privkey.pem"
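reload_nats now lets a deployment override the Let's Encrypt paths: if the settings expose CERT_FILE and KEY_FILE and both files exist, those are used for the NATS TLS block instead. A hedged sketch of that override, placed in local_settings.py (which settings.py imports via "from .local_settings import *"); the paths are illustrative:
# local_settings.py
CERT_FILE = "/etc/pki/custom/fullchain.pem"  # illustrative path
KEY_FILE = "/etc/pki/custom/privkey.pem"     # illustrative path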
config = {
"tls": {
@@ -207,7 +205,7 @@ def reload_nats():
"key_file": key_file,
},
"authorization": {"users": users},
"max_payload": 2048576005,
"max_payload": 67108864,
}
conf = os.path.join(settings.BASE_DIR, "nats-rmm.conf")
@@ -248,21 +246,36 @@ KnoxAuthMiddlewareStack = lambda inner: KnoxAuthMiddlewareInstance(
)
def run_nats_api_cmd(mode: str, ids: list[str], timeout: int = 30) -> None:
config = {
"key": settings.SECRET_KEY,
"natsurl": f"tls://{settings.ALLOWED_HOSTS[0]}:4222",
"agents": ids,
}
with tempfile.NamedTemporaryFile() as fp:
def run_nats_api_cmd(mode: str, ids: list[str] = [], timeout: int = 30) -> None:
if mode == "wmi":
config = {
"key": settings.SECRET_KEY,
"natsurl": f"tls://{settings.ALLOWED_HOSTS[0]}:4222",
"agents": ids,
}
else:
db = settings.DATABASES["default"]
config = {
"key": settings.SECRET_KEY,
"natsurl": f"tls://{settings.ALLOWED_HOSTS[0]}:4222",
"user": db["USER"],
"pass": db["PASSWORD"],
"host": db["HOST"],
"port": int(db["PORT"]),
"dbname": db["NAME"],
}
with tempfile.NamedTemporaryFile(
dir="/opt/tactical/tmp" if settings.DOCKER_BUILD else None
) as fp:
with open(fp.name, "w") as f:
json.dump(config, f)
cmd = ["/usr/local/bin/nats-api", "-c", fp.name, "-m", mode]
try:
subprocess.run(cmd, capture_output=True, timeout=timeout)
subprocess.run(cmd, timeout=timeout)
except Exception as e:
logger.error(e)
DebugLog.error(message=e)
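run_nats_api_cmd now branches on mode: "wmi" writes the agent IDs into the temp config, while any other mode writes the default database credentials instead, so ids can be omitted. A hedged usage sketch; agent_ids is assumed to be a list of agent_id strings and the non-"wmi" mode name is hypothetical:
run_nats_api_cmd("wmi", ids=agent_ids)  # per-agent WMI collection
run_nats_api_cmd("somemode")            # hypothetical db-backed mode; ids defaults to []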
def get_latest_trmm_ver() -> str:
@@ -277,15 +290,16 @@ def get_latest_trmm_ver() -> str:
if "TRMM_VERSION" in line:
return line.split(" ")[2].strip('"')
except Exception as e:
logger.error(e)
DebugLog.error(message=e)
return "error"
def replace_db_values(
string: str, agent: Agent = None, shell: str = None, quotes=True
string: str, instance=None, shell: str = None, quotes=True # type:ignore
) -> Union[str, None]:
from core.models import CustomField, GlobalKVStore
from clients.models import Client, Site
# split on the period if present; the first part should be the model and the second the property, i.e. {{client.name}}
temp = string.split(".")
@@ -293,7 +307,7 @@ def replace_db_values(
# check for model and property
if len(temp) < 2:
# ignore arg since it is invalid
return None
return ""
# value is in the global keystore and replace value
if temp[0] == "global":
@@ -302,30 +316,48 @@ def replace_db_values(
return f"'{value}'" if quotes else value
else:
logger.error(
f"Couldn't lookup value for: {string}. Make sure it exists in CoreSettings > Key Store"
DebugLog.error(
log_type="scripting",
message=f"{agent.hostname} Couldn't lookup value for: {string}. Make sure it exists in CoreSettings > Key Store", # type:ignore
)
return None
return ""
if not agent:
# agent must be set if not global property
return f"There was an error finding the agent: {agent}"
if not instance:
# instance must be set if not global property
return ""
if temp[0] == "client":
model = "client"
obj = agent.client
if isinstance(instance, Client):
obj = instance
elif hasattr(instance, "client"):
obj = instance.client
else:
obj = None
elif temp[0] == "site":
model = "site"
obj = agent.site
if isinstance(instance, Site):
obj = instance
elif hasattr(instance, "site"):
obj = instance.site
else:
obj = None
elif temp[0] == "agent":
model = "agent"
obj = agent
if isinstance(instance, Agent):
obj = instance
else:
obj = None
else:
# ignore arg since it is invalid
logger.error(
f"Not enough information to find value for: {string}. Only agent, site, client, and global are supported."
DebugLog.error(
log_type="scripting",
message=f"{instance} Not enough information to find value for: {string}. Only agent, site, client, and global are supported.",
)
return None
return ""
if not obj:
return ""
if hasattr(obj, temp[1]):
value = f"'{getattr(obj, temp[1])}'" if quotes else getattr(obj, temp[1])
@@ -336,13 +368,16 @@ def replace_db_values(
model_fields = getattr(field, f"{model}_fields")
value = None
if model_fields.filter(**{model: obj}).exists():
value = model_fields.get(**{model: obj}).value
if field.type != "checkbox" and model_fields.get(**{model: obj}).value:
value = model_fields.get(**{model: obj}).value
elif field.type == "checkbox":
value = model_fields.get(**{model: obj}).value
# need explicit None check since a false boolean value will pass default value
if value == None and field.default_value:
if value == None and field.default_value != None:
value = field.default_value
# check if value exists and if not use defa
# check if value exists and if not use default
if value and field.type == "multiple":
value = (
f"'{format_shell_array(value)}'"
@@ -356,19 +391,21 @@ def replace_db_values(
else:
# ignore arg since property is invalid
logger.error(
f"Couldn't find property on supplied variable: {string}. Make sure it exists as a custom field or a valid agent property"
DebugLog.error(
log_type="scripting",
message=f"{instance} Couldn't find property on supplied variable: {string}. Make sure it exists as a custom field or a valid agent property",
)
return None
return ""
# log any unhashable type errors
if value != None:
return value # type: ignore
else:
logger.error(
f"Couldn't lookup value for: {string}. Make sure it exists as a custom field or a valid agent property"
DebugLog.error(
log_type="scripting",
message=f" {instance}({instance.pk}) Couldn't lookup value for: {string}. Make sure it exists as a custom field or a valid agent property",
)
return None
return ""
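replace_db_values now takes a generic instance (an Agent, Client, or Site) instead of an agent, and every failure path returns an empty string rather than None, so callers no longer have to special-case missing values. A hedged usage sketch; Agent is already imported in this module, the example assumes at least one agent exists, and the key-store entry name is hypothetical:
agent = Agent.objects.first()
# The first path segment picks the model, the second the property.
replace_db_values("agent.hostname", instance=agent, shell="powershell")
replace_db_values("client.name", instance=agent)     # resolved via agent.client
replace_db_values("global.api_key", instance=None)   # "api_key" is a hypothetical key store entry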
def format_shell_array(value: list) -> str:

View File

@@ -3,15 +3,12 @@ import datetime as dt
import time
import pytz
from django.conf import settings
from django.utils import timezone as djangotime
from loguru import logger
from packaging import version as pyver
from agents.models import Agent
from tacticalrmm.celery import app
logger.configure(**settings.LOG_CONFIG)
from logs.models import DebugLog
@app.task
@@ -46,7 +43,13 @@ def auto_approve_updates_task():
def check_agent_update_schedule_task():
# scheduled task that installs updates on agents if enabled
agents = Agent.objects.only(
"pk", "agent_id", "version", "last_seen", "overdue_time", "offline_time"
"pk",
"agent_id",
"version",
"last_seen",
"overdue_time",
"offline_time",
"has_patches_pending",
)
online = [
i
@@ -114,7 +117,11 @@ def check_agent_update_schedule_task():
if install:
# initiate update on agent asynchronously and don't worry about ret code
logger.info(f"Installing windows updates on {agent.salt_id}")
DebugLog.info(
agent=agent,
log_type="windows_updates",
message=f"Installing windows updates on {agent.hostname}",
)
nats_data = {
"func": "installwinupdates",
"guids": agent.get_approved_update_guids(),

View File

@@ -8,7 +8,7 @@ jobs:
strategy:
matrix:
Debian10:
AGENT_NAME: "azpipelines-deb10"
AGENT_NAME: "az-pipeline-fran"
pool:
name: linux-vms
@@ -20,15 +20,18 @@ jobs:
sudo -u postgres psql -c 'DROP DATABASE IF EXISTS pipeline'
sudo -u postgres psql -c 'DROP DATABASE IF EXISTS test_pipeline'
sudo -u postgres psql -c 'CREATE DATABASE pipeline'
sudo -u postgres psql -c "SET client_encoding = 'UTF8'" pipeline
SETTINGS_FILE="/myagent/_work/1/s/api/tacticalrmm/tacticalrmm/settings.py"
rm -rf /myagent/_work/1/s/api/env
cd /myagent/_work/1/s/api
python3.9 -m venv env
source env/bin/activate
cd /myagent/_work/1/s/api/tacticalrmm
pip install --no-cache-dir --upgrade pip
pip install --no-cache-dir setuptools==54.2.0 wheel==0.36.2
pip install --no-cache-dir -r requirements.txt -r requirements-test.txt -r requirements-dev.txt
pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host files.pythonhosted.org --upgrade pip
SETUPTOOLS_VER=$(grep "^SETUPTOOLS_VER" "$SETTINGS_FILE" | awk -F'[= "]' '{print $5}')
WHEEL_VER=$(grep "^WHEEL_VER" "$SETTINGS_FILE" | awk -F'[= "]' '{print $5}')
pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host files.pythonhosted.org setuptools==${SETUPTOOLS_VER} wheel==${WHEEL_VER}
pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host files.pythonhosted.org -r requirements.txt -r requirements-test.txt -r requirements-dev.txt
displayName: "Install Python Dependencies"
- script: |

View File

@@ -1,6 +1,6 @@
#!/bin/bash
SCRIPT_VERSION="12"
SCRIPT_VERSION="15"
SCRIPT_URL='https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/backup.sh'
GREEN='\033[0;32m'
@@ -59,6 +59,7 @@ mkdir ${tmp_dir}/nginx
mkdir ${tmp_dir}/systemd
mkdir ${tmp_dir}/rmm
mkdir ${tmp_dir}/confd
mkdir ${tmp_dir}/redis
pg_dump --dbname=postgresql://"${POSTGRES_USER}":"${POSTGRES_PW}"@127.0.0.1:5432/tacticalrmm | gzip -9 > ${tmp_dir}/postgres/db-${dt_now}.psql.gz
@@ -72,12 +73,14 @@ sudo tar -czvf ${tmp_dir}/nginx/etc-nginx.tar.gz -C /etc/nginx .
sudo tar -czvf ${tmp_dir}/confd/etc-confd.tar.gz -C /etc/conf.d .
sudo gzip -9 -c /var/lib/redis/appendonly.aof > ${tmp_dir}/redis/appendonly.aof.gz
sudo cp ${sysd}/rmm.service ${sysd}/celery.service ${sysd}/celerybeat.service ${sysd}/meshcentral.service ${sysd}/nats.service ${tmp_dir}/systemd/
if [ -f "${sysd}/daphne.service" ]; then
sudo cp ${sysd}/daphne.service ${tmp_dir}/systemd/
fi
cat /rmm/api/tacticalrmm/tacticalrmm/private/log/debug.log | gzip -9 > ${tmp_dir}/rmm/debug.log.gz
cat /rmm/api/tacticalrmm/tacticalrmm/private/log/django_debug.log | gzip -9 > ${tmp_dir}/rmm/debug.log.gz
cp /rmm/api/tacticalrmm/tacticalrmm/local_settings.py /rmm/api/tacticalrmm/app.ini ${tmp_dir}/rmm/
cp /rmm/web/.env ${tmp_dir}/rmm/env
cp /rmm/api/tacticalrmm/tacticalrmm/private/exe/mesh*.exe ${tmp_dir}/rmm/

Some files were not shown because too many files have changed in this diff.