Compare commits


183 Commits

Author SHA1 Message Date
wh1te909
f82b589d03 Release 0.12.3 2022-04-11 23:16:28 +00:00
wh1te909
cddac4d0fb bump version 2022-04-11 22:21:57 +00:00
wh1te909
ff41bbd0e5 adjust celery config 2022-04-09 17:09:54 +00:00
wh1te909
4bdb6ae84e fix graphics 2022-04-09 17:09:09 +00:00
wh1te909
58fe14bd31 add coverage badge 2022-04-09 02:10:51 +00:00
wh1te909
97f362ed1e fix for multiprocessing 2022-04-09 01:26:04 +00:00
wh1te909
b63e87ecb6 add parallel 2022-04-09 01:01:32 +00:00
wh1te909
ac3550dfd7 add lcov 2022-04-09 00:48:00 +00:00
wh1te909
8278a4cfd9 remove run 2022-04-09 00:45:02 +00:00
wh1te909
f161a2bbc8 more coveralls 2022-04-09 00:43:47 +00:00
wh1te909
6a94489df0 testing coveralls 2022-04-09 00:26:22 +00:00
wh1te909
c3a0b9192f update reqs 2022-04-08 19:34:38 +00:00
wh1te909
69ff70a9ce typo [skip ci] 2022-04-08 18:49:15 +00:00
wh1te909
5284eb0af8 validate mesh username 2022-04-08 18:47:57 +00:00
wh1te909
58384ae136 update supported version 2022-04-08 18:45:53 +00:00
wh1te909
054cc78e65 add meshctrl 2022-04-08 18:30:17 +00:00
wh1te909
8c283281d6 remove lower() from mesh username 2022-04-08 16:06:44 +00:00
wh1te909
241fe41756 fix env 2022-04-05 22:44:41 +00:00
wh1te909
e50e0626fa also check env 2022-04-05 21:31:13 +00:00
wh1te909
c9135f1573 add option to specify sslmode for nats-api pg connection closes #1049 2022-04-05 21:14:22 +00:00
wh1te909
ec2663a152 Release 0.12.2 2022-04-05 03:44:17 +00:00
wh1te909
7567042c8a bump versions 2022-04-05 03:41:18 +00:00
wh1te909
c99ceb155f update CI badge and supported agent versions 2022-04-04 22:25:13 +00:00
wh1te909
f44c92f0d3 switch to gh actions for tests 2022-04-04 21:54:46 +00:00
wh1te909
492701ec62 change dir 2022-04-04 07:08:57 +00:00
wh1te909
a6d0acaa4d black 2022-04-03 23:14:44 +00:00
wh1te909
f84b4e7274 remove dev deps from pipelines 2022-04-03 23:09:08 +00:00
wh1te909
b7ef5b82d8 add silk to dev 2022-04-03 22:48:27 +00:00
wh1te909
a854d2c38c add authorization to NATS 2022-04-03 22:47:43 +00:00
wh1te909
5140499bbd update uwsgi conf 2022-04-01 06:12:19 +00:00
wh1te909
7183e9ee85 attempt 2 2022-03-31 06:07:23 +00:00
wh1te909
11885e0aca attemp 1 to fix pipelines 2022-03-31 05:54:30 +00:00
wh1te909
2bda4e822c move pipelines 2022-03-31 05:34:50 +00:00
wh1te909
8867d12ec7 Release 0.12.1 2022-03-25 01:42:49 +00:00
wh1te909
154149a068 bump versions 2022-03-25 00:59:28 +00:00
wh1te909
c96985af03 add mesh troubleshooting script 2022-03-25 00:33:39 +00:00
wh1te909
e282420a6a handle locale, add --debug for linux install script 2022-03-24 23:53:58 +00:00
wh1te909
b9a207ea71 update reqs 2022-03-24 06:44:16 +00:00
wh1te909
28d52b5e7a tmpdir #1017 2022-03-24 02:10:21 +00:00
wh1te909
9761f1ae29 fix for older nix machines without updated certs 2022-03-24 00:48:03 +00:00
wh1te909
e62c8cc2e2 set platform on double click fixes #1013 2022-03-21 21:08:42 +00:00
wh1te909
b5aea92791 purge celery during update 2022-03-20 09:00:33 +00:00
wh1te909
2d7724383f Release 0.12.0 2022-03-19 20:08:16 +00:00
wh1te909
03f35c1975 update readme 2022-03-19 20:04:17 +00:00
wh1te909
bc7dad77f4 update reqs 2022-03-19 01:31:21 +00:00
wh1te909
aaa2540114 update backup/restore 2022-03-19 01:24:17 +00:00
wh1te909
f46787839a remove extra print statement 2022-03-19 00:03:03 +00:00
wh1te909
228be95af1 bump banner 2022-03-18 22:47:37 +00:00
wh1te909
a22d7e40e5 no longer need to backup mesh exes 2022-03-18 19:40:14 +00:00
wh1te909
d0f87c0980 remove docs workflow 2022-03-18 18:13:51 +00:00
wh1te909
5142783db9 update for new repo 2022-03-18 18:09:58 +00:00
wh1te909
4aea16ca8c remove check 2022-03-17 19:55:31 +00:00
wh1te909
d91d372fc5 Merge branch 'develop' of https://github.com/wh1te909/tacticalrmm into develop 2022-03-17 19:54:58 +00:00
sadnub
7405d884de black and fix settings.py 2022-03-17 15:47:28 -04:00
wh1te909
a9ae63043e uncomment 2022-03-17 19:46:52 +00:00
sadnub
6b943866ef add migration 2022-03-17 15:03:43 -04:00
sadnub
c7bb94d82a add api key auth to swagger 2022-03-17 11:30:45 -04:00
sadnub
30fb855200 black 2022-03-17 00:11:15 -04:00
sadnub
80f9e56e3f Fixes #859 2022-03-16 23:58:57 -04:00
sadnub
d301d967c7 Fixes #912 2022-03-16 23:44:50 -04:00
sadnub
7b7bdc4e9c implements #827 2022-03-16 21:55:12 -04:00
wh1te909
796ebca74c fix undefined 2022-03-17 01:19:13 +00:00
sadnub
3150bc316a change to inline if/else 2022-03-16 20:20:58 -04:00
sadnub
0a91b12e6e fix alert template name rendering and circular import issue 2022-03-16 20:17:51 -04:00
sadnub
918e2cc1a9 implement #819 and cleanup my garbage code XD 2022-03-16 19:56:49 -04:00
sadnub
fb71f83d6d Addresses #957. Will check if the script within a task action doesn't exist and will remove it when running the task on an agent 2022-03-16 14:21:36 -04:00
Dan
82470bf04f Merge pull request #1004 from silversword411/develop
Commenting scripts
2022-03-15 20:39:47 -07:00
silversword411
0ac75092e6 fix print to echo 2022-03-15 00:20:34 -04:00
silversword411
e898163aff Commenting scripts 2022-03-15 00:02:57 -04:00
wh1te909
418c7e1d9e fix notes 2022-03-15 01:09:09 +00:00
wh1te909
24cbabeaf0 fix type 2022-03-14 15:53:02 +00:00
wh1te909
91069b989d set uwsgi procs/threads dynamically in docker 2022-03-14 07:27:44 +00:00
wh1te909
1b7902894a remove un-used column 2022-03-14 06:49:49 +00:00
wh1te909
47e022897e typo 2022-03-14 06:41:04 +00:00
wh1te909
9aada993b1 nginx/celery changes and bump docker deps 2022-03-14 04:20:41 +00:00
sadnub
cf837b6d05 black 2022-03-13 16:18:52 -04:00
sadnub
09192da4fc fix downloading mesh agent on docker 2022-03-13 14:29:39 -04:00
wh1te909
3a792765cd bump 2022-03-13 04:56:23 +00:00
sadnub
a8f1b1c8bc update mesh agent port docker 2022-03-12 16:34:04 -05:00
wh1te909
8ffdc6bbf8 isort 2022-03-12 08:06:39 +00:00
wh1te909
945370bc25 black 2022-03-12 08:04:26 +00:00
wh1te909
ed4b3b0b9c fix tests 2022-03-12 07:52:00 +00:00
wh1te909
83a4268441 fix more tests 2022-03-12 03:13:56 +00:00
wh1te909
2938be7a70 more recovery rework 2022-03-12 02:30:32 +00:00
wh1te909
e3b2ee44ca add recovery for linux agent 2022-03-12 01:54:28 +00:00
wh1te909
f0c4658c9f update deps 2022-03-12 01:52:52 +00:00
wh1te909
0a4b236293 linux install changes [skip ci] 2022-03-11 22:34:56 +00:00
wh1te909
bc7b53c3d4 add supported os to bulk actions 2022-03-11 21:33:43 +00:00
wh1te909
5535e26eec require shebang for linux/mac scripts, refactor middleware/posix 2022-03-11 20:57:04 +00:00
sadnub
c84c3d58db fix category removal function and fix vuex store with agent action menu 2022-03-11 15:40:00 -05:00
wh1te909
d6caac51dd change paths for linux agent [skip ci] 2022-03-11 18:49:20 +00:00
sadnub
979e7a5e08 fix script lists and filtering for platform 2022-03-10 22:55:44 -05:00
sadnub
40f16eb984 add supported_platforms and hidden field to scripts and filter in script dialogs and allow editing in script forms 2022-03-10 20:01:24 -05:00
wh1te909
c17ad1b989 start fixing tests 2022-03-10 07:24:42 +00:00
wh1te909
24bfa062da update python and node 2022-03-10 03:07:37 +00:00
wh1te909
765f675da9 remove meshagent from db during agent uninstall closes #147 2022-03-10 02:24:34 +00:00
wh1te909
c0650d2ef0 update reqs 2022-03-10 01:52:51 +00:00
wh1te909
168434739f remove badge 2022-03-10 01:52:43 +00:00
wh1te909
337eaa46e3 switch pipelines to 3.10 2022-03-10 01:50:37 +00:00
wh1te909
94d42503b7 update reqs 2022-03-10 01:50:18 +00:00
wh1te909
202edc0588 v0.12.0 2022-03-10 00:57:55 +00:00
wh1te909
c95d11da47 testing new coverage 2022-03-04 07:17:56 +00:00
wh1te909
4f8615398c update reqs 2022-02-14 07:25:23 +00:00
wh1te909
f3b5f0128f update reqs 2022-02-13 00:30:35 +00:00
wh1te909
ab5e50c29c fix vscode deprecations / remove mypy 2022-02-11 19:57:49 +00:00
wh1te909
f9236bf92f don't show version banner if error 2022-02-08 22:17:08 +00:00
wh1te909
2522968b04 update bin 2022-02-08 21:00:09 +00:00
wh1te909
9c1900963d update reqs 2022-02-08 17:27:16 +00:00
wh1te909
82ff41e0bb always retry websocket reconnect even on close code 1000 because daphne sucks 2022-02-03 06:35:06 +00:00
wh1te909
fb86c14d77 update reqs 2022-02-03 03:16:17 +00:00
sadnub
c6c0159ee4 remove mkdocs container from docker dev 2022-02-01 22:20:11 -05:00
sadnub
fe5bba18a2 fix dev containers for non-root containers 2022-02-01 22:15:38 -05:00
sadnub
f61329b5de update mesh ports for persistent mesh configuration 2022-02-01 22:15:08 -05:00
sadnub
fbc04afa5b fix typo 2022-02-01 22:14:34 -05:00
sadnub
2f5bcf2263 change community script link to new repo 2022-02-01 22:14:19 -05:00
sadnub
92882c337c Merge pull request #939 from lcsnetworks/non_root_containers
Non root containers
2022-02-01 22:11:02 -05:00
wh1te909
bd41f69a1c fix async call and update nats-py 2022-01-30 22:14:47 +00:00
wh1te909
f801709587 update reqs 2022-01-28 07:30:17 +00:00
wh1te909
1cb37d29df change scripts dir 2022-01-24 05:07:08 +00:00
wh1te909
2d7db408fd update reqs 2022-01-24 04:51:44 +00:00
sadnub
ef1afc99c6 remove community script tests 2022-01-22 00:12:10 -05:00
sadnub
5682c9a5b2 fix install.sh 2022-01-22 00:07:15 -05:00
sadnub
c525b18a02 remove unused import 2022-01-22 00:00:19 -05:00
sadnub
72159cb94d fix docker entrypoint when copying community scripts 2022-01-22 00:00:19 -05:00
sadnub
39e31a1039 change git urls back 2022-01-22 00:00:19 -05:00
sadnub
734177fecc delete community scripts from repo 2022-01-22 00:00:19 -05:00
sadnub
39311099df community-script rework 2022-01-22 00:00:18 -05:00
Dan
b8653e6601 Merge pull request #951 from silversword411/develop
Adding all services to troubleshooting_server.sh and docs additions
2022-01-21 15:53:40 -08:00
sadnub
cb4b1971e6 add print output for django commands 2022-01-21 13:23:00 -05:00
silversword411
63c60ba716 docs - Adding troubleshooting notes around 2022-01-21 11:52:25 -05:00
silversword411
50435425e5 adding all services to troubleshooting script 2022-01-21 11:51:54 -05:00
Joel DeTeves
ff192f102d Ensure external Mesh link defaults to port 443 2022-01-20 14:35:59 -08:00
Joel DeTeves
99cdaa1305 Forgot to update the container ports in the docker-compose file 2022-01-20 13:54:15 -08:00
Dan
7fc897dba9 Merge pull request #948 from silversword411/develop
docs - api example fix thx bc24fl
2022-01-19 22:54:00 -08:00
silversword411
3bedd65ad8 docs - how it works agent debug 2022-01-19 22:44:25 -05:00
silversword411
a46175ce53 docs - api example fix thx bc24fl 2022-01-19 21:55:02 -05:00
Joel DeTeves
dba3bf8ce9 Clean up volume inits, fix missing init for certs volume 2022-01-18 15:10:46 -08:00
Dan
3f32234c93 Merge pull request #945 from silversword411/develop
docs - enable keys and FAQ tweaks
2022-01-17 22:23:18 -08:00
silversword411
2863e64e3b docs - faq 2022-01-18 01:09:46 -05:00
silversword411
68ec78e01c docs - FAQ tweaks 2022-01-18 00:58:54 -05:00
silversword411
3a7c506a8f docs - enabling keys 2022-01-18 00:04:20 -05:00
Dan
1ca63ed2d2 Merge pull request #944 from silversword411/develop
docs - api examples and more
2022-01-17 20:28:32 -08:00
silversword411
e9e98ebcfc docs - api examples and more 2022-01-17 23:23:42 -05:00
Dan
04de7998af Merge pull request #941 from silversword411/develop
docs - docker backup options
2022-01-17 19:57:54 -08:00
Dan
a5d02dc34a Merge pull request #940 from Yamacore/develop
fixed tooltip error
2022-01-17 19:57:32 -08:00
Dan
6181b0466e Merge pull request #938 from iamkhris/develop
Add files via upload
2022-01-17 19:56:57 -08:00
Joel DeTeves
810d8f637d Set redis container to run as non-root 2022-01-17 15:18:21 -08:00
Joel DeTeves
223b3e81d5 Make NGINX_HOST_PORT configurable for K8s load balancer compatibility 2022-01-17 12:28:33 -08:00
silversword411
3a8b5bbd3f docs - docker backup options 2022-01-17 12:53:19 -05:00
Yamacore
ecf3b33ca7 fixed tooltip error
instead of "Continue if task if an action fails"
changed to "Continue task if an action fails"
2022-01-17 16:12:37 +01:00
Joel DeTeves
006b20351e Use uniform UID (1000) + fix permission for tactical-frontend container 2022-01-17 01:12:18 -08:00
Joel DeTeves
4b577c9541 Set docker-compose to run as non-root on all applicable containers 2022-01-17 01:03:21 -08:00
Joel DeTeves
8db59458a8 Make init container volume mount paths more accurate to avoid potential conflicts 2022-01-17 00:56:52 -08:00
Joel DeTeves
7eed5f09aa Fix permissions for mongodb container 2022-01-17 00:30:39 -08:00
Joel DeTeves
a1bb265222 Make NATS & NGINX container run as same UID (1000), fix NATS supervisord permission 2022-01-17 00:08:31 -08:00
Joel DeTeves
0235f33f8b Fix incorrect ports for nginx & mesh inits 2022-01-16 23:34:54 -08:00
Joel DeTeves
3d6fca85db Fix permissions for NGINX container, remove duplicate initialization for TACTICAL_DIR 2022-01-16 23:00:52 -08:00
Joel DeTeves
4c06da0646 Fix permissions for meshcentral container 2022-01-16 22:46:49 -08:00
Christopher Phillips
f63603eb84 Add files via upload
Sends Windows 10 Toast alert when password expiration reaches 7, 3, 2, and 1 days.  Works with both local and domain accounts.  Best to setup as a scheduled task, but can also be run manually.  On 1 day alert, an "Urgent" BurntToastLogo is downloaded and used instead of the regular logo to indicate importance.  These files are hosted on a site you have access to.
2022-01-16 14:54:59 -07:00
Joel DeTeves
44418ef295 Switch tactical-meshcentral to run as non-root 2022-01-16 11:13:27 -08:00
wh1te909
2a67218a34 fix lockfile version 2022-01-16 08:02:58 +00:00
wh1te909
911586ed0b update reqs 2022-01-16 07:47:59 +00:00
Dan
9d6a6620e3 Merge pull request #935 from silversword411/develop
docs - adding to how it all works
2022-01-15 23:44:32 -08:00
Joel DeTeves
598d0acd8e Fix incorrect ports on tactical-nginx container 2022-01-15 21:25:33 -08:00
Joel DeTeves
f16ece6207 Switch tactical-nats to run as non-root 2022-01-15 21:21:58 -08:00
Joel DeTeves
9b55bc9892 Switch tactical-nginx to nginx-unprivileged container 2022-01-15 20:20:53 -08:00
Joel DeTeves
707e67918b Switch tactical-frontend to nginx-unprivileged container 2022-01-15 19:12:04 -08:00
Joel DeTeves
faac572c30 Change tactical container uwsgi ports 2022-01-15 17:38:55 -08:00
silversword411
571b37695b docs - adding to how it all works 2022-01-15 13:17:16 -05:00
wh1te909
227adc459f update demo 2022-01-15 02:39:30 +00:00
wh1te909
2ee36f1a9c fix old version refresh needed banner not displaying 2022-01-14 07:51:09 +00:00
wh1te909
31830dc67d Release 0.11.3 2022-01-14 05:35:42 +00:00
wh1te909
d0ce2a46ac bump version 2022-01-14 05:33:07 +00:00
wh1te909
7e5bc4e1ce add back debug tab 2022-01-14 05:29:24 +00:00
wh1te909
d2b6d0a0ff make field required 2022-01-14 05:29:01 +00:00
wh1te909
542b0658b8 fix reboot now/later fixes #933 2022-01-13 23:07:25 +00:00
Dan
e73c7e19b5 Merge pull request #934 from iamkhris/develop
Add files via upload
2022-01-13 14:43:57 -08:00
Dan
6a32ed7d7b Merge pull request #932 from bbrendon/patch-3
probably a copy paste error
2022-01-13 14:43:04 -08:00
Christopher Phillips
a63001f17c Add files via upload
Sends Windows 10 Toast alert when password expiration reaches 7, 3, 2, and 1 days.  Works with both local and domain accounts.  Best to setup as a scheduled task, but can also be run manually.  On 1 day alert, an "Urgent" BurntToastLogo is downloaded and used instead of the regular logo to indicate importance.  These files are hosted on a site you have access to.
2022-01-13 11:42:26 -07:00
bbrendon
4d1ad9c832 probably a copy paste error 2022-01-13 08:06:59 -08:00
wh1te909
455bf53ba6 Release 0.11.2 2022-01-13 02:48:32 +00:00
wh1te909
454aa6ccda bump version 2022-01-13 02:48:22 +00:00
wh1te909
85ffebb3fa fix post update tasks for policy tasks 2022-01-13 02:47:34 +00:00
601 changed files with 6090 additions and 26466 deletions


@@ -23,7 +23,7 @@ POSTGRES_USER=postgres
POSTGRES_PASS=postgrespass
# DEV SETTINGS
APP_PORT=80
APP_PORT=443
API_PORT=80
HTTP_PROTOCOL=https
DOCKER_NETWORK=172.21.0.0/24


@@ -1,4 +1,11 @@
FROM python:3.9.9-slim
# pulls community scripts from git repo
FROM python:3.10-slim AS GET_SCRIPTS_STAGE
RUN apt-get update && \
apt-get install -y --no-install-recommends git && \
git clone https://github.com/amidaware/community-scripts.git /community-scripts
FROM python:3.10-slim
ENV TACTICAL_DIR /opt/tactical
ENV TACTICAL_READY_FILE ${TACTICAL_DIR}/tmp/tactical.ready
@@ -10,9 +17,15 @@ ENV PYTHONUNBUFFERED=1
EXPOSE 8000 8383 8005
RUN apt-get update && \
apt-get install -y build-essential
RUN groupadd -g 1000 tactical && \
useradd -u 1000 -g 1000 tactical
# copy community scripts
COPY --from=GET_SCRIPTS_STAGE /community-scripts /community-scripts
# Copy dev python reqs
COPY .devcontainer/requirements.txt /


@@ -1,19 +0,0 @@
version: '3.4'
services:
api-dev:
image: api-dev
build:
context: .
dockerfile: ./api.dockerfile
command: ["sh", "-c", "pip install debugpy -t /tmp && python /tmp/debugpy --wait-for-client --listen 0.0.0.0:5678 manage.py runserver 0.0.0.0:8000 --nothreading --noreload"]
ports:
- 8000:8000
- 5678:5678
volumes:
- tactical-data-dev:/opt/tactical
- ..:/workspace:cached
networks:
dev:
aliases:
- tactical-backend


@@ -5,6 +5,7 @@ services:
container_name: trmm-api-dev
image: api-dev
restart: always
user: 1000:1000
build:
context: ..
dockerfile: .devcontainer/api.dockerfile
@@ -23,10 +24,9 @@ services:
app-dev:
container_name: trmm-app-dev
image: node:14-alpine
image: node:16-alpine
restart: always
command: /bin/sh -c "npm install npm@latest -g && npm install && npm run serve
-- --host 0.0.0.0 --port ${APP_PORT}"
command: /bin/sh -c "npm install npm@latest -g && npm install && npm run serve -- --host 0.0.0.0 --port ${APP_PORT}"
working_dir: /workspace/web
volumes:
- ..:/workspace:cached
@@ -42,6 +42,7 @@ services:
container_name: trmm-nats-dev
image: ${IMAGE_REPO}tactical-nats:${VERSION}
restart: always
user: 1000:1000
environment:
API_HOST: ${API_HOST}
API_PORT: ${API_PORT}
@@ -62,6 +63,7 @@ services:
container_name: trmm-meshcentral-dev
image: ${IMAGE_REPO}tactical-meshcentral:${VERSION}
restart: always
user: 1000:1000
environment:
MESH_HOST: ${MESH_HOST}
MESH_USER: ${MESH_USER}
@@ -85,6 +87,7 @@ services:
container_name: trmm-mongodb-dev
image: mongo:4.4
restart: always
user: 1000:1000
environment:
MONGO_INITDB_ROOT_USERNAME: ${MONGODB_USER}
MONGO_INITDB_ROOT_PASSWORD: ${MONGODB_PASSWORD}
@@ -116,6 +119,7 @@ services:
redis-dev:
container_name: trmm-redis-dev
restart: always
user: 1000:1000
command: redis-server --appendonly yes
image: redis:6.0-alpine
volumes:
@@ -148,6 +152,9 @@ services:
- dev
volumes:
- tactical-data-dev:/opt/tactical
- mesh-data-dev:/meshcentral-data
- redis-data-dev:/redis/data
- mongo-dev-data:/mongo/data/db
- ..:/workspace:cached
# container for celery worker service
@@ -156,6 +163,7 @@ services:
image: api-dev
command: [ "tactical-celery-dev" ]
restart: always
user: 1000:1000
networks:
- dev
volumes:
@@ -171,6 +179,7 @@ services:
image: api-dev
command: [ "tactical-celerybeat-dev" ]
restart: always
user: 1000:1000
networks:
- dev
volumes:
@@ -186,6 +195,7 @@ services:
image: api-dev
command: [ "tactical-websockets-dev" ]
restart: always
user: 1000:1000
networks:
dev:
aliases:
@@ -202,6 +212,7 @@ services:
container_name: trmm-nginx-dev
image: ${IMAGE_REPO}tactical-nginx:${VERSION}
restart: always
user: 1000:1000
environment:
APP_HOST: ${APP_HOST}
API_HOST: ${API_HOST}
@@ -215,23 +226,11 @@ services:
dev:
ipv4_address: ${DOCKER_NGINX_IP}
ports:
- "80:80"
- "443:443"
- "80:8080"
- "443:4443"
volumes:
- tactical-data-dev:/opt/tactical
mkdocs-dev:
container_name: trmm-mkdocs-dev
image: api-dev
restart: always
command: [ "tactical-mkdocs-dev" ]
ports:
- "8005:8005"
volumes:
- ..:/workspace:cached
networks:
- dev
volumes:
tactical-data-dev: null
postgres-data-dev: null


@@ -10,7 +10,7 @@ set -e
: "${POSTGRES_PASS:=tactical}"
: "${POSTGRES_DB:=tacticalrmm}"
: "${MESH_SERVICE:=tactical-meshcentral}"
: "${MESH_WS_URL:=ws://${MESH_SERVICE}:443}"
: "${MESH_WS_URL:=ws://${MESH_SERVICE}:4443}"
: "${MESH_USER:=meshcentral}"
: "${MESH_PASS:=meshcentralpass}"
: "${MESH_HOST:=tactical-meshcentral}"
@@ -41,7 +41,7 @@ function django_setup {
sleep 5
done
until (echo > /dev/tcp/"${MESH_SERVICE}"/443) &> /dev/null; do
until (echo > /dev/tcp/"${MESH_SERVICE}"/4443) &> /dev/null; do
echo "waiting for meshcentral container to be ready..."
sleep 5
done
@@ -63,7 +63,7 @@ DOCKER_BUILD = True
CERT_FILE = '${CERT_PUB_PATH}'
KEY_FILE = '${CERT_PRIV_PATH}'
SCRIPTS_DIR = '${WORKSPACE_DIR}/scripts'
SCRIPTS_DIR = '/community-scripts'
ALLOWED_HOSTS = ['${API_HOST}', '*']
@@ -103,7 +103,7 @@ EOF
"${VIRTUAL_ENV}"/bin/python manage.py reload_nats
"${VIRTUAL_ENV}"/bin/python manage.py create_natsapi_conf
"${VIRTUAL_ENV}"/bin/python manage.py create_installer_user
"${VIRTUAL_ENV}"/bin/python manage.py post_update_tasks
"${VIRTUAL_ENV}"/bin/python manage.py post_update_tasks
# create super user
@@ -117,8 +117,24 @@ if [ "$1" = 'tactical-init-dev' ]; then
test -f "${TACTICAL_READY_FILE}" && rm "${TACTICAL_READY_FILE}"
mkdir -p /meshcentral-data
mkdir -p ${TACTICAL_DIR}/tmp
mkdir -p ${TACTICAL_DIR}/certs
mkdir -p /mongo/data/db
mkdir -p /redis/data
touch /meshcentral-data/.initialized && chown -R 1000:1000 /meshcentral-data
touch ${TACTICAL_DIR}/tmp/.initialized && chown -R 1000:1000 ${TACTICAL_DIR}
touch ${TACTICAL_DIR}/certs/.initialized && chown -R 1000:1000 ${TACTICAL_DIR}/certs
touch /mongo/data/db/.initialized && chown -R 1000:1000 /mongo/data/db
touch /redis/data/.initialized && chown -R 1000:1000 /redis/data
mkdir -p ${TACTICAL_DIR}/api/tacticalrmm/private/exe
mkdir -p ${TACTICAL_DIR}/api/tacticalrmm/private/log
touch ${TACTICAL_DIR}/api/tacticalrmm/private/log/django_debug.log
# setup Python virtual env and install dependencies
! test -e "${VIRTUAL_ENV}" && python -m venv ${VIRTUAL_ENV}
"${VIRTUAL_ENV}"/bin/python -m pip install --upgrade pip
"${VIRTUAL_ENV}"/bin/pip install --no-cache-dir setuptools wheel
"${VIRTUAL_ENV}"/bin/pip install --no-cache-dir -r /requirements.txt
django_setup
@@ -161,8 +177,3 @@ if [ "$1" = 'tactical-websockets-dev' ]; then
check_tactical_ready
"${VIRTUAL_ENV}"/bin/daphne tacticalrmm.asgi:application --port 8383 -b 0.0.0.0
fi
if [ "$1" = 'tactical-mkdocs-dev' ]; then
cd "${WORKSPACE_DIR}/docs"
"${VIRTUAL_ENV}"/bin/mkdocs serve
fi


@@ -1,39 +1,36 @@
# To ensure app dependencies are ported from your virtual environment/host machine into your container, run 'pip freeze > requirements.txt' in the terminal to overwrite this file
asyncio-nats-client
celery
channels
channels_redis
django-ipware
Django==3.2.10
django-cors-headers
django-rest-knox
djangorestframework
loguru
msgpack
psycopg2-binary
pycparser
pycryptodome
pyotp
pyparsing
pytz
qrcode
redis
twilio
packaging
validators
websockets
black
Werkzeug
django-extensions
coverage
coveralls
model_bakery
mkdocs
mkdocs-material
pymdown-extensions
Pygments
mypy
pysnooper
isort
drf_spectacular
pandas
asgiref==3.5.0
celery==5.2.3
channels==3.0.4
channels_redis==3.3.1
daphne==3.0.2
Django==3.2.12
django-cors-headers==3.11.0
django-ipware==4.0.2
django-rest-knox==4.2.0
djangorestframework==3.13.1
future==0.18.2
msgpack==1.0.3
nats-py==2.0.0
packaging==21.3
psycopg2-binary==2.9.3
pycryptodome==3.14.1
pyotp==2.6.0
pytz==2021.3
qrcode==7.3.1
redis==4.1.3
requests==2.27.1
twilio==7.6.0
urllib3==1.26.8
validators==0.18.2
websockets==10.1
drf_spectacular==0.21.2
# dev
black==22.1.0
Werkzeug==2.0.2
django-extensions==3.1.5
Pygments==2.11.2
isort==5.10.1
mypy==0.931
types-pytz==2021.3.4

.github/workflows/ci-tests.yml (new file)

@@ -0,0 +1,66 @@
name: Tests CI
on:
push:
branches:
- "*"
pull_request:
branches:
- "*"
jobs:
test:
runs-on: self-hosted
steps:
- uses: actions/checkout@v2
- name: Setup virtual env and install requirements
run: |
sudo -u postgres psql -c 'DROP DATABASE IF EXISTS pipeline'
sudo -u postgres psql -c 'DROP DATABASE IF EXISTS test_pipeline'
sudo -u postgres psql -c 'CREATE DATABASE pipeline'
sudo -u postgres psql -c "SET client_encoding = 'UTF8'" pipeline
pwd
rm -rf /actions-runner/_work/trmm-actions/trmm-actions/api/env
cd api
python3.10 -m venv env
source env/bin/activate
cd tacticalrmm
python --version
SETTINGS_FILE="tacticalrmm/settings.py"
SETUPTOOLS_VER=$(grep "^SETUPTOOLS_VER" "$SETTINGS_FILE" | awk -F'[= "]' '{print $5}')
WHEEL_VER=$(grep "^WHEEL_VER" "$SETTINGS_FILE" | awk -F'[= "]' '{print $5}')
pip install --upgrade pip
pip install setuptools==${SETUPTOOLS_VER} wheel==${WHEEL_VER}
pip install -r requirements.txt -r requirements-test.txt
- name: Run django tests
env:
GHACTIONS: "yes"
run: |
cd api/tacticalrmm
source ../env/bin/activate
rm -f .coverage coverage.lcov
coverage run --concurrency=multiprocessing manage.py test -v 2 --parallel
coverage combine
coverage lcov
if [ $? -ne 0 ]; then
exit 1
fi
- name: Codestyle black
run: |
cd api
source env/bin/activate
black --exclude migrations/ --check tacticalrmm
if [ $? -ne 0 ]; then
exit 1
fi
- name: Coveralls
uses: coverallsapp/github-action@master
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
path-to-lcov: ./api/tacticalrmm/coverage.lcov
base-path: ./api/tacticalrmm


@@ -1,22 +0,0 @@
name: Deploy Docs
on:
push:
branches:
- master
defaults:
run:
working-directory: docs
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.x
- run: pip install --upgrade pip
- run: pip install --upgrade setuptools wheel
- run: pip install mkdocs mkdocs-material pymdown-extensions
- run: mkdocs gh-deploy --force

.gitignore

@@ -50,4 +50,5 @@ docs/site/
reset_db.sh
run_go_cmd.py
nats-api.conf
ignore/
coverage.lcov


@@ -1,5 +1,5 @@
{
"python.pythonPath": "api/tacticalrmm/env/bin/python",
"python.defaultInterpreterPath": "api/tacticalrmm/env/bin/python",
"python.languageServer": "Pylance",
"python.analysis.extraPaths": [
"api/tacticalrmm",
@@ -9,8 +9,6 @@
"reportUnusedImport": "error",
"reportDuplicateImport": "error",
},
"python.analysis.memory.keepLibraryAst": true,
"python.linting.mypyEnabled": true,
"python.analysis.typeCheckingMode": "basic",
"python.formatting.provider": "black",
"editor.formatOnSave": true,

LICENSE (deleted)

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2019-present wh1te909
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

LICENSE.md (new file)

@@ -0,0 +1,74 @@
### Tactical RMM License Version 1.0
Text of license:&emsp;&emsp;&emsp;Copyright © 2022 AmidaWare LLC. All rights reserved.<br>
&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&nbsp;Amending the text of this license is not permitted.
Trade Mark:&emsp;&emsp;&emsp;&emsp;"Tactical RMM" is a trade mark of AmidaWare LLC.
Licensor:&emsp;&emsp;&emsp;&emsp;&emsp;&nbsp;&nbsp;AmidaWare LLC of 1968 S Coast Hwy PMB 3847 Laguna Beach, CA, USA.
Licensed Software:&emsp;&nbsp;The software known as Tactical RMM Version v0.12.0 (and all subsequent releases and versions) and the Tactical RMM Agent v2.0.0 (and all subsequent releases and versions).
### 1. Preamble
The Licensed Software is designed to facilitate the remote monitoring and management (RMM) of networks, systems, servers, computers and other devices. The Licensed Software is made available primarily for use by organisations and managed service providers for monitoring and management purposes.
The Tactical RMM License is not an open-source software license. This license contains certain restrictions on the use of the Licensed Software. For example the functionality of the Licensed Software may not be made available as part of a SaaS (Software-as-a-Service) service or product to provide a commercial or for-profit service without the express prior permission of the Licensor.
### 2. License Grant
Permission is hereby granted, free of charge, on a non-exclusive basis, to copy, modify, create derivative works and use the Licensed Software in source and binary forms subject to the following terms and conditions. No additional rights will be implied under this license.
* The hosting and use of the Licensed Software to monitor and manage in-house networks/systems and/or customer networks/systems is permitted.
This license does not allow the functionality of the Licensed Software (whether in whole or in part) or a modified version of the Licensed Software or a derivative work to be used or otherwise made available as part of any other commercial or for-profit service, including, without limitation, any of the following:
* a service allowing third parties to interact remotely through a computer network;
* as part of a SaaS service or product;
* as part of the provision of a managed hosting service or product;
* the offering of installation and/or configuration services;
* the offer for sale, distribution or sale of any service or product (whether or not branded as Tactical RMM).
The prior written approval of AmidaWare LLC must be obtained for all commercial use and/or for-profit service use of the (i) Licensed Software (whether in whole or in part), (ii) a modified version of the Licensed Software and/or (iii) a derivative work.
The terms of this license apply to all copies of the Licensed Software (including modified versions) and derivative works.
All use of the Licensed Software must immediately cease if use breaches the terms of this license.
### 3. Derivative Works
If a derivative work is created which is based on or otherwise incorporates all or any part of the Licensed Software, and the derivative work is made available to any other person, the complete corresponding machine readable source code (including all changes made to the Licensed Software) must accompany the derivative work and be made publicly available online.
### 4. Copyright Notice
The following copyright notice shall be included in all copies of the Licensed Software:
&emsp;&emsp;&emsp;Copyright © 2022 AmidaWare LLC.
&emsp;&emsp;&emsp;Licensed under the Tactical RMM License Version 1.0 (the “License”).<br>
&emsp;&emsp;&emsp;You may only use the Licensed Software in accordance with the License.<br>
&emsp;&emsp;&emsp;A copy of the License is available at: https://license.tacticalrmm.com
### 5. Disclaimer of Warranty
THE LICENSED SOFTWARE IS PROVIDED "AS IS". TO THE FULLEST EXTENT PERMISSIBLE AT LAW ALL CONDITIONS, WARRANTIES OR OTHER TERMS OF ANY KIND WHICH MIGHT HAVE EFFECT OR BE IMPLIED OR INCORPORATED, WHETHER BY STATUTE, COMMON LAW OR OTHERWISE ARE HEREBY EXCLUDED, INCLUDING THE CONDITIONS, WARRANTIES OR OTHER TERMS AS TO SATISFACTORY QUALITY AND/OR MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, THE USE OF REASONABLE SKILL AND CARE AND NON-INFRINGEMENT.
### 6. Limits of Liability
THE FOLLOWING EXCLUSIONS SHALL APPLY TO THE FULLEST EXTENT PERMISSIBLE AT LAW. NEITHER THE AUTHORS NOR THE COPYRIGHT HOLDERS SHALL IN ANY CIRCUMSTANCES HAVE ANY LIABILITY FOR ANY CLAIM, LOSSES, DAMAGES OR OTHER LIABILITY, WHETHER THE SAME ARE SUFFERED DIRECTLY OR INDIRECTLY OR ARE IMMEDIATE OR CONSEQUENTIAL, AND WHETHER THE SAME ARISE IN CONTRACT, TORT OR DELICT (INCLUDING NEGLIGENCE) OR OTHERWISE HOWSOEVER ARISING FROM, OUT OF OR IN CONNECTION WITH THE LICENSED SOFTWARE OR THE USE OR INABILITY TO USE THE LICENSED SOFTWARE OR OTHER DEALINGS IN THE LICENSED SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH LOSS OR DAMAGE. THE FOREGOING EXCLUSIONS SHALL INCLUDE, WITHOUT LIMITATION, LIABILITY FOR ANY LOSSES OR DAMAGES WHICH FALL WITHIN ANY OF THE FOLLOWING CATEGORIES: SPECIAL, EXEMPLARY, OR INCIDENTAL LOSS OR DAMAGE, LOSS OF PROFITS, LOSS OF ANTICIPATED SAVINGS, LOSS OF BUSINESS OPPORTUNITY, LOSS OF GOODWILL, AND LOSS OR CORRUPTION OF DATA.
### 7. Termination
This license shall terminate with immediate effect if there is a material breach of any of its terms.
### 8. No partnership, agency or joint venture
Nothing in this license agreement is intended to, or shall be deemed to, establish any partnership or joint venture or any relationship of agency between AmidaWare LLC and any other person.
### 9. No endorsement
The names of the authors and/or the copyright holders must not be used to promote or endorse any products or services which are in any way derived from the Licensed Software without prior written consent.
### 10. Trademarks
No permission is granted to use the trademark “Tactical RMM” or any other trade name, trademark, service mark or product name of AmidaWare LLC except to the extent necessary to comply with the notice requirements in Section 4 (Copyright Notice).
### 11. Entire agreement
This license contains the whole agreement relating to its subject matter.
### 12. Severance
If any provision or part-provision of this license is or becomes invalid, illegal or unenforceable, it shall be deemed deleted, but that shall not affect the validity and enforceability of the rest of this license.
### 13. Acceptance of these terms
The terms and conditions of this license are accepted by copying, downloading, installing, redistributing, or otherwise using the Licensed Software.


@@ -1,19 +1,18 @@
# Tactical RMM
[![Build Status](https://dev.azure.com/dcparsi/Tactical%20RMM/_apis/build/status/wh1te909.tacticalrmm?branchName=develop)](https://dev.azure.com/dcparsi/Tactical%20RMM/_build/latest?definitionId=4&branchName=develop)
[![Coverage Status](https://coveralls.io/repos/github/wh1te909/tacticalrmm/badge.png?branch=develop&kill_cache=1)](https://coveralls.io/github/wh1te909/tacticalrmm?branch=develop)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
![CI Tests](https://github.com/amidaware/tacticalrmm/actions/workflows/ci-tests.yml/badge.svg?branch=develop)
[![Coverage Status](https://coveralls.io/repos/github/amidaware/tacticalrmm/badge.svg?branch=develop)](https://coveralls.io/github/amidaware/tacticalrmm?branch=develop)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/python/black)
Tactical RMM is a remote monitoring & management tool for Windows computers, built with Django and Vue.\
It uses an [agent](https://github.com/wh1te909/rmmagent) written in golang and integrates with [MeshCentral](https://github.com/Ylianst/MeshCentral)
Tactical RMM is a remote monitoring & management tool, built with Django and Vue.\
It uses an [agent](https://github.com/amidaware/rmmagent) written in golang and integrates with [MeshCentral](https://github.com/Ylianst/MeshCentral)
# [LIVE DEMO](https://rmm.tacticalrmm.io/)
Demo database resets every hour. A lot of features are disabled for obvious reasons due to the nature of this app.
### [Discord Chat](https://discord.gg/upGTkWp)
### [Documentation](https://wh1te909.github.io/tacticalrmm/)
### [Documentation](https://docs.tacticalrmm.com)
## Features
@@ -29,10 +28,13 @@ Demo database resets every hour. A lot of features are disabled for obvious reas
- Remote software installation via chocolatey
- Software and hardware inventory
## Windows versions supported
## Windows agent versions supported
- Windows 7, 8.1, 10, Server 2008R2, 2012R2, 2016, 2019
- Windows 7, 8.1, 10, 11, Server 2008R2, 2012R2, 2016, 2019, 2022
## Linux agent versions supported
- Any distro with systemd
## Installation / Backup / Restore / Usage
### Refer to the [documentation](https://wh1te909.github.io/tacticalrmm/)
### Refer to the [documentation](https://docs.tacticalrmm.com)


@@ -2,18 +2,11 @@
## Supported Versions
Use this section to tell people about which versions of your project are
currently being supported with security updates.
| Version | Supported |
| ------- | ------------------ |
| 0.10.4 | :white_check_mark: |
| < 0.10.4| :x: |
| 0.12.2 | :white_check_mark: |
| < 0.12.2 | :x: |
## Reporting a Vulnerability
Use this section to tell people how to report a vulnerability.
Tell them where to go, how often they can expect to get an update on a
reported vulnerability, what to expect if the vulnerability is accepted or
declined, etc.
https://docs.tacticalrmm.com/security


@@ -1,7 +1,7 @@
from django.contrib import admin
from rest_framework.authtoken.admin import TokenAdmin
from .models import User, Role
from .models import Role, User
admin.site.register(User)
TokenAdmin.raw_id_fields = ("user",)


@@ -1,14 +1,16 @@
import uuid
from django.core.management.base import BaseCommand
from accounts.models import User
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Creates the installer user"
def handle(self, *args, **kwargs):
self.stdout.write("Checking if installer user has been created...")
if User.objects.filter(is_installer_user=True).exists():
self.stdout.write("Installer user already exists")
return
User.objects.create_user( # type: ignore
@@ -17,3 +19,4 @@ class Command(BaseCommand):
password=User.objects.make_random_password(60), # type: ignore
block_dashboard_login=True,
)
self.stdout.write("Installer user has been created")


@@ -1,9 +1,8 @@
import subprocess
import pyotp
from django.core.management.base import BaseCommand
from accounts.models import User
from django.core.management.base import BaseCommand
class Command(BaseCommand):


@@ -2,9 +2,8 @@ import os
import subprocess
import pyotp
from django.core.management.base import BaseCommand
from accounts.models import User
from django.core.management.base import BaseCommand
class Command(BaseCommand):


@@ -1,5 +1,5 @@
from django.core.management.base import BaseCommand
from accounts.models import User
from django.core.management.base import BaseCommand
class Command(BaseCommand):


@@ -1,7 +1,7 @@
# Generated by Django 3.2.1 on 2021-05-11 02:33
from django.db import migrations, models
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):


@@ -1,7 +1,7 @@
# Generated by Django 3.2.6 on 2021-09-03 00:54
from django.db import migrations, models
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):


@@ -1,7 +1,7 @@
# Generated by Django 3.2.6 on 2021-10-10 02:49
from django.db import migrations, models
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):


@@ -1,7 +1,6 @@
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.db.models.fields import CharField, DateTimeField
from logs.models import BaseAuditModel
AGENT_DBLCLICK_CHOICES = [


@@ -1,11 +1,11 @@
import pyotp
from rest_framework.serializers import (
ModelSerializer,
SerializerMethodField,
ReadOnlyField,
SerializerMethodField,
)
from .models import APIKey, User, Role
from .models import APIKey, Role, User
class UserUISerializer(ModelSerializer):


@@ -1,11 +1,11 @@
from unittest.mock import patch
from accounts.models import APIKey, User
from accounts.serializers import APIKeySerializer
from django.test import override_settings
from model_bakery import baker, seq
from accounts.models import User, APIKey
from tacticalrmm.test import TacticalTestCase
from accounts.serializers import APIKeySerializer
from tacticalrmm.test import TacticalTestCase
class TestAccounts(TacticalTestCase):


@@ -10,10 +10,11 @@ from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from tacticalrmm.utils import notify_error
from .models import APIKey, Role, User
from .permissions import APIKeyPerms, AccountsPerms, RolesPerms
from .permissions import AccountsPerms, APIKeyPerms, RolesPerms
from .serializers import (
APIKeySerializer,
RoleSerializer,


@@ -1,9 +1,8 @@
from django.contrib import admin
from .models import Agent, AgentCustomField, Note, RecoveryAction, AgentHistory
from .models import Agent, AgentCustomField, AgentHistory, Note
admin.site.register(Agent)
admin.site.register(RecoveryAction)
admin.site.register(Note)
admin.site.register(AgentCustomField)
admin.site.register(AgentHistory)


@@ -32,6 +32,7 @@ agent = Recipe(
monitoring_type=cycle(["workstation", "server"]),
agent_id=seq(generate_agent_id("DESKTOP-TEST123")),
last_seen=djangotime.now() - djangotime.timedelta(days=5),
plat="windows",
)
server_agent = agent.extend(


@@ -1,7 +1,6 @@
from django.core.management.base import BaseCommand
from agents.models import Agent
from clients.models import Client, Site
from django.core.management.base import BaseCommand
class Command(BaseCommand):


@@ -1,11 +1,12 @@
import asyncio
from agents.models import Agent
from django.core.management.base import BaseCommand
from django.utils import timezone as djangotime
from packaging import version as pyver
from agents.models import Agent
from tacticalrmm.utils import AGENT_DEFER, reload_nats
from tacticalrmm.constants import AGENT_DEFER
from tacticalrmm.utils import reload_nats
class Command(BaseCommand):


@@ -1,9 +1,10 @@
# import datetime as dt
import random
from agents.models import Agent
from core.tasks import cache_db_fields_task
from django.core.management.base import BaseCommand
from django.utils import timezone as djangotime
from agents.models import Agent
class Command(BaseCommand):
@@ -34,3 +35,5 @@ class Command(BaseCommand):
for agent in agents:
agent.last_seen = random.choice(random_dates)
agent.save(update_fields=["last_seen"])
cache_db_fields_task()


@@ -1,29 +1,29 @@
import datetime as dt
import json
import random
import string
import datetime as dt
from django.core.management.base import BaseCommand
from django.utils import timezone as djangotime
from django.conf import settings
from accounts.models import User
from agents.models import Agent, AgentHistory
from automation.models import Policy
from autotasks.models import AutomatedTask
from checks.models import Check, CheckHistory
from clients.models import Client, Site
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.utils import timezone as djangotime
from logs.models import AuditLog, PendingAction
from scripts.models import Script
from software.models import InstalledSoftware
from winupdate.models import WinUpdate, WinUpdatePolicy
from checks.models import Check, CheckHistory
from scripts.models import Script
from autotasks.models import AutomatedTask
from automation.models import Policy
from logs.models import PendingAction, AuditLog
from tacticalrmm.demo_data import (
disks,
temp_dir_stdout,
spooler_stdout,
ping_fail_output,
ping_success_output,
spooler_stdout,
temp_dir_stdout,
)
AGENTS_TO_GENERATE = 250
@@ -63,7 +63,7 @@ class Command(BaseCommand):
AuditLog.objects.all().delete()
PendingAction.objects.all().delete()
Script.load_community_scripts()
call_command("load_community_scripts")
# policies
check_policy = Policy()
@@ -167,7 +167,6 @@ class Command(BaseCommand):
public_ips = ["65.234.22.4", "74.123.43.5", "44.21.134.45"]
total_rams = [4, 8, 16, 32, 64, 128]
used_rams = [10, 13, 60, 25, 76, 34, 56, 34, 39]
now = dt.datetime.now()
@@ -284,7 +283,6 @@ class Command(BaseCommand):
agent.hostname = random.choice(hostnames)
agent.version = settings.LATEST_AGENT_VER
agent.salt_ver = "1.1.0"
agent.site = Site.objects.get(name=site)
agent.agent_id = self.rand_string(25)
agent.description = random.choice(descriptions)
@@ -294,10 +292,8 @@ class Command(BaseCommand):
agent.plat = "windows"
agent.plat_release = "windows-2019Server"
agent.total_ram = random.choice(total_rams)
agent.used_ram = random.choice(used_rams)
agent.boot_time = random.choice(boot_times)
agent.logged_in_username = random.choice(user_names)
agent.antivirus = "windowsdefender"
agent.mesh_node_id = (
"3UiLhe420@kaVQ0rswzBeonW$WY0xrFFUDBQlcYdXoriLXzvPmBpMrV99vRHXFlb"
)
@@ -307,7 +303,6 @@ class Command(BaseCommand):
agent.wmi_detail = random.choice(wmi_details)
agent.services = services
agent.disks = random.choice(disks)
agent.salt_id = "not-used"
agent.save()
@@ -328,9 +323,7 @@ class Command(BaseCommand):
agent=agent,
guid=i,
kb=windows_updates[i]["KBs"][0],
mandatory=windows_updates[i]["Mandatory"],
title=windows_updates[i]["Title"],
needs_reboot=windows_updates[i]["NeedsReboot"],
installed=windows_updates[i]["Installed"],
downloaded=windows_updates[i]["Downloaded"],
description=windows_updates[i]["Description"],
@@ -510,7 +503,16 @@ class Command(BaseCommand):
nla_task = AutomatedTask()
nla_task.agent = agent
nla_task.script = restart_nla
actions = [
{
"name": restart_nla.name,
"type": "script",
"script": restart_nla.pk,
"timeout": 90,
"script_args": [],
}
]
nla_task.actions = actions
nla_task.assigned_check = check6
nla_task.name = "Restart NLA"
nla_task.task_type = "checkfailure"
@@ -524,11 +526,27 @@ class Command(BaseCommand):
spool_task = AutomatedTask()
spool_task.agent = agent
spool_task.script = clear_spool
actions = [
{
"name": clear_spool.name,
"type": "script",
"script": clear_spool.pk,
"timeout": 90,
"script_args": [],
}
]
spool_task.actions = actions
spool_task.name = "Clear the print spooler"
spool_task.task_type = "scheduled"
spool_task.run_time_bit_weekdays = 127
spool_task.run_time_minute = "04:45"
spool_task.task_type = "daily"
spool_task.run_time_date = djangotime.now() + djangotime.timedelta(
minutes=10
)
spool_task.expire_date = djangotime.now() + djangotime.timedelta(days=753)
spool_task.daily_interval = 1
spool_task.weekly_interval = 1
spool_task.task_repetition_duration = "2h"
spool_task.task_repetition_interval = "25m"
spool_task.random_task_delay = "3m"
spool_task.win_task_name = "demospool123"
spool_task.last_run = djangotime.now()
spool_task.retcode = 0
@@ -539,7 +557,16 @@ class Command(BaseCommand):
tmp_dir_task = AutomatedTask()
tmp_dir_task.agent = agent
tmp_dir_task.name = "show temp dir files"
tmp_dir_task.script = show_tmp_dir_script
actions = [
{
"name": show_tmp_dir_script.name,
"type": "script",
"script": show_tmp_dir_script.pk,
"timeout": 90,
"script_args": [],
}
]
tmp_dir_task.actions = actions
tmp_dir_task.task_type = "manual"
tmp_dir_task.win_task_name = "demotemp"
tmp_dir_task.last_run = djangotime.now()
@@ -665,4 +692,5 @@ class Command(BaseCommand):
self.stdout.write(self.style.SUCCESS(f"Added agent # {count_agents + 1}"))
call_command("load_demo_scripts")
self.stdout.write("done")


@@ -1,16 +0,0 @@
from django.core.management.base import BaseCommand
from agents.models import Agent
class Command(BaseCommand):
help = "Changes existing agents salt_id from a property to a model field"
def handle(self, *args, **kwargs):
agents = Agent.objects.filter(salt_id=None)
for agent in agents:
self.stdout.write(
self.style.SUCCESS(f"Setting salt_id on {agent.hostname}")
)
agent.salt_id = f"{agent.hostname}-{agent.pk}"
agent.save(update_fields=["salt_id"])


@@ -1,8 +1,7 @@
from agents.models import Agent
from django.conf import settings
from django.core.management.base import BaseCommand
from agents.models import Agent
class Command(BaseCommand):
help = "Shows online agents that are not on the latest version"


@@ -1,11 +1,11 @@
from agents.models import Agent
from agents.tasks import send_agent_update_task
from core.models import CoreSettings
from django.conf import settings
from django.core.management.base import BaseCommand
from packaging import version as pyver
from agents.models import Agent
from core.models import CoreSettings
from agents.tasks import send_agent_update_task
from tacticalrmm.utils import AGENT_DEFER
from tacticalrmm.constants import AGENT_DEFER
class Command(BaseCommand):


@@ -1,7 +1,7 @@
# Generated by Django 3.2.1 on 2021-07-06 02:01
from django.db import migrations, models
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):


@@ -1,7 +1,7 @@
# Generated by Django 3.2.5 on 2021-07-14 07:38
from django.db import migrations, models
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):


@@ -0,0 +1,25 @@
# Generated by Django 3.2.12 on 2022-02-27 05:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agents', '0042_alter_agent_time_zone'),
]
operations = [
migrations.RemoveField(
model_name='agent',
name='antivirus',
),
migrations.RemoveField(
model_name='agent',
name='local_ip',
),
migrations.RemoveField(
model_name='agent',
name='used_ram',
),
]


@@ -0,0 +1,22 @@
# Generated by Django 3.2.12 on 2022-02-27 07:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agents', '0043_auto_20220227_0554'),
]
operations = [
migrations.RenameField(
model_name='agent',
old_name='salt_id',
new_name='goarch',
),
migrations.RemoveField(
model_name='agent',
name='salt_ver',
),
]


@@ -0,0 +1,16 @@
# Generated by Django 3.2.12 on 2022-03-12 02:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agents', '0044_auto_20220227_0717'),
]
operations = [
migrations.DeleteModel(
name='RecoveryAction',
),
]


@@ -0,0 +1,18 @@
# Generated by Django 3.2.12 on 2022-03-17 17:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('agents', '0045_delete_recoveryaction'),
]
operations = [
migrations.AlterField(
model_name='agenthistory',
name='command',
field=models.TextField(blank=True, default='', null=True),
),
]


@@ -1,26 +1,21 @@
import asyncio
import base64
import re
import time
from collections import Counter
from distutils.version import LooseVersion
from typing import Any
import msgpack
import nats
import validators
from Crypto.Cipher import AES
from Crypto.Hash import SHA3_384
from Crypto.Random import get_random_bytes
from Crypto.Util.Padding import pad
from asgiref.sync import sync_to_async
from core.models import TZ_CHOICES, CoreSettings
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.utils import timezone as djangotime
from nats.aio.client import Client as NATS
from nats.aio.errors import ErrTimeout
from core.models import TZ_CHOICES, CoreSettings
from logs.models import BaseAuditModel, DebugLog
from nats.errors import TimeoutError
from tacticalrmm.models import PermissionQuerySet
@@ -28,24 +23,20 @@ class Agent(BaseAuditModel):
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
salt_ver = models.CharField(default="1.0.3", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat = models.CharField(max_length=255, null=True, blank=True)
goarch = models.CharField(max_length=255, null=True, blank=True)
plat_release = models.CharField(max_length=255, null=True, blank=True)
hostname = models.CharField(max_length=255)
salt_id = models.CharField(null=True, blank=True, max_length=255)
local_ip = models.TextField(null=True, blank=True) # deprecated
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
used_ram = models.IntegerField(null=True, blank=True) # deprecated
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
antivirus = models.CharField(default="n/a", max_length=255) # deprecated
monitoring_type = models.CharField(max_length=30)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
@@ -89,8 +80,6 @@ class Agent(BaseAuditModel):
)
def save(self, *args, **kwargs):
from automation.tasks import generate_agent_checks_task
# get old agent if exists
old_agent = Agent.objects.get(pk=self.pk) if self.pk else None
super(Agent, self).save(old_model=old_agent, *args, **kwargs)
@@ -106,6 +95,8 @@ class Agent(BaseAuditModel):
or (old_agent.monitoring_type != self.monitoring_type)
or (old_agent.block_policy_inheritance != self.block_policy_inheritance)
):
from automation.tasks import generate_agent_checks_task
generate_agent_checks_task.delay(agents=[self.pk], create_tasks=True)
def __str__(self):
@@ -125,8 +116,15 @@ class Agent(BaseAuditModel):
return CoreSettings.objects.first().default_time_zone # type: ignore
@property
def is_posix(self):
return self.plat == "linux" or self.plat == "darwin"
@property
def arch(self):
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
@@ -194,6 +192,12 @@ class Agent(BaseAuditModel):
@property
def cpu_model(self):
if self.is_posix:
try:
return self.wmi_detail["cpus"]
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
@@ -205,6 +209,15 @@ class Agent(BaseAuditModel):
@property
def graphics(self):
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
@@ -226,6 +239,12 @@ class Agent(BaseAuditModel):
@property
def local_ips(self):
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
@@ -252,6 +271,12 @@ class Agent(BaseAuditModel):
@property
def make_model(self):
if self.is_posix:
try:
return self.wmi_detail["make_model"]
except:
return "error getting make/model"
try:
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
@@ -282,6 +307,12 @@ class Agent(BaseAuditModel):
@property
def physical_disks(self):
if self.is_posix:
try:
return self.wmi_detail["disks"]
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
@@ -303,6 +334,37 @@ class Agent(BaseAuditModel):
except:
return ["unknown disk"]
def is_supported_script(self, platforms: list) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_agent_policies(self):
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
CoreSettings.objects.first(), f"{self.monitoring_type}_policy", None
)
return {
"agent_policy": self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None,
"site_policy": site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None,
"client_policy": client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None,
"default_policy": default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None,
}
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
@@ -419,86 +481,20 @@ class Agent(BaseAuditModel):
def get_patch_policy(self):
# check if site has a patch policy and if so use it
site = self.site
core_settings = CoreSettings.objects.first()
patch_policy = None
agent_policy = self.winupdatepolicy.get() # type: ignore
agent_policy = self.winupdatepolicy.first() # type: ignore
if self.monitoring_type == "server":
# check agent policy first which should override client or site policy
if self.policy and self.policy.winupdatepolicy.exists():
patch_policy = self.policy.winupdatepolicy.get()
policies = self.get_agent_policies()
# check site policy if agent policy doesn't have one
elif site.server_policy and site.server_policy.winupdatepolicy.exists():
# make sure agent isn't blocking policy inheritance
if not self.block_policy_inheritance:
patch_policy = site.server_policy.winupdatepolicy.get()
# if site doesn't have a patch policy check the client
elif (
site.client.server_policy
and site.client.server_policy.winupdatepolicy.exists()
processed_policies = list()
for _, policy in policies.items():
if (
policy
and policy.active
and policy.pk not in processed_policies
and policy.winupdatepolicy.exists()
):
# make sure agent and site are not blocking inheritance
if (
not self.block_policy_inheritance
and not site.block_policy_inheritance
):
patch_policy = site.client.server_policy.winupdatepolicy.get()
# if patch policy still doesn't exist check default policy
elif (
core_settings.server_policy # type: ignore
and core_settings.server_policy.winupdatepolicy.exists() # type: ignore
):
# make sure agent site and client are not blocking inheritance
if (
not self.block_policy_inheritance
and not site.block_policy_inheritance
and not site.client.block_policy_inheritance
):
patch_policy = core_settings.server_policy.winupdatepolicy.get() # type: ignore
elif self.monitoring_type == "workstation":
# check agent policy first which should override client or site policy
if self.policy and self.policy.winupdatepolicy.exists():
patch_policy = self.policy.winupdatepolicy.get()
elif (
site.workstation_policy
and site.workstation_policy.winupdatepolicy.exists()
):
# make sure agent isn't blocking policy inheritance
if not self.block_policy_inheritance:
patch_policy = site.workstation_policy.winupdatepolicy.get()
# if site doesn't have a patch policy check the client
elif (
site.client.workstation_policy
and site.client.workstation_policy.winupdatepolicy.exists()
):
# make sure agent and site are not blocking inheritance
if (
not self.block_policy_inheritance
and not site.block_policy_inheritance
):
patch_policy = site.client.workstation_policy.winupdatepolicy.get()
# if patch policy still doesn't exist check default policy
elif (
core_settings.workstation_policy # type: ignore
and core_settings.workstation_policy.winupdatepolicy.exists() # type: ignore
):
# make sure agent site and client are not blocking inheritance
if (
not self.block_policy_inheritance
and not site.block_policy_inheritance
and not site.client.block_policy_inheritance
):
patch_policy = (
core_settings.workstation_policy.winupdatepolicy.get() # type: ignore
)
patch_policy = policy.winupdatepolicy.first()
# if policy still doesn't exist return the agent patch policy
if not patch_policy:
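After this refactor, get_patch_policy simply walks the ordered mapping from get_agent_policies and uses the first active, not-yet-seen policy that actually carries a Windows update policy; the per-level block_policy_inheritance branching that used to live here is gone because the mapping already encodes it. A small first-match sketch of that loop (plain stand-in objects, not the Django models):

from dataclasses import dataclass

@dataclass
class P:
    pk: int
    active: bool
    winupdate: str | None = None

def first_patch_policy(ordered_policies):
    # ordered_policies: (name, policy) pairs in agent -> site -> client -> default order
    seen = set()
    for _, policy in ordered_policies:
        if policy is None or not policy.active or policy.pk in seen:
            continue
        seen.add(policy.pk)
        if policy.winupdate:
            return policy.winupdate  # first match wins, like winupdatepolicy.first()
    return None

order = [("agent", None), ("site", P(1, True)), ("client", P(2, True, "client-wup"))]
assert first_patch_policy(order) == "client-wup"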
@@ -545,137 +541,55 @@ class Agent(BaseAuditModel):
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self):
site = self.site
client = self.client
core = CoreSettings.objects.first()
policies = self.get_agent_policies()
templates = list()
# check if alert template is on a policy assigned to agent
if (
self.policy
and self.policy.alert_template
and self.policy.alert_template.is_active
):
templates.append(self.policy.alert_template)
# check if policy with alert template is assigned to the site
if (
self.monitoring_type == "server"
and site.server_policy
and site.server_policy.alert_template
and site.server_policy.alert_template.is_active
and not self.block_policy_inheritance
):
templates.append(site.server_policy.alert_template)
if (
self.monitoring_type == "workstation"
and site.workstation_policy
and site.workstation_policy.alert_template
and site.workstation_policy.alert_template.is_active
and not self.block_policy_inheritance
):
templates.append(site.workstation_policy.alert_template)
# check if alert template is assigned to site
if site.alert_template and site.alert_template.is_active:
templates.append(site.alert_template)
# check if policy with alert template is assigned to the client
if (
self.monitoring_type == "server"
and client.server_policy
and client.server_policy.alert_template
and client.server_policy.alert_template.is_active
and not self.block_policy_inheritance
and not site.block_policy_inheritance
):
templates.append(client.server_policy.alert_template)
if (
self.monitoring_type == "workstation"
and client.workstation_policy
and client.workstation_policy.alert_template
and client.workstation_policy.alert_template.is_active
and not self.block_policy_inheritance
and not site.block_policy_inheritance
):
templates.append(client.workstation_policy.alert_template)
# check if alert template is on client and return
if (
client.alert_template
and client.alert_template.is_active
and not self.block_policy_inheritance
and not site.block_policy_inheritance
):
templates.append(client.alert_template)
# check if alert template is applied globally and return
if (
core.alert_template # type: ignore
and core.alert_template.is_active # type: ignore
and not self.block_policy_inheritance
and not site.block_policy_inheritance
and not client.block_policy_inheritance
):
templates.append(core.alert_template) # type: ignore
# check if a core (global) policy with an alert template applies for servers or workstations
if (
self.monitoring_type == "server"
and core.server_policy # type: ignore
and core.server_policy.alert_template # type: ignore
and core.server_policy.alert_template.is_active # type: ignore
and not self.block_policy_inheritance
and not site.block_policy_inheritance
and not client.block_policy_inheritance
):
templates.append(core.server_policy.alert_template) # type: ignore
if (
self.monitoring_type == "workstation"
and core.workstation_policy # type: ignore
and core.workstation_policy.alert_template # type: ignore
and core.workstation_policy.alert_template.is_active # type: ignore
and not self.block_policy_inheritance
and not site.block_policy_inheritance
and not client.block_policy_inheritance
):
templates.append(core.workstation_policy.alert_template) # type: ignore
# go through the templates and return the first one that isn't excluded
for template in templates:
# check if client, site, or agent has been excluded from template
# loop through all policies applied to agent and return an alert_template if found
processed_policies = list()
for key, policy in policies.items():
# the core (global) alert template takes precedence over an alert template set on the default policy
if (
client.pk
in template.excluded_clients.all().values_list("pk", flat=True)
or site.pk in template.excluded_sites.all().values_list("pk", flat=True)
or self.pk
in template.excluded_agents.all()
.only("pk")
.values_list("pk", flat=True)
"default" in key
and core.alert_template
and core.alert_template.is_active
and not core.alert_template.is_agent_excluded(self)
):
continue
# check if template is excluding desktops
self.alert_template = core.alert_template
self.save(update_fields=["alert_template"])
return core.alert_template
elif (
self.monitoring_type == "workstation" and template.exclude_workstations
policy
and policy.active
and policy.pk not in processed_policies
and policy.alert_template
and policy.alert_template.is_active
and not policy.alert_template.is_agent_excluded(self)
):
continue
# check if template is excluding servers
elif self.monitoring_type == "server" and template.exclude_servers:
continue
else:
# save alert_template to agent cache field
self.alert_template = template
self.save()
return template
self.alert_template = policy.alert_template
self.save(update_fields=["alert_template"])
return policy.alert_template
elif (
"site" in key
and self.site.alert_template
and self.site.alert_template.is_active
and not self.site.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.alert_template
self.save(update_fields=["alert_template"])
return self.site.alert_template
elif (
"client" in key
and self.site.client.alert_template
and self.site.client.alert_template.is_active
and not self.site.client.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.client.alert_template
self.save(update_fields=["alert_template"])
return self.site.client.alert_template
# no alert templates found or agent has been excluded
self.alert_template = None
self.save()
self.save(update_fields=["alert_template"])
return None
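set_alert_template now derives precedence from that same ordered mapping: at each level it prefers an alert template attached to that level's policy, then the template set directly on the site or client, and the global (core) template is consulted when the default entry is reached. Roughly, it is a first-active-hit walk over an ordered list of candidates; a compressed sketch (exclusion checks and the default policy's own template omitted for brevity):

def first_active_template(candidates):
    # candidates: (label, template) pairs already ordered by precedence
    for label, template in candidates:
        if template is not None and template.get("is_active"):
            return label, template
    return None, None

order = [
    ("agent_policy", None),
    ("site_policy", None),
    ("site", {"is_active": True, "name": "Site default"}),
    ("client_policy", None),
    ("client", None),
    ("global", {"is_active": True, "name": "Core"}),
]
assert first_active_template(order)[0] == "site"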
@@ -694,32 +608,10 @@ class Agent(BaseAuditModel):
# Generate tasks based on policies
Policy.generate_policy_tasks(self)
# https://github.com/Ylianst/MeshCentral/issues/59#issuecomment-521965347
def get_login_token(self, key, user, action=3):
try:
key = bytes.fromhex(key)
key1 = key[0:48]
key2 = key[48:]
msg = '{{"a":{}, "u":"{}","time":{}}}'.format(
action, user.lower(), int(time.time())
)
iv = get_random_bytes(16)
# sha
h = SHA3_384.new()
h.update(key1)
hashed_msg = h.digest() + msg.encode()
# aes
cipher = AES.new(key2, AES.MODE_CBC, iv)
msg = cipher.encrypt(pad(hashed_msg, 16))
return base64.b64encode(iv + msg, altchars=b"@$").decode("utf-8")
except Exception:
return "err"
def _do_nats_debug(self, agent, message):
DebugLog.error(agent=agent, log_type="agent_issues", message=message)
async def nats_cmd(self, data: dict, timeout: int = 30, wait: bool = True):
nc = NATS()
options = {
"servers": f"tls://{settings.ALLOWED_HOSTS[0]}:4222",
"user": "tacticalrmm",
@@ -727,8 +619,9 @@ class Agent(BaseAuditModel):
"connect_timeout": 3,
"max_reconnect_attempts": 2,
}
try:
await nc.connect(**options)
nc = await nats.connect(**options)
except:
return "natsdown"
@@ -737,14 +630,16 @@ class Agent(BaseAuditModel):
msg = await nc.request(
self.agent_id, msgpack.dumps(data), timeout=timeout
)
except ErrTimeout:
except TimeoutError:
ret = "timeout"
else:
try:
ret = msgpack.loads(msg.data) # type: ignore
except Exception as e:
ret = str(e)
DebugLog.error(agent=self, log_type="agent_issues", message=ret)
await sync_to_async(self._do_nats_debug, thread_sensitive=False)(
agent=self, message=ret
)
await nc.close()
return ret
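The hunk above tracks the nats-py 2.x API: the module-level nats.connect() coroutine replaces constructing NATS() and calling connect(), and timeouts surface as (a subclass of) asyncio.TimeoutError rather than ErrTimeout. A minimal request/reply sketch against that newer API (server address, credentials and subject below are placeholders):

import asyncio
import msgpack
import nats

async def rpc(subject, payload, timeout=10):
    nc = await nats.connect(
        "tls://rmm.example.com:4222",
        user="tacticalrmm",
        password="changeme",
        connect_timeout=3,
        max_reconnect_attempts=2,
    )
    try:
        msg = await nc.request(subject, msgpack.dumps(payload), timeout=timeout)
        return msgpack.loads(msg.data)
    except asyncio.TimeoutError:
        return "timeout"
    finally:
        await nc.close()

# asyncio.run(rpc("agent-id-goes-here", {"func": "ping"}))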
@@ -853,31 +748,6 @@ class Agent(BaseAuditModel):
)
RECOVERY_CHOICES = [
("salt", "Salt"),
("mesh", "Mesh"),
("command", "Command"),
("rpc", "Nats RPC"),
("checkrunner", "Checkrunner"),
]
class RecoveryAction(models.Model):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
Agent,
related_name="recoveryactions",
on_delete=models.CASCADE,
)
mode = models.CharField(max_length=50, choices=RECOVERY_CHOICES, default="mesh")
command = models.TextField(null=True, blank=True)
last_run = models.DateTimeField(null=True, blank=True)
def __str__(self):
return f"{self.agent.hostname} - {self.mode}"
class Note(models.Model):
objects = PermissionQuerySet.as_manager()
@@ -974,7 +844,7 @@ class AgentHistory(models.Model):
type = models.CharField(
max_length=50, choices=AGENT_HISTORY_TYPES, default="cmd_run"
)
command = models.TextField(null=True, blank=True)
command = models.TextField(null=True, blank=True, default="")
status = models.CharField(
max_length=50, choices=AGENT_HISTORY_STATUS, default="success"
)

View File

@@ -2,7 +2,7 @@ import pytz
from rest_framework import serializers
from winupdate.serializers import WinUpdatePolicySerializer
from .models import Agent, AgentCustomField, Note, AgentHistory
from .models import Agent, AgentCustomField, AgentHistory, Note
class AgentCustomFieldSerializer(serializers.ModelSerializer):
@@ -40,6 +40,33 @@ class AgentSerializer(serializers.ModelSerializer):
custom_fields = AgentCustomFieldSerializer(many=True, read_only=True)
patches_last_installed = serializers.ReadOnlyField()
last_seen = serializers.ReadOnlyField()
applied_policies = serializers.SerializerMethodField()
effective_patch_policy = serializers.SerializerMethodField()
alert_template = serializers.SerializerMethodField()
def get_alert_template(self, obj):
from alerts.serializers import AlertTemplateSerializer
return (
AlertTemplateSerializer(obj.alert_template).data
if obj.alert_template
else None
)
def get_effective_patch_policy(self, obj):
return WinUpdatePolicySerializer(obj.get_patch_policy()).data
def get_applied_policies(self, obj):
from automation.serializers import PolicySerializer
policies = obj.get_agent_policies()
# need to serialize model objects manually
for key, policy in policies.items():
if policy:
policies[key] = PolicySerializer(policy).data
return policies
def get_all_timezones(self, obj):
return pytz.all_timezones
@@ -116,6 +143,8 @@ class AgentTableSerializer(serializers.ModelSerializer):
"italic",
"policy",
"block_policy_inheritance",
"plat",
"goarch",
]
depth = 2

View File

@@ -4,16 +4,16 @@ import random
from time import sleep
from typing import Union
from agents.models import Agent
from agents.utils import get_agent_url
from core.models import CoreSettings
from django.conf import settings
from django.utils import timezone as djangotime
from logs.models import DebugLog, PendingAction
from packaging import version as pyver
from scripts.models import Script
from tacticalrmm.celery import app
from agents.models import Agent
from agents.utils import get_winagent_url
from tacticalrmm.celery import app
def agent_update(agent_id: str, force: bool = False) -> str:
@@ -34,7 +34,7 @@ def agent_update(agent_id: str, force: bool = False) -> str:
version = settings.LATEST_AGENT_VER
inno = agent.win_inno_exe
url = get_winagent_url(agent.arch)
url = get_agent_url(agent.arch, agent.plat)
if not force:
if agent.pendingactions.filter(
@@ -271,7 +271,7 @@ def run_script_email_results_task(
@app.task
def clear_faults_task(older_than_days: int) -> None:
# https://github.com/wh1te909/tacticalrmm/issues/484
# https://github.com/amidaware/tacticalrmm/issues/484
agents = Agent.objects.exclude(last_seen__isnull=True).filter(
last_seen__lt=djangotime.now() - djangotime.timedelta(days=older_than_days)
)

View File

@@ -1,31 +1,37 @@
import json
import os
import pytz
from django.utils import timezone as djangotime
from unittest.mock import patch
from itertools import cycle
from unittest.mock import patch
import pytz
from django.conf import settings
from django.test import modify_settings
from django.utils import timezone as djangotime
from logs.models import PendingAction
from model_bakery import baker
from packaging import version as pyver
from tacticalrmm.test import TacticalTestCase
from winupdate.models import WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
from tacticalrmm.test import TacticalTestCase
from .models import Agent, AgentCustomField, AgentHistory, Note
from .serializers import (
AgentHistorySerializer,
AgentSerializer,
AgentHostnameSerializer,
AgentNoteSerializer,
AgentSerializer,
)
from .tasks import auto_self_agent_update_task
base_url = "/agents"
@modify_settings(
MIDDLEWARE={
"remove": "tacticalrmm.middleware.LinuxMiddleware",
}
)
class TestAgentsList(TacticalTestCase):
def setUp(self):
self.authenticate()
@@ -91,6 +97,11 @@ class TestAgentsList(TacticalTestCase):
self.check_not_authenticated("get", url)
@modify_settings(
MIDDLEWARE={
"remove": "tacticalrmm.middleware.LinuxMiddleware",
}
)
class TestAgentViews(TacticalTestCase):
def setUp(self):
self.authenticate()
@@ -184,15 +195,23 @@ class TestAgentViews(TacticalTestCase):
)
self.check_not_authenticated("put", url)
@patch("asyncio.run")
@patch("asyncio.run")
@patch("core.utils._b64_to_hex")
@patch("agents.models.Agent.nats_cmd")
@patch("agents.views.reload_nats")
def test_agent_uninstall(self, reload_nats, nats_cmd):
def test_agent_uninstall(
self, reload_nats, nats_cmd, b64_to_hex, asyncio_run1, asyncio_run2
):
asyncio_run1.return_value = "ok"
asyncio_run2.return_value = "ok"
b64_to_hex.return_value = "nodeid"
url = f"{base_url}/{self.agent.agent_id}/"
r = self.client.delete(url, format="json")
self.assertEqual(r.status_code, 200)
nats_cmd.assert_called_with({"func": "uninstall"}, wait=False)
nats_cmd.assert_called_with({"func": "uninstall", "code": "foo"}, wait=False)
reload_nats.assert_called_once()
self.check_not_authenticated("delete", url)
@@ -430,14 +449,22 @@ class TestAgentViews(TacticalTestCase):
"func": "schedtask",
"schedtaskpayload": {
"type": "schedreboot",
"deleteafter": True,
"trigger": "once",
"enabled": True,
"delete_expired_task_after": True,
"start_when_available": False,
"multiple_instances": 2,
"trigger": "runonce",
"name": r.data["task_name"], # type: ignore
"year": 2025,
"month": "August",
"day": 29,
"hour": 18,
"min": 41,
"start_year": 2025,
"start_month": 8,
"start_day": 29,
"start_hour": 18,
"start_min": 41,
"expire_year": 2025,
"expire_month": 8,
"expire_day": 29,
"expire_hour": 18,
"expire_min": 46,
},
}
nats_cmd.assert_called_with(nats_data, timeout=10)
@@ -456,8 +483,7 @@ class TestAgentViews(TacticalTestCase):
self.check_not_authenticated("patch", url)
@patch("os.path.exists")
def test_install_agent(self, mock_file_exists):
def test_install_agent(self):
url = f"{base_url}/installer/"
site = baker.make("clients.Site")
@@ -475,21 +501,10 @@ class TestAgentViews(TacticalTestCase):
"fileName": "rmm-client-site-server.exe",
}
mock_file_exists.return_value = False
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 400)
mock_file_exists.return_value = True
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
data["arch"] = "32"
mock_file_exists.return_value = False
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 400)
data["arch"] = "64"
mock_file_exists.return_value = True
r = self.client.post(url, data, format="json")
self.assertIn("rdp", r.json()["cmd"])
self.assertNotIn("power", r.json()["cmd"])
@@ -504,71 +519,7 @@ class TestAgentViews(TacticalTestCase):
self.check_not_authenticated("post", url)
@patch("agents.models.Agent.nats_cmd")
def test_recover(self, nats_cmd):
from agents.models import RecoveryAction
RecoveryAction.objects.all().delete()
agent = baker.make_recipe("agents.online_agent")
url = f"{base_url}/{agent.agent_id}/recover/"
# test mesh realtime
data = {"cmd": None, "mode": "mesh"}
nats_cmd.return_value = "ok"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(RecoveryAction.objects.count(), 0)
nats_cmd.assert_called_with(
{"func": "recover", "payload": {"mode": "mesh"}}, timeout=10
)
nats_cmd.reset_mock()
# test mesh with agent rpc not working
data = {"cmd": None, "mode": "mesh"}
nats_cmd.return_value = "timeout"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(RecoveryAction.objects.count(), 1)
mesh_recovery = RecoveryAction.objects.first()
self.assertEqual(mesh_recovery.mode, "mesh") # type: ignore
nats_cmd.reset_mock()
RecoveryAction.objects.all().delete()
# test tacagent realtime
data = {"cmd": None, "mode": "tacagent"}
nats_cmd.return_value = "ok"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(RecoveryAction.objects.count(), 0)
nats_cmd.assert_called_with(
{"func": "recover", "payload": {"mode": "tacagent"}}, timeout=10
)
nats_cmd.reset_mock()
# test tacagent with rpc not working
data = {"cmd": None, "mode": "tacagent"}
nats_cmd.return_value = "timeout"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 400)
self.assertEqual(RecoveryAction.objects.count(), 0)
nats_cmd.reset_mock()
# test shell cmd without command
data = {"cmd": None, "mode": "command"}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 400)
self.assertEqual(RecoveryAction.objects.count(), 0)
# test shell cmd
data = {"cmd": "shutdown /r /t 10 /f", "mode": "command"}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(RecoveryAction.objects.count(), 1)
cmd_recovery = RecoveryAction.objects.first()
self.assertEqual(cmd_recovery.mode, "command") # type: ignore
self.assertEqual(cmd_recovery.command, "shutdown /r /t 10 /f") # type: ignore
@patch("agents.models.Agent.get_login_token")
@patch("meshctrl.utils.get_auth_token")
def test_meshcentral_tabs(self, mock_token):
url = f"{base_url}/{self.agent.agent_id}/meshcentral/"
mock_token.return_value = "askjh1k238uasdhk487234jadhsajksdhasd"
@@ -596,10 +547,6 @@ class TestAgentViews(TacticalTestCase):
self.assertEqual(r.status_code, 200)
mock_token.return_value = "err"
r = self.client.get(url)
self.assertEqual(r.status_code, 400)
self.check_not_authenticated("get", url)
@patch("agents.models.Agent.nats_cmd")
@@ -626,9 +573,10 @@ class TestAgentViews(TacticalTestCase):
@patch("agents.tasks.run_script_email_results_task.delay")
@patch("agents.models.Agent.run_script")
def test_run_script(self, run_script, email_task):
from .models import AgentCustomField, Note, AgentHistory
from clients.models import ClientCustomField, SiteCustomField
from .models import AgentCustomField, AgentHistory, Note
run_script.return_value = "ok"
url = f"/agents/{self.agent.agent_id}/runscript/"
script = baker.make_recipe("scripts.script")
@@ -914,6 +862,11 @@ class TestAgentViews(TacticalTestCase):
self.assertEqual(r.data, data) # type:ignore
@modify_settings(
MIDDLEWARE={
"remove": "tacticalrmm.middleware.LinuxMiddleware",
}
)
class TestAgentViewsNew(TacticalTestCase):
def setUp(self):
self.authenticate()
@@ -948,6 +901,11 @@ class TestAgentViewsNew(TacticalTestCase):
self.check_not_authenticated("post", url)
@modify_settings(
MIDDLEWARE={
"remove": "tacticalrmm.middleware.LinuxMiddleware",
}
)
class TestAgentPermissions(TacticalTestCase):
def setUp(self):
self.client_setup()
@@ -989,14 +947,20 @@ class TestAgentPermissions(TacticalTestCase):
# make sure superusers work
self.check_authorized_superuser("get", url)
@patch("asyncio.run")
@patch("core.utils._b64_to_hex")
@patch("agents.models.Agent.nats_cmd")
@patch("agents.views.reload_nats")
def test_get_edit_uninstall_permissions(self, reload_nats, nats_cmd):
def test_get_edit_uninstall_permissions(
self, reload_nats, nats_cmd, b64_to_hex, asyncio_run
):
b64_to_hex.return_value = "nodeid"
# create user with empty role
user = self.create_user_with_roles([])
self.client.force_authenticate(user=user) # type: ignore
agent = baker.make_recipe("agents.agent")
baker.make_recipe("winupdate.winupdate_policy", agent=agent)
methods = ["get", "put", "delete"]
url = f"{base_url}/{agent.agent_id}/"
@@ -1168,23 +1132,23 @@ class TestAgentPermissions(TacticalTestCase):
update_task.reset_mock()
# limit to client
user.role.can_view_clients.set([agents[0].client])
self.check_authorized("post", url, data)
update_task.assert_called_with(agent_ids=[agent.agent_id for agent in agents])
update_task.reset_mock()
# user.role.can_view_clients.set([agents[0].client])
# self.check_authorized("post", url, data)
# update_task.assert_called_with(agent_ids=[agent.agent_id for agent in agents])
# update_task.reset_mock()
# add site
user.role.can_view_sites.set([other_agents[0].site])
self.check_authorized("post", url, data)
update_task.assert_called_with(agent_ids=data["agent_ids"])
update_task.reset_mock()
# user.role.can_view_sites.set([other_agents[0].site])
# self.check_authorized("post", url, data)
# update_task.assert_called_with(agent_ids=data["agent_ids"])
# update_task.reset_mock()
# remove client permissions
user.role.can_view_clients.clear()
self.check_authorized("post", url, data)
update_task.assert_called_with(
agent_ids=[agent.agent_id for agent in other_agents]
)
# user.role.can_view_clients.clear()
# self.check_authorized("post", url, data)
# update_task.assert_called_with(
# agent_ids=[agent.agent_id for agent in other_agents]
# )
def test_get_agent_version_permissions(self):
agents = baker.make_recipe("agents.agent", _quantity=5)
@@ -1415,12 +1379,17 @@ class TestAgentPermissions(TacticalTestCase):
self.check_authorized_superuser("get", unauthorized_url)
@modify_settings(
MIDDLEWARE={
"remove": "tacticalrmm.middleware.LinuxMiddleware",
}
)
class TestAgentTasks(TacticalTestCase):
def setUp(self):
self.authenticate()
self.setup_coresettings()
@patch("agents.utils.get_winagent_url")
@patch("agents.utils.get_agent_url")
@patch("agents.models.Agent.nats_cmd")
def test_agent_update(self, nats_cmd, get_url):
get_url.return_value = "https://exe.tacticalrmm.io"
@@ -1457,7 +1426,7 @@ class TestAgentTasks(TacticalTestCase):
self.assertEqual(action.status, "pending")
self.assertEqual(
action.details["url"],
f"https://github.com/wh1te909/rmmagent/releases/download/v{settings.LATEST_AGENT_VER}/winagent-v{settings.LATEST_AGENT_VER}.exe",
f"https://github.com/amidaware/rmmagent/releases/download/v{settings.LATEST_AGENT_VER}/winagent-v{settings.LATEST_AGENT_VER}.exe",
)
self.assertEqual(
action.details["inno"], f"winagent-v{settings.LATEST_AGENT_VER}.exe"
@@ -1467,7 +1436,7 @@ class TestAgentTasks(TacticalTestCase):
{
"func": "agentupdate",
"payload": {
"url": f"https://github.com/wh1te909/rmmagent/releases/download/v{settings.LATEST_AGENT_VER}/winagent-v{settings.LATEST_AGENT_VER}.exe",
"url": f"https://github.com/amidaware/rmmagent/releases/download/v{settings.LATEST_AGENT_VER}/winagent-v{settings.LATEST_AGENT_VER}.exe",
"version": settings.LATEST_AGENT_VER,
"inno": f"winagent-v{settings.LATEST_AGENT_VER}.exe",
},

View File

@@ -1,9 +1,9 @@
from autotasks.views import GetAddAutoTasks
from checks.views import GetAddChecks
from django.urls import path
from logs.views import PendingActions
from . import views
from checks.views import GetAddChecks
from autotasks.views import GetAddAutoTasks
from logs.views import PendingActions
urlpatterns = [
# agent views
@@ -40,5 +40,4 @@ urlpatterns = [
path("versions/", views.get_agent_versions),
path("update/", views.update_agents),
path("installer/", views.install_agent),
path("<str:arch>/getmeshexe/", views.get_mesh_exe),
]

View File

@@ -1,33 +1,28 @@
import random
import asyncio
import tempfile
import urllib.parse
import requests
from core.models import CodeSignToken, CoreSettings
from core.utils import get_mesh_device_id, get_mesh_ws_url
from django.conf import settings
from core.models import CodeSignToken
from django.http import FileResponse
from tacticalrmm.constants import MeshAgentIdent
def get_exegen_url() -> str:
urls: list[str] = settings.EXE_GEN_URLS
for url in urls:
try:
r = requests.get(url, timeout=10)
except:
continue
def get_agent_url(arch: str, plat: str) -> str:
if r.status_code == 200:
return url
return random.choice(urls)
def get_winagent_url(arch: str) -> str:
dl_url = settings.DL_32 if arch == "32" else settings.DL_64
if plat == "windows":
endpoint = "winagents"
dl_url = settings.DL_32 if arch == "32" else settings.DL_64
else:
endpoint = "linuxagents"
dl_url = ""
try:
t: CodeSignToken = CodeSignToken.objects.first() # type: ignore
if t.is_valid:
base_url = get_exegen_url() + "/api/v1/winagents/?"
base_url = settings.EXE_GEN_URL + f"/api/v1/{endpoint}/?"
params = {
"version": settings.LATEST_AGENT_VER,
"arch": arch,
@@ -38,3 +33,56 @@ def get_winagent_url(arch: str) -> str:
pass
return dl_url
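get_agent_url now keys off both arch and plat: Windows installs can still fall back to the public DL_32/DL_64 links, while anything else targets the linuxagents endpoint and is only served through the code-signing service. A rough sketch of the URL assembly; the query-string construction after params (including the token parameter) is assumed, since the hunk cuts off before it:

import urllib.parse

EXE_GEN_URL = "https://exe.example.com"  # stand-in for settings.EXE_GEN_URL
DL_32 = "https://example.com/winagent-x86.exe"
DL_64 = "https://example.com/winagent-x64.exe"

def agent_url(arch, plat, latest_ver, token=None):
    if plat == "windows":
        endpoint = "winagents"
        dl_url = DL_32 if arch == "32" else DL_64
    else:
        endpoint = "linuxagents"
        dl_url = ""  # no unsigned fallback for non-Windows agents
    if token:  # i.e. a valid CodeSignToken exists
        params = {"version": latest_ver, "arch": arch, "token": token}
        dl_url = f"{EXE_GEN_URL}/api/v1/{endpoint}/?" + urllib.parse.urlencode(params)
    return dl_url

print(agent_url("amd64", "linux", "2.0.0", token="secret"))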
def generate_linux_install(
client: str,
site: str,
agent_type: str,
arch: str,
token: str,
api: str,
download_url: str,
) -> FileResponse:
match arch:
case "amd64":
arch_id = MeshAgentIdent.LINUX64
case "386":
arch_id = MeshAgentIdent.LINUX32
case "arm64":
arch_id = MeshAgentIdent.LINUX_ARM_64
case "arm":
arch_id = MeshAgentIdent.LINUX_ARM_HF
core: CoreSettings = CoreSettings.objects.first() # type: ignore
uri = get_mesh_ws_url()
mesh_id = asyncio.run(get_mesh_device_id(uri, core.mesh_device_group))
mesh_dl = f"{core.mesh_site}/meshagents?id={mesh_id}&installflags=0&meshinstall={arch_id}" # type: ignore
sh = settings.LINUX_AGENT_SCRIPT
with open(sh, "r") as f:
text = f.read()
replace = {
"agentDLChange": download_url,
"meshDLChange": mesh_dl,
"clientIDChange": client,
"siteIDChange": site,
"agentTypeChange": agent_type,
"tokenChange": token,
"apiURLChange": api,
}
for i, j in replace.items():
text = text.replace(i, j)
with tempfile.NamedTemporaryFile() as fp:
with open(fp.name, "w") as f:
f.write(text)
f.write("\n")
return FileResponse(
open(fp.name, "rb"), as_attachment=True, filename="linux_agent_install.sh"
)
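generate_linux_install fills the packaged shell-script template with plain string substitution: each placeholder token is swapped for the agent-specific value and the result is streamed back as a download. A tiny standalone sketch of that substitution step, using the placeholder names visible above:

template = (
    "#!/usr/bin/env bash\n"
    "agent_dl='agentDLChange'\n"
    "mesh_dl='meshDLChange'\n"
    "client_id='clientIDChange'\n"
)
replace = {
    "agentDLChange": "https://example.com/linuxagent",
    "meshDLChange": "https://mesh.example.com/meshagents?id=6",
    "clientIDChange": "1",
}
text = template
for placeholder, value in replace.items():
    text = text.replace(placeholder, value)
print(text)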

View File

@@ -4,60 +4,59 @@ import os
import random
import string
import time
from meshctrl.utils import get_auth_token
from core.models import CodeSignToken, CoreSettings
from core.utils import get_mesh_ws_url, remove_mesh_agent, send_command_with_mesh
from django.conf import settings
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.db.models import Q
from django.utils import timezone as djangotime
from logs.models import AuditLog, DebugLog, PendingAction
from packaging import version as pyver
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.exceptions import PermissionDenied
from core.models import CoreSettings
from logs.models import AuditLog, DebugLog, PendingAction
from scripts.models import Script
from scripts.tasks import handle_bulk_command_task, handle_bulk_script_task
from tacticalrmm.utils import (
get_default_timezone,
notify_error,
reload_nats,
AGENT_DEFER,
)
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from tacticalrmm.constants import AGENT_DEFER
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from tacticalrmm.utils import get_default_timezone, notify_error, reload_nats
from .models import Agent, AgentCustomField, Note, RecoveryAction, AgentHistory
from .models import Agent, AgentCustomField, AgentHistory, Note
from .permissions import (
AgentHistoryPerms,
AgentNotesPerms,
AgentPerms,
EvtLogPerms,
InstallAgentPerms,
RecoverAgentPerms,
AgentNotesPerms,
ManageProcPerms,
MeshPerms,
PingAgentPerms,
RebootAgentPerms,
RecoverAgentPerms,
RunBulkPerms,
RunScriptPerms,
SendCMDPerms,
PingAgentPerms,
UpdateAgentPerms,
)
from .serializers import (
AgentCustomFieldSerializer,
AgentHistorySerializer,
AgentHostnameSerializer,
AgentNoteSerializer,
AgentSerializer,
AgentTableSerializer,
AgentNoteSerializer,
)
from .tasks import run_script_email_results_task, send_agent_update_task
@@ -155,10 +154,19 @@ class GetUpdateDeleteAgent(APIView):
# uninstall agent
def delete(self, request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
asyncio.run(agent.nats_cmd({"func": "uninstall"}, wait=False))
code = "foo"
if agent.plat == "linux":
with open(settings.LINUX_AGENT_SCRIPT, "r") as f:
code = f.read()
asyncio.run(agent.nats_cmd({"func": "uninstall", "code": code}, wait=False))
name = agent.hostname
mesh_id = agent.mesh_node_id
agent.delete()
reload_nats()
uri = get_mesh_ws_url()
asyncio.run(remove_mesh_agent(uri, mesh_id))
return Response(f"{name} will now be uninstalled.")
@@ -201,13 +209,7 @@ class AgentMeshCentral(APIView):
agent = get_object_or_404(Agent, agent_id=agent_id)
core = CoreSettings.objects.first()
token = agent.get_login_token(
key=core.mesh_token,
user=f"user//{core.mesh_username.lower()}", # type:ignore
)
if token == "err":
return notify_error("Invalid mesh token")
token = get_auth_token(user=core.mesh_username, key=core.mesh_token)
control = f"{core.mesh_site}/?login={token}&gotonode={agent.mesh_node_id}&viewmode=11&hide=31" # type:ignore
terminal = f"{core.mesh_site}/?login={token}&gotonode={agent.mesh_node_id}&viewmode=12&hide=31" # type:ignore
@@ -326,12 +328,17 @@ def get_event_log(request, agent_id, logtype, days):
def send_raw_cmd(request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
timeout = int(request.data["timeout"])
if request.data["shell"] == "custom" and request.data["custom_shell"]:
shell = request.data["custom_shell"]
else:
shell = request.data["shell"]
data = {
"func": "rawcmd",
"timeout": timeout,
"payload": {
"command": request.data["cmd"],
"shell": request.data["shell"],
"shell": shell,
},
}
@@ -352,7 +359,7 @@ def send_raw_cmd(request, agent_id):
username=request.user.username,
agent=agent,
cmd=request.data["cmd"],
shell=request.data["shell"],
shell=shell,
debug_info={"ip": request._client_ip},
)
@@ -383,18 +390,28 @@ class Reboot(APIView):
random.choice(string.ascii_letters) for _ in range(10)
)
expire_date = obj + djangotime.timedelta(minutes=5)
nats_data = {
"func": "schedtask",
"schedtaskpayload": {
"type": "schedreboot",
"deleteafter": True,
"trigger": "once",
"enabled": True,
"delete_expired_task_after": True,
"start_when_available": False,
"multiple_instances": 2,
"trigger": "runonce",
"name": task_name,
"year": int(dt.datetime.strftime(obj, "%Y")),
"month": dt.datetime.strftime(obj, "%B"),
"day": int(dt.datetime.strftime(obj, "%d")),
"hour": int(dt.datetime.strftime(obj, "%H")),
"min": int(dt.datetime.strftime(obj, "%M")),
"start_year": int(dt.datetime.strftime(obj, "%Y")),
"start_month": int(dt.datetime.strftime(obj, "%-m")),
"start_day": int(dt.datetime.strftime(obj, "%-d")),
"start_hour": int(dt.datetime.strftime(obj, "%-H")),
"start_min": int(dt.datetime.strftime(obj, "%-M")),
"expire_year": int(expire_date.strftime("%Y")),
"expire_month": int(expire_date.strftime("%-m")),
"expire_day": int(expire_date.strftime("%-d")),
"expire_hour": int(expire_date.strftime("%-H")),
"expire_min": int(expire_date.strftime("%-M")),
},
}
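The new start_*/expire_* fields are plain integers built with the %-m, %-d, %-H and %-M strftime directives, which drop zero-padding. The dash flag is a glibc extension, so it works on the Linux hosts the API runs on but is not portable to Windows strftime. A quick check of the behavior:

import datetime as dt

obj = dt.datetime(2025, 8, 9, 6, 5)
print(obj.strftime("%m"), obj.strftime("%-m"))  # '08' vs '8' on glibc platforms
print(int(obj.strftime("%-d")), int(obj.strftime("%-H")), int(obj.strftime("%-M")))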
@@ -415,10 +432,9 @@ class Reboot(APIView):
@api_view(["POST"])
@permission_classes([IsAuthenticated, InstallAgentPerms])
def install_agent(request):
from knox.models import AuthToken
from accounts.models import User
from agents.utils import get_winagent_url
from agents.utils import get_agent_url
from knox.models import AuthToken
client_id = request.data["client"]
site_id = request.data["site"]
@@ -428,26 +444,15 @@ def install_agent(request):
if not _has_perm_on_site(request.user, site_id):
raise PermissionDenied()
# response type is blob so we have to use
# status codes and render error message on the frontend
if arch == "64" and not os.path.exists(
os.path.join(settings.EXE_DIR, "meshagent.exe")
):
return notify_error(
"Missing 64 bit meshagent.exe. Upload it from Settings > Global Settings > MeshCentral"
)
if arch == "32" and not os.path.exists(
os.path.join(settings.EXE_DIR, "meshagent-x86.exe")
):
return notify_error(
"Missing 32 bit meshagent.exe. Upload it from Settings > Global Settings > MeshCentral"
)
inno = (
f"winagent-v{version}.exe" if arch == "64" else f"winagent-v{version}-x86.exe"
)
download_url = get_winagent_url(arch)
if request.data["installMethod"] == "linux":
plat = "linux"
else:
plat = "windows"
download_url = get_agent_url(arch, plat)
installer_user = User.objects.filter(is_installer_user=True).first()
@@ -471,6 +476,33 @@ def install_agent(request):
file_name=request.data["fileName"],
)
elif request.data["installMethod"] == "linux":
# TODO
# linux agents are in beta for now, only available for sponsors for testing
# remove this after it's out of beta
try:
t: CodeSignToken = CodeSignToken.objects.first() # type: ignore
except:
return notify_error("Something went wrong")
if t is None:
return notify_error("Missing code signing token")
if not t.is_valid:
return notify_error("Code signing token is not valid")
from agents.utils import generate_linux_install
return generate_linux_install(
client=str(client_id),
site=str(site_id),
agent_type=request.data["agenttype"],
arch=arch,
token=token,
api=request.data["api"],
download_url=download_url,
)
elif request.data["installMethod"] == "manual":
cmd = [
inno,
@@ -564,36 +596,24 @@ def recover(request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
mode = request.data["mode"]
# attempt a realtime recovery, otherwise fall back to old recovery method
if mode == "tacagent" or mode == "mesh":
if mode == "tacagent":
if agent.is_posix:
cmd = "systemctl restart tacticalagent.service"
shell = 3
else:
cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
shell = 1
uri = get_mesh_ws_url()
asyncio.run(send_command_with_mesh(cmd, uri, agent.mesh_node_id, shell, 0))
return Response("Recovery will be attempted shortly")
elif mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
r = asyncio.run(agent.nats_cmd(data, timeout=10))
r = asyncio.run(agent.nats_cmd(data, timeout=20))
if r == "ok":
return Response("Successfully completed recovery")
if agent.recoveryactions.filter(last_run=None).exists(): # type: ignore
return notify_error(
"A recovery action is currently pending. Please wait for the next agent check-in."
)
if mode == "command" and not request.data["cmd"]:
return notify_error("Command is required")
# if we've made it this far and realtime recovery didn't work,
# tacagent service is the fallback recovery, so obviously it can't be used to recover itself if it's down
if mode == "tacagent":
return notify_error(
"Requires RPC service to be functional. Please recover that first"
)
# we should only get here if all other methods fail
RecoveryAction(
agent=agent,
mode=mode,
command=request.data["cmd"] if mode == "command" else None,
).save()
return Response("Recovery will be attempted on the agent's next check-in")
return notify_error("Something went wrong")
@api_view(["POST"])
@@ -690,27 +710,6 @@ def run_script(request, agent_id):
return Response(f"{script.name} will now be run on {agent.hostname}")
@api_view(["POST"])
def get_mesh_exe(request, arch):
filename = "meshagent.exe" if arch == "64" else "meshagent-x86.exe"
mesh_exe = os.path.join(settings.EXE_DIR, filename)
if not os.path.exists(mesh_exe):
return notify_error(f"File {filename} has not been uploaded.")
if settings.DEBUG:
with open(mesh_exe, "rb") as f:
response = HttpResponse(
f.read(), content_type="application/vnd.microsoft.portable-executable"
)
response["Content-Disposition"] = f"inline; filename={filename}"
return response
else:
response = HttpResponse()
response["Content-Disposition"] = f"attachment; filename={filename}"
response["X-Accel-Redirect"] = f"/private/exe/{filename}"
return response
class GetAddNotes(APIView):
permission_classes = [IsAuthenticated, AgentNotesPerms]
@@ -728,6 +727,9 @@ class GetAddNotes(APIView):
if not _has_perm_on_agent(request.user, agent.agent_id):
raise PermissionDenied()
if "note" not in request.data.keys():
return notify_error("Cannot add an empty note")
data = {
"note": request.data["note"],
"agent": agent.pk,
@@ -808,6 +810,11 @@ def bulk(request):
elif request.data["monType"] == "workstations":
q = q.filter(monitoring_type="workstation")
if request.data["osType"] == "windows":
q = q.filter(plat="windows")
elif request.data["osType"] == "linux":
q = q.filter(plat="linux")
agents: list[int] = [agent.pk for agent in q]
if not agents:
@@ -821,10 +828,15 @@ def bulk(request):
)
if request.data["mode"] == "command":
if request.data["shell"] == "custom" and request.data["custom_shell"]:
shell = request.data["custom_shell"]
else:
shell = request.data["shell"]
handle_bulk_command_task.delay(
agents,
request.data["cmd"],
request.data["shell"],
shell,
request.data["timeout"],
request.user.username[:50],
run_on_offline=request.data["offlineAgents"],

View File

@@ -7,8 +7,8 @@ from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.db.models.fields import BooleanField, PositiveIntegerField
from django.utils import timezone as djangotime
from logs.models import BaseAuditModel, DebugLog
from tacticalrmm.models import PermissionQuerySet
if TYPE_CHECKING:
@@ -598,6 +598,17 @@ class AlertTemplate(BaseAuditModel):
def __str__(self):
return self.name
def is_agent_excluded(self, agent):
return (
agent in self.excluded_agents.all()
or agent.site in self.excluded_sites.all()
or agent.client in self.excluded_clients.all()
or agent.monitoring_type == "workstation"
and self.exclude_workstations
or agent.monitoring_type == "server"
and self.exclude_servers
)
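is_agent_excluded leans on Python operator precedence: and binds tighter than or, so each monitoring_type comparison pairs with its exclude_* flag before the ors are combined. A minimal check of that grouping, with plain booleans standing in for the queryset membership tests:

def excluded(in_agents, in_sites, in_clients, mon_type, excl_workstations, excl_servers):
    return (
        in_agents
        or in_sites
        or in_clients
        or mon_type == "workstation" and excl_workstations
        or mon_type == "server" and excl_servers
    )

# a server agent is not excluded just because workstations are
assert excluded(False, False, False, "server", True, False) is False
assert excluded(False, False, False, "server", False, True) is True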
@staticmethod
def serialize(alert_template):
# serializes the agent and returns json

View File

@@ -1,8 +1,8 @@
from automation.serializers import PolicySerializer
from clients.serializers import ClientMinimumSerializer, SiteMinimumSerializer
from rest_framework.fields import SerializerMethodField
from rest_framework.serializers import ModelSerializer, ReadOnlyField
from automation.serializers import PolicySerializer
from clients.serializers import ClientMinimumSerializer, SiteMinimumSerializer
from tacticalrmm.utils import get_default_timezone
from .models import Alert, AlertTemplate

View File

@@ -1,4 +1,5 @@
from django.utils import timezone as djangotime
from tacticalrmm.celery import app

View File

@@ -1,15 +1,15 @@
from datetime import datetime, timedelta
from unittest.mock import patch
from itertools import cycle
from unittest.mock import patch
from alerts.tasks import cache_agents_alert_template
from core.models import CoreSettings
from core.tasks import cache_db_fields_task
from django.conf import settings
from django.utils import timezone as djangotime
from model_bakery import baker, seq
from tacticalrmm.test import TacticalTestCase
from alerts.tasks import cache_agents_alert_template
from core.tasks import cache_db_fields_task
from tacticalrmm.test import TacticalTestCase
from .models import Alert, AlertTemplate
from .serializers import (
@@ -515,7 +515,6 @@ class TestAlertTasks(TacticalTestCase):
agent_recovery_email_task,
agent_recovery_sms_task,
)
from alerts.models import Alert
agent_dashboard_alert = baker.make_recipe("agents.overdue_agent")
@@ -718,6 +717,7 @@ class TestAlertTasks(TacticalTestCase):
send_email,
sleep,
):
from alerts.tasks import cache_agents_alert_template
from checks.models import Check
from checks.tasks import (
handle_check_email_alert_task,
@@ -726,8 +726,6 @@ class TestAlertTasks(TacticalTestCase):
handle_resolved_check_sms_alert_task,
)
from alerts.tasks import cache_agents_alert_template
# create test data
agent = baker.make_recipe("agents.agent")
agent_no_settings = baker.make_recipe("agents.agent")
@@ -1003,6 +1001,7 @@ class TestAlertTasks(TacticalTestCase):
send_email,
sleep,
):
from alerts.tasks import cache_agents_alert_template
from autotasks.models import AutomatedTask
from autotasks.tasks import (
handle_resolved_task_email_alert,
@@ -1011,8 +1010,6 @@ class TestAlertTasks(TacticalTestCase):
handle_task_sms_alert,
)
from alerts.tasks import cache_agents_alert_template
# create test data
agent = baker.make_recipe("agents.agent")
agent_no_settings = baker.make_recipe("agents.agent")

View File

@@ -1,12 +1,11 @@
import json
import os
from unittest.mock import patch
from autotasks.models import AutomatedTask
from django.conf import settings
from django.utils import timezone as djangotime
from model_bakery import baker
from autotasks.models import AutomatedTask
from tacticalrmm.test import TacticalTestCase
@@ -130,45 +129,6 @@ class TestAPIv3(TacticalTestCase):
self.assertIsInstance(r.json()["check_interval"], int)
self.assertEqual(len(r.json()["checks"]), 15)
@patch("apiv3.views.reload_nats")
def test_agent_recovery(self, reload_nats):
reload_nats.return_value = "ok"
r = self.client.get("/api/v3/34jahsdkjasncASDjhg2b3j4r/recover/")
self.assertEqual(r.status_code, 404)
agent = baker.make_recipe("agents.online_agent")
url = f"/api/v3/{agent.agent_id}/recovery/"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json(), {"mode": "pass", "shellcmd": ""})
reload_nats.assert_not_called()
baker.make("agents.RecoveryAction", agent=agent, mode="mesh")
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json(), {"mode": "mesh", "shellcmd": ""})
reload_nats.assert_not_called()
baker.make(
"agents.RecoveryAction",
agent=agent,
mode="command",
command="shutdown /r /t 5 /f",
)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(
r.json(), {"mode": "command", "shellcmd": "shutdown /r /t 5 /f"}
)
reload_nats.assert_not_called()
baker.make("agents.RecoveryAction", agent=agent, mode="rpc")
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json(), {"mode": "rpc", "shellcmd": ""})
reload_nats.assert_called_once()
def test_task_runner_get(self):
from autotasks.serializers import TaskGOGetSerializer

View File

@@ -19,6 +19,5 @@ urlpatterns = [
path("winupdates/", views.WinUpdates.as_view()),
path("superseded/", views.SupersededWinUpdate.as_view()),
path("<int:pk>/chocoresult/", views.ChocoResult.as_view()),
path("<str:agentid>/recovery/", views.AgentRecovery.as_view()),
path("<int:pk>/<str:agentid>/histresult/", views.AgentHistoryResult.as_view()),
]

View File

@@ -1,18 +1,6 @@
import asyncio
import os
import time
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from packaging import version as pyver
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.models import Token
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from accounts.models import User
from agents.models import Agent, AgentHistory
from agents.serializers import AgentHistorySerializer
@@ -20,11 +8,24 @@ from autotasks.models import AutomatedTask
from autotasks.serializers import TaskGOGetSerializer, TaskRunnerPatchSerializer
from checks.models import Check
from checks.serializers import CheckRunnerGetSerializer
from logs.models import PendingAction, DebugLog
from core.models import CoreSettings
from core.utils import download_mesh_agent, get_mesh_device_id, get_mesh_ws_url
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from logs.models import DebugLog, PendingAction
from packaging import version as pyver
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.models import Token
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from software.models import InstalledSoftware
from tacticalrmm.utils import notify_error, reload_nats
from winupdate.models import WinUpdate, WinUpdatePolicy
from tacticalrmm.constants import MeshAgentIdent
from tacticalrmm.utils import notify_error, reload_nats
class CheckIn(APIView):
@@ -315,25 +316,33 @@ class MeshExe(APIView):
"""Sends the mesh exe to the installer"""
def post(self, request):
exe = "meshagent.exe" if request.data["arch"] == "64" else "meshagent-x86.exe"
mesh_exe = os.path.join(settings.EXE_DIR, exe)
match request.data:
case {"arch": "64", "plat": "windows"}:
arch = MeshAgentIdent.WIN64
case {"arch": "32", "plat": "windows"}:
arch = MeshAgentIdent.WIN32
case _:
return notify_error("Arch not specified")
if not os.path.exists(mesh_exe):
return notify_error("Mesh Agent executable not found")
core: CoreSettings = CoreSettings.objects.first() # type: ignore
if settings.DEBUG:
with open(mesh_exe, "rb") as f:
response = HttpResponse(
f.read(),
content_type="application/vnd.microsoft.portable-executable",
)
response["Content-Disposition"] = f"inline; filename={exe}"
return response
try:
uri = get_mesh_ws_url()
mesh_id = asyncio.run(get_mesh_device_id(uri, core.mesh_device_group))
except:
return notify_error("Unable to connect to mesh to get group id information")
if settings.DOCKER_BUILD:
dl_url = f"{settings.MESH_WS_URL.replace('ws://', 'http://')}/meshagents?id={arch}&meshid={mesh_id}&installflags=0"
else:
response = HttpResponse()
response["Content-Disposition"] = f"attachment; filename={exe}"
response["X-Accel-Redirect"] = f"/private/exe/{exe}"
return response
dl_url = (
f"{core.mesh_site}/meshagents?id={arch}&meshid={mesh_id}&installflags=0"
)
try:
return download_mesh_agent(dl_url)
except:
return notify_error("Unable to download mesh agent exe")
class NewAgent(APIView):
@@ -354,11 +363,11 @@ class NewAgent(APIView):
monitoring_type=request.data["monitoring_type"],
description=request.data["description"],
mesh_node_id=request.data["mesh_node_id"],
goarch=request.data["goarch"],
plat=request.data["plat"],
last_seen=djangotime.now(),
)
agent.save()
agent.salt_id = f"{agent.hostname}-{agent.pk}"
agent.save(update_fields=["salt_id"])
user = User.objects.create_user( # type: ignore
username=request.data["agent_id"],
@@ -386,13 +395,8 @@ class NewAgent(APIView):
debug_info={"ip": request._client_ip},
)
return Response(
{
"pk": agent.pk,
"saltid": f"{agent.hostname}-{agent.pk}",
"token": token.key,
}
)
ret = {"pk": agent.pk, "token": token.key}
return Response(ret)
class Software(APIView):
@@ -462,41 +466,6 @@ class ChocoResult(APIView):
return Response("ok")
class AgentRecovery(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
def get(self, request, agentid):
agent = get_object_or_404(
Agent.objects.prefetch_related("recoveryactions").only(
"pk", "agent_id", "last_seen"
),
agent_id=agentid,
)
# TODO remove these 2 lines after agent v1.7.0 has been out for a while
# this is handled now by nats-api service
agent.last_seen = djangotime.now()
agent.save(update_fields=["last_seen"])
recovery = agent.recoveryactions.filter(last_run=None).last() # type: ignore
ret = {"mode": "pass", "shellcmd": ""}
if recovery is None:
return Response(ret)
recovery.last_run = djangotime.now()
recovery.save(update_fields=["last_run"])
ret["mode"] = recovery.mode
if recovery.mode == "command":
ret["shellcmd"] = recovery.command
elif recovery.mode == "rpc":
reload_nats()
return Response(ret)
class AgentHistoryResult(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]

View File

@@ -1,7 +1,6 @@
from django.db import models
from agents.models import Agent
from core.models import CoreSettings
from django.db import models
from logs.models import BaseAuditModel
@@ -135,86 +134,28 @@ class Policy(BaseAuditModel):
# List of all tasks to be applied
tasks = list()
added_task_pks = list()
agent_tasks_parent_pks = [
task.parent_task for task in agent.autotasks.filter(managed_by_policy=True)
]
# Get policies applied to agent and agent site and client
client = agent.client
site = agent.site
policies = agent.get_agent_policies()
default_policy = None
client_policy = None
site_policy = None
agent_policy = agent.policy
processed_policies = list()
# Get the Client/Site policy based on if the agent is server or workstation
if agent.monitoring_type == "server":
default_policy = CoreSettings.objects.first().server_policy
client_policy = client.server_policy
site_policy = site.server_policy
elif agent.monitoring_type == "workstation":
default_policy = CoreSettings.objects.first().workstation_policy
client_policy = client.workstation_policy
site_policy = site.workstation_policy
# check if client/site/agent is blocking inheritance and blank out policies
if agent.block_policy_inheritance:
site_policy = None
client_policy = None
default_policy = None
elif site.block_policy_inheritance:
client_policy = None
default_policy = None
elif client.block_policy_inheritance:
default_policy = None
if (
agent_policy
and agent_policy.active
and not agent_policy.is_agent_excluded(agent)
):
for task in agent_policy.autotasks.all():
if task.pk not in added_task_pks:
for _, policy in policies.items():
if policy and policy.active and policy.pk not in processed_policies:
processed_policies.append(policy.pk)
for task in policy.autotasks.all():
tasks.append(task)
added_task_pks.append(task.pk)
if (
site_policy
and site_policy.active
and not site_policy.is_agent_excluded(agent)
):
for task in site_policy.autotasks.all():
if task.pk not in added_task_pks:
tasks.append(task)
added_task_pks.append(task.pk)
if (
client_policy
and client_policy.active
and not client_policy.is_agent_excluded(agent)
):
for task in client_policy.autotasks.all():
if task.pk not in added_task_pks:
tasks.append(task)
added_task_pks.append(task.pk)
if (
default_policy
and default_policy.active
and not default_policy.is_agent_excluded(agent)
):
for task in default_policy.autotasks.all():
if task.pk not in added_task_pks:
tasks.append(task)
added_task_pks.append(task.pk)
# remove policy tasks from agent not included in policy
for task in agent.autotasks.filter(
parent_task__in=[
taskpk
for taskpk in agent_tasks_parent_pks
if taskpk not in added_task_pks
if taskpk not in [task.pk for task in tasks]
]
):
if task.sync_status == "initial":
@@ -225,7 +166,7 @@ class Policy(BaseAuditModel):
# change tasks from pendingdeletion to notsynced if policy was added or changed
agent.autotasks.filter(sync_status="pendingdeletion").filter(
parent_task__in=[taskpk for taskpk in added_task_pks]
parent_task__in=[taskpk for taskpk in [task.pk for task in tasks]]
).update(sync_status="notsynced")
return [task for task in tasks if task.pk not in agent_tasks_parent_pks]
@@ -241,86 +182,24 @@ class Policy(BaseAuditModel):
]
# Get policies applied to agent and agent site and client
client = agent.client
site = agent.site
default_policy = None
client_policy = None
site_policy = None
agent_policy = agent.policy
if agent.monitoring_type == "server":
default_policy = CoreSettings.objects.first().server_policy
client_policy = client.server_policy
site_policy = site.server_policy
elif agent.monitoring_type == "workstation":
default_policy = CoreSettings.objects.first().workstation_policy
client_policy = client.workstation_policy
site_policy = site.workstation_policy
# check if client/site/agent is blocking inheritance and blank out policies
if agent.block_policy_inheritance:
site_policy = None
client_policy = None
default_policy = None
elif site.block_policy_inheritance:
client_policy = None
default_policy = None
elif client.block_policy_inheritance:
default_policy = None
policies = agent.get_agent_policies()
# Used to hold the policies that will be applied and the order in which they are applied
# Enforced policies are applied first
enforced_checks = list()
policy_checks = list()
if (
agent_policy
and agent_policy.active
and not agent_policy.is_agent_excluded(agent)
):
if agent_policy.enforced:
for check in agent_policy.policychecks.all():
enforced_checks.append(check)
else:
for check in agent_policy.policychecks.all():
policy_checks.append(check)
processed_policies = list()
if (
site_policy
and site_policy.active
and not site_policy.is_agent_excluded(agent)
):
if site_policy.enforced:
for check in site_policy.policychecks.all():
enforced_checks.append(check)
else:
for check in site_policy.policychecks.all():
policy_checks.append(check)
if (
client_policy
and client_policy.active
and not client_policy.is_agent_excluded(agent)
):
if client_policy.enforced:
for check in client_policy.policychecks.all():
enforced_checks.append(check)
else:
for check in client_policy.policychecks.all():
policy_checks.append(check)
if (
default_policy
and default_policy.active
and not default_policy.is_agent_excluded(agent)
):
if default_policy.enforced:
for check in default_policy.policychecks.all():
enforced_checks.append(check)
else:
for check in default_policy.policychecks.all():
policy_checks.append(check)
for _, policy in policies.items():
if policy and policy.active and policy.pk not in processed_policies:
processed_policies.append(policy.pk)
if policy.enforced:
for check in policy.policychecks.all():
enforced_checks.append(check)
else:
for check in policy.policychecks.all():
policy_checks.append(check)
# Sorted Checks already added
added_diskspace_checks = list()
@@ -342,7 +221,7 @@ class Policy(BaseAuditModel):
# Loop over checks in with enforced policies first, then non-enforced policies
for check in enforced_checks + agent_checks + policy_checks:
if check.check_type == "diskspace":
if check.check_type == "diskspace" and agent.plat == "windows":
# Check if drive letter was already added
if check.disk not in added_diskspace_checks:
added_diskspace_checks.append(check.disk)
@@ -364,7 +243,7 @@ class Policy(BaseAuditModel):
check.overriden_by_policy = True
check.save()
if check.check_type == "cpuload":
if check.check_type == "cpuload" and agent.plat == "windows":
# Check if cpuload list is empty
if not added_cpuload_checks:
added_cpuload_checks.append(check)
@@ -375,7 +254,7 @@ class Policy(BaseAuditModel):
check.overriden_by_policy = True
check.save()
if check.check_type == "memory":
if check.check_type == "memory" and agent.plat == "windows":
# Check if memory check list is empty
if not added_memory_checks:
added_memory_checks.append(check)
@@ -386,7 +265,7 @@ class Policy(BaseAuditModel):
check.overriden_by_policy = True
check.save()
if check.check_type == "winsvc":
if check.check_type == "winsvc" and agent.plat == "windows":
# Check if service name was already added
if check.svc_name not in added_winsvc_checks:
added_winsvc_checks.append(check.svc_name)
@@ -397,7 +276,9 @@ class Policy(BaseAuditModel):
check.overriden_by_policy = True
check.save()
if check.check_type == "script":
if check.check_type == "script" and agent.is_supported_script(
check.script.supported_platforms
):
# Check if script id was already added
if check.script.id not in added_script_checks:
added_script_checks.append(check.script.id)
@@ -408,7 +289,7 @@ class Policy(BaseAuditModel):
check.overriden_by_policy = True
check.save()
if check.check_type == "eventlog":
if check.check_type == "eventlog" and agent.plat == "windows":
# Check if events were already added
if [check.log_name, check.event_id] not in added_eventlog_checks:
added_eventlog_checks.append([check.log_name, check.event_id])
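The hunks above all follow the same dedup-and-override pattern: checks are walked with enforced policy checks first, and a later check whose key (drive letter, CPU/memory slot, service name, script id, or log name/event id pair) has already been seen is marked as overridden rather than applied twice. A minimal standalone sketch of that idea for diskspace checks, independent of the models shown here (Check and dedupe_diskspace are illustrative names, not project code):
from collections import namedtuple

# Illustrative stand-in for the real Check model, with only the fields
# needed to show the dedup/override walk.
Check = namedtuple("Check", ["check_type", "disk", "overridden"])

def dedupe_diskspace(checks):
    seen_disks = set()
    applied = []
    overridden = []
    for check in checks:
        if check.check_type != "diskspace":
            applied.append(check)
            continue
        if check.disk in seen_disks:
            # a higher-priority check already covers this drive letter
            overridden.append(check._replace(overridden=True))
        else:
            seen_disks.add(check.disk)
            applied.append(check)
    return applied, overridden

checks = [
    Check("diskspace", "C:", False),  # enforced policy check, walked first
    Check("diskspace", "C:", False),  # duplicate drive letter, gets overridden
    Check("diskspace", "D:", False),
]
applied, overridden = dedupe_diskspace(checks)
print([c.disk for c in applied])  # ['C:', 'D:']
print(len(overridden))            # 1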

View File

@@ -1,14 +1,13 @@
from rest_framework.serializers import (
ModelSerializer,
ReadOnlyField,
SerializerMethodField,
)
from agents.serializers import AgentHostnameSerializer
from autotasks.models import AutomatedTask
from checks.models import Check
from clients.models import Client
from clients.serializers import ClientMinimumSerializer, SiteMinimumSerializer
from rest_framework.serializers import (
ModelSerializer,
ReadOnlyField,
SerializerMethodField,
)
from winupdate.serializers import WinUpdatePolicySerializer
from .models import Policy

View File

@@ -4,9 +4,10 @@ from unittest.mock import patch
from agents.models import Agent
from core.models import CoreSettings
from model_bakery import baker, seq
from tacticalrmm.test import TacticalTestCase
from winupdate.models import WinUpdatePolicy
from tacticalrmm.test import TacticalTestCase
from .serializers import (
PolicyCheckStatusSerializer,
PolicyOverviewSerializer,
@@ -69,7 +70,7 @@ class TestPolicyViews(TacticalTestCase):
# create policy with tasks and checks
policy = baker.make("automation.Policy")
checks = self.create_checks(policy=policy)
tasks = baker.make("autotasks.AutomatedTask", policy=policy, _quantity=3)
tasks = baker.make_recipe("autotasks.task", policy=policy, _quantity=3)
# assign a task to a check
tasks[0].assigned_check = checks[0] # type: ignore
@@ -248,11 +249,11 @@ class TestPolicyViews(TacticalTestCase):
# policy with a task
policy = baker.make("automation.Policy")
task = baker.make("autotasks.AutomatedTask", policy=policy)
task = baker.make_recipe("autotasks.task", policy=policy)
# create policy managed tasks
policy_tasks = baker.make(
"autotasks.AutomatedTask", parent_task=task.id, _quantity=5 # type: ignore
policy_tasks = baker.make_recipe(
"autotasks.task", parent_task=task.id, _quantity=5 # type: ignore
)
url = f"/automation/tasks/{task.id}/status/" # type: ignore
@@ -269,8 +270,8 @@ class TestPolicyViews(TacticalTestCase):
def test_run_win_task(self, mock_task):
# create managed policy tasks
tasks = baker.make(
"autotasks.AutomatedTask",
tasks = baker.make_recipe(
"autotasks.task",
managed_by_policy=True,
parent_task=1,
_quantity=6,
@@ -577,8 +578,8 @@ class TestPolicyTasks(TacticalTestCase):
policy = baker.make("automation.Policy", active=True)
self.create_checks(policy=policy)
baker.make(
"autotasks.AutomatedTask", policy=policy, name=seq("Task"), _quantity=3
baker.make_recipe(
"autotasks.task", policy=policy, name=seq("Task"), _quantity=3
)
server_agent = baker.make_recipe("agents.server_agent")
@@ -859,8 +860,8 @@ class TestPolicyTasks(TacticalTestCase):
# create test data
policy = baker.make("automation.Policy", active=True)
tasks = baker.make(
"autotasks.AutomatedTask", policy=policy, name=seq("Task"), _quantity=3
tasks = baker.make_recipe(
"autotasks.task", policy=policy, name=seq("Task"), _quantity=3
)
agent = baker.make_recipe("agents.server_agent", policy=policy)
@@ -889,7 +890,7 @@ class TestPolicyTasks(TacticalTestCase):
from .tasks import delete_policy_autotasks_task, generate_agent_checks_task
policy = baker.make("automation.Policy", active=True)
tasks = baker.make("autotasks.AutomatedTask", policy=policy, _quantity=3)
tasks = baker.make_recipe("autotasks.task", policy=policy, _quantity=3)
agent = baker.make_recipe("agents.server_agent", policy=policy)
generate_agent_checks_task(agents=[agent.pk], create_tasks=True)
@@ -901,10 +902,10 @@ class TestPolicyTasks(TacticalTestCase):
@patch("autotasks.models.AutomatedTask.create_task_on_agent")
@patch("autotasks.models.AutomatedTask.run_win_task")
def test_run_policy_task(self, run_win_task, create_task):
from .tasks import run_win_policy_autotasks_task, generate_agent_checks_task
from .tasks import generate_agent_checks_task, run_win_policy_autotasks_task
policy = baker.make("automation.Policy", active=True)
tasks = baker.make("autotasks.AutomatedTask", policy=policy, _quantity=3)
tasks = baker.make_recipe("autotasks.task", policy=policy, _quantity=3)
agent = baker.make_recipe("agents.server_agent", policy=policy)
generate_agent_checks_task(agents=[agent.pk], create_tasks=True)
@@ -917,14 +918,14 @@ class TestPolicyTasks(TacticalTestCase):
@patch("autotasks.models.AutomatedTask.modify_task_on_agent")
def test_update_policy_tasks(self, modify_task_on_agent, create_task):
from .tasks import (
update_policy_autotasks_fields_task,
generate_agent_checks_task,
update_policy_autotasks_fields_task,
)
# setup data
policy = baker.make("automation.Policy", active=True)
tasks = baker.make(
"autotasks.AutomatedTask",
tasks = baker.make_recipe(
"autotasks.task",
enabled=True,
policy=policy,
_quantity=3,
@@ -977,7 +978,7 @@ class TestPolicyTasks(TacticalTestCase):
# setup data
policy = baker.make("automation.Policy", active=True)
baker.make_recipe("checks.memory_check", policy=policy)
task = baker.make("autotasks.AutomatedTask", policy=policy)
task = baker.make_recipe("autotasks.task", policy=policy)
agent = baker.make_recipe(
"agents.agent", policy=policy, monitoring_type="server"
)
@@ -1072,7 +1073,7 @@ class TestPolicyTasks(TacticalTestCase):
# setup data
policy = baker.make("automation.Policy", active=True)
baker.make_recipe("checks.memory_check", policy=policy)
baker.make("autotasks.AutomatedTask", policy=policy)
baker.make_recipe("autotasks.task", policy=policy)
agent = baker.make_recipe("agents.agent", monitoring_type="server")
core = CoreSettings.objects.first()

View File

@@ -1,8 +1,8 @@
from autotasks.views import GetAddAutoTasks
from checks.views import GetAddChecks
from django.urls import path
from . import views
from checks.views import GetAddChecks
from autotasks.views import GetAddAutoTasks
urlpatterns = [
path("policies/", views.GetAddPolicies.as_view()),

View File

@@ -3,21 +3,22 @@ from autotasks.models import AutomatedTask
from checks.models import Check
from clients.models import Client
from django.shortcuts import get_object_or_404
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.exceptions import PermissionDenied
from tacticalrmm.utils import notify_error
from tacticalrmm.permissions import _has_perm_on_client, _has_perm_on_site
from winupdate.models import WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
from tacticalrmm.permissions import _has_perm_on_client, _has_perm_on_site
from tacticalrmm.utils import notify_error
from .models import Policy
from .permissions import AutomationPolicyPerms
from .serializers import (
PolicyCheckStatusSerializer,
PolicyRelatedSerializer,
PolicyOverviewSerializer,
PolicyRelatedSerializer,
PolicySerializer,
PolicyTableSerializer,
PolicyTaskStatusSerializer,

View File

@@ -0,0 +1,10 @@
from itertools import cycle
from model_bakery.recipe import Recipe, foreign_key, seq
script = Recipe("scripts.script")
task = Recipe(
"autotasks.AutomatedTask",
script=foreign_key(script),
)

View File

@@ -1,7 +1,6 @@
from django.core.management.base import BaseCommand
from agents.models import Agent
from autotasks.tasks import remove_orphaned_win_tasks
from django.core.management.base import BaseCommand
class Command(BaseCommand):

View File

@@ -3,19 +3,20 @@ import datetime as dt
import random
import string
from typing import List
from django.db.models.fields.json import JSONField
import pytz
from alerts.models import SEVERITY_CHOICES
from django.contrib.postgres.fields import ArrayField
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.db.models.fields import DateTimeField
from django.db.models.fields.json import JSONField
from django.db.utils import DatabaseError
from django.core.validators import MaxValueValidator, MinValueValidator
from django.utils import timezone as djangotime
from logs.models import BaseAuditModel, DebugLog
from tacticalrmm.models import PermissionQuerySet
from packaging import version as pyver
from tacticalrmm.models import PermissionQuerySet
from tacticalrmm.utils import (
bitdays_to_string,
bitmonthdays_to_string,
@@ -169,8 +170,8 @@ class AutomatedTask(BaseAuditModel):
return self.name
def save(self, *args, **kwargs):
from autotasks.tasks import modify_win_task
from automation.tasks import update_policy_autotasks_fields_task
from autotasks.tasks import modify_win_task
# get old agent if exists
old_task = AutomatedTask.objects.get(pk=self.pk) if self.pk else None

View File

@@ -1,6 +1,6 @@
from rest_framework import serializers
from scripts.models import Script
from django.core.exceptions import ObjectDoesNotExist
from .models import AutomatedTask
@@ -186,6 +186,7 @@ class TaskGOGetSerializer(serializers.ModelSerializer):
def get_task_actions(self, obj):
tmp = []
actions_to_remove = []
for action in obj.actions:
if action["type"] == "cmd":
tmp.append(
@@ -201,7 +202,12 @@ class TaskGOGetSerializer(serializers.ModelSerializer):
}
)
elif action["type"] == "script":
script = Script.objects.get(pk=action["script"])
try:
script = Script.objects.get(pk=action["script"])
except ObjectDoesNotExist:
# script doesn't exist so remove it
actions_to_remove.append(action["script"])
continue
tmp.append(
{
"type": "script",
@@ -216,6 +222,18 @@ class TaskGOGetSerializer(serializers.ModelSerializer):
"timeout": action["timeout"],
}
)
if actions_to_remove:
task = AutomatedTask.objects.get(pk=obj.pk)
task.actions = [
action
for action in task.actions
if action["type"] == "cmd"
or (
"script" in action.keys()
and action["script"] not in actions_to_remove
)
]
task.save(update_fields=["actions"])
return tmp
class Meta:
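The get_task_actions change above silently drops task actions whose referenced Script row no longer exists and persists the pruned action list back onto the task. A hedged, Django-free sketch of that filtering step with hypothetical data (prune_actions is an illustrative name, not the project's API):
# Drop "script" actions whose script id is known to be stale; "cmd" actions
# are always kept, mirroring the list comprehension in the serializer above.
def prune_actions(actions, removed_script_ids):
    return [
        action
        for action in actions
        if action["type"] == "cmd"
        or ("script" in action and action["script"] not in removed_script_ids)
    ]

actions = [
    {"type": "cmd", "command": "whoami"},
    {"type": "script", "script": 5},
    {"type": "script", "script": 7},  # pretend script pk 7 was deleted
]
print(prune_actions(actions, {7}))
# [{'type': 'cmd', 'command': 'whoami'}, {'type': 'script', 'script': 5}]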

View File

@@ -4,10 +4,10 @@ import random
from time import sleep
from typing import Union
from django.utils import timezone as djangotime
from autotasks.models import AutomatedTask
from django.utils import timezone as djangotime
from logs.models import DebugLog
from tacticalrmm.celery import app

View File

@@ -496,147 +496,147 @@ class TestAutoTaskCeleryTasks(TacticalTestCase):
ret = run_win_task.s(self.task1.pk).apply()
self.assertEqual(ret.status, "SUCCESS")
@patch("agents.models.Agent.nats_cmd")
def test_create_win_task_schedule(self, nats_cmd):
self.agent = baker.make_recipe("agents.agent")
# @patch("agents.models.Agent.nats_cmd")
# def test_create_win_task_schedule(self, nats_cmd):
# self.agent = baker.make_recipe("agents.agent")
task_name = AutomatedTask.generate_task_name()
# test scheduled task
self.task1 = AutomatedTask.objects.create(
agent=self.agent,
name="test task 1",
win_task_name=task_name,
task_type="scheduled",
run_time_bit_weekdays=127,
run_time_minute="21:55",
)
self.assertEqual(self.task1.sync_status, "initial")
nats_cmd.return_value = "ok"
ret = create_win_task_schedule.s(pk=self.task1.pk).apply()
self.assertEqual(nats_cmd.call_count, 1)
nats_cmd.assert_called_with(
{
"func": "schedtask",
"schedtaskpayload": {
"type": "rmm",
"trigger": "weekly",
"weekdays": 127,
"pk": self.task1.pk,
"name": task_name,
"hour": 21,
"min": 55,
},
},
timeout=5,
)
self.task1 = AutomatedTask.objects.get(pk=self.task1.pk)
self.assertEqual(self.task1.sync_status, "synced")
# task_name = AutomatedTask.generate_task_name()
# # test scheduled task
# self.task1 = AutomatedTask.objects.create(
# agent=self.agent,
# name="test task 1",
# win_task_name=task_name,
# task_type="scheduled",
# run_time_bit_weekdays=127,
# run_time_minute="21:55",
# )
# self.assertEqual(self.task1.sync_status, "initial")
# nats_cmd.return_value = "ok"
# ret = create_win_task_schedule.s(pk=self.task1.pk).apply()
# self.assertEqual(nats_cmd.call_count, 1)
# nats_cmd.assert_called_with(
# {
# "func": "schedtask",
# "schedtaskpayload": {
# "type": "rmm",
# "trigger": "weekly",
# "weekdays": 127,
# "pk": self.task1.pk,
# "name": task_name,
# "hour": 21,
# "min": 55,
# },
# },
# timeout=5,
# )
# self.task1 = AutomatedTask.objects.get(pk=self.task1.pk)
# self.assertEqual(self.task1.sync_status, "synced")
nats_cmd.return_value = "timeout"
ret = create_win_task_schedule.s(pk=self.task1.pk).apply()
self.assertEqual(ret.status, "SUCCESS")
self.task1 = AutomatedTask.objects.get(pk=self.task1.pk)
self.assertEqual(self.task1.sync_status, "initial")
# nats_cmd.return_value = "timeout"
# ret = create_win_task_schedule.s(pk=self.task1.pk).apply()
# self.assertEqual(ret.status, "SUCCESS")
# self.task1 = AutomatedTask.objects.get(pk=self.task1.pk)
# self.assertEqual(self.task1.sync_status, "initial")
# test runonce with future date
nats_cmd.reset_mock()
task_name = AutomatedTask.generate_task_name()
run_time_date = djangotime.now() + djangotime.timedelta(hours=22)
self.task2 = AutomatedTask.objects.create(
agent=self.agent,
name="test task 2",
win_task_name=task_name,
task_type="runonce",
run_time_date=run_time_date,
)
nats_cmd.return_value = "ok"
ret = create_win_task_schedule.s(pk=self.task2.pk).apply()
nats_cmd.assert_called_with(
{
"func": "schedtask",
"schedtaskpayload": {
"type": "rmm",
"trigger": "once",
"pk": self.task2.pk,
"name": task_name,
"year": int(dt.datetime.strftime(self.task2.run_time_date, "%Y")),
"month": dt.datetime.strftime(self.task2.run_time_date, "%B"),
"day": int(dt.datetime.strftime(self.task2.run_time_date, "%d")),
"hour": int(dt.datetime.strftime(self.task2.run_time_date, "%H")),
"min": int(dt.datetime.strftime(self.task2.run_time_date, "%M")),
},
},
timeout=5,
)
self.assertEqual(ret.status, "SUCCESS")
# # test runonce with future date
# nats_cmd.reset_mock()
# task_name = AutomatedTask.generate_task_name()
# run_time_date = djangotime.now() + djangotime.timedelta(hours=22)
# self.task2 = AutomatedTask.objects.create(
# agent=self.agent,
# name="test task 2",
# win_task_name=task_name,
# task_type="runonce",
# run_time_date=run_time_date,
# )
# nats_cmd.return_value = "ok"
# ret = create_win_task_schedule.s(pk=self.task2.pk).apply()
# nats_cmd.assert_called_with(
# {
# "func": "schedtask",
# "schedtaskpayload": {
# "type": "rmm",
# "trigger": "once",
# "pk": self.task2.pk,
# "name": task_name,
# "year": int(dt.datetime.strftime(self.task2.run_time_date, "%Y")),
# "month": dt.datetime.strftime(self.task2.run_time_date, "%B"),
# "day": int(dt.datetime.strftime(self.task2.run_time_date, "%d")),
# "hour": int(dt.datetime.strftime(self.task2.run_time_date, "%H")),
# "min": int(dt.datetime.strftime(self.task2.run_time_date, "%M")),
# },
# },
# timeout=5,
# )
# self.assertEqual(ret.status, "SUCCESS")
# test runonce with date in the past
nats_cmd.reset_mock()
task_name = AutomatedTask.generate_task_name()
run_time_date = djangotime.now() - djangotime.timedelta(days=13)
self.task3 = AutomatedTask.objects.create(
agent=self.agent,
name="test task 3",
win_task_name=task_name,
task_type="runonce",
run_time_date=run_time_date,
)
nats_cmd.return_value = "ok"
ret = create_win_task_schedule.s(pk=self.task3.pk).apply()
self.task3 = AutomatedTask.objects.get(pk=self.task3.pk)
self.assertEqual(ret.status, "SUCCESS")
# # test runonce with date in the past
# nats_cmd.reset_mock()
# task_name = AutomatedTask.generate_task_name()
# run_time_date = djangotime.now() - djangotime.timedelta(days=13)
# self.task3 = AutomatedTask.objects.create(
# agent=self.agent,
# name="test task 3",
# win_task_name=task_name,
# task_type="runonce",
# run_time_date=run_time_date,
# )
# nats_cmd.return_value = "ok"
# ret = create_win_task_schedule.s(pk=self.task3.pk).apply()
# self.task3 = AutomatedTask.objects.get(pk=self.task3.pk)
# self.assertEqual(ret.status, "SUCCESS")
# test checkfailure
nats_cmd.reset_mock()
self.check = baker.make_recipe("checks.diskspace_check", agent=self.agent)
task_name = AutomatedTask.generate_task_name()
self.task4 = AutomatedTask.objects.create(
agent=self.agent,
name="test task 4",
win_task_name=task_name,
task_type="checkfailure",
assigned_check=self.check,
)
nats_cmd.return_value = "ok"
ret = create_win_task_schedule.s(pk=self.task4.pk).apply()
nats_cmd.assert_called_with(
{
"func": "schedtask",
"schedtaskpayload": {
"type": "rmm",
"trigger": "manual",
"pk": self.task4.pk,
"name": task_name,
},
},
timeout=5,
)
self.assertEqual(ret.status, "SUCCESS")
# # test checkfailure
# nats_cmd.reset_mock()
# self.check = baker.make_recipe("checks.diskspace_check", agent=self.agent)
# task_name = AutomatedTask.generate_task_name()
# self.task4 = AutomatedTask.objects.create(
# agent=self.agent,
# name="test task 4",
# win_task_name=task_name,
# task_type="checkfailure",
# assigned_check=self.check,
# )
# nats_cmd.return_value = "ok"
# ret = create_win_task_schedule.s(pk=self.task4.pk).apply()
# nats_cmd.assert_called_with(
# {
# "func": "schedtask",
# "schedtaskpayload": {
# "type": "rmm",
# "trigger": "manual",
# "pk": self.task4.pk,
# "name": task_name,
# },
# },
# timeout=5,
# )
# self.assertEqual(ret.status, "SUCCESS")
# test manual
nats_cmd.reset_mock()
task_name = AutomatedTask.generate_task_name()
self.task5 = AutomatedTask.objects.create(
agent=self.agent,
name="test task 5",
win_task_name=task_name,
task_type="manual",
)
nats_cmd.return_value = "ok"
ret = create_win_task_schedule.s(pk=self.task5.pk).apply()
nats_cmd.assert_called_with(
{
"func": "schedtask",
"schedtaskpayload": {
"type": "rmm",
"trigger": "manual",
"pk": self.task5.pk,
"name": task_name,
},
},
timeout=5,
)
self.assertEqual(ret.status, "SUCCESS")
# # test manual
# nats_cmd.reset_mock()
# task_name = AutomatedTask.generate_task_name()
# self.task5 = AutomatedTask.objects.create(
# agent=self.agent,
# name="test task 5",
# win_task_name=task_name,
# task_type="manual",
# )
# nats_cmd.return_value = "ok"
# ret = create_win_task_schedule.s(pk=self.task5.pk).apply()
# nats_cmd.assert_called_with(
# {
# "func": "schedtask",
# "schedtaskpayload": {
# "type": "rmm",
# "trigger": "manual",
# "pk": self.task5.pk,
# "name": task_name,
# },
# },
# timeout=5,
# )
# self.assertEqual(ret.status, "SUCCESS")
class TestTaskPermissions(TacticalTestCase):

View File

@@ -1,11 +1,11 @@
from agents.models import Agent
from automation.models import Policy
from django.shortcuts import get_object_or_404
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.exceptions import PermissionDenied
from agents.models import Agent
from automation.models import Policy
from tacticalrmm.permissions import _has_perm_on_agent
from .models import AutomatedTask

View File

@@ -8,6 +8,7 @@ from django.contrib.postgres.fields import ArrayField
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from logs.models import BaseAuditModel
from tacticalrmm.models import PermissionQuerySet
CHECK_TYPE_CHOICES = [

View File

@@ -1,12 +1,11 @@
import pytz
import validators as _v
from rest_framework import serializers
from autotasks.models import AutomatedTask
from rest_framework import serializers
from scripts.models import Script
from scripts.serializers import ScriptCheckSerializer
from .models import Check, CheckHistory
from scripts.models import Script
class AssignedTaskField(serializers.ModelSerializer):

View File

@@ -1,10 +1,11 @@
from unittest.mock import patch
from django.utils import timezone as djangotime
from checks.models import CheckHistory
from django.conf import settings
from django.test import modify_settings
from django.utils import timezone as djangotime
from model_bakery import baker
from checks.models import CheckHistory
from tacticalrmm.test import TacticalTestCase
from .serializers import CheckSerializer
@@ -12,6 +13,11 @@ from .serializers import CheckSerializer
base_url = "/checks"
@modify_settings(
MIDDLEWARE={
"remove": "tacticalrmm.middleware.LinuxMiddleware",
}
)
class TestCheckViews(TacticalTestCase):
def setUp(self):
self.authenticate()

View File

@@ -1,19 +1,19 @@
import asyncio
from datetime import datetime as dt
from agents.models import Agent
from automation.models import Policy
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.exceptions import PermissionDenied
from agents.models import Agent
from automation.models import Policy
from tacticalrmm.utils import notify_error
from tacticalrmm.permissions import _has_perm_on_agent
from tacticalrmm.utils import notify_error
from .models import Check, CheckHistory
from .permissions import ChecksPerms, RunChecksPerms

View File

@@ -1,12 +1,12 @@
import uuid
from agents.models import Agent
from django.contrib.postgres.fields import ArrayField
from django.db import models
from agents.models import Agent
from logs.models import BaseAuditModel
from tacticalrmm.constants import AGENT_DEFER
from tacticalrmm.models import PermissionQuerySet
from tacticalrmm.utils import AGENT_DEFER
def _default_failing_checks_data():

View File

@@ -1,8 +1,8 @@
from rest_framework.serializers import (
ModelSerializer,
ReadOnlyField,
ValidationError,
SerializerMethodField,
ValidationError,
)
from .models import Client, ClientCustomField, Deployment, Site, SiteCustomField

View File

@@ -1,19 +1,15 @@
import uuid
from unittest.mock import patch
from itertools import cycle
from unittest.mock import patch
from model_bakery import baker
from rest_framework.serializers import ValidationError
from rest_framework.response import Response
from rest_framework.serializers import ValidationError
from tacticalrmm.test import TacticalTestCase
from .models import Client, ClientCustomField, Deployment, Site, SiteCustomField
from .serializers import (
ClientSerializer,
DeploymentSerializer,
SiteSerializer,
)
from .serializers import ClientSerializer, DeploymentSerializer, SiteSerializer
base_url = "/clients"

View File

@@ -3,24 +3,20 @@ import re
import uuid
import pytz
from agents.models import Agent
from core.models import CoreSettings
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.exceptions import PermissionDenied
from agents.models import Agent
from core.models import CoreSettings
from tacticalrmm.utils import notify_error
from tacticalrmm.permissions import _has_perm_on_client, _has_perm_on_site
from tacticalrmm.utils import notify_error
from .models import Client, ClientCustomField, Deployment, Site, SiteCustomField
from .permissions import (
ClientsPerms,
DeploymentPerms,
SitesPerms,
)
from .permissions import ClientsPerms, DeploymentPerms, SitesPerms
from .serializers import (
ClientCustomFieldSerializer,
ClientSerializer,
@@ -255,8 +251,8 @@ class AgentDeployment(APIView):
return Response(DeploymentSerializer(deps, many=True).data)
def post(self, request):
from knox.models import AuthToken
from accounts.models import User
from knox.models import AuthToken
site = get_object_or_404(Site, pk=request.data["site"])

View File

@@ -0,0 +1,181 @@
#!/usr/bin/env bash
if [ $EUID -ne 0 ]; then
echo "ERROR: Must be run as root"
exit 1
fi
HAS_SYSTEMD=$(ps --no-headers -o comm 1)
if [ "${HAS_SYSTEMD}" != 'systemd' ]; then
echo "This install script only supports systemd"
echo "Please install systemd or manually create the service using your systems's service manager"
exit 1
fi
agentDL='agentDLChange'
meshDL='meshDLChange'
apiURL='apiURLChange'
token='tokenChange'
clientID='clientIDChange'
siteID='siteIDChange'
agentType='agentTypeChange'
proxy=''
agentBinPath='/usr/local/bin'
binName='tacticalagent'
agentBin="${agentBinPath}/${binName}"
agentConf='/etc/tacticalagent'
agentSvcName='tacticalagent.service'
agentSysD="/etc/systemd/system/${agentSvcName}"
meshDir='/opt/tacticalmesh'
meshSystemBin="${meshDir}/meshagent"
meshSvcName='meshagent.service'
meshSysD="/lib/systemd/system/${meshSvcName}"
deb=(ubuntu debian raspbian kali)
rhe=(fedora rocky centos rhel amzn arch opensuse)
set_locale_deb() {
locale-gen "en_US.UTF-8"
localectl set-locale LANG=en_US.UTF-8
. /etc/default/locale
}
set_locale_rhel() {
localedef -c -i en_US -f UTF-8 en_US.UTF-8 > /dev/null 2>&1
localectl set-locale LANG=en_US.UTF-8
. /etc/locale.conf
}
RemoveOldAgent() {
if [ -f "${agentSysD}" ]; then
systemctl disable --now ${agentSvcName}
rm -f ${agentSysD}
systemctl daemon-reload
fi
if [ -f "${agentConf}" ]; then
rm -f ${agentConf}
fi
if [ -f "${agentBin}" ]; then
rm -f ${agentBin}
fi
}
InstallMesh() {
if [ -f /etc/os-release ]; then
distroID=$(. /etc/os-release; echo $ID)
if [[ " ${deb[*]} " =~ " ${distroID} " ]]; then
set_locale_deb
elif [[ " ${rhe[*]} " =~ " ${distroID} " ]]; then
set_locale_rhel
else
set_locale_rhel
fi
fi
meshTmpDir=$(mktemp -d -t "mesh-XXXXXXXXX")
if [ $? -ne 0 ]; then
meshTmpDir='meshtemp'
mkdir -p ${meshTmpDir}
fi
meshTmpBin="${meshTmpDir}/meshagent"
wget --no-check-certificate -q -O ${meshTmpBin} ${meshDL}
chmod +x ${meshTmpBin}
mkdir -p ${meshDir}
env LC_ALL=en_US.UTF-8 LANGUAGE=en_US ${meshTmpBin} -install --installPath=${meshDir}
sleep 1
rm -rf ${meshTmpDir}
}
RemoveMesh() {
if [ -f "${meshSystemBin}" ]; then
${meshSystemBin} -uninstall
sleep 1
fi
if [ -f "${meshSysD}" ]; then
systemctl disable --now ${meshSvcName} > /dev/null 2>&1
rm -f ${meshSysD}
fi
rm -rf ${meshDir}
systemctl daemon-reload
}
Uninstall() {
RemoveMesh
RemoveOldAgent
}
if [ $# -ne 0 ] && [ $1 == 'uninstall' ]; then
Uninstall
exit 0
fi
RemoveOldAgent
echo "Downloading tactical agent..."
wget -q -O ${agentBin} "${agentDL}"
chmod +x ${agentBin}
MESH_NODE_ID=""
if [ $# -ne 0 ] && [ $1 == '--nomesh' ]; then
echo "Skipping mesh install"
else
if [ -f "${meshSystemBin}" ]; then
RemoveMesh
fi
echo "Downloading and installing mesh agent..."
InstallMesh
sleep 2
echo "Getting mesh node id..."
MESH_NODE_ID=$(${agentBin} -m nixmeshnodeid)
fi
if [ ! -d "${agentBinPath}" ]; then
echo "Creating ${agentBinPath}"
mkdir -p ${agentBinPath}
fi
if [ $# -ne 0 ] && [ $1 == '--debug' ]; then
INSTALL_CMD="${agentBin} -m install -api ${apiURL} -client-id ${clientID} -site-id ${siteID} -agent-type ${agentType} -auth ${token} -log debug"
else
INSTALL_CMD="${agentBin} -m install -api ${apiURL} -client-id ${clientID} -site-id ${siteID} -agent-type ${agentType} -auth ${token}"
fi
if [ "${MESH_NODE_ID}" != '' ]; then
INSTALL_CMD+=" -meshnodeid ${MESH_NODE_ID}"
fi
if [ "${proxy}" != '' ]; then
INSTALL_CMD+=" -proxy ${proxy}"
fi
eval ${INSTALL_CMD}
tacticalsvc="$(cat << EOF
[Unit]
Description=Tactical RMM Linux Agent
[Service]
Type=simple
ExecStart=${agentBin} -m svc
User=root
Group=root
Restart=always
RestartSec=5s
LimitNOFILE=1000000
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
)"
echo "${tacticalsvc}" | tee ${agentSysD} > /dev/null
systemctl daemon-reload
systemctl enable --now ${agentSvcName}

View File

@@ -1,11 +1,10 @@
import asyncio
from agents.models import Agent
from channels.db import database_sync_to_async
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from django.contrib.auth.models import AnonymousUser
from agents.models import Agent
class DashInfo(AsyncJsonWebsocketConsumer):
async def connect(self):

View File

@@ -0,0 +1,62 @@
import asyncio
from meshctrl.utils import get_auth_token
from django.core.management.base import BaseCommand
from core.models import CoreSettings
from core.utils import get_mesh_device_id, get_mesh_ws_url
class Command(BaseCommand):
help = "Mesh troubleshooting script"
def _success(self, *args):
self.stdout.write(self.style.SUCCESS(" ".join(args)))
def _error(self, *args):
self.stdout.write(self.style.ERROR(" ".join(args)))
def _warning(self, *args):
self.stdout.write(self.style.WARNING(" ".join(args)))
def handle(self, *args, **kwargs):
core: CoreSettings = CoreSettings.objects.first() # type: ignore
self._warning("Mesh site:", core.mesh_site)
self._warning("Mesh username:", core.mesh_username)
self._warning("Mesh token:", core.mesh_token)
self._warning("Mesh device group:", core.mesh_device_group)
try:
token = get_auth_token(core.mesh_username, core.mesh_token)
except Exception as e:
self._error("Error getting auth token:")
self._error(str(e))
return
else:
self._success("Auth token ok:")
self._success(token)
try:
uri = get_mesh_ws_url()
except Exception as e:
self._error("Error getting mesh url:")
self._error(str(e))
return
else:
self._success("Mesh url ok:")
self._success(uri)
try:
mesh_id = asyncio.run(get_mesh_device_id(uri, core.mesh_device_group))
except IndexError:
self._error(
"Error: you are using a custom mesh device group and need to add its name in Global Settings > MeshCentral"
)
return
except Exception as e:
self._error("Error getting mesh device id:")
self._error(str(e))
return
else:
self._success("Mesh device id ok:", mesh_id)

View File

@@ -1,15 +1,24 @@
import os
import json
import os
from django.core.management.base import BaseCommand
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Generate conf for nats-api"
def handle(self, *args, **kwargs):
self.stdout.write("Creating configuration for nats-api...")
db = settings.DATABASES["default"]
if hasattr(settings, "DB_SSL"):
ssl = settings.DB_SSL
elif "DB_SSL" in os.environ:
ssl = os.getenv("DB_SSL")
else:
ssl = "disable"
config = {
"key": settings.SECRET_KEY,
"natsurl": f"tls://{settings.ALLOWED_HOSTS[0]}:4222",
@@ -18,7 +27,10 @@ class Command(BaseCommand):
"host": db["HOST"],
"port": int(db["PORT"]),
"dbname": db["NAME"],
"sslmode": ssl,
}
conf = os.path.join(settings.BASE_DIR, "nats-api.conf")
with open(conf, "w") as f:
json.dump(config, f)
self.stdout.write("Configuration for nats-api created successfully")

View File

@@ -2,25 +2,14 @@ import asyncio
import json
import websockets
from django.conf import settings
from core.utils import get_mesh_ws_url
from django.core.management.base import BaseCommand
from core.models import CoreSettings
from .helpers import get_auth_token
class Command(BaseCommand):
help = "Sets up initial mesh central configuration"
async def websocket_call(self, mesh_settings):
token = get_auth_token(mesh_settings.mesh_username, mesh_settings.mesh_token)
if settings.DOCKER_BUILD:
uri = f"{settings.MESH_WS_URL}/control.ashx?auth={token}"
else:
site = mesh_settings.mesh_site.replace("https", "wss")
uri = f"{site}/control.ashx?auth={token}"
async def websocket_call(self, uri):
async with websockets.connect(uri) as websocket:
@@ -41,9 +30,9 @@ class Command(BaseCommand):
response = json.loads(message)
if response["action"] == "createInviteLink":
print(response["url"])
self.stdout.write(response["url"].replace(":4443", ":443"))
break
def handle(self, *args, **kwargs):
mesh_settings = CoreSettings.objects.first()
asyncio.get_event_loop().run_until_complete(self.websocket_call(mesh_settings))
uri = get_mesh_ws_url()
asyncio.run(self.websocket_call(uri))

View File

@@ -1,19 +0,0 @@
import time
from base64 import b64encode
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes
def get_auth_token(user, key):
key = bytes.fromhex(key)
key1 = key[0:32]
msg = '{{"userid":"{}", "domainid":"{}", "time":{}}}'.format(
f"user//{user}", "", int(time.time())
)
iv = get_random_bytes(12)
a = AES.new(key1, AES.MODE_GCM, iv)
msg, tag = a.encrypt_and_digest(bytes(msg, "utf-8"))
return b64encode(iv + tag + msg, altchars=b"@$").decode("utf-8")

View File

@@ -1,8 +1,7 @@
from core.models import CoreSettings
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand
from core.models import CoreSettings
class Command(BaseCommand):
help = "Populates the global site settings on first install"

View File

@@ -2,26 +2,16 @@ import asyncio
import json
import websockets
from core.models import CoreSettings
from core.utils import get_mesh_ws_url
from django.conf import settings
from django.core.management.base import BaseCommand
from core.models import CoreSettings
from .helpers import get_auth_token
class Command(BaseCommand):
help = "Sets up initial mesh central configuration"
async def websocket_call(self, mesh_settings):
token = get_auth_token(mesh_settings.mesh_username, mesh_settings.mesh_token)
if settings.DOCKER_BUILD:
uri = f"{settings.MESH_WS_URL}/control.ashx?auth={token}"
else:
site = mesh_settings.mesh_site.replace("https", "wss")
uri = f"{site}/control.ashx?auth={token}"
async def websocket_call(self, uri):
async with websockets.connect(uri) as websocket:
@@ -55,9 +45,9 @@ class Command(BaseCommand):
# Check for Mesh Username
if (
not mesh_settings.mesh_username
or settings.MESH_USERNAME != mesh_settings.mesh_username
or settings.MESH_USERNAME.lower() != mesh_settings.mesh_username
):
mesh_settings.mesh_username = settings.MESH_USERNAME
mesh_settings.mesh_username = settings.MESH_USERNAME.lower()
# Check for Mesh Site
if (
@@ -82,9 +72,8 @@ class Command(BaseCommand):
return
try:
asyncio.get_event_loop().run_until_complete(
self.websocket_call(mesh_settings)
)
uri = get_mesh_ws_url()
asyncio.run(self.websocket_call(uri))
self.stdout.write("Initial Mesh Central setup complete")
except websockets.exceptions.ConnectionClosedError:
self.stdout.write(

View File

@@ -1,6 +1,6 @@
from scripts.models import Script
from django.core.management.base import BaseCommand
from django.conf import settings
from django.core.management.base import BaseCommand
from scripts.models import Script
class Command(BaseCommand):

View File

@@ -1,20 +1,21 @@
import base64
from django.core.management.base import BaseCommand
from django.utils.timezone import make_aware
import datetime as dt
from logs.models import PendingAction
from scripts.models import Script
from autotasks.models import AutomatedTask
from accounts.models import User
from agents.models import Agent
from autotasks.models import AutomatedTask
from django.core.management.base import BaseCommand
from django.utils.timezone import make_aware
from scripts.models import Script
from tacticalrmm.constants import AGENT_DEFER
class Command(BaseCommand):
help = "Collection of tasks to run after updating the rmm, after migrations"
def handle(self, *args, **kwargs):
# remove task pending actions. deprecated 4/20/2021
PendingAction.objects.filter(action_type="taskaction").delete()
self.stdout.write("Running post update tasks")
# load community scripts into the db
Script.load_community_scripts()
@@ -39,29 +40,46 @@ class Command(BaseCommand):
# convert autotask to the new format
for task in AutomatedTask.objects.all():
edited = False
try:
edited = False
# convert scheduled task_type
if task.task_type == "scheduled":
task.task_type = "daily"
task.run_time_date = make_aware(
dt.datetime.strptime(task.run_time_minute, "%H:%M")
)
task.daily_interval = 1
edited = True
# convert scheduled task_type
if task.task_type == "scheduled":
task.task_type = "daily"
task.run_time_date = make_aware(
dt.datetime.strptime(task.run_time_minute, "%H:%M")
)
task.daily_interval = 1
edited = True
# convert actions
if not task.actions:
task.actions = [
{
"type": "script",
"script": task.script.pk,
"script_args": task.script_args,
"timeout": task.timeout,
"name": task.script.name,
}
]
edited = True
# convert actions
if not task.actions:
task.actions = [
{
"type": "script",
"script": task.script.pk,
"script_args": task.script_args,
"timeout": task.timeout,
"name": task.script.name,
}
]
edited = True
if edited:
task.save()
if edited:
task.save()
except:
continue
# set goarch for older windows agents
for agent in Agent.objects.defer(*AGENT_DEFER):
if not agent.goarch:
if agent.arch == "64":
agent.goarch = "amd64"
elif agent.arch == "32":
agent.goarch = "386"
else:
agent.goarch = "amd64"
agent.save(update_fields=["goarch"])
self.stdout.write("Post update tasks finished")

View File

@@ -7,4 +7,6 @@ class Command(BaseCommand):
help = "Reload Nats"
def handle(self, *args, **kwargs):
self.stdout.write("Reloading NATs configuration...")
reload_nats()
self.stdout.write("NATs configuration reloaded")

View File

@@ -0,0 +1,18 @@
# Generated by Django 3.2.12 on 2022-02-16 21:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0029_alter_coresettings_default_time_zone'),
]
operations = [
migrations.AddField(
model_name='coresettings',
name='mesh_device_group',
field=models.CharField(blank=True, default='TacticalRMM', max_length=255, null=True),
),
]

View File

@@ -1,16 +1,15 @@
import requests
import smtplib
from email.message import EmailMessage
import pytz
import requests
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import ValidationError
from django.db import models
from twilio.rest import Client as TwClient
from logs.models import LOG_LEVEL_CHOICES, BaseAuditModel, DebugLog
from twilio.base.exceptions import TwilioRestException
from logs.models import BaseAuditModel, DebugLog, LOG_LEVEL_CHOICES
from twilio.rest import Client as TwClient
TZ_CHOICES = [(_, _) for _ in pytz.all_timezones]
@@ -61,6 +60,9 @@ class CoreSettings(BaseAuditModel):
mesh_token = models.CharField(max_length=255, null=True, blank=True, default="")
mesh_username = models.CharField(max_length=255, null=True, blank=True, default="")
mesh_site = models.CharField(max_length=255, null=True, blank=True, default="")
mesh_device_group = models.CharField(
max_length=255, null=True, blank=True, default="TacticalRMM"
)
agent_auto_update = models.BooleanField(default=True)
workstation_policy = models.ForeignKey(
"automation.Policy",
@@ -95,7 +97,7 @@ class CoreSettings(BaseAuditModel):
if not self.pk:
try:
self.mesh_site = settings.MESH_SITE
self.mesh_username = settings.MESH_USERNAME
self.mesh_username = settings.MESH_USERNAME.lower()
self.mesh_token = settings.MESH_TOKEN_KEY
except:
pass
@@ -319,22 +321,14 @@ class CodeSignToken(models.Model):
if not self.token:
return False
errors = []
for url in settings.EXE_GEN_URLS:
try:
r = requests.post(
f"{url}/api/v1/checktoken",
json={"token": self.token},
headers={"Content-type": "application/json"},
timeout=15,
)
except Exception as e:
errors.append(str(e))
else:
errors = []
break
if errors:
try:
r = requests.post(
f"{settings.EXE_GEN_URL}/api/v1/checktoken",
json={"token": self.token},
headers={"Content-type": "application/json"},
timeout=15,
)
except:
return False
return r.status_code == 200

View File

@@ -1,20 +1,20 @@
import pytz
from django.utils import timezone as djangotime
from django.conf import settings
from packaging import version as pyver
from agents.models import Agent
from agents.tasks import clear_faults_task, prune_agent_history
from alerts.models import Alert
from alerts.tasks import prune_resolved_alerts
from autotasks.models import AutomatedTask
from autotasks.tasks import delete_win_task_schedule
from checks.tasks import prune_check_history
from agents.tasks import clear_faults_task, prune_agent_history
from alerts.tasks import prune_resolved_alerts
from core.models import CoreSettings
from logs.tasks import prune_debug_log, prune_audit_log
from tacticalrmm.celery import app
from tacticalrmm.utils import AGENT_DEFER
from agents.models import Agent
from clients.models import Client, Site
from alerts.models import Alert
from core.models import CoreSettings
from django.conf import settings
from django.utils import timezone as djangotime
from logs.tasks import prune_audit_log, prune_debug_log
from packaging import version as pyver
from tacticalrmm.celery import app
from tacticalrmm.constants import AGENT_DEFER
@app.task

View File

@@ -3,7 +3,6 @@ from django.urls import path
from . import views
urlpatterns = [
path("uploadmesh/", views.UploadMeshAgent.as_view()),
path("settings/", views.GetEditCoreSettings.as_view()),
path("version/", views.version),
path("emailtest/", views.email_test),

View File

@@ -0,0 +1,86 @@
import json
import tempfile
from base64 import b64encode
from meshctrl.utils import get_auth_token
import requests
import websockets
from django.conf import settings
from django.http import FileResponse
def get_mesh_ws_url() -> str:
from core.models import CoreSettings
core = CoreSettings.objects.first()
token = get_auth_token(core.mesh_username, core.mesh_token) # type: ignore
if settings.DOCKER_BUILD:
uri = f"{settings.MESH_WS_URL}/control.ashx?auth={token}"
else:
site = core.mesh_site.replace("https", "wss") # type: ignore
uri = f"{site}/control.ashx?auth={token}"
return uri
async def get_mesh_device_id(uri: str, device_group: str):
async with websockets.connect(uri) as ws: # type: ignore
payload = {"action": "meshes", "responseid": "meshctrl"}
await ws.send(json.dumps(payload))
async for message in ws:
r = json.loads(message)
if r["action"] == "meshes":
return list(filter(lambda x: x["name"] == device_group, r["meshes"]))[
0
]["_id"].split("mesh//")[1]
def download_mesh_agent(dl_url: str) -> FileResponse:
with tempfile.NamedTemporaryFile(prefix="mesh-", dir=settings.EXE_DIR) as fp:
r = requests.get(dl_url, stream=True, timeout=15)
with open(fp.name, "wb") as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
del r
return FileResponse(open(fp.name, "rb"), as_attachment=True, filename=fp.name)
def _b64_to_hex(h):
return b64encode(bytes.fromhex(h)).decode().replace(r"/", "$").replace(r"+", "@")
async def send_command_with_mesh(
cmd: str, uri: str, mesh_node_id: str, shell: int, run_as_user: int
):
node_id = _b64_to_hex(mesh_node_id)
async with websockets.connect(uri) as ws: # type: ignore
await ws.send(
json.dumps(
{
"action": "runcommands",
"cmds": cmd,
"nodeids": [f"node//{node_id}"],
"runAsUser": run_as_user,
"type": shell,
"responseid": "trmm",
}
)
)
async def remove_mesh_agent(uri: str, mesh_node_id: str):
node_id = _b64_to_hex(mesh_node_id)
async with websockets.connect(uri) as ws: # type: ignore
await ws.send(
json.dumps(
{
"action": "removedevices",
"nodeids": [f"node//{node_id}"],
"responseid": "trmm",
}
)
)
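The commands above address agents as node//<id>, where _b64_to_hex re-encodes the stored hex node id into MeshCentral's URL-safe base64 variant ("/" becomes "$" and "+" becomes "@"). A standalone sketch of that conversion (hex_to_mesh_b64 mirrors _b64_to_hex; "deadbeef" is just an illustrative value):
from base64 import b64encode

def hex_to_mesh_b64(node_id_hex: str) -> str:
    # same substitutions as _b64_to_hex above: "/" -> "$", "+" -> "@"
    return b64encode(bytes.fromhex(node_id_hex)).decode().replace("/", "$").replace("+", "@")

print(hex_to_mesh_b64("deadbeef"))  # 3q2@7w==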

View File

@@ -12,20 +12,20 @@ from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from tacticalrmm.utils import notify_error
from tacticalrmm.permissions import (
_has_perm_on_client,
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from tacticalrmm.utils import notify_error
from .models import CodeSignToken, CoreSettings, CustomField, GlobalKVStore, URLAction
from .permissions import (
CodeSignPerms,
CoreSettingsPerms,
CustomFieldPerms,
ServerMaintPerms,
URLActionPerms,
CustomFieldPerms,
)
from .serializers import (
CodeSignTokenSerializer,
@@ -36,28 +36,6 @@ from .serializers import (
)
class UploadMeshAgent(APIView):
permission_classes = [IsAuthenticated, CoreSettingsPerms]
parser_class = (FileUploadParser,)
def put(self, request, format=None):
if "meshagent" not in request.data and "arch" not in request.data:
raise ParseError("Empty content")
arch = request.data["arch"]
f = request.data["meshagent"]
mesh_exe = os.path.join(
settings.EXE_DIR, "meshagent.exe" if arch == "64" else "meshagent-x86.exe"
)
with open(mesh_exe, "wb+") as j:
for chunk in f.chunks():
j.write(chunk)
return Response(
"Mesh Agent uploaded successfully", status=status.HTTP_201_CREATED
)
class GetEditCoreSettings(APIView):
permission_classes = [IsAuthenticated, CoreSettingsPerms]
@@ -232,23 +210,15 @@ class CodeSign(APIView):
def patch(self, request):
import requests
errors = []
for url in settings.EXE_GEN_URLS:
try:
r = requests.post(
f"{url}/api/v1/checktoken",
json={"token": request.data["token"]},
headers={"Content-type": "application/json"},
timeout=15,
)
except Exception as e:
errors.append(str(e))
else:
errors = []
break
if errors:
return notify_error(", ".join(errors))
try:
r = requests.post(
f"{settings.EXE_GEN_URL}/api/v1/checktoken",
json={"token": request.data["token"]},
headers={"Content-type": "application/json"},
timeout=15,
)
except Exception as e:
return notify_error(str(e))
if r.status_code == 400 or r.status_code == 401: # type: ignore
return notify_error(r.json()["ret"]) # type: ignore
@@ -360,10 +330,10 @@ class RunURLAction(APIView):
permission_classes = [IsAuthenticated, URLActionPerms]
def patch(self, request):
from requests.utils import requote_uri
from agents.models import Agent
from clients.models import Client, Site
from requests.utils import requote_uri
from tacticalrmm.utils import replace_db_values
if "agent_id" in request.data.keys():

View File

@@ -1,6 +1,6 @@
from django.contrib import admin
from .models import AuditLog, PendingAction, DebugLog
from .models import AuditLog, DebugLog, PendingAction
admin.site.register(PendingAction)
admin.site.register(AuditLog)

View File

@@ -1,8 +1,8 @@
# Generated by Django 3.2.1 on 2021-06-14 18:35
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):

View File

@@ -1,7 +1,7 @@
# Generated by Django 3.2.6 on 2021-11-05 01:58
from django.db import migrations
from django.core.exceptions import ObjectDoesNotExist
from django.db import migrations
def update_agent_field(apps, schema_editor):

View File

@@ -0,0 +1,18 @@
# Generated by Django 3.2.12 on 2022-02-27 05:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('logs', '0022_auto_20211105_0158'),
]
operations = [
migrations.AlterField(
model_name='pendingaction',
name='action_type',
field=models.CharField(blank=True, choices=[('schedreboot', 'Scheduled Reboot'), ('agentupdate', 'Agent Update'), ('chocoinstall', 'Chocolatey Software Install'), ('runcmd', 'Run Command'), ('runscript', 'Run Script'), ('runpatchscan', 'Run Patch Scan'), ('runpatchinstall', 'Run Patch Install')], max_length=255, null=True),
),
]

View File

@@ -2,6 +2,7 @@ import datetime as dt
from abc import abstractmethod
from django.db import models
from tacticalrmm.middleware import get_debug_info, get_username
from tacticalrmm.models import PermissionQuerySet
@@ -14,7 +15,6 @@ def get_debug_level():
ACTION_TYPE_CHOICES = [
("schedreboot", "Scheduled Reboot"),
("taskaction", "Scheduled Task Action"), # deprecated
("agentupdate", "Agent Update"),
("chocoinstall", "Chocolatey Software Install"),
("runcmd", "Run Command"),

View File

@@ -1,12 +1,24 @@
from clients.serializers import SiteMinimumSerializer
from rest_framework import serializers
from .models import AuditLog, DebugLog, PendingAction
class AuditLogSerializer(serializers.ModelSerializer):
entry_time = serializers.SerializerMethodField(read_only=True)
entry_time = serializers.SerializerMethodField()
ip_address = serializers.ReadOnlyField(source="debug_info.ip")
site = serializers.SerializerMethodField()
def get_site(self, obj):
from agents.models import Agent
from clients.serializers import SiteMinimumSerializer
if obj.agent_id and Agent.objects.filter(agent_id=obj.agent_id).exists():
return SiteMinimumSerializer(
Agent.objects.get(agent_id=obj.agent_id).site
).data
else:
return None
class Meta:
model = AuditLog
@@ -19,7 +31,6 @@ class AuditLogSerializer(serializers.ModelSerializer):
class PendingActionSerializer(serializers.ModelSerializer):
hostname = serializers.ReadOnlyField(source="agent.hostname")
salt_id = serializers.ReadOnlyField(source="agent.salt_id")
client = serializers.ReadOnlyField(source="agent.client.name")
site = serializers.ReadOnlyField(source="agent.site.name")
due = serializers.ReadOnlyField()

View File

@@ -3,6 +3,7 @@ from unittest.mock import patch
from django.utils import timezone as djangotime
from model_bakery import baker, seq
from tacticalrmm.test import TacticalTestCase
base_url = "/logs"

View File

@@ -1,19 +1,21 @@
import asyncio
from datetime import datetime as dt
from agents.models import Agent
from django.core.paginator import Paginator
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.exceptions import PermissionDenied
from tacticalrmm.utils import notify_error, get_default_timezone, AGENT_DEFER
from tacticalrmm.permissions import _audit_log_filter, _has_perm_on_agent
from .models import AuditLog, PendingAction, DebugLog
from agents.models import Agent
from tacticalrmm.constants import AGENT_DEFER
from tacticalrmm.permissions import _audit_log_filter, _has_perm_on_agent
from tacticalrmm.utils import get_default_timezone, notify_error
from .models import AuditLog, DebugLog, PendingAction
from .permissions import AuditLogPerms, DebugLogPerms, PendingActionPerms
from .serializers import AuditLogSerializer, DebugLogSerializer, PendingActionSerializer

Some files were not shown because too many files have changed in this diff.