Compare commits


1 Commit

Author: wh1te909
SHA1: 3588bf827b
Message: Release 0.11.0
Date: 2022-01-13 01:31:45 +00:00
792 changed files with 75946 additions and 10663 deletions


@@ -23,7 +23,7 @@ POSTGRES_USER=postgres
POSTGRES_PASS=postgrespass
# DEV SETTINGS
APP_PORT=443
APP_PORT=80
API_PORT=80
HTTP_PROTOCOL=https
DOCKER_NETWORK=172.21.0.0/24


@@ -1,11 +1,4 @@
# pulls community scripts from git repo
FROM python:3.10-slim AS GET_SCRIPTS_STAGE
RUN apt-get update && \
apt-get install -y --no-install-recommends git && \
git clone https://github.com/amidaware/community-scripts.git /community-scripts
FROM python:3.10-slim
FROM python:3.9.9-slim
ENV TACTICAL_DIR /opt/tactical
ENV TACTICAL_READY_FILE ${TACTICAL_DIR}/tmp/tactical.ready
@@ -17,15 +10,9 @@ ENV PYTHONUNBUFFERED=1
EXPOSE 8000 8383 8005
RUN apt-get update && \
apt-get install -y build-essential
RUN groupadd -g 1000 tactical && \
useradd -u 1000 -g 1000 tactical
# copy community scripts
COPY --from=GET_SCRIPTS_STAGE /community-scripts /community-scripts
# Copy dev python reqs
COPY .devcontainer/requirements.txt /


@@ -0,0 +1,19 @@
version: '3.4'
services:
api-dev:
image: api-dev
build:
context: .
dockerfile: ./api.dockerfile
command: ["sh", "-c", "pip install debugpy -t /tmp && python /tmp/debugpy --wait-for-client --listen 0.0.0.0:5678 manage.py runserver 0.0.0.0:8000 --nothreading --noreload"]
ports:
- 8000:8000
- 5678:5678
volumes:
- tactical-data-dev:/opt/tactical
- ..:/workspace:cached
networks:
dev:
aliases:
- tactical-backend


@@ -5,7 +5,6 @@ services:
container_name: trmm-api-dev
image: api-dev
restart: always
user: 1000:1000
build:
context: ..
dockerfile: .devcontainer/api.dockerfile
@@ -24,10 +23,10 @@ services:
app-dev:
container_name: trmm-app-dev
image: node:16-alpine
image: node:14-alpine
restart: always
command: /bin/sh -c "npm install --cache ~/.npm && npm run serve"
user: 1000:1000
command: /bin/sh -c "npm install npm@latest -g && npm install && npm run serve
-- --host 0.0.0.0 --port ${APP_PORT}"
working_dir: /workspace/web
volumes:
- ..:/workspace:cached
@@ -43,7 +42,6 @@ services:
container_name: trmm-nats-dev
image: ${IMAGE_REPO}tactical-nats:${VERSION}
restart: always
user: 1000:1000
environment:
API_HOST: ${API_HOST}
API_PORT: ${API_PORT}
@@ -64,7 +62,6 @@ services:
container_name: trmm-meshcentral-dev
image: ${IMAGE_REPO}tactical-meshcentral:${VERSION}
restart: always
user: 1000:1000
environment:
MESH_HOST: ${MESH_HOST}
MESH_USER: ${MESH_USER}
@@ -88,7 +85,6 @@ services:
container_name: trmm-mongodb-dev
image: mongo:4.4
restart: always
user: 1000:1000
environment:
MONGO_INITDB_ROOT_USERNAME: ${MONGODB_USER}
MONGO_INITDB_ROOT_PASSWORD: ${MONGODB_PASSWORD}
@@ -106,7 +102,7 @@ services:
image: postgres:13-alpine
restart: always
environment:
POSTGRES_DB: ${POSTGRES_DB}
POSTGRES_DB: tacticalrmm
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASS}
volumes:
@@ -120,8 +116,7 @@ services:
redis-dev:
container_name: trmm-redis-dev
restart: always
user: 1000:1000
command: redis-server
command: redis-server --appendonly yes
image: redis:6.0-alpine
volumes:
- redis-data-dev:/data
@@ -146,7 +141,6 @@ services:
TRMM_PASS: ${TRMM_PASS}
HTTP_PROTOCOL: ${HTTP_PROTOCOL}
APP_PORT: ${APP_PORT}
POSTGRES_DB: ${POSTGRES_DB}
depends_on:
- postgres-dev
- meshcentral-dev
@@ -154,9 +148,6 @@ services:
- dev
volumes:
- tactical-data-dev:/opt/tactical
- mesh-data-dev:/meshcentral-data
- redis-data-dev:/redis/data
- mongo-dev-data:/mongo/data/db
- ..:/workspace:cached
# container for celery worker service
@@ -165,7 +156,6 @@ services:
image: api-dev
command: [ "tactical-celery-dev" ]
restart: always
user: 1000:1000
networks:
- dev
volumes:
@@ -181,7 +171,6 @@ services:
image: api-dev
command: [ "tactical-celerybeat-dev" ]
restart: always
user: 1000:1000
networks:
- dev
volumes:
@@ -197,7 +186,6 @@ services:
image: api-dev
command: [ "tactical-websockets-dev" ]
restart: always
user: 1000:1000
networks:
dev:
aliases:
@@ -214,7 +202,6 @@ services:
container_name: trmm-nginx-dev
image: ${IMAGE_REPO}tactical-nginx:${VERSION}
restart: always
user: 1000:1000
environment:
APP_HOST: ${APP_HOST}
API_HOST: ${API_HOST}
@@ -228,11 +215,23 @@ services:
dev:
ipv4_address: ${DOCKER_NGINX_IP}
ports:
- "80:8080"
- "443:4443"
- "80:80"
- "443:443"
volumes:
- tactical-data-dev:/opt/tactical
mkdocs-dev:
container_name: trmm-mkdocs-dev
image: api-dev
restart: always
command: [ "tactical-mkdocs-dev" ]
ports:
- "8005:8005"
volumes:
- ..:/workspace:cached
networks:
- dev
volumes:
tactical-data-dev: null
postgres-data-dev: null


@@ -10,7 +10,7 @@ set -e
: "${POSTGRES_PASS:=tactical}"
: "${POSTGRES_DB:=tacticalrmm}"
: "${MESH_SERVICE:=tactical-meshcentral}"
: "${MESH_WS_URL:=ws://${MESH_SERVICE}:4443}"
: "${MESH_WS_URL:=ws://${MESH_SERVICE}:443}"
: "${MESH_USER:=meshcentral}"
: "${MESH_PASS:=meshcentralpass}"
: "${MESH_HOST:=tactical-meshcentral}"
@@ -41,7 +41,7 @@ function django_setup {
sleep 5
done
until (echo > /dev/tcp/"${MESH_SERVICE}"/4443) &> /dev/null; do
until (echo > /dev/tcp/"${MESH_SERVICE}"/443) &> /dev/null; do
echo "waiting for meshcentral container to be ready..."
sleep 5
done
@@ -60,12 +60,10 @@ DEBUG = True
DOCKER_BUILD = True
SWAGGER_ENABLED = True
CERT_FILE = '${CERT_PUB_PATH}'
KEY_FILE = '${CERT_PRIV_PATH}'
SCRIPTS_DIR = '/community-scripts'
SCRIPTS_DIR = '${WORKSPACE_DIR}/scripts'
ALLOWED_HOSTS = ['${API_HOST}', '*']
@@ -96,7 +94,6 @@ EOF
echo "${localvars}" > ${WORKSPACE_DIR}/api/tacticalrmm/tacticalrmm/local_settings.py
# run migrations and init scripts
"${VIRTUAL_ENV}"/bin/python manage.py pre_update_tasks
"${VIRTUAL_ENV}"/bin/python manage.py migrate --no-input
"${VIRTUAL_ENV}"/bin/python manage.py collectstatic --no-input
"${VIRTUAL_ENV}"/bin/python manage.py initial_db_setup
@@ -106,7 +103,7 @@ EOF
"${VIRTUAL_ENV}"/bin/python manage.py reload_nats
"${VIRTUAL_ENV}"/bin/python manage.py create_natsapi_conf
"${VIRTUAL_ENV}"/bin/python manage.py create_installer_user
"${VIRTUAL_ENV}"/bin/python manage.py post_update_tasks
"${VIRTUAL_ENV}"/bin/python manage.py post_update_tasks
# create super user
@@ -120,24 +117,8 @@ if [ "$1" = 'tactical-init-dev' ]; then
test -f "${TACTICAL_READY_FILE}" && rm "${TACTICAL_READY_FILE}"
mkdir -p /meshcentral-data
mkdir -p ${TACTICAL_DIR}/tmp
mkdir -p ${TACTICAL_DIR}/certs
mkdir -p /mongo/data/db
mkdir -p /redis/data
touch /meshcentral-data/.initialized && chown -R 1000:1000 /meshcentral-data
touch ${TACTICAL_DIR}/tmp/.initialized && chown -R 1000:1000 ${TACTICAL_DIR}
touch ${TACTICAL_DIR}/certs/.initialized && chown -R 1000:1000 ${TACTICAL_DIR}/certs
touch /mongo/data/db/.initialized && chown -R 1000:1000 /mongo/data/db
touch /redis/data/.initialized && chown -R 1000:1000 /redis/data
mkdir -p ${TACTICAL_DIR}/api/tacticalrmm/private/exe
mkdir -p ${TACTICAL_DIR}/api/tacticalrmm/private/log
touch ${TACTICAL_DIR}/api/tacticalrmm/private/log/django_debug.log
# setup Python virtual env and install dependencies
! test -e "${VIRTUAL_ENV}" && python -m venv ${VIRTUAL_ENV}
"${VIRTUAL_ENV}"/bin/python -m pip install --upgrade pip
"${VIRTUAL_ENV}"/bin/pip install --no-cache-dir setuptools wheel
"${VIRTUAL_ENV}"/bin/pip install --no-cache-dir -r /requirements.txt
django_setup
@@ -146,7 +127,7 @@ if [ "$1" = 'tactical-init-dev' ]; then
webenv="$(cat << EOF
PROD_URL = "${HTTP_PROTOCOL}://${API_HOST}"
DEV_URL = "${HTTP_PROTOCOL}://${API_HOST}"
DEV_PORT = ${APP_PORT}
APP_URL = "https://${APP_HOST}"
DOCKER_BUILD = 1
EOF
)"
@@ -180,3 +161,8 @@ if [ "$1" = 'tactical-websockets-dev' ]; then
check_tactical_ready
"${VIRTUAL_ENV}"/bin/daphne tacticalrmm.asgi:application --port 8383 -b 0.0.0.0
fi
if [ "$1" = 'tactical-mkdocs-dev' ]; then
cd "${WORKSPACE_DIR}/docs"
"${VIRTUAL_ENV}"/bin/mkdocs serve
fi


@@ -1,41 +1,39 @@
# To ensure app dependencies are ported from your virtual environment/host machine into your container, run 'pip freeze > requirements.txt' in the terminal to overwrite this file
asgiref==3.5.0
celery==5.2.6
channels==3.0.4
channels_redis==3.4.0
daphne==3.0.2
Django==4.0.4
django-cors-headers==3.11.0
django-ipware==4.0.2
django-rest-knox==4.2.0
djangorestframework==3.13.1
future==0.18.2
msgpack==1.0.3
nats-py==2.1.0
packaging==21.3
psycopg2-binary==2.9.3
pycryptodome==3.14.1
pyotp==2.6.0
pytz==2022.1
qrcode==7.3.1
redis==4.2.2
requests==2.27.1
twilio==7.8.1
urllib3==1.26.9
validators==0.18.2
websockets==10.2
drf_spectacular==0.22.0
meshctrl==0.1.15
hiredis==2.0.0
# dev
black==22.3.0
django-extensions==3.1.5
isort==5.10.1
mypy==0.942
types-pytz==2021.3.6
model-bakery==1.5.0
coverage==6.3.2
django-silk==4.3.0
django-stubs==1.10.1
djangorestframework-stubs==1.5.0
asyncio-nats-client
celery
channels
channels_redis
django-ipware
Django==3.2.10
django-cors-headers
django-rest-knox
djangorestframework
loguru
msgpack
psycopg2-binary
pycparser
pycryptodome
pyotp
pyparsing
pytz
qrcode
redis
twilio
packaging
validators
websockets
black
Werkzeug
django-extensions
coverage
coveralls
model_bakery
mkdocs
mkdocs-material
pymdown-extensions
Pygments
mypy
pysnooper
isort
drf_spectacular
pandas


@@ -1,73 +0,0 @@
name: Tests CI
on:
push:
branches:
- "*"
pull_request:
branches:
- "*"
jobs:
test:
runs-on: ubuntu-latest
name: Tests
strategy:
matrix:
python-version: ['3.10.4']
steps:
- uses: actions/checkout@v3
- uses: harmon758/postgresql-action@v1
with:
postgresql version: '14'
postgresql db: 'pipeline'
postgresql user: 'pipeline'
postgresql password: 'pipeline123456'
- name: Setup Python ${{ matrix.python-version }}
uses: actions/setup-python@v3
with:
python-version: ${{ matrix.python-version }}
- name: Install redis
run: |
sudo apt update
sudo apt install -y redis
redis-server --version
- name: Install requirements
working-directory: api/tacticalrmm
run: |
python --version
SETTINGS_FILE="tacticalrmm/settings.py"
SETUPTOOLS_VER=$(grep "^SETUPTOOLS_VER" "$SETTINGS_FILE" | awk -F'[= "]' '{print $5}')
WHEEL_VER=$(grep "^WHEEL_VER" "$SETTINGS_FILE" | awk -F'[= "]' '{print $5}')
pip install --upgrade pip
pip install setuptools==${SETUPTOOLS_VER} wheel==${WHEEL_VER}
pip install -r requirements.txt -r requirements-test.txt
- name: Codestyle black
working-directory: api/tacticalrmm
run: |
black --exclude migrations/ --check tacticalrmm
if [ $? -ne 0 ]; then
exit 1
fi
- name: Run django tests
env:
GHACTIONS: "yes"
working-directory: api/tacticalrmm
run: |
pytest
if [ $? -ne 0 ]; then
exit 1
fi
- uses: codecov/codecov-action@v3
with:
directory: ./api/tacticalrmm
files: ./api/tacticalrmm/coverage.xml
verbose: true


@@ -32,7 +32,7 @@ jobs:
strategy:
fail-fast: false
matrix:
language: [ 'go', 'python' ]
language: [ 'go', 'javascript', 'python' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://git.io/codeql-language-support

.github/workflows/deploy-docs.yml (new file)

@@ -0,0 +1,22 @@
name: Deploy Docs
on:
push:
branches:
- master
defaults:
run:
working-directory: docs
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.x
- run: pip install --upgrade pip
- run: pip install --upgrade setuptools wheel
- run: pip install mkdocs mkdocs-material pymdown-extensions
- run: mkdocs gh-deploy --force

.gitignore

@@ -50,8 +50,4 @@ docs/site/
reset_db.sh
run_go_cmd.py
nats-api.conf
ignore/
coverage.lcov
daphne.sock.lock
.pytest_cache
coverage.xml


@@ -1,23 +0,0 @@
{
"recommendations": [
// frontend
"dbaeumer.vscode-eslint",
"esbenp.prettier-vscode",
"editorconfig.editorconfig",
"vue.volar",
"wayou.vscode-todo-highlight",
// python
"matangover.mypy",
"ms-python.python",
// golang
"golang.go"
],
"unwantedRecommendations": [
"octref.vetur",
"hookyqr.beautify",
"dbaeumer.jshint",
"ms-vscode.vscode-typescript-tslint-plugin"
]
}

.vscode/settings.json

@@ -1,69 +1,70 @@
{
"python.defaultInterpreterPath": "api/tacticalrmm/env/bin/python",
"python.languageServer": "Pylance",
"python.analysis.extraPaths": ["api/tacticalrmm", "api/env"],
"python.analysis.diagnosticSeverityOverrides": {
"reportUnusedImport": "error",
"reportDuplicateImport": "error",
"reportGeneralTypeIssues": "none"
},
"python.analysis.typeCheckingMode": "basic",
"python.linting.enabled": true,
"python.linting.mypyEnabled": true,
"python.linting.mypyArgs": [
"--ignore-missing-imports",
"--follow-imports=silent",
"--show-column-numbers",
"--strict"
],
"python.linting.ignorePatterns": [
"**/site-packages/**/*.py",
".vscode/*.py",
"**env/**"
],
"python.formatting.provider": "black",
"mypy.targets": ["api/tacticalrmm"],
"mypy.runUsingActiveInterpreter": true,
"editor.bracketPairColorization.enabled": true,
"editor.guides.bracketPairs": true,
"editor.formatOnSave": true,
"files.watcherExclude": {
"files.watcherExclude": {
"**/.git/objects/**": true,
"**/.git/subtree-cache/**": true,
"**/node_modules/": true,
"/node_modules/**": true,
"**/env/": true,
"/env/**": true,
"**/__pycache__": true,
"/__pycache__/**": true,
"**/.cache": true,
"**/.eggs": true,
"**/.ipynb_checkpoints": true,
"**/.mypy_cache": true,
"**/.pytest_cache": true,
"**/*.egg-info": true,
"**/*.feather": true,
"**/*.parquet*": true,
"**/*.pyc": true,
"**/*.zip": true
}
},
"go.useLanguageServer": true,
"[go]": {
"editor.codeActionsOnSave": {
"source.organizeImports": false
"python.pythonPath": "api/tacticalrmm/env/bin/python",
"python.languageServer": "Pylance",
"python.analysis.extraPaths": [
"api/tacticalrmm",
"api/env",
],
"python.analysis.diagnosticSeverityOverrides": {
"reportUnusedImport": "error",
"reportDuplicateImport": "error",
},
"editor.snippetSuggestions": "none"
},
"[go.mod]": {
"editor.codeActionsOnSave": {
"source.organizeImports": true
"python.analysis.memory.keepLibraryAst": true,
"python.linting.mypyEnabled": true,
"python.analysis.typeCheckingMode": "basic",
"python.formatting.provider": "black",
"editor.formatOnSave": true,
"vetur.format.defaultFormatter.js": "prettier",
"vetur.format.defaultFormatterOptions": {
"prettier": {
"semi": true,
"printWidth": 120,
"tabWidth": 2,
"useTabs": false,
"arrowParens": "avoid",
}
},
"vetur.format.options.tabSize": 2,
"vetur.format.options.useTabs": false,
"files.watcherExclude": {
"files.watcherExclude": {
"**/.git/objects/**": true,
"**/.git/subtree-cache/**": true,
"**/node_modules/": true,
"/node_modules/**": true,
"**/env/": true,
"/env/**": true,
"**/__pycache__": true,
"/__pycache__/**": true,
"**/.cache": true,
"**/.eggs": true,
"**/.ipynb_checkpoints": true,
"**/.mypy_cache": true,
"**/.pytest_cache": true,
"**/*.egg-info": true,
"**/*.feather": true,
"**/*.parquet*": true,
"**/*.pyc": true,
"**/*.zip": true
},
},
"go.useLanguageServer": true,
"[go]": {
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.organizeImports": false,
},
"editor.snippetSuggestions": "none",
},
"[go.mod]": {
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.organizeImports": true,
},
},
"gopls": {
"usePlaceholders": true,
"completeUnimported": true,
"staticcheck": true,
}
},
"gopls": {
"usePlaceholders": true,
"completeUnimported": true,
"staticcheck": true
}
}
}

.vscode/tasks.json (new file)

@@ -0,0 +1,23 @@
{
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
"version": "2.0.0",
"tasks": [
{
"label": "docker debug",
"type": "shell",
"command": "docker-compose",
"args": [
"-p",
"trmm",
"-f",
".devcontainer/docker-compose.yml",
"-f",
".devcontainer/docker-compose.debug.yml",
"up",
"-d",
"--build"
]
}
]
}

LICENSE (new file)

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2019-present wh1te909
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,74 +0,0 @@
### Tactical RMM License Version 1.0
Text of license:&emsp;&emsp;&emsp;Copyright © 2022 AmidaWare LLC. All rights reserved.<br>
&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&nbsp;Amending the text of this license is not permitted.
Trade Mark:&emsp;&emsp;&emsp;&emsp;"Tactical RMM" is a trade mark of AmidaWare LLC.
Licensor:&emsp;&emsp;&emsp;&emsp;&emsp;&nbsp;&nbsp;AmidaWare LLC of 1968 S Coast Hwy PMB 3847 Laguna Beach, CA, USA.
Licensed Software:&emsp;&nbsp;The software known as Tactical RMM Version v0.12.0 (and all subsequent releases and versions) and the Tactical RMM Agent v2.0.0 (and all subsequent releases and versions).
### 1. Preamble
The Licensed Software is designed to facilitate the remote monitoring and management (RMM) of networks, systems, servers, computers and other devices. The Licensed Software is made available primarily for use by organisations and managed service providers for monitoring and management purposes.
The Tactical RMM License is not an open-source software license. This license contains certain restrictions on the use of the Licensed Software. For example the functionality of the Licensed Software may not be made available as part of a SaaS (Software-as-a-Service) service or product to provide a commercial or for-profit service without the express prior permission of the Licensor.
### 2. License Grant
Permission is hereby granted, free of charge, on a non-exclusive basis, to copy, modify, create derivative works and use the Licensed Software in source and binary forms subject to the following terms and conditions. No additional rights will be implied under this license.
* The hosting and use of the Licensed Software to monitor and manage in-house networks/systems and/or customer networks/systems is permitted.
This license does not allow the functionality of the Licensed Software (whether in whole or in part) or a modified version of the Licensed Software or a derivative work to be used or otherwise made available as part of any other commercial or for-profit service, including, without limitation, any of the following:
* a service allowing third parties to interact remotely through a computer network;
* as part of a SaaS service or product;
* as part of the provision of a managed hosting service or product;
* the offering of installation and/or configuration services;
* the offer for sale, distribution or sale of any service or product (whether or not branded as Tactical RMM).
The prior written approval of AmidaWare LLC must be obtained for all commercial use and/or for-profit service use of the (i) Licensed Software (whether in whole or in part), (ii) a modified version of the Licensed Software and/or (iii) a derivative work.
The terms of this license apply to all copies of the Licensed Software (including modified versions) and derivative works.
All use of the Licensed Software must immediately cease if use breaches the terms of this license.
### 3. Derivative Works
If a derivative work is created which is based on or otherwise incorporates all or any part of the Licensed Software, and the derivative work is made available to any other person, the complete corresponding machine readable source code (including all changes made to the Licensed Software) must accompany the derivative work and be made publicly available online.
### 4. Copyright Notice
The following copyright notice shall be included in all copies of the Licensed Software:
&emsp;&emsp;&emsp;Copyright © 2022 AmidaWare LLC.
&emsp;&emsp;&emsp;Licensed under the Tactical RMM License Version 1.0 (the “License”).<br>
&emsp;&emsp;&emsp;You may only use the Licensed Software in accordance with the License.<br>
&emsp;&emsp;&emsp;A copy of the License is available at: https://license.tacticalrmm.com
### 5. Disclaimer of Warranty
THE LICENSED SOFTWARE IS PROVIDED "AS IS". TO THE FULLEST EXTENT PERMISSIBLE AT LAW ALL CONDITIONS, WARRANTIES OR OTHER TERMS OF ANY KIND WHICH MIGHT HAVE EFFECT OR BE IMPLIED OR INCORPORATED, WHETHER BY STATUTE, COMMON LAW OR OTHERWISE ARE HEREBY EXCLUDED, INCLUDING THE CONDITIONS, WARRANTIES OR OTHER TERMS AS TO SATISFACTORY QUALITY AND/OR MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, THE USE OF REASONABLE SKILL AND CARE AND NON-INFRINGEMENT.
### 6. Limits of Liability
THE FOLLOWING EXCLUSIONS SHALL APPLY TO THE FULLEST EXTENT PERMISSIBLE AT LAW. NEITHER THE AUTHORS NOR THE COPYRIGHT HOLDERS SHALL IN ANY CIRCUMSTANCES HAVE ANY LIABILITY FOR ANY CLAIM, LOSSES, DAMAGES OR OTHER LIABILITY, WHETHER THE SAME ARE SUFFERED DIRECTLY OR INDIRECTLY OR ARE IMMEDIATE OR CONSEQUENTIAL, AND WHETHER THE SAME ARISE IN CONTRACT, TORT OR DELICT (INCLUDING NEGLIGENCE) OR OTHERWISE HOWSOEVER ARISING FROM, OUT OF OR IN CONNECTION WITH THE LICENSED SOFTWARE OR THE USE OR INABILITY TO USE THE LICENSED SOFTWARE OR OTHER DEALINGS IN THE LICENSED SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH LOSS OR DAMAGE. THE FOREGOING EXCLUSIONS SHALL INCLUDE, WITHOUT LIMITATION, LIABILITY FOR ANY LOSSES OR DAMAGES WHICH FALL WITHIN ANY OF THE FOLLOWING CATEGORIES: SPECIAL, EXEMPLARY, OR INCIDENTAL LOSS OR DAMAGE, LOSS OF PROFITS, LOSS OF ANTICIPATED SAVINGS, LOSS OF BUSINESS OPPORTUNITY, LOSS OF GOODWILL, AND LOSS OR CORRUPTION OF DATA.
### 7. Termination
This license shall terminate with immediate effect if there is a material breach of any of its terms.
### 8. No partnership, agency or joint venture
Nothing in this license agreement is intended to, or shall be deemed to, establish any partnership or joint venture or any relationship of agency between AmidaWare LLC and any other person.
### 9. No endorsement
The names of the authors and/or the copyright holders must not be used to promote or endorse any products or services which are in any way derived from the Licensed Software without prior written consent.
### 10. Trademarks
No permission is granted to use the trademark “Tactical RMM” or any other trade name, trademark, service mark or product name of AmidaWare LLC except to the extent necessary to comply with the notice requirements in Section 4 (Copyright Notice).
### 11. Entire agreement
This license contains the whole agreement relating to its subject matter.
### 12. Severance
If any provision or part-provision of this license is or becomes invalid, illegal or unenforceable, it shall be deemed deleted, but that shall not affect the validity and enforceability of the rest of this license.
### 13. Acceptance of these terms
The terms and conditions of this license are accepted by copying, downloading, installing, redistributing, or otherwise using the Licensed Software.


@@ -1,18 +1,19 @@
# Tactical RMM
![CI Tests](https://github.com/amidaware/tacticalrmm/actions/workflows/ci-tests.yml/badge.svg?branch=develop)
[![codecov](https://codecov.io/gh/amidaware/tacticalrmm/branch/develop/graph/badge.svg?token=8ACUPVPTH6)](https://codecov.io/gh/amidaware/tacticalrmm)
[![Build Status](https://dev.azure.com/dcparsi/Tactical%20RMM/_apis/build/status/wh1te909.tacticalrmm?branchName=develop)](https://dev.azure.com/dcparsi/Tactical%20RMM/_build/latest?definitionId=4&branchName=develop)
[![Coverage Status](https://coveralls.io/repos/github/wh1te909/tacticalrmm/badge.png?branch=develop&kill_cache=1)](https://coveralls.io/github/wh1te909/tacticalrmm?branch=develop)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/python/black)
Tactical RMM is a remote monitoring & management tool, built with Django and Vue.\
It uses an [agent](https://github.com/amidaware/rmmagent) written in golang and integrates with [MeshCentral](https://github.com/Ylianst/MeshCentral)
Tactical RMM is a remote monitoring & management tool for Windows computers, built with Django and Vue.\
It uses an [agent](https://github.com/wh1te909/rmmagent) written in golang and integrates with [MeshCentral](https://github.com/Ylianst/MeshCentral)
# [LIVE DEMO](https://demo.tacticalrmm.com/)
# [LIVE DEMO](https://rmm.tacticalrmm.io/)
Demo database resets every hour. A lot of features are disabled for obvious reasons due to the nature of this app.
### [Discord Chat](https://discord.gg/upGTkWp)
### [Documentation](https://docs.tacticalrmm.com)
### [Documentation](https://wh1te909.github.io/tacticalrmm/)
## Features
@@ -28,13 +29,10 @@ Demo database resets every hour. A lot of features are disabled for obvious reas
- Remote software installation via chocolatey
- Software and hardware inventory
## Windows agent versions supported
## Windows versions supported
- Windows 7, 8.1, 10, 11, Server 2008R2, 2012R2, 2016, 2019, 2022
## Linux agent versions supported
- Any distro with systemd which includes but is not limited to: Debian (10, 11), Ubuntu x86_64 (18.04, 20.04, 22.04), Synology 7, centos, freepbx and more!
- Windows 7, 8.1, 10, Server 2008R2, 2012R2, 2016, 2019
## Installation / Backup / Restore / Usage
### Refer to the [documentation](https://docs.tacticalrmm.com)
### Refer to the [documentation](https://wh1te909.github.io/tacticalrmm/)


@@ -2,11 +2,18 @@
## Supported Versions
Use this section to tell people about which versions of your project are
currently being supported with security updates.
| Version | Supported |
| ------- | ------------------ |
| 0.12.2 | :white_check_mark: |
| < 0.12.2 | :x: |
| 0.10.4 | :white_check_mark: |
| < 0.10.4| :x: |
## Reporting a Vulnerability
https://docs.tacticalrmm.com/security
Use this section to tell people how to report a vulnerability.
Tell them where to go, how often they can expect to get an update on a
reported vulnerability, what to expect if the vulnerability is accepted or
declined, etc.


@@ -1,15 +1,26 @@
[run]
include = *.py
omit =
tacticalrmm/asgi.py
tacticalrmm/wsgi.py
manage.py
*/__pycache__/*
*/env/*
*/baker_recipes.py
/usr/local/lib/*
**/migrations/*
**/test*.py
source = .
[report]
show_missing = True
include = *.py
omit =
*/__pycache__/*
*/env/*
*/management/*
*/migrations/*
*/static/*
manage.py
*/local_settings.py
*/apps.py
*/admin.py
*/celery.py
*/wsgi.py
*/settings.py
*/baker_recipes.py
*/urls.py
*/tests.py
*/test.py
checks/utils.py
*/asgi.py
*/demo_views.py


@@ -1,7 +1,7 @@
from django.contrib import admin
from rest_framework.authtoken.admin import TokenAdmin
from .models import Role, User
from .models import User, Role
admin.site.register(User)
TokenAdmin.raw_id_fields = ("user",)


@@ -1,23 +1,19 @@
import uuid
from django.core.management.base import BaseCommand
from accounts.models import User
class Command(BaseCommand):
help = "Creates the installer user"
def handle(self, *args, **kwargs): # type: ignore
self.stdout.write("Checking if installer user has been created...")
def handle(self, *args, **kwargs):
if User.objects.filter(is_installer_user=True).exists():
self.stdout.write("Installer user already exists")
return
User.objects.create_user(
User.objects.create_user( # type: ignore
username=uuid.uuid4().hex,
is_installer_user=True,
password=User.objects.make_random_password(60),
password=User.objects.make_random_password(60), # type: ignore
block_dashboard_login=True,
)
self.stdout.write("Installer user has been created")


@@ -6,7 +6,7 @@ from knox.models import AuthToken
class Command(BaseCommand):
help = "Deletes all knox web tokens"
def handle(self, *args, **kwargs): # type: ignore
def handle(self, *args, **kwargs):
# only delete web tokens, not any generated by the installer or deployments
dont_delete = djangotime.now() + djangotime.timedelta(hours=23)
tokens = AuthToken.objects.exclude(deploytokens__isnull=False).filter(


@@ -1,5 +1,4 @@
from django.core.management.base import BaseCommand
from accounts.models import User


@@ -1,7 +1,7 @@
# Generated by Django 3.2.1 on 2021-05-11 02:33
import django.db.models.deletion
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):


@@ -1,7 +1,7 @@
# Generated by Django 3.2.6 on 2021-09-03 00:54
import django.db.models.deletion
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):


@@ -1,7 +1,7 @@
# Generated by Django 3.2.6 on 2021-10-10 02:49
import django.db.models.deletion
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):


@@ -1,18 +0,0 @@
# Generated by Django 3.2.12 on 2022-04-02 15:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0030_auto_20211104_0221'),
]
operations = [
migrations.AddField(
model_name='user',
name='date_format',
field=models.CharField(blank=True, max_length=30, null=True),
),
]


@@ -1,17 +1,26 @@
from typing import Optional
from django.contrib.auth.models import AbstractUser
from django.core.cache import cache
from django.db import models
from django.db.models.fields import CharField, DateTimeField
from logs.models import BaseAuditModel
from tacticalrmm.constants import (
ROLE_CACHE_PREFIX,
AgentDblClick,
AgentTableTabs,
ClientTreeSort,
)
AGENT_DBLCLICK_CHOICES = [
("editagent", "Edit Agent"),
("takecontrol", "Take Control"),
("remotebg", "Remote Background"),
("urlaction", "URL Action"),
]
AGENT_TBL_TAB_CHOICES = [
("server", "Servers"),
("workstation", "Workstations"),
("mixed", "Mixed"),
]
CLIENT_TREE_SORT_CHOICES = [
("alphafail", "Move failing clients to the top"),
("alpha", "Sort alphabetically"),
]
class User(AbstractUser, BaseAuditModel):
@@ -20,8 +29,8 @@ class User(AbstractUser, BaseAuditModel):
totp_key = models.CharField(max_length=50, null=True, blank=True)
dark_mode = models.BooleanField(default=True)
show_community_scripts = models.BooleanField(default=True)
agent_dblclick_action: "AgentDblClick" = models.CharField(
max_length=50, choices=AgentDblClick.choices, default=AgentDblClick.EDIT_AGENT
agent_dblclick_action = models.CharField(
max_length=50, choices=AGENT_DBLCLICK_CHOICES, default="editagent"
)
url_action = models.ForeignKey(
"core.URLAction",
@@ -31,16 +40,15 @@ class User(AbstractUser, BaseAuditModel):
on_delete=models.SET_NULL,
)
default_agent_tbl_tab = models.CharField(
max_length=50, choices=AgentTableTabs.choices, default=AgentTableTabs.SERVER
max_length=50, choices=AGENT_TBL_TAB_CHOICES, default="server"
)
agents_per_page = models.PositiveIntegerField(default=50) # not currently used
client_tree_sort = models.CharField(
max_length=50, choices=ClientTreeSort.choices, default=ClientTreeSort.ALPHA_FAIL
max_length=50, choices=CLIENT_TREE_SORT_CHOICES, default="alphafail"
)
client_tree_splitter = models.PositiveIntegerField(default=11)
loading_bar_color = models.CharField(max_length=255, default="red")
clear_search_when_switching = models.BooleanField(default=True)
date_format = models.CharField(max_length=30, blank=True, null=True)
is_installer_user = models.BooleanField(default=False)
last_login_ip = models.GenericIPAddressField(default=None, blank=True, null=True)
@@ -67,23 +75,6 @@ class User(AbstractUser, BaseAuditModel):
return UserSerializer(user).data
def get_and_set_role_cache(self) -> "Optional[Role]":
role = cache.get(f"{ROLE_CACHE_PREFIX}{self.role}")
if role and isinstance(role, Role):
return role
elif not role and not self.role:
return None
else:
models.prefetch_related_objects(
[self.role],
"can_view_clients",
"can_view_sites",
)
cache.set(f"{ROLE_CACHE_PREFIX}{self.role}", self.role, 600)
return self.role
class Role(BaseAuditModel):
name = models.CharField(max_length=255, unique=True)
@@ -184,12 +175,6 @@ class Role(BaseAuditModel):
def __str__(self):
return self.name
def save(self, *args, **kwargs) -> None:
# delete cache on save
cache.delete(f"{ROLE_CACHE_PREFIX}{self.name}")
super(BaseAuditModel, self).save(*args, **kwargs)
@staticmethod
def serialize(role):
# serializes the agent and returns json
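
The AgentDblClick, AgentTableTabs and ClientTreeSort names used in the hunks above come from tacticalrmm.constants, which this diff never shows. As a rough sketch of what such a class presumably looks like, assuming Django's models.TextChoices and building members from the raw value/label tuples they replace (only the EDIT_AGENT member name is confirmed elsewhere in the diff; the others are illustrative):

from django.db import models

class AgentDblClick(models.TextChoices):
    EDIT_AGENT = "editagent", "Edit Agent"        # confirmed via AgentDblClick.EDIT_AGENT above
    TAKE_CONTROL = "takecontrol", "Take Control"  # illustrative member name
    REMOTE_BG = "remotebg", "Remote Background"   # illustrative member name
    URL_ACTION = "urlaction", "URL Action"        # illustrative member name

# AgentDblClick.choices yields the same [("editagent", "Edit Agent"), ...] pairs as the old
# AGENT_DBLCLICK_CHOICES list, and AgentDblClick.EDIT_AGENT equals "editagent", so both drop
# straight into CharField(choices=..., default=...). The other tacticalrmm.constants enums
# referenced elsewhere in this diff (AgentMonType, AgentPlat, AgentHistoryType) presumably
# follow the same pattern.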


@@ -4,7 +4,7 @@ from tacticalrmm.permissions import _has_perm
class AccountsPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
if r.method == "GET":
return _has_perm(r, "can_list_accounts")
else:
@@ -28,7 +28,7 @@ class AccountsPerms(permissions.BasePermission):
class RolesPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
if r.method == "GET":
return _has_perm(r, "can_list_roles")
else:
@@ -36,7 +36,7 @@ class RolesPerms(permissions.BasePermission):
class APIKeyPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
if r.method == "GET":
return _has_perm(r, "can_list_api_keys")
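
Each permission class above delegates to _has_perm and _has_perm_on_agent from tacticalrmm.permissions, which this diff does not include. A minimal sketch of _has_perm, assuming it mirrors the equivalent SendCMD._has_perm logic shown in the websocket consumer later in this diff (the exact signature and module contents are not confirmed here):

def _has_perm(request, perm: str) -> bool:
    # superusers, directly or via a role flagged is_superuser, always pass
    if request.user.is_superuser or (
        request.user.role and getattr(request.user.role, "is_superuser")
    ):
        return True
    # non-superusers with no role assigned are denied outright
    if not request.user.role:
        return False
    # otherwise the named boolean flag on the role decides
    return bool(getattr(request.user.role, perm))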


@@ -1,11 +1,11 @@
import pyotp
from rest_framework.serializers import (
ModelSerializer,
ReadOnlyField,
SerializerMethodField,
ReadOnlyField,
)
from .models import APIKey, Role, User
from .models import APIKey, User, Role
class UserUISerializer(ModelSerializer):
@@ -22,7 +22,6 @@ class UserUISerializer(ModelSerializer):
"loading_bar_color",
"clear_search_when_switching",
"block_dashboard_login",
"date_format",
]
@@ -40,7 +39,6 @@ class UserSerializer(ModelSerializer):
"last_login_ip",
"role",
"block_dashboard_login",
"date_format",
]


@@ -2,16 +2,15 @@ from unittest.mock import patch
from django.test import override_settings
from model_bakery import baker, seq
from accounts.models import APIKey, User
from accounts.serializers import APIKeySerializer
from tacticalrmm.constants import AgentDblClick, AgentTableTabs, ClientTreeSort
from accounts.models import User, APIKey
from tacticalrmm.test import TacticalTestCase
from accounts.serializers import APIKeySerializer
class TestAccounts(TacticalTestCase):
def setUp(self):
self.setup_client()
self.client_setup()
self.bob = User(username="bob")
self.bob.set_password("hunter2")
self.bob.save()
@@ -70,17 +69,17 @@ class TestAccounts(TacticalTestCase):
self.assertEqual(r.status_code, 400)
self.assertIn("non_field_errors", r.data.keys())
# @override_settings(DEBUG=True)
# @patch("pyotp.TOTP.verify")
# def test_debug_login_view(self, mock_verify):
# url = "/login/"
# mock_verify.return_value = True
@override_settings(DEBUG=True)
@patch("pyotp.TOTP.verify")
def test_debug_login_view(self, mock_verify):
url = "/login/"
mock_verify.return_value = True
# data = {"username": "bob", "password": "hunter2", "twofactor": "sekret"}
# r = self.client.post(url, data, format="json")
# self.assertEqual(r.status_code, 200)
# self.assertIn("expiry", r.data.keys())
# self.assertIn("token", r.data.keys())
data = {"username": "bob", "password": "hunter2", "twofactor": "sekret"}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertIn("expiry", r.data.keys())
self.assertIn("token", r.data.keys())
class TestGetAddUsers(TacticalTestCase):
@@ -284,9 +283,9 @@ class TestUserAction(TacticalTestCase):
data = {
"dark_mode": True,
"show_community_scripts": True,
"agent_dblclick_action": AgentDblClick.EDIT_AGENT,
"default_agent_tbl_tab": AgentTableTabs.MIXED,
"client_tree_sort": ClientTreeSort.ALPHA,
"agent_dblclick_action": "editagent",
"default_agent_tbl_tab": "mixed",
"client_tree_sort": "alpha",
"client_tree_splitter": 14,
"loading_bar_color": "green",
"clear_search_when_switching": False,
@@ -309,7 +308,7 @@ class TestAPIKeyViews(TacticalTestCase):
serializer = APIKeySerializer(apikeys, many=True)
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(serializer.data, resp.data)
self.assertEqual(serializer.data, resp.data) # type: ignore
self.check_not_authenticated("get", url)
@@ -332,14 +331,14 @@ class TestAPIKeyViews(TacticalTestCase):
self.assertEqual(resp.status_code, 404)
apikey = baker.make("accounts.APIKey", name="Test")
url = f"/accounts/apikeys/{apikey.pk}/"
url = f"/accounts/apikeys/{apikey.pk}/" # type: ignore
data = {"name": "New Name"}
data = {"name": "New Name"} # type: ignore
resp = self.client.put(url, data, format="json")
self.assertEqual(resp.status_code, 200)
apikey = APIKey.objects.get(pk=apikey.pk)
self.assertEqual(apikey.name, "New Name")
apikey = APIKey.objects.get(pk=apikey.pk) # type: ignore
self.assertEquals(apikey.name, "New Name")
self.check_not_authenticated("put", url)
@@ -350,11 +349,11 @@ class TestAPIKeyViews(TacticalTestCase):
# test delete api key
apikey = baker.make("accounts.APIKey")
url = f"/accounts/apikeys/{apikey.pk}/"
url = f"/accounts/apikeys/{apikey.pk}/" # type: ignore
resp = self.client.delete(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertFalse(APIKey.objects.filter(pk=apikey.pk).exists())
self.assertFalse(APIKey.objects.filter(pk=apikey.pk).exists()) # type: ignore
self.check_not_authenticated("delete", url)
@@ -394,7 +393,7 @@ class TestAPIAuthentication(TacticalTestCase):
name="Test Token", key="123456", user=self.user
)
self.setup_client()
self.client_setup()
def test_api_auth(self):
url = "/clients/"


@@ -5,16 +5,15 @@ from django.db import IntegrityError
from django.shortcuts import get_object_or_404
from ipware import get_client_ip
from knox.views import LoginView as KnoxLoginView
from logs.models import AuditLog
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from logs.models import AuditLog
from tacticalrmm.helpers import notify_error
from tacticalrmm.utils import notify_error
from .models import APIKey, Role, User
from .permissions import AccountsPerms, APIKeyPerms, RolesPerms
from .permissions import APIKeyPerms, AccountsPerms, RolesPerms
from .serializers import (
APIKeySerializer,
RoleSerializer,


@@ -1,8 +1,9 @@
from django.contrib import admin
from .models import Agent, AgentCustomField, AgentHistory, Note
from .models import Agent, AgentCustomField, Note, RecoveryAction, AgentHistory
admin.site.register(Agent)
admin.site.register(RecoveryAction)
admin.site.register(Note)
admin.site.register(AgentCustomField)
admin.site.register(AgentHistory)


@@ -1,6 +1,6 @@
import json
import os
import secrets
import random
import string
from itertools import cycle
@@ -8,11 +8,10 @@ from django.conf import settings
from django.utils import timezone as djangotime
from model_bakery.recipe import Recipe, foreign_key, seq
from tacticalrmm.constants import AgentMonType, AgentPlat
def generate_agent_id() -> str:
return "".join(secrets.choice(string.ascii_letters) for i in range(39))
def generate_agent_id(hostname):
rand = "".join(random.choice(string.ascii_letters) for _ in range(35))
return f"{rand}-{hostname}"
site = Recipe("clients.Site")
@@ -25,34 +24,25 @@ def get_wmi_data():
return json.load(f)
def get_win_svcs():
svcs = settings.BASE_DIR.joinpath("tacticalrmm/test_data/winsvcs.json")
with open(svcs) as f:
return json.load(f)
agent = Recipe(
"agents.Agent",
site=foreign_key(site),
hostname="DESKTOP-TEST123",
version="1.3.0",
monitoring_type=cycle(AgentMonType.values),
agent_id=seq(generate_agent_id()),
monitoring_type=cycle(["workstation", "server"]),
agent_id=seq(generate_agent_id("DESKTOP-TEST123")),
last_seen=djangotime.now() - djangotime.timedelta(days=5),
plat=AgentPlat.WINDOWS,
)
server_agent = agent.extend(
monitoring_type=AgentMonType.SERVER,
monitoring_type="server",
)
workstation_agent = agent.extend(
monitoring_type=AgentMonType.WORKSTATION,
monitoring_type="workstation",
)
online_agent = agent.extend(
last_seen=djangotime.now(), services=get_win_svcs(), wmi_detail=get_wmi_data()
)
online_agent = agent.extend(last_seen=djangotime.now())
offline_agent = agent.extend(
last_seen=djangotime.now() - djangotime.timedelta(minutes=7)
@@ -87,4 +77,4 @@ agent_with_services = agent.extend(
],
)
agent_with_wmi = agent.extend(wmi_detail=get_wmi_data())
agent_with_wmi = agent.extend(wmi=get_wmi_data())


@@ -1,83 +0,0 @@
from agents.models import Agent, AgentHistory
from channels.db import database_sync_to_async
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from django.contrib.auth.models import AnonymousUser
from django.shortcuts import get_object_or_404
from tacticalrmm.constants import AGENT_DEFER, AgentHistoryType
from tacticalrmm.permissions import _has_perm_on_agent
class SendCMD(AsyncJsonWebsocketConsumer):
async def connect(self):
self.user = self.scope["user"]
if isinstance(self.user, AnonymousUser):
await self.close()
await self.accept()
async def receive_json(self, payload, **kwargs):
auth = await self.has_perm(payload["agent_id"])
if not auth:
await self.send_json(
{"ret": "You do not have permission to perform this action."}
)
return
agent = await self.get_agent(payload["agent_id"])
timeout = int(payload["timeout"])
if payload["shell"] == "custom" and payload["custom_shell"]:
shell = payload["custom_shell"]
else:
shell = payload["shell"]
hist_pk = await self.get_history_id(agent, payload["cmd"])
data = {
"func": "rawcmd",
"timeout": timeout,
"payload": {
"command": payload["cmd"],
"shell": shell,
},
"id": hist_pk,
}
ret = await agent.nats_cmd(data, timeout=timeout + 2)
await self.send_json({"ret": ret})
async def disconnect(self, _):
await self.close()
def _has_perm(self, perm: str) -> bool:
if self.user.is_superuser or (
self.user.role and getattr(self.user.role, "is_superuser")
):
return True
# make sure non-superusers with empty roles aren't permitted
elif not self.user.role:
return False
return self.user.role and getattr(self.user.role, perm)
@database_sync_to_async # type: ignore
def get_agent(self, agent_id: str) -> "Agent":
return get_object_or_404(Agent.objects.defer(*AGENT_DEFER), agent_id=agent_id)
@database_sync_to_async # type: ignore
def get_history_id(self, agent: "Agent", cmd: str) -> int:
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.CMD_RUN,
command=cmd,
username=self.user.username[:50],
)
return hist.pk
@database_sync_to_async # type: ignore
def has_perm(self, agent_id: str) -> bool:
return self._has_perm("can_send_cmd") and _has_perm_on_agent(
self.user, agent_id
)


@@ -5,8 +5,7 @@ from django.utils import timezone as djangotime
from packaging import version as pyver
from agents.models import Agent
from tacticalrmm.constants import AGENT_DEFER
from tacticalrmm.utils import reload_nats
from tacticalrmm.utils import AGENT_DEFER, reload_nats
class Command(BaseCommand):


@@ -3,9 +3,7 @@ import random
from django.core.management.base import BaseCommand
from django.utils import timezone as djangotime
from agents.models import Agent
from core.tasks import cache_db_fields_task, handle_resolved_stuff
class Command(BaseCommand):
@@ -24,10 +22,15 @@ class Command(BaseCommand):
rand = now - djangotime.timedelta(minutes=random.randint(10, 20))
random_dates.append(rand)
""" for _ in range(5):
rand = djangotime.now() - djangotime.timedelta(hours=random.randint(1, 10))
random_dates.append(rand)
for _ in range(5):
rand = djangotime.now() - djangotime.timedelta(days=random.randint(40, 90))
random_dates.append(rand) """
agents = Agent.objects.only("last_seen")
for agent in agents:
agent.last_seen = random.choice(random_dates)
agent.save(update_fields=["last_seen"])
cache_db_fields_task()
handle_resolved_stuff()

File diff suppressed because it is too large


@@ -1,30 +0,0 @@
from django.core.management.base import BaseCommand
from agents.models import Agent
from tacticalrmm.constants import AGENT_DEFER
class Command(BaseCommand):
help = "Find all agents that have a certain service installed"
def add_arguments(self, parser):
parser.add_argument("name", type=str)
def handle(self, *args, **kwargs):
search = kwargs["name"].lower()
agents = Agent.objects.defer(*AGENT_DEFER)
for agent in agents:
try:
for svc in agent.services:
if (
search in svc["name"].lower()
or search in svc["display_name"].lower()
):
self.stdout.write(
self.style.SUCCESS(
f"{agent.hostname} - {svc['name']} ({svc['display_name']}) - {svc['status']}"
)
)
except:
continue


@@ -0,0 +1,16 @@
from django.core.management.base import BaseCommand
from agents.models import Agent
class Command(BaseCommand):
help = "Changes existing agents salt_id from a property to a model field"
def handle(self, *args, **kwargs):
agents = Agent.objects.filter(salt_id=None)
for agent in agents:
self.stdout.write(
self.style.SUCCESS(f"Setting salt_id on {agent.hostname}")
)
agent.salt_id = f"{agent.hostname}-{agent.pk}"
agent.save(update_fields=["salt_id"])


@@ -2,16 +2,16 @@ from django.conf import settings
from django.core.management.base import BaseCommand
from agents.models import Agent
from tacticalrmm.constants import AGENT_STATUS_ONLINE, ONLINE_AGENTS
class Command(BaseCommand):
help = "Shows online agents that are not on the latest version"
def handle(self, *args, **kwargs):
only = ONLINE_AGENTS + ("hostname",)
q = Agent.objects.exclude(version=settings.LATEST_AGENT_VER).only(*only)
agents = [i for i in q if i.status == AGENT_STATUS_ONLINE]
q = Agent.objects.exclude(version=settings.LATEST_AGENT_VER).only(
"pk", "version", "last_seen", "overdue_time", "offline_time"
)
agents = [i for i in q if i.status == "online"]
for agent in agents:
self.stdout.write(
self.style.SUCCESS(f"{agent.hostname} - v{agent.version}")


@@ -3,17 +3,17 @@ from django.core.management.base import BaseCommand
from packaging import version as pyver
from agents.models import Agent
from core.models import CoreSettings
from agents.tasks import send_agent_update_task
from core.utils import get_core_settings, token_is_valid
from tacticalrmm.constants import AGENT_DEFER
from tacticalrmm.utils import AGENT_DEFER
class Command(BaseCommand):
help = "Triggers an agent update task to run"
def handle(self, *args, **kwargs):
core = get_core_settings()
if not core.agent_auto_update:
core = CoreSettings.objects.first()
if not core.agent_auto_update: # type: ignore
return
q = Agent.objects.defer(*AGENT_DEFER).exclude(version=settings.LATEST_AGENT_VER)
@@ -22,5 +22,4 @@ class Command(BaseCommand):
for i in q
if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER)
]
token, _ = token_is_valid()
send_agent_update_task.delay(agent_ids=agent_ids, token=token, force=False)
send_agent_update_task.delay(agent_ids=agent_ids)


@@ -1,7 +1,7 @@
# Generated by Django 3.2.1 on 2021-07-06 02:01
import django.db.models.deletion
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):


@@ -1,7 +1,7 @@
# Generated by Django 3.2.5 on 2021-07-14 07:38
import django.db.models.deletion
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):


@@ -1,25 +0,0 @@
# Generated by Django 3.2.12 on 2022-02-27 05:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agents', '0042_alter_agent_time_zone'),
]
operations = [
migrations.RemoveField(
model_name='agent',
name='antivirus',
),
migrations.RemoveField(
model_name='agent',
name='local_ip',
),
migrations.RemoveField(
model_name='agent',
name='used_ram',
),
]


@@ -1,22 +0,0 @@
# Generated by Django 3.2.12 on 2022-02-27 07:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agents', '0043_auto_20220227_0554'),
]
operations = [
migrations.RenameField(
model_name='agent',
old_name='salt_id',
new_name='goarch',
),
migrations.RemoveField(
model_name='agent',
name='salt_ver',
),
]


@@ -1,16 +0,0 @@
# Generated by Django 3.2.12 on 2022-03-12 02:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agents', '0044_auto_20220227_0717'),
]
operations = [
migrations.DeleteModel(
name='RecoveryAction',
),
]


@@ -1,18 +0,0 @@
# Generated by Django 3.2.12 on 2022-03-17 17:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('agents', '0045_delete_recoveryaction'),
]
operations = [
migrations.AlterField(
model_name='agenthistory',
name='command',
field=models.TextField(blank=True, default='', null=True),
),
]


@@ -1,26 +0,0 @@
# Generated by Django 4.0.3 on 2022-04-07 17:28
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clients', '0020_auto_20211226_0547'),
('agents', '0046_alter_agenthistory_command'),
]
operations = [
migrations.AlterField(
model_name='agent',
name='plat',
field=models.CharField(default='windows', max_length=255),
),
migrations.AlterField(
model_name='agent',
name='site',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.RESTRICT, related_name='agents', to='clients.site'),
preserve_default=False,
),
]


@@ -1,21 +0,0 @@
# Generated by Django 4.0.3 on 2022-04-16 17:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agents', '0047_alter_agent_plat_alter_agent_site'),
]
operations = [
migrations.RemoveField(
model_name='agent',
name='has_patches_pending',
),
migrations.RemoveField(
model_name='agent',
name='pending_actions_count',
),
]


@@ -1,17 +0,0 @@
# Generated by Django 4.0.3 on 2022-04-18 14:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('agents', '0048_remove_agent_has_patches_pending_and_more'),
]
operations = [
migrations.AddIndex(
model_name='agent',
index=models.Index(fields=['monitoring_type'], name='agents_agen_monitor_df8816_idx'),
),
]


@@ -1,17 +0,0 @@
# Generated by Django 4.0.4 on 2022-04-25 06:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agents', '0049_agent_agents_agen_monitor_df8816_idx'),
]
operations = [
migrations.RemoveField(
model_name='agent',
name='plat_release',
),
]


@@ -1,18 +0,0 @@
# Generated by Django 4.0.4 on 2022-05-18 03:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('agents', '0050_remove_agent_plat_release'),
]
operations = [
migrations.AlterField(
model_name='agent',
name='plat',
field=models.CharField(choices=[('windows', 'Windows'), ('linux', 'Linux'), ('darwin', 'macOS')], default='windows', max_length=255),
),
]


@@ -1,18 +0,0 @@
# Generated by Django 4.0.4 on 2022-05-18 05:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('agents', '0051_alter_agent_plat'),
]
operations = [
migrations.AlterField(
model_name='agent',
name='monitoring_type',
field=models.CharField(choices=[('server', 'Server'), ('workstation', 'Workstation')], default='server', max_length=30),
),
]


@@ -1,17 +0,0 @@
# Generated by Django 4.0.4 on 2022-05-18 06:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agents', '0052_alter_agent_monitoring_type'),
]
operations = [
migrations.RemoveField(
model_name='agenthistory',
name='status',
),
]


@@ -1,18 +0,0 @@
# Generated by Django 4.0.4 on 2022-06-06 04:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('agents', '0053_remove_agenthistory_status'),
]
operations = [
migrations.AlterField(
model_name='agent',
name='goarch',
field=models.CharField(blank=True, choices=[('amd64', 'amd64'), ('386', '386'), ('arm64', 'arm64'), ('arm', 'arm')], max_length=255, null=True),
),
]

File diff suppressed because it is too large


@@ -4,7 +4,7 @@ from tacticalrmm.permissions import _has_perm, _has_perm_on_agent
class AgentPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
if r.method == "GET":
if "agent_id" in view.kwargs.keys():
return _has_perm(r, "can_list_agents") and _has_perm_on_agent(
@@ -26,76 +26,73 @@ class AgentPerms(permissions.BasePermission):
class RecoverAgentPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
if "agent_id" not in view.kwargs.keys():
return _has_perm(r, "can_recover_agents")
def has_permission(self, r, view):
return _has_perm(r, "can_recover_agents") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class MeshPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
return _has_perm(r, "can_use_mesh") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class UpdateAgentPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
return _has_perm(r, "can_update_agents")
class PingAgentPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
return _has_perm(r, "can_ping_agents") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class ManageProcPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
return _has_perm(r, "can_manage_procs") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class EvtLogPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
return _has_perm(r, "can_view_eventlogs") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class SendCMDPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
return _has_perm(r, "can_send_cmd") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class RebootAgentPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
return _has_perm(r, "can_reboot_agents") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class InstallAgentPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
return _has_perm(r, "can_install_agents")
class RunScriptPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
return _has_perm(r, "can_run_scripts") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class AgentNotesPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
# permissions for GET /agents/notes/ endpoint
if r.method == "GET":
@@ -112,12 +109,12 @@ class AgentNotesPerms(permissions.BasePermission):
class RunBulkPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
return _has_perm(r, "can_run_bulk")
class AgentHistoryPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
if "agent_id" in view.kwargs.keys():
return _has_perm(r, "can_list_agent_history") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]


@@ -1,10 +1,8 @@
import pytz
from rest_framework import serializers
from tacticalrmm.constants import AGENT_STATUS_ONLINE
from winupdate.serializers import WinUpdatePolicySerializer
from .models import Agent, AgentCustomField, AgentHistory, Note
from .models import Agent, AgentCustomField, Note, AgentHistory
class AgentCustomFieldSerializer(serializers.ModelSerializer):
@@ -42,33 +40,6 @@ class AgentSerializer(serializers.ModelSerializer):
custom_fields = AgentCustomFieldSerializer(many=True, read_only=True)
patches_last_installed = serializers.ReadOnlyField()
last_seen = serializers.ReadOnlyField()
applied_policies = serializers.SerializerMethodField()
effective_patch_policy = serializers.SerializerMethodField()
alert_template = serializers.SerializerMethodField()
def get_alert_template(self, obj):
from alerts.serializers import AlertTemplateSerializer
return (
AlertTemplateSerializer(obj.alert_template).data
if obj.alert_template
else None
)
def get_effective_patch_policy(self, obj):
return WinUpdatePolicySerializer(obj.get_patch_policy()).data
def get_applied_policies(self, obj):
from automation.serializers import PolicySerializer
policies = obj.get_agent_policies()
# need to serialize model objects manually
for key, policy in policies.items():
if policy:
policies[key] = PolicySerializer(policy).data
return policies
def get_all_timezones(self, obj):
return pytz.all_timezones
@@ -81,15 +52,13 @@ class AgentSerializer(serializers.ModelSerializer):
class AgentTableSerializer(serializers.ModelSerializer):
status = serializers.ReadOnlyField()
checks = serializers.ReadOnlyField()
last_seen = serializers.SerializerMethodField()
client_name = serializers.ReadOnlyField(source="client.name")
site_name = serializers.ReadOnlyField(source="site.name")
logged_username = serializers.SerializerMethodField()
italic = serializers.SerializerMethodField()
policy = serializers.ReadOnlyField(source="policy.id")
alert_template = serializers.SerializerMethodField()
last_seen = serializers.ReadOnlyField()
pending_actions_count = serializers.ReadOnlyField()
has_patches_pending = serializers.ReadOnlyField()
def get_alert_template(self, obj):
@@ -103,8 +72,16 @@ class AgentTableSerializer(serializers.ModelSerializer):
"always_alert": obj.alert_template.agent_always_alert,
}
def get_last_seen(self, obj) -> str:
if obj.time_zone is not None:
agent_tz = pytz.timezone(obj.time_zone)
else:
agent_tz = self.context["default_tz"]
return obj.last_seen.astimezone(agent_tz).strftime("%m %d %Y %H:%M")
def get_logged_username(self, obj) -> str:
if obj.logged_in_username == "None" and obj.status == AGENT_STATUS_ONLINE:
if obj.logged_in_username == "None" and obj.status == "online":
return obj.last_logged_in_user
elif obj.logged_in_username != "None":
return obj.logged_in_username
@@ -112,7 +89,7 @@ class AgentTableSerializer(serializers.ModelSerializer):
return "-"
def get_italic(self, obj) -> bool:
return obj.logged_in_username == "None" and obj.status == AGENT_STATUS_ONLINE
return obj.logged_in_username == "None" and obj.status == "online"
class Meta:
model = Agent
@@ -125,6 +102,7 @@ class AgentTableSerializer(serializers.ModelSerializer):
"monitoring_type",
"description",
"needs_reboot",
"has_patches_pending",
"pending_actions_count",
"status",
"overdue_text_alert",
@@ -138,9 +116,6 @@ class AgentTableSerializer(serializers.ModelSerializer):
"italic",
"policy",
"block_policy_inheritance",
"plat",
"goarch",
"has_patches_pending",
]
depth = 2
@@ -177,12 +152,17 @@ class AgentNoteSerializer(serializers.ModelSerializer):
class AgentHistorySerializer(serializers.ModelSerializer):
time = serializers.SerializerMethodField(read_only=True)
script_name = serializers.ReadOnlyField(source="script.name")
class Meta:
model = AgentHistory
fields = "__all__"
def get_time(self, history):
tz = self.context["default_tz"]
return history.time.astimezone(tz).strftime("%m %d %Y %H:%M:%S")
class AgentAuditSerializer(serializers.ModelSerializer):
class Meta:

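Both the table and history serializers above pull a default_tz value out of the serializer context to localize timestamps per agent. A small sketch of that handshake follows, assuming a caller that builds the context the same way the views later in this diff do; the serializer and the example_default_tz helper here are illustrative, not the project's.

# Hedged sketch of the default_tz serializer-context pattern used above.
import pytz
from rest_framework import serializers

def example_default_tz():
    # stand-in for the project's default timezone helper, which reads CoreSettings
    return pytz.timezone("UTC")

class ExampleHistorySerializer(serializers.Serializer):
    time = serializers.SerializerMethodField(read_only=True)

    def get_time(self, obj) -> str:
        # the view supplies the fallback timezone via serializer context
        tz = self.context["default_tz"]
        return obj.time.astimezone(tz).strftime("%m %d %Y %H:%M:%S")

# usage sketch, mirroring the views below:
# ctx = {"default_tz": example_default_tz()}
# data = ExampleHistorySerializer(history_qs, many=True, context=ctx).data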
View File

@@ -1,52 +1,120 @@
import asyncio
import datetime as dt
import random
from time import sleep
from typing import TYPE_CHECKING, Optional
from typing import Union
from django.core.management import call_command
from core.models import CoreSettings
from django.conf import settings
from django.utils import timezone as djangotime
from agents.models import Agent
from core.utils import get_core_settings
from logs.models import DebugLog
from logs.models import DebugLog, PendingAction
from packaging import version as pyver
from scripts.models import Script
from tacticalrmm.celery import app
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_OVERDUE,
CheckStatus,
DebugLogType,
)
if TYPE_CHECKING:
from django.db.models.query import QuerySet
from agents.models import Agent
from agents.utils import get_winagent_url
def agent_update(agent_id: str, force: bool = False) -> str:
agent = Agent.objects.get(agent_id=agent_id)
if pyver.parse(agent.version) <= pyver.parse("1.3.0"):
return "not supported"
# skip if we can't determine the arch
if agent.arch is None:
DebugLog.warning(
agent=agent,
log_type="agent_issues",
message=f"Unable to determine arch on {agent.hostname}({agent.agent_id}). Skipping agent update.",
)
return "noarch"
version = settings.LATEST_AGENT_VER
inno = agent.win_inno_exe
url = get_winagent_url(agent.arch)
if not force:
if agent.pendingactions.filter(
action_type="agentupdate", status="pending"
).exists():
agent.pendingactions.filter(
action_type="agentupdate", status="pending"
).delete()
PendingAction.objects.create(
agent=agent,
action_type="agentupdate",
details={
"url": url,
"version": version,
"inno": inno,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": version,
"inno": inno,
},
}
asyncio.run(agent.nats_cmd(nats_data, wait=False))
return "created"
@app.task
def send_agent_update_task(*, agent_ids: list[str], token: str, force: bool) -> None:
agents: "QuerySet[Agent]" = Agent.objects.defer(*AGENT_DEFER).filter(
agent_id__in=agent_ids
)
for agent in agents:
agent.do_update(token=token, force=force)
def force_code_sign(agent_ids: list[str]) -> None:
chunks = (agent_ids[i : i + 50] for i in range(0, len(agent_ids), 50))
for chunk in chunks:
for agent_id in chunk:
agent_update(agent_id=agent_id, force=True)
sleep(0.05)
sleep(4)
@app.task
def send_agent_update_task(agent_ids: list[str]) -> None:
chunks = (agent_ids[i : i + 50] for i in range(0, len(agent_ids), 50))
for chunk in chunks:
for agent_id in chunk:
agent_update(agent_id)
sleep(0.05)
sleep(4)
@app.task
def auto_self_agent_update_task() -> None:
call_command("update_agents")
core = CoreSettings.objects.first()
if not core.agent_auto_update: # type:ignore
return
q = Agent.objects.only("agent_id", "version")
agent_ids: list[str] = [
i.agent_id
for i in q
if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER)
]
chunks = (agent_ids[i : i + 30] for i in range(0, len(agent_ids), 30))
for chunk in chunks:
for agent_id in chunk:
agent_update(agent_id)
sleep(0.05)
sleep(4)
@app.task
def agent_outage_email_task(pk: int, alert_interval: Optional[float] = None) -> str:
def agent_outage_email_task(pk: int, alert_interval: Union[float, None] = None) -> str:
from alerts.models import Alert
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
alert = Alert.objects.get(pk=pk)
if not alert.email_sent:
sleep(random.randint(1, 5))
sleep(random.randint(1, 15))
alert.agent.send_outage_email()
alert.email_sent = djangotime.now()
alert.save(update_fields=["email_sent"])
@@ -55,7 +123,7 @@ def agent_outage_email_task(pk: int, alert_interval: Optional[float] = None) ->
# send an email only if the last email sent is older than alert interval
delta = djangotime.now() - dt.timedelta(days=alert_interval)
if alert.email_sent < delta:
sleep(random.randint(1, 5))
sleep(random.randint(1, 10))
alert.agent.send_outage_email()
alert.email_sent = djangotime.now()
alert.save(update_fields=["email_sent"])
@@ -67,13 +135,8 @@ def agent_outage_email_task(pk: int, alert_interval: Optional[float] = None) ->
def agent_recovery_email_task(pk: int) -> str:
from alerts.models import Alert
sleep(random.randint(1, 5))
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
sleep(random.randint(1, 15))
alert = Alert.objects.get(pk=pk)
alert.agent.send_recovery_email()
alert.resolved_email_sent = djangotime.now()
alert.save(update_fields=["resolved_email_sent"])
@@ -82,16 +145,13 @@ def agent_recovery_email_task(pk: int) -> str:
@app.task
def agent_outage_sms_task(pk: int, alert_interval: Optional[float] = None) -> str:
def agent_outage_sms_task(pk: int, alert_interval: Union[float, None] = None) -> str:
from alerts.models import Alert
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
alert = Alert.objects.get(pk=pk)
if not alert.sms_sent:
sleep(random.randint(1, 3))
sleep(random.randint(1, 15))
alert.agent.send_outage_sms()
alert.sms_sent = djangotime.now()
alert.save(update_fields=["sms_sent"])
@@ -100,7 +160,7 @@ def agent_outage_sms_task(pk: int, alert_interval: Optional[float] = None) -> st
# send an sms only if the last sms sent is older than alert interval
delta = djangotime.now() - dt.timedelta(days=alert_interval)
if alert.sms_sent < delta:
sleep(random.randint(1, 3))
sleep(random.randint(1, 10))
alert.agent.send_outage_sms()
alert.sms_sent = djangotime.now()
alert.save(update_fields=["sms_sent"])
@@ -113,11 +173,7 @@ def agent_recovery_sms_task(pk: int) -> str:
from alerts.models import Alert
sleep(random.randint(1, 3))
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
alert = Alert.objects.get(pk=pk)
alert.agent.send_recovery_sms()
alert.resolved_sms_sent = djangotime.now()
alert.save(update_fields=["resolved_sms_sent"])
@@ -141,7 +197,7 @@ def agent_outages_task() -> None:
)
for agent in agents:
if agent.status == AGENT_STATUS_OVERDUE:
if agent.status == "overdue":
Alert.handle_alert_failure(agent)
@@ -167,12 +223,12 @@ def run_script_email_results_task(
if r == "timeout":
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
log_type="scripting",
message=f"{agent.hostname}({agent.pk}) timed out running script.",
)
return
CORE = get_core_settings()
CORE = CoreSettings.objects.first()
subject = f"{agent.hostname} {script.name} Results"
exec_time = "{:.4f}".format(r["execution_time"])
body = (
@@ -185,21 +241,25 @@ def run_script_email_results_task(
msg = EmailMessage()
msg["Subject"] = subject
msg["From"] = CORE.smtp_from_email
msg["From"] = CORE.smtp_from_email # type:ignore
if emails:
msg["To"] = ", ".join(emails)
else:
msg["To"] = ", ".join(CORE.email_alert_recipients)
msg["To"] = ", ".join(CORE.email_alert_recipients) # type:ignore
msg.set_content(body)
try:
with smtplib.SMTP(CORE.smtp_host, CORE.smtp_port, timeout=20) as server:
if CORE.smtp_requires_auth:
with smtplib.SMTP(
CORE.smtp_host, CORE.smtp_port, timeout=20 # type:ignore
) as server: # type:ignore
if CORE.smtp_requires_auth: # type:ignore
server.ehlo()
server.starttls()
server.login(CORE.smtp_host_user, CORE.smtp_host_password)
server.login(
CORE.smtp_host_user, CORE.smtp_host_password # type:ignore
) # type:ignore
server.send_message(msg)
server.quit()
else:
@@ -211,22 +271,18 @@ def run_script_email_results_task(
@app.task
def clear_faults_task(older_than_days: int) -> None:
from alerts.models import Alert
# https://github.com/amidaware/tacticalrmm/issues/484
# https://github.com/wh1te909/tacticalrmm/issues/484
agents = Agent.objects.exclude(last_seen__isnull=True).filter(
last_seen__lt=djangotime.now() - djangotime.timedelta(days=older_than_days)
)
for agent in agents:
for check in agent.get_checks_with_policies():
# reset check status
if check.check_result:
check.check_result.status = CheckStatus.PASSING
check.check_result.save(update_fields=["status"])
if check.alert.filter(agent=agent, resolved=False).exists():
alert = Alert.create_or_return_check_alert(check, agent=agent)
if alert:
alert.resolve()
if agent.agentchecks.exists():
for check in agent.agentchecks.all():
# reset check status
check.status = "passing"
check.save(update_fields=["status"])
if check.alert.filter(resolved=False).exists():
check.alert.get(resolved=False).resolve()
# reset overdue alerts
agent.overdue_email_alert = False
@@ -250,8 +306,3 @@ def prune_agent_history(older_than_days: int) -> str:
).delete()
return "ok"
@app.task
def bulk_recover_agents_task() -> None:
call_command("bulk_restart_agents")
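The update tasks above all fan work out the same way: slice the agent id list into fixed-size chunks, fire the per-agent update, and sleep briefly per agent plus a longer pause per chunk so NATS is not flooded. A self-contained sketch of that throttling pattern, with the per-agent call passed in as a plain callable:

# Hedged sketch of the chunk-and-sleep throttling used by the agent update tasks.
from time import sleep
from typing import Callable, Iterator

def chunked(ids: list[str], size: int) -> Iterator[list[str]]:
    # yield successive fixed-size slices of the id list
    for i in range(0, len(ids), size):
        yield ids[i : i + size]

def throttled_dispatch(agent_ids: list[str], send: Callable[[str], None]) -> None:
    # 50 agents per chunk, a tiny pause per agent, a longer pause between chunks
    for chunk in chunked(agent_ids, 50):
        for agent_id in chunk:
            send(agent_id)
            sleep(0.05)
        sleep(4)

# usage sketch: throttled_dispatch(agent_ids, agent_update)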

View File

@@ -1,106 +0,0 @@
from unittest.mock import patch
from rest_framework.response import Response
from tacticalrmm.test import TacticalTestCase
class TestAgentInstalls(TacticalTestCase):
def setUp(self) -> None:
self.authenticate()
self.setup_coresettings()
self.setup_base_instance()
@patch("agents.utils.generate_linux_install")
@patch("knox.models.AuthToken.objects.create")
@patch("tacticalrmm.utils.generate_winagent_exe")
@patch("core.utils.token_is_valid")
@patch("agents.utils.get_agent_url")
def test_install_agent(
self,
mock_agent_url,
mock_token_valid,
mock_gen_win_exe,
mock_auth,
mock_linux_install,
):
mock_agent_url.return_value = "https://example.com"
mock_token_valid.return_value = "", False
mock_gen_win_exe.return_value = Response("ok")
mock_auth.return_value = "", "token"
mock_linux_install.return_value = Response("ok")
url = "/agents/installer/"
# test windows dynamic exe
data = {
"installMethod": "exe",
"client": self.site2.client.pk,
"site": self.site2.pk,
"expires": 24,
"agenttype": "server",
"power": 0,
"rdp": 1,
"ping": 0,
"goarch": "amd64",
"api": "https://api.example.com",
"fileName": "rmm-client-site-server.exe",
"plat": "windows",
}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
mock_gen_win_exe.assert_called_with(
client=self.site2.client.pk,
site=self.site2.pk,
agent_type="server",
rdp=1,
ping=0,
power=0,
goarch="amd64",
token="token",
api="https://api.example.com",
file_name="rmm-client-site-server.exe",
)
# test linux no code sign
data["plat"] = "linux"
data["installMethod"] = "bash"
data["rdp"] = 0
data["agenttype"] = "workstation"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 400)
# test linux
mock_token_valid.return_value = "token123", True
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
mock_linux_install.assert_called_with(
client=str(self.site2.client.pk),
site=str(self.site2.pk),
agent_type="workstation",
arch="amd64",
token="token",
api="https://api.example.com",
download_url="https://example.com",
)
# test manual
data["rdp"] = 1
data["installMethod"] = "manual"
r = self.client.post(url, data, format="json")
self.assertIn("rdp", r.json()["cmd"])
self.assertNotIn("power", r.json()["cmd"])
data.update({"ping": 1, "power": 1})
r = self.client.post(url, data, format="json")
self.assertIn("power", r.json()["cmd"])
self.assertIn("ping", r.json()["cmd"])
# test powershell
data["installMethod"] = "powershell"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.check_not_authenticated("post", url)
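The install test above stacks five @patch decorators; unittest.mock applies them bottom-up, so the decorator closest to the method provides the first mock parameter (mock_agent_url) and the top-most provides the last (mock_linux_install). A tiny self-contained illustration of that ordering, using generic stdlib targets rather than the project's:

# Hedged sketch: stacked @patch decorators are applied bottom-up, so the mock
# arguments arrive in reverse decorator order. Targets here are generic examples.
from unittest.mock import patch

@patch("os.path.exists")   # outermost patch -> last mock argument
@patch("os.getcwd")        # innermost patch -> first mock argument
def demo(mock_getcwd, mock_exists):
    mock_getcwd.return_value = "/tmp"
    mock_exists.return_value = True
    return mock_getcwd(), mock_exists("/tmp")

print(demo())  # ('/tmp', True)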

View File

@@ -1,313 +0,0 @@
from unittest.mock import patch
from django.conf import settings
from django.core.management import call_command
from model_bakery import baker
from packaging import version as pyver
from agents.models import Agent
from agents.tasks import auto_self_agent_update_task, send_agent_update_task
from logs.models import PendingAction
from tacticalrmm.constants import (
AGENT_DEFER,
AgentMonType,
AgentPlat,
GoArch,
PAAction,
PAStatus,
)
from tacticalrmm.test import TacticalTestCase
class TestAgentUpdate(TacticalTestCase):
def setUp(self) -> None:
self.authenticate()
self.setup_coresettings()
self.setup_base_instance()
@patch("agents.management.commands.update_agents.send_agent_update_task.delay")
@patch("agents.management.commands.update_agents.token_is_valid")
@patch("agents.management.commands.update_agents.get_core_settings")
def test_update_agents_mgmt_command(self, mock_core, mock_token, mock_update):
mock_token.return_value = ("token123", True)
baker.make_recipe(
"agents.online_agent",
site=self.site1,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
version="2.0.3",
_quantity=6,
)
baker.make_recipe(
"agents.online_agent",
site=self.site3,
monitoring_type=AgentMonType.WORKSTATION,
plat=AgentPlat.LINUX,
version="2.0.3",
_quantity=5,
)
baker.make_recipe(
"agents.online_agent",
site=self.site2,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
version=settings.LATEST_AGENT_VER,
_quantity=8,
)
mock_core.return_value.agent_auto_update = False
call_command("update_agents")
mock_update.assert_not_called()
mock_core.return_value.agent_auto_update = True
call_command("update_agents")
ids = list(
Agent.objects.defer(*AGENT_DEFER)
.exclude(version=settings.LATEST_AGENT_VER)
.values_list("agent_id", flat=True)
)
mock_update.assert_called_with(agent_ids=ids, token="token123", force=False)
@patch("agents.models.Agent.nats_cmd")
@patch("agents.models.get_agent_url")
def test_do_update(self, mock_agent_url, mock_nats_cmd):
mock_agent_url.return_value = "https://example.com/123"
# test noarch
agent_noarch = baker.make_recipe(
"agents.online_agent",
site=self.site1,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
version="2.3.0",
)
r = agent_noarch.do_update(token="", force=True)
self.assertEqual(r, "noarch")
# test too old
agent_old = baker.make_recipe(
"agents.online_agent",
site=self.site2,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
version="1.3.0",
goarch=GoArch.AMD64,
)
r = agent_old.do_update(token="", force=True)
self.assertEqual(r, "not supported")
win = baker.make_recipe(
"agents.online_agent",
site=self.site1,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
version="2.3.0",
goarch=GoArch.AMD64,
)
lin = baker.make_recipe(
"agents.online_agent",
site=self.site3,
monitoring_type=AgentMonType.WORKSTATION,
plat=AgentPlat.LINUX,
version="2.3.0",
goarch=GoArch.ARM32,
)
# test windows agent update
r = win.do_update(token="", force=False)
self.assertEqual(r, "created")
mock_nats_cmd.assert_called_with(
{
"func": "agentupdate",
"payload": {
"url": "https://example.com/123",
"version": settings.LATEST_AGENT_VER,
"inno": f"tacticalagent-v{settings.LATEST_AGENT_VER}-windows-amd64.exe",
},
},
wait=False,
)
action1 = PendingAction.objects.get(agent__agent_id=win.agent_id)
self.assertEqual(action1.action_type, PAAction.AGENT_UPDATE)
self.assertEqual(action1.status, PAStatus.PENDING)
self.assertEqual(action1.details["url"], "https://example.com/123")
self.assertEqual(
action1.details["inno"],
f"tacticalagent-v{settings.LATEST_AGENT_VER}-windows-amd64.exe",
)
self.assertEqual(action1.details["version"], settings.LATEST_AGENT_VER)
mock_nats_cmd.reset_mock()
# test linux agent update
r = lin.do_update(token="", force=False)
mock_nats_cmd.assert_called_with(
{
"func": "agentupdate",
"payload": {
"url": "https://example.com/123",
"version": settings.LATEST_AGENT_VER,
"inno": f"tacticalagent-v{settings.LATEST_AGENT_VER}-linux-arm.exe",
},
},
wait=False,
)
action2 = PendingAction.objects.get(agent__agent_id=lin.agent_id)
self.assertEqual(action2.action_type, PAAction.AGENT_UPDATE)
self.assertEqual(action2.status, PAStatus.PENDING)
self.assertEqual(action2.details["url"], "https://example.com/123")
self.assertEqual(
action2.details["inno"],
f"tacticalagent-v{settings.LATEST_AGENT_VER}-linux-arm.exe",
)
self.assertEqual(action2.details["version"], settings.LATEST_AGENT_VER)
# check if old agent update pending actions are being deleted
# should only be 1 pending action at all times
pa_count = win.pendingactions.filter(
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).count()
self.assertEqual(pa_count, 1)
for _ in range(4):
win.do_update(token="", force=False)
pa_count = win.pendingactions.filter(
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).count()
self.assertEqual(pa_count, 1)
def test_auto_self_agent_update_task(self):
auto_self_agent_update_task()
@patch("agents.models.Agent.do_update")
def test_send_agent_update_task(self, mock_update):
baker.make_recipe(
"agents.online_agent",
site=self.site2,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
version="2.3.0",
goarch=GoArch.AMD64,
_quantity=6,
)
ids = list(
Agent.objects.defer(*AGENT_DEFER)
.exclude(version=settings.LATEST_AGENT_VER)
.values_list("agent_id", flat=True)
)
send_agent_update_task(agent_ids=ids, token="", force=False)
self.assertEqual(mock_update.call_count, 6)
@patch("agents.views.token_is_valid")
@patch("agents.tasks.send_agent_update_task.delay")
def test_update_agents(self, mock_update, mock_token):
mock_token.return_value = ("", False)
url = "/agents/update/"
baker.make_recipe(
"agents.online_agent",
site=self.site2,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
version="2.3.0",
goarch=GoArch.AMD64,
_quantity=7,
)
baker.make_recipe(
"agents.online_agent",
site=self.site2,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
version=settings.LATEST_AGENT_VER,
goarch=GoArch.AMD64,
_quantity=3,
)
baker.make_recipe(
"agents.online_agent",
site=self.site2,
monitoring_type=AgentMonType.WORKSTATION,
plat=AgentPlat.LINUX,
version="2.0.1",
goarch=GoArch.ARM32,
_quantity=9,
)
agent_ids: list[str] = list(
Agent.objects.only("agent_id").values_list("agent_id", flat=True)
)
data = {"agent_ids": agent_ids}
expected: list[str] = [
i.agent_id
for i in Agent.objects.only("agent_id", "version")
if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER)
]
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
mock_update.assert_called_with(agent_ids=expected, token="", force=False)
self.check_not_authenticated("post", url)
@patch("agents.views.token_is_valid")
@patch("agents.tasks.send_agent_update_task.delay")
def test_agent_update_permissions(self, update_task, mock_token):
mock_token.return_value = ("", False)
agents = baker.make_recipe("agents.agent", _quantity=5)
other_agents = baker.make_recipe("agents.agent", _quantity=7)
url = f"/agents/update/"
data = {
"agent_ids": [agent.agent_id for agent in agents]
+ [agent.agent_id for agent in other_agents]
}
# test superuser access
self.check_authorized_superuser("post", url, data)
update_task.assert_called_with(
agent_ids=data["agent_ids"], token="", force=False
)
update_task.reset_mock()
user = self.create_user_with_roles([])
self.client.force_authenticate(user=user)
self.check_not_authorized("post", url, data)
update_task.assert_not_called()
user.role.can_update_agents = True
user.role.save()
self.check_authorized("post", url, data)
update_task.assert_called_with(
agent_ids=data["agent_ids"], token="", force=False
)
update_task.reset_mock()
# limit to client
# user.role.can_view_clients.set([agents[0].client])
# self.check_authorized("post", url, data)
# update_task.assert_called_with(agent_ids=[agent.agent_id for agent in agents])
# update_task.reset_mock()
# add site
# user.role.can_view_sites.set([other_agents[0].site])
# self.check_authorized("post", url, data)
# update_task.assert_called_with(agent_ids=data["agent_ids"])
# update_task.reset_mock()
# remove client permissions
# user.role.can_view_clients.clear()
# self.check_authorized("post", url, data)
# update_task.assert_called_with(
# agent_ids=[agent.agent_id for agent in other_agents]
# )

View File

@@ -1,60 +0,0 @@
from unittest.mock import patch, AsyncMock
from django.conf import settings
from rest_framework.response import Response
from agents.utils import generate_linux_install, get_agent_url
from tacticalrmm.test import TacticalTestCase
class TestAgentUtils(TacticalTestCase):
def setUp(self) -> None:
self.authenticate()
self.setup_coresettings()
self.setup_base_instance()
def test_get_agent_url(self):
ver = settings.LATEST_AGENT_VER
# test without token
r = get_agent_url(goarch="amd64", plat="windows", token="")
expected = f"https://github.com/amidaware/rmmagent/releases/download/v{ver}/tacticalagent-v{ver}-windows-amd64.exe"
self.assertEqual(r, expected)
# test with token
r = get_agent_url(goarch="386", plat="linux", token="token123")
expected = f"https://{settings.AGENTS_URL}version={ver}&arch=386&token=token123&plat=linux&api=api.example.com"
@patch("agents.utils.get_mesh_device_id")
@patch("agents.utils.asyncio.run")
@patch("agents.utils.get_mesh_ws_url")
@patch("agents.utils.get_core_settings")
def test_generate_linux_install(
self, mock_core, mock_mesh, mock_async_run, mock_mesh_device_id
):
mock_mesh_device_id.return_value = "meshdeviceid"
mock_core.return_value.mesh_site = "meshsite"
mock_async_run.return_value = "meshid"
mock_mesh.return_value = "meshws"
r = generate_linux_install(
client="1",
site="1",
agent_type="server",
arch="amd64",
token="token123",
api="api.example.com",
download_url="asdasd3423",
)
ret = r.getvalue().decode("utf-8")
self.assertIn(r"agentDL='asdasd3423'", ret)
self.assertIn(
r"meshDL='meshsite/meshagents?id=meshid&installflags=0&meshinstall=6'", ret
)
self.assertIn(r"apiURL='api.example.com'", ret)
self.assertIn(r"agentDL='asdasd3423'", ret)
self.assertIn(r"token='token123'", ret)
self.assertIn(r"clientID='1'", ret)
self.assertIn(r"siteID='1'", ret)
self.assertIn(r"agentType='server'", ret)

View File

@@ -1,46 +0,0 @@
from unittest.mock import call, patch
from django.core.management import call_command
from model_bakery import baker
from tacticalrmm.constants import AgentMonType, AgentPlat
from tacticalrmm.test import TacticalTestCase
class TestBulkRestartAgents(TacticalTestCase):
def setUp(self) -> None:
self.authenticate()
self.setup_coresettings()
self.setup_base_instance()
@patch("core.management.commands.bulk_restart_agents.sleep")
@patch("agents.models.Agent.recover")
@patch("core.management.commands.bulk_restart_agents.get_mesh_ws_url")
def test_bulk_restart_agents_mgmt_cmd(
self, get_mesh_ws_url, recover, mock_sleep
) -> None:
get_mesh_ws_url.return_value = "https://mesh.example.com/test"
baker.make_recipe(
"agents.online_agent",
site=self.site1,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
)
baker.make_recipe(
"agents.online_agent",
site=self.site3,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.LINUX,
)
calls = [
call("tacagent", "https://mesh.example.com/test", wait=False),
call("mesh", "", wait=False),
]
call_command("bulk_restart_agents")
recover.assert_has_calls(calls)
mock_sleep.assert_called_with(10)

View File

@@ -1,63 +0,0 @@
from typing import TYPE_CHECKING
from unittest.mock import patch
from model_bakery import baker
from tacticalrmm.constants import AgentMonType, AgentPlat
from tacticalrmm.test import TacticalTestCase
if TYPE_CHECKING:
from clients.models import Client, Site
class TestRecovery(TacticalTestCase):
def setUp(self) -> None:
self.authenticate()
self.setup_coresettings()
self.client1: "Client" = baker.make("clients.Client")
self.site1: "Site" = baker.make("clients.Site", client=self.client1)
@patch("agents.models.Agent.recover")
@patch("agents.views.get_mesh_ws_url")
def test_recover(self, get_mesh_ws_url, recover) -> None:
get_mesh_ws_url.return_value = "https://mesh.example.com"
agent = baker.make_recipe(
"agents.online_agent",
site=self.site1,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
)
url = f"/agents/{agent.agent_id}/recover/"
# test successful tacticalagent recovery
data = {"mode": "tacagent"}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
recover.assert_called_with("tacagent", "https://mesh.example.com", wait=False)
get_mesh_ws_url.assert_called_once()
# reset mocks
recover.reset_mock()
get_mesh_ws_url.reset_mock()
# test successful mesh agent recovery
data = {"mode": "mesh"}
recover.return_value = ("ok", False)
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
get_mesh_ws_url.assert_not_called()
recover.assert_called_with("mesh", "")
# reset mocks
recover.reset_mock()
get_mesh_ws_url.reset_mock()
# test failed mesh agent recovery
data = {"mode": "mesh"}
recover.return_value = ("Unable to contact the agent", True)
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 400)
self.check_not_authenticated("post", url)

View File

@@ -1,10 +1,9 @@
from django.urls import path
from autotasks.views import GetAddAutoTasks
from checks.views import GetAddChecks
from logs.views import PendingActions
from . import views
from checks.views import GetAddChecks
from autotasks.views import GetAddAutoTasks
from logs.views import PendingActions
urlpatterns = [
# agent views
@@ -41,5 +40,5 @@ urlpatterns = [
path("versions/", views.get_agent_versions),
path("update/", views.update_agents),
path("installer/", views.install_agent),
path("bulkrecovery/", views.bulk_agent_recovery),
path("<str:arch>/getmeshexe/", views.get_mesh_exe),
]

View File

@@ -1,81 +1,40 @@
import asyncio
import tempfile
import random
import urllib.parse
import requests
from django.conf import settings
from django.http import FileResponse
from core.utils import get_core_settings, get_mesh_device_id, get_mesh_ws_url
from tacticalrmm.constants import MeshAgentIdent
from core.models import CodeSignToken
def get_agent_url(*, goarch: str, plat: str, token: str = "") -> str:
ver = settings.LATEST_AGENT_VER
if token:
params = {
"version": ver,
"arch": goarch,
"token": token,
"plat": plat,
"api": settings.ALLOWED_HOSTS[0],
}
return settings.AGENTS_URL + urllib.parse.urlencode(params)
def get_exegen_url() -> str:
urls: list[str] = settings.EXE_GEN_URLS
for url in urls:
try:
r = requests.get(url, timeout=10)
except:
continue
return f"https://github.com/amidaware/rmmagent/releases/download/v{ver}/tacticalagent-v{ver}-{plat}-{goarch}.exe"
if r.status_code == 200:
return url
return random.choice(urls)
def generate_linux_install(
client: str,
site: str,
agent_type: str,
arch: str,
token: str,
api: str,
download_url: str,
) -> FileResponse:
def get_winagent_url(arch: str) -> str:
match arch:
case "amd64":
arch_id = MeshAgentIdent.LINUX64
case "386":
arch_id = MeshAgentIdent.LINUX32
case "arm64":
arch_id = MeshAgentIdent.LINUX_ARM_64
case "arm":
arch_id = MeshAgentIdent.LINUX_ARM_HF
case _:
arch_id = "not_found"
dl_url = settings.DL_32 if arch == "32" else settings.DL_64
core = get_core_settings()
try:
t: CodeSignToken = CodeSignToken.objects.first() # type: ignore
if t.is_valid:
base_url = get_exegen_url() + "/api/v1/winagents/?"
params = {
"version": settings.LATEST_AGENT_VER,
"arch": arch,
"token": t.token,
}
dl_url = base_url + urllib.parse.urlencode(params)
except:
pass
uri = get_mesh_ws_url()
mesh_id = asyncio.run(get_mesh_device_id(uri, core.mesh_device_group))
mesh_dl = (
f"{core.mesh_site}/meshagents?id={mesh_id}&installflags=0&meshinstall={arch_id}"
)
sh = settings.LINUX_AGENT_SCRIPT
with open(sh, "r") as f:
text = f.read()
replace = {
"agentDLChange": download_url,
"meshDLChange": mesh_dl,
"clientIDChange": client,
"siteIDChange": site,
"agentTypeChange": agent_type,
"tokenChange": token,
"apiURLChange": api,
}
for i, j in replace.items():
text = text.replace(i, j)
with tempfile.NamedTemporaryFile() as fp:
with open(fp.name, "w") as f:
f.write(text)
f.write("\n")
return FileResponse(
open(fp.name, "rb"), as_attachment=True, filename="linux_agent_install.sh"
)
return dl_url
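get_agent_url above either builds a code-signed download URL by url-encoding query parameters against the agents endpoint, or falls back to the public GitHub release asset when there is no token. A minimal sketch of the same construction; the three module-level values are illustrative placeholders for the corresponding Django settings (AGENTS_URL, LATEST_AGENT_VER, ALLOWED_HOSTS[0]), not real values.

# Hedged sketch mirroring get_agent_url above; constants are placeholder values.
import urllib.parse

LATEST_AGENT_VER = "2.0.4"                   # settings.LATEST_AGENT_VER (placeholder)
AGENTS_URL = "https://agents.example.com/?"  # settings.AGENTS_URL (placeholder)
API_HOST = "api.example.com"                 # settings.ALLOWED_HOSTS[0] (placeholder)

def build_agent_url(*, goarch: str, plat: str, token: str = "") -> str:
    if token:
        # code-signed build served from the agents endpoint
        params = {
            "version": LATEST_AGENT_VER,
            "arch": goarch,
            "token": token,
            "plat": plat,
            "api": API_HOST,
        }
        return AGENTS_URL + urllib.parse.urlencode(params)
    # no valid code-sign token: fall back to the public GitHub release asset
    return (
        "https://github.com/amidaware/rmmagent/releases/download/"
        f"v{LATEST_AGENT_VER}/tacticalagent-v{LATEST_AGENT_VER}-{plat}-{goarch}.exe"
    )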

View File

@@ -6,102 +6,72 @@ import string
import time
from django.conf import settings
from django.db.models import Count, Exists, OuterRef, Prefetch, Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from meshctrl.utils import get_login_token
from django.db.models import Q
from packaging import version as pyver
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.exceptions import PermissionDenied
from core.utils import (
get_core_settings,
get_mesh_ws_url,
remove_mesh_agent,
token_is_valid,
)
from core.models import CoreSettings
from logs.models import AuditLog, DebugLog, PendingAction
from scripts.models import Script
from scripts.tasks import handle_bulk_command_task, handle_bulk_script_task
from tacticalrmm.constants import (
from tacticalrmm.utils import (
get_default_timezone,
notify_error,
reload_nats,
AGENT_DEFER,
AGENT_STATUS_OFFLINE,
AGENT_STATUS_ONLINE,
AgentHistoryType,
AgentMonType,
AgentPlat,
CustomFieldModel,
EvtLogNames,
PAAction,
PAStatus,
)
from tacticalrmm.helpers import notify_error
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from tacticalrmm.utils import get_default_timezone, reload_nats
from winupdate.models import WinUpdate
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from .models import Agent, AgentCustomField, AgentHistory, Note
from .models import Agent, AgentCustomField, Note, RecoveryAction, AgentHistory
from .permissions import (
AgentHistoryPerms,
AgentNotesPerms,
AgentPerms,
EvtLogPerms,
InstallAgentPerms,
RecoverAgentPerms,
AgentNotesPerms,
ManageProcPerms,
MeshPerms,
PingAgentPerms,
RebootAgentPerms,
RecoverAgentPerms,
RunBulkPerms,
RunScriptPerms,
SendCMDPerms,
PingAgentPerms,
UpdateAgentPerms,
)
from .serializers import (
AgentCustomFieldSerializer,
AgentHistorySerializer,
AgentHostnameSerializer,
AgentNoteSerializer,
AgentSerializer,
AgentTableSerializer,
AgentNoteSerializer,
)
from .tasks import (
bulk_recover_agents_task,
run_script_email_results_task,
send_agent_update_task,
)
from .tasks import run_script_email_results_task, send_agent_update_task
class GetAgents(APIView):
permission_classes = [IsAuthenticated, AgentPerms]
def get(self, request):
from checks.models import Check, CheckResult
monitoring_type_filter = Q()
client_site_filter = Q()
monitoring_type = request.query_params.get("monitoring_type", None)
if monitoring_type:
if monitoring_type in AgentMonType.values:
monitoring_type_filter = Q(monitoring_type=monitoring_type)
else:
return notify_error("monitoring type does not exist")
if "site" in request.query_params.keys():
client_site_filter = Q(site_id=request.query_params["site"])
filter = Q(site_id=request.query_params["site"])
elif "client" in request.query_params.keys():
client_site_filter = Q(site__client_id=request.query_params["client"])
filter = Q(site__client_id=request.query_params["client"])
else:
filter = Q()
# by default detail=true
if (
@@ -109,53 +79,24 @@ class GetAgents(APIView):
or "detail" in request.query_params.keys()
and request.query_params["detail"] == "true"
):
agents = (
Agent.objects.filter_by_role(request.user) # type: ignore
.filter(monitoring_type_filter)
.filter(client_site_filter)
.select_related("site", "policy", "alert_template")
.prefetch_related("agentchecks")
.filter(filter)
.defer(*AGENT_DEFER)
.select_related(
"site__server_policy",
"site__workstation_policy",
"site__client__server_policy",
"site__client__workstation_policy",
"policy",
"alert_template",
)
.prefetch_related(
Prefetch(
"agentchecks",
queryset=Check.objects.select_related("script"),
),
Prefetch(
"checkresults",
queryset=CheckResult.objects.select_related("assigned_check"),
),
)
.annotate(
pending_actions_count=Count(
"pendingactions",
filter=Q(pendingactions__status=PAStatus.PENDING),
)
)
.annotate(
has_patches_pending=Exists(
WinUpdate.objects.filter(
agent_id=OuterRef("pk"), action="approve", installed=False
)
)
)
)
serializer = AgentTableSerializer(agents, many=True)
ctx = {"default_tz": get_default_timezone()}
serializer = AgentTableSerializer(agents, many=True, context=ctx)
# if detail=false
else:
agents = (
Agent.objects.filter_by_role(request.user) # type: ignore
.defer(*AGENT_DEFER)
.select_related("site__client")
.filter(monitoring_type_filter)
.filter(client_site_filter)
.select_related("site")
.filter(filter)
.only("agent_id", "hostname", "site")
)
serializer = AgentHostnameSerializer(agents, many=True)
@@ -191,13 +132,13 @@ class GetUpdateDeleteAgent(APIView):
for field in request.data["custom_fields"]:
custom_field = field
custom_field["agent"] = agent.pk
custom_field["agent"] = agent.id # type: ignore
if AgentCustomField.objects.filter(
field=field["field"], agent=agent.pk
field=field["field"], agent=agent.id # type: ignore
):
value = AgentCustomField.objects.get(
field=field["field"], agent=agent.pk
field=field["field"], agent=agent.id # type: ignore
)
serializer = AgentCustomFieldSerializer(
instance=value, data=custom_field
@@ -214,19 +155,10 @@ class GetUpdateDeleteAgent(APIView):
# uninstall agent
def delete(self, request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
code = "foo"
if agent.plat == AgentPlat.LINUX:
with open(settings.LINUX_AGENT_SCRIPT, "r") as f:
code = f.read()
asyncio.run(agent.nats_cmd({"func": "uninstall", "code": code}, wait=False))
asyncio.run(agent.nats_cmd({"func": "uninstall"}, wait=False))
name = agent.hostname
mesh_id = agent.mesh_node_id
agent.delete()
reload_nats()
uri = get_mesh_ws_url()
asyncio.run(remove_mesh_agent(uri, mesh_id))
return Response(f"{name} will now be uninstalled.")
@@ -267,19 +199,19 @@ class AgentMeshCentral(APIView):
# get mesh urls
def get(self, request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
core = get_core_settings()
core = CoreSettings.objects.first()
if not core.mesh_disable_auto_login:
token = get_login_token(
key=core.mesh_token, user=f"user//{core.mesh_username}"
)
token_param = f"login={token}&"
else:
token_param = ""
token = agent.get_login_token(
key=core.mesh_token,
user=f"user//{core.mesh_username.lower()}", # type:ignore
)
control = f"{core.mesh_site}/?{token_param}gotonode={agent.mesh_node_id}&viewmode=11&hide=31"
terminal = f"{core.mesh_site}/?{token_param}gotonode={agent.mesh_node_id}&viewmode=12&hide=31"
file = f"{core.mesh_site}/?{token_param}gotonode={agent.mesh_node_id}&viewmode=13&hide=31"
if token == "err":
return notify_error("Invalid mesh token")
control = f"{core.mesh_site}/?login={token}&gotonode={agent.mesh_node_id}&viewmode=11&hide=31" # type:ignore
terminal = f"{core.mesh_site}/?login={token}&gotonode={agent.mesh_node_id}&viewmode=12&hide=31" # type:ignore
file = f"{core.mesh_site}/?login={token}&gotonode={agent.mesh_node_id}&viewmode=13&hide=31" # type:ignore
AuditLog.audit_mesh_session(
username=request.user.username,
@@ -313,9 +245,9 @@ class AgentMeshCentral(APIView):
@permission_classes([IsAuthenticated, AgentPerms])
def get_agent_versions(request):
agents = (
Agent.objects.defer(*AGENT_DEFER)
.filter_by_role(request.user) # type: ignore
.select_related("site__client")
Agent.objects.filter_by_role(request.user)
.prefetch_related("site")
.only("pk", "hostname")
)
return Response(
{
@@ -329,7 +261,7 @@ def get_agent_versions(request):
@permission_classes([IsAuthenticated, UpdateAgentPerms])
def update_agents(request):
q = (
Agent.objects.filter_by_role(request.user) # type: ignore
Agent.objects.filter_by_role(request.user)
.filter(agent_id__in=request.data["agent_ids"])
.only("agent_id", "version")
)
@@ -338,9 +270,7 @@ def update_agents(request):
for i in q
if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER)
]
token, _ = token_is_valid()
send_agent_update_task.delay(agent_ids=agent_ids, token=token, force=False)
send_agent_update_task.delay(agent_ids=agent_ids)
return Response("ok")
@@ -348,18 +278,18 @@ def update_agents(request):
@permission_classes([IsAuthenticated, PingAgentPerms])
def ping(request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
status = AGENT_STATUS_OFFLINE
status = "offline"
attempts = 0
while 1:
r = asyncio.run(agent.nats_cmd({"func": "ping"}, timeout=2))
if r == "pong":
status = AGENT_STATUS_ONLINE
status = "online"
break
else:
attempts += 1
time.sleep(0.5)
time.sleep(1)
if attempts >= 3:
if attempts >= 5:
break
return Response({"name": agent.hostname, "status": status})
@@ -374,7 +304,7 @@ def get_event_log(request, agent_id, logtype, days):
return demo_get_eventlog()
agent = get_object_or_404(Agent, agent_id=agent_id)
timeout = 180 if logtype == EvtLogNames.SECURITY else 30
timeout = 180 if logtype == "Security" else 30
data = {
"func": "eventlog",
@@ -396,23 +326,18 @@ def get_event_log(request, agent_id, logtype, days):
def send_raw_cmd(request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
timeout = int(request.data["timeout"])
if request.data["shell"] == "custom" and request.data["custom_shell"]:
shell = request.data["custom_shell"]
else:
shell = request.data["shell"]
data = {
"func": "rawcmd",
"timeout": timeout,
"payload": {
"command": request.data["cmd"],
"shell": shell,
"shell": request.data["shell"],
},
}
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.CMD_RUN,
type="cmd_run",
command=request.data["cmd"],
username=request.user.username[:50],
)
@@ -427,7 +352,7 @@ def send_raw_cmd(request, agent_id):
username=request.user.username,
agent=agent,
cmd=request.data["cmd"],
shell=shell,
shell=request.data["shell"],
debug_info={"ip": request._client_ip},
)
@@ -448,11 +373,9 @@ class Reboot(APIView):
# reboot later
def patch(self, request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
if agent.is_posix:
return notify_error(f"Not currently implemented for {agent.plat}")
try:
obj = dt.datetime.strptime(request.data["datetime"], "%Y-%m-%dT%H:%M:%S")
obj = dt.datetime.strptime(request.data["datetime"], "%Y-%m-%d %H:%M")
except Exception:
return notify_error("Invalid date")
@@ -460,28 +383,18 @@ class Reboot(APIView):
random.choice(string.ascii_letters) for _ in range(10)
)
expire_date = obj + djangotime.timedelta(minutes=5)
nats_data = {
"func": "schedtask",
"schedtaskpayload": {
"type": "schedreboot",
"enabled": True,
"delete_expired_task_after": True,
"start_when_available": False,
"multiple_instances": 2,
"trigger": "runonce",
"deleteafter": True,
"trigger": "once",
"name": task_name,
"start_year": int(dt.datetime.strftime(obj, "%Y")),
"start_month": int(dt.datetime.strftime(obj, "%-m")),
"start_day": int(dt.datetime.strftime(obj, "%-d")),
"start_hour": int(dt.datetime.strftime(obj, "%-H")),
"start_min": int(dt.datetime.strftime(obj, "%-M")),
"expire_year": int(expire_date.strftime("%Y")),
"expire_month": int(expire_date.strftime("%-m")),
"expire_day": int(expire_date.strftime("%-d")),
"expire_hour": int(expire_date.strftime("%-H")),
"expire_min": int(expire_date.strftime("%-M")),
"year": int(dt.datetime.strftime(obj, "%Y")),
"month": dt.datetime.strftime(obj, "%B"),
"day": int(dt.datetime.strftime(obj, "%d")),
"hour": int(dt.datetime.strftime(obj, "%H")),
"min": int(dt.datetime.strftime(obj, "%M")),
},
}
@@ -491,7 +404,7 @@ class Reboot(APIView):
details = {"taskname": task_name, "time": str(obj)}
PendingAction.objects.create(
agent=agent, action_type=PAAction.SCHED_REBOOT, details=details
agent=agent, action_type="schedreboot", details=details
)
nice_time = dt.datetime.strftime(obj, "%B %d, %Y at %I:%M %p")
return Response(
@@ -503,24 +416,38 @@ class Reboot(APIView):
@permission_classes([IsAuthenticated, InstallAgentPerms])
def install_agent(request):
from knox.models import AuthToken
from accounts.models import User
from agents.utils import get_agent_url
from core.utils import token_is_valid
from agents.utils import get_winagent_url
client_id = request.data["client"]
site_id = request.data["site"]
version = settings.LATEST_AGENT_VER
goarch = request.data["goarch"]
plat = request.data["plat"]
arch = request.data["arch"]
if not _has_perm_on_site(request.user, site_id):
raise PermissionDenied()
codesign_token, is_valid = token_is_valid()
# response type is blob so we have to use
# status codes and render error message on the frontend
if arch == "64" and not os.path.exists(
os.path.join(settings.EXE_DIR, "meshagent.exe")
):
return notify_error(
"Missing 64 bit meshagent.exe. Upload it from Settings > Global Settings > MeshCentral"
)
inno = f"tacticalagent-v{version}-{plat}-{goarch}.exe"
download_url = get_agent_url(goarch=goarch, plat=plat, token=codesign_token)
if arch == "32" and not os.path.exists(
os.path.join(settings.EXE_DIR, "meshagent-x86.exe")
):
return notify_error(
"Missing 32 bit meshagent.exe. Upload it from Settings > Global Settings > MeshCentral"
)
inno = (
f"winagent-v{version}.exe" if arch == "64" else f"winagent-v{version}-x86.exe"
)
download_url = get_winagent_url(arch)
installer_user = User.objects.filter(is_installer_user=True).first()
@@ -538,34 +465,12 @@ def install_agent(request):
rdp=request.data["rdp"],
ping=request.data["ping"],
power=request.data["power"],
goarch=goarch,
arch=arch,
token=token,
api=request.data["api"],
file_name=request.data["fileName"],
)
elif request.data["installMethod"] == "bash":
# TODO
# linux agents are in beta for now, only available for sponsors for testing
# remove this after it's out of beta
if not is_valid:
return notify_error(
"Missing code signing token, or token is no longer valid. Please read the docs for more info."
)
from agents.utils import generate_linux_install
return generate_linux_install(
client=str(client_id),
site=str(site_id),
agent_type=request.data["agenttype"],
arch=goarch,
token=token,
api=request.data["api"],
download_url=download_url,
)
elif request.data["installMethod"] == "manual":
cmd = [
inno,
@@ -655,23 +560,40 @@ def install_agent(request):
@api_view(["POST"])
@permission_classes([IsAuthenticated, RecoverAgentPerms])
def recover(request, agent_id: str) -> Response:
agent: Agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=agent_id
)
def recover(request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
mode = request.data["mode"]
# attempt a realtime recovery, otherwise fall back to old recovery method
if mode == "tacagent" or mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
r = asyncio.run(agent.nats_cmd(data, timeout=10))
if r == "ok":
return Response("Successfully completed recovery")
if agent.recoveryactions.filter(last_run=None).exists(): # type: ignore
return notify_error(
"A recovery action is currently pending. Please wait for the next agent check-in."
)
if mode == "command" and not request.data["cmd"]:
return notify_error("Command is required")
# if we've made it this far and realtime recovery didn't work,
# tacagent service is the fallback recovery so we obviously can't use that to recover itself if it's down
if mode == "tacagent":
uri = get_mesh_ws_url()
agent.recover(mode, uri, wait=False)
return Response("Recovery will be attempted shortly")
return notify_error(
"Requires RPC service to be functional. Please recover that first"
)
elif mode == "mesh":
r, err = agent.recover(mode, "")
if err:
return notify_error(f"Unable to complete recovery: {r}")
# we should only get here if all other methods fail
RecoveryAction(
agent=agent,
mode=mode,
command=request.data["cmd"] if mode == "command" else None,
).save()
return Response("Successfully completed recovery")
return Response("Recovery will be attempted on the agent's next check-in")
@api_view(["POST"])
@@ -692,7 +614,7 @@ def run_script(request, agent_id):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
type="script_run",
script=script,
username=request.user.username[:50],
)
@@ -732,11 +654,11 @@ def run_script(request, agent_id):
custom_field = CustomField.objects.get(pk=request.data["custom_field"])
if custom_field.model == CustomFieldModel.AGENT:
if custom_field.model == "agent":
field = custom_field.get_or_create_field_value(agent)
elif custom_field.model == CustomFieldModel.CLIENT:
elif custom_field.model == "client":
field = custom_field.get_or_create_field_value(agent.client)
elif custom_field.model == CustomFieldModel.SITE:
elif custom_field.model == "site":
field = custom_field.get_or_create_field_value(agent.site)
else:
return notify_error("Custom Field was invalid")
@@ -768,6 +690,27 @@ def run_script(request, agent_id):
return Response(f"{script.name} will now be run on {agent.hostname}")
@api_view(["POST"])
def get_mesh_exe(request, arch):
filename = "meshagent.exe" if arch == "64" else "meshagent-x86.exe"
mesh_exe = os.path.join(settings.EXE_DIR, filename)
if not os.path.exists(mesh_exe):
return notify_error(f"File {filename} has not been uploaded.")
if settings.DEBUG:
with open(mesh_exe, "rb") as f:
response = HttpResponse(
f.read(), content_type="application/vnd.microsoft.portable-executable"
)
response["Content-Disposition"] = f"inline; filename={filename}"
return response
else:
response = HttpResponse()
response["Content-Disposition"] = f"attachment; filename={filename}"
response["X-Accel-Redirect"] = f"/private/exe/{filename}"
return response
class GetAddNotes(APIView):
permission_classes = [IsAuthenticated, AgentNotesPerms]
@@ -776,7 +719,7 @@ class GetAddNotes(APIView):
agent = get_object_or_404(Agent, agent_id=agent_id)
notes = Note.objects.filter(agent=agent)
else:
notes = Note.objects.filter_by_role(request.user) # type: ignore
notes = Note.objects.filter_by_role(request.user)
return Response(AgentNoteSerializer(notes, many=True).data)
@@ -785,9 +728,6 @@ class GetAddNotes(APIView):
if not _has_perm_on_agent(request.user, agent.agent_id):
raise PermissionDenied()
if "note" not in request.data.keys():
return notify_error("Cannot add an empty note")
data = {
"note": request.data["note"],
"agent": agent.pk,
@@ -841,37 +781,32 @@ def bulk(request):
if request.data["target"] == "client":
if not _has_perm_on_client(request.user, request.data["client"]):
raise PermissionDenied()
q = Agent.objects.filter_by_role(request.user).filter( # type: ignore
q = Agent.objects.filter_by_role(request.user).filter(
site__client_id=request.data["client"]
)
elif request.data["target"] == "site":
if not _has_perm_on_site(request.user, request.data["site"]):
raise PermissionDenied()
q = Agent.objects.filter_by_role(request.user).filter( # type: ignore
q = Agent.objects.filter_by_role(request.user).filter(
site_id=request.data["site"]
)
elif request.data["target"] == "agents":
q = Agent.objects.filter_by_role(request.user).filter( # type: ignore
q = Agent.objects.filter_by_role(request.user).filter(
agent_id__in=request.data["agents"]
)
elif request.data["target"] == "all":
q = Agent.objects.filter_by_role(request.user).only("pk", "monitoring_type") # type: ignore
q = Agent.objects.filter_by_role(request.user).only("pk", "monitoring_type")
else:
return notify_error("Something went wrong")
if request.data["monType"] == "servers":
q = q.filter(monitoring_type=AgentMonType.SERVER)
q = q.filter(monitoring_type="server")
elif request.data["monType"] == "workstations":
q = q.filter(monitoring_type=AgentMonType.WORKSTATION)
if request.data["osType"] == AgentPlat.WINDOWS:
q = q.filter(plat=AgentPlat.WINDOWS)
elif request.data["osType"] == AgentPlat.LINUX:
q = q.filter(plat=AgentPlat.LINUX)
q = q.filter(monitoring_type="workstation")
agents: list[int] = [agent.pk for agent in q]
@@ -886,15 +821,10 @@ def bulk(request):
)
if request.data["mode"] == "command":
if request.data["shell"] == "custom" and request.data["custom_shell"]:
shell = request.data["custom_shell"]
else:
shell = request.data["shell"]
handle_bulk_command_task.delay(
agents,
request.data["cmd"],
shell,
request.data["shell"],
request.data["timeout"],
request.user.username[:50],
run_on_offline=request.data["offlineAgents"],
@@ -935,7 +865,7 @@ def agent_maintenance(request):
raise PermissionDenied()
count = (
Agent.objects.filter_by_role(request.user) # type: ignore
Agent.objects.filter_by_role(request.user)
.filter(site__client_id=request.data["id"])
.update(maintenance_mode=request.data["action"])
)
@@ -945,7 +875,7 @@ def agent_maintenance(request):
raise PermissionDenied()
count = (
Agent.objects.filter_by_role(request.user) # type: ignore
Agent.objects.filter_by_role(request.user)
.filter(site_id=request.data["id"])
.update(maintenance_mode=request.data["action"])
)
@@ -962,13 +892,6 @@ def agent_maintenance(request):
)
@api_view(["GET"])
@permission_classes([IsAuthenticated, RecoverAgentPerms])
def bulk_agent_recovery(request):
bulk_recover_agents_task.delay()
return Response("Agents will now be recovered")
class WMI(APIView):
permission_classes = [IsAuthenticated, AgentPerms]
@@ -988,6 +911,6 @@ class AgentHistoryView(APIView):
agent = get_object_or_404(Agent, agent_id=agent_id)
history = AgentHistory.objects.filter(agent=agent)
else:
history = AgentHistory.objects.filter_by_role(request.user) # type: ignore
history = AgentHistory.objects.filter_by_role(request.user)
ctx = {"default_tz": get_default_timezone()}
return Response(AgentHistorySerializer(history, many=True, context=ctx).data)
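The ping view above retries the NATS ping a few times with a short delay before reporting the agent offline. Below is a stand-alone sketch of that loop; the do_ping callable is a hypothetical stand-in for asyncio.run(agent.nats_cmd({"func": "ping"}, timeout=2)).

# Hedged sketch of the ping retry loop in the ping view above.
from time import sleep
from typing import Callable

def ping_with_retries(do_ping: Callable[[], str], max_attempts: int = 3) -> str:
    # report "online" on the first "pong", otherwise "offline" after max_attempts
    attempts = 0
    while True:
        if do_ping() == "pong":
            return "online"
        attempts += 1
        sleep(0.5)
        if attempts >= max_attempts:
            return "offline"

# usage sketch:
# status = ping_with_retries(lambda: asyncio.run(agent.nats_cmd({"func": "ping"}, timeout=2)))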

View File

@@ -1,24 +0,0 @@
# Generated by Django 4.0.3 on 2022-04-07 17:28
import django.db.models.deletion
from django.db import migrations, models
def delete_alerts_without_agent(apps, schema):
Alert = apps.get_model("alerts", "Alert")
Alert.objects.filter(agent=None).delete()
class Migration(migrations.Migration):
dependencies = [
("agents", "0047_alter_agent_plat_alter_agent_site"),
("alerts", "0010_auto_20210917_1954"),
]
operations = [
migrations.RunPython(
delete_alerts_without_agent, reverse_code=migrations.RunPython.noop
),
]

View File

@@ -1,23 +0,0 @@
# Generated by Django 4.0.5 on 2022-06-29 07:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('alerts', '0011_alter_alert_agent'),
]
operations = [
migrations.AlterField(
model_name='alert',
name='action_retcode',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='alert',
name='resolved_action_retcode',
field=models.BigIntegerField(blank=True, null=True),
),
]

View File

@@ -1,7 +1,7 @@
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, cast
from typing import TYPE_CHECKING, Union
from django.contrib.postgres.fields import ArrayField
from django.db import models
@@ -9,20 +9,26 @@ from django.db.models.fields import BooleanField, PositiveIntegerField
from django.utils import timezone as djangotime
from logs.models import BaseAuditModel, DebugLog
from tacticalrmm.constants import (
AgentMonType,
AlertSeverity,
AlertType,
CheckType,
DebugLogType,
)
from tacticalrmm.models import PermissionQuerySet
if TYPE_CHECKING:
from agents.models import Agent
from autotasks.models import AutomatedTask, TaskResult
from checks.models import Check, CheckResult
from clients.models import Client, Site
from autotasks.models import AutomatedTask
from checks.models import Check
SEVERITY_CHOICES = [
("info", "Informational"),
("warning", "Warning"),
("error", "Error"),
]
ALERT_TYPE_CHOICES = [
("availability", "Availability"),
("check", "Check"),
("task", "Task"),
("custom", "Custom"),
]
class Alert(models.Model):
@@ -50,7 +56,7 @@ class Alert(models.Model):
blank=True,
)
alert_type = models.CharField(
max_length=20, choices=AlertType.choices, default=AlertType.AVAILABILITY
max_length=20, choices=ALERT_TYPE_CHOICES, default="availability"
)
message = models.TextField(null=True, blank=True)
alert_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
@@ -58,9 +64,7 @@ class Alert(models.Model):
snooze_until = models.DateTimeField(null=True, blank=True)
resolved = models.BooleanField(default=False)
resolved_on = models.DateTimeField(null=True, blank=True)
severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
severity = models.CharField(max_length=30, choices=SEVERITY_CHOICES, default="info")
email_sent = models.DateTimeField(null=True, blank=True)
resolved_email_sent = models.DateTimeField(null=True, blank=True)
sms_sent = models.DateTimeField(null=True, blank=True)
@@ -69,208 +73,72 @@ class Alert(models.Model):
action_run = models.DateTimeField(null=True, blank=True)
action_stdout = models.TextField(null=True, blank=True)
action_stderr = models.TextField(null=True, blank=True)
action_retcode = models.BigIntegerField(null=True, blank=True)
action_retcode = models.IntegerField(null=True, blank=True)
action_execution_time = models.CharField(max_length=100, null=True, blank=True)
resolved_action_run = models.DateTimeField(null=True, blank=True)
resolved_action_stdout = models.TextField(null=True, blank=True)
resolved_action_stderr = models.TextField(null=True, blank=True)
resolved_action_retcode = models.BigIntegerField(null=True, blank=True)
resolved_action_retcode = models.IntegerField(null=True, blank=True)
resolved_action_execution_time = models.CharField(
max_length=100, null=True, blank=True
)
def __str__(self) -> str:
return f"{self.alert_type} - {self.message}"
def __str__(self):
return self.message
@property
def assigned_agent(self) -> "Optional[Agent]":
return self.agent
@property
def site(self) -> "Site":
return self.agent.site
@property
def client(self) -> "Client":
return self.agent.client
def resolve(self) -> None:
def resolve(self):
self.resolved = True
self.resolved_on = djangotime.now()
self.snoozed = False
self.snooze_until = None
self.save(update_fields=["resolved", "resolved_on", "snoozed", "snooze_until"])
self.save()
@classmethod
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
if not cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
agent=agent,
alert_type=AlertType.AVAILABILITY,
severity=AlertSeverity.ERROR,
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
),
def create_or_return_availability_alert(cls, agent):
if not cls.objects.filter(agent=agent, resolved=False).exists():
return cls.objects.create(
agent=agent,
alert_type="availability",
severity="error",
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
)
else:
try:
return cast(
Alert,
cls.objects.get(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
return cls.objects.get(agent=agent, resolved=False)
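On the typed side of this method, skip_create turns the helper into a pure lookup: it returns the open availability alert if one exists and None otherwise, creating nothing. A short usage sketch (agent is assumed to be an Agent instance):

# check for an open availability alert without side effects
existing = Alert.create_or_return_availability_alert(agent, skip_create=True)
if existing is None:
    # no open availability alert for this agent
    pass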
@classmethod
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
def create_or_return_check_alert(cls, check):
# need to pass the agent explicitly when the check is a policy check
if not cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_check=check,
agent=agent,
alert_type=AlertType.CHECK,
severity=check.alert_severity
if check.check_type
not in [
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
]
else alert_severity,
message=f"{agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
),
if not cls.objects.filter(assigned_check=check, resolved=False).exists():
return cls.objects.create(
assigned_check=check,
alert_type="check",
severity=check.alert_severity,
message=f"{check.agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_check=check,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
return cls.objects.get(assigned_check=check, resolved=False)
@classmethod
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
def create_or_return_task_alert(cls, task):
if not cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_task=task,
agent=agent,
alert_type=AlertType.TASK,
severity=task.alert_severity,
message=f"{agent.hostname} has task: {task.name} that failed.",
hidden=True,
),
if not cls.objects.filter(assigned_task=task, resolved=False).exists():
return cls.objects.create(
assigned_task=task,
alert_type="task",
severity=task.alert_severity,
message=f"{task.agent.hostname} has task: {task.name} that failed.",
hidden=True,
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_task=task,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
return cls.objects.get(assigned_task=task, resolved=False)
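All three create_or_return_* helpers follow the same shape: create an alert when no open one matches, otherwise return the open one, resolving any stale duplicates except the newest. A generic sketch of that pattern, using only the Alert fields shown above (the helper name is illustrative, not part of this codebase):

def _get_or_create_open_alert(lookup: dict, defaults: dict):
    qs = Alert.objects.filter(resolved=False, **lookup)
    if not qs.exists():
        return Alert.objects.create(**lookup, **defaults)
    last = qs.last()
    # keep only the most recent open alert; resolve the rest
    for alert in qs:
        if alert.pk != last.pk:
            alert.resolve()
    return last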
@classmethod
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
def handle_alert_failure(cls, instance: Union[Agent, AutomatedTask, Check]) -> None:
from agents.models import Agent
from autotasks.models import TaskResult
from checks.models import CheckResult
from autotasks.models import AutomatedTask
from checks.models import Check
# set variables
dashboard_severities = None
@@ -282,7 +150,6 @@ class Alert(models.Model):
alert_interval = None
email_task = None
text_task = None
run_script_action = None
# check what type of instance was passed
if isinstance(instance, Agent):
@@ -296,21 +163,30 @@ class Alert(models.Model):
dashboard_alert = instance.overdue_dashboard_alert
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
alert_severity = AlertSeverity.ERROR
alert_severity = "error"
agent = instance
dashboard_severities = [AlertSeverity.ERROR]
email_severities = [AlertSeverity.ERROR]
text_severities = [AlertSeverity.ERROR]
# set alert_template settings
if alert_template:
dashboard_severities = ["error"]
email_severities = ["error"]
text_severities = ["error"]
always_dashboard = alert_template.agent_always_alert
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, CheckResult):
if instance.should_create_alert(alert_template):
alert = cls.create_or_return_availability_alert(instance)
else:
# check if an open alert already exists
if cls.objects.filter(agent=instance, resolved=False).exists():
alert = cls.objects.get(agent=instance, resolved=False)
else:
alert = None
elif isinstance(instance, Check):
from checks.tasks import (
handle_check_email_alert_task,
handle_check_sms_alert_task,
@@ -319,98 +195,75 @@ class Alert(models.Model):
email_task = handle_check_email_alert_task
text_task = handle_check_sms_alert_task
email_alert = instance.assigned_check.email_alert
text_alert = instance.assigned_check.text_alert
dashboard_alert = instance.assigned_check.dashboard_alert
email_alert = instance.email_alert
text_alert = instance.text_alert
dashboard_alert = instance.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = (
instance.assigned_check.alert_severity
if instance.assigned_check.check_type
not in [
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
]
else instance.alert_severity
)
alert_severity = instance.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = (
alert_template.check_dashboard_alert_severity
if alert_template.check_dashboard_alert_severity
else [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
AlertSeverity.INFO,
]
)
email_severities = (
alert_template.check_email_alert_severity
if alert_template.check_email_alert_severity
else [AlertSeverity.ERROR, AlertSeverity.WARNING]
)
text_severities = (
alert_template.check_text_alert_severity
if alert_template.check_text_alert_severity
else [AlertSeverity.ERROR, AlertSeverity.WARNING]
)
dashboard_severities = alert_template.check_dashboard_alert_severity
email_severities = alert_template.check_email_alert_severity
text_severities = alert_template.check_text_alert_severity
always_dashboard = alert_template.check_always_alert
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
if instance.should_create_alert(alert_template):
alert = cls.create_or_return_check_alert(instance)
else:
# check if an open alert already exists
if cls.objects.filter(assigned_check=instance, resolved=False).exists():
alert = cls.objects.get(assigned_check=instance, resolved=False)
else:
alert = None
elif isinstance(instance, AutomatedTask):
from autotasks.tasks import handle_task_email_alert, handle_task_sms_alert
email_task = handle_task_email_alert
text_task = handle_task_sms_alert
email_alert = instance.task.email_alert
text_alert = instance.task.text_alert
dashboard_alert = instance.task.dashboard_alert
email_alert = instance.email_alert
text_alert = instance.text_alert
dashboard_alert = instance.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.task.alert_severity
alert_severity = instance.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = (
alert_template.task_dashboard_alert_severity
if alert_template.task_dashboard_alert_severity
else [AlertSeverity.ERROR, AlertSeverity.WARNING]
)
email_severities = (
alert_template.task_email_alert_severity
if alert_template.task_email_alert_severity
else [AlertSeverity.ERROR, AlertSeverity.WARNING]
)
text_severities = (
alert_template.task_text_alert_severity
if alert_template.task_text_alert_severity
else [AlertSeverity.ERROR, AlertSeverity.WARNING]
)
dashboard_severities = alert_template.task_dashboard_alert_severity
email_severities = alert_template.task_email_alert_severity
text_severities = alert_template.task_text_alert_severity
always_dashboard = alert_template.task_always_alert
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
if instance.should_create_alert(alert_template):
alert = cls.create_or_return_task_alert(instance)
else:
# check if an open alert already exists
if cls.objects.filter(assigned_task=instance, resolved=False).exists():
alert = cls.objects.get(assigned_task=instance, resolved=False)
else:
alert = None
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
if maintenance_mode or not alert:
return
# check if alert severity changed and update the alert
# check if alert severity changed on check and update the alert
if alert_severity != alert.severity:
alert.severity = alert_severity
alert.save(update_fields=["severity"])
@@ -419,25 +272,19 @@ class Alert(models.Model):
if dashboard_alert or always_dashboard:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and dashboard_severities
and alert.severity in dashboard_severities
):
if alert_template and alert.severity not in dashboard_severities: # type: ignore
pass
else:
alert.hidden = False
alert.save(update_fields=["hidden"])
alert.save()
# send email if enabled
if email_alert or always_email:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and email_severities
and alert.severity in email_severities
):
if alert_template and alert.severity not in email_severities: # type: ignore
pass
else:
email_task.delay(
pk=alert.pk,
alert_interval=alert_interval,
@@ -447,21 +294,13 @@ class Alert(models.Model):
if text_alert or always_text:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and text_severities
and alert.severity in text_severities
):
if alert_template and alert.severity not in text_severities: # type: ignore
pass
else:
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if (
alert_template
and alert_template.action
and run_script_action
and not alert.action_run
):
if alert_template and alert_template.action and run_script_action and not alert.action_run: # type: ignore
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
@@ -472,7 +311,7 @@ class Alert(models.Model):
)
# command was successful
if isinstance(r, dict):
if type(r) == dict:
alert.action_retcode = r["retcode"]
alert.action_stdout = r["stdout"]
alert.action_stderr = r["stderr"]
@@ -482,24 +321,21 @@ class Alert(models.Model):
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
log_type="scripting",
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
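The dashboard, email, and text branches above all apply the same gate: notify when no alert template is set, or when the template's configured severity list (falling back to the defaults assigned earlier) contains the alert's severity. A condensed sketch of that predicate, with illustrative names:

def _should_notify(alert_severity, alert_template, configured, defaults):
    # no template at all -> always notify
    if not alert_template:
        return True
    severities = configured or defaults
    return alert_severity in severities

# e.g. the email gate for a check alert:
# _should_notify(alert.severity, alert_template,
#                alert_template.check_email_alert_severity,
#                [AlertSeverity.ERROR, AlertSeverity.WARNING])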
@classmethod
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
def handle_alert_resolve(cls, instance: Union[Agent, AutomatedTask, Check]) -> None:
from agents.models import Agent
from autotasks.models import TaskResult
from checks.models import CheckResult
from autotasks.models import AutomatedTask
from checks.models import Check
# set variables
email_on_resolved = False
text_on_resolved = False
resolved_email_task = None
resolved_text_task = None
run_script_action = None
# check what type of instance was passed
if isinstance(instance, Agent):
@@ -509,6 +345,7 @@ class Alert(models.Model):
resolved_text_task = agent_recovery_sms_task
alert_template = instance.alert_template
alert = cls.objects.get(agent=instance, resolved=False)
maintenance_mode = instance.maintenance_mode
agent = instance
@@ -517,12 +354,7 @@ class Alert(models.Model):
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
if agent.overdue_email_alert:
email_on_resolved = True
if agent.overdue_text_alert:
text_on_resolved = True
elif isinstance(instance, CheckResult):
elif isinstance(instance, Check):
from checks.tasks import (
handle_resolved_check_email_alert_task,
handle_resolved_check_sms_alert_task,
@@ -532,6 +364,7 @@ class Alert(models.Model):
resolved_text_task = handle_resolved_check_sms_alert_task
alert_template = instance.agent.alert_template
alert = cls.objects.get(assigned_check=instance, resolved=False)
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
@@ -540,7 +373,7 @@ class Alert(models.Model):
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
elif isinstance(instance, AutomatedTask):
from autotasks.tasks import (
handle_resolved_task_email_alert,
handle_resolved_task_sms_alert,
@@ -550,6 +383,7 @@ class Alert(models.Model):
resolved_text_task = handle_resolved_task_sms_alert
alert_template = instance.agent.alert_template
alert = cls.objects.get(assigned_task=instance, resolved=False)
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
@@ -561,10 +395,8 @@ class Alert(models.Model):
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
if maintenance_mode:
return
alert.resolve()
@@ -581,7 +413,7 @@ class Alert(models.Model):
if (
alert_template
and alert_template.resolved_action
and run_script_action
and run_script_action # type: ignore
and not alert.resolved_action_run
):
r = agent.run_script(
@@ -594,7 +426,7 @@ class Alert(models.Model):
)
# command was successful
if isinstance(r, dict):
if type(r) == dict:
alert.resolved_action_retcode = r["retcode"]
alert.resolved_action_stdout = r["stdout"]
alert.resolved_action_stderr = r["stderr"]
@@ -606,11 +438,11 @@ class Alert(models.Model):
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
log_type="scripting",
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: List[str]) -> List[str]:
def parse_script_args(self, args: list[str]):
if not args:
return []
@@ -631,9 +463,9 @@ class Alert(models.Model):
continue
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg)) # type: ignore
except Exception as e:
DebugLog.error(log_type=DebugLogType.SCRIPTING, message=str(e))
DebugLog.error(log_type="scripting", message=str(e))
continue
else:
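parse_script_args substitutes {{...}} placeholders in the configured script arguments with values taken from the alert/agent context; the re.sub call above is the core of it. A standalone sketch of the substitution idea (the resolver callable is illustrative, not the project's implementation):

import re

def substitute_placeholders(args, resolver):
    # resolver("agent.hostname") -> "DESKTOP-123", for example
    out = []
    for arg in args:
        match = re.search(r"\{\{(.*)\}\}", arg)
        if not match:
            out.append(arg)
            continue
        value = resolver(match.group(1).strip())
        out.append(re.sub(r"\{\{.*\}\}", str(value), arg))
    return out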
@@ -703,17 +535,17 @@ class AlertTemplate(BaseAuditModel):
# check alert settings
check_email_alert_severity = ArrayField(
models.CharField(max_length=25, blank=True, choices=AlertSeverity.choices),
models.CharField(max_length=25, blank=True, choices=SEVERITY_CHOICES),
blank=True,
default=list,
)
check_text_alert_severity = ArrayField(
models.CharField(max_length=25, blank=True, choices=AlertSeverity.choices),
models.CharField(max_length=25, blank=True, choices=SEVERITY_CHOICES),
blank=True,
default=list,
)
check_dashboard_alert_severity = ArrayField(
models.CharField(max_length=25, blank=True, choices=AlertSeverity.choices),
models.CharField(max_length=25, blank=True, choices=SEVERITY_CHOICES),
blank=True,
default=list,
)
@@ -727,17 +559,17 @@ class AlertTemplate(BaseAuditModel):
# task alert settings
task_email_alert_severity = ArrayField(
models.CharField(max_length=25, blank=True, choices=AlertSeverity.choices),
models.CharField(max_length=25, blank=True, choices=SEVERITY_CHOICES),
blank=True,
default=list,
)
task_text_alert_severity = ArrayField(
models.CharField(max_length=25, blank=True, choices=AlertSeverity.choices),
models.CharField(max_length=25, blank=True, choices=SEVERITY_CHOICES),
blank=True,
default=list,
)
task_dashboard_alert_severity = ArrayField(
models.CharField(max_length=25, blank=True, choices=AlertSeverity.choices),
models.CharField(max_length=25, blank=True, choices=SEVERITY_CHOICES),
blank=True,
default=list,
)
@@ -763,22 +595,11 @@ class AlertTemplate(BaseAuditModel):
"agents.Agent", related_name="alert_exclusions", blank=True
)
def __str__(self) -> str:
def __str__(self):
return self.name
def is_agent_excluded(self, agent: "Agent") -> bool:
return (
agent in self.excluded_agents.all()
or agent.site in self.excluded_sites.all()
or agent.client in self.excluded_clients.all()
or agent.monitoring_type == AgentMonType.WORKSTATION
and self.exclude_workstations
or agent.monitoring_type == AgentMonType.SERVER
and self.exclude_servers
)
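is_agent_excluded combines explicit agent/site/client exclusions with the workstation and server toggles; because and binds tighter than or, the last four terms read as (workstation and exclude_workstations) or (server and exclude_servers). A hedged call-site sketch (the surrounding logic is hypothetical):

# skip the template entirely for excluded agents
if alert_template and alert_template.is_agent_excluded(agent):
    alert_template = None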
@staticmethod
def serialize(alert_template: AlertTemplate) -> Dict[str, Any]:
def serialize(alert_template):
# serializes the agent and returns json
from .serializers import AlertTemplateAuditSerializer

View File

@@ -1,15 +1,10 @@
from typing import TYPE_CHECKING
from django.shortcuts import get_object_or_404
from rest_framework import permissions
from tacticalrmm.permissions import _has_perm, _has_perm_on_agent
if TYPE_CHECKING:
from accounts.models import User
def _has_perm_on_alert(user: "User", id: int) -> bool:
def _has_perm_on_alert(user, id: int):
from alerts.models import Alert
role = user.role
@@ -24,6 +19,10 @@ def _has_perm_on_alert(user: "User", id: int) -> bool:
if alert.agent:
agent_id = alert.agent.agent_id
elif alert.assigned_check:
agent_id = alert.assigned_check.agent.agent_id
elif alert.assigned_task:
agent_id = alert.assigned_task.agent.agent_id
else:
return True
@@ -31,7 +30,7 @@ def _has_perm_on_alert(user: "User", id: int) -> bool:
class AlertPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
if r.method == "GET" or r.method == "PATCH":
if "pk" in view.kwargs.keys():
return _has_perm(r, "can_list_alerts") and _has_perm_on_alert(
@@ -49,7 +48,7 @@ class AlertPerms(permissions.BasePermission):
class AlertTemplatePerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
if r.method == "GET":
return _has_perm(r, "can_list_alerttemplates")
else:

View File

@@ -3,17 +3,103 @@ from rest_framework.serializers import ModelSerializer, ReadOnlyField
from automation.serializers import PolicySerializer
from clients.serializers import ClientMinimumSerializer, SiteMinimumSerializer
from tacticalrmm.utils import get_default_timezone
from .models import Alert, AlertTemplate
class AlertSerializer(ModelSerializer):
hostname = ReadOnlyField(source="assigned_agent.hostname")
agent_id = ReadOnlyField(source="assigned_agent.agent_id")
client = ReadOnlyField(source="client.name")
site = ReadOnlyField(source="site.name")
alert_time = ReadOnlyField()
hostname = SerializerMethodField()
agent_id = SerializerMethodField()
client = SerializerMethodField()
site = SerializerMethodField()
alert_time = SerializerMethodField()
resolve_on = SerializerMethodField()
snoozed_until = SerializerMethodField()
def get_agent_id(self, instance):
if instance.alert_type == "availability":
return instance.agent.agent_id if instance.agent else ""
elif instance.alert_type == "check":
return (
instance.assigned_check.agent.agent_id
if instance.assigned_check
else ""
)
elif instance.alert_type == "task":
return (
instance.assigned_task.agent.agent_id if instance.assigned_task else ""
)
else:
return ""
def get_hostname(self, instance):
if instance.alert_type == "availability":
return instance.agent.hostname if instance.agent else ""
elif instance.alert_type == "check":
return (
instance.assigned_check.agent.hostname
if instance.assigned_check
else ""
)
elif instance.alert_type == "task":
return (
instance.assigned_task.agent.hostname if instance.assigned_task else ""
)
else:
return ""
def get_client(self, instance):
if instance.alert_type == "availability":
return instance.agent.client.name if instance.agent else ""
elif instance.alert_type == "check":
return (
instance.assigned_check.agent.client.name
if instance.assigned_check
else ""
)
elif instance.alert_type == "task":
return (
instance.assigned_task.agent.client.name
if instance.assigned_task
else ""
)
else:
return ""
def get_site(self, instance):
if instance.alert_type == "availability":
return instance.agent.site.name if instance.agent else ""
elif instance.alert_type == "check":
return (
instance.assigned_check.agent.site.name
if instance.assigned_check
else ""
)
elif instance.alert_type == "task":
return (
instance.assigned_task.agent.site.name if instance.assigned_task else ""
)
else:
return ""
def get_alert_time(self, instance):
if instance.alert_time:
return instance.alert_time.astimezone(get_default_timezone()).timestamp()
else:
return None
def get_resolve_on(self, instance):
if instance.resolved_on:
return instance.resolved_on.astimezone(get_default_timezone()).timestamp()
else:
return None
def get_snoozed_until(self, instance):
if instance.snooze_until:
return instance.snooze_until.astimezone(get_default_timezone()).timestamp()
return None
class Meta:
model = Alert
@@ -35,11 +121,11 @@ class AlertTemplateSerializer(ModelSerializer):
fields = "__all__"
def get_applied_count(self, instance):
return (
instance.policies.count()
+ instance.clients.count()
+ instance.sites.count()
)
count = 0
count += instance.policies.count()
count += instance.clients.count()
count += instance.sites.count()
return count
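One side of this serializer resolves hostname/client/site through model properties with ReadOnlyField, the other computes the same values per alert_type with SerializerMethodField. A minimal DRF sketch of the two equivalent spellings (the serializer class name is illustrative):

from alerts.models import Alert
from rest_framework.serializers import (
    ModelSerializer,
    ReadOnlyField,
    SerializerMethodField,
)

class AlertHostnameExample(ModelSerializer):
    # property-backed: delegates to Alert.assigned_agent.hostname
    hostname = ReadOnlyField(source="assigned_agent.hostname")
    # method-backed: same value, computed explicitly
    hostname_explicit = SerializerMethodField()

    def get_hostname_explicit(self, instance):
        return instance.agent.hostname if instance.agent else ""

    class Meta:
        model = Alert
        fields = ("hostname", "hostname_explicit")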
class AlertTemplateRelationSerializer(ModelSerializer):

View File

@@ -1,13 +1,11 @@
from django.utils import timezone as djangotime
from agents.models import Agent
from tacticalrmm.celery import app
from .models import Alert
@app.task
def unsnooze_alerts() -> str:
from .models import Alert
Alert.objects.filter(snoozed=True, snooze_until__lte=djangotime.now()).update(
snoozed=False, snooze_until=None
)
@@ -16,10 +14,10 @@ def unsnooze_alerts() -> str:
@app.task
def cache_agents_alert_template() -> str:
for agent in Agent.objects.only(
"pk", "site", "policy", "alert_template"
).select_related("site", "policy", "alert_template"):
def cache_agents_alert_template():
from agents.models import Agent
for agent in Agent.objects.only("pk"):
agent.set_alert_template()
return "ok"
@@ -27,6 +25,8 @@ def cache_agents_alert_template() -> str:
@app.task
def prune_resolved_alerts(older_than_days: int) -> str:
from .models import Alert
Alert.objects.filter(resolved=True).filter(
alert_time__lt=djangotime.now() - djangotime.timedelta(days=older_than_days)
).delete()
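unsnooze_alerts and prune_resolved_alerts are plain Celery tasks meant to run on a schedule. A hypothetical beat configuration wiring them up (schedule values and dictionary keys are illustrative, not taken from this repo's settings; task paths assume the alerts app's tasks module):

from celery.schedules import crontab

CELERY_BEAT_SCHEDULE = {
    "unsnooze-alerts": {
        "task": "alerts.tasks.unsnooze_alerts",
        "schedule": crontab(minute="*/5"),
    },
    "prune-resolved-alerts": {
        "task": "alerts.tasks.prune_resolved_alerts",
        "schedule": crontab(hour=2, minute=0),
        "args": (30,),  # delete resolved alerts older than 30 days
    },
}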

File diff suppressed because it is too large

View File

@@ -7,7 +7,7 @@ from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from tacticalrmm.helpers import notify_error
from tacticalrmm.utils import notify_error
from .models import Alert, AlertTemplate
from .permissions import AlertPerms, AlertTemplatePerms
@@ -92,7 +92,7 @@ class GetAddAlerts(APIView):
)
alerts = (
Alert.objects.filter_by_role(request.user) # type: ignore
Alert.objects.filter_by_role(request.user)
.filter(clientFilter)
.filter(severityFilter)
.filter(resolvedFilter)
@@ -102,7 +102,7 @@ class GetAddAlerts(APIView):
return Response(AlertSerializer(alerts, many=True).data)
else:
alerts = Alert.objects.filter_by_role(request.user) # type: ignore
alerts = Alert.objects.filter_by_role(request.user)
return Response(AlertSerializer(alerts, many=True).data)
def post(self, request):

View File

@@ -1,8 +1,12 @@
import json
import os
from unittest.mock import patch
from django.conf import settings
from django.utils import timezone as djangotime
from model_bakery import baker
from autotasks.models import TaskResult
from tacticalrmm.constants import CustomFieldModel, CustomFieldType, TaskStatus
from autotasks.models import AutomatedTask
from tacticalrmm.test import TacticalTestCase
@@ -13,53 +17,46 @@ class TestAPIv3(TacticalTestCase):
self.agent = baker.make_recipe("agents.agent")
def test_get_checks(self):
agent = baker.make_recipe("agents.agent")
url = f"/api/v3/{agent.agent_id}/checkrunner/"
url = f"/api/v3/{self.agent.agent_id}/checkrunner/"
# add a check
check1 = baker.make_recipe("checks.ping_check", agent=agent)
check_result1 = baker.make(
"checks.CheckResult", agent=agent, assigned_check=check1
)
check1 = baker.make_recipe("checks.ping_check", agent=self.agent)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data["check_interval"], self.agent.check_interval)
self.assertEqual(len(r.data["checks"]), 1)
self.assertEqual(r.data["check_interval"], self.agent.check_interval) # type: ignore
self.assertEqual(len(r.data["checks"]), 1) # type: ignore
# override check run interval
check2 = baker.make_recipe(
"checks.diskspace_check", agent=agent, run_interval=20
)
check_result2 = baker.make(
"checks.CheckResult", agent=agent, assigned_check=check2
"checks.ping_check", agent=self.agent, run_interval=20
)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(len(r.data["checks"]), 2)
self.assertEqual(r.data["check_interval"], 20)
self.assertEqual(r.data["check_interval"], 20) # type: ignore
self.assertEqual(len(r.data["checks"]), 2) # type: ignore
# Set last_run on both checks and should return an empty list
check_result1.last_run = djangotime.now()
check_result1.save()
check_result2.last_run = djangotime.now()
check_result2.save()
check1.last_run = djangotime.now()
check1.save()
check2.last_run = djangotime.now()
check2.save()
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data["check_interval"], 20)
self.assertFalse(r.data["checks"])
self.assertEqual(r.data["check_interval"], 20) # type: ignore
self.assertFalse(r.data["checks"]) # type: ignore
# set last_run greater than interval
check_result1.last_run = djangotime.now() - djangotime.timedelta(seconds=200)
check_result1.save()
check_result2.last_run = djangotime.now() - djangotime.timedelta(seconds=200)
check_result2.save()
check1.last_run = djangotime.now() - djangotime.timedelta(seconds=200)
check1.save()
check2.last_run = djangotime.now() - djangotime.timedelta(seconds=200)
check2.save()
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data["check_interval"], 20)
self.assertEqual(len(r.data["checks"]), 2)
self.assertEqual(r.data["check_interval"], 20) # type: ignore
self.assertEquals(len(r.data["checks"]), 2) # type: ignore
url = "/api/v3/Maj34ACb324j234asdj2n34kASDjh34-DESKTOPTEST123/checkrunner/"
r = self.client.get(url)
@@ -67,6 +64,24 @@ class TestAPIv3(TacticalTestCase):
self.check_not_authenticated("get", url)
def test_sysinfo(self):
# TODO replace this with golang wmi sample data
url = "/api/v3/sysinfo/"
with open(
os.path.join(
settings.BASE_DIR, "tacticalrmm/test_data/wmi_python_agent.json"
)
) as f:
wmi_py = json.load(f)
payload = {"agent_id": self.agent.agent_id, "sysinfo": wmi_py}
r = self.client.patch(url, payload, format="json")
self.assertEqual(r.status_code, 200)
self.check_not_authenticated("patch", url)
def test_checkrunner_interval(self):
url = f"/api/v3/{self.agent.agent_id}/checkinterval/"
r = self.client.get(url, format="json")
@@ -115,31 +130,61 @@ class TestAPIv3(TacticalTestCase):
self.assertIsInstance(r.json()["check_interval"], int)
self.assertEqual(len(r.json()["checks"]), 15)
def test_task_runner_get(self):
r = self.client.get("/api/v3/500/asdf9df9dfdf/taskrunner/")
@patch("apiv3.views.reload_nats")
def test_agent_recovery(self, reload_nats):
reload_nats.return_value = "ok"
r = self.client.get("/api/v3/34jahsdkjasncASDjhg2b3j4r/recover/")
self.assertEqual(r.status_code, 404)
script = baker.make("scripts.script")
# setup data
task_actions = [
{"type": "cmd", "command": "whoami", "timeout": 10, "shell": "cmd"},
{
"type": "script",
"script": script.id,
"script_args": ["test"],
"timeout": 30,
},
{"type": "script", "script": 3, "script_args": [], "timeout": 30},
]
agent = baker.make_recipe("agents.agent")
task = baker.make("autotasks.AutomatedTask", agent=agent, actions=task_actions)
url = f"/api/v3/{task.pk}/{agent.agent_id}/taskrunner/"
agent = baker.make_recipe("agents.online_agent")
url = f"/api/v3/{agent.agent_id}/recovery/"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json(), {"mode": "pass", "shellcmd": ""})
reload_nats.assert_not_called()
baker.make("agents.RecoveryAction", agent=agent, mode="mesh")
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json(), {"mode": "mesh", "shellcmd": ""})
reload_nats.assert_not_called()
baker.make(
"agents.RecoveryAction",
agent=agent,
mode="command",
command="shutdown /r /t 5 /f",
)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(
r.json(), {"mode": "command", "shellcmd": "shutdown /r /t 5 /f"}
)
reload_nats.assert_not_called()
baker.make("agents.RecoveryAction", agent=agent, mode="rpc")
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json(), {"mode": "rpc", "shellcmd": ""})
reload_nats.assert_called_once()
def test_task_runner_get(self):
from autotasks.serializers import TaskGOGetSerializer
r = self.client.get("/api/v3/500/asdf9df9dfdf/taskrunner/")
self.assertEqual(r.status_code, 404)
# setup data
agent = baker.make_recipe("agents.agent")
script = baker.make_recipe("scripts.script")
task = baker.make("autotasks.AutomatedTask", agent=agent, script=script)
url = f"/api/v3/{task.pk}/{agent.agent_id}/taskrunner/" # type: ignore
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(TaskGOGetSerializer(task).data, r.data) # type: ignore
def test_task_runner_results(self):
from agents.models import AgentCustomField
@@ -150,9 +195,8 @@ class TestAPIv3(TacticalTestCase):
# setup data
agent = baker.make_recipe("agents.agent")
task = baker.make("autotasks.AutomatedTask", agent=agent)
task_result = baker.make("autotasks.TaskResult", agent=agent, task=task)
url = f"/api/v3/{task.pk}/{agent.agent_id}/taskrunner/"
url = f"/api/v3/{task.pk}/{agent.agent_id}/taskrunner/" # type: ignore
# test passing task
data = {
@@ -164,9 +208,7 @@ class TestAPIv3(TacticalTestCase):
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertTrue(
TaskResult.objects.get(pk=task_result.pk).status == TaskStatus.PASSING
)
self.assertTrue(AutomatedTask.objects.get(pk=task.pk).status == "passing") # type: ignore
# test failing task
data = {
@@ -178,33 +220,20 @@ class TestAPIv3(TacticalTestCase):
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertTrue(
TaskResult.objects.get(pk=task_result.pk).status == TaskStatus.FAILING
)
self.assertTrue(AutomatedTask.objects.get(pk=task.pk).status == "failing") # type: ignore
# test collector task
text = baker.make(
"core.CustomField",
model=CustomFieldModel.AGENT,
type=CustomFieldType.TEXT,
name="Test",
)
text = baker.make("core.CustomField", model="agent", type="text", name="Test")
boolean = baker.make(
"core.CustomField",
model=CustomFieldModel.AGENT,
type=CustomFieldType.CHECKBOX,
name="Test1",
"core.CustomField", model="agent", type="checkbox", name="Test1"
)
multiple = baker.make(
"core.CustomField",
model=CustomFieldModel.AGENT,
type=CustomFieldType.MULTIPLE,
name="Test2",
"core.CustomField", model="agent", type="multiple", name="Test2"
)
# test text fields
task.custom_field = text
task.save()
task.custom_field = text # type: ignore
task.save() # type: ignore
# test failing failing with stderr
data = {
@@ -216,9 +245,7 @@ class TestAPIv3(TacticalTestCase):
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertTrue(
TaskResult.objects.get(pk=task_result.pk).status == TaskStatus.FAILING
)
self.assertTrue(AutomatedTask.objects.get(pk=task.pk).status == "failing") # type: ignore
# test saving to text field
data = {
@@ -230,17 +257,12 @@ class TestAPIv3(TacticalTestCase):
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(
TaskResult.objects.get(pk=task_result.pk).status, TaskStatus.PASSING
)
self.assertEqual(
AgentCustomField.objects.get(field=text, agent=task.agent).value,
"the last line",
)
self.assertEqual(AutomatedTask.objects.get(pk=task.pk).status, "passing") # type: ignore
self.assertEqual(AgentCustomField.objects.get(field=text, agent=task.agent).value, "the last line") # type: ignore
# test saving to checkbox field
task.custom_field = boolean
task.save()
task.custom_field = boolean # type: ignore
task.save() # type: ignore
data = {
"stdout": "1",
@@ -251,16 +273,12 @@ class TestAPIv3(TacticalTestCase):
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(
TaskResult.objects.get(pk=task_result.pk).status, TaskStatus.PASSING
)
self.assertTrue(
AgentCustomField.objects.get(field=boolean, agent=task.agent).value
)
self.assertEqual(AutomatedTask.objects.get(pk=task.pk).status, "passing") # type: ignore
self.assertTrue(AgentCustomField.objects.get(field=boolean, agent=task.agent).value) # type: ignore
# test saving to multiple field with commas
task.custom_field = multiple
task.save()
task.custom_field = multiple # type: ignore
task.save() # type: ignore
data = {
"stdout": "this,is,an,array",
@@ -271,13 +289,8 @@ class TestAPIv3(TacticalTestCase):
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(
TaskResult.objects.get(pk=task_result.pk).status, TaskStatus.PASSING
)
self.assertEqual(
AgentCustomField.objects.get(field=multiple, agent=task.agent).value,
["this", "is", "an", "array"],
)
self.assertEqual(AutomatedTask.objects.get(pk=task.pk).status, "passing") # type: ignore
self.assertEqual(AgentCustomField.objects.get(field=multiple, agent=task.agent).value, ["this", "is", "an", "array"]) # type: ignore
# test multiple with a single value
data = {
@@ -289,10 +302,5 @@ class TestAPIv3(TacticalTestCase):
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(
TaskResult.objects.get(pk=task_result.pk).status, TaskStatus.PASSING
)
self.assertEqual(
AgentCustomField.objects.get(field=multiple, agent=task.agent).value,
["this"],
)
self.assertEqual(AutomatedTask.objects.get(pk=task.pk).status, "passing") # type: ignore
self.assertEqual(AgentCustomField.objects.get(field=multiple, agent=task.agent).value, ["this"]) # type: ignore

View File

@@ -9,6 +9,7 @@ urlpatterns = [
path("<str:agentid>/checkinterval/", views.CheckRunnerInterval.as_view()),
path("<int:pk>/<str:agentid>/taskrunner/", views.TaskRunner.as_view()),
path("meshexe/", views.MeshExe.as_view()),
path("sysinfo/", views.SysInfo.as_view()),
path("newagent/", views.NewAgent.as_view()),
path("software/", views.Software.as_view()),
path("installer/", views.Installer.as_view()),
@@ -18,5 +19,6 @@ urlpatterns = [
path("winupdates/", views.WinUpdates.as_view()),
path("superseded/", views.SupersededWinUpdate.as_view()),
path("<int:pk>/chocoresult/", views.ChocoResult.as_view()),
path("<str:agentid>/recovery/", views.AgentRecovery.as_view()),
path("<int:pk>/<str:agentid>/histresult/", views.AgentHistoryResult.as_view()),
]
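These apiv3 routes are called by the agent using DRF token authentication. A hedged sketch of hitting the checkrunner endpoint above (host and token are placeholders):

import requests

API = "https://api.example.com"  # placeholder
TOKEN = "per-agent-token"        # created by the NewAgent view
AGENT_ID = "Maj34ACb324j234asdj2n34kASDjh34-DESKTOPTEST123"

r = requests.get(
    f"{API}/api/v3/{AGENT_ID}/checkrunner/",
    headers={"Authorization": f"Token {TOKEN}"},
)
r.raise_for_status()
data = r.json()  # {"agent": ..., "check_interval": ..., "checks": [...]}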

View File

@@ -1,7 +1,9 @@
import asyncio
import os
import time
from django.conf import settings
from django.db.models import Prefetch
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from packaging import version as pyver
@@ -14,33 +16,13 @@ from rest_framework.views import APIView
from accounts.models import User
from agents.models import Agent, AgentHistory
from agents.serializers import AgentHistorySerializer
from autotasks.models import AutomatedTask, TaskResult
from autotasks.serializers import TaskGOGetSerializer, TaskResultSerializer
from checks.constants import CHECK_DEFER, CHECK_RESULT_DEFER
from checks.models import Check, CheckResult
from autotasks.models import AutomatedTask
from autotasks.serializers import TaskGOGetSerializer, TaskRunnerPatchSerializer
from checks.models import Check
from checks.serializers import CheckRunnerGetSerializer
from core.utils import (
download_mesh_agent,
get_core_settings,
get_mesh_device_id,
get_mesh_ws_url,
)
from logs.models import DebugLog, PendingAction
from logs.models import PendingAction, DebugLog
from software.models import InstalledSoftware
from tacticalrmm.constants import (
AGENT_DEFER,
AgentMonType,
AgentPlat,
AuditActionType,
AuditObjType,
CheckStatus,
DebugLogType,
GoArch,
MeshAgentIdent,
PAStatus,
)
from tacticalrmm.helpers import notify_error
from tacticalrmm.utils import reload_nats
from tacticalrmm.utils import notify_error, reload_nats
from winupdate.models import WinUpdate, WinUpdatePolicy
@@ -51,12 +33,11 @@ class CheckIn(APIView):
# called once during tacticalagent windows service startup
def post(self, request):
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=request.data["agent_id"]
)
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
if not agent.choco_installed:
asyncio.run(agent.nats_cmd({"func": "installchoco"}, wait=False))
time.sleep(0.5)
asyncio.run(agent.nats_cmd({"func": "getwinupdates"}, wait=False))
return Response("ok")
@@ -66,9 +47,7 @@ class SyncMeshNodeID(APIView):
permission_classes = [IsAuthenticated]
def post(self, request):
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=request.data["agent_id"]
)
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
if agent.mesh_node_id != request.data["nodeid"]:
agent.mesh_node_id = request.data["nodeid"]
agent.save(update_fields=["mesh_node_id"])
@@ -81,9 +60,7 @@ class Choco(APIView):
permission_classes = [IsAuthenticated]
def post(self, request):
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=request.data["agent_id"]
)
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
agent.choco_installed = request.data["installed"]
agent.save(update_fields=["choco_installed"])
return Response("ok")
@@ -94,9 +71,7 @@ class WinUpdates(APIView):
permission_classes = [IsAuthenticated]
def put(self, request):
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=request.data["agent_id"]
)
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
needs_reboot: bool = request.data["needs_reboot"]
agent.needs_reboot = needs_reboot
@@ -114,7 +89,7 @@ class WinUpdates(APIView):
asyncio.run(agent.nats_cmd({"func": "rebootnow"}, wait=False))
DebugLog.info(
agent=agent,
log_type=DebugLogType.WIN_UPDATES,
log_type="windows_updates",
message=f"{agent.hostname} is rebooting after updates were installed.",
)
@@ -122,13 +97,8 @@ class WinUpdates(APIView):
return Response("ok")
def patch(self, request):
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=request.data["agent_id"]
)
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
u = agent.winupdates.filter(guid=request.data["guid"]).last() # type: ignore
if not u:
raise WinUpdate.DoesNotExist
success: bool = request.data["success"]
if success:
u.result = "success"
@@ -151,14 +121,8 @@ class WinUpdates(APIView):
return Response("ok")
def post(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
updates = request.data["wua_updates"]
if not updates:
return notify_error("Empty payload")
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=request.data["agent_id"]
)
for update in updates:
if agent.winupdates.filter(guid=update["guid"]).exists(): # type: ignore
u = agent.winupdates.filter(guid=update["guid"]).last() # type: ignore
@@ -197,9 +161,7 @@ class SupersededWinUpdate(APIView):
permission_classes = [IsAuthenticated]
def post(self, request):
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=request.data["agent_id"]
)
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
updates = agent.winupdates.filter(guid=request.data["guid"]) # type: ignore
for u in updates:
u.delete()
@@ -212,19 +174,12 @@ class RunChecks(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, agentid):
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER).prefetch_related(
Prefetch("agentchecks", queryset=Check.objects.select_related("script"))
),
agent_id=agentid,
)
checks = agent.get_checks_with_policies(exclude_overridden=True)
agent = get_object_or_404(Agent, agent_id=agentid)
checks = Check.objects.filter(agent__pk=agent.pk, overriden_by_policy=False)
ret = {
"agent": agent.pk,
"check_interval": agent.check_interval,
"checks": CheckRunnerGetSerializer(
checks, context={"agent": agent}, many=True
).data,
"checks": CheckRunnerGetSerializer(checks, many=True).data,
}
return Response(ret)
@@ -234,72 +189,45 @@ class CheckRunner(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, agentid):
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER).prefetch_related(
Prefetch("agentchecks", queryset=Check.objects.select_related("script"))
),
agent_id=agentid,
)
checks = agent.get_checks_with_policies(exclude_overridden=True)
agent = get_object_or_404(Agent, agent_id=agentid)
checks = agent.agentchecks.filter(overriden_by_policy=False) # type: ignore
run_list = [
check
for check in checks
# always run if check hasn't run yet
if not isinstance(check.check_result, CheckResult)
or not check.check_result.last_run
# see if the correct number of seconds has passed
if not check.last_run
# if a check interval is set, see if the correct number of seconds has passed
or (
check.check_result.last_run
< djangotime.now()
- djangotime.timedelta(
seconds=check.run_interval
if check.run_interval
else agent.check_interval
check.run_interval
and (
check.last_run
< djangotime.now()
- djangotime.timedelta(seconds=check.run_interval)
)
)
# if check interval isn't set, make sure the agent's check interval has passed before running
or (
not check.run_interval
and check.last_run
< djangotime.now() - djangotime.timedelta(seconds=agent.check_interval)
)
]
ret = {
"agent": agent.pk,
"check_interval": agent.check_run_interval(),
"checks": CheckRunnerGetSerializer(
run_list, context={"agent": agent}, many=True
).data,
"checks": CheckRunnerGetSerializer(run_list, many=True).data,
}
return Response(ret)
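The run_list comprehension decides whether each check is due: a check that has never run is always due; otherwise last_run is compared against the check's own run_interval or, when that is unset, the agent's check_interval. A flattened sketch of the predicate (names mirror the fields used above):

from django.utils import timezone as djangotime

def check_is_due(last_run, run_interval, agent_check_interval):
    # never ran before -> always due
    if not last_run:
        return True
    interval = run_interval if run_interval else agent_check_interval
    return last_run < djangotime.now() - djangotime.timedelta(seconds=interval)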
def patch(self, request):
if "agent_id" not in request.data.keys():
return notify_error("Agent upgrade required")
check = get_object_or_404(Check, pk=request.data["id"])
check = get_object_or_404(
Check.objects.defer(*CHECK_DEFER),
pk=request.data["id"],
)
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=request.data["agent_id"]
)
# get check result or create it if it doesn't exist
check_result, created = CheckResult.objects.defer(
*CHECK_RESULT_DEFER
).get_or_create(
assigned_check=check,
agent=agent,
)
if created:
check_result.save()
status = check_result.handle_check(request.data, check, agent)
if status == CheckStatus.FAILING and check.assignedtasks.exists():
for task in check.assignedtasks.all():
if task.enabled:
if task.policy:
task.run_win_task(agent)
else:
task.run_win_task()
check.last_run = djangotime.now()
check.save(update_fields=["last_run"])
status = check.handle_check(request.data)
if status == "failing" and check.assignedtask.exists(): # type: ignore
check.handle_assigned_task()
return Response("ok")
@@ -309,10 +237,7 @@ class CheckRunnerInterval(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, agentid):
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER).prefetch_related("agentchecks"),
agent_id=agentid,
)
agent = get_object_or_404(Agent, agent_id=agentid)
return Response(
{"agent": agent.pk, "check_interval": agent.check_run_interval()}
@@ -324,71 +249,65 @@ class TaskRunner(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, pk, agentid):
agent = get_object_or_404(Agent.objects.defer(*AGENT_DEFER), agent_id=agentid)
_ = get_object_or_404(Agent, agent_id=agentid)
task = get_object_or_404(AutomatedTask, pk=pk)
return Response(TaskGOGetSerializer(task, context={"agent": agent}).data)
return Response(TaskGOGetSerializer(task).data)
def patch(self, request, pk, agentid):
from alerts.models import Alert
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER),
agent_id=agentid,
)
task = get_object_or_404(
AutomatedTask.objects.select_related("custom_field"), pk=pk
)
# get task result or create it if it doesn't exist
try:
task_result = (
TaskResult.objects.select_related("agent")
.defer("agent__services", "agent__wmi_detail")
.get(task=task, agent=agent)
)
serializer = TaskResultSerializer(
data=request.data, instance=task_result, partial=True
)
except TaskResult.DoesNotExist:
serializer = TaskResultSerializer(data=request.data, partial=True)
agent = get_object_or_404(Agent, agent_id=agentid)
task = get_object_or_404(AutomatedTask, pk=pk)
serializer = TaskRunnerPatchSerializer(
instance=task, data=request.data, partial=True
)
serializer.is_valid(raise_exception=True)
task_result = serializer.save(last_run=djangotime.now())
new_task = serializer.save(last_run=djangotime.now())
AgentHistory.objects.create(
agent=agent,
type=AuditActionType.TASK_RUN,
command=task.name,
type="task_run",
script=task.script,
script_results=request.data,
)
# check if task is a collector and update the custom field
if task.custom_field:
if not task_result.stderr:
if not task.stderr:
task_result.save_collector_results()
task.save_collector_results()
status = CheckStatus.PASSING
status = "passing"
else:
status = CheckStatus.FAILING
status = "failing"
else:
status = (
CheckStatus.FAILING if task_result.retcode != 0 else CheckStatus.PASSING
)
status = "failing" if task.retcode != 0 else "passing"
if task_result:
task_result.status = status
task_result.save(update_fields=["status"])
new_task.status = status
new_task.save()
if status == "passing":
if Alert.objects.filter(assigned_task=new_task, resolved=False).exists():
Alert.handle_alert_resolve(new_task)
else:
task_result.status = status
task.save(update_fields=["status"])
Alert.handle_alert_failure(new_task)
if status == CheckStatus.PASSING:
if Alert.create_or_return_task_alert(task, agent=agent, skip_create=True):
Alert.handle_alert_resolve(task_result)
else:
Alert.handle_alert_failure(task_result)
return Response("ok")
class SysInfo(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
def patch(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
if not isinstance(request.data["sysinfo"], dict):
return notify_error("err")
agent.wmi_detail = request.data["sysinfo"]
agent.save(update_fields=["wmi_detail"])
return Response("ok")
@@ -396,33 +315,25 @@ class MeshExe(APIView):
"""Sends the mesh exe to the installer"""
def post(self, request):
match request.data:
case {"goarch": GoArch.AMD64, "plat": AgentPlat.WINDOWS}:
arch = MeshAgentIdent.WIN64
case {"goarch": GoArch.i386, "plat": AgentPlat.WINDOWS}:
arch = MeshAgentIdent.WIN32
case _:
return notify_error("Arch not specified")
exe = "meshagent.exe" if request.data["arch"] == "64" else "meshagent-x86.exe"
mesh_exe = os.path.join(settings.EXE_DIR, exe)
core = get_core_settings()
if not os.path.exists(mesh_exe):
return notify_error("Mesh Agent executable not found")
try:
uri = get_mesh_ws_url()
mesh_id = asyncio.run(get_mesh_device_id(uri, core.mesh_device_group))
except:
return notify_error("Unable to connect to mesh to get group id information")
if settings.DOCKER_BUILD:
dl_url = f"{settings.MESH_WS_URL.replace('ws://', 'http://')}/meshagents?id={arch}&meshid={mesh_id}&installflags=0"
if settings.DEBUG:
with open(mesh_exe, "rb") as f:
response = HttpResponse(
f.read(),
content_type="application/vnd.microsoft.portable-executable",
)
response["Content-Disposition"] = f"inline; filename={exe}"
return response
else:
dl_url = (
f"{core.mesh_site}/meshagents?id={arch}&meshid={mesh_id}&installflags=0"
)
try:
return download_mesh_agent(dl_url)
except:
return notify_error("Unable to download mesh agent exe")
response = HttpResponse()
response["Content-Disposition"] = f"attachment; filename={exe}"
response["X-Accel-Redirect"] = f"/private/exe/{exe}"
return response
class NewAgent(APIView):
@@ -443,11 +354,11 @@ class NewAgent(APIView):
monitoring_type=request.data["monitoring_type"],
description=request.data["description"],
mesh_node_id=request.data["mesh_node_id"],
goarch=request.data["goarch"],
plat=request.data["plat"],
last_seen=djangotime.now(),
)
agent.save()
agent.salt_id = f"{agent.hostname}-{agent.pk}"
agent.save(update_fields=["salt_id"])
user = User.objects.create_user( # type: ignore
username=request.data["agent_id"],
@@ -457,7 +368,7 @@ class NewAgent(APIView):
token = Token.objects.create(user=user)
if agent.monitoring_type == AgentMonType.WORKSTATION:
if agent.monitoring_type == "workstation":
WinUpdatePolicy(agent=agent, run_time_days=[5, 6]).save()
else:
WinUpdatePolicy(agent=agent).save()
@@ -468,15 +379,20 @@ class NewAgent(APIView):
AuditLog.objects.create(
username=request.user,
agent=agent.hostname,
object_type=AuditObjType.AGENT,
action=AuditActionType.AGENT_INSTALL,
object_type="agent",
action="agent_install",
message=f"{request.user} installed new agent {agent.hostname}",
after_value=Agent.serialize(agent),
debug_info={"ip": request._client_ip},
)
ret = {"pk": agent.pk, "token": token.key}
return Response(ret)
return Response(
{
"pk": agent.pk,
"saltid": f"{agent.hostname}-{agent.pk}",
"token": token.key,
}
)
class Software(APIView):
@@ -506,10 +422,7 @@ class Installer(APIView):
return notify_error("Invalid data")
ver = request.data["version"]
if (
pyver.parse(ver) < pyver.parse(settings.LATEST_AGENT_VER)
and not "-dev" in settings.LATEST_AGENT_VER
):
if pyver.parse(ver) < pyver.parse(settings.LATEST_AGENT_VER):
return notify_error(
f"Old installer detected (version {ver} ). Latest version is {settings.LATEST_AGENT_VER} Please generate a new installer from the RMM"
)
@@ -544,19 +457,53 @@ class ChocoResult(APIView):
action.details["output"] = results
action.details["installed"] = installed
action.status = PAStatus.COMPLETED
action.status = "completed"
action.save(update_fields=["details", "status"])
return Response("ok")
class AgentRecovery(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
def get(self, request, agentid):
agent = get_object_or_404(
Agent.objects.prefetch_related("recoveryactions").only(
"pk", "agent_id", "last_seen"
),
agent_id=agentid,
)
# TODO remove these 2 lines after agent v1.7.0 has been out for a while
# this is handled now by nats-api service
agent.last_seen = djangotime.now()
agent.save(update_fields=["last_seen"])
recovery = agent.recoveryactions.filter(last_run=None).last() # type: ignore
ret = {"mode": "pass", "shellcmd": ""}
if recovery is None:
return Response(ret)
recovery.last_run = djangotime.now()
recovery.save(update_fields=["last_run"])
ret["mode"] = recovery.mode
if recovery.mode == "command":
ret["shellcmd"] = recovery.command
elif recovery.mode == "rpc":
reload_nats()
return Response(ret)
class AgentHistoryResult(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
def patch(self, request, agentid, pk):
hist = get_object_or_404(
AgentHistory.objects.filter(agent__agent_id=agentid), pk=pk
)
_ = get_object_or_404(Agent, agent_id=agentid)
hist = get_object_or_404(AgentHistory, pk=pk)
s = AgentHistorySerializer(instance=hist, data=request.data, partial=True)
s.is_valid(raise_exception=True)
s.save()

View File

@@ -1,21 +1,8 @@
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from django.core.cache import cache
from django.db import models
from agents.models import Agent
from clients.models import Client, Site
from core.models import CoreSettings
from logs.models import BaseAuditModel
from tacticalrmm.constants import (
CORESETTINGS_CACHE_KEY,
AgentMonType,
AgentPlat,
CheckType,
)
if TYPE_CHECKING:
from autotasks.models import AutomatedTask
from checks.models import Check
class Policy(BaseAuditModel):
@@ -40,301 +27,366 @@ class Policy(BaseAuditModel):
"agents.Agent", related_name="policy_exclusions", blank=True
)
def save(self, *args: Any, **kwargs: Any) -> None:
def save(self, *args, **kwargs):
from alerts.tasks import cache_agents_alert_template
from automation.tasks import generate_agent_checks_task
# get old policy if it exists
old_policy: Optional[Policy] = (
type(self).objects.get(pk=self.pk) if self.pk else None
)
old_policy = type(self).objects.get(pk=self.pk) if self.pk else None
super(Policy, self).save(old_model=old_policy, *args, **kwargs)
# check if alert template was changed and cache on agents
# generate agent checks only if active and enforced were changed
if old_policy:
if old_policy.active != self.active or old_policy.enforced != self.enforced:
generate_agent_checks_task.delay(
policy=self.pk,
create_tasks=True,
)
if old_policy.alert_template != self.alert_template:
cache_agents_alert_template.delay()
elif self.alert_template and old_policy.active != self.active:
cache_agents_alert_template.delay()
if old_policy.active != self.active or old_policy.enforced != self.enforced:
cache.delete(CORESETTINGS_CACHE_KEY)
cache.delete_many_pattern("site_workstation_*")
cache.delete_many_pattern("site_server_*")
cache.delete_many_pattern("agent_*")
def delete(self, *args, **kwargs):
cache.delete(CORESETTINGS_CACHE_KEY)
cache.delete_many_pattern("site_workstation_*")
cache.delete_many_pattern("site_server_*")
cache.delete_many_pattern("agent_*")
from automation.tasks import generate_agent_checks_task
super(Policy, self).delete(
*args,
**kwargs,
)
agents = list(self.related_agents().only("pk").values_list("pk", flat=True))
super(Policy, self).delete(*args, **kwargs)
def __str__(self) -> str:
generate_agent_checks_task.delay(agents=agents, create_tasks=True)
def __str__(self):
return self.name
@property
def is_default_server_policy(self) -> bool:
return self.default_server_policy.exists()
def is_default_server_policy(self):
return self.default_server_policy.exists() # type: ignore
@property
def is_default_workstation_policy(self) -> bool:
return self.default_workstation_policy.exists()
def is_default_workstation_policy(self):
return self.default_workstation_policy.exists() # type: ignore
def is_agent_excluded(self, agent: "Agent") -> bool:
def is_agent_excluded(self, agent):
return (
agent in self.excluded_agents.all()
or agent.site in self.excluded_sites.all()
or agent.client in self.excluded_clients.all()
)
def related_agents(
self, mon_type: Optional[str] = None
) -> "models.QuerySet[Agent]":
models.prefetch_related_objects(
[self],
"excluded_agents",
"excluded_sites",
"excluded_clients",
"workstation_clients",
"server_clients",
"workstation_sites",
"server_sites",
"agents",
)
agent_filter = {}
filtered_agents_ids = Agent.objects.none()
if mon_type:
agent_filter["monitoring_type"] = mon_type
excluded_clients_ids = self.excluded_clients.only("pk").values_list(
"id", flat=True
)
excluded_sites_ids = self.excluded_sites.only("pk").values_list("id", flat=True)
excluded_agents_ids = self.excluded_agents.only("pk").values_list(
"id", flat=True
)
if self.is_default_server_policy:
filtered_agents_ids |= (
Agent.objects.exclude(block_policy_inheritance=True)
.exclude(site__block_policy_inheritance=True)
.exclude(site__client__block_policy_inheritance=True)
.exclude(id__in=excluded_agents_ids)
.exclude(site_id__in=excluded_sites_ids)
.exclude(site__client_id__in=excluded_clients_ids)
.filter(monitoring_type=AgentMonType.SERVER)
.only("id")
.values_list("id", flat=True)
)
if self.is_default_workstation_policy:
filtered_agents_ids |= (
Agent.objects.exclude(block_policy_inheritance=True)
.exclude(site__block_policy_inheritance=True)
.exclude(site__client__block_policy_inheritance=True)
.exclude(id__in=excluded_agents_ids)
.exclude(site_id__in=excluded_sites_ids)
.exclude(site__client_id__in=excluded_clients_ids)
.filter(monitoring_type=AgentMonType.WORKSTATION)
.only("id")
.values_list("id", flat=True)
)
# if this is the default policy for both servers and workstations, skip the other calculations
if self.is_default_server_policy and self.is_default_workstation_policy:
return Agent.objects.filter(models.Q(id__in=filtered_agents_ids))
def related_agents(self):
return self.get_related("server") | self.get_related("workstation")
def get_related(self, mon_type):
explicit_agents = (
self.agents.filter(**agent_filter) # type: ignore
.exclude(id__in=excluded_agents_ids)
.exclude(site_id__in=excluded_sites_ids)
.exclude(site__client_id__in=excluded_clients_ids)
self.agents.filter(monitoring_type=mon_type) # type: ignore
.exclude(
pk__in=self.excluded_agents.only("pk").values_list("pk", flat=True)
)
.exclude(site__in=self.excluded_sites.all())
.exclude(site__client__in=self.excluded_clients.all())
)
explicit_clients_qs = Client.objects.none()
explicit_sites_qs = Site.objects.none()
explicit_clients = getattr(self, f"{mon_type}_clients").exclude(
pk__in=self.excluded_clients.all()
)
explicit_sites = getattr(self, f"{mon_type}_sites").exclude(
pk__in=self.excluded_sites.all()
)
if not mon_type or mon_type == AgentMonType.WORKSTATION:
explicit_clients_qs |= self.workstation_clients.exclude( # type: ignore
id__in=excluded_clients_ids
)
explicit_sites_qs |= self.workstation_sites.exclude( # type: ignore
id__in=excluded_sites_ids
)
filtered_agents_pks = Policy.objects.none()
if not mon_type or mon_type == AgentMonType.SERVER:
explicit_clients_qs |= self.server_clients.exclude( # type: ignore
id__in=excluded_clients_ids
)
explicit_sites_qs |= self.server_sites.exclude( # type: ignore
id__in=excluded_sites_ids
)
filtered_agents_ids |= (
filtered_agents_pks |= (
Agent.objects.exclude(block_policy_inheritance=True)
.filter(
site_id__in=[
site.id
for site in explicit_sites_qs
if site.client not in explicit_clients_qs
and site.client.id not in excluded_clients_ids
site__in=[
site
for site in explicit_sites
if site.client not in explicit_clients
and site.client not in self.excluded_clients.all()
],
**agent_filter,
monitoring_type=mon_type,
)
.only("id")
.values_list("id", flat=True)
.values_list("pk", flat=True)
)
filtered_agents_ids |= (
filtered_agents_pks |= (
Agent.objects.exclude(block_policy_inheritance=True)
.exclude(site__block_policy_inheritance=True)
.filter(
site__client__in=explicit_clients_qs,
**agent_filter,
site__client__in=[client for client in explicit_clients],
monitoring_type=mon_type,
)
.only("id")
.values_list("id", flat=True)
.values_list("pk", flat=True)
)
return Agent.objects.filter(
models.Q(id__in=filtered_agents_ids)
| models.Q(id__in=explicit_agents.only("id"))
models.Q(pk__in=filtered_agents_pks)
| models.Q(pk__in=explicit_agents.only("pk"))
)
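# Illustrative summary (not part of the diff): related_agents() unions three sets,
# always excluding agents/sites/clients listed on the policy and anything with
# block_policy_inheritance set:
#   1. agents assigned this policy directly (explicit agents)
#   2. agents in sites explicitly assigned this policy (server_sites / workstation_sites)
#   3. agents in clients explicitly assigned this policy (server_clients / workstation_clients)
# Usage sketch with the new signature: policy.related_agents(mon_type="server")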
@staticmethod
def serialize(policy: "Policy") -> Dict[str, Any]:
def serialize(policy):
# serializes the policy and returns json
from .serializers import PolicyAuditSerializer
return PolicyAuditSerializer(policy).data
@staticmethod
def get_policy_tasks(agent: "Agent") -> "List[AutomatedTask]":
def cascade_policy_tasks(agent):
# List of all tasks to be applied
tasks = list()
added_task_pks = list()
agent_tasks_parent_pks = [
task.parent_task for task in agent.autotasks.filter(managed_by_policy=True)
]
# Get policies applied to the agent, its site, and its client
policies = agent.get_agent_policies()
client = agent.client
site = agent.site
processed_policies = list()
default_policy = None
client_policy = None
site_policy = None
agent_policy = agent.policy
for _, policy in policies.items():
if policy and policy.active and policy.pk not in processed_policies:
processed_policies.append(policy.pk)
for task in policy.autotasks.all():
# Get the Client/Site policy based on whether the agent is a server or workstation
if agent.monitoring_type == "server":
default_policy = CoreSettings.objects.first().server_policy
client_policy = client.server_policy
site_policy = site.server_policy
elif agent.monitoring_type == "workstation":
default_policy = CoreSettings.objects.first().workstation_policy
client_policy = client.workstation_policy
site_policy = site.workstation_policy
# check if client/site/agent is blocking inheritance and blank out policies
if agent.block_policy_inheritance:
site_policy = None
client_policy = None
default_policy = None
elif site.block_policy_inheritance:
client_policy = None
default_policy = None
elif client.block_policy_inheritance:
default_policy = None
if (
agent_policy
and agent_policy.active
and not agent_policy.is_agent_excluded(agent)
):
for task in agent_policy.autotasks.all():
if task.pk not in added_task_pks:
tasks.append(task)
added_task_pks.append(task.pk)
if (
site_policy
and site_policy.active
and not site_policy.is_agent_excluded(agent)
):
for task in site_policy.autotasks.all():
if task.pk not in added_task_pks:
tasks.append(task)
added_task_pks.append(task.pk)
if (
client_policy
and client_policy.active
and not client_policy.is_agent_excluded(agent)
):
for task in client_policy.autotasks.all():
if task.pk not in added_task_pks:
tasks.append(task)
added_task_pks.append(task.pk)
return tasks
if (
default_policy
and default_policy.active
and not default_policy.is_agent_excluded(agent)
):
for task in default_policy.autotasks.all():
if task.pk not in added_task_pks:
tasks.append(task)
added_task_pks.append(task.pk)
# remove policy tasks from the agent that are no longer included in any applied policy
for task in agent.autotasks.filter(
parent_task__in=[
taskpk
for taskpk in agent_tasks_parent_pks
if taskpk not in added_task_pks
]
):
if task.sync_status == "initial":
task.delete()
else:
task.sync_status = "pendingdeletion"
task.save()
# change tasks from pendingdeletion to notsynced if policy was added or changed
agent.autotasks.filter(sync_status="pendingdeletion").filter(
parent_task__in=[taskpk for taskpk in added_task_pks]
).update(sync_status="notsynced")
return [task for task in tasks if task.pk not in agent_tasks_parent_pks]
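# Self-contained sketch (not part of the diff) of the de-duplication pattern used
# above: tasks from higher-precedence policies are added first, and any later task
# with an already-seen pk is skipped.
def _sketch_merge_unique_by_pk(*task_lists):
    seen, merged = set(), []
    for tasks in task_lists:
        for task in tasks:
            if task.pk not in seen:  # works with any object exposing .pk
                seen.add(task.pk)
                merged.append(task)
    return merged
# e.g. _sketch_merge_unique_by_pk(agent_policy_tasks, site_policy_tasks, client_policy_tasks, default_policy_tasks)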
@staticmethod
def get_policy_checks(agent: "Agent") -> "List[Check]":
def cascade_policy_checks(agent):
# Get checks added to agent directly
agent_checks = list(agent.agentchecks.all())
agent_checks = list(agent.agentchecks.filter(managed_by_policy=False))
agent_checks_parent_pks = [
check.parent_check
for check in agent.agentchecks.filter(managed_by_policy=True)
]
# Get policies applied to the agent, its site, and its client
policies = agent.get_agent_policies()
client = agent.client
site = agent.site
default_policy = None
client_policy = None
site_policy = None
agent_policy = agent.policy
if agent.monitoring_type == "server":
default_policy = CoreSettings.objects.first().server_policy
client_policy = client.server_policy
site_policy = site.server_policy
elif agent.monitoring_type == "workstation":
default_policy = CoreSettings.objects.first().workstation_policy
client_policy = client.workstation_policy
site_policy = site.workstation_policy
# check if client/site/agent is blocking inheritance and blank out policies
if agent.block_policy_inheritance:
site_policy = None
client_policy = None
default_policy = None
elif site.block_policy_inheritance:
client_policy = None
default_policy = None
elif client.block_policy_inheritance:
default_policy = None
# Used to hold the policies that will be applied and the order in which they are applied
# Enforced policies are applied first
enforced_checks = list()
policy_checks = list()
processed_policies = list()
if (
agent_policy
and agent_policy.active
and not agent_policy.is_agent_excluded(agent)
):
if agent_policy.enforced:
for check in agent_policy.policychecks.all():
enforced_checks.append(check)
else:
for check in agent_policy.policychecks.all():
policy_checks.append(check)
for _, policy in policies.items():
if policy and policy.active and policy.pk not in processed_policies:
processed_policies.append(policy.pk)
if policy.enforced:
for check in policy.policychecks.all():
enforced_checks.append(check)
else:
for check in policy.policychecks.all():
policy_checks.append(check)
if (
site_policy
and site_policy.active
and not site_policy.is_agent_excluded(agent)
):
if site_policy.enforced:
for check in site_policy.policychecks.all():
enforced_checks.append(check)
else:
for check in site_policy.policychecks.all():
policy_checks.append(check)
if not enforced_checks and not policy_checks:
return []
if (
client_policy
and client_policy.active
and not client_policy.is_agent_excluded(agent)
):
if client_policy.enforced:
for check in client_policy.policychecks.all():
enforced_checks.append(check)
else:
for check in client_policy.policychecks.all():
policy_checks.append(check)
if (
default_policy
and default_policy.active
and not default_policy.is_agent_excluded(agent)
):
if default_policy.enforced:
for check in default_policy.policychecks.all():
enforced_checks.append(check)
else:
for check in default_policy.policychecks.all():
policy_checks.append(check)
# Tracks checks that have already been added, to avoid duplicates
added_diskspace_checks: List[str] = list()
added_ping_checks: List[str] = list()
added_winsvc_checks: List[str] = list()
added_script_checks: List[int] = list()
added_eventlog_checks: List[List[str]] = list()
added_cpuload_checks: List[int] = list()
added_memory_checks: List[int] = list()
added_diskspace_checks = list()
added_ping_checks = list()
added_winsvc_checks = list()
added_script_checks = list()
added_eventlog_checks = list()
added_cpuload_checks = list()
added_memory_checks = list()
# Lists all agent and policy checks that will be returned
diskspace_checks: "List[Check]" = list()
ping_checks: "List[Check]" = list()
winsvc_checks: "List[Check]" = list()
script_checks: "List[Check]" = list()
eventlog_checks: "List[Check]" = list()
cpuload_checks: "List[Check]" = list()
memory_checks: "List[Check]" = list()
overridden_checks: List[int] = list()
# Lists all agent and policy checks that will be created
diskspace_checks = list()
ping_checks = list()
winsvc_checks = list()
script_checks = list()
eventlog_checks = list()
cpuload_checks = list()
memory_checks = list()
# Loop over checks with enforced policy checks first, then agent checks, then non-enforced policy checks
for check in enforced_checks + agent_checks + policy_checks:
if (
check.check_type == CheckType.DISK_SPACE
and agent.plat == AgentPlat.WINDOWS
):
if check.check_type == "diskspace":
# Check if drive letter was already added
if check.disk not in added_diskspace_checks:
added_diskspace_checks.append(check.disk)
# Dont add the check if it is an agent check
# Dont create the check if it is an agent check
if not check.agent:
diskspace_checks.append(check)
elif check.agent:
overridden_checks.append(check.pk)
check.overriden_by_policy = True
check.save()
elif check.check_type == CheckType.PING:
if check.check_type == "ping":
# Check if IP/host was already added
if check.ip not in added_ping_checks:
added_ping_checks.append(check.ip)
# Dont add the check if it is an agent check
# Dont create the check if it is an agent check
if not check.agent:
ping_checks.append(check)
elif check.agent:
overridden_checks.append(check.pk)
check.overriden_by_policy = True
check.save()
elif (
check.check_type == CheckType.CPU_LOAD
and agent.plat == AgentPlat.WINDOWS
):
if check.check_type == "cpuload":
# Check if cpuload list is empty
if not added_cpuload_checks:
added_cpuload_checks.append(check.pk)
added_cpuload_checks.append(check)
# Dont create the check if it is an agent check
if not check.agent:
cpuload_checks.append(check)
elif check.agent:
overridden_checks.append(check.pk)
check.overriden_by_policy = True
check.save()
elif (
check.check_type == CheckType.MEMORY and agent.plat == AgentPlat.WINDOWS
):
if check.check_type == "memory":
# Check if memory check list is empty
if not added_memory_checks:
added_memory_checks.append(check.pk)
added_memory_checks.append(check)
# Dont create the check if it is an agent check
if not check.agent:
memory_checks.append(check)
elif check.agent:
overridden_checks.append(check.pk)
check.overriden_by_policy = True
check.save()
elif (
check.check_type == CheckType.WINSVC and agent.plat == AgentPlat.WINDOWS
):
if check.check_type == "winsvc":
# Check if service name was already added
if check.svc_name not in added_winsvc_checks:
added_winsvc_checks.append(check.svc_name)
@@ -342,11 +394,10 @@ class Policy(BaseAuditModel):
if not check.agent:
winsvc_checks.append(check)
elif check.agent:
overridden_checks.append(check.pk)
check.overriden_by_policy = True
check.save()
elif check.check_type == CheckType.SCRIPT and agent.is_supported_script(
check.script.supported_platforms
):
if check.check_type == "script":
# Check if script id was already added
if check.script.id not in added_script_checks:
added_script_checks.append(check.script.id)
@@ -354,28 +405,20 @@ class Policy(BaseAuditModel):
if not check.agent:
script_checks.append(check)
elif check.agent:
overridden_checks.append(check.pk)
check.overriden_by_policy = True
check.save()
elif (
check.check_type == CheckType.EVENT_LOG
and agent.plat == AgentPlat.WINDOWS
):
if check.check_type == "eventlog":
# Check if events were already added
if [check.log_name, check.event_id] not in added_eventlog_checks:
added_eventlog_checks.append([check.log_name, check.event_id])
if not check.agent:
eventlog_checks.append(check)
elif check.agent:
overridden_checks.append(check.pk)
check.overriden_by_policy = True
check.save()
if overridden_checks:
from checks.models import Check
Check.objects.filter(pk__in=overridden_checks).update(
overridden_by_policy=True
)
return (
final_list = (
diskspace_checks
+ ping_checks
+ cpuload_checks
@@ -384,3 +427,33 @@ class Policy(BaseAuditModel):
+ script_checks
+ eventlog_checks
)
# remove policy checks from agent that fell out of policy scope
agent.agentchecks.filter(
managed_by_policy=True,
parent_check__in=[
checkpk
for checkpk in agent_checks_parent_pks
if checkpk not in [check.pk for check in final_list]
],
).delete()
return [
check for check in final_list if check.pk not in agent_checks_parent_pks
]
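# Illustrative summary (not part of the diff) of how policy checks cascade above:
#   precedence: enforced policy checks -> checks added directly to the agent ->
#               non-enforced policy checks (agent > site > client > default within each group)
#   duplicates are detected per check type: drive letter (diskspace), IP/host (ping),
#   service name (winsvc), script id (script), [log_name, event_id] (eventlog),
#   and only a single cpuload and a single memory check is kept.
#   An agent check superseded by an enforced policy check is flagged overriden_by_policy.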
@staticmethod
def generate_policy_checks(agent):
checks = Policy.cascade_policy_checks(agent)
if checks:
for check in checks:
check.create_policy_check(agent)
@staticmethod
def generate_policy_tasks(agent):
tasks = Policy.cascade_policy_tasks(agent)
if tasks:
for task in tasks:
task.create_policy_task(agent)

View File

@@ -4,7 +4,7 @@ from tacticalrmm.permissions import _has_perm
class AutomationPolicyPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
if r.method == "GET":
return _has_perm(r, "can_list_automation_policies")
else:

View File

@@ -5,8 +5,8 @@ from rest_framework.serializers import (
)
from agents.serializers import AgentHostnameSerializer
from autotasks.models import TaskResult
from checks.models import CheckResult
from autotasks.models import AutomatedTask
from checks.models import Check
from clients.models import Client
from clients.serializers import ClientMinimumSerializer, SiteMinimumSerializer
from winupdate.serializers import WinUpdatePolicySerializer
@@ -96,7 +96,7 @@ class PolicyCheckStatusSerializer(ModelSerializer):
hostname = ReadOnlyField(source="agent.hostname")
class Meta:
model = CheckResult
model = Check
fields = "__all__"
@@ -104,7 +104,7 @@ class PolicyTaskStatusSerializer(ModelSerializer):
hostname = ReadOnlyField(source="agent.hostname")
class Meta:
model = TaskResult
model = AutomatedTask
fields = "__all__"

View File

@@ -1,20 +1,155 @@
from typing import Any, Dict, List, Union
from tacticalrmm.celery import app
@app.task(retry_backoff=5, retry_jitter=True, retry_kwargs={"max_retries": 5})
def generate_agent_checks_task(
policy: int = None,
site: int = None,
client: int = None,
agents: List[int] = list(),
all: bool = False,
create_tasks: bool = False,
) -> Union[str, None]:
from agents.models import Agent
from automation.models import Policy
p = Policy.objects.get(pk=policy) if policy else None
# generate checks on all agents if all is specified or if policy is default server/workstation policy
if (p and p.is_default_server_policy and p.is_default_workstation_policy) or all:
a = Agent.objects.prefetch_related("policy").only("pk", "monitoring_type")
# generate checks on all servers if policy is a default servers policy
elif p and p.is_default_server_policy:
a = Agent.objects.filter(monitoring_type="server").only("pk", "monitoring_type")
# generate checks on all workstations if policy is a default workstations policy
elif p and p.is_default_workstation_policy:
a = Agent.objects.filter(monitoring_type="workstation").only(
"pk", "monitoring_type"
)
# generate checks on a list of supplied agents
elif agents:
a = Agent.objects.filter(pk__in=agents)
# generate checks on agents affected by supplied policy
elif policy:
a = p.related_agents().only("pk")
# generate checks on agents in the specified site
elif site:
a = Agent.objects.filter(site_id=site)
# generate checks on agents under the specified client
elif client:
a = Agent.objects.filter(site__client_id=client)
else:
a = []
for agent in a:
agent.generate_checks_from_policies()
if create_tasks:
agent.generate_tasks_from_policies()
agent.set_alert_template()
return "ok"
@app.task(
acks_late=True, retry_backoff=5, retry_jitter=True, retry_kwargs={"max_retries": 5}
)
# updates policy managed check fields on agents
def update_policy_check_fields_task(check: int) -> str:
from checks.models import Check
c: Check = Check.objects.get(pk=check)
update_fields: Dict[Any, Any] = {}
for field in c.policy_fields_to_copy:
update_fields[field] = getattr(c, field)
Check.objects.filter(parent_check=check).update(**update_fields)
return "ok"
@app.task(retry_backoff=5, retry_jitter=True, retry_kwargs={"max_retries": 5})
# generates policy tasks on agents affected by a policy
def generate_agent_autotasks_task(policy: int = None) -> str:
from agents.models import Agent
from automation.models import Policy
p: Policy = Policy.objects.get(pk=policy)
if p and p.is_default_server_policy and p.is_default_workstation_policy:
agents = Agent.objects.prefetch_related("policy").only("pk", "monitoring_type")
elif p and p.is_default_server_policy:
agents = Agent.objects.filter(monitoring_type="server").only(
"pk", "monitoring_type"
)
elif p and p.is_default_workstation_policy:
agents = Agent.objects.filter(monitoring_type="workstation").only(
"pk", "monitoring_type"
)
else:
agents = p.related_agents().only("pk")
for agent in agents:
agent.generate_tasks_from_policies()
return "ok"
@app.task(
acks_late=True,
retry_backoff=5,
retry_jitter=True,
retry_kwargs={"max_retries": 5},
)
def delete_policy_autotasks_task(task: int) -> str:
from autotasks.models import AutomatedTask
for t in AutomatedTask.objects.filter(parent_task=task):
t.delete_task_on_agent()
return "ok"
@app.task
def run_win_policy_autotasks_task(task: int) -> str:
from autotasks.models import AutomatedTask
try:
policy_task = AutomatedTask.objects.get(pk=task)
except AutomatedTask.DoesNotExist:
return "AutomatedTask not found"
if not policy_task.policy:
return "AutomatedTask must be a policy"
# get related agents from policy
for agent in policy_task.policy.related_agents():
policy_task.run_win_task(agent)
for t in AutomatedTask.objects.filter(parent_task=task):
t.run_win_task()
return "ok"
@app.task(
acks_late=True,
retry_backoff=5,
retry_jitter=True,
retry_kwargs={"max_retries": 5},
)
def update_policy_autotasks_fields_task(task: int, update_agent: bool = False) -> str:
from autotasks.models import AutomatedTask
t = AutomatedTask.objects.get(pk=task)
update_fields: Dict[str, Any] = {}
for field in t.policy_fields_to_copy:
update_fields[field] = getattr(t, field)
AutomatedTask.objects.filter(parent_task=task).update(**update_fields)
if update_agent:
for t in AutomatedTask.objects.filter(parent_task=task).exclude(
sync_status="initial"
):
t.modify_task_on_agent()
return "ok"

File diff suppressed because it is too large

View File

@@ -1,15 +1,15 @@
from django.urls import path
from autotasks.views import GetAddAutoTasks
from checks.views import GetAddChecks
from . import views
from checks.views import GetAddChecks
from autotasks.views import GetAddAutoTasks
urlpatterns = [
path("policies/", views.GetAddPolicies.as_view()),
path("policies/<int:pk>/related/", views.GetRelated.as_view()),
path("policies/overview/", views.OverviewPolicy.as_view()),
path("policies/<int:pk>/", views.GetUpdateDeletePolicy.as_view()),
path("sync/", views.PolicySync.as_view()),
# alias to get policy checks
path("policies/<int:policy>/checks/", GetAddChecks.as_view()),
# alias to get policy tasks

View File

@@ -1,13 +1,13 @@
from agents.models import Agent
from autotasks.models import AutomatedTask
from checks.models import Check
from clients.models import Client
from django.shortcuts import get_object_or_404
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from agents.models import Agent
from autotasks.models import TaskResult
from checks.models import CheckResult
from clients.models import Client
from rest_framework.exceptions import PermissionDenied
from tacticalrmm.utils import notify_error
from tacticalrmm.permissions import _has_perm_on_client, _has_perm_on_site
from winupdate.models import WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
@@ -16,8 +16,8 @@ from .models import Policy
from .permissions import AutomationPolicyPerms
from .serializers import (
PolicyCheckStatusSerializer,
PolicyOverviewSerializer,
PolicyRelatedSerializer,
PolicyOverviewSerializer,
PolicySerializer,
PolicyTableSerializer,
PolicyTaskStatusSerializer,
@@ -28,9 +28,7 @@ class GetAddPolicies(APIView):
permission_classes = [IsAuthenticated, AutomationPolicyPerms]
def get(self, request):
policies = Policy.objects.select_related("alert_template").prefetch_related(
"excluded_agents", "excluded_sites", "excluded_clients"
)
policies = Policy.objects.all()
return Response(
PolicyTableSerializer(
@@ -52,9 +50,9 @@ class GetAddPolicies(APIView):
check.create_policy_check(policy=policy)
tasks = copyPolicy.autotasks.all()
for task in tasks:
if not task.assigned_check:
task.create_policy_task(policy=policy)
task.create_policy_task(policy=policy)
return Response("ok")
@@ -68,12 +66,22 @@ class GetUpdateDeletePolicy(APIView):
return Response(PolicySerializer(policy).data)
def put(self, request, pk):
from .tasks import generate_agent_checks_task
policy = get_object_or_404(Policy, pk=pk)
serializer = PolicySerializer(instance=policy, data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
# check for excluded objects in the request and, if present, regenerate agent checks/tasks
if (
"excluded_sites" in request.data.keys()
or "excluded_clients" in request.data.keys()
or "excluded_agents" in request.data.keys()
):
generate_agent_checks_task.delay(policy=pk, create_tasks=True)
return Response("ok")
def delete(self, request, pk):
@@ -82,11 +90,25 @@ class GetUpdateDeletePolicy(APIView):
return Response("ok")
class PolicySync(APIView):
def post(self, request):
if "policy" in request.data.keys():
from automation.tasks import generate_agent_checks_task
generate_agent_checks_task.delay(
policy=request.data["policy"], create_tasks=True
)
return Response("ok")
else:
return notify_error("The request was invalid")
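# Usage sketch (not part of the diff): PolicySync expects a JSON body containing a
# policy pk. Assuming the automation app's urls are mounted under /automation/ (the
# prefix is not shown in this diff), a sync request might look like:
#   POST /automation/sync/   {"policy": 1}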
class PolicyAutoTask(APIView):
# get status of all tasks
def get(self, request, task):
tasks = TaskResult.objects.filter(task=task)
tasks = AutomatedTask.objects.filter(parent_task=task)
return Response(PolicyTaskStatusSerializer(tasks, many=True).data)
# bulk run win tasks associated with policy
@@ -101,16 +123,14 @@ class PolicyCheck(APIView):
permission_classes = [IsAuthenticated, AutomationPolicyPerms]
def get(self, request, check):
checks = CheckResult.objects.filter(assigned_check=check)
checks = Check.objects.filter(parent_check=check)
return Response(PolicyCheckStatusSerializer(checks, many=True).data)
class OverviewPolicy(APIView):
def get(self, request):
clients = Client.objects.filter_by_role(request.user).select_related(
"workstation_policy", "server_policy"
)
clients = Client.objects.all()
return Response(PolicyOverviewSerializer(clients, many=True).data)
@@ -141,7 +161,7 @@ class UpdatePatchPolicy(APIView):
serializer = WinUpdatePolicySerializer(data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.policy = policy
serializer.policy = policy # type: ignore
serializer.save()
return Response("ok")
@@ -174,7 +194,7 @@ class ResetPatchPolicy(APIView):
raise PermissionDenied()
agents = (
Agent.objects.filter_by_role(request.user) # type: ignore
Agent.objects.filter_by_role(request.user)
.prefetch_related("winupdatepolicy")
.filter(site__client_id=request.data["client"])
)
@@ -183,13 +203,13 @@ class ResetPatchPolicy(APIView):
raise PermissionDenied()
agents = (
Agent.objects.filter_by_role(request.user) # type: ignore
Agent.objects.filter_by_role(request.user)
.prefetch_related("winupdatepolicy")
.filter(site_id=request.data["site"])
)
else:
agents = (
Agent.objects.filter_by_role(request.user) # type: ignore
Agent.objects.filter_by_role(request.user)
.prefetch_related("winupdatepolicy")
.only("pk")
)

View File

@@ -1,6 +1,5 @@
from django.contrib import admin
from .models import AutomatedTask, TaskResult
from .models import AutomatedTask
admin.site.register(AutomatedTask)
admin.site.register(TaskResult)

View File

@@ -1,5 +0,0 @@
from model_bakery.recipe import Recipe
task = Recipe(
"autotasks.AutomatedTask",
)

View File

@@ -1,5 +1,6 @@
from django.core.management.base import BaseCommand
from agents.models import Agent
from autotasks.tasks import remove_orphaned_win_tasks
@@ -7,7 +8,10 @@ class Command(BaseCommand):
help = "Checks for orphaned tasks on all agents and removes them"
def handle(self, *args, **kwargs):
remove_orphaned_win_tasks.s()
agents = Agent.objects.only("pk", "last_seen", "overdue_time", "offline_time")
online = [i for i in agents if i.status == "online"]
for agent in online:
remove_orphaned_win_tasks.delay(agent.pk)
self.stdout.write(
self.style.SUCCESS(

View File

@@ -1,99 +0,0 @@
# Generated by Django 3.2.12 on 2022-04-01 22:44
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("checks", "0025_auto_20210917_1954"),
("agents", "0046_alter_agenthistory_command"),
("autotasks", "0029_alter_automatedtask_task_type"),
]
operations = [
migrations.RemoveField(
model_name="automatedtask",
name="retvalue",
),
migrations.AlterField(
model_name="automatedtask",
name="assigned_check",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="assignedtasks",
to="checks.check",
),
),
migrations.AlterField(
model_name="automatedtask",
name="win_task_name",
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.CreateModel(
name="TaskResult",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("retcode", models.IntegerField(blank=True, null=True)),
("stdout", models.TextField(blank=True, null=True)),
("stderr", models.TextField(blank=True, null=True)),
("execution_time", models.CharField(default="0.0000", max_length=100)),
("last_run", models.DateTimeField(blank=True, null=True)),
(
"status",
models.CharField(
choices=[
("passing", "Passing"),
("failing", "Failing"),
("pending", "Pending"),
],
default="pending",
max_length=30,
),
),
(
"sync_status",
models.CharField(
choices=[
("synced", "Synced With Agent"),
("notsynced", "Waiting On Agent Checkin"),
("pendingdeletion", "Pending Deletion on Agent"),
("initial", "Initial Task Sync"),
],
default="initial",
max_length=100,
),
),
(
"agent",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="taskresults",
to="agents.agent",
),
),
(
"task",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="taskresults",
to="autotasks.automatedtask",
),
),
],
options={
"unique_together": {("agent", "task")},
},
),
]

View File

@@ -1,50 +0,0 @@
# Generated by Django 3.2.12 on 2022-04-01 22:49
from django.db import migrations, transaction
from django.db.utils import IntegrityError
def migrate_task_results(apps, schema_editor):
AutomatedTask = apps.get_model("autotasks", "AutomatedTask")
TaskResult = apps.get_model("autotasks", "TaskResult")
for task in AutomatedTask.objects.exclude(agent=None):
try:
with transaction.atomic():
if task.managed_by_policy:
TaskResult.objects.create(
task_id=task.parent_task,
agent_id=task.agent_id,
retcode=task.retcode,
stdout=task.stdout,
stderr=task.stderr,
execution_time=task.execution_time,
last_run=task.last_run,
status=task.status,
sync_status=task.sync_status,
)
else:
TaskResult.objects.create(
task_id=task.id,
agent_id=task.agent.id,
retcode=task.retcode,
stdout=task.stdout,
stderr=task.stderr,
execution_time=task.execution_time,
last_run=task.last_run,
status=task.status,
sync_status=task.sync_status,
)
except IntegrityError:
continue
class Migration(migrations.Migration):
atomic = False
dependencies = [
("autotasks", "0030_auto_20220401_2244"),
]
operations = [
migrations.RunPython(migrate_task_results),
]

View File

@@ -1,45 +0,0 @@
# Generated by Django 3.2.12 on 2022-04-01 23:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('autotasks', '0031_auto_20220401_2249'),
]
operations = [
migrations.RemoveField(
model_name='automatedtask',
name='execution_time',
),
migrations.RemoveField(
model_name='automatedtask',
name='last_run',
),
migrations.RemoveField(
model_name='automatedtask',
name='parent_task',
),
migrations.RemoveField(
model_name='automatedtask',
name='retcode',
),
migrations.RemoveField(
model_name='automatedtask',
name='status',
),
migrations.RemoveField(
model_name='automatedtask',
name='stderr',
),
migrations.RemoveField(
model_name='automatedtask',
name='stdout',
),
migrations.RemoveField(
model_name='automatedtask',
name='sync_status',
),
]

View File

@@ -1,53 +0,0 @@
# Generated by Django 3.2.12 on 2022-04-02 00:41
from django.db import migrations
from django.utils.timezone import make_aware
from tacticalrmm.constants import TaskType
def migrate_script_data(apps, schema_editor):
AutomatedTask = apps.get_model("autotasks", "AutomatedTask")
# convert autotask to the new format
for task in AutomatedTask.objects.all():
try:
edited = False
# convert scheduled task_type
if task.task_type == TaskType.SCHEDULED:
task.task_type = TaskType.DAILY
task.run_time_date = make_aware(task.run_time_minute.strptime("%H:%M"))
task.daily_interval = 1
edited = True
# convert actions
if not task.actions:
if not task.script:
task.delete()
task.actions = [
{
"type": "script",
"script": task.script.pk,
"script_args": task.script_args,
"timeout": task.timeout,
"name": task.script.name,
}
]
edited = True
if edited:
task.save()
except:
continue
class Migration(migrations.Migration):
dependencies = [
("autotasks", "0032_auto_20220401_2301"),
]
operations = [
migrations.RunPython(migrate_script_data),
]
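# Illustrative before/after (not part of the diff, values made up) for the data
# migration above, which converts the legacy script fields into the actions list:
#   before: task.script_id == 12, task.script_args == ["-x"], task.timeout == 90
#   after:  task.actions == [{"type": "script", "script": 12,
#                             "name": "<script name>", "timeout": 90,
#                             "script_args": ["-x"]}]
# Legacy "scheduled" tasks are also converted to daily tasks with daily_interval=1.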

View File

@@ -1,25 +0,0 @@
# Generated by Django 3.2.12 on 2022-04-02 00:46
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('autotasks', '0033_auto_20220402_0041'),
]
operations = [
migrations.RemoveField(
model_name='automatedtask',
name='script',
),
migrations.RemoveField(
model_name='automatedtask',
name='script_args',
),
migrations.RemoveField(
model_name='automatedtask',
name='timeout',
),
]

View File

@@ -1,39 +0,0 @@
# Generated by Django 4.0.3 on 2022-04-15 18:18
from django.db import migrations
from django.db.models import Count
from autotasks.models import generate_task_name
from tacticalrmm.constants import TaskSyncStatus
def check_for_win_task_name_duplicates(apps, schema_editor):
AutomatedTask = apps.get_model("autotasks", "AutomatedTask")
TaskResult = apps.get_model("autotasks", "TaskResult")
duplicate_tasks = (
AutomatedTask.objects.values("win_task_name")
.annotate(records=Count("win_task_name"))
.filter(records__gt=1)
)
for task in duplicate_tasks:
dups = list(AutomatedTask.objects.filter(win_task_name=task["win_task_name"]))
for x in range(task["records"] - 1):
dups[x].win_task_name = generate_task_name()
dups[x].save(update_fields=["win_task_name"])
# update task_result sync status
TaskResult.objects.filter(task=dups[x]).update(
sync_status=TaskSyncStatus.NOT_SYNCED
)
class Migration(migrations.Migration):
dependencies = [
("autotasks", "0034_auto_20220402_0046"),
]
operations = [
migrations.RunPython(check_for_win_task_name_duplicates),
]

View File

@@ -1,20 +0,0 @@
# Generated by Django 4.0.3 on 2022-04-15 20:52
from django.db import migrations, models
import autotasks.models
class Migration(migrations.Migration):
dependencies = [
('autotasks', '0035_auto_20220415_1818'),
]
operations = [
migrations.AlterField(
model_name='automatedtask',
name='win_task_name',
field=models.CharField(blank=True, default=autotasks.models.generate_task_name, max_length=255, unique=True),
),
]

View File

@@ -1,18 +0,0 @@
# Generated by Django 4.0.5 on 2022-06-29 07:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('autotasks', '0036_alter_automatedtask_win_task_name'),
]
operations = [
migrations.AlterField(
model_name='taskresult',
name='retcode',
field=models.BigIntegerField(blank=True, null=True),
),
]

View File

@@ -1,36 +1,21 @@
import asyncio
import datetime as dt
import random
import string
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from typing import List
from django.db.models.fields.json import JSONField
import pytz
from django.core.cache import cache
from django.core.validators import MaxValueValidator, MinValueValidator
from alerts.models import SEVERITY_CHOICES
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.db.models.fields import DateTimeField
from django.db.models.fields.json import JSONField
from django.db.utils import DatabaseError
from django.core.validators import MaxValueValidator, MinValueValidator
from django.utils import timezone as djangotime
from core.utils import get_core_settings
from logs.models import BaseAuditModel, DebugLog
from tacticalrmm.constants import (
FIELDS_TRIGGER_TASK_UPDATE_AGENT,
POLICY_TASK_FIELDS_TO_COPY,
AlertSeverity,
DebugLogType,
TaskStatus,
TaskSyncStatus,
TaskType,
)
if TYPE_CHECKING:
from automation.models import Policy
from alerts.models import Alert, AlertTemplate
from agents.models import Agent
from checks.models import Check
from tacticalrmm.models import PermissionQuerySet
from packaging import version as pyver
from tacticalrmm.utils import (
bitdays_to_string,
bitmonthdays_to_string,
@@ -39,10 +24,29 @@ from tacticalrmm.utils import (
convert_to_iso_duration,
)
TASK_TYPE_CHOICES = [
("daily", "Daily"),
("weekly", "Weekly"),
("monthly", "Monthly"),
("monthlydow", "Monthly Day of Week"),
("checkfailure", "On Check Failure"),
("manual", "Manual"),
("runonce", "Run Once"),
("scheduled", "Scheduled"), # deprecated
]
def generate_task_name() -> str:
chars = string.ascii_letters
return "TacticalRMM_" + "".join(random.choice(chars) for i in range(35))
SYNC_STATUS_CHOICES = [
("synced", "Synced With Agent"),
("notsynced", "Waiting On Agent Checkin"),
("pendingdeletion", "Pending Deletion on Agent"),
("initial", "Initial Task Sync"),
]
TASK_STATUS_CHOICES = [
("passing", "Passing"),
("failing", "Failing"),
("pending", "Pending"),
]
class AutomatedTask(BaseAuditModel):
@@ -70,21 +74,53 @@ class AutomatedTask(BaseAuditModel):
on_delete=models.SET_NULL,
)
# format -> [{"type": "script", "script": 1, "name": "Script Name", "timeout": 90, "script_args": []}, {"type": "cmd", "command": "whoami", "timeout": 90}]
# deprecated
script = models.ForeignKey(
"scripts.Script",
null=True,
blank=True,
related_name="autoscript",
on_delete=models.SET_NULL,
)
# deprecated
script_args = ArrayField(
models.CharField(max_length=255, null=True, blank=True),
null=True,
blank=True,
default=list,
)
# deprecated
timeout = models.PositiveIntegerField(blank=True, default=120)
# format -> {"actions": [{"type": "script", "script": 1, "name": "Script Name", "timeout": 90, "script_args": []}, {"type": "cmd", "command": "whoami", "timeout": 90}]}
actions = JSONField(default=list)
assigned_check = models.ForeignKey(
"checks.Check",
null=True,
blank=True,
related_name="assignedtasks",
related_name="assignedtask",
on_delete=models.SET_NULL,
)
name = models.CharField(max_length=255)
collector_all_output = models.BooleanField(default=False)
managed_by_policy = models.BooleanField(default=False)
parent_task = models.PositiveIntegerField(null=True, blank=True)
retvalue = models.TextField(null=True, blank=True)
retcode = models.IntegerField(null=True, blank=True)
stdout = models.TextField(null=True, blank=True)
stderr = models.TextField(null=True, blank=True)
execution_time = models.CharField(max_length=100, default="0.0000")
last_run = models.DateTimeField(null=True, blank=True)
enabled = models.BooleanField(default=True)
continue_on_error = models.BooleanField(default=True)
status = models.CharField(
max_length=30, choices=TASK_STATUS_CHOICES, default="pending"
)
sync_status = models.CharField(
max_length=100, choices=SYNC_STATUS_CHOICES, default="initial"
)
alert_severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
max_length=30, choices=SEVERITY_CHOICES, default="info"
)
email_alert = models.BooleanField(default=False)
text_alert = models.BooleanField(default=False)
@@ -93,11 +129,9 @@ class AutomatedTask(BaseAuditModel):
# options sent to agent for task creation
# general task settings
task_type = models.CharField(
max_length=100, choices=TaskType.choices, default=TaskType.MANUAL
max_length=100, choices=TASK_TYPE_CHOICES, default="manual"
)
win_task_name = models.CharField(
max_length=255, unique=True, blank=True, default=generate_task_name
) # should be changed to unique=True
win_task_name = models.CharField(max_length=255, null=True, blank=True)
run_time_date = DateTimeField(null=True, blank=True)
expire_date = DateTimeField(null=True, blank=True)
@@ -131,89 +165,144 @@ class AutomatedTask(BaseAuditModel):
run_asap_after_missed = models.BooleanField(default=False) # added in agent v1.4.7
task_instance_policy = models.PositiveSmallIntegerField(blank=True, default=1)
# deprecated
managed_by_policy = models.BooleanField(default=False)
# non-database property
task_result: "Union[TaskResult, Dict[None, None]]" = {}
def __str__(self) -> str:
def __str__(self):
return self.name
def save(self, *args, **kwargs) -> None:
def save(self, *args, **kwargs):
from autotasks.tasks import modify_win_task
from automation.tasks import update_policy_autotasks_fields_task
# if task is a policy task clear cache on everything
if self.policy:
cache.delete_many_pattern("site_*_tasks")
cache.delete_many_pattern("agent_*_tasks")
# get old task if exists
# get old agent if exists
old_task = AutomatedTask.objects.get(pk=self.pk) if self.pk else None
super(AutomatedTask, self).save(old_model=old_task, *args, **kwargs)
# check if fields were updated that require a sync to the agent and set status to notsynced
# check if fields were updated that require a sync to the agent
update_agent = False
if old_task:
for field in self.fields_that_trigger_task_update_on_agent:
if getattr(self, field) != getattr(old_task, field):
if self.policy:
TaskResult.objects.exclude(
sync_status=TaskSyncStatus.INITIAL
).filter(task__policy_id=self.policy.id).update(
sync_status=TaskSyncStatus.NOT_SYNCED
)
else:
TaskResult.objects.filter(agent=self.agent, task=self).update(
sync_status=TaskSyncStatus.NOT_SYNCED
)
update_agent = True
break
def delete(self, *args, **kwargs):
# check if automated task was enabled/disabled and send celery task
if old_task and old_task.agent and update_agent:
modify_win_task.delay(pk=self.pk)
# if task is a policy task clear cache on everything
if self.policy:
cache.delete_many_pattern("site_*_tasks")
cache.delete_many_pattern("agent_*_tasks")
super(AutomatedTask, self).delete(
*args,
**kwargs,
)
# if a policy task was edited, check whether any changed field should be copied to the rest of the agent tasks
elif old_task and old_task.policy:
if update_agent:
update_policy_autotasks_fields_task.delay(
task=self.pk, update_agent=update_agent
)
else:
for field in self.policy_fields_to_copy:
if getattr(self, field) != getattr(old_task, field):
update_policy_autotasks_fields_task.delay(task=self.pk)
break
@property
def schedule(self) -> Optional[str]:
if self.task_type == TaskType.MANUAL:
def schedule(self):
if self.task_type == "manual":
return "Manual"
elif self.task_type == TaskType.CHECK_FAILURE:
elif self.task_type == "checkfailure":
return "Every time check fails"
elif self.task_type == TaskType.RUN_ONCE:
elif self.task_type == "runonce":
return f'Run once on {self.run_time_date.strftime("%m/%d/%Y %I:%M%p")}'
elif self.task_type == TaskType.DAILY:
elif self.task_type == "daily":
run_time_nice = self.run_time_date.strftime("%I:%M%p")
if self.daily_interval == 1:
return f"Daily at {run_time_nice}"
else:
return f"Every {self.daily_interval} days at {run_time_nice}"
elif self.task_type == TaskType.WEEKLY:
elif self.task_type == "weekly":
run_time_nice = self.run_time_date.strftime("%I:%M%p")
days = bitdays_to_string(self.run_time_bit_weekdays)
if self.weekly_interval != 1:
return f"{days} at {run_time_nice}"
else:
return f"{days} at {run_time_nice} every {self.weekly_interval} weeks"
elif self.task_type == TaskType.MONTHLY:
elif self.task_type == "monthly":
run_time_nice = self.run_time_date.strftime("%I:%M%p")
months = bitmonths_to_string(self.monthly_months_of_year)
days = bitmonthdays_to_string(self.monthly_days_of_month)
return f"Runs on {months} on days {days} at {run_time_nice}"
elif self.task_type == TaskType.MONTHLY_DOW:
elif self.task_type == "monthlydow":
run_time_nice = self.run_time_date.strftime("%I:%M%p")
months = bitmonths_to_string(self.monthly_months_of_year)
weeks = bitweeks_to_string(self.monthly_weeks_of_month)
days = bitdays_to_string(self.run_time_bit_weekdays)
return f"Runs on {months} on {weeks} on {days} at {run_time_nice}"
@property
def last_run_as_timezone(self):
if self.last_run is not None and self.agent is not None:
return self.last_run.astimezone(
pytz.timezone(self.agent.timezone)
).strftime("%b-%d-%Y - %H:%M")
return self.last_run
# These fields will be duplicated on the agent tasks that are managed by a policy
@property
def policy_fields_to_copy(self) -> List[str]:
return [
"alert_severity",
"email_alert",
"text_alert",
"dashboard_alert",
"assigned_check",
"name",
"actions",
"run_time_bit_weekdays",
"run_time_date",
"expire_date",
"daily_interval",
"weekly_interval",
"task_type",
"win_task_name",
"enabled",
"remove_if_not_scheduled",
"run_asap_after_missed",
"custom_field",
"collector_all_output",
"monthly_days_of_month",
"monthly_months_of_year",
"monthly_weeks_of_month",
"task_repetition_duration",
"task_repetition_interval",
"stop_task_at_duration_end",
"random_task_delay",
"run_asap_after_missed",
"task_instance_policy",
"continue_on_error",
]
@property
def fields_that_trigger_task_update_on_agent(self) -> List[str]:
return FIELDS_TRIGGER_TASK_UPDATE_AGENT
return [
"run_time_bit_weekdays",
"run_time_date",
"expire_date",
"daily_interval",
"weekly_interval",
"enabled",
"remove_if_not_scheduled",
"run_asap_after_missed",
"monthly_days_of_month",
"monthly_months_of_year",
"monthly_weeks_of_month",
"task_repetition_duration",
"task_repetition_interval",
"stop_task_at_duration_end",
"random_task_delay",
"run_asap_after_missed",
"task_instance_policy",
]
@staticmethod
def generate_task_name():
chars = string.ascii_letters
return "TacticalRMM_" + "".join(random.choice(chars) for i in range(35))
@staticmethod
def serialize(task):
@@ -222,35 +311,53 @@ class AutomatedTask(BaseAuditModel):
return TaskAuditSerializer(task).data
def create_policy_task(
self, policy: "Policy", assigned_check: "Optional[Check]" = None
) -> None:
### Copies certain properties from this task (self) to a new task and assigns it to the supplied Policy
fields_to_copy = POLICY_TASK_FIELDS_TO_COPY
def create_policy_task(self, agent=None, policy=None, assigned_check=None):
# added to allow new policy tasks to be assigned to a check only when the agent check already exists
if (
self.assigned_check
and agent
and agent.agentchecks.filter(parent_check=self.assigned_check.id).exists()
):
assigned_check = agent.agentchecks.get(parent_check=self.assigned_check.id)
# if policy is present, then this task is being copied to another policy
# if agent is present, then this task is being created on an agent from a policy
# exit if neither are set or if both are set
# also exit if assigned_check is set because this task will be created when the check is
if (
(not agent and not policy)
or (agent and policy)
or (self.assigned_check and not assigned_check)
):
return
task = AutomatedTask.objects.create(
agent=agent,
policy=policy,
managed_by_policy=bool(agent),
parent_task=(self.pk if agent else None),
assigned_check=assigned_check,
)
for field in fields_to_copy:
setattr(task, field, getattr(self, field))
for field in self.policy_fields_to_copy:
if field != "assigned_check":
setattr(task, field, getattr(self, field))
task.save()
if agent:
task.create_task_on_agent()
# agent version >= 1.8.0
def generate_nats_task_payload(
self, agent: "Optional[Agent]" = None, editing: bool = False
) -> Dict[str, Any]:
def generate_nats_task_payload(self, editing=False):
task = {
"pk": self.pk,
"type": "rmm",
"name": self.win_task_name,
"overwrite_task": editing,
"enabled": self.enabled,
"trigger": self.task_type
if self.task_type != TaskType.CHECK_FAILURE
else TaskType.MANUAL,
"trigger": self.task_type if self.task_type != "checkfailure" else "manual",
"multiple_instances": self.task_instance_policy
if self.task_instance_policy
else 0,
@@ -258,29 +365,11 @@ class AutomatedTask(BaseAuditModel):
if self.expire_date
else False,
"start_when_available": self.run_asap_after_missed
if self.task_type != TaskType.RUN_ONCE
if self.task_type != "runonce"
else True,
}
if self.task_type in [
TaskType.RUN_ONCE,
TaskType.DAILY,
TaskType.WEEKLY,
TaskType.MONTHLY,
TaskType.MONTHLY_DOW,
]:
# when creating (not editing), push the runonce task's start time into the future if run_asap_after_missed is set and the scheduled time has already passed
if (
not editing
and self.task_type == TaskType.RUN_ONCE
and self.run_asap_after_missed
and agent
and self.run_time_date
< djangotime.now().astimezone(pytz.timezone(agent.timezone))
):
self.run_time_date = (
djangotime.now() + djangotime.timedelta(minutes=5)
).astimezone(pytz.timezone(agent.timezone))
if self.task_type in ["runonce", "daily", "weekly", "monthly", "monthlydow"]:
task["start_year"] = int(self.run_time_date.strftime("%Y"))
task["start_month"] = int(self.run_time_date.strftime("%-m"))
@@ -307,14 +396,14 @@ class AutomatedTask(BaseAuditModel):
)
task["stop_at_duration_end"] = self.stop_task_at_duration_end
if self.task_type == TaskType.DAILY:
if self.task_type == "daily":
task["day_interval"] = self.daily_interval
elif self.task_type == TaskType.WEEKLY:
elif self.task_type == "weekly":
task["week_interval"] = self.weekly_interval
task["days_of_week"] = self.run_time_bit_weekdays
elif self.task_type == TaskType.MONTHLY:
elif self.task_type == "monthly":
# check if "last day is configured"
if self.monthly_days_of_month >= 0x80000000:
@@ -326,152 +415,222 @@ class AutomatedTask(BaseAuditModel):
task["months_of_year"] = self.monthly_months_of_year
elif self.task_type == TaskType.MONTHLY_DOW:
elif self.task_type == "monthlydow":
task["days_of_week"] = self.run_time_bit_weekdays
task["months_of_year"] = self.monthly_months_of_year
task["weeks_of_month"] = self.monthly_weeks_of_month
return task
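# Illustrative payload (not part of the diff, values made up) produced above for a
# daily task on an agent >= 1.8.0; only keys visible in this diff are shown:
#   {"pk": 42, "type": "rmm", "name": "TacticalRMM_...", "overwrite_task": False,
#    "enabled": True, "trigger": "daily", "multiple_instances": 1,
#    "start_when_available": False, "start_year": 2022, "start_month": 4,
#    "stop_at_duration_end": False, "day_interval": 1, ...}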
def create_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
def create_task_on_agent(self):
from agents.models import Agent
agent = (
Agent.objects.filter(pk=self.agent.pk)
.only("pk", "version", "hostname", "agent_id")
.get()
)
if pyver.parse(agent.version) >= pyver.parse("1.8.0"):
nats_data = {
"func": "schedtask",
"schedtaskpayload": self.generate_nats_task_payload(),
}
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
if self.task_type == "scheduled":
nats_data = {
"func": "schedtask",
"schedtaskpayload": {
"type": "rmm",
"trigger": "weekly",
"weekdays": self.run_time_bit_weekdays,
"pk": self.pk,
"name": self.win_task_name,
"hour": dt.datetime.strptime(
self.run_time_minute, "%H:%M"
).hour,
"min": dt.datetime.strptime(
self.run_time_minute, "%H:%M"
).minute,
},
}
nats_data = {
"func": "schedtask",
"schedtaskpayload": self.generate_nats_task_payload(agent),
}
elif self.task_type == "runonce":
# check if scheduled time is in the past
agent_tz = pytz.timezone(agent.timezone)
task_time_utc = self.run_time_date.replace(tzinfo=agent_tz).astimezone(
pytz.utc
)
now = djangotime.now()
if task_time_utc < now:
self.run_time_date = now.astimezone(agent_tz).replace(
tzinfo=pytz.utc
) + djangotime.timedelta(minutes=5)
self.save(update_fields=["run_time_date"])
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=5))
nats_data = {
"func": "schedtask",
"schedtaskpayload": {
"type": "rmm",
"trigger": "once",
"pk": self.pk,
"name": self.win_task_name,
"year": int(dt.datetime.strftime(self.run_time_date, "%Y")),
"month": dt.datetime.strftime(self.run_time_date, "%B"),
"day": int(dt.datetime.strftime(self.run_time_date, "%d")),
"hour": int(dt.datetime.strftime(self.run_time_date, "%H")),
"min": int(dt.datetime.strftime(self.run_time_date, "%M")),
},
}
if self.run_asap_after_missed:
nats_data["schedtaskpayload"]["run_asap_after_missed"] = True
if self.remove_if_not_scheduled:
nats_data["schedtaskpayload"]["deleteafter"] = True
elif self.task_type == "checkfailure" or self.task_type == "manual":
nats_data = {
"func": "schedtask",
"schedtaskpayload": {
"type": "rmm",
"trigger": "manual",
"pk": self.pk,
"name": self.win_task_name,
},
}
else:
return "error"
r = asyncio.run(agent.nats_cmd(nats_data, timeout=5))
if r != "ok":
task_result.sync_status = TaskSyncStatus.INITIAL
task_result.save(update_fields=["sync_status"])
self.sync_status = "initial"
self.save(update_fields=["sync_status"])
DebugLog.warning(
agent=agent,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to create scheduled task {self.name} on {task_result.agent.hostname}. It will be created when the agent checks in.",
log_type="agent_issues",
message=f"Unable to create scheduled task {self.name} on {agent.hostname}. It will be created when the agent checks in.",
)
return "timeout"
else:
task_result.sync_status = TaskSyncStatus.SYNCED
task_result.save(update_fields=["sync_status"])
self.sync_status = "synced"
self.save(update_fields=["sync_status"])
DebugLog.info(
agent=agent,
log_type=DebugLogType.AGENT_ISSUES,
message=f"{task_result.agent.hostname} task {self.name} was successfully created",
log_type="agent_issues",
message=f"{agent.hostname} task {self.name} was successfully created",
)
return "ok"
def modify_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
def modify_task_on_agent(self):
from agents.models import Agent
agent = (
Agent.objects.filter(pk=self.agent.pk)
.only("pk", "version", "hostname", "agent_id")
.get()
)
if pyver.parse(agent.version) >= pyver.parse("1.8.0"):
nats_data = {
"func": "schedtask",
"schedtaskpayload": self.generate_nats_task_payload(editing=True),
}
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
nats_data = {
"func": "schedtask",
"schedtaskpayload": self.generate_nats_task_payload(editing=True),
}
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=5))
nats_data = {
"func": "enableschedtask",
"schedtaskpayload": {
"name": self.win_task_name,
"enabled": self.enabled,
},
}
r = asyncio.run(agent.nats_cmd(nats_data, timeout=5))
if r != "ok":
task_result.sync_status = TaskSyncStatus.NOT_SYNCED
task_result.save(update_fields=["sync_status"])
self.sync_status = "notsynced"
self.save(update_fields=["sync_status"])
DebugLog.warning(
agent=agent,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to modify scheduled task {self.name} on {task_result.agent.hostname}({task_result.agent.agent_id}). It will try again on next agent checkin",
log_type="agent_issues",
message=f"Unable to modify scheduled task {self.name} on {agent.hostname}({agent.pk}). It will try again on next agent checkin",
)
return "timeout"
else:
task_result.sync_status = TaskSyncStatus.SYNCED
task_result.save(update_fields=["sync_status"])
self.sync_status = "synced"
self.save(update_fields=["sync_status"])
DebugLog.info(
agent=agent,
log_type=DebugLogType.AGENT_ISSUES,
message=f"{task_result.agent.hostname} task {self.name} was successfully modified",
log_type="agent_issues",
message=f"{agent.hostname} task {self.name} was successfully modified",
)
return "ok"
def delete_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
def delete_task_on_agent(self):
from agents.models import Agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
agent = (
Agent.objects.filter(pk=self.agent.pk)
.only("pk", "version", "hostname", "agent_id")
.get()
)
nats_data = {
"func": "delschedtask",
"schedtaskpayload": {"name": self.win_task_name},
}
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=10))
r = asyncio.run(agent.nats_cmd(nats_data, timeout=10))
if r != "ok" and "The system cannot find the file specified" not in r:
task_result.sync_status = TaskSyncStatus.PENDING_DELETION
self.sync_status = "pendingdeletion"
try:
task_result.save(update_fields=["sync_status"])
self.save(update_fields=["sync_status"])
except DatabaseError:
pass
DebugLog.warning(
agent=agent,
log_type=DebugLogType.AGENT_ISSUES,
message=f"{task_result.agent.hostname} task {self.name} will be deleted on next checkin",
log_type="agent_issues",
message=f"{agent.hostname} task {self.name} will be deleted on next checkin",
)
return "timeout"
else:
self.delete()
DebugLog.info(
agent=agent,
log_type=DebugLogType.AGENT_ISSUES,
message=f"{task_result.agent.hostname}({task_result.agent.agent_id}) task {self.name} was deleted",
log_type="agent_issues",
message=f"{agent.hostname}({agent.pk}) task {self.name} was deleted",
)
return "ok"
def run_win_task(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
def run_win_task(self):
from agents.models import Agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
asyncio.run(
task_result.agent.nats_cmd(
{"func": "runtask", "taskpk": self.pk}, wait=False
)
agent = (
Agent.objects.filter(pk=self.agent.pk)
.only("pk", "version", "hostname", "agent_id")
.get()
)
asyncio.run(agent.nats_cmd({"func": "runtask", "taskpk": self.pk}, wait=False))
return "ok"
def save_collector_results(self):
agent_field = self.custom_field.get_or_create_field_value(self.agent)
value = (
self.stdout.strip()
if self.collector_all_output
else self.stdout.strip().split("\n")[-1].strip()
)
agent_field.save_to_field(value)
def should_create_alert(self, alert_template=None):
return (
self.dashboard_alert
@@ -487,64 +646,10 @@ class AutomatedTask(BaseAuditModel):
)
)
class TaskResult(models.Model):
class Meta:
unique_together = (("agent", "task"),)
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="taskresults",
on_delete=models.CASCADE,
)
task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="taskresults",
on_delete=models.CASCADE,
)
retcode = models.BigIntegerField(null=True, blank=True)
stdout = models.TextField(null=True, blank=True)
stderr = models.TextField(null=True, blank=True)
execution_time = models.CharField(max_length=100, default="0.0000")
last_run = models.DateTimeField(null=True, blank=True)
status = models.CharField(
max_length=30, choices=TaskStatus.choices, default=TaskStatus.PENDING
)
sync_status = models.CharField(
max_length=100, choices=TaskSyncStatus.choices, default=TaskSyncStatus.INITIAL
)
def __str__(self):
return f"{self.agent.hostname} - {self.task}"
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_task_alert(
self.task,
agent=self.agent,
skip_create=not self.task.should_create_alert(alert_template),
)
def save_collector_results(self) -> None:
agent_field = self.task.custom_field.get_or_create_field_value(self.agent)
value = (
self.stdout.strip()
if self.task.collector_all_output
else self.stdout.strip().split("\n")[-1].strip()
)
agent_field.save_to_field(value)
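# Self-contained sketch (not part of the diff) of the collector value selection used
# above: keep the whole output, or only the last line of stdout.
def _sketch_collector_value(stdout: str, collect_all: bool) -> str:
    return stdout.strip() if collect_all else stdout.strip().split("\n")[-1].strip()
# _sketch_collector_value("line1\nline2\n", False) == "line2"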
def send_email(self):
CORE = get_core_settings()
from core.models import CoreSettings
CORE = CoreSettings.objects.first()
# Format of Email sent when Task has email alert
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
@@ -556,11 +661,12 @@ class TaskResult(models.Model):
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_mail(subject, body, self.agent.alert_template)
CORE.send_mail(subject, body, self.agent.alert_template) # type: ignore
def send_sms(self):
CORE = get_core_settings()
from core.models import CoreSettings
CORE = CoreSettings.objects.first()
# Format of SMS sent when Task has SMS alert
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
@@ -572,24 +678,27 @@ class TaskResult(models.Model):
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_sms(body, alert_template=self.agent.alert_template)
CORE.send_sms(body, alert_template=self.agent.alert_template) # type: ignore
def send_resolved_email(self):
CORE = get_core_settings()
from core.models import CoreSettings
CORE = CoreSettings.objects.first()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_mail(subject, body, alert_template=self.agent.alert_template)
CORE.send_mail(subject, body, alert_template=self.agent.alert_template) # type: ignore
def send_resolved_sms(self):
CORE = get_core_settings()
from core.models import CoreSettings
CORE = CoreSettings.objects.first()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_sms(body, alert_template=self.agent.alert_template)
CORE.send_sms(body, alert_template=self.agent.alert_template) # type: ignore

View File

@@ -4,7 +4,7 @@ from tacticalrmm.permissions import _has_perm, _has_perm_on_agent
class AutoTaskPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
if r.method == "GET":
if "agent_id" in view.kwargs.keys():
return _has_perm(r, "can_list_autotasks") and _has_perm_on_agent(
@@ -17,5 +17,5 @@ class AutoTaskPerms(permissions.BasePermission):
class RunAutoTaskPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
def has_permission(self, r, view):
return _has_perm(r, "can_run_autotasks")
