Compare commits

...

57 Commits

Author SHA1 Message Date
wh1te909
2bff297f79 Release 0.4.18 2021-02-24 20:52:49 +00:00
wh1te909
dee68f6933 bump versions 2021-02-24 20:51:47 +00:00
wh1te909
afa1e19c83 also grep postgres info during restore #285 2021-02-24 20:39:02 +00:00
wh1te909
6052088eb4 grab postgres creds automatically for backup closes #285 2021-02-24 19:23:47 +00:00
wh1te909
c7fa5167c4 also reinstall py env / node modules during forced update 2021-02-24 11:25:42 +00:00
wh1te909
1034b0b146 also reinstall py env / node modules during forced update 2021-02-24 11:24:47 +00:00
wh1te909
8bcc4e5945 fix docs styling 2021-02-24 10:04:45 +00:00
wh1te909
c3c24aa1db black 2021-02-24 09:46:38 +00:00
wh1te909
281c75d2d2 add find_software management command 2021-02-24 09:42:24 +00:00
wh1te909
52307420f3 more docs 2021-02-24 09:36:59 +00:00
wh1te909
6185347cd8 remove border 2021-02-24 09:34:30 +00:00
wh1te909
b6cd29f77e change wording 2021-02-24 09:26:36 +00:00
wh1te909
b8ea8b1567 typo 2021-02-24 08:38:44 +00:00
wh1te909
2f7dc98830 change save query 2021-02-24 07:37:48 +00:00
wh1te909
e248a99f79 add option to run sched task asap after scheduled start was missed #247 2021-02-24 06:14:28 +00:00
wh1te909
4fb6d9aa5d more docs 2021-02-24 05:32:16 +00:00
sadnub
f092ea8d67 black 2021-02-23 23:58:28 -05:00
sadnub
c32cbbdda6 check run tests and agent alert actions tests 2021-02-23 23:53:55 -05:00
sadnub
2497675259 UI changes for AddAutomated Task and ScriptCheck models 2021-02-23 23:53:55 -05:00
sadnub
8d084ab90a docker dev changes 2021-02-23 23:53:55 -05:00
wh1te909
2398773ef0 moar docs 2021-02-24 03:33:39 +00:00
wh1te909
a05998a30e docs 2021-02-24 00:12:55 +00:00
wh1te909
f863c29194 more docs 2021-02-23 22:19:58 +00:00
wh1te909
d16a98c788 Release 0.4.17 2021-02-23 19:26:54 +00:00
wh1te909
9421b02e96 bump versions 2021-02-23 19:26:17 +00:00
wh1te909
10256864e4 improve typing support 2021-02-23 09:50:57 +00:00
wh1te909
85d010615d black 2021-02-23 08:27:22 +00:00
wh1te909
cd1cb186be deploy docs with gh actions 2021-02-23 08:24:19 +00:00
wh1te909
4458354d70 more docs 2021-02-23 08:14:25 +00:00
wh1te909
0f27da8808 add management command to show outdated agents 2021-02-22 20:31:57 +00:00
wh1te909
dd76bfa3c2 fix python build from source 2021-02-22 10:06:47 +00:00
wh1te909
5780a66f7d fix python build from source 2021-02-22 10:05:46 +00:00
wh1te909
d4342c034c add test for run_script 2021-02-22 09:46:48 +00:00
wh1te909
1ec43f2530 refactor to remove duplicate code 2021-02-22 08:46:59 +00:00
wh1te909
3c300d8fdf remove print 2021-02-22 08:45:57 +00:00
wh1te909
23119b55d1 isort 2021-02-22 08:43:21 +00:00
wh1te909
c8fb0e8f8a remove unneeded imports that are now builtin in python 3.9 2021-02-22 08:05:30 +00:00
sadnub
0ec32a77ef make check results chart more responsive with large amounts of data 2021-02-21 19:00:43 -05:00
sadnub
52921bfce8 black 2021-02-21 18:56:14 -05:00
sadnub
960b929097 move annotation labels to the left for check history chart 2021-02-21 18:51:45 -05:00
sadnub
d4ce23eced adding tests to agent alert actions and a bunch of fixes 2021-02-21 18:45:34 -05:00
wh1te909
6925510f44 no cgo 2021-02-21 10:18:05 +00:00
wh1te909
9827ad4c22 add isort to dev reqs 2021-02-21 10:17:47 +00:00
wh1te909
ef8aaee028 Release 0.4.16 2021-02-21 09:58:41 +00:00
wh1te909
3d7d39f248 bump version 2021-02-21 09:58:28 +00:00
wh1te909
3eac620560 add go mod to fix docker agent exe 2021-02-21 09:56:16 +00:00
wh1te909
ab17006956 Release 0.4.15 2021-02-21 08:37:01 +00:00
wh1te909
bfc6889ee9 bump version 2021-02-21 08:36:44 +00:00
wh1te909
0ec0b4a044 python 3.9 2021-02-21 07:57:36 +00:00
wh1te909
f1a523f327 update reqs 2021-02-21 07:37:36 +00:00
sadnub
4181449aea fix tests 2021-02-20 23:18:54 -05:00
sadnub
e192f8db52 dont create alerts if not configured to do so. Added some more tests 2021-02-20 23:01:19 -05:00
wh1te909
8097c681ac Release 0.4.14 2021-02-20 22:35:35 +00:00
wh1te909
f45938bdd5 bump version 2021-02-20 22:35:14 +00:00
wh1te909
6ea4e97eca fix script args 2021-02-20 22:33:10 +00:00
wh1te909
f274c8e837 add prune alerts to server maintenance tool 2021-02-20 11:01:04 +00:00
wh1te909
335e571485 add optional --force flag to update.sh 2021-02-20 10:33:21 +00:00
90 changed files with 2420 additions and 738 deletions

View File

@@ -1,4 +1,4 @@
FROM python:3.8-slim
FROM python:3.9.2-slim
ENV TACTICAL_DIR /opt/tactical
ENV TACTICAL_GO_DIR /usr/local/rmmgo

View File

@@ -126,7 +126,7 @@ if [ "$1" = 'tactical-init-dev' ]; then
test -f "${TACTICAL_READY_FILE}" && rm "${TACTICAL_READY_FILE}"
# setup Python virtual env and install dependencies
! test -e "${VIRTUAL_ENV}" && python -m venv --copies ${VIRTUAL_ENV}
! test -e "${VIRTUAL_ENV}" && python -m venv ${VIRTUAL_ENV}
"${VIRTUAL_ENV}"/bin/pip install --no-cache-dir -r /requirements.txt
django_setup

View File

@@ -1,40 +1,24 @@
# To ensure app dependencies are ported from your virtual environment/host machine into your container, run 'pip freeze > requirements.txt' in the terminal to overwrite this file
amqp==5.0.5
asgiref==3.3.1
asyncio-nats-client==0.11.4
billiard==3.6.3.0
celery==5.0.5
certifi==2020.12.5
cffi==1.14.5
chardet==4.0.0
cryptography==3.4.4
decorator==4.4.2
Django==3.1.6
django-cors-headers==3.7.0
django-rest-knox==4.1.0
djangorestframework==3.12.2
future==0.18.2
kombu==5.0.2
loguru==0.5.3
msgpack==1.0.2
packaging==20.8
psycopg2-binary==2.8.6
pycparser==2.20
pycryptodome==3.10.1
pyotp==2.6.0
pyparsing==2.4.7
pytz==2021.1
qrcode==6.1
redis==3.5.3
requests==2.25.1
six==1.15.0
sqlparse==0.4.1
twilio==6.52.0
urllib3==1.26.3
validators==0.18.2
vine==5.0.0
websockets==8.1
zipp==3.4.0
asyncio-nats-client
celery
Django
django-cors-headers
django-rest-knox
djangorestframework
loguru
msgpack
psycopg2-binary
pycparser
pycryptodome
pyotp
pyparsing
pytz
qrcode
redis
twilio
packaging
validators
websockets
black
Werkzeug
django-extensions
@@ -44,3 +28,5 @@ model_bakery
mkdocs
mkdocs-material
pymdown-extensions
Pygments
mypy

22
.github/workflows/deploy-docs.yml vendored Normal file
View File

@@ -0,0 +1,22 @@
name: Deploy Docs
on:
push:
branches:
- develop
defaults:
run:
working-directory: docs
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.x
- run: pip install --upgrade pip
- run: pip install --upgrade setuptools wheel
- run: pip install mkdocs mkdocs-material pymdown-extensions
- run: mkdocs gh-deploy --force

2
.gitignore vendored
View File

@@ -45,3 +45,5 @@ htmlcov/
docker-compose.dev.yml
docs/.vuepress/dist
nats-rmm.conf
.mypy_cache
docs/site/

View File

@@ -3,7 +3,14 @@
"python.languageServer": "Pylance",
"python.analysis.extraPaths": [
"api/tacticalrmm",
"api/env",
],
"python.analysis.diagnosticSeverityOverrides": {
"reportUnusedImport": "error",
"reportDuplicateImport": "error",
},
"python.analysis.memory.keepLibraryAst": true,
"python.linting.mypyEnabled": true,
"python.analysis.typeCheckingMode": "basic",
"python.formatting.provider": "black",
"editor.formatOnSave": true,

View File

@@ -15,6 +15,8 @@ Demo database resets every hour. Alot of features are disabled for obvious reaso
### [Discord Chat](https://discord.gg/upGTkWp)
### [Documentation](https://wh1te909.github.io/tacticalrmm/)
## Features
- Teamviewer-like remote desktop control
@@ -33,98 +35,6 @@ Demo database resets every hour. Alot of features are disabled for obvious reaso
- Windows 7, 8.1, 10, Server 2008R2, 2012R2, 2016, 2019
## Installation
## Installation / Backup / Restore / Usage
### Requirements
- VPS with 2GB ram (an install script is provided for Ubuntu Server 20.04 / Debian 10)
- A domain you own with at least 3 subdomains
- Google Authenticator app (2 factor is NOT optional)
### Docker
Refer to the [docker setup](docker/readme.md)
### Installation example (Ubuntu server 20.04 LTS)
Fresh VPS with latest updates\
login as root and create a user and add to sudoers group (we will be creating a user called tactical)
```
apt update && apt -y upgrade
adduser tactical
usermod -a -G sudo tactical
```
switch to the tactical user and setup the firewall
```
su - tactical
sudo ufw default deny incoming
sudo ufw default allow outgoing
sudo ufw allow ssh
sudo ufw allow http
sudo ufw allow https
sudo ufw allow proto tcp from any to any port 4222
sudo ufw enable && sudo ufw reload
```
Our domain for this example is tacticalrmm.com
In the DNS manager of wherever our domain is hosted, we will create three A records, all pointing to the public IP address of our VPS
Create A record ```api.tacticalrmm.com``` for the django rest backend\
Create A record ```rmm.tacticalrmm.com``` for the vue frontend\
Create A record ```mesh.tacticalrmm.com``` for meshcentral
Download the install script and run it
```
wget https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/install.sh
chmod +x install.sh
./install.sh
```
Links will be provided at the end of the install script.\
Download the executable from the first link, then open ```rmm.tacticalrmm.com``` and login.\
Upload the executable when prompted during the initial setup page.
### Install an agent
From the app's dashboard, choose Agents > Install Agent to generate an installer.
## Updating
Download and run [update.sh](https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/update.sh)
```
wget https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/update.sh
chmod +x update.sh
./update.sh
```
## Backup
Download [backup.sh](https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/backup.sh)
```
wget https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/backup.sh
```
Change the postgres username and password at the top of the file (you can find them in `/rmm/api/tacticalrmm/tacticalrmm/local_settings.py` under the DATABASES section)
Run it
```
chmod +x backup.sh
./backup.sh
```
## Restore
Change your 3 A records to point to new server's public IP
Create same linux user account as old server and add to sudoers group and setup firewall (see install instructions above)
Copy backup file to new server
Download the restore script, and edit the postgres username/password at the top of the file. Same instructions as above in the backup steps.
```
wget https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/restore.sh
```
Run the restore script, passing it the backup tar file as the first argument
```
chmod +x restore.sh
./restore.sh rmm-backup-xxxxxxx.tar
```
### Refer to the [documentation](https://wh1te909.github.io/tacticalrmm/)

View File

@@ -7,7 +7,7 @@ from accounts.models import User
class Command(BaseCommand):
help = "Generates barcode for Google Authenticator and creates totp for user"
help = "Generates barcode for Authenticator and creates totp for user"
def add_arguments(self, parser):
parser.add_argument("code", type=str)
@@ -26,12 +26,10 @@ class Command(BaseCommand):
url = pyotp.totp.TOTP(code).provisioning_uri(username, issuer_name=domain)
subprocess.run(f'qr "{url}"', shell=True)
self.stdout.write(
self.style.SUCCESS(
"Scan the barcode above with your google authenticator app"
)
self.style.SUCCESS("Scan the barcode above with your authenticator app")
)
self.stdout.write(
self.style.SUCCESS(
f"If that doesn't work you may manually enter the key: {code}"
f"If that doesn't work you may manually enter the setup key: {code}"
)
)

View File

@@ -10,7 +10,6 @@ from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from agents.models import Agent
from logs.models import AuditLog
from tacticalrmm.utils import notify_error
@@ -79,7 +78,7 @@ class GetAddUsers(APIView):
def post(self, request):
# add new user
try:
user = User.objects.create_user(
user = User.objects.create_user( # type: ignore
request.data["username"],
request.data["email"],
request.data["password"],

View File

@@ -6,7 +6,7 @@ from itertools import cycle
from django.conf import settings
from django.utils import timezone as djangotime
from model_bakery.recipe import Recipe, foreign_key
from model_bakery.recipe import Recipe, foreign_key, seq
def generate_agent_id(hostname):
@@ -30,8 +30,7 @@ agent = Recipe(
hostname="DESKTOP-TEST123",
version="1.3.0",
monitoring_type=cycle(["workstation", "server"]),
salt_id=generate_agent_id("DESKTOP-TEST123"),
agent_id="71AHC-AA813-HH1BC-AAHH5-00013|DESKTOP-TEST123",
agent_id=seq("asdkj3h4234-1234hg3h4g34-234jjh34|DESKTOP-TEST123"),
)
server_agent = agent.extend(
@@ -45,7 +44,7 @@ workstation_agent = agent.extend(
online_agent = agent.extend(last_seen=djangotime.now())
overdue_agent = agent.extend(
last_seen=djangotime.now() - djangotime.timedelta(minutes=6)
last_seen=djangotime.now() - djangotime.timedelta(minutes=35)
)
agent_with_services = agent.extend(

View File

@@ -0,0 +1,18 @@
from django.conf import settings
from django.core.management.base import BaseCommand
from agents.models import Agent
class Command(BaseCommand):
help = "Shows online agents that are not on the latest version"
def handle(self, *args, **kwargs):
q = Agent.objects.exclude(version=settings.LATEST_AGENT_VER).only(
"pk", "version", "last_seen", "overdue_time", "offline_time"
)
agents = [i for i in q if i.status == "online"]
for agent in agents:
self.stdout.write(
self.style.SUCCESS(f"{agent.hostname} - v{agent.version}")
)

View File

@@ -4,7 +4,7 @@ import re
import time
from collections import Counter
from distutils.version import LooseVersion
from typing import Any, List, Union
from typing import Any, Union
import msgpack
import validators
@@ -164,14 +164,14 @@ class Agent(BaseAuditModel):
@property
def has_patches_pending(self):
return self.winupdates.filter(action="approve").filter(installed=False).exists()
return self.winupdates.filter(action="approve").filter(installed=False).exists() # type: ignore
@property
def checks(self):
total, passing, failing = 0, 0, 0
if self.agentchecks.exists():
for i in self.agentchecks.all():
if self.agentchecks.exists(): # type: ignore
for i in self.agentchecks.all(): # type: ignore
total += 1
if i.status == "passing":
passing += 1
@@ -273,7 +273,7 @@ class Agent(BaseAuditModel):
def run_script(
self,
scriptpk: int,
args: List[str] = [],
args: list[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
@@ -333,27 +333,27 @@ class Agent(BaseAuditModel):
updates = list()
if patch_policy.critical == "approve":
updates += self.winupdates.filter(
updates += self.winupdates.filter( # type: ignore
severity="Critical", installed=False
).exclude(action="approve")
if patch_policy.important == "approve":
updates += self.winupdates.filter(
updates += self.winupdates.filter( # type: ignore
severity="Important", installed=False
).exclude(action="approve")
if patch_policy.moderate == "approve":
updates += self.winupdates.filter(
updates += self.winupdates.filter( # type: ignore
severity="Moderate", installed=False
).exclude(action="approve")
if patch_policy.low == "approve":
updates += self.winupdates.filter(severity="Low", installed=False).exclude(
updates += self.winupdates.filter(severity="Low", installed=False).exclude( # type: ignore
action="approve"
)
if patch_policy.other == "approve":
updates += self.winupdates.filter(severity="", installed=False).exclude(
updates += self.winupdates.filter(severity="", installed=False).exclude( # type: ignore
action="approve"
)
@@ -368,7 +368,7 @@ class Agent(BaseAuditModel):
site = self.site
core_settings = CoreSettings.objects.first()
patch_policy = None
agent_policy = self.winupdatepolicy.get()
agent_policy = self.winupdatepolicy.get() # type: ignore
if self.monitoring_type == "server":
# check agent policy first which should override client or site policy
@@ -453,9 +453,9 @@ class Agent(BaseAuditModel):
return patch_policy
def get_approved_update_guids(self) -> List[str]:
def get_approved_update_guids(self) -> list[str]:
return list(
self.winupdates.filter(action="approve", installed=False).values_list(
self.winupdates.filter(action="approve", installed=False).values_list( # type: ignore
"guid", flat=True
)
)
@@ -571,7 +571,7 @@ class Agent(BaseAuditModel):
from automation.models import Policy
# Clear agent checks that have overriden_by_policy set
self.agentchecks.update(overriden_by_policy=False)
self.agentchecks.update(overriden_by_policy=False) # type: ignore
# Generate checks based on policies
Policy.generate_policy_checks(self)
@@ -606,7 +606,7 @@ class Agent(BaseAuditModel):
except Exception:
return "err"
async def nats_cmd(self, data, timeout=30, wait=True):
async def nats_cmd(self, data: dict, timeout: int = 30, wait: bool = True):
nc = NATS()
options = {
"servers": f"tls://{settings.ALLOWED_HOSTS[0]}:4222",
@@ -628,7 +628,7 @@ class Agent(BaseAuditModel):
except ErrTimeout:
ret = "timeout"
else:
ret = msgpack.loads(msg.data)
ret = msgpack.loads(msg.data) # type: ignore
await nc.close()
return ret
@@ -650,12 +650,12 @@ class Agent(BaseAuditModel):
def delete_superseded_updates(self):
try:
pks = [] # list of pks to delete
kbs = list(self.winupdates.values_list("kb", flat=True))
kbs = list(self.winupdates.values_list("kb", flat=True)) # type: ignore
d = Counter(kbs)
dupes = [k for k, v in d.items() if v > 1]
for dupe in dupes:
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True) # type: ignore
# extract the version from the title and sort from oldest to newest
# skip if no version info is available therefore nothing to parse
try:
@@ -668,17 +668,17 @@ class Agent(BaseAuditModel):
continue
# append all but the latest version to our list of pks to delete
for ver in sorted_vers[:-1]:
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver) # type: ignore
pks.append(q.first().pk)
pks = list(set(pks))
self.winupdates.filter(pk__in=pks).delete()
self.winupdates.filter(pk__in=pks).delete() # type: ignore
except:
pass
# define how the agent should handle pending actions
def handle_pending_actions(self):
pending_actions = self.pendingactions.filter(status="pending")
pending_actions = self.pendingactions.filter(status="pending") # type: ignore
for action in pending_actions:
if action.action_type == "taskaction":
@@ -702,7 +702,7 @@ class Agent(BaseAuditModel):
# for clearing duplicate pending actions on agent
def remove_matching_pending_task_actions(self, task_id):
# remove any other pending actions on agent with same task_id
for action in self.pendingactions.exclude(status="completed"):
for action in self.pendingactions.exclude(status="completed"): # type: ignore
if action.details["task_id"] == task_id:
action.delete()
@@ -731,27 +731,23 @@ class Agent(BaseAuditModel):
# check if a resolved notification should be emailed
if (
not alert.resolved_email_sent
and alert_template
alert_template
and alert_template.agent_email_on_resolved
or self.overdue_email_alert
and not alert.resolved_email_sent
):
agent_recovery_email_task.delay(pk=alert.pk)
# check if a resolved notification should be texted
if (
not alert.resolved_sms_sent
and alert_template
alert_template
and alert_template.agent_text_on_resolved
or self.overdue_text_alert
and not alert.resolved_sms_sent
):
agent_recovery_sms_task.delay(pk=alert.pk)
# check if any scripts should be run
if (
not alert.resolved_action_run
and alert_template
and alert_template.resolved_action
if not alert.resolved_action_run and (
alert_template and alert_template.resolved_action
):
r = self.run_script(
scriptpk=alert_template.resolved_action.pk,
@@ -779,50 +775,59 @@ class Agent(BaseAuditModel):
# called when agent is offline
else:
# check if alert hasn't been created yet so create it
if not Alert.objects.filter(agent=self, resolved=False).exists():
alert = Alert.create_availability_alert(self)
# check if alert should be created and if not return
if (
self.overdue_dashboard_alert
or self.overdue_email_alert
or self.overdue_text_alert
or (
alert_template
and (
alert_template.agent_always_alert
or alert_template.agent_always_email
or alert_template.agent_always_text
)
)
):
alert = Alert.create_availability_alert(self)
else:
return
# add a null check history to allow gaps in graph
for check in self.agentchecks.all():
for check in self.agentchecks.all(): # type: ignore
check.add_check_history(None)
else:
alert = Alert.objects.get(agent=self, resolved=False)
# create dashboard alert if enabled
if (
alert_template
and alert_template.agent_always_alert
or self.overdue_dashboard_alert
if self.overdue_dashboard_alert or (
alert_template and alert_template.agent_always_alert
):
alert.hidden = False
alert.save()
# send email alert if enabled
if (
not alert.email_sent
and alert_template
and alert_template.agent_always_email
or self.overdue_email_alert
if self.overdue_email_alert or (
alert_template and alert_template.agent_always_email
):
agent_outage_email_task.delay(
pk=alert.pk,
alert_interval=alert_template.check_periodic_alert_days
alert_interval=alert_template.agent_periodic_alert_days
if alert_template
else None,
)
# send text message if enabled
if (
not alert.sms_sent
and alert_template
and alert_template.agent_always_text
or self.overdue_text_alert
if self.overdue_text_alert or (
alert_template and alert_template.agent_always_text
):
agent_outage_sms_task.delay(
pk=alert.pk,
alert_interval=alert_template.check_periodic_alert_days
alert_interval=alert_template.agent_periodic_alert_days
if alert_template
else None,
)

View File

@@ -2,7 +2,7 @@ import asyncio
import datetime as dt
import random
from time import sleep
from typing import List, Union
from typing import Union
from django.conf import settings
from django.utils import timezone as djangotime
@@ -77,7 +77,7 @@ def agent_update(pk: int) -> str:
@app.task
def send_agent_update_task(pks: List[int]) -> None:
def send_agent_update_task(pks: list[int]) -> None:
chunks = (pks[i : i + 30] for i in range(0, len(pks), 30))
for chunk in chunks:
for pk in chunk:
@@ -93,7 +93,7 @@ def auto_self_agent_update_task() -> None:
return
q = Agent.objects.only("pk", "version")
pks: List[int] = [
pks: list[int] = [
i.pk
for i in q
if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER)
@@ -214,11 +214,17 @@ def handle_agent_recovery_task(pk: int) -> None:
@app.task
def run_script_email_results_task(
agentpk: int, scriptpk: int, nats_timeout: int, emails: List[str]
agentpk: int,
scriptpk: int,
nats_timeout: int,
emails: list[str],
args: list[str] = [],
):
agent = Agent.objects.get(pk=agentpk)
script = Script.objects.get(pk=scriptpk)
r = agent.run_script(scriptpk=script.pk, full=True, timeout=nats_timeout, wait=True)
r = agent.run_script(
scriptpk=script.pk, args=args, full=True, timeout=nats_timeout, wait=True
)
if r == "timeout":
logger.error(f"{agent.hostname} timed out running script.")
return

View File

@@ -1,7 +1,6 @@
import json
import os
from itertools import cycle
from typing import List
from unittest.mock import patch
from django.conf import settings
@@ -78,12 +77,12 @@ class TestAgentViews(TacticalTestCase):
_quantity=15,
)
pks: List[int] = list(
pks: list[int] = list(
Agent.objects.only("pk", "version").values_list("pk", flat=True)
)
data = {"pks": pks}
expected: List[int] = [
expected: list[int] = [
i.pk
for i in Agent.objects.only("pk", "version")
if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER)
@@ -699,6 +698,77 @@ class TestAgentViews(TacticalTestCase):
self.check_not_authenticated("get", url)
@patch("agents.tasks.run_script_email_results_task.delay")
@patch("agents.models.Agent.run_script")
def test_run_script(self, run_script, email_task):
run_script.return_value = "ok"
url = "/agents/runscript/"
script = baker.make_recipe("scripts.script")
# test wait
data = {
"pk": self.agent.pk,
"scriptPK": script.pk,
"output": "wait",
"args": [],
"timeout": 15,
}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
run_script.assert_called_with(
scriptpk=script.pk, args=[], timeout=18, wait=True
)
run_script.reset_mock()
# test email default
data = {
"pk": self.agent.pk,
"scriptPK": script.pk,
"output": "email",
"args": ["abc", "123"],
"timeout": 15,
"emailmode": "default",
"emails": ["admin@example.com", "bob@example.com"],
}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
email_task.assert_called_with(
agentpk=self.agent.pk,
scriptpk=script.pk,
nats_timeout=18,
emails=[],
args=["abc", "123"],
)
email_task.reset_mock()
# test email overrides
data["emailmode"] = "custom"
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
email_task.assert_called_with(
agentpk=self.agent.pk,
scriptpk=script.pk,
nats_timeout=18,
emails=["admin@example.com", "bob@example.com"],
args=["abc", "123"],
)
# test fire and forget
data = {
"pk": self.agent.pk,
"scriptPK": script.pk,
"output": "forget",
"args": ["hello", "world"],
"timeout": 22,
}
r = self.client.post(url, data, format="json")
self.assertEqual(r.status_code, 200)
run_script.assert_called_with(
scriptpk=script.pk, args=["hello", "world"], timeout=25
)
class TestAgentViewsNew(TacticalTestCase):
def setUp(self):

View File

@@ -3,8 +3,6 @@ import datetime as dt
import os
import random
import string
import subprocess
from typing import List
from django.conf import settings
from django.http import HttpResponse
@@ -20,7 +18,12 @@ from core.models import CoreSettings
from logs.models import AuditLog, PendingAction
from scripts.models import Script
from scripts.tasks import handle_bulk_command_task, handle_bulk_script_task
from tacticalrmm.utils import get_default_timezone, notify_error, reload_nats
from tacticalrmm.utils import (
generate_installer_exe,
get_default_timezone,
notify_error,
reload_nats,
)
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
@@ -53,7 +56,7 @@ def get_agent_versions(request):
@api_view(["POST"])
def update_agents(request):
q = Agent.objects.filter(pk__in=request.data["pks"]).only("pk", "version")
pks: List[int] = [
pks: list[int] = [
i.pk
for i in q
if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER)
@@ -95,7 +98,7 @@ def edit_agent(request):
a_serializer.save()
if "winupdatepolicy" in request.data.keys():
policy = agent.winupdatepolicy.get()
policy = agent.winupdatepolicy.get() # type: ignore
p_serializer = WinUpdatePolicySerializer(
instance=policy, data=request.data["winupdatepolicy"][0]
)
@@ -427,124 +430,20 @@ def install_agent(request):
)
if request.data["installMethod"] == "exe":
go_bin = "/usr/local/rmmgo/go/bin/go"
if not os.path.exists(go_bin):
return Response("nogolang", status=status.HTTP_409_CONFLICT)
api = request.data["api"]
atype = request.data["agenttype"]
rdp = request.data["rdp"]
ping = request.data["ping"]
power = request.data["power"]
file_name = "rmm-installer.exe"
exe = os.path.join(settings.EXE_DIR, file_name)
if os.path.exists(exe):
try:
os.remove(exe)
except Exception as e:
logger.error(str(e))
goarch = "amd64" if arch == "64" else "386"
cmd = [
"env",
"GOOS=windows",
f"GOARCH={goarch}",
go_bin,
"build",
f"-ldflags=\"-s -w -X 'main.Inno={inno}'",
f"-X 'main.Api={api}'",
f"-X 'main.Client={client_id}'",
f"-X 'main.Site={site_id}'",
f"-X 'main.Atype={atype}'",
f"-X 'main.Rdp={rdp}'",
f"-X 'main.Ping={ping}'",
f"-X 'main.Power={power}'",
f"-X 'main.DownloadUrl={download_url}'",
f"-X 'main.Token={token}'\"",
"-o",
exe,
]
build_error = False
gen_error = False
gen = [
"env",
"GOOS=windows",
f"GOARCH={goarch}",
go_bin,
"generate",
]
try:
r1 = subprocess.run(
" ".join(gen),
capture_output=True,
shell=True,
cwd=os.path.join(settings.BASE_DIR, "core/goinstaller"),
)
except Exception as e:
gen_error = True
logger.error(str(e))
return Response(
"genfailed", status=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE
)
if r1.returncode != 0:
gen_error = True
if r1.stdout:
logger.error(r1.stdout.decode("utf-8", errors="ignore"))
if r1.stderr:
logger.error(r1.stderr.decode("utf-8", errors="ignore"))
logger.error(f"Go build failed with return code {r1.returncode}")
if gen_error:
return Response(
"genfailed", status=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE
)
try:
r = subprocess.run(
" ".join(cmd),
capture_output=True,
shell=True,
cwd=os.path.join(settings.BASE_DIR, "core/goinstaller"),
)
except Exception as e:
build_error = True
logger.error(str(e))
return Response("buildfailed", status=status.HTTP_412_PRECONDITION_FAILED)
if r.returncode != 0:
build_error = True
if r.stdout:
logger.error(r.stdout.decode("utf-8", errors="ignore"))
if r.stderr:
logger.error(r.stderr.decode("utf-8", errors="ignore"))
logger.error(f"Go build failed with return code {r.returncode}")
if build_error:
return Response("buildfailed", status=status.HTTP_412_PRECONDITION_FAILED)
if settings.DEBUG:
with open(exe, "rb") as f:
response = HttpResponse(
f.read(),
content_type="application/vnd.microsoft.portable-executable",
)
response["Content-Disposition"] = f"inline; filename={file_name}"
return response
else:
response = HttpResponse()
response["Content-Disposition"] = f"attachment; filename={file_name}"
response["X-Accel-Redirect"] = f"/private/exe/{file_name}"
return response
return generate_installer_exe(
file_name="rmm-installer.exe",
goarch="amd64" if arch == "64" else "386",
inno=inno,
api=request.data["api"],
client_id=client_id,
site_id=site_id,
atype=request.data["agenttype"],
rdp=request.data["rdp"],
ping=request.data["ping"],
power=request.data["power"],
download_url=download_url,
token=token,
)
elif request.data["installMethod"] == "manual":
cmd = [
@@ -653,7 +552,7 @@ def recover(request):
if r == "ok":
return Response("Successfully completed recovery")
if agent.recoveryactions.filter(last_run=None).exists():
if agent.recoveryactions.filter(last_run=None).exists(): # type: ignore
return notify_error(
"A recovery action is currently pending. Please wait for the next agent check-in."
)
@@ -681,10 +580,9 @@ def recover(request):
@api_view(["POST"])
def run_script(request):
agent = get_object_or_404(Agent, pk=request.data["pk"])
if not agent.has_nats:
return notify_error("Requires agent version 1.1.0 or greater")
script = get_object_or_404(Script, pk=request.data["scriptPK"])
output = request.data["output"]
args = request.data["args"]
req_timeout = int(request.data["timeout"]) + 3
AuditLog.audit_script_run(
@@ -694,13 +592,12 @@ def run_script(request):
)
if output == "wait":
r = agent.run_script(scriptpk=script.pk, timeout=req_timeout, wait=True)
r = agent.run_script(
scriptpk=script.pk, args=args, timeout=req_timeout, wait=True
)
return Response(r)
elif output == "email":
if not pyver.parse(agent.version) >= pyver.parse("1.1.12"):
return notify_error("Requires agent version 1.1.12 or greater")
emails = (
[] if request.data["emailmode"] == "default" else request.data["emails"]
)
@@ -709,9 +606,10 @@ def run_script(request):
scriptpk=script.pk,
nats_timeout=req_timeout,
emails=emails,
args=args,
)
else:
agent.run_script(scriptpk=script.pk, timeout=req_timeout)
agent.run_script(scriptpk=script.pk, args=args, timeout=req_timeout)
return Response(f"{script.name} will now be run on {agent.hostname}")
@@ -803,7 +701,7 @@ def bulk(request):
elif request.data["monType"] == "workstations":
q = q.filter(monitoring_type="workstation")
agents: List[int] = [agent.pk for agent in q]
agents: list[int] = [agent.pk for agent in q]
AuditLog.audit_bulk_action(request.user, request.data["mode"], request.data)

View File

@@ -112,10 +112,6 @@ class Alert(models.Model):
hidden=True,
)
@classmethod
def create_custom_alert(cls, custom):
pass
class AlertTemplate(models.Model):
name = models.CharField(max_length=100)
@@ -283,4 +279,4 @@ class AlertTemplate(models.Model):
@property
def is_default_template(self) -> bool:
return self.default_alert_template.exists()
return self.default_alert_template.exists() # type: ignore

View File

@@ -1,7 +1,10 @@
from datetime import datetime, timedelta
from unittest.mock import patch
from django.conf import settings
from django.utils import timezone as djangotime
from model_bakery import baker, seq
from itertools import cycle
from core.models import CoreSettings
from tacticalrmm.test import TacticalTestCase
@@ -346,7 +349,15 @@ class TestAlertsViews(TacticalTestCase):
class TestAlertTasks(TacticalTestCase):
def setUp(self):
self.authenticate()
self.setup_coresettings()
core = CoreSettings.objects.first()
core.twilio_account_sid = "test"
core.twilio_auth_token = "test"
core.text_recipients = ["+12314567890"]
core.email_recipients = ["test@example.com"]
core.twilio_number = "+12314567890"
core.save()
def test_unsnooze_alert_task(self):
from alerts.tasks import unsnooze_alerts
@@ -483,3 +494,412 @@ class TestAlertTasks(TacticalTestCase):
self.assertEquals(workstation.get_alert_template().pk, alert_templates[1].pk)
self.assertEquals(server.get_alert_template().pk, alert_templates[2].pk)
@patch("agents.tasks.sleep")
@patch("smtplib.SMTP")
@patch("core.models.TwClient")
@patch("agents.tasks.agent_outage_sms_task.delay")
@patch("agents.tasks.agent_outage_email_task.delay")
@patch("agents.tasks.agent_recovery_email_task.delay")
@patch("agents.tasks.agent_recovery_sms_task.delay")
def test_handle_agent_offline_alerts(
self,
recovery_sms,
recovery_email,
outage_email,
outage_sms,
TwClient,
SMTP,
sleep,
):
from agents.tasks import (
agent_outage_email_task,
agent_outage_sms_task,
agent_outages_task,
agent_recovery_email_task,
agent_recovery_sms_task,
)
from alerts.models import Alert
# setup sms and email mock objects
TwClient.messages.create.return_value.sid = "SomeRandomText"
SMTP.return_value = True
agent_dashboard_alert = baker.make_recipe("agents.overdue_agent")
# call outages task and no alert should be created
agent_outages_task()
self.assertEquals(Alert.objects.count(), 0)
# set overdue_dashboard_alert and alert should be created
agent_dashboard_alert.overdue_dashboard_alert = True
agent_dashboard_alert.save()
# create other agents with various alert settings
alert_template_always_alert = baker.make(
"alerts.AlertTemplate", is_active=True, agent_always_alert=True
)
alert_template_always_text = baker.make(
"alerts.AlertTemplate",
is_active=True,
agent_always_text=True,
agent_periodic_alert_days=5,
)
alert_template_always_email = baker.make(
"alerts.AlertTemplate",
is_active=True,
agent_always_email=True,
agent_periodic_alert_days=5,
)
alert_template_blank = baker.make("alerts.AlertTemplate", is_active=True)
agent_template_email = baker.make_recipe("agents.overdue_agent")
agent_template_dashboard = baker.make_recipe("agents.overdue_agent")
agent_template_text = baker.make_recipe("agents.overdue_agent")
agent_template_blank = baker.make_recipe("agents.overdue_agent")
# assign alert templates to agent's clients
agent_template_email.client.alert_template = alert_template_always_email
agent_template_email.client.save()
agent_template_dashboard.client.alert_template = alert_template_always_alert
agent_template_dashboard.client.save()
agent_template_text.client.alert_template = alert_template_always_text
agent_template_text.client.save()
agent_template_blank.client.alert_template = alert_template_blank
agent_template_blank.client.save()
agent_text_alert = baker.make_recipe(
"agents.overdue_agent", overdue_text_alert=True
)
agent_email_alert = baker.make_recipe(
"agents.overdue_agent", overdue_email_alert=True
)
agent_outages_task()
# should have created 6 alerts
self.assertEquals(Alert.objects.count(), 6)
# other specific agents should have created alerts
self.assertEquals(Alert.objects.filter(agent=agent_dashboard_alert).count(), 1)
self.assertEquals(Alert.objects.filter(agent=agent_text_alert).count(), 1)
self.assertEquals(Alert.objects.filter(agent=agent_email_alert).count(), 1)
self.assertEquals(Alert.objects.filter(agent=agent_template_email).count(), 1)
self.assertEquals(
Alert.objects.filter(agent=agent_template_dashboard).count(), 1
)
self.assertEquals(Alert.objects.filter(agent=agent_template_text).count(), 1)
self.assertEquals(Alert.objects.filter(agent=agent_template_blank).count(), 0)
# check if email and text tasks were called
self.assertEquals(outage_email.call_count, 2)
self.assertEquals(outage_sms.call_count, 2)
outage_sms.assert_any_call(
pk=Alert.objects.get(agent=agent_text_alert).pk, alert_interval=None
)
outage_sms.assert_any_call(
pk=Alert.objects.get(agent=agent_template_text).pk, alert_interval=5
)
outage_email.assert_any_call(
pk=Alert.objects.get(agent=agent_email_alert).pk, alert_interval=None
)
outage_email.assert_any_call(
pk=Alert.objects.get(agent=agent_template_email).pk, alert_interval=5
)
# call the email/sms outage tasks synchronously
agent_outage_sms_task(
pk=Alert.objects.get(agent=agent_text_alert).pk, alert_interval=None
)
agent_outage_email_task(
pk=Alert.objects.get(agent=agent_email_alert).pk, alert_interval=None
)
agent_outage_sms_task(
pk=Alert.objects.get(agent=agent_template_text).pk, alert_interval=5
)
agent_outage_email_task(
pk=Alert.objects.get(agent=agent_template_email).pk, alert_interval=5
)
# check if email/text sent was set
self.assertTrue(Alert.objects.get(agent=agent_text_alert).sms_sent)
self.assertFalse(Alert.objects.get(agent=agent_text_alert).email_sent)
self.assertTrue(Alert.objects.get(agent=agent_email_alert).email_sent)
self.assertFalse(Alert.objects.get(agent=agent_email_alert).sms_sent)
self.assertTrue(Alert.objects.get(agent=agent_template_text).sms_sent)
self.assertTrue(Alert.objects.get(agent=agent_template_email).email_sent)
self.assertFalse(Alert.objects.get(agent=agent_dashboard_alert).email_sent)
self.assertFalse(Alert.objects.get(agent=agent_dashboard_alert).sms_sent)
SMTP.reset_mock()
TwClient.reset_mock()
# calling agent outage task again shouldn't create duplicate alerts and won't send alerts
agent_outages_task()
self.assertEquals(Alert.objects.count(), 6)
SMTP.assert_not_called()
TwClient.assert_not_called()
# test periodic notification
# change email/text sent to sometime in the past
alert_text = Alert.objects.get(agent=agent_template_text)
alert_text.sms_sent = djangotime.now() - djangotime.timedelta(days=20)
alert_text.save()
alert_email = Alert.objects.get(agent=agent_template_email)
alert_email.email_sent = djangotime.now() - djangotime.timedelta(days=20)
alert_email.save()
agent_outages_task()
outage_sms.assert_any_call(
pk=Alert.objects.get(agent=agent_template_text).pk, alert_interval=5
)
outage_email.assert_any_call(
pk=Alert.objects.get(agent=agent_template_email).pk, alert_interval=5
)
agent_outage_sms_task(
pk=Alert.objects.get(agent=agent_template_text).pk, alert_interval=5
)
agent_outage_email_task(
pk=Alert.objects.get(agent=agent_template_email).pk, alert_interval=5
)
self.assertEquals(SMTP.call_count, 1)
self.assertEquals(TwClient.call_count, 1)
# test resolved alerts
# alter the alert template to email and test on resolved
alert_template_always_email.agent_email_on_resolved = True
alert_template_always_email.save()
alert_template_always_text.agent_text_on_resolved = True
alert_template_always_text.save()
# have the two agents checkin
url = "/api/v3/checkin/"
agent_template_text.version = settings.LATEST_AGENT_VER
agent_template_text.save()
agent_template_email.version = settings.LATEST_AGENT_VER
agent_template_email.save()
data = {
"agent_id": agent_template_text.agent_id,
"version": settings.LATEST_AGENT_VER,
}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
data = {
"agent_id": agent_template_email.agent_id,
"version": settings.LATEST_AGENT_VER,
}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
recovery_sms.assert_called_with(
pk=Alert.objects.get(agent=agent_template_text).pk
)
recovery_email.assert_any_call(
pk=Alert.objects.get(agent=agent_template_email).pk
)
agent_recovery_sms_task(pk=Alert.objects.get(agent=agent_template_text).pk)
agent_recovery_email_task(pk=Alert.objects.get(agent=agent_template_email).pk)
self.assertTrue(Alert.objects.get(agent=agent_template_text).resolved_sms_sent)
self.assertTrue(
Alert.objects.get(agent=agent_template_email).resolved_email_sent
)
@patch("checks.tasks.sleep")
@patch("checks.tasks.handle_check_sms_alert_task.delay")
@patch("checks.tasks.handle_check_email_alert_task.delay")
@patch("checks.tasks.handle_resolved_check_email_alert_task.delay")
@patch("checks.tasks.handle_resolved_check_sms_alert_task.delay")
def test_handle_check_alerts(
self, resolved_sms, resolved_email, outage_email, outage_sms, sleep
):
# create test data
agent = baker.make_recipe("agents.agent")
agent_no_settings = baker.make_recipe("agents.agent")
agent_template_email = baker.make_recipe("agents.agent")
agent_template_dashboard_text = baker.make_recipe("agents.agent")
agent_template_blank = baker.make_recipe("agents.agent")
# create agent with template to always email on warning severity
alert_template_email = baker.make(
"alerts.AlertTemplate",
is_active=True,
check_always_email=True,
check_email_alert_severity=["warning"],
)
agent_template_email.client.alert_template = alert_template_email
agent_template_email.client.save()
# create agent with template to always dashboard and text on various alert severities
alert_template_dashboard_text = baker.make(
"alerts.AlertTemplate",
is_active=True,
check_always_alert=True,
check_always_text=True,
check_dashboard_alert_severity=["info", "warning", "error"],
check_text_alert_severity=["error"],
)
agent_template_dashboard_text.client.alert_template = (
alert_template_dashboard_text
)
agent_template_dashboard_text.client.save()
# create agent with blank template
alert_template_blank = baker.make("alerts.AlertTemplate", is_active=True)
agent_template_dashboard_text.client.alert_template = alert_template_blank
agent_template_dashboard_text.client.save()
# create some checks per agent above
agents = [
agent,
agent_template_email,
agent_template_dashboard_text,
agent_template_blank,
agent_no_settings,
]
diskspaces = baker.make_recipe(
"checks.diskspace_check", agent=cycle(agents), _quantity=5
)
cpuloads = baker.make_recipe(
"checks.cpuload_check", agent=cycle(agents), _quantity=5
)
memories = baker.make_recipe(
"checks.memory_check", agent=cycle(agents), _quantity=5
)
pings = baker.make_recipe("checks.ping_check", agent=cycle(agents), _quantity=5)
scripts = baker.make_recipe(
"checks.script_check", agent=cycle(agents), _quantity=5
)
# update the agent checks to alert on everything
agent.agentchecks.update(
email_alert=True, text_alert=True, dashboard_alert=True
)
def test_handle_task_alerts(self):
pass
def test_override_email_settings(self):
pass
@patch("agents.models.Agent.nats_cmd")
@patch("agents.tasks.agent_outage_sms_task.delay")
@patch("agents.tasks.agent_outage_email_task.delay")
@patch("agents.tasks.agent_recovery_email_task.delay")
@patch("agents.tasks.agent_recovery_sms_task.delay")
def test_agent_alert_actions(
self, recovery_sms, recovery_email, outage_email, outage_sms, nats_cmd
):
from agents.tasks import agent_outages_task
# Setup cmd mock
success = {
"retcode": 0,
"stdout": "success!",
"stderr": "",
"execution_time": 5.0000,
}
nats_cmd.side_effect = ["pong", success]
# setup data
agent = baker.make_recipe(
"agents.overdue_agent", version=settings.LATEST_AGENT_VER
)
failure_action = baker.make_recipe("scripts.script")
resolved_action = baker.make_recipe("scripts.script")
alert_template = baker.make(
"alerts.AlertTemplate",
is_active=True,
agent_always_alert=True,
action=failure_action,
action_timeout=30,
resolved_action=resolved_action,
resolved_action_timeout=35,
resolved_action_args=["nice_arg"],
)
agent.client.alert_template = alert_template
agent.client.save()
agent_outages_task()
# this is what data should be
data = {
"func": "runscriptfull",
"timeout": 30,
"script_args": [],
"payload": {"code": failure_action.code, "shell": failure_action.shell},
}
nats_cmd.assert_called_with(data, timeout=30, wait=True)
nats_cmd.reset_mock()
# Setup cmd mock
success = {
"retcode": 0,
"stdout": "success!",
"stderr": "",
"execution_time": 5.0000,
}
nats_cmd.side_effect = ["pong", success]
# make sure script run results were stored
alert = Alert.objects.get(agent=agent)
self.assertEqual(alert.action_retcode, 0)
self.assertEqual(alert.action_execution_time, "5.0000")
self.assertEqual(alert.action_stdout, "success!")
self.assertEqual(alert.action_stderr, "")
# resolve alert and test
agent.last_seen = djangotime.now()
agent.save()
url = "/api/v3/checkin/"
data = {
"agent_id": agent.agent_id,
"version": settings.LATEST_AGENT_VER,
}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
# this is what data should be
data = {
"func": "runscriptfull",
"timeout": 35,
"script_args": ["nice_arg"],
"payload": {"code": resolved_action.code, "shell": resolved_action.shell},
}
nats_cmd.assert_called_with(data, timeout=35, wait=True)
# make sure script run results were stored
alert = Alert.objects.get(agent=agent)
self.assertEqual(alert.resolved_action_retcode, 0)
self.assertEqual(alert.resolved_action_execution_time, "5.0000")
self.assertEqual(alert.resolved_action_stdout, "success!")
self.assertEqual(alert.resolved_action_stderr, "")
def test_check_alert_actions(self):
pass
def test_task_alert_actions(self):
pass

View File

@@ -50,26 +50,26 @@ class CheckIn(APIView):
# change agent update pending status to completed if agent has just updated
if (
updated
and agent.pendingactions.filter(
and agent.pendingactions.filter( # type: ignore
action_type="agentupdate", status="pending"
).exists()
):
agent.pendingactions.filter(
agent.pendingactions.filter( # type: ignore
action_type="agentupdate", status="pending"
).update(status="completed")
# handles any alerting actions
agent.handle_alert(checkin=True)
recovery = agent.recoveryactions.filter(last_run=None).last()
recovery = agent.recoveryactions.filter(last_run=None).last() # type: ignore
if recovery is not None:
recovery.last_run = djangotime.now()
recovery.save(update_fields=["last_run"])
handle_agent_recovery_task.delay(pk=recovery.pk)
handle_agent_recovery_task.delay(pk=recovery.pk) # type: ignore
return Response("ok")
# get any pending actions
if agent.pendingactions.filter(status="pending").exists():
if agent.pendingactions.filter(status="pending").exists(): # type: ignore
agent.handle_pending_actions()
return Response("ok")
@@ -111,7 +111,7 @@ class CheckIn(APIView):
if not InstalledSoftware.objects.filter(agent=agent).exists():
InstalledSoftware(agent=agent, software=sw).save()
else:
s = agent.installedsoftware_set.first()
s = agent.installedsoftware_set.first() # type: ignore
s.software = sw
s.save(update_fields=["software"])
@@ -184,7 +184,7 @@ class WinUpdates(APIView):
def patch(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
u = agent.winupdates.filter(guid=request.data["guid"]).last()
u = agent.winupdates.filter(guid=request.data["guid"]).last() # type: ignore
success: bool = request.data["success"]
if success:
u.result = "success"
@@ -210,8 +210,8 @@ class WinUpdates(APIView):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
updates = request.data["wua_updates"]
for update in updates:
if agent.winupdates.filter(guid=update["guid"]).exists():
u = agent.winupdates.filter(guid=update["guid"]).last()
if agent.winupdates.filter(guid=update["guid"]).exists(): # type: ignore
u = agent.winupdates.filter(guid=update["guid"]).last() # type: ignore
u.downloaded = update["downloaded"]
u.installed = update["installed"]
u.save(update_fields=["downloaded", "installed"])
@@ -242,7 +242,7 @@ class WinUpdates(APIView):
# more superseded updates cleanup
if pyver.parse(agent.version) <= pyver.parse("1.4.2"):
for u in agent.winupdates.filter(
for u in agent.winupdates.filter( # type: ignore
date_installed__isnull=True, result="failed"
).exclude(installed=True):
u.delete()
@@ -256,7 +256,7 @@ class SupersededWinUpdate(APIView):
def post(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
updates = agent.winupdates.filter(guid=request.data["guid"])
updates = agent.winupdates.filter(guid=request.data["guid"]) # type: ignore
for u in updates:
u.delete()
@@ -404,10 +404,10 @@ class NewAgent(APIView):
agent.salt_id = f"{agent.hostname}-{agent.pk}"
agent.save(update_fields=["salt_id"])
user = User.objects.create_user(
user = User.objects.create_user( # type: ignore
username=request.data["agent_id"],
agent=agent,
password=User.objects.make_random_password(60),
password=User.objects.make_random_password(60), # type: ignore
)
token = Token.objects.create(user=user)
@@ -452,7 +452,7 @@ class Software(APIView):
if not InstalledSoftware.objects.filter(agent=agent).exists():
InstalledSoftware(agent=agent, software=sw).save()
else:
s = agent.installedsoftware_set.first()
s = agent.installedsoftware_set.first() # type: ignore
s.software = sw
s.save(update_fields=["software"])

View File

@@ -43,11 +43,11 @@ class Policy(BaseAuditModel):
@property
def is_default_server_policy(self):
return self.default_server_policy.exists()
return self.default_server_policy.exists() # type: ignore
@property
def is_default_workstation_policy(self):
return self.default_workstation_policy.exists()
return self.default_workstation_policy.exists() # type: ignore
def __str__(self):
return self.name
@@ -56,7 +56,7 @@ class Policy(BaseAuditModel):
return self.get_related("server") | self.get_related("workstation")
def get_related(self, mon_type):
explicit_agents = self.agents.filter(monitoring_type=mon_type)
explicit_agents = self.agents.filter(monitoring_type=mon_type) # type: ignore
explicit_clients = getattr(self, f"{mon_type}_clients").all()
explicit_sites = getattr(self, f"{mon_type}_sites").all()

View File

@@ -505,12 +505,12 @@ class TestPolicyTasks(TacticalTestCase):
self.assertEqual(check.ip, checks[1].ip)
elif check.check_type == "cpuload":
self.assertEqual(check.parent_check, checks[2].id)
self.assertEqual(check.error_threshold, checks[0].error_threshold)
self.assertEqual(check.warning_threshold, checks[0].warning_threshold)
self.assertEqual(check.error_threshold, checks[2].error_threshold)
self.assertEqual(check.warning_threshold, checks[2].warning_threshold)
elif check.check_type == "memory":
self.assertEqual(check.parent_check, checks[3].id)
self.assertEqual(check.error_threshold, checks[0].error_threshold)
self.assertEqual(check.warning_threshold, checks[0].warning_threshold)
self.assertEqual(check.error_threshold, checks[3].error_threshold)
self.assertEqual(check.warning_threshold, checks[3].warning_threshold)
elif check.check_type == "winsvc":
self.assertEqual(check.parent_check, checks[4].id)
self.assertEqual(check.svc_name, checks[4].svc_name)

View File

@@ -171,7 +171,7 @@ class UpdatePatchPolicy(APIView):
serializer = WinUpdatePolicySerializer(data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.policy = policy
serializer.policy = policy # type: ignore
serializer.save()
return Response("ok")

View File

@@ -0,0 +1,18 @@
# Generated by Django 3.1.7 on 2021-02-24 05:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('autotasks', '0017_auto_20210210_1512'),
]
operations = [
migrations.AddField(
model_name='automatedtask',
name='run_asap_after_missed',
field=models.BooleanField(default=False),
),
]

View File

@@ -96,6 +96,7 @@ class AutomatedTask(BaseAuditModel):
)
run_time_date = DateTimeField(null=True, blank=True)
remove_if_not_scheduled = models.BooleanField(default=False)
run_asap_after_missed = models.BooleanField(default=False) # added in agent v1.4.7
managed_by_policy = models.BooleanField(default=False)
parent_task = models.PositiveIntegerField(null=True, blank=True)
win_task_name = models.CharField(max_length=255, null=True, blank=True)
@@ -218,6 +219,7 @@ class AutomatedTask(BaseAuditModel):
timeout=self.timeout,
enabled=self.enabled,
remove_if_not_scheduled=self.remove_if_not_scheduled,
run_asap_after_missed=self.run_asap_after_missed,
)
create_win_task_schedule.delay(task.pk)
@@ -249,27 +251,19 @@ class AutomatedTask(BaseAuditModel):
# check if resolved email should be send
if (
not alert.resolved_email_sent
and self.email_alert
or alert_template
and alert_template.task_email_on_resolved
):
alert_template and alert_template.task_email_on_resolved
) and not alert.resolved_email_sent:
handle_resolved_task_email_alert.delay(pk=alert.pk)
# check if resolved text should be sent
if (
not alert.resolved_sms_sent
and self.text_alert
or alert_template
and alert_template.task_text_on_resolved
):
alert_template and alert_template.task_text_on_resolved
) and not alert.resolved_sms_sent:
handle_resolved_task_sms_alert.delay(pk=alert.pk)
# check if resolved script should be run
if (
alert_template
and alert_template.resolved_action
and not alert.resolved_action_run
if not alert.resolved_action_run and (
alert_template and alert_template.resolved_action
):
r = self.agent.run_script(
@@ -299,7 +293,24 @@ class AutomatedTask(BaseAuditModel):
# create alert if task is failing
else:
if not Alert.objects.filter(assigned_task=self, resolved=False).exists():
alert = Alert.create_task_alert(self)
# check if alert should be created and if not return
if (
self.dashboard_alert
or self.email_alert
or self.text_alert
or (
alert_template
and (
alert_template.task_always_alert
or alert_template.task_always_email
or alert_template.task_always_text
)
)
):
alert = Alert.create_task_alert(self)
else:
return
else:
alert = Alert.objects.get(assigned_task=self, resolved=False)
@@ -309,40 +320,36 @@ class AutomatedTask(BaseAuditModel):
alert.save(update_fields=["severity"])
# create alert in dashboard if enabled
if (
self.dashboard_alert
or alert_template
if self.dashboard_alert or (
alert_template
and self.alert_severity in alert_template.task_dashboard_alert_severity
and alert_template.task_always_alert
):
alert.hidden = False
alert.save()
# send email if enabled
if (
not alert.email_sent
and self.email_alert
or alert_template
if self.email_alert or (
alert_template
and self.alert_severity in alert_template.task_email_alert_severity
and alert_template.check_always_email
and alert_template.task_always_email
):
handle_task_email_alert.delay(
pk=alert.pk,
alert_template=alert_template.check_periodic_alert_days
alert_template=alert_template.task_periodic_alert_days
if alert_template
else None,
)
# send text if enabled
if (
not alert.sms_sent
and self.text_alert
or alert_template
if self.text_alert or (
alert_template
and self.alert_severity in alert_template.task_text_alert_severity
and alert_template.check_always_text
and alert_template.task_always_text
):
handle_task_sms_alert.delay(
pk=alert.pk,
alert_template=alert_template.check_periodic_alert_days
alert_template=alert_template.task_periodic_alert_days
if alert_template
else None,
)

View File

@@ -45,7 +45,7 @@ def create_win_task_schedule(pk, pending_action=False):
task.run_time_date = now.astimezone(agent_tz).replace(
tzinfo=pytz.utc
) + djangotime.timedelta(minutes=5)
task.save()
task.save(update_fields=["run_time_date"])
nats_data = {
"func": "schedtask",
@@ -62,9 +62,12 @@ def create_win_task_schedule(pk, pending_action=False):
},
}
if task.remove_if_not_scheduled and pyver.parse(
if task.run_asap_after_missed and pyver.parse(
task.agent.version
) >= pyver.parse("1.1.2"):
) >= pyver.parse("1.4.7"):
nats_data["schedtaskpayload"]["run_asap_after_missed"] = True
if task.remove_if_not_scheduled:
nats_data["schedtaskpayload"]["deleteafter"] = True
elif task.task_type == "checkfailure" or task.task_type == "manual":

View File

@@ -3,7 +3,7 @@ from model_bakery.recipe import Recipe
check = Recipe("checks.Check")
diskspace_check = check.extend(
check_type="diskspace", disk="C:", warning_threshold=30, error_threshold=75
check_type="diskspace", disk="C:", warning_threshold=30, error_threshold=10
)
cpuload_check = check.extend(
@@ -13,7 +13,7 @@ cpuload_check = check.extend(
ping_check = check.extend(check_type="ping", ip="10.10.10.10")
memory_check = check.extend(
check_type="memory", warning_threshold=30, error_threshold=75
check_type="memory", warning_threshold=60, error_threshold=75
)
winsvc_check = check.extend(
@@ -21,6 +21,7 @@ winsvc_check = check.extend(
svc_name="ServiceName",
svc_display_name="ServiceName",
svc_policy_mode="manual",
pass_if_svc_not_exist=False,
)
eventlog_check = check.extend(

View File

@@ -3,7 +3,7 @@ import json
import os
import string
from statistics import mean
from typing import Any, List, Union
from typing import Any, Union
import pytz
from django.conf import settings
@@ -12,7 +12,6 @@ from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils import timezone as djangotime
from loguru import logger
from rest_framework.fields import JSONField
from alerts.models import SEVERITY_CHOICES
from core.models import CoreSettings
@@ -206,9 +205,9 @@ class Check(BaseAuditModel):
if self.error_threshold:
text += f" Error Threshold: {self.error_threshold}%"
return f"{self.get_check_type_display()}: Drive {self.disk} < {text}"
return f"{self.get_check_type_display()}: Drive {self.disk} < {text}" # type: ignore
elif self.check_type == "ping":
return f"{self.get_check_type_display()}: {self.name}"
return f"{self.get_check_type_display()}: {self.name}" # type: ignore
elif self.check_type == "cpuload" or self.check_type == "memory":
text = ""
@@ -217,13 +216,13 @@ class Check(BaseAuditModel):
if self.error_threshold:
text += f" Error Threshold: {self.error_threshold}%"
return f"{self.get_check_type_display()} > {text}"
return f"{self.get_check_type_display()} > {text}" # type: ignore
elif self.check_type == "winsvc":
return f"{self.get_check_type_display()}: {self.svc_display_name}"
return f"{self.get_check_type_display()}: {self.svc_display_name}" # type: ignore
elif self.check_type == "eventlog":
return f"{self.get_check_type_display()}: {self.name}"
return f"{self.get_check_type_display()}: {self.name}" # type: ignore
elif self.check_type == "script":
return f"{self.get_check_type_display()}: {self.script.name}"
return f"{self.get_check_type_display()}: {self.script.name}" # type: ignore
else:
return "n/a"
@@ -242,7 +241,7 @@ class Check(BaseAuditModel):
return self.last_run
@property
def non_editable_fields(self) -> List[str]:
def non_editable_fields(self) -> list[str]:
return [
"check_type",
"status",
@@ -331,7 +330,24 @@ class Check(BaseAuditModel):
elif self.fail_count >= self.fails_b4_alert:
if not Alert.objects.filter(assigned_check=self, resolved=False).exists():
alert = Alert.create_check_alert(self)
# check if alert should be created and if not return
if (
self.dashboard_alert
or self.email_alert
or self.text_alert
or (
alert_template
and (
alert_template.check_always_alert
or alert_template.check_always_email
or alert_template.check_always_text
)
)
):
alert = Alert.create_check_alert(self)
else:
return
else:
alert = Alert.objects.get(assigned_check=self, resolved=False)
@@ -341,9 +357,8 @@ class Check(BaseAuditModel):
alert.save(update_fields=["severity"])
# create alert in dashboard if enabled
if (
self.dashboard_alert
or alert_template
if self.dashboard_alert or (
alert_template
and self.alert_severity in alert_template.check_dashboard_alert_severity
and alert_template.check_always_alert
):
@@ -366,10 +381,8 @@ class Check(BaseAuditModel):
)
# send text if enabled
if (
not alert.sms_sent
and self.text_alert
or alert_template
if self.text_alert or (
alert_template
and self.alert_severity in alert_template.check_text_alert_severity
and alert_template.check_always_text
):

View File

@@ -1,3 +1,4 @@
from logging import warning
from unittest.mock import patch
from django.utils import timezone as djangotime
@@ -24,7 +25,7 @@ class TestCheckViews(TacticalTestCase):
serializer = CheckSerializer(disk_check)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, serializer.data)
self.assertEqual(resp.data, serializer.data) # type: ignore
self.check_not_authenticated("get", url)
def test_add_disk_check(self):
@@ -211,7 +212,7 @@ class TestCheckViews(TacticalTestCase):
serializer = CheckSerializer(disk_check)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, serializer.data)
self.assertEqual(resp.data, serializer.data) # type: ignore
self.check_not_authenticated("post", url)
def test_add_policy_disk_check(self):
@@ -221,7 +222,7 @@ class TestCheckViews(TacticalTestCase):
url = "/checks/checks/"
valid_payload = {
"policy": policy.pk,
"policy": policy.pk, # type: ignore
"check": {
"check_type": "diskspace",
"disk": "M:",
@@ -233,7 +234,7 @@ class TestCheckViews(TacticalTestCase):
# should fail because both error and warning thresholds are 0
invalid_payload = {
"policy": policy.pk,
"policy": policy.pk, # type: ignore
"check": {
"check_type": "diskspace",
"error_threshold": 0,
@@ -247,7 +248,7 @@ class TestCheckViews(TacticalTestCase):
# should fail because warning is less than error
invalid_payload = {
"policy": policy.pk,
"policy": policy.pk, # type: ignore
"check": {
"check_type": "diskspace",
"error_threshold": 80,
@@ -261,7 +262,7 @@ class TestCheckViews(TacticalTestCase):
# this should fail because we already have a check for drive M: in setup
invalid_payload = {
"policy": policy.pk,
"policy": policy.pk, # type: ignore
"check": {
"check_type": "diskspace",
"disk": "M:",
@@ -277,8 +278,8 @@ class TestCheckViews(TacticalTestCase):
def test_get_disks_for_policies(self):
url = "/checks/getalldisks/"
r = self.client.get(url)
self.assertIsInstance(r.data, list)
self.assertEqual(26, len(r.data))
self.assertIsInstance(r.data, list) # type: ignore
self.assertEqual(26, len(r.data)) # type: ignore
def test_edit_check_alert(self):
# setup data
@@ -361,8 +362,8 @@ class TestCheckViews(TacticalTestCase):
)
# need to manually set the date back 35 days
for check_history in check_history_data:
check_history.x = djangotime.now() - djangotime.timedelta(days=35)
for check_history in check_history_data: # type: ignore
check_history.x = djangotime.now() - djangotime.timedelta(days=35) # type: ignore
check_history.save()
# test invalid check pk
@@ -375,20 +376,22 @@ class TestCheckViews(TacticalTestCase):
data = {"timeFilter": 30}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data), 30)
self.assertEqual(len(resp.data), 30) # type: ignore
# test with timeFilter equal to 0
data = {"timeFilter": 0}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data), 60)
self.assertEqual(len(resp.data), 60) # type: ignore
self.check_not_authenticated("patch", url)
class TestCheckTasks(TacticalTestCase):
def setUp(self):
self.authenticate()
self.setup_coresettings()
self.agent = baker.make_recipe("agents.agent")
def test_prune_check_history(self):
from .tasks import prune_check_history
@@ -403,8 +406,8 @@ class TestCheckTasks(TacticalTestCase):
)
# need to manually set the date back 35 days
for check_history in check_history_data:
check_history.x = djangotime.now() - djangotime.timedelta(days=35)
for check_history in check_history_data: # type: ignore
check_history.x = djangotime.now() - djangotime.timedelta(days=35) # type: ignore
check_history.save()
# prune data 30 days old
@@ -414,3 +417,577 @@ class TestCheckTasks(TacticalTestCase):
# prune all Check history Data
prune_check_history(0)
self.assertEqual(CheckHistory.objects.count(), 0)
def test_handle_script_check(self):
from checks.models import Check
url = "/api/v3/checkrunner/"
script = baker.make_recipe("checks.script_check", agent=self.agent)
# test failing
data = {
"id": script.id,
"retcode": 500,
"stderr": "error",
"stdout": "message",
"runtime": 5.000,
}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=script.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "error")
# test passing
data = {
"id": script.id,
"retcode": 0,
"stderr": "error",
"stdout": "message",
"runtime": 5.000,
}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=script.id)
self.assertEqual(new_check.status, "passing")
# test failing info
script.info_return_codes = [20, 30, 50]
script.save()
data = {
"id": script.id,
"retcode": 30,
"stderr": "error",
"stdout": "message",
"runtime": 5.000,
}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=script.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "info")
# test failing warning
script.warning_return_codes = [80, 100, 1040]
script.save()
data = {
"id": script.id,
"retcode": 1040,
"stderr": "error",
"stdout": "message",
"runtime": 5.000,
}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=script.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "warning")
def test_handle_diskspace_check(self):
from checks.models import Check
url = "/api/v3/checkrunner/"
diskspace = baker.make_recipe(
"checks.diskspace_check",
warning_threshold=20,
error_threshold=10,
agent=self.agent,
)
# test warning threshold failure
data = {
"id": diskspace.id,
"exists": True,
"percent_used": 85,
"total": 500,
"free": 400,
}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=diskspace.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "warning")
# test error failure
data = {
"id": diskspace.id,
"exists": True,
"percent_used": 95,
"total": 500,
"free": 400,
}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=diskspace.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "error")
# test disk not exist
data = {"id": diskspace.id, "exists": False}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=diskspace.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "error")
# test warning threshold 0
diskspace.warning_threshold = 0
diskspace.save()
data = {
"id": diskspace.id,
"exists": True,
"percent_used": 95,
"total": 500,
"free": 400,
}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=diskspace.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "error")
# test error threshold 0
diskspace.warning_threshold = 50
diskspace.error_threshold = 0
diskspace.save()
data = {
"id": diskspace.id,
"exists": True,
"percent_used": 95,
"total": 500,
"free": 400,
}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=diskspace.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "warning")
# test passing
data = {
"id": diskspace.id,
"exists": True,
"percent_used": 50,
"total": 500,
"free": 400,
}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=diskspace.id)
self.assertEqual(new_check.status, "passing")
def test_handle_cpuload_check(self):
from checks.models import Check
url = "/api/v3/checkrunner/"
cpuload = baker.make_recipe(
"checks.cpuload_check",
warning_threshold=70,
error_threshold=90,
agent=self.agent,
)
# test failing warning
data = {"id": cpuload.id, "percent": 80}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=cpuload.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "warning")
# test failing error
data = {"id": cpuload.id, "percent": 95}
# reset check history
cpuload.history = []
cpuload.save()
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=cpuload.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "error")
# test passing
data = {"id": cpuload.id, "percent": 50}
# reset check history
cpuload.history = []
cpuload.save()
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=cpuload.id)
self.assertEqual(new_check.status, "passing")
# test warning threshold 0
cpuload.warning_threshold = 0
cpuload.save()
data = {"id": cpuload.id, "percent": 95}
# reset check history
cpuload.history = []
cpuload.save()
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=cpuload.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "error")
# test error threshold 0
cpuload.warning_threshold = 50
cpuload.error_threshold = 0
cpuload.save()
data = {"id": cpuload.id, "percent": 95}
# reset check history
cpuload.history = []
cpuload.save()
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=cpuload.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "warning")
def test_handle_memory_check(self):
from checks.models import Check
url = "/api/v3/checkrunner/"
memory = baker.make_recipe(
"checks.memory_check",
warning_threshold=70,
error_threshold=90,
agent=self.agent,
)
# test failing warning
data = {"id": memory.id, "percent": 80}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=memory.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "warning")
# test failing error
data = {"id": memory.id, "percent": 95}
# reset check history
memory.history = []
memory.save()
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=memory.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "error")
# test passing
data = {"id": memory.id, "percent": 50}
# reset check history
memory.history = []
memory.save()
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=memory.id)
self.assertEqual(new_check.status, "passing")
# test warning threshold 0
memory.warning_threshold = 0
memory.save()
data = {"id": memory.id, "percent": 95}
# reset check history
memory.history = []
memory.save()
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=memory.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "error")
# test error threshold 0
memory.warning_threshold = 50
memory.error_threshold = 0
memory.save()
data = {"id": memory.id, "percent": 95}
# reset check history
memory.history = []
memory.save()
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=memory.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "warning")
def test_handle_ping_check(self):
from checks.models import Check
url = "/api/v3/checkrunner/"
ping = baker.make_recipe(
"checks.ping_check", agent=self.agent, alert_severity="info"
)
# test failing info
data = {
"id": ping.id,
"output": "Reply from 192.168.1.27: Destination host unreachable",
"has_stdout": True,
"has_stderr": False,
}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=ping.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "info")
# test failing warning
data = {
"id": ping.id,
"output": "Reply from 192.168.1.27: Destination host unreachable",
"has_stdout": True,
"has_stderr": False,
}
ping.alert_severity = "warning"
ping.save()
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=ping.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "warning")
# test failing error
data = {
"id": ping.id,
"output": "Reply from 192.168.1.27: Destination host unreachable",
"has_stdout": True,
"has_stderr": False,
}
ping.alert_severity = "error"
ping.save()
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=ping.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "error")
# test failing error
data = {
"id": ping.id,
"output": "some output",
"has_stdout": False,
"has_stderr": True,
}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=ping.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "error")
# test passing
data = {
"id": ping.id,
"output": "Reply from 192.168.1.1: bytes=32 time<1ms TTL=64",
"has_stdout": True,
"has_stderr": False,
}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=ping.id)
self.assertEqual(new_check.status, "passing")
@patch("agents.models.Agent.nats_cmd")
def test_handle_winsvc_check(self, nats_cmd):
from checks.models import Check
url = "/api/v3/checkrunner/"
winsvc = baker.make_recipe(
"checks.winsvc_check", agent=self.agent, alert_severity="info"
)
# test passing running
data = {"id": winsvc.id, "exists": True, "status": "running"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=winsvc.id)
self.assertEqual(new_check.status, "passing")
# test passing start pending
winsvc.pass_if_start_pending = True
winsvc.save()
data = {"id": winsvc.id, "exists": True, "status": "start_pending"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=winsvc.id)
self.assertEqual(new_check.status, "passing")
# test failing no start
data = {"id": winsvc.id, "exists": True, "status": "not running"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=winsvc.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "info")
# test failing and attempt start
winsvc.restart_if_stopped = True
winsvc.alert_severity = "warning"
winsvc.save()
nats_cmd.return_value = "timeout"
data = {"id": winsvc.id, "exists": True, "status": "not running"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=winsvc.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "warning")
nats_cmd.assert_called()
nats_cmd.reset_mock()
# test failing and attempt start
winsvc.alert_severity = "error"
winsvc.save()
nats_cmd.return_value = {"success": False, "errormsg": "Some Error"}
data = {"id": winsvc.id, "exists": True, "status": "not running"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=winsvc.id)
self.assertEqual(new_check.status, "failing")
self.assertEqual(new_check.alert_severity, "error")
nats_cmd.assert_called()
nats_cmd.reset_mock()
# test success and attempt start
nats_cmd.return_value = {"success": True}
data = {"id": winsvc.id, "exists": True, "status": "not running"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=winsvc.id)
self.assertEqual(new_check.status, "passing")
nats_cmd.assert_called()
nats_cmd.reset_mock()
# test failing and service not exist
data = {"id": winsvc.id, "exists": False, "status": ""}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=winsvc.id)
self.assertEqual(new_check.status, "failing")
# test success and service not exist
winsvc.pass_if_svc_not_exist = True
winsvc.save()
data = {"id": winsvc.id, "exists": False, "status": ""}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
new_check = Check.objects.get(pk=winsvc.id)
self.assertEqual(new_check.status, "passing")
def test_handle_eventlog_check(self):
url = "/api/v3/checkrunner/"
eventlog = baker.make_recipe("checks.eventlog_check", agent=self.agent)
# test failing warning
data = {}
# resp = self.client.patch(url, data, format="json")
# self.assertEqual(resp.status_code, 200)
# test failing error
data = {}
# resp = self.client.patch(url, data, format="json")
# self.assertEqual(resp.status_code, 200)
# test passing
data = {}
# resp = self.client.patch(url, data, format="json")
# self.assertEqual(resp.status_code, 200)

View File

@@ -59,7 +59,7 @@ class AddCheck(APIView):
if policy:
generate_agent_checks_from_policies_task.delay(policypk=policy.pk)
elif agent:
checks = agent.agentchecks.filter(
checks = agent.agentchecks.filter( # type: ignore
check_type=obj.check_type, managed_by_policy=True
)
@@ -149,7 +149,7 @@ class CheckHistory(APIView):
- djangotime.timedelta(days=request.data["timeFilter"]),
)
check_history = check.check_history.filter(timeFilter).order_by("-x")
check_history = check.check_history.filter(timeFilter).order_by("-x") # type: ignore
return Response(
CheckHistorySerializer(

View File

@@ -1,12 +1,9 @@
import datetime as dt
import os
import re
import subprocess
import uuid
import pytz
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from rest_framework.permissions import AllowAny
@@ -15,7 +12,7 @@ from rest_framework.views import APIView
from agents.models import Agent
from core.models import CoreSettings
from tacticalrmm.utils import notify_error
from tacticalrmm.utils import generate_installer_exe, notify_error
from .models import Client, Deployment, Site
from .serializers import (
@@ -183,99 +180,28 @@ class GenerateAgent(APIView):
d = get_object_or_404(Deployment, uid=uid)
go_bin = "/usr/local/rmmgo/go/bin/go"
if not os.path.exists(go_bin):
return notify_error("Missing golang")
api = f"https://{request.get_host()}"
inno = (
f"winagent-v{settings.LATEST_AGENT_VER}.exe"
if d.arch == "64"
else f"winagent-v{settings.LATEST_AGENT_VER}-x86.exe"
)
download_url = settings.DL_64 if d.arch == "64" else settings.DL_32
client = d.client.name.replace(" ", "").lower()
site = d.site.name.replace(" ", "").lower()
client = re.sub(r"([^a-zA-Z0-9]+)", "", client)
site = re.sub(r"([^a-zA-Z0-9]+)", "", site)
ext = ".exe" if d.arch == "64" else "-x86.exe"
file_name = f"rmm-{client}-{site}-{d.mon_type}{ext}"
exe = os.path.join(settings.EXE_DIR, file_name)
if os.path.exists(exe):
try:
os.remove(exe)
except:
pass
goarch = "amd64" if d.arch == "64" else "386"
cmd = [
"env",
"GOOS=windows",
f"GOARCH={goarch}",
go_bin,
"build",
f"-ldflags=\"-s -w -X 'main.Inno={inno}'",
f"-X 'main.Api={api}'",
f"-X 'main.Client={d.client.pk}'",
f"-X 'main.Site={d.site.pk}'",
f"-X 'main.Atype={d.mon_type}'",
f"-X 'main.Rdp={d.install_flags['rdp']}'",
f"-X 'main.Ping={d.install_flags['ping']}'",
f"-X 'main.Power={d.install_flags['power']}'",
f"-X 'main.DownloadUrl={download_url}'",
f"-X 'main.Token={d.token_key}'\"",
"-o",
exe,
]
gen = [
"env",
"GOOS=windows",
f"GOARCH={goarch}",
go_bin,
"generate",
]
try:
r1 = subprocess.run(
" ".join(gen),
capture_output=True,
shell=True,
cwd=os.path.join(settings.BASE_DIR, "core/goinstaller"),
)
except:
return notify_error("genfailed")
if r1.returncode != 0:
return notify_error("genfailed")
try:
r = subprocess.run(
" ".join(cmd),
capture_output=True,
shell=True,
cwd=os.path.join(settings.BASE_DIR, "core/goinstaller"),
)
except:
return notify_error("buildfailed")
if r.returncode != 0:
return notify_error("buildfailed")
if settings.DEBUG:
with open(exe, "rb") as f:
response = HttpResponse(
f.read(),
content_type="application/vnd.microsoft.portable-executable",
)
response["Content-Disposition"] = f"inline; filename={file_name}"
return response
else:
response = HttpResponse()
response["Content-Disposition"] = f"attachment; filename={file_name}"
response["X-Accel-Redirect"] = f"/private/exe/{file_name}"
return response
return generate_installer_exe(
file_name=f"rmm-{client}-{site}-{d.mon_type}{ext}",
goarch="amd64" if d.arch == "64" else "386",
inno=inno,
api=f"https://{request.get_host()}",
client_id=d.client.pk,
site_id=d.site.pk,
atype=d.mon_type,
rdp=d.install_flags["rdp"],
ping=d.install_flags["ping"],
power=d.install_flags["power"],
download_url=settings.DL_64 if d.arch == "64" else settings.DL_32,
token=d.token_key,
)

View File

@@ -0,0 +1,5 @@
module github.com/wh1te909/goinstaller
go 1.16
require github.com/josephspurrier/goversioninfo v1.2.0 // indirect

View File

@@ -0,0 +1,10 @@
github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw=
github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/josephspurrier/goversioninfo v1.2.0 h1:tpLHXAxLHKHg/dCU2AAYx08A4m+v9/CWg6+WUvTF4uQ=
github.com/josephspurrier/goversioninfo v1.2.0/go.mod h1:AGP2a+Y/OVJZ+s6XM4IwFUpkETwvn0orYurY8qpw1+0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -116,7 +116,7 @@ class TestCoreTasks(TacticalTestCase):
# test prune db with tables
data = {
"action": "prune_db",
"prune_tables": ["audit_logs", "agent_outages", "pending_actions"],
"prune_tables": ["audit_logs", "alerts", "pending_actions"],
}
r = self.client.post(url, data)
self.assertEqual(r.status_code, 200)

View File

@@ -122,6 +122,13 @@ def server_maintenance(request):
records_count += pendingactions.count()
pendingactions.delete()
if "alerts" in tables:
from alerts.models import Alert
alerts = Alert.objects.all()
records_count += alerts.count()
alerts.delete()
return Response(f"{records_count} records were pruned from the database")
return notify_error("The data is incorrect")

View File

@@ -1,6 +1,5 @@
import asyncio
import time
from typing import List
from django.conf import settings
from django.shortcuts import get_object_or_404
@@ -17,11 +16,7 @@ from rest_framework.views import APIView
from agents.models import Agent
from agents.serializers import WinAgentSerializer
from agents.tasks import (
agent_recovery_email_task,
agent_recovery_sms_task,
handle_agent_recovery_task,
)
from agents.tasks import handle_agent_recovery_task
from checks.utils import bytes2human
from software.models import InstalledSoftware
from tacticalrmm.utils import SoftwareList, filter_software, notify_error
@@ -39,8 +34,8 @@ def nats_info(request):
class NatsCheckIn(APIView):
authentication_classes = []
permission_classes = []
authentication_classes = [] # type: ignore
permission_classes = [] # type: ignore
def patch(self, request):
updated = False
@@ -58,18 +53,18 @@ class NatsCheckIn(APIView):
# change agent update pending status to completed if agent has just updated
if (
updated
and agent.pendingactions.filter(
and agent.pendingactions.filter( # type: ignore
action_type="agentupdate", status="pending"
).exists()
):
agent.pendingactions.filter(
agent.pendingactions.filter( # type: ignore
action_type="agentupdate", status="pending"
).update(status="completed")
# handles any alerting actions
agent.handle_alert(checkin=True)
recovery = agent.recoveryactions.filter(last_run=None).last()
recovery = agent.recoveryactions.filter(last_run=None).last() # type: ignore
if recovery is not None:
recovery.last_run = djangotime.now()
recovery.save(update_fields=["last_run"])
@@ -77,7 +72,7 @@ class NatsCheckIn(APIView):
return Response("ok")
# get any pending actions
if agent.pendingactions.filter(status="pending").exists():
if agent.pendingactions.filter(status="pending").exists(): # type: ignore
agent.handle_pending_actions()
return Response("ok")
@@ -119,7 +114,7 @@ class NatsCheckIn(APIView):
if not InstalledSoftware.objects.filter(agent=agent).exists():
InstalledSoftware(agent=agent, software=sw).save()
else:
s = agent.installedsoftware_set.first()
s = agent.installedsoftware_set.first() # type: ignore
s.software = sw
s.save(update_fields=["software"])
@@ -141,8 +136,8 @@ class NatsCheckIn(APIView):
class SyncMeshNodeID(APIView):
authentication_classes = []
permission_classes = []
authentication_classes = [] # type: ignore
permission_classes = [] # type: ignore
def post(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
@@ -154,8 +149,8 @@ class SyncMeshNodeID(APIView):
class NatsChoco(APIView):
authentication_classes = []
permission_classes = []
authentication_classes = [] # type: ignore
permission_classes = [] # type: ignore
def post(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
@@ -165,8 +160,8 @@ class NatsChoco(APIView):
class NatsWinUpdates(APIView):
authentication_classes = []
permission_classes = []
authentication_classes = [] # type: ignore
permission_classes = [] # type: ignore
def put(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
@@ -192,7 +187,7 @@ class NatsWinUpdates(APIView):
def patch(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
u = agent.winupdates.filter(guid=request.data["guid"]).last()
u = agent.winupdates.filter(guid=request.data["guid"]).last() # type: ignore
success: bool = request.data["success"]
if success:
u.result = "success"
@@ -218,8 +213,8 @@ class NatsWinUpdates(APIView):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
updates = request.data["wua_updates"]
for update in updates:
if agent.winupdates.filter(guid=update["guid"]).exists():
u = agent.winupdates.filter(guid=update["guid"]).last()
if agent.winupdates.filter(guid=update["guid"]).exists(): # type: ignore
u = agent.winupdates.filter(guid=update["guid"]).last() # type: ignore
u.downloaded = update["downloaded"]
u.installed = update["installed"]
u.save(update_fields=["downloaded", "installed"])
@@ -250,7 +245,7 @@ class NatsWinUpdates(APIView):
# more superseded updates cleanup
if pyver.parse(agent.version) <= pyver.parse("1.4.2"):
for u in agent.winupdates.filter(
for u in agent.winupdates.filter( # type: ignore
date_installed__isnull=True, result="failed"
).exclude(installed=True):
u.delete()
@@ -259,12 +254,12 @@ class NatsWinUpdates(APIView):
class SupersededWinUpdate(APIView):
authentication_classes = []
permission_classes = []
authentication_classes = [] # type: ignore
permission_classes = [] # type: ignore
def post(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agent_id"])
updates = agent.winupdates.filter(guid=request.data["guid"])
updates = agent.winupdates.filter(guid=request.data["guid"]) # type: ignore
for u in updates:
u.delete()
@@ -273,14 +268,14 @@ class SupersededWinUpdate(APIView):
class NatsWMI(APIView):
authentication_classes = []
permission_classes = []
authentication_classes = [] # type: ignore
permission_classes = [] # type: ignore
def get(self, request):
agents = Agent.objects.only(
"pk", "agent_id", "version", "last_seen", "overdue_time", "offline_time"
)
online: List[str] = [
online: list[str] = [
i.agent_id
for i in agents
if pyver.parse(i.version) >= pyver.parse("1.2.0") and i.status == "online"
@@ -289,22 +284,22 @@ class NatsWMI(APIView):
class OfflineAgents(APIView):
authentication_classes = []
permission_classes = []
authentication_classes = [] # type: ignore
permission_classes = [] # type: ignore
def get(self, request):
agents = Agent.objects.only(
"pk", "agent_id", "version", "last_seen", "overdue_time", "offline_time"
)
offline: List[str] = [
offline: list[str] = [
i.agent_id for i in agents if i.has_nats and i.status != "online"
]
return Response({"agent_ids": offline})
class LogCrash(APIView):
authentication_classes = []
permission_classes = []
authentication_classes = [] # type: ignore
permission_classes = [] # type: ignore
def post(self, request):
agent = get_object_or_404(Agent, agent_id=request.data["agentid"])

View File

@@ -3,4 +3,7 @@ Werkzeug
django-extensions
mkdocs
mkdocs-material
pymdown-extensions
pymdown-extensions
Pygments
isort
mypy

View File

@@ -6,9 +6,9 @@ celery==5.0.5
certifi==2020.12.5
cffi==1.14.5
chardet==4.0.0
cryptography==3.4.4
cryptography==3.4.6
decorator==4.4.2
Django==3.1.6
Django==3.1.7
django-cors-headers==3.7.0
django-rest-knox==4.1.0
djangorestframework==3.12.2

View File

@@ -0,0 +1,24 @@
from django.core.management.base import BaseCommand
from agents.models import Agent
class Command(BaseCommand):
help = "Find all agents that have a certain software installed"
def add_arguments(self, parser):
parser.add_argument("name", type=str)
def handle(self, *args, **kwargs):
search = kwargs["name"].lower()
agents = Agent.objects.all()
for agent in agents:
sw = agent.installedsoftware_set.first().software
for i in sw:
if search in i["name"].lower():
self.stdout.write(
self.style.SUCCESS(
f"Found {i['name']} installed on {agent.hostname}"
)
)
break

View File

@@ -55,7 +55,7 @@ def refresh_installed(request, pk):
if not InstalledSoftware.objects.filter(agent=agent).exists():
InstalledSoftware(agent=agent, software=sw).save()
else:
s = agent.installedsoftware_set.first()
s = agent.installedsoftware_set.first() # type: ignore
s.software = sw
s.save(update_fields=["software"])

View File

@@ -14,11 +14,11 @@ app = Celery(
broker="redis://" + settings.REDIS_HOST,
)
# app.config_from_object('django.conf:settings', namespace='CELERY')
app.broker_url = "redis://" + settings.REDIS_HOST + ":6379"
app.result_backend = "redis://" + settings.REDIS_HOST + ":6379"
app.accept_content = ["application/json"]
app.result_serializer = "json"
app.task_serializer = "json"
app.broker_url = "redis://" + settings.REDIS_HOST + ":6379" # type: ignore
app.result_backend = "redis://" + settings.REDIS_HOST + ":6379" # type: ignore
app.accept_content = ["application/json"] # type: ignore
app.result_serializer = "json" # type: ignore
app.task_serializer = "json" # type: ignore
app.conf.task_track_started = True
app.autodiscover_tasks()

View File

@@ -15,19 +15,19 @@ EXE_DIR = os.path.join(BASE_DIR, "tacticalrmm/private/exe")
AUTH_USER_MODEL = "accounts.User"
# latest release
TRMM_VERSION = "0.4.13"
TRMM_VERSION = "0.4.18"
# bump this version everytime vue code is changed
# to alert user they need to manually refresh their browser
APP_VER = "0.0.114"
APP_VER = "0.0.116"
# https://github.com/wh1te909/rmmagent
LATEST_AGENT_VER = "1.4.6"
LATEST_AGENT_VER = "1.4.7"
MESH_VER = "0.7.72"
MESH_VER = "0.7.73"
# for the update script, bump when need to recreate venv or npm install
PIP_VER = "9"
PIP_VER = "10"
NPM_VER = "8"
DL_64 = f"https://github.com/wh1te909/rmmagent/releases/download/v{LATEST_AGENT_VER}/winagent-v{LATEST_AGENT_VER}.exe"

View File

@@ -3,10 +3,11 @@ import os
import string
import subprocess
import time
from typing import Dict, List
from typing import Union
import pytz
from django.conf import settings
from django.http import HttpResponse
from loguru import logger
from rest_framework import status
from rest_framework.response import Response
@@ -17,7 +18,7 @@ logger.configure(**settings.LOG_CONFIG)
notify_error = lambda msg: Response(msg, status=status.HTTP_400_BAD_REQUEST)
SoftwareList = List[Dict[str, str]]
SoftwareList = list[dict[str, str]]
WEEK_DAYS = {
"Sunday": 0x1,
@@ -30,13 +31,137 @@ WEEK_DAYS = {
}
def generate_installer_exe(
file_name: str,
goarch: str,
inno: str,
api: str,
client_id: int,
site_id: int,
atype: str,
rdp: int,
ping: int,
power: int,
download_url: str,
token: str,
) -> Union[Response, HttpResponse]:
go_bin = "/usr/local/rmmgo/go/bin/go"
if not os.path.exists(go_bin):
return Response("nogolang", status=status.HTTP_409_CONFLICT)
exe = os.path.join(settings.EXE_DIR, file_name)
if os.path.exists(exe):
try:
os.remove(exe)
except Exception as e:
logger.error(str(e))
cmd = [
"env",
"CGO_ENABLED=0",
"GOOS=windows",
f"GOARCH={goarch}",
go_bin,
"build",
f"-ldflags=\"-s -w -X 'main.Inno={inno}'",
f"-X 'main.Api={api}'",
f"-X 'main.Client={client_id}'",
f"-X 'main.Site={site_id}'",
f"-X 'main.Atype={atype}'",
f"-X 'main.Rdp={rdp}'",
f"-X 'main.Ping={ping}'",
f"-X 'main.Power={power}'",
f"-X 'main.DownloadUrl={download_url}'",
f"-X 'main.Token={token}'\"",
"-o",
exe,
]
build_error = False
gen_error = False
gen = [
"env",
"GOOS=windows",
"CGO_ENABLED=0",
f"GOARCH={goarch}",
go_bin,
"generate",
]
try:
r1 = subprocess.run(
" ".join(gen),
capture_output=True,
shell=True,
cwd=os.path.join(settings.BASE_DIR, "core/goinstaller"),
)
except Exception as e:
gen_error = True
logger.error(str(e))
return Response("genfailed", status=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE)
if r1.returncode != 0:
gen_error = True
if r1.stdout:
logger.error(r1.stdout.decode("utf-8", errors="ignore"))
if r1.stderr:
logger.error(r1.stderr.decode("utf-8", errors="ignore"))
logger.error(f"Go build failed with return code {r1.returncode}")
if gen_error:
return Response("genfailed", status=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE)
try:
r = subprocess.run(
" ".join(cmd),
capture_output=True,
shell=True,
cwd=os.path.join(settings.BASE_DIR, "core/goinstaller"),
)
except Exception as e:
build_error = True
logger.error(str(e))
return Response("buildfailed", status=status.HTTP_412_PRECONDITION_FAILED)
if r.returncode != 0:
build_error = True
if r.stdout:
logger.error(r.stdout.decode("utf-8", errors="ignore"))
if r.stderr:
logger.error(r.stderr.decode("utf-8", errors="ignore"))
logger.error(f"Go build failed with return code {r.returncode}")
if build_error:
return Response("buildfailed", status=status.HTTP_412_PRECONDITION_FAILED)
if settings.DEBUG:
with open(exe, "rb") as f:
response = HttpResponse(
f.read(),
content_type="application/vnd.microsoft.portable-executable",
)
response["Content-Disposition"] = f"inline; filename={file_name}"
return response
else:
response = HttpResponse()
response["Content-Disposition"] = f"attachment; filename={file_name}"
response["X-Accel-Redirect"] = f"/private/exe/{file_name}"
return response
def get_default_timezone():
from core.models import CoreSettings
return pytz.timezone(CoreSettings.objects.first().default_time_zone)
def get_bit_days(days: List[str]) -> int:
def get_bit_days(days: list[str]) -> int:
bit_days = 0
for day in days:
bit_days |= WEEK_DAYS.get(day)

View File

@@ -1,7 +1,6 @@
import asyncio
import datetime as dt
import time
from typing import List
import pytz
from django.conf import settings
@@ -126,7 +125,7 @@ def check_agent_update_schedule_task():
@app.task
def bulk_install_updates_task(pks: List[int]) -> None:
def bulk_install_updates_task(pks: list[int]) -> None:
q = Agent.objects.filter(pk__in=pks)
agents = [i for i in q if pyver.parse(i.version) >= pyver.parse("1.3.0")]
chunks = (agents[i : i + 40] for i in range(0, len(agents), 40))
@@ -147,7 +146,7 @@ def bulk_install_updates_task(pks: List[int]) -> None:
@app.task
def bulk_check_for_updates_task(pks: List[int]) -> None:
def bulk_check_for_updates_task(pks: list[int]) -> None:
q = Agent.objects.filter(pk__in=pks)
agents = [i for i in q if pyver.parse(i.version) >= pyver.parse("1.3.0")]
chunks = (agents[i : i + 40] for i in range(0, len(agents), 40))

View File

@@ -23,11 +23,11 @@ jobs:
rm -rf /myagent/_work/1/s/api/env
cd /myagent/_work/1/s/api
python3.8 -m venv env
python3.9 -m venv env
source env/bin/activate
cd /myagent/_work/1/s/api/tacticalrmm
pip install --no-cache-dir --upgrade pip
pip install --no-cache-dir setuptools==52.0.0 wheel==0.36.2
pip install --no-cache-dir setuptools==53.0.0 wheel==0.36.2
pip install --no-cache-dir -r requirements.txt -r requirements-test.txt -r requirements-dev.txt
displayName: "Install Python Dependencies"

View File

@@ -1,13 +1,6 @@
#!/bin/bash
#####################################################
POSTGRES_USER="changeme"
POSTGRES_PW="hunter2"
#####################################################
SCRIPT_VERSION="9"
SCRIPT_VERSION="10"
SCRIPT_URL='https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/backup.sh'
GREEN='\033[0;32m'
@@ -31,11 +24,9 @@ if [ $EUID -eq 0 ]; then
exit 1
fi
if [[ "$POSTGRES_USER" == "changeme" || "$POSTGRES_PW" == "hunter2" ]]; then
printf >&2 "${RED}You must change the postgres username/password at the top of this file.${NC}\n"
printf >&2 "${RED}Check the github readme for where to find them.${NC}\n"
exit 1
fi
POSTGRES_USER=$(grep -w USER /rmm/api/tacticalrmm/tacticalrmm/local_settings.py | sed 's/^.*: //' | sed 's/.//' | sed -r 's/.{2}$//')
POSTGRES_PW=$(grep -w PASSWORD /rmm/api/tacticalrmm/tacticalrmm/local_settings.py | sed 's/^.*: //' | sed 's/.//' | sed -r 's/.{2}$//')
if [ ! -d /rmmbackups ]; then
sudo mkdir /rmmbackups

View File

@@ -1,5 +1,5 @@
# creates python virtual env
FROM python:3.8-slim AS CREATE_VENV_STAGE
FROM python:3.9.2-slim AS CREATE_VENV_STAGE
ARG DEBIAN_FRONTEND=noninteractive
@@ -24,7 +24,7 @@ RUN apt-get update && \
# runtime image
FROM python:3.8-slim
FROM python:3.9.2-slim
# set env variables
ENV VIRTUAL_ENV /opt/venv

View File

@@ -1,14 +0,0 @@
#!/usr/bin/env sh
set -e
npm run build
cd .vuepress/dist
git init
git add -A
git commit -m 'deploy'
git push -f git@github.com:wh1te909/tacticalrmm.git develop:gh-pages
cd -

26
docs/docs/backup.md Normal file
View File

@@ -0,0 +1,26 @@
# Backing up the RMM
A backup script is provided as a quick and easy way to back up all settings into one file to move to another server.
Download the backup script:
```bash
wget https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/backup.sh
```
From the Web UI, click **Tools > Server Maintenance**
Choose **Prune DB Tables** from the dropdown and check the `Audit Log` and `Pending Actions` checkboxes, and then click **Submit**
Doing a prune first before running the backup will significantly speed up the postgres vacuum command that is run during backup.
Run the backup script
```bash
chmod +x backup.sh
./backup.sh
```
The backup tar file will be saved in `/rmmbackups` with the following format:
`rmm-backup-CURRENTDATETIME.tar`

58
docs/docs/faq.md Normal file
View File

@@ -0,0 +1,58 @@
# FAQ
#### How do I do X feature in the web UI?
A lot of features in the web UI are hidden behind right-click menus; almost everything has a right-click menu, so if you don't see something, try right-clicking on it.
#### Where are the Linux / Mac agents?
Linux / Mac agents are currently under development.
#### Can I run Tactical RMM locally behind NAT without exposing anything to the internet?
Yes, you will just need to setup local DNS for the 3 subdomains, either by editing host files on all your agents or through a local DNS server.
#### I am locked out of the web UI. How do I reset my password?
SSH into your server and run these commands:
!!!note
The code below will reset the password for the account that was created during install.
To reset a password for a different user, you should use the web UI (see the next question below), but can also do so through the command line by replacing<br/>
`#!python user = User.objects.first()`<br/>
with<br/>
`#!python user = User.objects.get(username='someuser')`
<br/>
in the code snippet below.
```python
tactical@tacrmm:~$ /rmm/api/env/bin/python /rmm/api/tacticalrmm/manage.py shell
Python 3.9.2 (default, Feb 21 2021, 00:50:28)
[GCC 9.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
(InteractiveConsole)
>>> from accounts.models import User
>>> user = User.objects.first()
>>> user.set_password("superSekret123")
>>> user.save()
>>> exit()
```
<br/>
#### How do I reset password or 2 factor token?
From the web UI, click **Settings > User Administration** and then right-click on a user:<br/><br/>
![reset2fa](images/reset2fa.png)
<br/><br/>
Or from the command line:<br/>
```python
tactical@tacrmm:~$ /rmm/api/env/bin/python /rmm/api/tacticalrmm/manage.py shell
Python 3.9.2 (default, Feb 21 2021, 00:50:28)
[GCC 9.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
(InteractiveConsole)
>>> from accounts.models import User
>>> user = User.objects.get(username='someuser')
>>> user.totp_key = None
>>> user.save(update_fields=['totp_key'])
>>> exit()
```
<br/>
Then simply log out of the web UI and next time the user logs in they will be redirected to the 2FA setup page which will present a barcode to be scanned with the Authenticator app.

View File

@@ -0,0 +1,12 @@
# Maintenance Mode
Enabling maintenance mode for an agent will prevent any overdue/check/task email/sms alerts from being sent.
It will also prevent clients/sites/agents from showing up as red in the dashboard if they have any failing checks or are overdue.
To enable maintenance mode for all agents in a client/site, **Right Click** on a client / site and choose **Enable Maintenance Mode**
![maint_mode](../images/maint_mode.png)
To enable maintenance mode for a single agent, **Right Click** on the agent and choose **Enable Maintenance Mode**

View File

@@ -0,0 +1,21 @@
# Remote Background
To access: **Right click** on an agent > **Remote Background**
#### Services Tab
Right click on a service to show the context menu where you can start/stop/restart services:
![services_contextmenu](../images/services_contextmenu.png)
Click *Service Details* to bring up the details tab where you can edit more service options:
![service_detail](../images/service_detail.png)
#### Processes Tab
A very basic task manager that shows real time process usage.
**Right click** on a process to end the task:
![taskmanager](../images/taskmanager.png)

View File

@@ -0,0 +1,8 @@
# User Interface Preferences
Click on your username at the top right of the dashboard > Preferences:
![user_prefs](../images/user_prefs.png)
![user_prefs2](../images/user_prefs2.png)

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

BIN
docs/docs/images/dnstxt.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 20 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 33 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 118 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 41 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 19 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 70 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 81 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.9 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 KiB

View File

@@ -6,10 +6,10 @@
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/python/black)
Tactical RMM is a remote monitoring & management tool for Windows computers, built with Django, Vue and Golang.
It uses an [agent](https://github.com/wh1te909/rmmagent) written in golang and integrates with [MeshCentral](https://github.com/Ylianst/MeshCentral)
It uses an [agent](https://github.com/wh1te909/rmmagent) written in Golang and integrates with [MeshCentral](https://github.com/Ylianst/MeshCentral)
## [LIVE DEMO](https://rmm.xlawgaming.com/)
Demo database resets every hour. Alot of features are disabled for obvious reasons due to the nature of this app.
Demo database resets every hour. A lot of features are disabled for obvious reasons due to the nature of this app.
*Tactical RMM is currently in alpha and subject to breaking changes. Use in production at your own risk.*

View File

@@ -0,0 +1,66 @@
# Installing an agent
!!!warning
You must add antivirus exclusions for the tactical agent.<br/>
Any decent AV will flag the agent as a virus, since it technically is one due to the nature of this software.<br/>
Adding the following exclusions will make sure everything works, including agent update:<br/>
`C:\Program Files\TacticalAgent\*`<br/>
`C:\Program Files\Mesh Agent\*`<br/>
`C:\Windows\Temp\winagent-v*.exe`<br/>
`C:\Windows\Temp\trmm\*`<br/>
#### Dynamically generated executable
The generated exe is simply a wrapper around the Manual install method, using a single exe/command without the need to pass any command line flags to the installer.<br/><br/>
All it does is download the generic installer from the agent's github [release page](https://github.com/wh1te909/rmmagent/releases) and call it using predefined command line args that you choose from the web UI.<br/><br/>
It "bakes" the command line args into the executable.<br/><br/>
From the UI, click **Agents > Install Agent**<br/>
You can also **right click on a site > Install Agent**. This will automatically fill in the client/site dropdown for you.<br/><br/>
![siteagentinstall](images/siteagentinstall.png)
#### Powershell
The powershell method is very similar to the generated exe in that it simply downloads the installer from github and calls the exe for you.
#### Manual
The manual installation method requires you to first download the generic installer and call it using command line args.<br/><br/>
This is useful for scripting the installation using Group Policy or some other batch deployment method.<br/>
!!!tip
You can reuse the installer for any of the deployment methods, you don't need to constantly create a new installer for each new agent.<br/>
The installer will be valid for however long you specify the token expiry time when generating an agent.
<br/>
#### Using a deployment link
Creating a deployment link is the recommended way to deploy agents.<br/><br/>
The main benefit of this method is that the executable is generated only whenever the deployment download link is accessed, whereas with the other methods it's generated right away and the agent's version is hardcoded into the exe.<br/><br/>
Using a deployment link will allow you to not worry about installing using an older version of an agent, which will fail to install if you have updated your RMM to a version that is not compatible with an older installer you might have lying around.<br/><br/>
To create a deployment, from the web UI click **Agents > Manage Deployments**.<br/><br/>
![managedeployments](images/managedeployments.png)
!!!tip
Create a client/site named "Default" and create a deployment for it with a very long expiry to have a generic installer that can be deployed anytime at any client/site.<br/><br/>
You can then move the agent into the correct client/site from the web UI after it's been installed.
Copy/paste the download link from the deployment into your browser. It will take a few seconds to dynamically generate the executable and then your browser will automatically download the exe.
#### Optional installer args
The following optional arguments can be passed to any of the installation method executables:
```
-log debug
```
Will print very verbose logging during agent install. Useful for troubleshooting agent install.
```
-silent
```
This will not pop up any message boxes during install, either any error messages or the "Installation was successful" message box that pops up at the end of a successful install.

View File

@@ -0,0 +1,72 @@
# Docker Setup
- Install docker and docker-compose
- Obtain valid wildcard certificate for your domain. If certificates are not provided, a self-signed certificate will be generated and most agent functions won't work. See below on how to generate a free Let's Encrypt!
## Generate certificates with certbot
Install Certbot
```
sudo apt-get install certbot
```
Generate the wildcard certificate. Add the DNS entry for domain validation. Replace `example.com` with your root domain
```
sudo certbot certonly --manual -d *.example.com --agree-tos --no-bootstrap --manual-public-ip-logging-ok --preferred-challenges dns
```
## Configure DNS and firewall
You will need to add DNS entries so that the three subdomains resolve to the IP of the docker host. There is a reverse proxy running that will route the hostnames to the correct container. On the host, you will need to ensure the firewall is open on tcp ports 80, 443 and 4222.
## Setting up the environment
Get the docker-compose and .env.example files on the host you wish to install on
```
wget https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/docker/docker-compose.yml
wget https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/docker/.env.example
mv .env.example .env
```
Change the values in .env to match your environment.
If you are supplying certificates through Let's Encrypt or another source, see the section below about base64 encoding the certificate files.
## Base64 encoding certificates to pass as env variables
Use the command below to add the correct values to the .env file.
Running this command multiple times will add redundant entries, so those will need to be removed.
Let's encrypt certs paths are below. Replace ${rootdomain} with your own.
public key
`/etc/letsencrypt/live/${rootdomain}/fullchain.pem`
private key
`/etc/letsencrypt/live/${rootdomain}/privkey.pem`
```
echo "CERT_PUB_KEY=$(sudo base64 -w 0 /path/to/pub/key)" >> .env
echo "CERT_PRIV_KEY=$(sudo base64 -w 0 /path/to/priv/key)" >> .env
```
## Starting the environment
Run the below command to start the environment.
```
sudo docker-compose up -d
```
Removing the -d will start the containers in the foreground and is useful for debugging.
## Get MeshCentral EXE download link
Run the below command to get the download link for the mesh central exe. This needs to be uploaded on first successful signin.
```
sudo docker-compose exec tactical-backend python manage.py get_mesh_exe_url
```

121
docs/docs/install_server.md Normal file
View File

@@ -0,0 +1,121 @@
# Installation
## Minimum requirements
- A fresh linux VM running either Ubuntu 20.04 or Debian 10, with a minimum of 2GB RAM.<br/>
!!!warning
The provided install script assumes a fresh server with no software installed on it. Attempting to run it on an existing server with other services **will** break things and the install will fail.<br/><br/>
The install script has been tested on the following public cloud providers: DigitalOcean, Linode, Vultr, BuyVM (highly recommended), Hetzner, AWS, Google Cloud and Azure, as well as behind NAT on Hyper-V, Proxmox and ESXi.
- A real domain is needed to generate a Let's Encrypt wildcard cert. <br/>If you cannot afford to purchase a domain ($12 a year) then you can get one for free at [freenom.com](https://www.freenom.com/)<br/><br/>
- A TOTP based authenticator app. Some popular ones are Google Authenticator, Authy and Microsoft Authenticator.<br/><br/>
## Install
#### Run updates and setup the linux user
SSH into the server as **root**.<br/><br/>
Download and run the prereqs and latest updates<br/>
```bash
apt update
apt install -y wget curl sudo
apt -y upgrade
```
If a new kernel is installed, then reboot the server with the `reboot` command<br/><br/>
Create a linux user to run the rmm and add it to the sudoers group.<br/>For this example we'll be using a user named `tactical` but feel free to create whatever name you want.
```bash
adduser tactical
usermod -a -G sudo tactical
```
!!!tip
[Enable passwordless sudo to make your life easier](https://linuxconfig.org/configure-sudo-without-password-on-ubuntu-20-04-focal-fossa-linux)
#### Setup the firewall (optional but highly recommended)
!!!info
Skip this step if your VM is __not__ publicly exposed to the world e.g. running behind NAT. You should setup the firewall rules in your router instead (ports 22, 443 and 4222 TCP).
```bash
ufw default deny incoming
ufw default allow outgoing
ufw allow https
ufw allow proto tcp from any to any port 4222
```
!!!info
SSH is only required for you to remotely login and do basic linux server administration for your rmm. It is not needed for any agent communication.<br/>
Allow ssh from everywhere (__not__ recommended)
```bash
ufw allow ssh
```
Allow ssh from only allowed IP's (__highly__ recommended)
```bash
ufw allow from X.X.X.X to any port 22
ufw allow from X.X.X.X to any port 22
```
Enable and activate the firewall
```
ufw enable && ufw reload
```
#### Create the A records
We'll be using `example.com` as our domain for this example.
!!!info
The RMM uses 3 different sites. The Vue frontend e.g. `rmm.example.com` which is where you'll be accessing your RMM from the browser, the REST backend e.g. `api.example.com` and Meshcentral e.g. `mesh.example.com`
Get the public IP of your server with `curl icanhazip.com`<br/>
Open the DNS manager of wherever the domain you purchased is hosted.<br/>
Create 3 A records: `rmm`, `api` and `mesh` and point them to the public IP of your server:
![arecords](images/arecords.png)
#### Run the install script
Switch to the `tactical` user
```bash
su - tactical
```
Download and run the install script
```bash
wget https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/install.sh
chmod +x install.sh
./install.sh
```
Answer the initial questions when prompted. Replace `example.com` with your domain.
![questions](images/install_questions.png)
#### Deploy the TXT record in your DNS manager:
!!!warning
TXT records can take anywhere from 1 minute to a few hours to propagate depending on your DNS provider.<br/>
You should verify the TXT record has been deployed first before pressing Enter.<br/>
A quick way to check is with the following command:<br/> `dig -t txt _acme-challenge.example.com`
![txtrecord](images/txtrecord.png)
![dnstxt](images/dnstxt.png)
Create a login for the RMM web UI:
![rmmlogin](images/rmmlogin.png)
A bunch of URLS / usernames / passwords will be printed out at the end of the install script. Save these somewhere safe.
Copy the url for the meshagent exe (`https://mesh.example.com/agentinvite?c=......`), paste it in your browser and download the mesh agent:
![meshagentdl](images/meshagentdl.png)
Navigate to `https://rmm.example.com` and login with the username/password you created during install.<br/><br/>
Once logged in, you will be redirected to the initial setup page.<br/><br/>
Create your first client/site, choose the default timezone and then upload the mesh agent you just downloaded.

21
docs/docs/license.md Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2019-present wh1te909
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,28 @@
# MeshCentral Integration
#### Overview
Tactical RMM integrates with [MeshCentral](https://github.com/Ylianst/MeshCentral) for the following 3 functions:
- Take Control
- Real time shell
- Real time file browser
At some point in the future, these functions will be directly built into the Tactical Agent, removing the need for MeshCentral.
It should be noted that Tactical RMM and MeshCentral are 2 completely separate products and can run independently of each other.
They do not even have to run on the same box, however when you install Tactical RMM it simply installs meshcentral for you with some preconfigured settings to allow integration.
It is highly recommended to use the MeshCentral instance that Tactical installs, since it allows the developers more control over it and to ensure things don't break.
#### How does it work
MeshCentral has an embedding feature that allows integration into existing products.
See *Section 14 - Embedding MeshCentral* in the [MeshCentral User Guide](https://info.meshcentral.com/downloads/MeshCentral2/MeshCentral2UserGuide.pdf) for a detailed explanation of how this works.
The Tactical RMM Agent keeps track of your Mesh Agents, and periodically interacts with them to synchronize the mesh agent's unique ID with the tactical rmm database.
When you do a take control / terminal / file browser on an agent using the Tactical UI, behind the scenes, Tactical generates a login token for meshcentral's website and then "wraps" MeshCentral's UI in an iframe for that specific agent only, using its unique ID to know which agent to render in the iframe.

33
docs/docs/restore.md Normal file
View File

@@ -0,0 +1,33 @@
# Restore
!!!info
It is currently not possible to restore to a different domain/subdomain, only to a different physical or virtual server.
#### Prepare the new server
Create the same exact linux user account as you did when you installed the original server.
Add it to the sudoers group and setup the firewall.
Refer to the [installation instructions](install_server.md) for steps on how to do all of the above.
#### Change DNS A records
Open the DNS manager of wherever your domain is hosted.
Change the 3 A records `rmm`, `api` and `mesh` and point them to the public IP of your new server.
#### Run the restore script
Copy the backup tar file you created during [backup](backup.md) to the new server.
Download the restore script.
```bash
wget https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/restore.sh
```
Call the restore script, passing it the backup file as the first argument:
```bash
chmod +x restore.sh
./restore.sh rmm-backup-XXXXXXXXX.tar
```

View File

@@ -0,0 +1,80 @@
# Troubleshooting
#### "Bad credentials" error when trying to login to the Web UI
If you are sure you are using the correct credentials and still getting a "bad credentials" error, open your browser's dev tools (ctrl + shift + j on chrome) and check the Console tab to see the real error.
It will most probably be a CORS error which means you need to check your DNS settings and make sure whatever computer you're trying to access the UI from resolves your 3 subdomains to the correct IP of the server running the RMM (public IP if running in the cloud, or private IP if running behind NAT).
If you see an error about SSL or certificate expired, then your Let's Encrypt cert has probably expired and you'll need to renew it.
Refer to the Let's Encrypt cert renewal instructions [here](update_server.md#keeping-your-lets-encrypt-certificate-up-to-date)
<br/>
#### Agents not updating
The most common problem we've seen of agents not updating is due to Antivirus blocking the updater executable.
Windows Defender will 100% of the time block the updater from running unless an exclusion is set.
Refer to the [Agent Installation](install_agent.md) instructions for AV exceptions to set and manually doing an agent update with logging to troubleshoot further.
Agents will also not automatically update if they are too old.
Since Tactical RMM is still in alpha and the developers make breaking changes pretty frequently, there is no promise of backwards compatibility.
If you have agents that are relatively old, you will need to uninstall them manually and reinstall using the latest version.
<br/>
#### Agents not checking in or showing up / General agent issues
Open CMD as admin on the problem computer and stop the agent services:
```cmd
net stop tacticalagent
net stop tacticalrpc
```
Run the tacticalagent service manually with debug logging:
```cmd
"C:\Program Files\TacticalAgent\tacticalrmm.exe" -m winagentsvc -log debug -logto stdout
```
Run the tacticalrpc service manually with debug logging:
```cmd
"C:\Program Files\TacticalAgent\tacticalrmm.exe" -m rpc -log debug -logto stdout
```
This will print out a ton of info. You should be able to see the error from the debug log output.
Please then copy/paste the logs and post them either in our [Discord support chat](https://discord.gg/upGTkWp), or create a [github issue](https://github.com/wh1te909/tacticalrmm/issues).
#### Web UI frozen or not loading / website errors / general errors
Check all the systemd services that the rmm uses to function and check to make sure they're all active/running and enabled:
```bash
sudo systemctl status rmm
sudo systemctl status celery
sudo systemctl status celerybeat
sudo systemctl status nginx
sudo systemctl status nats
sudo systemctl status natsapi
sudo systemctl status meshcentral
sudo systemctl status mongod
sudo systemctl status postgresql
sudo systemctl status redis
```
Read through the log files in the following folders and check for errors:
```bash
/rmm/api/tacticalrmm/tacticalrmm/private/log
/var/log/celery
```

View File

@@ -0,0 +1,39 @@
# Updating Agents
!!!warning
The agent's version is directly tied to the RMM's version.<br/><br/>
For example, currently RMM version 0.4.17 is compatible with agent version 1.4.6 and lower.<br/><br/>
You should never attempt to manually update an agent to a newer version without first making sure your RMM is on the latest version.
#### Updating from the Web UI
Agents will automatically self update themselves if you have auto self update enabled in **Settings > Global Settings**<br/><br/>
![autoagentupdate](images/autoagentupdate.png)
There is a background job that runs every hour, at 35 minutes past the hour and sends any online agents an update command if it detects they are on an older version.<br/><br/>
You can also trigger this background job to run on demand by clicking **Agents > Update Agents** in the web UI:<br/><br/>
![manualagentupdate](images/manualagentupdate.png)
You can individually choose which agents to update, or simply Select All.<br/><br/>
The RMM will automatically skip any agents that don't need updating.<br/><br/>
You can trigger this manual agent update anytime you want. It is safe to spam, and won't run if an agent update task is already running.<br/><br/>
It will also make sure agents update to the correct version, in case they are an older version that cannot be directly upgraded to the latest version.<br/><br/>
For example, agents older than version 1.3.0 must first be updated to 1.3.0 before they can go any further.<br/>
<br/>
#### Manually updating from the command line on the agent
You should never need to do this but might be needed to troubleshoot agents that are not updating automatically.<br/>
Download the `winagent-vX.X.X.exe` executable from the [github releases page](https://github.com/wh1te909/rmmagent/releases) and place it somewhere on the filesystem.<br/>
Open CMD as admin and call the exe like so:
```
C:\Windows\Temp>winagent-vX.X.X.exe /VERYSILENT /LOG=agentupdate.txt
```
This command will return immediately since it spawns a background process to run the update.<br/>
The agent will take around 30 seconds to fully update.<br/><br/>
You can check the `agentupdate.txt` log file that is created for troubleshooting.<br/><br/>

View File

@@ -0,0 +1,65 @@
# Updating the RMM
#### Keeping your linux server up to date
You should periodically run `sudo apt update` and `sudo apt -y upgrade` to keep your server up to date.
You can also update `npm` if prompted to by a message that might appear when running the `update.sh` script.
Other than this, you should avoid making any changes to your server and let the `update.sh` script handle everything else for you.
#### Updating to the latest RMM version
!!!danger
Do __not__ attempt to manually edit the update script or any configuration files unless specifically told to by one of the developers.<br/><br/>
Since this software is completely self hosted and we have no access to your server, we have to assume you have not made any config changes to any of the files or services on your server, and the update script will assume this.<br/><br/>
You should also **never** attempt to automate running the update script via cron.<br/><br/>
The update script will update itself if needed to the latest version when you run it, and then prompt you to run it again.<br/><br/>
Sometimes, manual intervention will be required during an update in the form of yes/no prompts, so attempting to automate this will ignore these prompts and cause your installation to break.
SSH into your server as the linux user you created during install.<br/><br/>
__Never__ run any update scripts or commands as the `root` user.<br/>This will mess up permissions and break your installation.<br/><br/>
Download the update script and run it:<br/>
```bash
tactical@tacrmm:~$ wget https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/update.sh
tactical@tacrmm:~$ chmod +x update.sh
tactical@tacrmm:~$ ./update.sh
```
<br/>
If you are already on the latest version, the update script will notify you of this and return immediately.<br/><br/>
You can pass the optional `--force` flag to the update script to forcefully run through an update, which will bypass the check for latest version.<br/>
```bash
tactical@tacrmm:~$ ./update.sh --force
```
This is useful for a botched update that might not have completed fully.<br/><br/>
The update script will also fix any permissions that might have gotten messed up during a botched update, or if you accidentally ran the update script as the `root` user.
<br/>
!!!warning
Do __not__ attempt to manually update MeshCentral to a newer version.
You should let the `update.sh` script handle this for you.
The developers will test MeshCentral and make sure integration does not break before bumping the mesh version.
#### Keeping your Let's Encrypt certificate up to date
!!!info
Currently, the update script does not automatically renew your Let's Encrypt wildcard certificate, which expires every 3 months, since this is non-trivial to automate using the DNS TXT record method.
To renew your Let's Encrypt wildcard cert, run the following command, replacing `example.com` with your domain and `admin@example.com` with your email:
```bash
sudo certbot certonly --manual -d *.example.com --agree-tos --no-bootstrap --manual-public-ip-logging-ok --preferred-challenges dns -m admin@example.com --no-eff-email
```
Same instructions as during install for [verifying the TXT record](install_server.md#deploy-the-txt-record-in-your-dns-manager) has propagated before hitting Enter.
After you have renewed the cert, simply run the `update.sh` script, passing it the `--force` flag.
```bash
./update.sh --force
```

View File

@@ -1,9 +1,29 @@
site_name: "Tactical RMM"
site_name: "Tactical RMM Documentation"
nav:
- Home: index.md
site_description: "A remote monitoring and management tool for Windows computers"
- RMM Installation:
- "Traditional Install": install_server.md
- "Docker Install": install_docker.md
- Agent Installation: install_agent.md
- Updating:
- "Updating the RMM": update_server.md
- "Updating Agents": update_agents.md
- Functionality:
- "Remote Background": functions/remote_bg.md
- "Maintenance Mode": functions/maintenance_mode.md
- "Alerting": alerting.md
- "User Interface Preferences": functions/user_ui.md
- Backup: backup.md
- Restore: restore.md
- Troubleshooting: troubleshooting.md
- FAQ: faq.md
- MeshCentral Integration: mesh_integration.md
- License: license.md
site_description: "A remote monitoring and management tool"
site_author: "wh1te909"
dev_addr: "0.0.0.0:8005"
# Repository
repo_name: "wh1te909/tacticalrmm"
repo_url: "https://github.com/wh1te909/tacticalrmm"
@@ -11,7 +31,6 @@ edit_uri: ""
theme:
name: "material"
custom_dir: "theme"
logo: "images/onit.ico"
favicon: "images/favicon.ico"
language: "en"

View File

@@ -1,4 +0,0 @@
{% extends "base.html" %}
{% block site_nav %}
{{ super() }}
{% endblock %}

View File

@@ -1,70 +0,0 @@
{% import "partials/language.html" as lang with context %}
<!-- Application footer -->
<footer class="md-footer">
<!-- Link to previous and/or next page -->
{% if page.previous_page or page.next_page %}
<div class="md-footer-nav">
<nav class="md-footer-nav__inner md-grid">
<!-- Link to previous page -->
{% if page.previous_page %}
<a href="{{ page.previous_page.url | url }}" title="{{ page.previous_page.title }}"
class="md-flex md-footer-nav__link md-footer-nav__link--prev" rel="prev">
<div class="md-flex__cell md-flex__cell--shrink">
<i class="md-icon md-icon--arrow-back
md-footer-nav__button"></i>
</div>
<div class="md-flex__cell md-flex__cell--stretch
md-footer-nav__title">
<span class="md-flex__ellipsis">
<span class="md-footer-nav__direction">
{{ lang.t("footer.previous") }}
</span>
{{ page.previous_page.title }}
</span>
</div>
</a>
{% endif %}
<!-- Link to next page -->
{% if page.next_page %}
<a href="{{ page.next_page.url | url }}" title="{{ page.next_page.title }}"
class="md-flex md-footer-nav__link md-footer-nav__link--next" rel="next">
<div class="md-flex__cell md-flex__cell--stretch
md-footer-nav__title">
<span class="md-flex__ellipsis">
<span class="md-footer-nav__direction">
{{ lang.t("footer.next") }}
</span>
{{ page.next_page.title }}
</span>
</div>
<div class="md-flex__cell md-flex__cell--shrink">
<i class="md-icon md-icon--arrow-forward
md-footer-nav__button"></i>
</div>
</a>
{% endif %}
</nav>
</div>
{% endif %}
<!-- Further information -->
<div class="md-footer-meta md-typeset">
<div class="md-footer-meta__inner md-grid">
<!-- Copyright and theme information -->
<div class="md-footer-copyright">
{% if config.copyright %}
<div class="md-footer-copyright__highlight">
{{ config.copyright }}
</div>
{% endif %}
</div>
<!-- Social links -->
{% include "partials/social.html" %}
</div>
</div>
</footer>

View File

@@ -1,6 +1,6 @@
#!/bin/bash
SCRIPT_VERSION="39"
SCRIPT_VERSION="40"
SCRIPT_URL='https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/install.sh'
sudo apt install -y curl wget dirmngr gnupg lsb-release
@@ -226,12 +226,23 @@ sudo apt install -y mongodb-org
sudo systemctl enable mongod
sudo systemctl restart mongod
print_green 'Installing Python 3.9'
sudo apt install -y build-essential zlib1g-dev libncurses5-dev libgdbm-dev libnss3-dev libssl-dev libreadline-dev libffi-dev libsqlite3-dev libbz2-dev
numprocs=$(nproc)
cd ~
wget https://www.python.org/ftp/python/3.9.2/Python-3.9.2.tgz
tar -xf Python-3.9.2.tgz
cd Python-3.9.2
./configure --enable-optimizations
make -j $numprocs
sudo make altinstall
cd ~
sudo rm -rf Python-3.9.2 Python-3.9.2.tgz
print_green 'Installing python, redis and git'
sudo apt update
sudo apt install -y python3-venv python3-dev python3-pip python3-setuptools python3-wheel ca-certificates redis git
print_green 'Installing redis and git'
sudo apt install -y ca-certificates redis git
print_green 'Installing postgresql'
@@ -371,11 +382,11 @@ sudo chmod +x /usr/local/bin/nats-api
print_green 'Installing the backend'
cd /rmm/api
python3 -m venv env
python3.9 -m venv env
source /rmm/api/env/bin/activate
cd /rmm/api/tacticalrmm
pip install --no-cache-dir --upgrade pip
pip install --no-cache-dir setuptools==52.0.0 wheel==0.36.2
pip install --no-cache-dir setuptools==53.0.0 wheel==0.36.2
pip install --no-cache-dir -r /rmm/api/tacticalrmm/requirements.txt
python manage.py migrate
python manage.py collectstatic --no-input

View File

@@ -1,15 +1,9 @@
#!/bin/bash
#####################################################
pgusername="changeme"
pgpw="hunter2"
#####################################################
SCRIPT_VERSION="17"
SCRIPT_VERSION="19"
SCRIPT_URL='https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/restore.sh'
sudo apt update
sudo apt install -y curl wget dirmngr gnupg lsb-release
GREEN='\033[0;32m'
@@ -31,12 +25,6 @@ fi
rm -f $TMP_FILE
if [[ "$pgusername" == "changeme" || "$pgpw" == "hunter2" ]]; then
printf >&2 "${RED}You must change the postgres username/password at the top of this file.${NC}\n"
printf >&2 "${RED}Check the github readme for where to find them.${NC}\n"
exit 1
fi
osname=$(lsb_release -si); osname=${osname^}
osname=$(echo "$osname" | tr '[A-Z]' '[a-z]')
fullrel=$(lsb_release -sd)
@@ -192,9 +180,23 @@ print_green 'Restoring systemd services'
sudo cp $tmp_dir/systemd/* /etc/systemd/system/
sudo systemctl daemon-reload
print_green 'Installing python, redis and git'
print_green 'Installing Python 3.9'
sudo apt install -y python3-venv python3-dev python3-pip python3-setuptools python3-wheel ca-certificates redis git
sudo apt install -y build-essential zlib1g-dev libncurses5-dev libgdbm-dev libnss3-dev libssl-dev libreadline-dev libffi-dev libsqlite3-dev libbz2-dev
numprocs=$(nproc)
cd ~
wget https://www.python.org/ftp/python/3.9.2/Python-3.9.2.tgz
tar -xf Python-3.9.2.tgz
cd Python-3.9.2
./configure --enable-optimizations
make -j $numprocs
sudo make altinstall
cd ~
sudo rm -rf Python-3.9.2 Python-3.9.2.tgz
print_green 'Installing redis and git'
sudo apt install -y ca-certificates redis git
print_green 'Installing postgresql'
@@ -204,18 +206,7 @@ sudo apt update
sudo apt install -y postgresql-13
sleep 2
print_green 'Restoring the database'
sudo -u postgres psql -c "DROP DATABASE IF EXISTS tacticalrmm"
sudo -u postgres psql -c "CREATE DATABASE tacticalrmm"
sudo -u postgres psql -c "CREATE USER ${pgusername} WITH PASSWORD '${pgpw}'"
sudo -u postgres psql -c "ALTER ROLE ${pgusername} SET client_encoding TO 'utf8'"
sudo -u postgres psql -c "ALTER ROLE ${pgusername} SET default_transaction_isolation TO 'read committed'"
sudo -u postgres psql -c "ALTER ROLE ${pgusername} SET timezone TO 'UTC'"
sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE tacticalrmm TO ${pgusername}"
gzip -d $tmp_dir/postgres/*.psql.gz
PGPASSWORD=${pgpw} psql -h localhost -U ${pgusername} -d tacticalrmm -f $tmp_dir/postgres/db*.psql
print_green 'Restoring MongoDB'
@@ -267,12 +258,28 @@ sudo cp /rmm/natsapi/bin/nats-api /usr/local/bin
sudo chown ${USER}:${USER} /usr/local/bin/nats-api
sudo chmod +x /usr/local/bin/nats-api
print_green 'Restoring the database'
pgusername=$(grep -w USER /rmm/api/tacticalrmm/tacticalrmm/local_settings.py | sed 's/^.*: //' | sed 's/.//' | sed -r 's/.{2}$//')
pgpw=$(grep -w PASSWORD /rmm/api/tacticalrmm/tacticalrmm/local_settings.py | sed 's/^.*: //' | sed 's/.//' | sed -r 's/.{2}$//')
sudo -u postgres psql -c "DROP DATABASE IF EXISTS tacticalrmm"
sudo -u postgres psql -c "CREATE DATABASE tacticalrmm"
sudo -u postgres psql -c "CREATE USER ${pgusername} WITH PASSWORD '${pgpw}'"
sudo -u postgres psql -c "ALTER ROLE ${pgusername} SET client_encoding TO 'utf8'"
sudo -u postgres psql -c "ALTER ROLE ${pgusername} SET default_transaction_isolation TO 'read committed'"
sudo -u postgres psql -c "ALTER ROLE ${pgusername} SET timezone TO 'UTC'"
sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE tacticalrmm TO ${pgusername}"
gzip -d $tmp_dir/postgres/*.psql.gz
PGPASSWORD=${pgpw} psql -h localhost -U ${pgusername} -d tacticalrmm -f $tmp_dir/postgres/db*.psql
cd /rmm/api
python3 -m venv env
python3.9 -m venv env
source /rmm/api/env/bin/activate
cd /rmm/api/tacticalrmm
pip install --no-cache-dir --upgrade pip
pip install --no-cache-dir setuptools==52.0.0 wheel==0.36.2
pip install --no-cache-dir setuptools==53.0.0 wheel==0.36.2
pip install --no-cache-dir -r /rmm/api/tacticalrmm/requirements.txt
python manage.py collectstatic --no-input
python manage.py reload_nats

View File

@@ -1,6 +1,6 @@
#!/bin/bash
SCRIPT_VERSION="108"
SCRIPT_VERSION="112"
SCRIPT_URL='https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/update.sh'
LATEST_SETTINGS_URL='https://raw.githubusercontent.com/wh1te909/tacticalrmm/master/api/tacticalrmm/tacticalrmm/settings.py'
YELLOW='\033[1;33m'
@@ -22,6 +22,13 @@ fi
rm -f $TMP_FILE
force=false
if [[ $* == *--force* ]]; then
force=true
fi
sudo apt update
if [ $EUID -eq 0 ]; then
echo -ne "\033[0;31mDo NOT run this script as root. Exiting.\e[0m\n"
exit 1
@@ -114,7 +121,7 @@ SETTINGS_FILE="/rmm/api/tacticalrmm/tacticalrmm/settings.py"
LATEST_TRMM_VER=$(grep "^TRMM_VERSION" "$TMP_SETTINGS" | awk -F'[= "]' '{print $5}')
CURRENT_TRMM_VER=$(grep "^TRMM_VERSION" "$SETTINGS_FILE" | awk -F'[= "]' '{print $5}')
if [[ "${CURRENT_TRMM_VER}" == "${LATEST_TRMM_VER}" ]]; then
if [[ "${CURRENT_TRMM_VER}" == "${LATEST_TRMM_VER}" ]] && ! [[ "$force" = true ]]; then
printf >&2 "${GREEN}Already on latest version. Current version: ${CURRENT_TRMM_VER} Latest version: ${LATEST_TRMM_VER}${NC}\n"
rm -f $TMP_SETTINGS
exit 0
@@ -188,6 +195,22 @@ if ! [[ $CHECK_HAS_GO116 ]]; then
sudo chown -R $USER:$GROUP /home/${USER}/.cache
fi
HAS_PY39=$(which python3.9)
if ! [[ $HAS_PY39 ]]; then
printf >&2 "${GREEN}Updating to Python 3.9${NC}\n"
sudo apt install -y build-essential zlib1g-dev libncurses5-dev libgdbm-dev libnss3-dev libssl-dev libreadline-dev libffi-dev libsqlite3-dev libbz2-dev
numprocs=$(nproc)
cd ~
wget https://www.python.org/ftp/python/3.9.2/Python-3.9.2.tgz
tar -xf Python-3.9.2.tgz
cd Python-3.9.2
./configure --enable-optimizations
make -j $numprocs
sudo make altinstall
cd ~
sudo rm -rf Python-3.9.2 Python-3.9.2.tgz
fi
cd /rmm
git config user.email "admin@example.com"
git config user.name "Bob"
@@ -256,14 +279,14 @@ sudo cp /rmm/natsapi/bin/nats-api /usr/local/bin
sudo chown ${USER}:${USER} /usr/local/bin/nats-api
sudo chmod +x /usr/local/bin/nats-api
if [[ "${CURRENT_PIP_VER}" != "${LATEST_PIP_VER}" ]]; then
if [[ "${CURRENT_PIP_VER}" != "${LATEST_PIP_VER}" ]] || [[ "$force" = true ]]; then
rm -rf /rmm/api/env
cd /rmm/api
python3 -m venv env
python3.9 -m venv env
source /rmm/api/env/bin/activate
cd /rmm/api/tacticalrmm
pip install --no-cache-dir --upgrade pip
pip install --no-cache-dir setuptools==52.0.0 wheel==0.36.2
pip install --no-cache-dir setuptools==53.0.0 wheel==0.36.2
pip install --no-cache-dir -r requirements.txt
else
source /rmm/api/env/bin/activate
@@ -283,7 +306,7 @@ deactivate
rm -rf /rmm/web/dist
rm -rf /rmm/web/.quasar
cd /rmm/web
if [[ "${CURRENT_NPM_VER}" != "${LATEST_NPM_VER}" ]]; then
if [[ "${CURRENT_NPM_VER}" != "${LATEST_NPM_VER}" ]] || [[ "$force" = true ]]; then
rm -rf /rmm/web/node_modules
fi
@@ -300,7 +323,7 @@ sudo systemctl start ${i}
done
CURRENT_MESH_VER=$(cd /meshcentral/node_modules/meshcentral && node -p -e "require('./package.json').version")
if [[ "${CURRENT_MESH_VER}" != "${LATEST_MESH_VER}" ]]; then
if [[ "${CURRENT_MESH_VER}" != "${LATEST_MESH_VER}" ]] || [[ "$force" = true ]]; then
printf >&2 "${GREEN}Updating meshcentral from ${CURRENT_MESH_VER} to ${LATEST_MESH_VER}${NC}\n"
sudo systemctl stop meshcentral
sudo chown ${USER}:${USER} -R /meshcentral
@@ -308,7 +331,6 @@ if [[ "${CURRENT_MESH_VER}" != "${LATEST_MESH_VER}" ]]; then
rm -rf node_modules/
npm install meshcentral@${LATEST_MESH_VER}
sudo systemctl start meshcentral
sleep 10
fi
rm -f $TMP_SETTINGS

View File

@@ -125,7 +125,7 @@ export default {
this.$axios
.patch(`/checks/history/${this.check.id}/`, { timeFilter: this.timeFilter })
.then(r => {
this.history = r.data;
this.history = Object.freeze(r.data);
// save copy of data to reference results in chart tooltip
if (
@@ -133,7 +133,7 @@ export default {
this.check.check_type !== "memory" ||
this.check.check_type !== "diskspace"
) {
this.results = r.data;
this.results = Object.freeze(r.data);
}
this.$q.loading.hide();
@@ -171,6 +171,8 @@ export default {
strokeDashArray: 0,
borderColor: "#C10015",
label: {
position: "left",
offsetX: 100,
borderColor: "#C10015",
style: {
color: "#FFF",
@@ -188,6 +190,8 @@ export default {
strokeDashArray: 0,
borderColor: "#ff9800",
label: {
position: "left",
offsetX: 100,
borderColor: "#ff9800",
style: {
color: "#FFF",

View File

@@ -50,7 +50,7 @@
<q-card-section>
<q-select
dense
label="Informational return codes (press Enter after typing each argument)"
label="Informational return codes (press Enter after typing each code)"
filled
v-model="scriptcheck.info_return_codes"
use-input
@@ -65,7 +65,7 @@
<q-card-section>
<q-select
dense
label="Warning return codes (press Enter after typing each argument)"
label="Warning return codes (press Enter after typing each code)"
filled
v-model="scriptcheck.warning_return_codes"
use-input

View File

@@ -32,6 +32,9 @@
<q-checkbox v-model="prune_tables" val="pending_actions" label="Pending Actions">
<q-tooltip>Removes completed pending actions</q-tooltip>
</q-checkbox>
<q-checkbox v-model="prune_tables" val="alerts" label="Alerts">
<q-tooltip>Removes all alerts</q-tooltip>
</q-checkbox>
</q-card-section>
<q-card-actions align="left">

View File

@@ -114,7 +114,7 @@
<div class="col-3">Recipients</div>
<div class="col-4"></div>
<div class="col-5">
<q-list bordered dense v-if="ready && settings.email_alert_recipients.length !== 0">
<q-list dense v-if="ready && settings.email_alert_recipients.length !== 0">
<q-item
v-for="email in settings.email_alert_recipients"
:key="email"
@@ -221,7 +221,7 @@
<div class="col-3">Recipients</div>
<div class="col-4"></div>
<div class="col-5">
<q-list bordered dense v-if="ready && settings.sms_alert_recipients.length !== 0">
<q-list dense v-if="ready && settings.sms_alert_recipients.length !== 0">
<q-item
v-for="num in settings.sms_alert_recipients"
:key="num"

View File

@@ -19,7 +19,7 @@
<div class="text-subtitle2">User Interface</div>
<hr />
<q-card-section class="row">
<div class="col-6">Agent table double-click action:</div>
<div class="col-6">Agent double-click action:</div>
<div class="col-2"></div>
<q-select
map-options

View File

@@ -120,7 +120,13 @@
</template>
</q-input>
<div class="q-gutter-sm">
<q-checkbox v-model="autotask.remove_if_not_scheduled" label="Delete task after schedule date" />
<q-checkbox v-model="autotask.remove_if_not_scheduled" label="Delete task after scheduled date" />
</div>
<div class="q-gutter-sm">
<q-checkbox
v-model="autotask.run_asap_after_missed"
label="Run task ASAP after a scheduled start is missed (requires agent v1.4.7)"
/>
</div>
</div>
<div class="col-1"></div>
@@ -150,7 +156,7 @@
@click="addTask"
label="Add Task"
/>
<q-btn v-else @click="$refs.stepper.next()" color="primary" label="Next" />
<q-btn v-else @click="step2" color="primary" label="Next" />
<q-btn v-if="step > 1" flat color="primary" @click="$refs.stepper.previous()" label="Back" class="q-ml-sm" />
</q-stepper-navigation>
</template>
@@ -181,6 +187,7 @@ export default {
run_time_minute: null,
run_time_date: null,
remove_if_not_scheduled: false,
run_asap_after_missed: true,
task_type: "scheduled",
timeout: 120,
alert_severity: "info",
@@ -254,6 +261,15 @@ export default {
this.notifyError("Unable to get policy checks");
});
},
step2() {
if (this.step1Done) {
this.$refs.stepper.next();
} else {
if (!this.autotask.script) this.notifyError("Script field is required");
else if (!this.autotask.name) this.notifyError("Name field is required");
else if (!this.autotask.timeout) this.notifyError("Timeout field is required");
}
},
},
computed: {
...mapGetters(["selectedAgentPk"]),
@@ -270,9 +286,7 @@ export default {
return r.sort((a, b) => a.label.localeCompare(b.label));
},
step1Done() {
return this.step > 1 && this.autotask.script !== null && this.autotask.name && this.autotask.timeout
? true
: false;
return !!this.autotask.script && !!this.autotask.name && !!this.autotask.timeout ? true : false;
},
step2Done() {
if (this.autotask.task_type === "scheduled") {