From f0b028cb77ed76e0b17dbfaa792da637e166c27e Mon Sep 17 00:00:00 2001 From: Muhammad Ibrahim Date: Sat, 8 Nov 2025 22:00:34 +0000 Subject: [PATCH 1/3] alpine support on the agent installation script --- agents/patchmon_install.sh | 274 +++++++++++++++++++++++++++---------- 1 file changed, 202 insertions(+), 72 deletions(-) diff --git a/agents/patchmon_install.sh b/agents/patchmon_install.sh index 0dd3adf..3a7e7a8 100644 --- a/agents/patchmon_install.sh +++ b/agents/patchmon_install.sh @@ -1,7 +1,32 @@ -#!/bin/bash +#!/bin/sh +# PatchMon Agent Installation Script +# This script requires bash for full functionality +# Usage: curl -s {PATCHMON_URL}/api/v1/hosts/install -H "X-API-ID: {API_ID}" -H "X-API-KEY: {API_KEY}" | sh + +# Check if bash is available, if not try to install it (for Alpine Linux) +if ! command -v bash >/dev/null 2>&1; then + if command -v apk >/dev/null 2>&1; then + echo "Installing bash for script compatibility..." + apk add --no-cache bash >/dev/null 2>&1 || true + fi +fi + +# If bash is available and we're not already running in bash, switch to bash +# When piped, we can't re-execute easily, so we'll continue with sh +# but ensure bash is available for bash-specific features +if command -v bash >/dev/null 2>&1 && [ -z "${BASH_VERSION:-}" ]; then + # Check if we're being piped (stdin is not a terminal) + if [ -t 0 ]; then + # Direct execution, re-execute with bash + exec bash "$0" "$@" + exit $? + fi + # When piped, we continue with sh but bash is now available + # The script will use bash-specific features which should work if bash is installed +fi # PatchMon Agent Installation Script -# Usage: curl -s {PATCHMON_URL}/api/v1/hosts/install -H "X-API-ID: {API_ID}" -H "X-API-KEY: {API_KEY}" | bash +# Usage: curl -s {PATCHMON_URL}/api/v1/hosts/install -H "X-API-ID: {API_ID}" -H "X-API-KEY: {API_KEY}" | sh set -e @@ -36,7 +61,7 @@ warning() { } # Check if running as root -if [[ $EUID -ne 0 ]]; then +if [ "$(id -u)" -ne 0 ]; then error "This script must be run as root (use sudo)" fi @@ -45,8 +70,8 @@ verify_datetime() { info "๐Ÿ• Verifying system datetime and timezone..." # Get current system time - local system_time=$(date) - local timezone=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "Unknown") + system_time=$(date) + timezone=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "Unknown") # Display current datetime info echo "" @@ -56,14 +81,17 @@ verify_datetime() { echo "" # Check if we can read from stdin (interactive terminal) - if [[ -t 0 ]]; then + if [ -t 0 ]; then # Interactive terminal - ask user - read -p "Does this date/time look correct to you? (y/N): " -r response - if [[ "$response" =~ ^[Yy]$ ]]; then - success "โœ… Date/time verification passed" - echo "" - return 0 - else + printf "Does this date/time look correct to you? 
(y/N): " + read -r response + case "$response" in + [Yy]*) + success "โœ… Date/time verification passed" + echo "" + return 0 + ;; + *) echo "" echo -e "${RED}โŒ Date/time verification failed${NC}" echo "" @@ -72,9 +100,10 @@ verify_datetime() { echo " sudo timedatectl set-timezone 'America/New_York' # or your timezone" echo " sudo timedatectl list-timezones # to see available timezones" echo "" - echo -e "${BLUE}โ„น๏ธ After fixing the date/time, re-run this installation script.${NC}" - error "Installation cancelled - please fix date/time and re-run" - fi + echo -e "${BLUE}โ„น๏ธ After fixing the date/time, re-run this installation script.${NC}" + error "Installation cancelled - please fix date/time and re-run" + ;; + esac else # Non-interactive (piped from curl) - show warning and continue echo -e "${YELLOW}โš ๏ธ Non-interactive installation detected${NC}" @@ -121,9 +150,9 @@ cleanup_old_files # Generate or retrieve machine ID get_machine_id() { # Try multiple sources for machine ID - if [[ -f /etc/machine-id ]]; then + if [ -f /etc/machine-id ]; then cat /etc/machine-id - elif [[ -f /var/lib/dbus/machine-id ]]; then + elif [ -f /var/lib/dbus/machine-id ]; then cat /var/lib/dbus/machine-id else # Fallback: generate from hardware info (less ideal but works) @@ -132,12 +161,12 @@ get_machine_id() { } # Parse arguments from environment (passed via HTTP headers) -if [[ -z "$PATCHMON_URL" ]] || [[ -z "$API_ID" ]] || [[ -z "$API_KEY" ]]; then +if [ -z "$PATCHMON_URL" ] || [ -z "$API_ID" ] || [ -z "$API_KEY" ]; then error "Missing required parameters. This script should be called via the PatchMon web interface." fi # Auto-detect architecture if not explicitly set -if [[ -z "$ARCHITECTURE" ]]; then +if [ -z "$ARCHITECTURE" ]; then arch_raw=$(uname -m 2>/dev/null || echo "unknown") # Map architecture to supported values @@ -162,13 +191,16 @@ if [[ -z "$ARCHITECTURE" ]]; then fi # Validate architecture -if [[ "$ARCHITECTURE" != "amd64" && "$ARCHITECTURE" != "386" && "$ARCHITECTURE" != "arm64" && "$ARCHITECTURE" != "arm" ]]; then +if [ "$ARCHITECTURE" != "amd64" ] && [ "$ARCHITECTURE" != "386" ] && [ "$ARCHITECTURE" != "arm64" ] && [ "$ARCHITECTURE" != "arm" ]; then error "Invalid architecture '$ARCHITECTURE'. Must be one of: amd64, 386, arm64, arm" fi # Check if --force flag is set (for bypassing broken packages) FORCE_INSTALL="${FORCE_INSTALL:-false}" -if [[ "$*" == *"--force"* ]] || [[ "$FORCE_INSTALL" == "true" ]]; then +case "$*" in + *"--force"*) FORCE_INSTALL="true" ;; +esac +if [ "$FORCE_INSTALL" = "true" ]; then FORCE_INSTALL="true" warning "โš ๏ธ Force mode enabled - will bypass broken packages" fi @@ -224,7 +256,7 @@ install_apt_packages() { # Build apt-get command based on force mode local apt_cmd="apt-get install ${missing_packages[*]} -y" - if [[ "$FORCE_INSTALL" == "true" ]]; then + if [ "$FORCE_INSTALL" = "true" ]; then info "Using force mode - bypassing broken packages..." apt_cmd="$apt_cmd -o APT::Get::Fix-Broken=false -o DPkg::Options::=\"--force-confold\" -o DPkg::Options::=\"--force-confdef\"" fi @@ -240,7 +272,7 @@ install_apt_packages() { local all_ok=true for pkg in "${packages[@]}"; do if ! command_exists "$pkg"; then - if [[ "$FORCE_INSTALL" == "true" ]]; then + if [ "$FORCE_INSTALL" = "true" ]; then error "Critical dependency '$pkg' is not available even with --force. Please install manually." else error "Critical dependency '$pkg' is not available. 
Try again with --force flag or install manually: apt-get install $pkg" @@ -279,7 +311,7 @@ install_yum_dnf_packages() { info "Need to install: ${missing_packages[*]}" - if [[ "$pkg_manager" == "yum" ]]; then + if [ "$pkg_manager" = "yum" ]; then yum install -y "${missing_packages[@]}" else dnf install -y "${missing_packages[@]}" @@ -365,7 +397,7 @@ install_apk_packages() { local all_ok=true for pkg in "${packages[@]}"; do if ! command_exists "$pkg"; then - if [[ "$FORCE_INSTALL" == "true" ]]; then + if [ "$FORCE_INSTALL" = "true" ]; then error "Critical dependency '$pkg' is not available even with --force. Please install manually." else error "Critical dependency '$pkg' is not available. Try again with --force flag or install manually: apk add $pkg" @@ -391,7 +423,7 @@ if command -v apt-get >/dev/null 2>&1; then # Check for broken packages if dpkg -l | grep -q "^iH\|^iF" 2>/dev/null; then - if [[ "$FORCE_INSTALL" == "true" ]]; then + if [ "$FORCE_INSTALL" = "true" ]; then warning "Detected broken packages on system - force mode will work around them" else warning "โš ๏ธ Broken packages detected on system" @@ -446,7 +478,7 @@ echo "" info "๐Ÿ“ Setting up configuration directory..." # Check if configuration directory already exists -if [[ -d "/etc/patchmon" ]]; then +if [ -d "/etc/patchmon" ]; then warning "โš ๏ธ Configuration directory already exists at /etc/patchmon" warning "โš ๏ธ Preserving existing configuration files" @@ -463,8 +495,8 @@ fi # Check if agent is already configured and working (before we overwrite anything) info "๐Ÿ” Checking if agent is already configured..." -if [[ -f /etc/patchmon/config.yml ]] && [[ -f /etc/patchmon/credentials.yml ]]; then - if [[ -f /usr/local/bin/patchmon-agent ]]; then +if [ -f /etc/patchmon/config.yml ] && [ -f /etc/patchmon/credentials.yml ]; then + if [ -f /usr/local/bin/patchmon-agent ]; then info "๐Ÿ“‹ Found existing agent configuration" info "๐Ÿงช Testing existing configuration with ping..." @@ -495,7 +527,7 @@ fi info "๐Ÿ” Creating configuration files..." # Check if config file already exists -if [[ -f "/etc/patchmon/config.yml" ]]; then +if [ -f "/etc/patchmon/config.yml" ]; then warning "โš ๏ธ Config file already exists at /etc/patchmon/config.yml" warning "โš ๏ธ Moving existing file out of the way for fresh installation" @@ -508,7 +540,7 @@ if [[ -f "/etc/patchmon/config.yml" ]]; then fi # Check if credentials file already exists -if [[ -f "/etc/patchmon/credentials.yml" ]]; then +if [ -f "/etc/patchmon/credentials.yml" ]; then warning "โš ๏ธ Credentials file already exists at /etc/patchmon/credentials.yml" warning "โš ๏ธ Moving existing file out of the way for fresh installation" @@ -521,7 +553,7 @@ if [[ -f "/etc/patchmon/credentials.yml" ]]; then fi # Clean up old credentials file if it exists (from previous installations) -if [[ -f "/etc/patchmon/credentials" ]]; then +if [ -f "/etc/patchmon/credentials" ]; then warning "โš ๏ธ Found old credentials file, removing it..." rm -f /etc/patchmon/credentials info "๐Ÿ“‹ Removed old credentials file" @@ -557,7 +589,7 @@ info "๐Ÿ“ฅ Downloading PatchMon agent binary..." 
BINARY_NAME="patchmon-agent-linux-${ARCHITECTURE}" # Check if agent binary already exists -if [[ -f "/usr/local/bin/patchmon-agent" ]]; then +if [ -f "/usr/local/bin/patchmon-agent" ]; then warning "โš ๏ธ Agent binary already exists at /usr/local/bin/patchmon-agent" warning "โš ๏ธ Moving existing file out of the way for fresh installation" @@ -570,7 +602,7 @@ if [[ -f "/usr/local/bin/patchmon-agent" ]]; then fi # Clean up old shell script if it exists (from previous installations) -if [[ -f "/usr/local/bin/patchmon-agent.sh" ]]; then +if [ -f "/usr/local/bin/patchmon-agent.sh" ]; then warning "โš ๏ธ Found old shell script agent, removing it..." rm -f /usr/local/bin/patchmon-agent.sh info "๐Ÿ“‹ Removed old shell script agent" @@ -596,7 +628,7 @@ info "๐Ÿ“ Setting up log directory..." mkdir -p /etc/patchmon/logs # Handle existing log files -if [[ -f "/etc/patchmon/logs/patchmon-agent.log" ]]; then +if [ -f "/etc/patchmon/logs/patchmon-agent.log" ]; then warning "โš ๏ธ Existing log file found at /etc/patchmon/logs/patchmon-agent.log" warning "โš ๏ธ Rotating log file for fresh start" @@ -613,23 +645,26 @@ else error "โŒ Failed to validate API credentials or reach server" fi -# Step 5: Setup systemd service for WebSocket connection +# Step 5: Setup service for WebSocket connection # Note: The service will automatically send an initial report on startup (see serve.go) -info "๐Ÿ”ง Setting up systemd service..." - -# Stop and disable existing service if it exists -if systemctl is-active --quiet patchmon-agent.service 2>/dev/null; then - warning "โš ๏ธ Stopping existing PatchMon agent service..." - systemctl stop patchmon-agent.service -fi - -if systemctl is-enabled --quiet patchmon-agent.service 2>/dev/null; then - warning "โš ๏ธ Disabling existing PatchMon agent service..." - systemctl disable patchmon-agent.service -fi - -# Create systemd service file -cat > /etc/systemd/system/patchmon-agent.service << EOF +# Detect init system and create appropriate service +if command -v systemctl >/dev/null 2>&1; then + # Systemd is available + info "๐Ÿ”ง Setting up systemd service..." + + # Stop and disable existing service if it exists + if systemctl is-active --quiet patchmon-agent.service 2>/dev/null; then + warning "โš ๏ธ Stopping existing PatchMon agent service..." + systemctl stop patchmon-agent.service + fi + + if systemctl is-enabled --quiet patchmon-agent.service 2>/dev/null; then + warning "โš ๏ธ Disabling existing PatchMon agent service..." + systemctl disable patchmon-agent.service + fi + + # Create systemd service file + cat > /etc/systemd/system/patchmon-agent.service << EOF [Unit] Description=PatchMon Agent Service After=network.target @@ -651,25 +686,105 @@ SyslogIdentifier=patchmon-agent [Install] WantedBy=multi-user.target EOF + + # Clean up old crontab entries if they exist (from previous installations) + if crontab -l 2>/dev/null | grep -q "patchmon-agent"; then + warning "โš ๏ธ Found old crontab entries, removing them..." + crontab -l 2>/dev/null | grep -v "patchmon-agent" | crontab - + info "๐Ÿ“‹ Removed old crontab entries" + fi + + # Reload systemd and enable/start the service + systemctl daemon-reload + systemctl enable patchmon-agent.service + systemctl start patchmon-agent.service + + # Check if service started successfully + if systemctl is-active --quiet patchmon-agent.service; then + success "โœ… PatchMon Agent service started successfully" + info "๐Ÿ”— WebSocket connection established" + else + warning "โš ๏ธ Service may have failed to start. 
Check status with: systemctl status patchmon-agent" + fi + + SERVICE_TYPE="systemd" +elif [ -d /etc/init.d ] && command -v rc-service >/dev/null 2>&1; then + # OpenRC is available (Alpine Linux) + info "๐Ÿ”ง Setting up OpenRC service..." + + # Stop and disable existing service if it exists + if rc-service patchmon-agent status >/dev/null 2>&1; then + warning "โš ๏ธ Stopping existing PatchMon agent service..." + rc-service patchmon-agent stop + fi + + if rc-update show default 2>/dev/null | grep -q "patchmon-agent"; then + warning "โš ๏ธ Disabling existing PatchMon agent service..." + rc-update del patchmon-agent default + fi + + # Create OpenRC service file + cat > /etc/init.d/patchmon-agent << 'EOF' +#!/sbin/openrc-run -# Clean up old crontab entries if they exist (from previous installations) -if crontab -l 2>/dev/null | grep -q "patchmon-agent"; then - warning "โš ๏ธ Found old crontab entries, removing them..." - crontab -l 2>/dev/null | grep -v "patchmon-agent" | crontab - - info "๐Ÿ“‹ Removed old crontab entries" -fi +name="patchmon-agent" +description="PatchMon Agent Service" +command="/usr/local/bin/patchmon-agent" +command_args="serve" +command_user="root" +pidfile="/var/run/patchmon-agent.pid" +command_background="yes" +working_dir="/etc/patchmon" -# Reload systemd and enable/start the service -systemctl daemon-reload -systemctl enable patchmon-agent.service -systemctl start patchmon-agent.service - -# Check if service started successfully -if systemctl is-active --quiet patchmon-agent.service; then - success "โœ… PatchMon Agent service started successfully" - info "๐Ÿ”— WebSocket connection established" +depend() { + need net + after net +} +EOF + + chmod +x /etc/init.d/patchmon-agent + + # Clean up old crontab entries if they exist (from previous installations) + if crontab -l 2>/dev/null | grep -q "patchmon-agent"; then + warning "โš ๏ธ Found old crontab entries, removing them..." + crontab -l 2>/dev/null | grep -v "patchmon-agent" | crontab - + info "๐Ÿ“‹ Removed old crontab entries" + fi + + # Enable and start the service + rc-update add patchmon-agent default + rc-service patchmon-agent start + + # Check if service started successfully + if rc-service patchmon-agent status >/dev/null 2>&1; then + success "โœ… PatchMon Agent service started successfully" + info "๐Ÿ”— WebSocket connection established" + else + warning "โš ๏ธ Service may have failed to start. Check status with: rc-service patchmon-agent status" + fi + + SERVICE_TYPE="openrc" else - warning "โš ๏ธ Service may have failed to start. Check status with: systemctl status patchmon-agent" + # No init system detected, use crontab as fallback + warning "โš ๏ธ No init system detected (systemd or OpenRC). Using crontab for service management." + + # Clean up old crontab entries if they exist + if crontab -l 2>/dev/null | grep -q "patchmon-agent"; then + warning "โš ๏ธ Found old crontab entries, removing them..." 
+ crontab -l 2>/dev/null | grep -v "patchmon-agent" | crontab - + info "๐Ÿ“‹ Removed old crontab entries" + fi + + # Add crontab entry to run the agent + (crontab -l 2>/dev/null; echo "@reboot /usr/local/bin/patchmon-agent serve >/dev/null 2>&1") | crontab - + info "๐Ÿ“‹ Added crontab entry for PatchMon agent" + + # Start the agent manually + /usr/local/bin/patchmon-agent serve >/dev/null 2>&1 & + success "โœ… PatchMon Agent started in background" + info "๐Ÿ”— WebSocket connection established" + + SERVICE_TYPE="crontab" fi # Installation complete @@ -680,14 +795,20 @@ echo " โ€ข Configuration directory: /etc/patchmon" echo " โ€ข Agent binary installed: /usr/local/bin/patchmon-agent" echo " โ€ข Architecture: $ARCHITECTURE" echo " โ€ข Dependencies installed: jq, curl, bc" -echo " โ€ข Systemd service configured and running" +if [ "$SERVICE_TYPE" = "systemd" ]; then + echo " โ€ข Systemd service configured and running" +elif [ "$SERVICE_TYPE" = "openrc" ]; then + echo " โ€ข OpenRC service configured and running" +else + echo " โ€ข Service configured via crontab" +fi echo " โ€ข API credentials configured and tested" echo " โ€ข WebSocket connection established" echo " โ€ข Logs directory: /etc/patchmon/logs" # Check for moved files and show them MOVED_FILES=$(ls /etc/patchmon/credentials.yml.backup.* /etc/patchmon/config.yml.backup.* /usr/local/bin/patchmon-agent.backup.* /etc/patchmon/logs/patchmon-agent.log.old.* /usr/local/bin/patchmon-agent.sh.backup.* /etc/patchmon/credentials.backup.* 2>/dev/null || true) -if [[ -n "$MOVED_FILES" ]]; then +if [ -n "$MOVED_FILES" ]; then echo "" echo -e "${YELLOW}๐Ÿ“‹ Files Moved for Fresh Installation:${NC}" echo "$MOVED_FILES" | while read -r moved_file; do @@ -702,8 +823,17 @@ echo -e "${BLUE}๐Ÿ”ง Management Commands:${NC}" echo " โ€ข Test connection: /usr/local/bin/patchmon-agent ping" echo " โ€ข Manual report: /usr/local/bin/patchmon-agent report" echo " โ€ข Check status: /usr/local/bin/patchmon-agent diagnostics" -echo " โ€ข Service status: systemctl status patchmon-agent" -echo " โ€ข Service logs: journalctl -u patchmon-agent -f" -echo " โ€ข Restart service: systemctl restart patchmon-agent" +if [ "$SERVICE_TYPE" = "systemd" ]; then + echo " โ€ข Service status: systemctl status patchmon-agent" + echo " โ€ข Service logs: journalctl -u patchmon-agent -f" + echo " โ€ข Restart service: systemctl restart patchmon-agent" +elif [ "$SERVICE_TYPE" = "openrc" ]; then + echo " โ€ข Service status: rc-service patchmon-agent status" + echo " โ€ข Service logs: tail -f /etc/patchmon/logs/patchmon-agent.log" + echo " โ€ข Restart service: rc-service patchmon-agent restart" +else + echo " โ€ข Service logs: tail -f /etc/patchmon/logs/patchmon-agent.log" + echo " โ€ข Restart service: pkill -f 'patchmon-agent serve' && /usr/local/bin/patchmon-agent serve &" +fi echo "" success "โœ… Your system is now being monitored by PatchMon!" 
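A quick way to exercise the Alpine path added in this patch is to run the documented curl | sh flow and then check the OpenRC service the installer creates. This is a minimal sketch, assuming an Alpine host with OpenRC and that the placeholder URL and API credentials are replaced with values issued by the PatchMon server; every command below is one the script itself documents or prints.

# Install via the documented flow (the script installs bash via apk if it is missing)
curl -s {PATCHMON_URL}/api/v1/hosts/install -H "X-API-ID: {API_ID}" -H "X-API-KEY: {API_KEY}" | sh

# Verify the OpenRC service created by the installer
rc-service patchmon-agent status
rc-update show default | grep patchmon-agent

# Agent output is written to the log directory set up during installation
tail -f /etc/patchmon/logs/patchmon-agent.log

# On hosts with neither systemd nor OpenRC the installer falls back to an @reboot crontab entry
crontab -l | grep patchmon-agent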
From bedcd1ac73d740713e8d5b02f5b55c43a507feec Mon Sep 17 00:00:00 2001 From: Muhammad Ibrahim Date: Mon, 10 Nov 2025 20:32:40 +0000 Subject: [PATCH 2/3] added api scope creator --- frontend/src/pages/settings/Integrations.jsx | 632 ++++++++++++++++++- 1 file changed, 627 insertions(+), 5 deletions(-) diff --git a/frontend/src/pages/settings/Integrations.jsx b/frontend/src/pages/settings/Integrations.jsx index 1d3d493..c8df57d 100644 --- a/frontend/src/pages/settings/Integrations.jsx +++ b/frontend/src/pages/settings/Integrations.jsx @@ -28,6 +28,8 @@ const Integrations = () => { const [host_groups, setHostGroups] = useState([]); const [loading, setLoading] = useState(true); const [show_create_modal, setShowCreateModal] = useState(false); + const [show_edit_modal, setShowEditModal] = useState(false); + const [edit_token, setEditToken] = useState(null); const [new_token, setNewToken] = useState(null); const [show_secret, setShowSecret] = useState(false); const [server_url, setServerUrl] = useState(""); @@ -40,6 +42,9 @@ const Integrations = () => { default_host_group_id: "", allowed_ip_ranges: "", expires_at: "", + scopes: { + host: [], + }, }); const [copy_success, setCopySuccess] = useState({}); @@ -54,6 +59,25 @@ const Integrations = () => { setActiveTab(tabName); }; + const toggle_scope_action = (resource, action) => { + setFormData((prev) => { + const current_scopes = prev.scopes || { [resource]: [] }; + const resource_scopes = current_scopes[resource] || []; + + const updated_scopes = resource_scopes.includes(action) + ? resource_scopes.filter((a) => a !== action) + : [...resource_scopes, action]; + + return { + ...prev, + scopes: { + ...current_scopes, + [resource]: updated_scopes, + }, + }; + }); + }; + // biome-ignore lint/correctness/useExhaustiveDependencies: Only run on mount useEffect(() => { load_tokens(); @@ -96,6 +120,14 @@ const Integrations = () => { e.preventDefault(); try { + // Determine integration type based on active tab + let integration_type = "proxmox-lxc"; + if (activeTab === "gethomepage") { + integration_type = "gethomepage"; + } else if (activeTab === "api") { + integration_type = "api"; + } + const data = { token_name: form_data.token_name, max_hosts_per_day: Number.parseInt(form_data.max_hosts_per_day, 10), @@ -103,8 +135,7 @@ const Integrations = () => { ? form_data.allowed_ip_ranges.split(",").map((ip) => ip.trim()) : [], metadata: { - integration_type: - activeTab === "gethomepage" ? "gethomepage" : "proxmox-lxc", + integration_type: integration_type, }, }; @@ -116,6 +147,11 @@ const Integrations = () => { data.expires_at = form_data.expires_at; } + // Add scopes for API credentials + if (activeTab === "api" && form_data.scopes) { + data.scopes = form_data.scopes; + } + const response = await api.post("/auto-enrollment/tokens", data); setNewToken(response.data.token); setShowCreateModal(false); @@ -128,6 +164,9 @@ const Integrations = () => { default_host_group_id: "", allowed_ip_ranges: "", expires_at: "", + scopes: { + host: [], + }, }); } catch (error) { console.error("Failed to create token:", error); @@ -168,6 +207,69 @@ const Integrations = () => { } }; + const open_edit_modal = (token) => { + setEditToken(token); + setFormData({ + token_name: token.token_name, + max_hosts_per_day: token.max_hosts_per_day || 100, + default_host_group_id: token.default_host_group_id || "", + allowed_ip_ranges: token.allowed_ip_ranges?.join(", ") || "", + expires_at: token.expires_at + ? 
new Date(token.expires_at).toISOString().slice(0, 16) + : "", + scopes: token.scopes || { host: [] }, + }); + setShowEditModal(true); + }; + + const update_token = async (e) => { + e.preventDefault(); + + try { + const data = { + allowed_ip_ranges: form_data.allowed_ip_ranges + ? form_data.allowed_ip_ranges.split(",").map((ip) => ip.trim()) + : [], + }; + + // Add expiration if provided + if (form_data.expires_at) { + data.expires_at = form_data.expires_at; + } + + // Add scopes for API credentials + if ( + edit_token?.metadata?.integration_type === "api" && + form_data.scopes + ) { + data.scopes = form_data.scopes; + } + + await api.patch(`/auto-enrollment/tokens/${edit_token.id}`, data); + setShowEditModal(false); + setEditToken(null); + load_tokens(); + + // Reset form + setFormData({ + token_name: "", + max_hosts_per_day: 100, + default_host_group_id: "", + allowed_ip_ranges: "", + expires_at: "", + scopes: { + host: [], + }, + }); + } catch (error) { + console.error("Failed to update token:", error); + const error_message = error.response?.data?.errors + ? error.response.data.errors.map((e) => e.msg).join(", ") + : error.response?.data?.error || "Failed to update token"; + alert(error_message); + } + }; + const copy_to_clipboard = async (text, key) => { // Check if Clipboard API is available if (navigator.clipboard && window.isSecureContext) { @@ -256,6 +358,17 @@ const Integrations = () => { > GetHomepage + + + + {/* API Credentials List */} + {loading ? ( +
+
+
+ ) : tokens.filter( + (token) => token.metadata?.integration_type === "api", + ).length === 0 ? ( +
+

No API credentials created yet.

+

+ Create a credential to enable programmatic access to + PatchMon. +

+
+ ) : ( +
+ {tokens + .filter( + (token) => token.metadata?.integration_type === "api", + ) + .map((token) => ( +
+
+
+
+

+ {token.token_name} +

+ + API + + {token.is_active ? ( + + Active + + ) : ( + + Inactive + + )} +
+
+
+ + {token.token_key} + + +
+ {token.scopes && ( +

+ Scopes:{" "} + {Object.entries(token.scopes) + .map( + ([resource, actions]) => + `${resource}: ${Array.isArray(actions) ? actions.join(", ") : actions}`, + ) + .join(" | ")} +

+ )} + {token.allowed_ip_ranges?.length > 0 && ( +

+ Allowed IPs:{" "} + {token.allowed_ip_ranges.join(", ")} +

+ )} +

Created: {format_date(token.created_at)}

+ {token.last_used_at && ( +

+ Last Used: {format_date(token.last_used_at)} +

+ )} + {token.expires_at && ( +

+ Expires: {format_date(token.expires_at)} + {new Date(token.expires_at) < + new Date() && ( + + (Expired) + + )} +

+ )} +
+
+
+ + + +
+
+
+ ))} +
+ )} + + {/* Documentation Section */} +
+

+ Using API Credentials +

+
+

+ API credentials allow you to programmatically access + PatchMon data using Basic Authentication. +

+
+

+ Example cURL Request: +

+
+ curl -u "YOUR_API_KEY:YOUR_API_SECRET" \
+   {server_url}/api/v1/api/hosts +
+
+
+

+ Query Hosts by Group: +

+
+ curl -u "YOUR_API_KEY:YOUR_API_SECRET" \
+   "{server_url} + /api/v1/api/hosts?hostgroup=Production,Development" +
+
+

+ ๐Ÿ’ก Tip: You can filter by host group + names or UUIDs. Multiple groups can be specified as a + comma-separated list. +

+
+
+
+ )} + {/* Docker Tab */} {activeTab === "docker" && (
@@ -885,7 +1206,9 @@ const Integrations = () => {

{activeTab === "gethomepage" ? "Create GetHomepage API Key" - : "Create Auto-Enrollment Token"} + : activeTab === "api" + ? "Create API Credential" + : "Create Auto-Enrollment Token"}

+ + +
+

+ Filter by host group: +

+
+ + +
+
+ +

+ ๐Ÿ’ก Replace "Production" with your host group name or UUID +

+ + )} + {activeTab === "proxmox" && (
@@ -1371,6 +1845,154 @@ const Integrations = () => {
)} + + {/* Edit API Credential Modal */} + {show_edit_modal && edit_token && ( +
+
+
+
+

+ Edit API Credential +

+ +
+ +
+
+ + Token Name + + +

+ Token name cannot be changed +

+
+ + {edit_token?.metadata?.integration_type === "api" && ( +
+ + Scopes + +
+
+

+ Host Permissions +

+
+ {["get", "put", "patch", "update", "delete"].map( + (action) => ( + + ), + )} +
+
+
+

+ Update the permissions for this API credential +

+
+ )} + + + + + +
+ + +
+
+
+
+
+ )} ); }; From d1069a8bd08b43d7d40ddcd493ea2693329c13b3 Mon Sep 17 00:00:00 2001 From: Muhammad Ibrahim Date: Mon, 10 Nov 2025 20:34:03 +0000 Subject: [PATCH 3/3] api endpoint and scopes created --- backend/prisma/schema.prisma | 1 + backend/src/middleware/apiAuth.js | 113 ++++++++++++++++ backend/src/middleware/apiScope.js | 76 +++++++++++ backend/src/routes/apiHostsRoutes.js | 143 +++++++++++++++++++++ backend/src/routes/autoEnrollmentRoutes.js | 83 ++++++++++++ backend/src/routes/gethomepageRoutes.js | 107 +-------------- backend/src/server.js | 2 + 7 files changed, 421 insertions(+), 104 deletions(-) create mode 100644 backend/src/middleware/apiAuth.js create mode 100644 backend/src/middleware/apiScope.js create mode 100644 backend/src/routes/apiHostsRoutes.js diff --git a/backend/prisma/schema.prisma b/backend/prisma/schema.prisma index e0d48c1..51d442f 100644 --- a/backend/prisma/schema.prisma +++ b/backend/prisma/schema.prisma @@ -288,6 +288,7 @@ model auto_enrollment_tokens { last_used_at DateTime? expires_at DateTime? metadata Json? + scopes Json? users users? @relation(fields: [created_by_user_id], references: [id], onDelete: SetNull) host_groups host_groups? @relation(fields: [default_host_group_id], references: [id], onDelete: SetNull) diff --git a/backend/src/middleware/apiAuth.js b/backend/src/middleware/apiAuth.js new file mode 100644 index 0000000..8c9bbe2 --- /dev/null +++ b/backend/src/middleware/apiAuth.js @@ -0,0 +1,113 @@ +const { getPrismaClient } = require("../config/prisma"); +const bcrypt = require("bcryptjs"); + +const prisma = getPrismaClient(); + +/** + * Middleware factory to authenticate API tokens using Basic Auth + * @param {string} integrationType - The expected integration type (e.g., "api", "gethomepage") + * @returns {Function} Express middleware function + */ +const authenticateApiToken = (integrationType) => { + return async (req, res, next) => { + try { + const authHeader = req.headers.authorization; + + if (!authHeader || !authHeader.startsWith("Basic ")) { + return res + .status(401) + .json({ error: "Missing or invalid authorization header" }); + } + + // Decode base64 credentials + const base64Credentials = authHeader.split(" ")[1]; + const credentials = Buffer.from(base64Credentials, "base64").toString( + "ascii", + ); + const [apiKey, apiSecret] = credentials.split(":"); + + if (!apiKey || !apiSecret) { + return res.status(401).json({ error: "Invalid credentials format" }); + } + + // Find the token in database + const token = await prisma.auto_enrollment_tokens.findUnique({ + where: { token_key: apiKey }, + include: { + users: { + select: { + id: true, + username: true, + role: true, + }, + }, + }, + }); + + if (!token) { + console.log(`API key not found: ${apiKey}`); + return res.status(401).json({ error: "Invalid API key" }); + } + + // Check if token is active + if (!token.is_active) { + return res.status(401).json({ error: "API key is disabled" }); + } + + // Check if token has expired + if (token.expires_at && new Date(token.expires_at) < new Date()) { + return res.status(401).json({ error: "API key has expired" }); + } + + // Check if token is for the expected integration type + if (token.metadata?.integration_type !== integrationType) { + return res.status(401).json({ error: "Invalid API key type" }); + } + + // Verify the secret + const isValidSecret = await bcrypt.compare(apiSecret, token.token_secret); + if (!isValidSecret) { + return res.status(401).json({ error: "Invalid API secret" }); + } + + // Check IP restrictions if any + if 
(token.allowed_ip_ranges && token.allowed_ip_ranges.length > 0) { + const clientIp = req.ip || req.connection.remoteAddress; + const forwardedFor = req.headers["x-forwarded-for"]; + const realIp = req.headers["x-real-ip"]; + + // Get the actual client IP (considering proxies) + const actualClientIp = forwardedFor + ? forwardedFor.split(",")[0].trim() + : realIp || clientIp; + + const isAllowedIp = token.allowed_ip_ranges.some((range) => { + // Simple IP range check (can be enhanced for CIDR support) + return actualClientIp.startsWith(range) || actualClientIp === range; + }); + + if (!isAllowedIp) { + console.log( + `IP validation failed. Client IP: ${actualClientIp}, Allowed ranges: ${token.allowed_ip_ranges.join(", ")}`, + ); + return res.status(403).json({ error: "IP address not allowed" }); + } + } + + // Update last used timestamp + await prisma.auto_enrollment_tokens.update({ + where: { id: token.id }, + data: { last_used_at: new Date() }, + }); + + // Attach token info to request + req.apiToken = token; + next(); + } catch (error) { + console.error("API key authentication error:", error); + res.status(500).json({ error: "Authentication failed" }); + } + }; +}; + +module.exports = { authenticateApiToken }; diff --git a/backend/src/middleware/apiScope.js b/backend/src/middleware/apiScope.js new file mode 100644 index 0000000..69950e7 --- /dev/null +++ b/backend/src/middleware/apiScope.js @@ -0,0 +1,76 @@ +/** + * Middleware factory to validate API token scopes + * Only applies to tokens with metadata.integration_type === "api" + * @param {string} resource - The resource being accessed (e.g., "host") + * @param {string} action - The action being performed (e.g., "get", "put", "patch", "update", "delete") + * @returns {Function} Express middleware function + */ +const requireApiScope = (resource, action) => { + return async (req, res, next) => { + try { + const token = req.apiToken; + + // If no token attached, this should have been caught by auth middleware + if (!token) { + return res.status(401).json({ error: "Unauthorized" }); + } + + // Only validate scopes for API type tokens + if (token.metadata?.integration_type !== "api") { + // For non-API tokens, skip scope validation + return next(); + } + + // Check if token has scopes field + if (!token.scopes || typeof token.scopes !== "object") { + console.warn( + `API token ${token.token_key} missing scopes field for ${resource}:${action}`, + ); + return res.status(403).json({ + error: "Access denied", + message: "This API key does not have the required permissions", + }); + } + + // Check if resource exists in scopes + if (!token.scopes[resource]) { + console.warn( + `API token ${token.token_key} missing resource ${resource} for ${action}`, + ); + return res.status(403).json({ + error: "Access denied", + message: `This API key does not have access to ${resource}`, + }); + } + + // Check if action exists in resource scopes + if (!Array.isArray(token.scopes[resource])) { + console.warn( + `API token ${token.token_key} has invalid scopes structure for ${resource}`, + ); + return res.status(403).json({ + error: "Access denied", + message: "Invalid API key permissions configuration", + }); + } + + if (!token.scopes[resource].includes(action)) { + console.warn( + `API token ${token.token_key} missing action ${action} for resource ${resource}`, + ); + return res.status(403).json({ + error: "Access denied", + message: `This API key does not have permission to ${action} ${resource}`, + }); + } + + // Scope validation passed + next(); + } catch 
(error) { + console.error("Scope validation error:", error); + res.status(500).json({ error: "Scope validation failed" }); + } + }; +}; + +module.exports = { requireApiScope }; diff --git a/backend/src/routes/apiHostsRoutes.js b/backend/src/routes/apiHostsRoutes.js new file mode 100644 index 0000000..f4d9f02 --- /dev/null +++ b/backend/src/routes/apiHostsRoutes.js @@ -0,0 +1,143 @@ +const express = require("express"); +const { getPrismaClient } = require("../config/prisma"); +const { authenticateApiToken } = require("../middleware/apiAuth"); +const { requireApiScope } = require("../middleware/apiScope"); + +const router = express.Router(); +const prisma = getPrismaClient(); + +// Helper function to check if a string is a valid UUID +const isUUID = (str) => { + const uuidRegex = + /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i; + return uuidRegex.test(str); +}; + +// GET /api/v1/api/hosts - List hosts with IP and groups +router.get( + "/hosts", + authenticateApiToken("api"), + requireApiScope("host", "get"), + async (req, res) => { + try { + const { hostgroup } = req.query; + + let whereClause = {}; + let filterValues = []; + + // Parse hostgroup filter (comma-separated names or UUIDs) + if (hostgroup) { + filterValues = hostgroup.split(",").map((g) => g.trim()); + + // Separate UUIDs from names + const uuidFilters = []; + const nameFilters = []; + + for (const value of filterValues) { + if (isUUID(value)) { + uuidFilters.push(value); + } else { + nameFilters.push(value); + } + } + + // Find host group IDs from names + const groupIds = [...uuidFilters]; + + if (nameFilters.length > 0) { + const groups = await prisma.host_groups.findMany({ + where: { + name: { + in: nameFilters, + }, + }, + select: { + id: true, + name: true, + }, + }); + + // Add found group IDs + groupIds.push(...groups.map((g) => g.id)); + + // Check if any name filters didn't match + const foundNames = groups.map((g) => g.name); + const notFoundNames = nameFilters.filter( + (name) => !foundNames.includes(name), + ); + + if (notFoundNames.length > 0) { + console.warn(`Host groups not found: ${notFoundNames.join(", ")}`); + } + } + + // Filter hosts by group memberships + if (groupIds.length > 0) { + whereClause = { + host_group_memberships: { + some: { + host_group_id: { + in: groupIds, + }, + }, + }, + }; + } else { + // No valid groups found, return empty result + return res.json({ + hosts: [], + total: 0, + filtered_by_groups: filterValues, + }); + } + } + + // Query hosts with groups + const hosts = await prisma.hosts.findMany({ + where: whereClause, + select: { + id: true, + friendly_name: true, + hostname: true, + ip: true, + host_group_memberships: { + include: { + host_groups: { + select: { + id: true, + name: true, + }, + }, + }, + }, + }, + orderBy: { + friendly_name: "asc", + }, + }); + + // Format response + const formattedHosts = hosts.map((host) => ({ + id: host.id, + friendly_name: host.friendly_name, + hostname: host.hostname, + ip: host.ip, + host_groups: host.host_group_memberships.map((membership) => ({ + id: membership.host_groups.id, + name: membership.host_groups.name, + })), + })); + + res.json({ + hosts: formattedHosts, + total: formattedHosts.length, + filtered_by_groups: filterValues.length > 0 ? 
filterValues : undefined, + }); + } catch (error) { + console.error("Error fetching hosts:", error); + res.status(500).json({ error: "Failed to fetch hosts" }); + } + }, +); + +module.exports = router; diff --git a/backend/src/routes/autoEnrollmentRoutes.js b/backend/src/routes/autoEnrollmentRoutes.js index d20564f..3df5fae 100644 --- a/backend/src/routes/autoEnrollmentRoutes.js +++ b/backend/src/routes/autoEnrollmentRoutes.js @@ -125,6 +125,10 @@ router.post( .optional({ nullable: true, checkFalsy: true }) .isISO8601() .withMessage("Invalid date format"), + body("scopes") + .optional() + .isObject() + .withMessage("Scopes must be an object"), ], async (req, res) => { try { @@ -140,6 +144,7 @@ router.post( default_host_group_id, expires_at, metadata = {}, + scopes, } = req.body; // Validate host group if provided @@ -153,6 +158,32 @@ router.post( } } + // Validate scopes for API tokens + if (metadata.integration_type === "api" && scopes) { + // Validate scopes structure + if (typeof scopes !== "object" || scopes === null) { + return res.status(400).json({ error: "Scopes must be an object" }); + } + + // Validate each resource in scopes + for (const [resource, actions] of Object.entries(scopes)) { + if (!Array.isArray(actions)) { + return res.status(400).json({ + error: `Scopes for resource "${resource}" must be an array of actions`, + }); + } + + // Validate action names + for (const action of actions) { + if (typeof action !== "string") { + return res.status(400).json({ + error: `All actions in scopes must be strings`, + }); + } + } + } + } + const { token_key, token_secret } = generate_auto_enrollment_token(); const hashed_secret = await bcrypt.hash(token_secret, 10); @@ -168,6 +199,7 @@ router.post( default_host_group_id: default_host_group_id || null, expires_at: expires_at ? new Date(expires_at) : null, metadata: { integration_type: "proxmox-lxc", ...metadata }, + scopes: metadata.integration_type === "api" ? 
scopes || null : null, updated_at: new Date(), }, include: { @@ -201,6 +233,7 @@ router.post( default_host_group: token.host_groups, created_by: token.users, expires_at: token.expires_at, + scopes: token.scopes, }, warning: "โš ๏ธ Save the token_secret now - it cannot be retrieved later!", }); @@ -232,6 +265,7 @@ router.get( created_at: true, default_host_group_id: true, metadata: true, + scopes: true, host_groups: { select: { id: true, @@ -314,6 +348,10 @@ router.patch( body("max_hosts_per_day").optional().isInt({ min: 1, max: 1000 }), body("allowed_ip_ranges").optional().isArray(), body("expires_at").optional().isISO8601(), + body("scopes") + .optional() + .isObject() + .withMessage("Scopes must be an object"), ], async (req, res) => { try { @@ -323,6 +361,16 @@ router.patch( } const { tokenId } = req.params; + + // First, get the existing token to check its integration type + const existing_token = await prisma.auto_enrollment_tokens.findUnique({ + where: { id: tokenId }, + }); + + if (!existing_token) { + return res.status(404).json({ error: "Token not found" }); + } + const update_data = { updated_at: new Date() }; if (req.body.is_active !== undefined) @@ -334,6 +382,41 @@ router.patch( if (req.body.expires_at !== undefined) update_data.expires_at = new Date(req.body.expires_at); + // Handle scopes updates for API tokens only + if (req.body.scopes !== undefined) { + if (existing_token.metadata?.integration_type === "api") { + // Validate scopes structure + const scopes = req.body.scopes; + if (typeof scopes !== "object" || scopes === null) { + return res.status(400).json({ error: "Scopes must be an object" }); + } + + // Validate each resource in scopes + for (const [resource, actions] of Object.entries(scopes)) { + if (!Array.isArray(actions)) { + return res.status(400).json({ + error: `Scopes for resource "${resource}" must be an array of actions`, + }); + } + + // Validate action names + for (const action of actions) { + if (typeof action !== "string") { + return res.status(400).json({ + error: `All actions in scopes must be strings`, + }); + } + } + } + + update_data.scopes = scopes; + } else { + return res.status(400).json({ + error: "Scopes can only be updated for API integration tokens", + }); + } + } + const token = await prisma.auto_enrollment_tokens.update({ where: { id: tokenId }, data: update_data, diff --git a/backend/src/routes/gethomepageRoutes.js b/backend/src/routes/gethomepageRoutes.js index c015d9e..d5305dd 100644 --- a/backend/src/routes/gethomepageRoutes.js +++ b/backend/src/routes/gethomepageRoutes.js @@ -1,113 +1,12 @@ const express = require("express"); const { getPrismaClient } = require("../config/prisma"); -const bcrypt = require("bcryptjs"); +const { authenticateApiToken } = require("../middleware/apiAuth"); const router = express.Router(); const prisma = getPrismaClient(); -// Middleware to authenticate API key -const authenticateApiKey = async (req, res, next) => { - try { - const authHeader = req.headers.authorization; - - if (!authHeader || !authHeader.startsWith("Basic ")) { - return res - .status(401) - .json({ error: "Missing or invalid authorization header" }); - } - - // Decode base64 credentials - const base64Credentials = authHeader.split(" ")[1]; - const credentials = Buffer.from(base64Credentials, "base64").toString( - "ascii", - ); - const [apiKey, apiSecret] = credentials.split(":"); - - if (!apiKey || !apiSecret) { - return res.status(401).json({ error: "Invalid credentials format" }); - } - - // Find the token in database - const token = 
await prisma.auto_enrollment_tokens.findUnique({ - where: { token_key: apiKey }, - include: { - users: { - select: { - id: true, - username: true, - role: true, - }, - }, - }, - }); - - if (!token) { - console.log(`API key not found: ${apiKey}`); - return res.status(401).json({ error: "Invalid API key" }); - } - - // Check if token is active - if (!token.is_active) { - return res.status(401).json({ error: "API key is disabled" }); - } - - // Check if token has expired - if (token.expires_at && new Date(token.expires_at) < new Date()) { - return res.status(401).json({ error: "API key has expired" }); - } - - // Check if token is for gethomepage integration - if (token.metadata?.integration_type !== "gethomepage") { - return res.status(401).json({ error: "Invalid API key type" }); - } - - // Verify the secret - const isValidSecret = await bcrypt.compare(apiSecret, token.token_secret); - if (!isValidSecret) { - return res.status(401).json({ error: "Invalid API secret" }); - } - - // Check IP restrictions if any - if (token.allowed_ip_ranges && token.allowed_ip_ranges.length > 0) { - const clientIp = req.ip || req.connection.remoteAddress; - const forwardedFor = req.headers["x-forwarded-for"]; - const realIp = req.headers["x-real-ip"]; - - // Get the actual client IP (considering proxies) - const actualClientIp = forwardedFor - ? forwardedFor.split(",")[0].trim() - : realIp || clientIp; - - const isAllowedIp = token.allowed_ip_ranges.some((range) => { - // Simple IP range check (can be enhanced for CIDR support) - return actualClientIp.startsWith(range) || actualClientIp === range; - }); - - if (!isAllowedIp) { - console.log( - `IP validation failed. Client IP: ${actualClientIp}, Allowed ranges: ${token.allowed_ip_ranges.join(", ")}`, - ); - return res.status(403).json({ error: "IP address not allowed" }); - } - } - - // Update last used timestamp - await prisma.auto_enrollment_tokens.update({ - where: { id: token.id }, - data: { last_used_at: new Date() }, - }); - - // Attach token info to request - req.apiToken = token; - next(); - } catch (error) { - console.error("API key authentication error:", error); - res.status(500).json({ error: "Authentication failed" }); - } -}; - // Get homepage widget statistics -router.get("/stats", authenticateApiKey, async (_req, res) => { +router.get("/stats", authenticateApiToken("gethomepage"), async (_req, res) => { try { // Get total hosts count const totalHosts = await prisma.hosts.count({ @@ -235,7 +134,7 @@ router.get("/stats", authenticateApiKey, async (_req, res) => { }); // Health check endpoint for the API -router.get("/health", authenticateApiKey, async (req, res) => { +router.get("/health", authenticateApiToken("gethomepage"), async (req, res) => { res.json({ status: "ok", timestamp: new Date().toISOString(), diff --git a/backend/src/server.js b/backend/src/server.js index f8feb8c..e92cfd9 100644 --- a/backend/src/server.js +++ b/backend/src/server.js @@ -71,6 +71,7 @@ const wsRoutes = require("./routes/wsRoutes"); const agentVersionRoutes = require("./routes/agentVersionRoutes"); const metricsRoutes = require("./routes/metricsRoutes"); const userPreferencesRoutes = require("./routes/userPreferencesRoutes"); +const apiHostsRoutes = require("./routes/apiHostsRoutes"); const { initSettings } = require("./services/settingsService"); const { queueManager } = require("./services/automation"); const { authenticateToken, requireAdmin } = require("./middleware/auth"); @@ -480,6 +481,7 @@ app.use(`/api/${apiVersion}/ws`, wsRoutes); 
app.use(`/api/${apiVersion}/agent`, agentVersionRoutes); app.use(`/api/${apiVersion}/metrics`, metricsRoutes); app.use(`/api/${apiVersion}/user/preferences`, userPreferencesRoutes); +app.use(`/api/${apiVersion}/api`, authLimiter, apiHostsRoutes); // Bull Board - will be populated after queue manager initializes let bullBoardRouter = null;
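Putting the three patches together: an "api"-type credential created in the Integrations page, with a host scope that includes "get", can query the new endpoint over Basic Auth. A short sketch follows, using a placeholder server URL, credentials, and host group name; the path, query parameter, and response fields come from apiHostsRoutes.js and the in-app documentation added in the frontend patch.

# List hosts visible to the credential (requires scopes: { "host": ["get"] })
curl -u "YOUR_API_KEY:YOUR_API_SECRET" https://patchmon.example.com/api/v1/api/hosts

# Filter by one or more host groups, by name or UUID, comma-separated
curl -u "YOUR_API_KEY:YOUR_API_SECRET" \
  "https://patchmon.example.com/api/v1/api/hosts?hostgroup=Production,Development"

# Response shape returned by the route:
# {
#   "hosts": [
#     { "id": "...", "friendly_name": "...", "hostname": "...", "ip": "...",
#       "host_groups": [ { "id": "...", "name": "Production" } ] }
#   ],
#   "total": 1,
#   "filtered_by_groups": ["Production", "Development"]
# }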