diff --git a/agents/patchmon-agent.sh b/agents/patchmon-agent.sh index ca04313..f78040e 100755 --- a/agents/patchmon-agent.sh +++ b/agents/patchmon-agent.sh @@ -1,12 +1,12 @@ #!/bin/bash -# PatchMon Agent Script +# PatchMon Agent Script v1.2.5 # This script sends package update information to the PatchMon server using API credentials # Configuration PATCHMON_SERVER="${PATCHMON_SERVER:-http://localhost:3001}" API_VERSION="v1" -AGENT_VERSION="1.2.4" +AGENT_VERSION="1.2.5" CONFIG_FILE="/etc/patchmon/agent.conf" CREDENTIALS_FILE="/etc/patchmon/credentials" LOG_FILE="/var/log/patchmon-agent.log" @@ -656,6 +656,114 @@ get_yum_packages() { done <<< "$installed" } +# Get hardware information +get_hardware_info() { + local cpu_model="" + local cpu_cores=0 + local ram_installed=0 + local swap_size=0 + local disk_details="[]" + + # CPU Information + if command -v lscpu >/dev/null 2>&1; then + cpu_model=$(lscpu | grep "Model name" | cut -d':' -f2 | xargs) + cpu_cores=$(lscpu | grep "^CPU(s):" | cut -d':' -f2 | xargs) + elif [[ -f /proc/cpuinfo ]]; then + cpu_model=$(grep "model name" /proc/cpuinfo | head -1 | cut -d':' -f2 | xargs) + cpu_cores=$(grep -c "^processor" /proc/cpuinfo) + fi + + # Memory Information + if command -v free >/dev/null 2>&1; then + ram_installed=$(free -g | grep "^Mem:" | awk '{print $2}') + swap_size=$(free -g | grep "^Swap:" | awk '{print $2}') + elif [[ -f /proc/meminfo ]]; then + ram_installed=$(grep "MemTotal" /proc/meminfo | awk '{print int($2/1024/1024)}') + swap_size=$(grep "SwapTotal" /proc/meminfo | awk '{print int($2/1024/1024)}') + fi + + # Disk Information + if command -v lsblk >/dev/null 2>&1; then + disk_details=$(lsblk -J -o NAME,SIZE,TYPE,MOUNTPOINT | jq -c '[.blockdevices[] | select(.type == "disk") | {name: .name, size: .size, mountpoint: .mountpoint}]') + elif command -v df >/dev/null 2>&1; then + disk_details=$(df -h | grep -E "^/dev/" | awk '{print "{\"name\":\""$1"\",\"size\":\""$2"\",\"mountpoint\":\""$6"\"}"}' | jq -s .) 
+ fi + + echo "{\"cpuModel\":\"$cpu_model\",\"cpuCores\":$cpu_cores,\"ramInstalled\":$ram_installed,\"swapSize\":$swap_size,\"diskDetails\":$disk_details}" +} + +# Get network information +get_network_info() { + local gateway_ip="" + local dns_servers="[]" + local network_interfaces="[]" + + # Gateway IP + if command -v ip >/dev/null 2>&1; then + gateway_ip=$(ip route | grep default | head -1 | awk '{print $3}') + elif command -v route >/dev/null 2>&1; then + gateway_ip=$(route -n | grep '^0.0.0.0' | head -1 | awk '{print $2}') + fi + + # DNS Servers + if [[ -f /etc/resolv.conf ]]; then + dns_servers=$(grep "nameserver" /etc/resolv.conf | awk '{print $2}' | jq -R . | jq -s .) + fi + + # Network Interfaces + if command -v ip >/dev/null 2>&1; then + network_interfaces=$(ip -j addr show | jq -c '[.[] | {name: .ifname, type: .link_type, addresses: [.addr_info[]? | {address: .local, family: .family}]}]') + elif command -v ifconfig >/dev/null 2>&1; then + network_interfaces=$(ifconfig -a | grep -E "^[a-zA-Z]" | awk '{print $1}' | jq -R . | jq -s .) 
+ fi + + echo "{\"gatewayIp\":\"$gateway_ip\",\"dnsServers\":$dns_servers,\"networkInterfaces\":$network_interfaces}" +} + +# Get system information +get_system_info() { + local kernel_version="" + local selinux_status="" + local system_uptime="" + local load_average="[]" + + # Kernel Version + if [[ -f /proc/version ]]; then + kernel_version=$(cat /proc/version | awk '{print $3}') + elif command -v uname >/dev/null 2>&1; then + kernel_version=$(uname -r) + fi + + # SELinux Status + if command -v getenforce >/dev/null 2>&1; then + selinux_status=$(getenforce 2>/dev/null | tr '[:upper:]' '[:lower:]') + elif [[ -f /etc/selinux/config ]]; then + selinux_status=$(grep "^SELINUX=" /etc/selinux/config | cut -d'=' -f2 | tr '[:upper:]' '[:lower:]') + else + selinux_status="disabled" + fi + + # System Uptime + if [[ -f /proc/uptime ]]; then + local uptime_seconds=$(cat /proc/uptime | awk '{print int($1)}') + local days=$((uptime_seconds / 86400)) + local hours=$(((uptime_seconds % 86400) / 3600)) + local minutes=$(((uptime_seconds % 3600) / 60)) + system_uptime="${days}d ${hours}h ${minutes}m" + elif command -v uptime >/dev/null 2>&1; then + system_uptime=$(uptime | awk -F'up ' '{print $2}' | awk -F', load' '{print $1}') + fi + + # Load Average + if [[ -f /proc/loadavg ]]; then + load_average=$(cat /proc/loadavg | awk '{print "["$1","$2","$3"]"}') + elif command -v uptime >/dev/null 2>&1; then + load_average=$(uptime | awk -F'load average: ' '{print "["$2"]"}' | tr -d ' ') + fi + + echo "{\"kernelVersion\":\"$kernel_version\",\"selinuxStatus\":\"$selinux_status\",\"systemUptime\":\"$system_uptime\",\"loadAverage\":$load_average}" +} + # Send package update to server send_update() { load_credentials @@ -666,14 +774,27 @@ send_update() { info "Collecting repository information..." local repositories_json=$(get_repository_info) + info "Collecting hardware information..." + local hardware_json=$(get_hardware_info) + + info "Collecting network information..." 
+ local network_json=$(get_network_info) + + info "Collecting system information..." + local system_json=$(get_system_info) + info "Sending update to PatchMon server..." - local payload=$(cat <&2 + log "ERROR: $1" + exit 1 +} + +# Info logging +info() { + echo -e "${BLUE}INFO: $1${NC}" + log "INFO: $1" +} + +# Success logging +success() { + echo -e "${GREEN}SUCCESS: $1${NC}" + log "SUCCESS: $1" +} + +# Warning logging +warning() { + echo -e "${YELLOW}WARNING: $1${NC}" + log "WARNING: $1" +} + +# Check if running as root +check_root() { + if [[ $EUID -ne 0 ]]; then + error "This script must be run as root" + fi +} + +# Create necessary directories +setup_directories() { + mkdir -p /etc/patchmon + mkdir -p /var/log + touch "$LOG_FILE" + chmod 600 "$LOG_FILE" +} + +# Load configuration +load_config() { + if [[ -f "$CONFIG_FILE" ]]; then + source "$CONFIG_FILE" + fi +} + +# Load API credentials +load_credentials() { + if [[ ! -f "$CREDENTIALS_FILE" ]]; then + error "Credentials file not found at $CREDENTIALS_FILE. Please configure API credentials first." + fi + + source "$CREDENTIALS_FILE" + + if [[ -z "$API_ID" ]] || [[ -z "$API_KEY" ]]; then + error "API_ID and API_KEY must be configured in $CREDENTIALS_FILE" + fi + + # Use PATCHMON_URL from credentials if available, otherwise use default + if [[ -n "$PATCHMON_URL" ]]; then + PATCHMON_SERVER="$PATCHMON_URL" + fi +} + +# Configure API credentials +configure_credentials() { + info "Setting up API credentials..." + + if [[ -z "$1" ]] || [[ -z "$2" ]]; then + echo "Usage: $0 configure [SERVER_URL]" + echo "" + echo "Example:" + echo " $0 configure patchmon_1a2b3c4d abcd1234567890abcdef1234567890abcdef1234567890abcdef1234567890" + echo " $0 configure patchmon_1a2b3c4d abcd1234567890abcdef1234567890abcdef1234567890abcdef1234567890 http://patchmon.example.com" + echo "" + echo "Contact your PatchMon administrator to get your API credentials." 
+ exit 1 + fi + + local api_id="$1" + local api_key="$2" + local server_url="${3:-$PATCHMON_SERVER}" + + # Validate API ID format + if [[ ! "$api_id" =~ ^patchmon_[a-f0-9]{16}$ ]]; then + error "Invalid API ID format. API ID should be in format: patchmon_xxxxxxxxxxxxxxxx" + fi + + # Validate API Key format (64 hex characters) + if [[ ! "$api_key" =~ ^[a-f0-9]{64}$ ]]; then + error "Invalid API Key format. API Key should be 64 hexadecimal characters." + fi + + # Validate server URL format + if [[ ! "$server_url" =~ ^https?:// ]]; then + error "Invalid server URL format. Must start with http:// or https://" + fi + + # Create credentials file + cat > "$CREDENTIALS_FILE" << EOF +# PatchMon API Credentials +# Generated on $(date) +PATCHMON_URL="$server_url" +API_ID="$api_id" +API_KEY="$api_key" +EOF + + chmod 600 "$CREDENTIALS_FILE" + success "API credentials configured successfully" + info "Credentials saved to: $CREDENTIALS_FILE" + + # Test credentials + info "Testing API credentials..." + test_credentials +} + +# Test API credentials +test_credentials() { + load_credentials + + local response=$(curl -s -X POST \ + -H "Content-Type: application/json" \ + -H "X-API-ID: $API_ID" \ + -H "X-API-KEY: $API_KEY" \ + "$PATCHMON_SERVER/api/$API_VERSION/hosts/ping") + + if [[ $? 
-eq 0 ]] && echo "$response" | grep -q "success"; then + success "API credentials are valid" + local hostname=$(echo "$response" | grep -o '"hostname":"[^"]*' | cut -d'"' -f4) + if [[ -n "$hostname" ]]; then + info "Connected as host: $hostname" + fi + else + error "API credentials test failed: $response" + fi +} + +# Detect OS and version +detect_os() { + if [[ -f /etc/os-release ]]; then + source /etc/os-release + OS_TYPE=$(echo "$ID" | tr '[:upper:]' '[:lower:]') + OS_VERSION="$VERSION_ID" + + # Map OS variations to their appropriate categories + case "$OS_TYPE" in + "pop"|"linuxmint"|"elementary") + OS_TYPE="ubuntu" + ;; + "opensuse"|"opensuse-leap"|"opensuse-tumbleweed") + OS_TYPE="suse" + ;; + "rocky"|"almalinux") + OS_TYPE="rhel" + ;; + esac + + elif [[ -f /etc/redhat-release ]]; then + if grep -q "CentOS" /etc/redhat-release; then + OS_TYPE="centos" + elif grep -q "Red Hat" /etc/redhat-release; then + OS_TYPE="rhel" + fi + OS_VERSION=$(grep -oE '[0-9]+\.[0-9]+' /etc/redhat-release | head -1) + else + error "Unable to detect OS version" + fi + + ARCHITECTURE=$(uname -m) + HOSTNAME=$(hostname) + IP_ADDRESS=$(hostname -I | awk '{print $1}') +} + +# Get repository information based on OS +get_repository_info() { + local repos_json="[" + local first=true + + case "$OS_TYPE" in + "ubuntu"|"debian") + get_apt_repositories repos_json first + ;; + "centos"|"rhel"|"fedora") + get_yum_repositories repos_json first + ;; + *) + # Return empty array for unsupported OS + ;; + esac + + repos_json+="]" + echo "$repos_json" +} + +# Get repository info for APT-based systems +get_apt_repositories() { + local -n repos_ref=$1 + local -n first_ref=$2 + + # Parse traditional .list files + local sources_files="/etc/apt/sources.list" + if [[ -d "/etc/apt/sources.list.d" ]]; then + sources_files="$sources_files $(find /etc/apt/sources.list.d -name '*.list' 2>/dev/null)" + fi + + for file in $sources_files; do + if [[ -f "$file" ]]; then + while IFS= read -r line; do + # Skip comments 
and empty lines + if [[ "$line" =~ ^[[:space:]]*# ]] || [[ -z "$line" ]]; then + continue + fi + + # Parse repository line (deb or deb-src) + if [[ "$line" =~ ^[[:space:]]*(deb|deb-src)[[:space:]]+ ]]; then + # Clean the line and extract components + local clean_line=$(echo "$line" | xargs) + local repo_type=$(echo "$clean_line" | awk '{print $1}') + + # Handle modern APT format with options like [signed-by=...] + local url="" + local distribution="" + local components="" + + if [[ "$clean_line" =~ \[.*\] ]]; then + # Modern format: deb [options] URL distribution components + # Extract URL (first field after the options) + url=$(echo "$clean_line" | sed 's/deb[^[:space:]]* \[[^]]*\] //' | awk '{print $1}') + distribution=$(echo "$clean_line" | sed 's/deb[^[:space:]]* \[[^]]*\] //' | awk '{print $2}') + components=$(echo "$clean_line" | sed 's/deb[^[:space:]]* \[[^]]*\] [^[:space:]]* [^[:space:]]* //') + else + # Traditional format: deb URL distribution components + url=$(echo "$clean_line" | awk '{print $2}') + distribution=$(echo "$clean_line" | awk '{print $3}') + components=$(echo "$clean_line" | cut -d' ' -f4- | xargs) + fi + + # Skip if URL doesn't look like a valid URL + if [[ ! "$url" =~ ^https?:// ]] && [[ ! 
"$url" =~ ^ftp:// ]]; then + continue + fi + + # Skip if distribution is empty or looks malformed + if [[ -z "$distribution" ]] || [[ "$distribution" =~ \[.*\] ]]; then + continue + fi + + # Determine if repository uses HTTPS + local is_secure=false + if [[ "$url" =~ ^https:// ]]; then + is_secure=true + fi + + # Generate repository name from URL and distribution + local repo_name="$distribution" + + # Extract meaningful name from URL for better identification + if [[ "$url" =~ archive\.ubuntu\.com ]]; then + repo_name="ubuntu-$distribution" + elif [[ "$url" =~ security\.ubuntu\.com ]]; then + repo_name="ubuntu-$distribution-security" + elif [[ "$url" =~ deb\.nodesource\.com ]]; then + repo_name="nodesource-$distribution" + elif [[ "$url" =~ packagecloud\.io ]]; then + repo_name="packagecloud-$(echo "$url" | cut -d'/' -f4-5 | tr '/' '-')" + elif [[ "$url" =~ ppa\.launchpad ]]; then + repo_name="ppa-$(echo "$url" | cut -d'/' -f4-5 | tr '/' '-')" + elif [[ "$url" =~ packages\.microsoft\.com ]]; then + repo_name="microsoft-$(echo "$url" | cut -d'/' -f4-)" + elif [[ "$url" =~ download\.docker\.com ]]; then + repo_name="docker-$distribution" + else + # Fallback: use domain name + distribution + local domain=$(echo "$url" | cut -d'/' -f3 | cut -d':' -f1) + repo_name="$domain-$distribution" + fi + + # Add component suffix if relevant + if [[ "$components" =~ updates ]]; then + repo_name="$repo_name-updates" + elif [[ "$components" =~ security ]]; then + repo_name="$repo_name-security" + elif [[ "$components" =~ backports ]]; then + repo_name="$repo_name-backports" + fi + + if [[ "$first_ref" == true ]]; then + first_ref=false + else + repos_ref+="," + fi + + repos_ref+="{\"name\":\"$repo_name\",\"url\":\"$url\",\"distribution\":\"$distribution\",\"components\":\"$components\",\"repoType\":\"$repo_type\",\"isEnabled\":true,\"isSecure\":$is_secure}" + fi + done < "$file" + fi + done + + # Parse modern DEB822 format (.sources files) + if [[ -d "/etc/apt/sources.list.d" ]]; 
then + local sources_files_deb822=$(find /etc/apt/sources.list.d -name '*.sources' 2>/dev/null) + for file in $sources_files_deb822; do + if [[ -f "$file" ]]; then + local deb822_result=$(parse_deb822_sources_simple "$file") + if [[ -n "$deb822_result" ]]; then + if [[ "$first_ref" == true ]]; then + first_ref=false + repos_ref+="$deb822_result" + else + repos_ref+=",$deb822_result" + fi + fi + fi + done + fi +} + +# Simple DEB822 parser that returns JSON string +parse_deb822_sources_simple() { + local file=$1 + local result="" + local enabled="" + local types="" + local uris="" + local suites="" + local components="" + local name="" + local first_entry=true + + while IFS= read -r line; do + # Skip empty lines and comments + if [[ -z "$line" ]] || [[ "$line" =~ ^[[:space:]]*# ]]; then + continue + fi + + # Parse key-value pairs + if [[ "$line" =~ ^([^:]+):[[:space:]]*(.*)$ ]]; then + local key="${BASH_REMATCH[1]}" + local value="${BASH_REMATCH[2]}" + + case "$key" in + "Enabled") + enabled="$value" + ;; + "Types") + types="$value" + ;; + "URIs") + uris="$value" + ;; + "Suites") + suites="$value" + ;; + "Components") + components="$value" + ;; + "X-Repolib-Name") + name="$value" + ;; + esac + fi + + # Process repository entry when we hit a blank line + if [[ -z "$line" ]] || [[ "$line" =~ ^[[:space:]]*$ ]]; then + if [[ -n "$uris" && -n "$suites" && "$enabled" == "yes" ]]; then + local entry_result=$(process_deb822_entry_simple "$name" "$types" "$uris" "$suites" "$components") + if [[ -n "$entry_result" ]]; then + if [[ "$first_entry" == true ]]; then + first_entry=false + result="$entry_result" + else + result="$result,$entry_result" + fi + fi + fi + # Reset variables for next entry + enabled="" + types="" + uris="" + suites="" + components="" + name="" + fi + done < "$file" + + # Process the last entry if file doesn't end with blank line + if [[ -n "$uris" && -n "$suites" && "$enabled" == "yes" ]]; then + local entry_result=$(process_deb822_entry_simple "$name" 
"$types" "$uris" "$suites" "$components") + if [[ -n "$entry_result" ]]; then + if [[ "$first_entry" == true ]]; then + result="$entry_result" + else + result="$result,$entry_result" + fi + fi + fi + + echo "$result" +} + +# Process a DEB822 repository entry and return JSON +process_deb822_entry_simple() { + local name=$1 + local types=$2 + local uris=$3 + local suites=$4 + local components=$5 + local result="" + local first_entry=true + + # Handle multiple URIs + for uri in $uris; do + # Skip if URI doesn't look like a valid URL + if [[ ! "$uri" =~ ^https?:// ]] && [[ ! "$uri" =~ ^ftp:// ]]; then + continue + fi + + # Handle multiple suites + for suite in $suites; do + # Skip if suite looks malformed + if [[ -z "$suite" ]]; then + continue + fi + + # Determine if repository uses HTTPS + local is_secure=false + if [[ "$uri" =~ ^https:// ]]; then + is_secure=true + fi + + # Generate repository name + local repo_name="" + if [[ -n "$name" ]]; then + repo_name=$(echo "$name" | tr ' ' '-' | tr '[:upper:]' '[:lower:]') + else + repo_name="$suite" + fi + + # Extract meaningful name from URI for better identification + if [[ "$uri" =~ apt\.pop-os\.org/ubuntu ]]; then + repo_name="pop-os-ubuntu-$suite" + elif [[ "$uri" =~ apt\.pop-os\.org/release ]]; then + repo_name="pop-os-release-$suite" + elif [[ "$uri" =~ apt\.pop-os\.org/proprietary ]]; then + repo_name="pop-os-apps-$suite" + elif [[ "$uri" =~ archive\.ubuntu\.com ]]; then + repo_name="ubuntu-$suite" + elif [[ "$uri" =~ security\.ubuntu\.com ]]; then + repo_name="ubuntu-$suite-security" + else + # Fallback: use domain name + suite + local domain=$(echo "$uri" | cut -d'/' -f3 | cut -d':' -f1) + repo_name="$domain-$suite" + fi + + # Add component suffix if relevant and not already included + if [[ "$suite" != *"security"* && "$components" =~ security ]]; then + repo_name="$repo_name-security" + elif [[ "$suite" != *"updates"* && "$components" =~ updates ]]; then + repo_name="$repo_name-updates" + elif [[ "$suite" != 
*"backports"* && "$components" =~ backports ]]; then + repo_name="$repo_name-backports" + fi + + # Determine repo type (prefer deb over deb-src) + local repo_type="deb" + if [[ "$types" =~ deb-src ]] && [[ ! "$types" =~ ^deb[[:space:]] ]]; then + repo_type="deb-src" + fi + + local json_entry="{\"name\":\"$repo_name\",\"url\":\"$uri\",\"distribution\":\"$suite\",\"components\":\"$components\",\"repoType\":\"$repo_type\",\"isEnabled\":true,\"isSecure\":$is_secure}" + + if [[ "$first_entry" == true ]]; then + first_entry=false + result="$json_entry" + else + result="$result,$json_entry" + fi + done + done + + echo "$result" +} + +# Get repository info for YUM-based systems +get_yum_repositories() { + local -n repos_ref=$1 + local -n first_ref=$2 + + # Parse yum/dnf repository configuration + if command -v dnf >/dev/null 2>&1; then + local repo_info=$(dnf repolist all --verbose 2>/dev/null | grep -E "^Repo-id|^Repo-baseurl|^Repo-name|^Repo-status") + elif command -v yum >/dev/null 2>&1; then + local repo_info=$(yum repolist all -v 2>/dev/null | grep -E "^Repo-id|^Repo-baseurl|^Repo-name|^Repo-status") + fi + + # This is a simplified implementation - would need more work for full YUM support + # For now, return empty for non-APT systems +} + +# Get package information based on OS +get_package_info() { + local packages_json="[" + local first=true + + case "$OS_TYPE" in + "ubuntu"|"debian") + get_apt_packages packages_json first + ;; + "centos"|"rhel"|"fedora") + get_yum_packages packages_json first + ;; + *) + error "Unsupported OS type: $OS_TYPE" + ;; + esac + + packages_json+="]" + echo "$packages_json" +} + +# Get package info for APT-based systems +get_apt_packages() { + local -n packages_ref=$1 + local -n first_ref=$2 + + # Update package lists + apt-get update -qq + + # Get upgradable packages + local upgradable=$(apt list --upgradable 2>/dev/null | grep -v "WARNING") + + while IFS= read -r line; do + if [[ "$line" =~ 
^([^/]+)/([^[:space:]]+)[[:space:]]+([^[:space:]]+)[[:space:]]+.*[[:space:]]([^[:space:]]+)[[:space:]]*(\[.*\])? ]]; then + local package_name="${BASH_REMATCH[1]}" + local current_version="${BASH_REMATCH[4]}" + local available_version="${BASH_REMATCH[3]}" + local is_security_update=false + + # Check if it's a security update + if echo "$line" | grep -q "security"; then + is_security_update=true + fi + + if [[ "$first_ref" == true ]]; then + first_ref=false + else + packages_ref+="," + fi + + packages_ref+="{\"name\":\"$package_name\",\"currentVersion\":\"$current_version\",\"availableVersion\":\"$available_version\",\"needsUpdate\":true,\"isSecurityUpdate\":$is_security_update}" + fi + done <<< "$upgradable" + + # Get installed packages that are up to date + local installed=$(dpkg-query -W -f='${Package} ${Version}\n' | head -100) + + while IFS=' ' read -r package_name version; do + if [[ -n "$package_name" && -n "$version" ]]; then + # Check if this package is not in the upgrade list + if ! 
echo "$upgradable" | grep -q "^$package_name/"; then + if [[ "$first_ref" == true ]]; then + first_ref=false + else + packages_ref+="," + fi + + packages_ref+="{\"name\":\"$package_name\",\"currentVersion\":\"$version\",\"needsUpdate\":false,\"isSecurityUpdate\":false}" + fi + fi + done <<< "$installed" +} + +# Get package info for YUM/DNF-based systems +get_yum_packages() { + local -n packages_ref=$1 + local -n first_ref=$2 + + local package_manager="yum" + if command -v dnf &> /dev/null; then + package_manager="dnf" + fi + + # Get upgradable packages + local upgradable=$($package_manager check-update 2>/dev/null | grep -v "^$" | grep -v "^Loaded" | grep -v "^Last metadata" | tail -n +2) + + while IFS= read -r line; do + if [[ "$line" =~ ^([^[:space:]]+)[[:space:]]+([^[:space:]]+)[[:space:]]+([^[:space:]]+) ]]; then + local package_name="${BASH_REMATCH[1]}" + local available_version="${BASH_REMATCH[2]}" + local repo="${BASH_REMATCH[3]}" + + # Get current version + local current_version=$($package_manager list installed "$package_name" 2>/dev/null | grep "^$package_name" | awk '{print $2}') + + local is_security_update=false + if echo "$repo" | grep -q "security"; then + is_security_update=true + fi + + if [[ "$first_ref" == true ]]; then + first_ref=false + else + packages_ref+="," + fi + + packages_ref+="{\"name\":\"$package_name\",\"currentVersion\":\"$current_version\",\"availableVersion\":\"$available_version\",\"needsUpdate\":true,\"isSecurityUpdate\":$is_security_update}" + fi + done <<< "$upgradable" + + # Get some installed packages that are up to date + local installed=$($package_manager list installed 2>/dev/null | grep -v "^Loaded" | grep -v "^Installed" | head -100) + + while IFS= read -r line; do + if [[ "$line" =~ ^([^[:space:]]+)[[:space:]]+([^[:space:]]+) ]]; then + local package_name="${BASH_REMATCH[1]}" + local version="${BASH_REMATCH[2]}" + + # Check if this package is not in the upgrade list + if ! 
echo "$upgradable" | grep -q "^$package_name "; then + if [[ "$first_ref" == true ]]; then + first_ref=false + else + packages_ref+="," + fi + + packages_ref+="{\"name\":\"$package_name\",\"currentVersion\":\"$version\",\"needsUpdate\":false,\"isSecurityUpdate\":false}" + fi + fi + done <<< "$installed" +} + +# Get hardware information +get_hardware_info() { + local cpu_model="" + local cpu_cores=0 + local ram_installed=0 + local swap_size=0 + local disk_details="[]" + + # CPU Information + if command -v lscpu >/dev/null 2>&1; then + cpu_model=$(lscpu | grep "Model name" | cut -d':' -f2 | xargs) + cpu_cores=$(lscpu | grep "^CPU(s):" | cut -d':' -f2 | xargs) + elif [[ -f /proc/cpuinfo ]]; then + cpu_model=$(grep "model name" /proc/cpuinfo | head -1 | cut -d':' -f2 | xargs) + cpu_cores=$(grep -c "^processor" /proc/cpuinfo) + fi + + # Memory Information + if command -v free >/dev/null 2>&1; then + ram_installed=$(free -g | grep "^Mem:" | awk '{print $2}') + swap_size=$(free -g | grep "^Swap:" | awk '{print $2}') + elif [[ -f /proc/meminfo ]]; then + ram_installed=$(grep "MemTotal" /proc/meminfo | awk '{print int($2/1024/1024)}') + swap_size=$(grep "SwapTotal" /proc/meminfo | awk '{print int($2/1024/1024)}') + fi + + # Disk Information + if command -v lsblk >/dev/null 2>&1; then + disk_details=$(lsblk -J -o NAME,SIZE,TYPE,MOUNTPOINT | jq -c '[.blockdevices[] | select(.type == "disk") | {name: .name, size: .size, mountpoint: .mountpoint}]') + elif command -v df >/dev/null 2>&1; then + disk_details=$(df -h | grep -E "^/dev/" | awk '{print "{\"name\":\""$1"\",\"size\":\""$2"\",\"mountpoint\":\""$6"\"}"}' | jq -s .) 
+ fi + + echo "{\"cpuModel\":\"$cpu_model\",\"cpuCores\":$cpu_cores,\"ramInstalled\":$ram_installed,\"swapSize\":$swap_size,\"diskDetails\":$disk_details}" +} + +# Get network information +get_network_info() { + local gateway_ip="" + local dns_servers="[]" + local network_interfaces="[]" + + # Gateway IP + if command -v ip >/dev/null 2>&1; then + gateway_ip=$(ip route | grep default | head -1 | awk '{print $3}') + elif command -v route >/dev/null 2>&1; then + gateway_ip=$(route -n | grep '^0.0.0.0' | head -1 | awk '{print $2}') + fi + + # DNS Servers + if [[ -f /etc/resolv.conf ]]; then + dns_servers=$(grep "nameserver" /etc/resolv.conf | awk '{print $2}' | jq -R . | jq -s .) + fi + + # Network Interfaces + if command -v ip >/dev/null 2>&1; then + network_interfaces=$(ip -j addr show | jq -c '[.[] | {name: .ifname, type: .link_type, addresses: [.addr_info[]? | {address: .local, family: .family}]}]') + elif command -v ifconfig >/dev/null 2>&1; then + network_interfaces=$(ifconfig -a | grep -E "^[a-zA-Z]" | awk '{print $1}' | jq -R . | jq -s .) 
+ fi + + echo "{\"gatewayIp\":\"$gateway_ip\",\"dnsServers\":$dns_servers,\"networkInterfaces\":$network_interfaces}" +} + +# Get system information +get_system_info() { + local kernel_version="" + local selinux_status="" + local system_uptime="" + local load_average="[]" + + # Kernel Version + if [[ -f /proc/version ]]; then + kernel_version=$(cat /proc/version | awk '{print $3}') + elif command -v uname >/dev/null 2>&1; then + kernel_version=$(uname -r) + fi + + # SELinux Status + if command -v getenforce >/dev/null 2>&1; then + selinux_status=$(getenforce 2>/dev/null | tr '[:upper:]' '[:lower:]') + elif [[ -f /etc/selinux/config ]]; then + selinux_status=$(grep "^SELINUX=" /etc/selinux/config | cut -d'=' -f2 | tr '[:upper:]' '[:lower:]') + else + selinux_status="disabled" + fi + + # System Uptime + if [[ -f /proc/uptime ]]; then + local uptime_seconds=$(cat /proc/uptime | awk '{print int($1)}') + local days=$((uptime_seconds / 86400)) + local hours=$(((uptime_seconds % 86400) / 3600)) + local minutes=$(((uptime_seconds % 3600) / 60)) + system_uptime="${days}d ${hours}h ${minutes}m" + elif command -v uptime >/dev/null 2>&1; then + system_uptime=$(uptime | awk -F'up ' '{print $2}' | awk -F', load' '{print $1}') + fi + + # Load Average + if [[ -f /proc/loadavg ]]; then + load_average=$(cat /proc/loadavg | awk '{print "["$1","$2","$3"]"}') + elif command -v uptime >/dev/null 2>&1; then + load_average=$(uptime | awk -F'load average: ' '{print "["$2"]"}' | tr -d ' ') + fi + + echo "{\"kernelVersion\":\"$kernel_version\",\"selinuxStatus\":\"$selinux_status\",\"systemUptime\":\"$system_uptime\",\"loadAverage\":$load_average}" +} + +# Send package update to server +send_update() { + load_credentials + + info "Collecting package information..." + local packages_json=$(get_package_info) + + info "Collecting repository information..." + local repositories_json=$(get_repository_info) + + info "Collecting hardware information..." 
+ local hardware_json=$(get_hardware_info) + + info "Collecting network information..." + local network_json=$(get_network_info) + + info "Collecting system information..." + local system_json=$(get_system_info) + + info "Sending update to PatchMon server..." + + # Merge all JSON objects into one + local merged_json=$(echo "$hardware_json $network_json $system_json" | jq -s '.[0] * .[1] * .[2]') + + local payload=$(cat </dev/null; then + # Replace current script + mv "/tmp/patchmon-agent-new.sh" "$0" + chmod +x "$0" + success "Agent updated successfully" + info "Backup saved as: $0.backup.$(date +%Y%m%d_%H%M%S)" + + # Get the new version number + local new_version=$(grep '^AGENT_VERSION=' "$0" | cut -d'"' -f2) + info "Updated to version: $new_version" + + # Automatically run update to send new information to PatchMon + info "Sending updated information to PatchMon..." + if "$0" update; then + success "Successfully sent updated information to PatchMon" + else + warning "Failed to send updated information to PatchMon (this is not critical)" + fi + else + error "Downloaded script is invalid" + rm -f "/tmp/patchmon-agent-new.sh" + fi + else + error "Failed to download new agent script" + fi + else + error "Failed to get update information" + fi +} + +# Update crontab with current policy +update_crontab() { + load_credentials + info "Updating crontab with current policy..." + local response=$(curl -s -X GET "$PATCHMON_SERVER/api/$API_VERSION/settings/update-interval") + if [[ $? 
-eq 0 ]]; then + local update_interval=$(echo "$response" | grep -o '"updateInterval":[0-9]*' | cut -d':' -f2) + if [[ -n "$update_interval" ]]; then + # Generate the expected crontab entry + local expected_crontab="" + if [[ $update_interval -eq 60 ]]; then + # Hourly updates + expected_crontab="0 * * * * /usr/local/bin/patchmon-agent.sh update >/dev/null 2>&1" + else + # Custom interval updates + expected_crontab="*/$update_interval * * * * /usr/local/bin/patchmon-agent.sh update >/dev/null 2>&1" + fi + + # Get current crontab + local current_crontab=$(crontab -l 2>/dev/null | grep "patchmon-agent.sh update" | head -1) + + # Check if crontab needs updating + if [[ "$current_crontab" == "$expected_crontab" ]]; then + info "Crontab is already up to date (interval: $update_interval minutes)" + return 0 + fi + + info "Setting update interval to $update_interval minutes" + echo "$expected_crontab" | crontab - + success "Crontab updated successfully" + else + error "Could not determine update interval from server" + fi + else + error "Failed to get update interval policy" + fi +} + +# Show detailed system diagnostics +show_diagnostics() { + info "PatchMon Agent Diagnostics v$AGENT_VERSION" + echo "" + + # System information + echo "=== System Information ===" + echo "OS: $(uname -s)" + echo "Architecture: $(uname -m)" + echo "Kernel: $(uname -r)" + echo "Hostname: $(hostname)" + echo "Uptime: $(uptime -p 2>/dev/null || uptime)" + echo "" + + # Agent information + echo "=== Agent Information ===" + echo "Version: $AGENT_VERSION" + echo "Script Path: $0" + echo "Config File: $CONFIG_FILE" + echo "Credentials File: $CREDENTIALS_FILE" + echo "Log File: $LOG_FILE" + echo "Script Size: $(stat -c%s "$0" 2>/dev/null || echo "Unknown") bytes" + echo "Last Modified: $(stat -c%y "$0" 2>/dev/null || echo "Unknown")" + echo "" + + # Configuration + if [[ -f "$CONFIG_FILE" ]]; then + echo "=== Configuration ===" + cat "$CONFIG_FILE" + echo "" + else + echo "=== Configuration ===" + 
echo "No configuration file found at $CONFIG_FILE" + echo "" + fi + + # Credentials status + echo "=== Credentials Status ===" + if [[ -f "$CREDENTIALS_FILE" ]]; then + echo "Credentials file exists: Yes" + echo "File size: $(stat -c%s "$CREDENTIALS_FILE" 2>/dev/null || echo "Unknown") bytes" + echo "File permissions: $(stat -c%a "$CREDENTIALS_FILE" 2>/dev/null || echo "Unknown")" + else + echo "Credentials file exists: No" + fi + echo "" + + # Crontab status + echo "=== Crontab Status ===" + local crontab_entries=$(crontab -l 2>/dev/null | grep patchmon-agent || echo "None") + if [[ "$crontab_entries" != "None" ]]; then + echo "Crontab entries:" + echo "$crontab_entries" + else + echo "No crontab entries found" + fi + echo "" + + # Network connectivity + echo "=== Network Connectivity ===" + if ping -c 1 -W 3 "$(echo "$PATCHMON_SERVER" | sed 's|http://||' | sed 's|https://||' | cut -d: -f1)" >/dev/null 2>&1; then + echo "Server reachable: Yes" + else + echo "Server reachable: No" + fi + echo "Server URL: $PATCHMON_SERVER" + echo "" + + # Recent logs + echo "=== Recent Logs (last 10 lines) ===" + if [[ -f "$LOG_FILE" ]]; then + tail -10 "$LOG_FILE" 2>/dev/null || echo "Could not read log file" + else + echo "Log file does not exist" + fi +} + +# Show current configuration +show_config() { + info "Current Configuration:" + echo " Server: ${PATCHMON_SERVER}" + echo " API Version: ${API_VERSION}" + echo " Agent Version: ${AGENT_VERSION}" + echo " Config File: ${CONFIG_FILE}" + echo " Credentials File: ${CREDENTIALS_FILE}" + echo " Log File: ${LOG_FILE}" + + if [[ -f "$CREDENTIALS_FILE" ]]; then + source "$CREDENTIALS_FILE" + echo " API ID: ${API_ID}" + echo " API Key: ${API_KEY:0:8}..." 
# Show only first 8 characters + else + echo " API Credentials: Not configured" + fi +} + +# Main function +main() { + case "$1" in + "configure") + check_root + setup_directories + load_config + configure_credentials "$2" "$3" + ;; + "test") + check_root + setup_directories + load_config + test_credentials + ;; + "update") + check_root + setup_directories + load_config + detect_os + send_update + ;; + "ping") + check_root + setup_directories + load_config + ping_server + ;; + "config") + load_config + show_config + ;; + "check-version") + check_root + setup_directories + load_config + check_version + ;; + "update-agent") + check_root + setup_directories + load_config + update_agent + ;; + "update-crontab") + check_root + setup_directories + load_config + update_crontab + ;; + "diagnostics") + show_diagnostics + ;; + *) + echo "PatchMon Agent v$AGENT_VERSION - API Credential Based" + echo "Usage: $0 {configure|test|update|ping|config|check-version|update-agent|update-crontab|diagnostics}" + echo "" + echo "Commands:" + echo " configure - Configure API credentials for this host" + echo " test - Test API credentials connectivity" + echo " update - Send package update information to server" + echo " ping - Test connectivity to server" + echo " config - Show current configuration" + echo " check-version - Check for agent updates" + echo " update-agent - Update agent to latest version" + echo " update-crontab - Update crontab with current policy" + echo " diagnostics - Show detailed system diagnostics" + echo "" + echo "Setup Process:" + echo " 1. Contact your PatchMon administrator to create a host entry" + echo " 2. Run: $0 configure (provided by admin)" + echo " 3. Run: $0 test (to verify connection)" + echo " 4. 
Run: $0 update (to send initial package data)" + echo "" + echo "Configuration:" + echo " Edit $CONFIG_FILE to customize server settings" + echo " PATCHMON_SERVER=http://your-server:3001" + exit 1 + ;; + esac +} + +# Run main function +main "$@" \ No newline at end of file diff --git a/agents/patchmon-agent.sh.backup.20250920_001319 b/agents/patchmon-agent.sh.backup.20250920_001319 new file mode 100755 index 0000000..f6ecdb3 --- /dev/null +++ b/agents/patchmon-agent.sh.backup.20250920_001319 @@ -0,0 +1,1219 @@ +#!/bin/bash + +# PatchMon Agent Script v1.2.5 +# This script sends package update information to the PatchMon server using API credentials + +# Configuration +PATCHMON_SERVER="${PATCHMON_SERVER:-http://localhost:3001}" +API_VERSION="v1" +AGENT_VERSION="1.2.5" +CONFIG_FILE="/etc/patchmon/agent.conf" +CREDENTIALS_FILE="/etc/patchmon/credentials" +LOG_FILE="/var/log/patchmon-agent.log" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Logging function +log() { + echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE" +} + +# Error handling +error() { + echo -e "${RED}ERROR: $1${NC}" >&2 + log "ERROR: $1" + exit 1 +} + +# Info logging +info() { + echo -e "${BLUE}INFO: $1${NC}" + log "INFO: $1" +} + +# Success logging +success() { + echo -e "${GREEN}SUCCESS: $1${NC}" + log "SUCCESS: $1" +} + +# Warning logging +warning() { + echo -e "${YELLOW}WARNING: $1${NC}" + log "WARNING: $1" +} + +# Check if running as root +check_root() { + if [[ $EUID -ne 0 ]]; then + error "This script must be run as root" + fi +} + +# Create necessary directories +setup_directories() { + mkdir -p /etc/patchmon + mkdir -p /var/log + touch "$LOG_FILE" + chmod 600 "$LOG_FILE" +} + +# Load configuration +load_config() { + if [[ -f "$CONFIG_FILE" ]]; then + source "$CONFIG_FILE" + fi +} + +# Load API credentials +load_credentials() { + if [[ ! 
-f "$CREDENTIALS_FILE" ]]; then + error "Credentials file not found at $CREDENTIALS_FILE. Please configure API credentials first." + fi + + source "$CREDENTIALS_FILE" + + if [[ -z "$API_ID" ]] || [[ -z "$API_KEY" ]]; then + error "API_ID and API_KEY must be configured in $CREDENTIALS_FILE" + fi + + # Use PATCHMON_URL from credentials if available, otherwise use default + if [[ -n "$PATCHMON_URL" ]]; then + PATCHMON_SERVER="$PATCHMON_URL" + fi +} + +# Configure API credentials +configure_credentials() { + info "Setting up API credentials..." + + if [[ -z "$1" ]] || [[ -z "$2" ]]; then + echo "Usage: $0 configure [SERVER_URL]" + echo "" + echo "Example:" + echo " $0 configure patchmon_1a2b3c4d abcd1234567890abcdef1234567890abcdef1234567890abcdef1234567890" + echo " $0 configure patchmon_1a2b3c4d abcd1234567890abcdef1234567890abcdef1234567890abcdef1234567890 http://patchmon.example.com" + echo "" + echo "Contact your PatchMon administrator to get your API credentials." + exit 1 + fi + + local api_id="$1" + local api_key="$2" + local server_url="${3:-$PATCHMON_SERVER}" + + # Validate API ID format + if [[ ! "$api_id" =~ ^patchmon_[a-f0-9]{16}$ ]]; then + error "Invalid API ID format. API ID should be in format: patchmon_xxxxxxxxxxxxxxxx" + fi + + # Validate API Key format (64 hex characters) + if [[ ! "$api_key" =~ ^[a-f0-9]{64}$ ]]; then + error "Invalid API Key format. API Key should be 64 hexadecimal characters." + fi + + # Validate server URL format + if [[ ! "$server_url" =~ ^https?:// ]]; then + error "Invalid server URL format. Must start with http:// or https://" + fi + + # Create credentials file + cat > "$CREDENTIALS_FILE" << EOF +# PatchMon API Credentials +# Generated on $(date) +PATCHMON_URL="$server_url" +API_ID="$api_id" +API_KEY="$api_key" +EOF + + chmod 600 "$CREDENTIALS_FILE" + success "API credentials configured successfully" + info "Credentials saved to: $CREDENTIALS_FILE" + + # Test credentials + info "Testing API credentials..." 
+ test_credentials +} + +# Test API credentials +test_credentials() { + load_credentials + + local response=$(curl -s -X POST \ + -H "Content-Type: application/json" \ + -H "X-API-ID: $API_ID" \ + -H "X-API-KEY: $API_KEY" \ + "$PATCHMON_SERVER/api/$API_VERSION/hosts/ping") + + if [[ $? -eq 0 ]] && echo "$response" | grep -q "success"; then + success "API credentials are valid" + local hostname=$(echo "$response" | grep -o '"hostname":"[^"]*' | cut -d'"' -f4) + if [[ -n "$hostname" ]]; then + info "Connected as host: $hostname" + fi + else + error "API credentials test failed: $response" + fi +} + +# Detect OS and version +detect_os() { + if [[ -f /etc/os-release ]]; then + source /etc/os-release + OS_TYPE=$(echo "$ID" | tr '[:upper:]' '[:lower:]') + OS_VERSION="$VERSION_ID" + + # Map OS variations to their appropriate categories + case "$OS_TYPE" in + "pop"|"linuxmint"|"elementary") + OS_TYPE="ubuntu" + ;; + "opensuse"|"opensuse-leap"|"opensuse-tumbleweed") + OS_TYPE="suse" + ;; + "rocky"|"almalinux") + OS_TYPE="rhel" + ;; + esac + + elif [[ -f /etc/redhat-release ]]; then + if grep -q "CentOS" /etc/redhat-release; then + OS_TYPE="centos" + elif grep -q "Red Hat" /etc/redhat-release; then + OS_TYPE="rhel" + fi + OS_VERSION=$(grep -oE '[0-9]+\.[0-9]+' /etc/redhat-release | head -1) + else + error "Unable to detect OS version" + fi + + ARCHITECTURE=$(uname -m) + HOSTNAME=$(hostname) + IP_ADDRESS=$(hostname -I | awk '{print $1}') +} + +# Get repository information based on OS +get_repository_info() { + local repos_json="[" + local first=true + + case "$OS_TYPE" in + "ubuntu"|"debian") + get_apt_repositories repos_json first + ;; + "centos"|"rhel"|"fedora") + get_yum_repositories repos_json first + ;; + *) + # Return empty array for unsupported OS + ;; + esac + + repos_json+="]" + echo "$repos_json" +} + +# Get repository info for APT-based systems +get_apt_repositories() { + local -n repos_ref=$1 + local -n first_ref=$2 + + # Parse traditional .list files + local 
sources_files="/etc/apt/sources.list" + if [[ -d "/etc/apt/sources.list.d" ]]; then + sources_files="$sources_files $(find /etc/apt/sources.list.d -name '*.list' 2>/dev/null)" + fi + + for file in $sources_files; do + if [[ -f "$file" ]]; then + while IFS= read -r line; do + # Skip comments and empty lines + if [[ "$line" =~ ^[[:space:]]*# ]] || [[ -z "$line" ]]; then + continue + fi + + # Parse repository line (deb or deb-src) + if [[ "$line" =~ ^[[:space:]]*(deb|deb-src)[[:space:]]+ ]]; then + # Clean the line and extract components + local clean_line=$(echo "$line" | xargs) + local repo_type=$(echo "$clean_line" | awk '{print $1}') + + # Handle modern APT format with options like [signed-by=...] + local url="" + local distribution="" + local components="" + + if [[ "$clean_line" =~ \[.*\] ]]; then + # Modern format: deb [options] URL distribution components + # Extract URL (first field after the options) + url=$(echo "$clean_line" | sed 's/deb[^[:space:]]* \[[^]]*\] //' | awk '{print $1}') + distribution=$(echo "$clean_line" | sed 's/deb[^[:space:]]* \[[^]]*\] //' | awk '{print $2}') + components=$(echo "$clean_line" | sed 's/deb[^[:space:]]* \[[^]]*\] [^[:space:]]* [^[:space:]]* //') + else + # Traditional format: deb URL distribution components + url=$(echo "$clean_line" | awk '{print $2}') + distribution=$(echo "$clean_line" | awk '{print $3}') + components=$(echo "$clean_line" | cut -d' ' -f4- | xargs) + fi + + # Skip if URL doesn't look like a valid URL + if [[ ! "$url" =~ ^https?:// ]] && [[ ! 
"$url" =~ ^ftp:// ]]; then + continue + fi + + # Skip if distribution is empty or looks malformed + if [[ -z "$distribution" ]] || [[ "$distribution" =~ \[.*\] ]]; then + continue + fi + + # Determine if repository uses HTTPS + local is_secure=false + if [[ "$url" =~ ^https:// ]]; then + is_secure=true + fi + + # Generate repository name from URL and distribution + local repo_name="$distribution" + + # Extract meaningful name from URL for better identification + if [[ "$url" =~ archive\.ubuntu\.com ]]; then + repo_name="ubuntu-$distribution" + elif [[ "$url" =~ security\.ubuntu\.com ]]; then + repo_name="ubuntu-$distribution-security" + elif [[ "$url" =~ deb\.nodesource\.com ]]; then + repo_name="nodesource-$distribution" + elif [[ "$url" =~ packagecloud\.io ]]; then + repo_name="packagecloud-$(echo "$url" | cut -d'/' -f4-5 | tr '/' '-')" + elif [[ "$url" =~ ppa\.launchpad ]]; then + repo_name="ppa-$(echo "$url" | cut -d'/' -f4-5 | tr '/' '-')" + elif [[ "$url" =~ packages\.microsoft\.com ]]; then + repo_name="microsoft-$(echo "$url" | cut -d'/' -f4-)" + elif [[ "$url" =~ download\.docker\.com ]]; then + repo_name="docker-$distribution" + else + # Fallback: use domain name + distribution + local domain=$(echo "$url" | cut -d'/' -f3 | cut -d':' -f1) + repo_name="$domain-$distribution" + fi + + # Add component suffix if relevant + if [[ "$components" =~ updates ]]; then + repo_name="$repo_name-updates" + elif [[ "$components" =~ security ]]; then + repo_name="$repo_name-security" + elif [[ "$components" =~ backports ]]; then + repo_name="$repo_name-backports" + fi + + if [[ "$first_ref" == true ]]; then + first_ref=false + else + repos_ref+="," + fi + + repos_ref+="{\"name\":\"$repo_name\",\"url\":\"$url\",\"distribution\":\"$distribution\",\"components\":\"$components\",\"repoType\":\"$repo_type\",\"isEnabled\":true,\"isSecure\":$is_secure}" + fi + done < "$file" + fi + done + + # Parse modern DEB822 format (.sources files) + if [[ -d "/etc/apt/sources.list.d" ]]; 
then + local sources_files_deb822=$(find /etc/apt/sources.list.d -name '*.sources' 2>/dev/null) + for file in $sources_files_deb822; do + if [[ -f "$file" ]]; then + local deb822_result=$(parse_deb822_sources_simple "$file") + if [[ -n "$deb822_result" ]]; then + if [[ "$first_ref" == true ]]; then + first_ref=false + repos_ref+="$deb822_result" + else + repos_ref+=",$deb822_result" + fi + fi + fi + done + fi +} + +# Simple DEB822 parser that returns JSON string +parse_deb822_sources_simple() { + local file=$1 + local result="" + local enabled="" + local types="" + local uris="" + local suites="" + local components="" + local name="" + local first_entry=true + + while IFS= read -r line; do + # Skip empty lines and comments + if [[ -z "$line" ]] || [[ "$line" =~ ^[[:space:]]*# ]]; then + continue + fi + + # Parse key-value pairs + if [[ "$line" =~ ^([^:]+):[[:space:]]*(.*)$ ]]; then + local key="${BASH_REMATCH[1]}" + local value="${BASH_REMATCH[2]}" + + case "$key" in + "Enabled") + enabled="$value" + ;; + "Types") + types="$value" + ;; + "URIs") + uris="$value" + ;; + "Suites") + suites="$value" + ;; + "Components") + components="$value" + ;; + "X-Repolib-Name") + name="$value" + ;; + esac + fi + + # Process repository entry when we hit a blank line + if [[ -z "$line" ]] || [[ "$line" =~ ^[[:space:]]*$ ]]; then + if [[ -n "$uris" && -n "$suites" && "$enabled" == "yes" ]]; then + local entry_result=$(process_deb822_entry_simple "$name" "$types" "$uris" "$suites" "$components") + if [[ -n "$entry_result" ]]; then + if [[ "$first_entry" == true ]]; then + first_entry=false + result="$entry_result" + else + result="$result,$entry_result" + fi + fi + fi + # Reset variables for next entry + enabled="" + types="" + uris="" + suites="" + components="" + name="" + fi + done < "$file" + + # Process the last entry if file doesn't end with blank line + if [[ -n "$uris" && -n "$suites" && "$enabled" == "yes" ]]; then + local entry_result=$(process_deb822_entry_simple "$name" 
"$types" "$uris" "$suites" "$components") + if [[ -n "$entry_result" ]]; then + if [[ "$first_entry" == true ]]; then + result="$entry_result" + else + result="$result,$entry_result" + fi + fi + fi + + echo "$result" +} + +# Process a DEB822 repository entry and return JSON +process_deb822_entry_simple() { + local name=$1 + local types=$2 + local uris=$3 + local suites=$4 + local components=$5 + local result="" + local first_entry=true + + # Handle multiple URIs + for uri in $uris; do + # Skip if URI doesn't look like a valid URL + if [[ ! "$uri" =~ ^https?:// ]] && [[ ! "$uri" =~ ^ftp:// ]]; then + continue + fi + + # Handle multiple suites + for suite in $suites; do + # Skip if suite looks malformed + if [[ -z "$suite" ]]; then + continue + fi + + # Determine if repository uses HTTPS + local is_secure=false + if [[ "$uri" =~ ^https:// ]]; then + is_secure=true + fi + + # Generate repository name + local repo_name="" + if [[ -n "$name" ]]; then + repo_name=$(echo "$name" | tr ' ' '-' | tr '[:upper:]' '[:lower:]') + else + repo_name="$suite" + fi + + # Extract meaningful name from URI for better identification + if [[ "$uri" =~ apt\.pop-os\.org/ubuntu ]]; then + repo_name="pop-os-ubuntu-$suite" + elif [[ "$uri" =~ apt\.pop-os\.org/release ]]; then + repo_name="pop-os-release-$suite" + elif [[ "$uri" =~ apt\.pop-os\.org/proprietary ]]; then + repo_name="pop-os-apps-$suite" + elif [[ "$uri" =~ archive\.ubuntu\.com ]]; then + repo_name="ubuntu-$suite" + elif [[ "$uri" =~ security\.ubuntu\.com ]]; then + repo_name="ubuntu-$suite-security" + else + # Fallback: use domain name + suite + local domain=$(echo "$uri" | cut -d'/' -f3 | cut -d':' -f1) + repo_name="$domain-$suite" + fi + + # Add component suffix if relevant and not already included + if [[ "$suite" != *"security"* && "$components" =~ security ]]; then + repo_name="$repo_name-security" + elif [[ "$suite" != *"updates"* && "$components" =~ updates ]]; then + repo_name="$repo_name-updates" + elif [[ "$suite" != 
*"backports"* && "$components" =~ backports ]]; then + repo_name="$repo_name-backports" + fi + + # Determine repo type (prefer deb over deb-src) + local repo_type="deb" + if [[ "$types" =~ deb-src ]] && [[ ! "$types" =~ ^deb[[:space:]] ]]; then + repo_type="deb-src" + fi + + local json_entry="{\"name\":\"$repo_name\",\"url\":\"$uri\",\"distribution\":\"$suite\",\"components\":\"$components\",\"repoType\":\"$repo_type\",\"isEnabled\":true,\"isSecure\":$is_secure}" + + if [[ "$first_entry" == true ]]; then + first_entry=false + result="$json_entry" + else + result="$result,$json_entry" + fi + done + done + + echo "$result" +} + +# Get repository info for YUM-based systems +get_yum_repositories() { + local -n repos_ref=$1 + local -n first_ref=$2 + + # Parse yum/dnf repository configuration + if command -v dnf >/dev/null 2>&1; then + local repo_info=$(dnf repolist all --verbose 2>/dev/null | grep -E "^Repo-id|^Repo-baseurl|^Repo-name|^Repo-status") + elif command -v yum >/dev/null 2>&1; then + local repo_info=$(yum repolist all -v 2>/dev/null | grep -E "^Repo-id|^Repo-baseurl|^Repo-name|^Repo-status") + fi + + # This is a simplified implementation - would need more work for full YUM support + # For now, return empty for non-APT systems +} + +# Get package information based on OS +get_package_info() { + local packages_json="[" + local first=true + + case "$OS_TYPE" in + "ubuntu"|"debian") + get_apt_packages packages_json first + ;; + "centos"|"rhel"|"fedora") + get_yum_packages packages_json first + ;; + *) + error "Unsupported OS type: $OS_TYPE" + ;; + esac + + packages_json+="]" + echo "$packages_json" +} + +# Get package info for APT-based systems +get_apt_packages() { + local -n packages_ref=$1 + local -n first_ref=$2 + + # Update package lists + apt-get update -qq + + # Get upgradable packages + local upgradable=$(apt list --upgradable 2>/dev/null | grep -v "WARNING") + + while IFS= read -r line; do + if [[ "$line" =~ 
^([^/]+)/([^[:space:]]+)[[:space:]]+([^[:space:]]+)[[:space:]]+.*[[:space:]]([^[:space:]]+)[[:space:]]*(\[.*\])? ]]; then + local package_name="${BASH_REMATCH[1]}" + local current_version="${BASH_REMATCH[4]}" + local available_version="${BASH_REMATCH[3]}" + local is_security_update=false + + # Check if it's a security update + if echo "$line" | grep -q "security"; then + is_security_update=true + fi + + if [[ "$first_ref" == true ]]; then + first_ref=false + else + packages_ref+="," + fi + + packages_ref+="{\"name\":\"$package_name\",\"currentVersion\":\"$current_version\",\"availableVersion\":\"$available_version\",\"needsUpdate\":true,\"isSecurityUpdate\":$is_security_update}" + fi + done <<< "$upgradable" + + # Get installed packages that are up to date + local installed=$(dpkg-query -W -f='${Package} ${Version}\n' | head -100) + + while IFS=' ' read -r package_name version; do + if [[ -n "$package_name" && -n "$version" ]]; then + # Check if this package is not in the upgrade list + if ! 
echo "$upgradable" | grep -q "^$package_name/"; then + if [[ "$first_ref" == true ]]; then + first_ref=false + else + packages_ref+="," + fi + + packages_ref+="{\"name\":\"$package_name\",\"currentVersion\":\"$version\",\"needsUpdate\":false,\"isSecurityUpdate\":false}" + fi + fi + done <<< "$installed" +} + +# Get package info for YUM/DNF-based systems +get_yum_packages() { + local -n packages_ref=$1 + local -n first_ref=$2 + + local package_manager="yum" + if command -v dnf &> /dev/null; then + package_manager="dnf" + fi + + # Get upgradable packages + local upgradable=$($package_manager check-update 2>/dev/null | grep -v "^$" | grep -v "^Loaded" | grep -v "^Last metadata" | tail -n +2) + + while IFS= read -r line; do + if [[ "$line" =~ ^([^[:space:]]+)[[:space:]]+([^[:space:]]+)[[:space:]]+([^[:space:]]+) ]]; then + local package_name="${BASH_REMATCH[1]}" + local available_version="${BASH_REMATCH[2]}" + local repo="${BASH_REMATCH[3]}" + + # Get current version + local current_version=$($package_manager list installed "$package_name" 2>/dev/null | grep "^$package_name" | awk '{print $2}') + + local is_security_update=false + if echo "$repo" | grep -q "security"; then + is_security_update=true + fi + + if [[ "$first_ref" == true ]]; then + first_ref=false + else + packages_ref+="," + fi + + packages_ref+="{\"name\":\"$package_name\",\"currentVersion\":\"$current_version\",\"availableVersion\":\"$available_version\",\"needsUpdate\":true,\"isSecurityUpdate\":$is_security_update}" + fi + done <<< "$upgradable" + + # Get some installed packages that are up to date + local installed=$($package_manager list installed 2>/dev/null | grep -v "^Loaded" | grep -v "^Installed" | head -100) + + while IFS= read -r line; do + if [[ "$line" =~ ^([^[:space:]]+)[[:space:]]+([^[:space:]]+) ]]; then + local package_name="${BASH_REMATCH[1]}" + local version="${BASH_REMATCH[2]}" + + # Check if this package is not in the upgrade list + if ! 
echo "$upgradable" | grep -q "^$package_name "; then + if [[ "$first_ref" == true ]]; then + first_ref=false + else + packages_ref+="," + fi + + packages_ref+="{\"name\":\"$package_name\",\"currentVersion\":\"$version\",\"needsUpdate\":false,\"isSecurityUpdate\":false}" + fi + fi + done <<< "$installed" +} + +# Get hardware information +get_hardware_info() { + local cpu_model="" + local cpu_cores=0 + local ram_installed=0 + local swap_size=0 + local disk_details="[]" + + # CPU Information + if command -v lscpu >/dev/null 2>&1; then + cpu_model=$(lscpu | grep "Model name" | cut -d':' -f2 | xargs) + cpu_cores=$(lscpu | grep "^CPU(s):" | cut -d':' -f2 | xargs) + elif [[ -f /proc/cpuinfo ]]; then + cpu_model=$(grep "model name" /proc/cpuinfo | head -1 | cut -d':' -f2 | xargs) + cpu_cores=$(grep -c "^processor" /proc/cpuinfo) + fi + + # Memory Information + if command -v free >/dev/null 2>&1; then + ram_installed=$(free -g | grep "^Mem:" | awk '{print $2}') + swap_size=$(free -g | grep "^Swap:" | awk '{print $2}') + elif [[ -f /proc/meminfo ]]; then + ram_installed=$(grep "MemTotal" /proc/meminfo | awk '{print int($2/1024/1024)}') + swap_size=$(grep "SwapTotal" /proc/meminfo | awk '{print int($2/1024/1024)}') + fi + + # Disk Information + if command -v lsblk >/dev/null 2>&1; then + disk_details=$(lsblk -J -o NAME,SIZE,TYPE,MOUNTPOINT | jq -c '[.blockdevices[] | select(.type == "disk") | {name: .name, size: .size, mountpoint: .mountpoint}]') + elif command -v df >/dev/null 2>&1; then + disk_details=$(df -h | grep -E "^/dev/" | awk '{print "{\"name\":\""$1"\",\"size\":\""$2"\",\"mountpoint\":\""$6"\"}"}' | jq -s .) 
+ fi + + echo "{\"cpuModel\":\"$cpu_model\",\"cpuCores\":$cpu_cores,\"ramInstalled\":$ram_installed,\"swapSize\":$swap_size,\"diskDetails\":$disk_details}" +} + +# Get network information +get_network_info() { + local gateway_ip="" + local dns_servers="[]" + local network_interfaces="[]" + + # Gateway IP + if command -v ip >/dev/null 2>&1; then + gateway_ip=$(ip route | grep default | head -1 | awk '{print $3}') + elif command -v route >/dev/null 2>&1; then + gateway_ip=$(route -n | grep '^0.0.0.0' | head -1 | awk '{print $2}') + fi + + # DNS Servers + if [[ -f /etc/resolv.conf ]]; then + dns_servers=$(grep "nameserver" /etc/resolv.conf | awk '{print $2}' | jq -R . | jq -s .) + fi + + # Network Interfaces + if command -v ip >/dev/null 2>&1; then + network_interfaces=$(ip -j addr show | jq -c '[.[] | {name: .ifname, type: .link_type, addresses: [.addr_info[]? | {address: .local, family: .family}]}]') + elif command -v ifconfig >/dev/null 2>&1; then + network_interfaces=$(ifconfig -a | grep -E "^[a-zA-Z]" | awk '{print $1}' | jq -R . | jq -s .) 
+ fi + + echo "{\"gatewayIp\":\"$gateway_ip\",\"dnsServers\":$dns_servers,\"networkInterfaces\":$network_interfaces}" +} + +# Get system information +get_system_info() { + local kernel_version="" + local selinux_status="" + local system_uptime="" + local load_average="[]" + + # Kernel Version + if [[ -f /proc/version ]]; then + kernel_version=$(cat /proc/version | awk '{print $3}') + elif command -v uname >/dev/null 2>&1; then + kernel_version=$(uname -r) + fi + + # SELinux Status + if command -v getenforce >/dev/null 2>&1; then + selinux_status=$(getenforce 2>/dev/null | tr '[:upper:]' '[:lower:]') + elif [[ -f /etc/selinux/config ]]; then + selinux_status=$(grep "^SELINUX=" /etc/selinux/config | cut -d'=' -f2 | tr '[:upper:]' '[:lower:]') + else + selinux_status="disabled" + fi + + # System Uptime + if [[ -f /proc/uptime ]]; then + local uptime_seconds=$(cat /proc/uptime | awk '{print int($1)}') + local days=$((uptime_seconds / 86400)) + local hours=$(((uptime_seconds % 86400) / 3600)) + local minutes=$(((uptime_seconds % 3600) / 60)) + system_uptime="${days}d ${hours}h ${minutes}m" + elif command -v uptime >/dev/null 2>&1; then + system_uptime=$(uptime | awk -F'up ' '{print $2}' | awk -F', load' '{print $1}') + fi + + # Load Average + if [[ -f /proc/loadavg ]]; then + load_average=$(cat /proc/loadavg | awk '{print "["$1","$2","$3"]"}') + elif command -v uptime >/dev/null 2>&1; then + load_average=$(uptime | awk -F'load average: ' '{print "["$2"]"}' | tr -d ' ') + fi + + echo "{\"kernelVersion\":\"$kernel_version\",\"selinuxStatus\":\"$selinux_status\",\"systemUptime\":\"$system_uptime\",\"loadAverage\":$load_average}" +} + +# Send package update to server +send_update() { + load_credentials + + info "Collecting package information..." + local packages_json=$(get_package_info) + + info "Collecting repository information..." + local repositories_json=$(get_repository_info) + + info "Collecting hardware information..." 
+ local hardware_json=$(get_hardware_info) + + info "Collecting network information..." + local network_json=$(get_network_info) + + info "Collecting system information..." + local system_json=$(get_system_info) + + info "Sending update to PatchMon server..." + + # Merge all JSON objects into one + local merged_json=$(echo "$hardware_json $network_json $system_json" | jq -s '.[0] * .[1] * .[2]') + + local payload=$(cat </dev/null; then + # Replace current script + mv "/tmp/patchmon-agent-new.sh" "$0" + chmod +x "$0" + success "Agent updated successfully" + info "Backup saved as: $0.backup.$(date +%Y%m%d_%H%M%S)" + + # Get the new version number + local new_version=$(grep '^AGENT_VERSION=' "$0" | cut -d'"' -f2) + info "Updated to version: $new_version" + + # Automatically run update to send new information to PatchMon + info "Sending updated information to PatchMon..." + if "$0" update; then + success "Successfully sent updated information to PatchMon" + else + warning "Failed to send updated information to PatchMon (this is not critical)" + fi + else + error "Downloaded script is invalid" + rm -f "/tmp/patchmon-agent-new.sh" + fi + else + error "Failed to download new agent script" + fi + else + error "Failed to get update information" + fi +} + +# Update crontab with current policy +update_crontab() { + load_credentials + info "Updating crontab with current policy..." + local response=$(curl -s -X GET "$PATCHMON_SERVER/api/$API_VERSION/settings/update-interval") + if [[ $? 
-eq 0 ]]; then + local update_interval=$(echo "$response" | grep -o '"updateInterval":[0-9]*' | cut -d':' -f2) + if [[ -n "$update_interval" ]]; then + # Generate the expected crontab entry + local expected_crontab="" + if [[ $update_interval -eq 60 ]]; then + # Hourly updates + expected_crontab="0 * * * * /usr/local/bin/patchmon-agent.sh update >/dev/null 2>&1" + else + # Custom interval updates + expected_crontab="*/$update_interval * * * * /usr/local/bin/patchmon-agent.sh update >/dev/null 2>&1" + fi + + # Get current crontab + local current_crontab=$(crontab -l 2>/dev/null | grep "patchmon-agent.sh update" | head -1) + + # Check if crontab needs updating + if [[ "$current_crontab" == "$expected_crontab" ]]; then + info "Crontab is already up to date (interval: $update_interval minutes)" + return 0 + fi + + info "Setting update interval to $update_interval minutes" + echo "$expected_crontab" | crontab - + success "Crontab updated successfully" + else + error "Could not determine update interval from server" + fi + else + error "Failed to get update interval policy" + fi +} + +# Show detailed system diagnostics +show_diagnostics() { + info "PatchMon Agent Diagnostics v$AGENT_VERSION" + echo "" + + # System information + echo "=== System Information ===" + echo "OS: $(uname -s)" + echo "Architecture: $(uname -m)" + echo "Kernel: $(uname -r)" + echo "Hostname: $(hostname)" + echo "Uptime: $(uptime -p 2>/dev/null || uptime)" + echo "" + + # Agent information + echo "=== Agent Information ===" + echo "Version: $AGENT_VERSION" + echo "Script Path: $0" + echo "Config File: $CONFIG_FILE" + echo "Credentials File: $CREDENTIALS_FILE" + echo "Log File: $LOG_FILE" + echo "Script Size: $(stat -c%s "$0" 2>/dev/null || echo "Unknown") bytes" + echo "Last Modified: $(stat -c%y "$0" 2>/dev/null || echo "Unknown")" + echo "" + + # Configuration + if [[ -f "$CONFIG_FILE" ]]; then + echo "=== Configuration ===" + cat "$CONFIG_FILE" + echo "" + else + echo "=== Configuration ===" + 
echo "No configuration file found at $CONFIG_FILE" + echo "" + fi + + # Credentials status + echo "=== Credentials Status ===" + if [[ -f "$CREDENTIALS_FILE" ]]; then + echo "Credentials file exists: Yes" + echo "File size: $(stat -c%s "$CREDENTIALS_FILE" 2>/dev/null || echo "Unknown") bytes" + echo "File permissions: $(stat -c%a "$CREDENTIALS_FILE" 2>/dev/null || echo "Unknown")" + else + echo "Credentials file exists: No" + fi + echo "" + + # Crontab status + echo "=== Crontab Status ===" + local crontab_entries=$(crontab -l 2>/dev/null | grep patchmon-agent || echo "None") + if [[ "$crontab_entries" != "None" ]]; then + echo "Crontab entries:" + echo "$crontab_entries" + else + echo "No crontab entries found" + fi + echo "" + + # Network connectivity + echo "=== Network Connectivity ===" + if ping -c 1 -W 3 "$(echo "$PATCHMON_SERVER" | sed 's|http://||' | sed 's|https://||' | cut -d: -f1)" >/dev/null 2>&1; then + echo "Server reachable: Yes" + else + echo "Server reachable: No" + fi + echo "Server URL: $PATCHMON_SERVER" + echo "" + + # Recent logs + echo "=== Recent Logs (last 10 lines) ===" + if [[ -f "$LOG_FILE" ]]; then + tail -10 "$LOG_FILE" 2>/dev/null || echo "Could not read log file" + else + echo "Log file does not exist" + fi +} + +# Show current configuration +show_config() { + info "Current Configuration:" + echo " Server: ${PATCHMON_SERVER}" + echo " API Version: ${API_VERSION}" + echo " Agent Version: ${AGENT_VERSION}" + echo " Config File: ${CONFIG_FILE}" + echo " Credentials File: ${CREDENTIALS_FILE}" + echo " Log File: ${LOG_FILE}" + + if [[ -f "$CREDENTIALS_FILE" ]]; then + source "$CREDENTIALS_FILE" + echo " API ID: ${API_ID}" + echo " API Key: ${API_KEY:0:8}..." 
# Show only first 8 characters + else + echo " API Credentials: Not configured" + fi +} + +# Main function +main() { + case "$1" in + "configure") + check_root + setup_directories + load_config + configure_credentials "$2" "$3" + ;; + "test") + check_root + setup_directories + load_config + test_credentials + ;; + "update") + check_root + setup_directories + load_config + detect_os + send_update + ;; + "ping") + check_root + setup_directories + load_config + ping_server + ;; + "config") + load_config + show_config + ;; + "check-version") + check_root + setup_directories + load_config + check_version + ;; + "update-agent") + check_root + setup_directories + load_config + update_agent + ;; + "update-crontab") + check_root + setup_directories + load_config + update_crontab + ;; + "diagnostics") + show_diagnostics + ;; + *) + echo "PatchMon Agent v$AGENT_VERSION - API Credential Based" + echo "Usage: $0 {configure|test|update|ping|config|check-version|update-agent|update-crontab|diagnostics}" + echo "" + echo "Commands:" + echo " configure - Configure API credentials for this host" + echo " test - Test API credentials connectivity" + echo " update - Send package update information to server" + echo " ping - Test connectivity to server" + echo " config - Show current configuration" + echo " check-version - Check for agent updates" + echo " update-agent - Update agent to latest version" + echo " update-crontab - Update crontab with current policy" + echo " diagnostics - Show detailed system diagnostics" + echo "" + echo "Setup Process:" + echo " 1. Contact your PatchMon administrator to create a host entry" + echo " 2. Run: $0 configure (provided by admin)" + echo " 3. Run: $0 test (to verify connection)" + echo " 4. 
Run: $0 update (to send initial package data)" + echo "" + echo "Configuration:" + echo " Edit $CONFIG_FILE to customize server settings" + echo " PATCHMON_SERVER=http://your-server:3001" + exit 1 + ;; + esac +} + +# Run main function +main "$@" \ No newline at end of file diff --git a/agents/patchmon-agent.sh.backup.20250920_002529 b/agents/patchmon-agent.sh.backup.20250920_002529 new file mode 100755 index 0000000..f6ecdb3 --- /dev/null +++ b/agents/patchmon-agent.sh.backup.20250920_002529 @@ -0,0 +1,1219 @@ +#!/bin/bash + +# PatchMon Agent Script v1.2.5 +# This script sends package update information to the PatchMon server using API credentials + +# Configuration +PATCHMON_SERVER="${PATCHMON_SERVER:-http://localhost:3001}" +API_VERSION="v1" +AGENT_VERSION="1.2.5" +CONFIG_FILE="/etc/patchmon/agent.conf" +CREDENTIALS_FILE="/etc/patchmon/credentials" +LOG_FILE="/var/log/patchmon-agent.log" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Logging function +log() { + echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE" +} + +# Error handling +error() { + echo -e "${RED}ERROR: $1${NC}" >&2 + log "ERROR: $1" + exit 1 +} + +# Info logging +info() { + echo -e "${BLUE}INFO: $1${NC}" + log "INFO: $1" +} + +# Success logging +success() { + echo -e "${GREEN}SUCCESS: $1${NC}" + log "SUCCESS: $1" +} + +# Warning logging +warning() { + echo -e "${YELLOW}WARNING: $1${NC}" + log "WARNING: $1" +} + +# Check if running as root +check_root() { + if [[ $EUID -ne 0 ]]; then + error "This script must be run as root" + fi +} + +# Create necessary directories +setup_directories() { + mkdir -p /etc/patchmon + mkdir -p /var/log + touch "$LOG_FILE" + chmod 600 "$LOG_FILE" +} + +# Load configuration +load_config() { + if [[ -f "$CONFIG_FILE" ]]; then + source "$CONFIG_FILE" + fi +} + +# Load API credentials +load_credentials() { + if [[ ! 
-f "$CREDENTIALS_FILE" ]]; then + error "Credentials file not found at $CREDENTIALS_FILE. Please configure API credentials first." + fi + + source "$CREDENTIALS_FILE" + + if [[ -z "$API_ID" ]] || [[ -z "$API_KEY" ]]; then + error "API_ID and API_KEY must be configured in $CREDENTIALS_FILE" + fi + + # Use PATCHMON_URL from credentials if available, otherwise use default + if [[ -n "$PATCHMON_URL" ]]; then + PATCHMON_SERVER="$PATCHMON_URL" + fi +} + +# Configure API credentials +configure_credentials() { + info "Setting up API credentials..." + + if [[ -z "$1" ]] || [[ -z "$2" ]]; then + echo "Usage: $0 configure [SERVER_URL]" + echo "" + echo "Example:" + echo " $0 configure patchmon_1a2b3c4d abcd1234567890abcdef1234567890abcdef1234567890abcdef1234567890" + echo " $0 configure patchmon_1a2b3c4d abcd1234567890abcdef1234567890abcdef1234567890abcdef1234567890 http://patchmon.example.com" + echo "" + echo "Contact your PatchMon administrator to get your API credentials." + exit 1 + fi + + local api_id="$1" + local api_key="$2" + local server_url="${3:-$PATCHMON_SERVER}" + + # Validate API ID format + if [[ ! "$api_id" =~ ^patchmon_[a-f0-9]{16}$ ]]; then + error "Invalid API ID format. API ID should be in format: patchmon_xxxxxxxxxxxxxxxx" + fi + + # Validate API Key format (64 hex characters) + if [[ ! "$api_key" =~ ^[a-f0-9]{64}$ ]]; then + error "Invalid API Key format. API Key should be 64 hexadecimal characters." + fi + + # Validate server URL format + if [[ ! "$server_url" =~ ^https?:// ]]; then + error "Invalid server URL format. Must start with http:// or https://" + fi + + # Create credentials file + cat > "$CREDENTIALS_FILE" << EOF +# PatchMon API Credentials +# Generated on $(date) +PATCHMON_URL="$server_url" +API_ID="$api_id" +API_KEY="$api_key" +EOF + + chmod 600 "$CREDENTIALS_FILE" + success "API credentials configured successfully" + info "Credentials saved to: $CREDENTIALS_FILE" + + # Test credentials + info "Testing API credentials..." 
+ test_credentials +} + +# Test API credentials +test_credentials() { + load_credentials + + local response=$(curl -s -X POST \ + -H "Content-Type: application/json" \ + -H "X-API-ID: $API_ID" \ + -H "X-API-KEY: $API_KEY" \ + "$PATCHMON_SERVER/api/$API_VERSION/hosts/ping") + + if [[ $? -eq 0 ]] && echo "$response" | grep -q "success"; then + success "API credentials are valid" + local hostname=$(echo "$response" | grep -o '"hostname":"[^"]*' | cut -d'"' -f4) + if [[ -n "$hostname" ]]; then + info "Connected as host: $hostname" + fi + else + error "API credentials test failed: $response" + fi +} + +# Detect OS and version +detect_os() { + if [[ -f /etc/os-release ]]; then + source /etc/os-release + OS_TYPE=$(echo "$ID" | tr '[:upper:]' '[:lower:]') + OS_VERSION="$VERSION_ID" + + # Map OS variations to their appropriate categories + case "$OS_TYPE" in + "pop"|"linuxmint"|"elementary") + OS_TYPE="ubuntu" + ;; + "opensuse"|"opensuse-leap"|"opensuse-tumbleweed") + OS_TYPE="suse" + ;; + "rocky"|"almalinux") + OS_TYPE="rhel" + ;; + esac + + elif [[ -f /etc/redhat-release ]]; then + if grep -q "CentOS" /etc/redhat-release; then + OS_TYPE="centos" + elif grep -q "Red Hat" /etc/redhat-release; then + OS_TYPE="rhel" + fi + OS_VERSION=$(grep -oE '[0-9]+\.[0-9]+' /etc/redhat-release | head -1) + else + error "Unable to detect OS version" + fi + + ARCHITECTURE=$(uname -m) + HOSTNAME=$(hostname) + IP_ADDRESS=$(hostname -I | awk '{print $1}') +} + +# Get repository information based on OS +get_repository_info() { + local repos_json="[" + local first=true + + case "$OS_TYPE" in + "ubuntu"|"debian") + get_apt_repositories repos_json first + ;; + "centos"|"rhel"|"fedora") + get_yum_repositories repos_json first + ;; + *) + # Return empty array for unsupported OS + ;; + esac + + repos_json+="]" + echo "$repos_json" +} + +# Get repository info for APT-based systems +get_apt_repositories() { + local -n repos_ref=$1 + local -n first_ref=$2 + + # Parse traditional .list files + local 
sources_files="/etc/apt/sources.list" + if [[ -d "/etc/apt/sources.list.d" ]]; then + sources_files="$sources_files $(find /etc/apt/sources.list.d -name '*.list' 2>/dev/null)" + fi + + for file in $sources_files; do + if [[ -f "$file" ]]; then + while IFS= read -r line; do + # Skip comments and empty lines + if [[ "$line" =~ ^[[:space:]]*# ]] || [[ -z "$line" ]]; then + continue + fi + + # Parse repository line (deb or deb-src) + if [[ "$line" =~ ^[[:space:]]*(deb|deb-src)[[:space:]]+ ]]; then + # Clean the line and extract components + local clean_line=$(echo "$line" | xargs) + local repo_type=$(echo "$clean_line" | awk '{print $1}') + + # Handle modern APT format with options like [signed-by=...] + local url="" + local distribution="" + local components="" + + if [[ "$clean_line" =~ \[.*\] ]]; then + # Modern format: deb [options] URL distribution components + # Extract URL (first field after the options) + url=$(echo "$clean_line" | sed 's/deb[^[:space:]]* \[[^]]*\] //' | awk '{print $1}') + distribution=$(echo "$clean_line" | sed 's/deb[^[:space:]]* \[[^]]*\] //' | awk '{print $2}') + components=$(echo "$clean_line" | sed 's/deb[^[:space:]]* \[[^]]*\] [^[:space:]]* [^[:space:]]* //') + else + # Traditional format: deb URL distribution components + url=$(echo "$clean_line" | awk '{print $2}') + distribution=$(echo "$clean_line" | awk '{print $3}') + components=$(echo "$clean_line" | cut -d' ' -f4- | xargs) + fi + + # Skip if URL doesn't look like a valid URL + if [[ ! "$url" =~ ^https?:// ]] && [[ ! 
"$url" =~ ^ftp:// ]]; then + continue + fi + + # Skip if distribution is empty or looks malformed + if [[ -z "$distribution" ]] || [[ "$distribution" =~ \[.*\] ]]; then + continue + fi + + # Determine if repository uses HTTPS + local is_secure=false + if [[ "$url" =~ ^https:// ]]; then + is_secure=true + fi + + # Generate repository name from URL and distribution + local repo_name="$distribution" + + # Extract meaningful name from URL for better identification + if [[ "$url" =~ archive\.ubuntu\.com ]]; then + repo_name="ubuntu-$distribution" + elif [[ "$url" =~ security\.ubuntu\.com ]]; then + repo_name="ubuntu-$distribution-security" + elif [[ "$url" =~ deb\.nodesource\.com ]]; then + repo_name="nodesource-$distribution" + elif [[ "$url" =~ packagecloud\.io ]]; then + repo_name="packagecloud-$(echo "$url" | cut -d'/' -f4-5 | tr '/' '-')" + elif [[ "$url" =~ ppa\.launchpad ]]; then + repo_name="ppa-$(echo "$url" | cut -d'/' -f4-5 | tr '/' '-')" + elif [[ "$url" =~ packages\.microsoft\.com ]]; then + repo_name="microsoft-$(echo "$url" | cut -d'/' -f4-)" + elif [[ "$url" =~ download\.docker\.com ]]; then + repo_name="docker-$distribution" + else + # Fallback: use domain name + distribution + local domain=$(echo "$url" | cut -d'/' -f3 | cut -d':' -f1) + repo_name="$domain-$distribution" + fi + + # Add component suffix if relevant + if [[ "$components" =~ updates ]]; then + repo_name="$repo_name-updates" + elif [[ "$components" =~ security ]]; then + repo_name="$repo_name-security" + elif [[ "$components" =~ backports ]]; then + repo_name="$repo_name-backports" + fi + + if [[ "$first_ref" == true ]]; then + first_ref=false + else + repos_ref+="," + fi + + repos_ref+="{\"name\":\"$repo_name\",\"url\":\"$url\",\"distribution\":\"$distribution\",\"components\":\"$components\",\"repoType\":\"$repo_type\",\"isEnabled\":true,\"isSecure\":$is_secure}" + fi + done < "$file" + fi + done + + # Parse modern DEB822 format (.sources files) + if [[ -d "/etc/apt/sources.list.d" ]]; 
then + local sources_files_deb822=$(find /etc/apt/sources.list.d -name '*.sources' 2>/dev/null) + for file in $sources_files_deb822; do + if [[ -f "$file" ]]; then + local deb822_result=$(parse_deb822_sources_simple "$file") + if [[ -n "$deb822_result" ]]; then + if [[ "$first_ref" == true ]]; then + first_ref=false + repos_ref+="$deb822_result" + else + repos_ref+=",$deb822_result" + fi + fi + fi + done + fi +} + +# Simple DEB822 parser that returns JSON string +parse_deb822_sources_simple() { + local file=$1 + local result="" + local enabled="" + local types="" + local uris="" + local suites="" + local components="" + local name="" + local first_entry=true + + while IFS= read -r line; do + # Skip empty lines and comments + if [[ -z "$line" ]] || [[ "$line" =~ ^[[:space:]]*# ]]; then + continue + fi + + # Parse key-value pairs + if [[ "$line" =~ ^([^:]+):[[:space:]]*(.*)$ ]]; then + local key="${BASH_REMATCH[1]}" + local value="${BASH_REMATCH[2]}" + + case "$key" in + "Enabled") + enabled="$value" + ;; + "Types") + types="$value" + ;; + "URIs") + uris="$value" + ;; + "Suites") + suites="$value" + ;; + "Components") + components="$value" + ;; + "X-Repolib-Name") + name="$value" + ;; + esac + fi + + # Process repository entry when we hit a blank line + if [[ -z "$line" ]] || [[ "$line" =~ ^[[:space:]]*$ ]]; then + if [[ -n "$uris" && -n "$suites" && "$enabled" == "yes" ]]; then + local entry_result=$(process_deb822_entry_simple "$name" "$types" "$uris" "$suites" "$components") + if [[ -n "$entry_result" ]]; then + if [[ "$first_entry" == true ]]; then + first_entry=false + result="$entry_result" + else + result="$result,$entry_result" + fi + fi + fi + # Reset variables for next entry + enabled="" + types="" + uris="" + suites="" + components="" + name="" + fi + done < "$file" + + # Process the last entry if file doesn't end with blank line + if [[ -n "$uris" && -n "$suites" && "$enabled" == "yes" ]]; then + local entry_result=$(process_deb822_entry_simple "$name" 
"$types" "$uris" "$suites" "$components") + if [[ -n "$entry_result" ]]; then + if [[ "$first_entry" == true ]]; then + result="$entry_result" + else + result="$result,$entry_result" + fi + fi + fi + + echo "$result" +} + +# Process a DEB822 repository entry and return JSON +process_deb822_entry_simple() { + local name=$1 + local types=$2 + local uris=$3 + local suites=$4 + local components=$5 + local result="" + local first_entry=true + + # Handle multiple URIs + for uri in $uris; do + # Skip if URI doesn't look like a valid URL + if [[ ! "$uri" =~ ^https?:// ]] && [[ ! "$uri" =~ ^ftp:// ]]; then + continue + fi + + # Handle multiple suites + for suite in $suites; do + # Skip if suite looks malformed + if [[ -z "$suite" ]]; then + continue + fi + + # Determine if repository uses HTTPS + local is_secure=false + if [[ "$uri" =~ ^https:// ]]; then + is_secure=true + fi + + # Generate repository name + local repo_name="" + if [[ -n "$name" ]]; then + repo_name=$(echo "$name" | tr ' ' '-' | tr '[:upper:]' '[:lower:]') + else + repo_name="$suite" + fi + + # Extract meaningful name from URI for better identification + if [[ "$uri" =~ apt\.pop-os\.org/ubuntu ]]; then + repo_name="pop-os-ubuntu-$suite" + elif [[ "$uri" =~ apt\.pop-os\.org/release ]]; then + repo_name="pop-os-release-$suite" + elif [[ "$uri" =~ apt\.pop-os\.org/proprietary ]]; then + repo_name="pop-os-apps-$suite" + elif [[ "$uri" =~ archive\.ubuntu\.com ]]; then + repo_name="ubuntu-$suite" + elif [[ "$uri" =~ security\.ubuntu\.com ]]; then + repo_name="ubuntu-$suite-security" + else + # Fallback: use domain name + suite + local domain=$(echo "$uri" | cut -d'/' -f3 | cut -d':' -f1) + repo_name="$domain-$suite" + fi + + # Add component suffix if relevant and not already included + if [[ "$suite" != *"security"* && "$components" =~ security ]]; then + repo_name="$repo_name-security" + elif [[ "$suite" != *"updates"* && "$components" =~ updates ]]; then + repo_name="$repo_name-updates" + elif [[ "$suite" != 
*"backports"* && "$components" =~ backports ]]; then + repo_name="$repo_name-backports" + fi + + # Determine repo type (prefer deb over deb-src) + local repo_type="deb" + if [[ "$types" =~ deb-src ]] && [[ ! "$types" =~ ^deb[[:space:]] ]]; then + repo_type="deb-src" + fi + + local json_entry="{\"name\":\"$repo_name\",\"url\":\"$uri\",\"distribution\":\"$suite\",\"components\":\"$components\",\"repoType\":\"$repo_type\",\"isEnabled\":true,\"isSecure\":$is_secure}" + + if [[ "$first_entry" == true ]]; then + first_entry=false + result="$json_entry" + else + result="$result,$json_entry" + fi + done + done + + echo "$result" +} + +# Get repository info for YUM-based systems +get_yum_repositories() { + local -n repos_ref=$1 + local -n first_ref=$2 + + # Parse yum/dnf repository configuration + if command -v dnf >/dev/null 2>&1; then + local repo_info=$(dnf repolist all --verbose 2>/dev/null | grep -E "^Repo-id|^Repo-baseurl|^Repo-name|^Repo-status") + elif command -v yum >/dev/null 2>&1; then + local repo_info=$(yum repolist all -v 2>/dev/null | grep -E "^Repo-id|^Repo-baseurl|^Repo-name|^Repo-status") + fi + + # This is a simplified implementation - would need more work for full YUM support + # For now, return empty for non-APT systems +} + +# Get package information based on OS +get_package_info() { + local packages_json="[" + local first=true + + case "$OS_TYPE" in + "ubuntu"|"debian") + get_apt_packages packages_json first + ;; + "centos"|"rhel"|"fedora") + get_yum_packages packages_json first + ;; + *) + error "Unsupported OS type: $OS_TYPE" + ;; + esac + + packages_json+="]" + echo "$packages_json" +} + +# Get package info for APT-based systems +get_apt_packages() { + local -n packages_ref=$1 + local -n first_ref=$2 + + # Update package lists + apt-get update -qq + + # Get upgradable packages + local upgradable=$(apt list --upgradable 2>/dev/null | grep -v "WARNING") + + while IFS= read -r line; do + if [[ "$line" =~ 
^([^/]+)/([^[:space:]]+)[[:space:]]+([^[:space:]]+)[[:space:]]+.*[[:space:]]([^[:space:]]+)[[:space:]]*(\[.*\])? ]]; then + local package_name="${BASH_REMATCH[1]}" + local current_version="${BASH_REMATCH[4]}" + local available_version="${BASH_REMATCH[3]}" + local is_security_update=false + + # Check if it's a security update + if echo "$line" | grep -q "security"; then + is_security_update=true + fi + + if [[ "$first_ref" == true ]]; then + first_ref=false + else + packages_ref+="," + fi + + packages_ref+="{\"name\":\"$package_name\",\"currentVersion\":\"$current_version\",\"availableVersion\":\"$available_version\",\"needsUpdate\":true,\"isSecurityUpdate\":$is_security_update}" + fi + done <<< "$upgradable" + + # Get installed packages that are up to date + local installed=$(dpkg-query -W -f='${Package} ${Version}\n' | head -100) + + while IFS=' ' read -r package_name version; do + if [[ -n "$package_name" && -n "$version" ]]; then + # Check if this package is not in the upgrade list + if ! 
echo "$upgradable" | grep -q "^$package_name/"; then + if [[ "$first_ref" == true ]]; then + first_ref=false + else + packages_ref+="," + fi + + packages_ref+="{\"name\":\"$package_name\",\"currentVersion\":\"$version\",\"needsUpdate\":false,\"isSecurityUpdate\":false}" + fi + fi + done <<< "$installed" +} + +# Get package info for YUM/DNF-based systems +get_yum_packages() { + local -n packages_ref=$1 + local -n first_ref=$2 + + local package_manager="yum" + if command -v dnf &> /dev/null; then + package_manager="dnf" + fi + + # Get upgradable packages + local upgradable=$($package_manager check-update 2>/dev/null | grep -v "^$" | grep -v "^Loaded" | grep -v "^Last metadata" | tail -n +2) + + while IFS= read -r line; do + if [[ "$line" =~ ^([^[:space:]]+)[[:space:]]+([^[:space:]]+)[[:space:]]+([^[:space:]]+) ]]; then + local package_name="${BASH_REMATCH[1]}" + local available_version="${BASH_REMATCH[2]}" + local repo="${BASH_REMATCH[3]}" + + # Get current version + local current_version=$($package_manager list installed "$package_name" 2>/dev/null | grep "^$package_name" | awk '{print $2}') + + local is_security_update=false + if echo "$repo" | grep -q "security"; then + is_security_update=true + fi + + if [[ "$first_ref" == true ]]; then + first_ref=false + else + packages_ref+="," + fi + + packages_ref+="{\"name\":\"$package_name\",\"currentVersion\":\"$current_version\",\"availableVersion\":\"$available_version\",\"needsUpdate\":true,\"isSecurityUpdate\":$is_security_update}" + fi + done <<< "$upgradable" + + # Get some installed packages that are up to date + local installed=$($package_manager list installed 2>/dev/null | grep -v "^Loaded" | grep -v "^Installed" | head -100) + + while IFS= read -r line; do + if [[ "$line" =~ ^([^[:space:]]+)[[:space:]]+([^[:space:]]+) ]]; then + local package_name="${BASH_REMATCH[1]}" + local version="${BASH_REMATCH[2]}" + + # Check if this package is not in the upgrade list + if ! 
echo "$upgradable" | grep -q "^$package_name "; then
+                if [[ "$first_ref" == true ]]; then
+                    first_ref=false
+                else
+                    packages_ref+=","
+                fi
+
+                packages_ref+="{\"name\":\"$package_name\",\"currentVersion\":\"$version\",\"needsUpdate\":false,\"isSecurityUpdate\":false}"
+            fi
+        fi
+    done <<< "$installed"
+}
+
+# Get hardware information
+# Emits a single-line JSON object with keys: cpuModel, cpuCores,
+# ramInstalled (GB), swapSize (GB), diskDetails (array of disk objects).
+# Each probe falls back to a secondary source when the preferred tool is
+# missing; unset values stay at their ""/0/"[]" defaults.
+get_hardware_info() {
+    local cpu_model=""
+    local cpu_cores=0
+    local ram_installed=0
+    local swap_size=0
+    local disk_details="[]"
+
+    # CPU Information: prefer lscpu, fall back to raw /proc/cpuinfo
+    if command -v lscpu >/dev/null 2>&1; then
+        cpu_model=$(lscpu | grep "Model name" | cut -d':' -f2 | xargs)
+        cpu_cores=$(lscpu | grep "^CPU(s):" | cut -d':' -f2 | xargs)
+    elif [[ -f /proc/cpuinfo ]]; then
+        cpu_model=$(grep "model name" /proc/cpuinfo | head -1 | cut -d':' -f2 | xargs)
+        cpu_cores=$(grep -c "^processor" /proc/cpuinfo)
+    fi
+
+    # Memory Information
+    # NOTE(review): 'free -g' truncates to whole GiB, so hosts with <1GB RAM
+    # or swap report 0 — consider 'free -m' plus rounding if that matters.
+    if command -v free >/dev/null 2>&1; then
+        ram_installed=$(free -g | grep "^Mem:" | awk '{print $2}')
+        swap_size=$(free -g | grep "^Swap:" | awk '{print $2}')
+    elif [[ -f /proc/meminfo ]]; then
+        ram_installed=$(grep "MemTotal" /proc/meminfo | awk '{print int($2/1024/1024)}')
+        swap_size=$(grep "SwapTotal" /proc/meminfo | awk '{print int($2/1024/1024)}')
+    fi
+
+    # Disk Information
+    # NOTE(review): both branches hard-depend on jq, but jq availability is
+    # never checked anywhere in this script; if jq is absent the command
+    # substitution yields "", and the JSON echoed below becomes invalid.
+    # Also 'jq -s .' (df branch) pretty-prints over multiple lines, which
+    # corrupts the single-line envelope — should be 'jq -s -c .'.
+    if command -v lsblk >/dev/null 2>&1; then
+        disk_details=$(lsblk -J -o NAME,SIZE,TYPE,MOUNTPOINT | jq -c '[.blockdevices[] | select(.type == "disk") | {name: .name, size: .size, mountpoint: .mountpoint}]')
+    elif command -v df >/dev/null 2>&1; then
+        disk_details=$(df -h | grep -E "^/dev/" | awk '{print "{\"name\":\""$1"\",\"size\":\""$2"\",\"mountpoint\":\""$6"\"}"}' | jq -s .)
+    fi
+
+    echo "{\"cpuModel\":\"$cpu_model\",\"cpuCores\":$cpu_cores,\"ramInstalled\":$ram_installed,\"swapSize\":$swap_size,\"diskDetails\":$disk_details}"
+}
+
+# Get network information
+# Emits a single-line JSON object with keys: gatewayIp, dnsServers (array),
+# networkInterfaces (array).
+get_network_info() {
+    local gateway_ip=""
+    local dns_servers="[]"
+    local network_interfaces="[]"
+
+    # Gateway IP: prefer iproute2 'ip route', fall back to legacy net-tools
+    if command -v ip >/dev/null 2>&1; then
+        gateway_ip=$(ip route | grep default | head -1 | awk '{print $3}')
+    elif command -v route >/dev/null 2>&1; then
+        gateway_ip=$(route -n | grep '^0.0.0.0' | head -1 | awk '{print $2}')
+    fi
+
+    # DNS Servers
+    # NOTE(review): same jq hard-dependency caveat as get_hardware_info, and
+    # 'jq -s .' pretty-prints — embedded in the single-line JSON below this
+    # produces multi-line output; should be 'jq -s -c .'.
+    if [[ -f /etc/resolv.conf ]]; then
+        dns_servers=$(grep "nameserver" /etc/resolv.conf | awk '{print $2}' | jq -R . | jq -s .)
+    fi
+
+    # Network Interfaces ('ip -j' needs an iproute2 build with JSON support)
+    if command -v ip >/dev/null 2>&1; then
+        network_interfaces=$(ip -j addr show | jq -c '[.[] | {name: .ifname, type: .link_type, addresses: [.addr_info[]? | {address: .local, family: .family}]}]')
+    elif command -v ifconfig >/dev/null 2>&1; then
+        network_interfaces=$(ifconfig -a | grep -E "^[a-zA-Z]" | awk '{print $1}' | jq -R . | jq -s .)
+    fi
+
+    echo "{\"gatewayIp\":\"$gateway_ip\",\"dnsServers\":$dns_servers,\"networkInterfaces\":$network_interfaces}"
+}
+
+# Get system information
+# Emits a single-line JSON object with keys: kernelVersion, selinuxStatus
+# (enforcing/permissive/disabled, lowercased), systemUptime ("XdYhZm"),
+# loadAverage ([1min,5min,15min]).
+get_system_info() {
+    local kernel_version=""
+    local selinux_status=""
+    local system_uptime=""
+    local load_average="[]"
+
+    # Kernel Version
+    # NOTE(review): UUoC — 'awk "{print \$3}" /proc/version' avoids the cat.
+    if [[ -f /proc/version ]]; then
+        kernel_version=$(cat /proc/version | awk '{print $3}')
+    elif command -v uname >/dev/null 2>&1; then
+        kernel_version=$(uname -r)
+    fi
+
+    # SELinux Status: getenforce if present, else the persisted config,
+    # else assume disabled (e.g. Debian/Ubuntu without SELinux)
+    if command -v getenforce >/dev/null 2>&1; then
+        selinux_status=$(getenforce 2>/dev/null | tr '[:upper:]' '[:lower:]')
+    elif [[ -f /etc/selinux/config ]]; then
+        selinux_status=$(grep "^SELINUX=" /etc/selinux/config | cut -d'=' -f2 | tr '[:upper:]' '[:lower:]')
+    else
+        selinux_status="disabled"
+    fi
+
+    # System Uptime: derive d/h/m from /proc/uptime seconds, else parse
+    # human-readable 'uptime' output
+    if [[ -f /proc/uptime ]]; then
+        local uptime_seconds=$(cat /proc/uptime | awk '{print int($1)}')
+        local days=$((uptime_seconds / 86400))
+        local hours=$(((uptime_seconds % 86400) / 3600))
+        local minutes=$(((uptime_seconds % 3600) / 60))
+        system_uptime="${days}d ${hours}h ${minutes}m"
+    elif command -v uptime >/dev/null 2>&1; then
+        system_uptime=$(uptime | awk -F'up ' '{print $2}' | awk -F', load' '{print $1}')
+    fi
+
+    # Load Average: format the three /proc/loadavg fields as a JSON array
+    if [[ -f /proc/loadavg ]]; then
+        load_average=$(cat /proc/loadavg | awk '{print "["$1","$2","$3"]"}')
+    elif command -v uptime >/dev/null 2>&1; then
+        load_average=$(uptime | awk -F'load average: ' '{print "["$2"]"}' | tr -d ' ')
+    fi
+
+    echo "{\"kernelVersion\":\"$kernel_version\",\"selinuxStatus\":\"$selinux_status\",\"systemUptime\":\"$system_uptime\",\"loadAverage\":$load_average}"
+}
+
+# Send package update to server
+send_update() {
+    load_credentials
+
+    info "Collecting package information..."
+    local packages_json=$(get_package_info)
+
+    info "Collecting repository information..."
+    local repositories_json=$(get_repository_info)
+
+    info "Collecting hardware information..."
+ local hardware_json=$(get_hardware_info) + + info "Collecting network information..." + local network_json=$(get_network_info) + + info "Collecting system information..." + local system_json=$(get_system_info) + + info "Sending update to PatchMon server..." + + # Merge all JSON objects into one + local merged_json=$(echo "$hardware_json $network_json $system_json" | jq -s '.[0] * .[1] * .[2]') + + local payload=$(cat </dev/null; then + # Replace current script + mv "/tmp/patchmon-agent-new.sh" "$0" + chmod +x "$0" + success "Agent updated successfully" + info "Backup saved as: $0.backup.$(date +%Y%m%d_%H%M%S)" + + # Get the new version number + local new_version=$(grep '^AGENT_VERSION=' "$0" | cut -d'"' -f2) + info "Updated to version: $new_version" + + # Automatically run update to send new information to PatchMon + info "Sending updated information to PatchMon..." + if "$0" update; then + success "Successfully sent updated information to PatchMon" + else + warning "Failed to send updated information to PatchMon (this is not critical)" + fi + else + error "Downloaded script is invalid" + rm -f "/tmp/patchmon-agent-new.sh" + fi + else + error "Failed to download new agent script" + fi + else + error "Failed to get update information" + fi +} + +# Update crontab with current policy +update_crontab() { + load_credentials + info "Updating crontab with current policy..." + local response=$(curl -s -X GET "$PATCHMON_SERVER/api/$API_VERSION/settings/update-interval") + if [[ $? 
-eq 0 ]]; then + local update_interval=$(echo "$response" | grep -o '"updateInterval":[0-9]*' | cut -d':' -f2) + if [[ -n "$update_interval" ]]; then + # Generate the expected crontab entry + local expected_crontab="" + if [[ $update_interval -eq 60 ]]; then + # Hourly updates + expected_crontab="0 * * * * /usr/local/bin/patchmon-agent.sh update >/dev/null 2>&1" + else + # Custom interval updates + expected_crontab="*/$update_interval * * * * /usr/local/bin/patchmon-agent.sh update >/dev/null 2>&1" + fi + + # Get current crontab + local current_crontab=$(crontab -l 2>/dev/null | grep "patchmon-agent.sh update" | head -1) + + # Check if crontab needs updating + if [[ "$current_crontab" == "$expected_crontab" ]]; then + info "Crontab is already up to date (interval: $update_interval minutes)" + return 0 + fi + + info "Setting update interval to $update_interval minutes" + echo "$expected_crontab" | crontab - + success "Crontab updated successfully" + else + error "Could not determine update interval from server" + fi + else + error "Failed to get update interval policy" + fi +} + +# Show detailed system diagnostics +show_diagnostics() { + info "PatchMon Agent Diagnostics v$AGENT_VERSION" + echo "" + + # System information + echo "=== System Information ===" + echo "OS: $(uname -s)" + echo "Architecture: $(uname -m)" + echo "Kernel: $(uname -r)" + echo "Hostname: $(hostname)" + echo "Uptime: $(uptime -p 2>/dev/null || uptime)" + echo "" + + # Agent information + echo "=== Agent Information ===" + echo "Version: $AGENT_VERSION" + echo "Script Path: $0" + echo "Config File: $CONFIG_FILE" + echo "Credentials File: $CREDENTIALS_FILE" + echo "Log File: $LOG_FILE" + echo "Script Size: $(stat -c%s "$0" 2>/dev/null || echo "Unknown") bytes" + echo "Last Modified: $(stat -c%y "$0" 2>/dev/null || echo "Unknown")" + echo "" + + # Configuration + if [[ -f "$CONFIG_FILE" ]]; then + echo "=== Configuration ===" + cat "$CONFIG_FILE" + echo "" + else + echo "=== Configuration ===" + 
echo "No configuration file found at $CONFIG_FILE" + echo "" + fi + + # Credentials status + echo "=== Credentials Status ===" + if [[ -f "$CREDENTIALS_FILE" ]]; then + echo "Credentials file exists: Yes" + echo "File size: $(stat -c%s "$CREDENTIALS_FILE" 2>/dev/null || echo "Unknown") bytes" + echo "File permissions: $(stat -c%a "$CREDENTIALS_FILE" 2>/dev/null || echo "Unknown")" + else + echo "Credentials file exists: No" + fi + echo "" + + # Crontab status + echo "=== Crontab Status ===" + local crontab_entries=$(crontab -l 2>/dev/null | grep patchmon-agent || echo "None") + if [[ "$crontab_entries" != "None" ]]; then + echo "Crontab entries:" + echo "$crontab_entries" + else + echo "No crontab entries found" + fi + echo "" + + # Network connectivity + echo "=== Network Connectivity ===" + if ping -c 1 -W 3 "$(echo "$PATCHMON_SERVER" | sed 's|http://||' | sed 's|https://||' | cut -d: -f1)" >/dev/null 2>&1; then + echo "Server reachable: Yes" + else + echo "Server reachable: No" + fi + echo "Server URL: $PATCHMON_SERVER" + echo "" + + # Recent logs + echo "=== Recent Logs (last 10 lines) ===" + if [[ -f "$LOG_FILE" ]]; then + tail -10 "$LOG_FILE" 2>/dev/null || echo "Could not read log file" + else + echo "Log file does not exist" + fi +} + +# Show current configuration +show_config() { + info "Current Configuration:" + echo " Server: ${PATCHMON_SERVER}" + echo " API Version: ${API_VERSION}" + echo " Agent Version: ${AGENT_VERSION}" + echo " Config File: ${CONFIG_FILE}" + echo " Credentials File: ${CREDENTIALS_FILE}" + echo " Log File: ${LOG_FILE}" + + if [[ -f "$CREDENTIALS_FILE" ]]; then + source "$CREDENTIALS_FILE" + echo " API ID: ${API_ID}" + echo " API Key: ${API_KEY:0:8}..." 
# Show only first 8 characters + else + echo " API Credentials: Not configured" + fi +} + +# Main function +main() { + case "$1" in + "configure") + check_root + setup_directories + load_config + configure_credentials "$2" "$3" + ;; + "test") + check_root + setup_directories + load_config + test_credentials + ;; + "update") + check_root + setup_directories + load_config + detect_os + send_update + ;; + "ping") + check_root + setup_directories + load_config + ping_server + ;; + "config") + load_config + show_config + ;; + "check-version") + check_root + setup_directories + load_config + check_version + ;; + "update-agent") + check_root + setup_directories + load_config + update_agent + ;; + "update-crontab") + check_root + setup_directories + load_config + update_crontab + ;; + "diagnostics") + show_diagnostics + ;; + *) + echo "PatchMon Agent v$AGENT_VERSION - API Credential Based" + echo "Usage: $0 {configure|test|update|ping|config|check-version|update-agent|update-crontab|diagnostics}" + echo "" + echo "Commands:" + echo " configure - Configure API credentials for this host" + echo " test - Test API credentials connectivity" + echo " update - Send package update information to server" + echo " ping - Test connectivity to server" + echo " config - Show current configuration" + echo " check-version - Check for agent updates" + echo " update-agent - Update agent to latest version" + echo " update-crontab - Update crontab with current policy" + echo " diagnostics - Show detailed system diagnostics" + echo "" + echo "Setup Process:" + echo " 1. Contact your PatchMon administrator to create a host entry" + echo " 2. Run: $0 configure (provided by admin)" + echo " 3. Run: $0 test (to verify connection)" + echo " 4. 
Run: $0 update (to send initial package data)" + echo "" + echo "Configuration:" + echo " Edit $CONFIG_FILE to customize server settings" + echo " PATCHMON_SERVER=http://your-server:3001" + exit 1 + ;; + esac +} + +# Run main function +main "$@" \ No newline at end of file diff --git a/backend/add-agent-version.js b/backend/add-agent-version.js new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/backend/add-agent-version.js @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/backend/check-agent-version.js b/backend/check-agent-version.js new file mode 100644 index 0000000..3c0717b --- /dev/null +++ b/backend/check-agent-version.js @@ -0,0 +1,67 @@ +const { PrismaClient } = require('@prisma/client'); + +const prisma = new PrismaClient(); + +async function checkAgentVersion() { + try { + // Check current agent version in database + const agentVersion = await prisma.agentVersion.findFirst({ + where: { version: '1.2.5' } + }); + + if (agentVersion) { + console.log('✅ Agent version 1.2.5 found in database'); + console.log('Version:', agentVersion.version); + console.log('Is Default:', agentVersion.isDefault); + console.log('Script Content Length:', agentVersion.scriptContent?.length || 0); + console.log('Created At:', agentVersion.createdAt); + console.log('Updated At:', agentVersion.updatedAt); + + // Check if script content contains the current version + if (agentVersion.scriptContent && agentVersion.scriptContent.includes('AGENT_VERSION="1.2.5"')) { + console.log('✅ Script content contains correct version 1.2.5'); + } else { + console.log('❌ Script content does not contain version 1.2.5'); + } + + // Check if script content contains system info functions + if (agentVersion.scriptContent && agentVersion.scriptContent.includes('get_hardware_info()')) { + console.log('✅ Script content contains hardware info function'); + } else { + console.log('❌ Script content missing hardware info function'); + } + + if (agentVersion.scriptContent && 
agentVersion.scriptContent.includes('get_network_info()')) { + console.log('✅ Script content contains network info function'); + } else { + console.log('❌ Script content missing network info function'); + } + + if (agentVersion.scriptContent && agentVersion.scriptContent.includes('get_system_info()')) { + console.log('✅ Script content contains system info function'); + } else { + console.log('❌ Script content missing system info function'); + } + + } else { + console.log('❌ Agent version 1.2.5 not found in database'); + } + + // List all agent versions + console.log('\n=== All Agent Versions ==='); + const allVersions = await prisma.agentVersion.findMany({ + orderBy: { createdAt: 'desc' } + }); + + allVersions.forEach(version => { + console.log(`Version: ${version.version}, Default: ${version.isDefault}, Length: ${version.scriptContent?.length || 0}`); + }); + + } catch (error) { + console.error('❌ Error checking agent version:', error); + } finally { + await prisma.$disconnect(); + } +} + +checkAgentVersion(); \ No newline at end of file diff --git a/backend/check-host-updates.js b/backend/check-host-updates.js new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/backend/check-host-updates.js @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/backend/check-script-content.js b/backend/check-script-content.js new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/backend/check-script-content.js @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/backend/create-proper-test-host.js b/backend/create-proper-test-host.js new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/backend/create-proper-test-host.js @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/backend/create-test-host.js b/backend/create-test-host.js new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/backend/create-test-host.js @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/backend/prisma/migrations/20250919165704_/migration.sql 
b/backend/prisma/migrations/20250919165704_/migration.sql new file mode 100644 index 0000000..26ed066 --- /dev/null +++ b/backend/prisma/migrations/20250919165704_/migration.sql @@ -0,0 +1,2 @@ +-- RenameIndex +ALTER INDEX "hosts_hostname_key" RENAME TO "hosts_friendly_name_key"; diff --git a/backend/prisma/migrations/20250919175557_rename_hostname_to_friendly_name/migration.sql b/backend/prisma/migrations/20250919175557_rename_hostname_to_friendly_name/migration.sql new file mode 100644 index 0000000..f5a76ff --- /dev/null +++ b/backend/prisma/migrations/20250919175557_rename_hostname_to_friendly_name/migration.sql @@ -0,0 +1,2 @@ +-- Rename hostname column to friendly_name in hosts table +ALTER TABLE "hosts" RENAME COLUMN "hostname" TO "friendly_name"; diff --git a/backend/prisma/migrations/20250919224305_add_system_information_fields_safe/migration.sql b/backend/prisma/migrations/20250919224305_add_system_information_fields_safe/migration.sql new file mode 100644 index 0000000..55742bf --- /dev/null +++ b/backend/prisma/migrations/20250919224305_add_system_information_fields_safe/migration.sql @@ -0,0 +1,14 @@ +-- AlterTable +ALTER TABLE "hosts" ADD COLUMN "cpu_cores" INTEGER, +ADD COLUMN "cpu_model" TEXT, +ADD COLUMN "disk_details" JSONB, +ADD COLUMN "dns_servers" JSONB, +ADD COLUMN "gateway_ip" TEXT, +ADD COLUMN "hostname" TEXT, +ADD COLUMN "kernel_version" TEXT, +ADD COLUMN "load_average" JSONB, +ADD COLUMN "network_interfaces" JSONB, +ADD COLUMN "ram_installed" INTEGER, +ADD COLUMN "selinux_status" TEXT, +ADD COLUMN "swap_size" INTEGER, +ADD COLUMN "system_uptime" TEXT; diff --git a/backend/prisma/schema.prisma b/backend/prisma/schema.prisma index 0bd52d8..678eb01 100644 --- a/backend/prisma/schema.prisma +++ b/backend/prisma/schema.prisma @@ -67,7 +67,8 @@ model HostGroup { model Host { id String @id @default(cuid()) - hostname String @unique + friendlyName String @unique @map("friendly_name") + hostname String? 
// Actual system hostname from agent ip String? osType String @map("os_type") osVersion String @map("os_version") @@ -79,6 +80,25 @@ model Host { hostGroupId String? @map("host_group_id") // Optional group association agentVersion String? @map("agent_version") // Agent script version autoUpdate Boolean @map("auto_update") @default(true) // Enable auto-update for this host + + // Hardware Information + cpuModel String? @map("cpu_model") // CPU model name + cpuCores Int? @map("cpu_cores") // Number of CPU cores + ramInstalled Int? @map("ram_installed") // RAM in GB + swapSize Int? @map("swap_size") // Swap size in GB + diskDetails Json? @map("disk_details") // Array of disk objects + + // Network Information + gatewayIp String? @map("gateway_ip") // Gateway IP address + dnsServers Json? @map("dns_servers") // Array of DNS servers + networkInterfaces Json? @map("network_interfaces") // Array of network interface objects + + // System Information + kernelVersion String? @map("kernel_version") // Kernel version + selinuxStatus String? @map("selinux_status") // SELinux status (enabled/disabled/permissive) + systemUptime String? @map("system_uptime") // System uptime + loadAverage Json? 
@map("load_average") // Load average (1min, 5min, 15min) + createdAt DateTime @map("created_at") @default(now()) updatedAt DateTime @map("updated_at") @updatedAt diff --git a/backend/src/config/database.js b/backend/src/config/database.js new file mode 100644 index 0000000..5a4cd54 --- /dev/null +++ b/backend/src/config/database.js @@ -0,0 +1,80 @@ +/** + * Database configuration for multiple instances + * Optimizes connection pooling to prevent "too many connections" errors + */ + +const { PrismaClient } = require('@prisma/client'); + +// Parse DATABASE_URL and add connection pooling parameters +function getOptimizedDatabaseUrl() { + const originalUrl = process.env.DATABASE_URL; + + if (!originalUrl) { + throw new Error('DATABASE_URL environment variable is required'); + } + + // Parse the URL + const url = new URL(originalUrl); + + // Add connection pooling parameters for multiple instances + url.searchParams.set('connection_limit', '5'); // Reduced from default 10 + url.searchParams.set('pool_timeout', '10'); // 10 seconds + url.searchParams.set('connect_timeout', '10'); // 10 seconds + url.searchParams.set('idle_timeout', '300'); // 5 minutes + url.searchParams.set('max_lifetime', '1800'); // 30 minutes + + return url.toString(); +} + +// Create optimized Prisma client +function createPrismaClient() { + const optimizedUrl = getOptimizedDatabaseUrl(); + + return new PrismaClient({ + datasources: { + db: { + url: optimizedUrl + } + }, + log: process.env.NODE_ENV === 'development' + ? 
['query', 'info', 'warn', 'error'] + : ['warn', 'error'], + errorFormat: 'pretty' + }); +} + +// Connection health check +async function checkDatabaseConnection(prisma) { + try { + await prisma.$queryRaw`SELECT 1`; + return true; + } catch (error) { + console.error('Database connection failed:', error.message); + return false; + } +} + +// Graceful disconnect with retry +async function disconnectPrisma(prisma, maxRetries = 3) { + for (let i = 0; i < maxRetries; i++) { + try { + await prisma.$disconnect(); + console.log('Database disconnected successfully'); + return; + } catch (error) { + console.error(`Disconnect attempt ${i + 1} failed:`, error.message); + if (i === maxRetries - 1) { + console.error('Failed to disconnect from database after all retries'); + } else { + await new Promise(resolve => setTimeout(resolve, 1000)); // Wait 1 second + } + } + } +} + +module.exports = { + createPrismaClient, + checkDatabaseConnection, + disconnectPrisma, + getOptimizedDatabaseUrl +}; diff --git a/backend/src/routes/dashboardRoutes.js b/backend/src/routes/dashboardRoutes.js index 24dcf2f..3898e4e 100644 --- a/backend/src/routes/dashboardRoutes.js +++ b/backend/src/routes/dashboardRoutes.js @@ -162,6 +162,7 @@ router.get('/hosts', authenticateToken, requireViewHosts, async (req, res) => { // Show all hosts regardless of status select: { id: true, + friendlyName: true, hostname: true, ip: true, osType: true, @@ -200,6 +201,13 @@ router.get('/hosts', authenticateToken, requireViewHosts, async (req, res) => { } }); + // Get total packages count for this host + const totalPackagesCount = await prisma.hostPackage.count({ + where: { + hostId: host.id + } + }); + // Get the agent update interval setting for stale calculation const settings = await prisma.settings.findFirst(); const updateIntervalMinutes = settings?.updateInterval || 60; @@ -217,6 +225,7 @@ router.get('/hosts', authenticateToken, requireViewHosts, async (req, res) => { return { ...host, updatesCount, + 
totalPackagesCount, isStale, effectiveStatus }; @@ -256,7 +265,7 @@ router.get('/packages', authenticateToken, requireViewPackages, async (req, res) host: { select: { id: true, - hostname: true, + friendlyName: true, osType: true } } @@ -278,7 +287,7 @@ router.get('/packages', authenticateToken, requireViewPackages, async (req, res) isSecurityUpdate: pkg.hostPackages.some(hp => hp.isSecurityUpdate), affectedHosts: pkg.hostPackages.map(hp => ({ hostId: hp.host.id, - hostname: hp.host.hostname, + friendlyName: hp.host.friendlyName, osType: hp.host.osType, currentVersion: hp.currentVersion, availableVersion: hp.availableVersion, diff --git a/backend/src/routes/hostGroupRoutes.js b/backend/src/routes/hostGroupRoutes.js index 6084376..297e175 100644 --- a/backend/src/routes/hostGroupRoutes.js +++ b/backend/src/routes/hostGroupRoutes.js @@ -41,6 +41,7 @@ router.get('/:id', authenticateToken, async (req, res) => { hosts: { select: { id: true, + friendlyName: true, hostname: true, ip: true, osType: true, @@ -201,7 +202,7 @@ router.get('/:id/hosts', authenticateToken, async (req, res) => { where: { hostGroupId: id }, select: { id: true, - hostname: true, + friendlyName: true, ip: true, osType: true, osVersion: true, @@ -211,7 +212,7 @@ router.get('/:id/hosts', authenticateToken, async (req, res) => { createdAt: true }, orderBy: { - hostname: 'asc' + friendlyName: 'asc' } }); diff --git a/backend/src/routes/hostRoutes.js b/backend/src/routes/hostRoutes.js index 13b00cc..5dd53af 100644 --- a/backend/src/routes/hostRoutes.js +++ b/backend/src/routes/hostRoutes.js @@ -133,7 +133,7 @@ const validateApiCredentials = async (req, res, next) => { // Admin endpoint to create a new host manually (replaces auto-registration) router.post('/create', authenticateToken, requireManageHosts, [ - body('hostname').isLength({ min: 1 }).withMessage('Hostname is required'), + body('friendlyName').isLength({ min: 1 }).withMessage('Friendly name is required'), body('hostGroupId').optional() ], 
async (req, res) => { try { @@ -142,14 +142,14 @@ router.post('/create', authenticateToken, requireManageHosts, [ return res.status(400).json({ errors: errors.array() }); } - const { hostname, hostGroupId } = req.body; + const { friendlyName, hostGroupId } = req.body; // Generate unique API credentials for this host const { apiId, apiKey } = generateApiCredentials(); // Check if host already exists const existingHost = await prisma.host.findUnique({ - where: { hostname } + where: { friendlyName } }); if (existingHost) { @@ -170,7 +170,7 @@ router.post('/create', authenticateToken, requireManageHosts, [ // Create new host with API credentials - system info will be populated when agent connects const host = await prisma.host.create({ data: { - hostname, + friendlyName, osType: 'unknown', // Will be updated when agent connects osVersion: 'unknown', // Will be updated when agent connects ip: null, // Will be updated when agent connects @@ -194,7 +194,7 @@ router.post('/create', authenticateToken, requireManageHosts, [ res.status(201).json({ message: 'Host created successfully', hostId: host.id, - hostname: host.hostname, + friendlyName: host.friendlyName, apiId: host.apiId, apiKey: host.apiKey, hostGroup: host.hostGroup, @@ -223,7 +223,22 @@ router.post('/update', validateApiCredentials, [ body('packages.*.availableVersion').optional().isLength({ min: 1 }), body('packages.*.needsUpdate').isBoolean().withMessage('needsUpdate must be boolean'), body('packages.*.isSecurityUpdate').optional().isBoolean().withMessage('isSecurityUpdate must be boolean'), - body('agentVersion').optional().isLength({ min: 1 }).withMessage('Agent version must be a non-empty string') + body('agentVersion').optional().isLength({ min: 1 }).withMessage('Agent version must be a non-empty string'), + // Hardware Information + body('cpuModel').optional().isString().withMessage('CPU model must be a string'), + body('cpuCores').optional().isInt({ min: 1 }).withMessage('CPU cores must be a positive 
integer'), + body('ramInstalled').optional().isInt({ min: 1 }).withMessage('RAM installed must be a positive integer'), + body('swapSize').optional().isInt({ min: 0 }).withMessage('Swap size must be a non-negative integer'), + body('diskDetails').optional().isArray().withMessage('Disk details must be an array'), + // Network Information + body('gatewayIp').optional().isIP().withMessage('Gateway IP must be a valid IP address'), + body('dnsServers').optional().isArray().withMessage('DNS servers must be an array'), + body('networkInterfaces').optional().isArray().withMessage('Network interfaces must be an array'), + // System Information + body('kernelVersion').optional().isString().withMessage('Kernel version must be a string'), + body('selinuxStatus').optional().isIn(['enabled', 'disabled', 'permissive']).withMessage('SELinux status must be enabled, disabled, or permissive'), + body('systemUptime').optional().isString().withMessage('System uptime must be a string'), + body('loadAverage').optional().isArray().withMessage('Load average must be an array') ], async (req, res) => { try { const errors = validationResult(req); @@ -234,14 +249,35 @@ router.post('/update', validateApiCredentials, [ const { packages, repositories } = req.body; const host = req.hostRecord; - // Update host last update timestamp and OS info if provided + // Update host last update timestamp and system info if provided const updateData = { lastUpdate: new Date() }; + + // Basic system info if (req.body.osType) updateData.osType = req.body.osType; if (req.body.osVersion) updateData.osVersion = req.body.osVersion; + if (req.body.hostname) updateData.hostname = req.body.hostname; if (req.body.ip) updateData.ip = req.body.ip; if (req.body.architecture) updateData.architecture = req.body.architecture; if (req.body.agentVersion) updateData.agentVersion = req.body.agentVersion; + // Hardware Information + if (req.body.cpuModel) updateData.cpuModel = req.body.cpuModel; + if (req.body.cpuCores) 
updateData.cpuCores = req.body.cpuCores; + if (req.body.ramInstalled) updateData.ramInstalled = req.body.ramInstalled; + if (req.body.swapSize !== undefined) updateData.swapSize = req.body.swapSize; + if (req.body.diskDetails) updateData.diskDetails = req.body.diskDetails; + + // Network Information + if (req.body.gatewayIp) updateData.gatewayIp = req.body.gatewayIp; + if (req.body.dnsServers) updateData.dnsServers = req.body.dnsServers; + if (req.body.networkInterfaces) updateData.networkInterfaces = req.body.networkInterfaces; + + // System Information + if (req.body.kernelVersion) updateData.kernelVersion = req.body.kernelVersion; + if (req.body.selinuxStatus) updateData.selinuxStatus = req.body.selinuxStatus; + if (req.body.systemUptime) updateData.systemUptime = req.body.systemUptime; + if (req.body.loadAverage) updateData.loadAverage = req.body.loadAverage; + // If this is the first update (status is 'pending'), change to 'active' if (host.status === 'pending') { updateData.status = 'active'; @@ -454,6 +490,7 @@ router.get('/info', validateApiCredentials, async (req, res) => { where: { id: req.hostRecord.id }, select: { id: true, + friendlyName: true, hostname: true, ip: true, osType: true, @@ -485,12 +522,12 @@ router.post('/ping', validateApiCredentials, async (req, res) => { const response = { message: 'Ping successful', timestamp: new Date().toISOString(), - hostname: req.hostRecord.hostname + friendlyName: req.hostRecord.friendlyName }; // Check if this is a crontab update trigger if (req.body.triggerCrontabUpdate && req.hostRecord.autoUpdate) { - console.log(`Triggering crontab update for host: ${req.hostRecord.hostname}`); + console.log(`Triggering crontab update for host: ${req.hostRecord.friendlyName}`); response.crontabUpdate = { shouldUpdate: true, message: 'Update interval changed, please run: /usr/local/bin/patchmon-agent.sh update-crontab', @@ -568,7 +605,7 @@ router.put('/bulk/group', authenticateToken, requireManageHosts, [ // Check if all 
hosts exist const existingHosts = await prisma.host.findMany({ where: { id: { in: hostIds } }, - select: { id: true, hostname: true } + select: { id: true, friendlyName: true } }); if (existingHosts.length !== hostIds.length) { @@ -593,7 +630,7 @@ router.put('/bulk/group', authenticateToken, requireManageHosts, [ where: { id: { in: hostIds } }, select: { id: true, - hostname: true, + friendlyName: true, hostGroup: { select: { id: true, @@ -681,6 +718,7 @@ router.get('/admin/list', authenticateToken, requireManageHosts, async (req, res const hosts = await prisma.host.findMany({ select: { id: true, + friendlyName: true, hostname: true, ip: true, osType: true, @@ -742,7 +780,7 @@ router.patch('/:hostId/auto-update', authenticateToken, requireManageHosts, [ message: `Host auto-update ${autoUpdate ? 'enabled' : 'disabled'} successfully`, host: { id: host.id, - hostname: host.hostname, + friendlyName: host.friendlyName, autoUpdate: host.autoUpdate } }); @@ -934,4 +972,77 @@ router.delete('/agent/versions/:versionId', authenticateToken, requireManageSett } }); +// Update host friendly name (admin only) +router.patch('/:hostId/friendly-name', authenticateToken, requireManageHosts, [ + body('friendlyName').isLength({ min: 1, max: 100 }).withMessage('Friendly name must be between 1 and 100 characters') +], async (req, res) => { + try { + const errors = validationResult(req); + if (!errors.isEmpty()) { + return res.status(400).json({ errors: errors.array() }); + } + + const { hostId } = req.params; + const { friendlyName } = req.body; + + // Check if host exists + const host = await prisma.host.findUnique({ + where: { id: hostId } + }); + + if (!host) { + return res.status(404).json({ error: 'Host not found' }); + } + + // Check if friendly name is already taken by another host + const existingHost = await prisma.host.findFirst({ + where: { + friendlyName: friendlyName, + id: { not: hostId } + } + }); + + if (existingHost) { + return res.status(400).json({ error: 'Friendly 
name is already taken by another host' }); + } + + // Update the friendly name + const updatedHost = await prisma.host.update({ + where: { id: hostId }, + data: { friendlyName }, + select: { + id: true, + friendlyName: true, + hostname: true, + ip: true, + osType: true, + osVersion: true, + architecture: true, + lastUpdate: true, + status: true, + hostGroupId: true, + agentVersion: true, + autoUpdate: true, + createdAt: true, + updatedAt: true, + hostGroup: { + select: { + id: true, + name: true, + color: true + } + } + } + }); + + res.json({ + message: 'Friendly name updated successfully', + host: updatedHost + }); + } catch (error) { + console.error('Update friendly name error:', error); + res.status(500).json({ error: 'Failed to update friendly name' }); + } +}); + module.exports = router; \ No newline at end of file diff --git a/backend/src/routes/packageRoutes.js b/backend/src/routes/packageRoutes.js index ca22234..3d73967 100644 --- a/backend/src/routes/packageRoutes.js +++ b/backend/src/routes/packageRoutes.js @@ -100,6 +100,7 @@ router.get('/', async (req, res) => { host: { select: { id: true, + friendlyName: true, hostname: true, osType: true } diff --git a/backend/src/routes/repositoryRoutes.js b/backend/src/routes/repositoryRoutes.js index c275a96..abccb10 100644 --- a/backend/src/routes/repositoryRoutes.js +++ b/backend/src/routes/repositoryRoutes.js @@ -17,7 +17,7 @@ router.get('/', authenticateToken, requireViewHosts, async (req, res) => { host: { select: { id: true, - hostname: true, + friendlyName: true, status: true } } @@ -43,7 +43,7 @@ router.get('/', authenticateToken, requireViewHosts, async (req, res) => { activeHostCount: repo.hostRepositories.filter(hr => hr.host.status === 'active').length, hosts: repo.hostRepositories.map(hr => ({ id: hr.host.id, - hostname: hr.host.hostname, + friendlyName: hr.host.friendlyName, status: hr.host.status, isEnabled: hr.isEnabled, lastChecked: hr.lastChecked @@ -69,7 +69,7 @@ router.get('/host/:hostId', 
authenticateToken, requireViewHosts, async (req, res host: { select: { id: true, - hostname: true + friendlyName: true } } }, @@ -100,6 +100,7 @@ router.get('/:repositoryId', authenticateToken, requireViewHosts, async (req, re host: { select: { id: true, + friendlyName: true, hostname: true, ip: true, osType: true, @@ -111,7 +112,7 @@ router.get('/:repositoryId', authenticateToken, requireViewHosts, async (req, re }, orderBy: { host: { - hostname: 'asc' + friendlyName: 'asc' } } } @@ -197,14 +198,14 @@ router.patch('/host/:hostId/repository/:repositoryId', authenticateToken, requir repository: true, host: { select: { - hostname: true + friendlyName: true } } } }); res.json({ - message: `Repository ${isEnabled ? 'enabled' : 'disabled'} for host ${hostRepository.host.hostname}`, + message: `Repository ${isEnabled ? 'enabled' : 'disabled'} for host ${hostRepository.host.friendlyName}`, hostRepository }); } catch (error) { diff --git a/backend/src/routes/settingsRoutes.js b/backend/src/routes/settingsRoutes.js index ce35a75..1da1ee2 100644 --- a/backend/src/routes/settingsRoutes.js +++ b/backend/src/routes/settingsRoutes.js @@ -20,7 +20,7 @@ async function triggerCrontabUpdates() { }, select: { id: true, - hostname: true, + friendlyName: true, apiId: true, apiKey: true } @@ -32,7 +32,7 @@ async function triggerCrontabUpdates() { // This is done by sending a ping with a special flag for (const host of hosts) { try { - console.log(`Triggering crontab update for host: ${host.hostname}`); + console.log(`Triggering crontab update for host: ${host.friendlyName}`); // We'll use the existing ping endpoint but add a special parameter // The agent will detect this and run update-crontab command @@ -64,20 +64,20 @@ async function triggerCrontabUpdates() { const req = client.request(options, (res) => { if (res.statusCode === 200) { - console.log(`Successfully triggered crontab update for ${host.hostname}`); + console.log(`Successfully triggered crontab update for 
${host.friendlyName}`); } else { - console.error(`Failed to trigger crontab update for ${host.hostname}: ${res.statusCode}`); + console.error(`Failed to trigger crontab update for ${host.friendlyName}: ${res.statusCode}`); } }); req.on('error', (error) => { - console.error(`Error triggering crontab update for ${host.hostname}:`, error.message); + console.error(`Error triggering crontab update for ${host.friendlyName}:`, error.message); }); req.write(postData); req.end(); } catch (error) { - console.error(`Error triggering crontab update for ${host.hostname}:`, error.message); + console.error(`Error triggering crontab update for ${host.friendlyName}:`, error.message); } } diff --git a/backend/src/server.js b/backend/src/server.js index cd35708..26eea09 100644 --- a/backend/src/server.js +++ b/backend/src/server.js @@ -3,7 +3,7 @@ const express = require('express'); const cors = require('cors'); const helmet = require('helmet'); const rateLimit = require('express-rate-limit'); -const { PrismaClient } = require('@prisma/client'); +const { createPrismaClient, checkDatabaseConnection, disconnectPrisma } = require('./config/database'); const winston = require('winston'); // Import routes @@ -20,8 +20,8 @@ const versionRoutes = require('./routes/versionRoutes'); const tfaRoutes = require('./routes/tfaRoutes'); const updateScheduler = require('./services/updateScheduler'); -// Initialize Prisma client -const prisma = new PrismaClient(); +// Initialize Prisma client with optimized connection pooling for multiple instances +const prisma = createPrismaClient(); // Initialize logger - only if logging is enabled const logger = process.env.ENABLE_LOGGING === 'true' ? 
winston.createLogger({ @@ -157,33 +157,53 @@ app.use('*', (req, res) => { }); // Graceful shutdown -process.on('SIGTERM', async () => { - if (process.env.ENABLE_LOGGING === 'true') { - logger.info('SIGTERM received, shutting down gracefully'); - } - updateScheduler.stop(); - await prisma.$disconnect(); - process.exit(0); -}); - process.on('SIGINT', async () => { if (process.env.ENABLE_LOGGING === 'true') { logger.info('SIGINT received, shutting down gracefully'); } updateScheduler.stop(); - await prisma.$disconnect(); + await disconnectPrisma(prisma); process.exit(0); }); -// Start server -app.listen(PORT, () => { +process.on('SIGTERM', async () => { if (process.env.ENABLE_LOGGING === 'true') { - logger.info(`Server running on port ${PORT}`); - logger.info(`Environment: ${process.env.NODE_ENV}`); + logger.info('SIGTERM received, shutting down gracefully'); } - - // Start update scheduler - updateScheduler.start(); + updateScheduler.stop(); + await disconnectPrisma(prisma); + process.exit(0); }); +// Start server with database health check +async function startServer() { + try { + // Check database connection before starting server + const isConnected = await checkDatabaseConnection(prisma); + if (!isConnected) { + console.error('❌ Database connection failed. 
Server not started.'); + process.exit(1); + } + + if (process.env.ENABLE_LOGGING === 'true') { + logger.info('✅ Database connection successful'); + } + + app.listen(PORT, () => { + if (process.env.ENABLE_LOGGING === 'true') { + logger.info(`Server running on port ${PORT}`); + logger.info(`Environment: ${process.env.NODE_ENV}`); + } + + // Start update scheduler + updateScheduler.start(); + }); + } catch (error) { + console.error('❌ Failed to start server:', error.message); + process.exit(1); + } +} + +startServer(); + module.exports = app; \ No newline at end of file diff --git a/backend/test-json-construction.js b/backend/test-json-construction.js new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/backend/test-json-construction.js @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/backend/update-agent-script.js b/backend/update-agent-script.js new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/backend/update-agent-script.js @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/backend/update-agent-version.js b/backend/update-agent-version.js new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/backend/update-agent-version.js @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/backend/update-final-fix.js b/backend/update-final-fix.js new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/backend/update-final-fix.js @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/backend/update-fixed-agent-final.js b/backend/update-fixed-agent-final.js new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/backend/update-fixed-agent-final.js @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/backend/update-fixed-agent.js b/backend/update-fixed-agent.js new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/backend/update-fixed-agent.js @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/backend/update-script-content.js b/backend/update-script-content.js new file mode 100644 index 
0000000..0519ecb --- /dev/null +++ b/backend/update-script-content.js @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/backend/verify-agent-version.js b/backend/verify-agent-version.js new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/backend/verify-agent-version.js @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/package.json b/frontend/package.json index d9321df..cd6a4ed 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -24,6 +24,7 @@ "react": "^18.2.0", "react-chartjs-2": "^5.2.0", "react-dom": "^18.2.0", + "react-icons": "^5.5.0", "react-router-dom": "^6.20.1" }, "devDependencies": { diff --git a/frontend/src/components/InlineEdit.jsx b/frontend/src/components/InlineEdit.jsx new file mode 100644 index 0000000..05d0f88 --- /dev/null +++ b/frontend/src/components/InlineEdit.jsx @@ -0,0 +1,157 @@ +import React, { useState, useRef, useEffect } from 'react'; +import { Edit2, Check, X } from 'lucide-react'; +import { Link } from 'react-router-dom'; + +const InlineEdit = ({ + value, + onSave, + onCancel, + placeholder = "Enter value...", + maxLength = 100, + className = "", + disabled = false, + validate = null, + linkTo = null +}) => { + const [isEditing, setIsEditing] = useState(false); + const [editValue, setEditValue] = useState(value); + const [isLoading, setIsLoading] = useState(false); + const [error, setError] = useState(''); + const inputRef = useRef(null); + + useEffect(() => { + if (isEditing && inputRef.current) { + inputRef.current.focus(); + inputRef.current.select(); + } + }, [isEditing]); + + useEffect(() => { + setEditValue(value); + }, [value]); + + const handleEdit = () => { + if (disabled) return; + setIsEditing(true); + setEditValue(value); + setError(''); + }; + + const handleCancel = () => { + setIsEditing(false); + setEditValue(value); + setError(''); + if (onCancel) onCancel(); + }; + + const handleSave = async () => { + if (disabled || isLoading) return; + + // Validate if validator 
function provided + if (validate) { + const validationError = validate(editValue); + if (validationError) { + setError(validationError); + return; + } + } + + // Check if value actually changed + if (editValue.trim() === value.trim()) { + setIsEditing(false); + return; + } + + setIsLoading(true); + setError(''); + + try { + await onSave(editValue.trim()); + setIsEditing(false); + } catch (err) { + setError(err.message || 'Failed to save'); + } finally { + setIsLoading(false); + } + }; + + const handleKeyDown = (e) => { + if (e.key === 'Enter') { + e.preventDefault(); + handleSave(); + } else if (e.key === 'Escape') { + e.preventDefault(); + handleCancel(); + } + }; + + if (isEditing) { + return ( +
+ setEditValue(e.target.value)} + onKeyDown={handleKeyDown} + placeholder={placeholder} + maxLength={maxLength} + disabled={isLoading} + className={`flex-1 px-2 py-1 text-sm border border-secondary-300 dark:border-secondary-600 rounded-md bg-white dark:bg-secondary-800 text-secondary-900 dark:text-white focus:outline-none focus:ring-2 focus:ring-primary-500 focus:border-transparent ${ + error ? 'border-red-500' : '' + } ${isLoading ? 'opacity-50' : ''}`} + /> + + + {error && ( + {error} + )} +
+ ); + } + + const displayValue = linkTo ? ( + + {value} + + ) : ( + + {value} + + ); + + return ( +
+ {displayValue} + {!disabled && ( + + )} +
+ ); +}; + +export default InlineEdit; diff --git a/frontend/src/components/InlineGroupEdit.jsx b/frontend/src/components/InlineGroupEdit.jsx new file mode 100644 index 0000000..d126d84 --- /dev/null +++ b/frontend/src/components/InlineGroupEdit.jsx @@ -0,0 +1,262 @@ +import React, { useState, useRef, useEffect } from 'react'; +import { Edit2, Check, X, ChevronDown } from 'lucide-react'; + +const InlineGroupEdit = ({ + value, + onSave, + onCancel, + options = [], + className = "", + disabled = false, + placeholder = "Select group..." +}) => { + const [isEditing, setIsEditing] = useState(false); + const [selectedValue, setSelectedValue] = useState(value); + const [isLoading, setIsLoading] = useState(false); + const [error, setError] = useState(''); + const [isOpen, setIsOpen] = useState(false); + const [dropdownPosition, setDropdownPosition] = useState({ top: 0, left: 0, width: 0 }); + const dropdownRef = useRef(null); + const buttonRef = useRef(null); + + useEffect(() => { + if (isEditing && dropdownRef.current) { + dropdownRef.current.focus(); + } + }, [isEditing]); + + useEffect(() => { + setSelectedValue(value); + // Force re-render when value changes + if (!isEditing) { + setIsOpen(false); + } + }, [value, isEditing]); + + // Calculate dropdown position + const calculateDropdownPosition = () => { + if (buttonRef.current) { + const rect = buttonRef.current.getBoundingClientRect(); + setDropdownPosition({ + top: rect.bottom + window.scrollY + 4, + left: rect.left + window.scrollX, + width: rect.width + }); + } + }; + + // Close dropdown when clicking outside + useEffect(() => { + const handleClickOutside = (event) => { + if (dropdownRef.current && !dropdownRef.current.contains(event.target)) { + setIsOpen(false); + } + }; + + if (isOpen) { + calculateDropdownPosition(); + document.addEventListener('mousedown', handleClickOutside); + window.addEventListener('resize', calculateDropdownPosition); + window.addEventListener('scroll', calculateDropdownPosition); + 
return () => { + document.removeEventListener('mousedown', handleClickOutside); + window.removeEventListener('resize', calculateDropdownPosition); + window.removeEventListener('scroll', calculateDropdownPosition); + }; + } + }, [isOpen]); + + const handleEdit = () => { + if (disabled) return; + setIsEditing(true); + setSelectedValue(value); + setError(''); + // Automatically open dropdown when editing starts + setTimeout(() => { + setIsOpen(true); + }, 0); + }; + + const handleCancel = () => { + setIsEditing(false); + setSelectedValue(value); + setError(''); + setIsOpen(false); + if (onCancel) onCancel(); + }; + + const handleSave = async () => { + if (disabled || isLoading) return; + + console.log('handleSave called:', { selectedValue, originalValue: value, changed: selectedValue !== value }); + + // Check if value actually changed + if (selectedValue === value) { + console.log('No change detected, closing edit mode'); + setIsEditing(false); + setIsOpen(false); + return; + } + + setIsLoading(true); + setError(''); + + try { + console.log('Calling onSave with:', selectedValue); + await onSave(selectedValue); + console.log('Save successful'); + // Update the local value to match the saved value + setSelectedValue(selectedValue); + setIsEditing(false); + setIsOpen(false); + } catch (err) { + console.error('Save failed:', err); + setError(err.message || 'Failed to save'); + } finally { + setIsLoading(false); + } + }; + + const handleKeyDown = (e) => { + if (e.key === 'Enter') { + e.preventDefault(); + handleSave(); + } else if (e.key === 'Escape') { + e.preventDefault(); + handleCancel(); + } + }; + + const getDisplayValue = () => { + console.log('getDisplayValue called with:', { value, options }); + if (!value) { + console.log('No value, returning Ungrouped'); + return 'Ungrouped'; + } + const option = options.find(opt => opt.id === value); + console.log('Found option:', option); + return option ? 
option.name : 'Unknown Group'; + }; + + const getDisplayColor = () => { + if (!value) return 'bg-secondary-100 text-secondary-800'; + const option = options.find(opt => opt.id === value); + return option ? `text-white` : 'bg-secondary-100 text-secondary-800'; + }; + + if (isEditing) { + return ( +
+
+
+ + + {isOpen && ( +
+
+ + {options.map((option) => ( + + ))} +
+
+ )} +
+ + +
+ {error && ( + {error} + )} +
+ ); + } + + return ( +
+ opt.id === value)?.color } : {}} + > + {getDisplayValue()} + + {!disabled && ( + + )} +
+ ); +}; + +export default InlineGroupEdit; diff --git a/frontend/src/components/Layout.jsx b/frontend/src/components/Layout.jsx index f162eeb..5c294b5 100644 --- a/frontend/src/components/Layout.jsx +++ b/frontend/src/components/Layout.jsx @@ -20,7 +20,10 @@ import { GitBranch, Wrench, Container, - Plus + Plus, + Activity, + Cog, + FileText } from 'lucide-react' import { useState, useEffect, useRef } from 'react' import { useQuery } from '@tanstack/react-query' @@ -65,7 +68,7 @@ const Layout = ({ children }) => { ...(canViewHosts() ? [{ name: 'Hosts', href: '/hosts', icon: Server }] : []), ...(canViewPackages() ? [{ name: 'Packages', href: '/packages', icon: Package }] : []), ...(canViewHosts() ? [{ name: 'Repos', href: '/repositories', icon: GitBranch }] : []), - { name: 'Services', href: '/services', icon: Wrench, comingSoon: true }, + { name: 'Services', href: '/services', icon: Activity, comingSoon: true }, { name: 'Docker', href: '/docker', icon: Container, comingSoon: true }, { name: 'Reporting', href: '/reporting', icon: BarChart3, comingSoon: true }, ] @@ -80,17 +83,18 @@ const Layout = ({ children }) => { { section: 'Settings', items: [ + ...(canManageHosts() ? [{ + name: 'PatchMon Options', + href: '/options', + icon: Settings + }] : []), + { name: 'Audit Log', href: '/audit-log', icon: FileText, comingSoon: true }, ...(canManageSettings() ? [{ name: 'Server Config', href: '/settings', - icon: Settings, + icon: Wrench, showUpgradeIcon: updateAvailable }] : []), - ...(canManageHosts() ? 
[{ - name: 'Options', - href: '/options', - icon: Settings - }] : []), ] } ] @@ -110,7 +114,8 @@ const Layout = ({ children }) => { if (path === '/users') return 'Users' if (path === '/permissions') return 'Permissions' if (path === '/settings') return 'Settings' - if (path === '/options') return 'Options' + if (path === '/options') return 'PatchMon Options' + if (path === '/audit-log') return 'Audit Log' if (path === '/profile') return 'My Profile' if (path.startsWith('/hosts/')) return 'Host Details' if (path.startsWith('/packages/')) return 'Package Details' diff --git a/frontend/src/pages/Dashboard.jsx b/frontend/src/pages/Dashboard.jsx index 6a84aea..9dce1ca 100644 --- a/frontend/src/pages/Dashboard.jsx +++ b/frontend/src/pages/Dashboard.jsx @@ -28,7 +28,7 @@ const Dashboard = () => { // Navigation handlers const handleTotalHostsClick = () => { - navigate('/hosts') + navigate('/hosts', { replace: true }) } const handleHostsNeedingUpdatesClick = () => { @@ -52,11 +52,11 @@ const Dashboard = () => { } const handleOSDistributionClick = () => { - navigate('/hosts') + navigate('/hosts', { replace: true }) } const handleUpdateStatusClick = () => { - navigate('/hosts') + navigate('/hosts', { replace: true }) } const handlePackagePriorityClick = () => { diff --git a/frontend/src/pages/HostDetail.jsx b/frontend/src/pages/HostDetail.jsx index 3429a4f..c45db48 100644 --- a/frontend/src/pages/HostDetail.jsx +++ b/frontend/src/pages/HostDetail.jsx @@ -23,9 +23,19 @@ import { ToggleLeft, ToggleRight, Edit, - Check + Check, + ChevronDown, + ChevronUp, + Cpu, + MemoryStick, + Globe, + Wifi, + Terminal, + Activity } from 'lucide-react' import { dashboardAPI, adminHostsAPI, settingsAPI, formatRelativeTime, formatDate } from '../utils/api' +import { OSIcon } from '../utils/osIcons.jsx' +import InlineEdit from '../components/InlineEdit' const HostDetail = () => { const { hostId } = useParams() @@ -33,8 +43,9 @@ const HostDetail = () => { const queryClient = useQueryClient() const 
[showCredentialsModal, setShowCredentialsModal] = useState(false) const [showDeleteModal, setShowDeleteModal] = useState(false) - const [isEditingHostname, setIsEditingHostname] = useState(false) - const [editedHostname, setEditedHostname] = useState('') + const [isEditingFriendlyName, setIsEditingFriendlyName] = useState(false) + const [editedFriendlyName, setEditedFriendlyName] = useState('') + const [showAllUpdates, setShowAllUpdates] = useState(false) const { data: host, isLoading, error, refetch } = useQuery({ queryKey: ['host', hostId], @@ -67,8 +78,16 @@ const HostDetail = () => { } }) + const updateFriendlyNameMutation = useMutation({ + mutationFn: (friendlyName) => adminHostsAPI.updateFriendlyName(hostId, friendlyName).then(res => res.data), + onSuccess: () => { + queryClient.invalidateQueries(['host', hostId]) + queryClient.invalidateQueries(['hosts']) + } + }) + const handleDeleteHost = async () => { - if (window.confirm(`Are you sure you want to delete host "${host.hostname}"? This action cannot be undone.`)) { + if (window.confirm(`Are you sure you want to delete host "${host.friendlyName}"? This action cannot be undone.`)) { try { await deleteHostMutation.mutateAsync(hostId) } catch (error) { @@ -162,46 +181,49 @@ const HostDetail = () => { return (
- {/* Header */} -
-
- - - -

{host.hostname}

-
-
- - -
-
- {/* Host Information */}
{/* Basic Info */}
-

Host Information

+
+

Host Information

+
+ + + +
+
-
-

Hostname

-

{host.hostname}

+
+

Friendly Name

+ updateFriendlyNameMutation.mutate(newName)} + placeholder="Enter friendly name..." + maxLength={100} + validate={(value) => { + if (!value.trim()) return 'Friendly name is required'; + if (value.trim().length < 1) return 'Friendly name must be at least 1 character'; + if (value.trim().length > 100) return 'Friendly name must be less than 100 characters'; + return null; + }} + className="w-full" + />
+ {host.hostname && ( +
+ +
+

System Hostname

+

{host.hostname}

+
+
+ )} +
@@ -225,7 +247,10 @@ const HostDetail = () => {

Operating System

-

{host.osType} {host.osVersion}

+
+ +

{host.osType} {host.osVersion}

+
@@ -289,6 +314,24 @@ const HostDetail = () => {
)}
+ + {/* Action Buttons */} +
+ + +
{/* Statistics */} @@ -303,13 +346,17 @@ const HostDetail = () => {

Total Packages

-
-
+
+
@@ -330,124 +377,277 @@ const HostDetail = () => {
- {/* Packages */} -
-
-

Packages

-
- -
- - - - - - - - - - - {host.hostPackages?.map((hostPackage) => ( - - - - - - - ))} - -
- Package - - Current Version - - Available Version - - Status -
-
- -
-
- {hostPackage.package.name} -
- {hostPackage.package.description && ( -
- {hostPackage.package.description} -
- )} -
-
-
- {hostPackage.currentVersion} - - {hostPackage.availableVersion || '-'} - - {hostPackage.needsUpdate ? ( -
- - {hostPackage.isSecurityUpdate ? 'Security Update' : 'Update Available'} - - {hostPackage.isSecurityUpdate && ( - - )} -
- ) : ( - Up to date - )} -
-
- - {host.hostPackages?.length === 0 && ( -
- -

No packages found

-
- )} -
- - {/* Update History */} -
-
-

Update History

-
- -
- {host.updateHistory?.length > 0 ? ( -
- {host.updateHistory.map((update, index) => ( -
-
-
-
-

- {update.status === 'success' ? 'Update Successful' : 'Update Failed'} -

-

- {formatDate(update.timestamp)} -

-
-
-
-

- {update.packagesCount} packages -

- {update.securityCount > 0 && ( -

- {update.securityCount} security updates -

- )} -
+ {/* Hardware Information */} + {(host.cpuModel || host.ramInstalled || host.diskDetails) && ( +
+

Hardware Information

+
+ {host.cpuModel && ( +
+ +
+

CPU Model

+

{host.cpuModel}

- ))} -
- ) : ( -
- -

No update history available

+
+ )} + + {host.cpuCores && ( +
+ +
+

CPU Cores

+

{host.cpuCores}

+
+
+ )} + + {host.ramInstalled && ( +
+ +
+

RAM Installed

+

{host.ramInstalled} GB

+
+
+ )} + + {host.swapSize !== undefined && ( +
+ +
+

Swap Size

+

{host.swapSize} GB

+
+
+ )} +
+ + {host.diskDetails && Array.isArray(host.diskDetails) && host.diskDetails.length > 0 && ( +
+

Disk Details

+
+ {host.diskDetails.map((disk, index) => ( +
+
+ + {disk.name} +
+

Size: {disk.size}

+ {disk.mountpoint && ( +

Mount: {disk.mountpoint}

+ )} +
+ ))} +
)}
+ )} + + {/* Network Information */} + {(host.gatewayIp || host.dnsServers || host.networkInterfaces) && ( +
+

Network Information

+
+ {host.gatewayIp && ( +
+ +
+

Gateway IP

+

{host.gatewayIp}

+
+
+ )} + + {host.dnsServers && Array.isArray(host.dnsServers) && host.dnsServers.length > 0 && ( +
+ +
+

DNS Servers

+
+ {host.dnsServers.map((dns, index) => ( +

{dns}

+ ))} +
+
+
+ )} + + {host.networkInterfaces && Array.isArray(host.networkInterfaces) && host.networkInterfaces.length > 0 && ( +
+ +
+

Network Interfaces

+
+ {host.networkInterfaces.map((iface, index) => ( +

{iface.name}

+ ))} +
+
+
+ )} +
+
+ )} + + {/* System Information */} + {(host.kernelVersion || host.selinuxStatus || host.systemUptime || host.loadAverage) && ( +
+

System Information

+
+ {host.kernelVersion && ( +
+ +
+

Kernel Version

+

{host.kernelVersion}

+
+
+ )} + + {host.selinuxStatus && ( +
+ +
+

SELinux Status

+ + {host.selinuxStatus} + +
+
+ )} + + {host.systemUptime && ( +
+ +
+

System Uptime

+

{host.systemUptime}

+
+
+ )} + + {host.loadAverage && Array.isArray(host.loadAverage) && host.loadAverage.length > 0 && ( +
+ +
+

Load Average

+

+ {host.loadAverage.map((load, index) => ( + + {load.toFixed(2)} + {index < host.loadAverage.length - 1 && ', '} + + ))} +

+
+
+ )} +
+
+ )} + + {/* Update History */} +
+
+
+

Agent Update History

+
+ +
+ {host.updateHistory?.length > 0 ? ( + <> + + + + + + + + + + + {(showAllUpdates ? host.updateHistory : host.updateHistory.slice(0, 3)).map((update, index) => ( + + + + + + + ))} + +
+ Status + + Date + + Packages + + Security +
+
+
+ + {update.status === 'success' ? 'Success' : 'Failed'} + +
+
+ {formatDate(update.timestamp)} + + {update.packagesCount} + + {update.securityCount > 0 ? ( +
+ + + {update.securityCount} + +
+ ) : ( + - + )} +
+ + {host.updateHistory.length > 3 && ( +
+ +
+ )} + + ) : ( +
+ +

No update history available

+
+ )} +
+
{/* Credentials Modal */} @@ -476,7 +676,7 @@ const HostDetail = () => { // Credentials Modal Component const CredentialsModal = ({ host, isOpen, onClose }) => { const [showApiKey, setShowApiKey] = useState(false) - const [activeTab, setActiveTab] = useState('credentials') + const [activeTab, setActiveTab] = useState('quick-install') const { data: serverUrlData } = useQuery({ queryKey: ['serverUrl'], @@ -490,7 +690,7 @@ const CredentialsModal = ({ host, isOpen, onClose }) => { } const getSetupCommands = () => { - return `# Run this on the target host: ${host?.hostname} + return `# Run this on the target host: ${host?.friendlyName} echo "🔄 Setting up PatchMon agent..." @@ -532,7 +732,7 @@ echo " - View logs: tail -f /var/log/patchmon-agent.log"`
-

Host Setup - {host.hostname}

+

Host Setup - {host.friendlyName}

@@ -541,16 +741,6 @@ echo " - View logs: tail -f /var/log/patchmon-agent.log"` {/* Tabs */}
{/* Tab Content */} + {activeTab === 'quick-install' && ( +
+
+

One-Line Installation

+

+ Copy and run this command on the target host to automatically install and configure the PatchMon agent: +

+
+ + +
+
+ +
+

Manual Installation

+

+ If you prefer to install manually, follow these steps: +

+
+
+
1. Download Agent Script
+
+ + +
+
+ +
+
2. Install Agent
+
+ + +
+
+ +
+
3. Configure Credentials
+
+ + +
+
+ +
+
4. Test Configuration
+
+ + +
+
+ +
+
5. Send Initial Data
+
+ + +
+
+ +
+
6. Setup Crontab (Optional)
+
+ + +
+
+
+
+
+ )} + {activeTab === 'credentials' && (
@@ -630,48 +978,6 @@ echo " - View logs: tail -f /var/log/patchmon-agent.log"`
)} - {activeTab === 'quick-install' && ( -
-
-

One-Line Installation

-

- Copy and run this command on the target host to automatically install and configure the PatchMon agent: -

-
- - -
-
- -
-

Manual Installation

-

- If you prefer manual installation, run these commands on the target host: -

-
-                {commands}
-              
- -
-
- )}
@@ -348,7 +351,7 @@ echo " - View logs: tail -f /var/log/patchmon-agent.log"`

🚀 One-Line Installation

- Copy and paste this single command on {host.hostname} to install and configure the PatchMon agent automatically. + Copy and paste this single command on {host.friendlyName} to install and configure the PatchMon agent automatically.

@@ -375,7 +378,7 @@ echo " - View logs: tail -f /var/log/patchmon-agent.log"`
  • • Downloads the PatchMon installation script
  • • Installs the agent to /usr/local/bin/patchmon-agent.sh
  • -
  • • Configures API credentials for {host.hostname}
  • +
  • • Configures API credentials for {host.friendlyName}
  • • Tests the connection to PatchMon server
  • • Sends initial package data
  • • Sets up hourly automatic updates via crontab
  • @@ -441,7 +444,7 @@ echo " - View logs: tail -f /var/log/patchmon-agent.log"`

    ⚠️ Security Note

    - Keep these credentials secure. They provide access to update package information for {host.hostname} only. + Keep these credentials secure. They provide access to update package information for {host.friendlyName} only.

@@ -452,7 +455,7 @@ echo " - View logs: tail -f /var/log/patchmon-agent.log"`

📋 Step-by-Step Setup

- Follow these commands on {host.hostname} to install and configure the PatchMon agent. + Follow these commands on {host.friendlyName} to install and configure the PatchMon agent.

@@ -549,7 +552,7 @@ echo " - View logs: tail -f /var/log/patchmon-agent.log"`

🚀 Automated Setup

- Copy this complete setup script to {host.hostname} and run it to automatically install and configure everything. + Copy this complete setup script to {host.friendlyName} and run it to automatically install and configure everything.

@@ -570,7 +573,7 @@ echo " - View logs: tail -f /var/log/patchmon-agent.log"`

Usage:

1. Copy the script above

-

2. Save it to a file on {host.hostname} (e.g., setup-patchmon.sh)

+

2. Save it to a file on {host.friendlyName} (e.g., setup-patchmon.sh)

3. Run: chmod +x setup-patchmon.sh && sudo ./setup-patchmon.sh

@@ -642,13 +645,24 @@ const Hosts = () => { newSearchParams.delete('action') navigate(`/hosts${newSearchParams.toString() ? `?${newSearchParams.toString()}` : ''}`, { replace: true }) } + + // Handle selected hosts from packages page + const selected = searchParams.get('selected') + if (selected) { + const hostIds = selected.split(',').filter(Boolean) + setSelectedHosts(hostIds) + // Remove the selected parameter from URL without triggering a page reload + const newSearchParams = new URLSearchParams(searchParams) + newSearchParams.delete('selected') + navigate(`/hosts${newSearchParams.toString() ? `?${newSearchParams.toString()}` : ''}`, { replace: true }) + } }, [searchParams, navigate]) // Column configuration const [columnConfig, setColumnConfig] = useState(() => { const defaultConfig = [ { id: 'select', label: 'Select', visible: true, order: 0 }, - { id: 'host', label: 'Host', visible: true, order: 1 }, + { id: 'host', label: 'Friendly Name', visible: true, order: 1 }, { id: 'ip', label: 'IP Address', visible: false, order: 2 }, { id: 'group', label: 'Group', visible: true, order: 3 }, { id: 'os', label: 'OS', visible: true, order: 4 }, @@ -732,7 +746,28 @@ const Hosts = () => { const bulkUpdateGroupMutation = useMutation({ mutationFn: ({ hostIds, hostGroupId }) => adminHostsAPI.bulkUpdateGroup(hostIds, hostGroupId), - onSuccess: () => { + onSuccess: (data) => { + console.log('bulkUpdateGroupMutation success:', data); + + // Update the cache with the new host data + if (data && data.hosts) { + queryClient.setQueryData(['hosts'], (oldData) => { + if (!oldData) return oldData; + return oldData.map(host => { + const updatedHost = data.hosts.find(h => h.id === host.id); + if (updatedHost) { + // Ensure hostGroupId is set correctly + return { + ...updatedHost, + hostGroupId: updatedHost.hostGroup?.id || null + }; + } + return host; + }); + }); + } + + // Also invalidate to ensure consistency queryClient.invalidateQueries(['hosts']) setSelectedHosts([]) 
setShowBulkAssignModal(false) @@ -747,6 +782,55 @@ const Hosts = () => { } }) + const updateFriendlyNameMutation = useMutation({ + mutationFn: ({ hostId, friendlyName }) => adminHostsAPI.updateFriendlyName(hostId, friendlyName).then(res => res.data), + onSuccess: () => { + queryClient.invalidateQueries(['hosts']) + } + }) + + const updateHostGroupMutation = useMutation({ + mutationFn: ({ hostId, hostGroupId }) => { + console.log('updateHostGroupMutation called with:', { hostId, hostGroupId }); + return adminHostsAPI.updateGroup(hostId, hostGroupId).then(res => { + console.log('updateGroup API response:', res); + return res.data; + }); + }, + onSuccess: (data) => { + console.log('updateHostGroupMutation success:', data); + console.log('Updated host data:', data.host); + console.log('Host group in response:', data.host.hostGroup); + + // Update the cache with the new host data + queryClient.setQueryData(['hosts'], (oldData) => { + console.log('Old cache data before update:', oldData); + if (!oldData) return oldData; + const updatedData = oldData.map(host => { + if (host.id === data.host.id) { + console.log('Updating host in cache:', host.id, 'with new data:', data.host); + // Ensure hostGroupId is set correctly + const updatedHost = { + ...data.host, + hostGroupId: data.host.hostGroup?.id || null + }; + console.log('Updated host with hostGroupId:', updatedHost); + return updatedHost; + } + return host; + }); + console.log('New cache data after update:', updatedData); + return updatedData; + }); + + // Also invalidate to ensure consistency + queryClient.invalidateQueries(['hosts']) + }, + onError: (error) => { + console.error('updateHostGroupMutation error:', error); + } + }) + // Helper functions for bulk selection const handleSelectHost = (hostId) => { setSelectedHosts(prev => @@ -775,7 +859,7 @@ const Hosts = () => { let filtered = hosts.filter(host => { // Search filter const matchesSearch = searchTerm === '' || - 
host.hostname.toLowerCase().includes(searchTerm.toLowerCase()) || + host.friendlyName.toLowerCase().includes(searchTerm.toLowerCase()) || host.ip?.toLowerCase().includes(searchTerm.toLowerCase()) || host.osType?.toLowerCase().includes(searchTerm.toLowerCase()) @@ -808,9 +892,13 @@ const Hosts = () => { let aValue, bValue switch (sortField) { + case 'friendlyName': + aValue = a.friendlyName.toLowerCase() + bValue = b.friendlyName.toLowerCase() + break case 'hostname': - aValue = a.hostname.toLowerCase() - bValue = b.hostname.toLowerCase() + aValue = a.hostname?.toLowerCase() || 'zzz_no_hostname' + bValue = b.hostname?.toLowerCase() || 'zzz_no_hostname' break case 'ip': aValue = a.ip?.toLowerCase() || 'zzz_no_ip' @@ -929,15 +1017,16 @@ const Hosts = () => { const resetColumns = () => { const defaultConfig = [ { id: 'select', label: 'Select', visible: true, order: 0 }, - { id: 'host', label: 'Host', visible: true, order: 1 }, - { id: 'ip', label: 'IP Address', visible: false, order: 2 }, - { id: 'group', label: 'Group', visible: true, order: 3 }, - { id: 'os', label: 'OS', visible: true, order: 4 }, - { id: 'osVersion', label: 'OS Version', visible: false, order: 5 }, - { id: 'status', label: 'Status', visible: true, order: 6 }, - { id: 'updates', label: 'Updates', visible: true, order: 7 }, - { id: 'lastUpdate', label: 'Last Update', visible: true, order: 8 }, - { id: 'actions', label: 'Actions', visible: true, order: 9 } + { id: 'host', label: 'Friendly Name', visible: true, order: 1 }, + { id: 'hostname', label: 'System Hostname', visible: true, order: 2 }, + { id: 'ip', label: 'IP Address', visible: false, order: 3 }, + { id: 'group', label: 'Group', visible: true, order: 4 }, + { id: 'os', label: 'OS', visible: true, order: 5 }, + { id: 'osVersion', label: 'OS Version', visible: false, order: 6 }, + { id: 'status', label: 'Status', visible: true, order: 7 }, + { id: 'updates', label: 'Updates', visible: true, order: 8 }, + { id: 'lastUpdate', label: 'Last 
Update', visible: true, order: 9 }, + { id: 'actions', label: 'Actions', visible: true, order: 10 } ] updateColumnConfig(defaultConfig) } @@ -965,12 +1054,26 @@ const Hosts = () => { ) case 'host': return ( - - {host.hostname} - + updateFriendlyNameMutation.mutate({ hostId: host.id, friendlyName: newName })} + placeholder="Enter friendly name..." + maxLength={100} + linkTo={`/hosts/${host.id}`} + validate={(value) => { + if (!value.trim()) return 'Friendly name is required'; + if (value.trim().length < 1) return 'Friendly name must be at least 1 character'; + if (value.trim().length > 100) return 'Friendly name must be less than 100 characters'; + return null; + }} + className="w-full" + /> + ) + case 'hostname': + return ( +
+ {host.hostname || 'N/A'} +
) case 'ip': return ( @@ -979,22 +1082,27 @@ const Hosts = () => {
) case 'group': - return host.hostGroup ? ( - - {host.hostGroup.name} - - ) : ( - - Ungrouped - + console.log('Rendering group for host:', { + hostId: host.id, + hostGroupId: host.hostGroupId, + hostGroup: host.hostGroup, + availableGroups: hostGroups + }); + return ( + updateHostGroupMutation.mutate({ hostId: host.id, hostGroupId: newGroupId })} + options={hostGroups || []} + placeholder="Select group..." + className="w-full" + /> ) case 'os': return ( -
- {host.osType} +
+ + {host.osType}
) case 'osVersion': @@ -1068,6 +1176,8 @@ const Hosts = () => { setGroupBy('none') setHideStale(false) setShowFilters(false) + // Clear URL parameters to ensure no filters are applied + navigate('/hosts', { replace: true }) } const handleUpToDateClick = () => { @@ -1401,6 +1511,14 @@ const Hosts = () => { )} ) : column.id === 'host' ? ( + + ) : column.id === 'hostname' ? ( + ) + case 'priority': + return pkg.isSecurityUpdate ? ( + + + Security Update + + ) : ( + Regular Update + ) + case 'latestVersion': + return ( +
+ {pkg.latestVersion || 'Unknown'} +
+ ) + default: + return null + } + } + + // Get unique categories + const categories = [...new Set(packages?.map(pkg => pkg.category).filter(Boolean))] || [] + + // Calculate unique affected hosts + const uniqueAffectedHosts = new Set() + packages?.forEach(pkg => { + pkg.affectedHosts.forEach(host => { + uniqueAffectedHosts.add(host.hostId) + }) + }) + const uniqueAffectedHostsCount = uniqueAffectedHosts.size + + // Calculate total packages across all hosts (including up-to-date ones) + const totalPackagesCount = hosts?.reduce((total, host) => { + return total + (host.totalPackagesCount || 0) + }, 0) || 0 + + // Calculate outdated packages (packages that need updates) + const outdatedPackagesCount = packages?.length || 0 + + // Calculate security updates + const securityUpdatesCount = packages?.filter(pkg => pkg.isSecurityUpdate).length || 0 + if (isLoading) { return (
@@ -74,64 +328,38 @@ const Packages = () => { ) } - // Filter packages based on search and filters - const filteredPackages = packages?.filter(pkg => { - const matchesSearch = pkg.name.toLowerCase().includes(searchTerm.toLowerCase()) || - (pkg.description && pkg.description.toLowerCase().includes(searchTerm.toLowerCase())) - - const matchesCategory = categoryFilter === 'all' || pkg.category === categoryFilter - - const matchesSecurity = securityFilter === 'all' || - (securityFilter === 'security' && pkg.isSecurityUpdate) || - (securityFilter === 'regular' && !pkg.isSecurityUpdate) - - return matchesSearch && matchesCategory && matchesSecurity - }) || [] - - // Get unique categories - const categories = [...new Set(packages?.map(pkg => pkg.category).filter(Boolean))] || [] - - // Calculate unique affected hosts - const uniqueAffectedHosts = new Set() - packages?.forEach(pkg => { - pkg.affectedHosts.forEach(host => { - uniqueAffectedHosts.add(host.hostId) - }) - }) - const uniqueAffectedHostsCount = uniqueAffectedHosts.size - return ( -
+
{/* Summary Stats */} -
-
+
+

Total Packages

-

{packages?.length || 0}

+

{totalPackagesCount}

-
+
- +
-

Security Updates

+

Total Outdated Packages

- {packages?.filter(pkg => pkg.isSecurityUpdate).length || 0} + {outdatedPackagesCount}

-
+
-

Affected Hosts

+

Hosts Pending Updates

{uniqueAffectedHostsCount}

@@ -139,152 +367,235 @@ const Packages = () => {
-
+
- +
-

Categories

-

{categories.length}

+

Security Updates Across All Hosts

+

{securityUpdatesCount}

- {/* Filters */} -
-
- {/* Search */} -
-
- - setSearchTerm(e.target.value)} - className="w-full pl-10 pr-4 py-2 border border-secondary-300 dark:border-secondary-600 rounded-md focus:ring-2 focus:ring-primary-500 focus:border-transparent bg-white dark:bg-secondary-800 text-secondary-900 dark:text-white placeholder-secondary-500 dark:placeholder-secondary-400" - /> -
-
- - {/* Category Filter */} -
- -
- - {/* Security Filter */} -
- -
-
-
- {/* Packages List */} -
-
-

- Packages Needing Updates ({filteredPackages.length}) -

+
+
+
+ {/* Empty selection controls area to match hosts page spacing */} +
- {filteredPackages.length === 0 ? ( -
- -

- {packages?.length === 0 ? 'No packages need updates' : 'No packages match your filters'} -

- {packages?.length === 0 && ( -

- All packages are up to date across all hosts -

- )} + {/* Table Controls */} +
+
+ {/* Search */} +
+
+ + setSearchTerm(e.target.value)} + className="w-full pl-10 pr-4 py-2 border border-secondary-300 dark:border-secondary-600 rounded-md focus:ring-2 focus:ring-primary-500 focus:border-transparent bg-white dark:bg-secondary-800 text-secondary-900 dark:text-white placeholder-secondary-500 dark:placeholder-secondary-400" + /> +
+
+ + {/* Category Filter */} +
+ +
+ + {/* Security Filter */} +
+ +
+ + {/* Host Filter */} +
+ +
+ + {/* Columns Button */} +
+ +
- ) : ( -
- - + + +
+ {filteredAndSortedPackages.length === 0 ? ( +
+ +

+ {packages?.length === 0 ? 'No packages need updates' : 'No packages match your filters'} +

+ {packages?.length === 0 && ( +

+ All packages are up to date across all hosts +

+ )} +
+ ) : ( +
+
+ - - - - + {visibleColumns.map((column) => ( + + ))} - {filteredPackages.map((pkg) => ( - - - - - + {filteredAndSortedPackages.map((pkg) => ( + + {visibleColumns.map((column) => ( + + ))} ))} -
- Package - - Latest Version - - Affected Hosts - - Priority - + +
-
- -
-
- {pkg.name} -
- {pkg.description && ( -
- {pkg.description} -
- )} - {pkg.category && ( -
- Category: {pkg.category} -
- )} -
-
-
- {pkg.latestVersion || 'Unknown'} - -
- {pkg.affectedHostsCount} host{pkg.affectedHostsCount !== 1 ? 's' : ''} -
-
- {pkg.affectedHosts.slice(0, 2).map(host => host.hostname).join(', ')} - {pkg.affectedHosts.length > 2 && ` +${pkg.affectedHosts.length - 2} more`} -
-
- {pkg.isSecurityUpdate ? ( - - - Security Update - - ) : ( - Regular Update - )} -
+ {renderCellContent(column, pkg)} +
+ +
+ )} +
+
+
+ + {/* Column Settings Modal */} + {showColumnSettings && ( + setShowColumnSettings(false)} + onToggleVisibility={toggleColumnVisibility} + onReorder={reorderColumns} + onReset={resetColumns} + /> + )} +
+ ) +} + +// Column Settings Modal Component +const ColumnSettingsModal = ({ columnConfig, onClose, onToggleVisibility, onReorder, onReset }) => { + const [draggedIndex, setDraggedIndex] = useState(null) + + const handleDragStart = (e, index) => { + setDraggedIndex(index) + e.dataTransfer.effectAllowed = 'move' + } + + const handleDragOver = (e) => { + e.preventDefault() + e.dataTransfer.dropEffect = 'move' + } + + const handleDrop = (e, dropIndex) => { + e.preventDefault() + if (draggedIndex !== null && draggedIndex !== dropIndex) { + onReorder(draggedIndex, dropIndex) + } + setDraggedIndex(null) + } + + return ( +
+
+
+

Customize Columns

+ +
+ +
+ {columnConfig.map((column, index) => ( +
handleDragStart(e, index)} + onDragOver={handleDragOver} + onDrop={(e) => handleDrop(e, index)} + className={`flex items-center justify-between p-3 border rounded-lg cursor-move ${ + draggedIndex === index ? 'opacity-50' : 'hover:bg-secondary-50 dark:hover:bg-secondary-700' + } border-secondary-200 dark:border-secondary-600`} + > +
+ + + {column.label} + +
+
- )} + ))} +
+ +
+ +
diff --git a/frontend/src/pages/Repositories.jsx b/frontend/src/pages/Repositories.jsx index 3ef0371..ad9546e 100644 --- a/frontend/src/pages/Repositories.jsx +++ b/frontend/src/pages/Repositories.jsx @@ -1,4 +1,4 @@ -import React, { useState } from 'react'; +import React, { useState, useMemo } from 'react'; import { useQuery } from '@tanstack/react-query'; import { Link } from 'react-router-dom'; import { @@ -11,7 +11,15 @@ import { Lock, Unlock, Database, - Eye + Eye, + Search, + Columns, + ArrowUpDown, + ArrowUp, + ArrowDown, + X, + GripVertical, + Check } from 'lucide-react'; import { repositoryAPI } from '../utils/api'; @@ -19,6 +27,37 @@ const Repositories = () => { const [searchTerm, setSearchTerm] = useState(''); const [filterType, setFilterType] = useState('all'); // all, secure, insecure const [filterStatus, setFilterStatus] = useState('all'); // all, active, inactive + const [sortField, setSortField] = useState('name'); + const [sortDirection, setSortDirection] = useState('asc'); + const [showColumnSettings, setShowColumnSettings] = useState(false); + + // Column configuration + const [columnConfig, setColumnConfig] = useState(() => { + const defaultConfig = [ + { id: 'name', label: 'Repository', visible: true, order: 0 }, + { id: 'url', label: 'URL', visible: true, order: 1 }, + { id: 'distribution', label: 'Distribution', visible: true, order: 2 }, + { id: 'security', label: 'Security', visible: true, order: 3 }, + { id: 'status', label: 'Status', visible: true, order: 4 }, + { id: 'hostCount', label: 'Hosts', visible: true, order: 5 }, + { id: 'actions', label: 'Actions', visible: true, order: 6 } + ]; + + const saved = localStorage.getItem('repositories-column-config'); + if (saved) { + try { + return JSON.parse(saved); + } catch (e) { + console.error('Failed to parse saved column config:', e); + } + } + return defaultConfig; + }); + + const updateColumnConfig = (newConfig) => { + setColumnConfig(newConfig); + 
localStorage.setItem('repositories-column-config', JSON.stringify(newConfig)); + }; // Fetch repositories const { data: repositories = [], isLoading, error } = useQuery({ @@ -32,22 +71,122 @@ const Repositories = () => { queryFn: () => repositoryAPI.getStats().then(res => res.data) }); - // Filter repositories based on search and filters - const filteredRepositories = repositories.filter(repo => { - const matchesSearch = repo.name.toLowerCase().includes(searchTerm.toLowerCase()) || - repo.url.toLowerCase().includes(searchTerm.toLowerCase()) || - repo.distribution.toLowerCase().includes(searchTerm.toLowerCase()); + // Get visible columns in order + const visibleColumns = columnConfig + .filter(col => col.visible) + .sort((a, b) => a.order - b.order); + + // Sorting functions + const handleSort = (field) => { + if (sortField === field) { + setSortDirection(sortDirection === 'asc' ? 'desc' : 'asc'); + } else { + setSortField(field); + setSortDirection('asc'); + } + }; + + const getSortIcon = (field) => { + if (sortField !== field) return + return sortDirection === 'asc' ? : + }; + + // Column management functions + const toggleColumnVisibility = (columnId) => { + const newConfig = columnConfig.map(col => + col.id === columnId ? 
{ ...col, visible: !col.visible } : col + ) + updateColumnConfig(newConfig) + }; + + const reorderColumns = (fromIndex, toIndex) => { + const newConfig = [...columnConfig] + const [movedColumn] = newConfig.splice(fromIndex, 1) + newConfig.splice(toIndex, 0, movedColumn) - const matchesType = filterType === 'all' || - (filterType === 'secure' && repo.isSecure) || - (filterType === 'insecure' && !repo.isSecure); + // Update order values + const updatedConfig = newConfig.map((col, index) => ({ ...col, order: index })) + updateColumnConfig(updatedConfig) + }; + + const resetColumns = () => { + const defaultConfig = [ + { id: 'name', label: 'Repository', visible: true, order: 0 }, + { id: 'url', label: 'URL', visible: true, order: 1 }, + { id: 'distribution', label: 'Distribution', visible: true, order: 2 }, + { id: 'security', label: 'Security', visible: true, order: 3 }, + { id: 'status', label: 'Status', visible: true, order: 4 }, + { id: 'hostCount', label: 'Hosts', visible: true, order: 5 }, + { id: 'actions', label: 'Actions', visible: true, order: 6 } + ] + updateColumnConfig(defaultConfig) + }; + + // Filter and sort repositories + const filteredAndSortedRepositories = useMemo(() => { + if (!repositories) return [] - const matchesStatus = filterStatus === 'all' || - (filterStatus === 'active' && repo.isActive) || - (filterStatus === 'inactive' && !repo.isActive); - - return matchesSearch && matchesType && matchesStatus; - }); + // Filter repositories + const filtered = repositories.filter(repo => { + const matchesSearch = repo.name.toLowerCase().includes(searchTerm.toLowerCase()) || + repo.url.toLowerCase().includes(searchTerm.toLowerCase()) || + repo.distribution.toLowerCase().includes(searchTerm.toLowerCase()); + + // Debug logging + console.log('Filtering repo:', { + name: repo.name, + isSecure: repo.isSecure, + filterType, + url: repo.url + }); + + // Check security based on URL if isSecure property doesn't exist + const isSecure = repo.isSecure !== 
undefined ? repo.isSecure : repo.url.startsWith('https://'); + + const matchesType = filterType === 'all' || + (filterType === 'secure' && isSecure) || + (filterType === 'insecure' && !isSecure); + + const matchesStatus = filterStatus === 'all' || + (filterStatus === 'active' && repo.isActive === true) || + (filterStatus === 'inactive' && repo.isActive === false); + + console.log('Filter results:', { + matchesSearch, + matchesType, + matchesStatus, + final: matchesSearch && matchesType && matchesStatus + }); + + return matchesSearch && matchesType && matchesStatus; + }); + + // Sort repositories + const sorted = filtered.sort((a, b) => { + let aValue = a[sortField]; + let bValue = b[sortField]; + + // Handle special cases + if (sortField === 'security') { + aValue = a.isSecure ? 'Secure' : 'Insecure'; + bValue = b.isSecure ? 'Secure' : 'Insecure'; + } else if (sortField === 'status') { + aValue = a.isActive ? 'Active' : 'Inactive'; + bValue = b.isActive ? 'Active' : 'Inactive'; + } + + if (typeof aValue === 'string') { + aValue = aValue.toLowerCase(); + bValue = bValue.toLowerCase(); + } + + if (aValue < bValue) return sortDirection === 'asc' ? -1 : 1; + if (aValue > bValue) return sortDirection === 'asc' ? 1 : -1; + return 0; + }); + + return sorted; + }, [repositories, searchTerm, filterType, filterStatus, sortField, sortDirection]); if (isLoading) { return ( @@ -71,202 +210,331 @@ const Repositories = () => { } return ( -
- {/* Header */} -
-
-

- Repositories -

-

- Manage and monitor package repositories across your infrastructure -

-
-
+
- {/* Statistics Cards */} - {stats && ( -
-
-
-
- -
-
-

Total Repositories

-

{stats.totalRepositories}

-
-
-
- -
-
-
- -
-
-

Active Repositories

-

{stats.activeRepositories}

-
-
-
- -
-
-
- -
-
-

Secure (HTTPS)

-

{stats.secureRepositories}

-
-
-
- -
-
-
-
- - - {stats.securityPercentage}% - -
-
-
-

Security Score

-

{stats.securityPercentage}%

-
+ {/* Summary Stats */} +
+
+
+ +
+

Total Repositories

+

{stats?.totalRepositories || 0}

- )} - - {/* Search and Filters */} -
-
- {/* Search */} -
- setSearchTerm(e.target.value)} - className="w-full px-3 py-2 border border-secondary-300 dark:border-secondary-600 rounded-md focus:outline-none focus:ring-2 focus:ring-primary-500 dark:bg-secondary-700 dark:text-white" - /> + +
+
+ +
+

Active Repositories

+

{stats?.activeRepositories || 0}

+
- - {/* Security Filter */} -
- +
+ +
+
+ +
+

Secure (HTTPS)

+

{stats?.secureRepositories || 0}

+
- - {/* Status Filter */} -
- +
+ +
+
+ +
+

Security Score

+

{stats?.securityPercentage || 0}%

+
{/* Repositories List */} -
-
-

- Repositories ({filteredRepositories.length}) -

-
- - {filteredRepositories.length === 0 ? ( -
- -

No repositories found

-

- {searchTerm || filterType !== 'all' || filterStatus !== 'all' - ? 'Try adjusting your search or filters.' - : 'No repositories have been reported by your hosts yet.'} -

+
+
+
+ {/* Empty selection controls area to match packages page spacing */}
- ) : ( -
- {filteredRepositories.map((repo) => ( -
-
-
-
-
- {repo.isSecure ? ( - - ) : ( - - )} -

- {repo.name} -

- - {repo.isActive ? 'Active' : 'Inactive'} - -
-
- -
-

- - {repo.url} -

-
- Distribution: {repo.distribution} - Type: {repo.repoType} - Components: {repo.components} -
-
-
- -
- {/* Host Count */} -
-
- - {repo.hostCount} hosts -
-
- - {/* View Details */} - - - View - -
+ + {/* Table Controls */} +
+
+ {/* Search */} +
+
+ + setSearchTerm(e.target.value)} + className="w-full pl-10 pr-4 py-2 border border-secondary-300 dark:border-secondary-600 rounded-md focus:ring-2 focus:ring-primary-500 focus:border-transparent bg-white dark:bg-secondary-800 text-secondary-900 dark:text-white placeholder-secondary-500 dark:placeholder-secondary-400" + />
- ))} + + {/* Security Filter */} +
+ +
+ + {/* Status Filter */} +
+ +
+ + {/* Columns Button */} +
+ +
+
- )} + +
+ {filteredAndSortedRepositories.length === 0 ? ( +
+ +

+ {repositories?.length === 0 ? 'No repositories found' : 'No repositories match your filters'} +

+ {repositories?.length === 0 && ( +

+ No repositories have been reported by your hosts yet +

+ )} +
+ ) : ( +
+ + + + {visibleColumns.map((column) => ( + + ))} + + + + {filteredAndSortedRepositories.map((repo) => ( + + {visibleColumns.map((column) => ( + + ))} + + ))} + +
+ +
+ {renderCellContent(column, repo)} +
+
+ )} +
+
+ + {/* Column Settings Modal */} + {showColumnSettings && ( + setShowColumnSettings(false)} + onToggleVisibility={toggleColumnVisibility} + onReorder={reorderColumns} + onReset={resetColumns} + /> + )}
); + + // Render cell content based on column type + function renderCellContent(column, repo) { + switch (column.id) { + case 'name': + return ( +
+ +
+
+ {repo.name} +
+
+
+ ) + case 'url': + return ( +
+ {repo.url} +
+ ) + case 'distribution': + return ( +
+ {repo.distribution} +
+ ) + case 'security': + const isSecure = repo.isSecure !== undefined ? repo.isSecure : repo.url.startsWith('https://'); + return ( +
+ {isSecure ? ( +
+ + Secure +
+ ) : ( +
+ + Insecure +
+ )} +
+ ) + case 'status': + return ( + + {repo.isActive ? 'Active' : 'Inactive'} + + ) + case 'hostCount': + return ( +
+ + {repo.hostCount} +
+ ) + case 'actions': + return ( + + View + + + ) + default: + return null + } + } +}; + +// Column Settings Modal Component +const ColumnSettingsModal = ({ columnConfig, onClose, onToggleVisibility, onReorder, onReset }) => { + const [draggedIndex, setDraggedIndex] = useState(null) + + const handleDragStart = (e, index) => { + setDraggedIndex(index) + e.dataTransfer.effectAllowed = 'move' + } + + const handleDragOver = (e) => { + e.preventDefault() + e.dataTransfer.dropEffect = 'move' + } + + const handleDrop = (e, dropIndex) => { + e.preventDefault() + if (draggedIndex !== null && draggedIndex !== dropIndex) { + onReorder(draggedIndex, dropIndex) + } + setDraggedIndex(null) + } + + return ( +
+
+
+

Column Settings

+ +
+ +
+ {columnConfig.map((column, index) => ( +
handleDragStart(e, index)} + onDragOver={handleDragOver} + onDrop={(e) => handleDrop(e, index)} + className="flex items-center justify-between p-3 bg-secondary-50 dark:bg-secondary-700 rounded-lg cursor-move hover:bg-secondary-100 dark:hover:bg-secondary-600 transition-colors" + > +
+ + + {column.label} + +
+ +
+ ))} +
+ +
+ + +
+
+
+ ) }; export default Repositories; diff --git a/frontend/src/pages/RepositoryDetail.jsx b/frontend/src/pages/RepositoryDetail.jsx index dbac0a0..e726f2a 100644 --- a/frontend/src/pages/RepositoryDetail.jsx +++ b/frontend/src/pages/RepositoryDetail.jsx @@ -339,7 +339,7 @@ const RepositoryDetail = () => { to={`/hosts/${hostRepo.host.id}`} className="text-primary-600 hover:text-primary-700 font-medium" > - {hostRepo.host.hostname} + {hostRepo.host.friendlyName}
IP: {hostRepo.host.ip} diff --git a/frontend/src/utils/api.js b/frontend/src/utils/api.js index 0991d31..c022d8c 100644 --- a/frontend/src/utils/api.js +++ b/frontend/src/utils/api.js @@ -63,7 +63,8 @@ export const adminHostsAPI = { regenerateCredentials: (hostId) => api.post(`/hosts/${hostId}/regenerate-credentials`), updateGroup: (hostId, hostGroupId) => api.put(`/hosts/${hostId}/group`, { hostGroupId }), bulkUpdateGroup: (hostIds, hostGroupId) => api.put('/hosts/bulk/group', { hostIds, hostGroupId }), - toggleAutoUpdate: (hostId, autoUpdate) => api.patch(`/hosts/${hostId}/auto-update`, { autoUpdate }) + toggleAutoUpdate: (hostId, autoUpdate) => api.patch(`/hosts/${hostId}/auto-update`, { autoUpdate }), + updateFriendlyName: (hostId, friendlyName) => api.patch(`/hosts/${hostId}/friendly-name`, { friendlyName }) } // Host Groups API diff --git a/frontend/src/utils/osIcons.jsx b/frontend/src/utils/osIcons.jsx new file mode 100644 index 0000000..2f1c0ba --- /dev/null +++ b/frontend/src/utils/osIcons.jsx @@ -0,0 +1,130 @@ +import { + Monitor, + Server, + HardDrive, + Cpu, + Zap, + Shield, + Globe, + Terminal +} from 'lucide-react'; + +// Import OS icons from react-icons +import { + SiUbuntu, + SiDebian, + SiCentos, + SiFedora, + SiArchlinux, + SiAlpinelinux, + SiLinux, + SiMacos +} from 'react-icons/si'; + +import { + DiUbuntu, + DiDebian, + DiLinux, + DiWindows +} from 'react-icons/di'; + +/** + * OS Icon mapping utility + * Maps operating system types to appropriate react-icons components + */ +export const getOSIcon = (osType) => { + if (!osType) return Monitor; + + const os = osType.toLowerCase(); + + // Linux distributions with authentic react-icons + if (os.includes('ubuntu')) return SiUbuntu; + if (os.includes('debian')) return SiDebian; + if (os.includes('centos') || os.includes('rhel') || os.includes('red hat')) return SiCentos; + if (os.includes('fedora')) return SiFedora; + if (os.includes('arch')) return SiArchlinux; + if (os.includes('alpine')) return 
SiAlpinelinux; + if (os.includes('suse') || os.includes('opensuse')) return SiLinux; // SUSE uses generic Linux icon + + // Generic Linux + if (os.includes('linux')) return SiLinux; + + // Windows + if (os.includes('windows')) return DiWindows; + + // macOS + if (os.includes('mac') || os.includes('darwin')) return SiMacos; + + // FreeBSD + if (os.includes('freebsd')) return Server; + + // Default fallback + return Monitor; +}; + +/** + * OS Color mapping utility + * Maps operating system types to appropriate colors (react-icons have built-in brand colors) + */ +export const getOSColor = (osType) => { + if (!osType) return 'text-gray-500'; + + // react-icons already have the proper brand colors built-in + // This function is kept for compatibility but returns neutral colors + return 'text-gray-600'; +}; + +/** + * OS Display name utility + * Provides clean, formatted OS names for display + */ +export const getOSDisplayName = (osType) => { + if (!osType) return 'Unknown'; + + const os = osType.toLowerCase(); + + // Linux distributions + if (os.includes('ubuntu')) return 'Ubuntu'; + if (os.includes('debian')) return 'Debian'; + if (os.includes('centos')) return 'CentOS'; + if (os.includes('rhel') || os.includes('red hat')) return 'Red Hat Enterprise Linux'; + if (os.includes('fedora')) return 'Fedora'; + if (os.includes('arch')) return 'Arch Linux'; + if (os.includes('suse')) return 'SUSE Linux'; + if (os.includes('opensuse')) return 'openSUSE'; + if (os.includes('alpine')) return 'Alpine Linux'; + + // Generic Linux + if (os.includes('linux')) return 'Linux'; + + // Windows + if (os.includes('windows')) return 'Windows'; + + // macOS + if (os.includes('mac') || os.includes('darwin')) return 'macOS'; + + // FreeBSD + if (os.includes('freebsd')) return 'FreeBSD'; + + // Return original if no match + return osType; +}; + +/** + * OS Icon component with proper styling + */ +export const OSIcon = ({ osType, className = "h-4 w-4", showText = false }) => { + const 
IconComponent = getOSIcon(osType); + const displayName = getOSDisplayName(osType); + + if (showText) { + return ( +
+ + {displayName} +
+ ); + } + + return ; +}; diff --git a/package-lock.json b/package-lock.json index 9021368..ee29dc0 100644 --- a/package-lock.json +++ b/package-lock.json @@ -63,6 +63,7 @@ "react": "^18.2.0", "react-chartjs-2": "^5.2.0", "react-dom": "^18.2.0", + "react-icons": "^5.5.0", "react-router-dom": "^6.20.1" }, "devDependencies": { @@ -6201,6 +6202,15 @@ "react": "^18.3.1" } }, + "node_modules/react-icons": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/react-icons/-/react-icons-5.5.0.tgz", + "integrity": "sha512-MEFcXdkP3dLo8uumGI5xN3lDFNsRtrjbOEKDLD7yv76v4wpnEq2Lt2qeHaQOr34I/wPN3s3+N08WkQ+CW37Xiw==", + "license": "MIT", + "peerDependencies": { + "react": "*" + } + }, "node_modules/react-is": { "version": "16.13.1", "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",