6 Commits
dev ... demo

SHA1 Message Date
49532e6fda Merge branch 'demo' of https://github.com/DumbWareio/DumbDrop into demo 2025-02-27 11:15:16 -07:00
6316082a95 Create demo.md 2025-02-27 11:14:07 -07:00
686f0d9fb6 Update README.md 2025-02-27 11:13:44 -07:00
839ae032d5 Update README.md 2025-02-27 11:12:37 -07:00
539a977afb Update README.md 2025-02-27 11:08:58 -07:00
dfee063ee1 demo things. 2025-02-27 05:51:24 -07:00
37 changed files with 2249 additions and 5220 deletions


@@ -1,130 +1,18 @@
#########################################
# SERVER CONFIGURATION
#########################################
# Server Configuration
PORT=3000 # The port the server will listen on
BASE_URL=http://localhost:3000 # The base URL for the application
# Port for the server (default: 3000)
PORT=3000
# Upload Settings
MAX_FILE_SIZE=1024 # Maximum file size in MB
AUTO_UPLOAD=false # Enable automatic upload on file selection
# Base URL for the application (must end with '/', default: http://localhost:PORT/)
BASE_URL=http://localhost:3000/
# Security
DUMBDROP_PIN= # Optional PIN protection (4-10 digits)
DUMBDROP_TITLE=DumbDrop # Site title displayed in header
# Node environment (default: development)
NODE_ENV=development
# Notifications (Optional)
APPRISE_URL= # Apprise URL for notifications (e.g., tgram://bottoken/ChatID)
APPRISE_MESSAGE=New file uploaded - {filename} ({size}), Storage used {storage}
APPRISE_SIZE_UNIT=auto # Size unit for notifications (auto, B, KB, MB, GB, TB)
#########################################
# STORAGE CONFIGURATION
#########################################
# Storage type ('local' or 's3', default: local)
STORAGE_TYPE=local
#########################################
# LOCAL STORAGE SETTINGS (if STORAGE_TYPE=local)
#########################################
# Directory for uploads (local dev, fallback: './local_uploads')
LOCAL_UPLOAD_DIR=./local_uploads
# Directory for uploads (Docker/production; optional, overrides LOCAL_UPLOAD_DIR if set)
UPLOAD_DIR=
#########################################
# S3 STORAGE SETTINGS (if STORAGE_TYPE=s3)
#########################################
# S3 Region (e.g., us-east-1 for AWS, us-west-000 for B2)
S3_REGION=
# S3 Bucket Name
S3_BUCKET_NAME=
# S3 Access Key ID
S3_ACCESS_KEY_ID=
# S3 Secret Access Key
S3_SECRET_ACCESS_KEY=
# Optional: S3 Endpoint URL (for non-AWS S3-compatible providers like MinIO, Backblaze B2)
# Example Backblaze B2: https://s3.us-west-000.backblazeb2.com
# Example MinIO: http://minio.local:9000
S3_ENDPOINT_URL=
# Optional: Force Path Style (true/false, default: false). Needed for some providers like MinIO.
S3_FORCE_PATH_STYLE=false
#########################################
# FILE UPLOAD LIMITS & OPTIONS
#########################################
# Maximum file size in MB (default: 1024)
MAX_FILE_SIZE=1024
# Comma-separated list of allowed file extensions (optional, e.g. .jpg,.png,.pdf)
# ALLOWED_EXTENSIONS=.jpg,.png,.pdf
ALLOWED_EXTENSIONS=
#########################################
# SECURITY
#########################################
# PIN protection (4-10 digits, optional)
# DUMBDROP_PIN=1234
DUMBDROP_PIN=
#########################################
# UI SETTINGS
#########################################
# Site title displayed in header (default: DumbDrop)
DUMBDROP_TITLE=DumbDrop
# Custom footer links (comma-separated, format: "Link Text @ URL")
# Example: FOOTER_LINKS=My Site @ https://example.com, Another Link @ https://another.org
FOOTER_LINKS=
#########################################
# NOTIFICATION SETTINGS
#########################################
# Apprise URL for notifications (optional)
APPRISE_URL=
# Notification message template (default: New file uploaded {filename} ({size}), Storage used {storage})
APPRISE_MESSAGE=New file uploaded {filename} ({size}), Storage used {storage}
# Size unit for notifications (B, KB, MB, GB, TB, or Auto; default: Auto)
APPRISE_SIZE_UNIT=Auto
#########################################
# ADVANCED
#########################################
# Enable automatic upload on file selection (true/false, default: false)
AUTO_UPLOAD=false
# Comma-separated list of origins allowed to embed the app in an iframe (optional)
# ALLOWED_IFRAME_ORIGINS=https://example.com,https://another.com
ALLOWED_IFRAME_ORIGINS=
# --- Docker Specific Settings ---
# User and Group IDs for file permissions
# Sets the user/group the application runs as inside the container.
# Files created in the mapped volume (e.g., ./local_uploads) will have this ownership.
# Set these to match your host user's ID/GID to avoid permission issues.
# Find your IDs with `id -u` and `id -g` on Linux/macOS.
# PUID=1000
# PGID=1000
# File Mode Creation Mask (Umask)
# Controls the default permissions for newly created files.
# 022 (default): Files 644 (rw-r--r--), Dirs 755 (rwxr-xr-x)
# 002: Files 664 (rw-rw-r--), Dirs 775 (rwxrwxr-x) - Good for group sharing
# 007: Files 660 (rw-rw----), Dirs 770 (rwxrwx---) - More restrictive
# 077: Files 600 (rw-------), Dirs 700 (rwx------) - Most restrictive
# UMASK=022
# Max number of retries for client-side chunk uploads (default: 5)
CLIENT_MAX_RETRIES=5
# Demo Mode (true/false, default: false). Overrides storage settings.
DEMO_MODE=false


@@ -4,7 +4,6 @@ on:
push:
branches:
- main # Trigger the workflow on pushes to the main branch
- dev # Trigger the workflow on pushes to the dev branch
jobs:
build-and-push:
@@ -40,8 +39,6 @@ jobs:
images: |
name=dumbwareio/dumbdrop
tags: |
# Add :dev tag for pushes to the dev branch
type=raw,value=dev,enable=${{ github.ref == 'refs/heads/dev' }}
# the semantic versioning tags add "latest" when a version tag is present
# but since version tags aren't being used (yet?) let's add "latest" anyway
type=raw,value=latest

37  .gitignore vendored

@@ -196,45 +196,8 @@ Thumbs.db
!uploads/.gitkeep
!local_uploads/.gitkeep
# Generated PWA Files
/public/*manifest.json
# Misc
*.log
.env.*
!.env.example
!dev/.env.dev.example
# Added by Claude Task Master
dev-debug.log
# Environment variables
# Editor directories and files
.idea
.vscode
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
# OS specific
# Task files
.windsurfrules
README-task-master.md
.cursor/mcp.json
.cursor/rules/cursor_rules.mdc
.cursor/rules/dev_workflow.mdc
.cursor/rules/self_improve.mdc
.cursor/rules/taskmaster.mdc
scripts/example_prd.txt
scripts/prd.txt
tasks/task_001.txt
tasks/task_002.txt
tasks/task_003.txt
tasks/task_004.txt
tasks/task_005.txt
tasks/task_006.txt
tasks/task_007.txt
tasks/task_008.txt
tasks/task_009.txt
tasks/task_010.txt
tasks/tasks.json


@@ -1,16 +1,8 @@
# Base stage for shared configurations
FROM node:20-alpine as base
# Add user and group IDs as arguments with defaults
ARG PUID=1000
ARG PGID=1000
# Default umask (022 yields 755 for dirs, 644 for files)
ARG UMASK=022
# Install necessary packages:
# - su-exec: lightweight sudo alternative
# - python3, pip: for apprise dependency
RUN apk add --no-cache su-exec python3 py3-pip && \
# Install python and create virtual environment with minimal dependencies
RUN apk add --no-cache python3 py3-pip && \
python3 -m venv /opt/venv && \
rm -rf /var/cache/apk/*
@@ -22,194 +14,51 @@ RUN . /opt/venv/bin/activate && \
# Add virtual environment to PATH
ENV PATH="/opt/venv/bin:$PATH"
# Create group and user with fallback to prevent build failures
# We use the ARG values here, but with a fallback mechanism to avoid build failures
RUN ( \
set -e; \
echo "Attempting to create/verify user with PUID=${PUID} and PGID=${PGID}..."; \
\
# Initialize variables \
TARGET_USER="nodeuser"; \
TARGET_GROUP="nodeuser"; \
NEW_GID="${PGID}"; \
NEW_UID="${PUID}"; \
\
# Step 1: Handle GID and group first \
echo "Setting up group for GID ${NEW_GID}..."; \
if getent group "${NEW_GID}" > /dev/null; then \
# GID exists, check which group has it \
EXISTING_GROUP=$(getent group "${NEW_GID}" | cut -d: -f1); \
echo "GID ${NEW_GID} is already used by group '${EXISTING_GROUP}'."; \
\
if [ "${EXISTING_GROUP}" = "${TARGET_GROUP}" ]; then \
echo "Group '${TARGET_GROUP}' already exists with correct GID ${NEW_GID}."; \
else \
# GID exists but used by a different group (likely 'node') \
echo "Will create '${TARGET_GROUP}' with a different GID to avoid conflict."; \
# Check if TARGET_GROUP exists but with wrong GID \
if getent group "${TARGET_GROUP}" > /dev/null; then \
echo "Group '${TARGET_GROUP}' exists but with wrong GID. Deleting it."; \
delgroup "${TARGET_GROUP}" || true; \
fi; \
# Create TARGET_GROUP with GID+1 (or find next available GID) \
NEXT_GID=$((${NEW_GID} + 1)); \
while getent group "${NEXT_GID}" > /dev/null; do \
NEXT_GID=$((${NEXT_GID} + 1)); \
done; \
echo "Creating group '${TARGET_GROUP}' with new GID ${NEXT_GID}."; \
addgroup -S -g "${NEXT_GID}" "${TARGET_GROUP}"; \
NEW_GID="${NEXT_GID}"; \
fi; \
else \
# GID does not exist - create group with desired GID \
echo "Creating group '${TARGET_GROUP}' with GID ${NEW_GID}."; \
addgroup -S -g "${NEW_GID}" "${TARGET_GROUP}"; \
fi; \
\
# Verify group was created \
echo "Verifying group '${TARGET_GROUP}' exists..."; \
getent group "${TARGET_GROUP}" || (echo "ERROR: Failed to find group '${TARGET_GROUP}'!"; exit 1); \
GID_FOR_USER=$(getent group "${TARGET_GROUP}" | cut -d: -f3); \
echo "Final group: '${TARGET_GROUP}' with GID ${GID_FOR_USER}"; \
\
# Step 2: Handle UID and user \
echo "Setting up user with UID ${NEW_UID}..."; \
if getent passwd "${NEW_UID}" > /dev/null; then \
# UID exists, check which user has it \
EXISTING_USER=$(getent passwd "${NEW_UID}" | cut -d: -f1); \
echo "UID ${NEW_UID} is already used by user '${EXISTING_USER}'."; \
\
if [ "${EXISTING_USER}" = "${TARGET_USER}" ]; then \
echo "User '${TARGET_USER}' already exists with correct UID ${NEW_UID}."; \
# Check if user needs group update \
CURRENT_GID=$(getent passwd "${TARGET_USER}" | cut -d: -f4); \
if [ "${CURRENT_GID}" != "${GID_FOR_USER}" ]; then \
echo "User '${TARGET_USER}' has wrong GID (${CURRENT_GID}). Modifying..."; \
deluser "${TARGET_USER}"; \
adduser -S -D -u "${NEW_UID}" -G "${TARGET_GROUP}" -s /bin/sh "${TARGET_USER}"; \
fi; \
else \
# Another user has our UID (e.g., 'node'). Delete it. \
echo "Deleting existing user '${EXISTING_USER}' with UID ${NEW_UID}."; \
deluser "${EXISTING_USER}" || true; \
\
# Now check if TARGET_USER exists but with wrong UID \
if getent passwd "${TARGET_USER}" > /dev/null; then \
echo "User '${TARGET_USER}' exists but with wrong UID. Updating..."; \
deluser "${TARGET_USER}" || true; \
fi; \
\
# Create user \
echo "Creating user '${TARGET_USER}' with UID ${NEW_UID} and group '${TARGET_GROUP}'."; \
adduser -S -D -u "${NEW_UID}" -G "${TARGET_GROUP}" -s /bin/sh "${TARGET_USER}"; \
fi; \
else \
# UID does not exist - check if user exists with wrong UID \
if getent passwd "${TARGET_USER}" > /dev/null; then \
echo "User '${TARGET_USER}' exists but with wrong UID. Updating..."; \
deluser "${TARGET_USER}" || true; \
fi; \
\
# Create user with desired UID \
echo "Creating user '${TARGET_USER}' with UID ${NEW_UID} and group '${TARGET_GROUP}'."; \
adduser -S -D -u "${NEW_UID}" -G "${TARGET_GROUP}" -s /bin/sh "${TARGET_USER}"; \
fi; \
\
# Create and set permissions on home directory \
echo "Setting up home directory for ${TARGET_USER}..."; \
mkdir -p /home/${TARGET_USER} && \
chown -R ${TARGET_USER}:${TARGET_GROUP} /home/${TARGET_USER} && \
chmod 755 /home/${TARGET_USER}; \
\
# Verify user was created \
echo "Verifying user '${TARGET_USER}' exists..."; \
getent passwd "${TARGET_USER}" || (echo "ERROR: Failed to find user '${TARGET_USER}'!"; exit 1); \
\
# Clean up and verify system files \
echo "Ensuring root user definition is pristine..."; \
chown root:root /etc/passwd /etc/group && \
chmod 644 /etc/passwd /etc/group && \
getent passwd root || (echo "ERROR: root not found after user/group operations!"; exit 1); \
\
# Print final status \
echo "Final user/group setup:"; \
id "${TARGET_USER}"; \
)
WORKDIR /usr/src/app
# Set UMASK - this applies to processes run by the user created in this stage
# The entrypoint will also set it based on the ENV var at runtime.
RUN umask ${UMASK}
# Dependencies stage
FROM base as deps
# Change ownership early so npm cache is owned correctly
RUN chown nodeuser:nodeuser /usr/src/app
# Switch to nodeuser before running npm commands
USER nodeuser
COPY --chown=nodeuser:nodeuser package*.json ./
COPY package*.json ./
RUN npm ci --only=production && \
# Remove npm cache
npm cache clean --force
# Switch back to root for the next stages if needed
USER root
# Development stage
FROM deps as development
USER root
ENV NODE_ENV=development
# Create and set up directories
RUN mkdir -p /usr/src/app/local_uploads /usr/src/app/uploads && \
chown -R nodeuser:nodeuser /usr/src/app/local_uploads /usr/src/app/uploads
COPY --chown=nodeuser:nodeuser package*.json ./
# Install dev dependencies
RUN npm install && \
npm cache clean --force
COPY --chown=nodeuser:nodeuser src/ ./src/
COPY --chown=nodeuser:nodeuser public/ ./public/
# If __tests__ and dev do not exist in your project root, these COPY lines will fail for the dev target
# COPY --chown=nodeuser:nodeuser __tests__/ ./__tests__/
# COPY --chown=nodeuser:nodeuser dev/ ./dev/
COPY --chown=nodeuser:nodeuser .eslintrc.json .eslintignore .prettierrc nodemon.json ./
# Create upload directories
RUN mkdir -p uploads local_uploads
# Switch back to nodeuser for runtime
USER nodeuser
EXPOSE 3000
# Production stage
FROM deps as production
USER root
ENV NODE_ENV=production
ENV UPLOAD_DIR /app/uploads
# Create and set up directories
RUN mkdir -p /usr/src/app/local_uploads /usr/src/app/uploads && \
chown -R nodeuser:nodeuser /usr/src/app/local_uploads /usr/src/app/uploads
# Copy only necessary source files and ensure ownership
COPY --chown=nodeuser:nodeuser src/ ./src/
COPY --chown=nodeuser:nodeuser public/ ./public/
# Copy the entrypoint script and make it executable
COPY --chown=root:root src/scripts/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh
# Copy source with specific paths to avoid unnecessary files
COPY src/ ./src/
COPY public/ ./public/
COPY __tests__/ ./__tests__/
COPY dev/ ./dev/
COPY .eslintrc.json .eslintignore ./
# Expose port
EXPOSE 3000
# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
CMD ["npm", "run", "dev"]
# Final user should be nodeuser for runtime
USER nodeuser
# Production stage
FROM deps as production
ENV NODE_ENV=production
# Create upload directory
RUN mkdir -p uploads
# Copy only necessary source files
COPY src/ ./src/
COPY public/ ./public/
# Expose port
EXPOSE 3000
# Default command to run (passed to entrypoint)
CMD ["npm", "start"]


@@ -1,122 +0,0 @@
# Local Development (Recommended Quick Start)
## Prerequisites
- **Node.js** >= 20.0.0
_Why?_: The app uses features only available in Node 20+.
- **npm** (comes with Node.js)
- **Python 3** (for notification testing, optional)
- **Apprise** (for notification testing, optional)
## Setup Instructions
1. **Clone the repository**
```bash
git clone https://github.com/yourusername/dumbdrop.git
cd dumbdrop
```
2. **Copy and configure environment variables**
```bash
cp .env.example .env
```
- Open `.env` in your editor and review the variables.
- At minimum, set:
- `PORT=3000`
- `LOCAL_UPLOAD_DIR=./local_uploads`
- `MAX_FILE_SIZE=1024`
- `DUMBDROP_PIN=` (optional, for PIN protection)
- `APPRISE_URL=` (optional, for notifications)
3. **Install dependencies**
```bash
npm install
```
4. **Start the development server**
```bash
npm run dev
```
- You should see output like:
```
DumbDrop server running on http://localhost:3000
```
5. **Open the app**
- Go to [http://localhost:3000](http://localhost:3000) in your browser.
---
## Testing File Uploads
- Drag and drop files onto the web interface.
- Supported file types: _All_, unless restricted by `ALLOWED_EXTENSIONS` in `.env`.
- Maximum file size: as set by `MAX_FILE_SIZE` (default: 1024 MB).
- Uploaded files are stored in the directory specified by `LOCAL_UPLOAD_DIR` (default: `./local_uploads`).
- To verify uploads:
- Check the `local_uploads` folder for your files.
- The UI will show a success message on upload.
---
## Notification Testing (Python/Apprise)
If you want to test notifications (e.g., for new uploads):
1. **Install Python 3**
- [Download Python](https://www.python.org/downloads/) if not already installed.
2. **Install Apprise**
```bash
pip install apprise
```
3. **Configure Apprise in `.env`**
- Set `APPRISE_URL` to your notification service URL (see [Apprise documentation](https://github.com/caronc/apprise)).
- Example for a local test:
```
APPRISE_URL=mailto://your@email.com
```
4. **Trigger a test notification**
- Upload a file via the web UI.
- If configured, you should receive a notification.
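To check the Apprise side independently of DumbDrop, a quick smoke test with the `apprise` CLI (installed by the pip package above) can help; the `mailto://` URL is just a placeholder:
```bash
# Send a test notification directly through Apprise (placeholder URL)
apprise -vv -t "DumbDrop test" -b "Apprise is working" "mailto://your@email.com"
```
If this command delivers a notification but uploads do not, the problem is more likely in your `.env` configuration than in Apprise itself.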
---
## Troubleshooting
**Problem:** Port already in use
**Solution:**
- Change the `PORT` in `.env` to a free port (see the checks after this list to find what is using it).
**Problem:** "Cannot find module 'express'"
**Solution:**
- Run `npm install` to install dependencies.
**Problem:** File uploads not working
**Solution:**
- Ensure `LOCAL_UPLOAD_DIR` exists and is writable.
- Check file size and extension restrictions in `.env`.
**Problem:** Notifications not sent
**Solution:**
- Verify `APPRISE_URL` is set and correct.
- Ensure Apprise is installed and accessible.
**Problem:** Permission denied on uploads
**Solution:**
- Make sure your user has write permissions to `local_uploads`.
**Problem:** Environment variables not loading
**Solution:**
- Double-check that `.env` exists and is formatted correctly.
- Restart the server after making changes.
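For the port and permission problems above, a couple of shell checks (on Linux/macOS, assuming `lsof` is available) can confirm the diagnosis quickly:
```bash
# See which process is already listening on port 3000
lsof -i :3000

# Confirm the upload directory exists and is writable by your user
ls -ld ./local_uploads
touch ./local_uploads/.write-test && rm ./local_uploads/.write-test
```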
---
## Additional Notes
- For Docker-based development, see the "Quick Start" and "Docker Compose" sections in the main README.
- For more advanced configuration, review the "Configuration" section in the main README.
- If you encounter issues not listed here, please open an issue on GitHub or check the Discussions tab.

243  README.md

@@ -4,15 +4,14 @@ A stupid simple file upload application that provides a clean, modern interface
![DumbDrop](https://github.com/user-attachments/assets/1b909d26-9ead-4dc7-85bc-8bfda0d366c1)
No auth (unless you want it!), no complicated setup (unless you want to!), no nothing. Just a simple way to drop dumb files into a dumb folder... or an S3 bucket!
No auth (unless you want it now!), no storage, no nothing. Just a simple file uploader to drop dumb files into a dumb folder.
## Table of Contents
- [Quick Start](#quick-start)
- [Production Deployment with Docker](#production-deployment-with-docker)
- [Local Development (Recommended Quick Start)](LOCAL_DEVELOPMENT.md)
- [Features](#features)
- [Configuration](#configuration)
- [Security](#security)
- [Development](#development)
- [Technical Details](#technical-details)
- [Demo Mode](demo.md)
- [Contributing](#contributing)
@@ -20,225 +19,171 @@ No auth (unless you want it!), no complicated setup (unless you want to!), no no
## Quick Start
### Option 1: Docker (For Dummies - Local Storage)
### Prerequisites
- Docker (recommended)
- Node.js >=20.0.0 (for local development)
### Option 1: Docker (For Dummies)
```bash
# Pull and run with one command (uses local storage)
docker run -p 3000:3000 -v ./uploads:/app/uploads dumbwareio/dumbdrop:latest
# Pull and run with one command
docker run -p 3000:3000 -v ./local_uploads:/app/uploads dumbwareio/dumbdrop:latest
```
1. Go to http://localhost:3000
2. Upload a File - It'll show up in `./uploads` on your host machine.
3. Celebrate how dumb easy this was.
2. Upload a File - It'll show up in ./local_uploads
3. Celebrate how dumb easy this was
### Option 2: Docker Compose (For Dummies who like customizing - Local or S3)
### Option 2: Docker Compose (For Dummies who like customizing)
Create a `docker-compose.yml` file:
```yaml
services:
dumbdrop:
image: dumbwareio/dumbdrop:latest # Use the desired tag/version
ports:
- "3000:3000" # Map host port 3000 to container port 3000
volumes:
# Mount a host directory to store metadata (.metadata folder)
# This is needed even for S3 mode to track ongoing uploads.
# For local storage mode, this is also where files land.
- ./uploads:/app/uploads
environment:
# --- Core Settings ---
# STORAGE_TYPE: "local" # Options: "local", "s3" (Defaults to "local" if unset)
DUMBDROP_TITLE: "My DumbDrop"
BASE_URL: "http://localhost:3000/" # Must end with a slash!
MAX_FILE_SIZE: 1024 # Max file size in MB
DUMBDROP_PIN: "" # Optional PIN (4-10 digits)
AUTO_UPLOAD: "false" # Set to "true" to upload immediately
# --- Local Storage Settings (if STORAGE_TYPE="local") ---
UPLOAD_DIR: "/app/uploads" # *Must* be set inside container if using local storage
# --- S3 Storage Settings (if STORAGE_TYPE="s3") ---
# S3_REGION: "us-east-1" # Your S3 region (e.g., us-west-000 for B2)
# S3_BUCKET_NAME: "your-s3-bucket-name" # Your bucket name
# S3_ACCESS_KEY_ID: "YOUR_ACCESS_KEY" # Your S3 Access Key
# S3_SECRET_ACCESS_KEY: "YOUR_SECRET_KEY" # Your S3 Secret Key
# S3_ENDPOINT_URL: "" # Optional: e.g., https://s3.us-west-000.backblazeb2.com for B2, http://minio.local:9000 for Minio
# S3_FORCE_PATH_STYLE: "false" # Optional: Set to "true" for providers like Minio
# --- Optional Settings ---
# ALLOWED_EXTENSIONS: ".jpg,.png,.pdf" # Comma-separated allowed extensions
# ALLOWED_IFRAME_ORIGINS: "https://organizr.example.com" # Allow embedding in specific origins
# APPRISE_URL: "" # For notifications
# FOOTER_LINKS: "My Site @ https://example.com" # Custom footer links
# CLIENT_MAX_RETRIES: 5 # Client-side chunk retry attempts
restart: unless-stopped
dumbdrop:
image: dumbwareio/dumbdrop:latest
ports:
- 3000:3000
volumes:
# Where your uploaded files will land
- ./local_uploads:/app/uploads
environment:
# The title shown in the web interface
DUMBDROP_TITLE: DumbDrop
# Maximum file size in MB
MAX_FILE_SIZE: 1024
# Optional PIN protection (leave empty to disable)
DUMBDROP_PIN: 123456
# Upload without clicking button
AUTO_UPLOAD: false
# The base URL for the application
BASE_URL: http://localhost:3000
```
Then run:
```bash
docker compose up -d
```
1. Go to http://localhost:3000
2. Upload a File - It'll show up in `./uploads` (if local) or your S3 bucket (if S3).
3. Rejoice in the glory of your dumb uploads, now potentially in the cloud!
> **Note:** When using `STORAGE_TYPE=s3`, the local volume mount (`./uploads:/app/uploads`) is still used to store temporary metadata files (`.metadata` folder) for tracking multipart uploads. The actual files go to S3.
1. Go to http://localhost:3000
2. Upload a File - It'll show up in ./local_uploads
3. Rejoice in the glory of your dumb uploads
### Option 3: Running Locally (For Developers)
For local development setup without Docker, see the dedicated guide:
> If you're a developer, check out our [Dev Guide](#development) for the dumb setup.
👉 [Local Development Guide](LOCAL_DEVELOPMENT.md)
1. Install dependencies:
```bash
npm install
```
2. Set environment variables in `.env`:
```env
PORT=3000 # Port to run the server on
MAX_FILE_SIZE=1024 # Maximum file size in MB
DUMBDROP_PIN=123456 # Optional PIN protection
```
3. Start the server:
```bash
npm start
```
#### Windows Users
If you're using Windows PowerShell with Docker, use this format for paths:
```bash
docker run -p 3000:3000 -v "${PWD}\local_uploads:/app/uploads" dumbwareio/dumbdrop:latest
```
## Features
- 🚀 Drag and drop file uploads
- 📁 Multiple file selection
- ☁️ **Optional S3 Storage:** Store files in AWS S3, Backblaze B2, MinIO, or other S3-compatible services.
- 💾 **Local Storage:** Default simple file storage on the server's disk.
- 🎨 Clean, responsive UI with Dark Mode
- 📦 Docker support with easy configuration
- 📂 Directory upload support (maintains structure in local storage or as S3 keys)
- 📂 Directory upload support (maintains structure)
- 🔒 Optional PIN protection
- 📱 Mobile-friendly interface
- 🔔 Configurable notifications via Apprise
- ⚡ Zero dependencies on client-side
- 🛡️ Built-in security features (rate limiting, security headers)
- 🛡️ Built-in security features
- 💾 Configurable file size limits
- 🎯 File extension filtering
- ⚙️ Native S3 Multipart Upload for large files when using S3 storage.
- 🔗 S3 Presigned URLs for efficient downloads (offloads server bandwidth).
## Configuration
DumbDrop is configured primarily through environment variables.
### Environment Variables
| Variable | Description | Default | Required |
|--------------------------|------------------------------------------------------------------------------------------------------------|----------------------------------------------|------------------------------|
| **`STORAGE_TYPE`** | Storage backend: `local` or `s3` | `local` | No |
| `PORT` | Server port | `3000` | No |
| `BASE_URL` | Base URL for the application (must end with `/`) | `http://localhost:PORT/` | No |
| `MAX_FILE_SIZE` | Maximum file size in MB | `1024` | No |
| `DUMBDROP_PIN` | PIN protection (4-10 digits) | None | No |
| `DUMBDROP_TITLE` | Title displayed in the browser tab/header | `DumbDrop` | No |
| `AUTO_UPLOAD` | Enable automatic upload on file selection (`true`/`false`) | `false` | No |
| `ALLOWED_EXTENSIONS` | Comma-separated list of allowed file extensions (e.g., `.jpg,.png`) | None (all allowed) | No |
| `ALLOWED_IFRAME_ORIGINS` | Comma-separated list of origins allowed to embed in an iframe | None | No |
| `FOOTER_LINKS` | Comma-separated custom footer links (Format: `"Text @ URL"`) | None | No |
| `CLIENT_MAX_RETRIES` | Max retry attempts for client-side chunk uploads | `5` | No |
| `DEMO_MODE` | Run in demo mode (`true`/`false`). Overrides storage settings. | `false` | No |
| `APPRISE_URL` | Apprise URL for notifications | None | No |
| `APPRISE_MESSAGE` | Notification message template (`{filename}`, `{size}`, `{storage}`) | `New file uploaded...` | No |
| `APPRISE_SIZE_UNIT` | Size unit for notifications (`B`, `KB`, `MB`, `GB`, `TB`, `Auto`) | `Auto` | No |
| --- | --- | --- | --- |
| **Local Storage Only:** | | | |
| `UPLOAD_DIR` | **(Docker)** Directory for uploads/metadata inside container | None | Yes (if `STORAGE_TYPE=local`) |
| `LOCAL_UPLOAD_DIR` | **(Local Dev)** Directory for uploads/metadata on host machine | `./local_uploads` | No (if `STORAGE_TYPE=local`) |
| --- | --- | --- | --- |
| **S3 Storage Only:** | | | |
| `S3_REGION` | S3 Region (e.g., `us-east-1`, `us-west-000`) | None | Yes (if `STORAGE_TYPE=s3`) |
| `S3_BUCKET_NAME` | Name of the S3 Bucket | None | Yes (if `STORAGE_TYPE=s3`) |
| `S3_ACCESS_KEY_ID` | S3 Access Key ID | None | Yes (if `STORAGE_TYPE=s3`) |
| `S3_SECRET_ACCESS_KEY` | S3 Secret Access Key | None | Yes (if `STORAGE_TYPE=s3`) |
| `S3_ENDPOINT_URL` | **(Optional)** Custom S3 endpoint URL (for B2, MinIO, etc.) | None (uses default AWS endpoint) | No |
| `S3_FORCE_PATH_STYLE` | **(Optional)** Force path-style S3 requests (`true`/`false`). Needed for MinIO, etc. | `false` | No |
| Variable | Description | Default | Required |
|------------------|---------------------------------------|---------|----------|
| PORT | Server port | 3000 | No |
| BASE_URL | Base URL for the application | http://localhost:PORT | No |
| MAX_FILE_SIZE | Maximum file size in MB | 1024 | No |
| DUMBDROP_PIN | PIN protection (4-10 digits) | None | No |
| DUMBDROP_TITLE | Site title displayed in header | DumbDrop| No |
| APPRISE_URL | Apprise URL for notifications | None | No |
| APPRISE_MESSAGE | Notification message template | New file uploaded {filename} ({size}), Storage used {storage} | No |
| APPRISE_SIZE_UNIT| Size unit for notifications | Auto | No |
| AUTO_UPLOAD | Enable automatic upload on file selection | false | No |
| ALLOWED_EXTENSIONS| Comma-separated list of allowed file extensions | None | No |
- **Storage:** Set `STORAGE_TYPE` to `s3` to enable S3 storage. Otherwise, it defaults to `local`.
- **Local Storage:** If `STORAGE_TYPE=local`, `UPLOAD_DIR` (in Docker) or `LOCAL_UPLOAD_DIR` (local dev) determines where files are stored.
- **S3 Storage:** If `STORAGE_TYPE=s3`, the `S3_*` variables are required. `UPLOAD_DIR`/`LOCAL_UPLOAD_DIR` is still used for storing temporary `.metadata` files locally.
- **S3 Endpoint/Path Style:** Use `S3_ENDPOINT_URL` and `S3_FORCE_PATH_STYLE` only if connecting to a non-AWS S3-compatible service.
- **BASE_URL**: Must end with a trailing slash (`/`). The app will fail to start otherwise. Example: `http://your.domain.com/dumbdrop/`.
- **Security Note (S3):** For production, using IAM Roles (e.g., EC2 Instance Profiles, ECS Task Roles) is strongly recommended over embedding Access Keys in environment variables.
See `.env.example` for a template.
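As a rough illustration of the S3 path, a minimal `.env` sketch for an S3-compatible backend might look like the following (bucket, keys, and endpoint are placeholders):
```env
STORAGE_TYPE=s3
S3_REGION=us-west-000
S3_BUCKET_NAME=your-s3-bucket-name
S3_ACCESS_KEY_ID=YOUR_ACCESS_KEY
S3_SECRET_ACCESS_KEY=YOUR_SECRET_KEY
S3_ENDPOINT_URL=https://s3.us-west-000.backblazeb2.com
S3_FORCE_PATH_STYLE=false
# BASE_URL must still end with a trailing slash
BASE_URL=http://localhost:3000/
# A local directory is still used for temporary .metadata files
LOCAL_UPLOAD_DIR=./local_uploads
```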
<details>
<summary>ALLOWED_IFRAME_ORIGINS</summary>
To allow this app to be embedded in an iframe on specific origins (such as Organizr), set the `ALLOWED_IFRAME_ORIGINS` environment variable. For example:
```env
ALLOWED_IFRAME_ORIGINS=https://organizr.example.com,https://myportal.com
```
- If not set, the app will only allow itself to be embedded in an iframe on the same origin (default security).
- If set, the app will allow embedding in iframes on the specified origins and itself.
- **Security Note:** Only add trusted origins. Allowing arbitrary origins can expose your app to clickjacking and other attacks.
</details>
<details>
<summary>File Extension Filtering</summary>
To restrict which file types can be uploaded, set the `ALLOWED_EXTENSIONS` environment variable with comma-separated extensions (including the dot):
### File Extension Filtering
To restrict which file types can be uploaded, set the `ALLOWED_EXTENSIONS` environment variable. For example:
```env
ALLOWED_EXTENSIONS=.jpg,.jpeg,.png,.pdf,.doc,.docx,.txt
```
If not set, all file extensions will be allowed.
</details>
<details>
<summary>Notification Setup</summary>
### Notification Setup
#### Message Templates
The notification message supports the following placeholders:
- `{filename}`: Name of the uploaded file (or S3 Key)
- `{filename}`: Name of the uploaded file
- `{size}`: Size of the file (formatted according to APPRISE_SIZE_UNIT)
- `{storage}`: Total size of all files in upload directory (Local storage only)
- `{storage}`: Total size of all files in upload directory
Example message template:
```env
APPRISE_MESSAGE: New file dropped: {filename} ({size})!
APPRISE_MESSAGE: New file uploaded {filename} ({size}), Storage used {storage}
```
Size formatting examples:
- Auto (default): Chooses nearest unit (e.g., "1.44MB", "256KB")
- Fixed unit: Set APPRISE_SIZE_UNIT to B, KB, MB, GB, or TB
Both {size} and {storage} use the same formatting rules based on APPRISE_SIZE_UNIT.
#### Notification Support
- Integration with [Apprise](https://github.com/caronc/apprise?tab=readme-ov-file#supported-notifications) for flexible notifications
- Customizable notification messages
- Support for all Apprise notification services
- Customizable notification messages with filename templating
- Optional - disabled if no APPRISE_URL is set
</details>
<details>
<summary>S3 Cleanup Recommendation</summary>
When using `STORAGE_TYPE=s3`, DumbDrop relies on the native S3 Multipart Upload mechanism. If an upload is interrupted, incomplete parts may remain in your S3 bucket.
**It is strongly recommended to configure a Lifecycle Rule on your S3 bucket** (or use your provider's equivalent tool) to automatically abort and delete incomplete multipart uploads after a reasonable period (e.g., 1-7 days). This prevents orphaned parts from accumulating costs. DumbDrop's cleanup only removes local tracking files, not the actual S3 parts.
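For AWS S3 specifically, one way to set such a rule is with the AWS CLI; the sketch below is an example only (bucket name and retention period are placeholders, and other providers expose their own equivalents):
```bash
# Write a lifecycle policy that aborts incomplete multipart uploads after 3 days
cat > lifecycle.json <<'EOF'
{
  "Rules": [
    {
      "ID": "abort-incomplete-multipart-uploads",
      "Status": "Enabled",
      "Filter": {},
      "AbortIncompleteMultipartUpload": { "DaysAfterInitiation": 3 }
    }
  ]
}
EOF

# Apply it to the bucket (replace your-s3-bucket-name)
aws s3api put-bucket-lifecycle-configuration \
  --bucket your-s3-bucket-name \
  --lifecycle-configuration file://lifecycle.json
```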
</details>
## Security
### Features
- Variable-length PIN support (4-10 digits)
- Constant-time PIN comparison
- Input sanitization (filenames, paths)
- Rate limiting on API endpoints
- Security headers (CSP, HSTS, etc.)
- Input sanitization
- Rate limiting
- File extension filtering
- No client-side PIN storage
- Secure file handling (uses S3 presigned URLs for downloads if S3 is enabled)
- Secure file handling
## Technical Details
### Stack
- **Backend**: Node.js (>=20.0.0) with Express
- **Frontend**: Vanilla JavaScript (ES6+)
- **Storage**: Local Filesystem or S3-compatible Object Storage
- **Container**: Docker with multi-stage builds
- **Security**: Express security middleware
- **Upload**: Chunked uploads via client-side logic, processed via Express middleware, using native S3 Multipart Upload when `STORAGE_TYPE=s3`.
- **Upload**: Chunked file handling via Multer
- **Notifications**: Apprise integration
- **SDK**: AWS SDK for JavaScript v3 (`@aws-sdk/client-s3`, `@aws-sdk/s3-request-presigner`) when `STORAGE_TYPE=s3`.
### Dependencies
- `express`: Web framework
- `@aws-sdk/client-s3`: AWS S3 SDK (used if `STORAGE_TYPE=s3`)
- `@aws-sdk/s3-request-presigner`: For S3 presigned URLs (used if `STORAGE_TYPE=s3`)
- `cookie-parser`: Parse cookies
- `cors`: Cross-origin resource sharing
- `dotenv`: Environment configuration
- `express-rate-limit`: Rate limiting
- express: Web framework
- multer: File upload handling
- apprise: Notification system
- cors: Cross-origin resource sharing
- dotenv: Environment configuration
- express-rate-limit: Rate limiting
## Contributing
@@ -248,10 +193,14 @@ When using `STORAGE_TYPE=s3`, DumbDrop relies on the native S3 Multipart Upload
4. Push to the branch (`git push origin feature/amazing-feature`)
5. Open a Pull Request
See [Local Development (Recommended Quick Start)](LOCAL_DEVELOPMENT.md) for local setup and guidelines.
See [Development Guide](dev/README.md) for local setup and guidelines.
---
Made with ❤️ by [DumbWare.io](https://dumbware.io)
## Future Features
- Camera Upload for Mobile
> Got an idea? [Open an issue](https://github.com/dumbwareio/dumbdrop/issues) or [submit a PR](https://github.com/dumbwareio/dumbdrop/pulls)

50  dev/.dockerignore Normal file

@@ -0,0 +1,50 @@
# Version control
.git
.gitignore
# Dependencies
node_modules
npm-debug.log
yarn-debug.log
yarn-error.log
# Environment variables
.env
.env.*
!.env.example
# Development
.vscode
.idea
*.swp
*.swo
# Build outputs
dist
build
coverage
# Local uploads (development only)
local_uploads
# Logs
logs
*.log
# System files
.DS_Store
Thumbs.db
# Docker
.docker
docker-compose*.yml
Dockerfile*
# Documentation
README.md
CHANGELOG.md
docs
# Development configurations
.editorconfig
nodemon.json

22  dev/.env.dev.example Normal file

@@ -0,0 +1,22 @@
# Development Environment Settings
# Server Configuration
PORT=3000 # Development server port
# Upload Settings
MAX_FILE_SIZE=1024 # Maximum file size in MB for development
AUTO_UPLOAD=false # Disable auto-upload by default in development
UPLOAD_DIR=../local_uploads # Local development upload directory
# Development Specific
DUMBDROP_TITLE=DumbDrop-Dev # Development environment indicator
DUMBDROP_PIN=123456 # Default development PIN (change in production)
# Optional Development Features
NODE_ENV=development # Ensures development mode
DEBUG=dumbdrop:* # Enable debug logging (if implemented)
# Development Notifications (Optional)
APPRISE_URL= # Test notification endpoint
APPRISE_MESSAGE=[DEV] New file uploaded - {filename} ({size}), Storage used {storage}
APPRISE_SIZE_UNIT=auto

46  dev/Dockerfile.dev Normal file

@@ -0,0 +1,46 @@
# Base stage for shared configurations
FROM node:20-alpine as base
# Install python and create virtual environment with minimal dependencies
RUN apk add --no-cache python3 py3-pip && \
python3 -m venv /opt/venv && \
rm -rf /var/cache/apk/*
# Activate virtual environment and install apprise
RUN . /opt/venv/bin/activate && \
pip install --no-cache-dir apprise && \
find /opt/venv -type d -name "__pycache__" -exec rm -r {} +
# Add virtual environment to PATH
ENV PATH="/opt/venv/bin:$PATH"
WORKDIR /usr/src/app
# Dependencies stage
FROM base as deps
COPY package*.json ./
RUN npm ci --only=production && \
npm cache clean --force
# Development stage
FROM deps as development
ENV NODE_ENV=development
# Install dev dependencies
RUN npm install && \
npm cache clean --force
# Create upload directories
RUN mkdir -p uploads local_uploads
# Copy source with specific paths to avoid unnecessary files
COPY src/ ./src/
COPY public/ ./public/
COPY dev/ ./dev/
COPY .eslintrc.json .eslintignore ./
# Expose port
EXPOSE 3000
CMD ["npm", "run", "dev"]

73  dev/README.md Normal file

@@ -0,0 +1,73 @@
# DumbDrop Development Guide
## Quick Start
1. Clone the repository:
```bash
git clone https://github.com/yourusername/DumbDrop.git
cd DumbDrop
```
2. Set up development environment:
```bash
cd dev
cp .env.dev.example .env.dev
```
3. Start development server:
```bash
docker-compose -f docker-compose.dev.yml up
```
The application will be available at http://localhost:3000 with hot-reloading enabled.
## Development Environment Features
- Hot-reloading with nodemon
- Development-specific environment variables
- Local file storage in `../local_uploads`
- Debug logging enabled
- Development-specific notifications
## Project Structure
```
DumbDrop/
├── dev/ # Development configurations
│ ├── docker-compose.dev.yml
│ ├── .env.dev.example
│ └── README.md
├── src/ # Application source code
├── public/ # Static assets
├── local_uploads/ # Development file storage
└── [Production files in root]
```
## Development Workflow
1. Create feature branches from `main`:
```bash
git checkout -b feature/your-feature-name
```
2. Make changes and test locally
3. Commit using conventional commits:
```bash
feat: add new feature
fix: resolve bug
docs: update documentation
```
4. Push and create pull request
## Debugging
- Use `DEBUG=dumbdrop:*` for detailed logs
- Container shell access: `docker-compose -f docker-compose.dev.yml exec app sh`
- Logs: `docker-compose -f docker-compose.dev.yml logs -f app`
## Common Issues
1. Port conflicts: Change port in `.env.dev`
2. File permissions: Ensure proper ownership of `local_uploads`
3. Node modules: Remove and rebuild with `docker-compose -f docker-compose.dev.yml build --no-cache`

74  dev/dev.sh Executable file

@@ -0,0 +1,74 @@
#!/bin/bash
# Set script to exit on error
set -e
# Enable Docker BuildKit
export DOCKER_BUILDKIT=1
# Colors for pretty output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
RED='\033[0;31m'
NC='\033[0m' # No Color
# Helper function for pretty printing
print_message() {
echo -e "${BLUE}🔧 ${1}${NC}"
}
# Ensure we're in the right directory
cd "$(dirname "$0")"
case "$1" in
"up")
print_message "Starting DumbDrop in development mode..."
if [ ! -f .env.dev ]; then
print_message "No .env.dev found. Creating from example..."
cp .env.dev.example .env.dev
fi
docker compose -f docker-compose.dev.yml up -d --build
print_message "Container logs:"
docker compose -f docker-compose.dev.yml logs
;;
"down")
print_message "Stopping DumbDrop development environment..."
docker compose -f docker-compose.dev.yml down
;;
"logs")
print_message "Showing DumbDrop logs..."
docker compose -f docker-compose.dev.yml logs -f
;;
"rebuild")
print_message "Rebuilding DumbDrop..."
docker compose -f docker-compose.dev.yml build --no-cache
docker compose -f docker-compose.dev.yml up
;;
"clean")
print_message "Cleaning up development environment..."
docker compose -f docker-compose.dev.yml down -v --remove-orphans
rm -f .env.dev
print_message "Cleaned up containers, volumes, and env file"
;;
"shell")
print_message "Opening shell in container..."
docker compose -f docker-compose.dev.yml exec app sh
;;
"lint")
print_message "Running linter..."
docker compose -f docker-compose.dev.yml exec app npm run lint
;;
*)
echo -e "${GREEN}DumbDrop Development Helper${NC}"
echo "Usage: ./dev.sh [command]"
echo ""
echo "Commands:"
echo " up - Start development environment (creates .env.dev if missing)"
echo " down - Stop development environment"
echo " logs - Show container logs"
echo " rebuild - Rebuild container without cache and start"
echo " clean - Clean up everything (containers, volumes, env)"
echo " shell - Open shell in container"
echo " lint - Run linter"
;;
esac


@@ -0,0 +1,30 @@
services:
app:
build:
context: ..
dockerfile: dev/Dockerfile.dev
target: development
args:
DOCKER_BUILDKIT: 1
x-bake:
options:
dockerignore: dev/.dockerignore
volumes:
- ..:/usr/src/app
- /usr/src/app/node_modules
ports:
- "3000:3000"
environment:
- NODE_ENV=development
- PORT=3000
- MAX_FILE_SIZE=1024
- AUTO_UPLOAD=false
- DUMBDROP_TITLE=DumbDrop-Dev
command: npm run dev
restart: unless-stopped
# Enable container debugging if needed
# stdin_open: true
# tty: true
# Add development labels
labels:
- "dev.dumbware.environment=development"


@@ -7,8 +7,6 @@ services:
# Replace "./local_uploads" (before the colon) with the path where the files land
- ./local_uploads:/app/uploads
environment: # Environment variables for the DumbDrop service
# Explicitly set upload directory inside the container
UPLOAD_DIR: /app/uploads
DUMBDROP_TITLE: DumbDrop # The title shown in the web interface
MAX_FILE_SIZE: 1024 # Maximum file size in MB
DUMBDROP_PIN: 123456 # Optional PIN protection (4-10 digits, leave empty to disable)
@@ -16,27 +14,10 @@ services:
BASE_URL: http://localhost:3000 # The base URL for the application
# Additional available environment variables (commented out with defaults)
# FOOTER_LINKS: "My Site @ https://example.com,Docs @ https://docs.example.com" # Custom footer links
# PORT: 3000 # Server port (default: 3000)
# NODE_ENV: production # Node environment (development/production)
# DEBUG: false # Debug mode for verbose logging (default: false in production, true in development)
# APPRISE_URL: "" # Apprise notification URL for upload notifications (default: none)
# APPRISE_MESSAGE: "New file uploaded - {filename} ({size}), Storage used {storage}" # Notification message template with placeholders: {filename}, {size}, {storage}
# APPRISE_SIZE_UNIT: "Auto" # Size unit for notifications (B, KB, MB, GB, TB, or Auto)
# ALLOWED_EXTENSIONS: ".jpg,.jpeg,.png,.pdf,.doc,.docx,.txt" # Comma-separated list of allowed file extensions (default: all allowed)
# PUID: 1000 # User ID for file ownership (default: 1000)
# PGID: 1000 # Group ID for file ownership (default: 1000)
# UMASK: "000" # File permissions mask (default: 000)
restart: unless-stopped
# user: "${PUID}:${PGID}" # Don't set user here, entrypoint handles it
# Consider adding healthcheck
# healthcheck:
# test: ["CMD", "curl", "--fail", "http://localhost:3000/health"] # Assuming a /health endpoint exists
# interval: 30s
# timeout: 10s
# retries: 3
# healthcheck:
# test: ["CMD", "curl", "--fail", "http://localhost:3000/health"]
# interval: 30s
# timeout: 10s
# retries: 3
# start_period: 30s


(binary image changed; 3.6 KiB before and after)


(binary image changed; 639 B before and after)


@@ -1,3 +0,0 @@
{
"ignore": ["asset-manifest.json", "manifest.json"]
}

2094  package-lock.json generated

File diff suppressed because it is too large


@@ -4,19 +4,16 @@
"main": "src/server.js",
"scripts": {
"start": "node src/server.js",
"dev": "nodemon src/server.js",
"dev": "nodemon --legacy-watch src/server.js",
"lint": "eslint .",
"lint:fix": "eslint . --fix",
"format": "prettier --write .",
"predev": "node -e \"const v=process.versions.node.split('.');if(v[0]<20) {console.error('Node.js >=20.0.0 required');process.exit(1)}\""
"format": "prettier --write ."
},
"keywords": [],
"author": "",
"license": "ISC",
"description": "A simple file upload application",
"dependencies": {
"@aws-sdk/client-s3": "^3.803.0",
"@aws-sdk/s3-request-presigner": "^3.803.0",
"apprise": "^1.0.0",
"cookie-parser": "^1.4.7",
"cors": "^2.8.5",

File diff suppressed because it is too large


@@ -4,8 +4,7 @@
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>{{SITE_TITLE}} - Login</title>
<link rel="stylesheet" href="{{BASE_URL}}styles.css">
<link rel="icon" type="image/svg+xml" href="{{BASE_URL}}assets/icon.svg">
<link rel="stylesheet" href="styles.css">
<style>
.login-container {
display: flex;
@@ -54,7 +53,6 @@
background-color: var(--textarea-bg);
}
</style>
<script>window.BASE_URL = '{{BASE_URL}}';</script>
</head>
<body>
<div class="login-container">
@@ -126,7 +124,7 @@
// Handle form submission
const verifyPin = async (pin) => {
try {
const response = await fetch(window.BASE_URL + 'api/auth/verify-pin', {
const response = await fetch('/api/auth/verify-pin', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ pin })
@@ -212,7 +210,7 @@
};
// Check PIN length and initialize
fetch(window.BASE_URL + 'api/auth/pin-required')
fetch('/api/auth/pin-required')
.then(response => {
if (response.status === 429) {
throw new Error('Too many attempts. Please wait before trying again.');
@@ -241,17 +239,6 @@
pinContainer.style.pointerEvents = 'none';
}
});
document.addEventListener('DOMContentLoaded', function() {
// Rewrite asset URLs to use BASE_URL as prefix if not absolute
const baseUrl = window.BASE_URL;
document.querySelectorAll('link[rel="stylesheet"], link[rel="icon"]').forEach(link => {
const href = link.getAttribute('href');
if (href && !href.startsWith('http') && !href.startsWith('data:') && !href.startsWith(baseUrl)) {
link.setAttribute('href', baseUrl + href.replace(/^\//, ''));
}
});
});
</script>
</body>
</html>


@@ -1,32 +0,0 @@
const CACHE_NAME = "DUMBDROP_PWA_CACHE_V1";
const ASSETS_TO_CACHE = [];
const preload = async () => {
console.log("Installing web app");
return await caches.open(CACHE_NAME)
.then(async (cache) => {
console.log("caching index and important routes");
const response = await fetch("/asset-manifest.json");
const assets = await response.json();
ASSETS_TO_CACHE.push(...assets);
console.log("Assets Cached:", ASSETS_TO_CACHE);
return cache.addAll(ASSETS_TO_CACHE);
});
}
// Fetch asset manifest dynamically
globalThis.addEventListener("install", (event) => {
event.waitUntil(preload());
});
globalThis.addEventListener("activate", (event) => {
event.waitUntil(clients.claim());
});
globalThis.addEventListener("fetch", (event) => {
event.respondWith(
caches.match(event.request).then((cachedResponse) => {
return cachedResponse || fetch(event.request);
})
);
});


@@ -39,7 +39,6 @@ body {
display: flex;
justify-content: center;
padding-top: 2rem;
padding-bottom: 150px;
color: var(--text-color);
transition: background-color 0.3s ease, color 0.3s ease;
}
@@ -47,7 +46,7 @@ body {
.container {
width: 100%;
max-width: 600px;
padding: 20px 20px 80px 20px;
padding: 20px;
text-align: center;
position: relative;
}
@@ -360,46 +359,3 @@ button:disabled {
font-size: 1.125rem;
}
}
/* Footer Styles */
footer {
position: fixed;
bottom: 0;
left: 0;
right: 0;
width: 100%;
padding: 15px;
text-align: center;
font-size: 0.85rem;
color: var(--text-color);
opacity: 0.9;
border-top: 1px solid var(--border-color);
transition: background-color 0.3s ease, color 0.3s ease;
background-color: var(--bg-color);
z-index: 100;
}
footer a {
color: var(--text-color);
text-decoration: none;
transition: opacity 0.2s ease;
}
footer a:hover {
opacity: 1;
text-decoration: underline;
}
.footer-separator {
margin: 0 0.5em;
}
@media (max-width: 480px) {
footer {
font-size: 0.75rem;
}
.footer-separator {
margin: 0 0.3em;
}
}


@@ -2,34 +2,17 @@
* Main application setup and configuration.
* Initializes Express app, middleware, routes, and static file serving.
* Handles core application bootstrapping and configuration validation.
* Imports and makes use of the configured storage adapter.
*/
const express = require('express');
const cors = require('cors');
const cookieParser = require('cookie-parser');
const path = require('path');
const fs = require('fs'); // Needed for reading HTML templates
const fs = require('fs');
// Load configuration FIRST
const { config, validateConfig } = require('./config');
const logger = require('./utils/logger');
// Validate config EARLY, before loading anything else that depends on it
try {
validateConfig();
logger.info("Configuration loaded and validated successfully.");
} catch (validationError) {
logger.error("!!! Configuration validation failed. Server cannot start. !!!");
logger.error(validationError.message);
process.exit(1); // Exit if config is invalid
}
// Load storage adapter AFTER config is validated
// The storage/index.js file itself will log which adapter is being used.
const { storageAdapter } = require('./storage'); // This will load the correct adapter
// Load other utilities and middleware
// const { ensureDirectoryExists } = require('./utils/fileUtils'); // No longer needed here
const { ensureDirectoryExists } = require('./utils/fileUtils');
const { securityHeaders, requirePin } = require('./middleware/security');
const { safeCompare } = require('./utils/security');
const { initUploadLimiter, pinVerifyLimiter, downloadLimiter } = require('./middleware/rateLimiter');
@@ -38,152 +21,129 @@ const { injectDemoBanner, demoMiddleware } = require('./utils/demoMode');
// Create Express app
const app = express();
// Trust proxy headers (important for rate limiting and secure cookies if behind proxy)
app.set('trust proxy', 1); // Adjust the number based on your proxy setup depth
// Add this line to trust the first proxy
app.set('trust proxy', 1);
// --- Middleware Setup ---
app.use(cors()); // TODO: Configure CORS more strictly for production if needed
// Middleware setup
app.use(cors());
app.use(cookieParser());
app.use(express.json()); // For parsing application/json
app.use(securityHeaders); // Apply security headers
app.use(express.json());
app.use(securityHeaders);
// --- Demo Mode Middleware ---
// Apply demo middleware early if demo mode is active
// Note: Demo mode is now also checked within adapters/storage factory
if (config.isDemoMode) {
app.use(demoMiddleware); // This might intercept routes if demoAdapter is fully implemented
}
// --- Route Definitions ---
// Import route handlers AFTER middleware setup
// Note: uploadRouter is now an object { router }, so destructure it
// Import routes
const { router: uploadRouter } = require('./routes/upload');
const fileRoutes = require('./routes/files');
const authRoutes = require('./routes/auth');
// Apply Rate Limiting and Auth Middleware to Routes
// Add demo middleware before your routes
app.use(demoMiddleware);
// Use routes with appropriate middleware
app.use('/api/auth', pinVerifyLimiter, authRoutes);
// Apply PIN check and rate limiting to upload/file routes
// The requirePin middleware now checks config.pin internally
app.use('/api/upload', requirePin(config.pin), initUploadLimiter, uploadRouter);
app.use('/api/files', requirePin(config.pin), downloadLimiter, fileRoutes);
// --- Frontend Routes (Serving HTML) ---
// Root route ('/')
// Root route
app.get('/', (req, res) => {
// Redirect to login if PIN is required and not authenticated
// Check if the PIN is configured and the cookie exists
if (config.pin && (!req.cookies?.DUMBDROP_PIN || !safeCompare(req.cookies.DUMBDROP_PIN, config.pin))) {
logger.debug('[/] PIN required, redirecting to login.html');
return res.redirect('/login.html'); // Use relative path
return res.redirect('/login.html');
}
try {
const filePath = path.join(__dirname, '../public', 'index.html');
let html = fs.readFileSync(filePath, 'utf8');
// Perform template replacements
html = html.replace(/{{SITE_TITLE}}/g, config.siteTitle);
html = html.replace('{{AUTO_UPLOAD}}', config.autoUpload.toString());
html = html.replace('{{MAX_RETRIES}}', config.clientMaxRetries.toString());
// Ensure baseUrl has a trailing slash
const baseUrlWithSlash = config.baseUrl.endsWith('/') ? config.baseUrl : config.baseUrl + '/';
html = html.replace(/{{BASE_URL}}/g, baseUrlWithSlash);
// Generate Footer Content
let footerHtml = '';
if (config.footerLinks && config.footerLinks.length > 0) {
footerHtml = config.footerLinks.map(link =>
`<a href="${link.url}" target="_blank" rel="noopener noreferrer">${link.text}</a>`
).join('<span class="footer-separator"> | </span>');
} else {
footerHtml = `<span class="footer-static">Built by <a href="https://www.dumbware.io/" target="_blank" rel="noopener noreferrer">Dumbwareio</a></span>`;
}
html = html.replace('{{FOOTER_CONTENT}}', footerHtml);
// Inject Demo Banner if needed
html = injectDemoBanner(html);
res.setHeader('Content-Type', 'text/html');
res.send(html);
} catch (err) {
logger.error(`Error processing index.html: ${err.message}`);
res.status(500).send('Error loading page');
}
let html = fs.readFileSync(path.join(__dirname, '../public', 'index.html'), 'utf8');
html = html.replace(/{{SITE_TITLE}}/g, config.siteTitle);
html = html.replace('{{AUTO_UPLOAD}}', config.autoUpload.toString());
html = injectDemoBanner(html);
res.send(html);
});
// Login route ('/login.html')
// Login route
app.get('/login.html', (req, res) => {
// Prevent caching of the login page
// Add cache control headers
res.set('Cache-Control', 'no-store, no-cache, must-revalidate, private');
res.set('Pragma', 'no-cache');
res.set('Expires', '0');
let html = fs.readFileSync(path.join(__dirname, '../public', 'login.html'), 'utf8');
html = html.replace(/{{SITE_TITLE}}/g, config.siteTitle);
html = injectDemoBanner(html);
res.send(html);
});
// Serve static files with template variable replacement for HTML files
app.use((req, res, next) => {
if (!req.path.endsWith('.html')) {
return next();
}
try {
const filePath = path.join(__dirname, '../public', 'login.html');
let html = fs.readFileSync(filePath, 'utf8');
html = html.replace(/{{SITE_TITLE}}/g, config.siteTitle);
const baseUrlWithSlash = config.baseUrl.endsWith('/') ? config.baseUrl : config.baseUrl + '/';
html = html.replace(/{{BASE_URL}}/g, baseUrlWithSlash);
html = injectDemoBanner(html); // Inject demo banner if needed
res.setHeader('Content-Type', 'text/html');
res.send(html);
const filePath = path.join(__dirname, '../public', req.path);
let html = fs.readFileSync(filePath, 'utf8');
html = html.replace(/{{SITE_TITLE}}/g, config.siteTitle);
if (req.path === 'index.html') {
html = html.replace('{{AUTO_UPLOAD}}', config.autoUpload.toString());
}
html = injectDemoBanner(html);
res.send(html);
} catch (err) {
logger.error(`Error processing login.html: ${err.message}`);
res.status(500).send('Error loading login page');
next();
}
});
// --- Health Check Endpoint ---
app.get('/health', (req, res) => {
res.status(200).json({ status: 'UP', message: 'Server is healthy' });
});
// Serve remaining static files
app.use(express.static('public'));
// --- Static File Serving ---
// Serve static files (CSS, JS, assets) from the 'public' directory
// Use express.static middleware, placed AFTER specific HTML routes
app.use(express.static(path.join(__dirname, '../public')));
// --- Error Handling Middleware ---
// Catch-all for unhandled errors
// Error handling middleware
app.use((err, req, res, next) => { // eslint-disable-line no-unused-vars
logger.error(`Unhandled application error: ${err.message}`, err.stack);
// Avoid sending stack trace in production
const errorResponse = {
message: 'Internal Server Error',
...(config.nodeEnv === 'development' && { error: err.message, stack: err.stack })
};
// Ensure response is sent only once
if (!res.headersSent) {
res.status(err.status || 500).json(errorResponse);
}
logger.error(`Unhandled error: ${err.message}`);
res.status(500).json({
message: 'Internal server error',
error: process.env.NODE_ENV === 'development' ? err.message : undefined
});
});
// --- Initialize Function ---
/**
 * Initialize the application.
 * Validates configuration, ensures the upload directory exists,
 * and logs the active settings. Returns the configured Express app.
 */
async function initialize() {
  try {
    // Validate configuration
    validateConfig();
    logger.info('Application initialized.');
    // Log the active storage adapter
    logger.info(`Active Storage Adapter: ${storageAdapter.constructor.name || config.storageType}`);
    // Ensure the upload directory exists and is writable
    await ensureDirectoryExists(config.uploadDir);
    // Log configuration
    logger.info(`Maximum file size set to: ${config.maxFileSize / (1024 * 1024)}MB`);
    if (config.pin) {
      logger.info('PIN protection enabled');
    }
    logger.info(`Auto upload is ${config.autoUpload ? 'enabled' : 'disabled'}`);
    if (config.appriseUrl) {
      logger.info('Apprise notifications enabled');
    }
    // Demo mode: clear any existing files in the upload directory
    if (process.env.DEMO_MODE === 'true') {
      logger.info('[DEMO] Running in demo mode - uploads will not be saved');
      try {
        const files = fs.readdirSync(config.uploadDir);
        for (const file of files) {
          fs.unlinkSync(path.join(config.uploadDir, file));
        }
        logger.info('[DEMO] Cleared upload directory');
      } catch (err) {
        logger.error(`[DEMO] Failed to clear upload directory: ${err.message}`);
      }
    }
    return app; // Return the configured Express app instance
  } catch (err) {
    logger.error(`Application initialization failed: ${err.message}`);
    throw err; // Propagate error to stop server start
  }
}
module.exports = { app, initialize, config };
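For reference, a minimal sketch (not part of this diff) of how a caller can wire up the exported initialize() and app; the require path and port handling are illustrative, the real entry point is src/server.js shown further below.

// sketch: booting the app from an external entry point (illustrative only)
const { app, initialize, config } = require('./src/app');

initialize()
  .then(() => {
    app.listen(config.port, () => console.log(`DumbDrop listening on port ${config.port}`));
  })
  .catch((err) => {
    console.error('Startup failed:', err.message);
    process.exitCode = 1;
  });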


@@ -1,222 +1,104 @@
// File: src/config/index.js
require('dotenv').config();
const { validatePin } = require('../utils/security');
const logger = require('../utils/logger');
const fs = require('fs');
const path = require('path');
// const { version } = require('../../package.json'); // version not currently used, can be removed or kept
// --- Environment Variables Reference ---
/* (Comments listing all ENV vars - keep as is) */
// --- Helper for clear configuration logging ---
const logConfig = (message, level = 'info') => {
const prefix = level === 'warning' ? '⚠️ WARNING:' : ' INFO:';
console.log(`${prefix} CONFIGURATION: ${message}`);
};
// --- Default configurations ---
const DEFAULT_PORT = 3000;
const DEFAULT_SITE_TITLE = 'DumbDrop';
const DEFAULT_BASE_URL_PREFIX = 'http://localhost'; // Prefix, port added later
const DEFAULT_CLIENT_MAX_RETRIES = 5;
const DEFAULT_STORAGE_TYPE = 'local';
const logAndReturn = (key, value, isDefault = false, sensitive = false) => {
const displayValue = sensitive ? '********' : value;
logConfig(`${key}: ${displayValue}${isDefault ? ' (default)' : ''}`);
return value;
};
function isLocalDevelopment() {
return process.env.NODE_ENV !== 'production' && !process.env.UPLOAD_DIR;
}
function determineLocalUploadDirectory() {
if (process.env.STORAGE_TYPE && process.env.STORAGE_TYPE.toLowerCase() !== 'local') {
return null; // Not using local storage
}
let uploadDir;
if (process.env.UPLOAD_DIR) {
uploadDir = process.env.UPLOAD_DIR;
// logger.info(`[Local Storage] Upload directory set from UPLOAD_DIR: ${uploadDir}`); // Logger might not be fully init here
} else if (process.env.LOCAL_UPLOAD_DIR) {
uploadDir = process.env.LOCAL_UPLOAD_DIR;
// logger.warn(`[Local Storage] Upload directory using LOCAL_UPLOAD_DIR fallback: ${uploadDir}`);
} else {
uploadDir = './local_uploads';
// logger.warn(`[Local Storage] Upload directory using default fallback: ${uploadDir}`);
}
// logger.info(`[Local Storage] Final upload directory path: ${path.resolve(uploadDir)}`);
return path.resolve(uploadDir); // Always resolve to absolute
}
function ensureLocalUploadDirExists(dirPath) {
  if (!dirPath || !isLocalDevelopment()) {
    return;
  }
  try {
    if (!fs.existsSync(dirPath)) {
      fs.mkdirSync(dirPath, { recursive: true });
      console.log(`[INFO] CONFIGURATION: [Local Storage] Created local upload directory: ${dirPath}`);
    } else {
      console.log(`[INFO] CONFIGURATION: [Local Storage] Local upload directory exists: ${dirPath}`);
    }
    fs.accessSync(dirPath, fs.constants.W_OK);
    console.log(`[SUCCESS] CONFIGURATION: [Local Storage] Local upload directory is writable: ${dirPath}`);
  } catch (err) {
    console.error(`[ERROR] CONFIGURATION: [Local Storage] Failed to create or access local upload directory: ${dirPath}. Error: ${err.message}`);
    throw new Error(`Upload directory "${dirPath}" is not accessible or writable.`);
  }
}
/**
 * Get the host path from the Docker mount point
 * @returns {string} Host path or fallback to container path
 */
function getHostPath() {
  try {
    // Read Docker mountinfo to get the host path
    const mountInfo = fs.readFileSync('/proc/self/mountinfo', 'utf8');
    const lines = mountInfo.split('\n');
    // Find the line containing our upload directory
    const uploadMount = lines.find(line => line.includes('/app/uploads'));
    if (uploadMount) {
      // Extract the host path from the mount info
      // The host path is typically in the 4th space-separated field
      const parts = uploadMount.split(' ');
      return parts[3];
    }
    logger.debug('Could not determine host path from mount info');
  } catch (err) {
    logger.debug(`Could not read mount info: ${err.message}`);
  }
  // Fallback to container path if we can't determine host path
  return '/app/uploads';
}
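As an illustration of what getHostPath() parses (the exact mountinfo line differs per host and is hypothetical here), a bind mount of the upload directory would make the 4th space-separated field resolve to the host-side path:

// hypothetical /proc/self/mountinfo line (abbreviated):
// 726 715 254:1 /home/user/dumbdrop/uploads /app/uploads rw,relatime - ext4 /dev/vda1 rw
// parts[3] === '/home/user/dumbdrop/uploads'  -> used as the user-facing upload location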
const storageTypeInput = process.env.STORAGE_TYPE || DEFAULT_STORAGE_TYPE;
const storageType = ['local', 's3'].includes(storageTypeInput.toLowerCase())
? storageTypeInput.toLowerCase()
: DEFAULT_STORAGE_TYPE;
if (storageTypeInput.toLowerCase() !== storageType) {
console.warn(`[WARN] CONFIGURATION: Invalid STORAGE_TYPE "${storageTypeInput}", using default: "${storageType}"`);
}
const resolvedLocalUploadDir = determineLocalUploadDirectory();
if (storageType === 'local' && resolvedLocalUploadDir) { // Only ensure if actually using local storage
ensureLocalUploadDirExists(resolvedLocalUploadDir);
}
const parseFooterLinks = (linksString) => {
if (!linksString) return [];
return linksString.split(',')
.map(linkPair => {
const parts = linkPair.split('@').map(part => part.trim());
if (parts.length === 2 && parts[0] && parts[1] && (parts[1].startsWith('http://') || parts[1].startsWith('https://'))) {
return { text: parts[0], url: parts[1] };
}
// logger.warn(`Invalid format or URL in FOOTER_LINKS: "${linkPair}".`); // Logger might not be fully init
return null;
})
.filter(link => link !== null);
};
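To make the expected FOOTER_LINKS format concrete, a hypothetical value and the structure parseFooterLinks would return for it:

// FOOTER_LINKS="Docs @ https://example.com/docs, Status @ https://status.example.com"
// parseFooterLinks(process.env.FOOTER_LINKS) would yield:
// [
//   { text: 'Docs', url: 'https://example.com/docs' },
//   { text: 'Status', url: 'https://status.example.com' }
// ]
// Entries without a valid http(s) URL after the '@' are dropped.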
const port = parseInt(process.env.PORT || DEFAULT_PORT, 10);
const baseUrl = process.env.BASE_URL || `${DEFAULT_BASE_URL_PREFIX}:${port}/`;
/**
 * Application configuration
 * Loads and validates environment variables
 */
const config = {
  // Server settings
  port,
  nodeEnv: process.env.NODE_ENV || 'development',
  baseUrl,
  isDemoMode: process.env.DEMO_MODE === 'true',
  // Storage settings
  storageType,
  uploadDir: storageType === 'local' ? resolvedLocalUploadDir : path.resolve(process.env.UPLOAD_DIR || process.env.LOCAL_UPLOAD_DIR || './uploads'), // For S3, this is the metadata dir; fallback required
  uploadDisplayPath: getHostPath(), // Dynamically determined from the Docker mount
  s3Region: process.env.S3_REGION || null,
  s3BucketName: process.env.S3_BUCKET_NAME || null,
  s3AccessKeyId: process.env.S3_ACCESS_KEY_ID || null,
  s3SecretAccessKey: process.env.S3_SECRET_ACCESS_KEY || null,
  s3EndpointUrl: process.env.S3_ENDPOINT_URL || null,
  s3ForcePathStyle: process.env.S3_FORCE_PATH_STYLE === 'true',
  // Upload settings
  maxFileSize: (() => {
    const sizeInMB = parseInt(process.env.MAX_FILE_SIZE || '1024', 10);
    return (isNaN(sizeInMB) || sizeInMB <= 0 ? 1024 : sizeInMB) * 1024 * 1024; // Convert MB to bytes
  })(),
  autoUpload: process.env.AUTO_UPLOAD === 'true',
  allowedExtensions: process.env.ALLOWED_EXTENSIONS ?
    process.env.ALLOWED_EXTENSIONS.split(',').map(ext => ext.trim().toLowerCase()).filter(Boolean) :
    null,
  clientMaxRetries: (() => {
    const retries = parseInt(process.env.CLIENT_MAX_RETRIES || DEFAULT_CLIENT_MAX_RETRIES, 10);
    return (isNaN(retries) || retries < 0) ? DEFAULT_CLIENT_MAX_RETRIES : retries;
  })(),
  // Security
  pin: validatePin(process.env.DUMBDROP_PIN),
  allowedIframeOrigins: process.env.ALLOWED_IFRAME_ORIGINS ?
    process.env.ALLOWED_IFRAME_ORIGINS.split(',').map(origin => origin.trim()).filter(Boolean) :
    null,
  // UI settings
  siteTitle: process.env.DUMBDROP_TITLE || DEFAULT_SITE_TITLE,
  footerLinks: parseFooterLinks(process.env.FOOTER_LINKS),
  // Notification settings
  appriseUrl: process.env.APPRISE_URL || null,
  appriseMessage: process.env.APPRISE_MESSAGE || 'New file uploaded - {filename} ({size}), Storage used {storage}',
  appriseSizeUnit: process.env.APPRISE_SIZE_UNIT || 'Auto'
};
// --- Log Configuration (after logger is confirmed available) ---
// Moved logging to after config object is built, so logger is definitely available
logger.info(`--- Configuration Start ---`);
logAndReturn('NODE_ENV', config.nodeEnv);
logAndReturn('PORT', config.port);
logAndReturn('BASE_URL', config.baseUrl);
logAndReturn('DEMO_MODE', config.isDemoMode);
logAndReturn('STORAGE_TYPE', config.storageType);
if (config.storageType === 'local') {
logAndReturn('Upload Directory (Local Storage)', config.uploadDir);
} else {
logAndReturn('Metadata Directory (S3 Mode)', config.uploadDir); // Clarify role for S3
logAndReturn('S3_REGION', config.s3Region);
logAndReturn('S3_BUCKET_NAME', config.s3BucketName);
logAndReturn('S3_ACCESS_KEY_ID', config.s3AccessKeyId, false, true);
logAndReturn('S3_SECRET_ACCESS_KEY', config.s3SecretAccessKey, false, true);
if (config.s3EndpointUrl) logAndReturn('S3_ENDPOINT_URL', config.s3EndpointUrl);
logAndReturn('S3_FORCE_PATH_STYLE', config.s3ForcePathStyle);
}
logger.info(`Max File Size: ${config.maxFileSize / (1024 * 1024)}MB`);
logger.info(`Auto Upload: ${config.autoUpload}`);
if (config.allowedExtensions) logger.info(`Allowed Extensions: ${config.allowedExtensions.join(', ')}`);
if (config.pin) logAndReturn('DUMBDROP_PIN', config.pin, false, true);
if (config.allowedIframeOrigins) logger.info(`Allowed Iframe Origins: ${config.allowedIframeOrigins.join(', ')}`);
if (config.appriseUrl) logAndReturn('APPRISE_URL', config.appriseUrl);
logger.info(`Client Max Retries: ${config.clientMaxRetries}`);
logger.info(`--- Configuration End ---`);
// Validate required settings
function validateConfig() {
  const errors = [];
  if (config.port <= 0 || config.port > 65535) errors.push('PORT must be a valid number between 1 and 65535');
  if (config.maxFileSize <= 0) errors.push('MAX_FILE_SIZE must be greater than 0');
  // Validate BASE_URL format
  try {
    new URL(config.baseUrl);
    if (!config.baseUrl.endsWith('/')) errors.push('BASE_URL must end with a trailing slash ("/"). Current: ' + config.baseUrl);
  } catch (err) {
    errors.push(`BASE_URL must be a valid URL. Error: ${err.message}`);
  }
  if (config.storageType === 's3') {
    if (!config.s3Region) errors.push('S3_REGION is required for S3 storage');
    if (!config.s3BucketName) errors.push('S3_BUCKET_NAME is required for S3 storage');
    if (!config.s3AccessKeyId) errors.push('S3_ACCESS_KEY_ID is required for S3 storage');
    if (!config.s3SecretAccessKey) errors.push('S3_SECRET_ACCESS_KEY is required for S3 storage');
    if (config.s3ForcePathStyle && !config.s3EndpointUrl) {
      logger.warn('[Config Validation] S3_FORCE_PATH_STYLE is true, but S3_ENDPOINT_URL is not set. This may not work as expected with default AWS endpoints.');
    }
  } else if (config.storageType === 'local') {
    if (!config.uploadDir) errors.push('Upload directory (UPLOAD_DIR or LOCAL_UPLOAD_DIR) is required for local storage.');
    else {
      try { fs.accessSync(config.uploadDir, fs.constants.W_OK); }
      catch (err) { errors.push(`Local upload directory "${config.uploadDir}" is not writable or does not exist.`); }
    }
  }
  if (config.nodeEnv === 'production' && !config.appriseUrl) {
    logger.info('Notifications disabled - No Configuration');
  }
  // Metadata directory check (for both local file metadata and S3 upload state metadata)
  if (!config.uploadDir) {
    errors.push('A base directory (UPLOAD_DIR or LOCAL_UPLOAD_DIR) is required for metadata storage.');
  } else {
    try {
      const metadataBase = path.resolve(config.uploadDir); // Base for .metadata
      if (!fs.existsSync(metadataBase)) {
        fs.mkdirSync(metadataBase, { recursive: true });
        logger.info(`[Config Validation] Created base directory for metadata: ${metadataBase}`);
      }
      fs.accessSync(metadataBase, fs.constants.W_OK); // Check writability of the parent of .metadata
    } catch (err) {
      errors.push(`Cannot access or create base directory for metadata at "${config.uploadDir}". Error: ${err.message}`);
    }
  }
  if (errors.length > 0) {
    logger.error('--- CONFIGURATION ERRORS ---');
    errors.forEach(err => logger.error(`- ${err}`));
    logger.error('-----------------------------');
    throw new Error('Configuration validation failed:\n' + errors.join('\n'));
  }
  logger.success('[Config Validation] Configuration validated successfully.');
}
// Freeze configuration to prevent modifications (after logging and validation)
Object.freeze(config);
module.exports = { config, validateConfig };
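A minimal sketch of how a consumer uses this module: import the frozen config, run validateConfig() once at startup, and treat a thrown error as fatal. This mirrors what app.js does; the require path here is illustrative.

// sketch: consuming the config module (illustrative require path)
const { config, validateConfig } = require('./src/config');

try {
  validateConfig(); // throws with a combined message if any check fails
  console.log(`Storage type: ${config.storageType}, max file size: ${config.maxFileSize} bytes`);
} catch (err) {
  console.error(err.message);
  process.exit(1);
}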


@@ -6,33 +6,27 @@
const { safeCompare } = require('../utils/security');
const logger = require('../utils/logger');
const { config } = require('../config');
/**
* Security headers middleware
*/
function securityHeaders(req, res, next) {
// Content Security Policy
  let csp =
    "default-src 'self'; " +
    "connect-src 'self'; " +
    "style-src 'self' 'unsafe-inline' cdn.jsdelivr.net; " +
    "script-src 'self' 'unsafe-inline' cdn.jsdelivr.net; " +
    "img-src 'self' data: blob:;";
  // If allowedIframeOrigins is set, allow those origins to embed via iframe
  if (config.allowedIframeOrigins && config.allowedIframeOrigins.length > 0) {
    // Do not set X-Frame-Options; add a frame-ancestors directive to the CSP instead
    const frameAncestors = ["'self'", ...config.allowedIframeOrigins].join(' ');
    csp += ` frame-ancestors ${frameAncestors};`;
  } else {
    // Default: only allow same-origin framing if not configured
    res.setHeader('X-Frame-Options', 'SAMEORIGIN');
  }
  res.setHeader('Content-Security-Policy', csp);
  // X-Content-Type-Options
  res.setHeader('X-Content-Type-Options', 'nosniff');
  // X-XSS-Protection
  res.setHeader('X-XSS-Protection', '1; mode=block');
// Strict Transport Security (when in production)
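To illustrate the branch above (values hypothetical): with ALLOWED_IFRAME_ORIGINS set, X-Frame-Options is omitted and the CSP gains a frame-ancestors directive; without it, only same-origin framing is allowed.

// ALLOWED_IFRAME_ORIGINS=https://dash.example.com   (hypothetical)
//   Content-Security-Policy: ... img-src 'self' data: blob:; frame-ancestors 'self' https://dash.example.com;
//
// ALLOWED_IFRAME_ORIGINS unset
//   X-Frame-Options: SAMEORIGIN   (no frame-ancestors directive is appended)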


@@ -1,210 +1,132 @@
/**
 * File management and listing route handlers.
 * Provides endpoints for listing, downloading, and deleting uploaded files.
 * Handles downloads by either providing a presigned URL (S3) or streaming (local),
 * along with file metadata and stats.
 */
const express = require('express');
const router = express.Router();
const path = require('path'); // Needed for path handling and sanitization
const fs = require('fs').promises; // Promise-based fs API; createReadStream is pulled in separately where needed
const { storageAdapter } = require('../storage'); // Import the selected storage adapter
const { config } = require('../config');
const logger = require('../utils/logger');
const { isDemoMode } = require('../utils/demoMode');
const { formatFileSize } = require('../utils/fileUtils');
/**
* List all files from the storage backend.
* Get file information
*/
router.get('/', async (req, res) => {
// Demo mode handling (simplified list)
if (isDemoMode()) {
logger.info('[DEMO /files] Listing demo files');
// Return a mock list or call demoAdapter.listFiles() if implemented
return res.json({
files: [{ filename: 'demo_file.txt', size: 1234, formattedSize: '1.21KB', uploadDate: new Date().toISOString() }],
totalFiles: 1,
totalSize: 1234,
message: 'Demo Mode: Showing mock file list'
});
}
router.get('/:filename/info', async (req, res) => {
const filePath = path.join(config.uploadDir, req.params.filename);
try {
const files = await storageAdapter.listFiles();
const totalSize = files.reduce((acc, file) => acc + (file.size || 0), 0);
const stats = await fs.stat(filePath);
const fileInfo = {
filename: req.params.filename,
size: stats.size,
formattedSize: formatFileSize(stats.size),
uploadDate: stats.mtime,
mimetype: path.extname(req.params.filename).slice(1)
};
res.json({
files: files,
totalFiles: files.length,
totalSize: totalSize
// Note: formattedTotalSize could be calculated here if needed
});
res.json(fileInfo);
} catch (err) {
logger.error(`[Route /files GET] Failed to list files: ${err.message}`, err.stack);
// Map common errors
let statusCode = 500;
let clientMessage = 'Failed to list files.';
if (err.name === 'NoSuchBucket' || err.name === 'AccessDenied') { // S3 Specific
clientMessage = 'Storage configuration error.';
} else if (err.code === 'ENOENT') { // Local Specific
clientMessage = 'Storage directory not found.';
} else if (err.code === 'EACCES' || err.code === 'EPERM') { // Local Specific
clientMessage = 'Storage permission error.';
}
res.status(statusCode).json({ error: clientMessage, details: err.message });
logger.error(`Failed to get file info: ${err.message}`);
res.status(404).json({ error: 'File not found' });
}
});
/**
* Get a download URL or stream a file.
* For S3, returns a presigned URL.
* For Local, streams the file content.
* Download file
*/
router.get('/:filename/download', async (req, res) => {
const rawFilename = req.params.filename;
// Basic sanitization: Prevent directory traversal.
// Adapters should also validate/sanitize keys/paths.
const filename = path.basename(rawFilename);
if (filename !== rawFilename || filename.includes('..')) {
logger.error(`[Route /download] Invalid filename detected: ${rawFilename}`);
return res.status(400).json({ error: 'Invalid filename' });
}
// Demo mode handling
if (isDemoMode()) {
logger.info(`[DEMO /download] Download request for ${filename}`);
return res.json({
message: 'Demo Mode: This would initiate download in production.',
filename: filename
});
}
const filePath = path.join(config.uploadDir, req.params.filename);
try {
const result = await storageAdapter.getDownloadUrlOrStream(filename);
await fs.access(filePath);
if (result.type === 'url') {
// S3 Adapter returned a presigned URL
logger.info(`[Route /download] Providing presigned URL for: ${filename}`);
// Option 1: Redirect (Simple, but might hide URL from client)
// res.redirect(result.value);
// Set headers for download
res.setHeader('Content-Disposition', `attachment; filename="${req.params.filename}"`);
res.setHeader('Content-Type', 'application/octet-stream');
// Option 2: Return URL in JSON (Gives client more control)
res.json({ downloadUrl: result.value });
// Stream the file
const fileStream = require('fs').createReadStream(filePath);
fileStream.pipe(res);
} else if (result.type === 'path') {
// Local Adapter returned a file path
const filePath = result.value;
logger.info(`[Route /download] Streaming local file: ${filePath}`);
// Check if file still exists before streaming
try {
await fs.promises.access(filePath, fs.constants.R_OK);
} catch (accessErr) {
if (accessErr.code === 'ENOENT') {
logger.warn(`[Route /download] Local file not found just before streaming: ${filePath}`);
return res.status(404).json({ error: 'File not found' });
}
logger.error(`[Route /download] Cannot access local file for streaming ${filePath}: ${accessErr.message}`);
return res.status(500).json({ error: 'Failed to access file for download' });
// Handle errors during streaming
fileStream.on('error', (err) => {
logger.error(`File streaming error: ${err.message}`);
if (!res.headersSent) {
res.status(500).json({ error: 'Failed to download file' });
}
});
// Set headers for download
res.setHeader('Content-Disposition', `attachment; filename="${filename}"`); // Use the sanitized basename
res.setHeader('Content-Type', 'application/octet-stream'); // Generic type
// Stream the file
const fileStream = fs.createReadStream(filePath);
fileStream.on('error', (streamErr) => {
logger.error(`[Route /download] File streaming error for ${filePath}: ${streamErr.message}`);
if (!res.headersSent) {
// Try to send an error response if headers haven't been sent yet
res.status(500).json({ error: 'Failed to stream file' });
} else {
// If headers already sent, we can only terminate the connection
res.end();
}
});
fileStream.pipe(res);
} else {
// Unknown result type from adapter
logger.error(`[Route /download] Unknown result type from storage adapter: ${result.type}`);
res.status(500).json({ error: 'Internal server error during download preparation' });
}
logger.info(`File download started: ${req.params.filename}`);
} catch (err) {
logger.error(`[Route /download] Failed to get download for ${filename}: ${err.message}`, err.stack);
let statusCode = 500;
let clientMessage = 'Failed to initiate download.';
// Use specific errors thrown by adapters if available
if (err.message === 'File not found' || err.message === 'File not found in S3' || err.name === 'NoSuchKey' || err.code === 'ENOENT') {
statusCode = 404;
clientMessage = 'File not found.';
} else if (err.message === 'Permission denied' || err.code === 'EACCES' || err.name === 'AccessDenied') {
statusCode = 500; // Treat permission issues as internal server errors generally
clientMessage = 'Storage permission error during download.';
} else if (err.message === 'Invalid filename') {
statusCode = 400;
clientMessage = 'Invalid filename specified.';
}
// Avoid sending error if headers might have been partially sent by streaming
if (!res.headersSent) {
res.status(statusCode).json({ error: clientMessage, details: err.message });
} else {
logger.warn(`[Route /download] Error occurred after headers sent for ${filename}. Cannot send JSON error.`);
res.end(); // Terminate response if possible
}
logger.error(`File download failed: ${err.message}`);
res.status(404).json({ error: 'File not found' });
}
});
/**
* List all files
*/
router.get('/', async (req, res) => {
try {
const files = await fs.readdir(config.uploadDir);
// Get stats for all files first
const fileStatsPromises = files.map(async filename => {
try {
const stats = await fs.stat(path.join(config.uploadDir, filename));
return { filename, stats, valid: stats.isFile() };
} catch (err) {
logger.error(`Failed to get stats for file ${filename}: ${err.message}`);
return { filename, valid: false };
}
});
const fileStats = await Promise.all(fileStatsPromises);
// Filter and map valid files
const fileList = fileStats
.filter(file => file.valid)
.map(({ filename, stats }) => ({
filename,
size: stats.size,
formattedSize: formatFileSize(stats.size),
uploadDate: stats.mtime
}));
// Sort files by upload date (newest first)
fileList.sort((a, b) => b.uploadDate - a.uploadDate);
res.json({
files: fileList,
totalFiles: fileList.length,
totalSize: fileList.reduce((acc, file) => acc + file.size, 0)
});
} catch (err) {
logger.error(`Failed to list files: ${err.message}`);
res.status(500).json({ error: 'Failed to list files' });
}
});
/**
* Delete a file from the storage backend.
* Delete file
*/
router.delete('/:filename', async (req, res) => {
const rawFilename = req.params.filename;
// Basic sanitization
const filename = path.basename(rawFilename);
if (filename !== rawFilename || filename.includes('..')) {
logger.error(`[Route /delete] Invalid filename detected: ${rawFilename}`);
return res.status(400).json({ error: 'Invalid filename' });
}
// Demo mode handling
if (isDemoMode()) {
logger.info(`[DEMO /delete] Delete request for ${filename}`);
// Call demoAdapter.deleteFile(filename) if implemented?
return res.json({ message: 'File deleted (Demo)', filename: filename });
}
logger.info(`[Route /delete] Received delete request for: ${filename}`);
const filePath = path.join(config.uploadDir, req.params.filename);
try {
await storageAdapter.deleteFile(filename);
await fs.access(filePath);
await fs.unlink(filePath);
logger.info(`File deleted: ${req.params.filename}`);
res.json({ message: 'File deleted successfully' });
} catch (err) {
logger.error(`[Route /delete] Failed to delete file ${filename}: ${err.message}`, err.stack);
let statusCode = 500;
let clientMessage = 'Failed to delete file.';
// Use specific errors thrown by adapters if available
if (err.message === 'File not found' || err.message === 'File not found in S3' || err.name === 'NoSuchKey' || err.code === 'ENOENT') {
statusCode = 404;
clientMessage = 'File not found.';
} else if (err.message === 'Permission denied' || err.code === 'EACCES' || err.name === 'AccessDenied') {
statusCode = 500;
clientMessage = 'Storage permission error during delete.';
} else if (err.message === 'Invalid filename') {
statusCode = 400;
clientMessage = 'Invalid filename specified.';
}
res.status(statusCode).json({ error: clientMessage, details: err.message });
logger.error(`File deletion failed: ${err.message}`);
res.status(err.code === 'ENOENT' ? 404 : 500).json({
error: err.code === 'ENOENT' ? 'File not found' : 'Failed to delete file'
});
}
});
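A small client-side sketch (not part of the diff) of the endpoints defined in this router. The /files mount path is an assumption, and any PIN session is assumed to be established already.

// sketch: calling the file management endpoints (mount path /files is an assumption)
const base = 'http://localhost:3000/files';

async function demoFileApi() {
  const { files, totalFiles } = await (await fetch(base)).json();   // list files
  console.log(`Server reports ${totalFiles} file(s)`, files);

  if (totalFiles > 0) {
    const name = encodeURIComponent(files[0].filename);
    const download = await fetch(`${base}/${name}/download`);       // stream (local) or presigned URL (S3)
    console.log('Download response type:', download.headers.get('content-type'));

    await fetch(`${base}/${name}`, { method: 'DELETE' });           // delete the file
  }
}

demoFileApi().catch(console.error);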


@@ -1,161 +1,411 @@
/**
 * File upload route handlers and batch upload management.
 * Handles file uploads, chunked transfers, and folder creation.
 * Manages upload sessions, batch timeouts, and cleanup,
 * delegating storage operations to the configured storage adapter where one is used.
 */
const express = require('express');
const router = express.Router();
const path = require('path'); // Still needed for extension checks
const crypto = require('crypto');
const path = require('path');
const { config } = require('../config');
const logger = require('../utils/logger');
const { storageAdapter } = require('../storage'); // Import the adapter factory's result
const { isDemoMode } = require('../utils/demoMode'); // Keep demo check for specific route behavior if needed
const { getUniqueFilePath, getUniqueFolderPath } = require('../utils/fileUtils');
const { sendNotification } = require('../services/notifications');
const fs = require('fs');
const { cleanupIncompleteUploads } = require('../utils/cleanup');
const { isDemoMode, createMockUploadResponse } = require('../utils/demoMode');
// --- Routes ---
// Store ongoing uploads
const uploads = new Map();
// Store folder name mappings for batch uploads with timestamps
const folderMappings = new Map();
// Store batch activity timestamps
const batchActivity = new Map();
// Store upload to batch mappings
const uploadToBatch = new Map();
const BATCH_TIMEOUT = 30 * 60 * 1000; // 30 minutes
let cleanupInterval;
/**
* Start the cleanup interval for inactive batches
* @returns {NodeJS.Timeout} The interval handle
*/
function startBatchCleanup() {
if (cleanupInterval) {
clearInterval(cleanupInterval);
}
cleanupInterval = setInterval(() => {
const now = Date.now();
logger.info(`Running batch cleanup, checking ${batchActivity.size} active batches`);
for (const [batchId, lastActivity] of batchActivity.entries()) {
if (now - lastActivity >= BATCH_TIMEOUT) {
logger.info(`Cleaning up inactive batch: ${batchId}`);
batchActivity.delete(batchId);
}
}
}, 5 * 60 * 1000); // 5 minutes
return cleanupInterval;
}
/**
* Stop the batch cleanup interval
*/
function stopBatchCleanup() {
if (cleanupInterval) {
clearInterval(cleanupInterval);
cleanupInterval = null;
}
}
// Start cleanup interval unless disabled
if (!process.env.DISABLE_BATCH_CLEANUP) {
startBatchCleanup();
}
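For tests, a brief sketch of how the exported start/stop helpers and the DISABLE_BATCH_CLEANUP flag can be used so the interval does not keep a test process alive; the require path is illustrative.

// sketch: controlling batch cleanup in tests (illustrative require path)
process.env.DISABLE_BATCH_CLEANUP = 'true';   // prevent the automatic interval on require
const { startBatchCleanup, stopBatchCleanup } = require('./src/routes/upload');

const interval = startBatchCleanup(); // runs every 5 minutes, expiring batches idle > 30 minutes
// ... exercise uploads ...
stopBatchCleanup();                   // or clearInterval(interval)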
// Run cleanup periodically
const CLEANUP_INTERVAL = 5 * 60 * 1000; // 5 minutes
const cleanupTimer = setInterval(() => {
cleanupIncompleteUploads(uploads, uploadToBatch, batchActivity)
.catch(err => logger.error(`Cleanup failed: ${err.message}`));
}, CLEANUP_INTERVAL);
// Handle cleanup timer errors
cleanupTimer.unref(); // Don't keep process alive just for cleanup
process.on('SIGTERM', () => {
clearInterval(cleanupTimer);
// Final cleanup
cleanupIncompleteUploads(uploads, uploadToBatch, batchActivity)
.catch(err => logger.error(`Final cleanup failed: ${err.message}`));
});
/**
* Log the current state of uploads and mappings
* @param {string} context - The context where this log is being called from
*/
function logUploadState(context) {
logger.debug(`Upload State [${context}]:
Active Uploads: ${uploads.size}
Active Batches: ${batchActivity.size}
Folder Mappings: ${folderMappings.size}
Upload-Batch Mappings: ${uploadToBatch.size}
`);
}
/**
* Validate batch ID format
* @param {string} batchId - Batch ID to validate
* @returns {boolean} True if valid
*/
function isValidBatchId(batchId) {
return /^\d+-[a-z0-9]{9}$/.test(batchId);
}
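Concretely, the batch IDs generated below (a millisecond timestamp plus nine lowercase alphanumerics) are what this regex accepts; for example:

// isValidBatchId('1717171717171-a1b2c3d4e')  -> true   (timestamp + '-' + 9 [a-z0-9] chars)
// isValidBatchId('1717171717171-A1B2C3D4E')  -> false  (uppercase not allowed)
// isValidBatchId('my-batch')                 -> false  (no numeric timestamp prefix)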
// Initialize upload
router.post('/init', async (req, res) => {
if (isDemoMode() && config.storageType !== 's3') { // S3 demo might still hit the adapter for presigned URLs etc.
// but local demo can be simpler.
const { filename = 'demo_file.txt', fileSize = 0 } = req.body;
const demoUploadId = 'demo-' + Math.random().toString(36).substr(2, 9);
logger.info(`[DEMO /init] Req for ${filename}, size ${fileSize}. ID ${demoUploadId}`);
if (Number(fileSize) === 0) {
logger.success(`[DEMO /init] Sim complete zero-byte: ${filename}`);
}
return res.json({ uploadId: demoUploadId });
}
const { filename, fileSize } = req.body;
const clientBatchId = req.headers['x-batch-id'];
if (!filename) return res.status(400).json({ error: 'Missing filename' });
if (fileSize === undefined || fileSize === null) return res.status(400).json({ error: 'Missing fileSize' });
const size = Number(fileSize);
if (isNaN(size) || size < 0) return res.status(400).json({ error: 'Invalid file size' });
if (size > config.maxFileSize) {
logger.warn(`Upload rejected: File size ${size} exceeds limit ${config.maxFileSize} for ${filename}`);
return res.status(413).json({ error: 'File too large', limit: config.maxFileSize });
}
if (config.allowedExtensions && config.allowedExtensions.length > 0) {
const fileExt = path.extname(filename).toLowerCase();
if (!fileExt || !config.allowedExtensions.includes(fileExt)) {
logger.warn(`Upload rejected: File type not allowed: ${filename} (Ext: ${fileExt || 'none'})`);
return res.status(400).json({ error: 'File type not allowed', receivedExtension: fileExt || 'none' });
}
logger.debug(`File extension ${fileExt} allowed for ${filename}`);
}
try {
const result = await storageAdapter.initUpload(filename, size, clientBatchId);
res.json({ uploadId: result.uploadId });
} catch (err) {
logger.error(`[Route /init] Upload initialization failed for "${filename}": ${err.name} - ${err.message}`, err.stack);
let statusCode = 500;
let clientMessage = 'Failed to initialize upload.';
if (err.message.includes('Invalid batch ID format')) {
statusCode = 400; clientMessage = err.message;
} else if (err.name === 'NoSuchBucket' || err.name === 'AccessDenied') {
statusCode = 500; clientMessage = 'Storage configuration error.';
} else if (err.code === 'EACCES' || err.code === 'EPERM' || err.message.includes('writable') || err.message.includes('metadata directory')) {
statusCode = 500; clientMessage = 'Storage permission or access error.';
} else if (err.message.includes('S3 Client configuration failed')) {
statusCode = 503; clientMessage = 'Storage service unavailable or misconfigured.';
// Log request details for debugging
if (process.env.DEBUG === 'true' || process.env.NODE_ENV === 'development') {
logger.info(`Upload init request:
Filename: ${filename}
Size: ${fileSize} (${typeof fileSize})
Batch ID: ${clientBatchId || 'none'}
`);
} else {
logger.info(`Upload init request: ${filename} (${fileSize} bytes)`);
}
res.status(statusCode).json({ error: clientMessage, details: config.nodeEnv === 'development' ? err.message : undefined });
// Validate required fields with detailed errors
if (!filename) {
return res.status(400).json({
error: 'Missing filename',
details: 'The filename field is required'
});
}
if (fileSize === undefined || fileSize === null) {
return res.status(400).json({
error: 'Missing fileSize',
details: 'The fileSize field is required'
});
}
// Convert fileSize to number if it's a string
const size = Number(fileSize);
if (isNaN(size) || size < 0) { // Changed from size <= 0 to allow zero-byte files
return res.status(400).json({
error: 'Invalid file size',
details: `File size must be a non-negative number, received: ${fileSize} (${typeof fileSize})`
});
}
// Validate file size
const maxSizeInBytes = config.maxFileSize;
if (size > maxSizeInBytes) {
const message = `File size ${size} bytes exceeds limit of ${maxSizeInBytes} bytes`;
logger.warn(message);
return res.status(413).json({
error: 'File too large',
message,
limit: maxSizeInBytes,
limitInMB: Math.floor(maxSizeInBytes / (1024 * 1024))
});
}
// Generate batch ID from header or create new one
const batchId = req.headers['x-batch-id'] || `${Date.now()}-${crypto.randomBytes(4).toString('hex').substring(0, 9)}`;
// Validate batch ID if provided in header
if (req.headers['x-batch-id'] && !isValidBatchId(batchId)) {
return res.status(400).json({
error: 'Invalid batch ID format',
details: `Batch ID must match format: timestamp-[9 alphanumeric chars], received: ${batchId}`
});
}
// Update batch activity
batchActivity.set(batchId, Date.now());
// Sanitize filename and convert to forward slashes
const safeFilename = path.normalize(filename)
.replace(/^(\.\.(\/|\\|$))+/, '')
.replace(/\\/g, '/')
.replace(/^\/+/, ''); // Remove leading slashes
// Log sanitized filename
logger.info(`Processing upload: ${safeFilename}`);
// Validate file extension if configured
if (config.allowedExtensions) {
const fileExt = path.extname(safeFilename).toLowerCase();
if (!config.allowedExtensions.includes(fileExt)) {
return res.status(400).json({
error: 'File type not allowed',
allowedExtensions: config.allowedExtensions,
receivedExtension: fileExt
});
}
}
const uploadId = crypto.randomBytes(16).toString('hex');
let filePath = path.join(config.uploadDir, safeFilename);
let fileHandle;
try {
// Handle file/folder paths
const pathParts = safeFilename.split('/').filter(Boolean); // Remove empty parts
if (pathParts.length > 1) {
// Handle files within folders
const originalFolderName = pathParts[0];
const folderPath = path.join(config.uploadDir, originalFolderName);
let newFolderName = folderMappings.get(`${originalFolderName}-${batchId}`);
if (!newFolderName) {
try {
// First ensure parent directories exist
await fs.promises.mkdir(path.dirname(folderPath), { recursive: true });
// Then try to create the target folder
await fs.promises.mkdir(folderPath, { recursive: false });
newFolderName = originalFolderName;
} catch (err) {
if (err.code === 'EEXIST') {
const uniqueFolderPath = await getUniqueFolderPath(folderPath);
newFolderName = path.basename(uniqueFolderPath);
logger.info(`Folder "${originalFolderName}" exists, using "${newFolderName}"`);
} else {
throw err;
}
}
folderMappings.set(`${originalFolderName}-${batchId}`, newFolderName);
}
pathParts[0] = newFolderName;
filePath = path.join(config.uploadDir, ...pathParts);
// Ensure all parent directories exist
await fs.promises.mkdir(path.dirname(filePath), { recursive: true });
}
// Get unique file path and handle
const result = await getUniqueFilePath(filePath);
filePath = result.path;
fileHandle = result.handle;
// Create upload entry
uploads.set(uploadId, {
safeFilename: path.relative(config.uploadDir, filePath),
filePath,
fileSize: size,
bytesReceived: 0,
writeStream: fileHandle.createWriteStream()
});
// Associate upload with batch
uploadToBatch.set(uploadId, batchId);
logger.info(`Initialized upload for ${path.relative(config.uploadDir, filePath)} (${size} bytes)`);
// Log state after initialization
logUploadState('After Upload Init');
// Handle zero-byte files immediately
if (size === 0) {
const upload = uploads.get(uploadId);
upload.writeStream.end();
uploads.delete(uploadId);
logger.success(`Completed zero-byte file upload: ${upload.safeFilename}`);
await sendNotification(upload.safeFilename, 0, config);
}
// Send response
return res.json({ uploadId });
} catch (err) {
if (fileHandle) {
await fileHandle.close().catch(() => {});
fs.promises.unlink(filePath).catch(() => {});
}
throw err;
}
} catch (err) {
logger.error(`Upload initialization failed:
Error: ${err.message}
Stack: ${err.stack}
Filename: ${filename}
Size: ${fileSize}
Batch ID: ${clientBatchId || 'none'}
`);
return res.status(500).json({
error: 'Failed to initialize upload',
details: err.message
});
}
});
// Upload chunk
router.post('/chunk/:uploadId', express.raw({
  limit: config.maxFileSize + (10 * 1024 * 1024), // Allow the full file size plus some overhead per request body
  type: 'application/octet-stream'
}), async (req, res) => {
const { uploadId } = req.params;
const chunk = req.body;
const partNumber = parseInt(req.query.partNumber, 10); // Ensure partNumber is parsed
const upload = uploads.get(uploadId);
const chunkSize = req.body.length;
const batchId = req.headers['x-batch-id'];
if (isNaN(partNumber) || partNumber < 1) {
logger.error(`[Route /chunk] Invalid partNumber for ${uploadId}: ${req.query.partNumber}`);
return res.status(400).json({ error: 'Missing or invalid partNumber query parameter (must be >= 1)' });
}
if (isDemoMode() && config.storageType !== 's3') {
logger.debug(`[DEMO /chunk] Chunk for ${uploadId}, part ${partNumber}, size ${chunk?.length || 0}`);
const demoProgress = Math.min(100, (Math.random() * 50) + (partNumber * 10) ); // Simulate increasing progress
const completed = demoProgress >= 100;
if (completed) logger.info(`[DEMO /chunk] Sim completion for ${uploadId}`);
return res.json({ bytesReceived: 0, progress: demoProgress, completed });
}
if (!chunk || chunk.length === 0) {
  logger.warn(`[Route /chunk] Empty chunk for ${uploadId}, part ${partNumber}`);
  return res.status(400).json({ error: 'Empty chunk received' });
}
if (!upload) {
  logger.warn(`Upload not found: ${uploadId}, Batch ID: ${batchId || 'none'}`);
  return res.status(404).json({ error: 'Upload not found' });
}
try {
const result = await storageAdapter.storeChunk(uploadId, chunk, partNumber);
// Update batch activity if batch ID provided
if (batchId && isValidBatchId(batchId)) {
batchActivity.set(batchId, Date.now());
}
if (result.completed) {
logger.info(`[Route /chunk] Part ${partNumber} for ${uploadId} triggered completion. Finalizing...`);
try {
const completionResult = await storageAdapter.completeUpload(uploadId);
logger.success(`[Route /chunk] Finalized upload ${uploadId}. Path/Key: ${completionResult.finalPath}`);
return res.json({ bytesReceived: result.bytesReceived, progress: 100, completed: true });
} catch (completionError) {
logger.error(`[Route /chunk] CRITICAL: Failed to finalize ${uploadId} after part ${partNumber}: ${completionError.message}`, completionError.stack);
return res.status(500).json({ error: 'Upload chunk received, but failed to finalize.', details: config.nodeEnv === 'development' ? completionError.message : undefined });
// Write chunk
await new Promise((resolve, reject) => {
upload.writeStream.write(Buffer.from(req.body), (err) => {
if (err) reject(err);
else resolve();
});
});
upload.bytesReceived += chunkSize;
// Calculate progress, ensuring it doesn't exceed 100%
const progress = Math.min(
Math.round((upload.bytesReceived / upload.fileSize) * 100),
100
);
logger.debug(`Chunk received:
File: ${upload.safeFilename}
Progress: ${progress}%
Bytes Received: ${upload.bytesReceived}/${upload.fileSize}
Chunk Size: ${chunkSize}
Upload ID: ${uploadId}
Batch ID: ${batchId || 'none'}
`);
// Check if upload is complete
if (upload.bytesReceived >= upload.fileSize) {
await new Promise((resolve, reject) => {
upload.writeStream.end((err) => {
if (err) reject(err);
else resolve();
});
});
uploads.delete(uploadId);
// Format completion message based on debug mode
if (process.env.DEBUG === 'true' || process.env.NODE_ENV === 'development') {
logger.success(`Upload completed:
File: ${upload.safeFilename}
Size: ${upload.fileSize}
Upload ID: ${uploadId}
Batch ID: ${batchId || 'none'}
`);
} else {
logger.success(`Upload completed: ${upload.safeFilename} (${upload.fileSize} bytes)`);
}
} else {
res.json({ bytesReceived: result.bytesReceived, progress: result.progress, completed: false });
}
} catch (err) {
logger.error(`[Route /chunk] Chunk upload failed for ${uploadId}, part ${partNumber}: ${err.name} - ${err.message}`, err.stack);
let statusCode = 500;
let clientMessage = 'Failed to process chunk.';
if (err.message.includes('Upload session not found') || err.name === 'NoSuchUpload' || err.code === 'ENOENT' || err.name === 'NotFound' || err.name === 'NoSuchKey') {
statusCode = 404; clientMessage = 'Upload session not found or already completed/aborted.';
} else if (err.name === 'InvalidPart' || err.name === 'InvalidPartOrder') {
statusCode = 400; clientMessage = 'Invalid upload chunk sequence or data.';
} else if (err.name === 'SlowDown' || (err.$metadata && err.$metadata.httpStatusCode === 503) ) {
statusCode = 429; clientMessage = 'Storage provider rate limit exceeded, please try again later.';
} else if (err.code === 'EACCES' || err.code === 'EPERM' ) {
statusCode = 500; clientMessage = 'Storage permission error while writing chunk.';
// Send notification
await sendNotification(upload.safeFilename, upload.fileSize, config);
logUploadState('After Upload Complete');
}
res.status(statusCode).json({ error: clientMessage, details: config.nodeEnv === 'development' ? err.message : undefined });
res.json({
bytesReceived: upload.bytesReceived,
progress
});
} catch (err) {
logger.error(`Chunk upload failed:
Error: ${err.message}
Stack: ${err.stack}
File: ${upload.safeFilename}
Upload ID: ${uploadId}
Batch ID: ${batchId || 'none'}
Bytes Received: ${upload.bytesReceived}/${upload.fileSize}
`);
res.status(500).json({ error: 'Failed to process chunk' });
}
});
// Cancel upload
router.post('/cancel/:uploadId', async (req, res) => {
const { uploadId } = req.params;
const upload = uploads.get(uploadId);
if (isDemoMode() && config.storageType !== 's3') {
logger.info(`[DEMO /cancel] Request for ${uploadId}`);
return res.json({ message: 'Upload cancelled (Demo)' });
}
logger.info(`[Route /cancel] Cancel request for upload: ${uploadId}`);
try {
await storageAdapter.abortUpload(uploadId);
res.json({ message: 'Upload cancelled successfully or was already inactive.' });
} catch (err) {
logger.error(`[Route /cancel] Error during cancellation for ${uploadId}: ${err.name} - ${err.message}`, err.stack);
// Generally, client doesn't need to know if server-side abort failed catastrophically,
// as long as client stops sending. However, if it's a config error, 500 is appropriate.
let statusCode = err.name === 'NoSuchUpload' ? 200 : 500; // If not found, it's like success for client
let clientMessage = err.name === 'NoSuchUpload' ? 'Upload already inactive or not found.' : 'Failed to cancel upload on server.';
if (err.name === 'AccessDenied' || err.name === 'NoSuchBucket') {
clientMessage = 'Storage configuration error during cancel.';
statusCode = 500;
if (upload) {
upload.writeStream.end();
try {
await fs.promises.unlink(upload.filePath);
} catch (err) {
logger.error(`Failed to delete incomplete upload: ${err.message}`);
}
res.status(statusCode).json({ message: clientMessage, details: config.nodeEnv === 'development' ? err.message : undefined });
uploads.delete(uploadId);
uploadToBatch.delete(uploadId);
logger.info(`Upload cancelled: ${upload.safeFilename}`);
}
res.json({ message: 'Upload cancelled' });
});
module.exports = {
  router,
  startBatchCleanup,
  stopBatchCleanup,
  // Exported for testing
  batchActivity,
  BATCH_TIMEOUT
};
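Putting the routes together, a condensed client sketch of the chunked upload protocol exposed above: POST /upload/init with filename and fileSize (optionally an x-batch-id header), then POST /upload/chunk/:uploadId with raw application/octet-stream bodies. The /upload mount path, the 5 MB chunk size, and the presence of the partNumber query parameter and completed flag (used by the adapter-based variant in this diff) are assumptions.

// sketch: browser-side chunked upload client (illustrative)
async function uploadFile(file) {
  // 1. Initialise the upload (zero-byte files are completed by the server at this step)
  const init = await fetch('/upload/init', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ filename: file.name, fileSize: file.size }),
  });
  const { uploadId } = await init.json();

  // 2. Send the file in raw chunks; partNumber starts at 1
  const chunkSize = 5 * 1024 * 1024;
  for (let offset = 0, part = 1; offset < file.size; offset += chunkSize, part++) {
    const res = await fetch(`/upload/chunk/${uploadId}?partNumber=${part}`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/octet-stream' },
      body: file.slice(offset, offset + chunkSize),
    });
    const { progress, completed } = await res.json();
    console.log(`part ${part}: ${progress}%${completed ? ' (done)' : ''}`);
  }
}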


@@ -1,110 +0,0 @@
#!/bin/sh
# Simple entrypoint script to manage user permissions and execute CMD
# Exit immediately if a command exits with a non-zero status.
set -e
# Function to log messages
log_info() {
echo "[INFO] Entrypoint: $1"
}
log_warning() {
echo "[WARN] Entrypoint: $1"
}
log_error() {
echo "[ERROR] Entrypoint: $1" >&2
}
log_info "Starting entrypoint script..."
# Default user/group/umask values
DEFAULT_UID=1000
DEFAULT_GID=1000
DEFAULT_UMASK=022
# Default upload directory if not set by user (should align with Dockerfile/compose)
DEFAULT_UPLOAD_DIR="/usr/src/app/local_uploads"
# Check if PUID or PGID environment variables are set by the user
if [ -z "${PUID}" ] && [ -z "${PGID}" ]; then
# --- Run as Root ---
log_info "PUID/PGID not set, running as root."
# Set umask (use UMASK env var if provided, otherwise default)
CURRENT_UMASK=${UMASK:-$DEFAULT_UMASK}
log_info "Setting umask to ${CURRENT_UMASK}"
umask "${CURRENT_UMASK}"
# Execute the command passed to the entrypoint as root
log_info "Executing command as root: $@"
exec "$@"
else
# --- Run as Custom User (nodeuser with adjusted UID/GID) ---
log_info "PUID/PGID set, configuring user 'nodeuser'..."
# Use provided UID/GID or default if only one is set
CURRENT_UID=${PUID:-$DEFAULT_UID}
CURRENT_GID=${PGID:-$DEFAULT_GID}
CURRENT_UMASK=${UMASK:-$DEFAULT_UMASK}
# Read the upload directory from ENV var or use default
TARGET_UPLOAD_DIR=${UPLOAD_DIR:-$DEFAULT_UPLOAD_DIR}
log_info "Target UID: ${CURRENT_UID}, GID: ${CURRENT_GID}, UMASK: ${CURRENT_UMASK}"
log_info "Target Upload Dir: ${TARGET_UPLOAD_DIR}"
# Check if user/group exists (should exist from Dockerfile)
if ! getent group nodeuser > /dev/null 2>&1; then
log_warning "Group 'nodeuser' not found, creating with GID ${CURRENT_GID}..."
addgroup -g "${CURRENT_GID}" nodeuser
else
EXISTING_GID=$(getent group nodeuser | cut -d: -f3)
if [ "${EXISTING_GID}" != "${CURRENT_GID}" ]; then
log_info "Updating 'nodeuser' group GID from ${EXISTING_GID} to ${CURRENT_GID}..."
groupmod -o -g "${CURRENT_GID}" nodeuser
fi
fi
if ! getent passwd nodeuser > /dev/null 2>&1; then
log_warning "User 'nodeuser' not found, creating with UID ${CURRENT_UID}..."
adduser -u "${CURRENT_UID}" -G nodeuser -s /bin/sh -D nodeuser
else
EXISTING_UID=$(getent passwd nodeuser | cut -d: -f3)
if [ "${EXISTING_UID}" != "${CURRENT_UID}" ]; then
log_info "Updating 'nodeuser' user UID from ${EXISTING_UID} to ${CURRENT_UID}..."
usermod -o -u "${CURRENT_UID}" nodeuser
fi
fi
# Ensure the base application directory ownership is correct
log_info "Ensuring ownership of /usr/src/app..."
chown -R nodeuser:nodeuser /usr/src/app || log_warning "Could not chown /usr/src/app"
# Ensure the target upload directory exists and has correct ownership
if [ -n "${TARGET_UPLOAD_DIR}" ]; then
if [ ! -d "${TARGET_UPLOAD_DIR}" ]; then
log_info "Creating directory: ${TARGET_UPLOAD_DIR}"
# Use -p to create parent directories as needed
mkdir -p "${TARGET_UPLOAD_DIR}"
# Chown after creation
chown nodeuser:nodeuser "${TARGET_UPLOAD_DIR}" || log_warning "Could not chown ${TARGET_UPLOAD_DIR}"
else
# Directory exists, ensure ownership
log_info "Ensuring ownership of ${TARGET_UPLOAD_DIR}..."
chown -R nodeuser:nodeuser "${TARGET_UPLOAD_DIR}" || log_warning "Could not chown ${TARGET_UPLOAD_DIR}"
fi
else
log_warning "UPLOAD_DIR variable is not set or is empty, skipping ownership check for upload directory."
fi
# Set the umask
log_info "Setting umask to ${CURRENT_UMASK}"
umask "${CURRENT_UMASK}"
# Execute the command passed to the entrypoint using su-exec to drop privileges
log_info "Executing command as nodeuser (${CURRENT_UID}:${CURRENT_GID}): $@"
exec su-exec nodeuser "$@"
fi
log_info "Entrypoint script finished (should not reach here if exec worked)."


@@ -1,60 +0,0 @@
const fs = require("fs");
const path = require("path");
const PUBLIC_DIR = path.join(__dirname, "..", "..", "public");
function getFiles(dir, basePath = "/") {
let fileList = [];
const files = fs.readdirSync(dir);
files.forEach((file) => {
const filePath = path.join(dir, file);
const fileUrl = path.join(basePath, file).replace(/\\/g, "/");
if (fs.statSync(filePath).isDirectory()) {
fileList = fileList.concat(getFiles(filePath, fileUrl));
} else {
fileList.push(fileUrl);
}
});
return fileList;
}
function generateAssetManifest() {
const assets = getFiles(PUBLIC_DIR);
fs.writeFileSync(path.join(PUBLIC_DIR, "asset-manifest.json"), JSON.stringify(assets, null, 2));
console.log("Asset manifest generated!", assets);
}
function generatePWAManifest() {
generateAssetManifest(); // fetched later in service-worker
const siteTitle = process.env.DUMBDROP_TITLE || process.env.SITE_TITLE || "DumbDrop";
const pwaManifest = {
name: siteTitle,
short_name: siteTitle,
description: "A simple file upload application",
start_url: "/",
display: "standalone",
background_color: "#ffffff",
theme_color: "#000000",
icons: [
{
src: "/assets/icon.png",
type: "image/png",
sizes: "192x192"
},
{
src: "/assets/icon.png",
type: "image/png",
sizes: "512x512"
}
],
orientation: "any"
};
fs.writeFileSync(path.join(PUBLIC_DIR, "manifest.json"), JSON.stringify(pwaManifest, null, 2));
console.log("PWA manifest generated!", pwaManifest);
}
module.exports = { generatePWAManifest };
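The asset manifest written above is, per the comment, fetched later by the service worker. A hypothetical sketch of what that consumption could look like; the service-worker file and cache name are assumptions, not part of this repository.

// sketch: hypothetical service worker pre-caching from asset-manifest.json
self.addEventListener('install', (event) => {
  event.waitUntil(
    (async () => {
      const assets = await (await fetch('/asset-manifest.json')).json(); // array of URL paths
      const cache = await caches.open('dumbdrop-static-v1');             // cache name is illustrative
      await cache.addAll(assets);
    })()
  );
});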


@@ -1,110 +1,116 @@
/**
* Server entry point that starts the HTTP server and manages connections.
* Handles graceful shutdown, connection tracking, and server initialization.
* Provides development mode directory listing functionality.
*/
const { app, initialize, config } = require('./app'); // config is also exported from app.js
const logger = require('./utils/logger');
const fs = require('fs'); // Used for local dev directory listing
const { executeCleanup } = require('./utils/cleanup');
const { generatePWAManifest } = require('./scripts/pwa-manifest-generator');
// Track open connections
const connections = new Set();
/**
* Start the server and initialize the application
* @returns {Promise<http.Server>} The HTTP server instance
*/
async function startServer() {
try {
await initialize(); // This will call validateConfig and load storage adapter via app.js
// Initialize the application
await initialize();
// Start the server
const server = app.listen(config.port, () => {
logger.info(`Server running at ${config.baseUrl}`);
// ** MODIFIED LOGGING **
logger.info(`Active Storage Type: ${config.storageType}`);
logger.info(`Data Directory (for uploads or metadata): ${config.uploadDir}`);
logger.info(`Upload directory: ${config.uploadDisplayPath}`);
// List local upload directory contents in development
if (config.nodeEnv === 'development' && config.storageType === 'local') {
  try {
    if (fs.existsSync(config.uploadDir)) {
      const files = fs.readdirSync(config.uploadDir);
      logger.info(`Current local upload directory contents (${files.length} files):`);
      files.forEach(file => logger.info(`- ${file}`));
    } else {
      logger.warn(`Local upload directory ${config.uploadDir} does not exist for listing.`);
    }
  } catch (err) {
    logger.error(`Failed to list local upload directory contents: ${err.message}`);
  }
}
});
generatePWAManifest();
// Track new connections
server.on('connection', (connection) => {
connections.add(connection);
connection.on('close', () => connections.delete(connection));
connection.on('close', () => {
connections.delete(connection);
});
});
let isShuttingDown = false;
// Shutdown handler function
const shutdownHandler = async (signal) => {
if (isShuttingDown) return;
isShuttingDown = true;
logger.info(`${signal} received. Shutting down gracefully...`);
// Start a force shutdown timer in case graceful shutdown stalls
const forceShutdownTimer = setTimeout(() => {
  logger.error('Force shutdown due to timeout.');
  process.exit(1);
}, 5000);
try {
  // 1. Stop accepting new connections immediately
  server.closeIdleConnections?.(); // Node 18+
  server.unref();
  // 2. Close all existing connections, waiting at most 2 seconds
  const closePromises = Array.from(connections).map(conn => new Promise(resolve => {
    conn.on('close', resolve); // Ensure the close event resolves the promise
    conn.destroy(); // Actively destroy connections
  }));
  await Promise.race([
    Promise.all(closePromises),
    new Promise(resolve => setTimeout(resolve, 2000))
  ]);
  connections.clear();
  // 3. Close the server
  await new Promise((resolve, reject) => {
    server.close((err) => {
      if (err) return reject(err);
      logger.info('Server closed.');
      resolve();
    });
  });
  // 4. Run cleanup tasks with a timeout
  await executeCleanup(1500); // Max 1.5s for cleanup
  // Clear the force shutdown timer since we completed gracefully
  clearTimeout(forceShutdownTimer);
  logger.info('Shutdown complete.');
  process.exit(0);
} catch (error) {
  clearTimeout(forceShutdownTimer); // Clear timer on error too
  logger.error(`Error during shutdown: ${error.message}`);
  process.exit(1);
}
};
// Handle both SIGTERM and SIGINT
process.on('SIGTERM', () => shutdownHandler('SIGTERM'));
process.on('SIGINT', () => shutdownHandler('SIGINT'));
return server;
} catch (error) {
logger.error('Failed to start server:', error);
// Ensure process exits if startServer itself fails before listener setup
process.exitCode = 1;
throw error;
}
}
// Only start the server if this file is run directly
if (require.main === module) {
startServer().catch((error) => {
// Error already logged by startServer
// process.exitCode is already set if startServer throws
logger.error('Server failed to start:', error);
process.exitCode = 1;
throw error;
});
}


@@ -4,10 +4,13 @@
* Handles message formatting and notification delivery.
*/
const { spawn } = require('child_process');
const { formatFileSize, calculateDirectorySize, sanitizeFilename } = require('../utils/fileUtils');
const logger = require('../utils/logger');
/**
* Send a notification using Apprise
* @param {string} filename - Name of uploaded file
@@ -16,56 +19,34 @@ const logger = require('../utils/logger');
* @returns {Promise<void>}
*/
async function sendNotification(filename, fileSize, config) {
const { appriseUrl, appriseMessage, appriseSizeUnit, uploadDir } = config;
const { APPRISE_URL, APPRISE_MESSAGE, APPRISE_SIZE_UNIT, uploadDir } = config;
console.debug("NOTIFICATIONS CONFIG:", filename, fileSize, config);
if (!appriseUrl) {
return;
}
if (!APPRISE_URL) {
return;
}
try {
const formattedSize = formatFileSize(fileSize, appriseSizeUnit);
const dirSize = await calculateDirectorySize(uploadDir);
const totalStorage = formatFileSize(dirSize);
try {
const formattedSize = formatFileSize(fileSize, APPRISE_SIZE_UNIT);
const dirSize = await calculateDirectorySize(uploadDir);
const totalStorage = formatFileSize(dirSize);
// Sanitize the filename to remove any special characters that could cause issues
const sanitizedFilename = sanitizeFilename(filename); // apply sanitization of filename again (in case)
// Sanitize the message components
const sanitizedFilename = JSON.stringify(filename).slice(1, -1);
const message = APPRISE_MESSAGE
.replace('{filename}', sanitizedFilename)
.replace('{size}', formattedSize)
.replace('{storage}', totalStorage);
// Construct the notification message by replacing placeholders
const message = appriseMessage
.replace('{filename}', sanitizedFilename)
.replace('{size}', formattedSize)
.replace('{storage}', totalStorage);
// Use string command for better escaping
const command = `apprise ${APPRISE_URL} -b "${message}"`;
await execAsync(command, { shell: true });
await new Promise((resolve, reject) => {
const appriseProcess = spawn('apprise', [appriseUrl, '-b', message]);
appriseProcess.stdout.on('data', (data) => {
logger.info(`Apprise Output: ${data.toString().trim()}`);
});
appriseProcess.stderr.on('data', (data) => {
logger.error(`Apprise Error: ${data.toString().trim()}`);
});
appriseProcess.on('close', (code) => {
if (code === 0) {
logger.info(`Notification sent for: ${sanitizedFilename} (${formattedSize}, Total storage: ${totalStorage})`);
resolve();
} else {
reject(new Error(`Apprise process exited with code ${code}`));
}
});
appriseProcess.on('error', (err) => {
reject(new Error(`Apprise process failed to start: ${err.message}`));
});
});
} catch (err) {
logger.error(`Failed to send notification: ${err.message}`);
}
logger.info(`Notification sent for: ${sanitizedFilename} (${formattedSize}, Total storage: ${totalStorage})`);
} catch (err) {
logger.error(`Failed to send notification: ${err.message}`);
}
}
module.exports = {
sendNotification,
sendNotification
};
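A hedged sketch of calling this helper from an upload route; the config property names mirror the destructuring in sendNotification above, and the wrapper function is an assumption, not repo code:
// Fire-and-forget notification once an upload finishes (illustrative only).
const { sendNotification } = require('../services/notifications');
async function notifyUploadFinished(filename, sizeBytes, config) {
// sendNotification resolves even on failure (errors are logged internally),
// so callers do not need their own try/catch here.
await sendNotification(filename, sizeBytes, config);
}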

View File

@@ -1,59 +0,0 @@
/**
* Storage Adapter Factory
* Reads the application configuration and exports the appropriate storage adapter
* (either local or S3) based on the STORAGE_TYPE environment variable.
* This provides a single point of import for storage operations throughout the app.
*/
const { config } = require('../config'); // Assuming config is initialized before this runs
const logger = require('../utils/logger');
let storageAdapter;
logger.info(`Initializing storage adapter based on STORAGE_TYPE: "${config.storageType}"`);
if (config.isDemoMode) {
logger.warn('[Storage] DEMO MODE ENABLED. Using mock storage adapter.');
// A dedicated mock adapter (e.g. demoAdapter.js) would be the cleaner long-term option:
// storageAdapter = require('./demoAdapter'); // would require creating demoAdapter.js
// For now, demo mode falls back to the local adapter, whose writes are non-persistent in this mode.
storageAdapter = require('./localAdapter');
logger.info('[Storage] Using Local Adapter for Demo Mode (operations will be mocked or non-persistent).');
} else if (config.storageType === 's3') {
logger.info('[Storage] Using S3 Storage Adapter.');
try {
storageAdapter = require('./s3Adapter');
} catch (error) {
logger.error(`[Storage] Failed to load S3 Adapter: ${error.message}`);
logger.error('[Storage] Check S3 configuration environment variables and AWS SDK installation.');
process.exit(1); // Exit if the configured adapter fails to load
}
} else {
// Default to local storage if type is 'local' or invalid/not specified
if (config.storageType !== 'local') {
logger.warn(`[Storage] Invalid or unspecified STORAGE_TYPE "${config.storageType}", defaulting to "local".`);
}
logger.info('[Storage] Using Local Storage Adapter.');
try {
storageAdapter = require('./localAdapter');
} catch (error) {
logger.error(`[Storage] Failed to load Local Adapter: ${error.message}`);
process.exit(1); // Exit if the default adapter fails
}
}
// Ensure the selected adapter is valid before exporting
if (!storageAdapter || typeof storageAdapter.initUpload !== 'function') {
logger.error('[Storage] Failed to initialize a valid storage adapter. Exiting.');
process.exit(1);
}
logger.success(`[Storage] Storage adapter "${config.storageType}" initialized successfully.`);
module.exports = { storageAdapter };
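A short usage sketch for the factory above; the route path and request shape are assumptions, but the adapter methods match the interface checked before export:
// Routes import the already-selected adapter and stay storage-agnostic.
const { storageAdapter } = require('../storage');
async function handleInitUpload(req, res) {
const { filename, fileSize, batchId } = req.body; // request shape is illustrative
const { uploadId } = await storageAdapter.initUpload(filename, fileSize, batchId);
res.json({ uploadId });
}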

View File

@@ -1,641 +0,0 @@
/**
* Local Storage Adapter
* Handles file operations for storing files on the local filesystem.
* Implements the storage interface expected by the application routes.
*/
const fs = require('fs').promises;
const fsSync = require('fs'); // For synchronous checks like existsSync
const path = require('path');
const crypto = require('crypto');
const { config } = require('../config');
const logger = require('../utils/logger');
const {
getUniqueFolderPath,
sanitizePathPreserveDirs,
isValidBatchId,
formatFileSize // Keep formatFileSize accessible if needed by notifications later
} = require('../utils/fileUtils');
const { sendNotification } = require('../services/notifications'); // Needed for completion
const METADATA_DIR = path.join(config.uploadDir, '.metadata');
const UPLOAD_TIMEOUT = 30 * 60 * 1000; // 30 minutes timeout for stale uploads
// --- In-Memory Maps (Session-level optimizations) ---
// Store folder name mappings for batch uploads (avoids FS lookups during session)
// NOTE: This state is specific to this adapter instance and might not scale across multiple server instances.
const folderMappings = new Map();
// Store batch activity timestamps (for cleaning up stale batches/folder mappings)
const batchActivity = new Map();
const BATCH_TIMEOUT = 30 * 60 * 1000; // 30 minutes for batch/folderMapping cleanup
// --- Metadata Helper Functions (Copied and adapted from original upload.js) ---
/**
* Ensures the metadata directory exists.
* Should be called once during adapter initialization or before first use.
*/
async function ensureMetadataDirExists() {
try {
if (!fsSync.existsSync(METADATA_DIR)) {
await fs.mkdir(METADATA_DIR, { recursive: true });
logger.info(`[Local Adapter] Created metadata directory: ${METADATA_DIR}`);
}
// Check writability
await fs.access(METADATA_DIR, fsSync.constants.W_OK);
} catch (err) {
logger.error(`[Local Adapter] Metadata directory error (${METADATA_DIR}): ${err.message}`);
throw new Error(`Failed to access or create metadata directory: ${METADATA_DIR}`);
}
}
async function readUploadMetadata(uploadId) {
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
logger.warn(`[Local Adapter] Attempted to read metadata with invalid uploadId: ${uploadId}`);
return null;
}
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
try {
const data = await fs.readFile(metaFilePath, 'utf8');
return JSON.parse(data);
} catch (err) {
if (err.code === 'ENOENT') {
return null; // Metadata file doesn't exist
}
logger.error(`[Local Adapter] Error reading metadata for ${uploadId}: ${err.message}`);
throw err; // Rethrow other errors
}
}
async function writeUploadMetadata(uploadId, metadata) {
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
logger.error(`[Local Adapter] Attempted to write metadata with invalid uploadId: ${uploadId}`);
return;
}
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
metadata.lastActivity = Date.now(); // Update timestamp on every write
const tempMetaPath = `${metaFilePath}.${crypto.randomBytes(4).toString('hex')}.tmp`; // declared before try so the catch block can clean it up
try {
await fs.writeFile(tempMetaPath, JSON.stringify(metadata, null, 2));
await fs.rename(tempMetaPath, metaFilePath);
} catch (err) {
logger.error(`[Local Adapter] Error writing metadata for ${uploadId}: ${err.message}`);
try { await fs.unlink(tempMetaPath); } catch (unlinkErr) {/* ignore */}
throw err;
}
}
async function deleteUploadMetadata(uploadId) {
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
logger.warn(`[Local Adapter] Attempted to delete metadata with invalid uploadId: ${uploadId}`);
return;
}
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
try {
await fs.unlink(metaFilePath);
logger.debug(`[Local Adapter] Deleted metadata file: ${uploadId}.meta`);
} catch (err) {
if (err.code !== 'ENOENT') { // Ignore if already deleted
logger.error(`[Local Adapter] Error deleting metadata file ${uploadId}.meta: ${err.message}`);
}
}
}
// --- Batch Cleanup (In-memory session state cleanup) ---
// This logic remains relevant for the in-memory folderMappings if used across batches.
let batchCleanupInterval;
function startBatchCleanup() {
if (batchCleanupInterval) clearInterval(batchCleanupInterval);
batchCleanupInterval = setInterval(() => {
const now = Date.now();
logger.info(`[Local Adapter] Running batch session cleanup, checking ${batchActivity.size} active sessions`);
let cleanedCount = 0;
for (const [batchId, lastActivity] of batchActivity.entries()) {
if (now - lastActivity >= BATCH_TIMEOUT) {
logger.info(`[Local Adapter] Cleaning up inactive batch session: ${batchId}`);
batchActivity.delete(batchId);
// Clean up associated folder mappings
for (const key of folderMappings.keys()) {
if (key.endsWith(`-${batchId}`)) {
folderMappings.delete(key);
}
}
cleanedCount++;
}
}
if (cleanedCount > 0) logger.info(`[Local Adapter] Cleaned up ${cleanedCount} inactive batch sessions.`);
}, 5 * 60 * 1000); // Check every 5 minutes
batchCleanupInterval.unref();
}
// Ensure metadata dir exists before starting cleanup or other ops
ensureMetadataDirExists().then(() => {
logger.info('[Local Adapter] Initialized.');
// Start batch cleanup only after ensuring dir exists
if (!process.env.DISABLE_BATCH_CLEANUP) {
startBatchCleanup();
}
}).catch(err => {
logger.error(`[Local Adapter] Initialization failed: ${err.message}`);
// Potentially exit or prevent server start if metadata dir is critical
process.exit(1);
});
// --- Interface Implementation ---
/**
* Initializes an upload session.
* @param {string} filename - Original filename/path from client.
* @param {number} fileSize - Total size of the file.
* @param {string} clientBatchId - Optional batch ID from client.
* @returns {Promise<{uploadId: string}>} Object containing the application's upload ID.
*/
async function initUpload(filename, fileSize, clientBatchId) {
await ensureMetadataDirExists(); // Ensure it exists before proceeding
const size = Number(fileSize);
// Basic validations moved to route handler, assume valid inputs here
const batchId = clientBatchId || `${Date.now()}-${crypto.randomBytes(5).toString('hex').substring(0, 9)}`; // 9 chars so generated IDs satisfy isValidBatchId
if (clientBatchId && !isValidBatchId(batchId)) {
throw new Error('Invalid batch ID format'); // Throw error for route handler
}
batchActivity.set(batchId, Date.now()); // Track batch session activity
// --- Path handling and Sanitization ---
const sanitizedFilename = sanitizePathPreserveDirs(filename);
const safeFilename = path.normalize(sanitizedFilename)
.replace(/^(\.\.(\/|\\|$))+/, '')
.replace(/\\/g, '/')
.replace(/^\/+/, '');
logger.info(`[Local Adapter] Init request for: ${safeFilename}`);
// --- Determine Paths & Handle Folders ---
const uploadId = crypto.randomBytes(16).toString('hex');
let finalFilePath = path.resolve(config.uploadDir, safeFilename); // Use resolve for absolute path
const pathParts = safeFilename.split('/').filter(Boolean);
if (pathParts.length > 1) {
const originalFolderName = pathParts[0];
const folderMapKey = `${originalFolderName}-${batchId}`;
let newFolderName = folderMappings.get(folderMapKey);
const relativeFolderPath = newFolderName || originalFolderName; // Folder name relative to uploadDir
if (!newFolderName) {
const baseFolderPath = path.resolve(config.uploadDir, relativeFolderPath);
await fs.mkdir(path.dirname(baseFolderPath), { recursive: true }); // Ensure parent of potential new folder exists
try {
await fs.mkdir(baseFolderPath, { recursive: false }); // Try creating the original/mapped name
newFolderName = originalFolderName; // Success, use original
} catch (err) {
if (err.code === 'EEXIST') {
// Folder exists, generate a unique name for this batch
const uniqueFolderPath = await getUniqueFolderPath(baseFolderPath); // Pass absolute path
newFolderName = path.basename(uniqueFolderPath); // Get only the unique folder name part
logger.info(`[Local Adapter] Folder "${originalFolderName}" exists or conflict, using unique "${newFolderName}" for batch ${batchId}`);
// No need to mkdir again, getUniqueFolderPath created it.
} else {
logger.error(`[Local Adapter] Error creating directory ${baseFolderPath}: ${err.message}`);
throw err; // Re-throw other errors
}
}
folderMappings.set(folderMapKey, newFolderName); // Store mapping for this batch
}
// Reconstruct the final path using the potentially unique folder name
pathParts[0] = newFolderName;
finalFilePath = path.resolve(config.uploadDir, ...pathParts);
// Ensure the immediate parent directory for the file exists
await fs.mkdir(path.dirname(finalFilePath), { recursive: true });
} else {
// Ensure base upload dir exists (already done by ensureLocalUploadDirExists, but safe to repeat)
await fs.mkdir(config.uploadDir, { recursive: true });
}
// --- Check Final Path Collision & Get Unique Name if Needed ---
// Check if the *final* destination exists (not the partial)
let checkPath = finalFilePath;
let counter = 1;
while (fsSync.existsSync(checkPath)) {
logger.warn(`[Local Adapter] Final destination file already exists: ${checkPath}. Generating unique name.`);
const dir = path.dirname(finalFilePath);
const ext = path.extname(finalFilePath);
const baseName = path.basename(finalFilePath, ext);
checkPath = path.resolve(dir, `${baseName} (${counter})${ext}`); // Use resolve
counter++;
}
if (checkPath !== finalFilePath) {
logger.info(`[Local Adapter] Using unique final path: ${checkPath}`);
finalFilePath = checkPath;
// If path changed, ensure directory exists again (might be needed if baseName contained '/')
await fs.mkdir(path.dirname(finalFilePath), { recursive: true });
}
const partialFilePath = finalFilePath + '.partial';
// --- Create and Persist Metadata ---
const metadata = {
uploadId,
originalFilename: safeFilename, // Store the path as received by client
filePath: finalFilePath, // The final, possibly unique, path
partialFilePath,
fileSize: size,
bytesReceived: 0,
batchId,
createdAt: Date.now(),
lastActivity: Date.now()
};
await writeUploadMetadata(uploadId, metadata);
logger.info(`[Local Adapter] Initialized upload: ${uploadId} for ${safeFilename} -> ${finalFilePath}`);
// --- Handle Zero-Byte Files ---
if (size === 0) {
try {
await fs.writeFile(finalFilePath, ''); // Create the empty file directly
logger.success(`[Local Adapter] Completed zero-byte file: ${metadata.originalFilename} as ${finalFilePath}`);
await deleteUploadMetadata(uploadId); // Clean up metadata
sendNotification(metadata.originalFilename, 0, config); // Send notification
} catch (writeErr) {
logger.error(`[Local Adapter] Failed to create zero-byte file ${finalFilePath}: ${writeErr.message}`);
await deleteUploadMetadata(uploadId).catch(() => {}); // Attempt cleanup
throw writeErr; // Let the route handler catch it
}
}
return { uploadId };
}
/**
* Stores a chunk of data for a given uploadId.
* @param {string} uploadId - The application's upload ID.
* @param {Buffer} chunk - The data chunk to store.
* @returns {Promise<{bytesReceived: number, progress: number, completed: boolean}>} Upload status.
*/
async function storeChunk(uploadId, chunk) {
const chunkSize = chunk.length;
if (!chunkSize) {
throw new Error('Empty chunk received');
}
const metadata = await readUploadMetadata(uploadId);
if (!metadata) {
// The upload may have completed just before this chunk arrived, but without the
// metadata we cannot reliably check the final path. Returning a distinct error
// or status code might be better here.
logger.warn(`[Local Adapter] Metadata not found for chunk: ${uploadId}. Upload might be complete or cancelled.`);
throw new Error('Upload session not found or already completed'); // Let route handler return 404
}
// Update batch activity
if (metadata.batchId && isValidBatchId(metadata.batchId)) {
batchActivity.set(metadata.batchId, Date.now());
}
// --- Sanity Checks ---
if (metadata.bytesReceived >= metadata.fileSize) {
logger.warn(`[Local Adapter] Received chunk for already completed upload ${uploadId}. Finalizing again.`);
// Attempt to finalize just in case, then return completed status
await completeUpload(uploadId); // This handles metadata deletion etc.
return { bytesReceived: metadata.fileSize, progress: 100, completed: true };
}
let chunkToWrite = chunk;
let actualChunkSize = chunkSize;
// Prevent writing beyond expected file size
if (metadata.bytesReceived + chunkSize > metadata.fileSize) {
logger.warn(`[Local Adapter] Chunk for ${uploadId} exceeds expected size. Truncating.`);
const bytesToWrite = metadata.fileSize - metadata.bytesReceived;
chunkToWrite = chunk.slice(0, bytesToWrite);
actualChunkSize = chunkToWrite.length;
if (actualChunkSize <= 0) {
logger.info(`[Local Adapter] Upload ${uploadId} already has expected bytes. Skipping write.`);
metadata.bytesReceived = metadata.fileSize; // Correct state for completion check
}
}
// --- Write Chunk (Append Mode) ---
if (actualChunkSize > 0) {
try {
await fs.appendFile(metadata.partialFilePath, chunkToWrite);
metadata.bytesReceived += actualChunkSize;
} catch (writeErr) {
logger.error(`[Local Adapter] Failed to write chunk for ${uploadId} to ${metadata.partialFilePath}: ${writeErr.message}`);
throw new Error(`Failed to write chunk for ${uploadId}: ${writeErr.code}`); // Propagate error
}
}
// --- Update State ---
const progress = metadata.fileSize === 0 ? 100 :
Math.min(Math.round((metadata.bytesReceived / metadata.fileSize) * 100), 100);
logger.debug(`[Local Adapter] Chunk written for ${uploadId}: ${metadata.bytesReceived}/${metadata.fileSize} (${progress}%)`);
// Persist updated metadata *before* final completion check
await writeUploadMetadata(uploadId, metadata);
// --- Check for Completion ---
const completed = metadata.bytesReceived >= metadata.fileSize;
if (completed) {
// Don't call completeUpload here, let the route handler do it
// after sending the final progress response back to the client.
logger.info(`[Local Adapter] Upload ${uploadId} ready for completion (${metadata.bytesReceived} bytes).`);
}
return { bytesReceived: metadata.bytesReceived, progress, completed };
}
/**
* Finalizes a completed upload.
* @param {string} uploadId - The application's upload ID.
* @returns {Promise<{filename: string, size: number}>} Details of the completed file.
*/
async function completeUpload(uploadId) {
const metadata = await readUploadMetadata(uploadId);
if (!metadata) {
// The upload may have been finalized by a concurrent request, but without the
// metadata we cannot verify the final path, filename, or size.
logger.warn(`[Local Adapter] completeUpload called for ${uploadId}, but metadata is missing. Assuming already completed.`);
// Throw, since the calling route expects the filename and size from metadata.
throw new Error('Upload completion failed: Metadata not found');
}
// Ensure we have received all bytes (redundant check, but safe)
if (metadata.bytesReceived < metadata.fileSize) {
logger.error(`[Local Adapter] Attempted to complete upload ${uploadId} prematurely. Received ${metadata.bytesReceived}/${metadata.fileSize} bytes.`);
throw new Error('Cannot complete upload: Not all bytes received.');
}
try {
// Ensure partial file exists before rename
await fs.access(metadata.partialFilePath);
await fs.rename(metadata.partialFilePath, metadata.filePath);
logger.success(`[Local Adapter] Finalized: ${metadata.originalFilename} as ${metadata.filePath} (${metadata.fileSize} bytes)`);
// Clean up metadata AFTER successful rename
await deleteUploadMetadata(uploadId);
// Send notification
sendNotification(metadata.originalFilename, metadata.fileSize, config);
return { filename: metadata.originalFilename, size: metadata.fileSize, finalPath: metadata.filePath };
} catch (renameErr) {
if (renameErr.code === 'ENOENT') {
// Partial file missing. Maybe completed by another request? Check final file.
try {
await fs.access(metadata.filePath);
logger.warn(`[Local Adapter] Partial file ${metadata.partialFilePath} missing for ${uploadId}, but final file ${metadata.filePath} exists. Assuming already finalized.`);
await deleteUploadMetadata(uploadId).catch(()=>{}); // Cleanup metadata anyway
return { filename: metadata.originalFilename, size: metadata.fileSize, finalPath: metadata.filePath };
} catch (finalAccessErr) {
logger.error(`[Local Adapter] CRITICAL: Partial file ${metadata.partialFilePath} missing and final file ${metadata.filePath} not found during completion of ${uploadId}.`);
await deleteUploadMetadata(uploadId).catch(()=>{}); // Cleanup metadata to prevent retries
throw new Error(`Completion failed: Partial file missing and final file not found.`);
}
} else {
logger.error(`[Local Adapter] CRITICAL: Failed to rename ${metadata.partialFilePath} to ${metadata.filePath}: ${renameErr.message}`);
// Keep metadata and partial file for potential manual recovery.
throw renameErr; // Propagate the error
}
}
}
/**
* Aborts an ongoing upload.
* @param {string} uploadId - The application's upload ID.
* @returns {Promise<void>}
*/
async function abortUpload(uploadId) {
const metadata = await readUploadMetadata(uploadId);
if (!metadata) {
logger.warn(`[Local Adapter] Abort request for non-existent or completed upload: ${uploadId}`);
return; // Nothing to abort
}
// Delete partial file first
try {
await fs.unlink(metadata.partialFilePath);
logger.info(`[Local Adapter] Deleted partial file on cancellation: ${metadata.partialFilePath}`);
} catch (unlinkErr) {
if (unlinkErr.code !== 'ENOENT') { // Ignore if already gone
logger.error(`[Local Adapter] Failed to delete partial file ${metadata.partialFilePath} on cancel: ${unlinkErr.message}`);
// Continue to delete metadata anyway
}
}
// Then delete metadata file
await deleteUploadMetadata(uploadId);
logger.info(`[Local Adapter] Upload cancelled and cleaned up: ${uploadId} (${metadata.originalFilename})`);
}
/**
* Lists files in the upload directory.
* @returns {Promise<Array<{filename: string, size: number, formattedSize: string, uploadDate: Date}>>} List of files.
*/
async function listFiles() {
let entries = [];
try {
entries = await fs.readdir(config.uploadDir, { withFileTypes: true });
} catch (err) {
if (err.code === 'ENOENT') {
logger.warn('[Local Adapter] Upload directory does not exist for listing.');
return []; // Return empty list if dir doesn't exist
}
logger.error(`[Local Adapter] Failed to read upload directory: ${err.message}`);
throw err; // Re-throw other errors
}
const fileDetails = [];
for (const entry of entries) {
// Skip directories and the special metadata directory/files within it
if (!entry.isFile() || entry.name === '.metadata' || entry.name.endsWith('.partial') || entry.name.endsWith('.meta') || entry.name.endsWith('.tmp')) {
continue;
}
try {
const filePath = path.join(config.uploadDir, entry.name);
const stats = await fs.stat(filePath);
fileDetails.push({
filename: entry.name, // Use the actual filename on disk
size: stats.size,
formattedSize: formatFileSize(stats.size), // Use fileUtils helper
uploadDate: stats.mtime // Use modification time as upload date
});
} catch (statErr) {
// Handle case where file might be deleted between readdir and stat
if (statErr.code !== 'ENOENT') {
logger.error(`[Local Adapter] Failed to get stats for file ${entry.name}: ${statErr.message}`);
}
// Skip this file if stat fails
}
}
// Sort by date, newest first
fileDetails.sort((a, b) => b.uploadDate.getTime() - a.uploadDate.getTime());
return fileDetails;
}
/**
* Gets information needed to download a file.
* For local storage, this is the file path.
* @param {string} filename - The name of the file to download.
* @returns {Promise<{type: string, value: string}>} Object indicating type ('path') and value (the full file path).
*/
async function getDownloadUrlOrStream(filename) {
// IMPORTANT: Sanitize filename input to prevent directory traversal
const safeBaseName = path.basename(filename);
if (safeBaseName !== filename || filename.includes('..')) {
logger.error(`[Local Adapter] Invalid filename detected for download: ${filename}`);
throw new Error('Invalid filename');
}
const filePath = path.resolve(config.uploadDir, safeBaseName); // Use resolve for security
try {
await fs.access(filePath, fsSync.constants.R_OK); // Check existence and readability
return { type: 'path', value: filePath };
} catch (err) {
if (err.code === 'ENOENT') {
logger.warn(`[Local Adapter] Download request for non-existent file: ${filePath}`);
throw new Error('File not found'); // Specific error for 404 handling
} else if (err.code === 'EACCES') {
logger.error(`[Local Adapter] Permission denied trying to access file for download: ${filePath}`);
throw new Error('Permission denied');
} else {
logger.error(`[Local Adapter] Error accessing file for download ${filePath}: ${err.message}`);
throw err; // Re-throw other errors
}
}
}
/**
* Deletes a file from the local storage.
* @param {string} filename - The name of the file to delete.
* @returns {Promise<void>}
*/
async function deleteFile(filename) {
// IMPORTANT: Sanitize filename input
const safeBaseName = path.basename(filename);
if (safeBaseName !== filename || filename.includes('..')) {
logger.error(`[Local Adapter] Invalid filename detected for delete: ${filename}`);
throw new Error('Invalid filename');
}
const filePath = path.resolve(config.uploadDir, safeBaseName);
try {
await fs.unlink(filePath);
logger.info(`[Local Adapter] Deleted file: ${filePath}`);
} catch (err) {
if (err.code === 'ENOENT') {
logger.warn(`[Local Adapter] Delete request for non-existent file: ${filePath}`);
throw new Error('File not found'); // Specific error for 404
} else {
logger.error(`[Local Adapter] Failed to delete file ${filePath}: ${err.message}`);
throw err; // Re-throw other errors
}
}
}
/**
* Cleans up stale resources (incomplete uploads based on metadata).
* @returns {Promise<void>}
*/
async function cleanupStale() {
logger.info('[Local Adapter] Running cleanup for stale metadata/partial uploads...');
let cleanedCount = 0;
let checkedCount = 0;
try {
// Ensure metadata directory exists before trying to read it
await ensureMetadataDirExists(); // Re-check just in case
const files = await fs.readdir(METADATA_DIR);
const now = Date.now();
for (const file of files) {
if (file.endsWith('.meta')) {
checkedCount++;
const uploadId = file.replace('.meta', '');
const metaFilePath = path.join(METADATA_DIR, file);
let metadata;
try {
const data = await fs.readFile(metaFilePath, 'utf8');
metadata = JSON.parse(data);
// Check inactivity
if (now - (metadata.lastActivity || metadata.createdAt || 0) > UPLOAD_TIMEOUT) {
logger.warn(`[Local Adapter] Found stale metadata: ${file}. Last activity: ${new Date(metadata.lastActivity || metadata.createdAt)}`);
// Attempt to delete partial file
if (metadata.partialFilePath) {
try {
await fs.unlink(metadata.partialFilePath);
logger.info(`[Local Adapter] Deleted stale partial file: ${metadata.partialFilePath}`);
} catch (unlinkPartialErr) {
if (unlinkPartialErr.code !== 'ENOENT') {
logger.error(`[Local Adapter] Failed to delete stale partial ${metadata.partialFilePath}: ${unlinkPartialErr.message}`);
}
}
}
// Attempt to delete metadata file
await deleteUploadMetadata(uploadId); // Use helper
cleanedCount++;
}
} catch (readErr) {
logger.error(`[Local Adapter] Error reading/parsing ${metaFilePath} during cleanup: ${readErr.message}. Skipping.`);
// Delete the unparsable metadata file so it does not block future cleanup runs
await fs.unlink(metaFilePath).catch(()=>{ logger.warn(`[Local Adapter] Failed to delete potentially corrupt metadata file: ${metaFilePath}`) });
}
} else if (file.endsWith('.tmp')) {
// Clean up potential leftover temp metadata files
const tempMetaPath = path.join(METADATA_DIR, file);
try {
const stats = await fs.stat(tempMetaPath);
// Use a shorter timeout for temp files? e.g., UPLOAD_TIMEOUT / 2
if (now - stats.mtime.getTime() > UPLOAD_TIMEOUT) {
logger.warn(`[Local Adapter] Deleting stale temporary metadata file: ${file}`);
await fs.unlink(tempMetaPath);
}
} catch (statErr) {
if (statErr.code !== 'ENOENT') {
logger.error(`[Local Adapter] Error checking temp metadata file ${tempMetaPath}: ${statErr.message}`);
}
}
}
}
if (checkedCount > 0 || cleanedCount > 0) {
logger.info(`[Local Adapter] Metadata cleanup finished. Checked: ${checkedCount}, Cleaned stale: ${cleanedCount}.`);
}
} catch (err) {
if (err.code === 'ENOENT' && err.path === METADATA_DIR) {
// This case should be handled by ensureMetadataDirExists, but log just in case
logger.warn('[Local Adapter] Metadata directory not found during cleanup scan.');
} else {
logger.error(`[Local Adapter] Error during metadata cleanup scan: ${err.message}`);
}
}
// Note: Empty folder cleanup is handled by the main cleanup utility for now.
// If needed, the logic from utils/cleanup.js -> cleanupEmptyFolders could be moved here.
}
module.exports = {
initUpload,
storeChunk,
completeUpload,
abortUpload,
listFiles,
getDownloadUrlOrStream,
deleteFile,
cleanupStale
};
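To tie the pieces together, a minimal sketch of the adapter's lifecycle for a small in-memory buffer; the helper and its call site are assumptions, not repo code:
const localAdapter = require('./localAdapter');
async function uploadBuffer(relativePath, buffer) {
// initUpload resolves name collisions, writes the .meta file, and finalizes zero-byte files itself.
const { uploadId } = await localAdapter.initUpload(relativePath, buffer.length, null);
if (buffer.length === 0) return;
// A real client would send many chunks; one chunk is enough to reach completion here.
const { completed } = await localAdapter.storeChunk(uploadId, buffer);
if (completed) {
const { finalPath } = await localAdapter.completeUpload(uploadId);
console.log(`Stored at ${finalPath}`);
}
}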

View File

@@ -1,439 +0,0 @@
/**
* S3 Storage Adapter
* Handles file operations for storing files on AWS S3 or S3-compatible services.
* Implements the storage interface expected by the application routes.
* Uses local files in '.metadata' directory to track multipart upload progress.
* Attempts to make top-level folder prefixes unique per batch if collisions occur.
*/
const {
S3Client,
CreateMultipartUploadCommand,
UploadPartCommand,
CompleteMultipartUploadCommand,
AbortMultipartUploadCommand,
ListObjectsV2Command,
GetObjectCommand,
DeleteObjectCommand,
PutObjectCommand,
HeadObjectCommand
} = require('@aws-sdk/client-s3');
const { getSignedUrl } = require("@aws-sdk/s3-request-presigner");
const fs = require('fs').promises;
const fsSync = require('fs');
const path = require('path');
const crypto = require('crypto');
const util = require('util'); // For detailed error logging
const { config } = require('../config');
const logger = require('../utils/logger');
const {
sanitizePathPreserveDirs,
formatFileSize
} = require('../utils/fileUtils');
const { sendNotification } = require('../services/notifications');
const METADATA_DIR = path.join(config.uploadDir, '.metadata');
const UPLOAD_TIMEOUT = 30 * 60 * 1000; // For local metadata cleanup
// --- S3 Client Initialization ---
let s3Client;
try {
const s3ClientConfig = {
region: config.s3Region,
credentials: {
accessKeyId: config.s3AccessKeyId,
secretAccessKey: config.s3SecretAccessKey,
},
...(config.s3EndpointUrl && { endpoint: config.s3EndpointUrl }),
...(config.s3ForcePathStyle && { forcePathStyle: true }),
};
if (s3ClientConfig.endpoint) logger.info(`[S3 Adapter] Configuring S3 client for endpoint: ${s3ClientConfig.endpoint}`);
if (s3ClientConfig.forcePathStyle) logger.info(`[S3 Adapter] Configuring S3 client with forcePathStyle: true`);
s3Client = new S3Client(s3ClientConfig);
logger.success('[S3 Adapter] S3 Client configured successfully.');
} catch (error) {
logger.error(`[S3 Adapter] Failed to configure S3 client: ${error.message}`);
throw new Error('S3 Client configuration failed. Check S3 environment variables.');
}
// --- Metadata Helper Functions ---
async function ensureMetadataDirExists() {
try {
if (!fsSync.existsSync(METADATA_DIR)) {
await fs.mkdir(METADATA_DIR, { recursive: true });
logger.info(`[S3 Adapter] Created local metadata directory: ${METADATA_DIR}`);
}
await fs.access(METADATA_DIR, fsSync.constants.W_OK);
} catch (err) {
logger.error(`[S3 Adapter] Local metadata directory error (${METADATA_DIR}): ${err.message}`);
throw new Error(`Failed to access or create local metadata directory for S3 adapter state: ${METADATA_DIR}`);
}
}
async function readUploadMetadata(uploadId) {
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
logger.warn(`[S3 Adapter] Attempted to read metadata with invalid uploadId: ${uploadId}`);
return null;
}
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
try {
const data = await fs.readFile(metaFilePath, 'utf8');
const metadata = JSON.parse(data);
metadata.parts = metadata.parts || [];
return metadata;
} catch (err) {
if (err.code === 'ENOENT') return null;
logger.error(`[S3 Adapter] Error reading metadata for ${uploadId}: ${err.message}`);
throw err;
}
}
async function writeUploadMetadata(uploadId, metadata) {
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
logger.error(`[S3 Adapter] Attempted to write metadata with invalid uploadId: ${uploadId}`);
return;
}
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
metadata.lastActivity = Date.now();
metadata.parts = metadata.parts || [];
const tempMetaPath = `${metaFilePath}.${crypto.randomBytes(4).toString('hex')}.tmp`; // declared before try so the catch block can clean it up
try {
await fs.writeFile(tempMetaPath, JSON.stringify(metadata, null, 2));
await fs.rename(tempMetaPath, metaFilePath);
} catch (err) {
logger.error(`[S3 Adapter] Error writing metadata for ${uploadId}: ${err.message}`);
try { await fs.unlink(tempMetaPath); } catch (unlinkErr) {/* ignore */}
throw err;
}
}
async function deleteUploadMetadata(uploadId) {
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
logger.warn(`[S3 Adapter] Attempted to delete metadata with invalid uploadId: ${uploadId}`);
return;
}
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
try {
await fs.unlink(metaFilePath);
logger.debug(`[S3 Adapter] Deleted metadata file: ${uploadId}.meta`);
} catch (err) {
if (err.code !== 'ENOENT') logger.error(`[S3 Adapter] Error deleting metadata file ${uploadId}.meta: ${err.message}`);
}
}
ensureMetadataDirExists().catch(err => {
logger.error(`[S3 Adapter] Initialization failed (metadata dir): ${err.message}`);
process.exit(1);
});
// --- S3 Object/Prefix Utilities ---
const batchS3PrefixMappings = new Map(); // In-memory: originalTopLevelFolder-batchId -> actualS3Prefix
async function s3ObjectExists(key) {
logger.info(`[S3 Adapter] s3ObjectExists: Checking key "${key}"`);
try {
await s3Client.send(new HeadObjectCommand({ Bucket: config.s3BucketName, Key: key }));
logger.info(`[S3 Adapter] s3ObjectExists: HeadObject success for key "${key}". Key EXISTS.`);
return true;
} catch (error) {
// logger.error(`[S3 Adapter DEBUG] Full error object for HeadObject on key "${key}":\n`, util.inspect(error, { showHidden: false, depth: null, colors: false }));
if (error.name === 'NotFound' || error.name === 'NoSuchKey' || (error.$metadata && error.$metadata.httpStatusCode === 404)) {
logger.info(`[S3 Adapter] s3ObjectExists: Key "${key}" NOT found (404-like error).`);
return false;
}
if (error.name === '403' || (error.$metadata && error.$metadata.httpStatusCode === 403)) {
logger.warn(`[S3 Adapter] s3ObjectExists: Received 403 Forbidden for key "${key}". For unique key generation, treating this as 'likely does not exist'.`);
return false;
}
logger.error(`[S3 Adapter] s3ObjectExists: Unhandled error type "${error.name}" for key "${key}": ${error.message}`);
throw error;
}
}
async function getUniqueS3FolderPrefix(originalPrefix, batchId) {
if (!originalPrefix || !originalPrefix.endsWith('/')) {
logger.error("[S3 Adapter] getUniqueS3FolderPrefix: originalPrefix must be a non-empty string ending with '/'");
return originalPrefix; // Or throw error
}
const prefixMapKey = `${originalPrefix}-${batchId}`;
if (batchS3PrefixMappings.has(prefixMapKey)) {
return batchS3PrefixMappings.get(prefixMapKey);
}
let currentPrefixToCheck = originalPrefix;
let counter = 1;
const baseName = originalPrefix.slice(0, -1); // "MyFolder" from "MyFolder/"
async function prefixHasObjects(prefix) {
try {
const listResponse = await s3Client.send(new ListObjectsV2Command({
Bucket: config.s3BucketName, Prefix: prefix, MaxKeys: 1
}));
return listResponse.KeyCount > 0;
} catch (error) {
logger.error(`[S3 Adapter] Error listing objects for prefix check "${prefix}": ${error.message}`);
throw error; // Propagate error if listing fails for permission reasons etc.
}
}
while (await prefixHasObjects(currentPrefixToCheck)) {
logger.warn(`[S3 Adapter] S3 prefix "${currentPrefixToCheck}" is not empty. Generating unique prefix for base "${baseName}/".`);
currentPrefixToCheck = `${baseName}-${counter}/`; // Use hyphen for suffix
counter++;
}
if (currentPrefixToCheck !== originalPrefix) {
logger.info(`[S3 Adapter] Using unique S3 folder prefix: "${currentPrefixToCheck}" for original "${originalPrefix}" in batch "${batchId}"`);
}
batchS3PrefixMappings.set(prefixMapKey, currentPrefixToCheck);
return currentPrefixToCheck;
}
// --- Interface Implementation ---
async function initUpload(filename, fileSize, clientBatchId) {
await ensureMetadataDirExists();
const size = Number(fileSize);
const appUploadId = crypto.randomBytes(16).toString('hex');
const batchId = clientBatchId || `${Date.now()}-${crypto.randomBytes(4).toString('hex').substring(0, 9)}`;
const originalSanitizedFullpath = sanitizePathPreserveDirs(filename); // e.g., "MyFolder/image.jpg" or "image.jpg"
let s3KeyStructure = path.normalize(originalSanitizedFullpath)
.replace(/^(\.\.(\/|\\|$))+/, '').replace(/\\/g, '/').replace(/^\/+/, '');
let effectiveBasePrefix = ""; // e.g., "MyFolder-1/" or ""
const pathParts = s3KeyStructure.split('/');
const isNestedPath = pathParts.length > 1;
let relativePathInFolder = s3KeyStructure;
if (isNestedPath) {
const originalTopLevelFolder = pathParts[0] + '/'; // "MyFolder/"
effectiveBasePrefix = await getUniqueS3FolderPrefix(originalTopLevelFolder, batchId);
relativePathInFolder = pathParts.slice(1).join('/'); // "SubFolder/image.jpg" or "image.jpg"
s3KeyStructure = effectiveBasePrefix + relativePathInFolder;
}
logger.info(`[S3 Adapter] Init: Original Full Path: "${originalSanitizedFullpath}", Effective Base Prefix: "${effectiveBasePrefix}", Relative Path In Folder: "${relativePathInFolder}"`);
let finalS3Key = s3KeyStructure;
let fileCounter = 1;
const fileDir = path.dirname(s3KeyStructure);
const fileExt = path.extname(s3KeyStructure);
const fileBaseName = path.basename(s3KeyStructure, fileExt);
while (await s3ObjectExists(finalS3Key)) {
logger.warn(`[S3 Adapter] S3 file key already exists: "${finalS3Key}". Generating unique file key.`);
finalS3Key = (fileDir === "." ? "" : fileDir + "/") + `${fileBaseName}-${fileCounter}${fileExt}`; // Use hyphen
fileCounter++;
}
if (finalS3Key !== s3KeyStructure) {
logger.info(`[S3 Adapter] Using unique S3 file key: "${finalS3Key}"`);
}
if (size === 0) {
try {
await s3Client.send(new PutObjectCommand({
Bucket: config.s3BucketName, Key: finalS3Key, Body: '', ContentLength: 0
}));
logger.success(`[S3 Adapter] Completed zero-byte file: ${finalS3Key}`);
sendNotification(originalSanitizedFullpath, 0, config);
return { uploadId: `zero-byte-${appUploadId}` };
} catch (putErr) {
logger.error(`[S3 Adapter] Failed zero-byte PUT for ${finalS3Key}: ${putErr.message}`);
throw putErr;
}
}
try {
const createCommand = new CreateMultipartUploadCommand({ Bucket: config.s3BucketName, Key: finalS3Key });
const response = await s3Client.send(createCommand);
const s3UploadId = response.UploadId;
if (!s3UploadId) throw new Error('S3 did not return UploadId');
logger.info(`[S3 Adapter] Multipart initiated for ${finalS3Key} (S3 UploadId: ${s3UploadId})`);
const metadata = {
appUploadId, s3UploadId, s3Key: finalS3Key,
originalFilename: originalSanitizedFullpath, // Use the full original path for notification
fileSize: size, bytesReceived: 0, parts: [], batchId,
createdAt: Date.now(), lastActivity: Date.now()
};
await writeUploadMetadata(appUploadId, metadata);
return { uploadId: appUploadId };
} catch (err) {
logger.error(`[S3 Adapter] Failed multipart init for ${finalS3Key}: ${err.message}`);
throw err;
}
}
async function storeChunk(appUploadId, chunk, partNumber) {
const chunkSize = chunk.length;
if (!chunkSize) throw new Error('Empty chunk received');
if (partNumber < 1) throw new Error('PartNumber must be 1 or greater');
const metadata = await readUploadMetadata(appUploadId);
if (!metadata || !metadata.s3UploadId) {
logger.warn(`[S3 Adapter] Metadata or S3 UploadId not found for chunk: ${appUploadId}`);
throw new Error('Upload session not found or already completed');
}
if (metadata.bytesReceived >= metadata.fileSize && metadata.fileSize > 0) {
logger.warn(`[S3 Adapter] Chunk for already completed upload ${appUploadId}. Ignoring.`);
return { bytesReceived: metadata.bytesReceived, progress: 100, completed: true };
}
try {
const cmd = new UploadPartCommand({
Bucket: config.s3BucketName, Key: metadata.s3Key, UploadId: metadata.s3UploadId,
Body: chunk, PartNumber: partNumber, ContentLength: chunkSize
});
const response = await s3Client.send(cmd);
const etag = response.ETag;
if (!etag) throw new Error(`S3 ETag missing for Part ${partNumber}`);
metadata.parts.push({ PartNumber: partNumber, ETag: etag });
metadata.parts.sort((a, b) => a.PartNumber - b.PartNumber);
metadata.bytesReceived = Math.min((metadata.bytesReceived || 0) + chunkSize, metadata.fileSize);
await writeUploadMetadata(appUploadId, metadata);
const progress = metadata.fileSize === 0 ? 100 : Math.min(Math.round((metadata.bytesReceived / metadata.fileSize) * 100), 100);
const completed = metadata.bytesReceived >= metadata.fileSize;
logger.debug(`[S3 Adapter] Part ${partNumber} for ${appUploadId} (Key: ${metadata.s3Key}). ETag: ${etag}. Progress: ~${progress}%. Completed: ${completed}`);
return { bytesReceived: metadata.bytesReceived, progress, completed };
} catch (err) {
logger.error(`[S3 Adapter] Failed Part ${partNumber} for ${appUploadId} (Key: ${metadata.s3Key}): ${err.message}`);
throw err;
}
}
async function completeUpload(appUploadId) {
const metadata = await readUploadMetadata(appUploadId);
if (!metadata || !metadata.s3UploadId || !metadata.parts || metadata.parts.length === 0) {
throw new Error('Upload completion failed: Missing metadata/parts');
}
if (metadata.bytesReceived < metadata.fileSize) {
logger.warn(`[S3 Adapter] Completing ${appUploadId} with ${metadata.bytesReceived}/${metadata.fileSize} bytes tracked.`);
}
try {
const cmd = new CompleteMultipartUploadCommand({
Bucket: config.s3BucketName, Key: metadata.s3Key, UploadId: metadata.s3UploadId,
MultipartUpload: { Parts: metadata.parts },
});
const response = await s3Client.send(cmd);
logger.success(`[S3 Adapter] Finalized: ${metadata.s3Key} (ETag: ${response.ETag})`);
await deleteUploadMetadata(appUploadId);
sendNotification(metadata.originalFilename, metadata.fileSize, config);
return { filename: metadata.originalFilename, size: metadata.fileSize, finalPath: metadata.s3Key };
} catch (err) {
logger.error(`[S3 Adapter] Failed CompleteMultipartUpload for ${metadata.s3Key}: ${err.message}`);
if (err.Code === 'NoSuchUpload' || err.name === 'NoSuchUpload') {
logger.warn(`[S3 Adapter] NoSuchUpload on complete for ${appUploadId}. Assuming completed/aborted.`);
await deleteUploadMetadata(appUploadId).catch(()=>{});
try {
await s3Client.send(new HeadObjectCommand({ Bucket: config.s3BucketName, Key: metadata.s3Key }));
logger.info(`[S3 Adapter] Final object ${metadata.s3Key} exists after NoSuchUpload. Treating as completed.`);
return { filename: metadata.originalFilename, size: metadata.fileSize, finalPath: metadata.s3Key };
} catch (headErr) { throw new Error('Completion failed: Session & final object not found.'); }
}
throw err;
}
}
async function abortUpload(appUploadId) {
const metadata = await readUploadMetadata(appUploadId);
if (!metadata || !metadata.s3UploadId) {
logger.warn(`[S3 Adapter] Abort for non-existent/completed upload: ${appUploadId}`);
await deleteUploadMetadata(appUploadId); return;
}
try {
await s3Client.send(new AbortMultipartUploadCommand({
Bucket: config.s3BucketName, Key: metadata.s3Key, UploadId: metadata.s3UploadId,
}));
logger.info(`[S3 Adapter] Aborted: ${appUploadId} (Key: ${metadata.s3Key})`);
} catch (err) {
if (err.name !== 'NoSuchUpload') {
logger.error(`[S3 Adapter] Failed Abort for ${metadata.s3Key}: ${err.message}`); throw err;
}
logger.warn(`[S3 Adapter] NoSuchUpload on abort for ${metadata.s3Key}. Already aborted/completed.`);
}
await deleteUploadMetadata(appUploadId);
}
async function listFiles() {
try {
let isTruncated = true; let continuationToken; const allFiles = [];
while(isTruncated) {
const params = { Bucket: config.s3BucketName };
if (continuationToken) params.ContinuationToken = continuationToken;
const response = await s3Client.send(new ListObjectsV2Command(params));
(response.Contents || []).forEach(item => allFiles.push({
filename: item.Key, size: item.Size,
formattedSize: formatFileSize(item.Size), uploadDate: item.LastModified
}));
isTruncated = response.IsTruncated;
continuationToken = response.NextContinuationToken;
}
allFiles.sort((a, b) => b.uploadDate.getTime() - a.uploadDate.getTime());
return allFiles;
} catch (err) {
logger.error(`[S3 Adapter] Failed list objects in ${config.s3BucketName}: ${err.message}`); throw err;
}
}
async function getDownloadUrlOrStream(s3Key) {
if (!s3Key || s3Key.includes('..') || s3Key.startsWith('/')) throw new Error('Invalid filename for download');
try {
const cmd = new GetObjectCommand({ Bucket: config.s3BucketName, Key: s3Key });
const url = await getSignedUrl(s3Client, cmd, { expiresIn: 3600 });
logger.info(`[S3 Adapter] Presigned URL for ${s3Key}`);
return { type: 'url', value: url };
} catch (err) {
logger.error(`[S3 Adapter] Failed presigned URL for ${s3Key}: ${err.message}`);
if (err.name === 'NoSuchKey') throw new Error('File not found in S3'); throw err;
}
}
async function deleteFile(s3Key) {
if (!s3Key || s3Key.includes('..') || s3Key.startsWith('/')) throw new Error('Invalid filename for delete');
try {
await s3Client.send(new DeleteObjectCommand({ Bucket: config.s3BucketName, Key: s3Key }));
logger.info(`[S3 Adapter] Deleted: ${s3Key}`);
} catch (err) {
logger.error(`[S3 Adapter] Failed delete for ${s3Key}: ${err.message}`); throw err;
}
}
async function cleanupStale() {
logger.info('[S3 Adapter] Cleaning stale local metadata...');
let cleaned = 0, checked = 0;
try {
await ensureMetadataDirExists(); const files = await fs.readdir(METADATA_DIR); const now = Date.now();
for (const file of files) {
if (file.endsWith('.meta')) {
checked++; const id = file.replace('.meta',''); const fp = path.join(METADATA_DIR, file);
try {
const meta = JSON.parse(await fs.readFile(fp, 'utf8'));
if (now - (meta.lastActivity || meta.createdAt || 0) > UPLOAD_TIMEOUT) {
logger.warn(`[S3 Adapter] Stale local meta: ${file}, S3 ID: ${meta.s3UploadId||'N/A'}`);
await deleteUploadMetadata(id); cleaned++;
}
} catch (e) { logger.error(`[S3 Adapter] Error parsing meta ${fp}: ${e.message}`); await fs.unlink(fp).catch(()=>{}); }
} else if (file.endsWith('.tmp')) {
const tmpP = path.join(METADATA_DIR, file);
try { if (now - (await fs.stat(tmpP)).mtime.getTime() > UPLOAD_TIMEOUT) { logger.warn(`[S3 Adapter] Deleting stale tmp meta: ${file}`); await fs.unlink(tmpP); }}
catch (e) { if (e.code!=='ENOENT') logger.error(`[S3 Adapter] Error stat/unlink tmp meta ${tmpP}: ${e.message}`);}
}
}
if (checked > 0 || cleaned > 0) logger.info(`[S3 Adapter] Local meta cleanup: Checked ${checked}, Cleaned ${cleaned}.`);
logger.warn(`[S3 Adapter] IMPORTANT: Configure S3 Lifecycle Rules on bucket '${config.s3BucketName}' to clean incomplete multipart uploads.`);
} catch (err) {
if (err.code==='ENOENT'&&err.path===METADATA_DIR) logger.warn('[S3 Adapter] Local meta dir not found for cleanup.');
else logger.error(`[S3 Adapter] Error local meta cleanup: ${err.message}`);
}
// Basic batchS3PrefixMappings cleanup
if (batchS3PrefixMappings.size > 1000) {
logger.warn(`[S3 Adapter] Clearing batchS3PrefixMappings (size: ${batchS3PrefixMappings.size}).`);
batchS3PrefixMappings.clear();
}
}
module.exports = {
initUpload, storeChunk, completeUpload, abortUpload,
listFiles, getDownloadUrlOrStream, deleteFile, cleanupStale
};
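The cleanupStale() warning above leaves incomplete-multipart cleanup to the bucket itself; a hedged sketch of setting such a rule with the same SDK (the bucket name, region, and one-day retention are assumptions):
const { S3Client, PutBucketLifecycleConfigurationCommand } = require('@aws-sdk/client-s3');
async function applyAbortIncompleteMultipartRule(bucket, region) {
const client = new S3Client({ region });
await client.send(new PutBucketLifecycleConfigurationCommand({
Bucket: bucket,
LifecycleConfiguration: {
Rules: [{
ID: 'abort-incomplete-multipart-uploads',
Status: 'Enabled',
Filter: { Prefix: '' }, // apply to every key in the bucket
AbortIncompleteMultipartUpload: { DaysAfterInitiation: 1 },
}],
},
}));
}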

View File

@@ -1,234 +1,175 @@
/**
* Cleanup utilities for managing application resources.
* Handles registration and execution of cleanup tasks, including delegation
* of storage-specific cleanup (like stale uploads) to the storage adapter.
* Also includes generic cleanup like removing empty folders (for local storage).
* Handles incomplete uploads, empty folders, and shutdown tasks.
* Provides cleanup task registration and execution system.
*/
const fs = require('fs').promises;
const fs = require('fs');
const path = require('path');
const logger = require('./logger');
const { config } = require('../config');
const { storageAdapter } = require('../storage'); // Import the selected adapter
// --- Generic Cleanup Task Management ---
let cleanupTasks = [];
/**
* Register a generic cleanup task to be executed during shutdown.
* @param {Function} task - Async function to be executed during cleanup.
* Stores cleanup tasks that need to be run during shutdown
* @type {Set<Function>}
*/
const cleanupTasks = new Set();
/**
* Register a cleanup task to be executed during shutdown
* @param {Function} task - Async function to be executed during cleanup
*/
function registerCleanupTask(task) {
cleanupTasks.push(task);
cleanupTasks.add(task);
}
/**
* Remove a generic cleanup task.
* @param {Function} task - Task to remove.
* Remove a cleanup task
* @param {Function} task - Task to remove
*/
function removeCleanupTask(task) {
cleanupTasks = cleanupTasks.filter((t) => t !== task);
cleanupTasks.delete(task);
}
/**
* Execute all registered generic cleanup tasks.
* @param {number} [timeout=1000] - Maximum time in ms to wait for cleanup.
* Execute all registered cleanup tasks
* @param {number} [timeout=1000] - Maximum time in ms to wait for cleanup
* @returns {Promise<void>}
*/
async function executeCleanup(timeout = 1000) {
const taskCount = cleanupTasks.length;
const taskCount = cleanupTasks.size;
if (taskCount === 0) {
logger.info('[Cleanup] No generic cleanup tasks to execute');
logger.info('No cleanup tasks to execute');
return;
}
logger.info(`[Cleanup] Executing ${taskCount} generic cleanup tasks...`);
logger.info(`Executing ${taskCount} cleanup tasks...`);
try {
// Run all tasks concurrently with individual and global timeouts
// Run all cleanup tasks in parallel with timeout
await Promise.race([
Promise.all(
cleanupTasks.map(async (task, index) => {
Array.from(cleanupTasks).map(async (task) => {
try {
await Promise.race([
task(),
new Promise((_, reject) =>
setTimeout(() => reject(new Error(`Task ${index + 1} timeout`)), timeout / 2) // Individual timeout
setTimeout(() => reject(new Error('Task timeout')), timeout / 2)
)
]);
logger.debug(`[Cleanup] Task ${index + 1} completed.`);
} catch (error) {
logger.warn(`[Cleanup] Task ${index + 1} failed or timed out: ${error.message}`);
if (error.message === 'Task timeout') {
logger.warn('Cleanup task timed out');
} else {
logger.error(`Cleanup task failed: ${error.message}`);
}
}
})
),
new Promise((_, reject) =>
setTimeout(() => reject(new Error('Global cleanup timeout')), timeout) // Global timeout
setTimeout(() => reject(new Error('Global timeout')), timeout)
)
]);
logger.info('[Cleanup] Generic cleanup tasks completed successfully');
logger.info('Cleanup completed successfully');
} catch (error) {
logger.warn(`[Cleanup] Generic cleanup process ended with error or timeout: ${error.message}`);
} finally {
cleanupTasks = []; // Clear tasks regardless of outcome
}
}
// --- Storage-Specific Cleanup ---
// How often to run the storage cleanup check (e.g., every 15 minutes)
const STORAGE_CLEANUP_INTERVAL = 15 * 60 * 1000;
let storageCleanupTimer = null;
/**
* Performs cleanup of stale storage resources by calling the adapter's method.
* This is typically run periodically.
*/
async function runStorageCleanup() {
logger.info('[Cleanup] Running periodic storage cleanup...');
try {
if (storageAdapter && typeof storageAdapter.cleanupStale === 'function') {
await storageAdapter.cleanupStale();
logger.info('[Cleanup] Storage adapter cleanup task finished.');
// Additionally, run empty folder cleanup if using local storage
if (config.storageType === 'local') {
await cleanupEmptyFolders(config.uploadDir);
}
} else {
logger.warn('[Cleanup] Storage adapter or cleanupStale method not available.');
}
} catch (error) {
logger.error(`[Cleanup] Error during periodic storage cleanup: ${error.message}`, error.stack);
if (error.message === 'Global timeout') {
logger.warn(`Cleanup timed out after ${timeout}ms`);
} else {
logger.error(`Cleanup failed: ${error.message}`);
}
}
/**
* Starts the periodic storage cleanup task.
*/
function startStorageCleanupInterval() {
if (storageCleanupTimer) {
clearInterval(storageCleanupTimer);
} finally {
// Clear all tasks regardless of success/failure
cleanupTasks.clear();
}
logger.info(`[Cleanup] Starting periodic storage cleanup interval (${STORAGE_CLEANUP_INTERVAL / 60000} minutes).`);
// Run once immediately on start? Optional.
// runStorageCleanup();
storageCleanupTimer = setInterval(runStorageCleanup, STORAGE_CLEANUP_INTERVAL);
storageCleanupTimer.unref(); // Allow process to exit if this is the only timer
}
/**
* Stops the periodic storage cleanup task.
* Clean up incomplete uploads and temporary files
* @param {Map} uploads - Map of active uploads
* @param {Map} uploadToBatch - Map of upload IDs to batch IDs
* @param {Map} batchActivity - Map of batch IDs to last activity timestamp
*/
function stopStorageCleanupInterval() {
if (storageCleanupTimer) {
clearInterval(storageCleanupTimer);
storageCleanupTimer = null;
logger.info('[Cleanup] Stopped periodic storage cleanup interval.');
}
async function cleanupIncompleteUploads(uploads, uploadToBatch, batchActivity) {
try {
// Get current time
const now = Date.now();
const inactivityThreshold = config.uploadTimeout || 30 * 60 * 1000; // 30 minutes default
// Check each upload
for (const [uploadId, upload] of uploads.entries()) {
try {
const batchId = uploadToBatch.get(uploadId);
const lastActivity = batchActivity.get(batchId);
// If upload is inactive for too long
if (now - lastActivity > inactivityThreshold) {
// Close write stream
if (upload.writeStream) {
await new Promise((resolve) => {
upload.writeStream.end(() => resolve());
});
}
// Delete incomplete file
try {
await fs.promises.unlink(upload.filePath);
logger.info(`Cleaned up incomplete upload: ${upload.safeFilename}`);
} catch (err) {
if (err.code !== 'ENOENT') {
logger.error(`Failed to delete incomplete upload ${upload.safeFilename}: ${err.message}`);
}
}
// Remove from maps
uploads.delete(uploadId);
uploadToBatch.delete(uploadId);
}
} catch (err) {
logger.error(`Error cleaning up upload ${uploadId}: ${err.message}`);
}
}
// Clean up empty folders
await cleanupEmptyFolders(config.uploadDir);
} catch (err) {
logger.error(`Cleanup error: ${err.message}`);
}
}
// Start interval automatically
// Note: the storage adapter must be initialized before this interval fires;
// if ordering ever becomes an issue, start the interval from server.js after init.
if (!config.isDemoMode) { // Don't run cleanup in demo mode
startStorageCleanupInterval();
} else {
logger.info('[Cleanup] Periodic storage cleanup disabled in Demo Mode.');
}
// Stop interval on shutdown
process.on('SIGTERM', stopStorageCleanupInterval);
process.on('SIGINT', stopStorageCleanupInterval);
// --- Empty Folder Cleanup (Primarily for Local Storage) ---
/**
 * Recursively remove empty folders within a given directory.
 * Skips the special '.metadata' directory and never removes the configured upload root.
 * @param {string} dir - Directory path to clean.
 */
async function cleanupEmptyFolders(dir) {
  // Check if the path exists and is a directory first
  try {
    const stats = await fs.stat(dir);
    if (!stats.isDirectory()) {
      logger.debug(`[Cleanup] Skipping non-directory path for empty folder cleanup: ${dir}`);
      return;
    }
  } catch (err) {
    if (err.code === 'ENOENT') {
      logger.debug(`[Cleanup] Directory not found for empty folder cleanup: ${dir}`);
      return; // Directory doesn't exist, nothing to clean
    }
    logger.error(`[Cleanup] Error stating directory ${dir} for cleanup: ${err.message}`);
    return; // Don't proceed if we can't stat
  }
  logger.debug(`[Cleanup] Checking for empty folders within: ${dir}`);
  // Never descend into or remove the metadata directory itself
  if (path.basename(dir) === '.metadata') {
    logger.debug(`[Cleanup] Skipping cleanup of metadata directory itself: ${dir}`);
    return;
  }
  let entries;
  try {
    entries = await fs.readdir(dir, { withFileTypes: true });
  } catch (err) {
    logger.error(`[Cleanup] Failed to read directory ${dir} for empty folder cleanup: ${err.message}`);
    return; // Cannot proceed
  }
  // Recursively clean subdirectories first
  const subDirPromises = entries
    .filter(entry => entry.isDirectory() && entry.name !== '.metadata')
    .map(entry => cleanupEmptyFolders(path.join(dir, entry.name)));
  await Promise.all(subDirPromises);
  // Re-read directory contents after cleaning subdirectories
  try {
    entries = await fs.readdir(dir); // Just need names now
  } catch (err) {
    logger.error(`[Cleanup] Failed to re-read directory ${dir} after sub-cleanup: ${err.message}`);
    return;
  }
  // Check if directory is now empty (or only contains .metadata)
  const isEmpty = entries.length === 0 || (entries.length === 1 && entries[0] === '.metadata');
  if (isEmpty) {
    // Make sure we don't delete the main configured upload dir or the metadata dir
    const resolvedUploadDir = path.resolve(config.uploadDir);
    const resolvedCurrentDir = path.resolve(dir);
    if (resolvedCurrentDir !== resolvedUploadDir && path.basename(resolvedCurrentDir) !== '.metadata') {
      try {
        await fs.rmdir(resolvedCurrentDir);
        logger.info(`[Cleanup] Removed empty directory: ${resolvedCurrentDir}`);
      } catch (rmErr) {
        if (rmErr.code !== 'ENOENT') { // Ignore if already deleted
          logger.error(`[Cleanup] Failed to remove supposedly empty directory ${resolvedCurrentDir}: ${rmErr.message}`);
        }
      }
    } else {
      logger.debug(`[Cleanup] Skipping removal of root upload directory or metadata directory: ${resolvedCurrentDir}`);
    }
  }
}
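// Hypothetical usage sketch (config property name for storage type assumed): sweep the
// local upload root after uploads finish or a batch is deleted. The configured root and
// any '.metadata' directory survive the sweep; only empty descendant folders are removed.
// if (config.storageType === 'local') {
//   await cleanupEmptyFolders(config.uploadDir);
// }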
// --- Export ---
module.exports = {
registerCleanupTask,
removeCleanupTask,
executeCleanup,
// Exporting runStorageCleanup might be useful for triggering manually if needed
runStorageCleanup,
startStorageCleanupInterval,
stopStorageCleanupInterval,
cleanupEmptyFolders // Export if needed elsewhere, though mainly used internally now
};

View File

@@ -9,6 +9,19 @@ const path = require('path');
const logger = require('./logger');
const { config } = require('../config');
/**
* Get display path for logs
* @param {string} internalPath - Internal Docker path
* @returns {string} Display path for host machine
*/
function getDisplayPath(internalPath) {
if (!internalPath.startsWith(config.uploadDir)) return internalPath;
// Replace the container path with the host path
const relativePath = path.relative(config.uploadDir, internalPath);
return path.join(config.uploadDisplayPath, relativePath);
}
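// Illustrative example (assumed values): with config.uploadDir = '/usr/src/app/uploads'
// and config.uploadDisplayPath = './local_uploads', a container path maps to the
// host-facing path used in logs, while paths outside the upload dir pass through:
// getDisplayPath('/usr/src/app/uploads/docs/report.pdf') -> 'local_uploads/docs/report.pdf'
// getDisplayPath('/tmp/other.txt')                       -> '/tmp/other.txt'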
/**
* Format file size to human readable format
* @param {number} bytes - Size in bytes
@@ -77,13 +90,13 @@ async function ensureDirectoryExists(directoryPath) {
try {
if (!fs.existsSync(directoryPath)) {
await fs.promises.mkdir(directoryPath, { recursive: true });
logger.info(`Created directory: ${directoryPath}`);
logger.info(`Created directory: ${getDisplayPath(directoryPath)}`);
}
await fs.promises.access(directoryPath, fs.constants.W_OK);
logger.success(`Directory is writable: ${directoryPath}`);
logger.success(`Directory is writable: ${getDisplayPath(directoryPath)}`);
} catch (err) {
logger.error(`Directory error: ${err.message}`);
throw new Error(`Failed to access or create directory: ${directoryPath}`);
throw new Error(`Failed to access or create directory: ${getDisplayPath(directoryPath)}`);
}
}
@@ -116,8 +129,8 @@ async function getUniqueFilePath(filePath) {
}
}
// Log using actual path
logger.info(`Using unique path: ${finalPath}`);
// Log using display path
logger.info(`Using unique path: ${getDisplayPath(finalPath)}`);
return { path: finalPath, handle: fileHandle };
}
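// Hypothetical usage sketch (assumes the returned handle is an fs.promises FileHandle):
// const { path: target, handle } = await getUniqueFilePath(path.join(config.uploadDir, 'report.pdf'));
// try {
//   await handle.writeFile(buffer);
// } finally {
//   await handle.close();
// }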
@@ -147,36 +160,10 @@ async function getUniqueFolderPath(folderPath) {
return finalPath;
}
function sanitizeFilename(fileName) {
const sanitized = fileName.replace(/[<>:"/\\|?*]+/g, '').replace(/["`$|;&<>]/g, '');
return sanitized;
}
function sanitizePathPreserveDirs(filePath) {
// Split on forward slashes, sanitize each part, and rejoin
return filePath
.split('/')
.map(part => sanitizeFilename(part))
.join('/');
}
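// Illustrative examples:
// sanitizeFilename('cat:*?.jpg')                -> 'cat.jpg'
// sanitizePathPreserveDirs('photos/cat:*?.jpg') -> 'photos/cat.jpg'  (slashes kept as separators)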
/**
* Validate batch ID format
* @param {string} batchId - Batch ID to validate
* @returns {boolean} True if valid (matches timestamp-9_alphanumeric format)
*/
function isValidBatchId(batchId) {
if (!batchId) return false;
return /^\d+-[a-z0-9]{9}$/.test(batchId);
}
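// Illustrative examples (format: <timestamp>-<9 lowercase alphanumerics>):
// isValidBatchId('1708452311000-a1b2c3d4e') -> true
// isValidBatchId('1708452311000-ABC123')    -> false (wrong case and length)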
module.exports = {
formatFileSize,
calculateDirectorySize,
ensureDirectoryExists,
getUniqueFilePath,
getUniqueFolderPath,
sanitizeFilename,
sanitizePathPreserveDirs,
isValidBatchId
};

View File

@@ -5,7 +5,7 @@
*/
const crypto = require('crypto');
const logger = require('./logger'); // Corrected path
const logger = require('./logger');
/**
* Store for login attempts with rate limiting
@@ -42,9 +42,6 @@ function startCleanupInterval() {
}
}, 60000); // Check every minute
// Allow node to exit even if this interval is running
cleanupInterval.unref();
return cleanupInterval;
}
@@ -63,11 +60,6 @@ if (!process.env.DISABLE_SECURITY_CLEANUP) {
startCleanupInterval();
}
// Stop interval on shutdown signals
process.on('SIGTERM', stopCleanupInterval);
process.on('SIGINT', stopCleanupInterval);
/**
* Reset login attempts for an IP
* @param {string} ip - IP address
@@ -91,7 +83,6 @@ function isLockedOut(ip) {
if (timeElapsed < LOCKOUT_DURATION) {
return true;
}
// Lockout expired, reset attempts before proceeding
resetAttempts(ip);
}
return false;
@@ -118,41 +109,28 @@ function recordAttempt(ip) {
*/
function validatePin(pin) {
if (!pin || typeof pin !== 'string') return null;
// Remove non-digit characters
const cleanPin = pin.replace(/\D/g, '');
// Check length constraints (e.g., 4-10 digits)
return cleanPin.length >= 4 && cleanPin.length <= 10 ? cleanPin : null;
}
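// Illustrative examples: non-digit characters are stripped before the length check.
// validatePin('12-34') -> '1234'
// validatePin('007')   -> null  (fewer than 4 digits)
// validatePin(123456)  -> null  (not a string)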
/**
 * Compare two strings in constant time using crypto.timingSafeEqual.
 * Pads both strings to a fixed length to prevent timing attacks based on length.
 * @param {string} a - First string
 * @param {string} b - Second string
 * @returns {boolean} True if strings match
 */
function safeCompare(a, b) {
  // Ensure inputs are strings
  if (typeof a !== 'string' || typeof b !== 'string') {
    logger.warn('safeCompare received non-string input.');
    return false;
  }
  try {
    // Choose a fixed length significantly longer than the expected max input length
    const fixedLength = 64;
    const bufferA = Buffer.alloc(fixedLength, 0); // Buffers pre-filled with zeros
    const bufferB = Buffer.alloc(fixedLength, 0);
    // Copy input strings into the buffers, truncated if necessary
    bufferA.write(a.slice(0, fixedLength));
    bufferB.write(b.slice(0, fixedLength));
    // Perform the timing-safe comparison on equal-length buffers
    return crypto.timingSafeEqual(bufferA, bufferB);
  } catch (err) {
    logger.error(`Error during safeCompare: ${err.message}`);
    return false;
  }
}
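// Hypothetical usage sketch (request handler and config property names assumed):
// verify a submitted PIN without leaking timing information about its length or
// matching prefix; the fixed-size zero-filled buffers keep the comparison cost
// independent of the inputs.
// const submitted = validatePin(req.body.pin) || '';
// const authorized = safeCompare(submitted, config.pin || '');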