mirror of https://github.com/DumbWareio/DumbDrop.git
synced 2025-11-15 11:21:26 +00:00

Compare commits

15 Commits (SHA1):

- 5f59771420
- 520c73b726
- 543ebffb8d
- 8270c7ccd0
- e4143c38db
- 1f236ce086
- 982b7b49e3
- 369077676d
- 165223f8ed
- 1273fe92b1
- c24e866074
- cb7e49b0e1
- 5666569580
- 6f1b93ed39
- 105d2a7412
@@ -45,14 +45,12 @@ README.md
CHANGELOG.md
docs

# Testing
test/
*.test.js
*.spec.js
coverage/

# Development configs
# Keep test files and configs for development builds
# __tests__
# jest.config.js
# *.test.js
# *.spec.js
# .eslintrc*
# .prettierrc*
.editorconfig
nodemon.json
eslint.config.js
.prettierrc

98 .env.example

@@ -5,35 +5,60 @@
# Port for the server (default: 3000)
PORT=3000

# Base URL for the application (default: http://localhost:PORT) -
# You must update this to the url you use to access your site
# Base URL for the application (must end with '/', default: http://localhost:PORT/)
BASE_URL=http://localhost:3000/

#ALLOWED_IFRAME_ORIGINS= #DEPRECATED and will be used as ALLOWED_ORIGINS if SET

# Comma-separated list of allowed origins for CORS
# (default: '*' if empty, add your base_url if you want to restrict only to base_url)
# When adding multiple origins, base_url will be included by default
# ALLOWED_ORIGINS: http://internalip:port,https://subdomain.example.com
ALLOWED_ORIGINS=

# Node environment (default: production)
# When set to 'development', ALLOWED_ORIGINS will default to '*'
NODE_ENV=production
# Node environment (default: development)
NODE_ENV=development

#########################################
# FILE UPLOAD SETTINGS
# STORAGE CONFIGURATION
#########################################

# Storage type ('local' or 's3', default: local)
STORAGE_TYPE=local

#########################################
# LOCAL STORAGE SETTINGS (if STORAGE_TYPE=local)
#########################################

# Directory for uploads (local dev, fallback: './local_uploads')
LOCAL_UPLOAD_DIR=./local_uploads

# Directory for uploads (Docker/production; optional, overrides LOCAL_UPLOAD_DIR if set)
UPLOAD_DIR=

#########################################
# S3 STORAGE SETTINGS (if STORAGE_TYPE=s3)
#########################################

# S3 Region (e.g., us-east-1 for AWS, us-west-000 for B2)
S3_REGION=

# S3 Bucket Name
S3_BUCKET_NAME=

# S3 Access Key ID
S3_ACCESS_KEY_ID=

# S3 Secret Access Key
S3_SECRET_ACCESS_KEY=

# Optional: S3 Endpoint URL (for non-AWS S3-compatible providers like MinIO, Backblaze B2)
# Example Backblaze B2: https://s3.us-west-000.backblazeb2.com
# Example MinIO: http://minio.local:9000
S3_ENDPOINT_URL=

# Optional: Force Path Style (true/false, default: false). Needed for some providers like MinIO.
S3_FORCE_PATH_STYLE=false

#########################################
# FILE UPLOAD LIMITS & OPTIONS
#########################################

# Maximum file size in MB (default: 1024)
MAX_FILE_SIZE=1024

# Directory for uploads (Docker/production; optional)
UPLOAD_DIR=

# Directory for uploads (local dev, fallback: './local_uploads')
LOCAL_UPLOAD_DIR=./local_uploads

# Comma-separated list of allowed file extensions (optional, e.g. .jpg,.png,.pdf)
# ALLOWED_EXTENSIONS=.jpg,.png,.pdf
ALLOWED_EXTENSIONS=

@@ -53,6 +78,10 @@ DUMBDROP_PIN=
# Site title displayed in header (default: DumbDrop)
DUMBDROP_TITLE=DumbDrop

# Custom footer links (comma-separated, format: "Link Text @ URL")
# Example: FOOTER_LINKS=My Site @ https://example.com, Another Link @ https://another.org
FOOTER_LINKS=

#########################################
# NOTIFICATION SETTINGS
#########################################

@@ -71,4 +100,31 @@ APPRISE_SIZE_UNIT=Auto
#########################################

# Enable automatic upload on file selection (true/false, default: false)
AUTO_UPLOAD=false
AUTO_UPLOAD=false

# Comma-separated list of origins allowed to embed the app in an iframe (optional)
# ALLOWED_IFRAME_ORIGINS=https://example.com,https://another.com
ALLOWED_IFRAME_ORIGINS=

# --- Docker Specific Settings ---
# User and Group IDs for file permissions
# Sets the user/group the application runs as inside the container.
# Files created in the mapped volume (e.g., ./local_uploads) will have this ownership.
# Set these to match your host user's ID/GID to avoid permission issues.
# Find your IDs with `id -u` and `id -g` on Linux/macOS.
# PUID=1000
# PGID=1000

# File Mode Creation Mask (Umask)
# Controls the default permissions for newly created files.
# 022 (default): Files 644 (rw-r--r--), Dirs 755 (rwxr-xr-x)
# 002: Files 664 (rw-rw-r--), Dirs 775 (rwxrwxr-x) - Good for group sharing
# 007: Files 660 (rw-rw----), Dirs 770 (rwxrwx---) - More restrictive
# 077: Files 600 (rw-------), Dirs 700 (rwx------) - Most restrictive
# UMASK=022

# Max number of retries for client-side chunk uploads (default: 5)
CLIENT_MAX_RETRIES=5

# Demo Mode (true/false, default: false). Overrides storage settings.
DEMO_MODE=false
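The URL and storage settings above are checked when the server boots. As a rough illustration only (this is not the project's actual config loader, and the exact behaviour is an assumption based on the comments above and the README below), a startup check along these lines would enforce the trailing-slash rule for `BASE_URL` and require the `S3_*` values only when `STORAGE_TYPE=s3`:

```javascript
// Hypothetical sketch, not DumbDrop's real config code.
require('dotenv').config();

const PORT = process.env.PORT || 3000;
const BASE_URL = process.env.BASE_URL || `http://localhost:${PORT}/`;
if (!BASE_URL.endsWith('/')) {
  // The README states the app refuses to start without a trailing slash.
  throw new Error(`BASE_URL must end with '/': ${BASE_URL}`);
}

const STORAGE_TYPE = (process.env.STORAGE_TYPE || 'local').toLowerCase();
if (STORAGE_TYPE === 's3') {
  // All four location/credential values are required for S3 mode.
  for (const key of ['S3_REGION', 'S3_BUCKET_NAME', 'S3_ACCESS_KEY_ID', 'S3_SECRET_ACCESS_KEY']) {
    if (!process.env[key]) throw new Error(`${key} is required when STORAGE_TYPE=s3`);
  }
} else if (STORAGE_TYPE !== 'local') {
  throw new Error(`Unknown STORAGE_TYPE: ${STORAGE_TYPE}`);
}

module.exports = { PORT, BASE_URL, STORAGE_TYPE };
```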
14 .eslintignore (new file)

@@ -0,0 +1,14 @@
# Dependencies
node_modules/

# Upload directories
local_uploads/
uploads/
test_uploads/

# Build directories
dist/
build/

# Coverage directory
coverage/
25 .eslintrc.json (new file)

@@ -0,0 +1,25 @@
{
  "env": {
    "node": true,
    "es2022": true
  },
  "extends": [
    "eslint:recommended",
    "plugin:node/recommended",
    "prettier"
  ],
  "parserOptions": {
    "ecmaVersion": 2022
  },
  "rules": {
    "node/exports-style": ["error", "module.exports"],
    "node/file-extension-in-import": ["error", "always"],
    "node/prefer-global/buffer": ["error", "always"],
    "node/prefer-global/console": ["error", "always"],
    "node/prefer-global/process": ["error", "always"],
    "node/prefer-global/url-search-params": ["error", "always"],
    "node/prefer-global/url": ["error", "always"],
    "node/prefer-promises/dns": "error",
    "node/prefer-promises/fs": "error"
  }
}
197 Dockerfile

@@ -1,8 +1,16 @@
# Base stage for shared configurations
FROM node:22-alpine as base
FROM node:20-alpine as base

# Install python and create virtual environment with minimal dependencies
RUN apk add --no-cache python3 py3-pip && \
# Add user and group IDs as arguments with defaults
ARG PUID=1000
ARG PGID=1000
# Default umask (complement of 022 is 755 for dirs, 644 for files)
ARG UMASK=022

# Install necessary packages:
# - su-exec: lightweight sudo alternative
# - python3, pip: for apprise dependency
RUN apk add --no-cache su-exec python3 py3-pip && \
    python3 -m venv /opt/venv && \
    rm -rf /var/cache/apk/*

@@ -14,51 +22,194 @@ RUN . /opt/venv/bin/activate && \
# Add virtual environment to PATH
ENV PATH="/opt/venv/bin:$PATH"

# Create group and user with fallback to prevent build failures
# We use the ARG values here, but with a fallback mechanism to avoid build failures
RUN ( \
    set -e; \
    echo "Attempting to create/verify user with PUID=${PUID} and PGID=${PGID}..."; \
    \
    # Initialize variables \
    TARGET_USER="nodeuser"; \
    TARGET_GROUP="nodeuser"; \
    NEW_GID="${PGID}"; \
    NEW_UID="${PUID}"; \
    \
    # Step 1: Handle GID and group first \
    echo "Setting up group for GID ${NEW_GID}..."; \
    if getent group "${NEW_GID}" > /dev/null; then \
        # GID exists, check which group has it \
        EXISTING_GROUP=$(getent group "${NEW_GID}" | cut -d: -f1); \
        echo "GID ${NEW_GID} is already used by group '${EXISTING_GROUP}'."; \
        \
        if [ "${EXISTING_GROUP}" = "${TARGET_GROUP}" ]; then \
            echo "Group '${TARGET_GROUP}' already exists with correct GID ${NEW_GID}."; \
        else \
            # GID exists but used by a different group (likely 'node') \
            echo "Will create '${TARGET_GROUP}' with a different GID to avoid conflict."; \
            # Check if TARGET_GROUP exists but with wrong GID \
            if getent group "${TARGET_GROUP}" > /dev/null; then \
                echo "Group '${TARGET_GROUP}' exists but with wrong GID. Deleting it."; \
                delgroup "${TARGET_GROUP}" || true; \
            fi; \
            # Create TARGET_GROUP with GID+1 (or find next available GID) \
            NEXT_GID=$((${NEW_GID} + 1)); \
            while getent group "${NEXT_GID}" > /dev/null; do \
                NEXT_GID=$((${NEXT_GID} + 1)); \
            done; \
            echo "Creating group '${TARGET_GROUP}' with new GID ${NEXT_GID}."; \
            addgroup -S -g "${NEXT_GID}" "${TARGET_GROUP}"; \
            NEW_GID="${NEXT_GID}"; \
        fi; \
    else \
        # GID does not exist - create group with desired GID \
        echo "Creating group '${TARGET_GROUP}' with GID ${NEW_GID}."; \
        addgroup -S -g "${NEW_GID}" "${TARGET_GROUP}"; \
    fi; \
    \
    # Verify group was created \
    echo "Verifying group '${TARGET_GROUP}' exists..."; \
    getent group "${TARGET_GROUP}" || (echo "ERROR: Failed to find group '${TARGET_GROUP}'!"; exit 1); \
    GID_FOR_USER=$(getent group "${TARGET_GROUP}" | cut -d: -f3); \
    echo "Final group: '${TARGET_GROUP}' with GID ${GID_FOR_USER}"; \
    \
    # Step 2: Handle UID and user \
    echo "Setting up user with UID ${NEW_UID}..."; \
    if getent passwd "${NEW_UID}" > /dev/null; then \
        # UID exists, check which user has it \
        EXISTING_USER=$(getent passwd "${NEW_UID}" | cut -d: -f1); \
        echo "UID ${NEW_UID} is already used by user '${EXISTING_USER}'."; \
        \
        if [ "${EXISTING_USER}" = "${TARGET_USER}" ]; then \
            echo "User '${TARGET_USER}' already exists with correct UID ${NEW_UID}."; \
            # Check if user needs group update \
            CURRENT_GID=$(getent passwd "${TARGET_USER}" | cut -d: -f4); \
            if [ "${CURRENT_GID}" != "${GID_FOR_USER}" ]; then \
                echo "User '${TARGET_USER}' has wrong GID (${CURRENT_GID}). Modifying..."; \
                deluser "${TARGET_USER}"; \
                adduser -S -D -u "${NEW_UID}" -G "${TARGET_GROUP}" -s /bin/sh "${TARGET_USER}"; \
            fi; \
        else \
            # Another user has our UID (e.g., 'node'). Delete it. \
            echo "Deleting existing user '${EXISTING_USER}' with UID ${NEW_UID}."; \
            deluser "${EXISTING_USER}" || true; \
            \
            # Now check if TARGET_USER exists but with wrong UID \
            if getent passwd "${TARGET_USER}" > /dev/null; then \
                echo "User '${TARGET_USER}' exists but with wrong UID. Updating..."; \
                deluser "${TARGET_USER}" || true; \
            fi; \
            \
            # Create user \
            echo "Creating user '${TARGET_USER}' with UID ${NEW_UID} and group '${TARGET_GROUP}'."; \
            adduser -S -D -u "${NEW_UID}" -G "${TARGET_GROUP}" -s /bin/sh "${TARGET_USER}"; \
        fi; \
    else \
        # UID does not exist - check if user exists with wrong UID \
        if getent passwd "${TARGET_USER}" > /dev/null; then \
            echo "User '${TARGET_USER}' exists but with wrong UID. Updating..."; \
            deluser "${TARGET_USER}" || true; \
        fi; \
        \
        # Create user with desired UID \
        echo "Creating user '${TARGET_USER}' with UID ${NEW_UID} and group '${TARGET_GROUP}'."; \
        adduser -S -D -u "${NEW_UID}" -G "${TARGET_GROUP}" -s /bin/sh "${TARGET_USER}"; \
    fi; \
    \
    # Create and set permissions on home directory \
    echo "Setting up home directory for ${TARGET_USER}..."; \
    mkdir -p /home/${TARGET_USER} && \
    chown -R ${TARGET_USER}:${TARGET_GROUP} /home/${TARGET_USER} && \
    chmod 755 /home/${TARGET_USER}; \
    \
    # Verify user was created \
    echo "Verifying user '${TARGET_USER}' exists..."; \
    getent passwd "${TARGET_USER}" || (echo "ERROR: Failed to find user '${TARGET_USER}'!"; exit 1); \
    \
    # Clean up and verify system files \
    echo "Ensuring root user definition is pristine..."; \
    chown root:root /etc/passwd /etc/group && \
    chmod 644 /etc/passwd /etc/group && \
    getent passwd root || (echo "ERROR: root not found after user/group operations!"; exit 1); \
    \
    # Print final status \
    echo "Final user/group setup:"; \
    id "${TARGET_USER}"; \
    )
WORKDIR /usr/src/app

# Set UMASK - this applies to processes run by the user created in this stage
# The entrypoint will also set it based on the ENV var at runtime.
RUN umask ${UMASK}

# Dependencies stage
FROM base as deps

COPY package*.json ./
# Change ownership early so npm cache is owned correctly
RUN chown nodeuser:nodeuser /usr/src/app

# Switch to nodeuser before running npm commands
USER nodeuser

COPY --chown=nodeuser:nodeuser package*.json ./
RUN npm ci --only=production && \
    # Remove npm cache
    npm cache clean --force

# Switch back to root for the next stages if needed
USER root

# Development stage
FROM deps as development

USER root
ENV NODE_ENV=development

# Install dev dependencies
# Create and set up directories
RUN mkdir -p /usr/src/app/local_uploads /usr/src/app/uploads && \
    chown -R nodeuser:nodeuser /usr/src/app/local_uploads /usr/src/app/uploads

COPY --chown=nodeuser:nodeuser package*.json ./
RUN npm install && \
    npm cache clean --force

# Create upload directory
RUN mkdir -p uploads
COPY --chown=nodeuser:nodeuser src/ ./src/
COPY --chown=nodeuser:nodeuser public/ ./public/
# Check if __tests__ and dev exist in your project root, if not, these COPY lines will fail for dev target
# COPY --chown=nodeuser:nodeuser __tests__/ ./__tests__/
# COPY --chown=nodeuser:nodeuser dev/ ./dev/
COPY --chown=nodeuser:nodeuser .eslintrc.json .eslintignore .prettierrc nodemon.json ./

# Copy source with specific paths to avoid unnecessary files
COPY src/ ./src/
COPY public/ ./public/
COPY __tests__/ ./__tests__/
COPY dev/ ./dev/
COPY .eslintrc.json .eslintignore ./

# Expose port
# Switch back to nodeuser for runtime
USER nodeuser
EXPOSE 3000

CMD ["npm", "run", "dev"]

# Production stage
FROM deps as production

USER root
ENV NODE_ENV=production
ENV UPLOAD_DIR /app/uploads

# Create upload directory
RUN mkdir -p uploads
# Create and set up directories
RUN mkdir -p /usr/src/app/local_uploads /usr/src/app/uploads && \
    chown -R nodeuser:nodeuser /usr/src/app/local_uploads /usr/src/app/uploads

# Copy only necessary source files
COPY src/ ./src/
COPY public/ ./public/
# Copy only necessary source files and ensure ownership
COPY --chown=nodeuser:nodeuser src/ ./src/
COPY --chown=nodeuser:nodeuser public/ ./public/

# Copy the entrypoint script and make it executable
COPY --chown=root:root src/scripts/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh

# Expose port
EXPOSE 3000

CMD ["npm", "start"]
# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]

# Final user should be nodeuser for runtime
USER nodeuser

# Default command to run (passed to entrypoint)
CMD ["npm", "start"]
344 README.md

@@ -4,10 +4,9 @@ A stupid simple file upload application that provides a clean, modern interface



No auth (unless you want it now!), no storage, no nothing. Just a simple file uploader to drop dumb files into a dumb folder.
No auth (unless you want it!), no complicated setup (unless you want to!), no nothing. Just a simple way to drop dumb files into a dumb folder... or an S3 bucket!

## Table of Contents

- [Quick Start](#quick-start)
- [Production Deployment with Docker](#production-deployment-with-docker)
- [Local Development (Recommended Quick Start)](LOCAL_DEVELOPMENT.md)

@@ -21,61 +20,70 @@ No auth (unless you want it now!), no storage, no nothing. Just a simple file up

## Quick Start

### Option 1: Docker (For Dummies)

### Option 1: Docker (For Dummies - Local Storage)
```bash
# Pull and run with one command
# Pull and run with one command (uses local storage)
docker run -p 3000:3000 -v ./uploads:/app/uploads dumbwareio/dumbdrop:latest
```

1. Go to http://localhost:3000
2. Upload a File - It'll show up in ./uploads
3. Celebrate on how dumb easy this was

### Option 2: Docker Compose (For Dummies who like customizing)
2. Upload a File - It'll show up in `./uploads` on your host machine.
3. Celebrate on how dumb easy this was.

### Option 2: Docker Compose (For Dummies who like customizing - Local or S3)
Create a `docker-compose.yml` file:

```yaml
services:
  dumbdrop:
    image: dumbwareio/dumbdrop:latest
    image: dumbwareio/dumbdrop:latest # Use the desired tag/version
    ports:
      - 3000:3000
      - "3000:3000" # Map host port 3000 to container port 3000
    volumes:
      # Where your uploaded files will land
      # Mount a host directory to store metadata (.metadata folder)
      # This is needed even for S3 mode to track ongoing uploads.
      # For local storage mode, this is also where files land.
      - ./uploads:/app/uploads
    environment:
      # Explicitly set upload directory inside the container
      UPLOAD_DIR: /app/uploads
      # The title shown in the web interface
      DUMBDROP_TITLE: DumbDrop
      # Maximum file size in MB
      MAX_FILE_SIZE: 1024
      # Optional PIN protection (leave empty to disable)
      DUMBDROP_PIN: 123456
      # Upload without clicking button
      AUTO_UPLOAD: false
      # The base URL for the application
      # You must update this to the url you use to access your site
      BASE_URL: http://localhost:3000
      # --- Core Settings ---
      # STORAGE_TYPE: "local" # Options: "local", "s3" (Defaults to "local" if unset)
      DUMBDROP_TITLE: "My DumbDrop"
      BASE_URL: "http://localhost:3000/" # Must end with a slash!
      MAX_FILE_SIZE: 1024 # Max file size in MB
      DUMBDROP_PIN: "" # Optional PIN (4-10 digits)
      AUTO_UPLOAD: "false" # Set to "true" to upload immediately

      # --- Local Storage Settings (if STORAGE_TYPE="local") ---
      UPLOAD_DIR: "/app/uploads" # *Must* be set inside container if using local storage

      # --- S3 Storage Settings (if STORAGE_TYPE="s3") ---
      # S3_REGION: "us-east-1" # Your S3 region (e.g., us-west-000 for B2)
      # S3_BUCKET_NAME: "your-s3-bucket-name" # Your bucket name
      # S3_ACCESS_KEY_ID: "YOUR_ACCESS_KEY" # Your S3 Access Key
      # S3_SECRET_ACCESS_KEY: "YOUR_SECRET_KEY" # Your S3 Secret Key
      # S3_ENDPOINT_URL: "" # Optional: e.g., https://s3.us-west-000.backblazeb2.com for B2, http://minio.local:9000 for Minio
      # S3_FORCE_PATH_STYLE: "false" # Optional: Set to "true" for providers like Minio

      # --- Optional Settings ---
      # ALLOWED_EXTENSIONS: ".jpg,.png,.pdf" # Comma-separated allowed extensions
      # ALLOWED_IFRAME_ORIGINS: "https://organizr.example.com" # Allow embedding in specific origins
      # APPRISE_URL: "" # For notifications
      # FOOTER_LINKS: "My Site @ https://example.com" # Custom footer links
      # CLIENT_MAX_RETRIES: 5 # Client-side chunk retry attempts
    restart: unless-stopped
```

Then run:

```bash
docker compose up -d
```

1. Go to http://localhost:3000
2. Upload a File - It'll show up in ./uploads
3. Rejoice in the glory of your dumb uploads
2. Upload a File - It'll show up in `./uploads` (if local) or your S3 bucket (if S3).
3. Rejoice in the glory of your dumb uploads, now potentially in the cloud!

> **Note:** The `UPLOAD_DIR` environment variable is now explicitly set to `/app/uploads` in the container. The Dockerfile only creates the `uploads` directory, not `local_uploads`. The host directory `./uploads` is mounted to `/app/uploads` for persistent storage.
> **Note:** When using `STORAGE_TYPE=s3`, the local volume mount (`./uploads:/app/uploads`) is still used to store temporary metadata files (`.metadata` folder) for tracking multipart uploads. The actual files go to S3.

### Option 3: Running Locally (For Developers)

For local development setup, troubleshooting, and advanced usage, see the dedicated guide:
For local development setup without Docker, see the dedicated guide:

👉 [Local Development Guide](LOCAL_DEVELOPMENT.md)

@@ -83,261 +91,154 @@ For local development setup, troubleshooting, and advanced usage, see the dedica

- 🚀 Drag and drop file uploads
- 📁 Multiple file selection
- ☁️ **Optional S3 Storage:** Store files in AWS S3, Backblaze B2, MinIO, or other S3-compatible services.
- 💾 **Local Storage:** Default simple file storage on the server's disk.
- 🎨 Clean, responsive UI with Dark Mode
- 📦 Docker support with easy configuration
- 📂 Directory upload support (maintains structure)
- 📂 Directory upload support (maintains structure in local storage or as S3 keys)
- 🔒 Optional PIN protection
- 📱 Mobile-friendly interface
- 🔔 Configurable notifications via Apprise
- ⚡ Zero dependencies on client-side
- 🛡️ Built-in security features
- 🛡️ Built-in security features (rate limiting, security headers)
- 💾 Configurable file size limits
- 🎯 File extension filtering
- 📋 Optional file listing with download/delete functionality
- ⚙️ Native S3 Multipart Upload for large files when using S3 storage.
- 🔗 S3 Presigned URLs for efficient downloads (offloads server bandwidth).

## Configuration

DumbDrop is configured primarily through environment variables.

### Environment Variables

| Variable | Description | Default | Required |
| --- | --- | --- | --- |
| PORT | Server port | 3000 | No |
| BASE_URL | Base URL for the application | http://localhost:PORT | No |
| MAX_FILE_SIZE | Maximum file size in MB | 1024 | No |
| DUMBDROP_PIN | PIN protection (4-10 digits) | None | No |
| DUMBDROP_TITLE | Site title displayed in header | DumbDrop | No |
| APPRISE_URL | Apprise URL for notifications | None | No |
| APPRISE_MESSAGE | Notification message template | New file uploaded {filename} ({size}), Storage used {storage} | No |
| APPRISE_SIZE_UNIT | Size unit for notifications (B, KB, MB, GB, TB, or Auto) | Auto | No |
| AUTO_UPLOAD | Enable automatic upload on file selection | false | No |
| SHOW_FILE_LIST | Enable file listing with download and delete functionality | false | No |
| ALLOWED_EXTENSIONS | Comma-separated list of allowed file extensions | None | No |
| ALLOWED_IFRAME_ORIGINS (deprecated: see ALLOWED_ORIGINS) | Comma-separated list of origins allowed to embed the app in an iframe | None | No |
| ALLOWED_ORIGINS | You can restrict CORS to your BASE_URL or a comma-separated list of specified origins, which will automatically include your base_url | '\*' | No |
| UPLOAD_DIR | Directory for uploads (Docker/production; should be `/app/uploads` in container) | None (see LOCAL_UPLOAD_DIR fallback) | No |
| LOCAL_UPLOAD_DIR | Directory for uploads (local dev, fallback: './local_uploads') | ./local_uploads | No |
| TRUST_PROXY | Trust proxy headers (X-Forwarded-For) - only enable if behind a reverse proxy | false | No |
| TRUSTED_PROXY_IPS | Comma-separated list of trusted proxy IPs (optional, requires TRUST_PROXY=true) | None | No |

| Variable | Description | Default | Required |
|---|---|---|---|
| **`STORAGE_TYPE`** | Storage backend: `local` or `s3` | `local` | No |
| `PORT` | Server port | `3000` | No |
| `BASE_URL` | Base URL for the application (must end with `/`) | `http://localhost:PORT/` | No |
| `MAX_FILE_SIZE` | Maximum file size in MB | `1024` | No |
| `DUMBDROP_PIN` | PIN protection (4-10 digits) | None | No |
| `DUMBDROP_TITLE` | Title displayed in the browser tab/header | `DumbDrop` | No |
| `AUTO_UPLOAD` | Enable automatic upload on file selection (`true`/`false`) | `false` | No |
| `ALLOWED_EXTENSIONS` | Comma-separated list of allowed file extensions (e.g., `.jpg,.png`) | None (all allowed) | No |
| `ALLOWED_IFRAME_ORIGINS` | Comma-separated list of origins allowed to embed in an iframe | None | No |
| `FOOTER_LINKS` | Comma-separated custom footer links (Format: `"Text @ URL"`) | None | No |
| `CLIENT_MAX_RETRIES` | Max retry attempts for client-side chunk uploads | `5` | No |
| `DEMO_MODE` | Run in demo mode (`true`/`false`). Overrides storage settings. | `false` | No |
| `APPRISE_URL` | Apprise URL for notifications | None | No |
| `APPRISE_MESSAGE` | Notification message template (`{filename}`, `{size}`, `{storage}`) | `New file uploaded...` | No |
| `APPRISE_SIZE_UNIT` | Size unit for notifications (`B`, `KB`, `MB`, `GB`, `TB`, `Auto`) | `Auto` | No |
| --- | --- | --- | --- |
| **Local Storage Only:** | | | |
| `UPLOAD_DIR` | **(Docker)** Directory for uploads/metadata inside container | None | Yes (if `STORAGE_TYPE=local`) |
| `LOCAL_UPLOAD_DIR` | **(Local Dev)** Directory for uploads/metadata on host machine | `./local_uploads` | No (if `STORAGE_TYPE=local`) |
| --- | --- | --- | --- |
| **S3 Storage Only:** | | | |
| `S3_REGION` | S3 Region (e.g., `us-east-1`, `us-west-000`) | None | Yes (if `STORAGE_TYPE=s3`) |
| `S3_BUCKET_NAME` | Name of the S3 Bucket | None | Yes (if `STORAGE_TYPE=s3`) |
| `S3_ACCESS_KEY_ID` | S3 Access Key ID | None | Yes (if `STORAGE_TYPE=s3`) |
| `S3_SECRET_ACCESS_KEY` | S3 Secret Access Key | None | Yes (if `STORAGE_TYPE=s3`) |
| `S3_ENDPOINT_URL` | **(Optional)** Custom S3 endpoint URL (for B2, MinIO, etc.) | None (uses default AWS endpoint) | No |
| `S3_FORCE_PATH_STYLE` | **(Optional)** Force path-style S3 requests (`true`/`false`). Needed for MinIO, etc. | `false` | No |

- **UPLOAD_DIR** is used in Docker/production. If not set, LOCAL_UPLOAD_DIR is used for local development. If neither is set, the default is `./local_uploads`.
- **Docker Note:** The Dockerfile now only creates the `uploads` directory inside the container. The host's `./local_uploads` is mounted to `/app/uploads` and should be managed on the host system.
- **BASE_URL**: If you are deploying DumbDrop under a subpath (e.g., `https://example.com/watchfolder/`), you **must** set `BASE_URL` to the full path including the trailing slash (e.g., `https://example.com/watchfolder/`). All API and asset requests will be prefixed with this value. If you deploy at the root, use `https://example.com/`.
- **BASE_URL** must end with a trailing slash. The app will fail to start if this is not the case.
- **Storage:** Set `STORAGE_TYPE` to `s3` to enable S3 storage. Otherwise, it defaults to `local`.
- **Local Storage:** If `STORAGE_TYPE=local`, `UPLOAD_DIR` (in Docker) or `LOCAL_UPLOAD_DIR` (local dev) determines where files are stored.
- **S3 Storage:** If `STORAGE_TYPE=s3`, the `S3_*` variables are required. `UPLOAD_DIR`/`LOCAL_UPLOAD_DIR` is still used for storing temporary `.metadata` files locally.
- **S3 Endpoint/Path Style:** Use `S3_ENDPOINT_URL` and `S3_FORCE_PATH_STYLE` only if connecting to a non-AWS S3-compatible service.
- **BASE_URL**: Must end with a trailing slash (`/`). The app will fail to start otherwise. Example: `http://your.domain.com/dumbdrop/`.
- **Security Note (S3):** For production, using IAM Roles (e.g., EC2 Instance Profiles, ECS Task Roles) is strongly recommended over embedding Access Keys in environment variables.

See `.env.example` for a template and more details.
See `.env.example` for a template.

<details>
<summary>Reverse Proxy Configuration (TRUST_PROXY)</summary>
<summary>ALLOWED_IFRAME_ORIGINS</summary>

### Important Security Notice

By default, DumbDrop **does not** trust proxy headers like `X-Forwarded-For`. This prevents attackers from spoofing IP addresses to bypass rate limiting and PIN brute-force protection.

### When to Enable TRUST_PROXY

Only enable `TRUST_PROXY=true` if you are deploying DumbDrop behind a **trusted reverse proxy** such as:
- Nginx
- Apache
- Caddy
- Traefik
- Cloudflare
- Other CDN or load balancer

### Basic Configuration

If behind a single reverse proxy:

```env
TRUST_PROXY=true
```

### Advanced Configuration (Recommended)

For additional security, specify the exact IP addresses of your trusted proxies:

```env
TRUST_PROXY=true
TRUSTED_PROXY_IPS=172.17.0.1,10.0.0.1
```

**Common proxy IPs:**
- Docker default bridge: `172.17.0.1`
- Docker Compose networks: Check with `docker network inspect <network_name>`
- Nginx/Apache on same host: `127.0.0.1` or `::1`
- External proxy: Use the actual IP of your proxy server

### Security Warnings

⚠️ **DO NOT enable `TRUST_PROXY` if:**
- DumbDrop is directly accessible from the internet
- You are unsure whether you have a reverse proxy
- You cannot verify the proxy IP addresses

⚠️ **Enabling proxy trust without a properly configured reverse proxy allows attackers to bypass security measures by spoofing headers.**

### Examples for Common Setups

**Nginx Reverse Proxy:**
```env
TRUST_PROXY=true
TRUSTED_PROXY_IPS=172.17.0.1
```

**Cloudflare:**
```env
TRUST_PROXY=true
# List Cloudflare IPs or use their published IP ranges
```

**Direct Access (No Proxy):**
```env
# TRUST_PROXY=false (default - no need to set)
```

</details>
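For reference, the `TRUST_PROXY` / `TRUSTED_PROXY_IPS` behaviour described above maps onto Express's standard `trust proxy` setting. The snippet below is a hedged sketch of that mapping, not DumbDrop's exact code:

```javascript
// Illustrative sketch: assumes the env vars documented above.
const express = require('express');
const app = express();

if (process.env.TRUST_PROXY === 'true') {
  const proxies = (process.env.TRUSTED_PROXY_IPS || '')
    .split(',')
    .map((ip) => ip.trim())
    .filter(Boolean);
  // With explicit IPs, Express only honours X-Forwarded-For from those hops;
  // otherwise trust just the first hop in front of the app.
  app.set('trust proxy', proxies.length > 0 ? proxies : 1);
}
// Default: proxy headers are ignored, so rate limiting sees the real socket IP.
```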

<details>
<summary>ALLOWED_IFRAME_ORIGINS (DEPRECATED: see ALLOWED_ORIGINS)</summary>

- This is now deprecated but still works for backwards compatibility
- ALLOWED_IFRAME_ORIGINS will be used as a fallback if ALLOWED_ORIGINS is not set
- Please update to ALLOWED_ORIGINS for future compatibility

~~To allow this app to be embedded in an iframe on specific origins (such as Organizr), set the `ALLOWED_IFRAME_ORIGINS` environment variable. For example:~~
To allow this app to be embedded in an iframe on specific origins (such as Organizr), set the `ALLOWED_IFRAME_ORIGINS` environment variable. For example:

```env
ALLOWED_IFRAME_ORIGINS=https://organizr.example.com,https://myportal.com
```

- ~~If not set, the app will only allow itself to be embedded in an iframe on the same origin (default security).~~
- ~~If set, the app will allow embedding in iframes on the specified origins and itself.~~
- ~~**Security Note:** Only add trusted origins. Allowing arbitrary origins can expose your app to clickjacking and other attacks.~~
- If not set, the app will only allow itself to be embedded in an iframe on the same origin (default security).
- If set, the app will allow embedding in iframes on the specified origins and itself.
- **Security Note:** Only add trusted origins. Allowing arbitrary origins can expose your app to clickjacking and other attacks.
</details>

<details>
<summary>ALLOWED_ORIGINS</summary>

By default `ALLOWED_ORIGINS` is set to '\*'

```env
ALLOWED_ORIGINS=https://organizr.example.com,https://myportal.com,http://internalip:port
```

- If you would like to restrict CORS to your BASE_URL, you can set it like this: `ALLOWED_ORIGINS=http://localhost:3000`
- If you would like to allow multiple origins, you can set it like this: `ALLOWED_ORIGINS=http://internalip:port,https://subdomain.domain.tld`
- This will automatically include your BASE_URL in the list of allowed origins.
</details>

<details>
<summary>File Extension Filtering</summary>

To restrict which file types can be uploaded, set the `ALLOWED_EXTENSIONS` environment variable. For example:

To restrict which file types can be uploaded, set the `ALLOWED_EXTENSIONS` environment variable with comma-separated extensions (including the dot):
```env
ALLOWED_EXTENSIONS=.jpg,.jpeg,.png,.pdf,.doc,.docx,.txt
```

If not set, all file extensions will be allowed.

</details>
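As a rough sketch of what the extension check described above can look like on the server side (the helper name is hypothetical, not DumbDrop's actual implementation):

```javascript
// Hypothetical helper that mirrors the documented ALLOWED_EXTENSIONS behaviour.
const path = require('path');

function isExtensionAllowed(filename, allowedExtensions = process.env.ALLOWED_EXTENSIONS) {
  if (!allowedExtensions) return true; // unset: every extension is accepted
  const allowed = allowedExtensions.split(',').map((ext) => ext.trim().toLowerCase());
  const ext = path.extname(filename).toLowerCase(); // includes the dot, e.g. '.pdf'
  return allowed.includes(ext);
}

// isExtensionAllowed('report.PDF', '.jpg,.jpeg,.png,.pdf') -> true
// isExtensionAllowed('virus.exe', '.jpg,.jpeg,.png,.pdf') -> false
```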

<details>
<summary>File Listing and Management</summary>

To enable the file listing feature that shows uploaded files with download and delete functionality, set the `SHOW_FILE_LIST` environment variable:

```env
SHOW_FILE_LIST=true
```

When enabled, this feature provides:

- **File Listing**: Displays all uploaded files and folders in a hierarchical structure
- **Download**: Direct download links for individual files
- **Delete**: Ability to delete files and entire folders (including all contents)
- **Statistics**: Shows total number of files and total storage used
- **Refresh**: Manual refresh button to update the file list
- **Folder Support**: Properly displays folder structures with nested files

**Security Note:** The file listing respects the same security measures as the upload functionality. If a PIN is configured, users must authenticate before accessing file management features.

The file list automatically refreshes after successful uploads to keep the display current.

</details>

<details>
<summary>Notification Setup</summary>

#### Message Templates

The notification message supports the following placeholders:

- `{filename}`: Name of the uploaded file
- `{filename}`: Name of the uploaded file (or S3 Key)
- `{size}`: Size of the file (formatted according to APPRISE_SIZE_UNIT)
- `{storage}`: Total size of all files in upload directory
- `{storage}`: Total size of all files in upload directory (Local storage only)

Example message template:

```env
APPRISE_MESSAGE: New file uploaded {filename} ({size}), Storage used {storage}
APPRISE_MESSAGE: New file dropped: {filename} ({size})!
```

Size formatting examples:

- Auto (default): Chooses nearest unit (e.g., "1.44MB", "256KB")
- Fixed unit: Set APPRISE_SIZE_UNIT to B, KB, MB, GB, or TB

Both {size} and {storage} use the same formatting rules based on APPRISE_SIZE_UNIT.

#### Notification Support

- Integration with [Apprise](https://github.com/caronc/apprise?tab=readme-ov-file#supported-notifications) for flexible notifications
- Support for all Apprise notification services
- Customizable notification messages with filename templating
- Customizable notification messages
- Optional - disabled if no APPRISE_URL is set
</details>
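To make the formatting rules above concrete, here is a minimal sketch of how `{size}`/`{storage}` formatting and placeholder substitution can be done; the helper names are hypothetical and this is not the app's actual notification code:

```javascript
// Hypothetical helpers that follow the APPRISE_SIZE_UNIT rules described above.
const UNITS = ['B', 'KB', 'MB', 'GB', 'TB'];

function formatSize(bytes, unit = process.env.APPRISE_SIZE_UNIT || 'Auto') {
  if (unit !== 'Auto' && UNITS.includes(unit)) {
    // Fixed unit: convert directly to the requested unit.
    return `${(bytes / 1024 ** UNITS.indexOf(unit)).toFixed(2)}${unit}`;
  }
  // Auto: pick the nearest unit, e.g. 1.44MB or 256.00KB.
  let value = bytes;
  let i = 0;
  while (value >= 1024 && i < UNITS.length - 1) { value /= 1024; i += 1; }
  return `${value.toFixed(2)}${UNITS[i]}`;
}

function renderMessage(template, { filename, size, storage }) {
  return template
    .replace('{filename}', filename)
    .replace('{size}', formatSize(size))
    .replace('{storage}', formatSize(storage));
}

// renderMessage('New file dropped: {filename} ({size})!', { filename: 'cat.jpg', size: 1_507_328, storage: 0 })
//   -> 'New file dropped: cat.jpg (1.44MB)!'
```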

<details>
<summary>S3 Cleanup Recommendation</summary>

When using `STORAGE_TYPE=s3`, DumbDrop relies on the native S3 Multipart Upload mechanism. If an upload is interrupted, incomplete parts may remain in your S3 bucket.

**It is strongly recommended to configure a Lifecycle Rule on your S3 bucket** (or use your provider's equivalent tool) to automatically abort and delete incomplete multipart uploads after a reasonable period (e.g., 1-7 days). This prevents orphaned parts from accumulating costs. DumbDrop's cleanup only removes local tracking files, not the actual S3 parts.
</details>

## Security

### Features

- Variable-length PIN support (4-10 digits)
- Constant-time PIN comparison
- Input sanitization
- Rate limiting with IP-based tracking
- Protection against IP spoofing attacks
- Configurable proxy trust for reverse proxy deployments
- Input sanitization (filenames, paths)
- Rate limiting on API endpoints
- Security headers (CSP, HSTS, etc.)
- File extension filtering
- No client-side PIN storage
- Secure file handling

### Security Best Practices

1. **PIN Protection**: Always set a strong PIN when deploying publicly
2. **Proxy Trust**: Only enable `TRUST_PROXY` when behind a verified reverse proxy
3. **HTTPS**: Use HTTPS in production (handled by your reverse proxy)
4. **File Extensions**: Restrict allowed file types using `ALLOWED_EXTENSIONS` if possible
5. **Regular Updates**: Keep DumbDrop and its dependencies up to date
- Secure file handling (uses S3 presigned URLs for downloads if S3 is enabled)

## Technical Details

### Stack

- **Backend**: Node.js (>=20.0.0) with Express
- **Frontend**: Vanilla JavaScript (ES6+)
- **Storage**: Local Filesystem or S3-compatible Object Storage
- **Container**: Docker with multi-stage builds
- **Security**: Express security middleware
- **Upload**: Chunked file handling via Multer
- **Upload**: Chunked uploads via client-side logic, processed via Express middleware, using native S3 Multipart Upload when `STORAGE_TYPE=s3`.
- **Notifications**: Apprise integration
- **SDK**: AWS SDK for JavaScript v3 (`@aws-sdk/client-s3`, `@aws-sdk/s3-request-presigner`) when `STORAGE_TYPE=s3`.

### Dependencies

- express: Web framework
- multer: File upload handling
- apprise: Notification system
- cors: Cross-origin resource sharing
- dotenv: Environment configuration
- express-rate-limit: Rate limiting
- `express`: Web framework
- `@aws-sdk/client-s3`: AWS S3 SDK (used if `STORAGE_TYPE=s3`)
- `@aws-sdk/s3-request-presigner`: For S3 presigned URLs (used if `STORAGE_TYPE=s3`)
- `cookie-parser`: Parse cookies
- `cors`: Cross-origin resource sharing
- `dotenv`: Environment configuration
- `express-rate-limit`: Rate limiting

## Contributing

@@ -349,17 +250,8 @@ Both {size} and {storage} use the same formatting rules based on APPRISE_SIZE_UN

See [Local Development (Recommended Quick Start)](LOCAL_DEVELOPMENT.md) for local setup and guidelines.

## Support the Project

<a href="https://www.buymeacoffee.com/dumbware" target="_blank">
<img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" height="60">
</a>

---

Made with ❤️ by [DumbWare.io](https://dumbware.io)

## Future Features

- Camera Upload for Mobile
> Got an idea? [Open an issue](https://github.com/dumbwareio/dumbdrop/issues) or [submit a PR](https://github.com/dumbwareio/dumbdrop/pulls)
> Got an idea? [Open an issue](https://github.com/dumbwareio/dumbdrop/issues) or [submit a PR](https://github.com/dumbwareio/dumbdrop/pulls)

@@ -1,9 +1,6 @@
services:
  dumbdrop:
    image: dumbwareio/dumbdrop:latest
    # build: .
    container_name: dumbdrop
    restart: unless-stopped
    ports:
      - 3000:3000
    volumes:
@@ -16,24 +13,30 @@ services:
      MAX_FILE_SIZE: 1024 # Maximum file size in MB
      DUMBDROP_PIN: 123456 # Optional PIN protection (4-10 digits, leave empty to disable)
      AUTO_UPLOAD: true # Upload without clicking button
      BASE_URL: http://localhost:3000 # The base URL for the application, You must update this to the url you use to access your site

      # Comma-separated list of allowed origins for CORS
      # (default: '*' if empty, replace with your base_url if you want to restrict only to base_url)
      # When adding multiple origins, base_url will be included by default and does not need to be added to the list
      # ALLOWED_IFRAME_ORIGINS: #DEPRECATED and will be used as ALLOWED_ORIGINS if SET
      # ALLOWED_ORIGINS: http://internalip:port,https://subdomain.example.com

      BASE_URL: http://localhost:3000 # The base URL for the application

      # Additional available environment variables (commented out with defaults)
      # FOOTER_LINKS: "My Site @ https://example.com,Docs @ https://docs.example.com" # Custom footer links
      # PORT: 3000 # Server port (default: 3000)
      # NODE_ENV: production # Node environment (development/production) - when not using production ALLOWED_ORIGINS will be set to '*' by default
      # DEBUG: false # Debug mode for verbose logging (default: false in production, true in development)
      # NODE_ENV: production # Node environment (development/production)
      # APPRISE_URL: "" # Apprise notification URL for upload notifications (default: none)
      # APPRISE_MESSAGE: "New file uploaded - {filename} ({size}), Storage used {storage}" # Notification message template with placeholders: {filename}, {size}, {storage}
      # APPRISE_SIZE_UNIT: "Auto" # Size unit for notifications (B, KB, MB, GB, TB, or Auto)
      # ALLOWED_EXTENSIONS: ".jpg,.jpeg,.png,.pdf,.doc,.docx,.txt" # Comma-separated list of allowed file extensions (default: all allowed)

      # Reverse Proxy / Security Settings (IMPORTANT: Read documentation before enabling)
      # TRUST_PROXY: false # Set to 'true' ONLY if behind a trusted reverse proxy (Nginx, Caddy, Traefik, etc.)
      # TRUSTED_PROXY_IPS: 172.17.0.1,10.0.0.1 # Optional: Comma-separated list of trusted proxy IPs for additional security
      # WARNING: Enabling TRUST_PROXY without a proper reverse proxy allows attackers to bypass rate limiting!
      # PUID: 1000 # User ID for file ownership (default: 1000)
      # PGID: 1000 # Group ID for file ownership (default: 1000)
      # UMASK: "000" # File permissions mask (default: 000)
    restart: unless-stopped
    # user: "${PUID}:${PGID}" # Don't set user here, entrypoint handles it
    # Consider adding healthcheck
    # healthcheck:
    #   test: ["CMD", "curl", "--fail", "http://localhost:3000/health"] # Assuming a /health endpoint exists
    #   interval: 30s
    #   timeout: 10s
    #   retries: 3
    # healthcheck:
    #   test: ["CMD", "curl", "--fail", "http://localhost:3000/health"]
    #   interval: 30s
    #   timeout: 10s
    #   retries: 3
    #   start_period: 30s

@@ -1,217 +0,0 @@
# Bind Mount Compatibility Fix

## Problem

Files uploaded to DumbDrop were disappearing when using Docker bind mounts (e.g., `-v ./uploads:/app/uploads`). The application only worked correctly with named Docker volumes.

### Root Cause

The path validation function `isPathWithinUploadDir()` was using `fs.realpathSync()` on file paths that didn't exist yet. This caused issues because:

1. **Non-existent paths**: During upload initialization, files haven't been created yet. `fs.realpathSync()` requires the path to exist.
2. **Docker bind mount behavior**: With bind mounts, path resolution behaves differently than with named volumes.
3. **Validation failures**: Path validation would fail silently, causing files to be rejected or written to unexpected locations.

### Previous Implementation

```javascript
function isPathWithinUploadDir(filePath, uploadDir) {
  try {
    // This would fail for non-existent files!
    const realFilePath = fs.realpathSync(filePath);
    const realUploadDir = fs.realpathSync(uploadDir);

    const relativePath = path.relative(realUploadDir, realFilePath);
    return !relativePath.startsWith('..');
  } catch (err) {
    return false; // Silently fail - files disappear!
  }
}
```

## Solution

Created a new implementation of `isPathWithinUploadDir()` that:

1. **Works with non-existent files**: Uses `path.resolve()` and `path.normalize()` for files that don't exist yet
2. **Supports existing files**: Uses `fs.realpathSync()` only when the file exists and `requireExists=true`
3. **Handles bind mounts correctly**: Path normalization works consistently across Docker volume types
4. **Provides security**: Still detects path traversal attempts using `path.relative()`

### New Implementation

```javascript
function isPathWithinUploadDir(filePath, uploadDir, requireExists = false) {
  try {
    // Always resolve the upload directory (must exist)
    const realUploadDir = fs.realpathSync(uploadDir);

    let resolvedFilePath;
    if (requireExists && fs.existsSync(filePath)) {
      // For existing files, resolve symlinks for security
      resolvedFilePath = fs.realpathSync(filePath);
    } else {
      // For non-existent files (uploads), use path.resolve
      resolvedFilePath = path.resolve(filePath);
    }

    // Normalize paths for consistent comparison
    const relativePath = path.relative(
      path.normalize(realUploadDir),
      path.normalize(resolvedFilePath)
    );

    // Reject paths outside upload directory
    if (relativePath === '') return true; // Same directory
    if (relativePath.startsWith('..')) return false; // Path traversal

    // Windows: Check same drive
    if (process.platform === 'win32') {
      if (resolvedFilePath.split(':')[0] !== realUploadDir.split(':')[0]) {
        return false;
      }
    }

    return true;
  } catch (err) {
    logger.error(`Path validation error: ${err.message}`, err);
    return false;
  }
}
```
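Since the repository's `package.json` (later in this compare) runs tests with `node --test`, a helper like this can be exercised directly with the built-in runner. The cases below are an illustrative sketch, not the contents of `test/path-validation.test.js`, and the import path is an assumption:

```javascript
// Illustrative sketch using node:test; import path and cases are assumptions.
const test = require('node:test');
const assert = require('node:assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const { isPathWithinUploadDir } = require('../src/utils/fileUtils');

function tmpUploadDir() {
  // realpathSync avoids false negatives when the OS temp dir is a symlink
  return fs.realpathSync(fs.mkdtempSync(path.join(os.tmpdir(), 'dumbdrop-')));
}

test('accepts a not-yet-created file inside the upload dir', () => {
  const uploadDir = tmpUploadDir();
  const target = path.join(uploadDir, 'photos', 'cat.jpg.partial');
  assert.strictEqual(isPathWithinUploadDir(target, uploadDir, false), true);
});

test('rejects path traversal out of the upload dir', () => {
  const uploadDir = tmpUploadDir();
  const target = path.join(uploadDir, '..', 'escape', 'passwd');
  assert.strictEqual(isPathWithinUploadDir(target, uploadDir, false), false);
});
```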

## Changes Made

### 1. Updated `src/utils/fileUtils.js`

- Added the new `isPathWithinUploadDir()` function
- Exported it for use across the application
- Made it a shared utility to ensure consistency

### 2. Updated `src/routes/files.js`

- Import `isPathWithinUploadDir` from `fileUtils`
- Use `requireExists=true` for operations on existing files:
  - File info (`/info/*`)
  - File download (`/download/*`)
  - File deletion (`DELETE /*`)
  - File rename source path
- Use `requireExists=false` for rename destination (doesn't exist yet)

### 3. Updated `src/routes/upload.js`

- Import `isPathWithinUploadDir` from `fileUtils`
- Added path validation at key points:
  - Initial file path construction
  - After folder mapping
  - After unique path generation
  - For `.partial` file paths
- All use `requireExists=false` since files are being created

### 4. Added Tests

Created `test/path-validation.test.js` with comprehensive tests:
- ✅ Valid paths within upload directory
- ✅ Nested folder structures
- ✅ Paths with spaces and special characters
- ✅ Path traversal attack detection
- ✅ .partial file extensions
- ✅ Existing vs non-existing files
- ✅ Windows drive letter validation
- ✅ Unicode filenames
- ✅ Deeply nested folders

## Testing

### Run Path Validation Tests

```bash
npm test -- test/path-validation.test.js
```

### Test with Bind Mounts

#### Docker Compose (bind mount)

```yaml
services:
  dumbdrop:
    image: dumbwareio/dumbdrop:latest
    ports:
      - 3000:3000
    volumes:
      - ./uploads:/app/uploads # Bind mount - now works!
    environment:
      UPLOAD_DIR: /app/uploads
```

#### Docker Compose (named volume - already worked)

```yaml
services:
  dumbdrop:
    image: dumbwareio/dumbdrop:latest
    ports:
      - 3000:3000
    volumes:
      - dumbdrop_uploads:/app/uploads # Named volume

volumes:
  dumbdrop_uploads:
```

### Verification Steps

1. Start the application with bind mount configuration
2. Upload a file through the web interface
3. Verify the file appears in the web interface
4. Verify the file exists in the host's `./uploads` directory
5. Restart the container and verify files persist
6. Test file operations:
   - Download
   - Rename
   - Delete
   - Folder uploads

## Security Considerations

The fix maintains security while improving compatibility:

1. **Path traversal protection**: Still detects and blocks `../` attempts
2. **Symlink security**: For existing files, symlinks are resolved and validated
3. **Drive separation** (Windows): Files on different drives are rejected
4. **Upload directory validation**: Upload directory must exist and be accessible
5. **Consistent validation**: Same validation logic used across all routes

## Backward Compatibility

✅ **Fully backward compatible**:
- Named Docker volumes continue to work
- Local development (`./local_uploads`) unaffected
- All existing file operations work as before
- No breaking changes to API or configuration

## Performance Impact

**Minimal**:
- `path.resolve()` and `path.normalize()` are fast operations
- Only use `fs.realpathSync()` when necessary (existing files)
- No additional filesystem I/O for new uploads

## Related Commits

- [d69a8b2](https://github.com/DumbWareio/DumbDrop/commit/d69a8b25b4008f0a5f037ae56d9647651554af11) - Previous attempt (caused the issue)
- [fc8bff9](https://github.com/DumbWareio/DumbDrop/commit/fc8bff9a1422004d159e19bd5c698da77536a62f) - Related security improvements

## Future Improvements

Potential enhancements:
- Add integration tests with actual Docker bind mounts
- Monitor for performance impact in high-load scenarios
- Consider caching upload directory resolution
- Add metrics for path validation failures

## Summary

This fix resolves the critical issue where files would disappear when using Docker bind mounts. The solution properly handles path validation for both existing and non-existing files while maintaining security against path traversal attacks.

@@ -1,77 +0,0 @@
const js = require('@eslint/js');
const prettierConfig = require('eslint-config-prettier');
const nodePlugin = require('eslint-plugin-n');

module.exports = [
  {
    ignores: [
      'node_modules/**',
      'uploads/**',
      'local_uploads/**',
      'dist/**',
      'build/**',
      '.metadata/**',
      'test/**',
    ],
  },
  js.configs.recommended,
  prettierConfig,
  {
    files: ['**/*.js'],
    ignores: ['public/service-worker.js'],
    languageOptions: {
      ecmaVersion: 2022,
      sourceType: 'commonjs',
      globals: {
        console: 'readonly',
        process: 'readonly',
        Buffer: 'readonly',
        __dirname: 'readonly',
        __filename: 'readonly',
        module: 'readonly',
        require: 'readonly',
        exports: 'readonly',
        setTimeout: 'readonly',
        setInterval: 'readonly',
        clearTimeout: 'readonly',
        clearInterval: 'readonly',
        URL: 'readonly',
      },
    },
    plugins: {
      n: nodePlugin,
    },
    rules: {
      ...nodePlugin.configs.recommended.rules,
      'n/exports-style': ['error', 'module.exports'],
      'n/file-extension-in-import': ['error', 'always'],
      'n/prefer-global/buffer': ['error', 'always'],
      'n/prefer-global/console': ['error', 'always'],
      'n/prefer-global/process': ['error', 'always'],
      'n/prefer-global/url-search-params': ['error', 'always'],
      'n/prefer-global/url': ['error', 'always'],
      'n/prefer-promises/dns': 'error',
      'n/prefer-promises/fs': 'error',
      'n/no-extraneous-require': 'off',
      'n/no-unpublished-require': 'off',
    },
  },
  {
    files: ['public/service-worker.js'],
    languageOptions: {
      ecmaVersion: 2022,
      sourceType: 'script',
      globals: {
        self: 'readonly',
        caches: 'readonly',
        clients: 'readonly',
        fetch: 'readonly',
        console: 'readonly',
      },
    },
    rules: {
      'no-undef': 'error',
    },
  },
];

2617 package-lock.json (generated)
File diff suppressed because it is too large
13 package.json

@@ -1,6 +1,6 @@
{
  "name": "dumbdrop",
  "version": "1.0.2",
  "version": "1.0.0",
  "main": "src/server.js",
  "scripts": {
    "start": "node src/server.js",
@@ -8,7 +8,6 @@
    "lint": "eslint .",
    "lint:fix": "eslint . --fix",
    "format": "prettier --write .",
    "test": "node --test test/**/*.test.js",
    "predev": "node -e \"const v=process.versions.node.split('.');if(v[0]<20) {console.error('Node.js >=20.0.0 required');process.exit(1)}\""
  },
  "keywords": [],
@@ -16,20 +15,20 @@
  "license": "ISC",
  "description": "A simple file upload application",
  "dependencies": {
    "@aws-sdk/client-s3": "^3.803.0",
    "@aws-sdk/s3-request-presigner": "^3.803.0",
    "apprise": "^1.0.0",
    "cookie-parser": "^1.4.7",
    "cors": "^2.8.5",
    "dotenv": "^16.0.3",
    "express": "^4.18.2",
    "express-rate-limit": "^7.1.5",
    "helmet": "^8.1.0",
    "multer": "^2.0.2",
    "toastify-js": "^1.12.0"
    "multer": "^1.4.5-lts.1"
  },
  "devDependencies": {
    "eslint": "^9.0.0",
    "eslint": "^8.56.0",
    "eslint-config-prettier": "^9.1.0",
    "eslint-plugin-n": "^17.0.0",
    "eslint-plugin-node": "^11.1.0",
    "nodemon": "^3.1.9",
    "prettier": "^3.2.5"
  },

Binary file not shown.
Before Size: 12 KiB | After Size: 3.6 KiB
@@ -1 +1,14 @@
<svg xmlns="http://www.w3.org/2000/svg" xml:space="preserve" viewBox="0 0 512 512"><circle cx="256" cy="256" r="232.7" style="opacity:.2;fill:#487bb7"/><path d="M256 512C114.8 512 0 397.2 0 256S114.8 0 256 0s256 114.8 256 256-114.8 256-256 256m0-465.4c-115.5 0-209.5 94-209.5 209.5s94 209.5 209.5 209.5 209.5-94 209.5-209.5c0-115.6-94-209.5-209.5-209.5M175.9 353H336c3.3 0 5.9 2.9 5.9 6.5V377c0 3.6-2.6 6.5-5.9 6.5H175.9c-3.3 0-5.9-2.9-5.9-6.5v-17.5c0-3.5 2.7-6.5 5.9-6.5m75.3-238.6-79.3 81.1c-4.1 4.2-1.1 11.3 4.8 11.3h36.9c3.7 0 6.7 3 6.7 6.7v108.4c0 3.7 3 6.7 6.7 6.7h58c3.7 0 6.7-3 6.7-6.7V213.5c0-3.7 3-6.7 6.7-6.7h36.9c5.9 0 8.9-7.1 4.8-11.3l-79.3-81.1c-2.6-2.6-7-2.6-9.6 0" style="fill:#487bb7"/></svg>
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg width="256" height="256" viewBox="0 0 256 256" fill="none" xmlns="http://www.w3.org/2000/svg">
  <!-- Background -->
  <rect width="256" height="256" rx="32" fill="#4CAF50"/>

  <!-- File outline -->
  <path d="M76 56C76 47.1634 83.1634 40 92 40H140L180 80V200C180 208.837 172.837 216 164 216H92C83.1634 216 76 208.837 76 200V56Z" fill="white"/>

  <!-- Folded corner -->
  <path d="M140 40L180 80H148C143.582 80 140 76.4183 140 72V40Z" fill="#E8E8E8"/>

  <!-- Arrow -->
  <path d="M128 96L96 128H116V168H140V128H160L128 96Z" fill="#4CAF50"/>
</svg>
Before Size: 709 B | After Size: 639 B
1699 public/index.html
File diff suppressed because it is too large
@@ -4,8 +4,8 @@
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>{{SITE_TITLE}} - Login</title>
  <link rel="stylesheet" href="styles.css">
  <link rel="icon" type="image/svg+xml" href="assets/icon.svg">
  <link rel="stylesheet" href="{{BASE_URL}}styles.css">
  <link rel="icon" type="image/svg+xml" href="{{BASE_URL}}assets/icon.svg">
  <style>
    .login-container {
      display: flex;
@@ -54,6 +54,7 @@
      background-color: var(--textarea-bg);
    }
  </style>
  <script>window.BASE_URL = '{{BASE_URL}}';</script>
</head>
<body>
  <div class="login-container">
@@ -125,12 +126,10 @@
    // Handle form submission
    const verifyPin = async (pin) => {
      try {
        const response = await fetch('/api/auth/verify-pin', {
        const response = await fetch(window.BASE_URL + 'api/auth/verify-pin', {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify({ pin }),
          credentials: 'include', // Ensure cookies are sent
          // redirect: 'follow' // Follow server redirects
          body: JSON.stringify({ pin })
        });
        const data = await response.json();

@@ -213,7 +212,7 @@
    };

    // Check PIN length and initialize
    fetch('/api/auth/pin-required')
    fetch(window.BASE_URL + 'api/auth/pin-required')
      .then(response => {
        if (response.status === 429) {
          throw new Error('Too many attempts. Please wait before trying again.');
@@ -242,6 +241,17 @@
        pinContainer.style.pointerEvents = 'none';
      }
    });

    document.addEventListener('DOMContentLoaded', function() {
      // Rewrite asset URLs to use BASE_URL as prefix if not absolute
      const baseUrl = window.BASE_URL;
      document.querySelectorAll('link[rel="stylesheet"], link[rel="icon"]').forEach(link => {
        const href = link.getAttribute('href');
        if (href && !href.startsWith('http') && !href.startsWith('data:') && !href.startsWith(baseUrl)) {
          link.setAttribute('href', baseUrl + href.replace(/^\//, ''));
        }
      });
    });
  </script>
</body>
</html>
@@ -39,6 +39,7 @@ body {
  display: flex;
  justify-content: center;
  padding-top: 2rem;
  padding-bottom: 150px;
  color: var(--text-color);
  transition: background-color 0.3s ease, color 0.3s ease;
}
@@ -46,7 +47,7 @@ body {
.container {
  width: 100%;
  max-width: 600px;
  padding: 20px;
  padding: 20px 20px 80px 20px;
  text-align: center;
  position: relative;
}
@@ -359,3 +360,46 @@ button:disabled {
    font-size: 1.125rem;
  }
}

/* Footer Styles */
footer {
  position: fixed;
  bottom: 0;
  left: 0;
  right: 0;
  width: 100%;
  padding: 15px;
  text-align: center;
  font-size: 0.85rem;
  color: var(--text-color);
  opacity: 0.9;
  border-top: 1px solid var(--border-color);
  transition: background-color 0.3s ease, color 0.3s ease;
  background-color: var(--bg-color);
  z-index: 100;
}

footer a {
  color: var(--text-color);
  text-decoration: none;
  transition: opacity 0.2s ease;
}

footer a:hover {
  opacity: 1;
  text-decoration: underline;
}

.footer-separator {
  margin: 0 0.5em;
}

@media (max-width: 480px) {
  footer {
    font-size: 0.75rem;
  }

  .footer-separator {
    margin: 0 0.3em;
  }
}
307 src/app.js
@@ -2,231 +2,188 @@
 * Main application setup and configuration.
 * Initializes Express app, middleware, routes, and static file serving.
 * Handles core application bootstrapping and configuration validation.
 * Imports and makes use of the configured storage adapter.
 */

const express = require('express');
const cors = require('cors');
const helmet = require('helmet');
const cookieParser = require('cookie-parser');
const path = require('path');
const fs = require('fs');
const fsPromises = require('fs').promises;
const fs = require('fs'); // Needed for reading HTML templates

// Load configuration FIRST
const { config, validateConfig } = require('./config');
const logger = require('./utils/logger');
const { ensureDirectoryExists } = require('./utils/fileUtils');
const { getHelmetConfig, requirePin } = require('./middleware/security');
// Validate config EARLY, before loading anything else that depends on it
try {
  validateConfig();
  logger.info("Configuration loaded and validated successfully.");
} catch (validationError) {
  logger.error("!!! Configuration validation failed. Server cannot start. !!!");
  logger.error(validationError.message);
  process.exit(1); // Exit if config is invalid
}

// Load storage adapter AFTER config is validated
// The storage/index.js file itself will log which adapter is being used.
const { storageAdapter } = require('./storage'); // This will load the correct adapter

// Load other utilities and middleware
// const { ensureDirectoryExists } = require('./utils/fileUtils'); // No longer needed here
const { securityHeaders, requirePin } = require('./middleware/security');
const { safeCompare } = require('./utils/security');
const { initUploadLimiter, pinVerifyLimiter, pinStatusLimiter, downloadLimiter } = require('./middleware/rateLimiter');
const { initUploadLimiter, pinVerifyLimiter, downloadLimiter } = require('./middleware/rateLimiter');
const { injectDemoBanner, demoMiddleware } = require('./utils/demoMode');
const { originValidationMiddleware, getCorsOptions } = require('./middleware/cors');

// Create Express app
const app = express();
const PORT = process.env.PORT || 3000;
const BASE_URL = process.env.BASE_URL || `http://localhost:${PORT}`;

// Configure proxy trust based on environment (security-sensitive)
if (config.trustProxy) {
  if (config.trustedProxyIps && config.trustedProxyIps.length > 0) {
    // Trust only specific proxy IPs
    app.set('trust proxy', config.trustedProxyIps);
    logger.warn(`Proxy trust enabled for specific IPs: ${config.trustedProxyIps.join(', ')}`);
  } else {
    // Trust first proxy only
    app.set('trust proxy', 1);
    logger.warn('Proxy trust enabled for first proxy - ensure reverse proxy is properly configured');
  }
} else {
  // Secure default: do not trust proxy headers
  app.set('trust proxy', false);
  logger.info('Proxy trust disabled (secure default mode)');
// Trust proxy headers (important for rate limiting and secure cookies if behind proxy)
app.set('trust proxy', 1); // Adjust the number based on your proxy setup depth

// --- Middleware Setup ---
app.use(cors()); // TODO: Configure CORS more strictly for production if needed
app.use(cookieParser());
app.use(express.json()); // For parsing application/json
app.use(securityHeaders); // Apply security headers

// --- Demo Mode Middleware ---
// Apply demo middleware early if demo mode is active
// Note: Demo mode is now also checked within adapters/storage factory
if (config.isDemoMode) {
  app.use(demoMiddleware); // This might intercept routes if demoAdapter is fully implemented
}

// Middleware setup
app.use(cors(getCorsOptions(BASE_URL)));
app.use(cookieParser());
app.use(express.json());
app.use(helmet(getHelmetConfig()));

// --- AUTHENTICATION MIDDLEWARE FOR ALL PROTECTED ROUTES ---
app.use((req, res, next) => {
  // List of paths that should be publicly accessible
  const publicPaths = [
    '/login',
    '/login.html',
    '/api/auth/logout',
    '/api/auth/verify-pin',
    '/api/auth/pin-required',
    '/api/auth/pin-length',
    '/pin-length',
    '/verify-pin',
    '/config.js',
    '/assets/',
    '/styles.css',
    '/manifest.json',
    '/asset-manifest.json',
    '/toastify',
  ];

  // Check if the current path matches any of the public paths
  if (publicPaths.some(path => req.path.startsWith(path))) {
    return next();
  }

  // For all other paths, apply both origin validation and auth middleware
  originValidationMiddleware(req, res, () => {
    demoMiddleware(req, res, next);
  });
});

// Import routes
// --- Route Definitions ---
// Import route handlers AFTER middleware setup
// Note: uploadRouter is now an object { router }, so destructure it
const { router: uploadRouter } = require('./routes/upload');
const fileRoutes = require('./routes/files');
const authRoutes = require('./routes/auth');

// Use routes with appropriate middleware
// Apply strict rate limiting to PIN verification, but more permissive to status checks
app.use('/api/auth/pin-required', pinStatusLimiter);
app.use('/api/auth/logout', pinStatusLimiter);
// Apply Rate Limiting and Auth Middleware to Routes
app.use('/api/auth', pinVerifyLimiter, authRoutes);
// Apply PIN check and rate limiting to upload/file routes
// The requirePin middleware now checks config.pin internally
app.use('/api/upload', requirePin(config.pin), initUploadLimiter, uploadRouter);
app.use('/api/files', requirePin(config.pin), downloadLimiter, fileRoutes);

// Root route

// --- Frontend Routes (Serving HTML) ---

// Root route ('/')
app.get('/', (req, res) => {
  // Check if the PIN is configured and the cookie exists
  // Redirect to login if PIN is required and not authenticated
  if (config.pin && (!req.cookies?.DUMBDROP_PIN || !safeCompare(req.cookies.DUMBDROP_PIN, config.pin))) {
    return res.redirect('/login.html');
    logger.debug('[/] PIN required, redirecting to login.html');
    return res.redirect('/login.html'); // Use relative path
  }

  let html = fs.readFileSync(path.join(__dirname, '../public', 'index.html'), 'utf8');
  html = html.replace(/{{SITE_TITLE}}/g, config.siteTitle);
  html = html.replace('{{AUTO_UPLOAD}}', config.autoUpload.toString());
  html = html.replace('{{MAX_RETRIES}}', config.clientMaxRetries.toString());
  html = html.replace('{{SHOW_FILE_LIST}}', config.showFileList.toString());
  html = injectDemoBanner(html);
  res.send(html);
});

// Login route
app.get('/login.html', (req, res) => {
  // Add cache control headers
  res.set('Cache-Control', 'no-store, no-cache, must-revalidate, private');
  res.set('Pragma', 'no-cache');
  res.set('Expires', '0');

  let html = fs.readFileSync(path.join(__dirname, '../public', 'login.html'), 'utf8');
  html = html.replace(/{{SITE_TITLE}}/g, config.siteTitle);
  html = injectDemoBanner(html);
  res.send(html);
});

// Serve static files with template variable replacement for HTML files
app.use((req, res, next) => {
  if (!req.path.endsWith('.html')) {
    return next();
  }

  try {
    const filePath = path.join(__dirname, '../public', req.path);
    const filePath = path.join(__dirname, '../public', 'index.html');
    let html = fs.readFileSync(filePath, 'utf8');

    // Perform template replacements
    html = html.replace(/{{SITE_TITLE}}/g, config.siteTitle);
    if (req.path === '/index.html' || req.path === 'index.html') {
      html = html.replace('{{AUTO_UPLOAD}}', config.autoUpload.toString());
      html = html.replace('{{MAX_RETRIES}}', config.clientMaxRetries.toString());
    }
    html = html.replace('{{AUTO_UPLOAD}}', config.autoUpload.toString());
    html = html.replace('{{MAX_RETRIES}}', config.clientMaxRetries.toString());
    // Ensure baseUrl has a trailing slash
    const baseUrlWithSlash = config.baseUrl.endsWith('/') ? config.baseUrl : config.baseUrl + '/';
    html = html.replace(/{{BASE_URL}}/g, baseUrlWithSlash);

    // Generate Footer Content
    let footerHtml = '';
    if (config.footerLinks && config.footerLinks.length > 0) {
      footerHtml = config.footerLinks.map(link =>
        `<a href="${link.url}" target="_blank" rel="noopener noreferrer">${link.text}</a>`
      ).join('<span class="footer-separator"> | </span>');
    } else {
      footerHtml = `<span class="footer-static">Built by <a href="https://www.dumbware.io/" target="_blank" rel="noopener noreferrer">Dumbwareio</a></span>`;
    }
    html = html.replace('{{FOOTER_CONTENT}}', footerHtml);

    // Inject Demo Banner if needed
    html = injectDemoBanner(html);

    res.setHeader('Content-Type', 'text/html');
    res.send(html);
  } catch {
    next();
  } catch (err) {
    logger.error(`Error processing index.html: ${err.message}`);
    res.status(500).send('Error loading page');
  }
});

// Serve remaining static files
app.use(express.static('public'));
// Serve Toastify assets under /toastify
app.use('/toastify', express.static(path.join(__dirname, '../node_modules/toastify-js/src')));
// Login route ('/login.html')
app.get('/login.html', (req, res) => {
  // Prevent caching of the login page
  res.set('Cache-Control', 'no-store, no-cache, must-revalidate, private');
  res.set('Pragma', 'no-cache');
  res.set('Expires', '0');

// Error handling middleware
// Express requires all 4 parameters for error handling middleware
// eslint-disable-next-line no-unused-vars
app.use((err, req, res, next) => {
  logger.error(`Unhandled error: ${err.message}`);
  res.status(500).json({
    message: 'Internal server error',
    error: process.env.NODE_ENV === 'development' ? err.message : undefined
  });
  try {
    const filePath = path.join(__dirname, '../public', 'login.html');
    let html = fs.readFileSync(filePath, 'utf8');
    html = html.replace(/{{SITE_TITLE}}/g, config.siteTitle);
    const baseUrlWithSlash = config.baseUrl.endsWith('/') ? config.baseUrl : config.baseUrl + '/';
    html = html.replace(/{{BASE_URL}}/g, baseUrlWithSlash);
    html = injectDemoBanner(html); // Inject demo banner if needed

    res.setHeader('Content-Type', 'text/html');
    res.send(html);
  } catch (err) {
    logger.error(`Error processing login.html: ${err.message}`);
    res.status(500).send('Error loading login page');
  }
});

// --- Add this after config is loaded ---
const METADATA_DIR = path.join(config.uploadDir, '.metadata');
// --- End addition ---
// --- Health Check Endpoint ---
app.get('/health', (req, res) => {
  res.status(200).json({ status: 'UP', message: 'Server is healthy' });
});

// --- Static File Serving ---
// Serve static files (CSS, JS, assets) from the 'public' directory
// Use express.static middleware, placed AFTER specific HTML routes
app.use(express.static(path.join(__dirname, '../public')));

// --- Error Handling Middleware ---
// Catch-all for unhandled errors
app.use((err, req, res, next) => { // eslint-disable-line no-unused-vars
  logger.error(`Unhandled application error: ${err.message}`, err.stack);
  // Avoid sending stack trace in production
  const errorResponse = {
    message: 'Internal Server Error',
    ...(config.nodeEnv === 'development' && { error: err.message, stack: err.stack })
  };
  // Ensure response is sent only once
  if (!res.headersSent) {
    res.status(err.status || 500).json(errorResponse);
  }
});

// --- Initialize Function (Simplified) ---
/**
 * Initialize the application
 * Sets up required directories and validates configuration
 * Initialize the application.
 * Placeholder function, as most initialization is now handled
 * by config loading, adapter loading, and server startup.
 * Could be used for other async setup tasks if needed later.
 */
async function initialize() {
  try {
    // Validate configuration
    validateConfig();

    // Ensure upload directory exists and is writable
    await ensureDirectoryExists(config.uploadDir);
    // Config validation happens at the top level now.
    // Storage adapter is loaded at the top level now.
    // Directory checks are handled within adapters/config.

    // --- Add this section ---
    // Ensure metadata directory exists
    try {
      if (!fs.existsSync(METADATA_DIR)) {
        await fsPromises.mkdir(METADATA_DIR, { recursive: true });
        logger.info(`Created metadata directory: ${METADATA_DIR}`);
      } else {
        logger.info(`Metadata directory exists: ${METADATA_DIR}`);
      }
      // Check writability (optional but good practice)
      await fsPromises.access(METADATA_DIR, fs.constants.W_OK);
      logger.success(`Metadata directory is writable: ${METADATA_DIR}`);
    } catch (err) {
      logger.error(`Metadata directory error (${METADATA_DIR}): ${err.message}`);
      // Decide if this is fatal. If resumability is critical, maybe throw.
      throw new Error(`Failed to access or create metadata directory: ${METADATA_DIR}`);
    }
    // --- End added section ---

    // Log configuration
    logger.info(`Maximum file size set to: ${config.maxFileSize / (1024 * 1024)}MB`);
    if (config.pin) {
      logger.info('PIN protection enabled');
    }
    logger.info(`Auto upload is ${config.autoUpload ? 'enabled' : 'disabled'}`);
    if (config.appriseUrl) {
      logger.info('Apprise notifications enabled');
    }

    // After initializing demo middleware
    if (process.env.DEMO_MODE === 'true') {
      logger.info('[DEMO] Running in demo mode - uploads will not be saved');
      // Clear any existing files in upload directory
      try {
        const files = fs.readdirSync(config.uploadDir);
        for (const file of files) {
          fs.unlinkSync(path.join(config.uploadDir, file));
        }
        logger.info('[DEMO] Cleared upload directory');
      } catch (err) {
        logger.error(`[DEMO] Failed to clear upload directory: ${err.message}`);
      }
    }

    return app;
    logger.info('Application initialized.');
    // Example: Log active storage type
    logger.info(`Active Storage Adapter: ${storageAdapter.constructor.name || config.storageType}`);

    return app; // Return the configured Express app instance
  } catch (err) {
    logger.error(`Initialization failed: ${err.message}`);
    throw err;
    logger.error(`Application initialization failed: ${err.message}`);
    throw err; // Propagate error to stop server start
  }
}

module.exports = { app, initialize, config };
module.exports = { app, initialize, config }; // Export app, initialize, and config
@@ -1,291 +1,222 @@
// File: src/config/index.js
require('dotenv').config();

const { validatePin } = require('../utils/security');
const logger = require('../utils/logger');
const fs = require('fs'); // Get version from package.json
const fs = require('fs');
const path = require('path');
// const { version } = require('../../package.json'); // version not currently used, can be removed or kept

/**
 * Environment Variables Reference
 *
 * PORT - Port for the server (default: 3000)
 * NODE_ENV - Node environment (default: 'development')
 * BASE_URL - Base URL for the app (default: http://localhost:${PORT})
 * UPLOAD_DIR - Directory for uploads (Docker/production)
 * LOCAL_UPLOAD_DIR - Directory for uploads (local dev, fallback: './local_uploads')
 * MAX_FILE_SIZE - Max upload size in MB (default: 1024)
 * AUTO_UPLOAD - Enable auto-upload (true/false, default: false)
 * SHOW_FILE_LIST - Enable file listing in frontend (true/false, default: false)
 * DUMBDROP_PIN - Security PIN for uploads (required for protected endpoints)
 * DUMBDROP_TITLE - Site title (default: 'DumbDrop')
 * APPRISE_URL - Apprise notification URL (optional)
 * APPRISE_MESSAGE - Notification message template (default provided)
 * APPRISE_SIZE_UNIT - Size unit for notifications (optional)
 * ALLOWED_EXTENSIONS - Comma-separated list of allowed file extensions (optional)
 */
// --- Environment Variables Reference ---
/* (Comments listing all ENV vars - keep as is) */

// Helper for clear configuration logging
// --- Helper for clear configuration logging ---
const logConfig = (message, level = 'info') => {
  const prefix = level === 'warning' ? '⚠️ WARNING:' : 'ℹ️ INFO:';
  console.log(`${prefix} CONFIGURATION: ${message}`);
};

// Default configurations
// --- Default configurations ---
const DEFAULT_PORT = 3000;
const DEFAULT_SITE_TITLE = 'DumbDrop';
const NODE_ENV = process.env.NODE_ENV || 'production';
const PORT = process.env.PORT || 3000;
const BASE_URL = process.env.BASE_URL || `http://localhost:${PORT}`;
const DEFAULT_CLIENT_MAX_RETRIES = 5; // Default retry count
console.log('Loaded ENV:', {
  PORT,
  UPLOAD_DIR: process.env.UPLOAD_DIR,
  LOCAL_UPLOAD_DIR: process.env.LOCAL_UPLOAD_DIR,
  NODE_ENV,
  BASE_URL,
  ALLOWED_ORIGINS: process.env.ALLOWED_ORIGINS || '*',
});
const logAndReturn = (key, value, isDefault = false) => {
  logConfig(`${key}: ${value}${isDefault ? ' (default)' : ''}`);
const DEFAULT_BASE_URL_PREFIX = 'http://localhost'; // Prefix, port added later
const DEFAULT_CLIENT_MAX_RETRIES = 5;
const DEFAULT_STORAGE_TYPE = 'local';

const logAndReturn = (key, value, isDefault = false, sensitive = false) => {
  const displayValue = sensitive ? '********' : value;
  logConfig(`${key}: ${displayValue}${isDefault ? ' (default)' : ''}`);
  return value;
};

/**
 * Determine the upload directory based on environment variables.
 * Priority:
 * 1. UPLOAD_DIR (for Docker/production)
 * 2. LOCAL_UPLOAD_DIR (for local development)
 * 3. './local_uploads' (default fallback)
 * @returns {string} The upload directory path
 */
function determineUploadDirectory() {
function isLocalDevelopment() {
  return process.env.NODE_ENV !== 'production' && !process.env.UPLOAD_DIR;
}

function determineLocalUploadDirectory() {
  if (process.env.STORAGE_TYPE && process.env.STORAGE_TYPE.toLowerCase() !== 'local') {
    return null; // Not using local storage
  }
  let uploadDir;
  if (process.env.UPLOAD_DIR) {
    uploadDir = process.env.UPLOAD_DIR;
    logConfig(`Upload directory set from UPLOAD_DIR: ${uploadDir}`);
    // logger.info(`[Local Storage] Upload directory set from UPLOAD_DIR: ${uploadDir}`); // Logger might not be fully init here
  } else if (process.env.LOCAL_UPLOAD_DIR) {
    uploadDir = process.env.LOCAL_UPLOAD_DIR;
    logConfig(`Upload directory using LOCAL_UPLOAD_DIR fallback: ${uploadDir}`, 'warning');
    // logger.warn(`[Local Storage] Upload directory using LOCAL_UPLOAD_DIR fallback: ${uploadDir}`);
  } else {
    uploadDir = './local_uploads';
    logConfig(`Upload directory using default fallback: ${uploadDir}`, 'warning');
    // logger.warn(`[Local Storage] Upload directory using default fallback: ${uploadDir}`);
  }
  logConfig(`Final upload directory path: ${require('path').resolve(uploadDir)}`);
  return uploadDir;
  // logger.info(`[Local Storage] Final upload directory path: ${path.resolve(uploadDir)}`);
  return path.resolve(uploadDir); // Always resolve to absolute
}

/**
 * Utility to detect if running in local development mode
 * Returns true if NODE_ENV is not 'production' and UPLOAD_DIR is not set (i.e., not Docker)
 */
function isLocalDevelopment() {
  return process.env.NODE_ENV !== 'production';
}

/**
 * Ensure the upload directory exists (for local development only)
 * Creates the directory if it does not exist
 */
function ensureLocalUploadDirExists(uploadDir) {
  if (!isLocalDevelopment()) return;
function ensureLocalUploadDirExists(dirPath) {
  if (!dirPath || !isLocalDevelopment()) {
    return;
  }
  try {
    if (!fs.existsSync(uploadDir)) {
      fs.mkdirSync(uploadDir, { recursive: true });
      logConfig(`Created local upload directory: ${uploadDir}`);
    if (!fs.existsSync(dirPath)) {
      fs.mkdirSync(dirPath, { recursive: true });
      console.log(`[INFO] CONFIGURATION: [Local Storage] Created local upload directory: ${dirPath}`);
    } else {
      logConfig(`Local upload directory exists: ${uploadDir}`);
      console.log(`[INFO] CONFIGURATION: [Local Storage] Local upload directory exists: ${dirPath}`);
    }
    fs.accessSync(dirPath, fs.constants.W_OK);
    console.log(`[SUCCESS] CONFIGURATION: [Local Storage] Local upload directory is writable: ${dirPath}`);
  } catch (err) {
    logConfig(`Failed to create local upload directory: ${uploadDir}. Error: ${err.message}`, 'warning');
    console.error(`[ERROR] CONFIGURATION: [Local Storage] Failed to create or access local upload directory: ${dirPath}. Error: ${err.message}`);
    throw new Error(`Upload directory "${dirPath}" is not accessible or writable.`);
  }
}

// Determine and ensure upload directory (for local dev)
const resolvedUploadDir = determineUploadDirectory();
ensureLocalUploadDirExists(resolvedUploadDir);
const storageTypeInput = process.env.STORAGE_TYPE || DEFAULT_STORAGE_TYPE;
const storageType = ['local', 's3'].includes(storageTypeInput.toLowerCase())
  ? storageTypeInput.toLowerCase()
  : DEFAULT_STORAGE_TYPE;

/**
 * Application configuration
 * Loads and validates environment variables
 */
const config = {
  // =====================
  // =====================
  // Server settings
  // =====================
  /**
   * Port for the server (default: 3000)
   * Set via PORT in .env
   */
  port: PORT,
  /**
   * Node environment (default: 'production')
   * Set via NODE_ENV in .env
   */
  nodeEnv: NODE_ENV,
  /**
   * Base URL for the app (default: http://localhost:${PORT})
   * Set via BASE_URL in .env
   */
  baseUrl: BASE_URL,

  // =====================
  // =====================
  // Upload settings
  // =====================
  /**
   * Directory for uploads
   * Priority: UPLOAD_DIR (Docker/production) > LOCAL_UPLOAD_DIR (local dev) > './local_uploads' (fallback)
   */
  uploadDir: resolvedUploadDir,
  /**
   * Max upload size in bytes (default: 1024MB)
   * Set via MAX_FILE_SIZE in .env (in MB)
   */
  maxFileSize: (() => {
    const sizeInMB = parseInt(process.env.MAX_FILE_SIZE || '1024', 10);
    if (isNaN(sizeInMB) || sizeInMB <= 0) {
      throw new Error('MAX_FILE_SIZE must be a positive number');
    }
    return sizeInMB * 1024 * 1024; // Convert MB to bytes
  })(),
  /**
   * Enable auto-upload (true/false, default: false)
   * Set via AUTO_UPLOAD in .env
   */
  autoUpload: process.env.AUTO_UPLOAD === 'true',
  /**
   * Enable file listing in frontend (true/false, default: false)
   * Set via SHOW_FILE_LIST in .env
   */
  showFileList: process.env.SHOW_FILE_LIST === 'true',

  // =====================
  // =====================
  // Security
  // =====================
  /**
   * Security PIN for uploads (required for protected endpoints)
   * Set via DUMBDROP_PIN in .env
   */
  pin: validatePin(process.env.DUMBDROP_PIN),
  /**
   * Trust proxy for X-Forwarded-For header (default: false for security)
   * Only enable if behind a trusted reverse proxy
   * Set via TRUST_PROXY in .env
   */
  trustProxy: process.env.TRUST_PROXY === 'true',
  /**
   * Comma-separated list of trusted proxy IPs (optional)
   * Restricts which proxies can set X-Forwarded-For header
   * Set via TRUSTED_PROXY_IPS in .env
   */
  trustedProxyIps: process.env.TRUSTED_PROXY_IPS ?
    process.env.TRUSTED_PROXY_IPS.split(',').map(ip => ip.trim()) :
    null,

  // =====================
  // =====================
  // UI settings
  // =====================
  /**
   * Site title (default: 'DumbDrop')
   * Set via DUMBDROP_TITLE in .env
   */
  siteTitle: process.env.DUMBDROP_TITLE || DEFAULT_SITE_TITLE,

  // =====================
  // =====================
  // Notification settings
  // =====================
  /**
   * Apprise notification URL (optional)
   * Set via APPRISE_URL in .env
   */
  appriseUrl: process.env.APPRISE_URL,
  /**
   * Notification message template (default provided)
   * Set via APPRISE_MESSAGE in .env
   */
  appriseMessage: process.env.APPRISE_MESSAGE || 'New file uploaded - {filename} ({size}), Storage used {storage}',
  /**
   * Size unit for notifications (optional)
   * Set via APPRISE_SIZE_UNIT in .env
   */
  appriseSizeUnit: process.env.APPRISE_SIZE_UNIT,

  // =====================
  // =====================
  // File extensions
  // =====================
  /**
   * Allowed file extensions (comma-separated, optional)
   * Set via ALLOWED_EXTENSIONS in .env
   */
  allowedExtensions: process.env.ALLOWED_EXTENSIONS ?
    process.env.ALLOWED_EXTENSIONS.split(',').map(ext => ext.trim().toLowerCase()) :
    null,
if (storageTypeInput.toLowerCase() !== storageType) {
  console.warn(`[WARN] CONFIGURATION: Invalid STORAGE_TYPE "${storageTypeInput}", using default: "${storageType}"`);
}

  /**
   * Max number of retries for client-side chunk uploads (default: 5)
   * Set via CLIENT_MAX_RETRIES in .env
   */
  clientMaxRetries: (() => {
    const envValue = process.env.CLIENT_MAX_RETRIES;
    const defaultValue = DEFAULT_CLIENT_MAX_RETRIES;
    if (envValue === undefined) {
      return logAndReturn('CLIENT_MAX_RETRIES', defaultValue, true);
    }
    const retries = parseInt(envValue, 10);
    if (isNaN(retries) || retries < 0) {
      logConfig(
        `Invalid CLIENT_MAX_RETRIES value: "${envValue}". Using default: ${defaultValue}`,
        'warning',
      );
      return logAndReturn('CLIENT_MAX_RETRIES', defaultValue, true);
    }
    return logAndReturn('CLIENT_MAX_RETRIES', retries);
  })(),
const resolvedLocalUploadDir = determineLocalUploadDirectory();
if (storageType === 'local' && resolvedLocalUploadDir) { // Only ensure if actually using local storage
  ensureLocalUploadDirExists(resolvedLocalUploadDir);
}

  uploadPin: logAndReturn('UPLOAD_PIN', process.env.UPLOAD_PIN || null),
const parseFooterLinks = (linksString) => {
  if (!linksString) return [];
  return linksString.split(',')
    .map(linkPair => {
      const parts = linkPair.split('@').map(part => part.trim());
      if (parts.length === 2 && parts[0] && parts[1] && (parts[1].startsWith('http://') || parts[1].startsWith('https://'))) {
        return { text: parts[0], url: parts[1] };
      }
      // logger.warn(`Invalid format or URL in FOOTER_LINKS: "${linkPair}".`); // Logger might not be fully init
      return null;
    })
    .filter(link => link !== null);
};

console.log(`Upload directory configured as: ${config.uploadDir}`);
const port = parseInt(process.env.PORT || DEFAULT_PORT, 10);
const baseUrl = process.env.BASE_URL || `${DEFAULT_BASE_URL_PREFIX}:${port}/`;

const config = {
  port,
  nodeEnv: process.env.NODE_ENV || 'development',
  baseUrl,
  isDemoMode: process.env.DEMO_MODE === 'true',
  storageType,
  uploadDir: storageType === 'local' ? resolvedLocalUploadDir : path.resolve(process.env.UPLOAD_DIR || process.env.LOCAL_UPLOAD_DIR || './uploads'), // For S3, metadata dir. Fallback required.
  s3Region: process.env.S3_REGION || null,
  s3BucketName: process.env.S3_BUCKET_NAME || null,
  s3AccessKeyId: process.env.S3_ACCESS_KEY_ID || null,
  s3SecretAccessKey: process.env.S3_SECRET_ACCESS_KEY || null,
  s3EndpointUrl: process.env.S3_ENDPOINT_URL || null,
  s3ForcePathStyle: process.env.S3_FORCE_PATH_STYLE === 'true',
  maxFileSize: (() => {
    const sizeInMB = parseInt(process.env.MAX_FILE_SIZE || '1024', 10);
    return (isNaN(sizeInMB) || sizeInMB <= 0 ? 1024 : sizeInMB) * 1024 * 1024;
  })(),
  autoUpload: process.env.AUTO_UPLOAD === 'true',
  allowedExtensions: process.env.ALLOWED_EXTENSIONS ?
    process.env.ALLOWED_EXTENSIONS.split(',').map(ext => ext.trim().toLowerCase().replace(/^\./, '.')).filter(Boolean) :
    null,
  clientMaxRetries: (() => {
    const retries = parseInt(process.env.CLIENT_MAX_RETRIES || DEFAULT_CLIENT_MAX_RETRIES, 10);
    return (isNaN(retries) || retries < 0) ? DEFAULT_CLIENT_MAX_RETRIES : retries;
  })(),
  pin: validatePin(process.env.DUMBDROP_PIN), // validatePin uses logger, ensure logger is available
  allowedIframeOrigins: process.env.ALLOWED_IFRAME_ORIGINS ?
    process.env.ALLOWED_IFRAME_ORIGINS.split(',').map(origin => origin.trim()).filter(Boolean) :
    null,
  siteTitle: process.env.DUMBDROP_TITLE || DEFAULT_SITE_TITLE,
  footerLinks: parseFooterLinks(process.env.FOOTER_LINKS),
  appriseUrl: process.env.APPRISE_URL || null,
  appriseMessage: process.env.APPRISE_MESSAGE || 'New file uploaded - {filename} ({size}), Storage used {storage}',
  appriseSizeUnit: process.env.APPRISE_SIZE_UNIT || 'Auto',
};

// --- Log Configuration (after logger is confirmed available) ---
// Moved logging to after config object is built, so logger is definitely available
logger.info(`--- Configuration Start ---`);
logAndReturn('NODE_ENV', config.nodeEnv);
logAndReturn('PORT', config.port);
logAndReturn('BASE_URL', config.baseUrl);
logAndReturn('DEMO_MODE', config.isDemoMode);
logAndReturn('STORAGE_TYPE', config.storageType);
if (config.storageType === 'local') {
  logAndReturn('Upload Directory (Local Storage)', config.uploadDir);
} else {
  logAndReturn('Metadata Directory (S3 Mode)', config.uploadDir); // Clarify role for S3
  logAndReturn('S3_REGION', config.s3Region);
  logAndReturn('S3_BUCKET_NAME', config.s3BucketName);
  logAndReturn('S3_ACCESS_KEY_ID', config.s3AccessKeyId, false, true);
  logAndReturn('S3_SECRET_ACCESS_KEY', config.s3SecretAccessKey, false, true);
  if (config.s3EndpointUrl) logAndReturn('S3_ENDPOINT_URL', config.s3EndpointUrl);
  logAndReturn('S3_FORCE_PATH_STYLE', config.s3ForcePathStyle);
}
logger.info(`Max File Size: ${config.maxFileSize / (1024 * 1024)}MB`);
logger.info(`Auto Upload: ${config.autoUpload}`);
if (config.allowedExtensions) logger.info(`Allowed Extensions: ${config.allowedExtensions.join(', ')}`);
if (config.pin) logAndReturn('DUMBDROP_PIN', config.pin, false, true);
if (config.allowedIframeOrigins) logger.info(`Allowed Iframe Origins: ${config.allowedIframeOrigins.join(', ')}`);
if (config.appriseUrl) logAndReturn('APPRISE_URL', config.appriseUrl);
logger.info(`Client Max Retries: ${config.clientMaxRetries}`);
logger.info(`--- Configuration End ---`);

// Validate required settings
function validateConfig() {
  const errors = [];

  if (config.maxFileSize <= 0) {
    errors.push('MAX_FILE_SIZE must be greater than 0');
  if (config.port <= 0 || config.port > 65535) errors.push('PORT must be a valid number between 1 and 65535');
  if (config.maxFileSize <= 0) errors.push('MAX_FILE_SIZE must be greater than 0');
  try {
    new URL(config.baseUrl);
    if (!config.baseUrl.endsWith('/')) errors.push('BASE_URL must end with a trailing slash ("/"). Current: ' + config.baseUrl);
  } catch (err) { errors.push(`BASE_URL must be a valid URL. Error: ${err.message}`); }

  if (config.storageType === 's3') {
    if (!config.s3Region) errors.push('S3_REGION is required for S3 storage');
    if (!config.s3BucketName) errors.push('S3_BUCKET_NAME is required for S3 storage');
    if (!config.s3AccessKeyId) errors.push('S3_ACCESS_KEY_ID is required for S3 storage');
    if (!config.s3SecretAccessKey) errors.push('S3_SECRET_ACCESS_KEY is required for S3 storage');
    if (config.s3ForcePathStyle && !config.s3EndpointUrl) {
      logger.warn('[Config Validation] S3_FORCE_PATH_STYLE is true, but S3_ENDPOINT_URL is not set. This may not work as expected with default AWS endpoints.');
    }
  } else if (config.storageType === 'local') {
    if (!config.uploadDir) errors.push('Upload directory (UPLOAD_DIR or LOCAL_UPLOAD_DIR) is required for local storage.');
    else {
      try { fs.accessSync(config.uploadDir, fs.constants.W_OK); }
      catch (err) { errors.push(`Local upload directory "${config.uploadDir}" is not writable or does not exist.`); }
    }
  }

  // Validate BASE_URL format
  try {
    // Ensure BASE_URL ends with a slash
    if (!config.baseUrl.endsWith('/')) {
      logger.warn('BASE_URL did not end with a trailing slash. Automatically appending "/".');
      config.baseUrl = config.baseUrl + '/';
    }
  } catch (err) {
    const errorMsg = `BASE_URL must be a valid URL: ${err.message || err}`;
    logger.error(errorMsg);
    errors.push(errorMsg);
  // Metadata directory check (for both local file metadata and S3 upload state metadata)
  if (!config.uploadDir) { // This condition might be redundant if local storage dir is already checked
    errors.push('A base directory (UPLOAD_DIR or LOCAL_UPLOAD_DIR) is required for metadata storage.');
  } else {
    try {
      const metadataBase = path.resolve(config.uploadDir); // Base for .metadata
      if (!fs.existsSync(metadataBase)) {
        fs.mkdirSync(metadataBase, { recursive: true });
        logger.info(`[Config Validation] Created base directory for metadata: ${metadataBase}`);
      }
      fs.accessSync(metadataBase, fs.constants.W_OK); // Check writability of the parent of .metadata
    } catch (err) {
      errors.push(`Cannot access or create base directory for metadata at "${config.uploadDir}". Error: ${err.message}`);
    }
  }

  if (config.nodeEnv === 'production') {
    if (!config.appriseUrl) {
      logger.info('Notifications disabled - No Configuration');
    }
  }

  if (errors.length > 0) {
    throw new Error('Configuration validation failed:\n' + errors.join('\n'));
    logger.error('--- CONFIGURATION ERRORS ---');
    errors.forEach(err => logger.error(`- ${err}`));
    logger.error('-----------------------------');
    throw new Error('Configuration validation failed. Please check environment variables and correct the issues.');
  }
  logger.success('[Config Validation] Configuration validated successfully.');
}

// Freeze configuration to prevent modifications
Object.freeze(config);
Object.freeze(config); // Freeze after logging and validation

module.exports = {
  config,
  validateConfig
};
module.exports = { config, validateConfig };
@@ -1,84 +0,0 @@
const ALLOWED_ORIGINS = process.env.ALLOWED_ORIGINS || process.env.ALLOWED_IFRAME_ORIGINS || '*';
const NODE_ENV = process.env.NODE_ENV || 'production';
let allowedOrigins = [];

function setupOrigins(baseUrl) {
  const normalizedBaseUrl = normalizeOrigin(baseUrl);
  allowedOrigins = [ normalizedBaseUrl ];

  if (NODE_ENV === 'development' || ALLOWED_ORIGINS === '*') allowedOrigins = '*';
  else if (ALLOWED_ORIGINS && typeof ALLOWED_ORIGINS === 'string') {
    try {
      const allowed = ALLOWED_ORIGINS.split(',').map(origin => origin.trim());
      allowed.forEach(origin => {
        const normalizedOrigin = normalizeOrigin(origin);
        if (normalizedOrigin !== baseUrl) allowedOrigins.push(normalizedOrigin);
      });
    }
    catch (error) {
      console.error(`Error setting up ALLOWED_ORIGINS: ${ALLOWED_ORIGINS}:`, error);
    }
  }
  console.log("ALLOWED ORIGINS:", allowedOrigins);
  return allowedOrigins;
}

function normalizeOrigin(origin) {
  if (origin) {
    try {
      const normalizedOrigin = new URL(origin).origin;
      return normalizedOrigin;
    } catch (error) {
      console.error("Error parsing referer URL:", error);
      throw new Error("Error parsing referer URL:", error);
    }
  }
}

function validateOrigin(origin) {
  if (NODE_ENV === 'development' || allowedOrigins === '*') return true;

  try {
    if (origin) origin = normalizeOrigin(origin);
    else {
      console.warn("No origin to validate.");
      return false;
    }

    console.log("Validating Origin:", origin);
    if (allowedOrigins.includes(origin)) {
      console.log("Allowed request from origin:", origin);
      return true;
    }
    else {
      console.warn("Blocked request from origin:", origin);
      return false;
    }
  }
  catch (error) {
    console.error(error);
  }
}

function originValidationMiddleware(req, res, next) {
  const origin = req.headers.origin || req.headers.referer || `${req.protocol}://${req.headers.host}`;
  const isOriginValid = validateOrigin(origin);
  if (isOriginValid) {
    next();
  } else {
    res.status(403).json({ error: 'Forbidden' });
  }
}

function getCorsOptions(baseUrl) {
  const allowedOrigins = setupOrigins(baseUrl);
  const corsOptions = {
    origin: allowedOrigins,
    credentials: true,
    methods: ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS'],
    allowedHeaders: ['Content-Type', 'Authorization', 'X-Pin', 'X-Batch-Id'],
  };
  return corsOptions;
}

module.exports = { getCorsOptions, originValidationMiddleware, validateOrigin, allowedOrigins };
@@ -1,6 +1,5 @@
const rateLimit = require('express-rate-limit');
const { registerCleanupTask } = require('../utils/cleanup');
const { getClientIp } = require('../utils/ipExtractor');

// Create rate limiters
const createLimiter = (options) => {
@@ -27,8 +26,6 @@ const initUploadLimiter = createLimiter({
  },
  standardHeaders: true,
  legacyHeaders: false,
  // Use secure IP extraction to prevent header spoofing
  keyGenerator: (req) => getClientIp(req),
  // Skip rate limiting for chunk uploads within an existing batch
  skip: (req) => {
    return req.headers['x-batch-id'] !== undefined;
@@ -46,14 +43,12 @@ const chunkUploadLimiter = createLimiter({
    error: 'Upload rate limit exceeded. Please wait before continuing.'
  },
  standardHeaders: true,
  legacyHeaders: false,
  // Use secure IP extraction to prevent header spoofing
  keyGenerator: (req) => getClientIp(req)
  legacyHeaders: false
});

/**
 * Rate limiter for PIN verification attempts
 * Prevents brute force attacks on actual PIN verification
 * Prevents brute force attacks
 */
const pinVerifyLimiter = createLimiter({
  windowMs: 15 * 60 * 1000, // 15 minutes
@@ -62,29 +57,7 @@ const pinVerifyLimiter = createLimiter({
    error: 'Too many PIN verification attempts. Please try again later.'
  },
  standardHeaders: true,
  legacyHeaders: false,
  // Use secure IP extraction to prevent header spoofing
  keyGenerator: (req) => getClientIp(req),
  // Apply strict rate limiting only to PIN verification, not PIN status checks
  skip: (req) => {
    return req.path === '/pin-required'; // Skip rate limiting for PIN requirement checks
  }
});

/**
 * Rate limiter for PIN status checks
 * More permissive for checking if PIN is required
 */
const pinStatusLimiter = createLimiter({
  windowMs: 60 * 1000, // 1 minute window
  max: 30, // 30 requests per minute
  message: {
    error: 'Too many requests. Please wait before trying again.'
  },
  standardHeaders: true,
  legacyHeaders: false,
  // Use secure IP extraction to prevent header spoofing
  keyGenerator: (req) => getClientIp(req)
  legacyHeaders: false
});

/**
@@ -98,15 +71,12 @@ const downloadLimiter = createLimiter({
    error: 'Download rate limit exceeded. Please wait before downloading more files.'
  },
  standardHeaders: true,
  legacyHeaders: false,
  // Use secure IP extraction to prevent header spoofing
  keyGenerator: (req) => getClientIp(req)
  legacyHeaders: false
});

module.exports = {
  initUploadLimiter,
  chunkUploadLimiter,
  pinVerifyLimiter,
  pinStatusLimiter,
  downloadLimiter
};
@@ -6,67 +6,41 @@

const { safeCompare } = require('../utils/security');
const logger = require('../utils/logger');
const PORT = process.env.PORT || 3000;
const NODE_ENV = process.env.NODE_ENV || 'production';
const BASE_URL = process.env.BASE_URL || `http://localhost:${PORT}`;
const { config } = require('../config');

// const { config } = require('../config');
/**
 * Security headers middleware
 * DEPRECATED: Use helmet middleware instead for security headers
 */
// function securityHeaders(req, res, next) {
//   // Content Security Policy
//   let csp =
//     "default-src 'self'; " +
//     "connect-src 'self'; " +
//     "style-src 'self' 'unsafe-inline' cdn.jsdelivr.net; " +
//     "script-src 'self' 'unsafe-inline' cdn.jsdelivr.net; " +
//     "img-src 'self' data: blob:;";
function securityHeaders(req, res, next) {
  // Content Security Policy
  let csp =
    "default-src 'self'; " +
    "connect-src 'self'; " +
    "style-src 'self' 'unsafe-inline' cdn.jsdelivr.net; " +
    "script-src 'self' 'unsafe-inline' cdn.jsdelivr.net; " +
    "img-src 'self' data: blob:;";

//   // If allowedIframeOrigins is set, allow those origins to embed via iframe
//   if (config.allowedIframeOrigins && config.allowedIframeOrigins.length > 0) {
//     // Remove X-Frame-Options header (do not set it)
//     // Add frame-ancestors directive to CSP
//     const frameAncestors = ["'self'", ...config.allowedIframeOrigins].join(' ');
//     csp += ` frame-ancestors ${frameAncestors};`;
//   } else {
//     // Default: only allow same origin if not configured
//     res.setHeader('X-Frame-Options', 'SAMEORIGIN');
//   }
  // If allowedIframeOrigins is set, allow those origins to embed via iframe
  if (config.allowedIframeOrigins && config.allowedIframeOrigins.length > 0) {
    // Remove X-Frame-Options header (do not set it)
    // Add frame-ancestors directive to CSP
    const frameAncestors = ["'self'", ...config.allowedIframeOrigins].join(' ');
    csp += ` frame-ancestors ${frameAncestors};`;
  } else {
    // Default: only allow same origin if not configured
    res.setHeader('X-Frame-Options', 'SAMEORIGIN');
  }

//   res.setHeader('Content-Security-Policy', csp);
//   res.setHeader('X-Content-Type-Options', 'nosniff');
//   res.setHeader('X-XSS-Protection', '1; mode=block');
  res.setHeader('Content-Security-Policy', csp);
  res.setHeader('X-Content-Type-Options', 'nosniff');
  res.setHeader('X-XSS-Protection', '1; mode=block');

//   // Strict Transport Security (when in production)
//   if (process.env.NODE_ENV === 'production') {
//     res.setHeader('Strict-Transport-Security', 'max-age=31536000; includeSubDomains');
//   }
  // Strict Transport Security (when in production)
  if (process.env.NODE_ENV === 'production') {
    res.setHeader('Strict-Transport-Security', 'max-age=31536000; includeSubDomains');
  }

//   next();
// }

function getHelmetConfig() {
  // const isSecure = BASE_URL.startsWith('https://');

  return {
    noSniff: true, // Prevent MIME type sniffing
    frameguard: { action: 'deny' }, // Prevent clickjacking
    crossOriginEmbedderPolicy: false, // Disable for local network access
    crossOriginOpenerPolicy: false, // Disable to prevent warnings on HTTP
    crossOriginResourcePolicy: { policy: 'cross-origin' }, // Allow cross-origin for local network
    referrerPolicy: { policy: 'no-referrer-when-downgrade' }, // Set referrer policy
    ieNoOpen: true, // Prevent IE from executing downloads
    // hsts: isSecure ? { maxAge: 31536000, includeSubDomains: true } : false, // Only enforce HTTPS if using HTTPS
    // Disabled Helmet middlewares:
    hsts: false,
    contentSecurityPolicy: false, // Disable CSP for now
    dnsPrefetchControl: true, // Disable DNS prefetching
    permittedCrossDomainPolicies: false,
    originAgentCluster: false,
    xssFilter: false,
  };
  next();
}

/**
@@ -92,7 +66,7 @@ function requirePin(PIN) {
  // Set cookie for subsequent requests with enhanced security
  const cookieOptions = {
    httpOnly: true, // Always enable HttpOnly
    secure: req.secure || (BASE_URL.startsWith('https') && NODE_ENV === 'production'),
    secure: req.secure || req.headers['x-forwarded-proto'] === 'https', // Enable secure flag only if the request is over HTTPS
    sameSite: 'strict',
    path: '/',
    maxAge: 24 * 60 * 60 * 1000 // 24 hour expiry
@@ -108,7 +82,6 @@ function requirePin(PIN) {
}

module.exports = {
  // securityHeaders, // Deprecated, use helmet instead
  getHelmetConfig,
  securityHeaders,
  requirePin
};
@@ -11,28 +11,24 @@ const {
  MAX_ATTEMPTS,
  LOCKOUT_DURATION
} = require('../utils/security');
const { getClientIp } = require('../utils/ipExtractor');
const PORT = process.env.PORT || 3000;
const NODE_ENV = process.env.NODE_ENV || 'production';
const BASE_URL = process.env.BASE_URL || `http://localhost:${PORT}`;

/**
 * Verify PIN
 */
router.post('/verify-pin', (req, res) => {
  const { pin } = req.body;
  const ip = getClientIp(req);
  const ip = req.ip;

  try {
    // If no PIN is set in config, always return success
    if (!config.pin) {
      // res.cookie('DUMBDROP_PIN', '', {
      //   httpOnly: true,
      //   secure: req.secure || (BASE_URL.startsWith('https') && NODE_ENV === 'production'),
      //   sameSite: 'strict',
      //   path: '/'
      // });
      res.clearCookie('DUMBDROP_PIN', { path: '/' });
      return res.json({ success: true, error: null, path: '/' });
      res.cookie('DUMBDROP_PIN', '', {
        httpOnly: true,
        secure: req.secure || (process.env.NODE_ENV === 'production' && config.baseUrl.startsWith('https')),
        sameSite: 'strict',
        path: '/'
      });
      return res.json({ success: true, error: null });
    }

    // Validate PIN format
@@ -67,7 +63,7 @@ router.post('/verify-pin', (req, res) => {
    // Set secure cookie with cleaned PIN
    res.cookie('DUMBDROP_PIN', cleanedPin, {
      httpOnly: true,
      secure: req.secure || (BASE_URL.startsWith('https') && NODE_ENV === 'production'),
      secure: req.secure || (process.env.NODE_ENV === 'production' && config.baseUrl.startsWith('https')),
      sameSite: 'strict',
      path: '/'
    });
@@ -114,7 +110,7 @@ router.get('/pin-required', (req, res) => {
router.post('/logout', (req, res) => {
  try {
    res.clearCookie('DUMBDROP_PIN', { path: '/' });
    logger.info(`Logout successful for IP: ${getClientIp(req)}`);
    logger.info(`Logout successful for IP: ${req.ip}`);
    res.json({ success: true });
  } catch (err) {
    logger.error(`Logout error: ${err.message}`);
@@ -1,350 +1,211 @@
|
||||
/**
|
||||
* File management and listing route handlers.
|
||||
* Provides endpoints for listing, downloading, and managing uploaded files.
|
||||
* Handles file metadata, stats, and directory operations.
|
||||
* File management route handlers.
|
||||
* Provides endpoints for listing and deleting files using the configured storage adapter.
|
||||
* Handles file downloads by either providing a presigned URL (S3) or streaming (local).
|
||||
*/
|
||||
|
||||
const express = require('express');
|
||||
const router = express.Router();
|
||||
const path = require('path');
|
||||
const fs = require('fs').promises;
|
||||
const { config } = require('../config');
|
||||
const path = require('path'); // Needed for sanitization
|
||||
const fs = require('fs'); // Needed ONLY for local file streaming
|
||||
const { storageAdapter } = require('../storage'); // Import the selected adapter
|
||||
const logger = require('../utils/logger');
|
||||
const { formatFileSize, sanitizeFilenameSafe, isPathWithinUploadDir } = require('../utils/fileUtils');
|
||||
const { isDemoMode } = require('../utils/demoMode'); // Keep demo check if needed
|
||||
|
||||
/**
|
||||
* Safely encode filename for Content-Disposition header
|
||||
* Prevents header injection and handles special characters
|
||||
* @param {string} filename - The filename to encode
|
||||
* @returns {string} Properly formatted Content-Disposition value
|
||||
*/
|
||||
function createSafeContentDisposition(filename) {
|
||||
// Remove any path separators to ensure we only get the filename
|
||||
const basename = path.basename(filename);
|
||||
|
||||
// Remove or replace characters that could cause issues
|
||||
// Remove control characters (0x00-0x1F, 0x7F) and quotes
|
||||
// eslint-disable-next-line no-control-regex
|
||||
const sanitized = basename.replace(/[\u0000-\u001F\u007F"\\]/g, '_');
|
||||
|
||||
// For ASCII-only filenames, use simple format
|
||||
if (/^[\u0020-\u007E]*$/.test(sanitized)) {
|
||||
// Escape any remaining quotes and backslashes
|
||||
const escaped = sanitized.replace(/["\\]/g, '\\$&');
|
||||
return `attachment; filename="${escaped}"`;
|
||||
}
|
||||
|
||||
// For filenames with non-ASCII characters, use RFC 5987 encoding
|
||||
// This provides better international character support
|
||||
const encoded = encodeURIComponent(sanitized);
|
||||
const asciiSafe = sanitized.replace(/[^\u0020-\u007E]/g, '_');
|
||||
|
||||
return `attachment; filename="${asciiSafe}"; filename*=UTF-8''${encoded}`;
|
||||
}
|
||||
|
||||
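Concretely, the helper yields a plain quoted filename for ASCII names and falls back to RFC 5987 encoding otherwise, for example:

// Example outputs of createSafeContentDisposition:
createSafeContentDisposition('report.pdf');
// -> attachment; filename="report.pdf"
createSafeContentDisposition('résumé.pdf');
// -> attachment; filename="r_sum_.pdf"; filename*=UTF-8''r%C3%A9sum%C3%A9.pdf
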
/**
|
||||
* Get file information
|
||||
*/
|
||||
router.get('/info/*', async (req, res) => {
|
||||
const filePath = path.join(config.uploadDir, req.params[0]);
|
||||
|
||||
try {
|
||||
// Ensure the path is within the upload directory (security check)
|
||||
// Use requireExists=true since we're getting info on an existing file
|
||||
if (!isPathWithinUploadDir(filePath, config.uploadDir, true)) {
|
||||
logger.warn(`Attempted path traversal attack: ${req.params[0]}`);
|
||||
return res.status(403).json({ error: 'Access denied' });
|
||||
}
|
||||
|
||||
const stats = await fs.stat(filePath);
|
||||
const fileInfo = {
|
||||
filename: req.params[0],
|
||||
size: stats.size,
|
||||
formattedSize: formatFileSize(stats.size),
|
||||
uploadDate: stats.mtime,
|
||||
mimetype: path.extname(req.params[0]).slice(1),
|
||||
type: stats.isDirectory() ? 'directory' : 'file'
|
||||
};
|
||||
|
||||
res.json(fileInfo);
|
||||
} catch (err) {
|
||||
logger.error(`Failed to get file info: ${err.message}`);
|
||||
res.status(404).json({ error: 'File not found' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* Download file
|
||||
*/
|
||||
router.get('/download/*', async (req, res) => {
|
||||
// Get the file path from the wildcard parameter
|
||||
const filePath = path.join(config.uploadDir, req.params[0]);
|
||||
const fileName = path.basename(req.params[0]);
|
||||
|
||||
try {
|
||||
// Ensure the file is within the upload directory (security check)
|
||||
// This must be done BEFORE any filesystem operations to prevent path traversal
|
||||
// Use requireExists=true since we're downloading an existing file
|
||||
if (!isPathWithinUploadDir(filePath, config.uploadDir, true)) {
|
||||
logger.warn(`Attempted path traversal attack: ${req.params[0]}`);
|
||||
return res.status(403).json({ error: 'Access denied' });
|
||||
}
|
||||
|
||||
await fs.access(filePath);
|
||||
|
||||
// Set headers for download with safe Content-Disposition
|
||||
res.setHeader('Content-Disposition', createSafeContentDisposition(fileName));
|
||||
res.setHeader('Content-Type', 'application/octet-stream');
|
||||
|
||||
// Stream the file
|
||||
const fileStream = require('fs').createReadStream(filePath);
|
||||
fileStream.pipe(res);
|
||||
|
||||
// Handle errors during streaming
|
||||
fileStream.on('error', (err) => {
|
||||
logger.error(`File streaming error: ${err.message}`);
|
||||
if (!res.headersSent) {
|
||||
res.status(500).json({ error: 'Failed to download file' });
|
||||
}
|
||||
});
|
||||
|
||||
logger.info(`File download started: ${req.params[0]}`);
|
||||
} catch (err) {
|
||||
logger.error(`File download failed: ${err.message}`);
|
||||
res.status(404).json({ error: 'File not found' });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* List all files and folders recursively
|
||||
* List all files from the storage backend.
|
||||
*/
|
||||
router.get('/', async (req, res) => {
|
||||
// Demo mode handling (simplified list)
|
||||
if (isDemoMode()) {
|
||||
logger.info('[DEMO /files] Listing demo files');
|
||||
// Return a mock list or call demoAdapter.listFiles() if implemented
|
||||
return res.json({
|
||||
files: [{ filename: 'demo_file.txt', size: 1234, formattedSize: '1.21KB', uploadDate: new Date().toISOString() }],
|
||||
totalFiles: 1,
|
||||
totalSize: 1234,
|
||||
message: 'Demo Mode: Showing mock file list'
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
const items = await getDirectoryContents(config.uploadDir);
|
||||
|
||||
// Calculate total size across all files
|
||||
const totalSize = calculateTotalSize(items);
|
||||
|
||||
res.json({
|
||||
items: items,
|
||||
totalFiles: countFiles(items),
|
||||
totalSize: totalSize,
|
||||
formattedTotalSize: formatFileSize(totalSize)
|
||||
const files = await storageAdapter.listFiles();
|
||||
const totalSize = files.reduce((acc, file) => acc + (file.size || 0), 0);
|
||||
|
||||
res.json({
|
||||
files: files,
|
||||
totalFiles: files.length,
|
||||
totalSize: totalSize
|
||||
// Note: formattedTotalSize could be calculated here if needed
|
||||
});
|
||||
} catch (err) {
|
||||
logger.error(`Failed to list files: ${err.message}`);
|
||||
res.status(500).json({ error: 'Failed to list files' });
|
||||
logger.error(`[Route /files GET] Failed to list files: ${err.message}`, err.stack);
|
||||
// Map common errors
|
||||
let statusCode = 500;
|
||||
let clientMessage = 'Failed to list files.';
|
||||
if (err.name === 'NoSuchBucket' || err.name === 'AccessDenied') { // S3 Specific
|
||||
clientMessage = 'Storage configuration error.';
|
||||
} else if (err.code === 'ENOENT') { // Local Specific
|
||||
clientMessage = 'Storage directory not found.';
|
||||
} else if (err.code === 'EACCES' || err.code === 'EPERM') { // Local Specific
|
||||
clientMessage = 'Storage permission error.';
|
||||
}
|
||||
res.status(statusCode).json({ error: clientMessage, details: err.message });
|
||||
}
|
||||
});
|
||||
|
||||
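These routes, and the upload routes further down in the diff, only touch the backend through storageAdapter. Inferred from the call sites visible here (this is not an authoritative interface definition), the adapter contract looks roughly like:

// Adapter shape as used by the routes; method names and return shapes are taken from call sites.
const storageAdapterShape = {
  async listFiles() {},                      // -> [{ filename, size, formattedSize, uploadDate }, ...]
  async getDownloadUrlOrStream(filename) {}, // -> { type: 'url', value: presignedUrl } or { type: 'path', value: localPath }
  async deleteFile(filename) {},
  async initUpload(filename, size, batchId) {},           // -> { uploadId }
  async storeChunk(uploadId, chunkBuffer, partNumber) {}, // -> { bytesReceived, progress, completed }
  async completeUpload(uploadId) {},                      // -> { finalPath }
  async abortUpload(uploadId) {},
};
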
/**
|
||||
* Recursively get directory contents
|
||||
* Get a download URL or stream a file.
|
||||
* For S3, returns a presigned URL.
|
||||
* For Local, streams the file content.
|
||||
*/
|
||||
async function getDirectoryContents(dirPath, relativePath = '') {
|
||||
const items = [];
|
||||
|
||||
router.get('/:filename/download', async (req, res) => {
|
||||
const rawFilename = req.params.filename;
|
||||
|
||||
// Basic sanitization: Prevent directory traversal.
|
||||
// Adapters should also validate/sanitize keys/paths.
|
||||
const filename = path.basename(rawFilename);
|
||||
if (filename !== rawFilename || filename.includes('..')) {
|
||||
logger.error(`[Route /download] Invalid filename detected: ${rawFilename}`);
|
||||
return res.status(400).json({ error: 'Invalid filename' });
|
||||
}
|
||||
|
||||
// Demo mode handling
|
||||
if (isDemoMode()) {
|
||||
logger.info(`[DEMO /download] Download request for ${filename}`);
|
||||
return res.json({
|
||||
message: 'Demo Mode: This would initiate download in production.',
|
||||
filename: filename
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
const entries = await fs.readdir(dirPath);
|
||||
|
||||
for (const entry of entries) {
|
||||
// Skip metadata directory and hidden files
|
||||
if (entry === '.metadata' || entry.startsWith('.')) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const fullPath = path.join(dirPath, entry);
|
||||
const itemRelativePath = relativePath ? `${relativePath}/${entry}` : entry;
|
||||
|
||||
const result = await storageAdapter.getDownloadUrlOrStream(filename);
|
||||
|
||||
if (result.type === 'url') {
|
||||
// S3 Adapter returned a presigned URL
|
||||
logger.info(`[Route /download] Providing presigned URL for: ${filename}`);
|
||||
// Option 1: Redirect (Simple, but might hide URL from client)
|
||||
// res.redirect(result.value);
|
||||
|
||||
// Option 2: Return URL in JSON (Gives client more control)
|
||||
res.json({ downloadUrl: result.value });
|
||||
|
||||
} else if (result.type === 'path') {
|
||||
// Local Adapter returned a file path
|
||||
const filePath = result.value;
|
||||
logger.info(`[Route /download] Streaming local file: ${filePath}`);
|
||||
|
||||
// Check if file still exists before streaming
|
||||
try {
|
||||
const stats = await fs.stat(fullPath);
|
||||
|
||||
if (stats.isDirectory()) {
|
||||
const subItems = await getDirectoryContents(fullPath, itemRelativePath);
|
||||
items.push({
|
||||
name: entry,
|
||||
type: 'directory',
|
||||
path: itemRelativePath,
|
||||
size: calculateTotalSize(subItems),
|
||||
formattedSize: formatFileSize(calculateTotalSize(subItems)),
|
||||
uploadDate: stats.mtime,
|
||||
children: subItems
|
||||
});
|
||||
} else if (stats.isFile()) {
|
||||
items.push({
|
||||
name: entry,
|
||||
type: 'file',
|
||||
path: itemRelativePath,
|
||||
size: stats.size,
|
||||
formattedSize: formatFileSize(stats.size),
|
||||
uploadDate: stats.mtime,
|
||||
extension: path.extname(entry).toLowerCase()
|
||||
});
|
||||
await fs.promises.access(filePath, fs.constants.R_OK);
|
||||
} catch (accessErr) {
|
||||
if (accessErr.code === 'ENOENT') {
|
||||
logger.warn(`[Route /download] Local file not found just before streaming: ${filePath}`);
|
||||
return res.status(404).json({ error: 'File not found' });
|
||||
}
|
||||
logger.error(`[Route /download] Cannot access local file for streaming ${filePath}: ${accessErr.message}`);
|
||||
return res.status(500).json({ error: 'Failed to access file for download' });
|
||||
}
|
||||
|
||||
// Set headers for download
|
||||
res.setHeader('Content-Disposition', `attachment; filename="${filename}"`); // Use the sanitized basename
|
||||
res.setHeader('Content-Type', 'application/octet-stream'); // Generic type
|
||||
|
||||
// Stream the file
|
||||
const fileStream = fs.createReadStream(filePath);
|
||||
|
||||
fileStream.on('error', (streamErr) => {
|
||||
logger.error(`[Route /download] File streaming error for ${filePath}: ${streamErr.message}`);
|
||||
if (!res.headersSent) {
|
||||
// Try to send an error response if headers haven't been sent yet
|
||||
res.status(500).json({ error: 'Failed to stream file' });
|
||||
} else {
|
||||
// If headers already sent, we can only terminate the connection
|
||||
res.end();
|
||||
}
|
||||
} catch (statErr) {
|
||||
logger.error(`Failed to get stats for ${fullPath}: ${statErr.message}`);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// Sort items: directories first, then files, both alphabetically
|
||||
items.sort((a, b) => {
|
||||
if (a.type !== b.type) {
|
||||
return a.type === 'directory' ? -1 : 1;
|
||||
}
|
||||
return a.name.localeCompare(b.name);
|
||||
});
|
||||
|
||||
} catch (err) {
|
||||
logger.error(`Failed to read directory ${dirPath}: ${err.message}`);
|
||||
}
|
||||
|
||||
return items;
|
||||
}
|
||||
});
|
||||
|
||||
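Because this endpoint answers with JSON ({ downloadUrl }) when the backend is S3 but streams raw bytes when the backend is local, callers have to branch on the response. A browser-side sketch (the /api/files mount prefix is an assumption):

// Sketch of client-side handling for GET <prefix>/:filename/download
async function downloadFile(filename) {
  const res = await fetch(`/api/files/${encodeURIComponent(filename)}/download`);
  if (!res.ok) throw new Error(`Download failed: ${res.status}`);

  const contentType = res.headers.get('content-type') || '';
  if (contentType.includes('application/json')) {
    // S3 backend: follow the presigned URL returned by the server.
    const { downloadUrl } = await res.json();
    window.location.assign(downloadUrl);
  } else {
    // Local backend: the response body is the file itself.
    const blob = await res.blob();
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    a.download = filename;
    a.click();
    URL.revokeObjectURL(url);
  }
}
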
/**
|
||||
* Calculate total size of all files in a directory structure
|
||||
*/
|
||||
function calculateTotalSize(items) {
|
||||
return items.reduce((total, item) => {
|
||||
if (item.type === 'file') {
|
||||
return total + item.size;
|
||||
} else if (item.type === 'directory' && item.children) {
|
||||
return total + calculateTotalSize(item.children);
|
||||
}
|
||||
return total;
|
||||
}, 0);
|
||||
}
|
||||
fileStream.pipe(res);
|
||||
|
||||
/**
|
||||
* Count total number of files in a directory structure
|
||||
*/
|
||||
function countFiles(items) {
|
||||
return items.reduce((count, item) => {
|
||||
if (item.type === 'file') {
|
||||
return count + 1;
|
||||
} else if (item.type === 'directory' && item.children) {
|
||||
return count + countFiles(item.children);
|
||||
}
|
||||
return count;
|
||||
}, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete file or directory
|
||||
*/
|
||||
router.delete('/*', async (req, res) => {
|
||||
// Get the file/directory path from the wildcard parameter
|
||||
const itemPath = path.join(config.uploadDir, req.params[0]);
|
||||
|
||||
try {
|
||||
// Ensure the path is within the upload directory (security check)
|
||||
// Use requireExists=true since we're deleting an existing file
|
||||
if (!isPathWithinUploadDir(itemPath, config.uploadDir, true)) {
|
||||
logger.warn(`Attempted path traversal attack: ${req.params[0]}`);
|
||||
return res.status(403).json({ error: 'Access denied' });
|
||||
}
|
||||
|
||||
await fs.access(itemPath);
|
||||
const stats = await fs.stat(itemPath);
|
||||
|
||||
if (stats.isDirectory()) {
|
||||
// Delete directory recursively
|
||||
await fs.rm(itemPath, { recursive: true, force: true });
|
||||
logger.info(`Directory deleted: ${req.params[0]}`);
|
||||
res.json({ message: 'Directory deleted successfully' });
|
||||
} else {
|
||||
// Delete file
|
||||
await fs.unlink(itemPath);
|
||||
logger.info(`File deleted: ${req.params[0]}`);
|
||||
res.json({ message: 'File deleted successfully' });
|
||||
// Unknown result type from adapter
|
||||
logger.error(`[Route /download] Unknown result type from storage adapter: ${result.type}`);
|
||||
res.status(500).json({ error: 'Internal server error during download preparation' });
|
||||
}
|
||||
|
||||
} catch (err) {
|
||||
logger.error(`Deletion failed: ${err.message}`);
|
||||
res.status(err.code === 'ENOENT' ? 404 : 500).json({
|
||||
error: err.code === 'ENOENT' ? 'File or directory not found' : 'Failed to delete item'
|
||||
});
|
||||
logger.error(`[Route /download] Failed to get download for ${filename}: ${err.message}`, err.stack);
|
||||
let statusCode = 500;
|
||||
let clientMessage = 'Failed to initiate download.';
|
||||
|
||||
// Use specific errors thrown by adapters if available
|
||||
if (err.message === 'File not found' || err.message === 'File not found in S3' || err.name === 'NoSuchKey' || err.code === 'ENOENT') {
|
||||
statusCode = 404;
|
||||
clientMessage = 'File not found.';
|
||||
} else if (err.message === 'Permission denied' || err.code === 'EACCES' || err.name === 'AccessDenied') {
|
||||
statusCode = 500; // Treat permission issues as internal server errors generally
|
||||
clientMessage = 'Storage permission error during download.';
|
||||
} else if (err.message === 'Invalid filename') {
|
||||
statusCode = 400;
|
||||
clientMessage = 'Invalid filename specified.';
|
||||
}
|
||||
|
||||
// Avoid sending error if headers might have been partially sent by streaming
|
||||
if (!res.headersSent) {
|
||||
res.status(statusCode).json({ error: clientMessage, details: err.message });
|
||||
} else {
|
||||
logger.warn(`[Route /download] Error occurred after headers sent for ${filename}. Cannot send JSON error.`);
|
||||
res.end(); // Terminate response if possible
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
/**
|
||||
* Rename file or directory
|
||||
* Delete a file from the storage backend.
|
||||
*/
|
||||
router.put('/rename/*', async (req, res) => {
|
||||
const { newName } = req.body;
|
||||
|
||||
if (!newName || typeof newName !== 'string' || newName.trim() === '') {
|
||||
return res.status(400).json({ error: 'New name is required' });
|
||||
router.delete('/:filename', async (req, res) => {
|
||||
const rawFilename = req.params.filename;
|
||||
|
||||
// Basic sanitization
|
||||
const filename = path.basename(rawFilename);
|
||||
if (filename !== rawFilename || filename.includes('..')) {
|
||||
logger.error(`[Route /delete] Invalid filename detected: ${rawFilename}`);
|
||||
return res.status(400).json({ error: 'Invalid filename' });
|
||||
}
|
||||
|
||||
// Get the current file/directory path from the wildcard parameter
|
||||
const currentPath = path.join(config.uploadDir, req.params[0]);
|
||||
const currentDir = path.dirname(currentPath);
|
||||
|
||||
|
||||
// Demo mode handling
|
||||
if (isDemoMode()) {
|
||||
logger.info(`[DEMO /delete] Delete request for ${filename}`);
|
||||
// Call demoAdapter.deleteFile(filename) if implemented?
|
||||
return res.json({ message: 'File deleted (Demo)', filename: filename });
|
||||
}
|
||||
|
||||
logger.info(`[Route /delete] Received delete request for: ${filename}`);
|
||||
|
||||
try {
|
||||
// Ensure the current path is within the upload directory (security check)
|
||||
// Use requireExists=true since we're renaming an existing file
|
||||
if (!isPathWithinUploadDir(currentPath, config.uploadDir, true)) {
|
||||
logger.warn(`Attempted path traversal attack: ${req.params[0]}`);
|
||||
return res.status(403).json({ error: 'Access denied' });
|
||||
}
|
||||
|
||||
// Check if the current file/directory exists
|
||||
await fs.access(currentPath);
|
||||
const stats = await fs.stat(currentPath);
|
||||
|
||||
// Sanitize the new name using our safe sanitization function
|
||||
const sanitizedNewName = sanitizeFilenameSafe(newName.trim());
|
||||
|
||||
// Validate that sanitization didn't result in an empty filename
|
||||
if (!sanitizedNewName || sanitizedNewName.trim() === '') {
|
||||
logger.warn(`Rename rejected: sanitized filename is empty (original: "${newName}")`);
|
||||
return res.status(400).json({ error: 'Invalid or empty filename after sanitization' });
|
||||
}
|
||||
|
||||
// Construct the new path
|
||||
const newPath = path.join(currentDir, sanitizedNewName);
|
||||
|
||||
// Ensure the new path is also within the upload directory
|
||||
// Use requireExists=false since the new path doesn't exist yet
|
||||
if (!isPathWithinUploadDir(newPath, config.uploadDir, false)) {
|
||||
logger.warn(`Attempted to rename outside upload directory: ${newPath}`);
|
||||
return res.status(403).json({ error: 'Invalid destination path' });
|
||||
}
|
||||
|
||||
// Check if a file/directory with the new name already exists
|
||||
try {
|
||||
await fs.access(newPath);
|
||||
return res.status(409).json({ error: 'A file or directory with that name already exists' });
|
||||
} catch (err) {
|
||||
// File doesn't exist, which is what we want
|
||||
if (err.code !== 'ENOENT') {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
// Perform the rename operation
|
||||
await fs.rename(currentPath, newPath);
|
||||
|
||||
// Log the operation
|
||||
const itemType = stats.isDirectory() ? 'Directory' : 'File';
|
||||
logger.info(`${itemType} renamed: "${req.params[0]}" -> "${sanitizedNewName}"`);
|
||||
|
||||
// Calculate relative path for response
|
||||
const relativePath = path.relative(config.uploadDir, newPath).replace(/\\/g, '/');
|
||||
|
||||
res.json({
|
||||
message: `${itemType} renamed successfully`,
|
||||
oldName: path.basename(req.params[0]),
|
||||
newName: sanitizedNewName,
|
||||
newPath: relativePath
|
||||
});
|
||||
|
||||
await storageAdapter.deleteFile(filename);
|
||||
res.json({ message: 'File deleted successfully' });
|
||||
} catch (err) {
|
||||
logger.error(`Rename failed: ${err.message}`);
|
||||
res.status(err.code === 'ENOENT' ? 404 : 500).json({
|
||||
error: err.code === 'ENOENT' ? 'File or directory not found' : 'Failed to rename item'
|
||||
});
|
||||
logger.error(`[Route /delete] Failed to delete file ${filename}: ${err.message}`, err.stack);
|
||||
let statusCode = 500;
|
||||
let clientMessage = 'Failed to delete file.';
|
||||
|
||||
// Use specific errors thrown by adapters if available
|
||||
if (err.message === 'File not found' || err.message === 'File not found in S3' || err.name === 'NoSuchKey' || err.code === 'ENOENT') {
|
||||
statusCode = 404;
|
||||
clientMessage = 'File not found.';
|
||||
} else if (err.message === 'Permission denied' || err.code === 'EACCES' || err.name === 'AccessDenied') {
|
||||
statusCode = 500;
|
||||
clientMessage = 'Storage permission error during delete.';
|
||||
} else if (err.message === 'Invalid filename') {
|
||||
statusCode = 400;
|
||||
clientMessage = 'Invalid filename specified.';
|
||||
}
|
||||
|
||||
res.status(statusCode).json({ error: clientMessage, details: err.message });
|
||||
}
|
||||
});
|
||||
|
||||
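The list, download, and delete handlers each repeat a very similar error-to-status mapping. One possible consolidation (a refactoring sketch, not code that exists in the repo) is a shared helper:

// Possible shared helper for the repeated error mapping above.
function mapStorageError(err, fallbackMessage) {
  if (err.message === 'File not found' || err.message === 'File not found in S3' ||
      err.name === 'NoSuchKey' || err.code === 'ENOENT') {
    return { status: 404, message: 'File not found.' };
  }
  if (err.message === 'Permission denied' || err.code === 'EACCES' ||
      err.code === 'EPERM' || err.name === 'AccessDenied') {
    return { status: 500, message: 'Storage permission error.' };
  }
  if (err.message === 'Invalid filename') {
    return { status: 400, message: 'Invalid filename specified.' };
  }
  return { status: 500, message: fallbackMessage };
}
// Usage inside a catch block:
//   const { status, message } = mapStorageError(err, 'Failed to delete file.');
//   res.status(status).json({ error: message, details: err.message });
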
module.exports = router;
@@ -1,496 +1,161 @@
|
||||
/**
|
||||
* File upload route handlers and batch upload management.
|
||||
* Handles file uploads, chunked transfers, and folder creation.
|
||||
* Manages upload sessions using persistent metadata for resumability.
|
||||
* File upload route handlers.
|
||||
* Delegates storage operations to the configured storage adapter.
|
||||
* Handles multipart uploads via adapter logic.
|
||||
*/
|
||||
|
||||
const express = require('express');
|
||||
const router = express.Router();
|
||||
const crypto = require('crypto');
|
||||
const path = require('path');
|
||||
const fs = require('fs').promises; // Use promise-based fs
|
||||
const fsSync = require('fs'); // For sync checks like existsSync
|
||||
const path = require('path'); // Still needed for extension checks
|
||||
const { config } = require('../config');
|
||||
const logger = require('../utils/logger');
|
||||
const { getUniqueFolderPath, sanitizePathPreserveDirsSafe, isValidBatchId, isPathWithinUploadDir } = require('../utils/fileUtils');
|
||||
const { sendNotification } = require('../services/notifications');
|
||||
const { isDemoMode } = require('../utils/demoMode');
|
||||
|
||||
// --- Persistence Setup ---
|
||||
const METADATA_DIR = path.join(config.uploadDir, '.metadata');
|
||||
|
||||
// --- In-Memory Maps (Still useful for session-level data) ---
|
||||
// Store folder name mappings for batch uploads (avoids FS lookups during session)
|
||||
const folderMappings = new Map();
|
||||
// Store batch activity timestamps (for cleaning up stale batches/folder mappings)
|
||||
const batchActivity = new Map();
|
||||
|
||||
const BATCH_TIMEOUT = 30 * 60 * 1000; // 30 minutes for batch/folderMapping cleanup
|
||||
|
||||
// --- Helper Functions for Metadata ---
|
||||
|
||||
async function readUploadMetadata(uploadId) {
|
||||
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
|
||||
logger.warn(`Attempted to read metadata with invalid uploadId: ${uploadId}`);
|
||||
return null;
|
||||
}
|
||||
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
|
||||
try {
|
||||
const data = await fs.readFile(metaFilePath, 'utf8');
|
||||
return JSON.parse(data);
|
||||
} catch (err) {
|
||||
if (err.code === 'ENOENT') {
|
||||
return null; // Metadata file doesn't exist - normal case for new/finished uploads
|
||||
}
|
||||
logger.error(`Error reading metadata for ${uploadId}: ${err.message}`);
|
||||
throw err; // Rethrow other errors
|
||||
}
|
||||
}
|
||||
|
||||
async function writeUploadMetadata(uploadId, metadata) {
|
||||
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
|
||||
logger.error(`Attempted to write metadata with invalid uploadId: ${uploadId}`);
|
||||
return; // Prevent writing
|
||||
}
|
||||
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
|
||||
metadata.lastActivity = Date.now(); // Update timestamp on every write
|
||||
const tempMetaPath = `${metaFilePath}.${crypto.randomBytes(4).toString('hex')}.tmp`;
|
||||
try {
|
||||
// Write atomically if possible (write to temp then rename) for more safety
|
||||
await fs.writeFile(tempMetaPath, JSON.stringify(metadata, null, 2));
|
||||
await fs.rename(tempMetaPath, metaFilePath);
|
||||
} catch (err) {
|
||||
logger.error(`Error writing metadata for ${uploadId}: ${err.message}`);
|
||||
// Attempt to clean up temp file if rename failed
|
||||
try { await fs.unlink(tempMetaPath); } catch {/* ignore */}
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
async function deleteUploadMetadata(uploadId) {
|
||||
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
|
||||
logger.warn(`Attempted to delete metadata with invalid uploadId: ${uploadId}`);
|
||||
return;
|
||||
}
|
||||
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
|
||||
try {
|
||||
await fs.unlink(metaFilePath);
|
||||
logger.debug(`Deleted metadata file for upload: ${uploadId}.meta`);
|
||||
} catch (err) {
|
||||
if (err.code !== 'ENOENT') { // Ignore if already deleted
|
||||
logger.error(`Error deleting metadata file ${uploadId}.meta: ${err.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// --- Batch Cleanup (Focuses on batchActivity map, not primary upload state) ---
|
||||
let batchCleanupInterval;
|
||||
function startBatchCleanup() {
|
||||
if (batchCleanupInterval) clearInterval(batchCleanupInterval);
|
||||
batchCleanupInterval = setInterval(() => {
|
||||
const now = Date.now();
|
||||
logger.info(`Running batch cleanup, checking ${batchActivity.size} active batch sessions`);
|
||||
let cleanedCount = 0;
|
||||
for (const [batchId, lastActivity] of batchActivity.entries()) {
|
||||
if (now - lastActivity >= BATCH_TIMEOUT) {
|
||||
logger.info(`Cleaning up inactive batch session: ${batchId}`);
|
||||
batchActivity.delete(batchId);
|
||||
// Clean up associated folder mappings for this batch
|
||||
for (const key of folderMappings.keys()) {
|
||||
if (key.endsWith(`-${batchId}`)) {
|
||||
folderMappings.delete(key);
|
||||
}
|
||||
}
|
||||
cleanedCount++;
|
||||
}
|
||||
}
|
||||
if (cleanedCount > 0) logger.info(`Cleaned up ${cleanedCount} inactive batch sessions.`);
|
||||
}, 5 * 60 * 1000); // Check every 5 minutes
|
||||
batchCleanupInterval.unref(); // Allow process to exit if this is the only timer
|
||||
return batchCleanupInterval;
|
||||
}
|
||||
function stopBatchCleanup() {
|
||||
if (batchCleanupInterval) {
|
||||
clearInterval(batchCleanupInterval);
|
||||
batchCleanupInterval = null;
|
||||
}
|
||||
}
|
||||
if (!process.env.DISABLE_BATCH_CLEANUP) {
|
||||
startBatchCleanup();
|
||||
}
|
||||
const { storageAdapter } = require('../storage'); // Import the adapter factory's result
|
||||
const { isDemoMode } = require('../utils/demoMode'); // Keep demo check for specific route behavior if needed
|
||||
|
||||
// --- Routes ---
|
||||
|
||||
// Initialize upload
|
||||
router.post('/init', async (req, res) => {
|
||||
// DEMO MODE CHECK - Bypass persistence if in demo mode
|
||||
if (isDemoMode()) {
|
||||
const { filename, fileSize } = req.body;
|
||||
const sanitizedDemoFilename = sanitizePathPreserveDirsSafe(filename);
|
||||
const uploadId = 'demo-' + crypto.randomBytes(16).toString('hex');
|
||||
|
||||
// Log if the filename was changed during sanitization
|
||||
if (filename !== sanitizedDemoFilename) {
|
||||
logger.info(`[DEMO] Filename sanitized: "${filename}" -> "${sanitizedDemoFilename}"`);
|
||||
}
|
||||
|
||||
logger.info(`[DEMO] Initialized upload for ${sanitizedDemoFilename} (${fileSize} bytes) with ID ${uploadId}`);
|
||||
// Simulate zero-byte completion for demo
|
||||
if (Number(fileSize) === 0) {
|
||||
logger.success(`[DEMO] Completed zero-byte file upload: ${sanitizedDemoFilename}`);
|
||||
sendNotification(sanitizedDemoFilename, 0, config); // Still send notification if configured
|
||||
}
|
||||
return res.json({ uploadId });
|
||||
if (isDemoMode() && config.storageType !== 's3') { // S3 demo might still hit the adapter for presigned URLs etc.
|
||||
// but local demo can be simpler.
|
||||
const { filename = 'demo_file.txt', fileSize = 0 } = req.body;
|
||||
const demoUploadId = 'demo-' + Math.random().toString(36).substr(2, 9);
|
||||
logger.info(`[DEMO /init] Req for ${filename}, size ${fileSize}. ID ${demoUploadId}`);
|
||||
if (Number(fileSize) === 0) {
|
||||
logger.success(`[DEMO /init] Sim complete zero-byte: ${filename}`);
|
||||
}
|
||||
return res.json({ uploadId: demoUploadId });
|
||||
}
|
||||
|
||||
const { filename, fileSize } = req.body;
|
||||
const clientBatchId = req.headers['x-batch-id'];
|
||||
|
||||
// --- Basic validations ---
|
||||
if (!filename) return res.status(400).json({ error: 'Missing filename' });
|
||||
if (fileSize === undefined || fileSize === null) return res.status(400).json({ error: 'Missing fileSize' });
|
||||
const size = Number(fileSize);
|
||||
if (isNaN(size) || size < 0) return res.status(400).json({ error: 'Invalid file size' });
|
||||
const maxSizeInBytes = config.maxFileSize;
|
||||
if (size > maxSizeInBytes) return res.status(413).json({ error: 'File too large', limit: maxSizeInBytes });
|
||||
|
||||
const batchId = clientBatchId || `${Date.now()}-${crypto.randomBytes(4).toString('hex').substring(0, 9)}`;
|
||||
if (clientBatchId && !isValidBatchId(batchId)) return res.status(400).json({ error: 'Invalid batch ID format' });
|
||||
batchActivity.set(batchId, Date.now()); // Track batch session activity
|
||||
if (size > config.maxFileSize) {
|
||||
logger.warn(`Upload rejected: File size ${size} exceeds limit ${config.maxFileSize} for ${filename}`);
|
||||
return res.status(413).json({ error: 'File too large', limit: config.maxFileSize });
|
||||
}
|
||||
|
||||
if (config.allowedExtensions && config.allowedExtensions.length > 0) {
|
||||
const fileExt = path.extname(filename).toLowerCase();
|
||||
if (!fileExt || !config.allowedExtensions.includes(fileExt)) {
|
||||
logger.warn(`Upload rejected: File type not allowed: ${filename} (Ext: ${fileExt || 'none'})`);
|
||||
return res.status(400).json({ error: 'File type not allowed', receivedExtension: fileExt || 'none' });
|
||||
}
|
||||
logger.debug(`File extension ${fileExt} allowed for ${filename}`);
|
||||
}
|
||||
|
||||
try {
|
||||
// --- Path handling and Sanitization ---
|
||||
const sanitizedFilename = sanitizePathPreserveDirsSafe(filename);
|
||||
const safeFilename = path.normalize(sanitizedFilename)
|
||||
.replace(/^(\.\.(\/|\\|$))+/, '')
|
||||
.replace(/\\/g, '/')
|
||||
.replace(/^\/+/, '');
|
||||
|
||||
// Log if the filename was changed during sanitization
|
||||
if (filename !== safeFilename) {
|
||||
logger.info(`Upload filename sanitized: "${filename}" -> "${safeFilename}"`);
|
||||
} else {
|
||||
logger.info(`Upload init request for: ${safeFilename}`);
|
||||
}
|
||||
|
||||
// --- Extension Check ---
|
||||
if (config.allowedExtensions) {
|
||||
const fileExt = path.extname(safeFilename).toLowerCase();
|
||||
if (fileExt && !config.allowedExtensions.includes(fileExt)) {
|
||||
logger.warn(`File type not allowed: ${safeFilename} (Extension: ${fileExt})`);
|
||||
return res.status(400).json({ error: 'File type not allowed', receivedExtension: fileExt });
|
||||
}
|
||||
}
|
||||
|
||||
// --- Determine Paths & Handle Folders ---
|
||||
const uploadId = crypto.randomBytes(16).toString('hex');
|
||||
let finalFilePath = path.join(config.uploadDir, safeFilename);
|
||||
|
||||
// Validate that the constructed path is within the upload directory
|
||||
if (!isPathWithinUploadDir(finalFilePath, config.uploadDir, false)) {
|
||||
logger.error(`Path traversal detected in upload init: ${safeFilename} -> ${finalFilePath}`);
|
||||
return res.status(403).json({ error: 'Invalid file path' });
|
||||
}
|
||||
|
||||
const pathParts = safeFilename.split('/').filter(Boolean);
|
||||
|
||||
if (pathParts.length > 1) {
|
||||
const originalFolderName = pathParts[0];
|
||||
let newFolderName = folderMappings.get(`${originalFolderName}-${batchId}`);
|
||||
const baseFolderPath = path.join(config.uploadDir, newFolderName || originalFolderName);
|
||||
|
||||
if (!newFolderName) {
|
||||
await fs.mkdir(path.dirname(baseFolderPath), { recursive: true });
|
||||
try {
|
||||
await fs.mkdir(baseFolderPath, { recursive: false });
|
||||
newFolderName = originalFolderName;
|
||||
} catch (err) {
|
||||
if (err.code === 'EEXIST') {
|
||||
const uniqueFolderPath = await getUniqueFolderPath(baseFolderPath);
|
||||
newFolderName = path.basename(uniqueFolderPath);
|
||||
logger.info(`Folder "${originalFolderName}" exists or conflict, using unique "${newFolderName}" for batch ${batchId}`);
|
||||
await fs.mkdir(path.join(config.uploadDir, newFolderName), { recursive: true });
|
||||
} else {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
folderMappings.set(`${originalFolderName}-${batchId}`, newFolderName);
|
||||
}
|
||||
pathParts[0] = newFolderName;
|
||||
finalFilePath = path.join(config.uploadDir, ...pathParts);
|
||||
|
||||
// Validate the updated path
|
||||
if (!isPathWithinUploadDir(finalFilePath, config.uploadDir, false)) {
|
||||
logger.error(`Path traversal detected after folder mapping: ${pathParts.join('/')} -> ${finalFilePath}`);
|
||||
return res.status(403).json({ error: 'Invalid file path' });
|
||||
}
|
||||
|
||||
await fs.mkdir(path.dirname(finalFilePath), { recursive: true });
|
||||
} else {
|
||||
await fs.mkdir(config.uploadDir, { recursive: true }); // Ensure base upload dir exists
|
||||
}
|
||||
|
||||
// --- Check Final Path Collision & Get Unique Name if Needed ---
|
||||
let checkPath = finalFilePath;
|
||||
let counter = 1;
|
||||
while (fsSync.existsSync(checkPath)) {
|
||||
logger.warn(`Final destination file already exists: ${checkPath}. Generating unique name.`);
|
||||
const dir = path.dirname(finalFilePath);
|
||||
const ext = path.extname(finalFilePath);
|
||||
const baseName = path.basename(finalFilePath, ext);
|
||||
checkPath = path.join(dir, `${baseName} (${counter})${ext}`);
|
||||
counter++;
|
||||
}
|
||||
if (checkPath !== finalFilePath) {
|
||||
logger.info(`Using unique final path: ${checkPath}`);
|
||||
finalFilePath = checkPath;
|
||||
|
||||
// Validate the unique path
|
||||
if (!isPathWithinUploadDir(finalFilePath, config.uploadDir, false)) {
|
||||
logger.error(`Path traversal detected in unique path: ${finalFilePath}`);
|
||||
return res.status(403).json({ error: 'Invalid file path' });
|
||||
}
|
||||
|
||||
// If path changed, ensure directory exists (might be needed if baseName contained '/')
|
||||
await fs.mkdir(path.dirname(finalFilePath), { recursive: true });
|
||||
}
|
||||
|
||||
const partialFilePath = finalFilePath + '.partial';
|
||||
|
||||
// Validate the partial file path as well
|
||||
if (!isPathWithinUploadDir(partialFilePath, config.uploadDir, false)) {
|
||||
logger.error(`Path traversal detected in partial path: ${partialFilePath}`);
|
||||
return res.status(403).json({ error: 'Invalid file path' });
|
||||
}
|
||||
|
||||
// --- Create and Persist Metadata ---
|
||||
const metadata = {
|
||||
uploadId,
|
||||
originalFilename: safeFilename, // Store the path as received by client
|
||||
filePath: finalFilePath, // The final, possibly unique, path
|
||||
partialFilePath,
|
||||
fileSize: size,
|
||||
bytesReceived: 0,
|
||||
batchId,
|
||||
createdAt: Date.now(),
|
||||
lastActivity: Date.now()
|
||||
};
|
||||
|
||||
await writeUploadMetadata(uploadId, metadata);
|
||||
logger.info(`Initialized persistent upload: ${uploadId} for ${safeFilename} -> ${finalFilePath}`);
|
||||
|
||||
// --- Handle Zero-Byte Files --- // (Important: Handle *after* metadata potentially exists)
|
||||
if (size === 0) {
|
||||
try {
|
||||
await fs.writeFile(finalFilePath, ''); // Create the empty file
|
||||
logger.success(`Completed zero-byte file upload: ${metadata.originalFilename} as ${finalFilePath}`);
|
||||
await deleteUploadMetadata(uploadId); // Clean up metadata since it's done
|
||||
sendNotification(metadata.originalFilename, 0, config);
|
||||
} catch (writeErr) {
|
||||
logger.error(`Failed to create zero-byte file ${finalFilePath}: ${writeErr.message}`);
|
||||
await deleteUploadMetadata(uploadId).catch(() => {}); // Attempt cleanup on error
|
||||
throw writeErr; // Let the main catch block handle it
|
||||
}
|
||||
}
|
||||
|
||||
res.json({ uploadId });
|
||||
|
||||
const result = await storageAdapter.initUpload(filename, size, clientBatchId);
|
||||
res.json({ uploadId: result.uploadId });
|
||||
} catch (err) {
|
||||
logger.error(`Upload initialization failed: ${err.message} ${err.stack}`);
|
||||
return res.status(500).json({ error: 'Failed to initialize upload', details: err.message });
|
||||
logger.error(`[Route /init] Upload initialization failed for "${filename}": ${err.name} - ${err.message}`, err.stack);
|
||||
let statusCode = 500;
|
||||
let clientMessage = 'Failed to initialize upload.';
|
||||
|
||||
if (err.message.includes('Invalid batch ID format')) {
|
||||
statusCode = 400; clientMessage = err.message;
|
||||
} else if (err.name === 'NoSuchBucket' || err.name === 'AccessDenied') {
|
||||
statusCode = 500; clientMessage = 'Storage configuration error.';
|
||||
} else if (err.code === 'EACCES' || err.code === 'EPERM' || err.message.includes('writable') || err.message.includes('metadata directory')) {
|
||||
statusCode = 500; clientMessage = 'Storage permission or access error.';
|
||||
} else if (err.message.includes('S3 Client configuration failed')) {
|
||||
statusCode = 503; clientMessage = 'Storage service unavailable or misconfigured.';
|
||||
}
|
||||
res.status(statusCode).json({ error: clientMessage, details: config.nodeEnv === 'development' ? err.message : undefined });
|
||||
}
|
||||
});
|
||||
|
||||
// Upload chunk
|
||||
router.post('/chunk/:uploadId', express.raw({
|
||||
limit: config.maxFileSize + (10 * 1024 * 1024), // Generous limit for raw body
|
||||
type: 'application/octet-stream'
|
||||
router.post('/chunk/:uploadId', express.raw({
|
||||
limit: config.maxFileSize + (10 * 1024 * 1024),
|
||||
type: 'application/octet-stream'
|
||||
}), async (req, res) => {
|
||||
// DEMO MODE CHECK
|
||||
if (isDemoMode()) {
|
||||
const { uploadId } = req.params;
|
||||
logger.debug(`[DEMO] Received chunk for ${uploadId}`);
|
||||
// Fake progress - requires knowing file size which isn't easily available here in demo
|
||||
const demoProgress = Math.min(100, Math.random() * 100); // Placeholder
|
||||
return res.json({ bytesReceived: 0, progress: demoProgress });
|
||||
const { uploadId } = req.params;
|
||||
const chunk = req.body;
|
||||
const partNumber = parseInt(req.query.partNumber, 10); // Ensure partNumber is parsed
|
||||
|
||||
if (isNaN(partNumber) || partNumber < 1) {
|
||||
logger.error(`[Route /chunk] Invalid partNumber for ${uploadId}: ${req.query.partNumber}`);
|
||||
return res.status(400).json({ error: 'Missing or invalid partNumber query parameter (must be >= 1)' });
|
||||
}
|
||||
|
||||
const { uploadId } = req.params;
|
||||
let chunk = req.body;
|
||||
let chunkSize = chunk.length;
|
||||
const clientBatchId = req.headers['x-batch-id']; // Logged but not used directly here
|
||||
if (isDemoMode() && config.storageType !== 's3') {
|
||||
logger.debug(`[DEMO /chunk] Chunk for ${uploadId}, part ${partNumber}, size ${chunk?.length || 0}`);
|
||||
const demoProgress = Math.min(100, (Math.random() * 50) + (partNumber * 10) ); // Simulate increasing progress
|
||||
const completed = demoProgress >= 100;
|
||||
if (completed) logger.info(`[DEMO /chunk] Sim completion for ${uploadId}`);
|
||||
return res.json({ bytesReceived: 0, progress: demoProgress, completed });
|
||||
}
|
||||
|
||||
if (!chunkSize) return res.status(400).json({ error: 'Empty chunk received' });
|
||||
|
||||
let metadata;
|
||||
let fileHandle;
|
||||
if (!chunk || chunk.length === 0) {
|
||||
logger.warn(`[Route /chunk] Empty chunk for ${uploadId}, part ${partNumber}`);
|
||||
return res.status(400).json({ error: 'Empty chunk received' });
|
||||
}
|
||||
|
||||
try {
|
||||
metadata = await readUploadMetadata(uploadId);
|
||||
const result = await storageAdapter.storeChunk(uploadId, chunk, partNumber);
|
||||
|
||||
if (!metadata) {
|
||||
logger.warn(`Upload metadata not found for chunk request: ${uploadId}. Client Batch ID: ${clientBatchId || 'none'}. Upload may be complete or cancelled.`);
|
||||
// Check if the final file exists as a fallback for completed uploads
|
||||
// This is a bit fragile, but handles cases where metadata was deleted slightly early
|
||||
if (result.completed) {
|
||||
logger.info(`[Route /chunk] Part ${partNumber} for ${uploadId} triggered completion. Finalizing...`);
|
||||
try {
|
||||
// Need to guess the final path - THIS IS NOT ROBUST
|
||||
// A better approach might be needed if this is common
|
||||
// For now, just return 404
|
||||
// await fs.access(potentialFinalPath);
|
||||
// return res.json({ bytesReceived: fileSizeGuess, progress: 100 });
|
||||
return res.status(404).json({ error: 'Upload session not found or already completed' });
|
||||
} catch {
|
||||
return res.status(404).json({ error: 'Upload session not found or already completed' });
|
||||
const completionResult = await storageAdapter.completeUpload(uploadId);
|
||||
logger.success(`[Route /chunk] Finalized upload ${uploadId}. Path/Key: ${completionResult.finalPath}`);
|
||||
return res.json({ bytesReceived: result.bytesReceived, progress: 100, completed: true });
|
||||
} catch (completionError) {
|
||||
logger.error(`[Route /chunk] CRITICAL: Failed to finalize ${uploadId} after part ${partNumber}: ${completionError.message}`, completionError.stack);
|
||||
return res.status(500).json({ error: 'Upload chunk received, but failed to finalize.', details: config.nodeEnv === 'development' ? completionError.message : undefined });
|
||||
}
|
||||
} else {
|
||||
res.json({ bytesReceived: result.bytesReceived, progress: result.progress, completed: false });
|
||||
}
|
||||
|
||||
// Update batch activity using metadata's batchId
|
||||
if (metadata.batchId && isValidBatchId(metadata.batchId)) {
|
||||
batchActivity.set(metadata.batchId, Date.now());
|
||||
}
|
||||
|
||||
// --- Sanity Checks & Idempotency ---
|
||||
if (metadata.bytesReceived >= metadata.fileSize) {
|
||||
logger.warn(`Received chunk for already completed upload ${uploadId} (${metadata.originalFilename}). Finalizing again if needed.`);
|
||||
// Ensure finalization if possible, then return success
|
||||
try {
|
||||
await fs.access(metadata.filePath); // Check if final file exists
|
||||
logger.info(`Upload ${uploadId} already finalized at ${metadata.filePath}.`);
|
||||
} catch {
|
||||
// Final file doesn't exist, attempt rename
|
||||
try {
|
||||
await fs.rename(metadata.partialFilePath, metadata.filePath);
|
||||
logger.info(`Finalized ${uploadId} on redundant chunk request (renamed ${metadata.partialFilePath} -> ${metadata.filePath}).`);
|
||||
} catch (renameErr) {
|
||||
if (renameErr.code === 'ENOENT') {
|
||||
logger.warn(`Partial file ${metadata.partialFilePath} missing during redundant chunk finalization for ${uploadId}.`);
|
||||
} else {
|
||||
logger.error(`Error finalizing ${uploadId} on redundant chunk: ${renameErr.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
// Regardless of rename outcome, delete metadata if it still exists
|
||||
await deleteUploadMetadata(uploadId);
|
||||
return res.json({ bytesReceived: metadata.fileSize, progress: 100 });
|
||||
}
|
||||
|
||||
// Prevent writing beyond expected file size (simple protection)
|
||||
if (metadata.bytesReceived + chunkSize > metadata.fileSize) {
|
||||
logger.warn(`Chunk for ${uploadId} exceeds expected file size. Received ${metadata.bytesReceived + chunkSize}, expected ${metadata.fileSize}. Truncating chunk.`);
|
||||
const bytesToWrite = metadata.fileSize - metadata.bytesReceived;
|
||||
chunk = chunk.slice(0, bytesToWrite);
|
||||
chunkSize = chunk.length;
|
||||
if (chunkSize <= 0) { // If we already have exactly the right amount
|
||||
logger.info(`Upload ${uploadId} already has expected bytes. Skipping write, proceeding to finalize.`);
|
||||
// Skip write, proceed to finalization check below
|
||||
metadata.bytesReceived = metadata.fileSize; // Ensure state is correct for finalization
|
||||
} else {
|
||||
logger.info(`Truncated chunk for ${uploadId} to ${chunkSize} bytes.`);
|
||||
}
|
||||
}
|
||||
|
||||
// --- Write Chunk (Append Mode) --- // Only write if chunk has size after potential truncation
|
||||
if (chunkSize > 0) {
|
||||
fileHandle = await fs.open(metadata.partialFilePath, 'a');
|
||||
const writeResult = await fileHandle.write(chunk);
|
||||
await fileHandle.close(); // Close immediately
|
||||
|
||||
if (writeResult.bytesWritten !== chunkSize) {
|
||||
// This indicates a partial write, which is problematic.
|
||||
logger.error(`Partial write for chunk ${uploadId}! Expected ${chunkSize}, wrote ${writeResult.bytesWritten}. Disk full?`);
|
||||
// How to recover? Maybe revert bytesReceived? For now, throw.
|
||||
throw new Error(`Failed to write full chunk for ${uploadId}`);
|
||||
}
|
||||
metadata.bytesReceived += writeResult.bytesWritten;
|
||||
}
|
||||
|
||||
// --- Update State --- (bytesReceived updated above or set if truncated to zero)
|
||||
const progress = metadata.fileSize === 0 ? 100 :
|
||||
Math.min( Math.round((metadata.bytesReceived / metadata.fileSize) * 100), 100);
|
||||
|
||||
logger.debug(`Chunk written for ${uploadId}: ${metadata.bytesReceived}/${metadata.fileSize} (${progress}%)`);
|
||||
|
||||
// --- Persist Updated Metadata (Before potential finalization) ---
|
||||
await writeUploadMetadata(uploadId, metadata);
|
||||
|
||||
// --- Check for Completion --- // Now happens after metadata update
|
||||
if (metadata.bytesReceived >= metadata.fileSize) {
|
||||
logger.info(`Upload ${uploadId} (${metadata.originalFilename}) completed ${metadata.bytesReceived} bytes.`);
|
||||
try {
|
||||
await fs.rename(metadata.partialFilePath, metadata.filePath);
|
||||
logger.success(`Upload completed and finalized: ${metadata.originalFilename} as ${metadata.filePath} (${metadata.fileSize} bytes)`);
|
||||
await deleteUploadMetadata(uploadId); // Clean up metadata file AFTER successful rename
|
||||
sendNotification(metadata.originalFilename, metadata.fileSize, config);
|
||||
} catch (renameErr) {
|
||||
if (renameErr.code === 'ENOENT') {
|
||||
logger.warn(`Partial file ${metadata.partialFilePath} not found during finalization for ${uploadId}. Assuming already finalized elsewhere.`);
|
||||
// Attempt to delete metadata anyway if partial is gone
|
||||
await deleteUploadMetadata(uploadId).catch(() => {});
|
||||
} else {
|
||||
logger.error(`CRITICAL: Failed to rename partial file ${metadata.partialFilePath} to ${metadata.filePath}: ${renameErr.message}`);
|
||||
// Keep metadata and partial file for manual recovery.
|
||||
// Return success to client as data is likely there, but log server issue.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
res.json({ bytesReceived: metadata.bytesReceived, progress });
|
||||
|
||||
} catch (err) {
|
||||
// Ensure file handle is closed on error
|
||||
if (fileHandle) {
|
||||
await fileHandle.close().catch(closeErr => logger.error(`Error closing file handle for ${uploadId} after error: ${closeErr.message}`));
|
||||
logger.error(`[Route /chunk] Chunk upload failed for ${uploadId}, part ${partNumber}: ${err.name} - ${err.message}`, err.stack);
|
||||
let statusCode = 500;
|
||||
let clientMessage = 'Failed to process chunk.';
|
||||
|
||||
if (err.message.includes('Upload session not found') || err.name === 'NoSuchUpload' || err.code === 'ENOENT' || err.name === 'NotFound' || err.name === 'NoSuchKey') {
|
||||
statusCode = 404; clientMessage = 'Upload session not found or already completed/aborted.';
|
||||
} else if (err.name === 'InvalidPart' || err.name === 'InvalidPartOrder') {
|
||||
statusCode = 400; clientMessage = 'Invalid upload chunk sequence or data.';
|
||||
} else if (err.name === 'SlowDown' || (err.$metadata && err.$metadata.httpStatusCode === 503) ) {
|
||||
statusCode = 429; clientMessage = 'Storage provider rate limit exceeded, please try again later.';
|
||||
} else if (err.code === 'EACCES' || err.code === 'EPERM' ) {
|
||||
statusCode = 500; clientMessage = 'Storage permission error while writing chunk.';
|
||||
}
|
||||
logger.error(`Chunk upload failed for ${uploadId}: ${err.message} ${err.stack}`);
|
||||
// Don't delete metadata on generic chunk errors, let client retry or cleanup handle stale files
|
||||
res.status(500).json({ error: 'Failed to process chunk', details: err.message });
|
||||
res.status(statusCode).json({ error: clientMessage, details: config.nodeEnv === 'development' ? err.message : undefined });
|
||||
}
|
||||
});
|
||||
|
||||
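From the client's point of view, /init and /chunk/:uploadId form a small protocol: initialize to get an uploadId, then POST raw application/octet-stream chunks with a 1-based partNumber query parameter until the server reports completed. A browser-side sketch (the /api/upload prefix and the 1 MiB chunk size are assumptions):

// Sketch of the client side of the chunked upload protocol above.
async function uploadFile(file, batchId) {
  // 1. Initialize the upload session.
  const initRes = await fetch('/api/upload/init', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'X-Batch-ID': batchId },
    body: JSON.stringify({ filename: file.name, fileSize: file.size }),
  });
  if (!initRes.ok) throw new Error(`init failed: ${initRes.status}`);
  const { uploadId } = await initRes.json();

  // 2. Send the file as raw chunks; partNumber starts at 1.
  const CHUNK_SIZE = 1024 * 1024;
  let partNumber = 1;
  for (let offset = 0; offset < file.size; offset += CHUNK_SIZE, partNumber++) {
    const res = await fetch(`/api/upload/chunk/${uploadId}?partNumber=${partNumber}`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/octet-stream', 'X-Batch-ID': batchId },
      body: file.slice(offset, offset + CHUNK_SIZE),
    });
    if (!res.ok) throw new Error(`chunk ${partNumber} failed: ${res.status}`);
    const { progress, completed } = await res.json();
    if (completed) return { uploadId, progress };
  }
  return { uploadId, progress: 100 };
}
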
// Cancel upload
|
||||
router.post('/cancel/:uploadId', async (req, res) => {
|
||||
// DEMO MODE CHECK
|
||||
if (isDemoMode()) {
|
||||
logger.info(`[DEMO] Upload cancelled: ${req.params.uploadId}`);
|
||||
return res.json({ message: 'Upload cancelled (Demo)' });
|
||||
const { uploadId } = req.params;
|
||||
|
||||
if (isDemoMode() && config.storageType !== 's3') {
|
||||
logger.info(`[DEMO /cancel] Request for ${uploadId}`);
|
||||
return res.json({ message: 'Upload cancelled (Demo)' });
|
||||
}
|
||||
|
||||
const { uploadId } = req.params;
|
||||
logger.info(`Received cancel request for upload: ${uploadId}`);
|
||||
|
||||
logger.info(`[Route /cancel] Cancel request for upload: ${uploadId}`);
|
||||
try {
|
||||
const metadata = await readUploadMetadata(uploadId);
|
||||
|
||||
if (metadata) {
|
||||
// Delete partial file first
|
||||
try {
|
||||
await fs.unlink(metadata.partialFilePath);
|
||||
logger.info(`Deleted partial file on cancellation: ${metadata.partialFilePath}`);
|
||||
} catch (unlinkErr) {
|
||||
if (unlinkErr.code !== 'ENOENT') { // Ignore if already gone
|
||||
logger.error(`Failed to delete partial file ${metadata.partialFilePath} on cancel: ${unlinkErr.message}`);
|
||||
}
|
||||
}
|
||||
// Then delete metadata file
|
||||
await deleteUploadMetadata(uploadId);
|
||||
logger.info(`Upload cancelled and cleaned up: ${uploadId} (${metadata.originalFilename})`);
|
||||
} else {
|
||||
logger.warn(`Cancel request for non-existent or already completed upload: ${uploadId}`);
|
||||
}
|
||||
|
||||
res.json({ message: 'Upload cancelled or already complete' });
|
||||
await storageAdapter.abortUpload(uploadId);
|
||||
res.json({ message: 'Upload cancelled successfully or was already inactive.' });
|
||||
} catch (err) {
|
||||
logger.error(`Error during upload cancellation for ${uploadId}: ${err.message}`);
|
||||
res.status(500).json({ error: 'Failed to cancel upload' });
|
||||
logger.error(`[Route /cancel] Error during cancellation for ${uploadId}: ${err.name} - ${err.message}`, err.stack);
|
||||
// Generally, client doesn't need to know if server-side abort failed catastrophically,
|
||||
// as long as client stops sending. However, if it's a config error, 500 is appropriate.
|
||||
let statusCode = err.name === 'NoSuchUpload' ? 200 : 500; // If not found, it's like success for client
|
||||
let clientMessage = err.name === 'NoSuchUpload' ? 'Upload already inactive or not found.' : 'Failed to cancel upload on server.';
|
||||
if (err.name === 'AccessDenied' || err.name === 'NoSuchBucket') {
|
||||
clientMessage = 'Storage configuration error during cancel.';
|
||||
statusCode = 500;
|
||||
}
|
||||
res.status(statusCode).json({ message: clientMessage, details: config.nodeEnv === 'development' ? err.message : undefined });
|
||||
}
|
||||
});
|
||||
|
||||
module.exports = {
|
||||
router,
|
||||
startBatchCleanup,
|
||||
stopBatchCleanup,
|
||||
// Export for testing if required
|
||||
readUploadMetadata,
|
||||
writeUploadMetadata,
|
||||
deleteUploadMetadata
|
||||
};
|
||||
module.exports = { router }; // Only export the router object
|
||||
110  src/scripts/entrypoint.sh (new file)
@@ -0,0 +1,110 @@
#!/bin/sh
|
||||
# Simple entrypoint script to manage user permissions and execute CMD
|
||||
|
||||
# Exit immediately if a command exits with a non-zero status.
|
||||
set -e
|
||||
|
||||
# Function to log messages
|
||||
log_info() {
|
||||
echo "[INFO] Entrypoint: $1"
|
||||
}
|
||||
|
||||
log_warning() {
|
||||
echo "[WARN] Entrypoint: $1"
|
||||
}
|
||||
|
||||
log_error() {
|
||||
echo "[ERROR] Entrypoint: $1" >&2
|
||||
}
|
||||
|
||||
log_info "Starting entrypoint script..."
|
||||
|
||||
# Default user/group/umask values
|
||||
DEFAULT_UID=1000
|
||||
DEFAULT_GID=1000
|
||||
DEFAULT_UMASK=022
|
||||
# Default upload directory if not set by user (should align with Dockerfile/compose)
|
||||
DEFAULT_UPLOAD_DIR="/usr/src/app/local_uploads"
|
||||
|
||||
# Check if PUID or PGID environment variables are set by the user
|
||||
if [ -z "${PUID}" ] && [ -z "${PGID}" ]; then
|
||||
# --- Run as Root ---
|
||||
log_info "PUID/PGID not set, running as root."
|
||||
|
||||
# Set umask (use UMASK env var if provided, otherwise default)
|
||||
CURRENT_UMASK=${UMASK:-$DEFAULT_UMASK}
|
||||
log_info "Setting umask to ${CURRENT_UMASK}"
|
||||
umask "${CURRENT_UMASK}"
|
||||
|
||||
# Execute the command passed to the entrypoint as root
|
||||
log_info "Executing command as root: $@"
|
||||
exec "$@"
|
||||
|
||||
else
|
||||
# --- Run as Custom User (nodeuser with adjusted UID/GID) ---
|
||||
log_info "PUID/PGID set, configuring user 'nodeuser'..."
|
||||
|
||||
# Use provided UID/GID or default if only one is set
|
||||
CURRENT_UID=${PUID:-$DEFAULT_UID}
|
||||
CURRENT_GID=${PGID:-$DEFAULT_GID}
|
||||
CURRENT_UMASK=${UMASK:-$DEFAULT_UMASK}
|
||||
# Read the upload directory from ENV var or use default
|
||||
TARGET_UPLOAD_DIR=${UPLOAD_DIR:-$DEFAULT_UPLOAD_DIR}
|
||||
|
||||
log_info "Target UID: ${CURRENT_UID}, GID: ${CURRENT_GID}, UMASK: ${CURRENT_UMASK}"
|
||||
log_info "Target Upload Dir: ${TARGET_UPLOAD_DIR}"
|
||||
|
||||
# Check if user/group exists (should exist from Dockerfile)
|
||||
if ! getent group nodeuser > /dev/null 2>&1; then
|
||||
log_warning "Group 'nodeuser' not found, creating with GID ${CURRENT_GID}..."
|
||||
addgroup -g "${CURRENT_GID}" nodeuser
|
||||
else
|
||||
EXISTING_GID=$(getent group nodeuser | cut -d: -f3)
|
||||
if [ "${EXISTING_GID}" != "${CURRENT_GID}" ]; then
|
||||
log_info "Updating 'nodeuser' group GID from ${EXISTING_GID} to ${CURRENT_GID}..."
|
||||
groupmod -o -g "${CURRENT_GID}" nodeuser
|
||||
fi
|
||||
fi
|
||||
|
||||
if ! getent passwd nodeuser > /dev/null 2>&1; then
|
||||
log_warning "User 'nodeuser' not found, creating with UID ${CURRENT_UID}..."
|
||||
adduser -u "${CURRENT_UID}" -G nodeuser -s /bin/sh -D nodeuser
|
||||
else
|
||||
EXISTING_UID=$(getent passwd nodeuser | cut -d: -f3)
|
||||
if [ "${EXISTING_UID}" != "${CURRENT_UID}" ]; then
|
||||
log_info "Updating 'nodeuser' user UID from ${EXISTING_UID} to ${CURRENT_UID}..."
|
||||
usermod -o -u "${CURRENT_UID}" nodeuser
|
||||
fi
|
||||
fi
|
||||
|
||||
# Ensure the base application directory ownership is correct
|
||||
log_info "Ensuring ownership of /usr/src/app..."
|
||||
chown -R nodeuser:nodeuser /usr/src/app || log_warning "Could not chown /usr/src/app"
|
||||
|
||||
# Ensure the target upload directory exists and has correct ownership
|
||||
if [ -n "${TARGET_UPLOAD_DIR}" ]; then
|
||||
if [ ! -d "${TARGET_UPLOAD_DIR}" ]; then
|
||||
log_info "Creating directory: ${TARGET_UPLOAD_DIR}"
|
||||
# Use -p to create parent directories as needed
|
||||
mkdir -p "${TARGET_UPLOAD_DIR}"
|
||||
# Chown after creation
|
||||
chown nodeuser:nodeuser "${TARGET_UPLOAD_DIR}" || log_warning "Could not chown ${TARGET_UPLOAD_DIR}"
|
||||
else
|
||||
# Directory exists, ensure ownership
|
||||
log_info "Ensuring ownership of ${TARGET_UPLOAD_DIR}..."
|
||||
chown -R nodeuser:nodeuser "${TARGET_UPLOAD_DIR}" || log_warning "Could not chown ${TARGET_UPLOAD_DIR}"
|
||||
fi
|
||||
else
|
||||
log_warning "UPLOAD_DIR variable is not set or is empty, skipping ownership check for upload directory."
|
||||
fi
|
||||
|
||||
# Set the umask
|
||||
log_info "Setting umask to ${CURRENT_UMASK}"
|
||||
umask "${CURRENT_UMASK}"
|
||||
|
||||
# Execute the command passed to the entrypoint using su-exec to drop privileges
|
||||
log_info "Executing command as nodeuser (${CURRENT_UID}:${CURRENT_GID}): $@"
|
||||
exec su-exec nodeuser "$@"
|
||||
fi
|
||||
|
||||
log_info "Entrypoint script finished (should not reach here if exec worked)."
|
||||
122  src/server.js
@@ -1,129 +1,111 @@
/**
|
||||
* Server entry point that starts the HTTP server and manages connections.
|
||||
* Handles graceful shutdown, connection tracking, and server initialization.
|
||||
* Provides development mode directory listing functionality.
|
||||
*/
|
||||
|
||||
const { app, initialize, config } = require('./app');
|
||||
const { app, initialize, config } = require('./app'); // config is now also exported from app.js
|
||||
const logger = require('./utils/logger');
|
||||
const fs = require('fs');
|
||||
const fs = require('fs'); // Keep for readdirSync if needed for local dev logging
|
||||
const { executeCleanup } = require('./utils/cleanup');
|
||||
const { generatePWAManifest } = require('./scripts/pwa-manifest-generator')
|
||||
const { generatePWAManifest } = require('./scripts/pwa-manifest-generator');
|
||||
|
||||
// Track open connections
|
||||
const connections = new Set();
|
||||
|
||||
/**
|
||||
* Start the server and initialize the application
|
||||
* @returns {Promise<http.Server>} The HTTP server instance
|
||||
*/
|
||||
async function startServer() {
|
||||
try {
|
||||
// Initialize the application
|
||||
await initialize();
|
||||
|
||||
// Start the server - bind to 0.0.0.0 for Docker compatibility
|
||||
const server = app.listen(config.port, '0.0.0.0', () => {
|
||||
await initialize(); // This will call validateConfig and load storage adapter via app.js
|
||||
|
||||
const server = app.listen(config.port, () => {
|
||||
logger.info(`Server running at ${config.baseUrl}`);
|
||||
logger.info(`Server listening on 0.0.0.0:${config.port}`);
|
||||
logger.info(`Upload directory: ${config.uploadDir}`);
|
||||
|
||||
// List directory contents in development
|
||||
if (config.nodeEnv === 'development') {
|
||||
// ** MODIFIED LOGGING **
|
||||
logger.info(`Active Storage Type: ${config.storageType}`);
|
||||
logger.info(`Data Directory (for uploads or metadata): ${config.uploadDir}`);
|
||||
|
||||
if (config.nodeEnv === 'development' && config.storageType === 'local') {
|
||||
try {
|
||||
const files = fs.readdirSync(config.uploadDir);
|
||||
logger.info(`Current directory contents (${files.length} files):`);
|
||||
files.forEach(file => {
|
||||
logger.info(`- ${file}`);
|
||||
});
|
||||
// Only list contents if it's local storage and dev mode
|
||||
if (fs.existsSync(config.uploadDir)) {
|
||||
const files = fs.readdirSync(config.uploadDir);
|
||||
logger.info(`Current local upload directory contents (${config.uploadDir}):`);
|
||||
files.forEach(file => logger.info(`- ${file}`));
|
||||
} else {
|
||||
logger.warn(`Local upload directory ${config.uploadDir} does not exist for listing.`);
|
||||
}
|
||||
} catch (err) {
|
||||
logger.error(`Failed to list directory contents: ${err.message}`);
|
||||
logger.error(`Failed to list local upload directory contents: ${err.message}`);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Dynamically generate PWA manifest into public folder
|
||||
generatePWAManifest();
|
||||
|
||||
// Track new connections
|
||||
server.on('connection', (connection) => {
|
||||
connections.add(connection);
|
||||
connection.on('close', () => {
|
||||
connections.delete(connection);
|
||||
});
|
||||
connection.on('close', () => connections.delete(connection));
|
||||
});
|
||||
|
||||
// Shutdown handler function
|
||||
let isShuttingDown = false; // Prevent multiple shutdowns
|
||||
let isShuttingDown = false;
|
||||
const shutdownHandler = async (signal) => {
|
||||
if (isShuttingDown) return;
|
||||
isShuttingDown = true;
|
||||
logger.info(`${signal} received. Shutting down gracefully...`);
|
||||
|
||||
// Start a shorter force shutdown timer
|
||||
const forceShutdownTimer = setTimeout(() => {
|
||||
logger.error('Force shutdown initiated');
|
||||
// eslint-disable-next-line n/no-process-exit
|
||||
logger.error('Force shutdown due to timeout.');
|
||||
process.exit(1);
|
||||
}, 3000); // 3 seconds maximum for total shutdown
|
||||
|
||||
}, 5000); // Increased slightly
|
||||
|
||||
try {
|
||||
// 1. Stop accepting new connections immediately
|
||||
server.unref();
|
||||
server.closeIdleConnections?.(); // Node 18+
|
||||
|
||||
// 2. Close all existing connections with a shorter timeout
|
||||
const connectionClosePromises = Array.from(connections).map(conn => {
|
||||
return new Promise(resolve => {
|
||||
conn.end(() => {
|
||||
connections.delete(conn);
|
||||
resolve();
|
||||
const closePromises = Array.from(connections).map(conn => new Promise(resolve => {
|
||||
conn.on('close', resolve); // Ensure close event resolves
|
||||
conn.destroy(); // Actively destroy connections
|
||||
}));
|
||||
|
||||
await Promise.race([
|
||||
Promise.all(closePromises),
|
||||
new Promise(resolve => setTimeout(resolve, 2000)) // Max 2s for connections
|
||||
]);
|
||||
connections.clear();
|
||||
|
||||
|
||||
await new Promise((resolve, reject) => {
|
||||
server.close((err) => {
|
||||
if (err) return reject(err);
|
||||
logger.info('Server closed.');
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// Wait for connections to close with a timeout
|
||||
await Promise.race([
|
||||
Promise.all(connectionClosePromises),
|
||||
new Promise(resolve => setTimeout(resolve, 1000)) // 1 second timeout for connections
|
||||
]);
|
||||
await executeCleanup(1500); // Max 1.5s for cleanup
|
||||
|
||||
// 3. Close the server
|
||||
await new Promise((resolve) => server.close(resolve));
|
||||
logger.info('Server closed');
|
||||
|
||||
// 4. Run cleanup tasks with a shorter timeout
|
||||
await executeCleanup(1000); // 1 second timeout for cleanup
|
||||
|
||||
// Clear the force shutdown timer since we completed gracefully
|
||||
clearTimeout(forceShutdownTimer);
|
||||
process.exitCode = 0;
|
||||
// eslint-disable-next-line n/no-process-exit
|
||||
process.exit(0); // Ensure immediate exit
|
||||
logger.info('Shutdown complete.');
|
||||
process.exit(0);
|
||||
} catch (error) {
|
||||
clearTimeout(forceShutdownTimer); // Clear timer on error too
|
||||
logger.error(`Error during shutdown: ${error.message}`);
|
||||
// eslint-disable-next-line n/no-process-exit
|
||||
process.exit(1);
|
||||
}
|
||||
};
|
||||
|
||||
// Handle both SIGTERM and SIGINT
|
||||
process.on('SIGTERM', () => shutdownHandler('SIGTERM'));
|
||||
process.on('SIGINT', () => shutdownHandler('SIGINT'));
|
||||
|
||||
return server;
|
||||
} catch (error) {
|
||||
logger.error('Failed to start server:', error);
|
||||
// Ensure process exits if startServer itself fails before listener setup
|
||||
process.exitCode = 1;
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Only start the server if this file is run directly
|
||||
if (require.main === module) {
|
||||
startServer().catch((error) => {
|
||||
logger.error('Server failed to start:', error);
|
||||
process.exitCode = 1;
|
||||
throw error;
|
||||
// Error already logged by startServer
|
||||
// process.exitCode is already set if startServer throws
|
||||
});
|
||||
}
|
||||
|
||||
module.exports = { app, startServer };
|
||||
module.exports = { app, startServer };
|
||||
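The shutdown handler above bounds each phase (closing connections, running executeCleanup) with Promise.race against a timer. A standalone sketch of that pattern, with a hypothetical slow task for illustration:

// withTimeout: resolves when the task finishes or the deadline passes, whichever comes first
function withTimeout(promise, ms) {
  return Promise.race([
    promise,
    new Promise(resolve => setTimeout(resolve, ms))
  ]);
}

// Hypothetical usage: give a slow cleanup task at most 2 seconds, then move on
async function demo() {
  const slowTask = new Promise(resolve => setTimeout(() => resolve('done'), 10_000));
  const result = await withTimeout(slowTask, 2000);
  // In the real handler the process exits afterwards, so lingering timers do not matter
  console.log(result === 'done' ? 'task finished in time' : 'deadline hit, continuing shutdown');
}
demo();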
59
src/storage/index.js
Normal file
@@ -0,0 +1,59 @@
|
||||
/**
|
||||
* Storage Adapter Factory
|
||||
* Reads the application configuration and exports the appropriate storage adapter
|
||||
* (either local or S3) based on the STORAGE_TYPE environment variable.
|
||||
* This provides a single point of import for storage operations throughout the app.
|
||||
*/
|
||||
|
||||
const { config } = require('../config'); // Assuming config is initialized before this runs
|
||||
const logger = require('../utils/logger');
|
||||
|
||||
let storageAdapter;
|
||||
|
||||
logger.info(`Initializing storage adapter based on STORAGE_TYPE: "${config.storageType}"`);
|
||||
|
||||
if (config.isDemoMode) {
|
||||
logger.warn('[Storage] DEMO MODE ENABLED. Using mock storage adapter.');
|
||||
// Demo mode currently falls back to the local adapter, whose writes are
// non-persistent in demo anyway. If strict separation is ever needed, a
// dedicated demoAdapter.js would be the cleaner solution:
// storageAdapter = require('./demoAdapter'); // would require creating demoAdapter.js
storageAdapter = require('./localAdapter');
logger.info('[Storage] Using Local Adapter for Demo Mode (operations will be mocked or non-persistent).');
|
||||
|
||||
} else if (config.storageType === 's3') {
|
||||
logger.info('[Storage] Using S3 Storage Adapter.');
|
||||
try {
|
||||
storageAdapter = require('./s3Adapter');
|
||||
} catch (error) {
|
||||
logger.error(`[Storage] Failed to load S3 Adapter: ${error.message}`);
|
||||
logger.error('[Storage] Check S3 configuration environment variables and AWS SDK installation.');
|
||||
process.exit(1); // Exit if the configured adapter fails to load
|
||||
}
|
||||
} else {
|
||||
// Default to local storage if type is 'local' or invalid/not specified
|
||||
if (config.storageType !== 'local') {
|
||||
logger.warn(`[Storage] Invalid or unspecified STORAGE_TYPE "${config.storageType}", defaulting to "local".`);
|
||||
}
|
||||
logger.info('[Storage] Using Local Storage Adapter.');
|
||||
try {
|
||||
storageAdapter = require('./localAdapter');
|
||||
} catch (error) {
|
||||
logger.error(`[Storage] Failed to load Local Adapter: ${error.message}`);
|
||||
process.exit(1); // Exit if the default adapter fails
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure the selected adapter is valid before exporting
|
||||
if (!storageAdapter || typeof storageAdapter.initUpload !== 'function') {
|
||||
logger.error('[Storage] Failed to initialize a valid storage adapter. Exiting.');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
logger.success(`[Storage] Storage adapter "${config.storageType}" initialized successfully.`);
|
||||
|
||||
module.exports = { storageAdapter };
|
||||
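A hedged sketch of how another module is expected to consume this factory; the route wiring shown here is illustrative, and only the storageAdapter import and the interface method names come from this changeset:

// some-route.js (illustrative, Express-style handler assumed)
const { storageAdapter } = require('../storage');

async function handleInit(req, res) {
  try {
    // initUpload(filename, fileSize, clientBatchId) is the common adapter interface
    const { uploadId } = await storageAdapter.initUpload(req.body.filename, req.body.fileSize, req.body.batchId);
    res.json({ uploadId });
  } catch (err) {
    res.status(500).json({ error: err.message });
  }
}

module.exports = { handleInit };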
641
src/storage/localAdapter.js
Normal file
@@ -0,0 +1,641 @@
|
||||
/**
|
||||
* Local Storage Adapter
|
||||
* Handles file operations for storing files on the local filesystem.
|
||||
* Implements the storage interface expected by the application routes.
|
||||
*/
|
||||
|
||||
const fs = require('fs').promises;
|
||||
const fsSync = require('fs'); // For synchronous checks like existsSync
|
||||
const path = require('path');
|
||||
const crypto = require('crypto');
|
||||
const { config } = require('../config');
|
||||
const logger = require('../utils/logger');
|
||||
const {
|
||||
getUniqueFolderPath,
|
||||
sanitizePathPreserveDirs,
|
||||
isValidBatchId,
|
||||
formatFileSize // Keep formatFileSize accessible if needed by notifications later
|
||||
} = require('../utils/fileUtils');
|
||||
const { sendNotification } = require('../services/notifications'); // Needed for completion
|
||||
|
||||
const METADATA_DIR = path.join(config.uploadDir, '.metadata');
|
||||
const UPLOAD_TIMEOUT = 30 * 60 * 1000; // 30 minutes timeout for stale uploads
|
||||
|
||||
// --- In-Memory Maps (Session-level optimizations) ---
|
||||
// Store folder name mappings for batch uploads (avoids FS lookups during session)
|
||||
// NOTE: This state is specific to this adapter instance and might not scale across multiple server instances.
|
||||
const folderMappings = new Map();
|
||||
// Store batch activity timestamps (for cleaning up stale batches/folder mappings)
|
||||
const batchActivity = new Map();
|
||||
const BATCH_TIMEOUT = 30 * 60 * 1000; // 30 minutes for batch/folderMapping cleanup
|
||||
|
||||
// --- Metadata Helper Functions (Copied and adapted from original upload.js) ---
|
||||
|
||||
/**
|
||||
* Ensures the metadata directory exists.
|
||||
* Should be called once during adapter initialization or before first use.
|
||||
*/
|
||||
async function ensureMetadataDirExists() {
|
||||
try {
|
||||
if (!fsSync.existsSync(METADATA_DIR)) {
|
||||
await fs.mkdir(METADATA_DIR, { recursive: true });
|
||||
logger.info(`[Local Adapter] Created metadata directory: ${METADATA_DIR}`);
|
||||
}
|
||||
// Check writability
|
||||
await fs.access(METADATA_DIR, fsSync.constants.W_OK);
|
||||
} catch (err) {
|
||||
logger.error(`[Local Adapter] Metadata directory error (${METADATA_DIR}): ${err.message}`);
|
||||
throw new Error(`Failed to access or create metadata directory: ${METADATA_DIR}`);
|
||||
}
|
||||
}
|
||||
|
||||
async function readUploadMetadata(uploadId) {
|
||||
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
|
||||
logger.warn(`[Local Adapter] Attempted to read metadata with invalid uploadId: ${uploadId}`);
|
||||
return null;
|
||||
}
|
||||
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
|
||||
try {
|
||||
const data = await fs.readFile(metaFilePath, 'utf8');
|
||||
return JSON.parse(data);
|
||||
} catch (err) {
|
||||
if (err.code === 'ENOENT') {
|
||||
return null; // Metadata file doesn't exist
|
||||
}
|
||||
logger.error(`[Local Adapter] Error reading metadata for ${uploadId}: ${err.message}`);
|
||||
throw err; // Rethrow other errors
|
||||
}
|
||||
}
|
||||
|
||||
async function writeUploadMetadata(uploadId, metadata) {
|
||||
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
|
||||
logger.error(`[Local Adapter] Attempted to write metadata with invalid uploadId: ${uploadId}`);
|
||||
return;
|
||||
}
|
||||
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
|
||||
metadata.lastActivity = Date.now(); // Update timestamp on every write
|
||||
// Declare the temp path outside the try block so the catch handler can reference it for cleanup
const tempMetaPath = `${metaFilePath}.${crypto.randomBytes(4).toString('hex')}.tmp`;
try {
await fs.writeFile(tempMetaPath, JSON.stringify(metadata, null, 2));
await fs.rename(tempMetaPath, metaFilePath);
} catch (err) {
logger.error(`[Local Adapter] Error writing metadata for ${uploadId}: ${err.message}`);
try { await fs.unlink(tempMetaPath); } catch (unlinkErr) { /* ignore */ }
throw err;
}
|
||||
}
|
||||
|
||||
async function deleteUploadMetadata(uploadId) {
|
||||
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
|
||||
logger.warn(`[Local Adapter] Attempted to delete metadata with invalid uploadId: ${uploadId}`);
|
||||
return;
|
||||
}
|
||||
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
|
||||
try {
|
||||
await fs.unlink(metaFilePath);
|
||||
logger.debug(`[Local Adapter] Deleted metadata file: ${uploadId}.meta`);
|
||||
} catch (err) {
|
||||
if (err.code !== 'ENOENT') { // Ignore if already deleted
|
||||
logger.error(`[Local Adapter] Error deleting metadata file ${uploadId}.meta: ${err.message}`);
|
||||
}
|
||||
}
|
||||
}
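For orientation, each persisted .meta file is plain JSON. Based on the fields written later in initUpload, a typical .metadata/<uploadId>.meta looks roughly like this (all values illustrative):

// Example contents of .metadata/<uploadId>.meta (illustrative values only)
const exampleMetadata = {
  uploadId: '3f9c2a7d1b6e4f0a9c8d7e6f5a4b3c2d',
  originalFilename: 'MyFolder/report.pdf',                          // path as received from the client
  filePath: '/usr/src/app/uploads/MyFolder/report.pdf',             // final destination (upload dir assumed)
  partialFilePath: '/usr/src/app/uploads/MyFolder/report.pdf.partial',
  fileSize: 1048576,
  bytesReceived: 262144,
  batchId: '1714000000000-a1b2c3d4',
  createdAt: 1714000000000,
  lastActivity: 1714000123456
};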
|
||||
|
||||
// --- Batch Cleanup (In-memory session state cleanup) ---
|
||||
// This logic remains relevant for the in-memory folderMappings if used across batches.
|
||||
let batchCleanupInterval;
|
||||
function startBatchCleanup() {
|
||||
if (batchCleanupInterval) clearInterval(batchCleanupInterval);
|
||||
batchCleanupInterval = setInterval(() => {
|
||||
const now = Date.now();
|
||||
logger.info(`[Local Adapter] Running batch session cleanup, checking ${batchActivity.size} active sessions`);
|
||||
let cleanedCount = 0;
|
||||
for (const [batchId, lastActivity] of batchActivity.entries()) {
|
||||
if (now - lastActivity >= BATCH_TIMEOUT) {
|
||||
logger.info(`[Local Adapter] Cleaning up inactive batch session: ${batchId}`);
|
||||
batchActivity.delete(batchId);
|
||||
// Clean up associated folder mappings
|
||||
for (const key of folderMappings.keys()) {
|
||||
if (key.endsWith(`-${batchId}`)) {
|
||||
folderMappings.delete(key);
|
||||
}
|
||||
}
|
||||
cleanedCount++;
|
||||
}
|
||||
}
|
||||
if (cleanedCount > 0) logger.info(`[Local Adapter] Cleaned up ${cleanedCount} inactive batch sessions.`);
|
||||
}, 5 * 60 * 1000); // Check every 5 minutes
|
||||
batchCleanupInterval.unref();
|
||||
}
|
||||
// Ensure metadata dir exists before starting cleanup or other ops
|
||||
ensureMetadataDirExists().then(() => {
|
||||
logger.info('[Local Adapter] Initialized.');
|
||||
// Start batch cleanup only after ensuring dir exists
|
||||
if (!process.env.DISABLE_BATCH_CLEANUP) {
|
||||
startBatchCleanup();
|
||||
}
|
||||
}).catch(err => {
|
||||
logger.error(`[Local Adapter] Initialization failed: ${err.message}`);
|
||||
// Potentially exit or prevent server start if metadata dir is critical
|
||||
process.exit(1);
|
||||
});
|
||||
|
||||
|
||||
// --- Interface Implementation ---
|
||||
|
||||
/**
|
||||
* Initializes an upload session.
|
||||
* @param {string} filename - Original filename/path from client.
|
||||
* @param {number} fileSize - Total size of the file.
|
||||
* @param {string} clientBatchId - Optional batch ID from client.
|
||||
* @returns {Promise<{uploadId: string}>} Object containing the application's upload ID.
|
||||
*/
|
||||
async function initUpload(filename, fileSize, clientBatchId) {
|
||||
await ensureMetadataDirExists(); // Ensure it exists before proceeding
|
||||
|
||||
const size = Number(fileSize);
|
||||
// Basic validations moved to route handler, assume valid inputs here
|
||||
|
||||
const batchId = clientBatchId || `${Date.now()}-${crypto.randomBytes(4).toString('hex').substring(0, 9)}`;
|
||||
if (clientBatchId && !isValidBatchId(batchId)) {
|
||||
throw new Error('Invalid batch ID format'); // Throw error for route handler
|
||||
}
|
||||
batchActivity.set(batchId, Date.now()); // Track batch session activity
|
||||
|
||||
// --- Path handling and Sanitization ---
|
||||
const sanitizedFilename = sanitizePathPreserveDirs(filename);
|
||||
const safeFilename = path.normalize(sanitizedFilename)
|
||||
.replace(/^(\.\.(\/|\\|$))+/, '')
|
||||
.replace(/\\/g, '/')
|
||||
.replace(/^\/+/, '');
|
||||
logger.info(`[Local Adapter] Init request for: ${safeFilename}`);
|
||||
|
||||
// --- Determine Paths & Handle Folders ---
|
||||
const uploadId = crypto.randomBytes(16).toString('hex');
|
||||
let finalFilePath = path.resolve(config.uploadDir, safeFilename); // Use resolve for absolute path
|
||||
const pathParts = safeFilename.split('/').filter(Boolean);
|
||||
|
||||
if (pathParts.length > 1) {
|
||||
const originalFolderName = pathParts[0];
|
||||
const folderMapKey = `${originalFolderName}-${batchId}`;
|
||||
let newFolderName = folderMappings.get(folderMapKey);
|
||||
const relativeFolderPath = newFolderName || originalFolderName; // Folder name relative to uploadDir
|
||||
|
||||
if (!newFolderName) {
|
||||
const baseFolderPath = path.resolve(config.uploadDir, relativeFolderPath);
|
||||
await fs.mkdir(path.dirname(baseFolderPath), { recursive: true }); // Ensure parent of potential new folder exists
|
||||
try {
|
||||
await fs.mkdir(baseFolderPath, { recursive: false }); // Try creating the original/mapped name
|
||||
newFolderName = originalFolderName; // Success, use original
|
||||
} catch (err) {
|
||||
if (err.code === 'EEXIST') {
|
||||
// Folder exists, generate a unique name for this batch
|
||||
const uniqueFolderPath = await getUniqueFolderPath(baseFolderPath); // Pass absolute path
|
||||
newFolderName = path.basename(uniqueFolderPath); // Get only the unique folder name part
|
||||
logger.info(`[Local Adapter] Folder "${originalFolderName}" exists or conflict, using unique "${newFolderName}" for batch ${batchId}`);
|
||||
// No need to mkdir again, getUniqueFolderPath created it.
|
||||
} else {
|
||||
logger.error(`[Local Adapter] Error creating directory ${baseFolderPath}: ${err.message}`);
|
||||
throw err; // Re-throw other errors
|
||||
}
|
||||
}
|
||||
folderMappings.set(folderMapKey, newFolderName); // Store mapping for this batch
|
||||
}
|
||||
// Reconstruct the final path using the potentially unique folder name
|
||||
pathParts[0] = newFolderName;
|
||||
finalFilePath = path.resolve(config.uploadDir, ...pathParts);
|
||||
// Ensure the immediate parent directory for the file exists
|
||||
await fs.mkdir(path.dirname(finalFilePath), { recursive: true });
|
||||
} else {
|
||||
// Ensure base upload dir exists (already done by ensureLocalUploadDirExists, but safe to repeat)
|
||||
await fs.mkdir(config.uploadDir, { recursive: true });
|
||||
}
|
||||
|
||||
// --- Check Final Path Collision & Get Unique Name if Needed ---
|
||||
// Check if the *final* destination exists (not the partial)
|
||||
let checkPath = finalFilePath;
|
||||
let counter = 1;
|
||||
while (fsSync.existsSync(checkPath)) {
|
||||
logger.warn(`[Local Adapter] Final destination file already exists: ${checkPath}. Generating unique name.`);
|
||||
const dir = path.dirname(finalFilePath);
|
||||
const ext = path.extname(finalFilePath);
|
||||
const baseName = path.basename(finalFilePath, ext);
|
||||
checkPath = path.resolve(dir, `${baseName} (${counter})${ext}`); // Use resolve
|
||||
counter++;
|
||||
}
|
||||
if (checkPath !== finalFilePath) {
|
||||
logger.info(`[Local Adapter] Using unique final path: ${checkPath}`);
|
||||
finalFilePath = checkPath;
|
||||
// If path changed, ensure directory exists again (might be needed if baseName contained '/')
|
||||
await fs.mkdir(path.dirname(finalFilePath), { recursive: true });
|
||||
}
|
||||
|
||||
const partialFilePath = finalFilePath + '.partial';
|
||||
|
||||
// --- Create and Persist Metadata ---
|
||||
const metadata = {
|
||||
uploadId,
|
||||
originalFilename: safeFilename, // Store the path as received by client
|
||||
filePath: finalFilePath, // The final, possibly unique, path
|
||||
partialFilePath,
|
||||
fileSize: size,
|
||||
bytesReceived: 0,
|
||||
batchId,
|
||||
createdAt: Date.now(),
|
||||
lastActivity: Date.now()
|
||||
};
|
||||
|
||||
await writeUploadMetadata(uploadId, metadata);
|
||||
logger.info(`[Local Adapter] Initialized upload: ${uploadId} for ${safeFilename} -> ${finalFilePath}`);
|
||||
|
||||
// --- Handle Zero-Byte Files ---
|
||||
if (size === 0) {
|
||||
try {
|
||||
await fs.writeFile(finalFilePath, ''); // Create the empty file directly
|
||||
logger.success(`[Local Adapter] Completed zero-byte file: ${metadata.originalFilename} as ${finalFilePath}`);
|
||||
await deleteUploadMetadata(uploadId); // Clean up metadata
|
||||
sendNotification(metadata.originalFilename, 0, config); // Send notification
|
||||
} catch (writeErr) {
|
||||
logger.error(`[Local Adapter] Failed to create zero-byte file ${finalFilePath}: ${writeErr.message}`);
|
||||
await deleteUploadMetadata(uploadId).catch(() => {}); // Attempt cleanup
|
||||
throw writeErr; // Let the route handler catch it
|
||||
}
|
||||
}
|
||||
|
||||
return { uploadId };
|
||||
}
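The collision loop above produces "name (1).ext", "name (2).ext", and so on for the final destination. A standalone sketch of that naming rule, using the same synchronous existence check the adapter uses:

const pathLib = require('path');
const fsSyncLib = require('fs');

// Returns the first non-existing variant: file.txt, file (1).txt, file (2).txt, ...
function uniqueFinalPath(desiredPath) {
  let candidate = desiredPath;
  let counter = 1;
  while (fsSyncLib.existsSync(candidate)) {
    const dir = pathLib.dirname(desiredPath);
    const ext = pathLib.extname(desiredPath);
    const base = pathLib.basename(desiredPath, ext);
    candidate = pathLib.resolve(dir, `${base} (${counter})${ext}`);
    counter++;
  }
  return candidate;
}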
|
||||
|
||||
/**
|
||||
* Stores a chunk of data for a given uploadId.
|
||||
* @param {string} uploadId - The application's upload ID.
|
||||
* @param {Buffer} chunk - The data chunk to store.
|
||||
* @returns {Promise<{bytesReceived: number, progress: number, completed: boolean}>} Upload status.
|
||||
*/
|
||||
async function storeChunk(uploadId, chunk) {
|
||||
const chunkSize = chunk.length;
|
||||
if (!chunkSize) {
|
||||
throw new Error('Empty chunk received');
|
||||
}
|
||||
|
||||
const metadata = await readUploadMetadata(uploadId);
|
||||
|
||||
if (!metadata) {
|
||||
// The upload may have completed just before this chunk arrived, but that is hard
// to verify without the final path from metadata; returning a specific error or
// status code might be better.
|
||||
logger.warn(`[Local Adapter] Metadata not found for chunk: ${uploadId}. Upload might be complete or cancelled.`);
|
||||
throw new Error('Upload session not found or already completed'); // Let route handler return 404
|
||||
}
|
||||
|
||||
// Update batch activity
|
||||
if (metadata.batchId && isValidBatchId(metadata.batchId)) {
|
||||
batchActivity.set(metadata.batchId, Date.now());
|
||||
}
|
||||
|
||||
// --- Sanity Checks ---
|
||||
if (metadata.bytesReceived >= metadata.fileSize) {
|
||||
logger.warn(`[Local Adapter] Received chunk for already completed upload ${uploadId}. Finalizing again.`);
|
||||
// Attempt to finalize just in case, then return completed status
|
||||
await completeUpload(uploadId); // This handles metadata deletion etc.
|
||||
return { bytesReceived: metadata.fileSize, progress: 100, completed: true };
|
||||
}
|
||||
|
||||
let chunkToWrite = chunk;
|
||||
let actualChunkSize = chunkSize;
|
||||
|
||||
// Prevent writing beyond expected file size
|
||||
if (metadata.bytesReceived + chunkSize > metadata.fileSize) {
|
||||
logger.warn(`[Local Adapter] Chunk for ${uploadId} exceeds expected size. Truncating.`);
|
||||
const bytesToWrite = metadata.fileSize - metadata.bytesReceived;
|
||||
chunkToWrite = chunk.slice(0, bytesToWrite);
|
||||
actualChunkSize = chunkToWrite.length;
|
||||
if (actualChunkSize <= 0) {
|
||||
logger.info(`[Local Adapter] Upload ${uploadId} already has expected bytes. Skipping write.`);
|
||||
metadata.bytesReceived = metadata.fileSize; // Correct state for completion check
|
||||
}
|
||||
}
|
||||
|
||||
// --- Write Chunk (Append Mode) ---
|
||||
if (actualChunkSize > 0) {
|
||||
try {
|
||||
await fs.appendFile(metadata.partialFilePath, chunkToWrite);
|
||||
metadata.bytesReceived += actualChunkSize;
|
||||
} catch (writeErr) {
|
||||
logger.error(`[Local Adapter] Failed to write chunk for ${uploadId} to ${metadata.partialFilePath}: ${writeErr.message}`);
|
||||
throw new Error(`Failed to write chunk for ${uploadId}: ${writeErr.code}`); // Propagate error
|
||||
}
|
||||
}
|
||||
|
||||
// --- Update State ---
|
||||
const progress = metadata.fileSize === 0 ? 100 :
|
||||
Math.min(Math.round((metadata.bytesReceived / metadata.fileSize) * 100), 100);
|
||||
|
||||
logger.debug(`[Local Adapter] Chunk written for ${uploadId}: ${metadata.bytesReceived}/${metadata.fileSize} (${progress}%)`);
|
||||
|
||||
// Persist updated metadata *before* final completion check
|
||||
await writeUploadMetadata(uploadId, metadata);
|
||||
|
||||
// --- Check for Completion ---
|
||||
const completed = metadata.bytesReceived >= metadata.fileSize;
|
||||
if (completed) {
|
||||
// Don't call completeUpload here, let the route handler do it
|
||||
// after sending the final progress response back to the client.
|
||||
logger.info(`[Local Adapter] Upload ${uploadId} ready for completion (${metadata.bytesReceived} bytes).`);
|
||||
}
|
||||
|
||||
return { bytesReceived: metadata.bytesReceived, progress, completed };
|
||||
}
|
||||
|
||||
/**
|
||||
* Finalizes a completed upload.
|
||||
* @param {string} uploadId - The application's upload ID.
|
||||
* @returns {Promise<{filename: string, size: number}>} Details of the completed file.
|
||||
*/
|
||||
async function completeUpload(uploadId) {
|
||||
const metadata = await readUploadMetadata(uploadId);
|
||||
if (!metadata) {
|
||||
// Might have been completed by a concurrent request. Check if final file exists.
|
||||
// This is still tricky without the metadata. Log a warning.
|
||||
logger.warn(`[Local Adapter] completeUpload called for ${uploadId}, but metadata is missing. Assuming already completed.`);
|
||||
// Without metadata we cannot report the filename or size, so throw; the calling route expects that information.
|
||||
throw new Error('Upload completion failed: Metadata not found');
|
||||
}
|
||||
|
||||
// Ensure we have received all bytes (redundant check, but safe)
|
||||
if (metadata.bytesReceived < metadata.fileSize) {
|
||||
logger.error(`[Local Adapter] Attempted to complete upload ${uploadId} prematurely. Received ${metadata.bytesReceived}/${metadata.fileSize} bytes.`);
|
||||
throw new Error('Cannot complete upload: Not all bytes received.');
|
||||
}
|
||||
|
||||
try {
|
||||
// Ensure partial file exists before rename
|
||||
await fs.access(metadata.partialFilePath);
|
||||
await fs.rename(metadata.partialFilePath, metadata.filePath);
|
||||
logger.success(`[Local Adapter] Finalized: ${metadata.originalFilename} as ${metadata.filePath} (${metadata.fileSize} bytes)`);
|
||||
|
||||
// Clean up metadata AFTER successful rename
|
||||
await deleteUploadMetadata(uploadId);
|
||||
|
||||
// Send notification
|
||||
sendNotification(metadata.originalFilename, metadata.fileSize, config);
|
||||
|
||||
return { filename: metadata.originalFilename, size: metadata.fileSize, finalPath: metadata.filePath };
|
||||
|
||||
} catch (renameErr) {
|
||||
if (renameErr.code === 'ENOENT') {
|
||||
// Partial file missing. Maybe completed by another request? Check final file.
|
||||
try {
|
||||
await fs.access(metadata.filePath);
|
||||
logger.warn(`[Local Adapter] Partial file ${metadata.partialFilePath} missing for ${uploadId}, but final file ${metadata.filePath} exists. Assuming already finalized.`);
|
||||
await deleteUploadMetadata(uploadId).catch(()=>{}); // Cleanup metadata anyway
|
||||
return { filename: metadata.originalFilename, size: metadata.fileSize, finalPath: metadata.filePath };
|
||||
} catch (finalAccessErr) {
|
||||
logger.error(`[Local Adapter] CRITICAL: Partial file ${metadata.partialFilePath} missing and final file ${metadata.filePath} not found during completion of ${uploadId}.`);
|
||||
await deleteUploadMetadata(uploadId).catch(()=>{}); // Cleanup metadata to prevent retries
|
||||
throw new Error(`Completion failed: Partial file missing and final file not found.`);
|
||||
}
|
||||
} else {
|
||||
logger.error(`[Local Adapter] CRITICAL: Failed to rename ${metadata.partialFilePath} to ${metadata.filePath}: ${renameErr.message}`);
|
||||
// Keep metadata and partial file for potential manual recovery.
|
||||
throw renameErr; // Propagate the error
|
||||
}
|
||||
}
|
||||
}
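Putting the three calls together, a hedged end-to-end sketch of the adapter lifecycle for an in-memory buffer; the chunk size and calling context are assumptions, while the method names and return shapes come from this adapter:

const { storageAdapter } = require('../storage'); // resolves to this local adapter when STORAGE_TYPE=local

async function uploadBuffer(relativePath, buffer) {
  const { uploadId } = await storageAdapter.initUpload(relativePath, buffer.length, null);
  if (buffer.length === 0) return; // zero-byte files are finalized inside initUpload

  const CHUNK = 64 * 1024; // arbitrary chunk size for the sketch
  let status;
  for (let offset = 0; offset < buffer.length; offset += CHUNK) {
    status = await storageAdapter.storeChunk(uploadId, buffer.subarray(offset, offset + CHUNK));
  }
  if (status.completed) {
    const { filename, size, finalPath } = await storageAdapter.completeUpload(uploadId);
    console.log(`Stored ${filename} (${size} bytes) at ${finalPath}`);
  }
}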
|
||||
|
||||
/**
|
||||
* Aborts an ongoing upload.
|
||||
* @param {string} uploadId - The application's upload ID.
|
||||
* @returns {Promise<void>}
|
||||
*/
|
||||
async function abortUpload(uploadId) {
|
||||
const metadata = await readUploadMetadata(uploadId);
|
||||
if (!metadata) {
|
||||
logger.warn(`[Local Adapter] Abort request for non-existent or completed upload: ${uploadId}`);
|
||||
return; // Nothing to abort
|
||||
}
|
||||
|
||||
// Delete partial file first
|
||||
try {
|
||||
await fs.unlink(metadata.partialFilePath);
|
||||
logger.info(`[Local Adapter] Deleted partial file on cancellation: ${metadata.partialFilePath}`);
|
||||
} catch (unlinkErr) {
|
||||
if (unlinkErr.code !== 'ENOENT') { // Ignore if already gone
|
||||
logger.error(`[Local Adapter] Failed to delete partial file ${metadata.partialFilePath} on cancel: ${unlinkErr.message}`);
|
||||
// Continue to delete metadata anyway
|
||||
}
|
||||
}
|
||||
|
||||
// Then delete metadata file
|
||||
await deleteUploadMetadata(uploadId);
|
||||
logger.info(`[Local Adapter] Upload cancelled and cleaned up: ${uploadId} (${metadata.originalFilename})`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Lists files in the upload directory.
|
||||
* @returns {Promise<Array<{filename: string, size: number, formattedSize: string, uploadDate: Date}>>} List of files.
|
||||
*/
|
||||
async function listFiles() {
|
||||
let entries = [];
|
||||
try {
|
||||
entries = await fs.readdir(config.uploadDir, { withFileTypes: true });
|
||||
} catch (err) {
|
||||
if (err.code === 'ENOENT') {
|
||||
logger.warn('[Local Adapter] Upload directory does not exist for listing.');
|
||||
return []; // Return empty list if dir doesn't exist
|
||||
}
|
||||
logger.error(`[Local Adapter] Failed to read upload directory: ${err.message}`);
|
||||
throw err; // Re-throw other errors
|
||||
}
|
||||
|
||||
const fileDetails = [];
|
||||
for (const entry of entries) {
|
||||
// Skip directories and the special metadata directory/files within it
|
||||
if (!entry.isFile() || entry.name === '.metadata' || entry.name.endsWith('.partial') || entry.name.endsWith('.meta') || entry.name.endsWith('.tmp')) {
|
||||
continue;
|
||||
}
|
||||
|
||||
try {
|
||||
const filePath = path.join(config.uploadDir, entry.name);
|
||||
const stats = await fs.stat(filePath);
|
||||
fileDetails.push({
|
||||
filename: entry.name, // Use the actual filename on disk
|
||||
size: stats.size,
|
||||
formattedSize: formatFileSize(stats.size), // Use fileUtils helper
|
||||
uploadDate: stats.mtime // Use modification time as upload date
|
||||
});
|
||||
} catch (statErr) {
|
||||
// Handle case where file might be deleted between readdir and stat
|
||||
if (statErr.code !== 'ENOENT') {
|
||||
logger.error(`[Local Adapter] Failed to get stats for file ${entry.name}: ${statErr.message}`);
|
||||
}
|
||||
// Skip this file if stat fails
|
||||
}
|
||||
}
|
||||
|
||||
// Sort by date, newest first
|
||||
fileDetails.sort((a, b) => b.uploadDate.getTime() - a.uploadDate.getTime());
|
||||
|
||||
return fileDetails;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets information needed to download a file.
|
||||
* For local storage, this is the file path.
|
||||
* @param {string} filename - The name of the file to download.
|
||||
* @returns {Promise<{type: string, value: string}>} Object indicating type ('path') and value (the full file path).
|
||||
*/
|
||||
async function getDownloadUrlOrStream(filename) {
|
||||
// IMPORTANT: Sanitize filename input to prevent directory traversal
|
||||
const safeBaseName = path.basename(filename);
|
||||
if (safeBaseName !== filename || filename.includes('..')) {
|
||||
logger.error(`[Local Adapter] Invalid filename detected for download: ${filename}`);
|
||||
throw new Error('Invalid filename');
|
||||
}
|
||||
|
||||
const filePath = path.resolve(config.uploadDir, safeBaseName); // Use resolve for security
|
||||
|
||||
try {
|
||||
await fs.access(filePath, fsSync.constants.R_OK); // Check existence and readability
|
||||
return { type: 'path', value: filePath };
|
||||
} catch (err) {
|
||||
if (err.code === 'ENOENT') {
|
||||
logger.warn(`[Local Adapter] Download request for non-existent file: ${filePath}`);
|
||||
throw new Error('File not found'); // Specific error for 404 handling
|
||||
} else if (err.code === 'EACCES') {
|
||||
logger.error(`[Local Adapter] Permission denied trying to access file for download: ${filePath}`);
|
||||
throw new Error('Permission denied');
|
||||
} else {
|
||||
logger.error(`[Local Adapter] Error accessing file for download ${filePath}: ${err.message}`);
|
||||
throw err; // Re-throw other errors
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes a file from the local storage.
|
||||
* @param {string} filename - The name of the file to delete.
|
||||
* @returns {Promise<void>}
|
||||
*/
|
||||
async function deleteFile(filename) {
|
||||
// IMPORTANT: Sanitize filename input
|
||||
const safeBaseName = path.basename(filename);
|
||||
if (safeBaseName !== filename || filename.includes('..')) {
|
||||
logger.error(`[Local Adapter] Invalid filename detected for delete: ${filename}`);
|
||||
throw new Error('Invalid filename');
|
||||
}
|
||||
|
||||
const filePath = path.resolve(config.uploadDir, safeBaseName);
|
||||
|
||||
try {
|
||||
await fs.unlink(filePath);
|
||||
logger.info(`[Local Adapter] Deleted file: ${filePath}`);
|
||||
} catch (err) {
|
||||
if (err.code === 'ENOENT') {
|
||||
logger.warn(`[Local Adapter] Delete request for non-existent file: ${filePath}`);
|
||||
throw new Error('File not found'); // Specific error for 404
|
||||
} else {
|
||||
logger.error(`[Local Adapter] Failed to delete file ${filePath}: ${err.message}`);
|
||||
throw err; // Re-throw other errors
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Cleans up stale resources (incomplete uploads based on metadata).
|
||||
* @returns {Promise<void>}
|
||||
*/
|
||||
async function cleanupStale() {
|
||||
logger.info('[Local Adapter] Running cleanup for stale metadata/partial uploads...');
|
||||
let cleanedCount = 0;
|
||||
let checkedCount = 0;
|
||||
|
||||
try {
|
||||
// Ensure metadata directory exists before trying to read it
|
||||
await ensureMetadataDirExists(); // Re-check just in case
|
||||
|
||||
const files = await fs.readdir(METADATA_DIR);
|
||||
const now = Date.now();
|
||||
|
||||
for (const file of files) {
|
||||
if (file.endsWith('.meta')) {
|
||||
checkedCount++;
|
||||
const uploadId = file.replace('.meta', '');
|
||||
const metaFilePath = path.join(METADATA_DIR, file);
|
||||
let metadata;
|
||||
|
||||
try {
|
||||
const data = await fs.readFile(metaFilePath, 'utf8');
|
||||
metadata = JSON.parse(data);
|
||||
|
||||
// Check inactivity
|
||||
if (now - (metadata.lastActivity || metadata.createdAt || 0) > UPLOAD_TIMEOUT) {
|
||||
logger.warn(`[Local Adapter] Found stale metadata: ${file}. Last activity: ${new Date(metadata.lastActivity || metadata.createdAt)}`);
|
||||
|
||||
// Attempt to delete partial file
|
||||
if (metadata.partialFilePath) {
|
||||
try {
|
||||
await fs.unlink(metadata.partialFilePath);
|
||||
logger.info(`[Local Adapter] Deleted stale partial file: ${metadata.partialFilePath}`);
|
||||
} catch (unlinkPartialErr) {
|
||||
if (unlinkPartialErr.code !== 'ENOENT') {
|
||||
logger.error(`[Local Adapter] Failed to delete stale partial ${metadata.partialFilePath}: ${unlinkPartialErr.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Attempt to delete metadata file
|
||||
await deleteUploadMetadata(uploadId); // Use helper
|
||||
cleanedCount++;
|
||||
|
||||
}
|
||||
} catch (readErr) {
|
||||
logger.error(`[Local Adapter] Error reading/parsing ${metaFilePath} during cleanup: ${readErr.message}. Skipping.`);
|
||||
// Attempt to delete the corrupt metadata file so it does not block future cleanup runs
await fs.unlink(metaFilePath).catch(()=>{ logger.warn(`[Local Adapter] Failed to delete potentially corrupt metadata file: ${metaFilePath}`) });
|
||||
}
|
||||
} else if (file.endsWith('.tmp')) {
|
||||
// Clean up potential leftover temp metadata files
|
||||
const tempMetaPath = path.join(METADATA_DIR, file);
|
||||
try {
|
||||
const stats = await fs.stat(tempMetaPath);
|
||||
// Use a shorter timeout for temp files? e.g., UPLOAD_TIMEOUT / 2
|
||||
if (now - stats.mtime.getTime() > UPLOAD_TIMEOUT) {
|
||||
logger.warn(`[Local Adapter] Deleting stale temporary metadata file: ${file}`);
|
||||
await fs.unlink(tempMetaPath);
|
||||
}
|
||||
} catch (statErr) {
|
||||
if (statErr.code !== 'ENOENT') {
|
||||
logger.error(`[Local Adapter] Error checking temp metadata file ${tempMetaPath}: ${statErr.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (checkedCount > 0 || cleanedCount > 0) {
|
||||
logger.info(`[Local Adapter] Metadata cleanup finished. Checked: ${checkedCount}, Cleaned stale: ${cleanedCount}.`);
|
||||
}
|
||||
|
||||
} catch (err) {
|
||||
if (err.code === 'ENOENT' && err.path === METADATA_DIR) {
|
||||
// This case should be handled by ensureMetadataDirExists, but log just in case
|
||||
logger.warn('[Local Adapter] Metadata directory not found during cleanup scan.');
|
||||
} else {
|
||||
logger.error(`[Local Adapter] Error during metadata cleanup scan: ${err.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Note: Empty folder cleanup is handled by the main cleanup utility for now.
|
||||
// If needed, the logic from utils/cleanup.js -> cleanupEmptyFolders could be moved here.
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
initUpload,
|
||||
storeChunk,
|
||||
completeUpload,
|
||||
abortUpload,
|
||||
listFiles,
|
||||
getDownloadUrlOrStream,
|
||||
deleteFile,
|
||||
cleanupStale
|
||||
};
|
||||
439
src/storage/s3Adapter.js
Normal file
@@ -0,0 +1,439 @@
|
||||
/**
|
||||
* S3 Storage Adapter
|
||||
* Handles file operations for storing files on AWS S3 or S3-compatible services.
|
||||
* Implements the storage interface expected by the application routes.
|
||||
* Uses local files in '.metadata' directory to track multipart upload progress.
|
||||
* Attempts to make top-level folder prefixes unique per batch if collisions occur.
|
||||
*/
|
||||
|
||||
const {
|
||||
S3Client,
|
||||
CreateMultipartUploadCommand,
|
||||
UploadPartCommand,
|
||||
CompleteMultipartUploadCommand,
|
||||
AbortMultipartUploadCommand,
|
||||
ListObjectsV2Command,
|
||||
GetObjectCommand,
|
||||
DeleteObjectCommand,
|
||||
PutObjectCommand,
|
||||
HeadObjectCommand
|
||||
} = require('@aws-sdk/client-s3');
|
||||
const { getSignedUrl } = require("@aws-sdk/s3-request-presigner");
|
||||
const fs = require('fs').promises;
|
||||
const fsSync = require('fs');
|
||||
const path = require('path');
|
||||
const crypto = require('crypto');
|
||||
const util = require('util'); // Only needed if the commented-out detailed error logging below is enabled
|
||||
const { config } = require('../config');
|
||||
const logger = require('../utils/logger');
|
||||
const {
|
||||
sanitizePathPreserveDirs,
|
||||
formatFileSize
|
||||
} = require('../utils/fileUtils');
|
||||
const { sendNotification } = require('../services/notifications');
|
||||
|
||||
const METADATA_DIR = path.join(config.uploadDir, '.metadata');
|
||||
const UPLOAD_TIMEOUT = 30 * 60 * 1000; // For local metadata cleanup
|
||||
|
||||
// --- S3 Client Initialization ---
|
||||
let s3Client;
|
||||
try {
|
||||
const s3ClientConfig = {
|
||||
region: config.s3Region,
|
||||
credentials: {
|
||||
accessKeyId: config.s3AccessKeyId,
|
||||
secretAccessKey: config.s3SecretAccessKey,
|
||||
},
|
||||
...(config.s3EndpointUrl && { endpoint: config.s3EndpointUrl }),
|
||||
...(config.s3ForcePathStyle && { forcePathStyle: true }),
|
||||
};
|
||||
if (s3ClientConfig.endpoint) logger.info(`[S3 Adapter] Configuring S3 client for endpoint: ${s3ClientConfig.endpoint}`);
|
||||
if (s3ClientConfig.forcePathStyle) logger.info(`[S3 Adapter] Configuring S3 client with forcePathStyle: true`);
|
||||
s3Client = new S3Client(s3ClientConfig);
|
||||
logger.success('[S3 Adapter] S3 Client configured successfully.');
|
||||
} catch (error) {
|
||||
logger.error(`[S3 Adapter] Failed to configure S3 client: ${error.message}`);
|
||||
throw new Error('S3 Client configuration failed. Check S3 environment variables.');
|
||||
}
|
||||
|
||||
// --- Metadata Helper Functions ---
|
||||
async function ensureMetadataDirExists() {
|
||||
try {
|
||||
if (!fsSync.existsSync(METADATA_DIR)) {
|
||||
await fs.mkdir(METADATA_DIR, { recursive: true });
|
||||
logger.info(`[S3 Adapter] Created local metadata directory: ${METADATA_DIR}`);
|
||||
}
|
||||
await fs.access(METADATA_DIR, fsSync.constants.W_OK);
|
||||
} catch (err) {
|
||||
logger.error(`[S3 Adapter] Local metadata directory error (${METADATA_DIR}): ${err.message}`);
|
||||
throw new Error(`Failed to access or create local metadata directory for S3 adapter state: ${METADATA_DIR}`);
|
||||
}
|
||||
}
|
||||
|
||||
async function readUploadMetadata(uploadId) {
|
||||
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
|
||||
logger.warn(`[S3 Adapter] Attempted to read metadata with invalid uploadId: ${uploadId}`);
|
||||
return null;
|
||||
}
|
||||
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
|
||||
try {
|
||||
const data = await fs.readFile(metaFilePath, 'utf8');
|
||||
const metadata = JSON.parse(data);
|
||||
metadata.parts = metadata.parts || [];
|
||||
return metadata;
|
||||
} catch (err) {
|
||||
if (err.code === 'ENOENT') return null;
|
||||
logger.error(`[S3 Adapter] Error reading metadata for ${uploadId}: ${err.message}`);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
async function writeUploadMetadata(uploadId, metadata) {
|
||||
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
|
||||
logger.error(`[S3 Adapter] Attempted to write metadata with invalid uploadId: ${uploadId}`);
|
||||
return;
|
||||
}
|
||||
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
|
||||
metadata.lastActivity = Date.now();
|
||||
metadata.parts = metadata.parts || [];
|
||||
// Declare the temp path outside the try block so the catch handler can reference it for cleanup
const tempMetaPath = `${metaFilePath}.${crypto.randomBytes(4).toString('hex')}.tmp`;
try {
await fs.writeFile(tempMetaPath, JSON.stringify(metadata, null, 2));
await fs.rename(tempMetaPath, metaFilePath);
} catch (err) {
logger.error(`[S3 Adapter] Error writing metadata for ${uploadId}: ${err.message}`);
try { await fs.unlink(tempMetaPath); } catch (unlinkErr) { /* ignore */ }
throw err;
}
|
||||
}
|
||||
|
||||
async function deleteUploadMetadata(uploadId) {
|
||||
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
|
||||
logger.warn(`[S3 Adapter] Attempted to delete metadata with invalid uploadId: ${uploadId}`);
|
||||
return;
|
||||
}
|
||||
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
|
||||
try {
|
||||
await fs.unlink(metaFilePath);
|
||||
logger.debug(`[S3 Adapter] Deleted metadata file: ${uploadId}.meta`);
|
||||
} catch (err) {
|
||||
if (err.code !== 'ENOENT') logger.error(`[S3 Adapter] Error deleting metadata file ${uploadId}.meta: ${err.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
ensureMetadataDirExists().catch(err => {
|
||||
logger.error(`[S3 Adapter] Initialization failed (metadata dir): ${err.message}`);
|
||||
process.exit(1);
|
||||
});
|
||||
|
||||
// --- S3 Object/Prefix Utilities ---
|
||||
const batchS3PrefixMappings = new Map(); // In-memory: originalTopLevelFolder-batchId -> actualS3Prefix
|
||||
|
||||
async function s3ObjectExists(key) {
|
||||
logger.info(`[S3 Adapter] s3ObjectExists: Checking key "${key}"`);
|
||||
try {
|
||||
await s3Client.send(new HeadObjectCommand({ Bucket: config.s3BucketName, Key: key }));
|
||||
logger.info(`[S3 Adapter] s3ObjectExists: HeadObject success for key "${key}". Key EXISTS.`);
|
||||
return true;
|
||||
} catch (error) {
|
||||
// logger.error(`[S3 Adapter DEBUG] Full error object for HeadObject on key "${key}":\n`, util.inspect(error, { showHidden: false, depth: null, colors: false }));
|
||||
if (error.name === 'NotFound' || error.name === 'NoSuchKey' || (error.$metadata && error.$metadata.httpStatusCode === 404)) {
|
||||
logger.info(`[S3 Adapter] s3ObjectExists: Key "${key}" NOT found (404-like error).`);
|
||||
return false;
|
||||
}
|
||||
if (error.name === '403' || (error.$metadata && error.$metadata.httpStatusCode === 403)) {
|
||||
logger.warn(`[S3 Adapter] s3ObjectExists: Received 403 Forbidden for key "${key}". For unique key generation, treating this as 'likely does not exist'.`);
|
||||
return false;
|
||||
}
|
||||
logger.error(`[S3 Adapter] s3ObjectExists: Unhandled error type "${error.name}" for key "${key}": ${error.message}`);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
async function getUniqueS3FolderPrefix(originalPrefix, batchId) {
|
||||
if (!originalPrefix || !originalPrefix.endsWith('/')) {
|
||||
logger.error("[S3 Adapter] getUniqueS3FolderPrefix: originalPrefix must be a non-empty string ending with '/'");
|
||||
return originalPrefix; // Or throw error
|
||||
}
|
||||
const prefixMapKey = `${originalPrefix}-${batchId}`;
|
||||
if (batchS3PrefixMappings.has(prefixMapKey)) {
|
||||
return batchS3PrefixMappings.get(prefixMapKey);
|
||||
}
|
||||
|
||||
let currentPrefixToCheck = originalPrefix;
|
||||
let counter = 1;
|
||||
const baseName = originalPrefix.slice(0, -1); // "MyFolder" from "MyFolder/"
|
||||
|
||||
async function prefixHasObjects(prefix) {
|
||||
try {
|
||||
const listResponse = await s3Client.send(new ListObjectsV2Command({
|
||||
Bucket: config.s3BucketName, Prefix: prefix, MaxKeys: 1
|
||||
}));
|
||||
return listResponse.KeyCount > 0;
|
||||
} catch (error) {
|
||||
logger.error(`[S3 Adapter] Error listing objects for prefix check "${prefix}": ${error.message}`);
|
||||
throw error; // Propagate error if listing fails for permission reasons etc.
|
||||
}
|
||||
}
|
||||
|
||||
while (await prefixHasObjects(currentPrefixToCheck)) {
|
||||
logger.warn(`[S3 Adapter] S3 prefix "${currentPrefixToCheck}" is not empty. Generating unique prefix for base "${baseName}/".`);
|
||||
currentPrefixToCheck = `${baseName}-${counter}/`; // Use hyphen for suffix
|
||||
counter++;
|
||||
}
|
||||
|
||||
if (currentPrefixToCheck !== originalPrefix) {
|
||||
logger.info(`[S3 Adapter] Using unique S3 folder prefix: "${currentPrefixToCheck}" for original "${originalPrefix}" in batch "${batchId}"`);
|
||||
}
|
||||
batchS3PrefixMappings.set(prefixMapKey, currentPrefixToCheck);
|
||||
return currentPrefixToCheck;
|
||||
}
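Illustrative behaviour of the prefix mapping above, under an assumed bucket state:

// Assumed bucket state: objects already exist under "Photos/"
// await getUniqueS3FolderPrefix('Photos/', 'batch-1'); // -> 'Photos-1/' (lists the bucket, picks a free prefix)
// await getUniqueS3FolderPrefix('Photos/', 'batch-1'); // -> 'Photos-1/' (served from the batchS3PrefixMappings cache)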
|
||||
|
||||
// --- Interface Implementation ---
|
||||
async function initUpload(filename, fileSize, clientBatchId) {
|
||||
await ensureMetadataDirExists();
|
||||
const size = Number(fileSize);
|
||||
const appUploadId = crypto.randomBytes(16).toString('hex');
|
||||
const batchId = clientBatchId || `${Date.now()}-${crypto.randomBytes(4).toString('hex').substring(0, 9)}`;
|
||||
|
||||
const originalSanitizedFullpath = sanitizePathPreserveDirs(filename); // e.g., "MyFolder/image.jpg" or "image.jpg"
|
||||
let s3KeyStructure = path.normalize(originalSanitizedFullpath)
|
||||
.replace(/^(\.\.(\/|\\|$))+/, '').replace(/\\/g, '/').replace(/^\/+/, '');
|
||||
|
||||
let effectiveBasePrefix = ""; // e.g., "MyFolder-1/" or ""
|
||||
const pathParts = s3KeyStructure.split('/');
|
||||
const isNestedPath = pathParts.length > 1;
|
||||
let relativePathInFolder = s3KeyStructure;
|
||||
|
||||
if (isNestedPath) {
|
||||
const originalTopLevelFolder = pathParts[0] + '/'; // "MyFolder/"
|
||||
effectiveBasePrefix = await getUniqueS3FolderPrefix(originalTopLevelFolder, batchId);
|
||||
relativePathInFolder = pathParts.slice(1).join('/'); // "SubFolder/image.jpg" or "image.jpg"
|
||||
s3KeyStructure = effectiveBasePrefix + relativePathInFolder;
|
||||
}
|
||||
logger.info(`[S3 Adapter] Init: Original Full Path: "${originalSanitizedFullpath}", Effective Base Prefix: "${effectiveBasePrefix}", Relative Path In Folder: "${relativePathInFolder}"`);
|
||||
|
||||
let finalS3Key = s3KeyStructure;
|
||||
let fileCounter = 1;
|
||||
const fileDir = path.dirname(s3KeyStructure);
|
||||
const fileExt = path.extname(s3KeyStructure);
|
||||
const fileBaseName = path.basename(s3KeyStructure, fileExt);
|
||||
|
||||
while (await s3ObjectExists(finalS3Key)) {
|
||||
logger.warn(`[S3 Adapter] S3 file key already exists: "${finalS3Key}". Generating unique file key.`);
|
||||
finalS3Key = (fileDir === "." ? "" : fileDir + "/") + `${fileBaseName}-${fileCounter}${fileExt}`; // Use hyphen
|
||||
fileCounter++;
|
||||
}
|
||||
if (finalS3Key !== s3KeyStructure) {
|
||||
logger.info(`[S3 Adapter] Using unique S3 file key: "${finalS3Key}"`);
|
||||
}
|
||||
|
||||
if (size === 0) {
|
||||
try {
|
||||
await s3Client.send(new PutObjectCommand({
|
||||
Bucket: config.s3BucketName, Key: finalS3Key, Body: '', ContentLength: 0
|
||||
}));
|
||||
logger.success(`[S3 Adapter] Completed zero-byte file: ${finalS3Key}`);
|
||||
sendNotification(originalSanitizedFullpath, 0, config);
|
||||
return { uploadId: `zero-byte-${appUploadId}` };
|
||||
} catch (putErr) {
|
||||
logger.error(`[S3 Adapter] Failed zero-byte PUT for ${finalS3Key}: ${putErr.message}`);
|
||||
throw putErr;
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const createCommand = new CreateMultipartUploadCommand({ Bucket: config.s3BucketName, Key: finalS3Key });
|
||||
const response = await s3Client.send(createCommand);
|
||||
const s3UploadId = response.UploadId;
|
||||
if (!s3UploadId) throw new Error('S3 did not return UploadId');
|
||||
logger.info(`[S3 Adapter] Multipart initiated for ${finalS3Key} (S3 UploadId: ${s3UploadId})`);
|
||||
|
||||
const metadata = {
|
||||
appUploadId, s3UploadId, s3Key: finalS3Key,
|
||||
originalFilename: originalSanitizedFullpath, // Use the full original path for notification
|
||||
fileSize: size, bytesReceived: 0, parts: [], batchId,
|
||||
createdAt: Date.now(), lastActivity: Date.now()
|
||||
};
|
||||
await writeUploadMetadata(appUploadId, metadata);
|
||||
return { uploadId: appUploadId };
|
||||
} catch (err) {
|
||||
logger.error(`[S3 Adapter] Failed multipart init for ${finalS3Key}: ${err.message}`);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
async function storeChunk(appUploadId, chunk, partNumber) {
|
||||
const chunkSize = chunk.length;
|
||||
if (!chunkSize) throw new Error('Empty chunk received');
|
||||
if (partNumber < 1) throw new Error('PartNumber must be 1 or greater');
|
||||
|
||||
const metadata = await readUploadMetadata(appUploadId);
|
||||
if (!metadata || !metadata.s3UploadId) {
|
||||
logger.warn(`[S3 Adapter] Metadata or S3 UploadId not found for chunk: ${appUploadId}`);
|
||||
throw new Error('Upload session not found or already completed');
|
||||
}
|
||||
if (metadata.bytesReceived >= metadata.fileSize && metadata.fileSize > 0) {
|
||||
logger.warn(`[S3 Adapter] Chunk for already completed upload ${appUploadId}. Ignoring.`);
|
||||
return { bytesReceived: metadata.bytesReceived, progress: 100, completed: true };
|
||||
}
|
||||
|
||||
try {
|
||||
const cmd = new UploadPartCommand({
|
||||
Bucket: config.s3BucketName, Key: metadata.s3Key, UploadId: metadata.s3UploadId,
|
||||
Body: chunk, PartNumber: partNumber, ContentLength: chunkSize
|
||||
});
|
||||
const response = await s3Client.send(cmd);
|
||||
const etag = response.ETag;
|
||||
if (!etag) throw new Error(`S3 ETag missing for Part ${partNumber}`);
|
||||
|
||||
metadata.parts.push({ PartNumber: partNumber, ETag: etag });
|
||||
metadata.parts.sort((a, b) => a.PartNumber - b.PartNumber);
|
||||
metadata.bytesReceived = Math.min((metadata.bytesReceived || 0) + chunkSize, metadata.fileSize);
|
||||
await writeUploadMetadata(appUploadId, metadata);
|
||||
|
||||
const progress = metadata.fileSize === 0 ? 100 : Math.min(Math.round((metadata.bytesReceived / metadata.fileSize) * 100), 100);
|
||||
const completed = metadata.bytesReceived >= metadata.fileSize;
|
||||
logger.debug(`[S3 Adapter] Part ${partNumber} for ${appUploadId} (Key: ${metadata.s3Key}). ETag: ${etag}. Progress: ~${progress}%. Completed: ${completed}`);
|
||||
return { bytesReceived: metadata.bytesReceived, progress, completed };
|
||||
} catch (err) {
|
||||
logger.error(`[S3 Adapter] Failed Part ${partNumber} for ${appUploadId} (Key: ${metadata.s3Key}): ${err.message}`);
|
||||
throw err;
|
||||
}
|
||||
}
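One practical constraint not enforced in this function: S3 multipart uploads require every part except the last to be at least 5 MiB, so the caller is expected to buffer chunks accordingly. A small hedged sketch of sizing parts for a known file size:

const MIN_PART_SIZE = 5 * 1024 * 1024; // S3 minimum for all parts except the last

// Returns how many parts a file needs at a given part size (must be >= MIN_PART_SIZE)
function partCount(fileSize, partSize = MIN_PART_SIZE) {
  if (partSize < MIN_PART_SIZE) throw new Error('Part size below S3 minimum');
  return Math.max(1, Math.ceil(fileSize / partSize));
}

// e.g. a 23 MiB file uploaded in 5 MiB parts needs 5 parts (4 full parts + 1 final 3 MiB part)
console.log(partCount(23 * 1024 * 1024)); // 5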
|
||||
|
||||
async function completeUpload(appUploadId) {
|
||||
const metadata = await readUploadMetadata(appUploadId);
|
||||
if (!metadata || !metadata.s3UploadId || !metadata.parts || metadata.parts.length === 0) {
|
||||
throw new Error('Upload completion failed: Missing metadata/parts');
|
||||
}
|
||||
if (metadata.bytesReceived < metadata.fileSize) {
|
||||
logger.warn(`[S3 Adapter] Completing ${appUploadId} with ${metadata.bytesReceived}/${metadata.fileSize} bytes tracked.`);
|
||||
}
|
||||
try {
|
||||
const cmd = new CompleteMultipartUploadCommand({
|
||||
Bucket: config.s3BucketName, Key: metadata.s3Key, UploadId: metadata.s3UploadId,
|
||||
MultipartUpload: { Parts: metadata.parts },
|
||||
});
|
||||
const response = await s3Client.send(cmd);
|
||||
logger.success(`[S3 Adapter] Finalized: ${metadata.s3Key} (ETag: ${response.ETag})`);
|
||||
await deleteUploadMetadata(appUploadId);
|
||||
sendNotification(metadata.originalFilename, metadata.fileSize, config);
|
||||
return { filename: metadata.originalFilename, size: metadata.fileSize, finalPath: metadata.s3Key };
|
||||
} catch (err) {
|
||||
logger.error(`[S3 Adapter] Failed CompleteMultipartUpload for ${metadata.s3Key}: ${err.message}`);
|
||||
if (err.Code === 'NoSuchUpload' || err.name === 'NoSuchUpload') {
|
||||
logger.warn(`[S3 Adapter] NoSuchUpload on complete for ${appUploadId}. Assuming completed/aborted.`);
|
||||
await deleteUploadMetadata(appUploadId).catch(()=>{});
|
||||
try {
|
||||
await s3Client.send(new HeadObjectCommand({ Bucket: config.s3BucketName, Key: metadata.s3Key }));
|
||||
logger.info(`[S3 Adapter] Final object ${metadata.s3Key} exists after NoSuchUpload. Treating as completed.`);
|
||||
return { filename: metadata.originalFilename, size: metadata.fileSize, finalPath: metadata.s3Key };
|
||||
} catch (headErr) { throw new Error('Completion failed: Session & final object not found.'); }
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
async function abortUpload(appUploadId) {
|
||||
const metadata = await readUploadMetadata(appUploadId);
|
||||
if (!metadata || !metadata.s3UploadId) {
|
||||
logger.warn(`[S3 Adapter] Abort for non-existent/completed upload: ${appUploadId}`);
|
||||
await deleteUploadMetadata(appUploadId); return;
|
||||
}
|
||||
try {
|
||||
await s3Client.send(new AbortMultipartUploadCommand({
|
||||
Bucket: config.s3BucketName, Key: metadata.s3Key, UploadId: metadata.s3UploadId,
|
||||
}));
|
||||
logger.info(`[S3 Adapter] Aborted: ${appUploadId} (Key: ${metadata.s3Key})`);
|
||||
} catch (err) {
|
||||
if (err.name !== 'NoSuchUpload') {
|
||||
logger.error(`[S3 Adapter] Failed Abort for ${metadata.s3Key}: ${err.message}`); throw err;
|
||||
}
|
||||
logger.warn(`[S3 Adapter] NoSuchUpload on abort for ${metadata.s3Key}. Already aborted/completed.`);
|
||||
}
|
||||
await deleteUploadMetadata(appUploadId);
|
||||
}
|
||||
|
||||
async function listFiles() {
|
||||
try {
|
||||
let isTruncated = true; let continuationToken; const allFiles = [];
|
||||
while(isTruncated) {
|
||||
const params = { Bucket: config.s3BucketName };
|
||||
if (continuationToken) params.ContinuationToken = continuationToken;
|
||||
const response = await s3Client.send(new ListObjectsV2Command(params));
|
||||
(response.Contents || []).forEach(item => allFiles.push({
|
||||
filename: item.Key, size: item.Size,
|
||||
formattedSize: formatFileSize(item.Size), uploadDate: item.LastModified
|
||||
}));
|
||||
isTruncated = response.IsTruncated;
|
||||
continuationToken = response.NextContinuationToken;
|
||||
}
|
||||
allFiles.sort((a, b) => b.uploadDate.getTime() - a.uploadDate.getTime());
|
||||
return allFiles;
|
||||
} catch (err) {
|
||||
logger.error(`[S3 Adapter] Failed list objects in ${config.s3BucketName}: ${err.message}`); throw err;
|
||||
}
|
||||
}
|
||||
|
||||
async function getDownloadUrlOrStream(s3Key) {
|
||||
if (!s3Key || s3Key.includes('..') || s3Key.startsWith('/')) throw new Error('Invalid filename for download');
|
||||
try {
|
||||
const cmd = new GetObjectCommand({ Bucket: config.s3BucketName, Key: s3Key });
|
||||
const url = await getSignedUrl(s3Client, cmd, { expiresIn: 3600 });
|
||||
logger.info(`[S3 Adapter] Presigned URL for ${s3Key}`);
|
||||
return { type: 'url', value: url };
|
||||
} catch (err) {
|
||||
logger.error(`[S3 Adapter] Failed presigned URL for ${s3Key}: ${err.message}`);
|
||||
if (err.name === 'NoSuchKey') throw new Error('File not found in S3'); throw err;
|
||||
}
|
||||
}
|
||||
|
||||
async function deleteFile(s3Key) {
|
||||
if (!s3Key || s3Key.includes('..') || s3Key.startsWith('/')) throw new Error('Invalid filename for delete');
|
||||
try {
|
||||
await s3Client.send(new DeleteObjectCommand({ Bucket: config.s3BucketName, Key: s3Key }));
|
||||
logger.info(`[S3 Adapter] Deleted: ${s3Key}`);
|
||||
} catch (err) {
|
||||
logger.error(`[S3 Adapter] Failed delete for ${s3Key}: ${err.message}`); throw err;
|
||||
}
|
||||
}
|
||||
|
||||
async function cleanupStale() {
|
||||
logger.info('[S3 Adapter] Cleaning stale local metadata...');
|
||||
let cleaned = 0, checked = 0;
|
||||
try {
|
||||
await ensureMetadataDirExists(); const files = await fs.readdir(METADATA_DIR); const now = Date.now();
|
||||
for (const file of files) {
|
||||
if (file.endsWith('.meta')) {
|
||||
checked++; const id = file.replace('.meta',''); const fp = path.join(METADATA_DIR, file);
|
||||
try {
|
||||
const meta = JSON.parse(await fs.readFile(fp, 'utf8'));
|
||||
if (now - (meta.lastActivity || meta.createdAt || 0) > UPLOAD_TIMEOUT) {
|
||||
logger.warn(`[S3 Adapter] Stale local meta: ${file}, S3 ID: ${meta.s3UploadId||'N/A'}`);
|
||||
await deleteUploadMetadata(id); cleaned++;
|
||||
}
|
||||
} catch (e) { logger.error(`[S3 Adapter] Error parsing meta ${fp}: ${e.message}`); await fs.unlink(fp).catch(()=>{}); }
|
||||
} else if (file.endsWith('.tmp')) {
|
||||
const tmpP = path.join(METADATA_DIR, file);
|
||||
try { if (now - (await fs.stat(tmpP)).mtime.getTime() > UPLOAD_TIMEOUT) { logger.warn(`[S3 Adapter] Deleting stale tmp meta: ${file}`); await fs.unlink(tmpP); }}
|
||||
catch (e) { if (e.code!=='ENOENT') logger.error(`[S3 Adapter] Error stat/unlink tmp meta ${tmpP}: ${e.message}`);}
|
||||
}
|
||||
}
|
||||
if (checked > 0 || cleaned > 0) logger.info(`[S3 Adapter] Local meta cleanup: Checked ${checked}, Cleaned ${cleaned}.`);
|
||||
logger.warn(`[S3 Adapter] IMPORTANT: Configure S3 Lifecycle Rules on bucket '${config.s3BucketName}' to clean incomplete multipart uploads.`);
|
||||
} catch (err) {
|
||||
if (err.code==='ENOENT'&&err.path===METADATA_DIR) logger.warn('[S3 Adapter] Local meta dir not found for cleanup.');
|
||||
else logger.error(`[S3 Adapter] Error local meta cleanup: ${err.message}`);
|
||||
}
|
||||
// Basic batchS3PrefixMappings cleanup
|
||||
if (batchS3PrefixMappings.size > 1000) {
|
||||
logger.warn(`[S3 Adapter] Clearing batchS3PrefixMappings (size: ${batchS3PrefixMappings.size}).`);
|
||||
batchS3PrefixMappings.clear();
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
initUpload, storeChunk, completeUpload, abortUpload,
|
||||
listFiles, getDownloadUrlOrStream, deleteFile, cleanupStale
|
||||
};
|
||||
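The cleanupStale() warning above only reminds operators to set up an S3 lifecycle rule; the repository does not ship code for it. A minimal sketch of such a rule with the same AWS SDK v3 client family used by the adapter (bucket, region, and the 7-day window are placeholders, not values taken from this project):

// One-off script: abort incomplete multipart uploads after 7 days (hypothetical, not part of DumbDrop).
const { S3Client, PutBucketLifecycleConfigurationCommand } = require('@aws-sdk/client-s3');

async function applyMultipartCleanupRule(bucket, region) {
  const client = new S3Client({ region });
  await client.send(new PutBucketLifecycleConfigurationCommand({
    Bucket: bucket,
    LifecycleConfiguration: {
      Rules: [{
        ID: 'abort-incomplete-multipart-uploads',
        Status: 'Enabled',
        Filter: { Prefix: '' }, // apply to every key in the bucket
        AbortIncompleteMultipartUpload: { DaysAfterInitiation: 7 },
      }],
    },
  }));
}

// Example: applyMultipartCleanupRule(process.env.S3_BUCKET_NAME, process.env.S3_REGION);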
@@ -1,320 +1,234 @@
|
||||
/**
|
||||
* Cleanup utilities for managing application resources.
|
||||
* Handles incomplete uploads, empty folders, and shutdown tasks.
|
||||
* Provides cleanup task registration and execution system.
|
||||
* Handles registration and execution of cleanup tasks, including delegation
|
||||
* of storage-specific cleanup (like stale uploads) to the storage adapter.
|
||||
* Also includes generic cleanup like removing empty folders (for local storage).
|
||||
*/
|
||||
|
||||
const fs = require('fs').promises;
|
||||
const path = require('path');
|
||||
const logger = require('./logger');
|
||||
const { config } = require('../config');
|
||||
const { storageAdapter } = require('../storage'); // Import the selected adapter
|
||||
|
||||
const METADATA_DIR = path.join(config.uploadDir, '.metadata');
|
||||
const UPLOAD_TIMEOUT = config.uploadTimeout || 30 * 60 * 1000; // Use a config or default (e.g., 30 mins)
|
||||
|
||||
// --- Generic Cleanup Task Management ---
|
||||
let cleanupTasks = [];
|
||||
|
||||
/**
|
||||
* Register a cleanup task to be executed during shutdown
|
||||
* @param {Function} task - Async function to be executed during cleanup
|
||||
* Register a generic cleanup task to be executed during shutdown.
|
||||
* @param {Function} task - Async function to be executed during cleanup.
|
||||
*/
|
||||
function registerCleanupTask(task) {
|
||||
cleanupTasks.push(task);
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a cleanup task
|
||||
* @param {Function} task - Task to remove
|
||||
* Remove a generic cleanup task.
|
||||
* @param {Function} task - Task to remove.
|
||||
*/
|
||||
function removeCleanupTask(task) {
|
||||
cleanupTasks = cleanupTasks.filter((t) => t !== task);
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute all registered cleanup tasks
|
||||
* @param {number} [timeout=1000] - Maximum time in ms to wait for cleanup
|
||||
* Execute all registered generic cleanup tasks.
|
||||
* @param {number} [timeout=1000] - Maximum time in ms to wait for cleanup.
|
||||
* @returns {Promise<void>}
|
||||
*/
|
||||
async function executeCleanup(timeout = 1000) {
|
||||
const taskCount = cleanupTasks.length;
|
||||
if (taskCount === 0) {
|
||||
logger.info('No cleanup tasks to execute');
|
||||
logger.info('[Cleanup] No generic cleanup tasks to execute');
|
||||
return;
|
||||
}
|
||||
|
||||
logger.info(`Executing ${taskCount} cleanup tasks...`);
|
||||
|
||||
|
||||
logger.info(`[Cleanup] Executing ${taskCount} generic cleanup tasks...`);
|
||||
|
||||
try {
|
||||
// Run all cleanup tasks in parallel with timeout
|
||||
// Run all tasks concurrently with individual and global timeouts
|
||||
await Promise.race([
|
||||
Promise.all(
|
||||
cleanupTasks.map(async (task) => {
|
||||
cleanupTasks.map(async (task, index) => {
|
||||
try {
|
||||
await Promise.race([
|
||||
task(),
|
||||
new Promise((_, reject) =>
|
||||
setTimeout(() => reject(new Error('Task timeout')), timeout / 2)
|
||||
new Promise((_, reject) =>
|
||||
setTimeout(() => reject(new Error(`Task ${index + 1} timeout`)), timeout / 2) // Individual timeout
|
||||
)
|
||||
]);
|
||||
logger.debug(`[Cleanup] Task ${index + 1} completed.`);
|
||||
} catch (error) {
|
||||
if (error.message === 'Task timeout') {
|
||||
logger.warn('Cleanup task timed out');
|
||||
} else {
|
||||
logger.error(`Cleanup task failed: ${error.message}`);
|
||||
}
|
||||
logger.warn(`[Cleanup] Task ${index + 1} failed or timed out: ${error.message}`);
|
||||
}
|
||||
})
|
||||
),
|
||||
new Promise((_, reject) =>
|
||||
setTimeout(() => reject(new Error('Global timeout')), timeout)
|
||||
new Promise((_, reject) =>
|
||||
setTimeout(() => reject(new Error('Global cleanup timeout')), timeout) // Global timeout
|
||||
)
|
||||
]);
|
||||
|
||||
logger.info('Cleanup completed successfully');
|
||||
|
||||
logger.info('[Cleanup] Generic cleanup tasks completed successfully');
|
||||
} catch (error) {
|
||||
if (error.message === 'Global timeout') {
|
||||
logger.warn(`Cleanup timed out after ${timeout}ms`);
|
||||
} else {
|
||||
logger.error(`Cleanup failed: ${error.message}`);
|
||||
}
|
||||
logger.warn(`[Cleanup] Generic cleanup process ended with error or timeout: ${error.message}`);
|
||||
} finally {
|
||||
// Clear all tasks regardless of success/failure
|
||||
cleanupTasks = [];
|
||||
cleanupTasks = []; // Clear tasks regardless of outcome
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up incomplete uploads and temporary files
|
||||
* @param {Map} uploads - Map of active uploads
|
||||
* @param {Map} uploadToBatch - Map of upload IDs to batch IDs
|
||||
* @param {Map} batchActivity - Map of batch IDs to last activity timestamp
|
||||
*/
|
||||
async function cleanupIncompleteUploads(uploads, uploadToBatch, batchActivity) {
|
||||
try {
|
||||
// Get current time
|
||||
const now = Date.now();
|
||||
const inactivityThreshold = config.uploadTimeout || 30 * 60 * 1000; // 30 minutes default
|
||||
|
||||
// Check each upload
|
||||
for (const [uploadId, upload] of uploads.entries()) {
|
||||
try {
|
||||
const batchId = uploadToBatch.get(uploadId);
|
||||
const lastActivity = batchActivity.get(batchId);
|
||||
// --- Storage-Specific Cleanup ---
|
||||
|
||||
// If upload is inactive for too long
|
||||
if (now - lastActivity > inactivityThreshold) {
|
||||
// Close write stream
|
||||
if (upload.writeStream) {
|
||||
await new Promise((resolve) => {
|
||||
upload.writeStream.end(() => resolve());
|
||||
});
|
||||
}
|
||||
|
||||
// Delete incomplete file
|
||||
try {
|
||||
await fs.unlink(upload.filePath);
|
||||
logger.info(`Cleaned up incomplete upload: ${upload.safeFilename}`);
|
||||
} catch (err) {
|
||||
if (err.code !== 'ENOENT') {
|
||||
logger.error(`Failed to delete incomplete upload ${upload.safeFilename}: ${err.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Remove from maps
|
||||
uploads.delete(uploadId);
|
||||
uploadToBatch.delete(uploadId);
|
||||
}
|
||||
} catch (err) {
|
||||
logger.error(`Error cleaning up upload ${uploadId}: ${err.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up empty folders
|
||||
await cleanupEmptyFolders(config.uploadDir);
|
||||
|
||||
} catch (err) {
|
||||
logger.error(`Cleanup error: ${err.message}`);
|
||||
}
|
||||
}
|
||||
// How often to run the storage cleanup check (e.g., every 15 minutes)
|
||||
const STORAGE_CLEANUP_INTERVAL = 15 * 60 * 1000;
|
||||
let storageCleanupTimer = null;
|
||||
|
||||
/**
|
||||
* Clean up stale/incomplete uploads based on metadata files.
|
||||
* Performs cleanup of stale storage resources by calling the adapter's method.
|
||||
* This is typically run periodically.
|
||||
*/
|
||||
async function cleanupIncompleteMetadataUploads() {
|
||||
logger.info('Running cleanup for stale metadata/partial uploads...');
|
||||
let cleanedCount = 0;
|
||||
let checkedCount = 0;
|
||||
|
||||
try {
|
||||
// Ensure metadata directory exists before trying to read it
|
||||
async function runStorageCleanup() {
|
||||
logger.info('[Cleanup] Running periodic storage cleanup...');
|
||||
try {
|
||||
await fs.access(METADATA_DIR);
|
||||
} catch (accessErr) {
|
||||
if (accessErr.code === 'ENOENT') {
|
||||
logger.info('Metadata directory does not exist, skipping metadata cleanup.');
|
||||
return;
|
||||
}
|
||||
throw accessErr; // Rethrow other access errors
|
||||
}
|
||||
|
||||
const files = await fs.readdir(METADATA_DIR);
|
||||
const now = Date.now();
|
||||
|
||||
for (const file of files) {
|
||||
if (file.endsWith('.meta')) {
|
||||
checkedCount++;
|
||||
const metaFilePath = path.join(METADATA_DIR, file);
|
||||
let metadata;
|
||||
|
||||
try {
|
||||
const data = await fs.readFile(metaFilePath, 'utf8');
|
||||
metadata = JSON.parse(data);
|
||||
|
||||
// Check inactivity based on lastActivity timestamp in metadata
|
||||
if (now - (metadata.lastActivity || metadata.createdAt || 0) > UPLOAD_TIMEOUT) {
|
||||
logger.warn(`Found stale upload metadata: ${file}. Last activity: ${new Date(metadata.lastActivity || metadata.createdAt)}`);
|
||||
|
||||
// Attempt to delete partial file
|
||||
if (metadata.partialFilePath) {
|
||||
try {
|
||||
await fs.unlink(metadata.partialFilePath);
|
||||
logger.info(`Deleted stale partial file: ${metadata.partialFilePath}`);
|
||||
} catch (unlinkPartialErr) {
|
||||
if (unlinkPartialErr.code !== 'ENOENT') { // Ignore if already gone
|
||||
logger.error(`Failed to delete stale partial file ${metadata.partialFilePath}: ${unlinkPartialErr.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Attempt to delete metadata file
|
||||
try {
|
||||
await fs.unlink(metaFilePath);
|
||||
logger.info(`Deleted stale metadata file: ${file}`);
|
||||
cleanedCount++;
|
||||
} catch (unlinkMetaErr) {
|
||||
logger.error(`Failed to delete stale metadata file ${metaFilePath}: ${unlinkMetaErr.message}`);
|
||||
}
|
||||
|
||||
}
|
||||
} catch (readErr) {
|
||||
logger.error(`Error reading or parsing metadata file ${metaFilePath} during cleanup: ${readErr.message}. Skipping.`);
|
||||
// Optionally attempt to delete the corrupt meta file?
|
||||
// await fs.unlink(metaFilePath).catch(()=>{});
|
||||
if (storageAdapter && typeof storageAdapter.cleanupStale === 'function') {
|
||||
await storageAdapter.cleanupStale();
|
||||
logger.info('[Cleanup] Storage adapter cleanup task finished.');
|
||||
// Additionally, run empty folder cleanup if using local storage
|
||||
if (config.storageType === 'local') {
|
||||
await cleanupEmptyFolders(config.uploadDir);
|
||||
}
|
||||
} else {
|
||||
logger.warn('[Cleanup] Storage adapter or cleanupStale method not available.');
|
||||
}
|
||||
} else if (file.endsWith('.tmp')) {
|
||||
// Clean up potential leftover temp metadata files
|
||||
const tempMetaPath = path.join(METADATA_DIR, file);
|
||||
try {
|
||||
const stats = await fs.stat(tempMetaPath);
|
||||
if (now - stats.mtime.getTime() > UPLOAD_TIMEOUT) { // If temp file is also old
|
||||
logger.warn(`Deleting stale temporary metadata file: ${file}`);
|
||||
await fs.unlink(tempMetaPath);
|
||||
}
|
||||
} catch (statErr) {
|
||||
if (statErr.code !== 'ENOENT') { // Ignore if already gone
|
||||
logger.error(`Error checking temporary metadata file ${tempMetaPath}: ${statErr.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error(`[Cleanup] Error during periodic storage cleanup: ${error.message}`, error.stack);
|
||||
}
|
||||
|
||||
if (checkedCount > 0 || cleanedCount > 0) {
|
||||
logger.info(`Metadata cleanup finished. Checked: ${checkedCount}, Cleaned stale: ${cleanedCount}.`);
|
||||
}
|
||||
|
||||
} catch (err) {
|
||||
// Handle errors reading the METADATA_DIR itself
|
||||
if (err.code === 'ENOENT') {
|
||||
logger.info('Metadata directory not found during cleanup scan.'); // Should have been created on init
|
||||
} else {
|
||||
logger.error(`Error during metadata cleanup scan: ${err.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Also run empty folder cleanup
|
||||
await cleanupEmptyFolders(config.uploadDir);
|
||||
}
|
||||
|
||||
// Schedule the new cleanup function
|
||||
const METADATA_CLEANUP_INTERVAL = 15 * 60 * 1000; // e.g., every 15 minutes
|
||||
let metadataCleanupTimer;
|
||||
|
||||
if (!process.env.DISABLE_BATCH_CLEANUP) {
|
||||
metadataCleanupTimer = setInterval(cleanupIncompleteMetadataUploads, METADATA_CLEANUP_INTERVAL);
|
||||
metadataCleanupTimer.unref(); // Allow process to exit if this is the only timer
|
||||
|
||||
process.on('SIGTERM', () => clearInterval(metadataCleanupTimer));
|
||||
process.on('SIGINT', () => clearInterval(metadataCleanupTimer));
|
||||
}
|
||||
|
||||
/**
|
||||
* Recursively remove empty folders
|
||||
* @param {string} dir - Directory to clean
|
||||
* Starts the periodic storage cleanup task.
|
||||
*/
|
||||
function startStorageCleanupInterval() {
|
||||
if (storageCleanupTimer) {
|
||||
clearInterval(storageCleanupTimer);
|
||||
}
|
||||
logger.info(`[Cleanup] Starting periodic storage cleanup interval (${STORAGE_CLEANUP_INTERVAL / 60000} minutes).`);
|
||||
// Run once immediately on start? Optional.
|
||||
// runStorageCleanup();
|
||||
storageCleanupTimer = setInterval(runStorageCleanup, STORAGE_CLEANUP_INTERVAL);
|
||||
storageCleanupTimer.unref(); // Allow process to exit if this is the only timer
|
||||
}
|
||||
|
||||
/**
|
||||
* Stops the periodic storage cleanup task.
|
||||
*/
|
||||
function stopStorageCleanupInterval() {
|
||||
if (storageCleanupTimer) {
|
||||
clearInterval(storageCleanupTimer);
|
||||
storageCleanupTimer = null;
|
||||
logger.info('[Cleanup] Stopped periodic storage cleanup interval.');
|
||||
}
|
||||
}
|
||||
|
||||
// Start interval automatically
|
||||
// Note: Ensure storageAdapter is initialized before this interval fires; otherwise runStorageCleanup only logs a warning and skips.
|
||||
// Consider starting this interval after server initialization in server.js if needed.
|
||||
if (!config.isDemoMode) { // Don't run cleanup in demo mode
|
||||
startStorageCleanupInterval();
|
||||
} else {
|
||||
logger.info('[Cleanup] Periodic storage cleanup disabled in Demo Mode.');
|
||||
}
|
||||
|
||||
// Stop interval on shutdown
|
||||
process.on('SIGTERM', stopStorageCleanupInterval);
|
||||
process.on('SIGINT', stopStorageCleanupInterval);
|
||||
|
||||
|
||||
// --- Empty Folder Cleanup (Primarily for Local Storage) ---
|
||||
|
||||
/**
|
||||
* Recursively remove empty folders within a given directory.
|
||||
* Skips the special '.metadata' directory.
|
||||
* @param {string} dir - Directory path to clean.
|
||||
*/
|
||||
async function cleanupEmptyFolders(dir) {
|
||||
// Check if the path exists and is a directory first
|
||||
try {
|
||||
// Avoid trying to clean the special .metadata directory itself
|
||||
if (path.basename(dir) === '.metadata') {
|
||||
logger.debug(`Skipping cleanup of metadata directory: ${dir}`);
|
||||
return;
|
||||
}
|
||||
|
||||
const files = await fs.readdir(dir);
|
||||
for (const file of files) {
|
||||
const fullPath = path.join(dir, file);
|
||||
|
||||
// Skip the metadata directory during traversal
|
||||
if (path.basename(fullPath) === '.metadata') {
|
||||
logger.debug(`Skipping traversal into metadata directory: ${fullPath}`);
|
||||
continue;
|
||||
const stats = await fs.stat(dir);
|
||||
if (!stats.isDirectory()) {
|
||||
logger.debug(`[Cleanup] Skipping non-directory path for empty folder cleanup: ${dir}`);
|
||||
return;
|
||||
}
|
||||
|
||||
let stats;
|
||||
try {
|
||||
stats = await fs.stat(fullPath);
|
||||
} catch (statErr) {
|
||||
if (statErr.code === 'ENOENT') continue; // File might have been deleted concurrently
|
||||
throw statErr;
|
||||
}
|
||||
|
||||
if (stats.isDirectory()) {
|
||||
await cleanupEmptyFolders(fullPath);
|
||||
// Check if directory is empty after cleaning subdirectories
|
||||
let remaining = [];
|
||||
try {
|
||||
remaining = await fs.readdir(fullPath);
|
||||
} catch (readErr) {
|
||||
if (readErr.code === 'ENOENT') continue; // Directory was deleted
|
||||
throw readErr;
|
||||
}
|
||||
|
||||
if (remaining.length === 0) {
|
||||
// Make sure we don't delete the main upload dir
|
||||
if (fullPath !== path.resolve(config.uploadDir)) {
|
||||
try {
|
||||
await fs.rmdir(fullPath);
|
||||
logger.info(`Removed empty directory: ${fullPath}`);
|
||||
} catch (rmErr) {
|
||||
if (rmErr.code !== 'ENOENT') { // Ignore if already deleted
|
||||
logger.error(`Failed to remove supposedly empty directory ${fullPath}: ${rmErr.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
if (err.code !== 'ENOENT') { // Ignore if dir was already deleted
|
||||
logger.error(`Failed to clean empty folders in ${dir}: ${err.message}`);
|
||||
if (err.code === 'ENOENT') {
|
||||
logger.debug(`[Cleanup] Directory not found for empty folder cleanup: ${dir}`);
|
||||
return; // Directory doesn't exist, nothing to clean
|
||||
}
|
||||
logger.error(`[Cleanup] Error stating directory ${dir} for cleanup: ${err.message}`);
|
||||
return; // Don't proceed if we can't stat
|
||||
}
|
||||
|
||||
|
||||
logger.debug(`[Cleanup] Checking for empty folders within: ${dir}`);
|
||||
const isMetadataDir = path.basename(dir) === '.metadata';
|
||||
if (isMetadataDir) {
|
||||
logger.debug(`[Cleanup] Skipping cleanup of metadata directory itself: ${dir}`);
|
||||
return;
|
||||
}
|
||||
|
||||
let entries;
|
||||
try {
|
||||
entries = await fs.readdir(dir, { withFileTypes: true });
|
||||
} catch (err) {
|
||||
logger.error(`[Cleanup] Failed to read directory ${dir} for empty folder cleanup: ${err.message}`);
|
||||
return; // Cannot proceed
|
||||
}
|
||||
|
||||
// Recursively clean subdirectories first
|
||||
const subDirPromises = entries
|
||||
.filter(entry => entry.isDirectory() && entry.name !== '.metadata')
|
||||
.map(entry => cleanupEmptyFolders(path.join(dir, entry.name)));
|
||||
|
||||
await Promise.all(subDirPromises);
|
||||
|
||||
// Re-read directory contents after cleaning subdirectories
|
||||
try {
|
||||
entries = await fs.readdir(dir); // Just need names now
|
||||
} catch (err) {
|
||||
logger.error(`[Cleanup] Failed to re-read directory ${dir} after sub-cleanup: ${err.message}`);
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if directory is now empty (or only contains .metadata)
|
||||
const isEmpty = entries.length === 0 || (entries.length === 1 && entries[0] === '.metadata');
|
||||
|
||||
if (isEmpty) {
|
||||
// Make sure we don't delete the main configured upload dir or the metadata dir
|
||||
const resolvedUploadDir = path.resolve(config.uploadDir);
|
||||
const resolvedCurrentDir = path.resolve(dir);
|
||||
|
||||
if (resolvedCurrentDir !== resolvedUploadDir && path.basename(resolvedCurrentDir) !== '.metadata') {
|
||||
try {
|
||||
await fs.rmdir(resolvedCurrentDir);
|
||||
logger.info(`[Cleanup] Removed empty directory: ${resolvedCurrentDir}`);
|
||||
} catch (rmErr) {
|
||||
if (rmErr.code !== 'ENOENT') { // Ignore if already deleted
|
||||
logger.error(`[Cleanup] Failed to remove supposedly empty directory ${resolvedCurrentDir}: ${rmErr.message}`);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logger.debug(`[Cleanup] Skipping removal of root upload directory or metadata directory: ${resolvedCurrentDir}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// --- Export ---
|
||||
module.exports = {
|
||||
registerCleanupTask,
|
||||
removeCleanupTask,
|
||||
executeCleanup,
|
||||
cleanupIncompleteUploads,
|
||||
cleanupIncompleteMetadataUploads,
|
||||
cleanupEmptyFolders
|
||||
};
|
||||
// Exporting runStorageCleanup might be useful for triggering manually if needed
|
||||
runStorageCleanup,
|
||||
startStorageCleanupInterval,
|
||||
stopStorageCleanupInterval,
|
||||
cleanupEmptyFolders // Export if needed elsewhere, though mainly used internally now
|
||||
};
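The generic task system above (registerCleanupTask / executeCleanup) is only defined in this module; how it is wired into shutdown is not visible in this compare view. A rough sketch of the intended usage, assuming the module lives at src/utils/cleanup.js as the require paths suggest and that the server's shutdown handler looks roughly like this:

// Hypothetical wiring in server.js (illustrative only, not taken from this diff).
const { registerCleanupTask, executeCleanup } = require('./utils/cleanup');

registerCleanupTask(async () => {
  // e.g. flush an in-memory cache or close a database handle
});

process.on('SIGTERM', async () => {
  await executeCleanup(2000); // give all registered tasks up to ~2 seconds in total
  process.exit(0);
});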

@@ -6,6 +6,7 @@

const multer = require('multer');
const express = require('express');
const router = express.Router();
const logger = require('./logger');
const { config } = require('../config');

@@ -41,9 +42,9 @@ const injectDemoBanner = (html) => {
const demoFiles = new Map();
const demoUploads = new Map();

// Configure demo upload handling (storage configured for multer but not directly used)
// Configure demo upload handling
const storage = multer.memoryStorage();
multer({ storage });
const upload = multer({ storage });

// Create demo routes with exact path matching
const demoRouter = express.Router();

@@ -7,6 +7,7 @@
const fs = require('fs');
const path = require('path');
const logger = require('./logger');
const { config } = require('../config');

/**
 * Format file size to human readable format
@@ -146,125 +147,11 @@ async function getUniqueFolderPath(folderPath) {
  return finalPath;
}

/**
|
||||
* Comprehensive filename sanitization for safe file storage
|
||||
* Removes spaces, special characters, and normalizes to ASCII-safe characters
|
||||
* @param {string} fileName - Original filename
|
||||
* @returns {string} Sanitized filename safe for all operating systems
|
||||
*/
|
||||
function sanitizeFilenameSafe(fileName) {
|
||||
if (!fileName || typeof fileName !== 'string') {
|
||||
return 'unnamed_file';
|
||||
}
|
||||
|
||||
// Get the file extension first (preserve it)
|
||||
const ext = path.extname(fileName);
|
||||
let baseName = path.basename(fileName, ext);
|
||||
|
||||
// If no base name after removing extension, use a default
|
||||
if (!baseName || baseName.trim() === '') {
|
||||
baseName = 'unnamed_file';
|
||||
}
|
||||
|
||||
// Step 1: Normalize Unicode characters to ASCII equivalents
|
||||
baseName = baseName
|
||||
.normalize('NFD') // Decompose Unicode characters
|
||||
.replace(/[\u0300-\u036f]/g, '') // Remove diacritical marks
|
||||
// eslint-disable-next-line no-control-regex
|
||||
.replace(/[^\x00-\x7F]/g, ''); // Remove non-ASCII characters
|
||||
|
||||
// Step 2: Replace spaces and common separators with underscores
|
||||
baseName = baseName
|
||||
.replace(/\s+/g, '_') // Replace all whitespace with underscores
|
||||
.replace(/[+\-\s]+/g, '_'); // Replace + and - with underscores
|
||||
|
||||
// Step 3: Remove or replace problematic characters
|
||||
baseName = baseName
|
||||
.replace(/[<>:"/\\|?*]/g, '') // Remove filesystem reserved characters
|
||||
.replace(/[`"'$|;&<>(){}[\]]/g, '') // Remove shell/command problematic chars
|
||||
.replace(/[~#%&*{}\\:<>?/+|"']/g, '') // Remove additional problematic chars
|
||||
.replace(/[^\w\-_.]/g, '') // Keep only word chars, hyphens, underscores, dots
|
||||
.replace(/_{2,}/g, '_') // Replace multiple underscores with single
|
||||
.replace(/^[._-]+/, '') // Remove leading dots, underscores, hyphens
|
||||
.replace(/[._-]+$/, ''); // Remove trailing dots, underscores, hyphens
|
||||
|
||||
// Step 4: Ensure the filename isn't empty and isn't reserved
|
||||
if (!baseName || baseName.length === 0) {
|
||||
baseName = 'file';
|
||||
}
|
||||
|
||||
// Step 5: Check for Windows reserved names
|
||||
const reservedNames = [
|
||||
'CON', 'PRN', 'AUX', 'NUL',
|
||||
'COM1', 'COM2', 'COM3', 'COM4', 'COM5', 'COM6', 'COM7', 'COM8', 'COM9',
|
||||
'LPT1', 'LPT2', 'LPT3', 'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9'
|
||||
];
|
||||
|
||||
if (reservedNames.includes(baseName.toUpperCase())) {
|
||||
baseName = baseName + '_file';
|
||||
}
|
||||
|
||||
// Step 6: Limit length (keep reasonable length, reserve space for extension)
|
||||
const maxLength = 200; // Leave room for path length limits
|
||||
if (baseName.length > maxLength) {
|
||||
baseName = baseName.substring(0, maxLength);
|
||||
}
|
||||
|
||||
// Step 7: Clean up the extension too
|
||||
let cleanExt = ext;
|
||||
if (cleanExt) {
|
||||
cleanExt = cleanExt
|
||||
.replace(/[^a-zA-Z0-9.]/g, '') // Only allow alphanumeric and dots in extension
|
||||
.toLowerCase(); // Normalize to lowercase
|
||||
|
||||
// Ensure extension starts with a dot
|
||||
if (cleanExt && !cleanExt.startsWith('.')) {
|
||||
cleanExt = '.' + cleanExt;
|
||||
}
|
||||
}
|
||||
|
||||
const finalName = baseName + cleanExt;
|
||||
|
||||
// Final safety check - if somehow we end up with an empty name
|
||||
if (!finalName || finalName === cleanExt) {
|
||||
return 'file' + (cleanExt || '.txt');
|
||||
}
|
||||
|
||||
return finalName;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sanitize a file path while preserving directory structure
|
||||
* Each path component is individually sanitized
|
||||
* @param {string} filePath - Original file path
|
||||
* @returns {string} Sanitized file path
|
||||
*/
|
||||
function sanitizePathPreserveDirsSafe(filePath) {
|
||||
if (!filePath || typeof filePath !== 'string') {
|
||||
return 'unnamed_file.txt';
|
||||
}
|
||||
|
||||
// Split on forward slashes, sanitize each part, and rejoin
|
||||
return filePath
|
||||
.split('/')
|
||||
.filter(part => part.length > 0 && part !== '.' && part !== '..') // Remove empty parts and path navigation tokens
|
||||
.map(part => sanitizeFilenameSafe(part))
|
||||
.join('/');
|
||||
}
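For reference, since this changeset removes the two "Safe" helpers above, this is roughly how they behaved on sample names. The outputs are inferred from the sanitization steps in the removed code, not taken from a test run:

// Against the pre-change fileUtils (illustrative only, inferred outputs).
const { sanitizeFilenameSafe, sanitizePathPreserveDirsSafe } = require('./src/utils/fileUtils');

console.log(sanitizeFilenameSafe('Résumé (final) v2.PDF'));
// expected: "Resume_final_v2.pdf"  (diacritics stripped, spaces/parens removed, extension lowercased)

console.log(sanitizePathPreserveDirsSafe('docs/../Q1 report/summary.TXT'));
// expected: "docs/Q1_report/summary.txt"  ('..' segment dropped, each component sanitized)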
|
||||
|
||||
/**
|
||||
* Legacy filename sanitization (kept for compatibility)
|
||||
* @deprecated Use sanitizeFilenameSafe instead
|
||||
*/
|
||||
function sanitizeFilename(fileName) {
|
||||
const sanitized = fileName.replace(/[<>:"/\\|?*]+/g, '').replace(/["`$|;&<>]/g, '');
|
||||
return sanitized;
|
||||
}
|
||||
|
||||
/**
|
||||
* Legacy path sanitization (kept for compatibility)
|
||||
* @deprecated Use sanitizePathPreserveDirsSafe instead
|
||||
*/
|
||||
function sanitizePathPreserveDirs(filePath) {
|
||||
// Split on forward slashes, sanitize each part, and rejoin
|
||||
return filePath
|
||||
@@ -283,86 +170,6 @@ function isValidBatchId(batchId) {
|
||||
return /^\d+-[a-z0-9]{9}$/.test(batchId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a file path is within the upload directory
|
||||
* Works with both existing and non-existing files, and handles Docker bind mounts correctly
|
||||
* This function does NOT require the file to exist, making it suitable for upload validation
|
||||
* @param {string} filePath - The file path to check (may not exist yet)
|
||||
* @param {string} uploadDir - The upload directory (must exist)
|
||||
* @param {boolean} requireExists - If true, file must exist (default: false for compatibility with uploads)
|
||||
* @returns {boolean} True if the path is within the upload directory
|
||||
*/
|
||||
function isPathWithinUploadDir(filePath, uploadDir, requireExists = false) {
|
||||
try {
|
||||
// Resolve the upload directory to its real path (should always exist)
|
||||
// This handles symlinks in the upload directory path
|
||||
let realUploadDir;
|
||||
try {
|
||||
realUploadDir = fs.realpathSync(uploadDir);
|
||||
} catch (err) {
|
||||
logger.error(`Upload directory does not exist or is inaccessible: ${uploadDir}`);
|
||||
return false;
|
||||
}
|
||||
|
||||
// For the file path, we need different handling based on whether it exists
|
||||
let resolvedFilePath;
|
||||
if (requireExists) {
|
||||
// When requireExists is true, the file must exist
|
||||
if (!fs.existsSync(filePath)) {
|
||||
// File must exist but doesn't - return false immediately
|
||||
return false;
|
||||
}
|
||||
// File exists, resolve symlinks for security
|
||||
try {
|
||||
resolvedFilePath = fs.realpathSync(filePath);
|
||||
} catch (err) {
|
||||
logger.error(`Failed to resolve existing file path: ${filePath}`);
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
// For non-existing files (like during upload), use path.resolve
|
||||
// This normalizes the path without requiring it to exist
|
||||
resolvedFilePath = path.resolve(filePath);
|
||||
|
||||
// Normalize both paths to use consistent separators
|
||||
resolvedFilePath = path.normalize(resolvedFilePath);
|
||||
}
|
||||
|
||||
// Normalize the upload directory path as well
|
||||
realUploadDir = path.normalize(realUploadDir);
|
||||
|
||||
// Use path.relative() to check if file path is relative to upload dir
|
||||
// This is more reliable than startsWith() checks, especially with bind mounts
|
||||
const relativePath = path.relative(realUploadDir, resolvedFilePath);
|
||||
|
||||
// If relative path is empty, the paths are the same (upload dir itself) - allow it
|
||||
if (relativePath === '') {
|
||||
return true;
|
||||
}
|
||||
|
||||
// If relative path starts with '..', it's outside the upload directory
|
||||
// This catches path traversal attempts
|
||||
if (relativePath.startsWith('..')) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Additional check: On Windows, ensure we're on the same drive
|
||||
if (process.platform === 'win32') {
|
||||
const fileDrive = resolvedFilePath.split(':')[0];
|
||||
const uploadDrive = realUploadDir.split(':')[0];
|
||||
if (fileDrive !== uploadDrive) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// If we get here, the path is within the upload directory
|
||||
return true;
|
||||
} catch (err) {
|
||||
logger.error(`Path validation error: ${err.message}`, err);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
formatFileSize,
|
||||
calculateDirectorySize,
|
||||
@@ -371,8 +178,5 @@ module.exports = {
|
||||
getUniqueFolderPath,
|
||||
sanitizeFilename,
|
||||
sanitizePathPreserveDirs,
|
||||
sanitizeFilenameSafe,
|
||||
sanitizePathPreserveDirsSafe,
|
||||
isValidBatchId,
|
||||
isPathWithinUploadDir
|
||||
isValidBatchId
|
||||
};
|
||||
@@ -1,99 +0,0 @@
|
||||
/**
|
||||
* Secure IP address extraction utility.
|
||||
* Prevents IP spoofing attacks by properly validating proxy trust.
|
||||
* Implements defense against X-Forwarded-For header manipulation.
|
||||
*/
|
||||
|
||||
const { config } = require('../config');
|
||||
const logger = require('./logger');
|
||||
|
||||
/**
|
||||
* Safely extract the real client IP address from a request.
|
||||
*
|
||||
* Security Behavior:
|
||||
* - If proxy NOT trusted (default): Always use socket IP, ignore headers
|
||||
* - If proxy trusted WITH specific IPs: Validate proxy IP before trusting headers
|
||||
* - If proxy trusted WITHOUT specific IPs: Trust Express's req.ip (trust proxy: 1)
|
||||
*
|
||||
* This prevents attackers from spoofing X-Forwarded-For headers to bypass
|
||||
* rate limiting and brute-force protection.
|
||||
*
|
||||
* @param {object} req - Express request object
|
||||
* @returns {string} The real client IP address
|
||||
*
|
||||
* @example
|
||||
* // In route handlers or middleware:
|
||||
* const ip = getClientIp(req);
|
||||
* if (isRateLimited(ip)) { ... }
|
||||
*/
|
||||
function getClientIp(req) {
|
||||
// If proxy trust is disabled (secure default), always use socket IP
|
||||
if (!config.trustProxy) {
|
||||
const socketIp = req.socket.remoteAddress || req.connection.remoteAddress || 'unknown';
|
||||
return normalizeIp(socketIp);
|
||||
}
|
||||
|
||||
// If specific trusted proxy IPs are configured, validate the proxy
|
||||
if (config.trustedProxyIps && config.trustedProxyIps.length > 0) {
|
||||
const proxyIp = req.socket.remoteAddress || req.connection.remoteAddress;
|
||||
|
||||
if (validateProxyChain(proxyIp, config.trustedProxyIps)) {
|
||||
// Proxy is trusted, use Express's parsed IP (respects trust proxy setting)
|
||||
return normalizeIp(req.ip || proxyIp || 'unknown');
|
||||
} else {
|
||||
// Proxy is NOT in trusted list, ignore headers and use socket IP
|
||||
logger.warn(`Untrusted proxy attempted to set X-Forwarded-For: ${proxyIp}`);
|
||||
return normalizeIp(proxyIp || 'unknown');
|
||||
}
|
||||
}
|
||||
|
||||
// Proxy trust enabled without specific IPs - trust Express's req.ip
|
||||
// (Express already configured with 'trust proxy': 1)
|
||||
return normalizeIp(req.ip || req.socket.remoteAddress || 'unknown');
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate that the immediate proxy is in the trusted proxy list.
|
||||
*
|
||||
* @param {string} proxyIp - IP address of the immediate proxy
|
||||
* @param {string[]} trustedIps - Array of trusted proxy IPs
|
||||
* @returns {boolean} True if proxy is trusted
|
||||
*/
|
||||
function validateProxyChain(proxyIp, trustedIps) {
|
||||
if (!proxyIp || !trustedIps || trustedIps.length === 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const normalizedProxyIp = normalizeIp(proxyIp);
|
||||
|
||||
// Check if proxy IP is in the trusted list
|
||||
return trustedIps.some(trustedIp => {
|
||||
const normalizedTrustedIp = normalizeIp(trustedIp);
|
||||
return normalizedProxyIp === normalizedTrustedIp;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize IP address format.
|
||||
* Handles IPv6-mapped IPv4 addresses (::ffff:192.168.1.1 -> 192.168.1.1)
|
||||
*
|
||||
* @param {string} ip - IP address to normalize
|
||||
* @returns {string} Normalized IP address
|
||||
*/
|
||||
function normalizeIp(ip) {
|
||||
if (!ip) return 'unknown';
|
||||
|
||||
// Convert IPv6-mapped IPv4 to standard IPv4
|
||||
if (ip.startsWith('::ffff:')) {
|
||||
return ip.substring(7);
|
||||
}
|
||||
|
||||
return ip;
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
getClientIp,
|
||||
validateProxyChain,
|
||||
normalizeIp
|
||||
};
|
||||
|
||||
@@ -5,7 +5,7 @@
 */

const crypto = require('crypto');
const logger = require('./logger');
const logger = require('./logger'); // Corrected path

/**
 * Store for login attempts with rate limiting
@@ -27,7 +27,7 @@ function startCleanupInterval() {
  if (cleanupInterval) {
    clearInterval(cleanupInterval);
  }

  cleanupInterval = setInterval(() => {
    const now = Date.now();
    let cleaned = 0;
@@ -41,7 +41,10 @@ function startCleanupInterval() {
      logger.info(`Cleaned up ${cleaned} expired lockouts`);
    }
  }, 60000); // Check every minute

  // Allow node to exit even if this interval is running
  cleanupInterval.unref();

  return cleanupInterval;
}

@@ -56,10 +59,15 @@ function stopCleanupInterval() {
}

// Start cleanup interval unless disabled
if (!process.env.DISABLE_BATCH_CLEANUP && !process.env.DISABLE_SECURITY_CLEANUP) {
if (!process.env.DISABLE_SECURITY_CLEANUP) {
  startCleanupInterval();
}

// Stop interval on shutdown signals
process.on('SIGTERM', stopCleanupInterval);
process.on('SIGINT', stopCleanupInterval);

/**
 * Reset login attempts for an IP
 * @param {string} ip - IP address
@@ -77,12 +85,13 @@ function resetAttempts(ip) {
function isLockedOut(ip) {
  const attempts = loginAttempts.get(ip);
  if (!attempts) return false;

  if (attempts.count >= MAX_ATTEMPTS) {
    const timeElapsed = Date.now() - attempts.lastAttempt;
    if (timeElapsed < LOCKOUT_DURATION) {
      return true;
    }
    // Lockout expired, reset attempts before proceeding
    resetAttempts(ip);
  }
  return false;
@@ -109,28 +118,41 @@ function recordAttempt(ip) {
 */
function validatePin(pin) {
  if (!pin || typeof pin !== 'string') return null;
  // Remove non-digit characters
  const cleanPin = pin.replace(/\D/g, '');
  // Check length constraints (e.g., 4-10 digits)
  return cleanPin.length >= 4 && cleanPin.length <= 10 ? cleanPin : null;
}

/**
 * Compare two strings in constant time
 * Compare two strings in constant time using crypto.timingSafeEqual
 * Pads strings to a fixed length to prevent timing attacks based on length.
 * @param {string} a - First string
 * @param {string} b - Second string
 * @returns {boolean} True if strings match
 */
function safeCompare(a, b) {
  // Ensure inputs are strings
  if (typeof a !== 'string' || typeof b !== 'string') {
    logger.warn('safeCompare received non-string input.');
    return false;
  }

  try {
    return crypto.timingSafeEqual(
      Buffer.from(a.padEnd(32)),
      Buffer.from(b.padEnd(32))
    );
    // Choose a fixed length significantly longer than expected max input length
    const fixedLength = 64;
    const bufferA = Buffer.alloc(fixedLength, 0); // Allocate buffer filled with zeros
    const bufferB = Buffer.alloc(fixedLength, 0);

    // Copy input strings into buffers, truncated if necessary
    bufferA.write(a.slice(0, fixedLength));
    bufferB.write(b.slice(0, fixedLength));

    // Perform timing-safe comparison
    return crypto.timingSafeEqual(bufferA, bufferB);
  } catch (err) {
    logger.error(`Safe compare error: ${err.message}`);
    // Handle potential errors like if inputs are unexpectedly huge (though sliced above)
    logger.error(`Error during safeCompare: ${err.message}`);
    return false;
  }
}
@@ -145,4 +167,4 @@ module.exports = {
  safeCompare,
  startCleanupInterval,
  stopCleanupInterval
};
};
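Taken together, the helpers above suggest a PIN check of the following shape. This is a sketch, not the project's actual route handler, and it assumes validatePin, isLockedOut, recordAttempt and resetAttempts are exported alongside safeCompare (only the tail of the export list is visible in this hunk):

// Hypothetical Express handler (illustrative only).
const { validatePin, isLockedOut, recordAttempt, resetAttempts, safeCompare } = require('./utils/security');

function verifyPinHandler(req, res) {
  const ip = req.ip;
  if (isLockedOut(ip)) {
    return res.status(429).json({ error: 'Too many attempts, try again later' });
  }
  const submitted = validatePin(req.body && req.body.pin); // null unless 4-10 digits
  const expected = validatePin(process.env.PIN || '');     // server-side PIN from the environment
  if (submitted && expected && safeCompare(submitted, expected)) {
    resetAttempts(ip);
    return res.json({ success: true });
  }
  recordAttempt(ip);
  return res.status(401).json({ error: 'Invalid PIN' });
}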
@@ -1,217 +0,0 @@
|
||||
/**
|
||||
* Authentication tests
|
||||
* Tests PIN protection and authentication middleware
|
||||
*/
|
||||
|
||||
// Disable batch cleanup for tests
|
||||
process.env.DISABLE_BATCH_CLEANUP = 'true';
|
||||
|
||||
const { describe, it, before, after, beforeEach } = require('node:test');
|
||||
const assert = require('node:assert');
|
||||
const http = require('node:http');
|
||||
|
||||
// Import the app
|
||||
const { app, initialize } = require('../src/app');
|
||||
|
||||
let server;
|
||||
let baseUrl;
|
||||
const originalPin = process.env.PIN;
|
||||
|
||||
before(async () => {
|
||||
// Set PIN for testing
|
||||
process.env.PIN = '1234';
|
||||
|
||||
// Initialize app
|
||||
await initialize();
|
||||
|
||||
// Start server on random port
|
||||
server = http.createServer(app);
|
||||
await new Promise((resolve) => {
|
||||
server.listen(0, () => {
|
||||
const { port } = server.address();
|
||||
baseUrl = `http://localhost:${port}`;
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
after(async () => {
|
||||
// Restore original PIN
|
||||
if (originalPin) {
|
||||
process.env.PIN = originalPin;
|
||||
} else {
|
||||
delete process.env.PIN;
|
||||
}
|
||||
|
||||
// Close server
|
||||
if (server) {
|
||||
await new Promise((resolve) => server.close(resolve));
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* Helper function to make HTTP requests
|
||||
*/
|
||||
async function makeRequest(options, body = null) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const req = http.request(options, (res) => {
|
||||
let data = '';
|
||||
res.on('data', (chunk) => {
|
||||
data += chunk;
|
||||
});
|
||||
res.on('end', () => {
|
||||
try {
|
||||
const parsed = data ? JSON.parse(data) : {};
|
||||
resolve({ status: res.statusCode, data: parsed, headers: res.headers, cookies: res.headers['set-cookie'] });
|
||||
} catch {
|
||||
resolve({ status: res.statusCode, data, headers: res.headers, cookies: res.headers['set-cookie'] });
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
req.on('error', reject);
|
||||
|
||||
if (body) {
|
||||
req.write(JSON.stringify(body));
|
||||
}
|
||||
|
||||
req.end();
|
||||
});
|
||||
}
|
||||
|
||||
describe('Authentication API Tests', () => {
|
||||
describe('GET /api/auth/pin-required', () => {
|
||||
it('should indicate if PIN is required', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/auth/pin-required',
|
||||
method: 'GET',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 200);
|
||||
assert.strictEqual(typeof response.data.required, 'boolean');
|
||||
});
|
||||
});
|
||||
|
||||
describe('POST /api/auth/verify-pin', () => {
|
||||
it('should accept correct PIN', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/auth/verify-pin',
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
}, {
|
||||
pin: '1234',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 200);
|
||||
assert.ok(response.cookies);
|
||||
});
|
||||
|
||||
it('should reject incorrect PIN', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/auth/verify-pin',
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
}, {
|
||||
pin: 'wrong',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 401);
|
||||
});
|
||||
|
||||
it('should reject empty PIN', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/auth/verify-pin',
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
}, {
|
||||
pin: '',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 400);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Protected Routes', () => {
|
||||
it('should require PIN for upload init', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/upload/init',
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
}, {
|
||||
filename: 'test.txt',
|
||||
fileSize: 100,
|
||||
});
|
||||
|
||||
// Should be redirected or unauthorized without PIN
|
||||
assert.ok(response.status === 401 || response.status === 403);
|
||||
});
|
||||
|
||||
it('should allow upload with valid PIN cookie', async () => {
|
||||
// First, get PIN cookie
|
||||
const authResponse = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/auth/verify-pin',
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
}, {
|
||||
pin: '1234',
|
||||
});
|
||||
|
||||
// Extract cookie
|
||||
const cookies = authResponse.cookies;
|
||||
const cookie = cookies ? cookies[0].split(';')[0] : '';
|
||||
|
||||
// Try upload with cookie
|
||||
const uploadResponse = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/upload/init',
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Cookie': cookie,
|
||||
},
|
||||
}, {
|
||||
filename: 'test.txt',
|
||||
fileSize: 100,
|
||||
});
|
||||
|
||||
assert.strictEqual(uploadResponse.status, 200);
|
||||
});
|
||||
});
|
||||
|
||||
describe('POST /api/auth/logout', () => {
|
||||
it('should clear authentication cookie', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/auth/logout',
|
||||
method: 'POST',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 200);
|
||||
});
|
||||
});
|
||||
});
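Read as a whole, the removed assertions above document the authentication contract that this compare view otherwise leaves implicit: /api/auth/verify-pin answers 200 plus a session cookie for the correct PIN, 401 for a wrong PIN and 400 for an empty one, while a protected route such as /api/upload/init answers 401 or 403 without that cookie and 200 once it is presented. The suite uses Node's built-in node:test runner and sets DISABLE_BATCH_CLEANUP before importing the app; presumably it was run with node --test against the test directory, though the npm script is not visible here.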
|
||||
|
||||
@@ -1,291 +0,0 @@
|
||||
/**
|
||||
* File management tests
|
||||
* Tests file listing, downloading, deletion, and renaming operations
|
||||
*/
|
||||
|
||||
// Disable batch cleanup for tests
|
||||
process.env.DISABLE_BATCH_CLEANUP = 'true';
|
||||
|
||||
const { describe, it, before, after } = require('node:test');
|
||||
const assert = require('node:assert');
|
||||
const http = require('node:http');
|
||||
const fs = require('fs').promises;
|
||||
const path = require('path');
|
||||
|
||||
// Import the app
|
||||
const { app, initialize, config } = require('../src/app');
|
||||
|
||||
let server;
|
||||
let baseUrl;
|
||||
let testFilePath;
|
||||
|
||||
before(async () => {
|
||||
// Initialize app
|
||||
await initialize();
|
||||
|
||||
// Create a test file
|
||||
testFilePath = path.join(config.uploadDir, 'test-file.txt');
|
||||
await fs.writeFile(testFilePath, 'Test content');
|
||||
|
||||
// Start server on random port
|
||||
server = http.createServer(app);
|
||||
await new Promise((resolve) => {
|
||||
server.listen(0, () => {
|
||||
const { port } = server.address();
|
||||
baseUrl = `http://localhost:${port}`;
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
after(async () => {
|
||||
// Close server
|
||||
if (server) {
|
||||
await new Promise((resolve) => server.close(resolve));
|
||||
}
|
||||
|
||||
// Clean up test files
|
||||
try {
|
||||
const testFiles = await fs.readdir(config.uploadDir);
|
||||
for (const file of testFiles) {
|
||||
if (file !== '.metadata') {
|
||||
const filePath = path.join(config.uploadDir, file);
|
||||
const stat = await fs.stat(filePath);
|
||||
if (stat.isFile()) {
|
||||
await fs.unlink(filePath);
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* Helper function to make HTTP requests
|
||||
*/
|
||||
async function makeRequest(options, body = null) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const req = http.request(options, (res) => {
|
||||
let data = '';
|
||||
res.on('data', (chunk) => {
|
||||
data += chunk;
|
||||
});
|
||||
res.on('end', () => {
|
||||
try {
|
||||
const parsed = data ? JSON.parse(data) : {};
|
||||
resolve({ status: res.statusCode, data: parsed, headers: res.headers });
|
||||
} catch {
|
||||
resolve({ status: res.statusCode, data, headers: res.headers });
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
req.on('error', reject);
|
||||
|
||||
if (body) {
|
||||
req.write(JSON.stringify(body));
|
||||
}
|
||||
|
||||
req.end();
|
||||
});
|
||||
}
|
||||
|
||||
describe('File Management API Tests', () => {
|
||||
describe('GET /api/files', () => {
|
||||
it('should list all files', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/files',
|
||||
method: 'GET',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 200);
|
||||
assert.ok(Array.isArray(response.data.items));
|
||||
assert.ok(response.data.totalFiles >= 0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('GET /api/files/info/*', () => {
|
||||
it('should return file info for existing file', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/files/info/test-file.txt',
|
||||
method: 'GET',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 200);
|
||||
assert.strictEqual(response.data.filename, 'test-file.txt');
|
||||
assert.ok(response.data.size >= 0);
|
||||
});
|
||||
|
||||
it('should return 404 for non-existent file', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/files/info/nonexistent.txt',
|
||||
method: 'GET',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 404);
|
||||
});
|
||||
|
||||
it('should prevent path traversal attacks', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/files/info/../../../etc/passwd',
|
||||
method: 'GET',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 403);
|
||||
});
|
||||
});
|
||||
|
||||
describe('GET /api/files/download/*', () => {
|
||||
it('should download existing file', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/files/download/test-file.txt',
|
||||
method: 'GET',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 200);
|
||||
assert.ok(response.headers['content-disposition']);
|
||||
});
|
||||
|
||||
it('should return 404 for non-existent file', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/files/download/nonexistent.txt',
|
||||
method: 'GET',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 404);
|
||||
});
|
||||
|
||||
it('should prevent path traversal in download', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/files/download/../../../etc/passwd',
|
||||
method: 'GET',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 403);
|
||||
});
|
||||
});
|
||||
|
||||
describe('DELETE /api/files/*', () => {
|
||||
it('should delete existing file', async () => {
|
||||
// Create a file to delete
|
||||
const deleteTestPath = path.join(config.uploadDir, 'delete-test.txt');
|
||||
await fs.writeFile(deleteTestPath, 'To be deleted');
|
||||
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/files/delete-test.txt',
|
||||
method: 'DELETE',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 200);
|
||||
|
||||
// Verify file is deleted
|
||||
try {
|
||||
await fs.access(deleteTestPath);
|
||||
assert.fail('File should have been deleted');
|
||||
} catch (err) {
|
||||
assert.strictEqual(err.code, 'ENOENT');
|
||||
}
|
||||
});
|
||||
|
||||
it('should return 404 for non-existent file', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/files/nonexistent.txt',
|
||||
method: 'DELETE',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 404);
|
||||
});
|
||||
|
||||
it('should prevent path traversal in deletion', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/files/../../../etc/passwd',
|
||||
method: 'DELETE',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 403);
|
||||
});
|
||||
});
|
||||
|
||||
describe('PUT /api/files/rename/*', () => {
|
||||
it('should rename existing file', async () => {
|
||||
// Create a file to rename
|
||||
const renameTestPath = path.join(config.uploadDir, 'rename-test.txt');
|
||||
await fs.writeFile(renameTestPath, 'To be renamed');
|
||||
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/files/rename/rename-test.txt',
|
||||
method: 'PUT',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
}, {
|
||||
newName: 'renamed-file.txt',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 200);
|
||||
|
||||
// Verify new file exists
|
||||
const newPath = path.join(config.uploadDir, 'renamed-file.txt');
|
||||
await fs.access(newPath);
|
||||
|
||||
// Clean up
|
||||
await fs.unlink(newPath);
|
||||
});
|
||||
|
||||
it('should reject empty new name', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/files/rename/test-file.txt',
|
||||
method: 'PUT',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
}, {
|
||||
newName: '',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 400);
|
||||
});
|
||||
|
||||
it('should prevent path traversal in rename', async () => {
|
||||
const response = await makeRequest({
|
||||
host: 'localhost',
|
||||
port: server.address().port,
|
||||
path: '/api/files/rename/../../../etc/passwd',
|
||||
method: 'PUT',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
}, {
|
||||
newName: 'hacked.txt',
|
||||
});
|
||||
|
||||
assert.strictEqual(response.status, 403);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,152 +0,0 @@
/**
 * Path validation tests for bind mount compatibility
 * Tests the isPathWithinUploadDir function with various scenarios
 * Validates both existing and non-existing file paths
 */

const { test, describe, before, after } = require('node:test');
const assert = require('node:assert');
const path = require('path');
const fs = require('fs');
const os = require('os');
const { isPathWithinUploadDir } = require('../src/utils/fileUtils');

describe('Path Validation for Bind Mounts', () => {
  let testUploadDir;

  before(() => {
    // Create a temporary upload directory for testing
    testUploadDir = path.join(os.tmpdir(), 'dumbdrop-test-uploads-' + Date.now());
    fs.mkdirSync(testUploadDir, { recursive: true });
  });

  after(() => {
    // Clean up test directory
    try {
      fs.rmSync(testUploadDir, { recursive: true, force: true });
    } catch (err) {
      console.error('Failed to clean up test directory:', err);
    }
  });

  test('should allow valid file path within upload directory (non-existent file)', () => {
    const filePath = path.join(testUploadDir, 'test-file.txt');
    assert.strictEqual(isPathWithinUploadDir(filePath, testUploadDir, false), true);
  });

  test('should allow valid nested file path within upload directory (non-existent)', () => {
    const filePath = path.join(testUploadDir, 'subfolder', 'test-file.txt');
    assert.strictEqual(isPathWithinUploadDir(filePath, testUploadDir, false), true);
  });

  test('should allow valid file path with spaces (non-existent)', () => {
    const filePath = path.join(testUploadDir, 'test file with spaces.txt');
    assert.strictEqual(isPathWithinUploadDir(filePath, testUploadDir, false), true);
  });

  test('should reject path traversal with ../ (non-existent)', () => {
    const filePath = path.join(testUploadDir, '..', 'malicious.txt');
    assert.strictEqual(isPathWithinUploadDir(filePath, testUploadDir, false), false);
  });

  test('should reject path traversal with nested ../ (non-existent)', () => {
    const filePath = path.join(testUploadDir, 'folder', '..', '..', 'malicious.txt');
    assert.strictEqual(isPathWithinUploadDir(filePath, testUploadDir, false), false);
  });

  test('should allow upload directory itself', () => {
    assert.strictEqual(isPathWithinUploadDir(testUploadDir, testUploadDir, false), true);
  });

  test('should work with .partial file extensions', () => {
    const filePath = path.join(testUploadDir, 'upload.txt.partial');
    assert.strictEqual(isPathWithinUploadDir(filePath, testUploadDir, false), true);
  });

  test('should handle paths with normalized separators', () => {
    // Test with forward slashes (cross-platform)
    const filePath = path.normalize(path.join(testUploadDir, 'folder/subfolder/file.txt'));
    assert.strictEqual(isPathWithinUploadDir(filePath, testUploadDir, false), true);
  });

  test('should work with existing files when requireExists=true', () => {
    // Create an actual file
    const filePath = path.join(testUploadDir, 'existing-file.txt');
    fs.writeFileSync(filePath, 'test content');

    assert.strictEqual(isPathWithinUploadDir(filePath, testUploadDir, true), true);

    // Clean up
    fs.unlinkSync(filePath);
  });

  test('should reject existing file outside upload directory', () => {
    // Create a file outside the upload directory
    const outsideFile = path.join(os.tmpdir(), 'outside-file.txt');
    fs.writeFileSync(outsideFile, 'test content');

    assert.strictEqual(isPathWithinUploadDir(outsideFile, testUploadDir, true), false);

    // Clean up
    fs.unlinkSync(outsideFile);
  });

  test('should reject paths on different drives (Windows only)', () => {
    if (process.platform !== 'win32') {
      return; // Skip on non-Windows
    }

    // Try to use a different drive letter
    const currentDrive = testUploadDir.split(':')[0];
    const differentDrive = currentDrive === 'C' ? 'D' : 'C';
    const differentDrivePath = `${differentDrive}:\\temp\\file.txt`;

    // This should be rejected
    assert.strictEqual(isPathWithinUploadDir(differentDrivePath, testUploadDir, false), false);
  });

  test('should handle deeply nested folder structures', () => {
    const deepPath = path.join(testUploadDir, 'a', 'b', 'c', 'd', 'e', 'file.txt');
    assert.strictEqual(isPathWithinUploadDir(deepPath, testUploadDir, false), true);
  });

  test('should reject absolute paths outside upload directory', () => {
    const outsidePath = path.join(os.tmpdir(), 'outside', 'file.txt');
    assert.strictEqual(isPathWithinUploadDir(outsidePath, testUploadDir, false), false);
  });
});

describe('Path Validation Edge Cases', () => {
  let testUploadDir;

  before(() => {
    testUploadDir = path.join(os.tmpdir(), 'dumbdrop-edge-test-' + Date.now());
    fs.mkdirSync(testUploadDir, { recursive: true });
  });

  after(() => {
    try {
      fs.rmSync(testUploadDir, { recursive: true, force: true });
    } catch (err) {
      console.error('Failed to clean up test directory:', err);
    }
  });

  test('should handle special characters in filenames', () => {
    const filePath = path.join(testUploadDir, 'file (1).txt');
    assert.strictEqual(isPathWithinUploadDir(filePath, testUploadDir, false), true);
  });

  test('should handle Unicode filenames', () => {
    const filePath = path.join(testUploadDir, 'файл.txt'); // Russian
    assert.strictEqual(isPathWithinUploadDir(filePath, testUploadDir, false), true);
  });

  test('should reject non-existent upload directory', () => {
    const fakeUploadDir = path.join(os.tmpdir(), 'non-existent-dir-' + Date.now());
    const filePath = path.join(fakeUploadDir, 'file.txt');

    assert.strictEqual(isPathWithinUploadDir(filePath, fakeUploadDir, false), false);
  });
});

@@ -1,442 +0,0 @@
/**
 * Security tests
 * Tests path traversal protection, file extension validation, and other security features
 */

// Disable batch cleanup for tests
process.env.DISABLE_BATCH_CLEANUP = 'true';

const { describe, it, before, after } = require('node:test');
const assert = require('node:assert');
const http = require('node:http');
const fs = require('fs').promises;
const path = require('path');

// Import the app and utilities
const { app, initialize, config } = require('../src/app');
const { sanitizeFilenameSafe, sanitizePathPreserveDirsSafe } = require('../src/utils/fileUtils');

let server;
let baseUrl;

before(async () => {
  // Initialize app
  await initialize();

  // Start server on random port
  server = http.createServer(app);
  await new Promise((resolve) => {
    server.listen(0, () => {
      const { port } = server.address();
      baseUrl = `http://localhost:${port}`;
      resolve();
    });
  });
});

after(async () => {
  // Close server
  if (server) {
    await new Promise((resolve) => server.close(resolve));
  }

  // Clean up test files
  try {
    const testFiles = await fs.readdir(config.uploadDir);
    for (const file of testFiles) {
      if (file !== '.metadata') {
        const filePath = path.join(config.uploadDir, file);
        const stat = await fs.stat(filePath);
        if (stat.isFile()) {
          await fs.unlink(filePath);
        }
      }
    }
  } catch (err) {
    // Ignore cleanup errors
  }
});

/**
 * Helper function to make HTTP requests
 */
async function makeRequest(options, body = null) {
  return new Promise((resolve, reject) => {
    const req = http.request(options, (res) => {
      let data = '';
      res.on('data', (chunk) => {
        data += chunk;
      });
      res.on('end', () => {
        try {
          const parsed = data ? JSON.parse(data) : {};
          resolve({ status: res.statusCode, data: parsed, headers: res.headers });
        } catch {
          resolve({ status: res.statusCode, data, headers: res.headers });
        }
      });
    });

    req.on('error', reject);

    if (body) {
      req.write(JSON.stringify(body));
    }

    req.end();
  });
}

describe('Security Tests', () => {
  describe('Path Traversal Protection', () => {
    it('should block path traversal in file download', async () => {
      const response = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: '/api/files/download/../../../etc/passwd',
        method: 'GET',
      });

      assert.strictEqual(response.status, 403);
    });

    it('should block path traversal in file info', async () => {
      const response = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: '/api/files/info/../../package.json',
        method: 'GET',
      });

      assert.strictEqual(response.status, 403);
    });

    it('should block path traversal in file deletion', async () => {
      const response = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: '/api/files/../../../important-file.txt',
        method: 'DELETE',
      });

      assert.strictEqual(response.status, 403);
    });

    it('should block absolute paths in upload', async () => {
      const response = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: '/api/upload/init',
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
      }, {
        filename: '/etc/passwd',
        fileSize: 100,
      });

      // Should either succeed with sanitized name or reject
      if (response.status === 200) {
        // Verify it was sanitized
        assert.ok(!response.data.uploadId.includes('/etc'));
      }
    });
  });

  describe('Filename Sanitization', () => {
    it('should sanitize dangerous characters', () => {
      const dangerous = '../../../etc/passwd';
      const sanitized = sanitizeFilenameSafe(dangerous);

      assert.ok(!sanitized.includes('..'));
      assert.ok(!sanitized.includes('/'));
    });

    it('should handle null bytes', () => {
      const nullByte = 'file\x00.txt';
      const sanitized = sanitizeFilenameSafe(nullByte);

      assert.ok(!sanitized.includes('\x00'));
    });

    it('should preserve safe filenames', () => {
      const safe = 'my-file_123.txt';
      const sanitized = sanitizeFilenameSafe(safe);

      assert.strictEqual(sanitized, safe);
    });

    it('should handle Unicode characters', () => {
      const unicode = 'файл.txt';
      const sanitized = sanitizeFilenameSafe(unicode);

      // Should be sanitized to ASCII-safe format
      assert.ok(sanitized.length > 0);
    });
  });

  describe('File Size Limits', () => {
    it('should reject files exceeding size limit', async () => {
      const response = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: '/api/upload/init',
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
      }, {
        filename: 'huge-file.bin',
        fileSize: config.maxFileSize + 1,
      });

      assert.strictEqual(response.status, 413);
    });

    it('should accept files within size limit', async () => {
      const response = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: '/api/upload/init',
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
      }, {
        filename: 'small-file.txt',
        fileSize: 1024,
      });

      assert.strictEqual(response.status, 200);
    });
  });

  describe('Content Type Validation', () => {
    it('should handle various content types safely', async () => {
      const contentTypes = [
        'text/plain',
        'application/json',
        'image/png',
        'application/pdf',
      ];

      for (const contentType of contentTypes) {
        const response = await makeRequest({
          host: 'localhost',
          port: server.address().port,
          path: '/api/upload/init',
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
          },
        }, {
          filename: `test.${contentType.split('/')[1]}`,
          fileSize: 100,
        });

        // Should handle all content types (unless restricted by config)
        assert.ok(response.status === 200 || response.status === 400);
      }
    });
  });

  describe('Rate Limiting', () => {
    it('should enforce rate limits on repeated requests', async () => {
      // Make multiple rapid requests
      const requests = [];
      for (let i = 0; i < 50; i++) {
        requests.push(
          makeRequest({
            host: 'localhost',
            port: server.address().port,
            path: '/api/upload/init',
            method: 'POST',
            headers: {
              'Content-Type': 'application/json',
            },
          }, {
            filename: `test-${i}.txt`,
            fileSize: 100,
          })
        );
      }

      const responses = await Promise.all(requests);

      // At least some should be rate limited (429)
      const rateLimited = responses.filter((r) => r.status === 429);

      // Rate limiting should kick in for excessive requests
      assert.ok(rateLimited.length > 0 || responses[0].status === 200);
    });
  });

  describe('CORS Protection', () => {
    it('should include CORS headers', async () => {
      const response = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: '/api/files',
        method: 'GET',
      });

      // CORS headers should be present
      assert.ok(response.headers['access-control-allow-origin'] !== undefined);
    });
  });

  describe('Path Sanitization Functions', () => {
    it('should sanitize paths while preserving directories', () => {
      const dirPath = 'folder/subfolder/file.txt';
      const sanitized = sanitizePathPreserveDirsSafe(dirPath);

      // Should preserve structure but sanitize dangerous chars
      assert.ok(!sanitized.includes('..'));
      assert.ok(sanitized.includes('/') || sanitized.length > 0);
    });

    it('should block directory traversal attempts', () => {
      const malicious = '../../etc/passwd';
      const sanitized = sanitizePathPreserveDirsSafe(malicious);

      // Should not allow traversal
      assert.ok(!sanitized.startsWith('..'));
    });
  });

  describe('IP Spoofing Protection', () => {
    it('should not trust X-Forwarded-For header when TRUST_PROXY is false', async () => {
      // Verify config.trustProxy is false by default
      assert.strictEqual(config.trustProxy, false, 'TRUST_PROXY should be false by default');

      // Make multiple requests with spoofed X-Forwarded-For headers
      const spoofedIps = ['1.2.3.4', '5.6.7.8', '9.10.11.12', '13.14.15.16', '17.18.19.20', '21.22.23.24'];
      const responses = [];

      for (const spoofedIp of spoofedIps) {
        const response = await makeRequest({
          host: 'localhost',
          port: server.address().port,
          path: '/api/auth/verify-pin',
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
            'X-Forwarded-For': spoofedIp,
          },
        }, {
          pin: '9999', // Wrong PIN
        });

        responses.push(response);
      }

      // Should be rate limited because all requests come from same real IP
      // (spoofed headers should be ignored)
      const rateLimitedOrLocked = responses.filter(
        (r) => r.status === 429 || (r.status === 401 && r.data.error && r.data.error.includes('locked'))
      );

      // After 5 failed attempts, should be locked out
      assert.ok(rateLimitedOrLocked.length > 0, 'Rate limiting should apply despite spoofed headers');
    });

    it('should use socket IP when proxy trust is disabled', () => {
      const { getClientIp } = require('../src/utils/ipExtractor');

      // Mock request with spoofed X-Forwarded-For
      const mockReq = {
        ip: '192.168.1.100', // This would be from X-Forwarded-For if trusted
        socket: {
          remoteAddress: '::ffff:127.0.0.1', // Real socket IP
        },
        headers: {
          'x-forwarded-for': '192.168.1.100',
        },
      };

      const extractedIp = getClientIp(mockReq);

      // Should use socket IP, not req.ip (which comes from X-Forwarded-For when trusted)
      assert.strictEqual(extractedIp, '127.0.0.1', 'Should extract from socket, not trust headers');
    });

    it('should normalize IPv6-mapped IPv4 addresses', () => {
      const { normalizeIp } = require('../src/utils/ipExtractor');

      const ipv6Mapped = '::ffff:192.168.1.1';
      const normalized = normalizeIp(ipv6Mapped);

      assert.strictEqual(normalized, '192.168.1.1', 'Should convert IPv6-mapped to IPv4');
    });

    it('should validate proxy chain when specific IPs are configured', () => {
      const { validateProxyChain } = require('../src/utils/ipExtractor');

      const trustedIps = ['172.17.0.1', '10.0.0.1'];

      // Trusted proxy should pass
      assert.strictEqual(validateProxyChain('172.17.0.1', trustedIps), true);
      assert.strictEqual(validateProxyChain('10.0.0.1', trustedIps), true);

      // Untrusted proxy should fail
      assert.strictEqual(validateProxyChain('192.168.1.1', trustedIps), false);
      assert.strictEqual(validateProxyChain('8.8.8.8', trustedIps), false);
    });

    it('should handle IPv6-mapped addresses in proxy validation', () => {
      const { validateProxyChain } = require('../src/utils/ipExtractor');

      const trustedIps = ['127.0.0.1'];

      // IPv6-mapped localhost should match
      assert.strictEqual(validateProxyChain('::ffff:127.0.0.1', trustedIps), true);
    });

    it('should prevent rate limit bypass via header spoofing', async () => {
      // This test verifies the fix for the reported vulnerability
      // Make 6 requests with different X-Forwarded-For headers but same real IP
      const attempts = [];

      for (let i = 0; i < 6; i++) {
        attempts.push(
          makeRequest({
            host: 'localhost',
            port: server.address().port,
            path: '/api/auth/verify-pin',
            method: 'POST',
            headers: {
              'Content-Type': 'application/json',
              'X-Forwarded-For': `${Math.floor(Math.random() * 255)}.${Math.floor(Math.random() * 255)}.${Math.floor(Math.random() * 255)}.${Math.floor(Math.random() * 255)}`,
            },
          }, {
            pin: '0000', // Wrong PIN
          })
        );

        // Small delay between requests to avoid race conditions
        await new Promise(resolve => setTimeout(resolve, 50));
      }

      const responses = await Promise.all(attempts);

      // Count failures (401) and rate limits (429)
      const failures = responses.filter(r => r.status === 401);
      const rateLimited = responses.filter(r => r.status === 429);

      // Should be locked out after 5 attempts, despite spoofed headers
      // Either the 6th request is rate limited (429), or shows lockout message
      const lastResponse = responses[responses.length - 1];
      const isLockedOut =
        lastResponse.status === 429 ||
        (lastResponse.status === 401 && lastResponse.data.error &&
          (lastResponse.data.error.includes('locked') || lastResponse.data.error.includes('Too many')));

      assert.ok(
        failures.length >= 5 || rateLimited.length > 0 || isLockedOut,
        'Should enforce rate limiting despite header spoofing attempts'
      );
    });
  });
});

@@ -1,291 +0,0 @@
/**
 * Upload functionality tests
 * Tests file upload initialization, chunked uploads, and batch operations
 */

// Disable batch cleanup for tests
process.env.DISABLE_BATCH_CLEANUP = 'true';

const { describe, it, before, after } = require('node:test');
const assert = require('node:assert');
const http = require('node:http');
const fs = require('fs').promises;
const path = require('path');
const crypto = require('crypto');

// Import the app
const { app, initialize, config } = require('../src/app');

let server;
let baseUrl;

before(async () => {
  // Initialize app
  await initialize();

  // Start server on random port
  server = http.createServer(app);
  await new Promise((resolve) => {
    server.listen(0, () => {
      const { port } = server.address();
      baseUrl = `http://localhost:${port}`;
      resolve();
    });
  });
});

after(async () => {
  // Close server
  if (server) {
    await new Promise((resolve) => server.close(resolve));
  }

  // Clean up test uploads
  try {
    const testFiles = await fs.readdir(config.uploadDir);
    for (const file of testFiles) {
      if (file !== '.metadata') {
        const filePath = path.join(config.uploadDir, file);
        const stat = await fs.stat(filePath);
        if (stat.isFile()) {
          await fs.unlink(filePath);
        }
      }
    }
  } catch (err) {
    // Ignore cleanup errors
  }
});

/**
 * Helper function to make HTTP requests
 */
async function makeRequest(options, body = null) {
  return new Promise((resolve, reject) => {
    const req = http.request(options, (res) => {
      let data = '';
      res.on('data', (chunk) => {
        data += chunk;
      });
      res.on('end', () => {
        try {
          const parsed = data ? JSON.parse(data) : {};
          resolve({ status: res.statusCode, data: parsed, headers: res.headers });
        } catch {
          resolve({ status: res.statusCode, data, headers: res.headers });
        }
      });
    });

    req.on('error', reject);

    if (body) {
      if (Buffer.isBuffer(body)) {
        req.write(body);
      } else {
        req.write(JSON.stringify(body));
      }
    }

    req.end();
  });
}

describe('Upload API Tests', () => {
  describe('POST /api/upload/init', () => {
    it('should initialize a new upload', async () => {
      const response = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: '/api/upload/init',
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
      }, {
        filename: 'test.txt',
        fileSize: 100,
      });

      assert.strictEqual(response.status, 200);
      assert.ok(response.data.uploadId);
    });

    it('should reject uploads without filename', async () => {
      const response = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: '/api/upload/init',
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
      }, {
        fileSize: 100,
      });

      assert.strictEqual(response.status, 400);
      assert.ok(response.data.error);
    });

    it('should reject uploads without fileSize', async () => {
      const response = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: '/api/upload/init',
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
      }, {
        filename: 'test.txt',
      });

      assert.strictEqual(response.status, 400);
      assert.ok(response.data.error);
    });

    it('should handle zero-byte files', async () => {
      const response = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: '/api/upload/init',
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
      }, {
        filename: 'empty.txt',
        fileSize: 0,
      });

      assert.strictEqual(response.status, 200);
      assert.ok(response.data.uploadId);
    });
  });

  describe('POST /api/upload/chunk/:uploadId', () => {
    it('should accept chunks for a valid upload', async () => {
      // Initialize upload first
      const initResponse = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: '/api/upload/init',
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
      }, {
        filename: 'chunk-test.txt',
        fileSize: 50,
      });

      const { uploadId } = initResponse.data;

      // Send chunk
      const chunk = Buffer.from('Hello, World!');
      const chunkResponse = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: `/api/upload/chunk/${uploadId}`,
        method: 'POST',
        headers: {
          'Content-Type': 'application/octet-stream',
        },
      }, chunk);

      assert.strictEqual(chunkResponse.status, 200);
      assert.ok(chunkResponse.data.bytesReceived > 0);
    });

    it('should reject chunks for invalid uploadId', async () => {
      const chunk = Buffer.from('Test data');
      const response = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: '/api/upload/chunk/invalid-id',
        method: 'POST',
        headers: {
          'Content-Type': 'application/octet-stream',
        },
      }, chunk);

      assert.strictEqual(response.status, 404);
    });
  });

  describe('POST /api/upload/cancel/:uploadId', () => {
    it('should cancel an active upload', async () => {
      // Initialize upload
      const initResponse = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: '/api/upload/init',
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
      }, {
        filename: 'cancel-test.txt',
        fileSize: 100,
      });

      const { uploadId } = initResponse.data;

      // Cancel upload
      const cancelResponse = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: `/api/upload/cancel/${uploadId}`,
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
      });

      assert.strictEqual(cancelResponse.status, 200);
    });
  });

  describe('Batch uploads', () => {
    it('should handle multiple files with same batch ID', async () => {
      const batchId = `batch-${crypto.randomBytes(4).toString('hex')}`;

      // Initialize first file
      const file1Response = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: '/api/upload/init',
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'X-Batch-Id': batchId,
        },
      }, {
        filename: 'batch-file1.txt',
        fileSize: 50,
      });

      assert.strictEqual(file1Response.status, 200);

      // Initialize second file
      const file2Response = await makeRequest({
        host: 'localhost',
        port: server.address().port,
        path: '/api/upload/init',
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'X-Batch-Id': batchId,
        },
      }, {
        filename: 'batch-file2.txt',
        fileSize: 50,
      });

      assert.strictEqual(file2Response.status, 200);
      assert.notStrictEqual(file1Response.data.uploadId, file2Response.data.uploadId);
    });
  });
});