Mirror of https://github.com/DumbWareio/DumbDrop.git
Synced 2025-10-23 07:41:58 +00:00

Compare commits: 6 commits (105d2a7412 ... c24e866074)

Commits (newest first):
- c24e866074
- cb7e49b0e1
- 5666569580
- 6f1b93ed39
- b256311822
- bf1c9a2dbd
.env.example (63 changed lines)

@@ -5,25 +5,60 @@
 # Port for the server (default: 3000)
 PORT=3000
 
-# Base URL for the application (default: http://localhost:PORT)
+# Base URL for the application (must end with '/', default: http://localhost:PORT/)
 BASE_URL=http://localhost:3000/
 
 # Node environment (default: development)
 NODE_ENV=development
 
 #########################################
-# FILE UPLOAD SETTINGS
+# STORAGE CONFIGURATION
 #########################################
 
+# Storage type ('local' or 's3', default: local)
+STORAGE_TYPE=local
+
+#########################################
+# LOCAL STORAGE SETTINGS (if STORAGE_TYPE=local)
+#########################################
+
+# Directory for uploads (local dev, fallback: './local_uploads')
+LOCAL_UPLOAD_DIR=./local_uploads
+
+# Directory for uploads (Docker/production; optional, overrides LOCAL_UPLOAD_DIR if set)
+UPLOAD_DIR=
+
+#########################################
+# S3 STORAGE SETTINGS (if STORAGE_TYPE=s3)
+#########################################
+
+# S3 Region (e.g., us-east-1 for AWS, us-west-000 for B2)
+S3_REGION=
+
+# S3 Bucket Name
+S3_BUCKET_NAME=
+
+# S3 Access Key ID
+S3_ACCESS_KEY_ID=
+
+# S3 Secret Access Key
+S3_SECRET_ACCESS_KEY=
+
+# Optional: S3 Endpoint URL (for non-AWS S3-compatible providers like MinIO, Backblaze B2)
+# Example Backblaze B2: https://s3.us-west-000.backblazeb2.com
+# Example MinIO: http://minio.local:9000
+S3_ENDPOINT_URL=
+
+# Optional: Force Path Style (true/false, default: false). Needed for some providers like MinIO.
+S3_FORCE_PATH_STYLE=false
+
+#########################################
+# FILE UPLOAD LIMITS & OPTIONS
+#########################################
 
 # Maximum file size in MB (default: 1024)
 MAX_FILE_SIZE=1024
 
-# Directory for uploads (Docker/production; optional)
-UPLOAD_DIR=
-
-# Directory for uploads (local dev, fallback: './local_uploads')
-LOCAL_UPLOAD_DIR=./local_uploads
-
 # Comma-separated list of allowed file extensions (optional, e.g. .jpg,.png,.pdf)
 # ALLOWED_EXTENSIONS=.jpg,.png,.pdf
 ALLOWED_EXTENSIONS=

@@ -43,6 +78,10 @@ DUMBDROP_PIN=
 # Site title displayed in header (default: DumbDrop)
 DUMBDROP_TITLE=DumbDrop
 
+# Custom footer links (comma-separated, format: "Link Text @ URL")
+# Example: FOOTER_LINKS=My Site @ https://example.com, Another Link @ https://another.org
+FOOTER_LINKS=
 
 #########################################
 # NOTIFICATION SETTINGS
 #########################################

@@ -65,4 +104,10 @@ AUTO_UPLOAD=false
 
 # Comma-separated list of origins allowed to embed the app in an iframe (optional)
 # ALLOWED_IFRAME_ORIGINS=https://example.com,https://another.com
 ALLOWED_IFRAME_ORIGINS=
+
+# Max number of retries for client-side chunk uploads (default: 5)
+CLIENT_MAX_RETRIES=5
+
+# Demo Mode (true/false, default: false). Overrides storage settings.
+DEMO_MODE=false
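The new `CLIENT_MAX_RETRIES` knob maps to client-side upload code. A minimal sketch of a chunk-upload loop honoring it — the endpoint path and backoff schedule here are illustrative assumptions, not the app's exact client code:

```js
// Sketch: retry a single chunk upload up to CLIENT_MAX_RETRIES times.
// The '/api/upload/chunk' endpoint name is a placeholder.
const MAX_RETRIES = Number(process.env.CLIENT_MAX_RETRIES || 5);

async function uploadChunk(url, chunk, attempt = 0) {
  try {
    const res = await fetch(url, { method: 'POST', body: chunk });
    if (!res.ok) throw new Error(`HTTP ${res.status}`);
    return res;
  } catch (err) {
    if (attempt >= MAX_RETRIES) throw err;
    // Simple exponential backoff before the next attempt.
    await new Promise(r => setTimeout(r, 2 ** attempt * 500));
    return uploadChunk(url, chunk, attempt + 1);
  }
}
```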
.github/workflows/docker-publish.yml (vendored, 3 changed lines)

@@ -4,6 +4,7 @@ on:
   push:
     branches:
       - main # Trigger the workflow on pushes to the main branch
+      - dev # Trigger the workflow on pushes to the dev branch
 
 jobs:
   build-and-push:

@@ -39,6 +40,8 @@ jobs:
       images: |
         name=dumbwareio/dumbdrop
       tags: |
+        # Add :dev tag for pushes to the dev branch
+        type=raw,value=dev,enable=${{ github.ref == 'refs/heads/dev' }}
         # the semantic versioning tags add "latest" when a version tag is present
         # but since version tags aren't being used (yet?) let's add "latest" anyway
         type=raw,value=latest
Dockerfile

@@ -50,9 +50,10 @@ CMD ["npm", "run", "dev"]
 
 # Production stage
 FROM deps as production
 ENV NODE_ENV=production
+ENV UPLOAD_DIR /app/uploads
 
 # Create upload directory
-RUN mkdir -p uploads
+# RUN mkdir -p uploads # No longer strictly needed here as volume mapping is expected, but harmless
 
 # Copy only necessary source files
 COPY src/ ./src/
README.md (189 changed lines)

@@ -4,7 +4,7 @@ A stupid simple file upload application that provides a clean, modern interface
 
 
 
-No auth (unless you want it now!), no storage, no nothing. Just a simple file uploader to drop dumb files into a dumb folder.
+No auth (unless you want it!), no complicated setup (unless you want to!), no nothing. Just a simple way to drop dumb files into a dumb folder... or an S3 bucket!
 
 ## Table of Contents
 - [Quick Start](#quick-start)
@@ -20,53 +20,70 @@ No auth (unless you want it now!), no storage, no nothing. Just a simple file up
 
 ## Quick Start
 
-### Option 1: Docker (For Dummies)
+### Option 1: Docker (For Dummies - Local Storage)
 ```bash
-# Pull and run with one command
+# Pull and run with one command (uses local storage)
 docker run -p 3000:3000 -v ./uploads:/app/uploads dumbwareio/dumbdrop:latest
 ```
 1. Go to http://localhost:3000
-2. Upload a File - It'll show up in ./uploads
-3. Celebrate on how dumb easy this was
+2. Upload a File - It'll show up in `./uploads` on your host machine.
+3. Celebrate on how dumb easy this was.
 
-### Option 2: Docker Compose (For Dummies who like customizing)
+### Option 2: Docker Compose (For Dummies who like customizing - Local or S3)
 Create a `docker-compose.yml` file:
 
 ```yaml
 services:
-  dumbdrop:
-    image: dumbwareio/dumbdrop:latest
-    ports:
-      - 3000:3000
-    volumes:
-      # Where your uploaded files will land
-      - ./uploads:/app/uploads
-    environment:
-      # Explicitly set upload directory inside the container
-      UPLOAD_DIR: /app/uploads
-      # The title shown in the web interface
-      DUMBDROP_TITLE: DumbDrop
-      # Maximum file size in MB
-      MAX_FILE_SIZE: 1024
-      # Optional PIN protection (leave empty to disable)
-      DUMBDROP_PIN: 123456
-      # Upload without clicking button
-      AUTO_UPLOAD: false
-      # The base URL for the application
-      BASE_URL: http://localhost:3000
+  dumbdrop:
+    image: dumbwareio/dumbdrop:latest # Use the desired tag/version
+    ports:
+      - "3000:3000" # Map host port 3000 to container port 3000
+    volumes:
+      # Mount a host directory to store metadata (.metadata folder)
+      # This is needed even for S3 mode to track ongoing uploads.
+      # For local storage mode, this is also where files land.
+      - ./uploads:/app/uploads
+    environment:
+      # --- Core Settings ---
+      # STORAGE_TYPE: "local" # Options: "local", "s3" (Defaults to "local" if unset)
+      DUMBDROP_TITLE: "My DumbDrop"
+      BASE_URL: "http://localhost:3000/" # Must end with a slash!
+      MAX_FILE_SIZE: 1024 # Max file size in MB
+      DUMBDROP_PIN: "" # Optional PIN (4-10 digits)
+      AUTO_UPLOAD: "false" # Set to "true" to upload immediately
+
+      # --- Local Storage Settings (if STORAGE_TYPE="local") ---
+      UPLOAD_DIR: "/app/uploads" # *Must* be set inside container if using local storage
+
+      # --- S3 Storage Settings (if STORAGE_TYPE="s3") ---
+      # S3_REGION: "us-east-1" # Your S3 region (e.g., us-west-000 for B2)
+      # S3_BUCKET_NAME: "your-s3-bucket-name" # Your bucket name
+      # S3_ACCESS_KEY_ID: "YOUR_ACCESS_KEY" # Your S3 Access Key
+      # S3_SECRET_ACCESS_KEY: "YOUR_SECRET_KEY" # Your S3 Secret Key
+      # S3_ENDPOINT_URL: "" # Optional: e.g., https://s3.us-west-000.backblazeb2.com for B2, http://minio.local:9000 for Minio
+      # S3_FORCE_PATH_STYLE: "false" # Optional: Set to "true" for providers like Minio
+
+      # --- Optional Settings ---
+      # ALLOWED_EXTENSIONS: ".jpg,.png,.pdf" # Comma-separated allowed extensions
+      # ALLOWED_IFRAME_ORIGINS: "https://organizr.example.com" # Allow embedding in specific origins
+      # APPRISE_URL: "" # For notifications
+      # FOOTER_LINKS: "My Site @ https://example.com" # Custom footer links
+      # CLIENT_MAX_RETRIES: 5 # Client-side chunk retry attempts
+    restart: unless-stopped
 ```
 Then run:
 ```bash
 docker compose up -d
 ```
 1. Go to http://localhost:3000
-2. Upload a File - It'll show up in ./uploads
-3. Rejoice in the glory of your dumb uploads
+2. Upload a File - It'll show up in `./uploads` (if local) or your S3 bucket (if S3).
+3. Rejoice in the glory of your dumb uploads, now potentially in the cloud!
 
-> **Note:** The `UPLOAD_DIR` environment variable is now explicitly set to `/app/uploads` in the container. The Dockerfile only creates the `uploads` directory, not `local_uploads`. The host directory `./uploads` is mounted to `/app/uploads` for persistent storage.
+> **Note:** When using `STORAGE_TYPE=s3`, the local volume mount (`./uploads:/app/uploads`) is still used to store temporary metadata files (`.metadata` folder) for tracking multipart uploads. The actual files go to S3.
 
 ### Option 3: Running Locally (For Developers)
 
-For local development setup, troubleshooting, and advanced usage, see the dedicated guide:
+For local development setup without Docker, see the dedicated guide:
 
 👉 [Local Development Guide](LOCAL_DEVELOPMENT.md)
@@ -74,43 +91,65 @@ For local development setup, troubleshooting, and advanced usage, see the dedica
 
 - 🚀 Drag and drop file uploads
 - 📁 Multiple file selection
+- ☁️ **Optional S3 Storage:** Store files in AWS S3, Backblaze B2, MinIO, or other S3-compatible services.
+- 💾 **Local Storage:** Default simple file storage on the server's disk.
 - 🎨 Clean, responsive UI with Dark Mode
 - 📦 Docker support with easy configuration
-- 📂 Directory upload support (maintains structure)
+- 📂 Directory upload support (maintains structure in local storage or as S3 keys)
 - 🔒 Optional PIN protection
 - 📱 Mobile-friendly interface
 - 🔔 Configurable notifications via Apprise
 - ⚡ Zero dependencies on client-side
-- 🛡️ Built-in security features
+- 🛡️ Built-in security features (rate limiting, security headers)
 - 💾 Configurable file size limits
 - 🎯 File extension filtering
+- ⚙️ Native S3 Multipart Upload for large files when using S3 storage.
+- 🔗 S3 Presigned URLs for efficient downloads (offloads server bandwidth).
 
 ## Configuration
 
+DumbDrop is configured primarily through environment variables.
+
 ### Environment Variables
 
-| Variable | Description | Default | Required |
-|------------------------|------------------------------------------------------------------|-----------------------------------------|----------|
-| PORT | Server port | 3000 | No |
-| BASE_URL | Base URL for the application | http://localhost:PORT | No |
-| MAX_FILE_SIZE | Maximum file size in MB | 1024 | No |
-| DUMBDROP_PIN | PIN protection (4-10 digits) | None | No |
-| DUMBDROP_TITLE | Site title displayed in header | DumbDrop | No |
-| APPRISE_URL | Apprise URL for notifications | None | No |
-| APPRISE_MESSAGE | Notification message template | New file uploaded {filename} ({size}), Storage used {storage} | No |
-| APPRISE_SIZE_UNIT | Size unit for notifications (B, KB, MB, GB, TB, or Auto) | Auto | No |
-| AUTO_UPLOAD | Enable automatic upload on file selection | false | No |
-| ALLOWED_EXTENSIONS | Comma-separated list of allowed file extensions | None | No |
-| ALLOWED_IFRAME_ORIGINS | Comma-separated list of origins allowed to embed the app in an iframe | None | No |
-| UPLOAD_DIR | Directory for uploads (Docker/production; should be `/app/uploads` in container) | None (see LOCAL_UPLOAD_DIR fallback) | No |
-| LOCAL_UPLOAD_DIR | Directory for uploads (local dev, fallback: './local_uploads') | ./local_uploads | No |
+| Variable | Description | Default | Required |
+|--------------------------|------------------------------------------------------------------------------|----------------------------------|-------------------------------|
+| **`STORAGE_TYPE`** | Storage backend: `local` or `s3` | `local` | No |
+| `PORT` | Server port | `3000` | No |
+| `BASE_URL` | Base URL for the application (must end with `/`) | `http://localhost:PORT/` | No |
+| `MAX_FILE_SIZE` | Maximum file size in MB | `1024` | No |
+| `DUMBDROP_PIN` | PIN protection (4-10 digits) | None | No |
+| `DUMBDROP_TITLE` | Title displayed in the browser tab/header | `DumbDrop` | No |
+| `AUTO_UPLOAD` | Enable automatic upload on file selection (`true`/`false`) | `false` | No |
+| `ALLOWED_EXTENSIONS` | Comma-separated list of allowed file extensions (e.g., `.jpg,.png`) | None (all allowed) | No |
+| `ALLOWED_IFRAME_ORIGINS` | Comma-separated list of origins allowed to embed in an iframe | None | No |
+| `FOOTER_LINKS` | Comma-separated custom footer links (Format: `"Text @ URL"`) | None | No |
+| `CLIENT_MAX_RETRIES` | Max retry attempts for client-side chunk uploads | `5` | No |
+| `DEMO_MODE` | Run in demo mode (`true`/`false`). Overrides storage settings. | `false` | No |
+| `APPRISE_URL` | Apprise URL for notifications | None | No |
+| `APPRISE_MESSAGE` | Notification message template (`{filename}`, `{size}`, `{storage}`) | `New file uploaded...` | No |
+| `APPRISE_SIZE_UNIT` | Size unit for notifications (`B`, `KB`, `MB`, `GB`, `TB`, `Auto`) | `Auto` | No |
+| --- | --- | --- | --- |
+| **Local Storage Only:** | | | |
+| `UPLOAD_DIR` | **(Docker)** Directory for uploads/metadata inside container | None | Yes (if `STORAGE_TYPE=local`) |
+| `LOCAL_UPLOAD_DIR` | **(Local Dev)** Directory for uploads/metadata on host machine | `./local_uploads` | No (if `STORAGE_TYPE=local`) |
+| --- | --- | --- | --- |
+| **S3 Storage Only:** | | | |
+| `S3_REGION` | S3 Region (e.g., `us-east-1`, `us-west-000`) | None | Yes (if `STORAGE_TYPE=s3`) |
+| `S3_BUCKET_NAME` | Name of the S3 Bucket | None | Yes (if `STORAGE_TYPE=s3`) |
+| `S3_ACCESS_KEY_ID` | S3 Access Key ID | None | Yes (if `STORAGE_TYPE=s3`) |
+| `S3_SECRET_ACCESS_KEY` | S3 Secret Access Key | None | Yes (if `STORAGE_TYPE=s3`) |
+| `S3_ENDPOINT_URL` | **(Optional)** Custom S3 endpoint URL (for B2, MinIO, etc.) | None (uses default AWS endpoint) | No |
+| `S3_FORCE_PATH_STYLE` | **(Optional)** Force path-style S3 requests (`true`/`false`). Needed for MinIO, etc. | `false` | No |
 
-- **UPLOAD_DIR** is used in Docker/production. If not set, LOCAL_UPLOAD_DIR is used for local development. If neither is set, the default is `./local_uploads`.
-- **Docker Note:** The Dockerfile now only creates the `uploads` directory inside the container. The host's `./local_uploads` is mounted to `/app/uploads` and should be managed on the host system.
-- **BASE_URL**: If you are deploying DumbDrop under a subpath (e.g., `https://example.com/watchfolder/`), you **must** set `BASE_URL` to the full path including the trailing slash (e.g., `https://example.com/watchfolder/`). All API and asset requests will be prefixed with this value. If you deploy at the root, use `https://example.com/`.
-- **BASE_URL** must end with a trailing slash. The app will fail to start if this is not the case.
+- **Storage:** Set `STORAGE_TYPE` to `s3` to enable S3 storage. Otherwise, it defaults to `local`.
+- **Local Storage:** If `STORAGE_TYPE=local`, `UPLOAD_DIR` (in Docker) or `LOCAL_UPLOAD_DIR` (local dev) determines where files are stored.
+- **S3 Storage:** If `STORAGE_TYPE=s3`, the `S3_*` variables are required. `UPLOAD_DIR`/`LOCAL_UPLOAD_DIR` is still used for storing temporary `.metadata` files locally.
+- **S3 Endpoint/Path Style:** Use `S3_ENDPOINT_URL` and `S3_FORCE_PATH_STYLE` only if connecting to a non-AWS S3-compatible service.
+- **BASE_URL**: Must end with a trailing slash (`/`). The app will fail to start otherwise. Example: `http://your.domain.com/dumbdrop/`.
+- **Security Note (S3):** For production, using IAM Roles (e.g., EC2 Instance Profiles, ECS Task Roles) is strongly recommended over embedding Access Keys in environment variables.
 
-See `.env.example` for a template and more details.
+See `.env.example` for a template.
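Those storage notes imply a small factory that picks an adapter once at startup (`src/app.js` below requires `./storage`). A minimal sketch of such a selector, assuming hypothetical `./local` and `./s3` adapter modules — the repo's real adapter files are not part of this diff:

```js
// storage/index.js (sketch): choose the backend once, at load time.
const { config } = require('../config');

let storageAdapter;
if (config.storageType === 's3') {
  storageAdapter = require('./s3');    // hypothetical S3 adapter module
} else {
  storageAdapter = require('./local'); // hypothetical local-disk adapter module
}

console.log(`Using ${config.storageType} storage adapter`);
module.exports = { storageAdapter };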
 <details>
 <summary>ALLOWED_IFRAME_ORIGINS</summary>

@@ -129,7 +168,7 @@ ALLOWED_IFRAME_ORIGINS=https://organizr.example.com,https://myportal.com
 <details>
 <summary>File Extension Filtering</summary>
 
-To restrict which file types can be uploaded, set the `ALLOWED_EXTENSIONS` environment variable. For example:
+To restrict which file types can be uploaded, set the `ALLOWED_EXTENSIONS` environment variable with comma-separated extensions (including the dot):
 ```env
 ALLOWED_EXTENSIONS=.jpg,.jpeg,.png,.pdf,.doc,.docx,.txt
 ```
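A sketch of the check this setting implies on the server side (the repo's actual validation helper is not shown in this diff):

```js
// Sketch: reject a filename whose extension isn't in ALLOWED_EXTENSIONS.
const path = require('path');

const allowed = (process.env.ALLOWED_EXTENSIONS || '')
  .split(',')
  .map(ext => ext.trim().toLowerCase())
  .filter(Boolean);

function isExtensionAllowed(filename) {
  if (allowed.length === 0) return true; // unset => all extensions allowed
  return allowed.includes(path.extname(filename).toLowerCase());
}

console.log(isExtensionAllowed('photo.JPG')); // true if .jpg is listed
```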
@@ -141,56 +180,65 @@ If not set, all file extensions will be allowed.
 
 #### Message Templates
 The notification message supports the following placeholders:
-- `{filename}`: Name of the uploaded file
+- `{filename}`: Name of the uploaded file (or S3 Key)
 - `{size}`: Size of the file (formatted according to APPRISE_SIZE_UNIT)
-- `{storage}`: Total size of all files in upload directory
+- `{storage}`: Total size of all files in upload directory (Local storage only)
 
 Example message template:
 ```env
-APPRISE_MESSAGE: New file uploaded {filename} ({size}), Storage used {storage}
+APPRISE_MESSAGE: New file dropped: {filename} ({size})!
 ```
 
 Size formatting examples:
 - Auto (default): Chooses nearest unit (e.g., "1.44MB", "256KB")
 - Fixed unit: Set APPRISE_SIZE_UNIT to B, KB, MB, GB, or TB
 
 Both {size} and {storage} use the same formatting rules based on APPRISE_SIZE_UNIT.
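A sketch of the substitution and size formatting these rules describe — an approximation for illustration, not the app's exact implementation:

```js
// Sketch: fill the APPRISE_MESSAGE placeholders; 'Auto' picks the nearest unit.
function formatSize(bytes, unit = 'Auto') {
  const units = ['B', 'KB', 'MB', 'GB', 'TB'];
  const fixed = units.indexOf(unit);
  const i = fixed >= 0
    ? fixed
    : Math.min(units.length - 1, Math.floor(Math.log2(Math.max(bytes, 1)) / 10));
  return `${(bytes / 1024 ** i).toFixed(2)}${units[i]}`;
}

function renderMessage(template, { filename, size, storage }) {
  return template
    .replace('{filename}', filename)
    .replace('{size}', formatSize(size))
    .replace('{storage}', formatSize(storage));
}

console.log(renderMessage('New file dropped: {filename} ({size})!', {
  filename: 'cat.png', size: 1_509_949, storage: 262_144,
}));
// -> New file dropped: cat.png (1.44MB)!
```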
 #### Notification Support
 - Integration with [Apprise](https://github.com/caronc/apprise?tab=readme-ov-file#supported-notifications) for flexible notifications
 - Support for all Apprise notification services
-- Customizable notification messages with filename templating
+- Customizable notification messages
 - Optional - disabled if no APPRISE_URL is set
 </details>
 
+<details>
+<summary>S3 Cleanup Recommendation</summary>
+
+When using `STORAGE_TYPE=s3`, DumbDrop relies on the native S3 Multipart Upload mechanism. If an upload is interrupted, incomplete parts may remain in your S3 bucket.
+
+**It is strongly recommended to configure a Lifecycle Rule on your S3 bucket** (or use your provider's equivalent tool) to automatically abort and delete incomplete multipart uploads after a reasonable period (e.g., 1-7 days). This prevents orphaned parts from accumulating costs. DumbDrop's cleanup only removes local tracking files, not the actual S3 parts.
+</details>
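If you prefer setting that rule from code rather than the console, a sketch using the same AWS SDK v3 the app depends on — bucket name and the 7-day window are placeholders:

```js
// Sketch: abort incomplete multipart uploads after 7 days, bucket-wide.
const { S3Client, PutBucketLifecycleConfigurationCommand } = require('@aws-sdk/client-s3');

async function addAbortStaleUploadsRule() {
  const s3 = new S3Client({ region: process.env.S3_REGION });
  await s3.send(new PutBucketLifecycleConfigurationCommand({
    Bucket: process.env.S3_BUCKET_NAME,
    LifecycleConfiguration: {
      Rules: [{
        ID: 'abort-incomplete-mpu',
        Status: 'Enabled',
        Filter: { Prefix: '' }, // apply to the whole bucket
        AbortIncompleteMultipartUpload: { DaysAfterInitiation: 7 },
      }],
    },
  }));
}

addAbortStaleUploadsRule().catch(console.error);
```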
 ## Security
 
 ### Features
 - Variable-length PIN support (4-10 digits)
 - Constant-time PIN comparison
-- Input sanitization
-- Rate limiting
+- Input sanitization (filenames, paths)
+- Rate limiting on API endpoints
+- Security headers (CSP, HSTS, etc.)
 - File extension filtering
 - No client-side PIN storage
-- Secure file handling
+- Secure file handling (uses S3 presigned URLs for downloads if S3 is enabled)
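The "constant-time PIN comparison" bullet corresponds to the `safeCompare` helper imported in `src/app.js` further down. A minimal sketch of such a helper on Node's `crypto`, assuming the real implementation (not shown here) behaves similarly:

```js
// Sketch: constant-time string comparison to avoid timing side channels.
const crypto = require('crypto');

function safeCompare(a, b) {
  if (typeof a !== 'string' || typeof b !== 'string') return false;
  const bufA = Buffer.from(a);
  const bufB = Buffer.from(b);
  // timingSafeEqual throws on length mismatch, so check length first.
  if (bufA.length !== bufB.length) return false;
  return crypto.timingSafeEqual(bufA, bufB);
}

console.log(safeCompare('123456', '123456')); // true
```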
 ## Technical Details
 
 ### Stack
 - **Backend**: Node.js (>=20.0.0) with Express
 - **Frontend**: Vanilla JavaScript (ES6+)
+- **Storage**: Local Filesystem or S3-compatible Object Storage
 - **Container**: Docker with multi-stage builds
 - **Security**: Express security middleware
-- **Upload**: Chunked file handling via Multer
+- **Upload**: Chunked uploads via client-side logic, processed via Express middleware, using native S3 Multipart Upload when `STORAGE_TYPE=s3`.
 - **Notifications**: Apprise integration
+- **SDK**: AWS SDK for JavaScript v3 (`@aws-sdk/client-s3`, `@aws-sdk/s3-request-presigner`) when `STORAGE_TYPE=s3`.
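A sketch of the presigned-download flow those two packages enable — the key and expiry below are placeholders:

```js
// Sketch: hand the client a time-limited S3 download URL instead of
// proxying the file bytes through the Express server.
const { S3Client, GetObjectCommand } = require('@aws-sdk/client-s3');
const { getSignedUrl } = require('@aws-sdk/s3-request-presigner');

async function presignDownload(key) {
  const s3 = new S3Client({ region: process.env.S3_REGION });
  const command = new GetObjectCommand({
    Bucket: process.env.S3_BUCKET_NAME,
    Key: key,
  });
  // URL stays valid for one hour, then S3 rejects it.
  return getSignedUrl(s3, command, { expiresIn: 3600 });
}

presignDownload('uploads/cat.png').then(console.log).catch(console.error);
```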
 ### Dependencies
-- express: Web framework
-- multer: File upload handling
-- apprise: Notification system
-- cors: Cross-origin resource sharing
-- dotenv: Environment configuration
-- express-rate-limit: Rate limiting
+- `express`: Web framework
+- `@aws-sdk/client-s3`: AWS S3 SDK (used if `STORAGE_TYPE=s3`)
+- `@aws-sdk/s3-request-presigner`: For S3 presigned URLs (used if `STORAGE_TYPE=s3`)
+- `cookie-parser`: Parse cookies
+- `cors`: Cross-origin resource sharing
+- `dotenv`: Environment configuration
+- `express-rate-limit`: Rate limiting
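For completeness, the native S3 multipart flow the Upload bullet refers to boils down to three SDK calls. A compressed sketch, with chunking and error paths omitted:

```js
// Sketch: native S3 multipart upload — create, upload parts, complete.
const {
  S3Client,
  CreateMultipartUploadCommand,
  UploadPartCommand,
  CompleteMultipartUploadCommand,
} = require('@aws-sdk/client-s3');

async function multipartUpload(key, chunks) {
  const s3 = new S3Client({ region: process.env.S3_REGION });
  const Bucket = process.env.S3_BUCKET_NAME;

  const { UploadId } = await s3.send(
    new CreateMultipartUploadCommand({ Bucket, Key: key })
  );

  const parts = [];
  for (let i = 0; i < chunks.length; i++) {
    const { ETag } = await s3.send(new UploadPartCommand({
      Bucket, Key: key, UploadId,
      PartNumber: i + 1, // part numbers are 1-based
      Body: chunks[i],   // every part except the last must be >= 5 MB
    }));
    parts.push({ ETag, PartNumber: i + 1 });
  }

  await s3.send(new CompleteMultipartUploadCommand({
    Bucket, Key: key, UploadId,
    MultipartUpload: { Parts: parts },
  }));
}
```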
 ## Contributing

@@ -208,3 +256,4 @@ Made with ❤️ by [DumbWare.io](https://dumbware.io)
 ## Future Features
 - Camera Upload for Mobile
+> Got an idea? [Open an issue](https://github.com/dumbwareio/dumbdrop/issues) or [submit a PR](https://github.com/dumbwareio/dumbdrop/pulls)
docker-compose.yml

@@ -16,6 +16,7 @@ services:
       BASE_URL: http://localhost:3000 # The base URL for the application
 
       # Additional available environment variables (commented out with defaults)
+      # FOOTER_LINKS: "My Site @ https://example.com,Docs @ https://docs.example.com" # Custom footer links
       # PORT: 3000 # Server port (default: 3000)
       # NODE_ENV: production # Node environment (development/production)
       # DEBUG: false # Debug mode for verbose logging (default: false in production, true in development)
package-lock.json (generated, 1662 changed lines)
Diff suppressed because it is too large.

package.json

@@ -15,6 +15,8 @@
   "license": "ISC",
   "description": "A simple file upload application",
   "dependencies": {
+    "@aws-sdk/client-s3": "^3.803.0",
+    "@aws-sdk/s3-request-presigner": "^3.803.0",
     "apprise": "^1.0.0",
     "cookie-parser": "^1.4.7",
     "cors": "^2.8.5",
public/index.html (1118 changed lines)
Diff suppressed because it is too large.

@@ -126,7 +126,7 @@
     // Handle form submission
     const verifyPin = async (pin) => {
       try {
-        const response = await fetch(window.BASE_URL + '/api/auth/verify-pin', {
+        const response = await fetch(window.BASE_URL + 'api/auth/verify-pin', {
           method: 'POST',
           headers: { 'Content-Type': 'application/json' },
           body: JSON.stringify({ pin })

@@ -212,7 +212,7 @@
     };
 
     // Check PIN length and initialize
-    fetch(window.BASE_URL + '/api/auth/pin-required')
+    fetch(window.BASE_URL + 'api/auth/pin-required')
       .then(response => {
         if (response.status === 429) {
           throw new Error('Too many attempts. Please wait before trying again.');
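Both hunks drop the leading slash from the API paths because `BASE_URL` is now guaranteed to end with `/`; with the old form, a subpath deployment would produce a double slash. A quick illustration:

```js
// Why the leading slash went away: BASE_URL always ends with '/'.
const base = 'https://example.com/watchfolder/'; // example subpath deployment

console.log(base + '/api/auth/verify-pin');
// -> https://example.com/watchfolder//api/auth/verify-pin  (double slash)

console.log(base + 'api/auth/verify-pin');
// -> https://example.com/watchfolder/api/auth/verify-pin

// The URL constructor resolves relative paths the same way:
console.log(new URL('api/auth/verify-pin', base).href);
// -> https://example.com/watchfolder/api/auth/verify-pin
```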
@@ -39,6 +39,7 @@ body {
   display: flex;
   justify-content: center;
   padding-top: 2rem;
+  padding-bottom: 80px;
   color: var(--text-color);
   transition: background-color 0.3s ease, color 0.3s ease;
 }

@@ -359,3 +360,47 @@ button:disabled {
     font-size: 1.125rem;
   }
 }
+
+/* Footer Styles */
+footer {
+  position: fixed;
+  bottom: 10px;
+  left: 0;
+  right: 0;
+  width: 100%;
+  max-width: 600px;
+  margin-left: auto;
+  margin-right: auto;
+  padding: 15px;
+  text-align: center;
+  font-size: 0.85rem;
+  color: var(--text-color);
+  opacity: 0.7;
+  border-top: 1px solid var(--border-color);
+  transition: background-color 0.3s ease, color 0.3s ease;
+}
+
+footer a {
+  color: var(--text-color);
+  text-decoration: none;
+  transition: opacity 0.2s ease;
+}
+
+footer a:hover {
+  opacity: 1;
+  text-decoration: underline;
+}
+
+.footer-separator {
+  margin: 0 0.5em;
+}
+
+@media (max-width: 480px) {
+  footer {
+    font-size: 0.75rem;
+  }
+
+  .footer-separator {
+    margin: 0 0.3em;
+  }
+}
src/app.js (246 changed lines; old and new lines appear interleaved below, in diff order)

@@ -2,18 +2,34 @@
 * Main application setup and configuration.
 * Initializes Express app, middleware, routes, and static file serving.
 * Handles core application bootstrapping and configuration validation.
 * Imports and makes use of the configured storage adapter.
 */

const express = require('express');
const cors = require('cors');
const cookieParser = require('cookie-parser');
const path = require('path');
const fs = require('fs');
const fsPromises = require('fs').promises;
const fs = require('fs'); // Needed for reading HTML templates

// Load configuration FIRST
const { config, validateConfig } = require('./config');
const logger = require('./utils/logger');
const { ensureDirectoryExists } = require('./utils/fileUtils');
// Validate config EARLY, before loading anything else that depends on it
try {
  validateConfig();
  logger.info("Configuration loaded and validated successfully.");
} catch (validationError) {
  logger.error("!!! Configuration validation failed. Server cannot start. !!!");
  logger.error(validationError.message);
  process.exit(1); // Exit if config is invalid
}

// Load storage adapter AFTER config is validated
// The storage/index.js file itself will log which adapter is being used.
const { storageAdapter } = require('./storage'); // This will load the correct adapter

// Load other utilities and middleware
// const { ensureDirectoryExists } = require('./utils/fileUtils'); // No longer needed here
const { securityHeaders, requirePin } = require('./middleware/security');
const { safeCompare } = require('./utils/security');
const { initUploadLimiter, pinVerifyLimiter, downloadLimiter } = require('./middleware/rateLimiter');

@@ -22,163 +38,147 @@ const { injectDemoBanner, demoMiddleware } = require('./utils/demoMode');
// Create Express app
const app = express();

// Add this line to trust the first proxy
app.set('trust proxy', 1);
// Trust proxy headers (important for rate limiting and secure cookies if behind proxy)
app.set('trust proxy', 1); // Adjust the number based on your proxy setup depth

// Middleware setup
app.use(cors());
// --- Middleware Setup ---
app.use(cors()); // TODO: Configure CORS more strictly for production if needed
app.use(cookieParser());
app.use(express.json());
app.use(securityHeaders);
app.use(express.json()); // For parsing application/json
app.use(securityHeaders); // Apply security headers

// Import routes
// --- Demo Mode Middleware ---
// Apply demo middleware early if demo mode is active
// Note: Demo mode is now also checked within adapters/storage factory
if (config.isDemoMode) {
  app.use(demoMiddleware); // This might intercept routes if demoAdapter is fully implemented
}

// --- Route Definitions ---
// Import route handlers AFTER middleware setup
// Note: uploadRouter is now an object { router }, so destructure it
const { router: uploadRouter } = require('./routes/upload');
const fileRoutes = require('./routes/files');
const authRoutes = require('./routes/auth');

// Add demo middleware before your routes
app.use(demoMiddleware);

// Use routes with appropriate middleware
// Apply Rate Limiting and Auth Middleware to Routes
app.use('/api/auth', pinVerifyLimiter, authRoutes);
// Apply PIN check and rate limiting to upload/file routes
// The requirePin middleware now checks config.pin internally
app.use('/api/upload', requirePin(config.pin), initUploadLimiter, uploadRouter);
app.use('/api/files', requirePin(config.pin), downloadLimiter, fileRoutes);

// Root route

// --- Frontend Routes (Serving HTML) ---

// Root route ('/')
app.get('/', (req, res) => {
  // Check if the PIN is configured and the cookie exists
  // Redirect to login if PIN is required and not authenticated
  if (config.pin && (!req.cookies?.DUMBDROP_PIN || !safeCompare(req.cookies.DUMBDROP_PIN, config.pin))) {
    return res.redirect('/login.html');
    logger.debug('[/] PIN required, redirecting to login.html');
    return res.redirect('/login.html'); // Use relative path
  }

  let html = fs.readFileSync(path.join(__dirname, '../public', 'index.html'), 'utf8');
  html = html.replace(/{{SITE_TITLE}}/g, config.siteTitle);
  html = html.replace('{{AUTO_UPLOAD}}', config.autoUpload.toString());
  html = html.replace('{{MAX_RETRIES}}', config.clientMaxRetries.toString());
  // Ensure baseUrl has a trailing slash for correct asset linking
  const baseUrlWithSlash = config.baseUrl.endsWith('/') ? config.baseUrl : config.baseUrl + '/';
  html = html.replace(/{{BASE_URL}}/g, baseUrlWithSlash);
  html = injectDemoBanner(html);
  res.send(html);
});

// Login route
app.get('/login.html', (req, res) => {
  // Add cache control headers
  res.set('Cache-Control', 'no-store, no-cache, must-revalidate, private');
  res.set('Pragma', 'no-cache');
  res.set('Expires', '0');

  let html = fs.readFileSync(path.join(__dirname, '../public', 'login.html'), 'utf8');
  html = html.replace(/{{SITE_TITLE}}/g, config.siteTitle);
  // Ensure baseUrl has a trailing slash
  const baseUrlWithSlash = config.baseUrl.endsWith('/') ? config.baseUrl : config.baseUrl + '/';
  html = html.replace(/{{BASE_URL}}/g, baseUrlWithSlash);
  html = injectDemoBanner(html);
  res.send(html);
});

// Serve static files with template variable replacement for HTML files
app.use((req, res, next) => {
  if (!req.path.endsWith('.html')) {
    return next();
  }

  try {
    const filePath = path.join(__dirname, '../public', req.path);
    const filePath = path.join(__dirname, '../public', 'index.html');
    let html = fs.readFileSync(filePath, 'utf8');

    // Perform template replacements
    html = html.replace(/{{SITE_TITLE}}/g, config.siteTitle);
    if (req.path === '/index.html' || req.path === 'index.html') {
      html = html.replace('{{AUTO_UPLOAD}}', config.autoUpload.toString());
      html = html.replace('{{MAX_RETRIES}}', config.clientMaxRetries.toString());
    }
    html = html.replace('{{AUTO_UPLOAD}}', config.autoUpload.toString());
    html = html.replace('{{MAX_RETRIES}}', config.clientMaxRetries.toString());
    // Ensure baseUrl has a trailing slash
    const baseUrlWithSlash = config.baseUrl.endsWith('/') ? config.baseUrl : config.baseUrl + '/';
    html = html.replace(/{{BASE_URL}}/g, baseUrlWithSlash);

    // Generate Footer Content
    let footerHtml = '';
    if (config.footerLinks && config.footerLinks.length > 0) {
      footerHtml = config.footerLinks.map(link =>
        `<a href="${link.url}" target="_blank" rel="noopener noreferrer">${link.text}</a>`
      ).join('<span class="footer-separator"> | </span>');
    } else {
      footerHtml = `<span class="footer-static">Built by <a href="https://www.dumbware.io/" target="_blank" rel="noopener noreferrer">Dumbwareio</a></span>`;
    }
    html = html.replace('{{FOOTER_CONTENT}}', footerHtml);

    // Inject Demo Banner if needed
    html = injectDemoBanner(html);

    res.setHeader('Content-Type', 'text/html');
    res.send(html);
  } catch (err) {
    next();
    logger.error(`Error processing index.html: ${err.message}`);
    res.status(500).send('Error loading page');
  }
});

// Serve remaining static files
app.use(express.static('public'));
// Login route ('/login.html')
app.get('/login.html', (req, res) => {
  // Prevent caching of the login page
  res.set('Cache-Control', 'no-store, no-cache, must-revalidate, private');
  res.set('Pragma', 'no-cache');
  res.set('Expires', '0');

// Error handling middleware
app.use((err, req, res, next) => { // eslint-disable-line no-unused-vars
  logger.error(`Unhandled error: ${err.message}`);
  res.status(500).json({
    message: 'Internal server error',
    error: process.env.NODE_ENV === 'development' ? err.message : undefined
  });
  try {
    const filePath = path.join(__dirname, '../public', 'login.html');
    let html = fs.readFileSync(filePath, 'utf8');
    html = html.replace(/{{SITE_TITLE}}/g, config.siteTitle);
    const baseUrlWithSlash = config.baseUrl.endsWith('/') ? config.baseUrl : config.baseUrl + '/';
    html = html.replace(/{{BASE_URL}}/g, baseUrlWithSlash);
    html = injectDemoBanner(html); // Inject demo banner if needed

    res.setHeader('Content-Type', 'text/html');
    res.send(html);
  } catch (err) {
    logger.error(`Error processing login.html: ${err.message}`);
    res.status(500).send('Error loading login page');
  }
});

// --- Add this after config is loaded ---
const METADATA_DIR = path.join(config.uploadDir, '.metadata');
// --- End addition ---
// --- Static File Serving ---
// Serve static files (CSS, JS, assets) from the 'public' directory
// Use express.static middleware, placed AFTER specific HTML routes
app.use(express.static(path.join(__dirname, '../public')));


// --- Error Handling Middleware ---
// Catch-all for unhandled errors
app.use((err, req, res, next) => { // eslint-disable-line no-unused-vars
  logger.error(`Unhandled application error: ${err.message}`, err.stack);
  // Avoid sending stack trace in production
  const errorResponse = {
    message: 'Internal Server Error',
    ...(config.nodeEnv === 'development' && { error: err.message, stack: err.stack })
  };
  // Ensure response is sent only once
  if (!res.headersSent) {
    res.status(err.status || 500).json(errorResponse);
  }
});

// --- Initialize Function (Simplified) ---
/**
 * Initialize the application
 * Sets up required directories and validates configuration
 * Initialize the application.
 * Placeholder function, as most initialization is now handled
 * by config loading, adapter loading, and server startup.
 * Could be used for other async setup tasks if needed later.
 */
async function initialize() {
  try {
    // Validate configuration
    validateConfig();

    // Ensure upload directory exists and is writable
    await ensureDirectoryExists(config.uploadDir);
    // Config validation happens at the top level now.
    // Storage adapter is loaded at the top level now.
    // Directory checks are handled within adapters/config.

    // --- Add this section ---
    // Ensure metadata directory exists
    try {
      if (!fs.existsSync(METADATA_DIR)) {
        await fsPromises.mkdir(METADATA_DIR, { recursive: true });
        logger.info(`Created metadata directory: ${METADATA_DIR}`);
      } else {
        logger.info(`Metadata directory exists: ${METADATA_DIR}`);
      }
      // Check writability (optional but good practice)
      await fsPromises.access(METADATA_DIR, fs.constants.W_OK);
      logger.success(`Metadata directory is writable: ${METADATA_DIR}`);
    } catch (err) {
      logger.error(`Metadata directory error (${METADATA_DIR}): ${err.message}`);
      // Decide if this is fatal. If resumability is critical, maybe throw.
      throw new Error(`Failed to access or create metadata directory: ${METADATA_DIR}`);
    }
    // --- End added section ---

    // Log configuration
    logger.info(`Maximum file size set to: ${config.maxFileSize / (1024 * 1024)}MB`);
    if (config.pin) {
      logger.info('PIN protection enabled');
    }
    logger.info(`Auto upload is ${config.autoUpload ? 'enabled' : 'disabled'}`);
    if (config.appriseUrl) {
      logger.info('Apprise notifications enabled');
    }

    // After initializing demo middleware
    if (process.env.DEMO_MODE === 'true') {
      logger.info('[DEMO] Running in demo mode - uploads will not be saved');
      // Clear any existing files in upload directory
      try {
        const files = fs.readdirSync(config.uploadDir);
        for (const file of files) {
          fs.unlinkSync(path.join(config.uploadDir, file));
        }
        logger.info('[DEMO] Cleared upload directory');
      } catch (err) {
        logger.error(`[DEMO] Failed to clear upload directory: ${err.message}`);
      }
    }

    return app;
    logger.info('Application initialized.');
    // Example: Log active storage type
    logger.info(`Active Storage Adapter: ${storageAdapter.constructor.name || config.storageType}`);

    return app; // Return the configured Express app instance
  } catch (err) {
    logger.error(`Initialization failed: ${err.message}`);
    throw err;
    logger.error(`Application initialization failed: ${err.message}`);
    throw err; // Propagate error to stop server start
  }
}

module.exports = { app, initialize, config };
module.exports = { app, initialize, config }; // Export app, initialize, and config
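The exports suggest a separate entry point that awaits `initialize()` before listening. A hypothetical `server.js` sketch — the repo's actual entry file is not part of this diff:

```js
// Hypothetical server.js: boot the app only after initialize() succeeds.
const { app, initialize, config } = require('./src/app');

initialize()
  .then(() => {
    app.listen(config.port, () => {
      console.log(`DumbDrop listening on port ${config.port}`);
    });
  })
  .catch((err) => {
    console.error(`Startup aborted: ${err.message}`);
    process.exit(1);
  });
```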
The following hunks are from the configuration module (old and new lines appear interleaved below, in diff order):

@@ -1,112 +1,148 @@
require('dotenv').config();
console.log('Loaded ENV:', {
  PORT: process.env.PORT,
  UPLOAD_DIR: process.env.UPLOAD_DIR,
  LOCAL_UPLOAD_DIR: process.env.LOCAL_UPLOAD_DIR,
  NODE_ENV: process.env.NODE_ENV
});
const { validatePin } = require('../utils/security');
const logger = require('../utils/logger');
const logger = require('../utils/logger'); // Use the default logger instance
const fs = require('fs');
const path = require('path');
const { version } = require('../../package.json'); // Get version from package.json

/**
 * Environment Variables Reference
 *
 * PORT - Port for the server (default: 3000)
 * NODE_ENV - Node environment (default: 'development')
 * BASE_URL - Base URL for the app (default: http://localhost:${PORT})
 * UPLOAD_DIR - Directory for uploads (Docker/production)
 * LOCAL_UPLOAD_DIR - Directory for uploads (local dev, fallback: './local_uploads')
 * MAX_FILE_SIZE - Max upload size in MB (default: 1024)
 * AUTO_UPLOAD - Enable auto-upload (true/false, default: false)
 * DUMBDROP_PIN - Security PIN for uploads (required for protected endpoints)
 * DUMBDROP_TITLE - Site title (default: 'DumbDrop')
 * APPRISE_URL - Apprise notification URL (optional)
 * APPRISE_MESSAGE - Notification message template (default provided)
 * APPRISE_SIZE_UNIT - Size unit for notifications (optional)
 * ALLOWED_EXTENSIONS - Comma-separated list of allowed file extensions (optional)
 * ALLOWED_IFRAME_ORIGINS - Comma-separated list of allowed iframe origins (optional)
 */
// --- Environment Variables Reference ---
/*
STORAGE_TYPE           - Storage backend ('local' or 's3', default: 'local')
// --- Local Storage ---
UPLOAD_DIR             - Directory for uploads (Docker/production, if STORAGE_TYPE=local)
LOCAL_UPLOAD_DIR       - Directory for uploads (local dev, fallback: './local_uploads', if STORAGE_TYPE=local)
// --- S3 Storage ---
S3_REGION              - AWS Region for S3 Bucket (required if STORAGE_TYPE=s3)
S3_BUCKET_NAME         - Name of the S3 Bucket (required if STORAGE_TYPE=s3)
S3_ACCESS_KEY_ID       - S3 Access Key ID (required if STORAGE_TYPE=s3)
S3_SECRET_ACCESS_KEY   - S3 Secret Access Key (required if STORAGE_TYPE=s3)
S3_ENDPOINT_URL        - Custom S3 endpoint URL (optional, for non-AWS S3)
S3_FORCE_PATH_STYLE    - Force path-style access (true/false, optional, for non-AWS S3)
// --- Common ---
PORT                   - Port for the server (default: 3000)
NODE_ENV               - Node environment (default: 'development')
BASE_URL               - Base URL for the app (default: http://localhost:${PORT})
MAX_FILE_SIZE          - Max upload size in MB (default: 1024)
AUTO_UPLOAD            - Enable auto-upload (true/false, default: false)
DUMBDROP_PIN           - Security PIN for uploads (required for protected endpoints)
DUMBDROP_TITLE         - Site title (default: 'DumbDrop')
APPRISE_URL            - Apprise notification URL (optional)
APPRISE_MESSAGE        - Notification message template (default provided)
APPRISE_SIZE_UNIT      - Size unit for notifications (optional)
ALLOWED_EXTENSIONS     - Comma-separated list of allowed file extensions (optional)
ALLOWED_IFRAME_ORIGINS - Comma-separated list of allowed iframe origins (optional)
CLIENT_MAX_RETRIES     - Max retries for client chunk uploads (default: 5)
DEMO_MODE              - Enable demo mode (true/false, default: false)
*/

// Helper for clear configuration logging
// --- Helper for clear configuration logging ---
const logConfig = (message, level = 'info') => {
  const prefix = level === 'warning' ? '⚠️ WARNING:' : 'ℹ️ INFO:';
  console.log(`${prefix} CONFIGURATION: ${message}`);
};

// Default configurations
// --- Default configurations ---
const DEFAULT_PORT = 3000;
const DEFAULT_CHUNK_SIZE = 1024 * 1024 * 100; // 100MB
const DEFAULT_SITE_TITLE = 'DumbDrop';
const DEFAULT_BASE_URL = 'http://localhost:3000';
const DEFAULT_CLIENT_MAX_RETRIES = 5; // Default retry count
const DEFAULT_CLIENT_MAX_RETRIES = 5;
const DEFAULT_STORAGE_TYPE = 'local';

const logAndReturn = (key, value, isDefault = false) => {
  logConfig(`${key}: ${value}${isDefault ? ' (default)' : ''}`);
const logAndReturn = (key, value, isDefault = false, sensitive = false) => {
  const displayValue = sensitive ? '********' : value;
  logConfig(`${key}: ${displayValue}${isDefault ? ' (default)' : ''}`);
  return value;
};

/**
 * Determine the upload directory based on environment variables.
 * Priority:
 * 1. UPLOAD_DIR (for Docker/production)
 * 2. LOCAL_UPLOAD_DIR (for local development)
 * 3. './local_uploads' (default fallback)
 * @returns {string} The upload directory path
 */
function determineUploadDirectory() {
  let uploadDir;
  if (process.env.UPLOAD_DIR) {
    uploadDir = process.env.UPLOAD_DIR;
    logConfig(`Upload directory set from UPLOAD_DIR: ${uploadDir}`);
  } else if (process.env.LOCAL_UPLOAD_DIR) {
    uploadDir = process.env.LOCAL_UPLOAD_DIR;
    logConfig(`Upload directory using LOCAL_UPLOAD_DIR fallback: ${uploadDir}`, 'warning');
  } else {
    uploadDir = './local_uploads';
    logConfig(`Upload directory using default fallback: ${uploadDir}`, 'warning');
  }
  logConfig(`Final upload directory path: ${require('path').resolve(uploadDir)}`);
  return uploadDir;
}

/**
 * Utility to detect if running in local development mode
 * Returns true if NODE_ENV is not 'production' and UPLOAD_DIR is not set (i.e., not Docker)
 */
// --- Utility to detect if running in local development mode ---
// (This helps decide whether to *create* LOCAL_UPLOAD_DIR, but doesn't affect UPLOAD_DIR usage in Docker)
function isLocalDevelopment() {
  return process.env.NODE_ENV !== 'production' && !process.env.UPLOAD_DIR;
}

/**
 * Ensure the upload directory exists (for local development only)
 * Creates the directory if it does not exist
 * Determine the local upload directory path.
 * Only relevant when STORAGE_TYPE is 'local'.
 * @returns {string|null} The path, or null if storage is not local.
 */
function ensureLocalUploadDirExists(uploadDir) {
  if (!isLocalDevelopment()) return;
function determineLocalUploadDirectory() {
  if (process.env.STORAGE_TYPE && process.env.STORAGE_TYPE.toLowerCase() !== 'local') {
    return null; // Not using local storage
  }

  let uploadDir;
  if (process.env.UPLOAD_DIR) {
    uploadDir = process.env.UPLOAD_DIR;
    logger.info(`[Local Storage] Upload directory set from UPLOAD_DIR: ${uploadDir}`);
  } else if (process.env.LOCAL_UPLOAD_DIR) {
    uploadDir = process.env.LOCAL_UPLOAD_DIR;
    logger.warn(`[Local Storage] Upload directory using LOCAL_UPLOAD_DIR fallback: ${uploadDir}`);
  } else {
    uploadDir = './local_uploads'; // Default local path
    logger.warn(`[Local Storage] Upload directory using default fallback: ${uploadDir}`);
  }
  logger.info(`[Local Storage] Final upload directory path: ${path.resolve(uploadDir)}`);
  return uploadDir;
}

/**
 * Ensure the local upload directory exists (if applicable and in local dev).
 */
function ensureLocalUploadDirExists(dirPath) {
  if (!dirPath || !isLocalDevelopment()) {
    return; // Only create if using local storage in a local dev environment
  }
  try {
    if (!fs.existsSync(uploadDir)) {
      fs.mkdirSync(uploadDir, { recursive: true });
      logConfig(`Created local upload directory: ${uploadDir}`);
    if (!fs.existsSync(dirPath)) {
      fs.mkdirSync(dirPath, { recursive: true });
      logger.info(`[Local Storage] Created local upload directory: ${dirPath}`);
    } else {
      logConfig(`Local upload directory exists: ${uploadDir}`);
      logger.info(`[Local Storage] Local upload directory exists: ${dirPath}`);
    }
    // Basic writability check
    fs.accessSync(dirPath, fs.constants.W_OK);
    logger.success(`[Local Storage] Local upload directory is writable: ${dirPath}`);
  } catch (err) {
    logConfig(`Failed to create local upload directory: ${uploadDir}. Error: ${err.message}`, 'warning');
    logger.error(`[Local Storage] Failed to create or access local upload directory: ${dirPath}. Error: ${err.message}`);
    throw new Error(`Upload directory "${dirPath}" is not accessible or writable.`);
  }
}

// Determine and ensure upload directory (for local dev)
const resolvedUploadDir = determineUploadDirectory();
ensureLocalUploadDirExists(resolvedUploadDir);
// --- Determine Storage Type ---
const storageTypeInput = process.env.STORAGE_TYPE || DEFAULT_STORAGE_TYPE;
const storageType = ['local', 's3'].includes(storageTypeInput.toLowerCase())
  ? storageTypeInput.toLowerCase()
  : DEFAULT_STORAGE_TYPE;

if (storageTypeInput.toLowerCase() !== storageType) {
  logger.warn(`Invalid STORAGE_TYPE "${storageTypeInput}", using default: "${storageType}"`);
}

// Determine and potentially ensure local upload directory
const resolvedLocalUploadDir = determineLocalUploadDirectory(); // Will be null if STORAGE_TYPE is 's3'
if (resolvedLocalUploadDir) {
  ensureLocalUploadDirExists(resolvedLocalUploadDir);
}
/**
 * Function to parse the FOOTER_LINKS environment variable
 * @param {string} linksString - The input string containing links
 * @returns {Array} - An array of objects containing text and URL
 */
const parseFooterLinks = (linksString) => {
  if (!linksString) return [];
  return linksString.split(',')
    .map(linkPair => {
      const parts = linkPair.split('@').map(part => part.trim());
      if (parts.length === 2 && parts[0] && parts[1] && (parts[1].startsWith('http://') || parts[1].startsWith('https://'))) {
        return { text: parts[0], url: parts[1] };
      } else {
        logger.warn(`Invalid format or URL in FOOTER_LINKS: "${linkPair}". Expected "Text @ http(s)://URL". Skipping.`);
        return null;
      }
    })
    .filter(link => link !== null);
};
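A quick usage example of the parser's contract, based on the definition above:

```js
// parseFooterLinks validates each "Text @ URL" pair and drops malformed entries.
const links = parseFooterLinks('My Site @ https://example.com, Broken Link');
console.log(links);
// -> [ { text: 'My Site', url: 'https://example.com' } ]
// ("Broken Link" has no "@ http(s)://URL" part, so it is skipped with a warning.)
```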
/**
|
||||
* Application configuration
|
||||
@@ -114,168 +150,185 @@ ensureLocalUploadDirExists(resolvedUploadDir);
|
||||
*/
|
||||
const config = {
|
||||
// =====================
|
||||
// Core Settings
|
||||
// =====================
|
||||
// Server settings
|
||||
// =====================
|
||||
/**
|
||||
* Port for the server (default: 3000)
|
||||
* Set via PORT in .env
|
||||
*/
|
||||
port: process.env.PORT || DEFAULT_PORT,
|
||||
/**
|
||||
* Node environment (default: 'development')
|
||||
* Set via NODE_ENV in .env
|
||||
*/
|
||||
port: parseInt(process.env.PORT || DEFAULT_PORT, 10),
|
||||
nodeEnv: process.env.NODE_ENV || 'development',
|
||||
/**
|
||||
* Base URL for the app (default: http://localhost:${PORT})
|
||||
* Set via BASE_URL in .env
|
||||
*/
|
||||
baseUrl: process.env.BASE_URL || DEFAULT_BASE_URL,
|
||||
|
||||
baseUrl: process.env.BASE_URL || `${DEFAULT_BASE_URL.replace(/:3000$/, '')}:${process.env.PORT || DEFAULT_PORT}/`, // Ensure trailing slash
|
||||
isDemoMode: process.env.DEMO_MODE === 'true',
|
||||
|
||||
// =====================
|
||||
// Storage Settings
|
||||
// =====================
|
||||
// Upload settings
|
||||
storageType: logAndReturn('STORAGE_TYPE', storageType, storageType === DEFAULT_STORAGE_TYPE),
|
||||
/**
|
||||
* The primary directory for storing files or metadata.
|
||||
* If STORAGE_TYPE=local, this is where files are stored.
|
||||
* If STORAGE_TYPE=s3, this is where '.metadata' lives.
|
||||
* We default to the determined local path or a standard './uploads' if S3 is used.
|
||||
*/
|
||||
uploadDir: resolvedLocalUploadDir || path.resolve('./uploads'), // S3 needs a place for metadata too
|
||||
|
||||
// --- S3 Specific (only relevant if storageType is 's3') ---
|
||||
s3Region: process.env.S3_REGION || null,
|
||||
s3BucketName: process.env.S3_BUCKET_NAME || null,
|
||||
s3AccessKeyId: process.env.S3_ACCESS_KEY_ID || null,
|
||||
s3SecretAccessKey: process.env.S3_SECRET_ACCESS_KEY || null,
|
||||
s3EndpointUrl: process.env.S3_ENDPOINT_URL || null, // Default to null (AWS default endpoint)
|
||||
s3ForcePathStyle: process.env.S3_FORCE_PATH_STYLE === 'true', // Default to false
|
||||
|
||||
// =====================
|
||||
// Upload Behavior
|
||||
// =====================
|
||||
/**
|
||||
* Directory for uploads
|
||||
* Priority: UPLOAD_DIR (Docker/production) > LOCAL_UPLOAD_DIR (local dev) > './local_uploads' (fallback)
|
||||
*/
|
||||
uploadDir: resolvedUploadDir,
|
||||
/**
|
||||
* Max upload size in bytes (default: 1024MB)
|
||||
* Set via MAX_FILE_SIZE in .env (in MB)
|
||||
*/
|
||||
maxFileSize: (() => {
|
||||
const sizeInMB = parseInt(process.env.MAX_FILE_SIZE || '1024', 10);
|
||||
if (isNaN(sizeInMB) || sizeInMB <= 0) {
|
||||
throw new Error('MAX_FILE_SIZE must be a positive number');
|
||||
logger.error('Invalid MAX_FILE_SIZE, must be a positive number. Using 1024MB.');
|
||||
return 1024 * 1024 * 1024;
|
||||
}
|
||||
return sizeInMB * 1024 * 1024; // Convert MB to bytes
|
||||
})(),
|
||||
/**
|
||||
* Enable auto-upload (true/false, default: false)
|
||||
* Set via AUTO_UPLOAD in .env
|
||||
*/
|
||||
autoUpload: process.env.AUTO_UPLOAD === 'true',
|
||||
|
||||
// =====================
|
||||
// =====================
|
||||
// Security
|
||||
// =====================
|
||||
/**
|
||||
* Security PIN for uploads (required for protected endpoints)
|
||||
* Set via DUMBDROP_PIN in .env
|
||||
*/
|
||||
pin: validatePin(process.env.DUMBDROP_PIN),
|
||||
|
||||
// =====================
|
||||
// =====================
|
||||
// UI settings
|
||||
// =====================
|
||||
/**
|
||||
* Site title (default: 'DumbDrop')
|
||||
* Set via DUMBDROP_TITLE in .env
|
||||
*/
|
||||
siteTitle: process.env.DUMBDROP_TITLE || DEFAULT_SITE_TITLE,
|
||||
|
||||
// =====================
|
||||
// =====================
|
||||
// Notification settings
|
||||
// =====================
|
||||
/**
|
||||
* Apprise notification URL (optional)
|
||||
* Set via APPRISE_URL in .env
|
||||
*/
|
||||
appriseUrl: process.env.APPRISE_URL,
|
||||
/**
|
||||
* Notification message template (default provided)
|
||||
* Set via APPRISE_MESSAGE in .env
|
||||
*/
|
||||
appriseMessage: process.env.APPRISE_MESSAGE || 'New file uploaded - {filename} ({size}), Storage used {storage}',
|
||||
/**
|
||||
* Size unit for notifications (optional)
|
||||
* Set via APPRISE_SIZE_UNIT in .env
|
||||
*/
|
||||
appriseSizeUnit: process.env.APPRISE_SIZE_UNIT,
|
||||
|
||||
// =====================
|
||||
// =====================
|
||||
// File extensions
|
||||
// =====================
|
||||
/**
|
||||
* Allowed file extensions (comma-separated, optional)
|
||||
* Set via ALLOWED_EXTENSIONS in .env
|
||||
*/
|
||||
allowedExtensions: process.env.ALLOWED_EXTENSIONS ?
|
||||
process.env.ALLOWED_EXTENSIONS.split(',').map(ext => ext.trim().toLowerCase()) :
|
||||
allowedExtensions: process.env.ALLOWED_EXTENSIONS ?
|
||||
process.env.ALLOWED_EXTENSIONS.split(',').map(ext => ext.trim().toLowerCase().replace(/^\./, '.')).filter(Boolean) : // Ensure dot prefix
|
||||
null,

  allowedIframeOrigins: process.env.ALLOWED_IFRAME_ORIGINS
    ? process.env.ALLOWED_IFRAME_ORIGINS.split(',').map(origin => origin.trim()).filter(Boolean)
    : null,

  /**
   * Max number of retries for client-side chunk uploads (default: 5)
   * Set via CLIENT_MAX_RETRIES in .env
   */
  clientMaxRetries: (() => {
    const envValue = process.env.CLIENT_MAX_RETRIES;
    const defaultValue = DEFAULT_CLIENT_MAX_RETRIES;
    if (envValue === undefined) {
      return logAndReturn('CLIENT_MAX_RETRIES', defaultValue, true);
    }
    if (envValue === undefined) return logAndReturn('CLIENT_MAX_RETRIES', defaultValue, true);
    const retries = parseInt(envValue, 10);
    if (isNaN(retries) || retries < 0) {
      logConfig(
        `Invalid CLIENT_MAX_RETRIES value: "${envValue}". Using default: ${defaultValue}`,
        'warning',
      );
      logger.warn(`Invalid CLIENT_MAX_RETRIES value: "${envValue}". Using default: ${defaultValue}`);
      return logAndReturn('CLIENT_MAX_RETRIES', defaultValue, true);
    }
    return logAndReturn('CLIENT_MAX_RETRIES', retries);
  })(),

  uploadPin: logAndReturn('UPLOAD_PIN', process.env.UPLOAD_PIN || null),
  // =====================
  // Security
  // =====================
  pin: validatePin(process.env.DUMBDROP_PIN),
  allowedIframeOrigins: process.env.ALLOWED_IFRAME_ORIGINS ?
    process.env.ALLOWED_IFRAME_ORIGINS.split(',').map(origin => origin.trim()).filter(Boolean) :
    null,

  // =====================
  // UI & Notifications
  // =====================
  siteTitle: process.env.DUMBDROP_TITLE || DEFAULT_SITE_TITLE,
  footerLinks: parseFooterLinks(process.env.FOOTER_LINKS),
  appriseUrl: process.env.APPRISE_URL || null,
  appriseMessage: process.env.APPRISE_MESSAGE || 'New file uploaded - {filename} ({size}), Storage used {storage}',
  appriseSizeUnit: process.env.APPRISE_SIZE_UNIT || 'Auto',
};

console.log(`Upload directory configured as: ${config.uploadDir}`);
// --- Log Sensitive & Conditional Config ---
logConfig(`NODE_ENV: ${config.nodeEnv}`);
logConfig(`PORT: ${config.port}`);
logConfig(`BASE_URL: ${config.baseUrl}`);
logConfig(`DEMO_MODE: ${config.isDemoMode}`);
if (config.storageType === 'local') {
  logConfig(`Upload Directory (Local): ${config.uploadDir}`);
} else {
  logConfig(`Metadata Directory (S3 Mode): ${config.uploadDir}`); // Clarify role in S3 mode
  logAndReturn('S3_REGION', config.s3Region);
  logAndReturn('S3_BUCKET_NAME', config.s3BucketName);
  logAndReturn('S3_ACCESS_KEY_ID', config.s3AccessKeyId, false, true); // Sensitive
  logAndReturn('S3_SECRET_ACCESS_KEY', config.s3SecretAccessKey, false, true); // Sensitive
  if (config.s3EndpointUrl) logAndReturn('S3_ENDPOINT_URL', config.s3EndpointUrl);
  logAndReturn('S3_FORCE_PATH_STYLE', config.s3ForcePathStyle);
}
logConfig(`Max File Size: ${config.maxFileSize / (1024 * 1024)}MB`);
logConfig(`Auto Upload: ${config.autoUpload}`);
if (config.allowedExtensions) logConfig(`Allowed Extensions: ${config.allowedExtensions.join(', ')}`);
if (config.pin) logAndReturn('DUMBDROP_PIN', config.pin, false, true); // Sensitive
if (config.allowedIframeOrigins) logConfig(`Allowed Iframe Origins: ${config.allowedIframeOrigins.join(', ')}`);
if (config.appriseUrl) logAndReturn('APPRISE_URL', config.appriseUrl);

// Validate required settings

// --- Configuration Validation ---
function validateConfig() {
  const errors = [];

  if (!config.port || config.port <= 0 || config.port > 65535) {
    errors.push('PORT must be a valid number between 1 and 65535');
  }

  if (config.maxFileSize <= 0) {
    errors.push('MAX_FILE_SIZE must be greater than 0');
  }

  // Validate BASE_URL format
  // Validate BASE_URL format and trailing slash
  try {
    let url = new URL(config.baseUrl);
    // Ensure BASE_URL ends with a slash
    if (!config.baseUrl.endsWith('/')) {
      logger.warn('BASE_URL did not end with a trailing slash. Automatically appending "/".');
      config.baseUrl = config.baseUrl + '/';
      errors.push('BASE_URL must end with a trailing slash ("/"). Current value: ' + config.baseUrl);
      // Attempt to fix it for runtime, but still report error
      // config.baseUrl = config.baseUrl + '/';
    }
  } catch (err) {
    errors.push('BASE_URL must be a valid URL');
    errors.push(`BASE_URL must be a valid URL. Error: ${err.message}`);
  }

  if (config.nodeEnv === 'production') {
    if (!config.appriseUrl) {
      logger.info('Notifications disabled - No Configuration');

  // Validate S3 configuration if STORAGE_TYPE is 's3'
  if (config.storageType === 's3') {
    if (!config.s3Region) errors.push('S3_REGION is required when STORAGE_TYPE is "s3"');
    if (!config.s3BucketName) errors.push('S3_BUCKET_NAME is required when STORAGE_TYPE is "s3"');
    if (!config.s3AccessKeyId) errors.push('S3_ACCESS_KEY_ID is required when STORAGE_TYPE is "s3"');
    if (!config.s3SecretAccessKey) errors.push('S3_SECRET_ACCESS_KEY is required when STORAGE_TYPE is "s3"');

    if (config.s3ForcePathStyle && !config.s3EndpointUrl) {
      logger.warn('S3_FORCE_PATH_STYLE is true, but S3_ENDPOINT_URL is not set. This typically requires a custom endpoint.');
    }
  }

  if (errors.length > 0) {
    throw new Error('Configuration validation failed:\n' + errors.join('\n'));

  // Validate local storage dir only if type is local
  if (config.storageType === 'local') {
    if (!config.uploadDir) {
      errors.push('Upload directory could not be determined for local storage.');
    } else {
      // Check existence and writability again (ensureLocalUploadDirExists might have failed)
      try {
        fs.accessSync(config.uploadDir, fs.constants.W_OK);
      } catch (err) {
        errors.push(`Local upload directory "${config.uploadDir}" is not writable or does not exist.`);
      }
    }
  }

  // Check metadata dir existence/writability regardless of storage type, as S3 uses it too
  try {
    const metadataParentDir = path.dirname(path.join(config.uploadDir, '.metadata'));
    if (!fs.existsSync(metadataParentDir)) {
      fs.mkdirSync(metadataParentDir, { recursive: true });
      logger.info(`Created base directory for metadata: ${metadataParentDir}`);
    }
    fs.accessSync(metadataParentDir, fs.constants.W_OK);
  } catch (err) {
    errors.push(`Cannot access or create directory for metadata storage at "${config.uploadDir}". Error: ${err.message}`);
  }

  if (config.nodeEnv === 'production') {
    if (!config.appriseUrl) {
      logger.info('Apprise notifications disabled (APPRISE_URL not set).');
    }
  }

  if (errors.length > 0) {
    logger.error('--- CONFIGURATION ERRORS ---');
    errors.forEach(err => logger.error(`- ${err}`));
    logger.error('-----------------------------');
    throw new Error('Configuration validation failed. Please check environment variables.');
  }

  logger.success('Configuration validated successfully.');
}

// Freeze configuration to prevent modifications
// Freeze configuration to prevent modifications after initial load
Object.freeze(config);

module.exports = {
  config,
  validateConfig
};
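For context, a minimal sketch of how the exported pieces would typically be consumed at startup. The require path and bootstrap sequence are assumptions, not shown in this diff:

// Hypothetical startup sequence (sketch; file layout assumed).
const { config, validateConfig } = require('./src/config');

try {
  validateConfig(); // throws with the collected error list on bad settings
} catch (err) {
  console.error(err.message);
  process.exit(1);
}
// config is frozen at this point, so later mutation attempts are no-ops.
console.log(`Starting ${config.siteTitle} on port ${config.port}`);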
@@ -1,133 +1,211 @@
/**
 * File management and listing route handlers.
 * Provides endpoints for listing, downloading, and managing uploaded files.
 * Handles file metadata, stats, and directory operations.
 * File management route handlers.
 * Provides endpoints for listing and deleting files using the configured storage adapter.
 * Handles file downloads by either providing a presigned URL (S3) or streaming (local).
 */

const express = require('express');
const router = express.Router();
const path = require('path');
const fs = require('fs').promises;
const { config } = require('../config');
const path = require('path'); // Needed for sanitization
const fs = require('fs'); // Needed ONLY for local file streaming
const { storageAdapter } = require('../storage'); // Import the selected adapter
const logger = require('../utils/logger');
const { formatFileSize } = require('../utils/fileUtils');
const { isDemoMode } = require('../utils/demoMode'); // Keep demo check if needed

/**
 * Get file information
 */
router.get('/:filename/info', async (req, res) => {
  const filePath = path.join(config.uploadDir, req.params.filename);

  try {
    const stats = await fs.stat(filePath);
    const fileInfo = {
      filename: req.params.filename,
      size: stats.size,
      formattedSize: formatFileSize(stats.size),
      uploadDate: stats.mtime,
      mimetype: path.extname(req.params.filename).slice(1)
    };

    res.json(fileInfo);
  } catch (err) {
    logger.error(`Failed to get file info: ${err.message}`);
    res.status(404).json({ error: 'File not found' });
  }
});

/**
 * Download file
 */
router.get('/:filename/download', async (req, res) => {
  const filePath = path.join(config.uploadDir, req.params.filename);

  try {
    await fs.access(filePath);

    // Set headers for download
    res.setHeader('Content-Disposition', `attachment; filename="${req.params.filename}"`);
    res.setHeader('Content-Type', 'application/octet-stream');

    // Stream the file
    const fileStream = require('fs').createReadStream(filePath);
    fileStream.pipe(res);

    // Handle errors during streaming
    fileStream.on('error', (err) => {
      logger.error(`File streaming error: ${err.message}`);
      if (!res.headersSent) {
        res.status(500).json({ error: 'Failed to download file' });
      }
    });

    logger.info(`File download started: ${req.params.filename}`);
  } catch (err) {
    logger.error(`File download failed: ${err.message}`);
    res.status(404).json({ error: 'File not found' });
  }
});

/**
 * List all files
 * List all files from the storage backend.
 */
router.get('/', async (req, res) => {
  // Demo mode handling (simplified list)
  if (isDemoMode()) {
    logger.info('[DEMO /files] Listing demo files');
    // Return a mock list or call demoAdapter.listFiles() if implemented
    return res.json({
      files: [{ filename: 'demo_file.txt', size: 1234, formattedSize: '1.21KB', uploadDate: new Date().toISOString() }],
      totalFiles: 1,
      totalSize: 1234,
      message: 'Demo Mode: Showing mock file list'
    });
  }

  try {
    const files = await fs.readdir(config.uploadDir);

    // Get stats for all files first
    const fileStatsPromises = files.map(async filename => {
      try {
        const stats = await fs.stat(path.join(config.uploadDir, filename));
        return { filename, stats, valid: stats.isFile() };
      } catch (err) {
        logger.error(`Failed to get stats for file ${filename}: ${err.message}`);
        return { filename, valid: false };
      }
    });
    const files = await storageAdapter.listFiles();
    const totalSize = files.reduce((acc, file) => acc + (file.size || 0), 0);

    const fileStats = await Promise.all(fileStatsPromises);

    // Filter and map valid files
    const fileList = fileStats
      .filter(file => file.valid)
      .map(({ filename, stats }) => ({
        filename,
        size: stats.size,
        formattedSize: formatFileSize(stats.size),
        uploadDate: stats.mtime
      }));

    // Sort files by upload date (newest first)
    fileList.sort((a, b) => b.uploadDate - a.uploadDate);

    res.json({
      files: fileList,
      totalFiles: fileList.length,
      totalSize: fileList.reduce((acc, file) => acc + file.size, 0)
    res.json({
      files: files,
      totalFiles: files.length,
      totalSize: totalSize
      // Note: formattedTotalSize could be calculated here if needed
    });
  } catch (err) {
    logger.error(`Failed to list files: ${err.message}`);
    res.status(500).json({ error: 'Failed to list files' });
    logger.error(`[Route /files GET] Failed to list files: ${err.message}`, err.stack);
    // Map common errors
    let statusCode = 500;
    let clientMessage = 'Failed to list files.';
    if (err.name === 'NoSuchBucket' || err.name === 'AccessDenied') { // S3 Specific
      clientMessage = 'Storage configuration error.';
    } else if (err.code === 'ENOENT') { // Local Specific
      clientMessage = 'Storage directory not found.';
    } else if (err.code === 'EACCES' || err.code === 'EPERM') { // Local Specific
      clientMessage = 'Storage permission error.';
    }
    res.status(statusCode).json({ error: clientMessage, details: err.message });
  }
});

/**
 * Delete file
 * Get a download URL or stream a file.
 * For S3, returns a presigned URL.
 * For Local, streams the file content.
 */
router.delete('/:filename', async (req, res) => {
  const filePath = path.join(config.uploadDir, req.params.filename);

router.get('/:filename/download', async (req, res) => {
  const rawFilename = req.params.filename;

  // Basic sanitization: Prevent directory traversal.
  // Adapters should also validate/sanitize keys/paths.
  const filename = path.basename(rawFilename);
  if (filename !== rawFilename || filename.includes('..')) {
    logger.error(`[Route /download] Invalid filename detected: ${rawFilename}`);
    return res.status(400).json({ error: 'Invalid filename' });
  }

  // Demo mode handling
  if (isDemoMode()) {
    logger.info(`[DEMO /download] Download request for ${filename}`);
    return res.json({
      message: 'Demo Mode: This would initiate download in production.',
      filename: filename
    });
  }

  try {
    await fs.access(filePath);
    await fs.unlink(filePath);
    logger.info(`File deleted: ${req.params.filename}`);
    res.json({ message: 'File deleted successfully' });
    const result = await storageAdapter.getDownloadUrlOrStream(filename);

    if (result.type === 'url') {
      // S3 Adapter returned a presigned URL
      logger.info(`[Route /download] Providing presigned URL for: ${filename}`);
      // Option 1: Redirect (Simple, but might hide URL from client)
      // res.redirect(result.value);

      // Option 2: Return URL in JSON (Gives client more control)
      res.json({ downloadUrl: result.value });

    } else if (result.type === 'path') {
      // Local Adapter returned a file path
      const filePath = result.value;
      logger.info(`[Route /download] Streaming local file: ${filePath}`);

      // Check if file still exists before streaming
      try {
        await fs.promises.access(filePath, fs.constants.R_OK);
      } catch (accessErr) {
        if (accessErr.code === 'ENOENT') {
          logger.warn(`[Route /download] Local file not found just before streaming: ${filePath}`);
          return res.status(404).json({ error: 'File not found' });
        }
        logger.error(`[Route /download] Cannot access local file for streaming ${filePath}: ${accessErr.message}`);
        return res.status(500).json({ error: 'Failed to access file for download' });
      }

      // Set headers for download
      res.setHeader('Content-Disposition', `attachment; filename="${filename}"`); // Use the sanitized basename
      res.setHeader('Content-Type', 'application/octet-stream'); // Generic type

      // Stream the file
      const fileStream = fs.createReadStream(filePath);

      fileStream.on('error', (streamErr) => {
        logger.error(`[Route /download] File streaming error for ${filePath}: ${streamErr.message}`);
        if (!res.headersSent) {
          // Try to send an error response if headers haven't been sent yet
          res.status(500).json({ error: 'Failed to stream file' });
        } else {
          // If headers already sent, we can only terminate the connection
          res.end();
        }
      });

      fileStream.pipe(res);

    } else {
      // Unknown result type from adapter
      logger.error(`[Route /download] Unknown result type from storage adapter: ${result.type}`);
      res.status(500).json({ error: 'Internal server error during download preparation' });
    }

  } catch (err) {
    logger.error(`File deletion failed: ${err.message}`);
    res.status(err.code === 'ENOENT' ? 404 : 500).json({
      error: err.code === 'ENOENT' ? 'File not found' : 'Failed to delete file'
    });
    logger.error(`[Route /download] Failed to get download for ${filename}: ${err.message}`, err.stack);
    let statusCode = 500;
    let clientMessage = 'Failed to initiate download.';

    // Use specific errors thrown by adapters if available
    if (err.message === 'File not found' || err.message === 'File not found in S3' || err.name === 'NoSuchKey' || err.code === 'ENOENT') {
      statusCode = 404;
      clientMessage = 'File not found.';
    } else if (err.message === 'Permission denied' || err.code === 'EACCES' || err.name === 'AccessDenied') {
      statusCode = 500; // Treat permission issues as internal server errors generally
      clientMessage = 'Storage permission error during download.';
    } else if (err.message === 'Invalid filename') {
      statusCode = 400;
      clientMessage = 'Invalid filename specified.';
    }

    // Avoid sending error if headers might have been partially sent by streaming
    if (!res.headersSent) {
      res.status(statusCode).json({ error: clientMessage, details: err.message });
    } else {
      logger.warn(`[Route /download] Error occurred after headers sent for ${filename}. Cannot send JSON error.`);
      res.end(); // Terminate response if possible
    }
  }
});
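Because this endpoint answers with either JSON (S3 presigned URL) or raw bytes (local streaming), a browser client has to branch on the response content type. A minimal fetch-based sketch; the `/files` mount path and the anchor-element trick are assumptions:

// Sketch: consume GET /files/:filename/download from a browser client.
async function downloadFile(filename) {
  const res = await fetch(`/files/${encodeURIComponent(filename)}/download`);
  if (!res.ok) throw new Error(`Download failed: ${res.status}`);

  const contentType = res.headers.get('Content-Type') || '';
  if (contentType.includes('application/json')) {
    // S3 mode: server returned { downloadUrl } with a presigned URL.
    const { downloadUrl } = await res.json();
    window.location.href = downloadUrl;
  } else {
    // Local mode: server streamed the bytes directly.
    const blob = await res.blob();
    const url = URL.createObjectURL(blob);
    const a = Object.assign(document.createElement('a'), { href: url, download: filename });
    a.click();
    URL.revokeObjectURL(url);
  }
}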

module.exports = router;

/**
 * Delete a file from the storage backend.
 */
router.delete('/:filename', async (req, res) => {
  const rawFilename = req.params.filename;

  // Basic sanitization
  const filename = path.basename(rawFilename);
  if (filename !== rawFilename || filename.includes('..')) {
    logger.error(`[Route /delete] Invalid filename detected: ${rawFilename}`);
    return res.status(400).json({ error: 'Invalid filename' });
  }

  // Demo mode handling
  if (isDemoMode()) {
    logger.info(`[DEMO /delete] Delete request for ${filename}`);
    // Call demoAdapter.deleteFile(filename) if implemented?
    return res.json({ message: 'File deleted (Demo)', filename: filename });
  }

  logger.info(`[Route /delete] Received delete request for: ${filename}`);

  try {
    await storageAdapter.deleteFile(filename);
    res.json({ message: 'File deleted successfully' });
  } catch (err) {
    logger.error(`[Route /delete] Failed to delete file ${filename}: ${err.message}`, err.stack);
    let statusCode = 500;
    let clientMessage = 'Failed to delete file.';

    // Use specific errors thrown by adapters if available
    if (err.message === 'File not found' || err.message === 'File not found in S3' || err.name === 'NoSuchKey' || err.code === 'ENOENT') {
      statusCode = 404;
      clientMessage = 'File not found.';
    } else if (err.message === 'Permission denied' || err.code === 'EACCES' || err.name === 'AccessDenied') {
      statusCode = 500;
      clientMessage = 'Storage permission error during delete.';
    } else if (err.message === 'Invalid filename') {
      statusCode = 400;
      clientMessage = 'Invalid filename specified.';
    }

    res.status(statusCode).json({ error: clientMessage, details: err.message });
  }
});

module.exports = router;
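Deletion is a single request from the client side; a short sketch under the same assumed `/files` mount path:

// Sketch: delete a file via the route above.
async function deleteFile(filename) {
  const res = await fetch(`/files/${encodeURIComponent(filename)}`, { method: 'DELETE' });
  const body = await res.json();
  if (!res.ok) throw new Error(body.error || 'Delete failed');
  return body.message; // 'File deleted successfully'
}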
@@ -1,456 +1,200 @@
/**
 * File upload route handlers and batch upload management.
 * Handles file uploads, chunked transfers, and folder creation.
 * Manages upload sessions using persistent metadata for resumability.
 * File upload route handlers.
 * Delegates storage operations to the configured storage adapter.
 * Handles multipart uploads via adapter logic.
 */

const express = require('express');
const router = express.Router();
const crypto = require('crypto');
const path = require('path');
const fs = require('fs').promises; // Use promise-based fs
const fsSync = require('fs'); // For sync checks like existsSync
const path = require('path'); // Still needed for extension checks
const { config } = require('../config');
const logger = require('../utils/logger');
const { getUniqueFilePath, getUniqueFolderPath, sanitizeFilename, sanitizePathPreserveDirs, isValidBatchId } = require('../utils/fileUtils');
const { sendNotification } = require('../services/notifications');
const { isDemoMode } = require('../utils/demoMode');

// --- Persistence Setup ---
const METADATA_DIR = path.join(config.uploadDir, '.metadata');

// --- In-Memory Maps (Still useful for session-level data) ---
// Store folder name mappings for batch uploads (avoids FS lookups during session)
const folderMappings = new Map();
// Store batch activity timestamps (for cleaning up stale batches/folder mappings)
const batchActivity = new Map();

const BATCH_TIMEOUT = 30 * 60 * 1000; // 30 minutes for batch/folderMapping cleanup

// --- Helper Functions for Metadata ---

async function readUploadMetadata(uploadId) {
  if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
    logger.warn(`Attempted to read metadata with invalid uploadId: ${uploadId}`);
    return null;
  }
  const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
  try {
    const data = await fs.readFile(metaFilePath, 'utf8');
    return JSON.parse(data);
  } catch (err) {
    if (err.code === 'ENOENT') {
      return null; // Metadata file doesn't exist - normal case for new/finished uploads
    }
    logger.error(`Error reading metadata for ${uploadId}: ${err.message}`);
    throw err; // Rethrow other errors
  }
}

async function writeUploadMetadata(uploadId, metadata) {
  if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
    logger.error(`Attempted to write metadata with invalid uploadId: ${uploadId}`);
    return; // Prevent writing
  }
  const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
  metadata.lastActivity = Date.now(); // Update timestamp on every write
  // Write atomically if possible (write to temp then rename) for more safety.
  // The temp path is declared outside the try block so the catch can clean it up.
  const tempMetaPath = `${metaFilePath}.${crypto.randomBytes(4).toString('hex')}.tmp`;
  try {
    await fs.writeFile(tempMetaPath, JSON.stringify(metadata, null, 2));
    await fs.rename(tempMetaPath, metaFilePath);
  } catch (err) {
    logger.error(`Error writing metadata for ${uploadId}: ${err.message}`);
    // Attempt to clean up temp file if rename failed
    try { await fs.unlink(tempMetaPath); } catch (unlinkErr) {/* ignore */}
    throw err;
  }
}

async function deleteUploadMetadata(uploadId) {
  if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
    logger.warn(`Attempted to delete metadata with invalid uploadId: ${uploadId}`);
    return;
  }
  const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
  try {
    await fs.unlink(metaFilePath);
    logger.debug(`Deleted metadata file for upload: ${uploadId}.meta`);
  } catch (err) {
    if (err.code !== 'ENOENT') { // Ignore if already deleted
      logger.error(`Error deleting metadata file ${uploadId}.meta: ${err.message}`);
    }
  }
}
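For reference, a `.meta` file written by these helpers holds the session state shown below. The field names come from the metadata object built in the init route; the values here are purely illustrative:

{
  "uploadId": "3f9d2c41a8b07e65...",
  "originalFilename": "photos/trip/IMG_0001.jpg",
  "filePath": "/app/uploads/photos/trip/IMG_0001.jpg",
  "partialFilePath": "/app/uploads/photos/trip/IMG_0001.jpg.partial",
  "fileSize": 1048576,
  "bytesReceived": 524288,
  "batchId": "1714070000000-a1b2c3d4e",
  "createdAt": 1714070000000,
  "lastActivity": 1714070005000
}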
// --- Batch Cleanup (Focuses on batchActivity map, not primary upload state) ---
let batchCleanupInterval;
function startBatchCleanup() {
  if (batchCleanupInterval) clearInterval(batchCleanupInterval);
  batchCleanupInterval = setInterval(() => {
    const now = Date.now();
    logger.info(`Running batch cleanup, checking ${batchActivity.size} active batch sessions`);
    let cleanedCount = 0;
    for (const [batchId, lastActivity] of batchActivity.entries()) {
      if (now - lastActivity >= BATCH_TIMEOUT) {
        logger.info(`Cleaning up inactive batch session: ${batchId}`);
        batchActivity.delete(batchId);
        // Clean up associated folder mappings for this batch
        for (const key of folderMappings.keys()) {
          if (key.endsWith(`-${batchId}`)) {
            folderMappings.delete(key);
          }
        }
        cleanedCount++;
      }
    }
    if (cleanedCount > 0) logger.info(`Cleaned up ${cleanedCount} inactive batch sessions.`);
  }, 5 * 60 * 1000); // Check every 5 minutes
  batchCleanupInterval.unref(); // Allow process to exit if this is the only timer
  return batchCleanupInterval;
}
function stopBatchCleanup() {
  if (batchCleanupInterval) {
    clearInterval(batchCleanupInterval);
    batchCleanupInterval = null;
  }
}
if (!process.env.DISABLE_BATCH_CLEANUP) {
  startBatchCleanup();
}
const { storageAdapter } = require('../storage'); // Import the adapter factory's result
const { isDemoMode } = require('../utils/demoMode'); // Keep demo check for specific route behavior if needed

// --- Routes ---

// Initialize upload
router.post('/init', async (req, res) => {
  // DEMO MODE CHECK - Bypass persistence if in demo mode
  // Note: Demo mode might bypass storage adapter logic via middleware or adapter factory itself.
  // If specific demo responses are needed here, keep the check.
  if (isDemoMode()) {
    const { filename, fileSize } = req.body;
    const uploadId = 'demo-' + crypto.randomBytes(16).toString('hex');
    logger.info(`[DEMO] Initialized upload for ${filename} (${fileSize} bytes) with ID ${uploadId}`);
    // Simulate zero-byte completion for demo
    if (Number(fileSize) === 0) {
      logger.success(`[DEMO] Completed zero-byte file upload: ${filename}`);
      sendNotification(filename, 0, config); // Still send notification if configured
    }
    return res.json({ uploadId });
    // Simplified Demo Response (assuming demoAdapter handles non-persistence)
    const { filename = 'demo_file', fileSize = 0 } = req.body;
    const demoUploadId = 'demo-' + Math.random().toString(36).substr(2, 9);
    logger.info(`[DEMO] Init request for ${filename}, size ${fileSize}. Returning ID ${demoUploadId}`);
    if (Number(fileSize) === 0) {
      logger.success(`[DEMO] Simulated completion of zero-byte file: ${filename}`);
      // Potentially call demoAdapter.completeUpload or similar mock logic if needed
    }
    return res.json({ uploadId: demoUploadId });
  }

  const { filename, fileSize } = req.body;
  const clientBatchId = req.headers['x-batch-id'];
  const clientBatchId = req.headers['x-batch-id']; // Adapter might use this

  // --- Basic validations ---
  if (!filename) return res.status(400).json({ error: 'Missing filename' });
  if (fileSize === undefined || fileSize === null) return res.status(400).json({ error: 'Missing fileSize' });
  const size = Number(fileSize);
  if (isNaN(size) || size < 0) return res.status(400).json({ error: 'Invalid file size' });
  const maxSizeInBytes = config.maxFileSize;
  if (size > maxSizeInBytes) return res.status(413).json({ error: 'File too large', limit: maxSizeInBytes });

  const batchId = clientBatchId || `${Date.now()}-${crypto.randomBytes(4).toString('hex').substring(0, 9)}`;
  if (clientBatchId && !isValidBatchId(batchId)) return res.status(400).json({ error: 'Invalid batch ID format' });
  batchActivity.set(batchId, Date.now()); // Track batch session activity
  // --- Max File Size Check ---
  if (size > config.maxFileSize) {
    logger.warn(`Upload rejected: File size ${size} exceeds limit ${config.maxFileSize}`);
    return res.status(413).json({ error: 'File too large', limit: config.maxFileSize });
  }

  // --- Extension Check ---
  // Perform extension check before handing off to adapter
  if (config.allowedExtensions && config.allowedExtensions.length > 0) {
    const fileExt = path.extname(filename).toLowerCase();
    // Check if the extracted extension (including '.') is in the allowed list
    if (!fileExt || !config.allowedExtensions.includes(fileExt)) {
      logger.warn(`Upload rejected: File type not allowed: ${filename} (Extension: ${fileExt || 'none'})`);
      return res.status(400).json({ error: 'File type not allowed', receivedExtension: fileExt || 'none' });
    }
    logger.debug(`File extension ${fileExt} allowed for ${filename}`);
  }

  try {
    // --- Path handling and Sanitization ---
    const sanitizedFilename = sanitizePathPreserveDirs(filename);
    const safeFilename = path.normalize(sanitizedFilename)
      .replace(/^(\.\.(\/|\\|$))+/, '')
      .replace(/\\/g, '/')
      .replace(/^\/+/, '');
    logger.info(`Upload init request for: ${safeFilename}`);
    // Delegate initialization to the storage adapter
    const result = await storageAdapter.initUpload(filename, size, clientBatchId);

    // --- Extension Check ---
    if (config.allowedExtensions) {
      const fileExt = path.extname(safeFilename).toLowerCase();
      if (fileExt && !config.allowedExtensions.includes(fileExt)) {
        logger.warn(`File type not allowed: ${safeFilename} (Extension: ${fileExt})`);
        return res.status(400).json({ error: 'File type not allowed', receivedExtension: fileExt });
      }
    }

    // --- Determine Paths & Handle Folders ---
    const uploadId = crypto.randomBytes(16).toString('hex');
    let finalFilePath = path.join(config.uploadDir, safeFilename);
    const pathParts = safeFilename.split('/').filter(Boolean);

    if (pathParts.length > 1) {
      const originalFolderName = pathParts[0];
      let newFolderName = folderMappings.get(`${originalFolderName}-${batchId}`);
      const baseFolderPath = path.join(config.uploadDir, newFolderName || originalFolderName);

      if (!newFolderName) {
        await fs.mkdir(path.dirname(baseFolderPath), { recursive: true });
        try {
          await fs.mkdir(baseFolderPath, { recursive: false });
          newFolderName = originalFolderName;
        } catch (err) {
          if (err.code === 'EEXIST') {
            const uniqueFolderPath = await getUniqueFolderPath(baseFolderPath);
            newFolderName = path.basename(uniqueFolderPath);
            logger.info(`Folder "${originalFolderName}" exists or conflict, using unique "${newFolderName}" for batch ${batchId}`);
            await fs.mkdir(path.join(config.uploadDir, newFolderName), { recursive: true });
          } else {
            throw err;
          }
        }
        folderMappings.set(`${originalFolderName}-${batchId}`, newFolderName);
      }
      pathParts[0] = newFolderName;
      finalFilePath = path.join(config.uploadDir, ...pathParts);
      await fs.mkdir(path.dirname(finalFilePath), { recursive: true });
    } else {
      await fs.mkdir(config.uploadDir, { recursive: true }); // Ensure base upload dir exists
    }

    // --- Check Final Path Collision & Get Unique Name if Needed ---
    let checkPath = finalFilePath;
    let counter = 1;
    while (fsSync.existsSync(checkPath)) {
      logger.warn(`Final destination file already exists: ${checkPath}. Generating unique name.`);
      const dir = path.dirname(finalFilePath);
      const ext = path.extname(finalFilePath);
      const baseName = path.basename(finalFilePath, ext);
      checkPath = path.join(dir, `${baseName} (${counter})${ext}`);
      counter++;
    }
    if (checkPath !== finalFilePath) {
      logger.info(`Using unique final path: ${checkPath}`);
      finalFilePath = checkPath;
      // If path changed, ensure directory exists (might be needed if baseName contained '/')
      await fs.mkdir(path.dirname(finalFilePath), { recursive: true });
    }

    const partialFilePath = finalFilePath + '.partial';

    // --- Create and Persist Metadata ---
    const metadata = {
      uploadId,
      originalFilename: safeFilename, // Store the path as received by client
      filePath: finalFilePath, // The final, possibly unique, path
      partialFilePath,
      fileSize: size,
      bytesReceived: 0,
      batchId,
      createdAt: Date.now(),
      lastActivity: Date.now()
    };

    await writeUploadMetadata(uploadId, metadata);
    logger.info(`Initialized persistent upload: ${uploadId} for ${safeFilename} -> ${finalFilePath}`);

    // --- Handle Zero-Byte Files --- // (Important: Handle *after* metadata potentially exists)
    if (size === 0) {
      try {
        await fs.writeFile(finalFilePath, ''); // Create the empty file
        logger.success(`Completed zero-byte file upload: ${metadata.originalFilename} as ${finalFilePath}`);
        await deleteUploadMetadata(uploadId); // Clean up metadata since it's done
        sendNotification(metadata.originalFilename, 0, config);
      } catch (writeErr) {
        logger.error(`Failed to create zero-byte file ${finalFilePath}: ${writeErr.message}`);
        await deleteUploadMetadata(uploadId).catch(() => {}); // Attempt cleanup on error
        throw writeErr; // Let the main catch block handle it
      }
    }

    res.json({ uploadId });
    // Respond with the uploadId generated by the adapter/system
    res.json({ uploadId: result.uploadId });

  } catch (err) {
    logger.error(`Upload initialization failed: ${err.message} ${err.stack}`);
    return res.status(500).json({ error: 'Failed to initialize upload', details: err.message });
    logger.error(`[Route /init] Upload initialization failed: ${err.message}`, err.stack);
    // Map common errors
    let statusCode = 500;
    let clientMessage = 'Failed to initialize upload.';
    if (err.message.includes('Invalid batch ID format')) {
      statusCode = 400;
      clientMessage = err.message;
    } else if (err.name === 'NoSuchBucket' || err.name === 'AccessDenied') { // S3 Specific
      statusCode = 500; // Internal config error
      clientMessage = 'Storage configuration error.';
    } else if (err.code === 'EACCES' || err.code === 'EPERM' || err.message.includes('writable')) { // Local Specific
      statusCode = 500;
      clientMessage = 'Storage permission or access error.';
    }
    // Add more specific error mapping based on adapter exceptions if needed

    res.status(statusCode).json({ error: clientMessage, details: err.message }); // Include details only for logging/debugging
  }
});
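A sketch of the matching client call; the `/upload` mount path is an assumption:

// Sketch: initialize an upload session before sending chunks.
async function initUpload(file, batchId) {
  const res = await fetch('/upload/init', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'x-batch-id': batchId },
    body: JSON.stringify({ filename: file.name, fileSize: file.size }),
  });
  if (!res.ok) throw new Error((await res.json()).error || 'Init failed');
  const { uploadId } = await res.json();
  return uploadId;
}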
// Upload chunk
router.post('/chunk/:uploadId', express.raw({
  limit: config.maxFileSize + (10 * 1024 * 1024), // Generous limit for raw body
  type: 'application/octet-stream'
router.post('/chunk/:uploadId', express.raw({
  limit: config.maxFileSize + (10 * 1024 * 1024), // Allow slightly larger raw body than max file size
  type: 'application/octet-stream'
}), async (req, res) => {
  // DEMO MODE CHECK
  if (isDemoMode()) {
    const { uploadId } = req.params;
    logger.debug(`[DEMO] Received chunk for ${uploadId}`);
    // Fake progress - requires knowing file size which isn't easily available here in demo
    const demoProgress = Math.min(100, Math.random() * 100); // Placeholder
    return res.json({ bytesReceived: 0, progress: demoProgress });
  }

  const { uploadId } = req.params;
  let chunk = req.body;
  let chunkSize = chunk.length;
  const clientBatchId = req.headers['x-batch-id']; // Logged but not used directly here
  const chunk = req.body;
  const clientBatchId = req.headers['x-batch-id']; // May be useful for logging context

  if (!chunkSize) return res.status(400).json({ error: 'Empty chunk received' });
  // ** CRITICAL FOR S3: Get Part Number from client **
  // Client needs to send this, e.g., ?partNumber=1, ?partNumber=2, ...
  const partNumber = parseInt(req.query.partNumber || '1', 10);
  if (isNaN(partNumber) || partNumber < 1) {
    logger.error(`[Route /chunk] Invalid partNumber received: ${req.query.partNumber}`);
    return res.status(400).json({ error: 'Missing or invalid partNumber query parameter (must be >= 1)' });
  }

  // Demo mode handling (simplified)
  if (isDemoMode()) {
    logger.debug(`[DEMO /chunk] Received chunk for ${uploadId}, part ${partNumber}, size ${chunk?.length || 0}`);
    // Simulate progress - more sophisticated logic could go in a demoAdapter
    const demoProgress = Math.min(100, Math.random() * 100);
    const completed = demoProgress > 95; // Simulate completion occasionally
    if (completed) {
      logger.info(`[DEMO /chunk] Simulated completion for ${uploadId}`);
    }
    return res.json({ bytesReceived: 0, progress: demoProgress, completed }); // Approximate response
  }

  if (!chunk || chunk.length === 0) {
    logger.warn(`[Route /chunk] Received empty chunk for uploadId: ${uploadId}, part ${partNumber}`);
    return res.status(400).json({ error: 'Empty chunk received' });
  }

  let metadata;
  let fileHandle;

  try {
    metadata = await readUploadMetadata(uploadId);
    // Delegate chunk storage to the adapter
    const result = await storageAdapter.storeChunk(uploadId, chunk, partNumber);

    if (!metadata) {
      logger.warn(`Upload metadata not found for chunk request: ${uploadId}. Client Batch ID: ${clientBatchId || 'none'}. Upload may be complete or cancelled.`);
      // Check if the final file exists as a fallback for completed uploads
      // This is a bit fragile, but handles cases where metadata was deleted slightly early
    // If the adapter indicates completion after storing this chunk, finalize the upload
    if (result.completed) {
      logger.info(`[Route /chunk] Chunk ${partNumber} for ${uploadId} triggered completion. Finalizing...`);
      try {
        // Need to guess the final path - THIS IS NOT ROBUST
        // A better approach might be needed if this is common
        // For now, just return 404
        // await fs.access(potentialFinalPath);
        // return res.json({ bytesReceived: fileSizeGuess, progress: 100 });
        return res.status(404).json({ error: 'Upload session not found or already completed' });
      } catch (finalCheckErr) {
        return res.status(404).json({ error: 'Upload session not found or already completed' });
        const completionResult = await storageAdapter.completeUpload(uploadId);
        logger.success(`[Route /chunk] Successfully finalized upload ${uploadId}. Final path/key: ${completionResult.finalPath}`);
        // Send final success response (ensure progress is 100)
        return res.json({ bytesReceived: result.bytesReceived, progress: 100, completed: true });
      } catch (completionError) {
        logger.error(`[Route /chunk] CRITICAL: Failed to finalize completed upload ${uploadId} after storing chunk ${partNumber}: ${completionError.message}`, completionError.stack);
        // What to return to client? The chunk was stored, but completion failed.
        // Return 500, indicating server-side issue during finalization.
        return res.status(500).json({ error: 'Upload chunk received, but failed to finalize.', details: completionError.message });
      }
    } else {
      // Chunk stored, but upload not yet complete, return progress
      res.json({ bytesReceived: result.bytesReceived, progress: result.progress, completed: false });
    }

    // Update batch activity using metadata's batchId
    if (metadata.batchId && isValidBatchId(metadata.batchId)) {
      batchActivity.set(metadata.batchId, Date.now());
    }

    // --- Sanity Checks & Idempotency ---
    if (metadata.bytesReceived >= metadata.fileSize) {
      logger.warn(`Received chunk for already completed upload ${uploadId} (${metadata.originalFilename}). Finalizing again if needed.`);
      // Ensure finalization if possible, then return success
      try {
        await fs.access(metadata.filePath); // Check if final file exists
        logger.info(`Upload ${uploadId} already finalized at ${metadata.filePath}.`);
      } catch (accessErr) {
        // Final file doesn't exist, attempt rename
        try {
          await fs.rename(metadata.partialFilePath, metadata.filePath);
          logger.info(`Finalized ${uploadId} on redundant chunk request (renamed ${metadata.partialFilePath} -> ${metadata.filePath}).`);
        } catch (renameErr) {
          if (renameErr.code === 'ENOENT') {
            logger.warn(`Partial file ${metadata.partialFilePath} missing during redundant chunk finalization for ${uploadId}.`);
          } else {
            logger.error(`Error finalizing ${uploadId} on redundant chunk: ${renameErr.message}`);
          }
        }
      }
      // Regardless of rename outcome, delete metadata if it still exists
      await deleteUploadMetadata(uploadId);
      return res.json({ bytesReceived: metadata.fileSize, progress: 100 });
    }

    // Prevent writing beyond expected file size (simple protection)
    if (metadata.bytesReceived + chunkSize > metadata.fileSize) {
      logger.warn(`Chunk for ${uploadId} exceeds expected file size. Received ${metadata.bytesReceived + chunkSize}, expected ${metadata.fileSize}. Truncating chunk.`);
      const bytesToWrite = metadata.fileSize - metadata.bytesReceived;
      chunk = chunk.slice(0, bytesToWrite);
      chunkSize = chunk.length;
      if (chunkSize <= 0) { // If we already have exactly the right amount
        logger.info(`Upload ${uploadId} already has expected bytes. Skipping write, proceeding to finalize.`);
        // Skip write, proceed to finalization check below
        metadata.bytesReceived = metadata.fileSize; // Ensure state is correct for finalization
      } else {
        logger.info(`Truncated chunk for ${uploadId} to ${chunkSize} bytes.`);
      }
    }

    // --- Write Chunk (Append Mode) --- // Only write if chunk has size after potential truncation
    if (chunkSize > 0) {
      fileHandle = await fs.open(metadata.partialFilePath, 'a');
      const writeResult = await fileHandle.write(chunk);
      await fileHandle.close(); // Close immediately

      if (writeResult.bytesWritten !== chunkSize) {
        // This indicates a partial write, which is problematic.
        logger.error(`Partial write for chunk ${uploadId}! Expected ${chunkSize}, wrote ${writeResult.bytesWritten}. Disk full?`);
        // How to recover? Maybe revert bytesReceived? For now, throw.
        throw new Error(`Failed to write full chunk for ${uploadId}`);
      }
      metadata.bytesReceived += writeResult.bytesWritten;
    }

    // --- Update State --- (bytesReceived updated above or set if truncated to zero)
    const progress = metadata.fileSize === 0 ? 100 :
      Math.min(Math.round((metadata.bytesReceived / metadata.fileSize) * 100), 100);

    logger.debug(`Chunk written for ${uploadId}: ${metadata.bytesReceived}/${metadata.fileSize} (${progress}%)`);

    // --- Persist Updated Metadata (Before potential finalization) ---
    await writeUploadMetadata(uploadId, metadata);

    // --- Check for Completion --- // Now happens after metadata update
    if (metadata.bytesReceived >= metadata.fileSize) {
      logger.info(`Upload ${uploadId} (${metadata.originalFilename}) completed ${metadata.bytesReceived} bytes.`);
      try {
        await fs.rename(metadata.partialFilePath, metadata.filePath);
        logger.success(`Upload completed and finalized: ${metadata.originalFilename} as ${metadata.filePath} (${metadata.fileSize} bytes)`);
        await deleteUploadMetadata(uploadId); // Clean up metadata file AFTER successful rename
        sendNotification(metadata.originalFilename, metadata.fileSize, config);
      } catch (renameErr) {
        if (renameErr.code === 'ENOENT') {
          logger.warn(`Partial file ${metadata.partialFilePath} not found during finalization for ${uploadId}. Assuming already finalized elsewhere.`);
          // Attempt to delete metadata anyway if partial is gone
          await deleteUploadMetadata(uploadId).catch(() => {});
        } else {
          logger.error(`CRITICAL: Failed to rename partial file ${metadata.partialFilePath} to ${metadata.filePath}: ${renameErr.message}`);
          // Keep metadata and partial file for manual recovery.
          // Return success to client as data is likely there, but log server issue.
        }
      }
    }

    res.json({ bytesReceived: metadata.bytesReceived, progress });

  } catch (err) {
    // Ensure file handle is closed on error
    if (fileHandle) {
      await fileHandle.close().catch(closeErr => logger.error(`Error closing file handle for ${uploadId} after error: ${closeErr.message}`));
    logger.error(`[Route /chunk] Chunk upload failed for ${uploadId}, part ${partNumber}: ${err.message}`, err.stack);
    // Map common errors
    let statusCode = 500;
    let clientMessage = 'Failed to process chunk.';

    if (err.message.includes('Upload session not found') || err.name === 'NoSuchUpload' || err.code === 'ENOENT') {
      statusCode = 404;
      clientMessage = 'Upload session not found or already completed/aborted.';
    } else if (err.name === 'InvalidPart' || err.name === 'InvalidPartOrder') { // S3 Specific
      statusCode = 400;
      clientMessage = 'Invalid upload chunk sequence or data.';
    } else if (err.name === 'SlowDown') { // S3 Throttling
      statusCode = 429;
      clientMessage = 'Upload rate limit exceeded by storage provider, please try again later.';
    } else if (err.code === 'EACCES' || err.code === 'EPERM') { // Local specific
      statusCode = 500;
      clientMessage = 'Storage permission error while writing chunk.';
    }
    logger.error(`Chunk upload failed for ${uploadId}: ${err.message} ${err.stack}`);
    // Don't delete metadata on generic chunk errors, let client retry or cleanup handle stale files
    res.status(500).json({ error: 'Failed to process chunk', details: err.message });
    // Add more specific error mapping if needed

    res.status(statusCode).json({ error: clientMessage, details: err.message });
  }
});
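Since the route requires a 1-based partNumber query parameter on every chunk, the client has to send chunks sequentially and number them itself. A sketch of that loop with simple retry; the `/upload` mount path, the 5 MB chunk size (S3 multipart parts must generally be at least 5 MB), and the maxRetries default (cf. CLIENT_MAX_RETRIES in .env) are assumptions:

// Sketch: send a file as sequential, numbered chunks with basic retry.
async function uploadChunks(file, uploadId, { chunkSize = 5 * 1024 * 1024, maxRetries = 5 } = {}) {
  let partNumber = 1;
  for (let offset = 0; offset < file.size; offset += chunkSize, partNumber++) {
    const chunk = file.slice(offset, offset + chunkSize);
    let attempt = 0;
    for (;;) {
      const res = await fetch(`/upload/chunk/${uploadId}?partNumber=${partNumber}`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/octet-stream' },
        body: chunk,
      });
      if (res.ok) {
        const { progress, completed } = await res.json();
        console.log(`part ${partNumber}: ${progress}%${completed ? ' (done)' : ''}`);
        break; // this part succeeded; move to the next one
      }
      if (++attempt > maxRetries) {
        throw new Error(`part ${partNumber} failed after ${maxRetries} retries`);
      }
    }
  }
}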
// Cancel upload
router.post('/cancel/:uploadId', async (req, res) => {
  // DEMO MODE CHECK
  const { uploadId } = req.params;

  if (isDemoMode()) {
    logger.info(`[DEMO] Upload cancelled: ${req.params.uploadId}`);
    return res.json({ message: 'Upload cancelled (Demo)' });
    logger.info(`[DEMO /cancel] Request received for ${uploadId}`);
    // Call demoAdapter.abortUpload(uploadId) if it exists?
    return res.json({ message: 'Upload cancelled (Demo)' });
  }

  const { uploadId } = req.params;
  logger.info(`Received cancel request for upload: ${uploadId}`);
  logger.info(`[Route /cancel] Received cancel request for upload: ${uploadId}`);

  try {
    const metadata = await readUploadMetadata(uploadId);

    if (metadata) {
      // Delete partial file first
      try {
        await fs.unlink(metadata.partialFilePath);
        logger.info(`Deleted partial file on cancellation: ${metadata.partialFilePath}`);
      } catch (unlinkErr) {
        if (unlinkErr.code !== 'ENOENT') { // Ignore if already gone
          logger.error(`Failed to delete partial file ${metadata.partialFilePath} on cancel: ${unlinkErr.message}`);
        }
      }
      // Then delete metadata file
      await deleteUploadMetadata(uploadId);
      logger.info(`Upload cancelled and cleaned up: ${uploadId} (${metadata.originalFilename})`);
    } else {
      logger.warn(`Cancel request for non-existent or already completed upload: ${uploadId}`);
    }

    res.json({ message: 'Upload cancelled or already complete' });
    // Delegate cancellation to the storage adapter
    await storageAdapter.abortUpload(uploadId);
    res.json({ message: 'Upload cancelled successfully or was already inactive.' });
  } catch (err) {
    logger.error(`Error during upload cancellation for ${uploadId}: ${err.message}`);
    res.status(500).json({ error: 'Failed to cancel upload' });
    // Abort errors are often less critical, log them but maybe return success anyway
    logger.error(`[Route /cancel] Error during upload cancellation for ${uploadId}: ${err.message}`, err.stack);
    // Don't necessarily send 500, as the goal is just to stop the upload client-side
    // Maybe just return success but log the server-side issue?
    // Or return 500 if S3 abort fails significantly? Let's return 500 for now.
    res.status(500).json({ error: 'Failed to cancel upload on server.', details: err.message });
  }
});

module.exports = {
  router,
  startBatchCleanup,
  stopBatchCleanup,
  // Export for testing if required
  readUploadMetadata,
  writeUploadMetadata,
  deleteUploadMetadata
};
// Export the router, remove previous function exports
module.exports = { router };
59
src/storage/index.js
Normal file
@@ -0,0 +1,59 @@
/**
 * Storage Adapter Factory
 * Reads the application configuration and exports the appropriate storage adapter
 * (either local or S3) based on the STORAGE_TYPE environment variable.
 * This provides a single point of import for storage operations throughout the app.
 */

const { config } = require('../config'); // Assuming config is initialized before this runs
const logger = require('../utils/logger');

let storageAdapter;

logger.info(`Initializing storage adapter based on STORAGE_TYPE: "${config.storageType}"`);

if (config.isDemoMode) {
  logger.warn('[Storage] DEMO MODE ENABLED. Using mock storage adapter.');
  // In demo mode, we might want a completely separate mock adapter
  // or potentially just disable storage operations. For now, let's use local
  // but be aware demo mode might need its own logic if strict separation is needed.
  // Or, create a dedicated demoAdapter.js
  // For simplicity now, let's log and maybe default to local (which is non-persistent in demo anyway).
  // A dedicated demoAdapter would be cleaner:
  // storageAdapter = require('./demoAdapter'); // Requires creating demoAdapter.js
  // Fallback for now:
  storageAdapter = require('./localAdapter');
  logger.info('[Storage] Using Local Adapter for Demo Mode (operations will be mocked or non-persistent).');

} else if (config.storageType === 's3') {
  logger.info('[Storage] Using S3 Storage Adapter.');
  try {
    storageAdapter = require('./s3Adapter');
  } catch (error) {
    logger.error(`[Storage] Failed to load S3 Adapter: ${error.message}`);
    logger.error('[Storage] Check S3 configuration environment variables and AWS SDK installation.');
    process.exit(1); // Exit if the configured adapter fails to load
  }
} else {
  // Default to local storage if type is 'local' or invalid/not specified
  if (config.storageType !== 'local') {
    logger.warn(`[Storage] Invalid or unspecified STORAGE_TYPE "${config.storageType}", defaulting to "local".`);
  }
  logger.info('[Storage] Using Local Storage Adapter.');
  try {
    storageAdapter = require('./localAdapter');
  } catch (error) {
    logger.error(`[Storage] Failed to load Local Adapter: ${error.message}`);
    process.exit(1); // Exit if the default adapter fails
  }
}

// Ensure the selected adapter is valid before exporting
if (!storageAdapter || typeof storageAdapter.initUpload !== 'function') {
  logger.error('[Storage] Failed to initialize a valid storage adapter. Exiting.');
  process.exit(1);
}

logger.success(`[Storage] Storage adapter "${config.storageType}" initialized successfully.`);

module.exports = { storageAdapter };
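The factory only sanity-checks initUpload, but taken together the routes above call a wider surface. A sketch of the contract an adapter module is expected to export; the method names are taken from the call sites in this diff, while the signatures and return shapes are inferred, not authoritative:

// Sketch of the storage adapter contract, inferred from route call sites.
module.exports = {
  // Upload lifecycle
  async initUpload(filename, fileSize, clientBatchId) { /* -> { uploadId } */ },
  async storeChunk(uploadId, chunk, partNumber) { /* -> { bytesReceived, progress, completed } */ },
  async completeUpload(uploadId) { /* -> { finalPath } */ },
  async abortUpload(uploadId) { /* -> void */ },
  // File management
  async listFiles() { /* -> [{ filename, size, formattedSize, uploadDate }] */ },
  async deleteFile(filename) { /* -> void */ },
  async getDownloadUrlOrStream(filename) { /* -> { type: 'url' | 'path', value } */ },
};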
641
src/storage/localAdapter.js
Normal file
@@ -0,0 +1,641 @@
/**
 * Local Storage Adapter
 * Handles file operations for storing files on the local filesystem.
 * Implements the storage interface expected by the application routes.
 */

const fs = require('fs').promises;
const fsSync = require('fs'); // For synchronous checks like existsSync
const path = require('path');
const crypto = require('crypto');
const { config } = require('../config');
const logger = require('../utils/logger');
const {
  getUniqueFolderPath,
  sanitizePathPreserveDirs,
  isValidBatchId,
  formatFileSize // Keep formatFileSize accessible if needed by notifications later
} = require('../utils/fileUtils');
const { sendNotification } = require('../services/notifications'); // Needed for completion

const METADATA_DIR = path.join(config.uploadDir, '.metadata');
const UPLOAD_TIMEOUT = 30 * 60 * 1000; // 30 minutes timeout for stale uploads

// --- In-Memory Maps (Session-level optimizations) ---
// Store folder name mappings for batch uploads (avoids FS lookups during session)
// NOTE: This state is specific to this adapter instance and might not scale across multiple server instances.
const folderMappings = new Map();
// Store batch activity timestamps (for cleaning up stale batches/folder mappings)
const batchActivity = new Map();
const BATCH_TIMEOUT = 30 * 60 * 1000; // 30 minutes for batch/folderMapping cleanup

// --- Metadata Helper Functions (Copied and adapted from original upload.js) ---

/**
 * Ensures the metadata directory exists.
 * Should be called once during adapter initialization or before first use.
 */
async function ensureMetadataDirExists() {
  try {
    if (!fsSync.existsSync(METADATA_DIR)) {
      await fs.mkdir(METADATA_DIR, { recursive: true });
      logger.info(`[Local Adapter] Created metadata directory: ${METADATA_DIR}`);
    }
    // Check writability
    await fs.access(METADATA_DIR, fsSync.constants.W_OK);
  } catch (err) {
    logger.error(`[Local Adapter] Metadata directory error (${METADATA_DIR}): ${err.message}`);
    throw new Error(`Failed to access or create metadata directory: ${METADATA_DIR}`);
  }
}

async function readUploadMetadata(uploadId) {
  if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
    logger.warn(`[Local Adapter] Attempted to read metadata with invalid uploadId: ${uploadId}`);
    return null;
  }
  const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
  try {
    const data = await fs.readFile(metaFilePath, 'utf8');
    return JSON.parse(data);
  } catch (err) {
    if (err.code === 'ENOENT') {
      return null; // Metadata file doesn't exist
    }
    logger.error(`[Local Adapter] Error reading metadata for ${uploadId}: ${err.message}`);
    throw err; // Rethrow other errors
  }
}

async function writeUploadMetadata(uploadId, metadata) {
  if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
    logger.error(`[Local Adapter] Attempted to write metadata with invalid uploadId: ${uploadId}`);
    return;
  }
  const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
  metadata.lastActivity = Date.now(); // Update timestamp on every write
  // Write atomically (temp file + rename); the temp path is declared outside
  // the try block so the catch can clean it up.
  const tempMetaPath = `${metaFilePath}.${crypto.randomBytes(4).toString('hex')}.tmp`;
  try {
    await fs.writeFile(tempMetaPath, JSON.stringify(metadata, null, 2));
    await fs.rename(tempMetaPath, metaFilePath);
  } catch (err) {
    logger.error(`[Local Adapter] Error writing metadata for ${uploadId}: ${err.message}`);
    try { await fs.unlink(tempMetaPath); } catch (unlinkErr) {/* ignore */}
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
async function deleteUploadMetadata(uploadId) {
|
||||
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
|
||||
logger.warn(`[Local Adapter] Attempted to delete metadata with invalid uploadId: ${uploadId}`);
|
||||
return;
|
||||
}
|
||||
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
|
||||
try {
|
||||
await fs.unlink(metaFilePath);
|
||||
logger.debug(`[Local Adapter] Deleted metadata file: ${uploadId}.meta`);
|
||||
} catch (err) {
|
||||
if (err.code !== 'ENOENT') { // Ignore if already deleted
|
||||
logger.error(`[Local Adapter] Error deleting metadata file ${uploadId}.meta: ${err.message}`);
|
||||
}
|
||||
}
|
||||
}
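
// Illustrative only (not part of the diff): with the fields initUpload persists
// below, a hypothetical `<uploadId>.meta` file on disk would look like this
// (values are made up):
// {
//   "uploadId": "3f9d2c61a0b14b7e9d4c1f2a5b6c7d8e",
//   "originalFilename": "photos/cat.jpg",
//   "filePath": "/data/uploads/photos/cat.jpg",
//   "partialFilePath": "/data/uploads/photos/cat.jpg.partial",
//   "fileSize": 1048576,
//   "bytesReceived": 524288,
//   "batchId": "1714660000000-a1b2c3d4",
//   "createdAt": 1714660000000,
//   "lastActivity": 1714660042000
// }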

// --- Batch Cleanup (In-memory session state cleanup) ---
// This logic remains relevant for the in-memory folderMappings if used across batches.
let batchCleanupInterval;
function startBatchCleanup() {
  if (batchCleanupInterval) clearInterval(batchCleanupInterval);
  batchCleanupInterval = setInterval(() => {
    const now = Date.now();
    logger.info(`[Local Adapter] Running batch session cleanup, checking ${batchActivity.size} active sessions`);
    let cleanedCount = 0;
    for (const [batchId, lastActivity] of batchActivity.entries()) {
      if (now - lastActivity >= BATCH_TIMEOUT) {
        logger.info(`[Local Adapter] Cleaning up inactive batch session: ${batchId}`);
        batchActivity.delete(batchId);
        // Clean up associated folder mappings
        for (const key of folderMappings.keys()) {
          if (key.endsWith(`-${batchId}`)) {
            folderMappings.delete(key);
          }
        }
        cleanedCount++;
      }
    }
    if (cleanedCount > 0) logger.info(`[Local Adapter] Cleaned up ${cleanedCount} inactive batch sessions.`);
  }, 5 * 60 * 1000); // Check every 5 minutes
  batchCleanupInterval.unref();
}
// Ensure metadata dir exists before starting cleanup or other ops
ensureMetadataDirExists().then(() => {
  logger.info('[Local Adapter] Initialized.');
  // Start batch cleanup only after ensuring dir exists
  if (!process.env.DISABLE_BATCH_CLEANUP) {
    startBatchCleanup();
  }
}).catch(err => {
  logger.error(`[Local Adapter] Initialization failed: ${err.message}`);
  // Potentially exit or prevent server start if metadata dir is critical
  process.exit(1);
});


// --- Interface Implementation ---

/**
 * Initializes an upload session.
 * @param {string} filename - Original filename/path from client.
 * @param {number} fileSize - Total size of the file.
 * @param {string} clientBatchId - Optional batch ID from client.
 * @returns {Promise<{uploadId: string}>} Object containing the application's upload ID.
 */
async function initUpload(filename, fileSize, clientBatchId) {
  await ensureMetadataDirExists(); // Ensure it exists before proceeding

  const size = Number(fileSize);
  // Basic validations moved to route handler, assume valid inputs here

  const batchId = clientBatchId || `${Date.now()}-${crypto.randomBytes(4).toString('hex').substring(0, 9)}`;
  if (clientBatchId && !isValidBatchId(batchId)) {
    throw new Error('Invalid batch ID format'); // Throw error for route handler
  }
  batchActivity.set(batchId, Date.now()); // Track batch session activity

  // --- Path handling and Sanitization ---
  const sanitizedFilename = sanitizePathPreserveDirs(filename);
  const safeFilename = path.normalize(sanitizedFilename)
    .replace(/^(\.\.(\/|\\|$))+/, '')
    .replace(/\\/g, '/')
    .replace(/^\/+/, '');
  logger.info(`[Local Adapter] Init request for: ${safeFilename}`);

  // --- Determine Paths & Handle Folders ---
  const uploadId = crypto.randomBytes(16).toString('hex');
  let finalFilePath = path.resolve(config.uploadDir, safeFilename); // Use resolve for absolute path
  const pathParts = safeFilename.split('/').filter(Boolean);

  if (pathParts.length > 1) {
    const originalFolderName = pathParts[0];
    const folderMapKey = `${originalFolderName}-${batchId}`;
    let newFolderName = folderMappings.get(folderMapKey);
    const relativeFolderPath = newFolderName || originalFolderName; // Folder name relative to uploadDir

    if (!newFolderName) {
      const baseFolderPath = path.resolve(config.uploadDir, relativeFolderPath);
      await fs.mkdir(path.dirname(baseFolderPath), { recursive: true }); // Ensure parent of potential new folder exists
      try {
        await fs.mkdir(baseFolderPath, { recursive: false }); // Try creating the original/mapped name
        newFolderName = originalFolderName; // Success, use original
      } catch (err) {
        if (err.code === 'EEXIST') {
          // Folder exists, generate a unique name for this batch
          const uniqueFolderPath = await getUniqueFolderPath(baseFolderPath); // Pass absolute path
          newFolderName = path.basename(uniqueFolderPath); // Get only the unique folder name part
          logger.info(`[Local Adapter] Folder "${originalFolderName}" exists or conflict, using unique "${newFolderName}" for batch ${batchId}`);
          // No need to mkdir again, getUniqueFolderPath created it.
        } else {
          logger.error(`[Local Adapter] Error creating directory ${baseFolderPath}: ${err.message}`);
          throw err; // Re-throw other errors
        }
      }
      folderMappings.set(folderMapKey, newFolderName); // Store mapping for this batch
    }
    // Reconstruct the final path using the potentially unique folder name
    pathParts[0] = newFolderName;
    finalFilePath = path.resolve(config.uploadDir, ...pathParts);
    // Ensure the immediate parent directory for the file exists
    await fs.mkdir(path.dirname(finalFilePath), { recursive: true });
  } else {
    // Ensure base upload dir exists (already done by ensureLocalUploadDirExists, but safe to repeat)
    await fs.mkdir(config.uploadDir, { recursive: true });
  }

  // --- Check Final Path Collision & Get Unique Name if Needed ---
  // Check if the *final* destination exists (not the partial)
  let checkPath = finalFilePath;
  let counter = 1;
  while (fsSync.existsSync(checkPath)) {
    logger.warn(`[Local Adapter] Final destination file already exists: ${checkPath}. Generating unique name.`);
    const dir = path.dirname(finalFilePath);
    const ext = path.extname(finalFilePath);
    const baseName = path.basename(finalFilePath, ext);
    checkPath = path.resolve(dir, `${baseName} (${counter})${ext}`); // Use resolve
    counter++;
  }
  if (checkPath !== finalFilePath) {
    logger.info(`[Local Adapter] Using unique final path: ${checkPath}`);
    finalFilePath = checkPath;
    // If path changed, ensure directory exists again (might be needed if baseName contained '/')
    await fs.mkdir(path.dirname(finalFilePath), { recursive: true });
  }

  const partialFilePath = finalFilePath + '.partial';

  // --- Create and Persist Metadata ---
  const metadata = {
    uploadId,
    originalFilename: safeFilename, // Store the path as received by client
    filePath: finalFilePath, // The final, possibly unique, path
    partialFilePath,
    fileSize: size,
    bytesReceived: 0,
    batchId,
    createdAt: Date.now(),
    lastActivity: Date.now()
  };

  await writeUploadMetadata(uploadId, metadata);
  logger.info(`[Local Adapter] Initialized upload: ${uploadId} for ${safeFilename} -> ${finalFilePath}`);

  // --- Handle Zero-Byte Files ---
  if (size === 0) {
    try {
      await fs.writeFile(finalFilePath, ''); // Create the empty file directly
      logger.success(`[Local Adapter] Completed zero-byte file: ${metadata.originalFilename} as ${finalFilePath}`);
      await deleteUploadMetadata(uploadId); // Clean up metadata
      sendNotification(metadata.originalFilename, 0, config); // Send notification
    } catch (writeErr) {
      logger.error(`[Local Adapter] Failed to create zero-byte file ${finalFilePath}: ${writeErr.message}`);
      await deleteUploadMetadata(uploadId).catch(() => {}); // Attempt cleanup
      throw writeErr; // Let the route handler catch it
    }
  }

  return { uploadId };
}
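
// Illustrative example (not part of the diff): if a "trip" folder already exists in
// uploadDir, the first batch that uploads into "trip/" gets a unique folder via
// getUniqueFolderPath (e.g. "trip (1)", assuming it follows the same " (n)" suffix
// convention as the file-collision loop above), and the folderMappings cache keeps
// every file of that batch in the same mapped folder:
//   await initUpload('trip/photo.jpg', 123, 'batch-A'); // -> uploads/trip (1)/photo.jpg
//   await initUpload('trip/notes.txt', 45, 'batch-A');  // -> uploads/trip (1)/notes.txt (cached mapping)
//   await initUpload('trip/photo.jpg', 123, 'batch-B'); // -> uploads/trip (2)/photo.jpg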

/**
 * Stores a chunk of data for a given uploadId.
 * @param {string} uploadId - The application's upload ID.
 * @param {Buffer} chunk - The data chunk to store.
 * @returns {Promise<{bytesReceived: number, progress: number, completed: boolean}>} Upload status.
 */
async function storeChunk(uploadId, chunk) {
  const chunkSize = chunk.length;
  if (!chunkSize) {
    throw new Error('Empty chunk received');
  }

  const metadata = await readUploadMetadata(uploadId);

  if (!metadata) {
    // Maybe the upload completed *just* before this chunk arrived? Check final file.
    // This is hard to do reliably without knowing the final path from metadata.
    // Returning a specific error or status code might be better.
    logger.warn(`[Local Adapter] Metadata not found for chunk: ${uploadId}. Upload might be complete or cancelled.`);
    throw new Error('Upload session not found or already completed'); // Let route handler return 404
  }

  // Update batch activity
  if (metadata.batchId && isValidBatchId(metadata.batchId)) {
    batchActivity.set(metadata.batchId, Date.now());
  }

  // --- Sanity Checks ---
  if (metadata.bytesReceived >= metadata.fileSize) {
    logger.warn(`[Local Adapter] Received chunk for already completed upload ${uploadId}. Finalizing again.`);
    // Attempt to finalize just in case, then return completed status
    await completeUpload(uploadId); // This handles metadata deletion etc.
    return { bytesReceived: metadata.fileSize, progress: 100, completed: true };
  }

  let chunkToWrite = chunk;
  let actualChunkSize = chunkSize;

  // Prevent writing beyond expected file size
  if (metadata.bytesReceived + chunkSize > metadata.fileSize) {
    logger.warn(`[Local Adapter] Chunk for ${uploadId} exceeds expected size. Truncating.`);
    const bytesToWrite = metadata.fileSize - metadata.bytesReceived;
    chunkToWrite = chunk.slice(0, bytesToWrite);
    actualChunkSize = chunkToWrite.length;
    if (actualChunkSize <= 0) {
      logger.info(`[Local Adapter] Upload ${uploadId} already has expected bytes. Skipping write.`);
      metadata.bytesReceived = metadata.fileSize; // Correct state for completion check
    }
  }

  // --- Write Chunk (Append Mode) ---
  if (actualChunkSize > 0) {
    try {
      await fs.appendFile(metadata.partialFilePath, chunkToWrite);
      metadata.bytesReceived += actualChunkSize;
    } catch (writeErr) {
      logger.error(`[Local Adapter] Failed to write chunk for ${uploadId} to ${metadata.partialFilePath}: ${writeErr.message}`);
      throw new Error(`Failed to write chunk for ${uploadId}: ${writeErr.code}`); // Propagate error
    }
  }

  // --- Update State ---
  const progress = metadata.fileSize === 0 ? 100 :
    Math.min(Math.round((metadata.bytesReceived / metadata.fileSize) * 100), 100);

  logger.debug(`[Local Adapter] Chunk written for ${uploadId}: ${metadata.bytesReceived}/${metadata.fileSize} (${progress}%)`);

  // Persist updated metadata *before* final completion check
  await writeUploadMetadata(uploadId, metadata);

  // --- Check for Completion ---
  const completed = metadata.bytesReceived >= metadata.fileSize;
  if (completed) {
    // Don't call completeUpload here; let the route handler do it
    // after sending the final progress response back to the client.
    logger.info(`[Local Adapter] Upload ${uploadId} ready for completion (${metadata.bytesReceived} bytes).`);
  }

  return { bytesReceived: metadata.bytesReceived, progress, completed };
}

/**
 * Finalizes a completed upload.
 * @param {string} uploadId - The application's upload ID.
 * @returns {Promise<{filename: string, size: number, finalPath: string}>} Details of the completed file.
 */
async function completeUpload(uploadId) {
  const metadata = await readUploadMetadata(uploadId);
  if (!metadata) {
    // Might have been completed by a concurrent request. Check if final file exists.
    // This is still tricky without the metadata. Log a warning.
    logger.warn(`[Local Adapter] completeUpload called for ${uploadId}, but metadata is missing. Assuming already completed.`);
    // We don't know the filename or size here, so throw: the calling route expects metadata info.
    throw new Error('Upload completion failed: Metadata not found');
  }

  // Ensure we have received all bytes (redundant check, but safe)
  if (metadata.bytesReceived < metadata.fileSize) {
    logger.error(`[Local Adapter] Attempted to complete upload ${uploadId} prematurely. Received ${metadata.bytesReceived}/${metadata.fileSize} bytes.`);
    throw new Error('Cannot complete upload: Not all bytes received.');
  }

  try {
    // Ensure partial file exists before rename
    await fs.access(metadata.partialFilePath);
    await fs.rename(metadata.partialFilePath, metadata.filePath);
    logger.success(`[Local Adapter] Finalized: ${metadata.originalFilename} as ${metadata.filePath} (${metadata.fileSize} bytes)`);

    // Clean up metadata AFTER successful rename
    await deleteUploadMetadata(uploadId);

    // Send notification
    sendNotification(metadata.originalFilename, metadata.fileSize, config);

    return { filename: metadata.originalFilename, size: metadata.fileSize, finalPath: metadata.filePath };

  } catch (renameErr) {
    if (renameErr.code === 'ENOENT') {
      // Partial file missing. Maybe completed by another request? Check final file.
      try {
        await fs.access(metadata.filePath);
        logger.warn(`[Local Adapter] Partial file ${metadata.partialFilePath} missing for ${uploadId}, but final file ${metadata.filePath} exists. Assuming already finalized.`);
        await deleteUploadMetadata(uploadId).catch(() => {}); // Clean up metadata anyway
        return { filename: metadata.originalFilename, size: metadata.fileSize, finalPath: metadata.filePath };
      } catch (finalAccessErr) {
        logger.error(`[Local Adapter] CRITICAL: Partial file ${metadata.partialFilePath} missing and final file ${metadata.filePath} not found during completion of ${uploadId}.`);
        await deleteUploadMetadata(uploadId).catch(() => {}); // Clean up metadata to prevent retries
        throw new Error('Completion failed: Partial file missing and final file not found.');
      }
    } else {
      logger.error(`[Local Adapter] CRITICAL: Failed to rename ${metadata.partialFilePath} to ${metadata.filePath}: ${renameErr.message}`);
      // Keep metadata and partial file for potential manual recovery.
      throw renameErr; // Propagate the error
    }
  }
}

/**
 * Aborts an ongoing upload.
 * @param {string} uploadId - The application's upload ID.
 * @returns {Promise<void>}
 */
async function abortUpload(uploadId) {
  const metadata = await readUploadMetadata(uploadId);
  if (!metadata) {
    logger.warn(`[Local Adapter] Abort request for non-existent or completed upload: ${uploadId}`);
    return; // Nothing to abort
  }

  // Delete partial file first
  try {
    await fs.unlink(metadata.partialFilePath);
    logger.info(`[Local Adapter] Deleted partial file on cancellation: ${metadata.partialFilePath}`);
  } catch (unlinkErr) {
    if (unlinkErr.code !== 'ENOENT') { // Ignore if already gone
      logger.error(`[Local Adapter] Failed to delete partial file ${metadata.partialFilePath} on cancel: ${unlinkErr.message}`);
      // Continue to delete metadata anyway
    }
  }

  // Then delete metadata file
  await deleteUploadMetadata(uploadId);
  logger.info(`[Local Adapter] Upload cancelled and cleaned up: ${uploadId} (${metadata.originalFilename})`);
}

/**
 * Lists files in the upload directory.
 * @returns {Promise<Array<{filename: string, size: number, formattedSize: string, uploadDate: Date}>>} List of files.
 */
async function listFiles() {
  let entries = [];
  try {
    entries = await fs.readdir(config.uploadDir, { withFileTypes: true });
  } catch (err) {
    if (err.code === 'ENOENT') {
      logger.warn('[Local Adapter] Upload directory does not exist for listing.');
      return []; // Return empty list if dir doesn't exist
    }
    logger.error(`[Local Adapter] Failed to read upload directory: ${err.message}`);
    throw err; // Re-throw other errors
  }

  const fileDetails = [];
  for (const entry of entries) {
    // Skip directories and the special metadata directory/files within it
    if (!entry.isFile() || entry.name === '.metadata' || entry.name.endsWith('.partial') || entry.name.endsWith('.meta') || entry.name.endsWith('.tmp')) {
      continue;
    }

    try {
      const filePath = path.join(config.uploadDir, entry.name);
      const stats = await fs.stat(filePath);
      fileDetails.push({
        filename: entry.name, // Use the actual filename on disk
        size: stats.size,
        formattedSize: formatFileSize(stats.size), // Use fileUtils helper
        uploadDate: stats.mtime // Use modification time as upload date
      });
    } catch (statErr) {
      // Handle case where file might be deleted between readdir and stat
      if (statErr.code !== 'ENOENT') {
        logger.error(`[Local Adapter] Failed to get stats for file ${entry.name}: ${statErr.message}`);
      }
      // Skip this file if stat fails
    }
  }

  // Sort by date, newest first
  fileDetails.sort((a, b) => b.uploadDate.getTime() - a.uploadDate.getTime());

  return fileDetails;
}

/**
 * Gets information needed to download a file.
 * For local storage, this is the file path.
 * @param {string} filename - The name of the file to download.
 * @returns {Promise<{type: string, value: string}>} Object indicating type ('path') and value (the full file path).
 */
async function getDownloadUrlOrStream(filename) {
  // IMPORTANT: Sanitize filename input to prevent directory traversal
  const safeBaseName = path.basename(filename);
  if (safeBaseName !== filename || filename.includes('..')) {
    logger.error(`[Local Adapter] Invalid filename detected for download: ${filename}`);
    throw new Error('Invalid filename');
  }

  const filePath = path.resolve(config.uploadDir, safeBaseName); // Use resolve for security

  try {
    await fs.access(filePath, fsSync.constants.R_OK); // Check existence and readability
    return { type: 'path', value: filePath };
  } catch (err) {
    if (err.code === 'ENOENT') {
      logger.warn(`[Local Adapter] Download request for non-existent file: ${filePath}`);
      throw new Error('File not found'); // Specific error for 404 handling
    } else if (err.code === 'EACCES') {
      logger.error(`[Local Adapter] Permission denied trying to access file for download: ${filePath}`);
      throw new Error('Permission denied');
    } else {
      logger.error(`[Local Adapter] Error accessing file for download ${filePath}: ${err.message}`);
      throw err; // Re-throw other errors
    }
  }
}

/**
 * Deletes a file from the local storage.
 * @param {string} filename - The name of the file to delete.
 * @returns {Promise<void>}
 */
async function deleteFile(filename) {
  // IMPORTANT: Sanitize filename input
  const safeBaseName = path.basename(filename);
  if (safeBaseName !== filename || filename.includes('..')) {
    logger.error(`[Local Adapter] Invalid filename detected for delete: ${filename}`);
    throw new Error('Invalid filename');
  }

  const filePath = path.resolve(config.uploadDir, safeBaseName);

  try {
    await fs.unlink(filePath);
    logger.info(`[Local Adapter] Deleted file: ${filePath}`);
  } catch (err) {
    if (err.code === 'ENOENT') {
      logger.warn(`[Local Adapter] Delete request for non-existent file: ${filePath}`);
      throw new Error('File not found'); // Specific error for 404
    } else {
      logger.error(`[Local Adapter] Failed to delete file ${filePath}: ${err.message}`);
      throw err; // Re-throw other errors
    }
  }
}

/**
 * Cleans up stale resources (incomplete uploads based on metadata).
 * @returns {Promise<void>}
 */
async function cleanupStale() {
  logger.info('[Local Adapter] Running cleanup for stale metadata/partial uploads...');
  let cleanedCount = 0;
  let checkedCount = 0;

  try {
    // Ensure metadata directory exists before trying to read it
    await ensureMetadataDirExists(); // Re-check just in case

    const files = await fs.readdir(METADATA_DIR);
    const now = Date.now();

    for (const file of files) {
      if (file.endsWith('.meta')) {
        checkedCount++;
        const uploadId = file.replace('.meta', '');
        const metaFilePath = path.join(METADATA_DIR, file);
        let metadata;

        try {
          const data = await fs.readFile(metaFilePath, 'utf8');
          metadata = JSON.parse(data);

          // Check inactivity
          if (now - (metadata.lastActivity || metadata.createdAt || 0) > UPLOAD_TIMEOUT) {
            logger.warn(`[Local Adapter] Found stale metadata: ${file}. Last activity: ${new Date(metadata.lastActivity || metadata.createdAt)}`);

            // Attempt to delete partial file
            if (metadata.partialFilePath) {
              try {
                await fs.unlink(metadata.partialFilePath);
                logger.info(`[Local Adapter] Deleted stale partial file: ${metadata.partialFilePath}`);
              } catch (unlinkPartialErr) {
                if (unlinkPartialErr.code !== 'ENOENT') {
                  logger.error(`[Local Adapter] Failed to delete stale partial ${metadata.partialFilePath}: ${unlinkPartialErr.message}`);
                }
              }
            }

            // Attempt to delete metadata file
            await deleteUploadMetadata(uploadId); // Use helper
            cleanedCount++;
          }
        } catch (readErr) {
          logger.error(`[Local Adapter] Error reading/parsing ${metaFilePath} during cleanup: ${readErr.message}. Deleting the corrupt file.`);
          await fs.unlink(metaFilePath).catch(() => { logger.warn(`[Local Adapter] Failed to delete potentially corrupt metadata file: ${metaFilePath}`); });
        }
      } else if (file.endsWith('.tmp')) {
        // Clean up potential leftover temp metadata files
        const tempMetaPath = path.join(METADATA_DIR, file);
        try {
          const stats = await fs.stat(tempMetaPath);
          // A shorter timeout (e.g. UPLOAD_TIMEOUT / 2) could be used for temp files
          if (now - stats.mtime.getTime() > UPLOAD_TIMEOUT) {
            logger.warn(`[Local Adapter] Deleting stale temporary metadata file: ${file}`);
            await fs.unlink(tempMetaPath);
          }
        } catch (statErr) {
          if (statErr.code !== 'ENOENT') {
            logger.error(`[Local Adapter] Error checking temp metadata file ${tempMetaPath}: ${statErr.message}`);
          }
        }
      }
    }

    if (checkedCount > 0 || cleanedCount > 0) {
      logger.info(`[Local Adapter] Metadata cleanup finished. Checked: ${checkedCount}, Cleaned stale: ${cleanedCount}.`);
    }

  } catch (err) {
    if (err.code === 'ENOENT' && err.path === METADATA_DIR) {
      // This case should be handled by ensureMetadataDirExists, but log just in case
      logger.warn('[Local Adapter] Metadata directory not found during cleanup scan.');
    } else {
      logger.error(`[Local Adapter] Error during metadata cleanup scan: ${err.message}`);
    }
  }

  // Note: Empty folder cleanup is handled by the main cleanup utility for now.
  // If needed, the logic from utils/cleanup.js -> cleanupEmptyFolders could be moved here.
}

module.exports = {
  initUpload,
  storeChunk,
  completeUpload,
  abortUpload,
  listFiles,
  getDownloadUrlOrStream,
  deleteFile,
  cleanupStale
};
592  src/storage/s3Adapter.js  Normal file
@@ -0,0 +1,592 @@
/**
 * S3 Storage Adapter
 * Handles file operations for storing files on AWS S3 or S3-compatible services.
 * Implements the storage interface expected by the application routes.
 * Uses local files in '.metadata' directory to track multipart upload progress.
 */

const {
  S3Client,
  CreateMultipartUploadCommand,
  UploadPartCommand,
  CompleteMultipartUploadCommand,
  AbortMultipartUploadCommand,
  ListObjectsV2Command,
  GetObjectCommand,
  DeleteObjectCommand,
  PutObjectCommand // For zero-byte files
} = require('@aws-sdk/client-s3');
const { getSignedUrl } = require("@aws-sdk/s3-request-presigner");
const fs = require('fs').promises;
const fsSync = require('fs'); // For synchronous checks
const path = require('path');
const crypto = require('crypto');
const { config } = require('../config');
const logger = require('../utils/logger');
const {
  sanitizePathPreserveDirs,
  isValidBatchId,
  formatFileSize // Keep for potential future use or consistency
} = require('../utils/fileUtils');
const { sendNotification } = require('../services/notifications'); // Needed for completion

// --- Constants ---
const METADATA_DIR = path.join(config.uploadDir, '.metadata'); // Use local dir for metadata state
const UPLOAD_TIMEOUT = 30 * 60 * 1000; // 30 minutes timeout for stale *local* metadata cleanup

// --- S3 Client Initialization ---
let s3Client;
try {
  const s3ClientConfig = {
    region: config.s3Region,
    credentials: {
      accessKeyId: config.s3AccessKeyId,
      secretAccessKey: config.s3SecretAccessKey,
    },
    ...(config.s3EndpointUrl && { endpoint: config.s3EndpointUrl }),
    ...(config.s3ForcePathStyle && { forcePathStyle: true }),
  };

  if (s3ClientConfig.endpoint) {
    logger.info(`[S3 Adapter] Configuring S3 client for endpoint: ${s3ClientConfig.endpoint}`);
  }
  if (s3ClientConfig.forcePathStyle) {
    logger.info(`[S3 Adapter] Configuring S3 client with forcePathStyle: true`);
  }

  s3Client = new S3Client(s3ClientConfig);
  logger.success('[S3 Adapter] S3 Client configured successfully.');

} catch (error) {
  logger.error(`[S3 Adapter] Failed to configure S3 client: ${error.message}`);
  // This is critical, so throw an error to prevent the adapter from being used incorrectly
  throw new Error('S3 Client configuration failed. Check S3 environment variables.');
}

// --- Metadata Helper Functions (Adapted for S3, store state locally) ---

async function ensureMetadataDirExists() {
  // Reuse logic from local adapter - S3 adapter still needs a local dir for state
  try {
    if (!fsSync.existsSync(METADATA_DIR)) {
      await fs.mkdir(METADATA_DIR, { recursive: true });
      logger.info(`[S3 Adapter] Created local metadata directory: ${METADATA_DIR}`);
    }
    await fs.access(METADATA_DIR, fsSync.constants.W_OK);
  } catch (err) {
    logger.error(`[S3 Adapter] Local metadata directory error (${METADATA_DIR}): ${err.message}`);
    throw new Error(`Failed to access or create local metadata directory for S3 adapter state: ${METADATA_DIR}`);
  }
}

// Read/Write/Delete functions are nearly identical to localAdapter, as they manage local state files
async function readUploadMetadata(uploadId) {
  if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
    logger.warn(`[S3 Adapter] Attempted to read metadata with invalid uploadId: ${uploadId}`);
    return null;
  }
  const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
  try {
    const data = await fs.readFile(metaFilePath, 'utf8');
    // Ensure 'parts' is always an array on read
    const metadata = JSON.parse(data);
    metadata.parts = metadata.parts || [];
    return metadata;
  } catch (err) {
    if (err.code === 'ENOENT') { return null; }
    logger.error(`[S3 Adapter] Error reading metadata for ${uploadId}: ${err.message}`);
    throw err;
  }
}

async function writeUploadMetadata(uploadId, metadata) {
  if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
    logger.error(`[S3 Adapter] Attempted to write metadata with invalid uploadId: ${uploadId}`);
    return;
  }
  const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
  metadata.lastActivity = Date.now();
  metadata.parts = metadata.parts || []; // Ensure parts array exists
  // Declared outside the try block so the catch block can clean it up.
  const tempMetaPath = `${metaFilePath}.${crypto.randomBytes(4).toString('hex')}.tmp`;
  try {
    // Atomic write: write to a temp file, then rename over the real path.
    await fs.writeFile(tempMetaPath, JSON.stringify(metadata, null, 2));
    await fs.rename(tempMetaPath, metaFilePath);
  } catch (err) {
    logger.error(`[S3 Adapter] Error writing metadata for ${uploadId}: ${err.message}`);
    try { await fs.unlink(tempMetaPath); } catch (unlinkErr) { /* ignore */ }
    throw err;
  }
}

async function deleteUploadMetadata(uploadId) {
  if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
    logger.warn(`[S3 Adapter] Attempted to delete metadata with invalid uploadId: ${uploadId}`);
    return;
  }
  const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
  try {
    await fs.unlink(metaFilePath);
    logger.debug(`[S3 Adapter] Deleted metadata file: ${uploadId}.meta`);
  } catch (err) {
    if (err.code !== 'ENOENT') {
      logger.error(`[S3 Adapter] Error deleting metadata file ${uploadId}.meta: ${err.message}`);
    }
  }
}

// Ensure metadata dir exists on initialization
ensureMetadataDirExists().catch(err => {
  logger.error(`[S3 Adapter] Initialization failed: ${err.message}`);
  process.exit(1); // Exit if we can't manage metadata state
});


// --- Interface Implementation ---

/**
 * Initializes an S3 multipart upload session (or direct put for zero-byte).
 * @param {string} filename - Original filename/path from client.
 * @param {number} fileSize - Total size of the file.
 * @param {string} clientBatchId - Optional batch ID from client.
 * @returns {Promise<{uploadId: string}>} Object containing the application's upload ID.
 */
async function initUpload(filename, fileSize, clientBatchId) {
  await ensureMetadataDirExists(); // Re-check before operation

  const size = Number(fileSize);
  const appUploadId = crypto.randomBytes(16).toString('hex'); // Our internal ID

  // --- Path handling and Sanitization for S3 Key ---
  const sanitizedFilename = sanitizePathPreserveDirs(filename);
  // S3 keys should not start with /
  const s3Key = path.normalize(sanitizedFilename)
    .replace(/^(\.\.(\/|\\|$))+/, '')
    .replace(/\\/g, '/')
    .replace(/^\/+/, '');

  logger.info(`[S3 Adapter] Init request for S3 Key: ${s3Key}`);

  // --- Handle Zero-Byte Files ---
  if (size === 0) {
    try {
      const putCommand = new PutObjectCommand({
        Bucket: config.s3BucketName,
        Key: s3Key,
        Body: '', // Empty body
        ContentLength: 0
      });
      await s3Client.send(putCommand);
      logger.success(`[S3 Adapter] Completed zero-byte file upload directly: ${s3Key}`);
      // No metadata needed for zero-byte files as they are completed atomically
      sendNotification(filename, 0, config); // Send notification (use original filename)
      // Return an uploadId that won't conflict or be processable by chunk/complete;
      // a unique ID is safer for client state than null or a special status.
      return { uploadId: `zero-byte-${appUploadId}` };
    } catch (putErr) {
      logger.error(`[S3 Adapter] Failed to put zero-byte object ${s3Key}: ${putErr.message}`);
      throw putErr; // Let the route handler deal with it
    }
  }

  // --- Initiate Multipart Upload for Non-Zero Files ---
  try {
    const createCommand = new CreateMultipartUploadCommand({
      Bucket: config.s3BucketName,
      Key: s3Key,
      // TODO: Consider adding ContentType if available/reliable: metadata.contentType
      // TODO: Consider adding Metadata: { 'original-filename': filename } ?
    });

    const response = await s3Client.send(createCommand);
    const s3UploadId = response.UploadId;

    if (!s3UploadId) {
      throw new Error('S3 did not return an UploadId');
    }

    logger.info(`[S3 Adapter] Initiated multipart upload for ${s3Key} (S3 UploadId: ${s3UploadId})`);

    // --- Create and Persist Local Metadata ---
    const batchId = clientBatchId || `${Date.now()}-${crypto.randomBytes(4).toString('hex').substring(0, 9)}`;
    const metadata = {
      appUploadId: appUploadId, // Store our ID
      s3UploadId: s3UploadId,
      s3Key: s3Key,
      originalFilename: filename, // Keep original for notifications etc.
      fileSize: size,
      bytesReceived: 0, // Track approximate bytes locally
      parts: [], // Array to store { PartNumber, ETag }
      batchId,
      createdAt: Date.now(),
      lastActivity: Date.now()
    };

    await writeUploadMetadata(appUploadId, metadata); // Write metadata keyed by our appUploadId

    return { uploadId: appUploadId }; // Return OUR internal upload ID to the client

  } catch (err) {
    logger.error(`[S3 Adapter] Failed to initiate multipart upload for ${s3Key}: ${err.message}`);
    // TODO: Map specific S3 errors (e.g., NoSuchBucket, AccessDenied) to better client messages
    throw err;
  }
}

/**
 * Uploads a chunk as a part to S3.
 * @param {string} appUploadId - The application's upload ID.
 * @param {Buffer} chunk - The data chunk to store.
 * @param {number} partNumber - The sequential number of this part (starting from 1).
 * @returns {Promise<{bytesReceived: number, progress: number, completed: boolean}>} Upload status.
 */
async function storeChunk(appUploadId, chunk, partNumber) {
  const chunkSize = chunk.length;
  if (!chunkSize) throw new Error('Empty chunk received');
  if (partNumber < 1) throw new Error('PartNumber must be 1 or greater');

  const metadata = await readUploadMetadata(appUploadId);
  if (!metadata || !metadata.s3UploadId) { // Check for s3UploadId presence
    logger.warn(`[S3 Adapter] Metadata or S3 UploadId not found for chunk: ${appUploadId}. Upload might be complete, cancelled, or zero-byte.`);
    throw new Error('Upload session not found or already completed');
  }

  // --- Sanity Check ---
  // S3 handles duplicate part uploads gracefully (last one wins), so less critical than local append.
  // We still track bytesReceived locally for progress approximation.
  if (metadata.bytesReceived >= metadata.fileSize && metadata.fileSize > 0) {
    logger.warn(`[S3 Adapter] Received chunk for already completed upload ${appUploadId}. Ignoring.`);
    // Can't really finalize again easily without full parts list. Indicate completion based on local state.
    const progress = metadata.fileSize > 0 ? 100 : 0;
    return { bytesReceived: metadata.bytesReceived, progress, completed: true };
  }

  try {
    const uploadPartCommand = new UploadPartCommand({
      Bucket: config.s3BucketName,
      Key: metadata.s3Key,
      UploadId: metadata.s3UploadId,
      Body: chunk,
      PartNumber: partNumber,
      ContentLength: chunkSize // Required for UploadPart
    });

    const response = await s3Client.send(uploadPartCommand);
    const etag = response.ETag;

    if (!etag) {
      throw new Error(`S3 did not return an ETag for PartNumber ${partNumber}`);
    }

    // --- Update Local Metadata ---
    // Ensure parts are stored correctly
    metadata.parts = metadata.parts || [];
    metadata.parts.push({ PartNumber: partNumber, ETag: etag });
    // Sort parts just in case uploads happen out of order client-side (though unlikely with current client)
    metadata.parts.sort((a, b) => a.PartNumber - b.PartNumber);

    // Update approximate bytes received
    metadata.bytesReceived = (metadata.bytesReceived || 0) + chunkSize;
    // Cap bytesReceived at fileSize for progress calculation
    metadata.bytesReceived = Math.min(metadata.bytesReceived, metadata.fileSize);

    await writeUploadMetadata(appUploadId, metadata);

    // --- Calculate Progress ---
    const progress = metadata.fileSize === 0 ? 100 :
      Math.min(Math.round((metadata.bytesReceived / metadata.fileSize) * 100), 100);

    logger.debug(`[S3 Adapter] Part ${partNumber} uploaded for ${appUploadId} (ETag: ${etag}). Progress: ~${progress}%`);

    // Check for completion potential based on local byte tracking
    const completed = metadata.bytesReceived >= metadata.fileSize;
    if (completed) {
      logger.info(`[S3 Adapter] Upload ${appUploadId} potentially complete based on bytes received.`);
    }

    return { bytesReceived: metadata.bytesReceived, progress, completed };

  } catch (err) {
    logger.error(`[S3 Adapter] Failed to upload part ${partNumber} for ${appUploadId} (Key: ${metadata.s3Key}): ${err.message}`);
    // TODO: Map specific S3 errors (InvalidPart, SlowDown, etc.)
    throw err;
  }
}

/**
 * Finalizes a completed S3 multipart upload.
 * @param {string} appUploadId - The application's upload ID.
 * @returns {Promise<{filename: string, size: number, finalPath: string}>} Details of the completed file (finalPath is the S3 Key).
 */
async function completeUpload(appUploadId) {
  const metadata = await readUploadMetadata(appUploadId);
  if (!metadata || !metadata.s3UploadId || !metadata.parts || metadata.parts.length === 0) {
    logger.warn(`[S3 Adapter] completeUpload called for ${appUploadId}, but metadata, S3 UploadId, or parts list is missing/empty. Assuming already completed or invalid state.`);
    // Checking whether the object exists as a fallback would be risky here.
    throw new Error('Upload completion failed: Required metadata or parts list not found');
  }

  // Basic check if enough bytes were tracked locally (approximate check)
  if (metadata.bytesReceived < metadata.fileSize) {
    logger.warn(`[S3 Adapter] Attempting to complete upload ${appUploadId} but locally tracked bytes (${metadata.bytesReceived}) are less than expected size (${metadata.fileSize}). Proceeding anyway.`);
  }

  try {
    const completeCommand = new CompleteMultipartUploadCommand({
      Bucket: config.s3BucketName,
      Key: metadata.s3Key,
      UploadId: metadata.s3UploadId,
      MultipartUpload: {
        Parts: metadata.parts // Use the collected parts { PartNumber, ETag }
      },
    });

    const response = await s3Client.send(completeCommand);
    // Example response: { ETag: '"..."', Location: '...', Key: '...', Bucket: '...' }

    logger.success(`[S3 Adapter] Finalized multipart upload: ${metadata.s3Key} (ETag: ${response.ETag})`);

    // Clean up local metadata AFTER successful S3 completion
    await deleteUploadMetadata(appUploadId);

    // Send notification
    sendNotification(metadata.originalFilename, metadata.fileSize, config);

    // Return info consistent with the local adapter where possible
    return { filename: metadata.originalFilename, size: metadata.fileSize, finalPath: metadata.s3Key };

  } catch (err) {
    logger.error(`[S3 Adapter] Failed to complete multipart upload for ${appUploadId} (Key: ${metadata.s3Key}): ${err.message}`);
    // Specific S3 errors like InvalidPartOrder or EntityTooSmall might occur here.
    // If Complete fails, S3 *might* have already assembled the object (rare), so for
    // NoSuchUpload we fall back to checking whether the final object exists; for any
    // other error we just throw and keep the local metadata for recovery.
    if (err.name === 'NoSuchUpload') { // SDK v3 exposes the error code as err.name (matches abortUpload below)
      logger.warn(`[S3 Adapter] CompleteMultipartUpload failed with NoSuchUpload for ${appUploadId}. Assuming already completed or aborted.`);
      await deleteUploadMetadata(appUploadId).catch(() => {}); // Attempt metadata cleanup
      // Check if the final object exists
      try {
        // HeadObject would also work; GetObject is what the adapter already imports
        await s3Client.send(new GetObjectCommand({ Bucket: config.s3BucketName, Key: metadata.s3Key }));
        logger.info(`[S3 Adapter] Final object ${metadata.s3Key} exists after NoSuchUpload error. Treating as completed.`);
        return { filename: metadata.originalFilename, size: metadata.fileSize, finalPath: metadata.s3Key };
      } catch (headErr) {
        // Final object doesn't exist either.
        throw new Error('Completion failed: Upload session not found and final object does not exist.');
      }
    }
    throw err;
  }
}

/**
 * Aborts an ongoing S3 multipart upload.
 * @param {string} appUploadId - The application's upload ID.
 * @returns {Promise<void>}
 */
async function abortUpload(appUploadId) {
  const metadata = await readUploadMetadata(appUploadId);
  if (!metadata || !metadata.s3UploadId) {
    logger.warn(`[S3 Adapter] Abort request for non-existent or completed upload: ${appUploadId}`);
    await deleteUploadMetadata(appUploadId); // Clean up local metadata if it exists anyway
    return;
  }

  try {
    const abortCommand = new AbortMultipartUploadCommand({
      Bucket: config.s3BucketName,
      Key: metadata.s3Key,
      UploadId: metadata.s3UploadId,
    });
    await s3Client.send(abortCommand);
    logger.info(`[S3 Adapter] Aborted multipart upload: ${appUploadId} (Key: ${metadata.s3Key})`);
  } catch (err) {
    if (err.name === 'NoSuchUpload') {
      logger.warn(`[S3 Adapter] Multipart upload ${appUploadId} (Key: ${metadata.s3Key}) not found during abort. Already aborted or completed.`);
    } else {
      logger.error(`[S3 Adapter] Failed to abort multipart upload for ${appUploadId} (Key: ${metadata.s3Key}): ${err.message}`);
      // Don't delete local metadata if abort failed; it might be retryable or need manual cleanup
      throw err; // Rethrow S3 error
    }
  }

  // Delete local metadata AFTER successful abort or if NoSuchUpload
  await deleteUploadMetadata(appUploadId);
}

/**
 * Lists files in the S3 bucket.
 * @returns {Promise<Array<{filename: string, size: number, formattedSize: string, uploadDate: Date}>>} List of files.
 */
async function listFiles() {
  try {
    const command = new ListObjectsV2Command({
      Bucket: config.s3BucketName,
      // Optional: Add Prefix if you want to list within a specific 'folder'
      // Prefix: 'uploads/'
    });
    // TODO: Add pagination handling if expecting >1000 objects
    const response = await s3Client.send(command);

    const files = (response.Contents || [])
      // Optional: Filter out objects that might represent folders if necessary
      // .filter(item => !(item.Key.endsWith('/') && item.Size === 0))
      .map(item => ({
        filename: item.Key, // S3 Key is the filename/path
        size: item.Size,
        formattedSize: formatFileSize(item.Size), // Use utility
        uploadDate: item.LastModified
      }));

    // Sort by date, newest first
    files.sort((a, b) => b.uploadDate.getTime() - a.uploadDate.getTime());

    return files;

  } catch (err) {
    logger.error(`[S3 Adapter] Failed to list objects in bucket ${config.s3BucketName}: ${err.message}`);
    throw err;
  }
}

/**
 * Generates a presigned URL for downloading an S3 object.
 * @param {string} s3Key - The S3 Key (filename/path) of the object.
 * @returns {Promise<{type: string, value: string}>} Object indicating type ('url') and value (the presigned URL).
 */
async function getDownloadUrlOrStream(s3Key) {
  // Input `s3Key` is assumed to be sanitized by the calling route/logic
  if (!s3Key || s3Key.includes('..') || s3Key.startsWith('/')) {
    logger.error(`[S3 Adapter] Invalid S3 key detected for download: ${s3Key}`);
    throw new Error('Invalid filename');
  }

  try {
    const command = new GetObjectCommand({
      Bucket: config.s3BucketName,
      Key: s3Key,
      // Optional: Override response headers like filename
      // ResponseContentDisposition: `attachment; filename="${path.basename(s3Key)}"`
    });

    // Generate a presigned URL (expires in 1 hour here; adjustable via expiresIn)
    const url = await getSignedUrl(s3Client, command, { expiresIn: 3600 });
    logger.info(`[S3 Adapter] Generated presigned URL for ${s3Key}`);
    return { type: 'url', value: url };

  } catch (err) {
    logger.error(`[S3 Adapter] Failed to generate presigned URL for ${s3Key}: ${err.message}`);
    if (err.name === 'NoSuchKey') {
      throw new Error('File not found in S3');
    }
    throw err; // Re-throw other S3 errors
  }
}
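
// Sketch (hypothetical route code, not in this diff): the two adapters return
// different descriptor types from getDownloadUrlOrStream, so a download route
// can branch on `type`:
//   const dl = await storageAdapter.getDownloadUrlOrStream(filename);
//   if (dl.type === 'url') res.redirect(dl.value);        // S3: presigned URL
//   else if (dl.type === 'path') res.download(dl.value);  // local: absolute path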

/**
 * Deletes an object from the S3 bucket.
 * @param {string} s3Key - The S3 Key (filename/path) of the object to delete.
 * @returns {Promise<void>}
 */
async function deleteFile(s3Key) {
  // Input `s3Key` is assumed to be sanitized
  if (!s3Key || s3Key.includes('..') || s3Key.startsWith('/')) {
    logger.error(`[S3 Adapter] Invalid S3 key detected for delete: ${s3Key}`);
    throw new Error('Invalid filename');
  }

  try {
    const command = new DeleteObjectCommand({
      Bucket: config.s3BucketName,
      Key: s3Key,
    });
    await s3Client.send(command);
    logger.info(`[S3 Adapter] Deleted object: ${s3Key}`);
  } catch (err) {
    // DeleteObject is idempotent, so NoSuchKey isn't typically an error unless you need to know.
    logger.error(`[S3 Adapter] Failed to delete object ${s3Key}: ${err.message}`);
    throw err;
  }
}

/**
 * Cleans up stale *local* metadata files for S3 uploads.
 * Relies on S3 Lifecycle Policies for actual S3 cleanup.
 * @returns {Promise<void>}
 */
async function cleanupStale() {
  logger.info('[S3 Adapter] Running cleanup for stale local metadata files...');
  let cleanedCount = 0;
  let checkedCount = 0;

  try {
    await ensureMetadataDirExists(); // Re-check

    const files = await fs.readdir(METADATA_DIR);
    const now = Date.now();

    for (const file of files) {
      if (file.endsWith('.meta')) {
        checkedCount++;
        const appUploadId = file.replace('.meta', '');
        const metaFilePath = path.join(METADATA_DIR, file);

        try {
          const data = await fs.readFile(metaFilePath, 'utf8');
          const metadata = JSON.parse(data);

          // Check inactivity based on local metadata timestamp
          if (now - (metadata.lastActivity || metadata.createdAt || 0) > UPLOAD_TIMEOUT) {
            logger.warn(`[S3 Adapter] Found stale local metadata: ${file}. Last activity: ${new Date(metadata.lastActivity || metadata.createdAt)}. S3 UploadId: ${metadata.s3UploadId || 'N/A'}`);

            // Only delete the LOCAL metadata file. DO NOT ABORT S3 UPLOAD HERE.
            await deleteUploadMetadata(appUploadId); // Use helper
            cleanedCount++;
          }
        } catch (readErr) {
          logger.error(`[S3 Adapter] Error reading/parsing local metadata ${metaFilePath} during cleanup: ${readErr.message}. Deleting the corrupt file.`);
          await fs.unlink(metaFilePath).catch(() => { logger.warn(`[S3 Adapter] Failed to delete potentially corrupt local metadata file: ${metaFilePath}`); });
        }
      } else if (file.endsWith('.tmp')) {
        // Clean up potential leftover temp metadata files (same as local adapter)
        const tempMetaPath = path.join(METADATA_DIR, file);
        try {
          const stats = await fs.stat(tempMetaPath);
          if (now - stats.mtime.getTime() > UPLOAD_TIMEOUT) {
            logger.warn(`[S3 Adapter] Deleting stale temporary local metadata file: ${file}`);
            await fs.unlink(tempMetaPath);
          }
        } catch (statErr) {
          if (statErr.code !== 'ENOENT') {
            logger.error(`[S3 Adapter] Error checking temp local metadata file ${tempMetaPath}: ${statErr.message}`);
          }
        }
      }
    }

    if (checkedCount > 0 || cleanedCount > 0) {
      logger.info(`[S3 Adapter] Local metadata cleanup finished. Checked: ${checkedCount}, Cleaned stale local files: ${cleanedCount}.`);
    }

    // Log the crucial recommendation
    logger.warn(`[S3 Adapter] IMPORTANT: For S3 storage, configure Lifecycle Rules on your bucket (${config.s3BucketName}) or use provider-specific tools to automatically clean up incomplete multipart uploads after a few days. This adapter only cleans up local tracking files.`);

  } catch (err) {
    if (err.code === 'ENOENT' && err.path === METADATA_DIR) {
      logger.warn('[S3 Adapter] Local metadata directory not found during cleanup scan.');
    } else {
      logger.error(`[S3 Adapter] Error during local metadata cleanup scan: ${err.message}`);
    }
  }
}
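
// The warning above asks operators to configure lifecycle rules for incomplete
// multipart uploads. A one-off sketch using the same AWS SDK v3 client (assumes
// the configured credentials are allowed to set bucket lifecycle policy; the
// 7-day window is an arbitrary example):
//
//   const { PutBucketLifecycleConfigurationCommand } = require('@aws-sdk/client-s3');
//
//   async function installAbortIncompleteRule() {
//     await s3Client.send(new PutBucketLifecycleConfigurationCommand({
//       Bucket: config.s3BucketName,
//       LifecycleConfiguration: {
//         Rules: [{
//           ID: 'abort-incomplete-multipart',
//           Status: 'Enabled',
//           Filter: { Prefix: '' }, // whole bucket
//           AbortIncompleteMultipartUpload: { DaysAfterInitiation: 7 }
//         }]
//       }
//     }));
//   }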

module.exports = {
  initUpload,
  storeChunk,
  completeUpload,
  abortUpload,
  listFiles,
  getDownloadUrlOrStream,
  deleteFile,
  cleanupStale
};

src/utils/cleanup.js
@@ -1,317 +1,234 @@
 /**
  * Cleanup utilities for managing application resources.
- * Handles incomplete uploads, empty folders, and shutdown tasks.
- * Provides cleanup task registration and execution system.
+ * Handles registration and execution of cleanup tasks, including delegation
+ * of storage-specific cleanup (like stale uploads) to the storage adapter.
+ * Also includes generic cleanup like removing empty folders (for local storage).
  */

 const fs = require('fs').promises;
 const path = require('path');
 const logger = require('./logger');
 const { config } = require('../config');
+const { storageAdapter } = require('../storage'); // Import the selected adapter

-const METADATA_DIR = path.join(config.uploadDir, '.metadata');
-const UPLOAD_TIMEOUT = config.uploadTimeout || 30 * 60 * 1000; // Use a config or default (e.g., 30 mins)
-
+// --- Generic Cleanup Task Management ---
 let cleanupTasks = [];

 /**
- * Register a cleanup task to be executed during shutdown
- * @param {Function} task - Async function to be executed during cleanup
+ * Register a generic cleanup task to be executed during shutdown.
+ * @param {Function} task - Async function to be executed during cleanup.
  */
 function registerCleanupTask(task) {
   cleanupTasks.push(task);
 }

 /**
- * Remove a cleanup task
- * @param {Function} task - Task to remove
+ * Remove a generic cleanup task.
+ * @param {Function} task - Task to remove.
  */
 function removeCleanupTask(task) {
   cleanupTasks = cleanupTasks.filter((t) => t !== task);
 }

 /**
- * Execute all registered cleanup tasks
- * @param {number} [timeout=1000] - Maximum time in ms to wait for cleanup
+ * Execute all registered generic cleanup tasks.
+ * @param {number} [timeout=1000] - Maximum time in ms to wait for cleanup.
  * @returns {Promise<void>}
  */
 async function executeCleanup(timeout = 1000) {
   const taskCount = cleanupTasks.length;
   if (taskCount === 0) {
-    logger.info('No cleanup tasks to execute');
+    logger.info('[Cleanup] No generic cleanup tasks to execute');
     return;
   }

-  logger.info(`Executing ${taskCount} cleanup tasks...`);
-
+  logger.info(`[Cleanup] Executing ${taskCount} generic cleanup tasks...`);

   try {
-    // Run all cleanup tasks in parallel with timeout
+    // Run all tasks concurrently with individual and global timeouts
     await Promise.race([
       Promise.all(
-        cleanupTasks.map(async (task) => {
+        cleanupTasks.map(async (task, index) => {
           try {
             await Promise.race([
               task(),
               new Promise((_, reject) =>
-                setTimeout(() => reject(new Error('Task timeout')), timeout / 2)
+                setTimeout(() => reject(new Error(`Task ${index + 1} timeout`)), timeout / 2) // Individual timeout
               )
             ]);
+            logger.debug(`[Cleanup] Task ${index + 1} completed.`);
           } catch (error) {
-            if (error.message === 'Task timeout') {
-              logger.warn('Cleanup task timed out');
-            } else {
-              logger.error(`Cleanup task failed: ${error.message}`);
-            }
+            logger.warn(`[Cleanup] Task ${index + 1} failed or timed out: ${error.message}`);
           }
         })
       ),
       new Promise((_, reject) =>
-        setTimeout(() => reject(new Error('Global timeout')), timeout)
+        setTimeout(() => reject(new Error('Global cleanup timeout')), timeout) // Global timeout
       )
     ]);

-    logger.info('Cleanup completed successfully');
-
+    logger.info('[Cleanup] Generic cleanup tasks completed successfully');
   } catch (error) {
-    if (error.message === 'Global timeout') {
-      logger.warn(`Cleanup timed out after ${timeout}ms`);
-    } else {
-      logger.error(`Cleanup failed: ${error.message}`);
-    }
+    logger.warn(`[Cleanup] Generic cleanup process ended with error or timeout: ${error.message}`);
   } finally {
-    // Clear all tasks regardless of success/failure
-    cleanupTasks = [];
+    cleanupTasks = []; // Clear tasks regardless of outcome
   }
 }
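// Usage sketch (illustrative annotation, not a diff line): a shutdown path might
// register adapter teardown work and flush it on SIGTERM:
//   registerCleanupTask(() => storageAdapter.cleanupStale());
//   process.on('SIGTERM', async () => { await executeCleanup(2000); process.exit(0); });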

// How often to run the storage cleanup check (e.g., every 15 minutes)
const STORAGE_CLEANUP_INTERVAL = 15 * 60 * 1000;
let storageCleanupTimer = null;

/**
 * Performs cleanup of stale storage resources by calling the adapter's method.
 * This is typically run periodically.
 */
async function runStorageCleanup() {
  logger.info('[Cleanup] Running periodic storage cleanup...');
  try {
    if (storageAdapter && typeof storageAdapter.cleanupStale === 'function') {
      await storageAdapter.cleanupStale();
      logger.info('[Cleanup] Storage adapter cleanup task finished.');
      // Additionally, run empty folder cleanup if using local storage
      if (config.storageType === 'local') {
        await cleanupEmptyFolders(config.uploadDir);
      }
    } else {
      logger.warn('[Cleanup] Storage adapter or cleanupStale method not available.');
    }
  } catch (error) {
    logger.error(`[Cleanup] Error during periodic storage cleanup: ${error.message}`, error.stack);
  }
}
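
// Illustrative only — a minimal sketch of the `cleanupStale` contract that the
// call above assumes. The class name and constructor options are hypothetical;
// a local adapter might scan its metadata directory and remove uploads whose
// last activity exceeds a configured timeout, echoing the metadata-based
// cleanup this function replaced:
//
//   class LocalStorageAdapter {
//     constructor({ metadataDir, uploadTimeout }) {
//       this.metadataDir = metadataDir;
//       this.uploadTimeout = uploadTimeout;
//     }
//
//     async cleanupStale() {
//       const now = Date.now();
//       const files = await fs.readdir(this.metadataDir);
//       for (const file of files.filter((f) => f.endsWith('.meta'))) {
//         const metaPath = path.join(this.metadataDir, file);
//         const meta = JSON.parse(await fs.readFile(metaPath, 'utf8'));
//         if (now - (meta.lastActivity || meta.createdAt || 0) > this.uploadTimeout) {
//           // Remove the partial file first, then its metadata record
//           if (meta.partialFilePath) await fs.unlink(meta.partialFilePath).catch(() => {});
//           await fs.unlink(metaPath).catch(() => {});
//         }
//       }
//     }
//   }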

/**
 * Starts the periodic storage cleanup task.
 */
function startStorageCleanupInterval() {
  if (storageCleanupTimer) {
    clearInterval(storageCleanupTimer);
  }
  logger.info(`[Cleanup] Starting periodic storage cleanup interval (${STORAGE_CLEANUP_INTERVAL / 60000} minutes).`);
  // Run once immediately on start? Optional.
  // runStorageCleanup();
  storageCleanupTimer = setInterval(runStorageCleanup, STORAGE_CLEANUP_INTERVAL);
  storageCleanupTimer.unref(); // Allow process to exit if this is the only timer
}

/**
 * Stops the periodic storage cleanup task.
 */
function stopStorageCleanupInterval() {
  if (storageCleanupTimer) {
    clearInterval(storageCleanupTimer);
    storageCleanupTimer = null;
    logger.info('[Cleanup] Stopped periodic storage cleanup interval.');
  }
}

// Start interval automatically.
// Note: Ensure storageAdapter is initialized before this might run effectively.
// Consider starting this interval after server initialization in server.js if needed.
if (!config.isDemoMode) { // Don't run cleanup in demo mode
  startStorageCleanupInterval();
} else {
  logger.info('[Cleanup] Periodic storage cleanup disabled in Demo Mode.');
}

// Stop interval on shutdown
process.on('SIGTERM', stopStorageCleanupInterval);
process.on('SIGINT', stopStorageCleanupInterval);


// --- Empty Folder Cleanup (Primarily for Local Storage) ---

/**
 * Recursively remove empty folders within a given directory.
 * Skips the special '.metadata' directory.
 * @param {string} dir - Directory path to clean.
 */
async function cleanupEmptyFolders(dir) {
  // Check if the path exists and is a directory first
  try {
    const stats = await fs.stat(dir);
    if (!stats.isDirectory()) {
      logger.debug(`[Cleanup] Skipping non-directory path for empty folder cleanup: ${dir}`);
      return;
    }
  } catch (err) {
    if (err.code === 'ENOENT') {
      logger.debug(`[Cleanup] Directory not found for empty folder cleanup: ${dir}`);
      return; // Directory doesn't exist, nothing to clean
    }
    logger.error(`[Cleanup] Error stating directory ${dir} for cleanup: ${err.message}`);
    return; // Don't proceed if we can't stat
  }

  logger.debug(`[Cleanup] Checking for empty folders within: ${dir}`);
  const isMetadataDir = path.basename(dir) === '.metadata';
  if (isMetadataDir) {
    logger.debug(`[Cleanup] Skipping cleanup of metadata directory itself: ${dir}`);
    return;
  }

  let entries;
  try {
    entries = await fs.readdir(dir, { withFileTypes: true });
  } catch (err) {
    logger.error(`[Cleanup] Failed to read directory ${dir} for empty folder cleanup: ${err.message}`);
    return; // Cannot proceed
  }

  // Recursively clean subdirectories first
  const subDirPromises = entries
    .filter(entry => entry.isDirectory() && entry.name !== '.metadata')
    .map(entry => cleanupEmptyFolders(path.join(dir, entry.name)));

  await Promise.all(subDirPromises);

  // Re-read directory contents after cleaning subdirectories
  try {
    entries = await fs.readdir(dir); // Just need names now
  } catch (err) {
    logger.error(`[Cleanup] Failed to re-read directory ${dir} after sub-cleanup: ${err.message}`);
    return;
  }

  // Check if directory is now empty (or only contains .metadata)
  const isEmpty = entries.length === 0 || (entries.length === 1 && entries[0] === '.metadata');

  if (isEmpty) {
    // Make sure we don't delete the main configured upload dir or the metadata dir
    const resolvedUploadDir = path.resolve(config.uploadDir);
    const resolvedCurrentDir = path.resolve(dir);

    if (resolvedCurrentDir !== resolvedUploadDir && path.basename(resolvedCurrentDir) !== '.metadata') {
      try {
        await fs.rmdir(resolvedCurrentDir);
        logger.info(`[Cleanup] Removed empty directory: ${resolvedCurrentDir}`);
      } catch (rmErr) {
        if (rmErr.code !== 'ENOENT') { // Ignore if already deleted
          logger.error(`[Cleanup] Failed to remove supposedly empty directory ${resolvedCurrentDir}: ${rmErr.message}`);
        }
      }
    } else {
      logger.debug(`[Cleanup] Skipping removal of root upload directory or metadata directory: ${resolvedCurrentDir}`);
    }
  }
}
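
// Illustrative only — expected behavior of cleanupEmptyFolders, assuming a
// hypothetical upload root of './local_uploads' for the example:
//
//   ./local_uploads/a/b/        (empty)      -> removed, deepest first
//   ./local_uploads/a/          (now empty)  -> removed
//   ./local_uploads/.metadata/  (special)    -> always kept
//   ./local_uploads/            (root)       -> never removed
//
//   await cleanupEmptyFolders(config.uploadDir);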

// --- Export ---
module.exports = {
  registerCleanupTask,
  removeCleanupTask,
  executeCleanup,
  // Exporting runStorageCleanup might be useful for triggering manually if needed
  runStorageCleanup,
  startStorageCleanupInterval,
  stopStorageCleanupInterval,
  cleanupEmptyFolders // Export if needed elsewhere, though mainly used internally now
};
@@ -5,7 +5,7 @@
 */

const crypto = require('crypto');
const logger = require('./logger'); // Corrected path

/**
 * Store for login attempts with rate limiting
@@ -27,7 +27,7 @@ function startCleanupInterval() {
  if (cleanupInterval) {
    clearInterval(cleanupInterval);
  }

  cleanupInterval = setInterval(() => {
    const now = Date.now();
    let cleaned = 0;
@@ -41,7 +41,10 @@ function startCleanupInterval() {
      logger.info(`Cleaned up ${cleaned} expired lockouts`);
    }
  }, 60000); // Check every minute

  // Allow node to exit even if this interval is running
  cleanupInterval.unref();

  return cleanupInterval;
}
@@ -60,6 +63,11 @@ if (!process.env.DISABLE_SECURITY_CLEANUP) {
  startCleanupInterval();
}

// Stop interval on shutdown signals
process.on('SIGTERM', stopCleanupInterval);
process.on('SIGINT', stopCleanupInterval);


/**
 * Reset login attempts for an IP
 * @param {string} ip - IP address
@@ -77,12 +85,13 @@ function resetAttempts(ip) {
function isLockedOut(ip) {
  const attempts = loginAttempts.get(ip);
  if (!attempts) return false;

  if (attempts.count >= MAX_ATTEMPTS) {
    const timeElapsed = Date.now() - attempts.lastAttempt;
    if (timeElapsed < LOCKOUT_DURATION) {
      return true;
    }
    // Lockout expired, reset attempts before proceeding
    resetAttempts(ip);
  }
  return false;
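
// Illustrative only — lockout semantics of the check above, assuming
// MAX_ATTEMPTS = 5 and a 15-minute LOCKOUT_DURATION (both values are
// assumptions for the example; the real constants are defined earlier
// in this file):
//
//   recordAttempt(ip); // repeated 5 times -> attempts.count === 5
//   isLockedOut(ip);   // true for 15 minutes after the last attempt
//   // once the lockout window elapses, isLockedOut resets the counter
//   // and returns false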
@@ -109,28 +118,41 @@ function recordAttempt(ip) {
 */
function validatePin(pin) {
  if (!pin || typeof pin !== 'string') return null;
  // Remove non-digit characters
  const cleanPin = pin.replace(/\D/g, '');
  // Check length constraints (e.g., 4-10 digits)
  return cleanPin.length >= 4 && cleanPin.length <= 10 ? cleanPin : null;
}

/**
 * Compare two strings in constant time using crypto.timingSafeEqual.
 * Pads strings to a fixed length to prevent timing attacks based on length.
 * @param {string} a - First string
 * @param {string} b - Second string
 * @returns {boolean} True if strings match
 */
function safeCompare(a, b) {
  // Ensure inputs are strings
  if (typeof a !== 'string' || typeof b !== 'string') {
    logger.warn('safeCompare received non-string input.');
    return false;
  }

  try {
    // Choose a fixed length significantly longer than expected max input length
    const fixedLength = 64;
    const bufferA = Buffer.alloc(fixedLength, 0); // Allocate buffers filled with zeros
    const bufferB = Buffer.alloc(fixedLength, 0);

    // Copy input strings into the buffers, truncated if necessary
    bufferA.write(a.slice(0, fixedLength));
    bufferB.write(b.slice(0, fixedLength));

    // Perform the timing-safe comparison
    return crypto.timingSafeEqual(bufferA, bufferB);
  } catch (err) {
    // Handle potential errors, e.g. if inputs are unexpectedly huge (though sliced above)
    logger.error(`Error during safeCompare: ${err.message}`);
    return false;
  }
}
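
// Illustrative only — how these helpers might combine in a login route. The
// route path and response shapes are assumptions for the example:
//
//   app.post('/login', (req, res) => {
//     const ip = req.ip;
//     if (isLockedOut(ip)) return res.status(429).json({ error: 'Too many attempts' });
//
//     const pin = validatePin(req.body.pin); // null unless 4-10 digits
//     if (pin && safeCompare(pin, process.env.DUMBDROP_PIN)) {
//       resetAttempts(ip);
//       return res.json({ ok: true });
//     }
//
//     recordAttempt(ip);
//     return res.status(401).json({ error: 'Invalid PIN' });
//   });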
@@ -145,4 +167,4 @@ module.exports = {
  safeCompare,
  startCleanupInterval,
  stopCleanupInterval
};