1 Commit
dev ... s3

Author SHA1 Message Date
greirson
bdd80020a0 feat(storage): Implement S3 and local storage adapters with enhanced configuration
- Introduced a storage adapter factory to dynamically select between local and S3 storage based on the STORAGE_TYPE environment variable.
- Added S3 adapter for handling file operations on AWS S3, including multipart uploads and presigned URLs.
- Implemented local storage adapter for managing file operations on the local filesystem.
- Enhanced configuration validation to ensure proper setup for both storage types.
- Updated .env.example and README.md to document new storage configuration options and usage.

This commit lets a deployment store uploads either on the local filesystem or in AWS S3, chosen through configuration rather than code changes, which simplifies running the application in both single-host and cloud environments.
2025-05-05 21:52:22 -07:00
12 changed files with 1275 additions and 1119 deletions
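The commit message describes a factory that picks a storage backend from the `STORAGE_TYPE` environment variable, but the adapter source itself is not included in the hunks shown below. The following is only a minimal sketch of that selection pattern; apart from `STORAGE_TYPE`, `UPLOAD_DIR`, and the `local_uploads` directory (which appear elsewhere in this diff), the module shape, method names, and the `S3_BUCKET_NAME` variable are assumptions rather than the repository's real API.

```js
// Minimal sketch of a storage adapter factory keyed off STORAGE_TYPE.
// Adapter names and methods here are illustrative assumptions only.
const path = require('path');
const fs = require('fs/promises');

function createLocalAdapter(uploadDir) {
  return {
    type: 'local',
    // Write a finished file into the local uploads directory.
    async saveFile(relativePath, buffer) {
      const target = path.join(uploadDir, relativePath);
      await fs.mkdir(path.dirname(target), { recursive: true });
      await fs.writeFile(target, buffer);
      return target;
    },
  };
}

function createS3Adapter(config) {
  // Placeholder: a real adapter would wrap the AWS SDK's multipart upload
  // and presigned URL calls described in the commit message, and would also
  // expose chunk-level methods (init, uploadPart, complete).
  return { type: 's3', bucket: config.bucket };
}

function createStorageAdapter(env = process.env) {
  const storageType = (env.STORAGE_TYPE || 'local').toLowerCase();
  if (storageType === 's3') {
    return createS3Adapter({ bucket: env.S3_BUCKET_NAME }); // S3_BUCKET_NAME is an assumed name
  }
  if (storageType === 'local') {
    return createLocalAdapter(env.UPLOAD_DIR || './local_uploads');
  }
  throw new Error(`Unsupported STORAGE_TYPE "${storageType}" (expected "local" or "s3")`);
}

module.exports = { createStorageAdapter };
```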

View File

@@ -106,23 +106,6 @@ AUTO_UPLOAD=false
# ALLOWED_IFRAME_ORIGINS=https://example.com,https://another.com
ALLOWED_IFRAME_ORIGINS=
# --- Docker Specific Settings ---
# User and Group IDs for file permissions
# Sets the user/group the application runs as inside the container.
# Files created in the mapped volume (e.g., ./local_uploads) will have this ownership.
# Set these to match your host user's ID/GID to avoid permission issues.
# Find your IDs with `id -u` and `id -g` on Linux/macOS.
# PUID=1000
# PGID=1000
# File Mode Creation Mask (Umask)
# Controls the default permissions for newly created files.
# 022 (default): Files 644 (rw-r--r--), Dirs 755 (rwxr-xr-x)
# 002: Files 664 (rw-rw-r--), Dirs 775 (rwxrwxr-x) - Good for group sharing
# 007: Files 660 (rw-rw----), Dirs 770 (rwxrwx---) - More restrictive
# 077: Files 600 (rw-------), Dirs 700 (rwx------) - Most restrictive
# UMASK=022
# Max number of retries for client-side chunk uploads (default: 5)
CLIENT_MAX_RETRIES=5

View File

@@ -1,16 +1,8 @@
# Base stage for shared configurations
FROM node:20-alpine as base
# Add user and group IDs as arguments with defaults
ARG PUID=1000
ARG PGID=1000
# Default umask (complement of 022 is 755 for dirs, 644 for files)
ARG UMASK=022
# Install necessary packages:
# - su-exec: lightweight sudo alternative
# - python3, pip: for apprise dependency
RUN apk add --no-cache su-exec python3 py3-pip && \
# Install python and create virtual environment with minimal dependencies
RUN apk add --no-cache python3 py3-pip && \
python3 -m venv /opt/venv && \
rm -rf /var/cache/apk/*
@@ -22,194 +14,52 @@ RUN . /opt/venv/bin/activate && \
# Add virtual environment to PATH
ENV PATH="/opt/venv/bin:$PATH"
# Create group and user with fallback to prevent build failures
# We use the ARG values here, but with a fallback mechanism to avoid build failures
RUN ( \
set -e; \
echo "Attempting to create/verify user with PUID=${PUID} and PGID=${PGID}..."; \
\
# Initialize variables \
TARGET_USER="nodeuser"; \
TARGET_GROUP="nodeuser"; \
NEW_GID="${PGID}"; \
NEW_UID="${PUID}"; \
\
# Step 1: Handle GID and group first \
echo "Setting up group for GID ${NEW_GID}..."; \
if getent group "${NEW_GID}" > /dev/null; then \
# GID exists, check which group has it \
EXISTING_GROUP=$(getent group "${NEW_GID}" | cut -d: -f1); \
echo "GID ${NEW_GID} is already used by group '${EXISTING_GROUP}'."; \
\
if [ "${EXISTING_GROUP}" = "${TARGET_GROUP}" ]; then \
echo "Group '${TARGET_GROUP}' already exists with correct GID ${NEW_GID}."; \
else \
# GID exists but used by a different group (likely 'node') \
echo "Will create '${TARGET_GROUP}' with a different GID to avoid conflict."; \
# Check if TARGET_GROUP exists but with wrong GID \
if getent group "${TARGET_GROUP}" > /dev/null; then \
echo "Group '${TARGET_GROUP}' exists but with wrong GID. Deleting it."; \
delgroup "${TARGET_GROUP}" || true; \
fi; \
# Create TARGET_GROUP with GID+1 (or find next available GID) \
NEXT_GID=$((${NEW_GID} + 1)); \
while getent group "${NEXT_GID}" > /dev/null; do \
NEXT_GID=$((${NEXT_GID} + 1)); \
done; \
echo "Creating group '${TARGET_GROUP}' with new GID ${NEXT_GID}."; \
addgroup -S -g "${NEXT_GID}" "${TARGET_GROUP}"; \
NEW_GID="${NEXT_GID}"; \
fi; \
else \
# GID does not exist - create group with desired GID \
echo "Creating group '${TARGET_GROUP}' with GID ${NEW_GID}."; \
addgroup -S -g "${NEW_GID}" "${TARGET_GROUP}"; \
fi; \
\
# Verify group was created \
echo "Verifying group '${TARGET_GROUP}' exists..."; \
getent group "${TARGET_GROUP}" || (echo "ERROR: Failed to find group '${TARGET_GROUP}'!"; exit 1); \
GID_FOR_USER=$(getent group "${TARGET_GROUP}" | cut -d: -f3); \
echo "Final group: '${TARGET_GROUP}' with GID ${GID_FOR_USER}"; \
\
# Step 2: Handle UID and user \
echo "Setting up user with UID ${NEW_UID}..."; \
if getent passwd "${NEW_UID}" > /dev/null; then \
# UID exists, check which user has it \
EXISTING_USER=$(getent passwd "${NEW_UID}" | cut -d: -f1); \
echo "UID ${NEW_UID} is already used by user '${EXISTING_USER}'."; \
\
if [ "${EXISTING_USER}" = "${TARGET_USER}" ]; then \
echo "User '${TARGET_USER}' already exists with correct UID ${NEW_UID}."; \
# Check if user needs group update \
CURRENT_GID=$(getent passwd "${TARGET_USER}" | cut -d: -f4); \
if [ "${CURRENT_GID}" != "${GID_FOR_USER}" ]; then \
echo "User '${TARGET_USER}' has wrong GID (${CURRENT_GID}). Modifying..."; \
deluser "${TARGET_USER}"; \
adduser -S -D -u "${NEW_UID}" -G "${TARGET_GROUP}" -s /bin/sh "${TARGET_USER}"; \
fi; \
else \
# Another user has our UID (e.g., 'node'). Delete it. \
echo "Deleting existing user '${EXISTING_USER}' with UID ${NEW_UID}."; \
deluser "${EXISTING_USER}" || true; \
\
# Now check if TARGET_USER exists but with wrong UID \
if getent passwd "${TARGET_USER}" > /dev/null; then \
echo "User '${TARGET_USER}' exists but with wrong UID. Updating..."; \
deluser "${TARGET_USER}" || true; \
fi; \
\
# Create user \
echo "Creating user '${TARGET_USER}' with UID ${NEW_UID} and group '${TARGET_GROUP}'."; \
adduser -S -D -u "${NEW_UID}" -G "${TARGET_GROUP}" -s /bin/sh "${TARGET_USER}"; \
fi; \
else \
# UID does not exist - check if user exists with wrong UID \
if getent passwd "${TARGET_USER}" > /dev/null; then \
echo "User '${TARGET_USER}' exists but with wrong UID. Updating..."; \
deluser "${TARGET_USER}" || true; \
fi; \
\
# Create user with desired UID \
echo "Creating user '${TARGET_USER}' with UID ${NEW_UID} and group '${TARGET_GROUP}'."; \
adduser -S -D -u "${NEW_UID}" -G "${TARGET_GROUP}" -s /bin/sh "${TARGET_USER}"; \
fi; \
\
# Create and set permissions on home directory \
echo "Setting up home directory for ${TARGET_USER}..."; \
mkdir -p /home/${TARGET_USER} && \
chown -R ${TARGET_USER}:${TARGET_GROUP} /home/${TARGET_USER} && \
chmod 755 /home/${TARGET_USER}; \
\
# Verify user was created \
echo "Verifying user '${TARGET_USER}' exists..."; \
getent passwd "${TARGET_USER}" || (echo "ERROR: Failed to find user '${TARGET_USER}'!"; exit 1); \
\
# Clean up and verify system files \
echo "Ensuring root user definition is pristine..."; \
chown root:root /etc/passwd /etc/group && \
chmod 644 /etc/passwd /etc/group && \
getent passwd root || (echo "ERROR: root not found after user/group operations!"; exit 1); \
\
# Print final status \
echo "Final user/group setup:"; \
id "${TARGET_USER}"; \
)
WORKDIR /usr/src/app
# Set UMASK - this applies to processes run by the user created in this stage
# The entrypoint will also set it based on the ENV var at runtime.
RUN umask ${UMASK}
# Dependencies stage
FROM base as deps
# Change ownership early so npm cache is owned correctly
RUN chown nodeuser:nodeuser /usr/src/app
# Switch to nodeuser before running npm commands
USER nodeuser
COPY --chown=nodeuser:nodeuser package*.json ./
COPY package*.json ./
RUN npm ci --only=production && \
# Remove npm cache
npm cache clean --force
# Switch back to root for the next stages if needed
USER root
# Development stage
FROM deps as development
USER root
ENV NODE_ENV=development
# Create and set up directories
RUN mkdir -p /usr/src/app/local_uploads /usr/src/app/uploads && \
chown -R nodeuser:nodeuser /usr/src/app/local_uploads /usr/src/app/uploads
COPY --chown=nodeuser:nodeuser package*.json ./
# Install dev dependencies
RUN npm install && \
npm cache clean --force
COPY --chown=nodeuser:nodeuser src/ ./src/
COPY --chown=nodeuser:nodeuser public/ ./public/
# Create upload directory
RUN mkdir -p uploads
# Check if __tests__ and dev exist in your project root, if not, these COPY lines will fail for dev target
# COPY --chown=nodeuser:nodeuser __tests__/ ./__tests__/
# COPY --chown=nodeuser:nodeuser dev/ ./dev/
COPY --chown=nodeuser:nodeuser .eslintrc.json .eslintignore .prettierrc nodemon.json ./
# Switch back to nodeuser for runtime
USER nodeuser
EXPOSE 3000
# Production stage
FROM deps as production
# Copy source with specific paths to avoid unnecessary files
COPY src/ ./src/
COPY public/ ./public/
COPY __tests__/ ./__tests__/
COPY dev/ ./dev/
COPY .eslintrc.json .eslintignore ./
USER root
ENV NODE_ENV=production
ENV UPLOAD_DIR /app/uploads
# Create and set up directories
RUN mkdir -p /usr/src/app/local_uploads /usr/src/app/uploads && \
chown -R nodeuser:nodeuser /usr/src/app/local_uploads /usr/src/app/uploads
# Copy only necessary source files and ensure ownership
COPY --chown=nodeuser:nodeuser src/ ./src/
COPY --chown=nodeuser:nodeuser public/ ./public/
# Copy the entrypoint script and make it executable
COPY --chown=root:root src/scripts/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh
# Expose port
EXPOSE 3000
# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
# Final user should be nodeuser for runtime
USER nodeuser
CMD ["npm", "run", "dev"]
# Production stage
FROM deps as production
ENV NODE_ENV=production
ENV UPLOAD_DIR /app/uploads
# Create upload directory
# RUN mkdir -p uploads # No longer strictly needed here as volume mapping is expected, but harmless
# Copy only necessary source files
COPY src/ ./src/
COPY public/ ./public/
# Expose port
EXPOSE 3000
# Default command to run (passed to entrypoint)
CMD ["npm", "start"] CMD ["npm", "start"]

View File

@@ -254,4 +254,6 @@ See [Local Development (Recommended Quick Start)](LOCAL_DEVELOPMENT.md) for loca
Made with ❤️ by [DumbWare.io](https://dumbware.io)
## Future Features
- Camera Upload for Mobile
> Got an idea? [Open an issue](https://github.com/dumbwareio/dumbdrop/issues) or [submit a PR](https://github.com/dumbwareio/dumbdrop/pulls)

View File

@@ -19,24 +19,8 @@ services:
# FOOTER_LINKS: "My Site @ https://example.com,Docs @ https://docs.example.com" # Custom footer links
# PORT: 3000 # Server port (default: 3000)
# NODE_ENV: production # Node environment (development/production)
# DEBUG: false # Debug mode for verbose logging (default: false in production, true in development)
# APPRISE_URL: "" # Apprise notification URL for upload notifications (default: none)
# APPRISE_MESSAGE: "New file uploaded - {filename} ({size}), Storage used {storage}" # Notification message template with placeholders: {filename}, {size}, {storage}
# APPRISE_SIZE_UNIT: "Auto" # Size unit for notifications (B, KB, MB, GB, TB, or Auto)
# ALLOWED_EXTENSIONS: ".jpg,.jpeg,.png,.pdf,.doc,.docx,.txt" # Comma-separated list of allowed file extensions (default: all allowed)
# PUID: 1000 # User ID for file ownership (default: 1000)
# PGID: 1000 # Group ID for file ownership (default: 1000)
# UMASK: "000" # File permissions mask (default: 000)
restart: unless-stopped
# user: "${PUID}:${PGID}" # Don't set user here, entrypoint handles it
# Consider adding healthcheck
# healthcheck:
# test: ["CMD", "curl", "--fail", "http://localhost:3000/health"] # Assuming a /health endpoint exists
# interval: 30s
# timeout: 10s
# retries: 3
# healthcheck:
# test: ["CMD", "curl", "--fail", "http://localhost:3000/health"]
# interval: 30s
# timeout: 10s
# retries: 3
# start_period: 30s

View File

@@ -49,6 +49,9 @@
<div id="uploadProgress"></div> <!-- Original progress bar container --> <div id="uploadProgress"></div> <!-- Original progress bar container -->
<div id="fileList" class="file-list"></div> <!-- Original file list container --> <div id="fileList" class="file-list"></div> <!-- Original file list container -->
<button id="uploadButton" class="upload-button" style="display: none;">Upload Files</button> <button id="uploadButton" class="upload-button" style="display: none;">Upload Files</button>
<footer>
{{FOOTER_CONTENT}}
</footer>
</div> </div>
<script defer> <script defer>
@@ -56,51 +59,67 @@
const CHUNK_SIZE = 1024 * 1024 * 5; // 5MB chunks
const RETRY_DELAY = 1000; // 1 second delay between retries
// Read MAX_RETRIES from the injected server value, with a fallback
const MAX_RETRIES_STR = '{{MAX_RETRIES}}';
let maxRetries = 5; let maxRetries = 5; // Default value
if (MAX_RETRIES_STR && MAX_RETRIES_STR !== '{{MAX_RETRIES}}') { if (MAX_RETRIES_STR && MAX_RETRIES_STR !== '{{MAX_RETRIES}}') {
const parsedRetries = parseInt(MAX_RETRIES_STR, 10); const parsedRetries = parseInt(MAX_RETRIES_STR, 10);
if (!isNaN(parsedRetries) && parsedRetries >= 0) maxRetries = parsedRetries; if (!isNaN(parsedRetries) && parsedRetries >= 0) {
else console.warn(`Invalid MAX_RETRIES value "${MAX_RETRIES_STR}", defaulting to ${maxRetries}.`); maxRetries = parsedRetries;
} else console.warn('MAX_RETRIES not injected by server, defaulting to 5.'); } else {
console.warn(`Invalid MAX_RETRIES value "${MAX_RETRIES_STR}" received from server, defaulting to ${maxRetries}.`);
}
} else {
console.warn('MAX_RETRIES not injected by server, defaulting to 5.');
}
window.MAX_RETRIES = maxRetries;
console.log(`Max retries for chunk uploads: ${window.MAX_RETRIES}`);
const AUTO_UPLOAD_STR = '{{AUTO_UPLOAD}}';
const AUTO_UPLOAD = ['true', '1', 'yes'].includes(AUTO_UPLOAD_STR.toLowerCase());
// --- NEW: Variable to track active uploads --- // Utility function to generate a unique batch ID
let activeUploadCount = 0; function generateBatchId() {
return `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
}
function generateBatchId() { return `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`; } // Utility function to format file sizes
function formatFileSize(bytes) { if (bytes === 0) return '0 Bytes'; const k = 1024; const sizes = ['Bytes', 'KB', 'MB', 'GB']; const i = Math.floor(Math.log(bytes) / Math.log(k)); return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i]; } function formatFileSize(bytes) {
if (bytes === 0) return '0 Bytes';
const k = 1024;
const sizes = ['Bytes', 'KB', 'MB', 'GB'];
const i = Math.floor(Math.log(bytes) / Math.log(k));
return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
}
class FileUploader { class FileUploader {
constructor(file, batchId) { constructor(file, batchId) {
this.file = file; this.file = file;
this.batchId = batchId; this.batchId = batchId;
this.uploadId = null; this.uploadId = null; // Application's upload ID
this.position = 0; this.position = 0;
this.progressElement = null; this.progressElement = null; // For the separate progress bar
this.chunkSize = CHUNK_SIZE; this.chunkSize = CHUNK_SIZE; // Use constant
this.lastUploadedBytes = 0; this.lastUploadedBytes = 0; // Used for rate calculation in original progress bar
this.lastUploadTime = null; this.lastUploadTime = null; // Used for rate calculation
this.uploadRate = 0; this.uploadRate = 0; // Used for rate calculation
this.maxRetries = window.MAX_RETRIES; this.maxRetries = window.MAX_RETRIES;
this.retryDelay = RETRY_DELAY; this.retryDelay = RETRY_DELAY;
// *** ADDED for S3/Adapter logic *** // *** ADDED partNumber for S3 ***
this.partNumber = 1; this.partNumber = 1;
// *** ADDED completed flag ***
this.completed = false; this.completed = false;
} }
async start() { async start() {
try { try {
this.createProgressElement(); // Original: create separate progress bar this.createProgressElement(); // Create the progress bar UI element
this.updateProgress(0); this.updateProgress(0); // Initial progress update to 0%
await this.initUpload(); await this.initUpload();
// Handle zero-byte files completed during init
if (this.uploadId && this.uploadId.startsWith('zero-byte-')) { if (this.uploadId && this.uploadId.startsWith('zero-byte-')) {
console.log(`Zero-byte file ${this.file.name} handled by server init.`); console.log(`Zero-byte file ${this.file.name} handled by server during init.`);
this.updateProgress(100); this.updateProgress(100);
this.completed = true; this.completed = true;
return true; return true;
@@ -109,10 +128,11 @@
if (this.uploadId && this.file.size > 0) { if (this.uploadId && this.file.size > 0) {
await this.uploadChunks(); await this.uploadChunks();
} else if (this.file.size === 0 && !this.completed) { } else if (this.file.size === 0 && !this.completed) {
console.warn(`File ${this.file.name} is zero bytes, init didn't indicate completion. Assuming complete.`); console.warn(`File ${this.file.name} is zero bytes, but init didn't indicate completion.`);
this.updateProgress(100); this.updateProgress(100);
this.completed = true; this.completed = true;
} }
// Return completion status
return this.completed; return this.completed;
} catch (error) { } catch (error) {
console.error(`Upload failed for ${this.file.webkitRelativePath || this.file.name}:`, error); console.error(`Upload failed for ${this.file.webkitRelativePath || this.file.name}:`, error);
@@ -120,178 +140,291 @@
this.progressElement.infoSpan.textContent = `Error: ${error.message}`; this.progressElement.infoSpan.textContent = `Error: ${error.message}`;
this.progressElement.infoSpan.style.color = 'var(--danger-color)'; this.progressElement.infoSpan.style.color = 'var(--danger-color)';
} }
await this.cancelUploadOnServer(); await this.cancelUploadOnServer(); // Attempt cancellation
this.completed = false; this.completed = false;
return false; return false;
} }
} }
async initUpload() { async initUpload() {
// (initUpload logic is identical to the previous version - uses fetch to /init)
const uploadPath = this.file.webkitRelativePath || this.file.name; const uploadPath = this.file.webkitRelativePath || this.file.name;
const consistentPath = uploadPath.replace(/\\/g, '/'); const consistentPath = uploadPath.replace(/\\/g, '/');
console.log(`[Uploader] Init for: ${consistentPath} (Size: ${this.file.size})`); console.log(`[Uploader] Initializing upload for: ${consistentPath} (Size: ${this.file.size}, Batch: ${this.batchId})`);
const headers = { 'Content-Type': 'application/json' }; const headers = { 'Content-Type': 'application/json' };
if (this.batchId) headers['X-Batch-ID'] = this.batchId; if (this.batchId) headers['X-Batch-ID'] = this.batchId;
const apiUrlPath = '/api/upload/init'; const apiUrlPath = '/api/upload/init';
const fullApiUrl = window.BASE_URL + (apiUrlPath.startsWith('/') ? apiUrlPath.substring(1) : apiUrlPath); const fullApiUrl = window.BASE_URL + (apiUrlPath.startsWith('/') ? apiUrlPath.substring(1) : apiUrlPath);
const response = await fetch(fullApiUrl, { const response = await fetch(fullApiUrl, {
method: 'POST', headers, method: 'POST',
headers,
body: JSON.stringify({ filename: consistentPath, fileSize: this.file.size }) body: JSON.stringify({ filename: consistentPath, fileSize: this.file.size })
}); });
if (!response.ok) { if (!response.ok) {
const errData = await response.json().catch(() => ({ error: `Server error ${response.status}` })); const errorData = await response.json().catch(() => ({ error: `Server error ${response.status}` }));
throw new Error(errData.details || errData.error || `Init failed: ${response.status}`); throw new Error(errorData.details || errorData.error || `Init failed: ${response.status}`);
} }
const data = await response.json(); const data = await response.json();
if (!data.uploadId) throw new Error('Server did not return uploadId'); if (!data.uploadId) throw new Error('Server did not return uploadId');
this.uploadId = data.uploadId; this.uploadId = data.uploadId;
console.log(`[Uploader] Init success. App Upload ID: ${this.uploadId}`); console.log(`[Uploader] Init successful. App Upload ID: ${this.uploadId}`);
} }
async uploadChunks() { async uploadChunks() {
if (!this.progressElement) this.createProgressElement(); // Ensure progress bar exists // Create progress element if not already done (might happen if start didn't create it due to early exit/error)
if (!this.progressElement) this.createProgressElement();
while (this.position < this.file.size && !this.completed) { while (this.position < this.file.size && !this.completed) { // Check completed flag
const chunkStartPosition = this.position; const chunkStartPosition = this.position;
const chunk = await this.readChunk(); const chunk = await this.readChunk(); // Reads based on this.position, updates this.position
const currentPartNumber = this.partNumber; const currentPartNumber = this.partNumber; // *** Get current part number ***
try { try {
console.debug(`[Uploader] Attempting Part ${currentPartNumber}, Bytes ${chunkStartPosition}-${this.position-1}`);
// *** Pass partNumber to upload function ***
const result = await this.uploadChunkWithRetry(chunk, chunkStartPosition, currentPartNumber); const result = await this.uploadChunkWithRetry(chunk, chunkStartPosition, currentPartNumber);
// Update original progress bar with server's progress
this.updateProgress(result.progress); // *** Increment part number AFTER successful upload ***
this.partNumber++; this.partNumber++;
// *** Check if server response indicates completion ***
if (result.completed) { if (result.completed) {
console.log(`[Uploader] Server indicated completion after Part ${currentPartNumber}.`);
this.completed = true; this.completed = true;
this.updateProgress(100); // Ensure it hits 100% this.updateProgress(100); // Update original progress bar
break; break; // Exit loop
} }
} catch (error) { } catch (error) {
console.error(`[Uploader] UploadChunks failed for Part ${this.partNumber}, File: ${this.file.name}`); console.error(`[Uploader] UploadChunks failed permanently after retries for Part ${this.partNumber}. File: ${this.file.webkitRelativePath || this.file.name}`);
throw error; throw error; // Propagate up
} }
} }
// Check completion after loop, same as before
if (!this.completed && this.position >= this.file.size) { if (!this.completed && this.position >= this.file.size) {
this.completed = true; this.updateProgress(100); console.warn(`[Uploader] Reached end of file but not marked completed by server. Assuming complete.`);
this.completed = true;
this.updateProgress(100);
} }
} }
async readChunk() { async readChunk() {
// (readChunk logic is identical)
const start = this.position; const start = this.position;
const end = Math.min(this.position + this.chunkSize, this.file.size); const end = Math.min(this.position + this.chunkSize, this.file.size);
const blob = this.file.slice(start, end); const blob = this.file.slice(start, end);
this.position = end; this.position = end; // Update position *after* slicing
return await blob.arrayBuffer(); return await blob.arrayBuffer();
} }
// *** MODIFIED: Added partNumber parameter ***
async uploadChunkWithRetry(chunk, chunkStartPosition, partNumber) { async uploadChunkWithRetry(chunk, chunkStartPosition, partNumber) {
const chunkApiUrlPath = `/api/upload/chunk/${this.uploadId}?partNumber=${partNumber}`; // *** ADDED partNumber *** // *** MODIFIED: Append partNumber query parameter to URL ***
const chunkApiUrlPath = `/api/upload/chunk/${this.uploadId}?partNumber=${partNumber}`;
const fullChunkApiUrl = window.BASE_URL + (chunkApiUrlPath.startsWith('/') ? chunkApiUrlPath.substring(1) : chunkApiUrlPath); const fullChunkApiUrl = window.BASE_URL + (chunkApiUrlPath.startsWith('/') ? chunkApiUrlPath.substring(1) : chunkApiUrlPath);
let lastError = null; let lastError = null;
for (let attempt = 0; attempt <= this.maxRetries; attempt++) { for (let attempt = 0; attempt <= this.maxRetries; attempt++) {
try { try {
if (attempt > 0) { if (attempt > 0) {
console.warn(`[Uploader] Retrying Part ${partNumber} (Attempt ${attempt}/${this.maxRetries})`); console.warn(`[Uploader] Retrying Part ${partNumber} upload for ${this.file.webkitRelativePath || this.file.name} (Attempt ${attempt}/${this.maxRetries})...`);
this.updateProgressElementInfo(`Retrying attempt ${attempt}...`, 'var(--warning-color)'); this.updateProgressElementInfo(`Retrying attempt ${attempt}...`, 'var(--warning-color)');
} else if (this.progressElement) { // Update info for first attempt } else {
this.updateProgressElementInfo(`uploading part ${partNumber}...`); // Update status text for the current part (optional, depends if you want this level of detail)
// this.updateProgressElementInfo(`uploading part ${partNumber}...`);
} }
const controller = new AbortController(); const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 60000); // 60s timeout // Increase timeout slightly for S3 potentially
const timeoutId = setTimeout(() => controller.abort(), 60000);
const response = await fetch(fullChunkApiUrl, { console.debug(`[Uploader] Sending Part ${partNumber} to ${fullChunkApiUrl}`);
const response = await fetch(fullChunkApiUrl, { // Use modified URL
method: 'POST', method: 'POST',
headers: { 'Content-Type': 'application/octet-stream', 'X-Batch-ID': this.batchId }, headers: {
body: chunk, signal: controller.signal 'Content-Type': 'application/octet-stream',
'X-Batch-ID': this.batchId
},
body: chunk,
signal: controller.signal
}); });
clearTimeout(timeoutId); clearTimeout(timeoutId);
if (response.ok) { if (response.ok) {
const data = await response.json(); // Contains { bytesReceived, progress, completed } const data = await response.json(); // Contains { bytesReceived, progress, completed }
if (attempt > 0) console.log(`[Uploader] Part ${partNumber} success on retry ${attempt}.`); if (attempt > 0) console.log(`[Uploader] Part ${partNumber} upload successful on retry attempt ${attempt}.`);
else console.debug(`[Uploader] Part ${partNumber} uploaded successfully.`); else console.debug(`[Uploader] Part ${partNumber} uploaded successfully.`);
if(this.progressElement) this.updateProgressElementInfo('uploading...'); // Reset info
return data; // *** RETURN server data (has 'completed' flag) *** // *** Use server-provided progress for original progress bar ***
this.updateProgress(data.progress);
this.updateProgressElementInfo('uploading...'); // Reset info message
// *** Return the data which includes the 'completed' flag ***
return data;
} else { } else {
// (Error handling logic for non-OK responses remains the same)
let errorText = `Server error ${response.status}`; try { errorText = (await response.json()).error || errorText } catch(e){} let errorText = `Server error ${response.status}`; try { errorText = (await response.json()).error || errorText } catch(e){}
if (response.status === 404 && attempt > 0) { if (response.status === 404 && attempt > 0) {
console.warn(`[Uploader] 404 on retry (Part ${partNumber}), assuming completed.`); console.warn(`[Uploader] Received 404 on retry, assuming completed.`);
this.completed = true; // Mark as completed this.completed = true;
// this.updateProgress(100); // updateProgress is called from uploadChunks this.updateProgress(100); // Update original progress bar
return { completed: true, progress: 100, bytesReceived: this.file.size }; return { completed: true, progress: 100, bytesReceived: this.file.size }; // Simulate success
} }
lastError = new Error(`Failed Part ${partNumber}: ${errorText}`); lastError = new Error(`Failed Part ${partNumber}: ${errorText}`);
console.error(`Attempt ${attempt} failed: ${lastError.message}`); console.error(`Attempt ${attempt} failed: ${lastError.message}`);
this.updateProgressElementInfo(`Attempt ${attempt} failed: ${response.statusText}`, 'var(--danger-color)'); this.updateProgressElementInfo(`Attempt ${attempt} failed: ${response.statusText}`, 'var(--danger-color)');
} }
} catch (error) { } catch (error) {
// (Network/Abort error handling remains the same)
lastError = error; lastError = error;
if (error.name === 'AbortError') { console.error(`Part ${partNumber} Attempt ${attempt} timed out.`); this.updateProgressElementInfo(`Attempt ${attempt} timed out`, 'var(--danger-color)');} if (error.name === 'AbortError') { console.error(`Attempt ${attempt} timed out.`); this.updateProgressElementInfo(`Attempt ${attempt} timed out`, 'var(--danger-color)');}
else { console.error(`Part ${partNumber} Attempt ${attempt} network error: ${error.message}`); this.updateProgressElementInfo(`Attempt ${attempt} network error`, 'var(--danger-color)'); } else { console.error(`Attempt ${attempt} network error: ${error.message}`); this.updateProgressElementInfo(`Attempt ${attempt} network error`, 'var(--danger-color)'); }
} }
// (Retry delay logic remains the same)
if (attempt < this.maxRetries) await new Promise(r => setTimeout(r, Math.min(this.retryDelay * Math.pow(2, attempt), 30000))); if (attempt < this.maxRetries) await new Promise(r => setTimeout(r, Math.min(this.retryDelay * Math.pow(2, attempt), 30000)));
} } // End retry loop
console.error(`[Uploader] Part ${partNumber} failed permanently after ${this.maxRetries} retries.`);
console.error(`[Uploader] Part ${partNumber} upload failed permanently after ${this.maxRetries} retries.`);
this.updateProgressElementInfo(`Upload failed after ${this.maxRetries} retries`, 'var(--danger-color)'); this.updateProgressElementInfo(`Upload failed after ${this.maxRetries} retries`, 'var(--danger-color)');
throw lastError || new Error(`Part ${partNumber} failed after ${this.maxRetries} retries.`); throw lastError || new Error(`Part ${partNumber} failed after ${this.maxRetries} retries.`);
} }
// --- Original Progress Bar UI Methods --- // --- Original Progress Bar UI Methods ---
// (These methods remain identical to the original file content)
createProgressElement() { createProgressElement() {
if (this.progressElement) return; if (this.progressElement) return; // Avoid duplicates if called multiple times
const container = document.createElement('div'); container.className = 'progress-container';
const label = document.createElement('div'); label.className = 'progress-label'; label.textContent = this.file.webkitRelativePath || this.file.name; const container = document.createElement('div');
const progress = document.createElement('div'); progress.className = 'progress'; container.className = 'progress-container';
const bar = document.createElement('div'); bar.className = 'progress-bar'; container.setAttribute('data-upload-id', this.uploadId || `pending-${this.file.name}`); // Use unique identifier
const status = document.createElement('div'); status.className = 'progress-status';
const info = document.createElement('div'); info.className = 'progress-info'; info.textContent = 'initializing...'; const label = document.createElement('div');
const details = document.createElement('div'); details.className = 'progress-details'; details.textContent = `0 Bytes of ${formatFileSize(this.file.size)} (0.0%)`; label.className = 'progress-label';
status.appendChild(info); status.appendChild(details); progress.appendChild(bar); label.textContent = this.file.webkitRelativePath || this.file.name;
container.appendChild(label); container.appendChild(progress); container.appendChild(status);
const progress = document.createElement('div');
progress.className = 'progress';
const bar = document.createElement('div');
bar.className = 'progress-bar';
const status = document.createElement('div');
status.className = 'progress-status';
const info = document.createElement('div');
info.className = 'progress-info';
info.textContent = 'initializing...';
const details = document.createElement('div');
details.className = 'progress-details';
details.textContent = `0 Bytes of ${formatFileSize(this.file.size)} (0.0%)`;
status.appendChild(info);
status.appendChild(details);
progress.appendChild(bar);
container.appendChild(label);
container.appendChild(progress);
container.appendChild(status);
document.getElementById('uploadProgress').appendChild(container); document.getElementById('uploadProgress').appendChild(container);
this.progressElement = { container, bar, infoSpan: info, detailsSpan: details }; this.progressElement = { container, bar, infoSpan: info, detailsSpan: details };
this.lastUploadTime = Date.now(); this.lastUploadedBytes = 0; this.uploadRate = 0; this.lastUploadTime = Date.now(); // Initialize for rate calculation
} this.lastUploadedBytes = 0;
updateProgress(percent) {
if (!this.progressElement) this.createProgressElement(); if (!this.progressElement) return;
const clampedPercent = Math.max(0, Math.min(100, percent));
this.progressElement.bar.style.width = `${clampedPercent}%`;
const currentTime = Date.now(); const timeDiff = (currentTime - (this.lastUploadTime || currentTime)) / 1000;
const bytesDiff = this.position - this.lastUploadedBytes; // Use this.position for rate too
if (timeDiff > 0.1 && bytesDiff > 0) { this.uploadRate = bytesDiff / timeDiff; this.lastUploadedBytes = this.position; this.lastUploadTime = currentTime; }
else if (timeDiff > 5) { this.uploadRate = 0; }
let rateText = 'Calculating...';
if (this.uploadRate > 0) { const u=['B/s','KB/s','MB/s','GB/s']; let i=0,r=this.uploadRate; while(r>=1024&&i<u.length-1){r/=1024;i++;} rateText=`${r.toFixed(1)} ${u[i]}`; }
else if (this.position > 0 || clampedPercent > 0) { rateText = '0.0 B/s'; }
const statusText = clampedPercent >= 100 ? 'complete' : 'uploading...';
if (!this.progressElement.infoSpan.textContent.startsWith('Retry') && !this.progressElement.infoSpan.textContent.startsWith('Attempt') && !this.progressElement.infoSpan.textContent.startsWith('Error')) {
this.updateProgressElementInfo(`${rateText} · ${statusText}`);
}
this.progressElement.detailsSpan.textContent = `${formatFileSize(this.position)} of ${formatFileSize(this.file.size)} (${clampedPercent.toFixed(1)}%)`;
if (clampedPercent === 100) {
this.progressElement.container.style.opacity = '0.5'; // Original had fade out
setTimeout(() => { if (this.progressElement && this.progressElement.container) { this.progressElement.container.remove(); this.progressElement = null; }}, 2000);
}
}
updateProgressElementInfo(message, color = '') { if (this.progressElement && this.progressElement.infoSpan) { this.progressElement.infoSpan.textContent = message; this.progressElement.infoSpan.style.color = color; }}
async cancelUploadOnServer() { if (!this.uploadId || this.completed || this.uploadId.startsWith('zero-byte-')) return; console.log(`[Uploader] Server cancel for ${this.uploadId}`); try { const p=`/api/upload/cancel/${this.uploadId}`; const u=window.BASE_URL+(p.startsWith('/')?p.substring(1):p); fetch(u,{method:'POST'}).catch(e=>console.warn('Cancel req failed:',e));}catch(e){console.warn('Cancel init err:',e);}}
} }
updateProgress(percent) {
// Ensure element exists, create if necessary (though start() usually does)
if (!this.progressElement) this.createProgressElement();
if (!this.progressElement) return; // Still couldn't create it? Bail.
const clampedPercent = Math.max(0, Math.min(100, percent));
this.progressElement.bar.style.width = `${clampedPercent}%`;
// Calculate upload rate using server response bytes (as original)
// Note: For S3, data.bytesReceived might not perfectly reflect total uploaded bytes.
// We'll use this.position primarily for display bytes, but rate calculation follows original logic.
const currentTime = Date.now();
const timeDiff = (currentTime - (this.lastUploadTime || currentTime)) / 1000;
// Using this.position for rate might be visually smoother, but let's stick to original logic for now.
// We need the `bytesReceived` from the server response if we want to use it here...
// Let's fallback to using this.position for rate calculation as well, like the progress display.
const bytesDiff = this.position - this.lastUploadedBytes;
if (timeDiff > 0.1 && bytesDiff > 0) {
this.uploadRate = bytesDiff / timeDiff;
this.lastUploadedBytes = this.position;
this.lastUploadTime = currentTime;
} else if (timeDiff > 5) { // Reset rate if stalled
this.uploadRate = 0;
}
// Format rate (same as original)
let rateText = 'Calculating...';
if (this.uploadRate > 0) { /* ... format rate ... */ const units=['B/s','KB/s','MB/s','GB/s']; let i=0, r=this.uploadRate; while(r>=1024 && i<units.length-1){r/=1024;i++;} rateText=`${r.toFixed(1)} ${units[i]}`; }
else if (this.position > 0 || clampedPercent > 0) { rateText = '0.0 B/s'; }
// Update info/details (same as original)
const statusText = clampedPercent >= 100 ? 'complete' : 'uploading...';
if (!this.progressElement.infoSpan.textContent.startsWith('Retry') &&
!this.progressElement.infoSpan.textContent.startsWith('Attempt') &&
!this.progressElement.infoSpan.textContent.startsWith('Error')) {
this.updateProgressElementInfo(`${rateText} · ${statusText}`);
}
// Display progress using this.position (client's view) and clampedPercent
this.progressElement.detailsSpan.textContent =
`${formatFileSize(this.position)} of ${formatFileSize(this.file.size)} (${clampedPercent.toFixed(1)}%)`;
// Fade out (same as original)
if (clampedPercent === 100) {
this.progressElement.container.style.opacity = '0.5';
setTimeout(() => {
if (this.progressElement && this.progressElement.container) {
this.progressElement.container.remove();
this.progressElement = null;
}
}, 2000);
}
}
updateProgressElementInfo(message, color = '') {
// (Identical to original)
if (this.progressElement && this.progressElement.infoSpan) {
this.progressElement.infoSpan.textContent = message;
this.progressElement.infoSpan.style.color = color;
}
}
// --- Cancellation Logic ---
async cancelUploadOnServer() {
// (Identical to original, just ensure checks use this.completed and this.uploadId)
if (!this.uploadId || this.completed || this.uploadId.startsWith('zero-byte-')) return;
console.log(`[Uploader] Attempting server cancel for ${this.uploadId}`);
try {
const cancelApiUrlPath = `/api/upload/cancel/${this.uploadId}`;
const fullUrl = window.BASE_URL + (cancelApiUrlPath.startsWith('/') ? cancelApiUrlPath.substring(1) : cancelApiUrlPath);
fetch(fullUrl, { method: 'POST' }).catch(err => console.warn(`Cancel request failed:`, err));
} catch (e) { console.warn(`Error initiating cancel:`, e); }
}
} // End FileUploader Class
// --- Original UI Handlers and Logic --- // --- Original UI Handlers and Logic ---
// (All the following code remains identical to the original file)
const dropZone = document.getElementById('dropZone'); const dropZone = document.getElementById('dropZone');
const fileInput = document.getElementById('fileInput'); const fileInput = document.getElementById('fileInput');
const folderInput = document.getElementById('folderInput'); const folderInput = document.getElementById('folderInput');
const fileListDiv = document.getElementById('fileList'); // Original div for list const fileList = document.getElementById('fileList'); // Refers to the original div#fileList
const uploadButton = document.getElementById('uploadButton'); const uploadButton = document.getElementById('uploadButton');
let filesToUpload = []; // Use a different name than original `files` for clarity let filesToUpload = []; // Renamed variable
async function getAllFileEntries(dataTransferItems) { /* ... (original implementation from previous message) ... */ // --- Drag and Drop Folder Handling (getAllFileEntries) ---
async function getAllFileEntries(dataTransferItems) {
// (Keep original implementation)
console.debug('Starting getAllFileEntries with items:', Array.from(dataTransferItems).map(item => ({ kind: item.kind, type: item.type }))); console.debug('Starting getAllFileEntries with items:', Array.from(dataTransferItems).map(item => ({ kind: item.kind, type: item.type })));
let fileEntries = []; let rootFolderName = null; let fileEntries = []; let rootFolderName = null;
async function traverseEntry(entry, path = '') { async function traverseEntry(entry, path = '') {
console.debug('Traversing entry:', { name: entry.name, isFile: entry.isFile, isDirectory: entry.isDirectory, currentPath: path });
if (entry.isFile) { if (entry.isFile) {
const file = await new Promise((resolve, reject) => entry.file(f => { const file = await new Promise((resolve, reject) => entry.file(f => {
const fileWithPath = new File([f], entry.name, { type: f.type, lastModified: f.lastModified }); const fileWithPath = new File([f], entry.name, { type: f.type, lastModified: f.lastModified });
@@ -312,137 +445,137 @@
const entryPromises = Array.from(dataTransferItems).map(item => item.webkitGetAsEntry()).filter(Boolean).map(entry => traverseEntry(entry)); const entryPromises = Array.from(dataTransferItems).map(item => item.webkitGetAsEntry()).filter(Boolean).map(entry => traverseEntry(entry));
await Promise.all(entryPromises); await Promise.all(entryPromises);
fileEntries.sort((a, b) => (a.webkitRelativePath || a.name).localeCompare(b.webkitRelativePath || b.name)); fileEntries.sort((a, b) => (a.webkitRelativePath || a.name).localeCompare(b.webkitRelativePath || b.name));
console.debug('getAllFileEntries result:', fileEntries.map(f=>f.webkitRelativePath || f.name));
return fileEntries; return fileEntries;
} catch (error) { console.error('Error in getAllFileEntries:', error); throw error; } } catch (error) { console.error('Error in getAllFileEntries:', error); throw error; }
} }
['dragenter', 'dragover', 'dragleave', 'drop'].forEach(ev => { dropZone.addEventListener(ev, preventDefaults, false); document.body.addEventListener(ev, preventDefaults, false); }); // --- Event Listeners (Original) ---
['dragenter', 'dragover'].forEach(ev => dropZone.addEventListener(ev, highlight, false)); ['dragenter', 'dragover', 'dragleave', 'drop'].forEach(ev => { dropZone.addEventListener(ev, preventDefaults); document.body.addEventListener(ev, preventDefaults); });
['dragleave', 'drop'].forEach(ev => dropZone.addEventListener(ev, unhighlight, false)); ['dragenter', 'dragover'].forEach(ev => dropZone.addEventListener(ev, highlight));
['dragleave', 'drop'].forEach(ev => dropZone.addEventListener(ev, unhighlight));
dropZone.addEventListener('drop', handleDrop); dropZone.addEventListener('drop', handleDrop);
fileInput.addEventListener('change', handleFilesFromInput); fileInput.addEventListener('change', handleFilesFromInput); // Use renamed handler
folderInput.addEventListener('change', handleFilesFromInput); folderInput.addEventListener('change', handleFilesFromInput); // Use renamed handler
uploadButton.addEventListener('click', startUploads); uploadButton.addEventListener('click', startUploads);
// --- Event Handler Functions (Original) ---
function preventDefaults(e) { e.preventDefault(); e.stopPropagation(); } function preventDefaults(e) { e.preventDefault(); e.stopPropagation(); }
function highlight() { dropZone.classList.add('highlight'); } function highlight(e) { dropZone.classList.add('highlight'); }
function unhighlight() { dropZone.classList.remove('highlight'); } function unhighlight(e) { dropZone.classList.remove('highlight'); }
async function handleDrop(e) { async function handleDrop(e) {
// Use original logic, just assign to filesToUpload
const items = e.dataTransfer.items; const items = e.dataTransfer.items;
fileListDiv.innerHTML = ''; // Clear old list display if (items && items.length > 0 && items[0].webkitGetAsEntry) {
uploadButton.style.display = 'none'; const loadingItem = document.createElement('div'); loadingItem.className = 'file-item loading'; loadingItem.textContent = 'Processing dropped items...'; fileList.innerHTML = ''; fileList.appendChild(loadingItem); uploadButton.style.display = 'none';
const loadingItem = document.createElement('div'); loadingItem.className = 'file-item loading'; loadingItem.textContent = 'Processing dropped items...'; fileListDiv.appendChild(loadingItem);
try { try {
let newFiles; const newFiles = await getAllFileEntries(items); if (newFiles.length === 0) throw new Error('No valid files found.');
if (items && items.length > 0 && items[0].webkitGetAsEntry) newFiles = await getAllFileEntries(items); filesToUpload = newFiles; updateFileList(); if (AUTO_UPLOAD) startUploads(); else uploadButton.style.display = 'block';
else newFiles = [...e.dataTransfer.files].filter(f => f.size >= 0); } catch (error) { console.error('Error processing dropped items:', error); loadingItem.textContent = `Error: ${error.message}`; loadingItem.style.color = 'var(--danger-color)'; setTimeout(() => loadingItem.remove(), 3000); filesToUpload = []; updateFileList(); }
if (newFiles.length === 0) { loadingItem.textContent = 'No files found.'; setTimeout(() => loadingItem.remove(), 2000); return; } finally { if (loadingItem.parentNode === fileList) loadingItem.remove(); }
filesToUpload = newFiles; updateFileList(); } else {
if (AUTO_UPLOAD) startUploads(); else if (filesToUpload.length > 0) uploadButton.style.display = 'block'; filesToUpload = [...e.dataTransfer.files].filter(f => f.size >= 0); updateFileList(); if (AUTO_UPLOAD) startUploads(); else if (filesToUpload.length > 0) uploadButton.style.display = 'block';
} catch (error) { console.error('Error handling drop:', error); loadingItem.textContent = `Error: ${error.message}`; loadingItem.style.color = 'var(--danger-color)'; setTimeout(() => {loadingItem.remove(); updateFileList();}, 3000); filesToUpload = []; }
finally { if (loadingItem.parentNode === fileListDiv && filesToUpload.length > 0) loadingItem.remove(); } // Remove loading only if files are shown
} }
}
function handleFilesFromInput(e) { function handleFilesFromInput(e) {
// Use original logic, just assign to filesToUpload
const input = e.target; const selectedFiles = [...input.files]; const input = e.target; const selectedFiles = [...input.files];
if (input.id === 'folderInput' && selectedFiles.length > 0 && !('webkitRelativePath' in selectedFiles[0])) { alert('Folder upload not fully supported.'); filesToUpload = []; } if (input.id === 'folderInput' && selectedFiles.length > 0 && !('webkitRelativePath' in selectedFiles[0])) { alert('Folder upload not fully supported.'); filesToUpload = []; }
else filesToUpload = selectedFiles.filter(f => f.size >= 0); else { filesToUpload = selectedFiles.filter(f => f.size >= 0); if (input.id === 'folderInput') console.log('Folder files:', filesToUpload.map(f => ({ name: f.name, path: f.webkitRelativePath }))); }
updateFileList(); updateFileList();
if (AUTO_UPLOAD && filesToUpload.length > 0) startUploads(); else if (filesToUpload.length > 0) uploadButton.style.display = 'block'; else uploadButton.style.display = 'none'; if (AUTO_UPLOAD && filesToUpload.length > 0) startUploads();
else if (filesToUpload.length > 0) uploadButton.style.display = 'block'; else uploadButton.style.display = 'none';
input.value = ''; input.value = '';
} }
function updateFileList() { // Original simple list display
// --- File List UI Update (Original Simple List) ---
function updateFileList() {
// Keep the original simpler list rendering
console.debug('Updating original file list UI for', filesToUpload.length, 'files'); console.debug('Updating original file list UI for', filesToUpload.length, 'files');
fileListDiv.innerHTML = ''; fileList.innerHTML = ''; // Clear current list
if (filesToUpload.length === 0) { if (filesToUpload.length === 0) {
fileListDiv.innerHTML = '<div class="file-item placeholder">No files selected.</div>'; fileList.innerHTML = '<div class="file-item placeholder">No files selected.</div>'; // Show placeholder in original div
uploadButton.style.display = 'none'; uploadButton.style.display = 'none';
return; return;
} }
filesToUpload.forEach(file => { filesToUpload.forEach(file => {
const fileItem = document.createElement('div'); const fileItem = document.createElement('div');
fileItem.className = 'file-item'; fileItem.className = 'file-item'; // Use original class
const displayName = file.webkitRelativePath || file.name; const displayName = file.webkitRelativePath || file.name;
fileItem.innerHTML = `📄 ${displayName} (${formatFileSize(file.size)})`; fileItem.innerHTML = `📄 ${displayName} (${formatFileSize(file.size)})`;
fileListDiv.appendChild(fileItem); fileList.appendChild(fileItem);
}); });
uploadButton.style.display = (!AUTO_UPLOAD && filesToUpload.length > 0) ? 'block' : 'none'; uploadButton.style.display = (!AUTO_UPLOAD && filesToUpload.length > 0) ? 'block' : 'none';
} }
// Add original styles for list items (if they were in the script, otherwise they are in styles.css)
const style = document.createElement('style'); const style = document.createElement('style');
style.textContent = ` style.textContent = `
.file-list { /* Original styles for the list container */ margin-top: 20px; display: flex; flex-direction: column; gap: 10px; } .file-item { background: var(--container-bg); padding: 10px 15px; border-radius: 5px; text-align: left; box-shadow: 0 2px 4px rgba(0,0,0,0.1); margin-bottom: 10px; }
.file-item { background: var(--container-bg); padding: 10px 15px; border-radius: 5px; text-align: left; box-shadow: 0 2px 4px rgba(0,0,0,0.1); } .file-item.placeholder { text-align: center; opacity: 0.6; box-shadow: none; background: transparent; }
.file-item.placeholder { text-align: center; opacity: 0.6; box-shadow: none; background: transparent; border: none; } /* Ensure placeholder has no border if list had one */ .file-item.loading { text-align: center; padding: 15px; background: var(--container-bg); border-radius: 5px; margin: 10px 0; animation: pulse 1.5s infinite; }
.file-item.loading { text-align: center; padding: 15px; background: var(--container-bg); border-radius: 5px; animation: pulse 1.5s infinite; }
@keyframes pulse { 0% { opacity: 0.6; } 50% { opacity: 1; } 100% { opacity: 0.6; } } @keyframes pulse { 0% { opacity: 0.6; } 50% { opacity: 1; } 100% { opacity: 0.6; } }
/* Styles for the separate progress bars, from original */ /* Ensure progress bar styles don't conflict if somehow left over */
#uploadProgress { margin: 20px 0; display: flex; flex-direction: column; gap: 15px; } .progress-container { transition: opacity 0.5s ease-out; }
.progress-container { background: var(--container-bg); padding: 15px; border-radius: 5px; box-shadow: 0 2px 4px rgba(0,0,0,0.1); transition: opacity 0.5s ease-out; }
.progress-label { text-align: left; margin-bottom: 8px; color: var(--text-color); font-size: 0.9rem; }
.progress-status { display: flex; justify-content: space-between; align-items: center; font-size: 0.8rem; color: var(--text-color); opacity: 0.8; margin-top: 8px; }
.progress-info { text-align: left; } .progress-details { text-align: right; }
.progress { background: var(--progress-bg); border-radius: 10px; height: 8px; overflow: hidden; margin-top: 8px; margin-bottom: 8px; }
.progress-bar { height: 100%; background: var(--highlight-color); transition: width 0.3s ease; }
`; `;
document.head.appendChild(style); document.head.appendChild(style);
// --- Upload Process (Original Structure) ---
async function startUploads() { async function startUploads() {
if (filesToUpload.length === 0) { Toastify({ text: "No files selected.", duration: 3000 }).showToast(); return; } if (filesToUpload.length === 0) { Toastify({ text: "No files selected.", duration: 3000 }).showToast(); return; }
uploadButton.disabled = true; uploadButton.textContent = 'Uploading...';
document.getElementById('uploadProgress').innerHTML = ''; // Clear old progress bars uploadButton.disabled = true;
uploadButton.textContent = 'Uploading...';
document.getElementById('uploadProgress').innerHTML = ''; // Clear the separate progress bar container
const batchId = generateBatchId(); const batchId = generateBatchId();
let successfulUploads = 0, failedUploads = 0; let successfulUploads = 0;
let failedUploads = 0;
// Process uploads sequentially (same loop as original)
for (const file of filesToUpload) { for (const file of filesToUpload) {
// --- NEW: Increment active upload counter --- const uploader = new FileUploader(file, batchId); // Create uploader instance
activeUploadCount++;
const uploader = new FileUploader(file, batchId);
try { try {
if (await uploader.start()) successfulUploads++; const success = await uploader.start(); // Start the upload
else failedUploads++; if (success) successfulUploads++; else failedUploads++;
} } catch (error) {
catch (error) { console.error(`Unhandled error during upload start for ${file.name}:`, error);
console.error(`Unhandled error for ${file.name}:`, error);
failedUploads++; failedUploads++;
} finally { // Progress bar might show error via uploader's catch block
// --- NEW: Decrement active upload counter ---
activeUploadCount--;
}
} }
} // End for...of loop
// --- Show Summary Toast (Original logic) ---
const totalFiles = filesToUpload.length; const totalFiles = filesToUpload.length;
let msg = `Uploaded ${successfulUploads} of ${totalFiles} files`; let toastMessage = `Uploaded ${successfulUploads} of ${totalFiles} files`;
let bg = successfulUploads === totalFiles ? "#4CAF50" : (successfulUploads > 0 ? "#ff9800" : "#f44336"); let toastBackground = successfulUploads === totalFiles ? "#4CAF50" : "#f44336";
Toastify({ text: msg, duration: 3000, gravity: "bottom", position: "right", style: { background: bg } }).showToast(); if (successfulUploads > 0 && failedUploads > 0) toastBackground = "#ff9800"; // Orange if partial success
Toastify({ text: toastMessage, duration: 3000, gravity: "bottom", position: "right", style: { background: toastBackground } }).showToast();
filesToUpload = []; updateFileList(); // --- Reset UI State (Original logic) ---
uploadButton.disabled = false; uploadButton.textContent = 'Upload Files'; uploadButton.style.display = 'none'; filesToUpload = []; // Clear the list of files
fileInput.value = ''; folderInput.value = ''; updateFileList(); // Clear the displayed file list
// Progress bars are removed automatically by the uploader on completion/error
uploadButton.disabled = false;
uploadButton.textContent = 'Upload Files';
uploadButton.style.display = 'none';
fileInput.value = '';
folderInput.value = '';
} }
function setTheme(theme) { document.documentElement.setAttribute('data-theme', theme); localStorage.setItem('theme', theme); const m=document.querySelectorAll('.theme-toggle-icon .moon'); const s=document.querySelectorAll('.theme-toggle-icon .sun'); if(theme==='dark'){m.forEach(p=>p.style.display='none');s.forEach(p=>p.style.display='');}else{m.forEach(p=>p.style.display='');s.forEach(p=>p.style.display='none');} } // --- Theme Management (Original) ---
function setTheme(theme) { document.documentElement.setAttribute('data-theme', theme); localStorage.setItem('theme', theme); const m=document.querySelectorAll('.moon'); const s=document.querySelectorAll('.sun'); if(theme==='dark'){m.forEach(p=>p.style.display='none');s.forEach(p=>p.style.display='');}else{m.forEach(p=>p.style.display='');s.forEach(p=>p.style.display='none');} }
function toggleTheme() { const c=document.documentElement.getAttribute('data-theme'); setTheme(c==='dark'?'light':'dark'); } function toggleTheme() { const c=document.documentElement.getAttribute('data-theme'); setTheme(c==='dark'?'light':'dark'); }
const savedTheme = localStorage.getItem('theme'); const prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches; setTheme(savedTheme || (prefersDark ? 'dark' : 'light')); const savedTheme = localStorage.getItem('theme') || (window.matchMedia('(prefers-color-scheme: dark)').matches ? 'dark' : 'light'); setTheme(savedTheme);
updateFileList(); // Initialize list on load
// --- NEW: beforeunload event listener --- // --- Initial Setup ---
window.addEventListener('beforeunload', function (e) { updateFileList(); // Initialize the simple file list display
if (activeUploadCount > 0) {
// Standard message for the confirmation dialog
const confirmationMessage = 'Uploads are in progress. If you leave this page, ongoing uploads will be interrupted. Are you sure you want to leave?';
// For modern browsers:
e.returnValue = confirmationMessage;
// For older browsers:
return confirmationMessage;
}
});
</script> </script>
<footer>
{{FOOTER_CONTENT}}
</footer>
</body> </body>
</html> </html>
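Distilled from the script above: the navigation guard is just a module-level counter that is bumped before each upload starts and released in a finally block, plus a beforeunload handler that only intervenes while the counter is non-zero. A minimal self-contained sketch (names are illustrative, not the exact ones used in the page):

let activeUploadCount = 0;

async function runGuardedUpload(uploader) {
  activeUploadCount++;
  try {
    return await uploader.start();
  } finally {
    activeUploadCount--; // always release the guard, even if the upload throws
  }
}

window.addEventListener('beforeunload', (e) => {
  if (activeUploadCount > 0) {
    const msg = 'Uploads are in progress. Leaving this page will interrupt them.';
    e.returnValue = msg; // modern browsers
    return msg;          // older browsers
  }
});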

View File

@@ -39,7 +39,7 @@ body {
display: flex; display: flex;
justify-content: center; justify-content: center;
padding-top: 2rem; padding-top: 2rem;
padding-bottom: 150px; padding-bottom: 80px;
color: var(--text-color); color: var(--text-color);
transition: background-color 0.3s ease, color 0.3s ease; transition: background-color 0.3s ease, color 0.3s ease;
} }
@@ -47,7 +47,7 @@ body {
.container { .container {
width: 100%; width: 100%;
max-width: 600px; max-width: 600px;
padding: 20px 20px 80px 20px; padding: 20px;
text-align: center; text-align: center;
position: relative; position: relative;
} }
@@ -364,19 +364,20 @@ button:disabled {
/* Footer Styles */ /* Footer Styles */
footer { footer {
position: fixed; position: fixed;
bottom: 0; bottom: 10px;
left: 0; left: 0;
right: 0; right: 0;
width: 100%; width: 100%;
max-width: 600px;
margin-left: auto;
margin-right: auto;
padding: 15px; padding: 15px;
text-align: center; text-align: center;
font-size: 0.85rem; font-size: 0.85rem;
color: var(--text-color); color: var(--text-color);
opacity: 0.9; opacity: 0.7;
border-top: 1px solid var(--border-color); border-top: 1px solid var(--border-color);
transition: background-color 0.3s ease, color 0.3s ease; transition: background-color 0.3s ease, color 0.3s ease;
background-color: var(--bg-color);
z-index: 100;
} }
footer a { footer a {

View File

@@ -136,11 +136,6 @@ app.get('/login.html', (req, res) => {
} }
}); });
// --- Health Check Endpoint ---
app.get('/health', (req, res) => {
res.status(200).json({ status: 'UP', message: 'Server is healthy' });
});
// --- Static File Serving --- // --- Static File Serving ---
// Serve static files (CSS, JS, assets) from the 'public' directory // Serve static files (CSS, JS, assets) from the 'public' directory
// Use express.static middleware, placed AFTER specific HTML routes // Use express.static middleware, placed AFTER specific HTML routes

View File

@@ -1,13 +1,39 @@
// File: src/config/index.js
require('dotenv').config(); require('dotenv').config();
const { validatePin } = require('../utils/security'); const { validatePin } = require('../utils/security');
const logger = require('../utils/logger'); const logger = require('../utils/logger'); // Use the default logger instance
const fs = require('fs'); const fs = require('fs');
const path = require('path'); const path = require('path');
// const { version } = require('../../package.json'); // version not currently used, can be removed or kept const { version } = require('../../package.json'); // Get version from package.json
// --- Environment Variables Reference --- // --- Environment Variables Reference ---
/* (Comments listing all ENV vars - keep as is) */ /*
STORAGE_TYPE - Storage backend ('local' or 's3', default: 'local')
// --- Local Storage ---
UPLOAD_DIR - Directory for uploads (Docker/production, if STORAGE_TYPE=local)
LOCAL_UPLOAD_DIR - Directory for uploads (local dev, fallback: './local_uploads', if STORAGE_TYPE=local)
// --- S3 Storage ---
S3_REGION - AWS Region for S3 Bucket (required if STORAGE_TYPE=s3)
S3_BUCKET_NAME - Name of the S3 Bucket (required if STORAGE_TYPE=s3)
S3_ACCESS_KEY_ID - S3 Access Key ID (required if STORAGE_TYPE=s3)
S3_SECRET_ACCESS_KEY - S3 Secret Access Key (required if STORAGE_TYPE=s3)
S3_ENDPOINT_URL - Custom S3 endpoint URL (optional, for non-AWS S3)
S3_FORCE_PATH_STYLE - Force path-style access (true/false, optional, for non-AWS S3)
// --- Common ---
PORT - Port for the server (default: 3000)
NODE_ENV - Node environment (default: 'development')
BASE_URL - Base URL for the app (default: http://localhost:${PORT})
MAX_FILE_SIZE - Max upload size in MB (default: 1024)
AUTO_UPLOAD - Enable auto-upload (true/false, default: false)
DUMBDROP_PIN - Security PIN for uploads (required for protected endpoints)
DUMBDROP_TITLE - Site title (default: 'DumbDrop')
APPRISE_URL - Apprise notification URL (optional)
APPRISE_MESSAGE - Notification message template (default provided)
APPRISE_SIZE_UNIT - Size unit for notifications (optional)
ALLOWED_EXTENSIONS - Comma-separated list of allowed file extensions (optional)
ALLOWED_IFRAME_ORIGINS - Comma-separated list of allowed iframe origins (optional)
CLIENT_MAX_RETRIES - Max retries for client chunk uploads (default: 5)
DEMO_MODE - Enable demo mode (true/false, default: false)
*/
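For orientation, the subset of these variables that an S3 deployment actually needs is small. The sketch below is illustrative only — placeholder values, expressed as a plain JavaScript object rather than a .env file:

// Placeholder values, not real credentials or bucket names.
const exampleS3Environment = {
  STORAGE_TYPE: 's3',
  S3_REGION: 'us-east-1',
  S3_BUCKET_NAME: 'my-dumbdrop-uploads',
  S3_ACCESS_KEY_ID: 'AKIAEXAMPLE',
  S3_SECRET_ACCESS_KEY: 'example-secret',
  // Only needed for S3-compatible services such as MinIO:
  S3_ENDPOINT_URL: 'https://s3.example.internal',
  S3_FORCE_PATH_STYLE: 'true',
};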
// --- Helper for clear configuration logging --- // --- Helper for clear configuration logging ---
const logConfig = (message, level = 'info') => { const logConfig = (message, level = 'info') => {
@@ -18,7 +44,7 @@ const logConfig = (message, level = 'info') => {
// --- Default configurations --- // --- Default configurations ---
const DEFAULT_PORT = 3000; const DEFAULT_PORT = 3000;
const DEFAULT_SITE_TITLE = 'DumbDrop'; const DEFAULT_SITE_TITLE = 'DumbDrop';
const DEFAULT_BASE_URL_PREFIX = 'http://localhost'; // Prefix, port added later const DEFAULT_BASE_URL = 'http://localhost:3000';
const DEFAULT_CLIENT_MAX_RETRIES = 5; const DEFAULT_CLIENT_MAX_RETRIES = 5;
const DEFAULT_STORAGE_TYPE = 'local'; const DEFAULT_STORAGE_TYPE = 'local';
@@ -28,62 +54,81 @@ const logAndReturn = (key, value, isDefault = false, sensitive = false) => {
return value; return value;
}; };
// --- Utility to detect if running in local development mode ---
// (This helps decide whether to *create* LOCAL_UPLOAD_DIR, but doesn't affect UPLOAD_DIR usage in Docker)
function isLocalDevelopment() { function isLocalDevelopment() {
return process.env.NODE_ENV !== 'production' && !process.env.UPLOAD_DIR; return process.env.NODE_ENV !== 'production' && !process.env.UPLOAD_DIR;
} }
/**
* Determine the local upload directory path.
* Only relevant when STORAGE_TYPE is 'local'.
* @returns {string|null} The path, or null if storage is not local.
*/
function determineLocalUploadDirectory() { function determineLocalUploadDirectory() {
if (process.env.STORAGE_TYPE && process.env.STORAGE_TYPE.toLowerCase() !== 'local') { if (process.env.STORAGE_TYPE && process.env.STORAGE_TYPE.toLowerCase() !== 'local') {
return null; // Not using local storage return null; // Not using local storage
} }
let uploadDir; let uploadDir;
if (process.env.UPLOAD_DIR) { if (process.env.UPLOAD_DIR) {
uploadDir = process.env.UPLOAD_DIR; uploadDir = process.env.UPLOAD_DIR;
// logger.info(`[Local Storage] Upload directory set from UPLOAD_DIR: ${uploadDir}`); // Logger might not be fully init here logger.info(`[Local Storage] Upload directory set from UPLOAD_DIR: ${uploadDir}`);
} else if (process.env.LOCAL_UPLOAD_DIR) { } else if (process.env.LOCAL_UPLOAD_DIR) {
uploadDir = process.env.LOCAL_UPLOAD_DIR; uploadDir = process.env.LOCAL_UPLOAD_DIR;
// logger.warn(`[Local Storage] Upload directory using LOCAL_UPLOAD_DIR fallback: ${uploadDir}`); logger.warn(`[Local Storage] Upload directory using LOCAL_UPLOAD_DIR fallback: ${uploadDir}`);
} else { } else {
uploadDir = './local_uploads'; uploadDir = './local_uploads'; // Default local path
// logger.warn(`[Local Storage] Upload directory using default fallback: ${uploadDir}`); logger.warn(`[Local Storage] Upload directory using default fallback: ${uploadDir}`);
} }
// logger.info(`[Local Storage] Final upload directory path: ${path.resolve(uploadDir)}`); logger.info(`[Local Storage] Final upload directory path: ${path.resolve(uploadDir)}`);
return path.resolve(uploadDir); // Always resolve to absolute return uploadDir;
} }
/**
* Ensure the local upload directory exists (if applicable and in local dev).
*/
function ensureLocalUploadDirExists(dirPath) { function ensureLocalUploadDirExists(dirPath) {
if (!dirPath || !isLocalDevelopment()) { if (!dirPath || !isLocalDevelopment()) {
return; return; // Only create if using local storage in a local dev environment
} }
try { try {
if (!fs.existsSync(dirPath)) { if (!fs.existsSync(dirPath)) {
fs.mkdirSync(dirPath, { recursive: true }); fs.mkdirSync(dirPath, { recursive: true });
console.log(`[INFO] CONFIGURATION: [Local Storage] Created local upload directory: ${dirPath}`); logger.info(`[Local Storage] Created local upload directory: ${dirPath}`);
} else { } else {
console.log(`[INFO] CONFIGURATION: [Local Storage] Local upload directory exists: ${dirPath}`); logger.info(`[Local Storage] Local upload directory exists: ${dirPath}`);
} }
// Basic writability check
fs.accessSync(dirPath, fs.constants.W_OK); fs.accessSync(dirPath, fs.constants.W_OK);
console.log(`[SUCCESS] CONFIGURATION: [Local Storage] Local upload directory is writable: ${dirPath}`); logger.success(`[Local Storage] Local upload directory is writable: ${dirPath}`);
} catch (err) { } catch (err) {
console.error(`[ERROR] CONFIGURATION: [Local Storage] Failed to create or access local upload directory: ${dirPath}. Error: ${err.message}`); logger.error(`[Local Storage] Failed to create or access local upload directory: ${dirPath}. Error: ${err.message}`);
throw new Error(`Upload directory "${dirPath}" is not accessible or writable.`); throw new Error(`Upload directory "${dirPath}" is not accessible or writable.`);
} }
} }
// --- Determine Storage Type ---
const storageTypeInput = process.env.STORAGE_TYPE || DEFAULT_STORAGE_TYPE; const storageTypeInput = process.env.STORAGE_TYPE || DEFAULT_STORAGE_TYPE;
const storageType = ['local', 's3'].includes(storageTypeInput.toLowerCase()) const storageType = ['local', 's3'].includes(storageTypeInput.toLowerCase())
? storageTypeInput.toLowerCase() ? storageTypeInput.toLowerCase()
: DEFAULT_STORAGE_TYPE; : DEFAULT_STORAGE_TYPE;
if (storageTypeInput.toLowerCase() !== storageType) { if (storageTypeInput.toLowerCase() !== storageType) {
console.warn(`[WARN] CONFIGURATION: Invalid STORAGE_TYPE "${storageTypeInput}", using default: "${storageType}"`); logger.warn(`Invalid STORAGE_TYPE "${storageTypeInput}", using default: "${storageType}"`);
} }
const resolvedLocalUploadDir = determineLocalUploadDirectory(); // Determine and potentially ensure local upload directory
if (storageType === 'local' && resolvedLocalUploadDir) { // Only ensure if actually using local storage const resolvedLocalUploadDir = determineLocalUploadDirectory(); // Will be null if STORAGE_TYPE is 's3'
if (resolvedLocalUploadDir) {
ensureLocalUploadDirExists(resolvedLocalUploadDir); ensureLocalUploadDirExists(resolvedLocalUploadDir);
} }
/**
* Function to parse the FOOTER_LINKS environment variable
* @param {string} linksString - The input string containing links
* @returns {Array} - An array of objects containing text and URL
*/
const parseFooterLinks = (linksString) => { const parseFooterLinks = (linksString) => {
if (!linksString) return []; if (!linksString) return [];
return linksString.split(',') return linksString.split(',')
@@ -91,45 +136,85 @@ const parseFooterLinks = (linksString) => {
const parts = linkPair.split('@').map(part => part.trim()); const parts = linkPair.split('@').map(part => part.trim());
if (parts.length === 2 && parts[0] && parts[1] && (parts[1].startsWith('http://') || parts[1].startsWith('https://'))) { if (parts.length === 2 && parts[0] && parts[1] && (parts[1].startsWith('http://') || parts[1].startsWith('https://'))) {
return { text: parts[0], url: parts[1] }; return { text: parts[0], url: parts[1] };
} } else {
// logger.warn(`Invalid format or URL in FOOTER_LINKS: "${linkPair}".`); // Logger might not be fully init logger.warn(`Invalid format or URL in FOOTER_LINKS: "${linkPair}". Expected "Text @ http(s)://URL". Skipping.`);
return null; return null;
}
}) })
.filter(link => link !== null); .filter(link => link !== null);
}; };
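Given the parsing rules above (comma-separated pairs, each in the form "Text @ http(s)://URL"), a quick worked example of input and output:

// Example call — the string mirrors what FOOTER_LINKS would contain.
const links = parseFooterLinks('Docs @ https://example.com/docs, Source @ https://github.com/example/repo');
// links === [
//   { text: 'Docs',   url: 'https://example.com/docs' },
//   { text: 'Source', url: 'https://github.com/example/repo' }
// ]
// Malformed pairs or non-http(s) URLs are skipped (with a warning in the newer variant).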
const port = parseInt(process.env.PORT || DEFAULT_PORT, 10); /**
const baseUrl = process.env.BASE_URL || `${DEFAULT_BASE_URL_PREFIX}:${port}/`; * Application configuration
* Loads and validates environment variables
*/
const config = { const config = {
port, // =====================
// Core Settings
// =====================
port: parseInt(process.env.PORT || DEFAULT_PORT, 10),
nodeEnv: process.env.NODE_ENV || 'development', nodeEnv: process.env.NODE_ENV || 'development',
baseUrl, baseUrl: process.env.BASE_URL || `${DEFAULT_BASE_URL.replace(/:3000$/, '')}:${process.env.PORT || DEFAULT_PORT}/`, // Ensure trailing slash
isDemoMode: process.env.DEMO_MODE === 'true', isDemoMode: process.env.DEMO_MODE === 'true',
storageType,
uploadDir: storageType === 'local' ? resolvedLocalUploadDir : path.resolve(process.env.UPLOAD_DIR || process.env.LOCAL_UPLOAD_DIR || './uploads'), // For S3, metadata dir. Fallback required. // =====================
// Storage Settings
// =====================
storageType: logAndReturn('STORAGE_TYPE', storageType, storageType === DEFAULT_STORAGE_TYPE),
/**
* The primary directory for storing files or metadata.
* If STORAGE_TYPE=local, this is where files are stored.
* If STORAGE_TYPE=s3, this is where '.metadata' lives.
* We default to the determined local path or a standard './uploads' if S3 is used.
*/
uploadDir: resolvedLocalUploadDir || path.resolve('./uploads'), // S3 needs a place for metadata too
// --- S3 Specific (only relevant if storageType is 's3') ---
s3Region: process.env.S3_REGION || null, s3Region: process.env.S3_REGION || null,
s3BucketName: process.env.S3_BUCKET_NAME || null, s3BucketName: process.env.S3_BUCKET_NAME || null,
s3AccessKeyId: process.env.S3_ACCESS_KEY_ID || null, s3AccessKeyId: process.env.S3_ACCESS_KEY_ID || null,
s3SecretAccessKey: process.env.S3_SECRET_ACCESS_KEY || null, s3SecretAccessKey: process.env.S3_SECRET_ACCESS_KEY || null,
s3EndpointUrl: process.env.S3_ENDPOINT_URL || null, s3EndpointUrl: process.env.S3_ENDPOINT_URL || null, // Default to null (AWS default endpoint)
s3ForcePathStyle: process.env.S3_FORCE_PATH_STYLE === 'true', s3ForcePathStyle: process.env.S3_FORCE_PATH_STYLE === 'true', // Default to false
// =====================
// Upload Behavior
// =====================
maxFileSize: (() => { maxFileSize: (() => {
const sizeInMB = parseInt(process.env.MAX_FILE_SIZE || '1024', 10); const sizeInMB = parseInt(process.env.MAX_FILE_SIZE || '1024', 10);
return (isNaN(sizeInMB) || sizeInMB <= 0 ? 1024 : sizeInMB) * 1024 * 1024; if (isNaN(sizeInMB) || sizeInMB <= 0) {
logger.error('Invalid MAX_FILE_SIZE, must be a positive number. Using 1024MB.');
return 1024 * 1024 * 1024;
}
return sizeInMB * 1024 * 1024; // Convert MB to bytes
})(), })(),
autoUpload: process.env.AUTO_UPLOAD === 'true', autoUpload: process.env.AUTO_UPLOAD === 'true',
allowedExtensions: process.env.ALLOWED_EXTENSIONS ? allowedExtensions: process.env.ALLOWED_EXTENSIONS ?
process.env.ALLOWED_EXTENSIONS.split(',').map(ext => ext.trim().toLowerCase().replace(/^\./, '.')).filter(Boolean) : process.env.ALLOWED_EXTENSIONS.split(',').map(ext => ext.trim().toLowerCase().replace(/^\./, '.')).filter(Boolean) : // Ensure dot prefix
null, null,
clientMaxRetries: (() => { clientMaxRetries: (() => {
const retries = parseInt(process.env.CLIENT_MAX_RETRIES || DEFAULT_CLIENT_MAX_RETRIES, 10); const envValue = process.env.CLIENT_MAX_RETRIES;
return (isNaN(retries) || retries < 0) ? DEFAULT_CLIENT_MAX_RETRIES : retries; const defaultValue = DEFAULT_CLIENT_MAX_RETRIES;
if (envValue === undefined) return logAndReturn('CLIENT_MAX_RETRIES', defaultValue, true);
const retries = parseInt(envValue, 10);
if (isNaN(retries) || retries < 0) {
logger.warn(`Invalid CLIENT_MAX_RETRIES value: "${envValue}". Using default: ${defaultValue}`);
return logAndReturn('CLIENT_MAX_RETRIES', defaultValue, true);
}
return logAndReturn('CLIENT_MAX_RETRIES', retries);
})(), })(),
pin: validatePin(process.env.DUMBDROP_PIN), // validatePin uses logger, ensure logger is available
// =====================
// Security
// =====================
pin: validatePin(process.env.DUMBDROP_PIN),
allowedIframeOrigins: process.env.ALLOWED_IFRAME_ORIGINS ? allowedIframeOrigins: process.env.ALLOWED_IFRAME_ORIGINS ?
process.env.ALLOWED_IFRAME_ORIGINS.split(',').map(origin => origin.trim()).filter(Boolean) : process.env.ALLOWED_IFRAME_ORIGINS.split(',').map(origin => origin.trim()).filter(Boolean) :
null, null,
// =====================
// UI & Notifications
// =====================
siteTitle: process.env.DUMBDROP_TITLE || DEFAULT_SITE_TITLE, siteTitle: process.env.DUMBDROP_TITLE || DEFAULT_SITE_TITLE,
footerLinks: parseFooterLinks(process.env.FOOTER_LINKS), footerLinks: parseFooterLinks(process.env.FOOTER_LINKS),
appriseUrl: process.env.APPRISE_URL || null, appriseUrl: process.env.APPRISE_URL || null,
@@ -137,86 +222,113 @@ const config = {
appriseSizeUnit: process.env.APPRISE_SIZE_UNIT || 'Auto', appriseSizeUnit: process.env.APPRISE_SIZE_UNIT || 'Auto',
}; };
// --- Log Configuration (after logger is confirmed available) --- // --- Log Sensitive & Conditional Config ---
// Moved logging to after config object is built, so logger is definitely available logConfig(`NODE_ENV: ${config.nodeEnv}`);
logger.info(`--- Configuration Start ---`); logConfig(`PORT: ${config.port}`);
logAndReturn('NODE_ENV', config.nodeEnv); logConfig(`BASE_URL: ${config.baseUrl}`);
logAndReturn('PORT', config.port); logConfig(`DEMO_MODE: ${config.isDemoMode}`);
logAndReturn('BASE_URL', config.baseUrl);
logAndReturn('DEMO_MODE', config.isDemoMode);
logAndReturn('STORAGE_TYPE', config.storageType);
if (config.storageType === 'local') { if (config.storageType === 'local') {
logAndReturn('Upload Directory (Local Storage)', config.uploadDir); logConfig(`Upload Directory (Local): ${config.uploadDir}`);
} else { } else {
logAndReturn('Metadata Directory (S3 Mode)', config.uploadDir); // Clarify role for S3 logConfig(`Metadata Directory (S3 Mode): ${config.uploadDir}`); // Clarify role in S3 mode
logAndReturn('S3_REGION', config.s3Region); logAndReturn('S3_REGION', config.s3Region);
logAndReturn('S3_BUCKET_NAME', config.s3BucketName); logAndReturn('S3_BUCKET_NAME', config.s3BucketName);
logAndReturn('S3_ACCESS_KEY_ID', config.s3AccessKeyId, false, true); logAndReturn('S3_ACCESS_KEY_ID', config.s3AccessKeyId, false, true); // Sensitive
logAndReturn('S3_SECRET_ACCESS_KEY', config.s3SecretAccessKey, false, true); logAndReturn('S3_SECRET_ACCESS_KEY', config.s3SecretAccessKey, false, true); // Sensitive
if (config.s3EndpointUrl) logAndReturn('S3_ENDPOINT_URL', config.s3EndpointUrl); if (config.s3EndpointUrl) logAndReturn('S3_ENDPOINT_URL', config.s3EndpointUrl);
logAndReturn('S3_FORCE_PATH_STYLE', config.s3ForcePathStyle); logAndReturn('S3_FORCE_PATH_STYLE', config.s3ForcePathStyle);
} }
logger.info(`Max File Size: ${config.maxFileSize / (1024 * 1024)}MB`); logConfig(`Max File Size: ${config.maxFileSize / (1024 * 1024)}MB`);
logger.info(`Auto Upload: ${config.autoUpload}`); logConfig(`Auto Upload: ${config.autoUpload}`);
if (config.allowedExtensions) logger.info(`Allowed Extensions: ${config.allowedExtensions.join(', ')}`); if (config.allowedExtensions) logConfig(`Allowed Extensions: ${config.allowedExtensions.join(', ')}`);
if (config.pin) logAndReturn('DUMBDROP_PIN', config.pin, false, true); if (config.pin) logAndReturn('DUMBDROP_PIN', config.pin, false, true); // Sensitive
if (config.allowedIframeOrigins) logger.info(`Allowed Iframe Origins: ${config.allowedIframeOrigins.join(', ')}`); if (config.allowedIframeOrigins) logConfig(`Allowed Iframe Origins: ${config.allowedIframeOrigins.join(', ')}`);
if (config.appriseUrl) logAndReturn('APPRISE_URL', config.appriseUrl); if (config.appriseUrl) logAndReturn('APPRISE_URL', config.appriseUrl);
logger.info(`Client Max Retries: ${config.clientMaxRetries}`);
logger.info(`--- Configuration End ---`);
// --- Configuration Validation ---
function validateConfig() { function validateConfig() {
const errors = []; const errors = [];
if (config.port <= 0 || config.port > 65535) errors.push('PORT must be a valid number between 1 and 65535');
if (config.maxFileSize <= 0) errors.push('MAX_FILE_SIZE must be greater than 0');
try {
new URL(config.baseUrl);
if (!config.baseUrl.endsWith('/')) errors.push('BASE_URL must end with a trailing slash ("/"). Current: ' + config.baseUrl);
} catch (err) { errors.push(`BASE_URL must be a valid URL. Error: ${err.message}`); }
if (config.storageType === 's3') { if (!config.port || config.port <= 0 || config.port > 65535) {
if (!config.s3Region) errors.push('S3_REGION is required for S3 storage'); errors.push('PORT must be a valid number between 1 and 65535');
if (!config.s3BucketName) errors.push('S3_BUCKET_NAME is required for S3 storage');
if (!config.s3AccessKeyId) errors.push('S3_ACCESS_KEY_ID is required for S3 storage');
if (!config.s3SecretAccessKey) errors.push('S3_SECRET_ACCESS_KEY is required for S3 storage');
if (config.s3ForcePathStyle && !config.s3EndpointUrl) {
logger.warn('[Config Validation] S3_FORCE_PATH_STYLE is true, but S3_ENDPOINT_URL is not set. This may not work as expected with default AWS endpoints.');
}
} else if (config.storageType === 'local') {
if (!config.uploadDir) errors.push('Upload directory (UPLOAD_DIR or LOCAL_UPLOAD_DIR) is required for local storage.');
else {
try { fs.accessSync(config.uploadDir, fs.constants.W_OK); }
catch (err) { errors.push(`Local upload directory "${config.uploadDir}" is not writable or does not exist.`); }
}
} }
// Metadata directory check (for both local file metadata and S3 upload state metadata) if (config.maxFileSize <= 0) {
if (!config.uploadDir) { // This condition might be redundant if local storage dir is already checked errors.push('MAX_FILE_SIZE must be greater than 0');
errors.push('A base directory (UPLOAD_DIR or LOCAL_UPLOAD_DIR) is required for metadata storage.'); }
} else {
try { // Validate BASE_URL format and trailing slash
const metadataBase = path.resolve(config.uploadDir); // Base for .metadata try {
if (!fs.existsSync(metadataBase)) { let url = new URL(config.baseUrl);
fs.mkdirSync(metadataBase, { recursive: true }); if (!config.baseUrl.endsWith('/')) {
logger.info(`[Config Validation] Created base directory for metadata: ${metadataBase}`); errors.push('BASE_URL must end with a trailing slash ("/"). Current value: ' + config.baseUrl);
// Attempt to fix it for runtime, but still report error
// config.baseUrl = config.baseUrl + '/';
} }
fs.accessSync(metadataBase, fs.constants.W_OK); // Check writability of the parent of .metadata
} catch (err) { } catch (err) {
errors.push(`Cannot access or create base directory for metadata at "${config.uploadDir}". Error: ${err.message}`); errors.push(`BASE_URL must be a valid URL. Error: ${err.message}`);
}
// Validate S3 configuration if STORAGE_TYPE is 's3'
if (config.storageType === 's3') {
if (!config.s3Region) errors.push('S3_REGION is required when STORAGE_TYPE is "s3"');
if (!config.s3BucketName) errors.push('S3_BUCKET_NAME is required when STORAGE_TYPE is "s3"');
if (!config.s3AccessKeyId) errors.push('S3_ACCESS_KEY_ID is required when STORAGE_TYPE is "s3"');
if (!config.s3SecretAccessKey) errors.push('S3_SECRET_ACCESS_KEY is required when STORAGE_TYPE is "s3"');
if (config.s3ForcePathStyle && !config.s3EndpointUrl) {
logger.warn('S3_FORCE_PATH_STYLE is true, but S3_ENDPOINT_URL is not set. This typically requires a custom endpoint.');
} }
} }
// Validate local storage dir only if type is local
if (config.storageType === 'local') {
if (!config.uploadDir) {
errors.push('Upload directory could not be determined for local storage.');
} else {
// Check existence and writability again (ensureLocalUploadDirExists might have failed)
try {
fs.accessSync(config.uploadDir, fs.constants.W_OK);
} catch (err) {
errors.push(`Local upload directory "${config.uploadDir}" is not writable or does not exist.`);
}
}
}
// Check metadata dir existence/writability regardless of storage type, as S3 uses it too
try {
const metadataParentDir = path.dirname(path.join(config.uploadDir, '.metadata'));
if (!fs.existsSync(metadataParentDir)) {
fs.mkdirSync(metadataParentDir, { recursive: true });
logger.info(`Created base directory for metadata: ${metadataParentDir}`);
}
fs.accessSync(metadataParentDir, fs.constants.W_OK);
} catch (err) {
errors.push(`Cannot access or create directory for metadata storage at "${config.uploadDir}". Error: ${err.message}`);
}
if (config.nodeEnv === 'production') {
if (!config.appriseUrl) {
logger.info('Apprise notifications disabled (APPRISE_URL not set).');
}
}
if (errors.length > 0) { if (errors.length > 0) {
logger.error('--- CONFIGURATION ERRORS ---'); logger.error('--- CONFIGURATION ERRORS ---');
errors.forEach(err => logger.error(`- ${err}`)); errors.forEach(err => logger.error(`- ${err}`));
logger.error('-----------------------------'); logger.error('-----------------------------');
throw new Error('Configuration validation failed. Please check environment variables and correct the issues.'); throw new Error('Configuration validation failed. Please check environment variables.');
}
logger.success('[Config Validation] Configuration validated successfully.');
} }
Object.freeze(config); // Freeze after logging and validation logger.success('Configuration validated successfully.');
}
module.exports = { config, validateConfig }; // Freeze configuration to prevent modifications after initial load
Object.freeze(config);
module.exports = {
config,
validateConfig
};
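As a usage sketch, the consumer (initialize() in app.js, shown later in this diff) loads the frozen config and runs validation once at startup, so any missing S3 setting or malformed BASE_URL throws before the server starts listening:

// Hypothetical call site, assuming this module lives at src/config/index.js.
const { config, validateConfig } = require('./config');

validateConfig();                 // throws on missing S3 settings, bad BASE_URL, etc.
console.log(config.storageType);  // 'local' or 's3'
config.port = 9999;               // silently ignored: the object is frozen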

View File

@@ -16,118 +16,157 @@ const { isDemoMode } = require('../utils/demoMode'); // Keep demo check for spec
// Initialize upload // Initialize upload
router.post('/init', async (req, res) => { router.post('/init', async (req, res) => {
if (isDemoMode() && config.storageType !== 's3') { // S3 demo might still hit the adapter for presigned URLs etc. // Note: Demo mode might bypass storage adapter logic via middleware or adapter factory itself.
// but local demo can be simpler. // If specific demo responses are needed here, keep the check.
const { filename = 'demo_file.txt', fileSize = 0 } = req.body; if (isDemoMode()) {
// Simplified Demo Response (assuming demoAdapter handles non-persistence)
const { filename = 'demo_file', fileSize = 0 } = req.body;
const demoUploadId = 'demo-' + Math.random().toString(36).substr(2, 9); const demoUploadId = 'demo-' + Math.random().toString(36).substr(2, 9);
logger.info(`[DEMO /init] Req for ${filename}, size ${fileSize}. ID ${demoUploadId}`); logger.info(`[DEMO] Init request for ${filename}, size ${fileSize}. Returning ID ${demoUploadId}`);
if (Number(fileSize) === 0) { if (Number(fileSize) === 0) {
logger.success(`[DEMO /init] Sim complete zero-byte: ${filename}`); logger.success(`[DEMO] Simulated completion of zero-byte file: ${filename}`);
// Potentially call demoAdapter.completeUpload or similar mock logic if needed
} }
return res.json({ uploadId: demoUploadId }); return res.json({ uploadId: demoUploadId });
} }
const { filename, fileSize } = req.body; const { filename, fileSize } = req.body;
const clientBatchId = req.headers['x-batch-id']; const clientBatchId = req.headers['x-batch-id']; // Adapter might use this
// --- Basic validations ---
if (!filename) return res.status(400).json({ error: 'Missing filename' }); if (!filename) return res.status(400).json({ error: 'Missing filename' });
if (fileSize === undefined || fileSize === null) return res.status(400).json({ error: 'Missing fileSize' }); if (fileSize === undefined || fileSize === null) return res.status(400).json({ error: 'Missing fileSize' });
const size = Number(fileSize); const size = Number(fileSize);
if (isNaN(size) || size < 0) return res.status(400).json({ error: 'Invalid file size' }); if (isNaN(size) || size < 0) return res.status(400).json({ error: 'Invalid file size' });
// --- Max File Size Check ---
if (size > config.maxFileSize) { if (size > config.maxFileSize) {
logger.warn(`Upload rejected: File size ${size} exceeds limit ${config.maxFileSize} for ${filename}`); logger.warn(`Upload rejected: File size ${size} exceeds limit ${config.maxFileSize}`);
return res.status(413).json({ error: 'File too large', limit: config.maxFileSize }); return res.status(413).json({ error: 'File too large', limit: config.maxFileSize });
} }
// --- Extension Check ---
// Perform extension check before handing off to adapter
if (config.allowedExtensions && config.allowedExtensions.length > 0) { if (config.allowedExtensions && config.allowedExtensions.length > 0) {
const fileExt = path.extname(filename).toLowerCase(); const fileExt = path.extname(filename).toLowerCase();
// Check if the extracted extension (including '.') is in the allowed list
if (!fileExt || !config.allowedExtensions.includes(fileExt)) { if (!fileExt || !config.allowedExtensions.includes(fileExt)) {
logger.warn(`Upload rejected: File type not allowed: ${filename} (Ext: ${fileExt || 'none'})`); logger.warn(`Upload rejected: File type not allowed: ${filename} (Extension: ${fileExt || 'none'})`);
return res.status(400).json({ error: 'File type not allowed', receivedExtension: fileExt || 'none' }); return res.status(400).json({ error: 'File type not allowed', receivedExtension: fileExt || 'none' });
} }
logger.debug(`File extension ${fileExt} allowed for ${filename}`); logger.debug(`File extension ${fileExt} allowed for ${filename}`);
} }
try { try {
// Delegate initialization to the storage adapter
const result = await storageAdapter.initUpload(filename, size, clientBatchId); const result = await storageAdapter.initUpload(filename, size, clientBatchId);
// Respond with the uploadId generated by the adapter/system
res.json({ uploadId: result.uploadId }); res.json({ uploadId: result.uploadId });
} catch (err) { } catch (err) {
logger.error(`[Route /init] Upload initialization failed for "${filename}": ${err.name} - ${err.message}`, err.stack); logger.error(`[Route /init] Upload initialization failed: ${err.message}`, err.stack);
// Map common errors
let statusCode = 500; let statusCode = 500;
let clientMessage = 'Failed to initialize upload.'; let clientMessage = 'Failed to initialize upload.';
if (err.message.includes('Invalid batch ID format')) { if (err.message.includes('Invalid batch ID format')) {
statusCode = 400; clientMessage = err.message; statusCode = 400;
} else if (err.name === 'NoSuchBucket' || err.name === 'AccessDenied') { clientMessage = err.message;
statusCode = 500; clientMessage = 'Storage configuration error.'; } else if (err.name === 'NoSuchBucket' || err.name === 'AccessDenied') { // S3 Specific
} else if (err.code === 'EACCES' || err.code === 'EPERM' || err.message.includes('writable') || err.message.includes('metadata directory')) { statusCode = 500; // Internal config error
statusCode = 500; clientMessage = 'Storage permission or access error.'; clientMessage = 'Storage configuration error.';
} else if (err.message.includes('S3 Client configuration failed')) { } else if (err.code === 'EACCES' || err.code === 'EPERM' || err.message.includes('writable')) { // Local Specific
statusCode = 503; clientMessage = 'Storage service unavailable or misconfigured.'; statusCode = 500;
clientMessage = 'Storage permission or access error.';
} }
res.status(statusCode).json({ error: clientMessage, details: config.nodeEnv === 'development' ? err.message : undefined }); // Add more specific error mapping based on adapter exceptions if needed
res.status(statusCode).json({ error: clientMessage, details: err.message }); // Include details only for logging/debugging
} }
}); });
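From the client's side, initialization is a single JSON POST. A hedged sketch — the /api/upload prefix is an assumption about where this router is mounted, which this diff does not show:

// Assumed mount point: app.use('/api/upload', router)
async function initUpload(file, batchId) {
  const res = await fetch('/api/upload/init', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'X-Batch-Id': batchId, // read server-side as req.headers['x-batch-id']
    },
    body: JSON.stringify({ filename: file.name, fileSize: file.size }),
  });
  if (!res.ok) throw new Error(`init failed: ${res.status}`);
  const { uploadId } = await res.json();
  return uploadId;
}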
// Upload chunk // Upload chunk
router.post('/chunk/:uploadId', express.raw({ router.post('/chunk/:uploadId', express.raw({
limit: config.maxFileSize + (10 * 1024 * 1024), limit: config.maxFileSize + (10 * 1024 * 1024), // Allow slightly larger raw body than max file size
type: 'application/octet-stream' type: 'application/octet-stream'
}), async (req, res) => { }), async (req, res) => {
const { uploadId } = req.params; const { uploadId } = req.params;
const chunk = req.body; const chunk = req.body;
const partNumber = parseInt(req.query.partNumber, 10); // Ensure partNumber is parsed const clientBatchId = req.headers['x-batch-id']; // May be useful for logging context
// ** CRITICAL FOR S3: Get Part Number from client **
// Client needs to send this, e.g., ?partNumber=1, ?partNumber=2, ...
const partNumber = parseInt(req.query.partNumber || '1', 10);
if (isNaN(partNumber) || partNumber < 1) { if (isNaN(partNumber) || partNumber < 1) {
logger.error(`[Route /chunk] Invalid partNumber for ${uploadId}: ${req.query.partNumber}`); logger.error(`[Route /chunk] Invalid partNumber received: ${req.query.partNumber}`);
return res.status(400).json({ error: 'Missing or invalid partNumber query parameter (must be >= 1)' }); return res.status(400).json({ error: 'Missing or invalid partNumber query parameter (must be >= 1)' });
} }
if (isDemoMode() && config.storageType !== 's3') { // Demo mode handling (simplified)
logger.debug(`[DEMO /chunk] Chunk for ${uploadId}, part ${partNumber}, size ${chunk?.length || 0}`); if (isDemoMode()) {
const demoProgress = Math.min(100, (Math.random() * 50) + (partNumber * 10) ); // Simulate increasing progress logger.debug(`[DEMO /chunk] Received chunk for ${uploadId}, part ${partNumber}, size ${chunk?.length || 0}`);
const completed = demoProgress >= 100; // Simulate progress - more sophisticated logic could go in a demoAdapter
if (completed) logger.info(`[DEMO /chunk] Sim completion for ${uploadId}`); const demoProgress = Math.min(100, Math.random() * 100);
return res.json({ bytesReceived: 0, progress: demoProgress, completed }); const completed = demoProgress > 95; // Simulate completion occasionally
if (completed) {
logger.info(`[DEMO /chunk] Simulated completion for ${uploadId}`);
}
return res.json({ bytesReceived: 0, progress: demoProgress, completed }); // Approximate response
} }
if (!chunk || chunk.length === 0) { if (!chunk || chunk.length === 0) {
logger.warn(`[Route /chunk] Empty chunk for ${uploadId}, part ${partNumber}`); logger.warn(`[Route /chunk] Received empty chunk for uploadId: ${uploadId}, part ${partNumber}`);
return res.status(400).json({ error: 'Empty chunk received' }); return res.status(400).json({ error: 'Empty chunk received' });
} }
try { try {
// Delegate chunk storage to the adapter
const result = await storageAdapter.storeChunk(uploadId, chunk, partNumber); const result = await storageAdapter.storeChunk(uploadId, chunk, partNumber);
// If the adapter indicates completion after storing this chunk, finalize the upload
if (result.completed) { if (result.completed) {
logger.info(`[Route /chunk] Part ${partNumber} for ${uploadId} triggered completion. Finalizing...`); logger.info(`[Route /chunk] Chunk ${partNumber} for ${uploadId} triggered completion. Finalizing...`);
try { try {
const completionResult = await storageAdapter.completeUpload(uploadId); const completionResult = await storageAdapter.completeUpload(uploadId);
logger.success(`[Route /chunk] Finalized upload ${uploadId}. Path/Key: ${completionResult.finalPath}`); logger.success(`[Route /chunk] Successfully finalized upload ${uploadId}. Final path/key: ${completionResult.finalPath}`);
// Send final success response (ensure progress is 100)
return res.json({ bytesReceived: result.bytesReceived, progress: 100, completed: true }); return res.json({ bytesReceived: result.bytesReceived, progress: 100, completed: true });
} catch (completionError) { } catch (completionError) {
logger.error(`[Route /chunk] CRITICAL: Failed to finalize ${uploadId} after part ${partNumber}: ${completionError.message}`, completionError.stack); logger.error(`[Route /chunk] CRITICAL: Failed to finalize completed upload ${uploadId} after storing chunk ${partNumber}: ${completionError.message}`, completionError.stack);
return res.status(500).json({ error: 'Upload chunk received, but failed to finalize.', details: config.nodeEnv === 'development' ? completionError.message : undefined }); // What to return to client? The chunk was stored, but completion failed.
// Return 500, indicating server-side issue during finalization.
return res.status(500).json({ error: 'Upload chunk received, but failed to finalize.', details: completionError.message });
} }
} else { } else {
// Chunk stored, but upload not yet complete, return progress
res.json({ bytesReceived: result.bytesReceived, progress: result.progress, completed: false }); res.json({ bytesReceived: result.bytesReceived, progress: result.progress, completed: false });
} }
} catch (err) { } catch (err) {
logger.error(`[Route /chunk] Chunk upload failed for ${uploadId}, part ${partNumber}: ${err.name} - ${err.message}`, err.stack); logger.error(`[Route /chunk] Chunk upload failed for ${uploadId}, part ${partNumber}: ${err.message}`, err.stack);
// Map common errors
let statusCode = 500; let statusCode = 500;
let clientMessage = 'Failed to process chunk.'; let clientMessage = 'Failed to process chunk.';
if (err.message.includes('Upload session not found') || err.name === 'NoSuchUpload' || err.code === 'ENOENT' || err.name === 'NotFound' || err.name === 'NoSuchKey') { if (err.message.includes('Upload session not found') || err.name === 'NoSuchUpload' || err.code === 'ENOENT') {
statusCode = 404; clientMessage = 'Upload session not found or already completed/aborted.'; statusCode = 404;
} else if (err.name === 'InvalidPart' || err.name === 'InvalidPartOrder') { clientMessage = 'Upload session not found or already completed/aborted.';
statusCode = 400; clientMessage = 'Invalid upload chunk sequence or data.'; } else if (err.name === 'InvalidPart' || err.name === 'InvalidPartOrder') { // S3 Specific
} else if (err.name === 'SlowDown' || (err.$metadata && err.$metadata.httpStatusCode === 503) ) { statusCode = 400;
statusCode = 429; clientMessage = 'Storage provider rate limit exceeded, please try again later.'; clientMessage = 'Invalid upload chunk sequence or data.';
} else if (err.code === 'EACCES' || err.code === 'EPERM' ) { } else if (err.name === 'SlowDown') { // S3 Throttling
statusCode = 500; clientMessage = 'Storage permission error while writing chunk.'; statusCode = 429;
clientMessage = 'Upload rate limit exceeded by storage provider, please try again later.';
} else if (err.code === 'EACCES' || err.code === 'EPERM' ) { // Local specific
statusCode = 500;
clientMessage = 'Storage permission error while writing chunk.';
} }
res.status(statusCode).json({ error: clientMessage, details: config.nodeEnv === 'development' ? err.message : undefined }); // Add more specific error mapping if needed
res.status(statusCode).json({ error: clientMessage, details: err.message });
} }
}); });
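The matching client call sends each chunk as a raw octet-stream body with a 1-based partNumber query parameter, which is exactly what the route above parses. A sketch under the same mount-point assumption as the /init example:

// Upload one slice of the file; partNumber starts at 1 and increments per chunk.
async function uploadChunk(uploadId, chunk, partNumber) {
  const res = await fetch(`/api/upload/chunk/${uploadId}?partNumber=${partNumber}`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/octet-stream' },
    body: chunk, // a Blob or ArrayBuffer slice of the file
  });
  if (!res.ok) throw new Error(`chunk ${partNumber} failed: ${res.status}`);
  return res.json(); // { bytesReceived, progress, completed }
}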
@@ -135,27 +174,27 @@ router.post('/chunk/:uploadId', express.raw({
router.post('/cancel/:uploadId', async (req, res) => { router.post('/cancel/:uploadId', async (req, res) => {
const { uploadId } = req.params; const { uploadId } = req.params;
if (isDemoMode() && config.storageType !== 's3') { if (isDemoMode()) {
logger.info(`[DEMO /cancel] Request for ${uploadId}`); logger.info(`[DEMO /cancel] Request received for ${uploadId}`);
// Call demoAdapter.abortUpload(uploadId) if it exists?
return res.json({ message: 'Upload cancelled (Demo)' }); return res.json({ message: 'Upload cancelled (Demo)' });
} }
logger.info(`[Route /cancel] Cancel request for upload: ${uploadId}`); logger.info(`[Route /cancel] Received cancel request for upload: ${uploadId}`);
try { try {
// Delegate cancellation to the storage adapter
await storageAdapter.abortUpload(uploadId); await storageAdapter.abortUpload(uploadId);
res.json({ message: 'Upload cancelled successfully or was already inactive.' }); res.json({ message: 'Upload cancelled successfully or was already inactive.' });
} catch (err) { } catch (err) {
logger.error(`[Route /cancel] Error during cancellation for ${uploadId}: ${err.name} - ${err.message}`, err.stack); // Abort errors are often less critical, log them but maybe return success anyway
// Generally, client doesn't need to know if server-side abort failed catastrophically, logger.error(`[Route /cancel] Error during upload cancellation for ${uploadId}: ${err.message}`, err.stack);
// as long as client stops sending. However, if it's a config error, 500 is appropriate. // Don't necessarily send 500, as the goal is just to stop the upload client-side
let statusCode = err.name === 'NoSuchUpload' ? 200 : 500; // If not found, it's like success for client // Maybe just return success but log the server-side issue?
let clientMessage = err.name === 'NoSuchUpload' ? 'Upload already inactive or not found.' : 'Failed to cancel upload on server.'; // Or return 500 if S3 abort fails significantly? Let's return 500 for now.
if (err.name === 'AccessDenied' || err.name === 'NoSuchBucket') { res.status(500).json({ error: 'Failed to cancel upload on server.', details: err.message });
clientMessage = 'Storage configuration error during cancel.';
statusCode = 500;
}
res.status(statusCode).json({ message: clientMessage, details: config.nodeEnv === 'development' ? err.message : undefined });
} }
}); });
module.exports = { router }; // Only export the router object // Export the router, remove previous function exports
module.exports = { router };

View File

@@ -1,110 +0,0 @@
#!/bin/sh
# Simple entrypoint script to manage user permissions and execute CMD
# Exit immediately if a command exits with a non-zero status.
set -e
# Function to log messages
log_info() {
echo "[INFO] Entrypoint: $1"
}
log_warning() {
echo "[WARN] Entrypoint: $1"
}
log_error() {
echo "[ERROR] Entrypoint: $1" >&2
}
log_info "Starting entrypoint script..."
# Default user/group/umask values
DEFAULT_UID=1000
DEFAULT_GID=1000
DEFAULT_UMASK=022
# Default upload directory if not set by user (should align with Dockerfile/compose)
DEFAULT_UPLOAD_DIR="/usr/src/app/local_uploads"
# Check if PUID or PGID environment variables are set by the user
if [ -z "${PUID}" ] && [ -z "${PGID}" ]; then
# --- Run as Root ---
log_info "PUID/PGID not set, running as root."
# Set umask (use UMASK env var if provided, otherwise default)
CURRENT_UMASK=${UMASK:-$DEFAULT_UMASK}
log_info "Setting umask to ${CURRENT_UMASK}"
umask "${CURRENT_UMASK}"
# Execute the command passed to the entrypoint as root
log_info "Executing command as root: $@"
exec "$@"
else
# --- Run as Custom User (nodeuser with adjusted UID/GID) ---
log_info "PUID/PGID set, configuring user 'nodeuser'..."
# Use provided UID/GID or default if only one is set
CURRENT_UID=${PUID:-$DEFAULT_UID}
CURRENT_GID=${PGID:-$DEFAULT_GID}
CURRENT_UMASK=${UMASK:-$DEFAULT_UMASK}
# Read the upload directory from ENV var or use default
TARGET_UPLOAD_DIR=${UPLOAD_DIR:-$DEFAULT_UPLOAD_DIR}
log_info "Target UID: ${CURRENT_UID}, GID: ${CURRENT_GID}, UMASK: ${CURRENT_UMASK}"
log_info "Target Upload Dir: ${TARGET_UPLOAD_DIR}"
# Check if user/group exists (should exist from Dockerfile)
if ! getent group nodeuser > /dev/null 2>&1; then
log_warning "Group 'nodeuser' not found, creating with GID ${CURRENT_GID}..."
addgroup -g "${CURRENT_GID}" nodeuser
else
EXISTING_GID=$(getent group nodeuser | cut -d: -f3)
if [ "${EXISTING_GID}" != "${CURRENT_GID}" ]; then
log_info "Updating 'nodeuser' group GID from ${EXISTING_GID} to ${CURRENT_GID}..."
groupmod -o -g "${CURRENT_GID}" nodeuser
fi
fi
if ! getent passwd nodeuser > /dev/null 2>&1; then
log_warning "User 'nodeuser' not found, creating with UID ${CURRENT_UID}..."
adduser -u "${CURRENT_UID}" -G nodeuser -s /bin/sh -D nodeuser
else
EXISTING_UID=$(getent passwd nodeuser | cut -d: -f3)
if [ "${EXISTING_UID}" != "${CURRENT_UID}" ]; then
log_info "Updating 'nodeuser' user UID from ${EXISTING_UID} to ${CURRENT_UID}..."
usermod -o -u "${CURRENT_UID}" nodeuser
fi
fi
# Ensure the base application directory ownership is correct
log_info "Ensuring ownership of /usr/src/app..."
chown -R nodeuser:nodeuser /usr/src/app || log_warning "Could not chown /usr/src/app"
# Ensure the target upload directory exists and has correct ownership
if [ -n "${TARGET_UPLOAD_DIR}" ]; then
if [ ! -d "${TARGET_UPLOAD_DIR}" ]; then
log_info "Creating directory: ${TARGET_UPLOAD_DIR}"
# Use -p to create parent directories as needed
mkdir -p "${TARGET_UPLOAD_DIR}"
# Chown after creation
chown nodeuser:nodeuser "${TARGET_UPLOAD_DIR}" || log_warning "Could not chown ${TARGET_UPLOAD_DIR}"
else
# Directory exists, ensure ownership
log_info "Ensuring ownership of ${TARGET_UPLOAD_DIR}..."
chown -R nodeuser:nodeuser "${TARGET_UPLOAD_DIR}" || log_warning "Could not chown ${TARGET_UPLOAD_DIR}"
fi
else
log_warning "UPLOAD_DIR variable is not set or is empty, skipping ownership check for upload directory."
fi
# Set the umask
log_info "Setting umask to ${CURRENT_UMASK}"
umask "${CURRENT_UMASK}"
# Execute the command passed to the entrypoint using su-exec to drop privileges
log_info "Executing command as nodeuser (${CURRENT_UID}:${CURRENT_GID}): $@"
exec su-exec nodeuser "$@"
fi
log_info "Entrypoint script finished (should not reach here if exec worked)."

View File

@@ -1,110 +1,124 @@
/** /**
* Server entry point that starts the HTTP server and manages connections. * Server entry point that starts the HTTP server and manages connections.
* Handles graceful shutdown, connection tracking, and server initialization. * Handles graceful shutdown, connection tracking, and server initialization.
* Provides development mode directory listing functionality.
*/ */
const { app, initialize, config } = require('./app'); // config is now also exported from app.js const { app, initialize, config } = require('./app');
const logger = require('./utils/logger'); const logger = require('./utils/logger');
const fs = require('fs'); // Keep for readdirSync if needed for local dev logging const fs = require('fs');
const { executeCleanup } = require('./utils/cleanup'); const { executeCleanup } = require('./utils/cleanup');
const { generatePWAManifest } = require('./scripts/pwa-manifest-generator'); const { generatePWAManifest } = require('./scripts/pwa-manifest-generator')
// Track open connections
const connections = new Set(); const connections = new Set();
/**
* Start the server and initialize the application
* @returns {Promise<http.Server>} The HTTP server instance
*/
async function startServer() { async function startServer() {
try { try {
await initialize(); // This will call validateConfig and load storage adapter via app.js // Initialize the application
await initialize();
// Start the server
const server = app.listen(config.port, () => { const server = app.listen(config.port, () => {
logger.info(`Server running at ${config.baseUrl}`); logger.info(`Server running at ${config.baseUrl}`);
// ** MODIFIED LOGGING ** logger.info(`Upload directory: ${config.uploadDisplayPath}`);
logger.info(`Active Storage Type: ${config.storageType}`);
logger.info(`Data Directory (for uploads or metadata): ${config.uploadDir}`);
if (config.nodeEnv === 'development' && config.storageType === 'local') { // List directory contents in development
if (config.nodeEnv === 'development') {
try { try {
// Only list contents if it's local storage and dev mode
if (fs.existsSync(config.uploadDir)) {
const files = fs.readdirSync(config.uploadDir); const files = fs.readdirSync(config.uploadDir);
logger.info(`Current local upload directory contents (${config.uploadDir}):`); logger.info(`Current directory contents (${files.length} files):`);
files.forEach(file => logger.info(`- ${file}`)); files.forEach(file => {
} else { logger.info(`- ${file}`);
logger.warn(`Local upload directory ${config.uploadDir} does not exist for listing.`); });
}
} catch (err) { } catch (err) {
logger.error(`Failed to list local upload directory contents: ${err.message}`); logger.error(`Failed to list directory contents: ${err.message}`);
} }
} }
}); });
// Dynamically generate PWA manifest into public folder
generatePWAManifest(); generatePWAManifest();
// Track new connections
server.on('connection', (connection) => { server.on('connection', (connection) => {
connections.add(connection); connections.add(connection);
connection.on('close', () => connections.delete(connection)); connection.on('close', () => {
connections.delete(connection);
});
}); });
let isShuttingDown = false; // Shutdown handler function
let isShuttingDown = false; // Prevent multiple shutdowns
const shutdownHandler = async (signal) => { const shutdownHandler = async (signal) => {
if (isShuttingDown) return; if (isShuttingDown) return;
isShuttingDown = true; isShuttingDown = true;
logger.info(`${signal} received. Shutting down gracefully...`); logger.info(`${signal} received. Shutting down gracefully...`);
// Start a shorter force shutdown timer
const forceShutdownTimer = setTimeout(() => { const forceShutdownTimer = setTimeout(() => {
logger.error('Force shutdown due to timeout.'); logger.error('Force shutdown initiated');
process.exit(1); process.exit(1);
}, 5000); // Increased slightly }, 3000); // 3 seconds maximum for total shutdown
try { try {
server.closeIdleConnections?.(); // Node 18+ // 1. Stop accepting new connections immediately
server.unref();
const closePromises = Array.from(connections).map(conn => new Promise(resolve => { // 2. Close all existing connections with a shorter timeout
conn.on('close', resolve); // Ensure close event resolves const connectionClosePromises = Array.from(connections).map(conn => {
conn.destroy(); // Actively destroy connections return new Promise(resolve => {
})); conn.end(() => {
connections.delete(conn);
await Promise.race([
Promise.all(closePromises),
new Promise(resolve => setTimeout(resolve, 2000)) // Max 2s for connections
]);
connections.clear();
await new Promise((resolve, reject) => {
server.close((err) => {
if (err) return reject(err);
logger.info('Server closed.');
resolve(); resolve();
}); });
}); });
});
await executeCleanup(1500); // Max 1.5s for cleanup // Wait for connections to close with a timeout
await Promise.race([
Promise.all(connectionClosePromises),
new Promise(resolve => setTimeout(resolve, 1000)) // 1 second timeout for connections
]);
// 3. Close the server
await new Promise((resolve) => server.close(resolve));
logger.info('Server closed');
// 4. Run cleanup tasks with a shorter timeout
await executeCleanup(1000); // 1 second timeout for cleanup
// Clear the force shutdown timer since we completed gracefully
clearTimeout(forceShutdownTimer); clearTimeout(forceShutdownTimer);
logger.info('Shutdown complete.'); process.exitCode = 0;
process.exit(0); process.exit(0); // Ensure immediate exit
} catch (error) { } catch (error) {
clearTimeout(forceShutdownTimer); // Clear timer on error too
logger.error(`Error during shutdown: ${error.message}`); logger.error(`Error during shutdown: ${error.message}`);
process.exit(1); process.exit(1);
} }
}; };
// Handle both SIGTERM and SIGINT
process.on('SIGTERM', () => shutdownHandler('SIGTERM')); process.on('SIGTERM', () => shutdownHandler('SIGTERM'));
process.on('SIGINT', () => shutdownHandler('SIGINT')); process.on('SIGINT', () => shutdownHandler('SIGINT'));
return server; return server;
} catch (error) { } catch (error) {
logger.error('Failed to start server:', error); logger.error('Failed to start server:', error);
// Ensure process exits if startServer itself fails before listener setup
process.exitCode = 1;
throw error; throw error;
} }
} }
// Only start the server if this file is run directly
if (require.main === module) { if (require.main === module) {
startServer().catch((error) => { startServer().catch((error) => {
// Error already logged by startServer logger.error('Server failed to start:', error);
// process.exitCode is already set if startServer throws process.exitCode = 1;
throw error;
}); });
} }

View File

@@ -3,7 +3,6 @@
* Handles file operations for storing files on AWS S3 or S3-compatible services. * Handles file operations for storing files on AWS S3 or S3-compatible services.
* Implements the storage interface expected by the application routes. * Implements the storage interface expected by the application routes.
* Uses local files in '.metadata' directory to track multipart upload progress. * Uses local files in '.metadata' directory to track multipart upload progress.
* Attempts to make top-level folder prefixes unique per batch if collisions occur.
*/ */
const { const {
@@ -15,25 +14,25 @@ const {
ListObjectsV2Command, ListObjectsV2Command,
GetObjectCommand, GetObjectCommand,
DeleteObjectCommand, DeleteObjectCommand,
PutObjectCommand, PutObjectCommand // For zero-byte files
HeadObjectCommand
} = require('@aws-sdk/client-s3'); } = require('@aws-sdk/client-s3');
const { getSignedUrl } = require("@aws-sdk/s3-request-presigner"); const { getSignedUrl } = require("@aws-sdk/s3-request-presigner");
const fs = require('fs').promises; const fs = require('fs').promises;
const fsSync = require('fs'); const fsSync = require('fs'); // For synchronous checks
const path = require('path'); const path = require('path');
const crypto = require('crypto'); const crypto = require('crypto');
const util = require('util'); // For detailed error logging
const { config } = require('../config'); const { config } = require('../config');
const logger = require('../utils/logger'); const logger = require('../utils/logger');
const { const {
sanitizePathPreserveDirs, sanitizePathPreserveDirs,
formatFileSize isValidBatchId,
formatFileSize // Keep for potential future use or consistency
} = require('../utils/fileUtils'); } = require('../utils/fileUtils');
const { sendNotification } = require('../services/notifications'); const { sendNotification } = require('../services/notifications'); // Needed for completion
const METADATA_DIR = path.join(config.uploadDir, '.metadata'); // --- Constants ---
const UPLOAD_TIMEOUT = 30 * 60 * 1000; // For local metadata cleanup const METADATA_DIR = path.join(config.uploadDir, '.metadata'); // Use local dir for metadata state
const UPLOAD_TIMEOUT = 30 * 60 * 1000; // 30 minutes timeout for stale *local* metadata cleanup
// --- S3 Client Initialization --- // --- S3 Client Initialization ---
let s3Client; let s3Client;
@@ -47,17 +46,27 @@ try {
...(config.s3EndpointUrl && { endpoint: config.s3EndpointUrl }), ...(config.s3EndpointUrl && { endpoint: config.s3EndpointUrl }),
...(config.s3ForcePathStyle && { forcePathStyle: true }), ...(config.s3ForcePathStyle && { forcePathStyle: true }),
}; };
if (s3ClientConfig.endpoint) logger.info(`[S3 Adapter] Configuring S3 client for endpoint: ${s3ClientConfig.endpoint}`);
if (s3ClientConfig.forcePathStyle) logger.info(`[S3 Adapter] Configuring S3 client with forcePathStyle: true`); if (s3ClientConfig.endpoint) {
logger.info(`[S3 Adapter] Configuring S3 client for endpoint: ${s3ClientConfig.endpoint}`);
}
if (s3ClientConfig.forcePathStyle) {
logger.info(`[S3 Adapter] Configuring S3 client with forcePathStyle: true`);
}
s3Client = new S3Client(s3ClientConfig); s3Client = new S3Client(s3ClientConfig);
logger.success('[S3 Adapter] S3 Client configured successfully.'); logger.success('[S3 Adapter] S3 Client configured successfully.');
} catch (error) { } catch (error) {
logger.error(`[S3 Adapter] Failed to configure S3 client: ${error.message}`); logger.error(`[S3 Adapter] Failed to configure S3 client: ${error.message}`);
// This is critical, throw an error to prevent the adapter from being used incorrectly
throw new Error('S3 Client configuration failed. Check S3 environment variables.'); throw new Error('S3 Client configuration failed. Check S3 environment variables.');
} }
// --- Metadata Helper Functions --- // --- Metadata Helper Functions (Adapted for S3, store state locally) ---
async function ensureMetadataDirExists() { async function ensureMetadataDirExists() {
// Reuse logic from local adapter - S3 adapter still needs local dir for state
try { try {
if (!fsSync.existsSync(METADATA_DIR)) { if (!fsSync.existsSync(METADATA_DIR)) {
await fs.mkdir(METADATA_DIR, { recursive: true }); await fs.mkdir(METADATA_DIR, { recursive: true });
@@ -70,6 +79,7 @@ async function ensureMetadataDirExists() {
  }
}

// Read/Write/Delete helpers are identical to the local adapter; they only manage local state files.
async function readUploadMetadata(uploadId) {
  if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
    logger.warn(`[S3 Adapter] Attempted to read metadata with invalid uploadId: ${uploadId}`);
@@ -78,11 +88,12 @@ async function readUploadMetadata(uploadId) {
  const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
  try {
    const data = await fs.readFile(metaFilePath, 'utf8');
    const metadata = JSON.parse(data);
    metadata.parts = metadata.parts || []; // Ensure 'parts' is always an array on read
    return metadata;
  } catch (err) {
    if (err.code === 'ENOENT') return null;
    logger.error(`[S3 Adapter] Error reading metadata for ${uploadId}: ${err.message}`);
    throw err;
  }
@@ -95,7 +106,7 @@ async function writeUploadMetadata(uploadId, metadata) {
  }
  const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
  metadata.lastActivity = Date.now();
  metadata.parts = metadata.parts || []; // Ensure parts array exists
  try {
    const tempMetaPath = `${metaFilePath}.${crypto.randomBytes(4).toString('hex')}.tmp`;
    await fs.writeFile(tempMetaPath, JSON.stringify(metadata, null, 2));
@@ -117,323 +128,465 @@ async function deleteUploadMetadata(uploadId) {
    await fs.unlink(metaFilePath);
    logger.debug(`[S3 Adapter] Deleted metadata file: ${uploadId}.meta`);
  } catch (err) {
    if (err.code !== 'ENOENT') {
      logger.error(`[S3 Adapter] Error deleting metadata file ${uploadId}.meta: ${err.message}`);
    }
  }
}

// Ensure the metadata directory exists on initialization.
ensureMetadataDirExists().catch(err => {
  logger.error(`[S3 Adapter] Initialization failed (metadata dir): ${err.message}`);
  process.exit(1); // Exit if we can't manage metadata state
});
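
// The prefix mappings below live only in process memory, so folder de-duplication is
// per-process and resets on restart; the S3 listing/HeadObject checks are the actual
// guard against overwriting existing objects.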

// --- S3 Object/Prefix Utilities ---
const batchS3PrefixMappings = new Map(); // In-memory: originalTopLevelFolder-batchId -> actualS3Prefix

async function s3ObjectExists(key) {
  logger.info(`[S3 Adapter] s3ObjectExists: Checking key "${key}"`);
  try {
    await s3Client.send(new HeadObjectCommand({ Bucket: config.s3BucketName, Key: key }));
    logger.info(`[S3 Adapter] s3ObjectExists: HeadObject success for key "${key}". Key EXISTS.`);
    return true;
  } catch (error) {
    // logger.error(`[S3 Adapter DEBUG] Full error object for HeadObject on key "${key}":\n`, util.inspect(error, { showHidden: false, depth: null, colors: false }));
    if (error.name === 'NotFound' || error.name === 'NoSuchKey' || (error.$metadata && error.$metadata.httpStatusCode === 404)) {
      logger.info(`[S3 Adapter] s3ObjectExists: Key "${key}" NOT found (404-like error).`);
      return false;
    }
    if (error.name === '403' || (error.$metadata && error.$metadata.httpStatusCode === 403)) {
      logger.warn(`[S3 Adapter] s3ObjectExists: Received 403 Forbidden for key "${key}". For unique key generation, treating this as 'likely does not exist'.`);
      return false;
    }
    logger.error(`[S3 Adapter] s3ObjectExists: Unhandled error type "${error.name}" for key "${key}": ${error.message}`);
    throw error;
  }
}
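
// Note: with restrictive IAM policies (no s3:ListBucket on the bucket), HeadObject on a
// missing key returns 403 rather than 404, which is why the 403 case above is treated as
// "probably absent" instead of a hard failure.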

async function getUniqueS3FolderPrefix(originalPrefix, batchId) {
  if (!originalPrefix || !originalPrefix.endsWith('/')) {
    logger.error("[S3 Adapter] getUniqueS3FolderPrefix: originalPrefix must be a non-empty string ending with '/'");
    return originalPrefix; // Or throw error
  }
  const prefixMapKey = `${originalPrefix}-${batchId}`;
  if (batchS3PrefixMappings.has(prefixMapKey)) {
    return batchS3PrefixMappings.get(prefixMapKey);
  }

  let currentPrefixToCheck = originalPrefix;
  let counter = 1;
  const baseName = originalPrefix.slice(0, -1); // "MyFolder" from "MyFolder/"

  async function prefixHasObjects(prefix) {
    try {
      const listResponse = await s3Client.send(new ListObjectsV2Command({
        Bucket: config.s3BucketName, Prefix: prefix, MaxKeys: 1
      }));
      return listResponse.KeyCount > 0;
    } catch (error) {
      logger.error(`[S3 Adapter] Error listing objects for prefix check "${prefix}": ${error.message}`);
      throw error; // Propagate error if listing fails for permission reasons etc.
    }
  }

  while (await prefixHasObjects(currentPrefixToCheck)) {
    logger.warn(`[S3 Adapter] S3 prefix "${currentPrefixToCheck}" is not empty. Generating unique prefix for base "${baseName}/".`);
    currentPrefixToCheck = `${baseName}-${counter}/`; // Use hyphen for suffix
    counter++;
  }

  if (currentPrefixToCheck !== originalPrefix) {
    logger.info(`[S3 Adapter] Using unique S3 folder prefix: "${currentPrefixToCheck}" for original "${originalPrefix}" in batch "${batchId}"`);
  }
  batchS3PrefixMappings.set(prefixMapKey, currentPrefixToCheck);
  return currentPrefixToCheck;
}

// --- Interface Implementation ---
/**
* Initializes an S3 multipart upload session (or direct put for zero-byte).
* @param {string} filename - Original filename/path from client.
* @param {number} fileSize - Total size of the file.
* @param {string} clientBatchId - Optional batch ID from client.
* @returns {Promise<{uploadId: string}>} Object containing the application's upload ID.
*/
async function initUpload(filename, fileSize, clientBatchId) {
  await ensureMetadataDirExists(); // Re-check before operation
  const size = Number(fileSize);
  const appUploadId = crypto.randomBytes(16).toString('hex'); // Our internal ID
  const batchId = clientBatchId || `${Date.now()}-${crypto.randomBytes(4).toString('hex').substring(0, 9)}`;

  // --- Path handling and sanitization for the S3 key ---
  const originalSanitizedFullpath = sanitizePathPreserveDirs(filename); // e.g., "MyFolder/image.jpg" or "image.jpg"
  let s3KeyStructure = path.normalize(originalSanitizedFullpath)
    .replace(/^(\.\.(\/|\\|$))+/, '').replace(/\\/g, '/').replace(/^\/+/, ''); // S3 keys must not start with "/"

  let effectiveBasePrefix = ""; // e.g., "MyFolder-1/" or ""
  const pathParts = s3KeyStructure.split('/');
  const isNestedPath = pathParts.length > 1;
  let relativePathInFolder = s3KeyStructure;

  if (isNestedPath) {
    const originalTopLevelFolder = pathParts[0] + '/'; // "MyFolder/"
    effectiveBasePrefix = await getUniqueS3FolderPrefix(originalTopLevelFolder, batchId);
    relativePathInFolder = pathParts.slice(1).join('/'); // "SubFolder/image.jpg" or "image.jpg"
    s3KeyStructure = effectiveBasePrefix + relativePathInFolder;
  }
  logger.info(`[S3 Adapter] Init: Original Full Path: "${originalSanitizedFullpath}", Effective Base Prefix: "${effectiveBasePrefix}", Relative Path In Folder: "${relativePathInFolder}"`);

  // Resolve collisions at the individual file level as well.
  let finalS3Key = s3KeyStructure;
  let fileCounter = 1;
  const fileDir = path.dirname(s3KeyStructure);
  const fileExt = path.extname(s3KeyStructure);
  const fileBaseName = path.basename(s3KeyStructure, fileExt);
  while (await s3ObjectExists(finalS3Key)) {
    logger.warn(`[S3 Adapter] S3 file key already exists: "${finalS3Key}". Generating unique file key.`);
    finalS3Key = (fileDir === "." ? "" : fileDir + "/") + `${fileBaseName}-${fileCounter}${fileExt}`; // Use hyphen
    fileCounter++;
  }
  if (finalS3Key !== s3KeyStructure) {
    logger.info(`[S3 Adapter] Using unique S3 file key: "${finalS3Key}"`);
  }

  // --- Handle zero-byte files: a direct PUT, no multipart session needed ---
  if (size === 0) {
    try {
      await s3Client.send(new PutObjectCommand({
        Bucket: config.s3BucketName, Key: finalS3Key, Body: '', ContentLength: 0
      }));
      logger.success(`[S3 Adapter] Completed zero-byte file: ${finalS3Key}`);
      sendNotification(originalSanitizedFullpath, 0, config);
      // Return an uploadId that will not collide with, or be processed as, a real multipart session.
      return { uploadId: `zero-byte-${appUploadId}` };
    } catch (putErr) {
      logger.error(`[S3 Adapter] Failed zero-byte PUT for ${finalS3Key}: ${putErr.message}`);
      throw putErr; // Let the route handler deal with it
    }
  }

  // --- Initiate multipart upload for non-zero files ---
  try {
    const createCommand = new CreateMultipartUploadCommand({ Bucket: config.s3BucketName, Key: finalS3Key });
    const response = await s3Client.send(createCommand);
    const s3UploadId = response.UploadId;
    if (!s3UploadId) throw new Error('S3 did not return an UploadId');
    logger.info(`[S3 Adapter] Multipart initiated for ${finalS3Key} (S3 UploadId: ${s3UploadId})`);

    // --- Create and persist local metadata, keyed by our appUploadId ---
    const metadata = {
      appUploadId, s3UploadId, s3Key: finalS3Key,
      originalFilename: originalSanitizedFullpath, // Full original path, used for notifications
      fileSize: size, bytesReceived: 0, parts: [], batchId,
      createdAt: Date.now(), lastActivity: Date.now()
    };
    await writeUploadMetadata(appUploadId, metadata);
    return { uploadId: appUploadId }; // Return OUR internal upload ID to the client
  } catch (err) {
    logger.error(`[S3 Adapter] Failed multipart init for ${finalS3Key}: ${err.message}`);
    throw err;
  }
}
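
// Typical flow (as the upload routes are expected to drive it): initUpload() -> storeChunk()
// once per chunk with partNumber 1..N -> completeUpload(); abortUpload() on cancel or failure.
// Note: S3 generally requires every part except the last to be at least 5 MiB (and allows at
// most 10,000 parts), so the client-side chunk size must respect that or
// CompleteMultipartUpload can fail with EntityTooSmall.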
/**
* Uploads a chunk as a part to S3.
* @param {string} appUploadId - The application's upload ID.
* @param {Buffer} chunk - The data chunk to store.
* @param {number} partNumber - The sequential number of this part (starting from 1).
* @returns {Promise<{bytesReceived: number, progress: number, completed: boolean}>} Upload status.
*/
async function storeChunk(appUploadId, chunk, partNumber) {
  const chunkSize = chunk.length;
  if (!chunkSize) throw new Error('Empty chunk received');
  if (partNumber < 1) throw new Error('PartNumber must be 1 or greater');

  const metadata = await readUploadMetadata(appUploadId);
  if (!metadata || !metadata.s3UploadId) {
    logger.warn(`[S3 Adapter] Metadata or S3 UploadId not found for chunk: ${appUploadId}. Upload may be complete, cancelled, or zero-byte.`);
    throw new Error('Upload session not found or already completed');
  }

  // S3 handles duplicate part uploads gracefully (last one wins); bytesReceived is only
  // tracked locally to approximate progress.
  if (metadata.bytesReceived >= metadata.fileSize && metadata.fileSize > 0) {
    logger.warn(`[S3 Adapter] Chunk received for already completed upload ${appUploadId}. Ignoring.`);
    return { bytesReceived: metadata.bytesReceived, progress: 100, completed: true };
  }

  try {
    const cmd = new UploadPartCommand({
      Bucket: config.s3BucketName, Key: metadata.s3Key, UploadId: metadata.s3UploadId,
      Body: chunk, PartNumber: partNumber, ContentLength: chunkSize
    });
    const response = await s3Client.send(cmd);
    const etag = response.ETag;
    if (!etag) throw new Error(`S3 did not return an ETag for Part ${partNumber}`);

    // Record the part and keep the list sorted in case parts arrive out of order.
    metadata.parts.push({ PartNumber: partNumber, ETag: etag });
    metadata.parts.sort((a, b) => a.PartNumber - b.PartNumber);
    metadata.bytesReceived = Math.min((metadata.bytesReceived || 0) + chunkSize, metadata.fileSize);
    await writeUploadMetadata(appUploadId, metadata);

    const progress = metadata.fileSize === 0 ? 100 : Math.min(Math.round((metadata.bytesReceived / metadata.fileSize) * 100), 100);
    const completed = metadata.bytesReceived >= metadata.fileSize;
    logger.debug(`[S3 Adapter] Part ${partNumber} for ${appUploadId} (Key: ${metadata.s3Key}). ETag: ${etag}. Progress: ~${progress}%. Completed: ${completed}`);
    return { bytesReceived: metadata.bytesReceived, progress, completed };
  } catch (err) {
    logger.error(`[S3 Adapter] Failed to upload part ${partNumber} for ${appUploadId} (Key: ${metadata.s3Key}): ${err.message}`);
    throw err;
  }
}
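
// CompleteMultipartUpload requires the full, ordered list of { PartNumber, ETag } pairs,
// which is exactly what storeChunk() accumulates in the local metadata file and what
// completeUpload() below hands back to S3.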
/**
* Finalizes a completed S3 multipart upload.
* @param {string} appUploadId - The application's upload ID.
* @returns {Promise<{filename: string, size: number, finalPath: string}>} Details of the completed file (finalPath is S3 Key).
*/
async function completeUpload(appUploadId) {
  const metadata = await readUploadMetadata(appUploadId);
  if (!metadata || !metadata.s3UploadId || !metadata.parts || metadata.parts.length === 0) {
    throw new Error('Upload completion failed: required metadata or parts list not found');
  }
  // Approximate sanity check based on locally tracked bytes.
  if (metadata.bytesReceived < metadata.fileSize) {
    logger.warn(`[S3 Adapter] Completing ${appUploadId} with only ${metadata.bytesReceived}/${metadata.fileSize} bytes tracked locally. Proceeding anyway.`);
  }

  try {
    const cmd = new CompleteMultipartUploadCommand({
      Bucket: config.s3BucketName, Key: metadata.s3Key, UploadId: metadata.s3UploadId,
      MultipartUpload: { Parts: metadata.parts }, // The collected { PartNumber, ETag } pairs
    });
    const response = await s3Client.send(cmd);
    logger.success(`[S3 Adapter] Finalized multipart upload: ${metadata.s3Key} (ETag: ${response.ETag})`);

    // Clean up local metadata only AFTER successful S3 completion, then notify.
    await deleteUploadMetadata(appUploadId);
    sendNotification(metadata.originalFilename, metadata.fileSize, config);
    return { filename: metadata.originalFilename, size: metadata.fileSize, finalPath: metadata.s3Key };
  } catch (err) {
    logger.error(`[S3 Adapter] Failed CompleteMultipartUpload for ${metadata.s3Key}: ${err.message}`);
    if (err.Code === 'NoSuchUpload' || err.name === 'NoSuchUpload') {
      logger.warn(`[S3 Adapter] NoSuchUpload on complete for ${appUploadId}. Assuming already completed or aborted.`);
      await deleteUploadMetadata(appUploadId).catch(() => {});
      // Check whether the final object exists anyway.
      try {
        await s3Client.send(new HeadObjectCommand({ Bucket: config.s3BucketName, Key: metadata.s3Key }));
        logger.info(`[S3 Adapter] Final object ${metadata.s3Key} exists after NoSuchUpload. Treating as completed.`);
        return { filename: metadata.originalFilename, size: metadata.fileSize, finalPath: metadata.s3Key };
      } catch (headErr) {
        throw new Error('Completion failed: upload session not found and final object does not exist.');
      }
    }
    throw err;
  }
}
/**
* Aborts an ongoing S3 multipart upload.
* @param {string} appUploadId - The application's upload ID.
* @returns {Promise<void>}
*/
async function abortUpload(appUploadId) {
  const metadata = await readUploadMetadata(appUploadId);
  if (!metadata || !metadata.s3UploadId) {
    logger.warn(`[S3 Adapter] Abort requested for non-existent or completed upload: ${appUploadId}`);
    await deleteUploadMetadata(appUploadId); // Clean up local metadata if any exists
    return;
  }

  try {
    await s3Client.send(new AbortMultipartUploadCommand({
      Bucket: config.s3BucketName, Key: metadata.s3Key, UploadId: metadata.s3UploadId,
    }));
    logger.info(`[S3 Adapter] Aborted multipart upload: ${appUploadId} (Key: ${metadata.s3Key})`);
  } catch (err) {
    if (err.name !== 'NoSuchUpload') {
      // Keep the local metadata if the abort failed; it may be retried or cleaned up manually.
      logger.error(`[S3 Adapter] Failed to abort multipart upload for ${metadata.s3Key}: ${err.message}`);
      throw err;
    }
    logger.warn(`[S3 Adapter] NoSuchUpload on abort for ${metadata.s3Key}. Already aborted or completed.`);
  }
  // Delete local metadata after a successful abort or a NoSuchUpload response.
  await deleteUploadMetadata(appUploadId);
}
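
// ListObjectsV2 returns at most 1000 keys per request, hence the continuation-token loop
// in listFiles() below.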
/**
* Lists files in the S3 bucket.
* @returns {Promise<Array<{filename: string, size: number, formattedSize: string, uploadDate: Date}>>} List of files.
*/
async function listFiles() {
  try {
    let isTruncated = true;
    let continuationToken;
    const allFiles = [];
    while (isTruncated) {
      const params = { Bucket: config.s3BucketName };
      if (continuationToken) params.ContinuationToken = continuationToken;
      const response = await s3Client.send(new ListObjectsV2Command(params));
      (response.Contents || []).forEach(item => allFiles.push({
        filename: item.Key, // The S3 key is the filename/path
        size: item.Size,
        formattedSize: formatFileSize(item.Size),
        uploadDate: item.LastModified
      }));
      isTruncated = response.IsTruncated;
      continuationToken = response.NextContinuationToken;
    }
    // Sort by date, newest first.
    allFiles.sort((a, b) => b.uploadDate.getTime() - a.uploadDate.getTime());
    return allFiles;
  } catch (err) {
    logger.error(`[S3 Adapter] Failed to list objects in bucket ${config.s3BucketName}: ${err.message}`);
    throw err;
  }
}
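
// The presigned URL generated below is signed locally (no extra S3 round trip) and grants
// read access to that one object to anyone holding it until it expires (one hour here),
// so the URL should be treated as a secret.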
/**
* Generates a presigned URL for downloading an S3 object.
* @param {string} s3Key - The S3 Key (filename/path) of the object.
* @returns {Promise<{type: string, value: string}>} Object indicating type ('url') and value (the presigned URL).
*/
async function getDownloadUrlOrStream(s3Key) {
  // The caller is expected to pass a sanitized key, but re-validate defensively.
  if (!s3Key || s3Key.includes('..') || s3Key.startsWith('/')) {
    logger.error(`[S3 Adapter] Invalid S3 key for download: ${s3Key}`);
    throw new Error('Invalid filename');
  }
  try {
    const cmd = new GetObjectCommand({ Bucket: config.s3BucketName, Key: s3Key });
    const url = await getSignedUrl(s3Client, cmd, { expiresIn: 3600 }); // 1 hour
    logger.info(`[S3 Adapter] Generated presigned URL for ${s3Key}`);
    return { type: 'url', value: url };
  } catch (err) {
    logger.error(`[S3 Adapter] Failed to generate presigned URL for ${s3Key}: ${err.message}`);
    if (err.name === 'NoSuchKey') throw new Error('File not found in S3');
    throw err; // Re-throw other S3 errors
  }
}
/**
* Deletes an object from the S3 bucket.
* @param {string} s3Key - The S3 Key (filename/path) of the object to delete.
* @returns {Promise<void>}
*/
async function deleteFile(s3Key) {
  if (!s3Key || s3Key.includes('..') || s3Key.startsWith('/')) {
    logger.error(`[S3 Adapter] Invalid S3 key for delete: ${s3Key}`);
    throw new Error('Invalid filename');
  }
  try {
    await s3Client.send(new DeleteObjectCommand({ Bucket: config.s3BucketName, Key: s3Key }));
    logger.info(`[S3 Adapter] Deleted object: ${s3Key}`);
  } catch (err) {
    // DeleteObject is idempotent, so NoSuchKey is not normally surfaced as an error.
    logger.error(`[S3 Adapter] Failed to delete object ${s3Key}: ${err.message}`);
    throw err;
  }
}
/**
* Cleans up stale *local* metadata files for S3 uploads.
* Relies on S3 Lifecycle Policies for actual S3 cleanup.
* @returns {Promise<void>}
*/
async function cleanupStale() {
  logger.info('[S3 Adapter] Running cleanup for stale local metadata files...');
  let cleaned = 0, checked = 0;
  try {
    await ensureMetadataDirExists();
    const files = await fs.readdir(METADATA_DIR);
    const now = Date.now();
    for (const file of files) {
      if (file.endsWith('.meta')) {
        checked++;
        const id = file.replace('.meta', '');
        const fp = path.join(METADATA_DIR, file);
        try {
          const meta = JSON.parse(await fs.readFile(fp, 'utf8'));
          // Check inactivity based on the local metadata timestamp. Only the LOCAL metadata
          // file is deleted here; the S3 multipart upload itself is NOT aborted.
          if (now - (meta.lastActivity || meta.createdAt || 0) > UPLOAD_TIMEOUT) {
            logger.warn(`[S3 Adapter] Stale local metadata: ${file}, S3 UploadId: ${meta.s3UploadId || 'N/A'}`);
            await deleteUploadMetadata(id);
            cleaned++;
          }
        } catch (e) {
          logger.error(`[S3 Adapter] Error reading/parsing local metadata ${fp}: ${e.message}. Deleting it.`);
          await fs.unlink(fp).catch(() => {});
        }
      } else if (file.endsWith('.tmp')) {
        // Clean up leftover temporary metadata files (same as the local adapter).
        const tmpP = path.join(METADATA_DIR, file);
        try {
          if (now - (await fs.stat(tmpP)).mtime.getTime() > UPLOAD_TIMEOUT) {
            logger.warn(`[S3 Adapter] Deleting stale temporary metadata file: ${file}`);
            await fs.unlink(tmpP);
          }
        } catch (e) {
          if (e.code !== 'ENOENT') logger.error(`[S3 Adapter] Error checking temp metadata file ${tmpP}: ${e.message}`);
        }
      }
    }
    if (checked > 0 || cleaned > 0) logger.info(`[S3 Adapter] Local metadata cleanup finished. Checked: ${checked}, cleaned: ${cleaned}.`);
    logger.warn(`[S3 Adapter] IMPORTANT: configure S3 Lifecycle Rules on bucket '${config.s3BucketName}' (or use provider tooling) to clean up incomplete multipart uploads; this adapter only removes its local tracking files.`);
  } catch (err) {
    if (err.code === 'ENOENT' && err.path === METADATA_DIR) {
      logger.warn('[S3 Adapter] Local metadata directory not found during cleanup scan.');
    } else {
      logger.error(`[S3 Adapter] Error during local metadata cleanup scan: ${err.message}`);
    }
  }
  // Keep the in-memory prefix map from growing without bound.
  if (batchS3PrefixMappings.size > 1000) {
    logger.warn(`[S3 Adapter] Clearing batchS3PrefixMappings (size: ${batchS3PrefixMappings.size}).`);
    batchS3PrefixMappings.clear();
  }
}
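
// For reference, a bucket lifecycle rule along these lines (rule ID and retention period are
// illustrative) aborts and removes incomplete multipart uploads automatically:
// {
//   "Rules": [{
//     "ID": "abort-incomplete-multipart-uploads",
//     "Status": "Enabled",
//     "Filter": {},
//     "AbortIncompleteMultipartUpload": { "DaysAfterInitiation": 7 }
//   }]
// }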

module.exports = {
  initUpload,
  storeChunk,
  completeUpload,
  abortUpload,
  listFiles,
  getDownloadUrlOrStream,
  deleteFile,
  cleanupStale
};